pmap.c revision 196769
1193323Sed/*-
2193323Sed * Copyright (c) 1991 Regents of the University of California.
3193323Sed * All rights reserved.
4193323Sed * Copyright (c) 1994 John S. Dyson
5193323Sed * All rights reserved.
6193323Sed * Copyright (c) 1994 David Greenman
7193323Sed * All rights reserved.
8193323Sed * Copyright (c) 2005-2008 Alan L. Cox <alc@cs.rice.edu>
9193323Sed * All rights reserved.
10193323Sed *
11193323Sed * This code is derived from software contributed to Berkeley by
12239462Sdim * the Systems Programming Group of the University of Utah Computer
13193323Sed * Science Department and William Jolitz of UUNET Technologies Inc.
14193323Sed *
15193323Sed * Redistribution and use in source and binary forms, with or without
16193323Sed * modification, are permitted provided that the following conditions
17193323Sed * are met:
18249423Sdim * 1. Redistributions of source code must retain the above copyright
19193323Sed *    notice, this list of conditions and the following disclaimer.
20193323Sed * 2. Redistributions in binary form must reproduce the above copyright
21202375Srdivacky *    notice, this list of conditions and the following disclaimer in the
22193323Sed *    documentation and/or other materials provided with the distribution.
23193323Sed * 3. All advertising materials mentioning features or use of this software
24239462Sdim *    must display the following acknowledgement:
25239462Sdim *	This product includes software developed by the University of
26239462Sdim *	California, Berkeley and its contributors.
27193323Sed * 4. Neither the name of the University nor the names of its contributors
28193323Sed *    may be used to endorse or promote products derived from this software
29218893Sdim *    without specific prior written permission.
30218893Sdim *
31218893Sdim * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
32218893Sdim * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
33218893Sdim * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
34193323Sed * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
35212904Sdim * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
36193323Sed * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
37193323Sed * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
38193323Sed * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
39239462Sdim * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
40193323Sed * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
41193323Sed * SUCH DAMAGE.
42193323Sed *
43193323Sed *	from:	@(#)pmap.c	7.7 (Berkeley)	5/12/91
44193323Sed */
45193323Sed/*-
46198090Srdivacky * Copyright (c) 2003 Networks Associates Technology, Inc.
47193323Sed * All rights reserved.
48198396Srdivacky *
49198396Srdivacky * This software was developed for the FreeBSD Project by Jake Burkholder,
50198396Srdivacky * Safeport Network Services, and Network Associates Laboratories, the
51198396Srdivacky * Security Research Division of Network Associates, Inc. under
52198396Srdivacky * DARPA/SPAWAR contract N66001-01-C-8035 ("CBOSS"), as part of the DARPA
53198396Srdivacky * CHATS research program.
54198396Srdivacky *
55198396Srdivacky * Redistribution and use in source and binary forms, with or without
56198396Srdivacky * modification, are permitted provided that the following conditions
57198396Srdivacky * are met:
58198396Srdivacky * 1. Redistributions of source code must retain the above copyright
59198396Srdivacky *    notice, this list of conditions and the following disclaimer.
60198396Srdivacky * 2. Redistributions in binary form must reproduce the above copyright
61198396Srdivacky *    notice, this list of conditions and the following disclaimer in the
62198396Srdivacky *    documentation and/or other materials provided with the distribution.
63198396Srdivacky *
64198396Srdivacky * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
65198396Srdivacky * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
66198396Srdivacky * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
67200581Srdivacky * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
68198396Srdivacky * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
69198396Srdivacky * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
70200581Srdivacky * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
71200581Srdivacky * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
72198396Srdivacky * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
73198396Srdivacky * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
74198396Srdivacky * SUCH DAMAGE.
75198396Srdivacky */
76202375Srdivacky
77243830Sdim#include <sys/cdefs.h>
78202375Srdivacky__FBSDID("$FreeBSD: head/sys/i386/i386/pmap.c 196769 2009-09-02 16:02:48Z jkim $");
79202375Srdivacky
80202375Srdivacky/*
81243830Sdim *	Manages physical address maps.
82 *
83 *	In addition to hardware address maps, this
84 *	module is called upon to provide software-use-only
85 *	maps which may or may not be stored in the same
86 *	form as hardware maps.  These pseudo-maps are
87 *	used to store intermediate results from copy
88 *	operations to and from address spaces.
89 *
90 *	Since the information managed by this module is
91 *	also stored by the logical address mapping module,
92 *	this module may throw away valid virtual-to-physical
93 *	mappings at almost any time.  However, invalidations
94 *	of virtual-to-physical mappings must be done as
95 *	requested.
96 *
97 *	In order to cope with hardware architectures which
98 *	make virtual-to-physical map invalidates expensive,
99 *	this module may delay invalidate or reduced protection
100 *	operations until such time as they are actually
101 *	necessary.  This module is given full information as
102 *	to which processors are currently using which maps,
103 *	and to when physical maps must be made correct.
104 */
105
106#include "opt_cpu.h"
107#include "opt_pmap.h"
108#include "opt_msgbuf.h"
109#include "opt_smp.h"
110#include "opt_xbox.h"
111
112#include <sys/param.h>
113#include <sys/systm.h>
114#include <sys/kernel.h>
115#include <sys/ktr.h>
116#include <sys/lock.h>
117#include <sys/malloc.h>
118#include <sys/mman.h>
119#include <sys/msgbuf.h>
120#include <sys/mutex.h>
121#include <sys/proc.h>
122#include <sys/sf_buf.h>
123#include <sys/sx.h>
124#include <sys/vmmeter.h>
125#include <sys/sched.h>
126#include <sys/sysctl.h>
127#ifdef SMP
128#include <sys/smp.h>
129#endif
130
131#include <vm/vm.h>
132#include <vm/vm_param.h>
133#include <vm/vm_kern.h>
134#include <vm/vm_page.h>
135#include <vm/vm_map.h>
136#include <vm/vm_object.h>
137#include <vm/vm_extern.h>
138#include <vm/vm_pageout.h>
139#include <vm/vm_pager.h>
140#include <vm/vm_reserv.h>
141#include <vm/uma.h>
142
143#include <machine/cpu.h>
144#include <machine/cputypes.h>
145#include <machine/md_var.h>
146#include <machine/pcb.h>
147#include <machine/specialreg.h>
148#ifdef SMP
149#include <machine/smp.h>
150#endif
151
152#ifdef XBOX
153#include <machine/xbox.h>
154#endif
155
156#if !defined(CPU_DISABLE_SSE) && defined(I686_CPU)
157#define CPU_ENABLE_SSE
158#endif
159
160#ifndef PMAP_SHPGPERPROC
161#define PMAP_SHPGPERPROC 200
162#endif
163
164#if !defined(DIAGNOSTIC)
165#define PMAP_INLINE	__gnu89_inline
166#else
167#define PMAP_INLINE
168#endif
169
170#define PV_STATS
171#ifdef PV_STATS
172#define PV_STAT(x)	do { x ; } while (0)
173#else
174#define PV_STAT(x)	do { } while (0)
175#endif
176
177#define	pa_index(pa)	((pa) >> PDRSHIFT)
178#define	pa_to_pvh(pa)	(&pv_table[pa_index(pa)])
179
180/*
181 * Get PDEs and PTEs for user/kernel address space
182 */
183#define	pmap_pde(m, v)	(&((m)->pm_pdir[(vm_offset_t)(v) >> PDRSHIFT]))
184#define pdir_pde(m, v) (m[(vm_offset_t)(v) >> PDRSHIFT])
185
186#define pmap_pde_v(pte)		((*(int *)pte & PG_V) != 0)
187#define pmap_pte_w(pte)		((*(int *)pte & PG_W) != 0)
188#define pmap_pte_m(pte)		((*(int *)pte & PG_M) != 0)
189#define pmap_pte_u(pte)		((*(int *)pte & PG_A) != 0)
190#define pmap_pte_v(pte)		((*(int *)pte & PG_V) != 0)
191
192#define pmap_pte_set_w(pte, v)	((v) ? atomic_set_int((u_int *)(pte), PG_W) : \
193    atomic_clear_int((u_int *)(pte), PG_W))
194#define pmap_pte_set_prot(pte, v) ((*(int *)pte &= ~PG_PROT), (*(int *)pte |= (v)))
195
196struct pmap kernel_pmap_store;
197LIST_HEAD(pmaplist, pmap);
198static struct pmaplist allpmaps;
199static struct mtx allpmaps_lock;
200
201vm_offset_t virtual_avail;	/* VA of first avail page (after kernel bss) */
202vm_offset_t virtual_end;	/* VA of last avail page (end of kernel AS) */
203int pgeflag = 0;		/* PG_G or-in */
204int pseflag = 0;		/* PG_PS or-in */
205
206static int nkpt;
207vm_offset_t kernel_vm_end;
208extern u_int32_t KERNend;
209
210#ifdef PAE
211pt_entry_t pg_nx;
212static uma_zone_t pdptzone;
213#endif
214
215static int pat_works = 0;		/* Is page attribute table sane? */
216
217SYSCTL_NODE(_vm, OID_AUTO, pmap, CTLFLAG_RD, 0, "VM/pmap parameters");
218
219static int pg_ps_enabled;
220SYSCTL_INT(_vm_pmap, OID_AUTO, pg_ps_enabled, CTLFLAG_RD, &pg_ps_enabled, 0,
221    "Are large page mappings enabled?");
222
223/*
224 * Data for the pv entry allocation mechanism
225 */
226static int pv_entry_count = 0, pv_entry_max = 0, pv_entry_high_water = 0;
227static struct md_page *pv_table;
228static int shpgperproc = PMAP_SHPGPERPROC;
229
230struct pv_chunk *pv_chunkbase;		/* KVA block for pv_chunks */
231int pv_maxchunks;			/* How many chunks we have KVA for */
232vm_offset_t pv_vafree;			/* freelist stored in the PTE */
233
234/*
235 * All those kernel PT submaps that BSD is so fond of
236 */
237struct sysmaps {
238	struct	mtx lock;
239	pt_entry_t *CMAP1;
240	pt_entry_t *CMAP2;
241	caddr_t	CADDR1;
242	caddr_t	CADDR2;
243};
244static struct sysmaps sysmaps_pcpu[MAXCPU];
245pt_entry_t *CMAP1 = 0;
246static pt_entry_t *CMAP3;
247caddr_t CADDR1 = 0, ptvmmap = 0;
248static caddr_t CADDR3;
249struct msgbuf *msgbufp = 0;
250
251/*
252 * Crashdump maps.
253 */
254static caddr_t crashdumpmap;
255
256static pt_entry_t *PMAP1 = 0, *PMAP2;
257static pt_entry_t *PADDR1 = 0, *PADDR2;
258#ifdef SMP
259static int PMAP1cpu;
260static int PMAP1changedcpu;
261SYSCTL_INT(_debug, OID_AUTO, PMAP1changedcpu, CTLFLAG_RD,
262	   &PMAP1changedcpu, 0,
263	   "Number of times pmap_pte_quick changed CPU with same PMAP1");
264#endif
265static int PMAP1changed;
266SYSCTL_INT(_debug, OID_AUTO, PMAP1changed, CTLFLAG_RD,
267	   &PMAP1changed, 0,
268	   "Number of times pmap_pte_quick changed PMAP1");
269static int PMAP1unchanged;
270SYSCTL_INT(_debug, OID_AUTO, PMAP1unchanged, CTLFLAG_RD,
271	   &PMAP1unchanged, 0,
272	   "Number of times pmap_pte_quick didn't change PMAP1");
273static struct mtx PMAP2mutex;
274
275static void	free_pv_entry(pmap_t pmap, pv_entry_t pv);
276static pv_entry_t get_pv_entry(pmap_t locked_pmap, int try);
277static void	pmap_pv_demote_pde(pmap_t pmap, vm_offset_t va, vm_paddr_t pa);
278static boolean_t pmap_pv_insert_pde(pmap_t pmap, vm_offset_t va, vm_paddr_t pa);
279static void	pmap_pv_promote_pde(pmap_t pmap, vm_offset_t va, vm_paddr_t pa);
280static void	pmap_pvh_free(struct md_page *pvh, pmap_t pmap, vm_offset_t va);
281static pv_entry_t pmap_pvh_remove(struct md_page *pvh, pmap_t pmap,
282		    vm_offset_t va);
283static int	pmap_pvh_wired_mappings(struct md_page *pvh, int count);
284
285static boolean_t pmap_demote_pde(pmap_t pmap, pd_entry_t *pde, vm_offset_t va);
286static boolean_t pmap_enter_pde(pmap_t pmap, vm_offset_t va, vm_page_t m,
287    vm_prot_t prot);
288static vm_page_t pmap_enter_quick_locked(pmap_t pmap, vm_offset_t va,
289    vm_page_t m, vm_prot_t prot, vm_page_t mpte);
290static void pmap_insert_pt_page(pmap_t pmap, vm_page_t mpte);
291static void pmap_fill_ptp(pt_entry_t *firstpte, pt_entry_t newpte);
292static boolean_t pmap_is_modified_pvh(struct md_page *pvh);
293static void pmap_kenter_attr(vm_offset_t va, vm_paddr_t pa, int mode);
294static vm_page_t pmap_lookup_pt_page(pmap_t pmap, vm_offset_t va);
295static void pmap_pde_attr(pd_entry_t *pde, int cache_bits);
296static void pmap_promote_pde(pmap_t pmap, pd_entry_t *pde, vm_offset_t va);
297static boolean_t pmap_protect_pde(pmap_t pmap, pd_entry_t *pde, vm_offset_t sva,
298    vm_prot_t prot);
299static void pmap_pte_attr(pt_entry_t *pte, int cache_bits);
300static void pmap_remove_pde(pmap_t pmap, pd_entry_t *pdq, vm_offset_t sva,
301    vm_page_t *free);
302static int pmap_remove_pte(pmap_t pmap, pt_entry_t *ptq, vm_offset_t sva,
303    vm_page_t *free);
304static void pmap_remove_pt_page(pmap_t pmap, vm_page_t mpte);
305static void pmap_remove_page(struct pmap *pmap, vm_offset_t va,
306    vm_page_t *free);
307static void pmap_remove_entry(struct pmap *pmap, vm_page_t m,
308					vm_offset_t va);
309static void pmap_insert_entry(pmap_t pmap, vm_offset_t va, vm_page_t m);
310static boolean_t pmap_try_insert_pv_entry(pmap_t pmap, vm_offset_t va,
311    vm_page_t m);
312
313static vm_page_t pmap_allocpte(pmap_t pmap, vm_offset_t va, int flags);
314
315static vm_page_t _pmap_allocpte(pmap_t pmap, unsigned ptepindex, int flags);
316static int _pmap_unwire_pte_hold(pmap_t pmap, vm_page_t m, vm_page_t *free);
317static pt_entry_t *pmap_pte_quick(pmap_t pmap, vm_offset_t va);
318static void pmap_pte_release(pt_entry_t *pte);
319static int pmap_unuse_pt(pmap_t, vm_offset_t, vm_page_t *);
320static vm_offset_t pmap_kmem_choose(vm_offset_t addr);
321#ifdef PAE
322static void *pmap_pdpt_allocf(uma_zone_t zone, int bytes, u_int8_t *flags, int wait);
323#endif
324
325CTASSERT(1 << PDESHIFT == sizeof(pd_entry_t));
326CTASSERT(1 << PTESHIFT == sizeof(pt_entry_t));
327
328/*
329 * If you get an error here, then you set KVA_PAGES wrong! See the
330 * description of KVA_PAGES in sys/i386/include/pmap.h. It must be
331 * multiple of 4 for a normal kernel, or a multiple of 8 for a PAE.
332 */
333CTASSERT(KERNBASE % (1 << 24) == 0);
334
335/*
336 * Move the kernel virtual free pointer to the next
337 * 4MB.  This is used to help improve performance
338 * by using a large (4MB) page for much of the kernel
339 * (.text, .data, .bss)
340 */
341static vm_offset_t
342pmap_kmem_choose(vm_offset_t addr)
343{
344	vm_offset_t newaddr = addr;
345
346#ifndef DISABLE_PSE
347	if (cpu_feature & CPUID_PSE)
348		newaddr = (addr + PDRMASK) & ~PDRMASK;
349#endif
350	return newaddr;
351}
352
353/*
354 *	Bootstrap the system enough to run with virtual memory.
355 *
356 *	On the i386 this is called after mapping has already been enabled
357 *	and just syncs the pmap module with what has already been done.
358 *	[We can't call it easily with mapping off since the kernel is not
359 *	mapped with PA == VA, hence we would have to relocate every address
360 *	from the linked base (virtual) address "KERNBASE" to the actual
361 *	(physical) address starting relative to 0]
362 */
363void
364pmap_bootstrap(vm_paddr_t firstaddr)
365{
366	vm_offset_t va;
367	pt_entry_t *pte, *unused;
368	struct sysmaps *sysmaps;
369	int i;
370
371	/*
372	 * XXX The calculation of virtual_avail is wrong. It's NKPT*PAGE_SIZE too
373	 * large. It should instead be correctly calculated in locore.s and
374	 * not based on 'first' (which is a physical address, not a virtual
375	 * address, for the start of unused physical memory). The kernel
376	 * page tables are NOT double mapped and thus should not be included
377	 * in this calculation.
378	 */
379	virtual_avail = (vm_offset_t) KERNBASE + firstaddr;
380	virtual_avail = pmap_kmem_choose(virtual_avail);
381
382	virtual_end = VM_MAX_KERNEL_ADDRESS;
383
384	/*
385	 * Initialize the kernel pmap (which is statically allocated).
386	 */
387	PMAP_LOCK_INIT(kernel_pmap);
388	kernel_pmap->pm_pdir = (pd_entry_t *) (KERNBASE + (u_int)IdlePTD);
389#ifdef PAE
390	kernel_pmap->pm_pdpt = (pdpt_entry_t *) (KERNBASE + (u_int)IdlePDPT);
391#endif
392	kernel_pmap->pm_root = NULL;
393	kernel_pmap->pm_active = -1;	/* don't allow deactivation */
394	TAILQ_INIT(&kernel_pmap->pm_pvchunk);
395	LIST_INIT(&allpmaps);
396	mtx_init(&allpmaps_lock, "allpmaps", NULL, MTX_SPIN);
397	mtx_lock_spin(&allpmaps_lock);
398	LIST_INSERT_HEAD(&allpmaps, kernel_pmap, pm_list);
399	mtx_unlock_spin(&allpmaps_lock);
400	nkpt = NKPT;
401
402	/*
403	 * Reserve some special page table entries/VA space for temporary
404	 * mapping of pages.
405	 */
406#define	SYSMAP(c, p, v, n)	\
407	v = (c)va; va += ((n)*PAGE_SIZE); p = pte; pte += (n);
408
409	va = virtual_avail;
410	pte = vtopte(va);
411
412	/*
413	 * CMAP1/CMAP2 are used for zeroing and copying pages.
414	 * CMAP3 is used for the idle process page zeroing.
415	 */
416	for (i = 0; i < MAXCPU; i++) {
417		sysmaps = &sysmaps_pcpu[i];
418		mtx_init(&sysmaps->lock, "SYSMAPS", NULL, MTX_DEF);
419		SYSMAP(caddr_t, sysmaps->CMAP1, sysmaps->CADDR1, 1)
420		SYSMAP(caddr_t, sysmaps->CMAP2, sysmaps->CADDR2, 1)
421	}
422	SYSMAP(caddr_t, CMAP1, CADDR1, 1)
423	SYSMAP(caddr_t, CMAP3, CADDR3, 1)
424	*CMAP3 = 0;
425
426	/*
427	 * Crashdump maps.
428	 */
429	SYSMAP(caddr_t, unused, crashdumpmap, MAXDUMPPGS)
430
431	/*
432	 * ptvmmap is used for reading arbitrary physical pages via /dev/mem.
433	 */
434	SYSMAP(caddr_t, unused, ptvmmap, 1)
435
436	/*
437	 * msgbufp is used to map the system message buffer.
438	 */
439	SYSMAP(struct msgbuf *, unused, msgbufp, atop(round_page(MSGBUF_SIZE)))
440
441	/*
442	 * ptemap is used for pmap_pte_quick
443	 */
444	SYSMAP(pt_entry_t *, PMAP1, PADDR1, 1);
445	SYSMAP(pt_entry_t *, PMAP2, PADDR2, 1);
446
447	mtx_init(&PMAP2mutex, "PMAP2", NULL, MTX_DEF);
448
449	virtual_avail = va;
450
451	*CMAP1 = 0;
452
453	/*
454	 * Leave in place an identity mapping (virt == phys) for the low 1 MB
455	 * physical memory region that is used by the ACPI wakeup code.  This
456	 * mapping must not have PG_G set.
457	 */
458#ifdef XBOX
459	/* FIXME: This is gross, but needed for the XBOX. Since we are in such
460	 * an early stadium, we cannot yet neatly map video memory ... :-(
461	 * Better fixes are very welcome! */
462	if (!arch_i386_is_xbox)
463#endif
464	for (i = 1; i < NKPT; i++)
465		PTD[i] = 0;
466
467	/* Initialize the PAT MSR if present. */
468	pmap_init_pat();
469
470	/* Turn on PG_G on kernel page(s) */
471	pmap_set_pg();
472}
473
474/*
475 * Setup the PAT MSR.
476 */
477void
478pmap_init_pat(void)
479{
480	uint64_t pat_msr;
481	char *sysenv;
482	static int pat_tested = 0;
483
484	/* Bail if this CPU doesn't implement PAT. */
485	if (!(cpu_feature & CPUID_PAT))
486		return;
487
488	/*
489	 * Due to some Intel errata, we can only safely use the lower 4
490	 * PAT entries.
491	 *
492	 *   Intel Pentium III Processor Specification Update
493	 * Errata E.27 (Upper Four PAT Entries Not Usable With Mode B
494	 * or Mode C Paging)
495	 *
496	 *   Intel Pentium IV  Processor Specification Update
497	 * Errata N46 (PAT Index MSB May Be Calculated Incorrectly)
498	 *
499	 * Some Apple Macs based on nVidia chipsets cannot enter ACPI mode
500	 * via SMI# when we use upper 4 PAT entries for unknown reason.
501	 */
502	if (!pat_tested) {
503		if (cpu_vendor_id != CPU_VENDOR_INTEL ||
504		    (I386_CPU_FAMILY(cpu_id) == 6 &&
505		    I386_CPU_MODEL(cpu_id) >= 0xe)) {
506			pat_works = 1;
507			sysenv = getenv("smbios.system.product");
508			if (sysenv != NULL) {
509				if (strncmp(sysenv, "MacBook5,1", 10) == 0 ||
510				    strncmp(sysenv, "MacBookPro5,5", 13) == 0 ||
511				    strncmp(sysenv, "Macmini3,1", 10) == 0)
512					pat_works = 0;
513				freeenv(sysenv);
514			}
515		}
516		pat_tested = 1;
517	}
518
519	/* Initialize default PAT entries. */
520	pat_msr = PAT_VALUE(0, PAT_WRITE_BACK) |
521	    PAT_VALUE(1, PAT_WRITE_THROUGH) |
522	    PAT_VALUE(2, PAT_UNCACHED) |
523	    PAT_VALUE(3, PAT_UNCACHEABLE) |
524	    PAT_VALUE(4, PAT_WRITE_BACK) |
525	    PAT_VALUE(5, PAT_WRITE_THROUGH) |
526	    PAT_VALUE(6, PAT_UNCACHED) |
527	    PAT_VALUE(7, PAT_UNCACHEABLE);
528
529	if (pat_works) {
530		/*
531		 * Leave the indices 0-3 at the default of WB, WT, UC, and UC-.
532		 * Program 4 and 5 as WP and WC.
533		 * Leave 6 and 7 as UC and UC-.
534		 */
535		pat_msr &= ~(PAT_MASK(4) | PAT_MASK(5));
536		pat_msr |= PAT_VALUE(4, PAT_WRITE_PROTECTED) |
537		    PAT_VALUE(5, PAT_WRITE_COMBINING);
538	} else {
539		/*
540		 * Just replace PAT Index 2 with WC instead of UC-.
541		 */
542		pat_msr &= ~PAT_MASK(2);
543		pat_msr |= PAT_VALUE(2, PAT_WRITE_COMBINING);
544	}
545	wrmsr(MSR_PAT, pat_msr);
546}
547
548/*
549 * Set PG_G on kernel pages.  Only the BSP calls this when SMP is turned on.
550 */
551void
552pmap_set_pg(void)
553{
554	pd_entry_t pdir;
555	pt_entry_t *pte;
556	vm_offset_t va, endva;
557	int i;
558
559	if (pgeflag == 0)
560		return;
561
562	i = KERNLOAD/NBPDR;
563	endva = KERNBASE + KERNend;
564
565	if (pseflag) {
566		va = KERNBASE + KERNLOAD;
567		while (va  < endva) {
568			pdir = kernel_pmap->pm_pdir[KPTDI+i];
569			pdir |= pgeflag;
570			kernel_pmap->pm_pdir[KPTDI+i] = PTD[KPTDI+i] = pdir;
571			invltlb();	/* Play it safe, invltlb() every time */
572			i++;
573			va += NBPDR;
574		}
575	} else {
576		va = (vm_offset_t)btext;
577		while (va < endva) {
578			pte = vtopte(va);
579			if (*pte)
580				*pte |= pgeflag;
581			invltlb();	/* Play it safe, invltlb() every time */
582			va += PAGE_SIZE;
583		}
584	}
585}
586
587/*
588 * Initialize a vm_page's machine-dependent fields.
589 */
590void
591pmap_page_init(vm_page_t m)
592{
593
594	TAILQ_INIT(&m->md.pv_list);
595	m->md.pat_mode = PAT_WRITE_BACK;
596}
597
598#ifdef PAE
599static void *
600pmap_pdpt_allocf(uma_zone_t zone, int bytes, u_int8_t *flags, int wait)
601{
602
603	/* Inform UMA that this allocator uses kernel_map/object. */
604	*flags = UMA_SLAB_KERNEL;
605	return ((void *)kmem_alloc_contig(kernel_map, bytes, wait, 0x0ULL,
606	    0xffffffffULL, 1, 0, VM_MEMATTR_DEFAULT));
607}
608#endif
609
610/*
611 * ABuse the pte nodes for unmapped kva to thread a kva freelist through.
612 * Requirements:
613 *  - Must deal with pages in order to ensure that none of the PG_* bits
614 *    are ever set, PG_V in particular.
615 *  - Assumes we can write to ptes without pte_store() atomic ops, even
616 *    on PAE systems.  This should be ok.
617 *  - Assumes nothing will ever test these addresses for 0 to indicate
618 *    no mapping instead of correctly checking PG_V.
619 *  - Assumes a vm_offset_t will fit in a pte (true for i386).
620 * Because PG_V is never set, there can be no mappings to invalidate.
621 */
622static vm_offset_t
623pmap_ptelist_alloc(vm_offset_t *head)
624{
625	pt_entry_t *pte;
626	vm_offset_t va;
627
628	va = *head;
629	if (va == 0)
630		return (va);	/* Out of memory */
631	pte = vtopte(va);
632	*head = *pte;
633	if (*head & PG_V)
634		panic("pmap_ptelist_alloc: va with PG_V set!");
635	*pte = 0;
636	return (va);
637}
638
639static void
640pmap_ptelist_free(vm_offset_t *head, vm_offset_t va)
641{
642	pt_entry_t *pte;
643
644	if (va & PG_V)
645		panic("pmap_ptelist_free: freeing va with PG_V set!");
646	pte = vtopte(va);
647	*pte = *head;		/* virtual! PG_V is 0 though */
648	*head = va;
649}
650
651static void
652pmap_ptelist_init(vm_offset_t *head, void *base, int npages)
653{
654	int i;
655	vm_offset_t va;
656
657	*head = 0;
658	for (i = npages - 1; i >= 0; i--) {
659		va = (vm_offset_t)base + i * PAGE_SIZE;
660		pmap_ptelist_free(head, va);
661	}
662}
663
664
665/*
666 *	Initialize the pmap module.
667 *	Called by vm_init, to initialize any structures that the pmap
668 *	system needs to map virtual memory.
669 */
670void
671pmap_init(void)
672{
673	vm_page_t mpte;
674	vm_size_t s;
675	int i, pv_npg;
676
677	/*
678	 * Initialize the vm page array entries for the kernel pmap's
679	 * page table pages.
680	 */
681	for (i = 0; i < nkpt; i++) {
682		mpte = PHYS_TO_VM_PAGE(PTD[i + KPTDI] & PG_FRAME);
683		KASSERT(mpte >= vm_page_array &&
684		    mpte < &vm_page_array[vm_page_array_size],
685		    ("pmap_init: page table page is out of range"));
686		mpte->pindex = i + KPTDI;
687		mpte->phys_addr = PTD[i + KPTDI] & PG_FRAME;
688	}
689
690	/*
691	 * Initialize the address space (zone) for the pv entries.  Set a
692	 * high water mark so that the system can recover from excessive
693	 * numbers of pv entries.
694	 */
695	TUNABLE_INT_FETCH("vm.pmap.shpgperproc", &shpgperproc);
696	pv_entry_max = shpgperproc * maxproc + cnt.v_page_count;
697	TUNABLE_INT_FETCH("vm.pmap.pv_entries", &pv_entry_max);
698	pv_entry_max = roundup(pv_entry_max, _NPCPV);
699	pv_entry_high_water = 9 * (pv_entry_max / 10);
700
701	/*
702	 * Are large page mappings enabled?
703	 */
704	TUNABLE_INT_FETCH("vm.pmap.pg_ps_enabled", &pg_ps_enabled);
705
706	/*
707	 * Calculate the size of the pv head table for superpages.
708	 */
709	for (i = 0; phys_avail[i + 1]; i += 2);
710	pv_npg = round_4mpage(phys_avail[(i - 2) + 1]) / NBPDR;
711
712	/*
713	 * Allocate memory for the pv head table for superpages.
714	 */
715	s = (vm_size_t)(pv_npg * sizeof(struct md_page));
716	s = round_page(s);
717	pv_table = (struct md_page *)kmem_alloc(kernel_map, s);
718	for (i = 0; i < pv_npg; i++)
719		TAILQ_INIT(&pv_table[i].pv_list);
720
721	pv_maxchunks = MAX(pv_entry_max / _NPCPV, maxproc);
722	pv_chunkbase = (struct pv_chunk *)kmem_alloc_nofault(kernel_map,
723	    PAGE_SIZE * pv_maxchunks);
724	if (pv_chunkbase == NULL)
725		panic("pmap_init: not enough kvm for pv chunks");
726	pmap_ptelist_init(&pv_vafree, pv_chunkbase, pv_maxchunks);
727#ifdef PAE
728	pdptzone = uma_zcreate("PDPT", NPGPTD * sizeof(pdpt_entry_t), NULL,
729	    NULL, NULL, NULL, (NPGPTD * sizeof(pdpt_entry_t)) - 1,
730	    UMA_ZONE_VM | UMA_ZONE_NOFREE);
731	uma_zone_set_allocf(pdptzone, pmap_pdpt_allocf);
732#endif
733}
734
735
736SYSCTL_INT(_vm_pmap, OID_AUTO, pv_entry_max, CTLFLAG_RD, &pv_entry_max, 0,
737	"Max number of PV entries");
738SYSCTL_INT(_vm_pmap, OID_AUTO, shpgperproc, CTLFLAG_RD, &shpgperproc, 0,
739	"Page share factor per proc");
740
741SYSCTL_NODE(_vm_pmap, OID_AUTO, pde, CTLFLAG_RD, 0,
742    "2/4MB page mapping counters");
743
744static u_long pmap_pde_demotions;
745SYSCTL_ULONG(_vm_pmap_pde, OID_AUTO, demotions, CTLFLAG_RD,
746    &pmap_pde_demotions, 0, "2/4MB page demotions");
747
748static u_long pmap_pde_mappings;
749SYSCTL_ULONG(_vm_pmap_pde, OID_AUTO, mappings, CTLFLAG_RD,
750    &pmap_pde_mappings, 0, "2/4MB page mappings");
751
752static u_long pmap_pde_p_failures;
753SYSCTL_ULONG(_vm_pmap_pde, OID_AUTO, p_failures, CTLFLAG_RD,
754    &pmap_pde_p_failures, 0, "2/4MB page promotion failures");
755
756static u_long pmap_pde_promotions;
757SYSCTL_ULONG(_vm_pmap_pde, OID_AUTO, promotions, CTLFLAG_RD,
758    &pmap_pde_promotions, 0, "2/4MB page promotions");
759
760/***************************************************
761 * Low level helper routines.....
762 ***************************************************/
763
764/*
765 * Determine the appropriate bits to set in a PTE or PDE for a specified
766 * caching mode.
767 */
768int
769pmap_cache_bits(int mode, boolean_t is_pde)
770{
771	int pat_flag, pat_index, cache_bits;
772
773	/* The PAT bit is different for PTE's and PDE's. */
774	pat_flag = is_pde ? PG_PDE_PAT : PG_PTE_PAT;
775
776	/* If we don't support PAT, map extended modes to older ones. */
777	if (!(cpu_feature & CPUID_PAT)) {
778		switch (mode) {
779		case PAT_UNCACHEABLE:
780		case PAT_WRITE_THROUGH:
781		case PAT_WRITE_BACK:
782			break;
783		case PAT_UNCACHED:
784		case PAT_WRITE_COMBINING:
785		case PAT_WRITE_PROTECTED:
786			mode = PAT_UNCACHEABLE;
787			break;
788		}
789	}
790
791	/* Map the caching mode to a PAT index. */
792	if (pat_works) {
793		switch (mode) {
794		case PAT_UNCACHEABLE:
795			pat_index = 3;
796			break;
797		case PAT_WRITE_THROUGH:
798			pat_index = 1;
799			break;
800		case PAT_WRITE_BACK:
801			pat_index = 0;
802			break;
803		case PAT_UNCACHED:
804			pat_index = 2;
805			break;
806		case PAT_WRITE_COMBINING:
807			pat_index = 5;
808			break;
809		case PAT_WRITE_PROTECTED:
810			pat_index = 4;
811			break;
812		default:
813			panic("Unknown caching mode %d\n", mode);
814		}
815	} else {
816		switch (mode) {
817		case PAT_UNCACHED:
818		case PAT_UNCACHEABLE:
819		case PAT_WRITE_PROTECTED:
820			pat_index = 3;
821			break;
822		case PAT_WRITE_THROUGH:
823			pat_index = 1;
824			break;
825		case PAT_WRITE_BACK:
826			pat_index = 0;
827			break;
828		case PAT_WRITE_COMBINING:
829			pat_index = 2;
830			break;
831		default:
832			panic("Unknown caching mode %d\n", mode);
833		}
834	}
835
836	/* Map the 3-bit index value into the PAT, PCD, and PWT bits. */
837	cache_bits = 0;
838	if (pat_index & 0x4)
839		cache_bits |= pat_flag;
840	if (pat_index & 0x2)
841		cache_bits |= PG_NC_PCD;
842	if (pat_index & 0x1)
843		cache_bits |= PG_NC_PWT;
844	return (cache_bits);
845}
846#ifdef SMP
847/*
848 * For SMP, these functions have to use the IPI mechanism for coherence.
849 *
850 * N.B.: Before calling any of the following TLB invalidation functions,
851 * the calling processor must ensure that all stores updating a non-
852 * kernel page table are globally performed.  Otherwise, another
853 * processor could cache an old, pre-update entry without being
854 * invalidated.  This can happen one of two ways: (1) The pmap becomes
855 * active on another processor after its pm_active field is checked by
856 * one of the following functions but before a store updating the page
857 * table is globally performed. (2) The pmap becomes active on another
858 * processor before its pm_active field is checked but due to
859 * speculative loads one of the following functions stills reads the
860 * pmap as inactive on the other processor.
861 *
862 * The kernel page table is exempt because its pm_active field is
863 * immutable.  The kernel page table is always active on every
864 * processor.
865 */
866void
867pmap_invalidate_page(pmap_t pmap, vm_offset_t va)
868{
869	u_int cpumask;
870	u_int other_cpus;
871
872	sched_pin();
873	if (pmap == kernel_pmap || pmap->pm_active == all_cpus) {
874		invlpg(va);
875		smp_invlpg(va);
876	} else {
877		cpumask = PCPU_GET(cpumask);
878		other_cpus = PCPU_GET(other_cpus);
879		if (pmap->pm_active & cpumask)
880			invlpg(va);
881		if (pmap->pm_active & other_cpus)
882			smp_masked_invlpg(pmap->pm_active & other_cpus, va);
883	}
884	sched_unpin();
885}
886
887void
888pmap_invalidate_range(pmap_t pmap, vm_offset_t sva, vm_offset_t eva)
889{
890	u_int cpumask;
891	u_int other_cpus;
892	vm_offset_t addr;
893
894	sched_pin();
895	if (pmap == kernel_pmap || pmap->pm_active == all_cpus) {
896		for (addr = sva; addr < eva; addr += PAGE_SIZE)
897			invlpg(addr);
898		smp_invlpg_range(sva, eva);
899	} else {
900		cpumask = PCPU_GET(cpumask);
901		other_cpus = PCPU_GET(other_cpus);
902		if (pmap->pm_active & cpumask)
903			for (addr = sva; addr < eva; addr += PAGE_SIZE)
904				invlpg(addr);
905		if (pmap->pm_active & other_cpus)
906			smp_masked_invlpg_range(pmap->pm_active & other_cpus,
907			    sva, eva);
908	}
909	sched_unpin();
910}
911
912void
913pmap_invalidate_all(pmap_t pmap)
914{
915	u_int cpumask;
916	u_int other_cpus;
917
918	sched_pin();
919	if (pmap == kernel_pmap || pmap->pm_active == all_cpus) {
920		invltlb();
921		smp_invltlb();
922	} else {
923		cpumask = PCPU_GET(cpumask);
924		other_cpus = PCPU_GET(other_cpus);
925		if (pmap->pm_active & cpumask)
926			invltlb();
927		if (pmap->pm_active & other_cpus)
928			smp_masked_invltlb(pmap->pm_active & other_cpus);
929	}
930	sched_unpin();
931}
932
933void
934pmap_invalidate_cache(void)
935{
936
937	sched_pin();
938	wbinvd();
939	smp_cache_flush();
940	sched_unpin();
941}
942#else /* !SMP */
943/*
944 * Normal, non-SMP, 486+ invalidation functions.
945 * We inline these within pmap.c for speed.
946 */
947PMAP_INLINE void
948pmap_invalidate_page(pmap_t pmap, vm_offset_t va)
949{
950
951	if (pmap == kernel_pmap || pmap->pm_active)
952		invlpg(va);
953}
954
955PMAP_INLINE void
956pmap_invalidate_range(pmap_t pmap, vm_offset_t sva, vm_offset_t eva)
957{
958	vm_offset_t addr;
959
960	if (pmap == kernel_pmap || pmap->pm_active)
961		for (addr = sva; addr < eva; addr += PAGE_SIZE)
962			invlpg(addr);
963}
964
965PMAP_INLINE void
966pmap_invalidate_all(pmap_t pmap)
967{
968
969	if (pmap == kernel_pmap || pmap->pm_active)
970		invltlb();
971}
972
973PMAP_INLINE void
974pmap_invalidate_cache(void)
975{
976
977	wbinvd();
978}
979#endif /* !SMP */
980
981void
982pmap_invalidate_cache_range(vm_offset_t sva, vm_offset_t eva)
983{
984
985	KASSERT((sva & PAGE_MASK) == 0,
986	    ("pmap_invalidate_cache_range: sva not page-aligned"));
987	KASSERT((eva & PAGE_MASK) == 0,
988	    ("pmap_invalidate_cache_range: eva not page-aligned"));
989
990	if (cpu_feature & CPUID_SS)
991		; /* If "Self Snoop" is supported, do nothing. */
992	else if (cpu_feature & CPUID_CLFSH) {
993
994		/*
995		 * Otherwise, do per-cache line flush.  Use the mfence
996		 * instruction to insure that previous stores are
997		 * included in the write-back.  The processor
998		 * propagates flush to other processors in the cache
999		 * coherence domain.
1000		 */
1001		mfence();
1002		for (; sva < eva; sva += cpu_clflush_line_size)
1003			clflush(sva);
1004		mfence();
1005	} else {
1006
1007		/*
1008		 * No targeted cache flush methods are supported by CPU,
1009		 * globally invalidate cache as a last resort.
1010		 */
1011		pmap_invalidate_cache();
1012	}
1013}
1014
1015/*
1016 * Are we current address space or kernel?  N.B. We return FALSE when
1017 * a pmap's page table is in use because a kernel thread is borrowing
1018 * it.  The borrowed page table can change spontaneously, making any
1019 * dependence on its continued use subject to a race condition.
1020 */
1021static __inline int
1022pmap_is_current(pmap_t pmap)
1023{
1024
1025	return (pmap == kernel_pmap ||
1026		(pmap == vmspace_pmap(curthread->td_proc->p_vmspace) &&
1027	    (pmap->pm_pdir[PTDPTDI] & PG_FRAME) == (PTDpde[0] & PG_FRAME)));
1028}
1029
1030/*
1031 * If the given pmap is not the current or kernel pmap, the returned pte must
1032 * be released by passing it to pmap_pte_release().
1033 */
1034pt_entry_t *
1035pmap_pte(pmap_t pmap, vm_offset_t va)
1036{
1037	pd_entry_t newpf;
1038	pd_entry_t *pde;
1039
1040	pde = pmap_pde(pmap, va);
1041	if (*pde & PG_PS)
1042		return (pde);
1043	if (*pde != 0) {
1044		/* are we current address space or kernel? */
1045		if (pmap_is_current(pmap))
1046			return (vtopte(va));
1047		mtx_lock(&PMAP2mutex);
1048		newpf = *pde & PG_FRAME;
1049		if ((*PMAP2 & PG_FRAME) != newpf) {
1050			*PMAP2 = newpf | PG_RW | PG_V | PG_A | PG_M;
1051			pmap_invalidate_page(kernel_pmap, (vm_offset_t)PADDR2);
1052		}
1053		return (PADDR2 + (i386_btop(va) & (NPTEPG - 1)));
1054	}
1055	return (0);
1056}
1057
1058/*
1059 * Releases a pte that was obtained from pmap_pte().  Be prepared for the pte
1060 * being NULL.
1061 */
1062static __inline void
1063pmap_pte_release(pt_entry_t *pte)
1064{
1065
1066	if ((pt_entry_t *)((vm_offset_t)pte & ~PAGE_MASK) == PADDR2)
1067		mtx_unlock(&PMAP2mutex);
1068}
1069
1070static __inline void
1071invlcaddr(void *caddr)
1072{
1073
1074	invlpg((u_int)caddr);
1075}
1076
1077/*
1078 * Super fast pmap_pte routine best used when scanning
1079 * the pv lists.  This eliminates many coarse-grained
1080 * invltlb calls.  Note that many of the pv list
1081 * scans are across different pmaps.  It is very wasteful
1082 * to do an entire invltlb for checking a single mapping.
1083 *
1084 * If the given pmap is not the current pmap, vm_page_queue_mtx
1085 * must be held and curthread pinned to a CPU.
1086 */
1087static pt_entry_t *
1088pmap_pte_quick(pmap_t pmap, vm_offset_t va)
1089{
1090	pd_entry_t newpf;
1091	pd_entry_t *pde;
1092
1093	pde = pmap_pde(pmap, va);
1094	if (*pde & PG_PS)
1095		return (pde);
1096	if (*pde != 0) {
1097		/* are we current address space or kernel? */
1098		if (pmap_is_current(pmap))
1099			return (vtopte(va));
1100		mtx_assert(&vm_page_queue_mtx, MA_OWNED);
1101		KASSERT(curthread->td_pinned > 0, ("curthread not pinned"));
1102		newpf = *pde & PG_FRAME;
1103		if ((*PMAP1 & PG_FRAME) != newpf) {
1104			*PMAP1 = newpf | PG_RW | PG_V | PG_A | PG_M;
1105#ifdef SMP
1106			PMAP1cpu = PCPU_GET(cpuid);
1107#endif
1108			invlcaddr(PADDR1);
1109			PMAP1changed++;
1110		} else
1111#ifdef SMP
1112		if (PMAP1cpu != PCPU_GET(cpuid)) {
1113			PMAP1cpu = PCPU_GET(cpuid);
1114			invlcaddr(PADDR1);
1115			PMAP1changedcpu++;
1116		} else
1117#endif
1118			PMAP1unchanged++;
1119		return (PADDR1 + (i386_btop(va) & (NPTEPG - 1)));
1120	}
1121	return (0);
1122}
1123
1124/*
1125 *	Routine:	pmap_extract
1126 *	Function:
1127 *		Extract the physical page address associated
1128 *		with the given map/virtual_address pair.
1129 */
1130vm_paddr_t
1131pmap_extract(pmap_t pmap, vm_offset_t va)
1132{
1133	vm_paddr_t rtval;
1134	pt_entry_t *pte;
1135	pd_entry_t pde;
1136
1137	rtval = 0;
1138	PMAP_LOCK(pmap);
1139	pde = pmap->pm_pdir[va >> PDRSHIFT];
1140	if (pde != 0) {
1141		if ((pde & PG_PS) != 0)
1142			rtval = (pde & PG_PS_FRAME) | (va & PDRMASK);
1143		else {
1144			pte = pmap_pte(pmap, va);
1145			rtval = (*pte & PG_FRAME) | (va & PAGE_MASK);
1146			pmap_pte_release(pte);
1147		}
1148	}
1149	PMAP_UNLOCK(pmap);
1150	return (rtval);
1151}
1152
1153/*
1154 *	Routine:	pmap_extract_and_hold
1155 *	Function:
1156 *		Atomically extract and hold the physical page
1157 *		with the given pmap and virtual address pair
1158 *		if that mapping permits the given protection.
1159 */
1160vm_page_t
1161pmap_extract_and_hold(pmap_t pmap, vm_offset_t va, vm_prot_t prot)
1162{
1163	pd_entry_t pde;
1164	pt_entry_t pte;
1165	vm_page_t m;
1166
1167	m = NULL;
1168	vm_page_lock_queues();
1169	PMAP_LOCK(pmap);
1170	pde = *pmap_pde(pmap, va);
1171	if (pde != 0) {
1172		if (pde & PG_PS) {
1173			if ((pde & PG_RW) || (prot & VM_PROT_WRITE) == 0) {
1174				m = PHYS_TO_VM_PAGE((pde & PG_PS_FRAME) |
1175				    (va & PDRMASK));
1176				vm_page_hold(m);
1177			}
1178		} else {
1179			sched_pin();
1180			pte = *pmap_pte_quick(pmap, va);
1181			if (pte != 0 &&
1182			    ((pte & PG_RW) || (prot & VM_PROT_WRITE) == 0)) {
1183				m = PHYS_TO_VM_PAGE(pte & PG_FRAME);
1184				vm_page_hold(m);
1185			}
1186			sched_unpin();
1187		}
1188	}
1189	vm_page_unlock_queues();
1190	PMAP_UNLOCK(pmap);
1191	return (m);
1192}
1193
1194/***************************************************
1195 * Low level mapping routines.....
1196 ***************************************************/
1197
1198/*
1199 * Add a wired page to the kva.
1200 * Note: not SMP coherent.
1201 */
1202PMAP_INLINE void
1203pmap_kenter(vm_offset_t va, vm_paddr_t pa)
1204{
1205	pt_entry_t *pte;
1206
1207	pte = vtopte(va);
1208	pte_store(pte, pa | PG_RW | PG_V | pgeflag);
1209}
1210
1211static __inline void
1212pmap_kenter_attr(vm_offset_t va, vm_paddr_t pa, int mode)
1213{
1214	pt_entry_t *pte;
1215
1216	pte = vtopte(va);
1217	pte_store(pte, pa | PG_RW | PG_V | pgeflag | pmap_cache_bits(mode, 0));
1218}
1219
1220/*
1221 * Remove a page from the kernel pagetables.
1222 * Note: not SMP coherent.
1223 */
1224PMAP_INLINE void
1225pmap_kremove(vm_offset_t va)
1226{
1227	pt_entry_t *pte;
1228
1229	pte = vtopte(va);
1230	pte_clear(pte);
1231}
1232
1233/*
1234 *	Used to map a range of physical addresses into kernel
1235 *	virtual address space.
1236 *
1237 *	The value passed in '*virt' is a suggested virtual address for
1238 *	the mapping. Architectures which can support a direct-mapped
1239 *	physical to virtual region can return the appropriate address
1240 *	within that region, leaving '*virt' unchanged. Other
1241 *	architectures should map the pages starting at '*virt' and
1242 *	update '*virt' with the first usable address after the mapped
1243 *	region.
1244 */
1245vm_offset_t
1246pmap_map(vm_offset_t *virt, vm_paddr_t start, vm_paddr_t end, int prot)
1247{
1248	vm_offset_t va, sva;
1249
1250	va = sva = *virt;
1251	while (start < end) {
1252		pmap_kenter(va, start);
1253		va += PAGE_SIZE;
1254		start += PAGE_SIZE;
1255	}
1256	pmap_invalidate_range(kernel_pmap, sva, va);
1257	*virt = va;
1258	return (sva);
1259}
1260
1261
1262/*
1263 * Add a list of wired pages to the kva
1264 * this routine is only used for temporary
1265 * kernel mappings that do not need to have
1266 * page modification or references recorded.
1267 * Note that old mappings are simply written
1268 * over.  The page *must* be wired.
1269 * Note: SMP coherent.  Uses a ranged shootdown IPI.
1270 */
1271void
1272pmap_qenter(vm_offset_t sva, vm_page_t *ma, int count)
1273{
1274	pt_entry_t *endpte, oldpte, *pte;
1275
1276	oldpte = 0;
1277	pte = vtopte(sva);
1278	endpte = pte + count;
1279	while (pte < endpte) {
1280		oldpte |= *pte;
1281		pte_store(pte, VM_PAGE_TO_PHYS(*ma) | pgeflag |
1282		    pmap_cache_bits((*ma)->md.pat_mode, 0) | PG_RW | PG_V);
1283		pte++;
1284		ma++;
1285	}
1286	if ((oldpte & PG_V) != 0)
1287		pmap_invalidate_range(kernel_pmap, sva, sva + count *
1288		    PAGE_SIZE);
1289}
1290
1291/*
1292 * This routine tears out page mappings from the
1293 * kernel -- it is meant only for temporary mappings.
1294 * Note: SMP coherent.  Uses a ranged shootdown IPI.
1295 */
1296void
1297pmap_qremove(vm_offset_t sva, int count)
1298{
1299	vm_offset_t va;
1300
1301	va = sva;
1302	while (count-- > 0) {
1303		pmap_kremove(va);
1304		va += PAGE_SIZE;
1305	}
1306	pmap_invalidate_range(kernel_pmap, sva, va);
1307}
1308
1309/***************************************************
1310 * Page table page management routines.....
1311 ***************************************************/
1312static __inline void
1313pmap_free_zero_pages(vm_page_t free)
1314{
1315	vm_page_t m;
1316
1317	while (free != NULL) {
1318		m = free;
1319		free = m->right;
1320		/* Preserve the page's PG_ZERO setting. */
1321		vm_page_free_toq(m);
1322	}
1323}
1324
1325/*
1326 * Schedule the specified unused page table page to be freed.  Specifically,
1327 * add the page to the specified list of pages that will be released to the
1328 * physical memory manager after the TLB has been updated.
1329 */
1330static __inline void
1331pmap_add_delayed_free_list(vm_page_t m, vm_page_t *free, boolean_t set_PG_ZERO)
1332{
1333
1334	if (set_PG_ZERO)
1335		m->flags |= PG_ZERO;
1336	else
1337		m->flags &= ~PG_ZERO;
1338	m->right = *free;
1339	*free = m;
1340}
1341
1342/*
1343 * Inserts the specified page table page into the specified pmap's collection
1344 * of idle page table pages.  Each of a pmap's page table pages is responsible
1345 * for mapping a distinct range of virtual addresses.  The pmap's collection is
1346 * ordered by this virtual address range.
1347 */
1348static void
1349pmap_insert_pt_page(pmap_t pmap, vm_page_t mpte)
1350{
1351	vm_page_t root;
1352
1353	PMAP_LOCK_ASSERT(pmap, MA_OWNED);
1354	root = pmap->pm_root;
1355	if (root == NULL) {
1356		mpte->left = NULL;
1357		mpte->right = NULL;
1358	} else {
1359		root = vm_page_splay(mpte->pindex, root);
1360		if (mpte->pindex < root->pindex) {
1361			mpte->left = root->left;
1362			mpte->right = root;
1363			root->left = NULL;
1364		} else if (mpte->pindex == root->pindex)
1365			panic("pmap_insert_pt_page: pindex already inserted");
1366		else {
1367			mpte->right = root->right;
1368			mpte->left = root;
1369			root->right = NULL;
1370		}
1371	}
1372	pmap->pm_root = mpte;
1373}
1374
1375/*
1376 * Looks for a page table page mapping the specified virtual address in the
1377 * specified pmap's collection of idle page table pages.  Returns NULL if there
1378 * is no page table page corresponding to the specified virtual address.
1379 */
1380static vm_page_t
1381pmap_lookup_pt_page(pmap_t pmap, vm_offset_t va)
1382{
1383	vm_page_t mpte;
1384	vm_pindex_t pindex = va >> PDRSHIFT;
1385
1386	PMAP_LOCK_ASSERT(pmap, MA_OWNED);
1387	if ((mpte = pmap->pm_root) != NULL && mpte->pindex != pindex) {
1388		mpte = vm_page_splay(pindex, mpte);
1389		if ((pmap->pm_root = mpte)->pindex != pindex)
1390			mpte = NULL;
1391	}
1392	return (mpte);
1393}
1394
1395/*
1396 * Removes the specified page table page from the specified pmap's collection
1397 * of idle page table pages.  The specified page table page must be a member of
1398 * the pmap's collection.
1399 */
1400static void
1401pmap_remove_pt_page(pmap_t pmap, vm_page_t mpte)
1402{
1403	vm_page_t root;
1404
1405	PMAP_LOCK_ASSERT(pmap, MA_OWNED);
1406	if (mpte != pmap->pm_root)
1407		vm_page_splay(mpte->pindex, pmap->pm_root);
1408	if (mpte->left == NULL)
1409		root = mpte->right;
1410	else {
1411		root = vm_page_splay(mpte->pindex, mpte->left);
1412		root->right = mpte->right;
1413	}
1414	pmap->pm_root = root;
1415}
1416
1417/*
1418 * This routine unholds page table pages, and if the hold count
1419 * drops to zero, then it decrements the wire count.
1420 */
1421static __inline int
1422pmap_unwire_pte_hold(pmap_t pmap, vm_page_t m, vm_page_t *free)
1423{
1424
1425	--m->wire_count;
1426	if (m->wire_count == 0)
1427		return _pmap_unwire_pte_hold(pmap, m, free);
1428	else
1429		return 0;
1430}
1431
1432static int
1433_pmap_unwire_pte_hold(pmap_t pmap, vm_page_t m, vm_page_t *free)
1434{
1435	vm_offset_t pteva;
1436
1437	/*
1438	 * unmap the page table page
1439	 */
1440	pmap->pm_pdir[m->pindex] = 0;
1441	--pmap->pm_stats.resident_count;
1442
1443	/*
1444	 * This is a release store so that the ordinary store unmapping
1445	 * the page table page is globally performed before TLB shoot-
1446	 * down is begun.
1447	 */
1448	atomic_subtract_rel_int(&cnt.v_wire_count, 1);
1449
1450	/*
1451	 * Do an invltlb to make the invalidated mapping
1452	 * take effect immediately.
1453	 */
1454	pteva = VM_MAXUSER_ADDRESS + i386_ptob(m->pindex);
1455	pmap_invalidate_page(pmap, pteva);
1456
1457	/*
1458	 * Put page on a list so that it is released after
1459	 * *ALL* TLB shootdown is done
1460	 */
1461	pmap_add_delayed_free_list(m, free, TRUE);
1462
1463	return 1;
1464}
1465
1466/*
1467 * After removing a page table entry, this routine is used to
1468 * conditionally free the page, and manage the hold/wire counts.
1469 */
1470static int
1471pmap_unuse_pt(pmap_t pmap, vm_offset_t va, vm_page_t *free)
1472{
1473	pd_entry_t ptepde;
1474	vm_page_t mpte;
1475
1476	if (va >= VM_MAXUSER_ADDRESS)
1477		return 0;
1478	ptepde = *pmap_pde(pmap, va);
1479	mpte = PHYS_TO_VM_PAGE(ptepde & PG_FRAME);
1480	return pmap_unwire_pte_hold(pmap, mpte, free);
1481}
1482
1483void
1484pmap_pinit0(pmap_t pmap)
1485{
1486
1487	PMAP_LOCK_INIT(pmap);
1488	pmap->pm_pdir = (pd_entry_t *)(KERNBASE + (vm_offset_t)IdlePTD);
1489#ifdef PAE
1490	pmap->pm_pdpt = (pdpt_entry_t *)(KERNBASE + (vm_offset_t)IdlePDPT);
1491#endif
1492	pmap->pm_root = NULL;
1493	pmap->pm_active = 0;
1494	PCPU_SET(curpmap, pmap);
1495	TAILQ_INIT(&pmap->pm_pvchunk);
1496	bzero(&pmap->pm_stats, sizeof pmap->pm_stats);
1497	mtx_lock_spin(&allpmaps_lock);
1498	LIST_INSERT_HEAD(&allpmaps, pmap, pm_list);
1499	mtx_unlock_spin(&allpmaps_lock);
1500}
1501
1502/*
1503 * Initialize a preallocated and zeroed pmap structure,
1504 * such as one in a vmspace structure.
1505 */
1506int
1507pmap_pinit(pmap_t pmap)
1508{
1509	vm_page_t m, ptdpg[NPGPTD];
1510	vm_paddr_t pa;
1511	static int color;
1512	int i;
1513
1514	PMAP_LOCK_INIT(pmap);
1515
1516	/*
1517	 * No need to allocate page table space yet but we do need a valid
1518	 * page directory table.
1519	 */
1520	if (pmap->pm_pdir == NULL) {
1521		pmap->pm_pdir = (pd_entry_t *)kmem_alloc_nofault(kernel_map,
1522		    NBPTD);
1523
1524		if (pmap->pm_pdir == NULL) {
1525			PMAP_LOCK_DESTROY(pmap);
1526			return (0);
1527		}
1528#ifdef PAE
1529		pmap->pm_pdpt = uma_zalloc(pdptzone, M_WAITOK | M_ZERO);
1530		KASSERT(((vm_offset_t)pmap->pm_pdpt &
1531		    ((NPGPTD * sizeof(pdpt_entry_t)) - 1)) == 0,
1532		    ("pmap_pinit: pdpt misaligned"));
1533		KASSERT(pmap_kextract((vm_offset_t)pmap->pm_pdpt) < (4ULL<<30),
1534		    ("pmap_pinit: pdpt above 4g"));
1535#endif
1536		pmap->pm_root = NULL;
1537	}
1538	KASSERT(pmap->pm_root == NULL,
1539	    ("pmap_pinit: pmap has reserved page table page(s)"));
1540
1541	/*
1542	 * allocate the page directory page(s)
1543	 */
1544	for (i = 0; i < NPGPTD;) {
1545		m = vm_page_alloc(NULL, color++,
1546		    VM_ALLOC_NORMAL | VM_ALLOC_NOOBJ | VM_ALLOC_WIRED |
1547		    VM_ALLOC_ZERO);
1548		if (m == NULL)
1549			VM_WAIT;
1550		else {
1551			ptdpg[i++] = m;
1552		}
1553	}
1554
1555	pmap_qenter((vm_offset_t)pmap->pm_pdir, ptdpg, NPGPTD);
1556
1557	for (i = 0; i < NPGPTD; i++) {
1558		if ((ptdpg[i]->flags & PG_ZERO) == 0)
1559			bzero(pmap->pm_pdir + (i * NPDEPG), PAGE_SIZE);
1560	}
1561
1562	mtx_lock_spin(&allpmaps_lock);
1563	LIST_INSERT_HEAD(&allpmaps, pmap, pm_list);
1564	mtx_unlock_spin(&allpmaps_lock);
1565	/* Wire in kernel global address entries. */
1566	bcopy(PTD + KPTDI, pmap->pm_pdir + KPTDI, nkpt * sizeof(pd_entry_t));
1567
1568	/* install self-referential address mapping entry(s) */
1569	for (i = 0; i < NPGPTD; i++) {
1570		pa = VM_PAGE_TO_PHYS(ptdpg[i]);
1571		pmap->pm_pdir[PTDPTDI + i] = pa | PG_V | PG_RW | PG_A | PG_M;
1572#ifdef PAE
1573		pmap->pm_pdpt[i] = pa | PG_V;
1574#endif
1575	}
1576
1577	pmap->pm_active = 0;
1578	TAILQ_INIT(&pmap->pm_pvchunk);
1579	bzero(&pmap->pm_stats, sizeof pmap->pm_stats);
1580
1581	return (1);
1582}
1583
1584/*
1585 * this routine is called if the page table page is not
1586 * mapped correctly.
1587 */
1588static vm_page_t
1589_pmap_allocpte(pmap_t pmap, unsigned ptepindex, int flags)
1590{
1591	vm_paddr_t ptepa;
1592	vm_page_t m;
1593
1594	KASSERT((flags & (M_NOWAIT | M_WAITOK)) == M_NOWAIT ||
1595	    (flags & (M_NOWAIT | M_WAITOK)) == M_WAITOK,
1596	    ("_pmap_allocpte: flags is neither M_NOWAIT nor M_WAITOK"));
1597
1598	/*
1599	 * Allocate a page table page.
1600	 */
1601	if ((m = vm_page_alloc(NULL, ptepindex, VM_ALLOC_NOOBJ |
1602	    VM_ALLOC_WIRED | VM_ALLOC_ZERO)) == NULL) {
1603		if (flags & M_WAITOK) {
1604			PMAP_UNLOCK(pmap);
1605			vm_page_unlock_queues();
1606			VM_WAIT;
1607			vm_page_lock_queues();
1608			PMAP_LOCK(pmap);
1609		}
1610
1611		/*
1612		 * Indicate the need to retry.  While waiting, the page table
1613		 * page may have been allocated.
1614		 */
1615		return (NULL);
1616	}
1617	if ((m->flags & PG_ZERO) == 0)
1618		pmap_zero_page(m);
1619
1620	/*
1621	 * Map the pagetable page into the process address space, if
1622	 * it isn't already there.
1623	 */
1624
1625	pmap->pm_stats.resident_count++;
1626
1627	ptepa = VM_PAGE_TO_PHYS(m);
1628	pmap->pm_pdir[ptepindex] =
1629		(pd_entry_t) (ptepa | PG_U | PG_RW | PG_V | PG_A | PG_M);
1630
1631	return m;
1632}
1633
1634static vm_page_t
1635pmap_allocpte(pmap_t pmap, vm_offset_t va, int flags)
1636{
1637	unsigned ptepindex;
1638	pd_entry_t ptepa;
1639	vm_page_t m;
1640
1641	KASSERT((flags & (M_NOWAIT | M_WAITOK)) == M_NOWAIT ||
1642	    (flags & (M_NOWAIT | M_WAITOK)) == M_WAITOK,
1643	    ("pmap_allocpte: flags is neither M_NOWAIT nor M_WAITOK"));
1644
1645	/*
1646	 * Calculate pagetable page index
1647	 */
1648	ptepindex = va >> PDRSHIFT;
1649retry:
1650	/*
1651	 * Get the page directory entry
1652	 */
1653	ptepa = pmap->pm_pdir[ptepindex];
1654
1655	/*
1656	 * This supports switching from a 4MB page to a
1657	 * normal 4K page.
1658	 */
1659	if (ptepa & PG_PS) {
1660		(void)pmap_demote_pde(pmap, &pmap->pm_pdir[ptepindex], va);
1661		ptepa = pmap->pm_pdir[ptepindex];
1662	}
1663
1664	/*
1665	 * If the page table page is mapped, we just increment the
1666	 * hold count, and activate it.
1667	 */
1668	if (ptepa) {
1669		m = PHYS_TO_VM_PAGE(ptepa & PG_FRAME);
1670		m->wire_count++;
1671	} else {
1672		/*
1673		 * Here if the pte page isn't mapped, or if it has
1674		 * been deallocated.
1675		 */
1676		m = _pmap_allocpte(pmap, ptepindex, flags);
1677		if (m == NULL && (flags & M_WAITOK))
1678			goto retry;
1679	}
1680	return (m);
1681}
1682
1683
1684/***************************************************
1685* Pmap allocation/deallocation routines.
1686 ***************************************************/
1687
1688#ifdef SMP
1689/*
1690 * Deal with a SMP shootdown of other users of the pmap that we are
1691 * trying to dispose of.  This can be a bit hairy.
1692 */
1693static cpumask_t *lazymask;
1694static u_int lazyptd;
1695static volatile u_int lazywait;
1696
1697void pmap_lazyfix_action(void);
1698
1699void
1700pmap_lazyfix_action(void)
1701{
1702	cpumask_t mymask = PCPU_GET(cpumask);
1703
1704#ifdef COUNT_IPIS
1705	(*ipi_lazypmap_counts[PCPU_GET(cpuid)])++;
1706#endif
1707	if (rcr3() == lazyptd)
1708		load_cr3(PCPU_GET(curpcb)->pcb_cr3);
1709	atomic_clear_int(lazymask, mymask);
1710	atomic_store_rel_int(&lazywait, 1);
1711}
1712
1713static void
1714pmap_lazyfix_self(cpumask_t mymask)
1715{
1716
1717	if (rcr3() == lazyptd)
1718		load_cr3(PCPU_GET(curpcb)->pcb_cr3);
1719	atomic_clear_int(lazymask, mymask);
1720}
1721
1722
1723static void
1724pmap_lazyfix(pmap_t pmap)
1725{
1726	cpumask_t mymask, mask;
1727	u_int spins;
1728
1729	while ((mask = pmap->pm_active) != 0) {
1730		spins = 50000000;
1731		mask = mask & -mask;	/* Find least significant set bit */
1732		mtx_lock_spin(&smp_ipi_mtx);
1733#ifdef PAE
1734		lazyptd = vtophys(pmap->pm_pdpt);
1735#else
1736		lazyptd = vtophys(pmap->pm_pdir);
1737#endif
1738		mymask = PCPU_GET(cpumask);
1739		if (mask == mymask) {
1740			lazymask = &pmap->pm_active;
1741			pmap_lazyfix_self(mymask);
1742		} else {
1743			atomic_store_rel_int((u_int *)&lazymask,
1744			    (u_int)&pmap->pm_active);
1745			atomic_store_rel_int(&lazywait, 0);
1746			ipi_selected(mask, IPI_LAZYPMAP);
1747			while (lazywait == 0) {
1748				ia32_pause();
1749				if (--spins == 0)
1750					break;
1751			}
1752		}
1753		mtx_unlock_spin(&smp_ipi_mtx);
1754		if (spins == 0)
1755			printf("pmap_lazyfix: spun for 50000000\n");
1756	}
1757}
1758
1759#else	/* SMP */
1760
1761/*
1762 * Cleaning up on uniprocessor is easy.  For various reasons, we're
1763 * unlikely to have to even execute this code, including the fact
1764 * that the cleanup is deferred until the parent does a wait(2), which
1765 * means that another userland process has run.
1766 */
1767static void
1768pmap_lazyfix(pmap_t pmap)
1769{
1770	u_int cr3;
1771
1772	cr3 = vtophys(pmap->pm_pdir);
1773	if (cr3 == rcr3()) {
1774		load_cr3(PCPU_GET(curpcb)->pcb_cr3);
1775		pmap->pm_active &= ~(PCPU_GET(cpumask));
1776	}
1777}
1778#endif	/* SMP */
1779
1780/*
1781 * Release any resources held by the given physical map.
1782 * Called when a pmap initialized by pmap_pinit is being released.
1783 * Should only be called if the map contains no valid mappings.
1784 */
1785void
1786pmap_release(pmap_t pmap)
1787{
1788	vm_page_t m, ptdpg[NPGPTD];
1789	int i;
1790
1791	KASSERT(pmap->pm_stats.resident_count == 0,
1792	    ("pmap_release: pmap resident count %ld != 0",
1793	    pmap->pm_stats.resident_count));
1794	KASSERT(pmap->pm_root == NULL,
1795	    ("pmap_release: pmap has reserved page table page(s)"));
1796
1797	pmap_lazyfix(pmap);
1798	mtx_lock_spin(&allpmaps_lock);
1799	LIST_REMOVE(pmap, pm_list);
1800	mtx_unlock_spin(&allpmaps_lock);
1801
1802	for (i = 0; i < NPGPTD; i++)
1803		ptdpg[i] = PHYS_TO_VM_PAGE(pmap->pm_pdir[PTDPTDI + i] &
1804		    PG_FRAME);
1805
1806	bzero(pmap->pm_pdir + PTDPTDI, (nkpt + NPGPTD) *
1807	    sizeof(*pmap->pm_pdir));
1808
1809	pmap_qremove((vm_offset_t)pmap->pm_pdir, NPGPTD);
1810
1811	for (i = 0; i < NPGPTD; i++) {
1812		m = ptdpg[i];
1813#ifdef PAE
1814		KASSERT(VM_PAGE_TO_PHYS(m) == (pmap->pm_pdpt[i] & PG_FRAME),
1815		    ("pmap_release: got wrong ptd page"));
1816#endif
1817		m->wire_count--;
1818		atomic_subtract_int(&cnt.v_wire_count, 1);
1819		vm_page_free_zero(m);
1820	}
1821	PMAP_LOCK_DESTROY(pmap);
1822}
1823
1824static int
1825kvm_size(SYSCTL_HANDLER_ARGS)
1826{
1827	unsigned long ksize = VM_MAX_KERNEL_ADDRESS - KERNBASE;
1828
1829	return sysctl_handle_long(oidp, &ksize, 0, req);
1830}
1831SYSCTL_PROC(_vm, OID_AUTO, kvm_size, CTLTYPE_LONG|CTLFLAG_RD,
1832    0, 0, kvm_size, "IU", "Size of KVM");
1833
1834static int
1835kvm_free(SYSCTL_HANDLER_ARGS)
1836{
1837	unsigned long kfree = VM_MAX_KERNEL_ADDRESS - kernel_vm_end;
1838
1839	return sysctl_handle_long(oidp, &kfree, 0, req);
1840}
1841SYSCTL_PROC(_vm, OID_AUTO, kvm_free, CTLTYPE_LONG|CTLFLAG_RD,
1842    0, 0, kvm_free, "IU", "Amount of KVM free");
1843
1844/*
1845 * grow the number of kernel page table entries, if needed
1846 */
1847void
1848pmap_growkernel(vm_offset_t addr)
1849{
1850	struct pmap *pmap;
1851	vm_paddr_t ptppaddr;
1852	vm_page_t nkpg;
1853	pd_entry_t newpdir;
1854	pt_entry_t *pde;
1855
1856	mtx_assert(&kernel_map->system_mtx, MA_OWNED);
1857	if (kernel_vm_end == 0) {
1858		kernel_vm_end = KERNBASE;
1859		nkpt = 0;
1860		while (pdir_pde(PTD, kernel_vm_end)) {
1861			kernel_vm_end = (kernel_vm_end + PAGE_SIZE * NPTEPG) & ~(PAGE_SIZE * NPTEPG - 1);
1862			nkpt++;
1863			if (kernel_vm_end - 1 >= kernel_map->max_offset) {
1864				kernel_vm_end = kernel_map->max_offset;
1865				break;
1866			}
1867		}
1868	}
1869	addr = roundup2(addr, PAGE_SIZE * NPTEPG);
1870	if (addr - 1 >= kernel_map->max_offset)
1871		addr = kernel_map->max_offset;
1872	while (kernel_vm_end < addr) {
1873		if (pdir_pde(PTD, kernel_vm_end)) {
1874			kernel_vm_end = (kernel_vm_end + PAGE_SIZE * NPTEPG) & ~(PAGE_SIZE * NPTEPG - 1);
1875			if (kernel_vm_end - 1 >= kernel_map->max_offset) {
1876				kernel_vm_end = kernel_map->max_offset;
1877				break;
1878			}
1879			continue;
1880		}
1881
1882		nkpg = vm_page_alloc(NULL, kernel_vm_end >> PDRSHIFT,
1883		    VM_ALLOC_INTERRUPT | VM_ALLOC_NOOBJ | VM_ALLOC_WIRED |
1884		    VM_ALLOC_ZERO);
1885		if (nkpg == NULL)
1886			panic("pmap_growkernel: no memory to grow kernel");
1887
1888		nkpt++;
1889
1890		if ((nkpg->flags & PG_ZERO) == 0)
1891			pmap_zero_page(nkpg);
1892		ptppaddr = VM_PAGE_TO_PHYS(nkpg);
1893		newpdir = (pd_entry_t) (ptppaddr | PG_V | PG_RW | PG_A | PG_M);
1894		pdir_pde(PTD, kernel_vm_end) = newpdir;
1895
1896		mtx_lock_spin(&allpmaps_lock);
1897		LIST_FOREACH(pmap, &allpmaps, pm_list) {
1898			pde = pmap_pde(pmap, kernel_vm_end);
1899			pde_store(pde, newpdir);
1900		}
1901		mtx_unlock_spin(&allpmaps_lock);
1902		kernel_vm_end = (kernel_vm_end + PAGE_SIZE * NPTEPG) & ~(PAGE_SIZE * NPTEPG - 1);
1903		if (kernel_vm_end - 1 >= kernel_map->max_offset) {
1904			kernel_vm_end = kernel_map->max_offset;
1905			break;
1906		}
1907	}
1908}
1909
1910
1911/***************************************************
1912 * page management routines.
1913 ***************************************************/
1914
1915CTASSERT(sizeof(struct pv_chunk) == PAGE_SIZE);
1916CTASSERT(_NPCM == 11);
1917
1918static __inline struct pv_chunk *
1919pv_to_chunk(pv_entry_t pv)
1920{
1921
1922	return (struct pv_chunk *)((uintptr_t)pv & ~(uintptr_t)PAGE_MASK);
1923}
1924
1925#define PV_PMAP(pv) (pv_to_chunk(pv)->pc_pmap)
1926
1927#define	PC_FREE0_9	0xfffffffful	/* Free values for index 0 through 9 */
1928#define	PC_FREE10	0x0000fffful	/* Free values for index 10 */
1929
1930static uint32_t pc_freemask[11] = {
1931	PC_FREE0_9, PC_FREE0_9, PC_FREE0_9,
1932	PC_FREE0_9, PC_FREE0_9, PC_FREE0_9,
1933	PC_FREE0_9, PC_FREE0_9, PC_FREE0_9,
1934	PC_FREE0_9, PC_FREE10
1935};
1936
1937SYSCTL_INT(_vm_pmap, OID_AUTO, pv_entry_count, CTLFLAG_RD, &pv_entry_count, 0,
1938	"Current number of pv entries");
1939
1940#ifdef PV_STATS
1941static int pc_chunk_count, pc_chunk_allocs, pc_chunk_frees, pc_chunk_tryfail;
1942
1943SYSCTL_INT(_vm_pmap, OID_AUTO, pc_chunk_count, CTLFLAG_RD, &pc_chunk_count, 0,
1944	"Current number of pv entry chunks");
1945SYSCTL_INT(_vm_pmap, OID_AUTO, pc_chunk_allocs, CTLFLAG_RD, &pc_chunk_allocs, 0,
1946	"Current number of pv entry chunks allocated");
1947SYSCTL_INT(_vm_pmap, OID_AUTO, pc_chunk_frees, CTLFLAG_RD, &pc_chunk_frees, 0,
1948	"Current number of pv entry chunks frees");
1949SYSCTL_INT(_vm_pmap, OID_AUTO, pc_chunk_tryfail, CTLFLAG_RD, &pc_chunk_tryfail, 0,
1950	"Number of times tried to get a chunk page but failed.");
1951
1952static long pv_entry_frees, pv_entry_allocs;
1953static int pv_entry_spare;
1954
1955SYSCTL_LONG(_vm_pmap, OID_AUTO, pv_entry_frees, CTLFLAG_RD, &pv_entry_frees, 0,
1956	"Current number of pv entry frees");
1957SYSCTL_LONG(_vm_pmap, OID_AUTO, pv_entry_allocs, CTLFLAG_RD, &pv_entry_allocs, 0,
1958	"Current number of pv entry allocs");
1959SYSCTL_INT(_vm_pmap, OID_AUTO, pv_entry_spare, CTLFLAG_RD, &pv_entry_spare, 0,
1960	"Current number of spare pv entries");
1961
1962static int pmap_collect_inactive, pmap_collect_active;
1963
1964SYSCTL_INT(_vm_pmap, OID_AUTO, pmap_collect_inactive, CTLFLAG_RD, &pmap_collect_inactive, 0,
1965	"Current number times pmap_collect called on inactive queue");
1966SYSCTL_INT(_vm_pmap, OID_AUTO, pmap_collect_active, CTLFLAG_RD, &pmap_collect_active, 0,
1967	"Current number times pmap_collect called on active queue");
1968#endif
1969
1970/*
1971 * We are in a serious low memory condition.  Resort to
1972 * drastic measures to free some pages so we can allocate
1973 * another pv entry chunk.  This is normally called to
1974 * unmap inactive pages, and if necessary, active pages.
1975 */
1976static void
1977pmap_collect(pmap_t locked_pmap, struct vpgqueues *vpq)
1978{
1979	struct md_page *pvh;
1980	pd_entry_t *pde;
1981	pmap_t pmap;
1982	pt_entry_t *pte, tpte;
1983	pv_entry_t next_pv, pv;
1984	vm_offset_t va;
1985	vm_page_t m, free;
1986
1987	sched_pin();
1988	TAILQ_FOREACH(m, &vpq->pl, pageq) {
1989		if (m->hold_count || m->busy)
1990			continue;
1991		TAILQ_FOREACH_SAFE(pv, &m->md.pv_list, pv_list, next_pv) {
1992			va = pv->pv_va;
1993			pmap = PV_PMAP(pv);
1994			/* Avoid deadlock and lock recursion. */
1995			if (pmap > locked_pmap)
1996				PMAP_LOCK(pmap);
1997			else if (pmap != locked_pmap && !PMAP_TRYLOCK(pmap))
1998				continue;
1999			pmap->pm_stats.resident_count--;
2000			pde = pmap_pde(pmap, va);
2001			KASSERT((*pde & PG_PS) == 0, ("pmap_collect: found"
2002			    " a 4mpage in page %p's pv list", m));
2003			pte = pmap_pte_quick(pmap, va);
2004			tpte = pte_load_clear(pte);
2005			KASSERT((tpte & PG_W) == 0,
2006			    ("pmap_collect: wired pte %#jx", (uintmax_t)tpte));
2007			if (tpte & PG_A)
2008				vm_page_flag_set(m, PG_REFERENCED);
2009			if ((tpte & (PG_M | PG_RW)) == (PG_M | PG_RW))
2010				vm_page_dirty(m);
2011			free = NULL;
2012			pmap_unuse_pt(pmap, va, &free);
2013			pmap_invalidate_page(pmap, va);
2014			pmap_free_zero_pages(free);
2015			TAILQ_REMOVE(&m->md.pv_list, pv, pv_list);
2016			if (TAILQ_EMPTY(&m->md.pv_list)) {
2017				pvh = pa_to_pvh(VM_PAGE_TO_PHYS(m));
2018				if (TAILQ_EMPTY(&pvh->pv_list))
2019					vm_page_flag_clear(m, PG_WRITEABLE);
2020			}
2021			free_pv_entry(pmap, pv);
2022			if (pmap != locked_pmap)
2023				PMAP_UNLOCK(pmap);
2024		}
2025	}
2026	sched_unpin();
2027}
2028
2029
2030/*
2031 * free the pv_entry back to the free list
2032 */
2033static void
2034free_pv_entry(pmap_t pmap, pv_entry_t pv)
2035{
2036	vm_page_t m;
2037	struct pv_chunk *pc;
2038	int idx, field, bit;
2039
2040	mtx_assert(&vm_page_queue_mtx, MA_OWNED);
2041	PMAP_LOCK_ASSERT(pmap, MA_OWNED);
2042	PV_STAT(pv_entry_frees++);
2043	PV_STAT(pv_entry_spare++);
2044	pv_entry_count--;
2045	pc = pv_to_chunk(pv);
2046	idx = pv - &pc->pc_pventry[0];
2047	field = idx / 32;
2048	bit = idx % 32;
2049	pc->pc_map[field] |= 1ul << bit;
2050	/* move to head of list */
2051	TAILQ_REMOVE(&pmap->pm_pvchunk, pc, pc_list);
2052	for (idx = 0; idx < _NPCM; idx++)
2053		if (pc->pc_map[idx] != pc_freemask[idx]) {
2054			TAILQ_INSERT_HEAD(&pmap->pm_pvchunk, pc, pc_list);
2055			return;
2056		}
2057	PV_STAT(pv_entry_spare -= _NPCPV);
2058	PV_STAT(pc_chunk_count--);
2059	PV_STAT(pc_chunk_frees++);
2060	/* entire chunk is free, return it */
2061	m = PHYS_TO_VM_PAGE(pmap_kextract((vm_offset_t)pc));
2062	pmap_qremove((vm_offset_t)pc, 1);
2063	vm_page_unwire(m, 0);
2064	vm_page_free(m);
2065	pmap_ptelist_free(&pv_vafree, (vm_offset_t)pc);
2066}
2067
2068/*
2069 * get a new pv_entry, allocating a block from the system
2070 * when needed.
2071 */
2072static pv_entry_t
2073get_pv_entry(pmap_t pmap, int try)
2074{
2075	static const struct timeval printinterval = { 60, 0 };
2076	static struct timeval lastprint;
2077	static vm_pindex_t colour;
2078	struct vpgqueues *pq;
2079	int bit, field;
2080	pv_entry_t pv;
2081	struct pv_chunk *pc;
2082	vm_page_t m;
2083
2084	PMAP_LOCK_ASSERT(pmap, MA_OWNED);
2085	mtx_assert(&vm_page_queue_mtx, MA_OWNED);
2086	PV_STAT(pv_entry_allocs++);
2087	pv_entry_count++;
2088	if (pv_entry_count > pv_entry_high_water)
2089		if (ratecheck(&lastprint, &printinterval))
2090			printf("Approaching the limit on PV entries, consider "
2091			    "increasing either the vm.pmap.shpgperproc or the "
2092			    "vm.pmap.pv_entry_max tunable.\n");
2093	pq = NULL;
2094retry:
2095	pc = TAILQ_FIRST(&pmap->pm_pvchunk);
2096	if (pc != NULL) {
2097		for (field = 0; field < _NPCM; field++) {
2098			if (pc->pc_map[field]) {
2099				bit = bsfl(pc->pc_map[field]);
2100				break;
2101			}
2102		}
2103		if (field < _NPCM) {
2104			pv = &pc->pc_pventry[field * 32 + bit];
2105			pc->pc_map[field] &= ~(1ul << bit);
2106			/* If this was the last item, move it to tail */
2107			for (field = 0; field < _NPCM; field++)
2108				if (pc->pc_map[field] != 0) {
2109					PV_STAT(pv_entry_spare--);
2110					return (pv);	/* not full, return */
2111				}
2112			TAILQ_REMOVE(&pmap->pm_pvchunk, pc, pc_list);
2113			TAILQ_INSERT_TAIL(&pmap->pm_pvchunk, pc, pc_list);
2114			PV_STAT(pv_entry_spare--);
2115			return (pv);
2116		}
2117	}
2118	/*
2119	 * Access to the ptelist "pv_vafree" is synchronized by the page
2120	 * queues lock.  If "pv_vafree" is currently non-empty, it will
2121	 * remain non-empty until pmap_ptelist_alloc() completes.
2122	 */
2123	if (pv_vafree == 0 || (m = vm_page_alloc(NULL, colour, (pq ==
2124	    &vm_page_queues[PQ_ACTIVE] ? VM_ALLOC_SYSTEM : VM_ALLOC_NORMAL) |
2125	    VM_ALLOC_NOOBJ | VM_ALLOC_WIRED)) == NULL) {
2126		if (try) {
2127			pv_entry_count--;
2128			PV_STAT(pc_chunk_tryfail++);
2129			return (NULL);
2130		}
2131		/*
2132		 * Reclaim pv entries: At first, destroy mappings to
2133		 * inactive pages.  After that, if a pv chunk entry
2134		 * is still needed, destroy mappings to active pages.
2135		 */
2136		if (pq == NULL) {
2137			PV_STAT(pmap_collect_inactive++);
2138			pq = &vm_page_queues[PQ_INACTIVE];
2139		} else if (pq == &vm_page_queues[PQ_INACTIVE]) {
2140			PV_STAT(pmap_collect_active++);
2141			pq = &vm_page_queues[PQ_ACTIVE];
2142		} else
2143			panic("get_pv_entry: increase vm.pmap.shpgperproc");
2144		pmap_collect(pmap, pq);
2145		goto retry;
2146	}
2147	PV_STAT(pc_chunk_count++);
2148	PV_STAT(pc_chunk_allocs++);
2149	colour++;
2150	pc = (struct pv_chunk *)pmap_ptelist_alloc(&pv_vafree);
2151	pmap_qenter((vm_offset_t)pc, &m, 1);
2152	pc->pc_pmap = pmap;
2153	pc->pc_map[0] = pc_freemask[0] & ~1ul;	/* preallocated bit 0 */
2154	for (field = 1; field < _NPCM; field++)
2155		pc->pc_map[field] = pc_freemask[field];
2156	pv = &pc->pc_pventry[0];
2157	TAILQ_INSERT_HEAD(&pmap->pm_pvchunk, pc, pc_list);
2158	PV_STAT(pv_entry_spare += _NPCPV - 1);
2159	return (pv);
2160}
2161
2162static __inline pv_entry_t
2163pmap_pvh_remove(struct md_page *pvh, pmap_t pmap, vm_offset_t va)
2164{
2165	pv_entry_t pv;
2166
2167	mtx_assert(&vm_page_queue_mtx, MA_OWNED);
2168	TAILQ_FOREACH(pv, &pvh->pv_list, pv_list) {
2169		if (pmap == PV_PMAP(pv) && va == pv->pv_va) {
2170			TAILQ_REMOVE(&pvh->pv_list, pv, pv_list);
2171			break;
2172		}
2173	}
2174	return (pv);
2175}
2176
2177static void
2178pmap_pv_demote_pde(pmap_t pmap, vm_offset_t va, vm_paddr_t pa)
2179{
2180	struct md_page *pvh;
2181	pv_entry_t pv;
2182	vm_offset_t va_last;
2183	vm_page_t m;
2184
2185	mtx_assert(&vm_page_queue_mtx, MA_OWNED);
2186	KASSERT((pa & PDRMASK) == 0,
2187	    ("pmap_pv_demote_pde: pa is not 4mpage aligned"));
2188
2189	/*
2190	 * Transfer the 4mpage's pv entry for this mapping to the first
2191	 * page's pv list.
2192	 */
2193	pvh = pa_to_pvh(pa);
2194	va = trunc_4mpage(va);
2195	pv = pmap_pvh_remove(pvh, pmap, va);
2196	KASSERT(pv != NULL, ("pmap_pv_demote_pde: pv not found"));
2197	m = PHYS_TO_VM_PAGE(pa);
2198	TAILQ_INSERT_TAIL(&m->md.pv_list, pv, pv_list);
2199	/* Instantiate the remaining NPTEPG - 1 pv entries. */
2200	va_last = va + NBPDR - PAGE_SIZE;
2201	do {
2202		m++;
2203		KASSERT((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) == 0,
2204		    ("pmap_pv_demote_pde: page %p is not managed", m));
2205		va += PAGE_SIZE;
2206		pmap_insert_entry(pmap, va, m);
2207	} while (va < va_last);
2208}
2209
2210static void
2211pmap_pv_promote_pde(pmap_t pmap, vm_offset_t va, vm_paddr_t pa)
2212{
2213	struct md_page *pvh;
2214	pv_entry_t pv;
2215	vm_offset_t va_last;
2216	vm_page_t m;
2217
2218	mtx_assert(&vm_page_queue_mtx, MA_OWNED);
2219	KASSERT((pa & PDRMASK) == 0,
2220	    ("pmap_pv_promote_pde: pa is not 4mpage aligned"));
2221
2222	/*
2223	 * Transfer the first page's pv entry for this mapping to the
2224	 * 4mpage's pv list.  Aside from avoiding the cost of a call
2225	 * to get_pv_entry(), a transfer avoids the possibility that
2226	 * get_pv_entry() calls pmap_collect() and that pmap_collect()
2227	 * removes one of the mappings that is being promoted.
2228	 */
2229	m = PHYS_TO_VM_PAGE(pa);
2230	va = trunc_4mpage(va);
2231	pv = pmap_pvh_remove(&m->md, pmap, va);
2232	KASSERT(pv != NULL, ("pmap_pv_promote_pde: pv not found"));
2233	pvh = pa_to_pvh(pa);
2234	TAILQ_INSERT_TAIL(&pvh->pv_list, pv, pv_list);
2235	/* Free the remaining NPTEPG - 1 pv entries. */
2236	va_last = va + NBPDR - PAGE_SIZE;
2237	do {
2238		m++;
2239		va += PAGE_SIZE;
2240		pmap_pvh_free(&m->md, pmap, va);
2241	} while (va < va_last);
2242}
2243
2244static void
2245pmap_pvh_free(struct md_page *pvh, pmap_t pmap, vm_offset_t va)
2246{
2247	pv_entry_t pv;
2248
2249	pv = pmap_pvh_remove(pvh, pmap, va);
2250	KASSERT(pv != NULL, ("pmap_pvh_free: pv not found"));
2251	free_pv_entry(pmap, pv);
2252}
2253
2254static void
2255pmap_remove_entry(pmap_t pmap, vm_page_t m, vm_offset_t va)
2256{
2257	struct md_page *pvh;
2258
2259	mtx_assert(&vm_page_queue_mtx, MA_OWNED);
2260	pmap_pvh_free(&m->md, pmap, va);
2261	if (TAILQ_EMPTY(&m->md.pv_list)) {
2262		pvh = pa_to_pvh(VM_PAGE_TO_PHYS(m));
2263		if (TAILQ_EMPTY(&pvh->pv_list))
2264			vm_page_flag_clear(m, PG_WRITEABLE);
2265	}
2266}
2267
2268/*
2269 * Create a pv entry for page at pa for
2270 * (pmap, va).
2271 */
2272static void
2273pmap_insert_entry(pmap_t pmap, vm_offset_t va, vm_page_t m)
2274{
2275	pv_entry_t pv;
2276
2277	PMAP_LOCK_ASSERT(pmap, MA_OWNED);
2278	mtx_assert(&vm_page_queue_mtx, MA_OWNED);
2279	pv = get_pv_entry(pmap, FALSE);
2280	pv->pv_va = va;
2281	TAILQ_INSERT_TAIL(&m->md.pv_list, pv, pv_list);
2282}
2283
2284/*
2285 * Conditionally create a pv entry.
2286 */
2287static boolean_t
2288pmap_try_insert_pv_entry(pmap_t pmap, vm_offset_t va, vm_page_t m)
2289{
2290	pv_entry_t pv;
2291
2292	PMAP_LOCK_ASSERT(pmap, MA_OWNED);
2293	mtx_assert(&vm_page_queue_mtx, MA_OWNED);
2294	if (pv_entry_count < pv_entry_high_water &&
2295	    (pv = get_pv_entry(pmap, TRUE)) != NULL) {
2296		pv->pv_va = va;
2297		TAILQ_INSERT_TAIL(&m->md.pv_list, pv, pv_list);
2298		return (TRUE);
2299	} else
2300		return (FALSE);
2301}
2302
2303/*
2304 * Create the pv entries for each of the pages within a superpage.
2305 */
2306static boolean_t
2307pmap_pv_insert_pde(pmap_t pmap, vm_offset_t va, vm_paddr_t pa)
2308{
2309	struct md_page *pvh;
2310	pv_entry_t pv;
2311
2312	mtx_assert(&vm_page_queue_mtx, MA_OWNED);
2313	if (pv_entry_count < pv_entry_high_water &&
2314	    (pv = get_pv_entry(pmap, TRUE)) != NULL) {
2315		pv->pv_va = va;
2316		pvh = pa_to_pvh(pa);
2317		TAILQ_INSERT_TAIL(&pvh->pv_list, pv, pv_list);
2318		return (TRUE);
2319	} else
2320		return (FALSE);
2321}
2322
2323/*
2324 * Fills a page table page with mappings to consecutive physical pages.
2325 */
2326static void
2327pmap_fill_ptp(pt_entry_t *firstpte, pt_entry_t newpte)
2328{
2329	pt_entry_t *pte;
2330
2331	for (pte = firstpte; pte < firstpte + NPTEPG; pte++) {
2332		*pte = newpte;
2333		newpte += PAGE_SIZE;
2334	}
2335}
2336
2337/*
2338 * Tries to demote a 2- or 4MB page mapping.  If demotion fails, the
2339 * 2- or 4MB page mapping is invalidated.
2340 */
2341static boolean_t
2342pmap_demote_pde(pmap_t pmap, pd_entry_t *pde, vm_offset_t va)
2343{
2344	pd_entry_t newpde, oldpde;
2345	pmap_t allpmaps_entry;
2346	pt_entry_t *firstpte, newpte;
2347	vm_paddr_t mptepa;
2348	vm_page_t free, mpte;
2349
2350	PMAP_LOCK_ASSERT(pmap, MA_OWNED);
2351	oldpde = *pde;
2352	KASSERT((oldpde & (PG_PS | PG_V)) == (PG_PS | PG_V),
2353	    ("pmap_demote_pde: oldpde is missing PG_PS and/or PG_V"));
2354	mpte = pmap_lookup_pt_page(pmap, va);
2355	if (mpte != NULL)
2356		pmap_remove_pt_page(pmap, mpte);
2357	else {
2358		KASSERT((oldpde & PG_W) == 0,
2359		    ("pmap_demote_pde: page table page for a wired mapping"
2360		    " is missing"));
2361
2362		/*
2363		 * Invalidate the 2- or 4MB page mapping and return
2364		 * "failure" if the mapping was never accessed or the
2365		 * allocation of the new page table page fails.
2366		 */
2367		if ((oldpde & PG_A) == 0 || (mpte = vm_page_alloc(NULL,
2368		    va >> PDRSHIFT, VM_ALLOC_NOOBJ | VM_ALLOC_NORMAL |
2369		    VM_ALLOC_WIRED)) == NULL) {
2370			free = NULL;
2371			pmap_remove_pde(pmap, pde, trunc_4mpage(va), &free);
2372			pmap_invalidate_page(pmap, trunc_4mpage(va));
2373			pmap_free_zero_pages(free);
2374			CTR2(KTR_PMAP, "pmap_demote_pde: failure for va %#x"
2375			    " in pmap %p", va, pmap);
2376			return (FALSE);
2377		}
2378		if (va < VM_MAXUSER_ADDRESS)
2379			pmap->pm_stats.resident_count++;
2380	}
2381	mptepa = VM_PAGE_TO_PHYS(mpte);
2382
2383	/*
2384	 * Temporarily map the page table page (mpte) into the kernel's
2385	 * address space at either PADDR1 or PADDR2.
2386	 */
2387	if (curthread->td_pinned > 0 && mtx_owned(&vm_page_queue_mtx)) {
2388		if ((*PMAP1 & PG_FRAME) != mptepa) {
2389			*PMAP1 = mptepa | PG_RW | PG_V | PG_A | PG_M;
2390#ifdef SMP
2391			PMAP1cpu = PCPU_GET(cpuid);
2392#endif
2393			invlcaddr(PADDR1);
2394			PMAP1changed++;
2395		} else
2396#ifdef SMP
2397		if (PMAP1cpu != PCPU_GET(cpuid)) {
2398			PMAP1cpu = PCPU_GET(cpuid);
2399			invlcaddr(PADDR1);
2400			PMAP1changedcpu++;
2401		} else
2402#endif
2403			PMAP1unchanged++;
2404		firstpte = PADDR1;
2405	} else {
2406		mtx_lock(&PMAP2mutex);
2407		if ((*PMAP2 & PG_FRAME) != mptepa) {
2408			*PMAP2 = mptepa | PG_RW | PG_V | PG_A | PG_M;
2409			pmap_invalidate_page(kernel_pmap, (vm_offset_t)PADDR2);
2410		}
2411		firstpte = PADDR2;
2412	}
2413	newpde = mptepa | PG_M | PG_A | (oldpde & PG_U) | PG_RW | PG_V;
2414	KASSERT((oldpde & PG_A) != 0,
2415	    ("pmap_demote_pde: oldpde is missing PG_A"));
2416	KASSERT((oldpde & (PG_M | PG_RW)) != PG_RW,
2417	    ("pmap_demote_pde: oldpde is missing PG_M"));
2418	newpte = oldpde & ~PG_PS;
2419	if ((newpte & PG_PDE_PAT) != 0)
2420		newpte ^= PG_PDE_PAT | PG_PTE_PAT;
2421
2422	/*
2423	 * If the page table page is new, initialize it.
2424	 */
2425	if (mpte->wire_count == 1) {
2426		mpte->wire_count = NPTEPG;
2427		pmap_fill_ptp(firstpte, newpte);
2428	}
2429	KASSERT((*firstpte & PG_FRAME) == (newpte & PG_FRAME),
2430	    ("pmap_demote_pde: firstpte and newpte map different physical"
2431	    " addresses"));
2432
2433	/*
2434	 * If the mapping has changed attributes, update the page table
2435	 * entries.
2436	 */
2437	if ((*firstpte & PG_PTE_PROMOTE) != (newpte & PG_PTE_PROMOTE))
2438		pmap_fill_ptp(firstpte, newpte);
2439
2440	/*
2441	 * Demote the mapping.  This pmap is locked.  The old PDE has
2442	 * PG_A set.  If the old PDE has PG_RW set, it also has PG_M
2443	 * set.  Thus, there is no danger of a race with another
2444	 * processor changing the setting of PG_A and/or PG_M between
2445	 * the read above and the store below.
2446	 */
2447	if (pmap == kernel_pmap) {
2448		/*
2449		 * A harmless race exists between this loop and the bcopy()
2450		 * in pmap_pinit() that initializes the kernel segment of
2451		 * the new page table.  Specifically, that bcopy() may copy
2452		 * the new PDE from the PTD, which is first in allpmaps, to
2453		 * the new page table before this loop updates that new
2454		 * page table.
2455		 */
2456		mtx_lock_spin(&allpmaps_lock);
2457		LIST_FOREACH(allpmaps_entry, &allpmaps, pm_list) {
2458			pde = pmap_pde(allpmaps_entry, va);
2459			KASSERT(*pde == newpde || (*pde & PG_PTE_PROMOTE) ==
2460			    (oldpde & PG_PTE_PROMOTE),
2461			    ("pmap_demote_pde: pde was %#jx, expected %#jx",
2462			    (uintmax_t)*pde, (uintmax_t)oldpde));
2463			pde_store(pde, newpde);
2464		}
2465		mtx_unlock_spin(&allpmaps_lock);
2466	} else
2467		pde_store(pde, newpde);
2468	if (firstpte == PADDR2)
2469		mtx_unlock(&PMAP2mutex);
2470
2471	/*
2472	 * Invalidate the recursive mapping of the page table page.
2473	 */
2474	pmap_invalidate_page(pmap, (vm_offset_t)vtopte(va));
2475
2476	/*
2477	 * Demote the pv entry.  This depends on the earlier demotion
2478	 * of the mapping.  Specifically, the (re)creation of a per-
2479	 * page pv entry might trigger the execution of pmap_collect(),
2480	 * which might reclaim a newly (re)created per-page pv entry
2481	 * and destroy the associated mapping.  In order to destroy
2482	 * the mapping, the PDE must have already changed from mapping
2483	 * the 2mpage to referencing the page table page.
2484	 */
2485	if ((oldpde & PG_MANAGED) != 0)
2486		pmap_pv_demote_pde(pmap, va, oldpde & PG_PS_FRAME);
2487
2488	pmap_pde_demotions++;
2489	CTR2(KTR_PMAP, "pmap_demote_pde: success for va %#x"
2490	    " in pmap %p", va, pmap);
2491	return (TRUE);
2492}
2493
2494/*
2495 * pmap_remove_pde: do the things to unmap a superpage in a process
2496 */
2497static void
2498pmap_remove_pde(pmap_t pmap, pd_entry_t *pdq, vm_offset_t sva,
2499    vm_page_t *free)
2500{
2501	struct md_page *pvh;
2502	pd_entry_t oldpde;
2503	vm_offset_t eva, va;
2504	vm_page_t m, mpte;
2505
2506	PMAP_LOCK_ASSERT(pmap, MA_OWNED);
2507	KASSERT((sva & PDRMASK) == 0,
2508	    ("pmap_remove_pde: sva is not 4mpage aligned"));
2509	oldpde = pte_load_clear(pdq);
2510	if (oldpde & PG_W)
2511		pmap->pm_stats.wired_count -= NBPDR / PAGE_SIZE;
2512
2513	/*
2514	 * Machines that don't support invlpg, also don't support
2515	 * PG_G.
2516	 */
2517	if (oldpde & PG_G)
2518		pmap_invalidate_page(kernel_pmap, sva);
2519	pmap->pm_stats.resident_count -= NBPDR / PAGE_SIZE;
2520	if (oldpde & PG_MANAGED) {
2521		pvh = pa_to_pvh(oldpde & PG_PS_FRAME);
2522		pmap_pvh_free(pvh, pmap, sva);
2523		eva = sva + NBPDR;
2524		for (va = sva, m = PHYS_TO_VM_PAGE(oldpde & PG_PS_FRAME);
2525		    va < eva; va += PAGE_SIZE, m++) {
2526			if ((oldpde & (PG_M | PG_RW)) == (PG_M | PG_RW))
2527				vm_page_dirty(m);
2528			if (oldpde & PG_A)
2529				vm_page_flag_set(m, PG_REFERENCED);
2530			if (TAILQ_EMPTY(&m->md.pv_list) &&
2531			    TAILQ_EMPTY(&pvh->pv_list))
2532				vm_page_flag_clear(m, PG_WRITEABLE);
2533		}
2534	}
2535	if (pmap == kernel_pmap) {
2536		if (!pmap_demote_pde(pmap, pdq, sva))
2537			panic("pmap_remove_pde: failed demotion");
2538	} else {
2539		mpte = pmap_lookup_pt_page(pmap, sva);
2540		if (mpte != NULL) {
2541			pmap_remove_pt_page(pmap, mpte);
2542			pmap->pm_stats.resident_count--;
2543			KASSERT(mpte->wire_count == NPTEPG,
2544			    ("pmap_remove_pde: pte page wire count error"));
2545			mpte->wire_count = 0;
2546			pmap_add_delayed_free_list(mpte, free, FALSE);
2547			atomic_subtract_int(&cnt.v_wire_count, 1);
2548		}
2549	}
2550}
2551
2552/*
2553 * pmap_remove_pte: do the things to unmap a page in a process
2554 */
2555static int
2556pmap_remove_pte(pmap_t pmap, pt_entry_t *ptq, vm_offset_t va, vm_page_t *free)
2557{
2558	pt_entry_t oldpte;
2559	vm_page_t m;
2560
2561	mtx_assert(&vm_page_queue_mtx, MA_OWNED);
2562	PMAP_LOCK_ASSERT(pmap, MA_OWNED);
2563	oldpte = pte_load_clear(ptq);
2564	if (oldpte & PG_W)
2565		pmap->pm_stats.wired_count -= 1;
2566	/*
2567	 * Machines that don't support invlpg, also don't support
2568	 * PG_G.
2569	 */
2570	if (oldpte & PG_G)
2571		pmap_invalidate_page(kernel_pmap, va);
2572	pmap->pm_stats.resident_count -= 1;
2573	if (oldpte & PG_MANAGED) {
2574		m = PHYS_TO_VM_PAGE(oldpte & PG_FRAME);
2575		if ((oldpte & (PG_M | PG_RW)) == (PG_M | PG_RW))
2576			vm_page_dirty(m);
2577		if (oldpte & PG_A)
2578			vm_page_flag_set(m, PG_REFERENCED);
2579		pmap_remove_entry(pmap, m, va);
2580	}
2581	return (pmap_unuse_pt(pmap, va, free));
2582}
2583
2584/*
2585 * Remove a single page from a process address space
2586 */
2587static void
2588pmap_remove_page(pmap_t pmap, vm_offset_t va, vm_page_t *free)
2589{
2590	pt_entry_t *pte;
2591
2592	mtx_assert(&vm_page_queue_mtx, MA_OWNED);
2593	KASSERT(curthread->td_pinned > 0, ("curthread not pinned"));
2594	PMAP_LOCK_ASSERT(pmap, MA_OWNED);
2595	if ((pte = pmap_pte_quick(pmap, va)) == NULL || *pte == 0)
2596		return;
2597	pmap_remove_pte(pmap, pte, va, free);
2598	pmap_invalidate_page(pmap, va);
2599}
2600
2601/*
2602 *	Remove the given range of addresses from the specified map.
2603 *
2604 *	It is assumed that the start and end are properly
2605 *	rounded to the page size.
2606 */
2607void
2608pmap_remove(pmap_t pmap, vm_offset_t sva, vm_offset_t eva)
2609{
2610	vm_offset_t pdnxt;
2611	pd_entry_t ptpaddr;
2612	pt_entry_t *pte;
2613	vm_page_t free = NULL;
2614	int anyvalid;
2615
2616	/*
2617	 * Perform an unsynchronized read.  This is, however, safe.
2618	 */
2619	if (pmap->pm_stats.resident_count == 0)
2620		return;
2621
2622	anyvalid = 0;
2623
2624	vm_page_lock_queues();
2625	sched_pin();
2626	PMAP_LOCK(pmap);
2627
2628	/*
2629	 * special handling of removing one page.  a very
2630	 * common operation and easy to short circuit some
2631	 * code.
2632	 */
2633	if ((sva + PAGE_SIZE == eva) &&
2634	    ((pmap->pm_pdir[(sva >> PDRSHIFT)] & PG_PS) == 0)) {
2635		pmap_remove_page(pmap, sva, &free);
2636		goto out;
2637	}
2638
2639	for (; sva < eva; sva = pdnxt) {
2640		unsigned pdirindex;
2641
2642		/*
2643		 * Calculate index for next page table.
2644		 */
2645		pdnxt = (sva + NBPDR) & ~PDRMASK;
2646		if (pdnxt < sva)
2647			pdnxt = eva;
2648		if (pmap->pm_stats.resident_count == 0)
2649			break;
2650
2651		pdirindex = sva >> PDRSHIFT;
2652		ptpaddr = pmap->pm_pdir[pdirindex];
2653
2654		/*
2655		 * Weed out invalid mappings. Note: we assume that the page
2656		 * directory table is always allocated, and in kernel virtual.
2657		 */
2658		if (ptpaddr == 0)
2659			continue;
2660
2661		/*
2662		 * Check for large page.
2663		 */
2664		if ((ptpaddr & PG_PS) != 0) {
2665			/*
2666			 * Are we removing the entire large page?  If not,
2667			 * demote the mapping and fall through.
2668			 */
2669			if (sva + NBPDR == pdnxt && eva >= pdnxt) {
2670				/*
2671				 * The TLB entry for a PG_G mapping is
2672				 * invalidated by pmap_remove_pde().
2673				 */
2674				if ((ptpaddr & PG_G) == 0)
2675					anyvalid = 1;
2676				pmap_remove_pde(pmap,
2677				    &pmap->pm_pdir[pdirindex], sva, &free);
2678				continue;
2679			} else if (!pmap_demote_pde(pmap,
2680			    &pmap->pm_pdir[pdirindex], sva)) {
2681				/* The large page mapping was destroyed. */
2682				continue;
2683			}
2684		}
2685
2686		/*
2687		 * Limit our scan to either the end of the va represented
2688		 * by the current page table page, or to the end of the
2689		 * range being removed.
2690		 */
2691		if (pdnxt > eva)
2692			pdnxt = eva;
2693
2694		for (pte = pmap_pte_quick(pmap, sva); sva != pdnxt; pte++,
2695		    sva += PAGE_SIZE) {
2696			if (*pte == 0)
2697				continue;
2698
2699			/*
2700			 * The TLB entry for a PG_G mapping is invalidated
2701			 * by pmap_remove_pte().
2702			 */
2703			if ((*pte & PG_G) == 0)
2704				anyvalid = 1;
2705			if (pmap_remove_pte(pmap, pte, sva, &free))
2706				break;
2707		}
2708	}
2709out:
2710	sched_unpin();
2711	if (anyvalid)
2712		pmap_invalidate_all(pmap);
2713	vm_page_unlock_queues();
2714	PMAP_UNLOCK(pmap);
2715	pmap_free_zero_pages(free);
2716}
2717
2718/*
2719 *	Routine:	pmap_remove_all
2720 *	Function:
2721 *		Removes this physical page from
2722 *		all physical maps in which it resides.
2723 *		Reflects back modify bits to the pager.
2724 *
2725 *	Notes:
2726 *		Original versions of this routine were very
2727 *		inefficient because they iteratively called
2728 *		pmap_remove (slow...)
2729 */
2730
2731void
2732pmap_remove_all(vm_page_t m)
2733{
2734	struct md_page *pvh;
2735	pv_entry_t pv;
2736	pmap_t pmap;
2737	pt_entry_t *pte, tpte;
2738	pd_entry_t *pde;
2739	vm_offset_t va;
2740	vm_page_t free;
2741
2742	KASSERT((m->flags & PG_FICTITIOUS) == 0,
2743	    ("pmap_remove_all: page %p is fictitious", m));
2744	mtx_assert(&vm_page_queue_mtx, MA_OWNED);
2745	sched_pin();
2746	pvh = pa_to_pvh(VM_PAGE_TO_PHYS(m));
2747	while ((pv = TAILQ_FIRST(&pvh->pv_list)) != NULL) {
2748		va = pv->pv_va;
2749		pmap = PV_PMAP(pv);
2750		PMAP_LOCK(pmap);
2751		pde = pmap_pde(pmap, va);
2752		(void)pmap_demote_pde(pmap, pde, va);
2753		PMAP_UNLOCK(pmap);
2754	}
2755	while ((pv = TAILQ_FIRST(&m->md.pv_list)) != NULL) {
2756		pmap = PV_PMAP(pv);
2757		PMAP_LOCK(pmap);
2758		pmap->pm_stats.resident_count--;
2759		pde = pmap_pde(pmap, pv->pv_va);
2760		KASSERT((*pde & PG_PS) == 0, ("pmap_remove_all: found"
2761		    " a 4mpage in page %p's pv list", m));
2762		pte = pmap_pte_quick(pmap, pv->pv_va);
2763		tpte = pte_load_clear(pte);
2764		if (tpte & PG_W)
2765			pmap->pm_stats.wired_count--;
2766		if (tpte & PG_A)
2767			vm_page_flag_set(m, PG_REFERENCED);
2768
2769		/*
2770		 * Update the vm_page_t clean and reference bits.
2771		 */
2772		if ((tpte & (PG_M | PG_RW)) == (PG_M | PG_RW))
2773			vm_page_dirty(m);
2774		free = NULL;
2775		pmap_unuse_pt(pmap, pv->pv_va, &free);
2776		pmap_invalidate_page(pmap, pv->pv_va);
2777		pmap_free_zero_pages(free);
2778		TAILQ_REMOVE(&m->md.pv_list, pv, pv_list);
2779		free_pv_entry(pmap, pv);
2780		PMAP_UNLOCK(pmap);
2781	}
2782	vm_page_flag_clear(m, PG_WRITEABLE);
2783	sched_unpin();
2784}
2785
2786/*
2787 * pmap_protect_pde: do the things to protect a 4mpage in a process
2788 */
2789static boolean_t
2790pmap_protect_pde(pmap_t pmap, pd_entry_t *pde, vm_offset_t sva, vm_prot_t prot)
2791{
2792	pd_entry_t newpde, oldpde;
2793	vm_offset_t eva, va;
2794	vm_page_t m;
2795	boolean_t anychanged;
2796
2797	PMAP_LOCK_ASSERT(pmap, MA_OWNED);
2798	KASSERT((sva & PDRMASK) == 0,
2799	    ("pmap_protect_pde: sva is not 4mpage aligned"));
2800	anychanged = FALSE;
2801retry:
2802	oldpde = newpde = *pde;
2803	if (oldpde & PG_MANAGED) {
2804		eva = sva + NBPDR;
2805		for (va = sva, m = PHYS_TO_VM_PAGE(oldpde & PG_PS_FRAME);
2806		    va < eva; va += PAGE_SIZE, m++) {
2807			/*
2808			 * In contrast to the analogous operation on a 4KB page
2809			 * mapping, the mapping's PG_A flag is not cleared and
2810			 * the page's PG_REFERENCED flag is not set.  The
2811			 * reason is that pmap_demote_pde() expects that a 2/4MB
2812			 * page mapping with a stored page table page has PG_A
2813			 * set.
2814			 */
2815			if ((oldpde & (PG_M | PG_RW)) == (PG_M | PG_RW))
2816				vm_page_dirty(m);
2817		}
2818	}
2819	if ((prot & VM_PROT_WRITE) == 0)
2820		newpde &= ~(PG_RW | PG_M);
2821#ifdef PAE
2822	if ((prot & VM_PROT_EXECUTE) == 0)
2823		newpde |= pg_nx;
2824#endif
2825	if (newpde != oldpde) {
2826		if (!pde_cmpset(pde, oldpde, newpde))
2827			goto retry;
2828		if (oldpde & PG_G)
2829			pmap_invalidate_page(pmap, sva);
2830		else
2831			anychanged = TRUE;
2832	}
2833	return (anychanged);
2834}
2835
2836/*
2837 *	Set the physical protection on the
2838 *	specified range of this map as requested.
2839 */
2840void
2841pmap_protect(pmap_t pmap, vm_offset_t sva, vm_offset_t eva, vm_prot_t prot)
2842{
2843	vm_offset_t pdnxt;
2844	pd_entry_t ptpaddr;
2845	pt_entry_t *pte;
2846	int anychanged;
2847
2848	if ((prot & VM_PROT_READ) == VM_PROT_NONE) {
2849		pmap_remove(pmap, sva, eva);
2850		return;
2851	}
2852
2853#ifdef PAE
2854	if ((prot & (VM_PROT_WRITE|VM_PROT_EXECUTE)) ==
2855	    (VM_PROT_WRITE|VM_PROT_EXECUTE))
2856		return;
2857#else
2858	if (prot & VM_PROT_WRITE)
2859		return;
2860#endif
2861
2862	anychanged = 0;
2863
2864	vm_page_lock_queues();
2865	sched_pin();
2866	PMAP_LOCK(pmap);
2867	for (; sva < eva; sva = pdnxt) {
2868		pt_entry_t obits, pbits;
2869		unsigned pdirindex;
2870
2871		pdnxt = (sva + NBPDR) & ~PDRMASK;
2872		if (pdnxt < sva)
2873			pdnxt = eva;
2874
2875		pdirindex = sva >> PDRSHIFT;
2876		ptpaddr = pmap->pm_pdir[pdirindex];
2877
2878		/*
2879		 * Weed out invalid mappings. Note: we assume that the page
2880		 * directory table is always allocated, and in kernel virtual.
2881		 */
2882		if (ptpaddr == 0)
2883			continue;
2884
2885		/*
2886		 * Check for large page.
2887		 */
2888		if ((ptpaddr & PG_PS) != 0) {
2889			/*
2890			 * Are we protecting the entire large page?  If not,
2891			 * demote the mapping and fall through.
2892			 */
2893			if (sva + NBPDR == pdnxt && eva >= pdnxt) {
2894				/*
2895				 * The TLB entry for a PG_G mapping is
2896				 * invalidated by pmap_protect_pde().
2897				 */
2898				if (pmap_protect_pde(pmap,
2899				    &pmap->pm_pdir[pdirindex], sva, prot))
2900					anychanged = 1;
2901				continue;
2902			} else if (!pmap_demote_pde(pmap,
2903			    &pmap->pm_pdir[pdirindex], sva)) {
2904				/* The large page mapping was destroyed. */
2905				continue;
2906			}
2907		}
2908
2909		if (pdnxt > eva)
2910			pdnxt = eva;
2911
2912		for (pte = pmap_pte_quick(pmap, sva); sva != pdnxt; pte++,
2913		    sva += PAGE_SIZE) {
2914			vm_page_t m;
2915
2916retry:
2917			/*
2918			 * Regardless of whether a pte is 32 or 64 bits in
2919			 * size, PG_RW, PG_A, and PG_M are among the least
2920			 * significant 32 bits.
2921			 */
2922			obits = pbits = *pte;
2923			if ((pbits & PG_V) == 0)
2924				continue;
2925			if (pbits & PG_MANAGED) {
2926				m = NULL;
2927				if (pbits & PG_A) {
2928					m = PHYS_TO_VM_PAGE(pbits & PG_FRAME);
2929					vm_page_flag_set(m, PG_REFERENCED);
2930					pbits &= ~PG_A;
2931				}
2932				if ((pbits & (PG_M | PG_RW)) == (PG_M | PG_RW)) {
2933					if (m == NULL)
2934						m = PHYS_TO_VM_PAGE(pbits & PG_FRAME);
2935					vm_page_dirty(m);
2936				}
2937			}
2938
2939			if ((prot & VM_PROT_WRITE) == 0)
2940				pbits &= ~(PG_RW | PG_M);
2941#ifdef PAE
2942			if ((prot & VM_PROT_EXECUTE) == 0)
2943				pbits |= pg_nx;
2944#endif
2945
2946			if (pbits != obits) {
2947#ifdef PAE
2948				if (!atomic_cmpset_64(pte, obits, pbits))
2949					goto retry;
2950#else
2951				if (!atomic_cmpset_int((u_int *)pte, obits,
2952				    pbits))
2953					goto retry;
2954#endif
2955				if (obits & PG_G)
2956					pmap_invalidate_page(pmap, sva);
2957				else
2958					anychanged = 1;
2959			}
2960		}
2961	}
2962	sched_unpin();
2963	if (anychanged)
2964		pmap_invalidate_all(pmap);
2965	vm_page_unlock_queues();
2966	PMAP_UNLOCK(pmap);
2967}
2968
2969/*
2970 * Tries to promote the 512 or 1024, contiguous 4KB page mappings that are
2971 * within a single page table page (PTP) to a single 2- or 4MB page mapping.
2972 * For promotion to occur, two conditions must be met: (1) the 4KB page
2973 * mappings must map aligned, contiguous physical memory and (2) the 4KB page
2974 * mappings must have identical characteristics.
2975 *
2976 * Managed (PG_MANAGED) mappings within the kernel address space are not
2977 * promoted.  The reason is that kernel PDEs are replicated in each pmap but
2978 * pmap_clear_ptes() and pmap_ts_referenced() only read the PDE from the kernel
2979 * pmap.
2980 */
2981static void
2982pmap_promote_pde(pmap_t pmap, pd_entry_t *pde, vm_offset_t va)
2983{
2984	pd_entry_t newpde;
2985	pmap_t allpmaps_entry;
2986	pt_entry_t *firstpte, oldpte, pa, *pte;
2987	vm_offset_t oldpteva;
2988	vm_page_t mpte;
2989
2990	PMAP_LOCK_ASSERT(pmap, MA_OWNED);
2991
2992	/*
2993	 * Examine the first PTE in the specified PTP.  Abort if this PTE is
2994	 * either invalid, unused, or does not map the first 4KB physical page
2995	 * within a 2- or 4MB page.
2996	 */
2997	firstpte = vtopte(trunc_4mpage(va));
2998setpde:
2999	newpde = *firstpte;
3000	if ((newpde & ((PG_FRAME & PDRMASK) | PG_A | PG_V)) != (PG_A | PG_V)) {
3001		pmap_pde_p_failures++;
3002		CTR2(KTR_PMAP, "pmap_promote_pde: failure for va %#x"
3003		    " in pmap %p", va, pmap);
3004		return;
3005	}
3006	if ((*firstpte & PG_MANAGED) != 0 && pmap == kernel_pmap) {
3007		pmap_pde_p_failures++;
3008		CTR2(KTR_PMAP, "pmap_promote_pde: failure for va %#x"
3009		    " in pmap %p", va, pmap);
3010		return;
3011	}
3012	if ((newpde & (PG_M | PG_RW)) == PG_RW) {
3013		/*
3014		 * When PG_M is already clear, PG_RW can be cleared without
3015		 * a TLB invalidation.
3016		 */
3017		if (!atomic_cmpset_int((u_int *)firstpte, newpde, newpde &
3018		    ~PG_RW))
3019			goto setpde;
3020		newpde &= ~PG_RW;
3021	}
3022
3023	/*
3024	 * Examine each of the other PTEs in the specified PTP.  Abort if this
3025	 * PTE maps an unexpected 4KB physical page or does not have identical
3026	 * characteristics to the first PTE.
3027	 */
3028	pa = (newpde & (PG_PS_FRAME | PG_A | PG_V)) + NBPDR - PAGE_SIZE;
3029	for (pte = firstpte + NPTEPG - 1; pte > firstpte; pte--) {
3030setpte:
3031		oldpte = *pte;
3032		if ((oldpte & (PG_FRAME | PG_A | PG_V)) != pa) {
3033			pmap_pde_p_failures++;
3034			CTR2(KTR_PMAP, "pmap_promote_pde: failure for va %#x"
3035			    " in pmap %p", va, pmap);
3036			return;
3037		}
3038		if ((oldpte & (PG_M | PG_RW)) == PG_RW) {
3039			/*
3040			 * When PG_M is already clear, PG_RW can be cleared
3041			 * without a TLB invalidation.
3042			 */
3043			if (!atomic_cmpset_int((u_int *)pte, oldpte,
3044			    oldpte & ~PG_RW))
3045				goto setpte;
3046			oldpte &= ~PG_RW;
3047			oldpteva = (oldpte & PG_FRAME & PDRMASK) |
3048			    (va & ~PDRMASK);
3049			CTR2(KTR_PMAP, "pmap_promote_pde: protect for va %#x"
3050			    " in pmap %p", oldpteva, pmap);
3051		}
3052		if ((oldpte & PG_PTE_PROMOTE) != (newpde & PG_PTE_PROMOTE)) {
3053			pmap_pde_p_failures++;
3054			CTR2(KTR_PMAP, "pmap_promote_pde: failure for va %#x"
3055			    " in pmap %p", va, pmap);
3056			return;
3057		}
3058		pa -= PAGE_SIZE;
3059	}
3060
3061	/*
3062	 * Save the page table page in its current state until the PDE
3063	 * mapping the superpage is demoted by pmap_demote_pde() or
3064	 * destroyed by pmap_remove_pde().
3065	 */
3066	mpte = PHYS_TO_VM_PAGE(*pde & PG_FRAME);
3067	KASSERT(mpte >= vm_page_array &&
3068	    mpte < &vm_page_array[vm_page_array_size],
3069	    ("pmap_promote_pde: page table page is out of range"));
3070	KASSERT(mpte->pindex == va >> PDRSHIFT,
3071	    ("pmap_promote_pde: page table page's pindex is wrong"));
3072	pmap_insert_pt_page(pmap, mpte);
3073
3074	/*
3075	 * Promote the pv entries.
3076	 */
3077	if ((newpde & PG_MANAGED) != 0)
3078		pmap_pv_promote_pde(pmap, va, newpde & PG_PS_FRAME);
3079
3080	/*
3081	 * Propagate the PAT index to its proper position.
3082	 */
3083	if ((newpde & PG_PTE_PAT) != 0)
3084		newpde ^= PG_PDE_PAT | PG_PTE_PAT;
3085
3086	/*
3087	 * Map the superpage.
3088	 */
3089	if (pmap == kernel_pmap) {
3090		mtx_lock_spin(&allpmaps_lock);
3091		LIST_FOREACH(allpmaps_entry, &allpmaps, pm_list) {
3092			pde = pmap_pde(allpmaps_entry, va);
3093			pde_store(pde, PG_PS | newpde);
3094		}
3095		mtx_unlock_spin(&allpmaps_lock);
3096	} else
3097		pde_store(pde, PG_PS | newpde);
3098
3099	pmap_pde_promotions++;
3100	CTR2(KTR_PMAP, "pmap_promote_pde: success for va %#x"
3101	    " in pmap %p", va, pmap);
3102}
3103
3104/*
3105 *	Insert the given physical page (p) at
3106 *	the specified virtual address (v) in the
3107 *	target physical map with the protection requested.
3108 *
3109 *	If specified, the page will be wired down, meaning
3110 *	that the related pte can not be reclaimed.
3111 *
3112 *	NB:  This is the only routine which MAY NOT lazy-evaluate
3113 *	or lose information.  That is, this routine must actually
3114 *	insert this page into the given map NOW.
3115 */
3116void
3117pmap_enter(pmap_t pmap, vm_offset_t va, vm_prot_t access, vm_page_t m,
3118    vm_prot_t prot, boolean_t wired)
3119{
3120	vm_paddr_t pa;
3121	pd_entry_t *pde;
3122	pt_entry_t *pte;
3123	vm_paddr_t opa;
3124	pt_entry_t origpte, newpte;
3125	vm_page_t mpte, om;
3126	boolean_t invlva;
3127
3128	va = trunc_page(va);
3129	KASSERT(va <= VM_MAX_KERNEL_ADDRESS, ("pmap_enter: toobig"));
3130	KASSERT(va < UPT_MIN_ADDRESS || va >= UPT_MAX_ADDRESS,
3131	    ("pmap_enter: invalid to pmap_enter page table pages (va: 0x%x)", va));
3132
3133	mpte = NULL;
3134
3135	vm_page_lock_queues();
3136	PMAP_LOCK(pmap);
3137	sched_pin();
3138
3139	/*
3140	 * In the case that a page table page is not
3141	 * resident, we are creating it here.
3142	 */
3143	if (va < VM_MAXUSER_ADDRESS) {
3144		mpte = pmap_allocpte(pmap, va, M_WAITOK);
3145	}
3146
3147	pde = pmap_pde(pmap, va);
3148	if ((*pde & PG_PS) != 0)
3149		panic("pmap_enter: attempted pmap_enter on 4MB page");
3150	pte = pmap_pte_quick(pmap, va);
3151
3152	/*
3153	 * Page Directory table entry not valid, we need a new PT page
3154	 */
3155	if (pte == NULL) {
3156		panic("pmap_enter: invalid page directory pdir=%#jx, va=%#x",
3157			(uintmax_t)pmap->pm_pdir[PTDPTDI], va);
3158	}
3159
3160	pa = VM_PAGE_TO_PHYS(m);
3161	om = NULL;
3162	origpte = *pte;
3163	opa = origpte & PG_FRAME;
3164
3165	/*
3166	 * Mapping has not changed, must be protection or wiring change.
3167	 */
3168	if (origpte && (opa == pa)) {
3169		/*
3170		 * Wiring change, just update stats. We don't worry about
3171		 * wiring PT pages as they remain resident as long as there
3172		 * are valid mappings in them. Hence, if a user page is wired,
3173		 * the PT page will be also.
3174		 */
3175		if (wired && ((origpte & PG_W) == 0))
3176			pmap->pm_stats.wired_count++;
3177		else if (!wired && (origpte & PG_W))
3178			pmap->pm_stats.wired_count--;
3179
3180		/*
3181		 * Remove extra pte reference
3182		 */
3183		if (mpte)
3184			mpte->wire_count--;
3185
3186		/*
3187		 * We might be turning off write access to the page,
3188		 * so we go ahead and sense modify status.
3189		 */
3190		if (origpte & PG_MANAGED) {
3191			om = m;
3192			pa |= PG_MANAGED;
3193		}
3194		goto validate;
3195	}
3196	/*
3197	 * Mapping has changed, invalidate old range and fall through to
3198	 * handle validating new mapping.
3199	 */
3200	if (opa) {
3201		if (origpte & PG_W)
3202			pmap->pm_stats.wired_count--;
3203		if (origpte & PG_MANAGED) {
3204			om = PHYS_TO_VM_PAGE(opa);
3205			pmap_remove_entry(pmap, om, va);
3206		}
3207		if (mpte != NULL) {
3208			mpte->wire_count--;
3209			KASSERT(mpte->wire_count > 0,
3210			    ("pmap_enter: missing reference to page table page,"
3211			     " va: 0x%x", va));
3212		}
3213	} else
3214		pmap->pm_stats.resident_count++;
3215
3216	/*
3217	 * Enter on the PV list if part of our managed memory.
3218	 */
3219	if ((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) == 0) {
3220		KASSERT(va < kmi.clean_sva || va >= kmi.clean_eva,
3221		    ("pmap_enter: managed mapping within the clean submap"));
3222		pmap_insert_entry(pmap, va, m);
3223		pa |= PG_MANAGED;
3224	}
3225
3226	/*
3227	 * Increment counters
3228	 */
3229	if (wired)
3230		pmap->pm_stats.wired_count++;
3231
3232validate:
3233	/*
3234	 * Now validate mapping with desired protection/wiring.
3235	 */
3236	newpte = (pt_entry_t)(pa | pmap_cache_bits(m->md.pat_mode, 0) | PG_V);
3237	if ((prot & VM_PROT_WRITE) != 0) {
3238		newpte |= PG_RW;
3239		vm_page_flag_set(m, PG_WRITEABLE);
3240	}
3241#ifdef PAE
3242	if ((prot & VM_PROT_EXECUTE) == 0)
3243		newpte |= pg_nx;
3244#endif
3245	if (wired)
3246		newpte |= PG_W;
3247	if (va < VM_MAXUSER_ADDRESS)
3248		newpte |= PG_U;
3249	if (pmap == kernel_pmap)
3250		newpte |= pgeflag;
3251
3252	/*
3253	 * if the mapping or permission bits are different, we need
3254	 * to update the pte.
3255	 */
3256	if ((origpte & ~(PG_M|PG_A)) != newpte) {
3257		newpte |= PG_A;
3258		if ((access & VM_PROT_WRITE) != 0)
3259			newpte |= PG_M;
3260		if (origpte & PG_V) {
3261			invlva = FALSE;
3262			origpte = pte_load_store(pte, newpte);
3263			if (origpte & PG_A) {
3264				if (origpte & PG_MANAGED)
3265					vm_page_flag_set(om, PG_REFERENCED);
3266				if (opa != VM_PAGE_TO_PHYS(m))
3267					invlva = TRUE;
3268#ifdef PAE
3269				if ((origpte & PG_NX) == 0 &&
3270				    (newpte & PG_NX) != 0)
3271					invlva = TRUE;
3272#endif
3273			}
3274			if ((origpte & (PG_M | PG_RW)) == (PG_M | PG_RW)) {
3275				if ((origpte & PG_MANAGED) != 0)
3276					vm_page_dirty(om);
3277				if ((prot & VM_PROT_WRITE) == 0)
3278					invlva = TRUE;
3279			}
3280			if (invlva)
3281				pmap_invalidate_page(pmap, va);
3282		} else
3283			pte_store(pte, newpte);
3284	}
3285
3286	/*
3287	 * If both the page table page and the reservation are fully
3288	 * populated, then attempt promotion.
3289	 */
3290	if ((mpte == NULL || mpte->wire_count == NPTEPG) &&
3291	    pg_ps_enabled && vm_reserv_level_iffullpop(m) == 0)
3292		pmap_promote_pde(pmap, pde, va);
3293
3294	sched_unpin();
3295	vm_page_unlock_queues();
3296	PMAP_UNLOCK(pmap);
3297}
3298
3299/*
3300 * Tries to create a 2- or 4MB page mapping.  Returns TRUE if successful and
3301 * FALSE otherwise.  Fails if (1) a page table page cannot be allocated without
3302 * blocking, (2) a mapping already exists at the specified virtual address, or
3303 * (3) a pv entry cannot be allocated without reclaiming another pv entry.
3304 */
3305static boolean_t
3306pmap_enter_pde(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot)
3307{
3308	pd_entry_t *pde, newpde;
3309
3310	mtx_assert(&vm_page_queue_mtx, MA_OWNED);
3311	PMAP_LOCK_ASSERT(pmap, MA_OWNED);
3312	pde = pmap_pde(pmap, va);
3313	if (*pde != 0) {
3314		CTR2(KTR_PMAP, "pmap_enter_pde: failure for va %#lx"
3315		    " in pmap %p", va, pmap);
3316		return (FALSE);
3317	}
3318	newpde = VM_PAGE_TO_PHYS(m) | pmap_cache_bits(m->md.pat_mode, 1) |
3319	    PG_PS | PG_V;
3320	if ((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) == 0) {
3321		newpde |= PG_MANAGED;
3322
3323		/*
3324		 * Abort this mapping if its PV entry could not be created.
3325		 */
3326		if (!pmap_pv_insert_pde(pmap, va, VM_PAGE_TO_PHYS(m))) {
3327			CTR2(KTR_PMAP, "pmap_enter_pde: failure for va %#lx"
3328			    " in pmap %p", va, pmap);
3329			return (FALSE);
3330		}
3331	}
3332#ifdef PAE
3333	if ((prot & VM_PROT_EXECUTE) == 0)
3334		newpde |= pg_nx;
3335#endif
3336	if (va < VM_MAXUSER_ADDRESS)
3337		newpde |= PG_U;
3338
3339	/*
3340	 * Increment counters.
3341	 */
3342	pmap->pm_stats.resident_count += NBPDR / PAGE_SIZE;
3343
3344	/*
3345	 * Map the superpage.
3346	 */
3347	pde_store(pde, newpde);
3348
3349	pmap_pde_mappings++;
3350	CTR2(KTR_PMAP, "pmap_enter_pde: success for va %#lx"
3351	    " in pmap %p", va, pmap);
3352	return (TRUE);
3353}
3354
3355/*
3356 * Maps a sequence of resident pages belonging to the same object.
3357 * The sequence begins with the given page m_start.  This page is
3358 * mapped at the given virtual address start.  Each subsequent page is
3359 * mapped at a virtual address that is offset from start by the same
3360 * amount as the page is offset from m_start within the object.  The
3361 * last page in the sequence is the page with the largest offset from
3362 * m_start that can be mapped at a virtual address less than the given
3363 * virtual address end.  Not every virtual page between start and end
3364 * is mapped; only those for which a resident page exists with the
3365 * corresponding offset from m_start are mapped.
3366 */
3367void
3368pmap_enter_object(pmap_t pmap, vm_offset_t start, vm_offset_t end,
3369    vm_page_t m_start, vm_prot_t prot)
3370{
3371	vm_offset_t va;
3372	vm_page_t m, mpte;
3373	vm_pindex_t diff, psize;
3374
3375	VM_OBJECT_LOCK_ASSERT(m_start->object, MA_OWNED);
3376	psize = atop(end - start);
3377	mpte = NULL;
3378	m = m_start;
3379	PMAP_LOCK(pmap);
3380	while (m != NULL && (diff = m->pindex - m_start->pindex) < psize) {
3381		va = start + ptoa(diff);
3382		if ((va & PDRMASK) == 0 && va + NBPDR <= end &&
3383		    (VM_PAGE_TO_PHYS(m) & PDRMASK) == 0 &&
3384		    pg_ps_enabled && vm_reserv_level_iffullpop(m) == 0 &&
3385		    pmap_enter_pde(pmap, va, m, prot))
3386			m = &m[NBPDR / PAGE_SIZE - 1];
3387		else
3388			mpte = pmap_enter_quick_locked(pmap, va, m, prot,
3389			    mpte);
3390		m = TAILQ_NEXT(m, listq);
3391	}
3392 	PMAP_UNLOCK(pmap);
3393}
3394
3395/*
3396 * this code makes some *MAJOR* assumptions:
3397 * 1. Current pmap & pmap exists.
3398 * 2. Not wired.
3399 * 3. Read access.
3400 * 4. No page table pages.
3401 * but is *MUCH* faster than pmap_enter...
3402 */
3403
3404void
3405pmap_enter_quick(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot)
3406{
3407
3408	PMAP_LOCK(pmap);
3409	(void) pmap_enter_quick_locked(pmap, va, m, prot, NULL);
3410	PMAP_UNLOCK(pmap);
3411}
3412
3413static vm_page_t
3414pmap_enter_quick_locked(pmap_t pmap, vm_offset_t va, vm_page_t m,
3415    vm_prot_t prot, vm_page_t mpte)
3416{
3417	pt_entry_t *pte;
3418	vm_paddr_t pa;
3419	vm_page_t free;
3420
3421	KASSERT(va < kmi.clean_sva || va >= kmi.clean_eva ||
3422	    (m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) != 0,
3423	    ("pmap_enter_quick_locked: managed mapping within the clean submap"));
3424	mtx_assert(&vm_page_queue_mtx, MA_OWNED);
3425	PMAP_LOCK_ASSERT(pmap, MA_OWNED);
3426
3427	/*
3428	 * In the case that a page table page is not
3429	 * resident, we are creating it here.
3430	 */
3431	if (va < VM_MAXUSER_ADDRESS) {
3432		unsigned ptepindex;
3433		pd_entry_t ptepa;
3434
3435		/*
3436		 * Calculate pagetable page index
3437		 */
3438		ptepindex = va >> PDRSHIFT;
3439		if (mpte && (mpte->pindex == ptepindex)) {
3440			mpte->wire_count++;
3441		} else {
3442			/*
3443			 * Get the page directory entry
3444			 */
3445			ptepa = pmap->pm_pdir[ptepindex];
3446
3447			/*
3448			 * If the page table page is mapped, we just increment
3449			 * the hold count, and activate it.
3450			 */
3451			if (ptepa) {
3452				if (ptepa & PG_PS)
3453					return (NULL);
3454				mpte = PHYS_TO_VM_PAGE(ptepa & PG_FRAME);
3455				mpte->wire_count++;
3456			} else {
3457				mpte = _pmap_allocpte(pmap, ptepindex,
3458				    M_NOWAIT);
3459				if (mpte == NULL)
3460					return (mpte);
3461			}
3462		}
3463	} else {
3464		mpte = NULL;
3465	}
3466
3467	/*
3468	 * This call to vtopte makes the assumption that we are
3469	 * entering the page into the current pmap.  In order to support
3470	 * quick entry into any pmap, one would likely use pmap_pte_quick.
3471	 * But that isn't as quick as vtopte.
3472	 */
3473	pte = vtopte(va);
3474	if (*pte) {
3475		if (mpte != NULL) {
3476			mpte->wire_count--;
3477			mpte = NULL;
3478		}
3479		return (mpte);
3480	}
3481
3482	/*
3483	 * Enter on the PV list if part of our managed memory.
3484	 */
3485	if ((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) == 0 &&
3486	    !pmap_try_insert_pv_entry(pmap, va, m)) {
3487		if (mpte != NULL) {
3488			free = NULL;
3489			if (pmap_unwire_pte_hold(pmap, mpte, &free)) {
3490				pmap_invalidate_page(pmap, va);
3491				pmap_free_zero_pages(free);
3492			}
3493
3494			mpte = NULL;
3495		}
3496		return (mpte);
3497	}
3498
3499	/*
3500	 * Increment counters
3501	 */
3502	pmap->pm_stats.resident_count++;
3503
3504	pa = VM_PAGE_TO_PHYS(m) | pmap_cache_bits(m->md.pat_mode, 0);
3505#ifdef PAE
3506	if ((prot & VM_PROT_EXECUTE) == 0)
3507		pa |= pg_nx;
3508#endif
3509
3510	/*
3511	 * Now validate mapping with RO protection
3512	 */
3513	if (m->flags & (PG_FICTITIOUS|PG_UNMANAGED))
3514		pte_store(pte, pa | PG_V | PG_U);
3515	else
3516		pte_store(pte, pa | PG_V | PG_U | PG_MANAGED);
3517	return mpte;
3518}
3519
3520/*
3521 * Make a temporary mapping for a physical address.  This is only intended
3522 * to be used for panic dumps.
3523 */
3524void *
3525pmap_kenter_temporary(vm_paddr_t pa, int i)
3526{
3527	vm_offset_t va;
3528
3529	va = (vm_offset_t)crashdumpmap + (i * PAGE_SIZE);
3530	pmap_kenter(va, pa);
3531	invlpg(va);
3532	return ((void *)crashdumpmap);
3533}
3534
3535/*
3536 * This code maps large physical mmap regions into the
3537 * processor address space.  Note that some shortcuts
3538 * are taken, but the code works.
3539 */
3540void
3541pmap_object_init_pt(pmap_t pmap, vm_offset_t addr, vm_object_t object,
3542    vm_pindex_t pindex, vm_size_t size)
3543{
3544	pd_entry_t *pde;
3545	vm_paddr_t pa, ptepa;
3546	vm_page_t p;
3547	int pat_mode;
3548
3549	VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
3550	KASSERT(object->type == OBJT_DEVICE || object->type == OBJT_SG,
3551	    ("pmap_object_init_pt: non-device object"));
3552	if (pseflag &&
3553	    (addr & (NBPDR - 1)) == 0 && (size & (NBPDR - 1)) == 0) {
3554		if (!vm_object_populate(object, pindex, pindex + atop(size)))
3555			return;
3556		p = vm_page_lookup(object, pindex);
3557		KASSERT(p->valid == VM_PAGE_BITS_ALL,
3558		    ("pmap_object_init_pt: invalid page %p", p));
3559		pat_mode = p->md.pat_mode;
3560
3561		/*
3562		 * Abort the mapping if the first page is not physically
3563		 * aligned to a 2/4MB page boundary.
3564		 */
3565		ptepa = VM_PAGE_TO_PHYS(p);
3566		if (ptepa & (NBPDR - 1))
3567			return;
3568
3569		/*
3570		 * Skip the first page.  Abort the mapping if the rest of
3571		 * the pages are not physically contiguous or have differing
3572		 * memory attributes.
3573		 */
3574		p = TAILQ_NEXT(p, listq);
3575		for (pa = ptepa + PAGE_SIZE; pa < ptepa + size;
3576		    pa += PAGE_SIZE) {
3577			KASSERT(p->valid == VM_PAGE_BITS_ALL,
3578			    ("pmap_object_init_pt: invalid page %p", p));
3579			if (pa != VM_PAGE_TO_PHYS(p) ||
3580			    pat_mode != p->md.pat_mode)
3581				return;
3582			p = TAILQ_NEXT(p, listq);
3583		}
3584
3585		/*
3586		 * Map using 2/4MB pages.  Since "ptepa" is 2/4M aligned and
3587		 * "size" is a multiple of 2/4M, adding the PAT setting to
3588		 * "pa" will not affect the termination of this loop.
3589		 */
3590		PMAP_LOCK(pmap);
3591		for (pa = ptepa | pmap_cache_bits(pat_mode, 1); pa < ptepa +
3592		    size; pa += NBPDR) {
3593			pde = pmap_pde(pmap, addr);
3594			if (*pde == 0) {
3595				pde_store(pde, pa | PG_PS | PG_M | PG_A |
3596				    PG_U | PG_RW | PG_V);
3597				pmap->pm_stats.resident_count += NBPDR /
3598				    PAGE_SIZE;
3599				pmap_pde_mappings++;
3600			}
3601			/* Else continue on if the PDE is already valid. */
3602			addr += NBPDR;
3603		}
3604		PMAP_UNLOCK(pmap);
3605	}
3606}
3607
3608/*
3609 *	Routine:	pmap_change_wiring
3610 *	Function:	Change the wiring attribute for a map/virtual-address
3611 *			pair.
3612 *	In/out conditions:
3613 *			The mapping must already exist in the pmap.
3614 */
3615void
3616pmap_change_wiring(pmap_t pmap, vm_offset_t va, boolean_t wired)
3617{
3618	pd_entry_t *pde;
3619	pt_entry_t *pte;
3620	boolean_t are_queues_locked;
3621
3622	are_queues_locked = FALSE;
3623retry:
3624	PMAP_LOCK(pmap);
3625	pde = pmap_pde(pmap, va);
3626	if ((*pde & PG_PS) != 0) {
3627		if (!wired != ((*pde & PG_W) == 0)) {
3628			if (!are_queues_locked) {
3629				are_queues_locked = TRUE;
3630				if (!mtx_trylock(&vm_page_queue_mtx)) {
3631					PMAP_UNLOCK(pmap);
3632					vm_page_lock_queues();
3633					goto retry;
3634				}
3635			}
3636			if (!pmap_demote_pde(pmap, pde, va))
3637				panic("pmap_change_wiring: demotion failed");
3638		} else
3639			goto out;
3640	}
3641	pte = pmap_pte(pmap, va);
3642
3643	if (wired && !pmap_pte_w(pte))
3644		pmap->pm_stats.wired_count++;
3645	else if (!wired && pmap_pte_w(pte))
3646		pmap->pm_stats.wired_count--;
3647
3648	/*
3649	 * Wiring is not a hardware characteristic so there is no need to
3650	 * invalidate TLB.
3651	 */
3652	pmap_pte_set_w(pte, wired);
3653	pmap_pte_release(pte);
3654out:
3655	if (are_queues_locked)
3656		vm_page_unlock_queues();
3657	PMAP_UNLOCK(pmap);
3658}
3659
3660
3661
3662/*
3663 *	Copy the range specified by src_addr/len
3664 *	from the source map to the range dst_addr/len
3665 *	in the destination map.
3666 *
3667 *	This routine is only advisory and need not do anything.
3668 */
3669
3670void
3671pmap_copy(pmap_t dst_pmap, pmap_t src_pmap, vm_offset_t dst_addr, vm_size_t len,
3672    vm_offset_t src_addr)
3673{
3674	vm_page_t   free;
3675	vm_offset_t addr;
3676	vm_offset_t end_addr = src_addr + len;
3677	vm_offset_t pdnxt;
3678
3679	if (dst_addr != src_addr)
3680		return;
3681
3682	if (!pmap_is_current(src_pmap))
3683		return;
3684
3685	vm_page_lock_queues();
3686	if (dst_pmap < src_pmap) {
3687		PMAP_LOCK(dst_pmap);
3688		PMAP_LOCK(src_pmap);
3689	} else {
3690		PMAP_LOCK(src_pmap);
3691		PMAP_LOCK(dst_pmap);
3692	}
3693	sched_pin();
3694	for (addr = src_addr; addr < end_addr; addr = pdnxt) {
3695		pt_entry_t *src_pte, *dst_pte;
3696		vm_page_t dstmpte, srcmpte;
3697		pd_entry_t srcptepaddr;
3698		unsigned ptepindex;
3699
3700		KASSERT(addr < UPT_MIN_ADDRESS,
3701		    ("pmap_copy: invalid to pmap_copy page tables"));
3702
3703		pdnxt = (addr + NBPDR) & ~PDRMASK;
3704		if (pdnxt < addr)
3705			pdnxt = end_addr;
3706		ptepindex = addr >> PDRSHIFT;
3707
3708		srcptepaddr = src_pmap->pm_pdir[ptepindex];
3709		if (srcptepaddr == 0)
3710			continue;
3711
3712		if (srcptepaddr & PG_PS) {
3713			if (dst_pmap->pm_pdir[ptepindex] == 0 &&
3714			    ((srcptepaddr & PG_MANAGED) == 0 ||
3715			    pmap_pv_insert_pde(dst_pmap, addr, srcptepaddr &
3716			    PG_PS_FRAME))) {
3717				dst_pmap->pm_pdir[ptepindex] = srcptepaddr &
3718				    ~PG_W;
3719				dst_pmap->pm_stats.resident_count +=
3720				    NBPDR / PAGE_SIZE;
3721			}
3722			continue;
3723		}
3724
3725		srcmpte = PHYS_TO_VM_PAGE(srcptepaddr & PG_FRAME);
3726		KASSERT(srcmpte->wire_count > 0,
3727		    ("pmap_copy: source page table page is unused"));
3728
3729		if (pdnxt > end_addr)
3730			pdnxt = end_addr;
3731
3732		src_pte = vtopte(addr);
3733		while (addr < pdnxt) {
3734			pt_entry_t ptetemp;
3735			ptetemp = *src_pte;
3736			/*
3737			 * we only virtual copy managed pages
3738			 */
3739			if ((ptetemp & PG_MANAGED) != 0) {
3740				dstmpte = pmap_allocpte(dst_pmap, addr,
3741				    M_NOWAIT);
3742				if (dstmpte == NULL)
3743					goto out;
3744				dst_pte = pmap_pte_quick(dst_pmap, addr);
3745				if (*dst_pte == 0 &&
3746				    pmap_try_insert_pv_entry(dst_pmap, addr,
3747				    PHYS_TO_VM_PAGE(ptetemp & PG_FRAME))) {
3748					/*
3749					 * Clear the wired, modified, and
3750					 * accessed (referenced) bits
3751					 * during the copy.
3752					 */
3753					*dst_pte = ptetemp & ~(PG_W | PG_M |
3754					    PG_A);
3755					dst_pmap->pm_stats.resident_count++;
3756	 			} else {
3757					free = NULL;
3758					if (pmap_unwire_pte_hold(dst_pmap,
3759					    dstmpte, &free)) {
3760						pmap_invalidate_page(dst_pmap,
3761						    addr);
3762						pmap_free_zero_pages(free);
3763					}
3764					goto out;
3765				}
3766				if (dstmpte->wire_count >= srcmpte->wire_count)
3767					break;
3768			}
3769			addr += PAGE_SIZE;
3770			src_pte++;
3771		}
3772	}
3773out:
3774	sched_unpin();
3775	vm_page_unlock_queues();
3776	PMAP_UNLOCK(src_pmap);
3777	PMAP_UNLOCK(dst_pmap);
3778}
3779
3780static __inline void
3781pagezero(void *page)
3782{
3783#if defined(I686_CPU)
3784	if (cpu_class == CPUCLASS_686) {
3785#if defined(CPU_ENABLE_SSE)
3786		if (cpu_feature & CPUID_SSE2)
3787			sse2_pagezero(page);
3788		else
3789#endif
3790			i686_pagezero(page);
3791	} else
3792#endif
3793		bzero(page, PAGE_SIZE);
3794}
3795
3796/*
3797 *	pmap_zero_page zeros the specified hardware page by mapping
3798 *	the page into KVM and using bzero to clear its contents.
3799 */
3800void
3801pmap_zero_page(vm_page_t m)
3802{
3803	struct sysmaps *sysmaps;
3804
3805	sysmaps = &sysmaps_pcpu[PCPU_GET(cpuid)];
3806	mtx_lock(&sysmaps->lock);
3807	if (*sysmaps->CMAP2)
3808		panic("pmap_zero_page: CMAP2 busy");
3809	sched_pin();
3810	*sysmaps->CMAP2 = PG_V | PG_RW | VM_PAGE_TO_PHYS(m) | PG_A | PG_M |
3811	    pmap_cache_bits(m->md.pat_mode, 0);
3812	invlcaddr(sysmaps->CADDR2);
3813	pagezero(sysmaps->CADDR2);
3814	*sysmaps->CMAP2 = 0;
3815	sched_unpin();
3816	mtx_unlock(&sysmaps->lock);
3817}
3818
3819/*
3820 *	pmap_zero_page_area zeros the specified hardware page by mapping
3821 *	the page into KVM and using bzero to clear its contents.
3822 *
3823 *	off and size may not cover an area beyond a single hardware page.
3824 */
3825void
3826pmap_zero_page_area(vm_page_t m, int off, int size)
3827{
3828	struct sysmaps *sysmaps;
3829
3830	sysmaps = &sysmaps_pcpu[PCPU_GET(cpuid)];
3831	mtx_lock(&sysmaps->lock);
3832	if (*sysmaps->CMAP2)
3833		panic("pmap_zero_page_area: CMAP2 busy");
3834	sched_pin();
3835	*sysmaps->CMAP2 = PG_V | PG_RW | VM_PAGE_TO_PHYS(m) | PG_A | PG_M |
3836	    pmap_cache_bits(m->md.pat_mode, 0);
3837	invlcaddr(sysmaps->CADDR2);
3838	if (off == 0 && size == PAGE_SIZE)
3839		pagezero(sysmaps->CADDR2);
3840	else
3841		bzero((char *)sysmaps->CADDR2 + off, size);
3842	*sysmaps->CMAP2 = 0;
3843	sched_unpin();
3844	mtx_unlock(&sysmaps->lock);
3845}
3846
3847/*
3848 *	pmap_zero_page_idle zeros the specified hardware page by mapping
3849 *	the page into KVM and using bzero to clear its contents.  This
3850 *	is intended to be called from the vm_pagezero process only and
3851 *	outside of Giant.
3852 */
3853void
3854pmap_zero_page_idle(vm_page_t m)
3855{
3856
3857	if (*CMAP3)
3858		panic("pmap_zero_page_idle: CMAP3 busy");
3859	sched_pin();
3860	*CMAP3 = PG_V | PG_RW | VM_PAGE_TO_PHYS(m) | PG_A | PG_M |
3861	    pmap_cache_bits(m->md.pat_mode, 0);
3862	invlcaddr(CADDR3);
3863	pagezero(CADDR3);
3864	*CMAP3 = 0;
3865	sched_unpin();
3866}
3867
3868/*
3869 *	pmap_copy_page copies the specified (machine independent)
3870 *	page by mapping the page into virtual memory and using
3871 *	bcopy to copy the page, one machine dependent page at a
3872 *	time.
3873 */
3874void
3875pmap_copy_page(vm_page_t src, vm_page_t dst)
3876{
3877	struct sysmaps *sysmaps;
3878
3879	sysmaps = &sysmaps_pcpu[PCPU_GET(cpuid)];
3880	mtx_lock(&sysmaps->lock);
3881	if (*sysmaps->CMAP1)
3882		panic("pmap_copy_page: CMAP1 busy");
3883	if (*sysmaps->CMAP2)
3884		panic("pmap_copy_page: CMAP2 busy");
3885	sched_pin();
3886	invlpg((u_int)sysmaps->CADDR1);
3887	invlpg((u_int)sysmaps->CADDR2);
3888	*sysmaps->CMAP1 = PG_V | VM_PAGE_TO_PHYS(src) | PG_A |
3889	    pmap_cache_bits(src->md.pat_mode, 0);
3890	*sysmaps->CMAP2 = PG_V | PG_RW | VM_PAGE_TO_PHYS(dst) | PG_A | PG_M |
3891	    pmap_cache_bits(dst->md.pat_mode, 0);
3892	bcopy(sysmaps->CADDR1, sysmaps->CADDR2, PAGE_SIZE);
3893	*sysmaps->CMAP1 = 0;
3894	*sysmaps->CMAP2 = 0;
3895	sched_unpin();
3896	mtx_unlock(&sysmaps->lock);
3897}
3898
3899/*
3900 * Returns true if the pmap's pv is one of the first
3901 * 16 pvs linked to from this page.  This count may
3902 * be changed upwards or downwards in the future; it
3903 * is only necessary that true be returned for a small
3904 * subset of pmaps for proper page aging.
3905 */
3906boolean_t
3907pmap_page_exists_quick(pmap_t pmap, vm_page_t m)
3908{
3909	struct md_page *pvh;
3910	pv_entry_t pv;
3911	int loops = 0;
3912
3913	if (m->flags & PG_FICTITIOUS)
3914		return FALSE;
3915
3916	mtx_assert(&vm_page_queue_mtx, MA_OWNED);
3917	TAILQ_FOREACH(pv, &m->md.pv_list, pv_list) {
3918		if (PV_PMAP(pv) == pmap) {
3919			return TRUE;
3920		}
3921		loops++;
3922		if (loops >= 16)
3923			break;
3924	}
3925	if (loops < 16) {
3926		pvh = pa_to_pvh(VM_PAGE_TO_PHYS(m));
3927		TAILQ_FOREACH(pv, &pvh->pv_list, pv_list) {
3928			if (PV_PMAP(pv) == pmap)
3929				return (TRUE);
3930			loops++;
3931			if (loops >= 16)
3932				break;
3933		}
3934	}
3935	return (FALSE);
3936}
3937
3938/*
3939 *	pmap_page_wired_mappings:
3940 *
3941 *	Return the number of managed mappings to the given physical page
3942 *	that are wired.
3943 */
3944int
3945pmap_page_wired_mappings(vm_page_t m)
3946{
3947	int count;
3948
3949	count = 0;
3950	if ((m->flags & PG_FICTITIOUS) != 0)
3951		return (count);
3952	count = pmap_pvh_wired_mappings(&m->md, count);
3953	return (pmap_pvh_wired_mappings(pa_to_pvh(VM_PAGE_TO_PHYS(m)), count));
3954}
3955
3956/*
3957 *	pmap_pvh_wired_mappings:
3958 *
3959 *	Return the updated number "count" of managed mappings that are wired.
3960 */
3961static int
3962pmap_pvh_wired_mappings(struct md_page *pvh, int count)
3963{
3964	pmap_t pmap;
3965	pt_entry_t *pte;
3966	pv_entry_t pv;
3967
3968	mtx_assert(&vm_page_queue_mtx, MA_OWNED);
3969	sched_pin();
3970	TAILQ_FOREACH(pv, &pvh->pv_list, pv_list) {
3971		pmap = PV_PMAP(pv);
3972		PMAP_LOCK(pmap);
3973		pte = pmap_pte_quick(pmap, pv->pv_va);
3974		if ((*pte & PG_W) != 0)
3975			count++;
3976		PMAP_UNLOCK(pmap);
3977	}
3978	sched_unpin();
3979	return (count);
3980}
3981
3982/*
3983 * Returns TRUE if the given page is mapped individually or as part of
3984 * a 4mpage.  Otherwise, returns FALSE.
3985 */
3986boolean_t
3987pmap_page_is_mapped(vm_page_t m)
3988{
3989	struct md_page *pvh;
3990
3991	if ((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) != 0)
3992		return (FALSE);
3993	mtx_assert(&vm_page_queue_mtx, MA_OWNED);
3994	if (TAILQ_EMPTY(&m->md.pv_list)) {
3995		pvh = pa_to_pvh(VM_PAGE_TO_PHYS(m));
3996		return (!TAILQ_EMPTY(&pvh->pv_list));
3997	} else
3998		return (TRUE);
3999}
4000
4001/*
4002 * Remove all pages from specified address space
4003 * this aids process exit speeds.  Also, this code
4004 * is special cased for current process only, but
4005 * can have the more generic (and slightly slower)
4006 * mode enabled.  This is much faster than pmap_remove
4007 * in the case of running down an entire address space.
4008 */
4009void
4010pmap_remove_pages(pmap_t pmap)
4011{
4012	pt_entry_t *pte, tpte;
4013	vm_page_t free = NULL;
4014	vm_page_t m, mpte, mt;
4015	pv_entry_t pv;
4016	struct md_page *pvh;
4017	struct pv_chunk *pc, *npc;
4018	int field, idx;
4019	int32_t bit;
4020	uint32_t inuse, bitmask;
4021	int allfree;
4022
4023	if (pmap != vmspace_pmap(curthread->td_proc->p_vmspace)) {
4024		printf("warning: pmap_remove_pages called with non-current pmap\n");
4025		return;
4026	}
4027	vm_page_lock_queues();
4028	PMAP_LOCK(pmap);
4029	sched_pin();
4030	TAILQ_FOREACH_SAFE(pc, &pmap->pm_pvchunk, pc_list, npc) {
4031		allfree = 1;
4032		for (field = 0; field < _NPCM; field++) {
4033			inuse = (~(pc->pc_map[field])) & pc_freemask[field];
4034			while (inuse != 0) {
4035				bit = bsfl(inuse);
4036				bitmask = 1UL << bit;
4037				idx = field * 32 + bit;
4038				pv = &pc->pc_pventry[idx];
4039				inuse &= ~bitmask;
4040
4041				pte = pmap_pde(pmap, pv->pv_va);
4042				tpte = *pte;
4043				if ((tpte & PG_PS) == 0) {
4044					pte = vtopte(pv->pv_va);
4045					tpte = *pte & ~PG_PTE_PAT;
4046				}
4047
4048				if (tpte == 0) {
4049					printf(
4050					    "TPTE at %p  IS ZERO @ VA %08x\n",
4051					    pte, pv->pv_va);
4052					panic("bad pte");
4053				}
4054
4055/*
4056 * We cannot remove wired pages from a process' mapping at this time
4057 */
4058				if (tpte & PG_W) {
4059					allfree = 0;
4060					continue;
4061				}
4062
4063				m = PHYS_TO_VM_PAGE(tpte & PG_FRAME);
4064				KASSERT(m->phys_addr == (tpte & PG_FRAME),
4065				    ("vm_page_t %p phys_addr mismatch %016jx %016jx",
4066				    m, (uintmax_t)m->phys_addr,
4067				    (uintmax_t)tpte));
4068
4069				KASSERT(m < &vm_page_array[vm_page_array_size],
4070					("pmap_remove_pages: bad tpte %#jx",
4071					(uintmax_t)tpte));
4072
4073				pte_clear(pte);
4074
4075				/*
4076				 * Update the vm_page_t clean/reference bits.
4077				 */
4078				if ((tpte & (PG_M | PG_RW)) == (PG_M | PG_RW)) {
4079					if ((tpte & PG_PS) != 0) {
4080						for (mt = m; mt < &m[NBPDR / PAGE_SIZE]; mt++)
4081							vm_page_dirty(mt);
4082					} else
4083						vm_page_dirty(m);
4084				}
4085
4086				/* Mark free */
4087				PV_STAT(pv_entry_frees++);
4088				PV_STAT(pv_entry_spare++);
4089				pv_entry_count--;
4090				pc->pc_map[field] |= bitmask;
4091				if ((tpte & PG_PS) != 0) {
4092					pmap->pm_stats.resident_count -= NBPDR / PAGE_SIZE;
4093					pvh = pa_to_pvh(tpte & PG_PS_FRAME);
4094					TAILQ_REMOVE(&pvh->pv_list, pv, pv_list);
4095					if (TAILQ_EMPTY(&pvh->pv_list)) {
4096						for (mt = m; mt < &m[NBPDR / PAGE_SIZE]; mt++)
4097							if (TAILQ_EMPTY(&mt->md.pv_list))
4098								vm_page_flag_clear(mt, PG_WRITEABLE);
4099					}
4100					mpte = pmap_lookup_pt_page(pmap, pv->pv_va);
4101					if (mpte != NULL) {
4102						pmap_remove_pt_page(pmap, mpte);
4103						pmap->pm_stats.resident_count--;
4104						KASSERT(mpte->wire_count == NPTEPG,
4105						    ("pmap_remove_pages: pte page wire count error"));
4106						mpte->wire_count = 0;
4107						pmap_add_delayed_free_list(mpte, &free, FALSE);
4108						atomic_subtract_int(&cnt.v_wire_count, 1);
4109					}
4110				} else {
4111					pmap->pm_stats.resident_count--;
4112					TAILQ_REMOVE(&m->md.pv_list, pv, pv_list);
4113					if (TAILQ_EMPTY(&m->md.pv_list)) {
4114						pvh = pa_to_pvh(VM_PAGE_TO_PHYS(m));
4115						if (TAILQ_EMPTY(&pvh->pv_list))
4116							vm_page_flag_clear(m, PG_WRITEABLE);
4117					}
4118					pmap_unuse_pt(pmap, pv->pv_va, &free);
4119				}
4120			}
4121		}
4122		if (allfree) {
4123			PV_STAT(pv_entry_spare -= _NPCPV);
4124			PV_STAT(pc_chunk_count--);
4125			PV_STAT(pc_chunk_frees++);
4126			TAILQ_REMOVE(&pmap->pm_pvchunk, pc, pc_list);
4127			m = PHYS_TO_VM_PAGE(pmap_kextract((vm_offset_t)pc));
4128			pmap_qremove((vm_offset_t)pc, 1);
4129			vm_page_unwire(m, 0);
4130			vm_page_free(m);
4131			pmap_ptelist_free(&pv_vafree, (vm_offset_t)pc);
4132		}
4133	}
4134	sched_unpin();
4135	pmap_invalidate_all(pmap);
4136	vm_page_unlock_queues();
4137	PMAP_UNLOCK(pmap);
4138	pmap_free_zero_pages(free);
4139}
4140
4141/*
4142 *	pmap_is_modified:
4143 *
4144 *	Return whether or not the specified physical page was modified
4145 *	in any physical maps.
4146 */
4147boolean_t
4148pmap_is_modified(vm_page_t m)
4149{
4150
4151	if (m->flags & PG_FICTITIOUS)
4152		return (FALSE);
4153	if (pmap_is_modified_pvh(&m->md))
4154		return (TRUE);
4155	return (pmap_is_modified_pvh(pa_to_pvh(VM_PAGE_TO_PHYS(m))));
4156}
4157
4158/*
4159 * Returns TRUE if any of the given mappings were used to modify
4160 * physical memory.  Otherwise, returns FALSE.  Both page and 2mpage
4161 * mappings are supported.
4162 */
4163static boolean_t
4164pmap_is_modified_pvh(struct md_page *pvh)
4165{
4166	pv_entry_t pv;
4167	pt_entry_t *pte;
4168	pmap_t pmap;
4169	boolean_t rv;
4170
4171	mtx_assert(&vm_page_queue_mtx, MA_OWNED);
4172	rv = FALSE;
4173	sched_pin();
4174	TAILQ_FOREACH(pv, &pvh->pv_list, pv_list) {
4175		pmap = PV_PMAP(pv);
4176		PMAP_LOCK(pmap);
4177		pte = pmap_pte_quick(pmap, pv->pv_va);
4178		rv = (*pte & (PG_M | PG_RW)) == (PG_M | PG_RW);
4179		PMAP_UNLOCK(pmap);
4180		if (rv)
4181			break;
4182	}
4183	sched_unpin();
4184	return (rv);
4185}
4186
4187/*
4188 *	pmap_is_prefaultable:
4189 *
4190 *	Return whether or not the specified virtual address is elgible
4191 *	for prefault.
4192 */
4193boolean_t
4194pmap_is_prefaultable(pmap_t pmap, vm_offset_t addr)
4195{
4196	pd_entry_t *pde;
4197	pt_entry_t *pte;
4198	boolean_t rv;
4199
4200	rv = FALSE;
4201	PMAP_LOCK(pmap);
4202	pde = pmap_pde(pmap, addr);
4203	if (*pde != 0 && (*pde & PG_PS) == 0) {
4204		pte = vtopte(addr);
4205		rv = *pte == 0;
4206	}
4207	PMAP_UNLOCK(pmap);
4208	return (rv);
4209}
4210
4211/*
4212 * Clear the write and modified bits in each of the given page's mappings.
4213 */
4214void
4215pmap_remove_write(vm_page_t m)
4216{
4217	struct md_page *pvh;
4218	pv_entry_t next_pv, pv;
4219	pmap_t pmap;
4220	pd_entry_t *pde;
4221	pt_entry_t oldpte, *pte;
4222	vm_offset_t va;
4223
4224	mtx_assert(&vm_page_queue_mtx, MA_OWNED);
4225	if ((m->flags & PG_FICTITIOUS) != 0 ||
4226	    (m->flags & PG_WRITEABLE) == 0)
4227		return;
4228	sched_pin();
4229	pvh = pa_to_pvh(VM_PAGE_TO_PHYS(m));
4230	TAILQ_FOREACH_SAFE(pv, &pvh->pv_list, pv_list, next_pv) {
4231		va = pv->pv_va;
4232		pmap = PV_PMAP(pv);
4233		PMAP_LOCK(pmap);
4234		pde = pmap_pde(pmap, va);
4235		if ((*pde & PG_RW) != 0)
4236			(void)pmap_demote_pde(pmap, pde, va);
4237		PMAP_UNLOCK(pmap);
4238	}
4239	TAILQ_FOREACH(pv, &m->md.pv_list, pv_list) {
4240		pmap = PV_PMAP(pv);
4241		PMAP_LOCK(pmap);
4242		pde = pmap_pde(pmap, pv->pv_va);
4243		KASSERT((*pde & PG_PS) == 0, ("pmap_clear_write: found"
4244		    " a 4mpage in page %p's pv list", m));
4245		pte = pmap_pte_quick(pmap, pv->pv_va);
4246retry:
4247		oldpte = *pte;
4248		if ((oldpte & PG_RW) != 0) {
4249			/*
4250			 * Regardless of whether a pte is 32 or 64 bits
4251			 * in size, PG_RW and PG_M are among the least
4252			 * significant 32 bits.
4253			 */
4254			if (!atomic_cmpset_int((u_int *)pte, oldpte,
4255			    oldpte & ~(PG_RW | PG_M)))
4256				goto retry;
4257			if ((oldpte & PG_M) != 0)
4258				vm_page_dirty(m);
4259			pmap_invalidate_page(pmap, pv->pv_va);
4260		}
4261		PMAP_UNLOCK(pmap);
4262	}
4263	vm_page_flag_clear(m, PG_WRITEABLE);
4264	sched_unpin();
4265}
4266
4267/*
4268 *	pmap_ts_referenced:
4269 *
4270 *	Return a count of reference bits for a page, clearing those bits.
4271 *	It is not necessary for every reference bit to be cleared, but it
4272 *	is necessary that 0 only be returned when there are truly no
4273 *	reference bits set.
4274 *
4275 *	XXX: The exact number of bits to check and clear is a matter that
4276 *	should be tested and standardized at some point in the future for
4277 *	optimal aging of shared pages.
4278 */
4279int
4280pmap_ts_referenced(vm_page_t m)
4281{
4282	struct md_page *pvh;
4283	pv_entry_t pv, pvf, pvn;
4284	pmap_t pmap;
4285	pd_entry_t oldpde, *pde;
4286	pt_entry_t *pte;
4287	vm_offset_t va;
4288	int rtval = 0;
4289
4290	if (m->flags & PG_FICTITIOUS)
4291		return (rtval);
4292	sched_pin();
4293	mtx_assert(&vm_page_queue_mtx, MA_OWNED);
4294	pvh = pa_to_pvh(VM_PAGE_TO_PHYS(m));
4295	TAILQ_FOREACH_SAFE(pv, &pvh->pv_list, pv_list, pvn) {
4296		va = pv->pv_va;
4297		pmap = PV_PMAP(pv);
4298		PMAP_LOCK(pmap);
4299		pde = pmap_pde(pmap, va);
4300		oldpde = *pde;
4301		if ((oldpde & PG_A) != 0) {
4302			if (pmap_demote_pde(pmap, pde, va)) {
4303				if ((oldpde & PG_W) == 0) {
4304					/*
4305					 * Remove the mapping to a single page
4306					 * so that a subsequent access may
4307					 * repromote.  Since the underlying
4308					 * page table page is fully populated,
4309					 * this removal never frees a page
4310					 * table page.
4311					 */
4312					va += VM_PAGE_TO_PHYS(m) - (oldpde &
4313					    PG_PS_FRAME);
4314					pmap_remove_page(pmap, va, NULL);
4315					rtval++;
4316					if (rtval > 4) {
4317						PMAP_UNLOCK(pmap);
4318						return (rtval);
4319					}
4320				}
4321			}
4322		}
4323		PMAP_UNLOCK(pmap);
4324	}
4325	if ((pv = TAILQ_FIRST(&m->md.pv_list)) != NULL) {
4326		pvf = pv;
4327		do {
4328			pvn = TAILQ_NEXT(pv, pv_list);
4329			TAILQ_REMOVE(&m->md.pv_list, pv, pv_list);
4330			TAILQ_INSERT_TAIL(&m->md.pv_list, pv, pv_list);
4331			pmap = PV_PMAP(pv);
4332			PMAP_LOCK(pmap);
4333			pde = pmap_pde(pmap, pv->pv_va);
4334			KASSERT((*pde & PG_PS) == 0, ("pmap_ts_referenced:"
4335			    " found a 4mpage in page %p's pv list", m));
4336			pte = pmap_pte_quick(pmap, pv->pv_va);
4337			if ((*pte & PG_A) != 0) {
4338				atomic_clear_int((u_int *)pte, PG_A);
4339				pmap_invalidate_page(pmap, pv->pv_va);
4340				rtval++;
4341				if (rtval > 4)
4342					pvn = NULL;
4343			}
4344			PMAP_UNLOCK(pmap);
4345		} while ((pv = pvn) != NULL && pv != pvf);
4346	}
4347	sched_unpin();
4348	return (rtval);
4349}
4350
4351/*
4352 *	Clear the modify bits on the specified physical page.
4353 */
4354void
4355pmap_clear_modify(vm_page_t m)
4356{
4357	struct md_page *pvh;
4358	pv_entry_t next_pv, pv;
4359	pmap_t pmap;
4360	pd_entry_t oldpde, *pde;
4361	pt_entry_t oldpte, *pte;
4362	vm_offset_t va;
4363
4364	mtx_assert(&vm_page_queue_mtx, MA_OWNED);
4365	if ((m->flags & PG_FICTITIOUS) != 0)
4366		return;
4367	sched_pin();
4368	pvh = pa_to_pvh(VM_PAGE_TO_PHYS(m));
4369	TAILQ_FOREACH_SAFE(pv, &pvh->pv_list, pv_list, next_pv) {
4370		va = pv->pv_va;
4371		pmap = PV_PMAP(pv);
4372		PMAP_LOCK(pmap);
4373		pde = pmap_pde(pmap, va);
4374		oldpde = *pde;
4375		if ((oldpde & PG_RW) != 0) {
4376			if (pmap_demote_pde(pmap, pde, va)) {
4377				if ((oldpde & PG_W) == 0) {
4378					/*
4379					 * Write protect the mapping to a
4380					 * single page so that a subsequent
4381					 * write access may repromote.
4382					 */
4383					va += VM_PAGE_TO_PHYS(m) - (oldpde &
4384					    PG_PS_FRAME);
4385					pte = pmap_pte_quick(pmap, va);
4386					oldpte = *pte;
4387					if ((oldpte & PG_V) != 0) {
4388						/*
4389						 * Regardless of whether a pte is 32 or 64 bits
4390						 * in size, PG_RW and PG_M are among the least
4391						 * significant 32 bits.
4392						 */
4393						while (!atomic_cmpset_int((u_int *)pte,
4394						    oldpte,
4395						    oldpte & ~(PG_M | PG_RW)))
4396							oldpte = *pte;
4397						vm_page_dirty(m);
4398						pmap_invalidate_page(pmap, va);
4399					}
4400				}
4401			}
4402		}
4403		PMAP_UNLOCK(pmap);
4404	}
4405	TAILQ_FOREACH(pv, &m->md.pv_list, pv_list) {
4406		pmap = PV_PMAP(pv);
4407		PMAP_LOCK(pmap);
4408		pde = pmap_pde(pmap, pv->pv_va);
4409		KASSERT((*pde & PG_PS) == 0, ("pmap_clear_modify: found"
4410		    " a 4mpage in page %p's pv list", m));
4411		pte = pmap_pte_quick(pmap, pv->pv_va);
4412		if ((*pte & (PG_M | PG_RW)) == (PG_M | PG_RW)) {
4413			/*
4414			 * Regardless of whether a pte is 32 or 64 bits
4415			 * in size, PG_M is among the least significant
4416			 * 32 bits.
4417			 */
4418			atomic_clear_int((u_int *)pte, PG_M);
4419			pmap_invalidate_page(pmap, pv->pv_va);
4420		}
4421		PMAP_UNLOCK(pmap);
4422	}
4423	sched_unpin();
4424}
4425
4426/*
4427 *	pmap_clear_reference:
4428 *
4429 *	Clear the reference bit on the specified physical page.
4430 */
4431void
4432pmap_clear_reference(vm_page_t m)
4433{
4434	struct md_page *pvh;
4435	pv_entry_t next_pv, pv;
4436	pmap_t pmap;
4437	pd_entry_t oldpde, *pde;
4438	pt_entry_t *pte;
4439	vm_offset_t va;
4440
4441	mtx_assert(&vm_page_queue_mtx, MA_OWNED);
4442	if ((m->flags & PG_FICTITIOUS) != 0)
4443		return;
4444	sched_pin();
4445	pvh = pa_to_pvh(VM_PAGE_TO_PHYS(m));
4446	TAILQ_FOREACH_SAFE(pv, &pvh->pv_list, pv_list, next_pv) {
4447		va = pv->pv_va;
4448		pmap = PV_PMAP(pv);
4449		PMAP_LOCK(pmap);
4450		pde = pmap_pde(pmap, va);
4451		oldpde = *pde;
4452		if ((oldpde & PG_A) != 0) {
4453			if (pmap_demote_pde(pmap, pde, va)) {
4454				/*
4455				 * Remove the mapping to a single page so
4456				 * that a subsequent access may repromote.
4457				 * Since the underlying page table page is
4458				 * fully populated, this removal never frees
4459				 * a page table page.
4460				 */
4461				va += VM_PAGE_TO_PHYS(m) - (oldpde &
4462				    PG_PS_FRAME);
4463				pmap_remove_page(pmap, va, NULL);
4464			}
4465		}
4466		PMAP_UNLOCK(pmap);
4467	}
4468	TAILQ_FOREACH(pv, &m->md.pv_list, pv_list) {
4469		pmap = PV_PMAP(pv);
4470		PMAP_LOCK(pmap);
4471		pde = pmap_pde(pmap, pv->pv_va);
4472		KASSERT((*pde & PG_PS) == 0, ("pmap_clear_reference: found"
4473		    " a 4mpage in page %p's pv list", m));
4474		pte = pmap_pte_quick(pmap, pv->pv_va);
4475		if ((*pte & PG_A) != 0) {
4476			/*
4477			 * Regardless of whether a pte is 32 or 64 bits
4478			 * in size, PG_A is among the least significant
4479			 * 32 bits.
4480			 */
4481			atomic_clear_int((u_int *)pte, PG_A);
4482			pmap_invalidate_page(pmap, pv->pv_va);
4483		}
4484		PMAP_UNLOCK(pmap);
4485	}
4486	sched_unpin();
4487}
4488
4489/*
4490 * Miscellaneous support routines follow
4491 */
4492
4493/* Adjust the cache mode for a 4KB page mapped via a PTE. */
4494static __inline void
4495pmap_pte_attr(pt_entry_t *pte, int cache_bits)
4496{
4497	u_int opte, npte;
4498
4499	/*
4500	 * The cache mode bits are all in the low 32-bits of the
4501	 * PTE, so we can just spin on updating the low 32-bits.
4502	 */
4503	do {
4504		opte = *(u_int *)pte;
4505		npte = opte & ~PG_PTE_CACHE;
4506		npte |= cache_bits;
4507	} while (npte != opte && !atomic_cmpset_int((u_int *)pte, opte, npte));
4508}
4509
4510/* Adjust the cache mode for a 2/4MB page mapped via a PDE. */
4511static __inline void
4512pmap_pde_attr(pd_entry_t *pde, int cache_bits)
4513{
4514	u_int opde, npde;
4515
4516	/*
4517	 * The cache mode bits are all in the low 32-bits of the
4518	 * PDE, so we can just spin on updating the low 32-bits.
4519	 */
4520	do {
4521		opde = *(u_int *)pde;
4522		npde = opde & ~PG_PDE_CACHE;
4523		npde |= cache_bits;
4524	} while (npde != opde && !atomic_cmpset_int((u_int *)pde, opde, npde));
4525}
4526
4527/*
4528 * Map a set of physical memory pages into the kernel virtual
4529 * address space. Return a pointer to where it is mapped. This
4530 * routine is intended to be used for mapping device memory,
4531 * NOT real memory.
4532 */
4533void *
4534pmap_mapdev_attr(vm_paddr_t pa, vm_size_t size, int mode)
4535{
4536	vm_offset_t va, offset;
4537	vm_size_t tmpsize;
4538
4539	offset = pa & PAGE_MASK;
4540	size = roundup(offset + size, PAGE_SIZE);
4541	pa = pa & PG_FRAME;
4542
4543	if (pa < KERNLOAD && pa + size <= KERNLOAD)
4544		va = KERNBASE + pa;
4545	else
4546		va = kmem_alloc_nofault(kernel_map, size);
4547	if (!va)
4548		panic("pmap_mapdev: Couldn't alloc kernel virtual memory");
4549
4550	for (tmpsize = 0; tmpsize < size; tmpsize += PAGE_SIZE)
4551		pmap_kenter_attr(va + tmpsize, pa + tmpsize, mode);
4552	pmap_invalidate_range(kernel_pmap, va, va + tmpsize);
4553	pmap_invalidate_cache_range(va, va + size);
4554	return ((void *)(va + offset));
4555}
4556
4557void *
4558pmap_mapdev(vm_paddr_t pa, vm_size_t size)
4559{
4560
4561	return (pmap_mapdev_attr(pa, size, PAT_UNCACHEABLE));
4562}
4563
4564void *
4565pmap_mapbios(vm_paddr_t pa, vm_size_t size)
4566{
4567
4568	return (pmap_mapdev_attr(pa, size, PAT_WRITE_BACK));
4569}
4570
4571void
4572pmap_unmapdev(vm_offset_t va, vm_size_t size)
4573{
4574	vm_offset_t base, offset, tmpva;
4575
4576	if (va >= KERNBASE && va + size <= KERNBASE + KERNLOAD)
4577		return;
4578	base = trunc_page(va);
4579	offset = va & PAGE_MASK;
4580	size = roundup(offset + size, PAGE_SIZE);
4581	for (tmpva = base; tmpva < (base + size); tmpva += PAGE_SIZE)
4582		pmap_kremove(tmpva);
4583	pmap_invalidate_range(kernel_pmap, va, tmpva);
4584	kmem_free(kernel_map, base, size);
4585}
4586
4587/*
4588 * Sets the memory attribute for the specified page.
4589 */
4590void
4591pmap_page_set_memattr(vm_page_t m, vm_memattr_t ma)
4592{
4593	struct sysmaps *sysmaps;
4594	vm_offset_t sva, eva;
4595
4596	m->md.pat_mode = ma;
4597	if ((m->flags & PG_FICTITIOUS) != 0)
4598		return;
4599
4600	/*
4601	 * If "m" is a normal page, flush it from the cache.
4602	 * See pmap_invalidate_cache_range().
4603	 *
4604	 * First, try to find an existing mapping of the page by sf
4605	 * buffer. sf_buf_invalidate_cache() modifies mapping and
4606	 * flushes the cache.
4607	 */
4608	if (sf_buf_invalidate_cache(m))
4609		return;
4610
4611	/*
4612	 * If page is not mapped by sf buffer, but CPU does not
4613	 * support self snoop, map the page transient and do
4614	 * invalidation. In the worst case, whole cache is flushed by
4615	 * pmap_invalidate_cache_range().
4616	 */
4617	if ((cpu_feature & (CPUID_SS|CPUID_CLFSH)) == CPUID_CLFSH) {
4618		sysmaps = &sysmaps_pcpu[PCPU_GET(cpuid)];
4619		mtx_lock(&sysmaps->lock);
4620		if (*sysmaps->CMAP2)
4621			panic("pmap_page_set_memattr: CMAP2 busy");
4622		sched_pin();
4623		*sysmaps->CMAP2 = PG_V | PG_RW | VM_PAGE_TO_PHYS(m) |
4624		    PG_A | PG_M | pmap_cache_bits(m->md.pat_mode, 0);
4625		invlcaddr(sysmaps->CADDR2);
4626		sva = (vm_offset_t)sysmaps->CADDR2;
4627		eva = sva + PAGE_SIZE;
4628	} else
4629		sva = eva = 0; /* gcc */
4630	pmap_invalidate_cache_range(sva, eva);
4631	if (sva != 0) {
4632		*sysmaps->CMAP2 = 0;
4633		sched_unpin();
4634		mtx_unlock(&sysmaps->lock);
4635	}
4636}
4637
4638/*
4639 * Changes the specified virtual address range's memory type to that given by
4640 * the parameter "mode".  The specified virtual address range must be
4641 * completely contained within either the kernel map.
4642 *
4643 * Returns zero if the change completed successfully, and either EINVAL or
4644 * ENOMEM if the change failed.  Specifically, EINVAL is returned if some part
4645 * of the virtual address range was not mapped, and ENOMEM is returned if
4646 * there was insufficient memory available to complete the change.
4647 */
4648int
4649pmap_change_attr(vm_offset_t va, vm_size_t size, int mode)
4650{
4651	vm_offset_t base, offset, tmpva;
4652	pd_entry_t *pde;
4653	pt_entry_t *pte;
4654	int cache_bits_pte, cache_bits_pde;
4655	boolean_t changed;
4656
4657	base = trunc_page(va);
4658	offset = va & PAGE_MASK;
4659	size = roundup(offset + size, PAGE_SIZE);
4660
4661	/*
4662	 * Only supported on kernel virtual addresses above the recursive map.
4663	 */
4664	if (base < VM_MIN_KERNEL_ADDRESS)
4665		return (EINVAL);
4666
4667	cache_bits_pde = pmap_cache_bits(mode, 1);
4668	cache_bits_pte = pmap_cache_bits(mode, 0);
4669	changed = FALSE;
4670
4671	/*
4672	 * Pages that aren't mapped aren't supported.  Also break down
4673	 * 2/4MB pages into 4KB pages if required.
4674	 */
4675	PMAP_LOCK(kernel_pmap);
4676	for (tmpva = base; tmpva < base + size; ) {
4677		pde = pmap_pde(kernel_pmap, tmpva);
4678		if (*pde == 0) {
4679			PMAP_UNLOCK(kernel_pmap);
4680			return (EINVAL);
4681		}
4682		if (*pde & PG_PS) {
4683			/*
4684			 * If the current 2/4MB page already has
4685			 * the required memory type, then we need not
4686			 * demote this page.  Just increment tmpva to
4687			 * the next 2/4MB page frame.
4688			 */
4689			if ((*pde & PG_PDE_CACHE) == cache_bits_pde) {
4690				tmpva = trunc_4mpage(tmpva) + NBPDR;
4691				continue;
4692			}
4693
4694			/*
4695			 * If the current offset aligns with a 2/4MB
4696			 * page frame and there is at least 2/4MB left
4697			 * within the range, then we need not break
4698			 * down this page into 4KB pages.
4699			 */
4700			if ((tmpva & PDRMASK) == 0 &&
4701			    tmpva + PDRMASK < base + size) {
4702				tmpva += NBPDR;
4703				continue;
4704			}
4705			if (!pmap_demote_pde(kernel_pmap, pde, tmpva)) {
4706				PMAP_UNLOCK(kernel_pmap);
4707				return (ENOMEM);
4708			}
4709		}
4710		pte = vtopte(tmpva);
4711		if (*pte == 0) {
4712			PMAP_UNLOCK(kernel_pmap);
4713			return (EINVAL);
4714		}
4715		tmpva += PAGE_SIZE;
4716	}
4717	PMAP_UNLOCK(kernel_pmap);
4718
4719	/*
4720	 * Ok, all the pages exist, so run through them updating their
4721	 * cache mode if required.
4722	 */
4723	for (tmpva = base; tmpva < base + size; ) {
4724		pde = pmap_pde(kernel_pmap, tmpva);
4725		if (*pde & PG_PS) {
4726			if ((*pde & PG_PDE_CACHE) != cache_bits_pde) {
4727				pmap_pde_attr(pde, cache_bits_pde);
4728				changed = TRUE;
4729			}
4730			tmpva = trunc_4mpage(tmpva) + NBPDR;
4731		} else {
4732			pte = vtopte(tmpva);
4733			if ((*pte & PG_PTE_CACHE) != cache_bits_pte) {
4734				pmap_pte_attr(pte, cache_bits_pte);
4735				changed = TRUE;
4736			}
4737			tmpva += PAGE_SIZE;
4738		}
4739	}
4740
4741	/*
4742	 * Flush CPU caches to make sure any data isn't cached that
4743	 * shouldn't be, etc.
4744	 */
4745	if (changed) {
4746		pmap_invalidate_range(kernel_pmap, base, tmpva);
4747		pmap_invalidate_cache_range(base, tmpva);
4748	}
4749	return (0);
4750}
4751
4752/*
4753 * perform the pmap work for mincore
4754 */
4755int
4756pmap_mincore(pmap_t pmap, vm_offset_t addr)
4757{
4758	pd_entry_t *pdep;
4759	pt_entry_t *ptep, pte;
4760	vm_paddr_t pa;
4761	vm_page_t m;
4762	int val = 0;
4763
4764	PMAP_LOCK(pmap);
4765	pdep = pmap_pde(pmap, addr);
4766	if (*pdep != 0) {
4767		if (*pdep & PG_PS) {
4768			pte = *pdep;
4769			val = MINCORE_SUPER;
4770			/* Compute the physical address of the 4KB page. */
4771			pa = ((*pdep & PG_PS_FRAME) | (addr & PDRMASK)) &
4772			    PG_FRAME;
4773		} else {
4774			ptep = pmap_pte(pmap, addr);
4775			pte = *ptep;
4776			pmap_pte_release(ptep);
4777			pa = pte & PG_FRAME;
4778		}
4779	} else {
4780		pte = 0;
4781		pa = 0;
4782	}
4783	PMAP_UNLOCK(pmap);
4784
4785	if (pte != 0) {
4786		val |= MINCORE_INCORE;
4787		if ((pte & PG_MANAGED) == 0)
4788			return val;
4789
4790		m = PHYS_TO_VM_PAGE(pa);
4791
4792		/*
4793		 * Modified by us
4794		 */
4795		if ((pte & (PG_M | PG_RW)) == (PG_M | PG_RW))
4796			val |= MINCORE_MODIFIED|MINCORE_MODIFIED_OTHER;
4797		else {
4798			/*
4799			 * Modified by someone else
4800			 */
4801			vm_page_lock_queues();
4802			if (m->dirty || pmap_is_modified(m))
4803				val |= MINCORE_MODIFIED_OTHER;
4804			vm_page_unlock_queues();
4805		}
4806		/*
4807		 * Referenced by us
4808		 */
4809		if (pte & PG_A)
4810			val |= MINCORE_REFERENCED|MINCORE_REFERENCED_OTHER;
4811		else {
4812			/*
4813			 * Referenced by someone else
4814			 */
4815			vm_page_lock_queues();
4816			if ((m->flags & PG_REFERENCED) ||
4817			    pmap_ts_referenced(m)) {
4818				val |= MINCORE_REFERENCED_OTHER;
4819				vm_page_flag_set(m, PG_REFERENCED);
4820			}
4821			vm_page_unlock_queues();
4822		}
4823	}
4824	return val;
4825}
4826
4827void
4828pmap_activate(struct thread *td)
4829{
4830	pmap_t	pmap, oldpmap;
4831	u_int32_t  cr3;
4832
4833	critical_enter();
4834	pmap = vmspace_pmap(td->td_proc->p_vmspace);
4835	oldpmap = PCPU_GET(curpmap);
4836#if defined(SMP)
4837	atomic_clear_int(&oldpmap->pm_active, PCPU_GET(cpumask));
4838	atomic_set_int(&pmap->pm_active, PCPU_GET(cpumask));
4839#else
4840	oldpmap->pm_active &= ~1;
4841	pmap->pm_active |= 1;
4842#endif
4843#ifdef PAE
4844	cr3 = vtophys(pmap->pm_pdpt);
4845#else
4846	cr3 = vtophys(pmap->pm_pdir);
4847#endif
4848	/*
4849	 * pmap_activate is for the current thread on the current cpu
4850	 */
4851	td->td_pcb->pcb_cr3 = cr3;
4852	load_cr3(cr3);
4853	PCPU_SET(curpmap, pmap);
4854	critical_exit();
4855}
4856
4857/*
4858 *	Increase the starting virtual address of the given mapping if a
4859 *	different alignment might result in more superpage mappings.
4860 */
4861void
4862pmap_align_superpage(vm_object_t object, vm_ooffset_t offset,
4863    vm_offset_t *addr, vm_size_t size)
4864{
4865	vm_offset_t superpage_offset;
4866
4867	if (size < NBPDR)
4868		return;
4869	if (object != NULL && (object->flags & OBJ_COLORED) != 0)
4870		offset += ptoa(object->pg_color);
4871	superpage_offset = offset & PDRMASK;
4872	if (size - ((NBPDR - superpage_offset) & PDRMASK) < NBPDR ||
4873	    (*addr & PDRMASK) == superpage_offset)
4874		return;
4875	if ((*addr & PDRMASK) < superpage_offset)
4876		*addr = (*addr & ~PDRMASK) + superpage_offset;
4877	else
4878		*addr = ((*addr + PDRMASK) & ~PDRMASK) + superpage_offset;
4879}
4880
4881
4882#if defined(PMAP_DEBUG)
4883pmap_pid_dump(int pid)
4884{
4885	pmap_t pmap;
4886	struct proc *p;
4887	int npte = 0;
4888	int index;
4889
4890	sx_slock(&allproc_lock);
4891	FOREACH_PROC_IN_SYSTEM(p) {
4892		if (p->p_pid != pid)
4893			continue;
4894
4895		if (p->p_vmspace) {
4896			int i,j;
4897			index = 0;
4898			pmap = vmspace_pmap(p->p_vmspace);
4899			for (i = 0; i < NPDEPTD; i++) {
4900				pd_entry_t *pde;
4901				pt_entry_t *pte;
4902				vm_offset_t base = i << PDRSHIFT;
4903
4904				pde = &pmap->pm_pdir[i];
4905				if (pde && pmap_pde_v(pde)) {
4906					for (j = 0; j < NPTEPG; j++) {
4907						vm_offset_t va = base + (j << PAGE_SHIFT);
4908						if (va >= (vm_offset_t) VM_MIN_KERNEL_ADDRESS) {
4909							if (index) {
4910								index = 0;
4911								printf("\n");
4912							}
4913							sx_sunlock(&allproc_lock);
4914							return npte;
4915						}
4916						pte = pmap_pte(pmap, va);
4917						if (pte && pmap_pte_v(pte)) {
4918							pt_entry_t pa;
4919							vm_page_t m;
4920							pa = *pte;
4921							m = PHYS_TO_VM_PAGE(pa & PG_FRAME);
4922							printf("va: 0x%x, pt: 0x%x, h: %d, w: %d, f: 0x%x",
4923								va, pa, m->hold_count, m->wire_count, m->flags);
4924							npte++;
4925							index++;
4926							if (index >= 2) {
4927								index = 0;
4928								printf("\n");
4929							} else {
4930								printf(" ");
4931							}
4932						}
4933					}
4934				}
4935			}
4936		}
4937	}
4938	sx_sunlock(&allproc_lock);
4939	return npte;
4940}
4941#endif
4942
4943#if defined(DEBUG)
4944
4945static void	pads(pmap_t pm);
4946void		pmap_pvdump(vm_offset_t pa);
4947
4948/* print address space of pmap*/
4949static void
4950pads(pmap_t pm)
4951{
4952	int i, j;
4953	vm_paddr_t va;
4954	pt_entry_t *ptep;
4955
4956	if (pm == kernel_pmap)
4957		return;
4958	for (i = 0; i < NPDEPTD; i++)
4959		if (pm->pm_pdir[i])
4960			for (j = 0; j < NPTEPG; j++) {
4961				va = (i << PDRSHIFT) + (j << PAGE_SHIFT);
4962				if (pm == kernel_pmap && va < KERNBASE)
4963					continue;
4964				if (pm != kernel_pmap && va > UPT_MAX_ADDRESS)
4965					continue;
4966				ptep = pmap_pte(pm, va);
4967				if (pmap_pte_v(ptep))
4968					printf("%x:%x ", va, *ptep);
4969			};
4970
4971}
4972
4973void
4974pmap_pvdump(vm_paddr_t pa)
4975{
4976	pv_entry_t pv;
4977	pmap_t pmap;
4978	vm_page_t m;
4979
4980	printf("pa %x", pa);
4981	m = PHYS_TO_VM_PAGE(pa);
4982	TAILQ_FOREACH(pv, &m->md.pv_list, pv_list) {
4983		pmap = PV_PMAP(pv);
4984		printf(" -> pmap %p, va %x", (void *)pmap, pv->pv_va);
4985		pads(pmap);
4986	}
4987	printf(" ");
4988}
4989#endif
4990