1/*-
2 * Copyright (c) 1991 Regents of the University of California.
3 * All rights reserved.
4 * Copyright (c) 1994 John S. Dyson
5 * All rights reserved.
6 * Copyright (c) 1994 David Greenman
7 * All rights reserved.
8 * Copyright (c) 2003 Peter Wemm
9 * All rights reserved.
10 * Copyright (c) 2005 Alan L. Cox <alc@cs.rice.edu>
11 * All rights reserved.
12 *
13 * This code is derived from software contributed to Berkeley by
14 * the Systems Programming Group of the University of Utah Computer
15 * Science Department and William Jolitz of UUNET Technologies Inc.
16 *
17 * Redistribution and use in source and binary forms, with or without
18 * modification, are permitted provided that the following conditions
19 * are met:
20 * 1. Redistributions of source code must retain the above copyright
21 *    notice, this list of conditions and the following disclaimer.
22 * 2. Redistributions in binary form must reproduce the above copyright
23 *    notice, this list of conditions and the following disclaimer in the
24 *    documentation and/or other materials provided with the distribution.
25 * 3. All advertising materials mentioning features or use of this software
26 *    must display the following acknowledgement:
27 *	This product includes software developed by the University of
28 *	California, Berkeley and its contributors.
29 * 4. Neither the name of the University nor the names of its contributors
30 *    may be used to endorse or promote products derived from this software
31 *    without specific prior written permission.
32 *
33 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
34 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
35 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
36 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
37 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
38 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
39 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
40 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
41 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
42 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
43 * SUCH DAMAGE.
44 *
45 *	from:	@(#)pmap.c	7.7 (Berkeley)	5/12/91
46 */
47/*-
48 * Copyright (c) 2003 Networks Associates Technology, Inc.
49 * All rights reserved.
50 *
51 * This software was developed for the FreeBSD Project by Jake Burkholder,
52 * Safeport Network Services, and Network Associates Laboratories, the
53 * Security Research Division of Network Associates, Inc. under
54 * DARPA/SPAWAR contract N66001-01-C-8035 ("CBOSS"), as part of the DARPA
55 * CHATS research program.
56 *
57 * Redistribution and use in source and binary forms, with or without
58 * modification, are permitted provided that the following conditions
59 * are met:
60 * 1. Redistributions of source code must retain the above copyright
61 *    notice, this list of conditions and the following disclaimer.
62 * 2. Redistributions in binary form must reproduce the above copyright
63 *    notice, this list of conditions and the following disclaimer in the
64 *    documentation and/or other materials provided with the distribution.
65 *
66 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
67 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
68 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
69 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
70 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
71 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
72 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
73 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
74 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
75 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
76 * SUCH DAMAGE.
77 */
78
79#include <sys/cdefs.h>
80__FBSDID("$FreeBSD: head/sys/amd64/amd64/pmap.c 164912 2006-12-05 11:31:33Z ru $");
81
82/*
83 *	Manages physical address maps.
84 *
85 *	In addition to hardware address maps, this
86 *	module is called upon to provide software-use-only
87 *	maps which may or may not be stored in the same
88 *	form as hardware maps.  These pseudo-maps are
89 *	used to store intermediate results from copy
90 *	operations to and from address spaces.
91 *
92 *	Since the information managed by this module is
93 *	also stored by the logical address mapping module,
94 *	this module may throw away valid virtual-to-physical
95 *	mappings at almost any time.  However, invalidations
96 *	of virtual-to-physical mappings must be done as
97 *	requested.
98 *
99 *	In order to cope with hardware architectures which
100 *	make virtual-to-physical map invalidates expensive,
101 *	this module may delay invalidation or reduced-protection
102 *	operations until such time as they are actually
103 *	necessary.  This module is given full information as
104 *	to which processors are currently using which maps,
105 *	and as to when physical maps must be made correct.
106 */
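
/*
 *	A note on the amd64 structures managed here: a virtual address
 *	is translated through four levels of tables -- PML4, PDP
 *	(page-directory pointer), PD (page directory) and PT (page
 *	table).  Each level is a single 4KB page of 512 8-byte entries,
 *	so each level decodes 9 bits of the address; together with the
 *	12-bit page offset this covers the 48-bit virtual address space.
 *	A PD entry with PG_PS set maps a 2MB page directly, with no PT
 *	level beneath it.
 */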
107
108#include "opt_msgbuf.h"
109#include "opt_pmap.h"
110
111#include <sys/param.h>
112#include <sys/systm.h>
113#include <sys/kernel.h>
114#include <sys/lock.h>
115#include <sys/malloc.h>
116#include <sys/mman.h>
117#include <sys/msgbuf.h>
118#include <sys/mutex.h>
119#include <sys/proc.h>
120#include <sys/sx.h>
121#include <sys/vmmeter.h>
122#include <sys/sched.h>
123#include <sys/sysctl.h>
124#ifdef SMP
125#include <sys/smp.h>
126#endif
127
128#include <vm/vm.h>
129#include <vm/vm_param.h>
130#include <vm/vm_kern.h>
131#include <vm/vm_page.h>
132#include <vm/vm_map.h>
133#include <vm/vm_object.h>
134#include <vm/vm_extern.h>
135#include <vm/vm_pageout.h>
136#include <vm/vm_pager.h>
137#include <vm/uma.h>
138
139#include <machine/cpu.h>
140#include <machine/cputypes.h>
141#include <machine/md_var.h>
142#include <machine/pcb.h>
143#include <machine/specialreg.h>
144#ifdef SMP
145#include <machine/smp.h>
146#endif
147
148#ifndef PMAP_SHPGPERPROC
149#define PMAP_SHPGPERPROC 200
150#endif
151
152#if defined(DIAGNOSTIC)
153#define PMAP_DIAGNOSTIC
154#endif
155
156#if !defined(PMAP_DIAGNOSTIC)
157#define PMAP_INLINE __inline
158#else
159#define PMAP_INLINE
160#endif
161
162#define PV_STATS
163#ifdef PV_STATS
164#define PV_STAT(x)	do { x ; } while (0)
165#else
166#define PV_STAT(x)	do { } while (0)
167#endif
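/* Note: PV_STATS is defined unconditionally above, so PV_STAT() is always live. */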
168
169struct pmap kernel_pmap_store;
170
171static vm_paddr_t avail_start;	/* PA of first available physical page */
172vm_offset_t virtual_avail;	/* VA of first avail page (after kernel bss) */
173vm_offset_t virtual_end;	/* VA of last avail page (end of kernel AS) */
174
175static int nkpt;
176static int ndmpdp;
177static vm_paddr_t dmaplimit;
178vm_offset_t kernel_vm_end;
179pt_entry_t pg_nx;
180
181static u_int64_t	KPTphys;	/* phys addr of kernel level 1 */
182static u_int64_t	KPDphys;	/* phys addr of kernel level 2 */
183u_int64_t		KPDPphys;	/* phys addr of kernel level 3 */
184u_int64_t		KPML4phys;	/* phys addr of kernel level 4 */
185
186static u_int64_t	DMPDphys;	/* phys addr of direct mapped level 2 */
187static u_int64_t	DMPDPphys;	/* phys addr of direct mapped level 3 */
188
189/*
190 * Data for the pv entry allocation mechanism
191 */
192static int pv_entry_count = 0, pv_entry_max = 0, pv_entry_high_water = 0;
193static int shpgperproc = PMAP_SHPGPERPROC;
194
195/*
196 * All those kernel PT submaps that BSD is so fond of
197 */
198pt_entry_t *CMAP1 = 0;
199caddr_t CADDR1 = 0;
200struct msgbuf *msgbufp = 0;
201
202/*
203 * Crashdump maps.
204 */
205static caddr_t crashdumpmap;
206
207static void	free_pv_entry(pmap_t pmap, pv_entry_t pv);
208static pv_entry_t get_pv_entry(pmap_t locked_pmap, int try);
209
210static vm_page_t pmap_enter_quick_locked(pmap_t pmap, vm_offset_t va,
211    vm_page_t m, vm_prot_t prot, vm_page_t mpte);
212static int pmap_remove_pte(pmap_t pmap, pt_entry_t *ptq,
213		vm_offset_t sva, pd_entry_t ptepde);
214static void pmap_remove_page(pmap_t pmap, vm_offset_t va, pd_entry_t *pde);
215static void pmap_remove_entry(struct pmap *pmap, vm_page_t m,
216		vm_offset_t va);
217static void pmap_insert_entry(pmap_t pmap, vm_offset_t va, vm_page_t m);
218static boolean_t pmap_try_insert_pv_entry(pmap_t pmap, vm_offset_t va,
219    vm_page_t m);
220
221static vm_page_t pmap_allocpde(pmap_t pmap, vm_offset_t va, int flags);
222static vm_page_t pmap_allocpte(pmap_t pmap, vm_offset_t va, int flags);
223
224static vm_page_t _pmap_allocpte(pmap_t pmap, vm_pindex_t ptepindex, int flags);
225static int _pmap_unwire_pte_hold(pmap_t pmap, vm_offset_t va, vm_page_t m);
226static int pmap_unuse_pt(pmap_t, vm_offset_t, pd_entry_t);
227static vm_offset_t pmap_kmem_choose(vm_offset_t addr);
228
229CTASSERT(1 << PDESHIFT == sizeof(pd_entry_t));
230CTASSERT(1 << PTESHIFT == sizeof(pt_entry_t));
231
232/*
233 * Move the kernel virtual free pointer to the next
234 * 2MB.  This is used to help improve performance
235 * by using a large (2MB) page for much of the kernel
236 * (.text, .data, .bss)
237 */
238static vm_offset_t
239pmap_kmem_choose(vm_offset_t addr)
240{
241	vm_offset_t newaddr = addr;
242
243	newaddr = (addr + (NBPDR - 1)) & ~(NBPDR - 1);
244	return newaddr;
245}
246
247/********************/
248/* Inline functions */
249/********************/
250
251/* Return a non-clipped PD index for a given VA */
252static __inline vm_pindex_t
253pmap_pde_pindex(vm_offset_t va)
254{
255	return va >> PDRSHIFT;
256}
257
258
259/* Return various clipped indexes for a given VA */
260static __inline vm_pindex_t
261pmap_pte_index(vm_offset_t va)
262{
263
264	return ((va >> PAGE_SHIFT) & ((1ul << NPTEPGSHIFT) - 1));
265}
266
267static __inline vm_pindex_t
268pmap_pde_index(vm_offset_t va)
269{
270
271	return ((va >> PDRSHIFT) & ((1ul << NPDEPGSHIFT) - 1));
272}
273
274static __inline vm_pindex_t
275pmap_pdpe_index(vm_offset_t va)
276{
277
278	return ((va >> PDPSHIFT) & ((1ul << NPDPEPGSHIFT) - 1));
279}
280
281static __inline vm_pindex_t
282pmap_pml4e_index(vm_offset_t va)
283{
284
285	return ((va >> PML4SHIFT) & ((1ul << NPML4EPGSHIFT) - 1));
286}
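
/*
 * Example of the decomposition performed by the index functions above:
 * for va = KERNBASE = 0xffffffff80000000 the low 48 bits are
 * 0xffff80000000, giving a pml4 index of 511, a pdpe index of 510, and
 * pde and pte indices of 0.
 */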
287
288/* Return a pointer to the PML4 slot that corresponds to a VA */
289static __inline pml4_entry_t *
290pmap_pml4e(pmap_t pmap, vm_offset_t va)
291{
292
293	if (!pmap)
294		return NULL;
295	return (&pmap->pm_pml4[pmap_pml4e_index(va)]);
296}
297
298/* Return a pointer to the PDP slot that corresponds to a VA */
299static __inline pdp_entry_t *
300pmap_pml4e_to_pdpe(pml4_entry_t *pml4e, vm_offset_t va)
301{
302	pdp_entry_t *pdpe;
303
304	pdpe = (pdp_entry_t *)PHYS_TO_DMAP(*pml4e & PG_FRAME);
305	return (&pdpe[pmap_pdpe_index(va)]);
306}
307
308/* Return a pointer to the PDP slot that corresponds to a VA */
309static __inline pdp_entry_t *
310pmap_pdpe(pmap_t pmap, vm_offset_t va)
311{
312	pml4_entry_t *pml4e;
313
314	pml4e = pmap_pml4e(pmap, va);
315	if (pml4e == NULL || (*pml4e & PG_V) == 0)
316		return NULL;
317	return (pmap_pml4e_to_pdpe(pml4e, va));
318}
319
320/* Return a pointer to the PD slot that corresponds to a VA */
321static __inline pd_entry_t *
322pmap_pdpe_to_pde(pdp_entry_t *pdpe, vm_offset_t va)
323{
324	pd_entry_t *pde;
325
326	pde = (pd_entry_t *)PHYS_TO_DMAP(*pdpe & PG_FRAME);
327	return (&pde[pmap_pde_index(va)]);
328}
329
330/* Return a pointer to the PD slot that corresponds to a VA */
331static __inline pd_entry_t *
332pmap_pde(pmap_t pmap, vm_offset_t va)
333{
334	pdp_entry_t *pdpe;
335
336	pdpe = pmap_pdpe(pmap, va);
337	if (pdpe == NULL || (*pdpe & PG_V) == 0)
338		 return NULL;
339	return (pmap_pdpe_to_pde(pdpe, va));
340}
341
342/* Return a pointer to the PT slot that corresponds to a VA */
343static __inline pt_entry_t *
344pmap_pde_to_pte(pd_entry_t *pde, vm_offset_t va)
345{
346	pt_entry_t *pte;
347
348	pte = (pt_entry_t *)PHYS_TO_DMAP(*pde & PG_FRAME);
349	return (&pte[pmap_pte_index(va)]);
350}
351
352/* Return a pointer to the PT slot that corresponds to a VA */
353static __inline pt_entry_t *
354pmap_pte(pmap_t pmap, vm_offset_t va)
355{
356	pd_entry_t *pde;
357
358	pde = pmap_pde(pmap, va);
359	if (pde == NULL || (*pde & PG_V) == 0)
360		return NULL;
361	if ((*pde & PG_PS) != 0)	/* compat with i386 pmap_pte() */
362		return ((pt_entry_t *)pde);
363	return (pmap_pde_to_pte(pde, va));
364}
365
366
367static __inline pt_entry_t *
368pmap_pte_pde(pmap_t pmap, vm_offset_t va, pd_entry_t *ptepde)
369{
370	pd_entry_t *pde;
371
372	pde = pmap_pde(pmap, va);
373	if (pde == NULL || (*pde & PG_V) == 0)
374		return NULL;
375	*ptepde = *pde;
376	if ((*pde & PG_PS) != 0)	/* compat with i386 pmap_pte() */
377		return ((pt_entry_t *)pde);
378	return (pmap_pde_to_pte(pde, va));
379}
380
381
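/*
 * vtopte() and vtopde() rely on the recursive page table mapping: the
 * PML4 slot PML4PML4I points back at the PML4 page itself (installed in
 * create_pagetables() and pmap_pinit()), which makes PTmap and PDmap
 * windows onto the page tables of the current address space.  The masks
 * keep only the index bits that are meaningful within those windows.
 */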
382PMAP_INLINE pt_entry_t *
383vtopte(vm_offset_t va)
384{
385	u_int64_t mask = ((1ul << (NPTEPGSHIFT + NPDEPGSHIFT + NPDPEPGSHIFT + NPML4EPGSHIFT)) - 1);
386
387	return (PTmap + ((va >> PAGE_SHIFT) & mask));
388}
389
390static __inline pd_entry_t *
391vtopde(vm_offset_t va)
392{
393	u_int64_t mask = ((1ul << (NPDEPGSHIFT + NPDPEPGSHIFT + NPML4EPGSHIFT)) - 1);
394
395	return (PDmap + ((va >> PDRSHIFT) & mask));
396}
397
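/*
 * Hand out 'n' physical pages from the bottom of still-unused memory
 * (avail_start) during early bootstrap, before the VM system is up.
 * Zeroing through the raw physical address relies on low physical
 * memory still being mapped 1:1 by the bootstrap page tables in effect
 * this early in boot.
 */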
398static u_int64_t
399allocpages(int n)
400{
401	u_int64_t ret;
402
403	ret = avail_start;
404	bzero((void *)ret, n * PAGE_SIZE);
405	avail_start += n * PAGE_SIZE;
406	return (ret);
407}
408
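/*
 * Build the initial kernel page tables by hand: NKPT page table pages
 * plus a 2MB-page mapping covering the kernel and these allocations, a
 * direct map of at least 4GB of physical memory using 2MB pages, the
 * recursive PML4 slot that backs PTmap/PDmap, and the PML4 entries
 * connecting the kernel and direct-map regions.
 */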
409static void
410create_pagetables(void)
411{
412	int i;
413
414	/* Allocate pages */
415	KPTphys = allocpages(NKPT);
416	KPML4phys = allocpages(1);
417	KPDPphys = allocpages(NKPML4E);
418	KPDphys = allocpages(NKPDPE);
419
420	ndmpdp = (ptoa(Maxmem) + NBPDP - 1) >> PDPSHIFT;
421	if (ndmpdp < 4)		/* Minimum 4GB of dirmap */
422		ndmpdp = 4;
423	DMPDPphys = allocpages(NDMPML4E);
424	DMPDphys = allocpages(ndmpdp);
425	dmaplimit = (vm_paddr_t)ndmpdp << PDPSHIFT;
426
427	/* Fill in the underlying page table pages */
428	/* Read-only from zero to physfree */
429	/* XXX not fully used, underneath 2M pages */
430	for (i = 0; (i << PAGE_SHIFT) < avail_start; i++) {
431		((pt_entry_t *)KPTphys)[i] = i << PAGE_SHIFT;
432		((pt_entry_t *)KPTphys)[i] |= PG_RW | PG_V | PG_G;
433	}
434
435	/* Now map the page tables at their location within PTmap */
436	for (i = 0; i < NKPT; i++) {
437		((pd_entry_t *)KPDphys)[i] = KPTphys + (i << PAGE_SHIFT);
438		((pd_entry_t *)KPDphys)[i] |= PG_RW | PG_V;
439	}
440
441	/* Map from zero to end of allocations under 2M pages */
442	/* This replaces some of the KPTphys entries above */
443	for (i = 0; (i << PDRSHIFT) < avail_start; i++) {
444		((pd_entry_t *)KPDphys)[i] = i << PDRSHIFT;
445		((pd_entry_t *)KPDphys)[i] |= PG_RW | PG_V | PG_PS | PG_G;
446	}
447
448	/* And connect up the PD to the PDP */
449	for (i = 0; i < NKPDPE; i++) {
450		((pdp_entry_t *)KPDPphys)[i + KPDPI] = KPDphys + (i << PAGE_SHIFT);
451		((pdp_entry_t *)KPDPphys)[i + KPDPI] |= PG_RW | PG_V | PG_U;
452	}
453
454
455	/* Now set up the direct map space using 2MB pages */
456	for (i = 0; i < NPDEPG * ndmpdp; i++) {
457		((pd_entry_t *)DMPDphys)[i] = (vm_paddr_t)i << PDRSHIFT;
458		((pd_entry_t *)DMPDphys)[i] |= PG_RW | PG_V | PG_PS | PG_G;
459	}
460
461	/* And the direct map space's PDP */
462	for (i = 0; i < ndmpdp; i++) {
463		((pdp_entry_t *)DMPDPphys)[i] = DMPDphys + (i << PAGE_SHIFT);
464		((pdp_entry_t *)DMPDPphys)[i] |= PG_RW | PG_V | PG_U;
465	}
466
467	/* And recursively map PML4 to itself in order to get PTmap */
468	((pdp_entry_t *)KPML4phys)[PML4PML4I] = KPML4phys;
469	((pdp_entry_t *)KPML4phys)[PML4PML4I] |= PG_RW | PG_V | PG_U;
470
471	/* Connect the Direct Map slot up to the PML4 */
472	((pdp_entry_t *)KPML4phys)[DMPML4I] = DMPDPphys;
473	((pdp_entry_t *)KPML4phys)[DMPML4I] |= PG_RW | PG_V | PG_U;
474
475	/* Connect the KVA slot up to the PML4 */
476	((pdp_entry_t *)KPML4phys)[KPML4I] = KPDPphys;
477	((pdp_entry_t *)KPML4phys)[KPML4I] |= PG_RW | PG_V | PG_U;
478}
479
480/*
481 *	Bootstrap the system enough to run with virtual memory.
482 *
483 *	On amd64 this is called after mapping has already been enabled
484 *	and just syncs the pmap module with what has already been done.
485 *	[We can't call it easily with mapping off since the kernel is not
486 *	mapped with PA == VA, hence we would have to relocate every address
487 *	from the linked base (virtual) address "KERNBASE" to the actual
488 *	(physical) address starting relative to 0]
489 */
490void
491pmap_bootstrap(vm_paddr_t *firstaddr)
492{
493	vm_offset_t va;
494	pt_entry_t *pte, *unused;
495
496	avail_start = *firstaddr;
497
498	/*
499	 * Create an initial set of page tables to run the kernel in.
500	 */
501	create_pagetables();
502	*firstaddr = avail_start;
503
504	virtual_avail = (vm_offset_t) KERNBASE + avail_start;
505	virtual_avail = pmap_kmem_choose(virtual_avail);
506
507	virtual_end = VM_MAX_KERNEL_ADDRESS;
508
509
510	/* XXX do %cr0 as well */
511	load_cr4(rcr4() | CR4_PGE | CR4_PSE);
512	load_cr3(KPML4phys);
513
514	/*
515	 * Initialize the kernel pmap (which is statically allocated).
516	 */
517	PMAP_LOCK_INIT(kernel_pmap);
518	kernel_pmap->pm_pml4 = (pdp_entry_t *) (KERNBASE + KPML4phys);
519	kernel_pmap->pm_active = -1;	/* don't allow deactivation */
520	TAILQ_INIT(&kernel_pmap->pm_pvchunk);
521	nkpt = NKPT;
522
523	/*
524	 * Reserve some special page table entries/VA space for temporary
525	 * mapping of pages.
526	 */
527#define	SYSMAP(c, p, v, n)	\
528	v = (c)va; va += ((n)*PAGE_SIZE); p = pte; pte += (n);
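	/*
	 * SYSMAP(c, p, v, n): carve out n pages of KVA starting at 'va',
	 * store that address (cast to type 'c') in 'v' and the address
	 * of its first page table entry in 'p', then advance the 'va'
	 * and 'pte' cursors past the reservation.
	 */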
529
530	va = virtual_avail;
531	pte = vtopte(va);
532
533	/*
534	 * CMAP1 is only used for the memory test.
535	 */
536	SYSMAP(caddr_t, CMAP1, CADDR1, 1)
537
538	/*
539	 * Crashdump maps.
540	 */
541	SYSMAP(caddr_t, unused, crashdumpmap, MAXDUMPPGS)
542
543	/*
544	 * msgbufp is used to map the system message buffer.
545	 */
546	SYSMAP(struct msgbuf *, unused, msgbufp, atop(round_page(MSGBUF_SIZE)))
547
548	virtual_avail = va;
549
550	*CMAP1 = 0;
551
552	invltlb();
553
554	/* Initialize the PAT MSR. */
555	pmap_init_pat();
556}
557
558/*
559 * Setup the PAT MSR.
560 */
561void
562pmap_init_pat(void)
563{
564	uint64_t pat_msr;
565
566	/* Bail if this CPU doesn't implement PAT. */
567	if (!(cpu_feature & CPUID_PAT))
568		panic("no PAT??");
569
570#ifdef PAT_WORKS
571	/*
572	 * Leave the indices 0-3 at the default of WB, WT, UC, and UC-.
573	 * Program 4 and 5 as WP and WC.
574	 * Leave 6 and 7 as UC and UC-.
575	 */
576	pat_msr = rdmsr(MSR_PAT);
577	pat_msr &= ~(PAT_MASK(4) | PAT_MASK(5));
578	pat_msr |= PAT_VALUE(4, PAT_WRITE_PROTECTED) |
579	    PAT_VALUE(5, PAT_WRITE_COMBINING);
580#else
581	/*
582	 * Due to some Intel errata, we can only safely use the lower 4
583	 * PAT entries.  Thus, just replace PAT Index 2 with WC instead
584	 * of UC-.
585	 *
586	 *   Intel Pentium III Processor Specification Update
587	 * Errata E.27 (Upper Four PAT Entries Not Usable With Mode B
588	 * or Mode C Paging)
589	 *
590	 *   Intel Pentium 4 Processor Specification Update
591	 * Errata N46 (PAT Index MSB May Be Calculated Incorrectly)
592	 */
593	pat_msr = rdmsr(MSR_PAT);
594	pat_msr &= ~PAT_MASK(2);
595	pat_msr |= PAT_VALUE(2, PAT_WRITE_COMBINING);
596#endif
597	wrmsr(MSR_PAT, pat_msr);
598}
599
600/*
601 *	Initialize a vm_page's machine-dependent fields.
602 */
603void
604pmap_page_init(vm_page_t m)
605{
606
607	TAILQ_INIT(&m->md.pv_list);
608	m->md.pv_list_count = 0;
609}
610
611/*
612 *	Initialize the pmap module.
613 *	Called by vm_init, to initialize any structures that the pmap
614 *	system needs to map virtual memory.
615 */
616void
617pmap_init(void)
618{
619
620	/*
621	 * Initialize the address space (zone) for the pv entries.  Set a
622	 * high water mark so that the system can recover from excessive
623	 * numbers of pv entries.
624	 */
625	TUNABLE_INT_FETCH("vm.pmap.shpgperproc", &shpgperproc);
626	pv_entry_max = shpgperproc * maxproc + cnt.v_page_count;
627	TUNABLE_INT_FETCH("vm.pmap.pv_entries", &pv_entry_max);
628	pv_entry_high_water = 9 * (pv_entry_max / 10);
629}
630
631SYSCTL_NODE(_vm, OID_AUTO, pmap, CTLFLAG_RD, 0, "VM/pmap parameters");
632static int
633pmap_pventry_proc(SYSCTL_HANDLER_ARGS)
634{
635	int error;
636
637	error = sysctl_handle_int(oidp, oidp->oid_arg1, oidp->oid_arg2, req);
638	if (error == 0 && req->newptr) {
639		shpgperproc = (pv_entry_max - cnt.v_page_count) / maxproc;
640		pv_entry_high_water = 9 * (pv_entry_max / 10);
641	}
642	return (error);
643}
644SYSCTL_PROC(_vm_pmap, OID_AUTO, pv_entry_max, CTLTYPE_INT|CTLFLAG_RW,
645    &pv_entry_max, 0, pmap_pventry_proc, "IU", "Max number of PV entries");
646
647static int
648pmap_shpgperproc_proc(SYSCTL_HANDLER_ARGS)
649{
650	int error;
651
652	error = sysctl_handle_int(oidp, oidp->oid_arg1, oidp->oid_arg2, req);
653	if (error == 0 && req->newptr) {
654		pv_entry_max = shpgperproc * maxproc + cnt.v_page_count;
655		pv_entry_high_water = 9 * (pv_entry_max / 10);
656	}
657	return (error);
658}
659SYSCTL_PROC(_vm_pmap, OID_AUTO, shpgperproc, CTLTYPE_INT|CTLFLAG_RW,
660    &shpgperproc, 0, pmap_shpgperproc_proc, "IU", "Page share factor per proc");
661
662
663/***************************************************
664 * Low level helper routines.....
665 ***************************************************/
666
667/*
668 * Determine the appropriate bits to set in a PTE or PDE for a specified
669 * caching mode.
670 */
671static int
672pmap_cache_bits(int mode, boolean_t is_pde)
673{
674	int pat_flag, pat_index, cache_bits;
675
676	/* The PAT bit is different for PTE's and PDE's. */
677	pat_flag = is_pde ? PG_PDE_PAT : PG_PTE_PAT;
678
679	/* If we don't support PAT, map extended modes to older ones. */
680	if (!(cpu_feature & CPUID_PAT)) {
681		switch (mode) {
682		case PAT_UNCACHEABLE:
683		case PAT_WRITE_THROUGH:
684		case PAT_WRITE_BACK:
685			break;
686		case PAT_UNCACHED:
687		case PAT_WRITE_COMBINING:
688		case PAT_WRITE_PROTECTED:
689			mode = PAT_UNCACHEABLE;
690			break;
691		}
692	}
693
694	/* Map the caching mode to a PAT index. */
695	switch (mode) {
696#ifdef PAT_WORKS
697	case PAT_UNCACHEABLE:
698		pat_index = 3;
699		break;
700	case PAT_WRITE_THROUGH:
701		pat_index = 1;
702		break;
703	case PAT_WRITE_BACK:
704		pat_index = 0;
705		break;
706	case PAT_UNCACHED:
707		pat_index = 2;
708		break;
709	case PAT_WRITE_COMBINING:
710		pat_index = 5;
711		break;
712	case PAT_WRITE_PROTECTED:
713		pat_index = 4;
714		break;
715#else
716	case PAT_UNCACHED:
717	case PAT_UNCACHEABLE:
718	case PAT_WRITE_PROTECTED:
719		pat_index = 3;
720		break;
721	case PAT_WRITE_THROUGH:
722		pat_index = 1;
723		break;
724	case PAT_WRITE_BACK:
725		pat_index = 0;
726		break;
727	case PAT_WRITE_COMBINING:
728		pat_index = 2;
729		break;
730#endif
731	default:
732		panic("Unknown caching mode %d\n", mode);
733	}
734
735	/* Map the 3-bit index value into the PAT, PCD, and PWT bits. */
736	cache_bits = 0;
737	if (pat_index & 0x4)
738		cache_bits |= pat_flag;
739	if (pat_index & 0x2)
740		cache_bits |= PG_NC_PCD;
741	if (pat_index & 0x1)
742		cache_bits |= PG_NC_PWT;
743	return (cache_bits);
744}
745#ifdef SMP
746/*
747 * For SMP, these functions have to use the IPI mechanism for coherence.
748 */
749void
750pmap_invalidate_page(pmap_t pmap, vm_offset_t va)
751{
752	u_int cpumask;
753	u_int other_cpus;
754
755	if (smp_started) {
756		if (!(read_rflags() & PSL_I))
757			panic("%s: interrupts disabled", __func__);
758		mtx_lock_spin(&smp_ipi_mtx);
759	} else
760		critical_enter();
761	/*
762	 * We need to disable interrupt preemption but MUST NOT have
763	 * interrupts disabled here.
764	 * XXX we may need to hold schedlock to get a coherent pm_active
765	 * XXX critical sections disable interrupts again
766	 */
767	if (pmap == kernel_pmap || pmap->pm_active == all_cpus) {
768		invlpg(va);
769		smp_invlpg(va);
770	} else {
771		cpumask = PCPU_GET(cpumask);
772		other_cpus = PCPU_GET(other_cpus);
773		if (pmap->pm_active & cpumask)
774			invlpg(va);
775		if (pmap->pm_active & other_cpus)
776			smp_masked_invlpg(pmap->pm_active & other_cpus, va);
777	}
778	if (smp_started)
779		mtx_unlock_spin(&smp_ipi_mtx);
780	else
781		critical_exit();
782}
783
784void
785pmap_invalidate_range(pmap_t pmap, vm_offset_t sva, vm_offset_t eva)
786{
787	u_int cpumask;
788	u_int other_cpus;
789	vm_offset_t addr;
790
791	if (smp_started) {
792		if (!(read_rflags() & PSL_I))
793			panic("%s: interrupts disabled", __func__);
794		mtx_lock_spin(&smp_ipi_mtx);
795	} else
796		critical_enter();
797	/*
798	 * We need to disable interrupt preemption but MUST NOT have
799	 * interrupts disabled here.
800	 * XXX we may need to hold schedlock to get a coherent pm_active
801	 * XXX critical sections disable interrupts again
802	 */
803	if (pmap == kernel_pmap || pmap->pm_active == all_cpus) {
804		for (addr = sva; addr < eva; addr += PAGE_SIZE)
805			invlpg(addr);
806		smp_invlpg_range(sva, eva);
807	} else {
808		cpumask = PCPU_GET(cpumask);
809		other_cpus = PCPU_GET(other_cpus);
810		if (pmap->pm_active & cpumask)
811			for (addr = sva; addr < eva; addr += PAGE_SIZE)
812				invlpg(addr);
813		if (pmap->pm_active & other_cpus)
814			smp_masked_invlpg_range(pmap->pm_active & other_cpus,
815			    sva, eva);
816	}
817	if (smp_started)
818		mtx_unlock_spin(&smp_ipi_mtx);
819	else
820		critical_exit();
821}
822
823void
824pmap_invalidate_all(pmap_t pmap)
825{
826	u_int cpumask;
827	u_int other_cpus;
828
829	if (smp_started) {
830		if (!(read_rflags() & PSL_I))
831			panic("%s: interrupts disabled", __func__);
832		mtx_lock_spin(&smp_ipi_mtx);
833	} else
834		critical_enter();
835	/*
836	 * We need to disable interrupt preemption but MUST NOT have
837	 * interrupts disabled here.
838	 * XXX we may need to hold schedlock to get a coherent pm_active
839	 * XXX critical sections disable interrupts again
840	 */
841	if (pmap == kernel_pmap || pmap->pm_active == all_cpus) {
842		invltlb();
843		smp_invltlb();
844	} else {
845		cpumask = PCPU_GET(cpumask);
846		other_cpus = PCPU_GET(other_cpus);
847		if (pmap->pm_active & cpumask)
848			invltlb();
849		if (pmap->pm_active & other_cpus)
850			smp_masked_invltlb(pmap->pm_active & other_cpus);
851	}
852	if (smp_started)
853		mtx_unlock_spin(&smp_ipi_mtx);
854	else
855		critical_exit();
856}
857
858void
859pmap_invalidate_cache(void)
860{
861
862	if (smp_started) {
863		if (!(read_rflags() & PSL_I))
864			panic("%s: interrupts disabled", __func__);
865		mtx_lock_spin(&smp_ipi_mtx);
866	} else
867		critical_enter();
868	/*
869	 * We need to disable interrupt preemption but MUST NOT have
870	 * interrupts disabled here.
871	 * XXX we may need to hold schedlock to get a coherent pm_active
872	 * XXX critical sections disable interrupts again
873	 */
874	wbinvd();
875	smp_cache_flush();
876	if (smp_started)
877		mtx_unlock_spin(&smp_ipi_mtx);
878	else
879		critical_exit();
880}
881#else /* !SMP */
882/*
883 * Normal, non-SMP, invalidation functions.
884 * We inline these within pmap.c for speed.
885 */
886PMAP_INLINE void
887pmap_invalidate_page(pmap_t pmap, vm_offset_t va)
888{
889
890	if (pmap == kernel_pmap || pmap->pm_active)
891		invlpg(va);
892}
893
894PMAP_INLINE void
895pmap_invalidate_range(pmap_t pmap, vm_offset_t sva, vm_offset_t eva)
896{
897	vm_offset_t addr;
898
899	if (pmap == kernel_pmap || pmap->pm_active)
900		for (addr = sva; addr < eva; addr += PAGE_SIZE)
901			invlpg(addr);
902}
903
904PMAP_INLINE void
905pmap_invalidate_all(pmap_t pmap)
906{
907
908	if (pmap == kernel_pmap || pmap->pm_active)
909		invltlb();
910}
911
912PMAP_INLINE void
913pmap_invalidate_cache(void)
914{
915
916	wbinvd();
917}
918#endif /* !SMP */
919
920/*
921 * Are we the current address space or the kernel pmap?
922 */
923static __inline int
924pmap_is_current(pmap_t pmap)
925{
926	return (pmap == kernel_pmap ||
927	    (pmap->pm_pml4[PML4PML4I] & PG_FRAME) == (PML4pml4e[0] & PG_FRAME));
928}
929
930/*
931 *	Routine:	pmap_extract
932 *	Function:
933 *		Extract the physical page address associated
934 *		with the given map/virtual_address pair.
935 */
936vm_paddr_t
937pmap_extract(pmap_t pmap, vm_offset_t va)
938{
939	vm_paddr_t rtval;
940	pt_entry_t *pte;
941	pd_entry_t pde, *pdep;
942
943	rtval = 0;
944	PMAP_LOCK(pmap);
945	pdep = pmap_pde(pmap, va);
946	if (pdep != NULL) {
947		pde = *pdep;
948		if (pde) {
949			if ((pde & PG_PS) != 0) {
950				rtval = (pde & PG_PS_FRAME) | (va & PDRMASK);
951				PMAP_UNLOCK(pmap);
952				return rtval;
953			}
954			pte = pmap_pde_to_pte(pdep, va);
955			rtval = (*pte & PG_FRAME) | (va & PAGE_MASK);
956		}
957	}
958	PMAP_UNLOCK(pmap);
959	return (rtval);
960}
961
962/*
963 *	Routine:	pmap_extract_and_hold
964 *	Function:
965 *		Atomically extract and hold the physical page
966 *		with the given pmap and virtual address pair
967 *		if that mapping permits the given protection.
968 */
969vm_page_t
970pmap_extract_and_hold(pmap_t pmap, vm_offset_t va, vm_prot_t prot)
971{
972	pd_entry_t pde, *pdep;
973	pt_entry_t pte;
974	vm_page_t m;
975
976	m = NULL;
977	vm_page_lock_queues();
978	PMAP_LOCK(pmap);
979	pdep = pmap_pde(pmap, va);
980	if (pdep != NULL && (pde = *pdep)) {
981		if (pde & PG_PS) {
982			if ((pde & PG_RW) || (prot & VM_PROT_WRITE) == 0) {
983				m = PHYS_TO_VM_PAGE((pde & PG_PS_FRAME) |
984				    (va & PDRMASK));
985				vm_page_hold(m);
986			}
987		} else {
988			pte = *pmap_pde_to_pte(pdep, va);
989			if ((pte & PG_V) &&
990			    ((pte & PG_RW) || (prot & VM_PROT_WRITE) == 0)) {
991				m = PHYS_TO_VM_PAGE(pte & PG_FRAME);
992				vm_page_hold(m);
993			}
994		}
995	}
996	vm_page_unlock_queues();
997	PMAP_UNLOCK(pmap);
998	return (m);
999}
1000
1001vm_paddr_t
1002pmap_kextract(vm_offset_t va)
1003{
1004	pd_entry_t *pde;
1005	vm_paddr_t pa;
1006
1007	if (va >= DMAP_MIN_ADDRESS && va < DMAP_MAX_ADDRESS) {
1008		pa = DMAP_TO_PHYS(va);
1009	} else {
1010		pde = vtopde(va);
1011		if (*pde & PG_PS) {
1012			pa = (*pde & PG_PS_FRAME) | (va & PDRMASK);
1013		} else {
1014			pa = *vtopte(va);
1015			pa = (pa & PG_FRAME) | (va & PAGE_MASK);
1016		}
1017	}
1018	return pa;
1019}
1020
1021/***************************************************
1022 * Low level mapping routines.....
1023 ***************************************************/
1024
1025/*
1026 * Add a wired page to the kva.
1027 * Note: not SMP coherent.
1028 */
1029PMAP_INLINE void
1030pmap_kenter(vm_offset_t va, vm_paddr_t pa)
1031{
1032	pt_entry_t *pte;
1033
1034	pte = vtopte(va);
1035	pte_store(pte, pa | PG_RW | PG_V | PG_G);
1036}
1037
1038PMAP_INLINE void
1039pmap_kenter_attr(vm_offset_t va, vm_paddr_t pa, int mode)
1040{
1041	pt_entry_t *pte;
1042
1043	pte = vtopte(va);
1044	pte_store(pte, pa | PG_RW | PG_V | PG_G | pmap_cache_bits(mode, 0));
1045}
1046
1047/*
1048 * Remove a page from the kernel pagetables.
1049 * Note: not SMP coherent.
1050 */
1051PMAP_INLINE void
1052pmap_kremove(vm_offset_t va)
1053{
1054	pt_entry_t *pte;
1055
1056	pte = vtopte(va);
1057	pte_clear(pte);
1058}
1059
1060/*
1061 *	Used to map a range of physical addresses into kernel
1062 *	virtual address space.
1063 *
1064 *	The value passed in '*virt' is a suggested virtual address for
1065 *	the mapping. Architectures which can support a direct-mapped
1066 *	physical to virtual region can return the appropriate address
1067 *	within that region, leaving '*virt' unchanged. Other
1068 *	architectures should map the pages starting at '*virt' and
1069 *	update '*virt' with the first usable address after the mapped
1070 *	region.
1071 */
1072vm_offset_t
1073pmap_map(vm_offset_t *virt, vm_paddr_t start, vm_paddr_t end, int prot)
1074{
1075	return PHYS_TO_DMAP(start);
1076}
1077
1078
1079/*
1080 * Add a list of wired pages to the kva.
1081 * This routine is only used for temporary
1082 * kernel mappings that do not need to have
1083 * page modification or references recorded.
1084 * Note that old mappings are simply written
1085 * over.  The page *must* be wired.
1086 * Note: SMP coherent.  Uses a ranged shootdown IPI.
1087 */
1088void
1089pmap_qenter(vm_offset_t sva, vm_page_t *ma, int count)
1090{
1091	pt_entry_t *endpte, oldpte, *pte;
1092
1093	oldpte = 0;
1094	pte = vtopte(sva);
1095	endpte = pte + count;
1096	while (pte < endpte) {
1097		oldpte |= *pte;
1098		pte_store(pte, VM_PAGE_TO_PHYS(*ma) | PG_G | PG_RW | PG_V);
1099		pte++;
1100		ma++;
1101	}
1102	if ((oldpte & PG_V) != 0)
1103		pmap_invalidate_range(kernel_pmap, sva, sva + count *
1104		    PAGE_SIZE);
1105}
1106
1107/*
1108 * This routine tears out page mappings from the
1109 * kernel -- it is meant only for temporary mappings.
1110 * Note: SMP coherent.  Uses a ranged shootdown IPI.
1111 */
1112void
1113pmap_qremove(vm_offset_t sva, int count)
1114{
1115	vm_offset_t va;
1116
1117	va = sva;
1118	while (count-- > 0) {
1119		pmap_kremove(va);
1120		va += PAGE_SIZE;
1121	}
1122	pmap_invalidate_range(kernel_pmap, sva, va);
1123}
1124
1125/***************************************************
1126 * Page table page management routines.....
1127 ***************************************************/
1128
1129/*
1130 * Decrement a page table page's wire count.  If it drops to zero,
1131 * the page is unmapped from the paging hierarchy and freed.
1132 */
1133static PMAP_INLINE int
1134pmap_unwire_pte_hold(pmap_t pmap, vm_offset_t va, vm_page_t m)
1135{
1136
1137	--m->wire_count;
1138	if (m->wire_count == 0)
1139		return _pmap_unwire_pte_hold(pmap, va, m);
1140	else
1141		return 0;
1142}
1143
1144static int
1145_pmap_unwire_pte_hold(pmap_t pmap, vm_offset_t va, vm_page_t m)
1146{
1147	vm_offset_t pteva;
1148
1149	/*
1150	 * unmap the page table page
1151	 */
1152	if (m->pindex >= (NUPDE + NUPDPE)) {
1153		/* PDP page */
1154		pml4_entry_t *pml4;
1155		pml4 = pmap_pml4e(pmap, va);
1156		pteva = (vm_offset_t) PDPmap + amd64_ptob(m->pindex - (NUPDE + NUPDPE));
1157		*pml4 = 0;
1158	} else if (m->pindex >= NUPDE) {
1159		/* PD page */
1160		pdp_entry_t *pdp;
1161		pdp = pmap_pdpe(pmap, va);
1162		pteva = (vm_offset_t) PDmap + amd64_ptob(m->pindex - NUPDE);
1163		*pdp = 0;
1164	} else {
1165		/* PTE page */
1166		pd_entry_t *pd;
1167		pd = pmap_pde(pmap, va);
1168		pteva = (vm_offset_t) PTmap + amd64_ptob(m->pindex);
1169		*pd = 0;
1170	}
1171	--pmap->pm_stats.resident_count;
1172	if (m->pindex < NUPDE) {
1173		/* We just released a PT, unhold the matching PD */
1174		vm_page_t pdpg;
1175
1176		pdpg = PHYS_TO_VM_PAGE(*pmap_pdpe(pmap, va) & PG_FRAME);
1177		pmap_unwire_pte_hold(pmap, va, pdpg);
1178	}
1179	if (m->pindex >= NUPDE && m->pindex < (NUPDE + NUPDPE)) {
1180		/* We just released a PD, unhold the matching PDP */
1181		vm_page_t pdppg;
1182
1183		pdppg = PHYS_TO_VM_PAGE(*pmap_pml4e(pmap, va) & PG_FRAME);
1184		pmap_unwire_pte_hold(pmap, va, pdppg);
1185	}
1186
1187	/*
1188	 * Do an invltlb to make the invalidated mapping
1189	 * take effect immediately.
1190	 */
1191	pmap_invalidate_page(pmap, pteva);
1192
1193	vm_page_free_zero(m);
1194	atomic_subtract_int(&cnt.v_wire_count, 1);
1195	return 1;
1196}
1197
1198/*
1199 * After removing a page table entry, this routine is used to
1200 * conditionally free the page, and manage the hold/wire counts.
1201 */
1202static int
1203pmap_unuse_pt(pmap_t pmap, vm_offset_t va, pd_entry_t ptepde)
1204{
1205	vm_page_t mpte;
1206
1207	if (va >= VM_MAXUSER_ADDRESS)
1208		return 0;
1209	KASSERT(ptepde != 0, ("pmap_unuse_pt: ptepde != 0"));
1210	mpte = PHYS_TO_VM_PAGE(ptepde & PG_FRAME);
1211	return pmap_unwire_pte_hold(pmap, va, mpte);
1212}
1213
1214void
1215pmap_pinit0(pmap_t pmap)
1216{
1217
1218	PMAP_LOCK_INIT(pmap);
1219	pmap->pm_pml4 = (pml4_entry_t *)(KERNBASE + KPML4phys);
1220	pmap->pm_active = 0;
1221	TAILQ_INIT(&pmap->pm_pvchunk);
1222	bzero(&pmap->pm_stats, sizeof pmap->pm_stats);
1223}
1224
1225/*
1226 * Initialize a preallocated and zeroed pmap structure,
1227 * such as one in a vmspace structure.
1228 */
1229void
1230pmap_pinit(pmap_t pmap)
1231{
1232	vm_page_t pml4pg;
1233	static vm_pindex_t color;
1234
1235	PMAP_LOCK_INIT(pmap);
1236
1237	/*
1238	 * allocate the page directory page
1239	 */
1240	while ((pml4pg = vm_page_alloc(NULL, color++, VM_ALLOC_NOOBJ |
1241	    VM_ALLOC_NORMAL | VM_ALLOC_WIRED | VM_ALLOC_ZERO)) == NULL)
1242		VM_WAIT;
1243
1244	pmap->pm_pml4 = (pml4_entry_t *)PHYS_TO_DMAP(VM_PAGE_TO_PHYS(pml4pg));
1245
1246	if ((pml4pg->flags & PG_ZERO) == 0)
1247		pagezero(pmap->pm_pml4);
1248
1249	/* Wire in kernel global address entries. */
1250	pmap->pm_pml4[KPML4I] = KPDPphys | PG_RW | PG_V | PG_U;
1251	pmap->pm_pml4[DMPML4I] = DMPDPphys | PG_RW | PG_V | PG_U;
1252
1253	/* install self-referential address mapping entry(s) */
1254	pmap->pm_pml4[PML4PML4I] = VM_PAGE_TO_PHYS(pml4pg) | PG_V | PG_RW | PG_A | PG_M;
1255
1256	pmap->pm_active = 0;
1257	TAILQ_INIT(&pmap->pm_pvchunk);
1258	bzero(&pmap->pm_stats, sizeof pmap->pm_stats);
1259}
1260
1261/*
1262 * this routine is called if the page table page is not
1263 * mapped correctly.
1264 *
1265 * Note: If a page allocation fails at page table level two or three,
1266 * one or two pages may be held during the wait, only to be released
1267 * afterwards.  This conservative approach is easily argued to avoid
1268 * race conditions.
1269 */
1270static vm_page_t
1271_pmap_allocpte(pmap_t pmap, vm_pindex_t ptepindex, int flags)
1272{
1273	vm_page_t m, pdppg, pdpg;
1274
1275	KASSERT((flags & (M_NOWAIT | M_WAITOK)) == M_NOWAIT ||
1276	    (flags & (M_NOWAIT | M_WAITOK)) == M_WAITOK,
1277	    ("_pmap_allocpte: flags is neither M_NOWAIT nor M_WAITOK"));
1278
1279	/*
1280	 * Allocate a page table page.
1281	 */
1282	if ((m = vm_page_alloc(NULL, ptepindex, VM_ALLOC_NOOBJ |
1283	    VM_ALLOC_WIRED | VM_ALLOC_ZERO)) == NULL) {
1284		if (flags & M_WAITOK) {
1285			PMAP_UNLOCK(pmap);
1286			vm_page_unlock_queues();
1287			VM_WAIT;
1288			vm_page_lock_queues();
1289			PMAP_LOCK(pmap);
1290		}
1291
1292		/*
1293		 * Indicate the need to retry.  While waiting, the page table
1294		 * page may have been allocated.
1295		 */
1296		return (NULL);
1297	}
1298	if ((m->flags & PG_ZERO) == 0)
1299		pmap_zero_page(m);
1300
1301	/*
1302	 * Map the pagetable page into the process address space, if
1303	 * it isn't already there.
1304	 */
1305
1306	pmap->pm_stats.resident_count++;
1307
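	/*
	 * ptepindex encodes which level of the hierarchy is wanted:
	 * indices in [0, NUPDE) are page table pages, [NUPDE, NUPDE +
	 * NUPDPE) are page directory pages, and anything above that is
	 * a PDP page.  The three cases below mirror that split.
	 */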
1308	if (ptepindex >= (NUPDE + NUPDPE)) {
1309		pml4_entry_t *pml4;
1310		vm_pindex_t pml4index;
1311
1312		/* Wire up a new PDPE page */
1313		pml4index = ptepindex - (NUPDE + NUPDPE);
1314		pml4 = &pmap->pm_pml4[pml4index];
1315		*pml4 = VM_PAGE_TO_PHYS(m) | PG_U | PG_RW | PG_V | PG_A | PG_M;
1316
1317	} else if (ptepindex >= NUPDE) {
1318		vm_pindex_t pml4index;
1319		vm_pindex_t pdpindex;
1320		pml4_entry_t *pml4;
1321		pdp_entry_t *pdp;
1322
1323		/* Wire up a new PDE page */
1324		pdpindex = ptepindex - NUPDE;
1325		pml4index = pdpindex >> NPML4EPGSHIFT;
1326
1327		pml4 = &pmap->pm_pml4[pml4index];
1328		if ((*pml4 & PG_V) == 0) {
1329			/* Have to allocate a new pdp, recurse */
1330			if (_pmap_allocpte(pmap, NUPDE + NUPDPE + pml4index,
1331			    flags) == NULL) {
1332				--m->wire_count;
1333				vm_page_free(m);
1334				return (NULL);
1335			}
1336		} else {
1337			/* Add reference to pdp page */
1338			pdppg = PHYS_TO_VM_PAGE(*pml4 & PG_FRAME);
1339			pdppg->wire_count++;
1340		}
1341		pdp = (pdp_entry_t *)PHYS_TO_DMAP(*pml4 & PG_FRAME);
1342
1343		/* Now find the pdp page */
1344		pdp = &pdp[pdpindex & ((1ul << NPDPEPGSHIFT) - 1)];
1345		*pdp = VM_PAGE_TO_PHYS(m) | PG_U | PG_RW | PG_V | PG_A | PG_M;
1346
1347	} else {
1348		vm_pindex_t pml4index;
1349		vm_pindex_t pdpindex;
1350		pml4_entry_t *pml4;
1351		pdp_entry_t *pdp;
1352		pd_entry_t *pd;
1353
1354		/* Wire up a new PTE page */
1355		pdpindex = ptepindex >> NPDPEPGSHIFT;
1356		pml4index = pdpindex >> NPML4EPGSHIFT;
1357
1358		/* First, find the pdp and check that it's valid. */
1359		pml4 = &pmap->pm_pml4[pml4index];
1360		if ((*pml4 & PG_V) == 0) {
1361			/* Have to allocate a new pd, recurse */
1362			if (_pmap_allocpte(pmap, NUPDE + pdpindex,
1363			    flags) == NULL) {
1364				--m->wire_count;
1365				vm_page_free(m);
1366				return (NULL);
1367			}
1368			pdp = (pdp_entry_t *)PHYS_TO_DMAP(*pml4 & PG_FRAME);
1369			pdp = &pdp[pdpindex & ((1ul << NPDPEPGSHIFT) - 1)];
1370		} else {
1371			pdp = (pdp_entry_t *)PHYS_TO_DMAP(*pml4 & PG_FRAME);
1372			pdp = &pdp[pdpindex & ((1ul << NPDPEPGSHIFT) - 1)];
1373			if ((*pdp & PG_V) == 0) {
1374				/* Have to allocate a new pd, recurse */
1375				if (_pmap_allocpte(pmap, NUPDE + pdpindex,
1376				    flags) == NULL) {
1377					--m->wire_count;
1378					vm_page_free(m);
1379					return (NULL);
1380				}
1381			} else {
1382				/* Add reference to the pd page */
1383				pdpg = PHYS_TO_VM_PAGE(*pdp & PG_FRAME);
1384				pdpg->wire_count++;
1385			}
1386		}
1387		pd = (pd_entry_t *)PHYS_TO_DMAP(*pdp & PG_FRAME);
1388
1389		/* Now we know where the page directory page is */
1390		pd = &pd[ptepindex & ((1ul << NPDEPGSHIFT) - 1)];
1391		*pd = VM_PAGE_TO_PHYS(m) | PG_U | PG_RW | PG_V | PG_A | PG_M;
1392	}
1393
1394	return m;
1395}
1396
1397static vm_page_t
1398pmap_allocpde(pmap_t pmap, vm_offset_t va, int flags)
1399{
1400	vm_pindex_t pdpindex, ptepindex;
1401	pdp_entry_t *pdpe;
1402	vm_page_t pdpg;
1403
1404	KASSERT((flags & (M_NOWAIT | M_WAITOK)) == M_NOWAIT ||
1405	    (flags & (M_NOWAIT | M_WAITOK)) == M_WAITOK,
1406	    ("pmap_allocpde: flags is neither M_NOWAIT nor M_WAITOK"));
1407retry:
1408	pdpe = pmap_pdpe(pmap, va);
1409	if (pdpe != NULL && (*pdpe & PG_V) != 0) {
1410		/* Add a reference to the pd page. */
1411		pdpg = PHYS_TO_VM_PAGE(*pdpe & PG_FRAME);
1412		pdpg->wire_count++;
1413	} else {
1414		/* Allocate a pd page. */
1415		ptepindex = pmap_pde_pindex(va);
1416		pdpindex = ptepindex >> NPDPEPGSHIFT;
1417		pdpg = _pmap_allocpte(pmap, NUPDE + pdpindex, flags);
1418		if (pdpg == NULL && (flags & M_WAITOK))
1419			goto retry;
1420	}
1421	return (pdpg);
1422}
1423
1424static vm_page_t
1425pmap_allocpte(pmap_t pmap, vm_offset_t va, int flags)
1426{
1427	vm_pindex_t ptepindex;
1428	pd_entry_t *pd;
1429	vm_page_t m;
1430
1431	KASSERT((flags & (M_NOWAIT | M_WAITOK)) == M_NOWAIT ||
1432	    (flags & (M_NOWAIT | M_WAITOK)) == M_WAITOK,
1433	    ("pmap_allocpte: flags is neither M_NOWAIT nor M_WAITOK"));
1434
1435	/*
1436	 * Calculate pagetable page index
1437	 */
1438	ptepindex = pmap_pde_pindex(va);
1439retry:
1440	/*
1441	 * Get the page directory entry
1442	 */
1443	pd = pmap_pde(pmap, va);
1444
1445	/*
1446	 * This supports switching from a 2MB page to a
1447	 * normal 4K page.
1448	 */
1449	if (pd != 0 && (*pd & (PG_PS | PG_V)) == (PG_PS | PG_V)) {
1450		*pd = 0;
1451		pd = 0;
1452		pmap->pm_stats.resident_count -= NBPDR / PAGE_SIZE;
1453		pmap_unuse_pt(pmap, va, *pmap_pdpe(pmap, va));
1454		pmap_invalidate_all(kernel_pmap);
1455	}
1456
1457	/*
1458	 * If the page table page is mapped, we just increment the
1459	 * hold count, and activate it.
1460	 */
1461	if (pd != 0 && (*pd & PG_V) != 0) {
1462		m = PHYS_TO_VM_PAGE(*pd & PG_FRAME);
1463		m->wire_count++;
1464	} else {
1465		/*
1466		 * Here if the pte page isn't mapped, or if it has been
1467		 * deallocated.
1468		 */
1469		m = _pmap_allocpte(pmap, ptepindex, flags);
1470		if (m == NULL && (flags & M_WAITOK))
1471			goto retry;
1472	}
1473	return (m);
1474}
1475
1476
1477/***************************************************
1478 * Pmap allocation/deallocation routines.
1479 ***************************************************/
1480
1481/*
1482 * Release any resources held by the given physical map.
1483 * Called when a pmap initialized by pmap_pinit is being released.
1484 * Should only be called if the map contains no valid mappings.
1485 */
1486void
1487pmap_release(pmap_t pmap)
1488{
1489	vm_page_t m;
1490
1491	KASSERT(pmap->pm_stats.resident_count == 0,
1492	    ("pmap_release: pmap resident count %ld != 0",
1493	    pmap->pm_stats.resident_count));
1494
1495	m = PHYS_TO_VM_PAGE(pmap->pm_pml4[PML4PML4I] & PG_FRAME);
1496
1497	pmap->pm_pml4[KPML4I] = 0;	/* KVA */
1498	pmap->pm_pml4[DMPML4I] = 0;	/* Direct Map */
1499	pmap->pm_pml4[PML4PML4I] = 0;	/* Recursive Mapping */
1500
1501	vm_page_lock_queues();
1502	m->wire_count--;
1503	atomic_subtract_int(&cnt.v_wire_count, 1);
1504	vm_page_free_zero(m);
1505	vm_page_unlock_queues();
1506	PMAP_LOCK_DESTROY(pmap);
1507}
1508
1509static int
1510kvm_size(SYSCTL_HANDLER_ARGS)
1511{
1512	unsigned long ksize = VM_MAX_KERNEL_ADDRESS - KERNBASE;
1513
1514	return sysctl_handle_long(oidp, &ksize, 0, req);
1515}
1516SYSCTL_PROC(_vm, OID_AUTO, kvm_size, CTLTYPE_LONG|CTLFLAG_RD,
1517    0, 0, kvm_size, "LU", "Size of KVM");
1518
1519static int
1520kvm_free(SYSCTL_HANDLER_ARGS)
1521{
1522	unsigned long kfree = VM_MAX_KERNEL_ADDRESS - kernel_vm_end;
1523
1524	return sysctl_handle_long(oidp, &kfree, 0, req);
1525}
1526SYSCTL_PROC(_vm, OID_AUTO, kvm_free, CTLTYPE_LONG|CTLFLAG_RD,
1527    0, 0, kvm_free, "LU", "Amount of KVM free");
1528
1529/*
1530 * grow the kernel page tables to cover addresses up to 'addr', if needed
1531 */
1532void
1533pmap_growkernel(vm_offset_t addr)
1534{
1535	vm_paddr_t paddr;
1536	vm_page_t nkpg;
1537	pd_entry_t *pde, newpdir;
1538	pdp_entry_t newpdp;
1539
1540	mtx_assert(&kernel_map->system_mtx, MA_OWNED);
1541	if (kernel_vm_end == 0) {
1542		kernel_vm_end = KERNBASE;
1543		nkpt = 0;
1544		while ((*pmap_pde(kernel_pmap, kernel_vm_end) & PG_V) != 0) {
1545			kernel_vm_end = (kernel_vm_end + PAGE_SIZE * NPTEPG) & ~(PAGE_SIZE * NPTEPG - 1);
1546			nkpt++;
1547		}
1548	}
1549	addr = roundup2(addr, PAGE_SIZE * NPTEPG);
1550	while (kernel_vm_end < addr) {
1551		pde = pmap_pde(kernel_pmap, kernel_vm_end);
1552		if (pde == NULL) {
1553			/* We need a new PDP entry */
1554			nkpg = vm_page_alloc(NULL, nkpt,
1555			    VM_ALLOC_NOOBJ | VM_ALLOC_SYSTEM | VM_ALLOC_WIRED);
1556			if (!nkpg)
1557				panic("pmap_growkernel: no memory to grow kernel");
1558			pmap_zero_page(nkpg);
1559			paddr = VM_PAGE_TO_PHYS(nkpg);
1560			newpdp = (pdp_entry_t)
1561				(paddr | PG_V | PG_RW | PG_A | PG_M);
1562			*pmap_pdpe(kernel_pmap, kernel_vm_end) = newpdp;
1563			continue; /* try again */
1564		}
1565		if ((*pde & PG_V) != 0) {
1566			kernel_vm_end = (kernel_vm_end + PAGE_SIZE * NPTEPG) & ~(PAGE_SIZE * NPTEPG - 1);
1567			continue;
1568		}
1569
1570		/*
1571		 * This index is bogus, but out of the way
1572		 */
1573		nkpg = vm_page_alloc(NULL, nkpt,
1574		    VM_ALLOC_NOOBJ | VM_ALLOC_SYSTEM | VM_ALLOC_WIRED);
1575		if (!nkpg)
1576			panic("pmap_growkernel: no memory to grow kernel");
1577
1578		nkpt++;
1579
1580		pmap_zero_page(nkpg);
1581		paddr = VM_PAGE_TO_PHYS(nkpg);
1582		newpdir = (pd_entry_t) (paddr | PG_V | PG_RW | PG_A | PG_M);
1583		*pmap_pde(kernel_pmap, kernel_vm_end) = newpdir;
1584
1585		kernel_vm_end = (kernel_vm_end + PAGE_SIZE * NPTEPG) & ~(PAGE_SIZE * NPTEPG - 1);
1586	}
1587}
1588
1589
1590/***************************************************
1591 * page management routines.
1592 ***************************************************/
1593
1594CTASSERT(sizeof(struct pv_chunk) == PAGE_SIZE);
1595CTASSERT(_NPCM == 3);
1596CTASSERT(_NPCPV == 168);
1597
1598static __inline struct pv_chunk *
1599pv_to_chunk(pv_entry_t pv)
1600{
1601
1602	return (struct pv_chunk *)((uintptr_t)pv & ~(uintptr_t)PAGE_MASK);
1603}
1604
1605#define PV_PMAP(pv) (pv_to_chunk(pv)->pc_pmap)
1606
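/*
 * A pv_chunk is a single page holding _NPCPV (168) pv entries, tracked
 * by three 64-bit bitmaps: 64 + 64 + 40 = 168 bits, which is why
 * PC_FREE2 has only its low 40 bits set.
 */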
1607#define	PC_FREE0	0xfffffffffffffffful
1608#define	PC_FREE1	0xfffffffffffffffful
1609#define	PC_FREE2	0x000000fffffffffful
1610
1611static uint64_t pc_freemask[_NPCM] = { PC_FREE0, PC_FREE1, PC_FREE2 };
1612
1613SYSCTL_INT(_vm_pmap, OID_AUTO, pv_entry_count, CTLFLAG_RD, &pv_entry_count, 0,
1614	"Current number of pv entries");
1615
1616#ifdef PV_STATS
1617static int pc_chunk_count, pc_chunk_allocs, pc_chunk_frees, pc_chunk_tryfail;
1618
1619SYSCTL_INT(_vm_pmap, OID_AUTO, pc_chunk_count, CTLFLAG_RD, &pc_chunk_count, 0,
1620	"Current number of pv entry chunks");
1621SYSCTL_INT(_vm_pmap, OID_AUTO, pc_chunk_allocs, CTLFLAG_RD, &pc_chunk_allocs, 0,
1622	"Current number of pv entry chunks allocated");
1623SYSCTL_INT(_vm_pmap, OID_AUTO, pc_chunk_frees, CTLFLAG_RD, &pc_chunk_frees, 0,
1624	"Current number of pv entry chunks frees");
1625SYSCTL_INT(_vm_pmap, OID_AUTO, pc_chunk_tryfail, CTLFLAG_RD, &pc_chunk_tryfail, 0,
1626	"Number of times tried to get a chunk page but failed.");
1627
1628static long pv_entry_frees, pv_entry_allocs;
1629static int pv_entry_spare;
1630
1631SYSCTL_LONG(_vm_pmap, OID_AUTO, pv_entry_frees, CTLFLAG_RD, &pv_entry_frees, 0,
1632	"Current number of pv entry frees");
1633SYSCTL_LONG(_vm_pmap, OID_AUTO, pv_entry_allocs, CTLFLAG_RD, &pv_entry_allocs, 0,
1634	"Current number of pv entry allocs");
1635SYSCTL_INT(_vm_pmap, OID_AUTO, pv_entry_spare, CTLFLAG_RD, &pv_entry_spare, 0,
1636	"Current number of spare pv entries");
1637
1638static int pmap_collect_inactive, pmap_collect_active;
1639
1640SYSCTL_INT(_vm_pmap, OID_AUTO, pmap_collect_inactive, CTLFLAG_RD, &pmap_collect_inactive, 0,
1641	"Current number times pmap_collect called on inactive queue");
1642SYSCTL_INT(_vm_pmap, OID_AUTO, pmap_collect_active, CTLFLAG_RD, &pmap_collect_active, 0,
1643	"Current number times pmap_collect called on active queue");
1644#endif
1645
1646/*
1647 * We are in a serious low memory condition.  Resort to
1648 * drastic measures to free some pages so we can allocate
1649 * another pv entry chunk.  This is normally called to
1650 * unmap inactive pages, and if necessary, active pages.
1651 */
1652static void
1653pmap_collect(pmap_t locked_pmap, struct vpgqueues *vpq)
1654{
1655	pd_entry_t ptepde;
1656	pmap_t pmap;
1657	pt_entry_t *pte, tpte;
1658	pv_entry_t next_pv, pv;
1659	vm_offset_t va;
1660	vm_page_t m;
1661
1662	TAILQ_FOREACH(m, &vpq->pl, pageq) {
1663		if (m->hold_count || m->busy)
1664			continue;
1665		TAILQ_FOREACH_SAFE(pv, &m->md.pv_list, pv_list, next_pv) {
1666			va = pv->pv_va;
1667			pmap = PV_PMAP(pv);
1668			/* Avoid deadlock and lock recursion. */
1669			if (pmap > locked_pmap)
1670				PMAP_LOCK(pmap);
1671			else if (pmap != locked_pmap && !PMAP_TRYLOCK(pmap))
1672				continue;
1673			pmap->pm_stats.resident_count--;
1674			pte = pmap_pte_pde(pmap, va, &ptepde);
1675			tpte = pte_load_clear(pte);
1676			KASSERT((tpte & PG_W) == 0,
1677			    ("pmap_collect: wired pte %#lx", tpte));
1678			if (tpte & PG_A)
1679				vm_page_flag_set(m, PG_REFERENCED);
1680			if (tpte & PG_M) {
1681				KASSERT((tpte & PG_RW),
1682	("pmap_collect: modified page not writable: va: %#lx, pte: %#lx",
1683				    va, tpte));
1684				vm_page_dirty(m);
1685			}
1686			pmap_invalidate_page(pmap, va);
1687			TAILQ_REMOVE(&m->md.pv_list, pv, pv_list);
1688			if (TAILQ_EMPTY(&m->md.pv_list))
1689				vm_page_flag_clear(m, PG_WRITEABLE);
1690			m->md.pv_list_count--;
1691			pmap_unuse_pt(pmap, va, ptepde);
1692			free_pv_entry(pmap, pv);
1693			if (pmap != locked_pmap)
1694				PMAP_UNLOCK(pmap);
1695		}
1696	}
1697}
1698
1699
1700/*
1701 * free the pv_entry back to the free list
1702 */
1703static void
1704free_pv_entry(pmap_t pmap, pv_entry_t pv)
1705{
1706	vm_page_t m;
1707	struct pv_chunk *pc;
1708	int idx, field, bit;
1709
1710	mtx_assert(&vm_page_queue_mtx, MA_OWNED);
1711	PMAP_LOCK_ASSERT(pmap, MA_OWNED);
1712	PV_STAT(pv_entry_frees++);
1713	PV_STAT(pv_entry_spare++);
1714	pv_entry_count--;
1715	pc = pv_to_chunk(pv);
1716	idx = pv - &pc->pc_pventry[0];
1717	field = idx / 64;
1718	bit = idx % 64;
1719	pc->pc_map[field] |= 1ul << bit;
1720	/* move to head of list */
1721	TAILQ_REMOVE(&pmap->pm_pvchunk, pc, pc_list);
1722	TAILQ_INSERT_HEAD(&pmap->pm_pvchunk, pc, pc_list);
1723	if (pc->pc_map[0] != PC_FREE0 || pc->pc_map[1] != PC_FREE1 ||
1724	    pc->pc_map[2] != PC_FREE2)
1725		return;
1726	PV_STAT(pv_entry_spare -= _NPCPV);
1727	PV_STAT(pc_chunk_count--);
1728	PV_STAT(pc_chunk_frees++);
1729	/* entire chunk is free, return it */
1730	TAILQ_REMOVE(&pmap->pm_pvchunk, pc, pc_list);
1731	m = PHYS_TO_VM_PAGE(DMAP_TO_PHYS((vm_offset_t)pc));
1732	dump_drop_page(m->phys_addr);
1733	vm_page_free(m);
1734}
1735
1736/*
1737 * get a new pv_entry, allocating a block from the system
1738 * when needed.
1739 */
1740static pv_entry_t
1741get_pv_entry(pmap_t pmap, int try)
1742{
1743	static const struct timeval printinterval = { 60, 0 };
1744	static struct timeval lastprint;
1745	static vm_pindex_t colour;
1746	int bit, field, page_req;
1747	pv_entry_t pv;
1748	struct pv_chunk *pc;
1749	vm_page_t m;
1750
1751	PMAP_LOCK_ASSERT(pmap, MA_OWNED);
1752	mtx_assert(&vm_page_queue_mtx, MA_OWNED);
1753	PV_STAT(pv_entry_allocs++);
1754	pv_entry_count++;
1755	if (pv_entry_count > pv_entry_high_water)
1756		pagedaemon_wakeup();
1757	pc = TAILQ_FIRST(&pmap->pm_pvchunk);
1758	if (pc != NULL) {
1759		for (field = 0; field < _NPCM; field++) {
1760			if (pc->pc_map[field]) {
1761				bit = bsfq(pc->pc_map[field]);
1762				break;
1763			}
1764		}
1765		if (field < _NPCM) {
1766			pv = &pc->pc_pventry[field * 64 + bit];
1767			pc->pc_map[field] &= ~(1ul << bit);
1768			/* If this was the last item, move it to tail */
1769			if (pc->pc_map[0] == 0 && pc->pc_map[1] == 0 &&
1770			    pc->pc_map[2] == 0) {
1771				TAILQ_REMOVE(&pmap->pm_pvchunk, pc, pc_list);
1772				TAILQ_INSERT_TAIL(&pmap->pm_pvchunk, pc, pc_list);
1773			}
1774			PV_STAT(pv_entry_spare--);
1775			return (pv);
1776		}
1777	}
1778	/* No free items, allocate another chunk */
1779	page_req = try ? VM_ALLOC_NORMAL : VM_ALLOC_SYSTEM;
1780	m = vm_page_alloc(NULL, colour, page_req | VM_ALLOC_NOOBJ);
1781	if (m == NULL) {
1782		if (try) {
1783			pv_entry_count--;
1784			PV_STAT(pc_chunk_tryfail++);
1785			return (NULL);
1786		}
1787		/*
1788		 * Reclaim pv entries: At first, destroy mappings to inactive
1789		 * pages.  After that, if a pv chunk entry is still needed,
1790		 * destroy mappings to active pages.
1791		 */
1792		if (ratecheck(&lastprint, &printinterval))
1793			printf("Approaching the limit on PV entries, consider "
1794			    "increasing sysctl vm.pmap.shpgperproc or "
1795			    "vm.pmap.pv_entry_max\n");
1796		PV_STAT(pmap_collect_inactive++);
1797		pmap_collect(pmap, &vm_page_queues[PQ_INACTIVE]);
1798		m = vm_page_alloc(NULL, colour,
1799		    VM_ALLOC_SYSTEM | VM_ALLOC_NOOBJ);
1800		if (m == NULL) {
1801			PV_STAT(pmap_collect_active++);
1802			pmap_collect(pmap, &vm_page_queues[PQ_ACTIVE]);
1803			m = vm_page_alloc(NULL, colour,
1804			    VM_ALLOC_SYSTEM | VM_ALLOC_NOOBJ);
1805			if (m == NULL)
1806				panic("get_pv_entry: increase vm.pmap.shpgperproc");
1807		}
1808	}
1809	PV_STAT(pc_chunk_count++);
1810	PV_STAT(pc_chunk_allocs++);
1811	colour++;
1812	dump_add_page(m->phys_addr);
1813	pc = (void *)PHYS_TO_DMAP(m->phys_addr);
1814	pc->pc_pmap = pmap;
1815	pc->pc_map[0] = PC_FREE0 & ~1ul;	/* preallocated bit 0 */
1816	pc->pc_map[1] = PC_FREE1;
1817	pc->pc_map[2] = PC_FREE2;
1818	pv = &pc->pc_pventry[0];
1819	TAILQ_INSERT_HEAD(&pmap->pm_pvchunk, pc, pc_list);
1820	PV_STAT(pv_entry_spare += _NPCPV - 1);
1821	return (pv);
1822}
1823
1824static void
1825pmap_remove_entry(pmap_t pmap, vm_page_t m, vm_offset_t va)
1826{
1827	pv_entry_t pv;
1828
1829	PMAP_LOCK_ASSERT(pmap, MA_OWNED);
1830	mtx_assert(&vm_page_queue_mtx, MA_OWNED);
1831	TAILQ_FOREACH(pv, &m->md.pv_list, pv_list) {
1832		if (pmap == PV_PMAP(pv) && va == pv->pv_va)
1833			break;
1834	}
1835	KASSERT(pv != NULL, ("pmap_remove_entry: pv not found"));
1836	TAILQ_REMOVE(&m->md.pv_list, pv, pv_list);
1837	m->md.pv_list_count--;
1838	if (TAILQ_EMPTY(&m->md.pv_list))
1839		vm_page_flag_clear(m, PG_WRITEABLE);
1840	free_pv_entry(pmap, pv);
1841}
1842
1843/*
1844 * Create a pv entry for page at pa for
1845 * (pmap, va).
1846 */
1847static void
1848pmap_insert_entry(pmap_t pmap, vm_offset_t va, vm_page_t m)
1849{
1850	pv_entry_t pv;
1851
1852	PMAP_LOCK_ASSERT(pmap, MA_OWNED);
1853	mtx_assert(&vm_page_queue_mtx, MA_OWNED);
1854	pv = get_pv_entry(pmap, FALSE);
1855	pv->pv_va = va;
1856	TAILQ_INSERT_TAIL(&m->md.pv_list, pv, pv_list);
1857	m->md.pv_list_count++;
1858}
1859
1860/*
1861 * Conditionally create a pv entry.
1862 */
1863static boolean_t
1864pmap_try_insert_pv_entry(pmap_t pmap, vm_offset_t va, vm_page_t m)
1865{
1866	pv_entry_t pv;
1867
1868	PMAP_LOCK_ASSERT(pmap, MA_OWNED);
1869	mtx_assert(&vm_page_queue_mtx, MA_OWNED);
1870	if (pv_entry_count < pv_entry_high_water &&
1871	    (pv = get_pv_entry(pmap, TRUE)) != NULL) {
1872		pv->pv_va = va;
1873		TAILQ_INSERT_TAIL(&m->md.pv_list, pv, pv_list);
1874		m->md.pv_list_count++;
1875		return (TRUE);
1876	} else
1877		return (FALSE);
1878}
1879
1880/*
1881 * pmap_remove_pte: do the things to unmap a page in a process
1882 */
1883static int
1884pmap_remove_pte(pmap_t pmap, pt_entry_t *ptq, vm_offset_t va, pd_entry_t ptepde)
1885{
1886	pt_entry_t oldpte;
1887	vm_page_t m;
1888
1889	PMAP_LOCK_ASSERT(pmap, MA_OWNED);
1890	oldpte = pte_load_clear(ptq);
1891	if (oldpte & PG_W)
1892		pmap->pm_stats.wired_count -= 1;
1893	/*
1894	 * Machines that don't support invlpg, also don't support
1895	 * Machines that don't support invlpg also don't support
1896	 */
1897	if (oldpte & PG_G)
1898		pmap_invalidate_page(kernel_pmap, va);
1899	pmap->pm_stats.resident_count -= 1;
1900	if (oldpte & PG_MANAGED) {
1901		m = PHYS_TO_VM_PAGE(oldpte & PG_FRAME);
1902		if (oldpte & PG_M) {
1903			KASSERT((oldpte & PG_RW),
1904	("pmap_remove_pte: modified page not writable: va: %#lx, pte: %#lx",
1905			    va, oldpte));
1906			vm_page_dirty(m);
1907		}
1908		if (oldpte & PG_A)
1909			vm_page_flag_set(m, PG_REFERENCED);
1910		pmap_remove_entry(pmap, m, va);
1911	}
1912	return (pmap_unuse_pt(pmap, va, ptepde));
1913}
1914
1915/*
1916 * Remove a single page from a process address space
1917 */
1918static void
1919pmap_remove_page(pmap_t pmap, vm_offset_t va, pd_entry_t *pde)
1920{
1921	pt_entry_t *pte;
1922
1923	PMAP_LOCK_ASSERT(pmap, MA_OWNED);
1924	if ((*pde & PG_V) == 0)
1925		return;
1926	pte = pmap_pde_to_pte(pde, va);
1927	if ((*pte & PG_V) == 0)
1928		return;
1929	pmap_remove_pte(pmap, pte, va, *pde);
1930	pmap_invalidate_page(pmap, va);
1931}
1932
1933/*
1934 *	Remove the given range of addresses from the specified map.
1935 *
1936 *	It is assumed that the start and end are properly
1937 *	rounded to the page size.
1938 */
1939void
1940pmap_remove(pmap_t pmap, vm_offset_t sva, vm_offset_t eva)
1941{
1942	vm_offset_t va_next;
1943	pml4_entry_t *pml4e;
1944	pdp_entry_t *pdpe;
1945	pd_entry_t ptpaddr, *pde;
1946	pt_entry_t *pte;
1947	int anyvalid;
1948
1949	/*
1950	 * Perform an unsynchronized read.  This is, however, safe.
1951	 */
1952	if (pmap->pm_stats.resident_count == 0)
1953		return;
1954
1955	anyvalid = 0;
1956
1957	vm_page_lock_queues();
1958	PMAP_LOCK(pmap);
1959
1960	/*
1961	 * Special handling for removing a single page: it is a very
1962	 * common operation, and one for which some code can easily be
1963	 * short-circuited.
1964	 */
1965	if (sva + PAGE_SIZE == eva) {
1966		pde = pmap_pde(pmap, sva);
1967		if (pde && (*pde & PG_PS) == 0) {
1968			pmap_remove_page(pmap, sva, pde);
1969			goto out;
1970		}
1971	}
1972
1973	for (; sva < eva; sva = va_next) {
1974
1975		if (pmap->pm_stats.resident_count == 0)
1976			break;
1977
1978		pml4e = pmap_pml4e(pmap, sva);
1979		if ((*pml4e & PG_V) == 0) {
1980			va_next = (sva + NBPML4) & ~PML4MASK;
1981			continue;
1982		}
1983
1984		pdpe = pmap_pml4e_to_pdpe(pml4e, sva);
1985		if ((*pdpe & PG_V) == 0) {
1986			va_next = (sva + NBPDP) & ~PDPMASK;
1987			continue;
1988		}
1989
1990		/*
1991		 * Calculate index for next page table.
1992		 */
1993		va_next = (sva + NBPDR) & ~PDRMASK;
1994
1995		pde = pmap_pdpe_to_pde(pdpe, sva);
1996		ptpaddr = *pde;
1997
1998		/*
1999		 * Weed out invalid mappings.
2000		 */
2001		if (ptpaddr == 0)
2002			continue;
2003
2004		/*
2005		 * Check for large page.
2006		 */
2007		if ((ptpaddr & PG_PS) != 0) {
2008			*pde = 0;
2009			pmap->pm_stats.resident_count -= NBPDR / PAGE_SIZE;
2010			pmap_unuse_pt(pmap, sva, *pdpe);
2011			anyvalid = 1;
2012			continue;
2013		}
2014
2015		/*
2016		 * Limit our scan to either the end of the va represented
2017		 * by the current page table page, or to the end of the
2018		 * range being removed.
2019		 */
2020		if (va_next > eva)
2021			va_next = eva;
2022
2023		for (pte = pmap_pde_to_pte(pde, sva); sva != va_next; pte++,
2024		    sva += PAGE_SIZE) {
2025			if (*pte == 0)
2026				continue;
2027
2028			/*
2029			 * The TLB entry for a PG_G mapping is invalidated
2030			 * by pmap_remove_pte().
2031			 */
2032			if ((*pte & PG_G) == 0)
2033				anyvalid = 1;
2034			if (pmap_remove_pte(pmap, pte, sva, ptpaddr))
2035				break;
2036		}
2037	}
2038out:
2039	vm_page_unlock_queues();
2040	if (anyvalid)
2041		pmap_invalidate_all(pmap);
2042	PMAP_UNLOCK(pmap);
2043}
2044
2045/*
2046 *	Routine:	pmap_remove_all
2047 *	Function:
2048 *		Removes this physical page from
2049 *		all physical maps in which it resides.
2050 *		Reflects back modify bits to the pager.
2051 *
2052 *	Notes:
2053 *		Original versions of this routine were very
2054 *		inefficient because they iteratively called
2055 *		pmap_remove (slow...)
2056 */
2057
2058void
2059pmap_remove_all(vm_page_t m)
2060{
2061	pv_entry_t pv;
2062	pmap_t pmap;
2063	pt_entry_t *pte, tpte;
2064	pd_entry_t ptepde;
2065
2066#if defined(PMAP_DIAGNOSTIC)
2067	/*
2068	 * XXX This makes pmap_remove_all() illegal for non-managed pages!
2069	 */
2070	if (m->flags & PG_FICTITIOUS) {
2071		panic("pmap_remove_all: illegal for unmanaged page, pa: 0x%lx",
2072		    VM_PAGE_TO_PHYS(m));
2073	}
2074#endif
2075	mtx_assert(&vm_page_queue_mtx, MA_OWNED);
2076	while ((pv = TAILQ_FIRST(&m->md.pv_list)) != NULL) {
2077		pmap = PV_PMAP(pv);
2078		PMAP_LOCK(pmap);
2079		pmap->pm_stats.resident_count--;
2080		pte = pmap_pte_pde(pmap, pv->pv_va, &ptepde);
2081		tpte = pte_load_clear(pte);
2082		if (tpte & PG_W)
2083			pmap->pm_stats.wired_count--;
2084		if (tpte & PG_A)
2085			vm_page_flag_set(m, PG_REFERENCED);
2086
2087		/*
2088		 * Update the vm_page_t clean and reference bits.
2089		 */
2090		if (tpte & PG_M) {
2091			KASSERT((tpte & PG_RW),
2092	("pmap_remove_all: modified page not writable: va: %#lx, pte: %#lx",
2093			    pv->pv_va, tpte));
2094			vm_page_dirty(m);
2095		}
2096		pmap_invalidate_page(pmap, pv->pv_va);
2097		TAILQ_REMOVE(&m->md.pv_list, pv, pv_list);
2098		m->md.pv_list_count--;
2099		pmap_unuse_pt(pmap, pv->pv_va, ptepde);
2100		free_pv_entry(pmap, pv);
2101		PMAP_UNLOCK(pmap);
2102	}
2103	vm_page_flag_clear(m, PG_WRITEABLE);
2104}
2105
2106/*
2107 *	Set the physical protection on the
2108 *	specified range of this map as requested.
2109 */
2110void
2111pmap_protect(pmap_t pmap, vm_offset_t sva, vm_offset_t eva, vm_prot_t prot)
2112{
2113	vm_offset_t va_next;
2114	pml4_entry_t *pml4e;
2115	pdp_entry_t *pdpe;
2116	pd_entry_t ptpaddr, *pde;
2117	pt_entry_t *pte;
2118	int anychanged;
2119
2120	if ((prot & VM_PROT_READ) == VM_PROT_NONE) {
2121		pmap_remove(pmap, sva, eva);
2122		return;
2123	}
2124
2125	if ((prot & (VM_PROT_WRITE|VM_PROT_EXECUTE)) ==
2126	    (VM_PROT_WRITE|VM_PROT_EXECUTE))
2127		return;
2128
2129	anychanged = 0;
2130
2131	vm_page_lock_queues();
2132	PMAP_LOCK(pmap);
2133	for (; sva < eva; sva = va_next) {
2134
2135		pml4e = pmap_pml4e(pmap, sva);
2136		if ((*pml4e & PG_V) == 0) {
2137			va_next = (sva + NBPML4) & ~PML4MASK;
2138			continue;
2139		}
2140
2141		pdpe = pmap_pml4e_to_pdpe(pml4e, sva);
2142		if ((*pdpe & PG_V) == 0) {
2143			va_next = (sva + NBPDP) & ~PDPMASK;
2144			continue;
2145		}
2146
2147		va_next = (sva + NBPDR) & ~PDRMASK;
2148
2149		pde = pmap_pdpe_to_pde(pdpe, sva);
2150		ptpaddr = *pde;
2151
2152		/*
2153		 * Weed out invalid mappings.
2154		 */
2155		if (ptpaddr == 0)
2156			continue;
2157
2158		/*
2159		 * Check for large page.
2160		 */
2161		if ((ptpaddr & PG_PS) != 0) {
2162			if ((prot & VM_PROT_WRITE) == 0)
2163				*pde &= ~(PG_M|PG_RW);
2164			if ((prot & VM_PROT_EXECUTE) == 0)
2165				*pde |= pg_nx;
2166			anychanged = 1;
2167			continue;
2168		}
2169
2170		if (va_next > eva)
2171			va_next = eva;
2172
2173		for (pte = pmap_pde_to_pte(pde, sva); sva != va_next; pte++,
2174		    sva += PAGE_SIZE) {
2175			pt_entry_t obits, pbits;
2176			vm_page_t m;
2177
2178retry:
2179			obits = pbits = *pte;
2180			if ((pbits & PG_V) == 0)
2181				continue;
2182			if (pbits & PG_MANAGED) {
2183				m = NULL;
2184				if (pbits & PG_A) {
2185					m = PHYS_TO_VM_PAGE(pbits & PG_FRAME);
2186					vm_page_flag_set(m, PG_REFERENCED);
2187					pbits &= ~PG_A;
2188				}
2189				if ((pbits & PG_M) != 0) {
2190					if (m == NULL)
2191						m = PHYS_TO_VM_PAGE(pbits &
2192						    PG_FRAME);
2193					vm_page_dirty(m);
2194				}
2195			}
2196
2197			if ((prot & VM_PROT_WRITE) == 0)
2198				pbits &= ~(PG_RW | PG_M);
2199			if ((prot & VM_PROT_EXECUTE) == 0)
2200				pbits |= pg_nx;
2201
2202			if (pbits != obits) {
2203				if (!atomic_cmpset_long(pte, obits, pbits))
2204					goto retry;
2205				if (obits & PG_G)
2206					pmap_invalidate_page(pmap, sva);
2207				else
2208					anychanged = 1;
2209			}
2210		}
2211	}
2212	vm_page_unlock_queues();
2213	if (anychanged)
2214		pmap_invalidate_all(pmap);
2215	PMAP_UNLOCK(pmap);
2216}
2217
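/*
 * Editor's illustrative sketch (hypothetical helper, not used by this
 * file): the compare-and-swap retry idiom that pmap_protect() uses
 * above to strip bits from a pte without losing PG_A/PG_M updates that
 * the hardware may set concurrently.
 */
static __inline void
pmap_pte_clear_bits_sketch(pt_entry_t *pte, pt_entry_t bits)
{
	pt_entry_t obits, pbits;

	do {
		obits = *pte;			/* snapshot the pte */
		pbits = obits & ~bits;		/* compute the new value */
	} while (pbits != obits &&
	    !atomic_cmpset_long(pte, obits, pbits));
}
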
2218/*
2219 *	Insert the given physical page (p) at
2220 *	the specified virtual address (v) in the
2221 *	target physical map with the protection requested.
2222 *
2223 *	If specified, the page will be wired down, meaning
2224 *	that the related pte can not be reclaimed.
2225 *
2226 *	NB:  This is the only routine which MAY NOT lazy-evaluate
2227 *	or lose information.  That is, this routine must actually
2228 *	insert this page into the given map NOW.
2229 */
2230void
2231pmap_enter(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot,
2232	   boolean_t wired)
2233{
2234	vm_paddr_t pa;
2235	pd_entry_t *pde;
2236	pt_entry_t *pte;
2237	vm_paddr_t opa;
2238	pt_entry_t origpte, newpte;
2239	vm_page_t mpte, om;
2240	boolean_t invlva;
2241
2242	va = trunc_page(va);
2243#ifdef PMAP_DIAGNOSTIC
2244	if (va > VM_MAX_KERNEL_ADDRESS)
2245		panic("pmap_enter: toobig");
2246	if ((va >= UPT_MIN_ADDRESS) && (va < UPT_MAX_ADDRESS))
2247		panic("pmap_enter: invalid to pmap_enter page table pages (va: 0x%lx)", va);
2248#endif
2249
2250	mpte = NULL;
2251
2252	vm_page_lock_queues();
2253	PMAP_LOCK(pmap);
2254
2255	/*
2256	 * In the case that a page table page is not
2257	 * resident, we are creating it here.
2258	 */
2259	if (va < VM_MAXUSER_ADDRESS) {
2260		mpte = pmap_allocpte(pmap, va, M_WAITOK);
2261	}
2262#if 0 && defined(PMAP_DIAGNOSTIC)
2263	else {
2264		pd_entry_t *pdeaddr = pmap_pde(pmap, va);
2265		origpte = *pdeaddr;
2266		if ((origpte & PG_V) == 0) {
2267			panic("pmap_enter: invalid kernel page table page, pde=%p, va=%p\n",
2268				origpte, va);
2269		}
2270	}
2271#endif
2272
2273	pde = pmap_pde(pmap, va);
2274	if (pde != NULL) {
2275		if ((*pde & PG_PS) != 0)
2276			panic("pmap_enter: attempted pmap_enter on 2MB page");
2277		pte = pmap_pde_to_pte(pde, va);
2278	} else
2279		pte = NULL;
2280
2281	/*
2282	 * The page directory table entry is not valid; we need a new PT page.
2283	 */
2284	if (pte == NULL)
2285		panic("pmap_enter: invalid page directory va=%#lx\n", va);
2286
2287	pa = VM_PAGE_TO_PHYS(m);
2288	om = NULL;
2289	origpte = *pte;
2290	opa = origpte & PG_FRAME;
2291
2292	/*
2293	 * Mapping has not changed, must be protection or wiring change.
2294	 */
2295	if (origpte && (opa == pa)) {
2296		/*
2297		 * Wiring change, just update stats. We don't worry about
2298		 * wiring PT pages as they remain resident as long as there
2299		 * are valid mappings in them. Hence, if a user page is wired,
2300		 * the PT page will be also.
2301		 */
2302		if (wired && ((origpte & PG_W) == 0))
2303			pmap->pm_stats.wired_count++;
2304		else if (!wired && (origpte & PG_W))
2305			pmap->pm_stats.wired_count--;
2306
2307		/*
2308		 * Remove extra pte reference
2309		 */
2310		if (mpte)
2311			mpte->wire_count--;
2312
2313		/*
2314		 * We might be turning off write access to the page,
2315		 * so we go ahead and sense modify status.
2316		 */
2317		if (origpte & PG_MANAGED) {
2318			om = m;
2319			pa |= PG_MANAGED;
2320		}
2321		goto validate;
2322	}
2323	/*
2324	 * Mapping has changed, invalidate old range and fall through to
2325	 * handle validating new mapping.
2326	 */
2327	if (opa) {
2328		if (origpte & PG_W)
2329			pmap->pm_stats.wired_count--;
2330		if (origpte & PG_MANAGED) {
2331			om = PHYS_TO_VM_PAGE(opa);
2332			pmap_remove_entry(pmap, om, va);
2333		}
2334		if (mpte != NULL) {
2335			mpte->wire_count--;
2336			KASSERT(mpte->wire_count > 0,
2337			    ("pmap_enter: missing reference to page table page,"
2338			     " va: 0x%lx", va));
2339		}
2340	} else
2341		pmap->pm_stats.resident_count++;
2342
2343	/*
2344	 * Enter on the PV list if part of our managed memory.
2345	 */
2346	if ((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) == 0) {
2347		KASSERT(va < kmi.clean_sva || va >= kmi.clean_eva,
2348		    ("pmap_enter: managed mapping within the clean submap"));
2349		pmap_insert_entry(pmap, va, m);
2350		pa |= PG_MANAGED;
2351	}
2352
2353	/*
2354	 * Increment counters
2355	 */
2356	if (wired)
2357		pmap->pm_stats.wired_count++;
2358
2359validate:
2360	/*
2361	 * Now validate mapping with desired protection/wiring.
2362	 */
2363	newpte = (pt_entry_t)(pa | PG_V);
2364	if ((prot & VM_PROT_WRITE) != 0) {
2365		newpte |= PG_RW;
2366		vm_page_flag_set(m, PG_WRITEABLE);
2367	}
2368	if ((prot & VM_PROT_EXECUTE) == 0)
2369		newpte |= pg_nx;
2370	if (wired)
2371		newpte |= PG_W;
2372	if (va < VM_MAXUSER_ADDRESS)
2373		newpte |= PG_U;
2374	if (pmap == kernel_pmap)
2375		newpte |= PG_G;
2376
2377	/*
2378	 * if the mapping or permission bits are different, we need
2379	 * to update the pte.
2380	 */
2381	if ((origpte & ~(PG_M|PG_A)) != newpte) {
2382		if (origpte & PG_V) {
2383			invlva = FALSE;
2384			origpte = pte_load_store(pte, newpte | PG_A);
2385			if (origpte & PG_A) {
2386				if (origpte & PG_MANAGED)
2387					vm_page_flag_set(om, PG_REFERENCED);
2388				if (opa != VM_PAGE_TO_PHYS(m) || ((origpte &
2389				    PG_NX) == 0 && (newpte & PG_NX)))
2390					invlva = TRUE;
2391			}
2392			if (origpte & PG_M) {
2393				KASSERT((origpte & PG_RW),
2394	("pmap_enter: modified page not writable: va: %#lx, pte: %#lx",
2395				    va, origpte));
2396				if ((origpte & PG_MANAGED) != 0)
2397					vm_page_dirty(om);
2398				if ((newpte & PG_RW) == 0)
2399					invlva = TRUE;
2400			}
2401			if (invlva)
2402				pmap_invalidate_page(pmap, va);
2403		} else
2404			pte_store(pte, newpte | PG_A);
2405	}
2406	vm_page_unlock_queues();
2407	PMAP_UNLOCK(pmap);
2408}
2409
2410/*
2411 * Maps a sequence of resident pages belonging to the same object.
2412 * The sequence begins with the given page m_start.  This page is
2413 * mapped at the given virtual address start.  Each subsequent page is
2414 * mapped at a virtual address that is offset from start by the same
2415 * amount as the page is offset from m_start within the object.  The
2416 * last page in the sequence is the page with the largest offset from
2417 * m_start that can be mapped at a virtual address less than the given
2418 * virtual address end.  Not every virtual page between start and end
2419 * is mapped; only those for which a resident page exists with the
2420 * corresponding offset from m_start are mapped.
2421 */
2422void
2423pmap_enter_object(pmap_t pmap, vm_offset_t start, vm_offset_t end,
2424    vm_page_t m_start, vm_prot_t prot)
2425{
2426	vm_page_t m, mpte;
2427	vm_pindex_t diff, psize;
2428
2429	VM_OBJECT_LOCK_ASSERT(m_start->object, MA_OWNED);
2430	psize = atop(end - start);
2431	mpte = NULL;
2432	m = m_start;
2433	PMAP_LOCK(pmap);
2434	while (m != NULL && (diff = m->pindex - m_start->pindex) < psize) {
2435		mpte = pmap_enter_quick_locked(pmap, start + ptoa(diff), m,
2436		    prot, mpte);
2437		m = TAILQ_NEXT(m, listq);
2438	}
2439 	PMAP_UNLOCK(pmap);
2440}
2441
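/*
 * Editor's illustrative sketch (hypothetical helper): the virtual
 * address chosen for each page in pmap_enter_object() above is simply
 * the start address plus the page's byte offset from m_start within
 * the object.
 */
static __inline vm_offset_t
pmap_enter_object_va_sketch(vm_offset_t start, vm_page_t m_start, vm_page_t m)
{

	return (start + ptoa(m->pindex - m_start->pindex));
}
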
2442/*
2443 * This code makes some *MAJOR* assumptions:
2444 * 1. The current pmap and the given pmap exist.
2445 * 2. Not wired.
2446 * 3. Read access.
2447 * 4. No page table pages.
2448 * but is *MUCH* faster than pmap_enter...
2449 */
2450
2451void
2452pmap_enter_quick(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot)
2453{
2454
2455	PMAP_LOCK(pmap);
2456	(void) pmap_enter_quick_locked(pmap, va, m, prot, NULL);
2457	PMAP_UNLOCK(pmap);
2458}
2459
2460static vm_page_t
2461pmap_enter_quick_locked(pmap_t pmap, vm_offset_t va, vm_page_t m,
2462    vm_prot_t prot, vm_page_t mpte)
2463{
2464	pt_entry_t *pte;
2465	vm_paddr_t pa;
2466
2467	KASSERT(va < kmi.clean_sva || va >= kmi.clean_eva ||
2468	    (m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) != 0,
2469	    ("pmap_enter_quick_locked: managed mapping within the clean submap"));
2470	mtx_assert(&vm_page_queue_mtx, MA_OWNED);
2471	PMAP_LOCK_ASSERT(pmap, MA_OWNED);
2472
2473	/*
2474	 * In the case that a page table page is not
2475	 * resident, we are creating it here.
2476	 */
2477	if (va < VM_MAXUSER_ADDRESS) {
2478		vm_pindex_t ptepindex;
2479		pd_entry_t *ptepa;
2480
2481		/*
2482		 * Calculate pagetable page index
2483		 */
2484		ptepindex = pmap_pde_pindex(va);
2485		if (mpte && (mpte->pindex == ptepindex)) {
2486			mpte->wire_count++;
2487		} else {
2488			/*
2489			 * Get the page directory entry
2490			 */
2491			ptepa = pmap_pde(pmap, va);
2492
2493			/*
2494			 * If the page table page is mapped, we just increment
2495			 * the hold count, and activate it.
2496			 */
2497			if (ptepa && (*ptepa & PG_V) != 0) {
2498				if (*ptepa & PG_PS)
2499					panic("pmap_enter_quick_locked: unexpected mapping into 2MB page");
2500				mpte = PHYS_TO_VM_PAGE(*ptepa & PG_FRAME);
2501				mpte->wire_count++;
2502			} else {
2503				mpte = _pmap_allocpte(pmap, ptepindex,
2504				    M_NOWAIT);
2505				if (mpte == NULL)
2506					return (mpte);
2507			}
2508		}
2509	} else {
2510		mpte = NULL;
2511	}
2512
2513	/*
2514	 * This call to vtopte makes the assumption that we are
2515	 * entering the page into the current pmap.  In order to support
2516	 * quick entry into any pmap, one would likely use pmap_pte.
2517	 * But that isn't as quick as vtopte.
2518	 */
2519	pte = vtopte(va);
2520	if (*pte) {
2521		if (mpte != NULL) {
2522			pmap_unwire_pte_hold(pmap, va, mpte);
2523			mpte = NULL;
2524		}
2525		return (mpte);
2526	}
2527
2528	/*
2529	 * Enter on the PV list if part of our managed memory.
2530	 */
2531	if ((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) == 0 &&
2532	    !pmap_try_insert_pv_entry(pmap, va, m)) {
2533		if (mpte != NULL) {
2534			pmap_unwire_pte_hold(pmap, va, mpte);
2535			mpte = NULL;
2536		}
2537		return (mpte);
2538	}
2539
2540	/*
2541	 * Increment counters
2542	 */
2543	pmap->pm_stats.resident_count++;
2544
2545	pa = VM_PAGE_TO_PHYS(m);
2546	if ((prot & VM_PROT_EXECUTE) == 0)
2547		pa |= pg_nx;
2548
2549	/*
2550	 * Now validate mapping with RO protection
2551	 */
2552	if (m->flags & (PG_FICTITIOUS|PG_UNMANAGED))
2553		pte_store(pte, pa | PG_V | PG_U);
2554	else
2555		pte_store(pte, pa | PG_V | PG_U | PG_MANAGED);
2556	return mpte;
2557}
2558
2559/*
2560 * Make a temporary mapping for a physical address.  This is only intended
2561 * to be used for panic dumps.
2562 */
2563void *
2564pmap_kenter_temporary(vm_paddr_t pa, int i)
2565{
2566	vm_offset_t va;
2567
2568	va = (vm_offset_t)crashdumpmap + (i * PAGE_SIZE);
2569	pmap_kenter(va, pa);
2570	invlpg(va);
2571	return ((void *)crashdumpmap);
2572}
2573
2574/*
2575 * This code maps large physical mmap regions into the
2576 * processor address space.  Note that some shortcuts
2577 * are taken, but the code works.
2578 */
2579void
2580pmap_object_init_pt(pmap_t pmap, vm_offset_t addr,
2581		    vm_object_t object, vm_pindex_t pindex,
2582		    vm_size_t size)
2583{
2584	vm_offset_t va;
2585	vm_page_t p, pdpg;
2586
2587	VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
2588	KASSERT(object->type == OBJT_DEVICE,
2589	    ("pmap_object_init_pt: non-device object"));
2590	if (((addr & (NBPDR - 1)) == 0) && ((size & (NBPDR - 1)) == 0)) {
2591		vm_page_t m[1];
2592		pd_entry_t ptepa, *pde;
2593
2594		PMAP_LOCK(pmap);
2595		pde = pmap_pde(pmap, addr);
2596		if (pde != 0 && (*pde & PG_V) != 0)
2597			goto out;
2598		PMAP_UNLOCK(pmap);
2599retry:
2600		p = vm_page_lookup(object, pindex);
2601		if (p != NULL) {
2602			if (vm_page_sleep_if_busy(p, FALSE, "init4p"))
2603				goto retry;
2604		} else {
2605			p = vm_page_alloc(object, pindex, VM_ALLOC_NORMAL);
2606			if (p == NULL)
2607				return;
2608			m[0] = p;
2609
2610			if (vm_pager_get_pages(object, m, 1, 0) != VM_PAGER_OK) {
2611				vm_page_lock_queues();
2612				vm_page_free(p);
2613				vm_page_unlock_queues();
2614				return;
2615			}
2616
2617			p = vm_page_lookup(object, pindex);
2618			vm_page_lock_queues();
2619			vm_page_wakeup(p);
2620			vm_page_unlock_queues();
2621		}
2622
2623		ptepa = VM_PAGE_TO_PHYS(p);
2624		if (ptepa & (NBPDR - 1))
2625			return;
2626
2627		p->valid = VM_PAGE_BITS_ALL;
2628
2629		PMAP_LOCK(pmap);
2630		for (va = addr; va < addr + size; va += NBPDR) {
2631			while ((pdpg =
2632			    pmap_allocpde(pmap, va, M_NOWAIT)) == NULL) {
2633				PMAP_UNLOCK(pmap);
2634				vm_page_lock_queues();
2635				vm_page_busy(p);
2636				vm_page_unlock_queues();
2637				VM_OBJECT_UNLOCK(object);
2638				VM_WAIT;
2639				VM_OBJECT_LOCK(object);
2640				vm_page_lock_queues();
2641				vm_page_wakeup(p);
2642				vm_page_unlock_queues();
2643				PMAP_LOCK(pmap);
2644			}
2645			pde = (pd_entry_t *)PHYS_TO_DMAP(VM_PAGE_TO_PHYS(pdpg));
2646			pde = &pde[pmap_pde_index(va)];
2647			if ((*pde & PG_V) == 0) {
2648				pde_store(pde, ptepa | PG_PS | PG_M | PG_A |
2649				    PG_U | PG_RW | PG_V);
2650				pmap->pm_stats.resident_count +=
2651				    NBPDR / PAGE_SIZE;
2652			} else {
2653				pdpg->wire_count--;
2654				KASSERT(pdpg->wire_count > 0,
2655				    ("pmap_object_init_pt: missing reference "
2656				     "to page directory page, va: 0x%lx", va));
2657			}
2658			ptepa += NBPDR;
2659		}
2660		pmap_invalidate_all(pmap);
2661out:
2662		PMAP_UNLOCK(pmap);
2663	}
2664}
2665
2666/*
2667 *	Routine:	pmap_change_wiring
2668 *	Function:	Change the wiring attribute for a map/virtual-address
2669 *			pair.
2670 *	In/out conditions:
2671 *			The mapping must already exist in the pmap.
2672 */
2673void
2674pmap_change_wiring(pmap_t pmap, vm_offset_t va, boolean_t wired)
2675{
2676	pt_entry_t *pte;
2677
2678	/*
2679	 * Wiring is not a hardware characteristic so there is no need to
2680	 * invalidate TLB.
2681	 */
2682	PMAP_LOCK(pmap);
2683	pte = pmap_pte(pmap, va);
2684	if (wired && (*pte & PG_W) == 0) {
2685		pmap->pm_stats.wired_count++;
2686		atomic_set_long(pte, PG_W);
2687	} else if (!wired && (*pte & PG_W) != 0) {
2688		pmap->pm_stats.wired_count--;
2689		atomic_clear_long(pte, PG_W);
2690	}
2691	PMAP_UNLOCK(pmap);
2692}
2693
2694
2695
2696/*
2697 *	Copy the range specified by src_addr/len
2698 *	from the source map to the range dst_addr/len
2699 *	in the destination map.
2700 *
2701 *	This routine is only advisory and need not do anything.
2702 */
2703
2704void
2705pmap_copy(pmap_t dst_pmap, pmap_t src_pmap, vm_offset_t dst_addr, vm_size_t len,
2706	  vm_offset_t src_addr)
2707{
2708	vm_offset_t addr;
2709	vm_offset_t end_addr = src_addr + len;
2710	vm_offset_t va_next;
2711
2712	if (dst_addr != src_addr)
2713		return;
2714
2715	if (!pmap_is_current(src_pmap))
2716		return;
2717
2718	vm_page_lock_queues();
2719	if (dst_pmap < src_pmap) {
2720		PMAP_LOCK(dst_pmap);
2721		PMAP_LOCK(src_pmap);
2722	} else {
2723		PMAP_LOCK(src_pmap);
2724		PMAP_LOCK(dst_pmap);
2725	}
2726	for (addr = src_addr; addr < end_addr; addr = va_next) {
2727		pt_entry_t *src_pte, *dst_pte;
2728		vm_page_t dstmpde, dstmpte, srcmpte;
2729		pml4_entry_t *pml4e;
2730		pdp_entry_t *pdpe;
2731		pd_entry_t srcptepaddr, *pde;
2732
2733		if (addr >= UPT_MIN_ADDRESS)
2734			panic("pmap_copy: invalid to pmap_copy page tables");
2735
2736		pml4e = pmap_pml4e(src_pmap, addr);
2737		if ((*pml4e & PG_V) == 0) {
2738			va_next = (addr + NBPML4) & ~PML4MASK;
2739			continue;
2740		}
2741
2742		pdpe = pmap_pml4e_to_pdpe(pml4e, addr);
2743		if ((*pdpe & PG_V) == 0) {
2744			va_next = (addr + NBPDP) & ~PDPMASK;
2745			continue;
2746		}
2747
2748		va_next = (addr + NBPDR) & ~PDRMASK;
2749
2750		pde = pmap_pdpe_to_pde(pdpe, addr);
2751		srcptepaddr = *pde;
2752		if (srcptepaddr == 0)
2753			continue;
2754
2755		if (srcptepaddr & PG_PS) {
2756			dstmpde = pmap_allocpde(dst_pmap, addr, M_NOWAIT);
2757			if (dstmpde == NULL)
2758				break;
2759			pde = (pd_entry_t *)
2760			    PHYS_TO_DMAP(VM_PAGE_TO_PHYS(dstmpde));
2761			pde = &pde[pmap_pde_index(addr)];
2762			if (*pde == 0) {
2763				*pde = srcptepaddr & ~PG_W;
2764				dst_pmap->pm_stats.resident_count +=
2765				    NBPDR / PAGE_SIZE;
2766			} else
2767				pmap_unwire_pte_hold(dst_pmap, addr, dstmpde);
2768			continue;
2769		}
2770
2771		srcmpte = PHYS_TO_VM_PAGE(srcptepaddr & PG_FRAME);
2772		if (srcmpte->wire_count == 0)
2773			panic("pmap_copy: source page table page is unused");
2774
2775		if (va_next > end_addr)
2776			va_next = end_addr;
2777
2778		src_pte = vtopte(addr);
2779		while (addr < va_next) {
2780			pt_entry_t ptetemp;
2781			ptetemp = *src_pte;
2782			/*
2783			 * we only virtual copy managed pages
2784			 * We only virtual-copy managed pages.
2785			if ((ptetemp & PG_MANAGED) != 0) {
2786				dstmpte = pmap_allocpte(dst_pmap, addr,
2787				    M_NOWAIT);
2788				if (dstmpte == NULL)
2789					break;
2790				dst_pte = (pt_entry_t *)
2791				    PHYS_TO_DMAP(VM_PAGE_TO_PHYS(dstmpte));
2792				dst_pte = &dst_pte[pmap_pte_index(addr)];
2793				if (*dst_pte == 0 &&
2794				    pmap_try_insert_pv_entry(dst_pmap, addr,
2795				    PHYS_TO_VM_PAGE(ptetemp & PG_FRAME))) {
2796					/*
2797					 * Clear the wired, modified, and
2798					 * accessed (referenced) bits
2799					 * during the copy.
2800					 */
2801					*dst_pte = ptetemp & ~(PG_W | PG_M |
2802					    PG_A);
2803					dst_pmap->pm_stats.resident_count++;
2804	 			} else
2805					pmap_unwire_pte_hold(dst_pmap, addr,
2806					    dstmpte);
2807				if (dstmpte->wire_count >= srcmpte->wire_count)
2808					break;
2809			}
2810			addr += PAGE_SIZE;
2811			src_pte++;
2812		}
2813	}
2814	vm_page_unlock_queues();
2815	PMAP_UNLOCK(src_pmap);
2816	PMAP_UNLOCK(dst_pmap);
2817}
2818
2819/*
2820 *	pmap_zero_page zeros the specified hardware page through the
2821 *	direct map, using pagezero() to clear its contents.
2822 */
2823void
2824pmap_zero_page(vm_page_t m)
2825{
2826	vm_offset_t va = PHYS_TO_DMAP(VM_PAGE_TO_PHYS(m));
2827
2828	pagezero((void *)va);
2829}
2830
2831/*
2832 *	pmap_zero_page_area zeros the specified area of a hardware page
2833 *	through the direct map, using pagezero() or bzero().
2834 *
2835 *	off and size may not cover an area beyond a single hardware page.
2836 */
2837void
2838pmap_zero_page_area(vm_page_t m, int off, int size)
2839{
2840	vm_offset_t va = PHYS_TO_DMAP(VM_PAGE_TO_PHYS(m));
2841
2842	if (off == 0 && size == PAGE_SIZE)
2843		pagezero((void *)va);
2844	else
2845		bzero((char *)va + off, size);
2846}
2847
2848/*
2849 *	pmap_zero_page_idle zeros the specified hardware page through the
2850 *	direct map, using pagezero() to clear its contents.  This
2851 *	is intended to be called from the vm_pagezero process only and
2852 *	outside of Giant.
2853 */
2854void
2855pmap_zero_page_idle(vm_page_t m)
2856{
2857	vm_offset_t va = PHYS_TO_DMAP(VM_PAGE_TO_PHYS(m));
2858
2859	pagezero((void *)va);
2860}
2861
2862/*
2863 *	pmap_copy_page copies the specified (machine independent)
2864 *	page through the direct map, using pagecopy() to copy one
2865 *	machine dependent page at a time.
2867 */
2868void
2869pmap_copy_page(vm_page_t msrc, vm_page_t mdst)
2870{
2871	vm_offset_t src = PHYS_TO_DMAP(VM_PAGE_TO_PHYS(msrc));
2872	vm_offset_t dst = PHYS_TO_DMAP(VM_PAGE_TO_PHYS(mdst));
2873
2874	pagecopy((void *)src, (void *)dst);
2875}
2876
2877/*
2878 * Returns true if the pmap's pv is one of the first
2879 * 16 pvs linked to from this page.  This count may
2880 * be changed upwards or downwards in the future; it
2881 * is only necessary that true be returned for a small
2882 * subset of pmaps for proper page aging.
2883 */
2884boolean_t
2885pmap_page_exists_quick(pmap_t pmap, vm_page_t m)
2886{
2887	pv_entry_t pv;
2888	int loops = 0;
2889
2890	if (m->flags & PG_FICTITIOUS)
2891		return FALSE;
2892
2893	mtx_assert(&vm_page_queue_mtx, MA_OWNED);
2894	TAILQ_FOREACH(pv, &m->md.pv_list, pv_list) {
2895		if (PV_PMAP(pv) == pmap) {
2896			return TRUE;
2897		}
2898		loops++;
2899		if (loops >= 16)
2900			break;
2901	}
2902	return (FALSE);
2903}
2904
2905/*
2906 * Remove all pages from the specified address space;
2907 * this aids process exit speeds.  Also, this code
2908 * is special-cased for the current process only, but
2909 * can have the more generic (and slightly slower)
2910 * mode enabled.  This is much faster than pmap_remove
2911 * in the case of running down an entire address space.
2912 */
2913void
2914pmap_remove_pages(pmap_t pmap)
2915{
2916	pt_entry_t *pte, tpte;
2917	vm_page_t m;
2918	pv_entry_t pv;
2919	struct pv_chunk *pc, *npc;
2920	int field, idx;
2921	int64_t bit;
2922	uint64_t inuse, bitmask;
2923	int allfree;
2924
2925	if (pmap != vmspace_pmap(curthread->td_proc->p_vmspace)) {
2926		printf("warning: pmap_remove_pages called with non-current pmap\n");
2927		return;
2928	}
2929	vm_page_lock_queues();
2930	PMAP_LOCK(pmap);
2931	TAILQ_FOREACH_SAFE(pc, &pmap->pm_pvchunk, pc_list, npc) {
2932		allfree = 1;
2933		for (field = 0; field < _NPCM; field++) {
2934			inuse = (~(pc->pc_map[field])) & pc_freemask[field];
2935			while (inuse != 0) {
2936				bit = bsfq(inuse);
2937				bitmask = 1UL << bit;
2938				idx = field * 64 + bit;
2939				pv = &pc->pc_pventry[idx];
2940				inuse &= ~bitmask;
2941
2942				pte = vtopte(pv->pv_va);
2943				tpte = *pte;
2944
2945				if (tpte == 0) {
2946					printf(
2947					    "TPTE at %p  IS ZERO @ VA %08lx\n",
2948					    pte, pv->pv_va);
2949					panic("bad pte");
2950				}
2951
2952/*
2953 * We cannot remove wired pages from a process' mapping at this time
2954 */
2955				if (tpte & PG_W) {
2956					allfree = 0;
2957					continue;
2958				}
2959
2960				m = PHYS_TO_VM_PAGE(tpte & PG_FRAME);
2961				KASSERT(m->phys_addr == (tpte & PG_FRAME),
2962				    ("vm_page_t %p phys_addr mismatch %016jx %016jx",
2963				    m, (uintmax_t)m->phys_addr,
2964				    (uintmax_t)tpte));
2965
2966				KASSERT(m < &vm_page_array[vm_page_array_size],
2967					("pmap_remove_pages: bad tpte %#jx",
2968					(uintmax_t)tpte));
2969
2970				pmap->pm_stats.resident_count--;
2971
2972				pte_clear(pte);
2973
2974				/*
2975				 * Update the vm_page_t clean/reference bits.
2976				 */
2977				if (tpte & PG_M)
2978					vm_page_dirty(m);
2979
2980				/* Mark free */
2981				PV_STAT(pv_entry_frees++);
2982				PV_STAT(pv_entry_spare++);
2983				pv_entry_count--;
2984				pc->pc_map[field] |= bitmask;
2985				m->md.pv_list_count--;
2986				TAILQ_REMOVE(&m->md.pv_list, pv, pv_list);
2987				if (TAILQ_EMPTY(&m->md.pv_list))
2988					vm_page_flag_clear(m, PG_WRITEABLE);
2989				pmap_unuse_pt(pmap, pv->pv_va,
2990				    *vtopde(pv->pv_va));
2991			}
2992		}
2993		if (allfree) {
2994			PV_STAT(pv_entry_spare -= _NPCPV);
2995			PV_STAT(pc_chunk_count--);
2996			PV_STAT(pc_chunk_frees++);
2997			TAILQ_REMOVE(&pmap->pm_pvchunk, pc, pc_list);
2998			m = PHYS_TO_VM_PAGE(DMAP_TO_PHYS((vm_offset_t)pc));
2999			dump_drop_page(m->phys_addr);
3000			vm_page_free(m);
3001		}
3002	}
3003	vm_page_unlock_queues();
3004	pmap_invalidate_all(pmap);
3005	PMAP_UNLOCK(pmap);
3006}
3007
3008/*
3009 *	pmap_is_modified:
3010 *
3011 *	Return whether or not the specified physical page was modified
3012 *	in any physical maps.
3013 */
3014boolean_t
3015pmap_is_modified(vm_page_t m)
3016{
3017	pv_entry_t pv;
3018	pt_entry_t *pte;
3019	pmap_t pmap;
3020	boolean_t rv;
3021
3022	rv = FALSE;
3023	if (m->flags & PG_FICTITIOUS)
3024		return (rv);
3025
3026	mtx_assert(&vm_page_queue_mtx, MA_OWNED);
3027	TAILQ_FOREACH(pv, &m->md.pv_list, pv_list) {
3028		pmap = PV_PMAP(pv);
3029		PMAP_LOCK(pmap);
3030		pte = pmap_pte(pmap, pv->pv_va);
3031		rv = (*pte & PG_M) != 0;
3032		PMAP_UNLOCK(pmap);
3033		if (rv)
3034			break;
3035	}
3036	return (rv);
3037}
3038
3039/*
3040 *	pmap_is_prefaultable:
3041 *
3042 *	Return whether or not the specified virtual address is eligible
3043 *	for prefault.
3044 */
3045boolean_t
3046pmap_is_prefaultable(pmap_t pmap, vm_offset_t addr)
3047{
3048	pd_entry_t *pde;
3049	pt_entry_t *pte;
3050	boolean_t rv;
3051
3052	rv = FALSE;
3053	PMAP_LOCK(pmap);
3054	pde = pmap_pde(pmap, addr);
3055	if (pde != NULL && (*pde & PG_V)) {
3056		pte = vtopte(addr);
3057		rv = (*pte & PG_V) == 0;
3058	}
3059	PMAP_UNLOCK(pmap);
3060	return (rv);
3061}
3062
3063/*
3064 * Clear the write and modified bits in each of the given page's mappings.
3065 */
3066void
3067pmap_remove_write(vm_page_t m)
3068{
3069	pv_entry_t pv;
3070	pmap_t pmap;
3071	pt_entry_t oldpte, *pte;
3072
3073	if ((m->flags & PG_FICTITIOUS) != 0 ||
3074	    (m->flags & PG_WRITEABLE) == 0)
3075		return;
3076	mtx_assert(&vm_page_queue_mtx, MA_OWNED);
3077	TAILQ_FOREACH(pv, &m->md.pv_list, pv_list) {
3078		pmap = PV_PMAP(pv);
3079		PMAP_LOCK(pmap);
3080		pte = pmap_pte(pmap, pv->pv_va);
3081retry:
3082		oldpte = *pte;
3083		if (oldpte & PG_RW) {
3084			if (!atomic_cmpset_long(pte, oldpte, oldpte &
3085			    ~(PG_RW | PG_M)))
3086				goto retry;
3087			if ((oldpte & PG_M) != 0)
3088				vm_page_dirty(m);
3089			pmap_invalidate_page(pmap, pv->pv_va);
3090		}
3091		PMAP_UNLOCK(pmap);
3092	}
3093	vm_page_flag_clear(m, PG_WRITEABLE);
3094}
3095
3096/*
3097 *	pmap_ts_referenced:
3098 *
3099 *	Return a count of reference bits for a page, clearing those bits.
3100 *	It is not necessary for every reference bit to be cleared, but it
3101 *	is necessary that 0 only be returned when there are truly no
3102 *	reference bits set.
3103 *
3104 *	XXX: The exact number of bits to check and clear is a matter that
3105 *	should be tested and standardized at some point in the future for
3106 *	optimal aging of shared pages.
3107 */
3108int
3109pmap_ts_referenced(vm_page_t m)
3110{
3111	pv_entry_t pv, pvf, pvn;
3112	pmap_t pmap;
3113	pt_entry_t *pte;
3114	int rtval = 0;
3115
3116	if (m->flags & PG_FICTITIOUS)
3117		return (rtval);
3118	mtx_assert(&vm_page_queue_mtx, MA_OWNED);
3119	if ((pv = TAILQ_FIRST(&m->md.pv_list)) != NULL) {
3120		pvf = pv;
3121		do {
3122			pvn = TAILQ_NEXT(pv, pv_list);
3123			TAILQ_REMOVE(&m->md.pv_list, pv, pv_list);
3124			TAILQ_INSERT_TAIL(&m->md.pv_list, pv, pv_list);
3125			pmap = PV_PMAP(pv);
3126			PMAP_LOCK(pmap);
3127			pte = pmap_pte(pmap, pv->pv_va);
3128			if ((*pte & PG_A) != 0) {
3129				atomic_clear_long(pte, PG_A);
3130				pmap_invalidate_page(pmap, pv->pv_va);
3131				rtval++;
3132				if (rtval > 4)
3133					pvn = NULL;
3134			}
3135			PMAP_UNLOCK(pmap);
3136		} while ((pv = pvn) != NULL && pv != pvf);
3137	}
3138	return (rtval);
3139}
3140
3141/*
3142 *	Clear the modify bits on the specified physical page.
3143 */
3144void
3145pmap_clear_modify(vm_page_t m)
3146{
3147	pv_entry_t pv;
3148	pmap_t pmap;
3149	pt_entry_t *pte;
3150
3151	if ((m->flags & PG_FICTITIOUS) != 0)
3152		return;
3153	mtx_assert(&vm_page_queue_mtx, MA_OWNED);
3154	TAILQ_FOREACH(pv, &m->md.pv_list, pv_list) {
3155		pmap = PV_PMAP(pv);
3156		PMAP_LOCK(pmap);
3157		pte = pmap_pte(pmap, pv->pv_va);
3158		if (*pte & PG_M) {
3159			atomic_clear_long(pte, PG_M);
3160			pmap_invalidate_page(pmap, pv->pv_va);
3161		}
3162		PMAP_UNLOCK(pmap);
3163	}
3164}
3165
3166/*
3167 *	pmap_clear_reference:
3168 *
3169 *	Clear the reference bit on the specified physical page.
3170 */
3171void
3172pmap_clear_reference(vm_page_t m)
3173{
3174	pv_entry_t pv;
3175	pmap_t pmap;
3176	pt_entry_t *pte;
3177
3178	if ((m->flags & PG_FICTITIOUS) != 0)
3179		return;
3180	mtx_assert(&vm_page_queue_mtx, MA_OWNED);
3181	TAILQ_FOREACH(pv, &m->md.pv_list, pv_list) {
3182		pmap = PV_PMAP(pv);
3183		PMAP_LOCK(pmap);
3184		pte = pmap_pte(pmap, pv->pv_va);
3185		if (*pte & PG_A) {
3186			atomic_clear_long(pte, PG_A);
3187			pmap_invalidate_page(pmap, pv->pv_va);
3188		}
3189		PMAP_UNLOCK(pmap);
3190	}
3191}
3192
3193/*
3194 * Miscellaneous support routines follow
3195 */
3196
3197/* Adjust the cache mode for a 4KB page mapped via a PTE. */
3198static __inline void
3199pmap_pte_attr(vm_offset_t va, int mode)
3200{
3201	pt_entry_t *pte;
3202	u_int opte, npte;
3203
3204	pte = vtopte(va);
3205
3206	/*
3207	 * The cache mode bits are all in the low 32-bits of the
3208	 * PTE, so we can just spin on updating the low 32-bits.
3209	 */
3210	do {
3211		opte = *(u_int *)pte;
3212		npte = opte & ~(PG_PTE_PAT | PG_NC_PCD | PG_NC_PWT);
3213		npte |= pmap_cache_bits(mode, 0);
3214	} while (npte != opte && !atomic_cmpset_int((u_int *)pte, opte, npte));
3215}
3216
3217/* Adjust the cache mode for a 2MB page mapped via a PDE. */
3218static __inline void
3219pmap_pde_attr(vm_offset_t va, int mode)
3220{
3221	pd_entry_t *pde;
3222	u_int opde, npde;
3223
3224	pde = pmap_pde(kernel_pmap, va);
3225
3226	/*
3227	 * The cache mode bits are all in the low 32-bits of the
3228	 * PDE, so we can just spin on updating the low 32-bits.
3229	 */
3230	do {
3231		opde = *(u_int *)pde;
3232		npde = opde & ~(PG_PDE_PAT | PG_NC_PCD | PG_NC_PWT);
3233		npde |= pmap_cache_bits(mode, 1);
3234	} while (npde != opde && !atomic_cmpset_int((u_int *)pde, opde, npde));
3235}
3236
3237/*
3238 * Map a set of physical memory pages into the kernel virtual
3239 * address space. Return a pointer to where it is mapped. This
3240 * routine is intended to be used for mapping device memory,
3241 * NOT real memory.
3242 */
3243void *
3244pmap_mapdev_attr(vm_paddr_t pa, vm_size_t size, int mode)
3245{
3246	vm_offset_t va, tmpva, offset;
3247
3248	/*
3249	 * If the request fits within the direct map window and uses WB
3250	 * caching mode, use the direct map.
3251	 */
3252	if (pa < dmaplimit && (pa + size) < dmaplimit && mode == PAT_WRITE_BACK)
3253		return ((void *)PHYS_TO_DMAP(pa));
3254	offset = pa & PAGE_MASK;
3255	size = roundup(offset + size, PAGE_SIZE);
3256	va = kmem_alloc_nofault(kernel_map, size);
3257	if (!va)
3258		panic("pmap_mapdev: Couldn't alloc kernel virtual memory");
3259	pa = trunc_page(pa);
3260	for (tmpva = va; size > 0; ) {
3261		pmap_kenter_attr(tmpva, pa, mode);
3262		size -= PAGE_SIZE;
3263		tmpva += PAGE_SIZE;
3264		pa += PAGE_SIZE;
3265	}
3266	pmap_invalidate_range(kernel_pmap, va, tmpva);
3267	pmap_invalidate_cache();
3268	return ((void *)(va + offset));
3269}
3270
3271void *
3272pmap_mapdev(vm_paddr_t pa, vm_size_t size)
3273{
3274
3275	return (pmap_mapdev_attr(pa, size, PAT_UNCACHEABLE));
3276}
3277
3278void *
3279pmap_mapbios(vm_paddr_t pa, vm_size_t size)
3280{
3281
3282	return (pmap_mapdev_attr(pa, size, PAT_WRITE_BACK));
3283}
3284
3285void
3286pmap_unmapdev(vm_offset_t va, vm_size_t size)
3287{
3288	vm_offset_t base, offset, tmpva;
3289
3290	/* If we gave out a direct map region in pmap_mapdev(), do nothing. */
3291	if (va >= DMAP_MIN_ADDRESS && va < DMAP_MAX_ADDRESS)
3292		return;
3293	base = trunc_page(va);
3294	offset = va & PAGE_MASK;
3295	size = roundup(offset + size, PAGE_SIZE);
3296	for (tmpva = base; tmpva < (base + size); tmpva += PAGE_SIZE)
3297		pmap_kremove(tmpva);
3298	pmap_invalidate_range(kernel_pmap, va, tmpva);
3299	kmem_free(kernel_map, base, size);
3300}
3301
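/*
 * Editor's illustrative sketch (hypothetical helper; the register layout
 * is made up): the usual pairing of pmap_mapdev() and pmap_unmapdev()
 * around a short-lived device register access.
 */
static __inline uint32_t
pmap_mapdev_usage_sketch(vm_paddr_t regbase)
{
	volatile uint32_t *regs;
	uint32_t val;

	regs = pmap_mapdev(regbase, PAGE_SIZE);
	val = regs[0];				/* read the first register */
	pmap_unmapdev((vm_offset_t)regs, PAGE_SIZE);
	return (val);
}
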
3302int
3303pmap_change_attr(vm_offset_t va, vm_size_t size, int mode)
3307{
3308	vm_offset_t base, offset, tmpva;
3309	pd_entry_t *pde;
3310	pt_entry_t *pte;
3311
3312	base = trunc_page(va);
3313	offset = va & PAGE_MASK;
3314	size = roundup(offset + size, PAGE_SIZE);
3315
3316	/* Only supported on kernel virtual addresses. */
3317	if (base <= VM_MAXUSER_ADDRESS)
3318		return (EINVAL);
3319
3320	/*
3321	 * XXX: We have to support tearing 2MB pages down into 4k pages if
3322	 * needed here.
3323	 */
3324	/* Pages that aren't mapped aren't supported. */
3325	for (tmpva = base; tmpva < (base + size); ) {
3326		pde = pmap_pde(kernel_pmap, tmpva);
3327		if (*pde == 0)
3328			return (EINVAL);
3329		if (*pde & PG_PS) {
3330			/* Handle 2MB pages that are completely contained. */
3331			if (size >= NBPDR) {
3332				tmpva += NBPDR;
3333				continue;
3334			}
3335			return (EINVAL);
3336		}
3337		pte = vtopte(tmpva);
3338		if (*pte == 0)
3339			return (EINVAL);
3340		tmpva += PAGE_SIZE;
3341	}
3342
3343	/*
3344	 * Ok, all the pages exist, so run through them updating their
3345	 * cache mode.
3346	 */
3347	for (tmpva = base; size > 0; ) {
3348		pde = pmap_pde(kernel_pmap, tmpva);
3349		if (*pde & PG_PS) {
3350			pmap_pde_attr(tmpva, mode);
3351			tmpva += NBPDR;
3352			size -= NBPDR;
3353		} else {
3354			pmap_pte_attr(tmpva, mode);
3355			tmpva += PAGE_SIZE;
3356			size -= PAGE_SIZE;
3357		}
3358	}
3359
3360	/*
3361	 * Flush CPU caches to make sure any data isn't cached that shouldn't
3362	 * be, etc.
3363	 */
3364	pmap_invalidate_range(kernel_pmap, base, tmpva);
3365	pmap_invalidate_cache();
3366	return (0);
3367}
3368
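/*
 * Editor's illustrative sketch (hypothetical helper): a caller of
 * pmap_change_attr() that flips a kernel virtual range to uncacheable
 * and restores write-back caching when it is done.
 */
static __inline int
pmap_change_attr_usage_sketch(vm_offset_t va, vm_size_t size)
{
	int error;

	error = pmap_change_attr(va, size, PAT_UNCACHEABLE);
	if (error != 0)
		return (error);
	/* ... touch the range while it is uncacheable ... */
	return (pmap_change_attr(va, size, PAT_WRITE_BACK));
}
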
3369/*
3370 * perform the pmap work for mincore
3371 */
3372int
3373pmap_mincore(pmap_t pmap, vm_offset_t addr)
3374{
3375	pt_entry_t *ptep, pte;
3376	vm_page_t m;
3377	int val = 0;
3378
3379	PMAP_LOCK(pmap);
3380	ptep = pmap_pte(pmap, addr);
3381	pte = (ptep != NULL) ? *ptep : 0;
3382	PMAP_UNLOCK(pmap);
3383
3384	if (pte != 0) {
3385		vm_paddr_t pa;
3386
3387		val = MINCORE_INCORE;
3388		if ((pte & PG_MANAGED) == 0)
3389			return val;
3390
3391		pa = pte & PG_FRAME;
3392
3393		m = PHYS_TO_VM_PAGE(pa);
3394
3395		/*
3396		 * Modified by us
3397		 */
3398		if (pte & PG_M)
3399			val |= MINCORE_MODIFIED|MINCORE_MODIFIED_OTHER;
3400		else {
3401			/*
3402			 * Modified by someone else
3403			 */
3404			vm_page_lock_queues();
3405			if (m->dirty || pmap_is_modified(m))
3406				val |= MINCORE_MODIFIED_OTHER;
3407			vm_page_unlock_queues();
3408		}
3409		/*
3410		 * Referenced by us
3411		 */
3412		if (pte & PG_A)
3413			val |= MINCORE_REFERENCED|MINCORE_REFERENCED_OTHER;
3414		else {
3415			/*
3416			 * Referenced by someone else
3417			 */
3418			vm_page_lock_queues();
3419			if ((m->flags & PG_REFERENCED) ||
3420			    pmap_ts_referenced(m)) {
3421				val |= MINCORE_REFERENCED_OTHER;
3422				vm_page_flag_set(m, PG_REFERENCED);
3423			}
3424			vm_page_unlock_queues();
3425		}
3426	}
3427	return val;
3428}
3429
3430void
3431pmap_activate(struct thread *td)
3432{
3433	pmap_t	pmap, oldpmap;
3434	u_int64_t  cr3;
3435
3436	critical_enter();
3437	pmap = vmspace_pmap(td->td_proc->p_vmspace);
3438	oldpmap = PCPU_GET(curpmap);
3439#ifdef SMP
3440	if (oldpmap)	/* XXX FIXME */
3441		atomic_clear_int(&oldpmap->pm_active, PCPU_GET(cpumask));
3442	atomic_set_int(&pmap->pm_active, PCPU_GET(cpumask));
3443#else
3444	if (oldpmap)	/* XXX FIXME */
3445		oldpmap->pm_active &= ~PCPU_GET(cpumask);
3446	pmap->pm_active |= PCPU_GET(cpumask);
3447#endif
3448	cr3 = vtophys(pmap->pm_pml4);
3449	td->td_pcb->pcb_cr3 = cr3;
3450	load_cr3(cr3);
3451	critical_exit();
3452}
3453
3454vm_offset_t
3455pmap_addr_hint(vm_object_t obj, vm_offset_t addr, vm_size_t size)
3456{
3457
3458	if ((obj == NULL) || (size < NBPDR) || (obj->type != OBJT_DEVICE)) {
3459		return addr;
3460	}
3461
3462	addr = (addr + (NBPDR - 1)) & ~(NBPDR - 1);
3463	return addr;
3464}
3465