pmap.c revision 120307
/*-
 * Copyright (c) 1991 Regents of the University of California.
 * All rights reserved.
 * Copyright (c) 1994 John S. Dyson
 * All rights reserved.
 * Copyright (c) 1994 David Greenman
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * the Systems Programming Group of the University of Utah Computer
 * Science Department and William Jolitz of UUNET Technologies Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	from:	@(#)pmap.c	7.7 (Berkeley)	5/12/91
 */
/*-
 * Copyright (c) 2003 Networks Associates Technology, Inc.
 * All rights reserved.
 *
 * This software was developed for the FreeBSD Project by Jake Burkholder,
 * Safeport Network Services, and Network Associates Laboratories, the
 * Security Research Division of Network Associates, Inc. under
 * DARPA/SPAWAR contract N66001-01-C-8035 ("CBOSS"), as part of the DARPA
 * CHATS research program.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: head/sys/i386/i386/pmap.c 120307 2003-09-20 23:54:36Z alc $");

/*
 *	Manages physical address maps.
 *
 *	In addition to hardware address maps, this
 *	module is called upon to provide software-use-only
 *	maps which may or may not be stored in the same
 *	form as hardware maps.  These pseudo-maps are
 *	used to store intermediate results from copy
 *	operations to and from address spaces.
 *
 *	Since the information managed by this module is
 *	also stored by the logical address mapping module,
 *	this module may throw away valid virtual-to-physical
 *	mappings at almost any time.  However, invalidations
 *	of virtual-to-physical mappings must be done as
 *	requested.
 *
 *	In order to cope with hardware architectures which
 *	make virtual-to-physical map invalidations expensive,
 *	this module may delay invalidation or protection-reduction
 *	operations until such time as they are actually
 *	necessary.  This module is given full information as
 *	to which processors are currently using which maps,
 *	and as to when physical maps must be made correct.
 */

#include "opt_pmap.h"
#include "opt_msgbuf.h"
#include "opt_kstack_pages.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/mman.h>
#include <sys/msgbuf.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/sx.h>
#include <sys/user.h>
#include <sys/vmmeter.h>
#include <sys/sysctl.h>
#ifdef SMP
#include <sys/smp.h>
#endif

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/vm_kern.h>
#include <vm/vm_page.h>
#include <vm/vm_map.h>
#include <vm/vm_object.h>
#include <vm/vm_extern.h>
#include <vm/vm_pageout.h>
#include <vm/vm_pager.h>
#include <vm/uma.h>

#include <machine/cpu.h>
#include <machine/cputypes.h>
#include <machine/md_var.h>
#include <machine/specialreg.h>
#if defined(SMP) || defined(APIC_IO)
#include <machine/smp.h>
#include <machine/apic.h>
#include <machine/segments.h>
#include <machine/tss.h>
#endif /* SMP || APIC_IO */

#define PMAP_KEEP_PDIRS
#ifndef PMAP_SHPGPERPROC
#define PMAP_SHPGPERPROC 200
#endif

#if defined(DIAGNOSTIC)
#define PMAP_DIAGNOSTIC
#endif

#define MINPV 2048

#if !defined(PMAP_DIAGNOSTIC)
#define PMAP_INLINE __inline
#else
#define PMAP_INLINE
#endif

/*
 * Get PDEs and PTEs for user/kernel address space
 */
#define	pmap_pde(m, v)	(&((m)->pm_pdir[(vm_offset_t)(v) >> PDRSHIFT]))
#define pdir_pde(m, v) (m[(vm_offset_t)(v) >> PDRSHIFT])

#define pmap_pde_v(pte)		((*(int *)pte & PG_V) != 0)
#define pmap_pte_w(pte)		((*(int *)pte & PG_W) != 0)
#define pmap_pte_m(pte)		((*(int *)pte & PG_M) != 0)
#define pmap_pte_u(pte)		((*(int *)pte & PG_A) != 0)
#define pmap_pte_v(pte)		((*(int *)pte & PG_V) != 0)

#define pmap_pte_set_w(pte, v) ((v)?(*(int *)pte |= PG_W):(*(int *)pte &= ~PG_W))
#define pmap_pte_set_prot(pte, v) ((*(int *)pte &= ~PG_PROT), (*(int *)pte |= (v)))

/*
 * Given a map and a machine independent protection code,
 * convert to an i386 protection code.
 */
#define pte_prot(m, p)	(protection_codes[p])
static int protection_codes[8];
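
/*
 * protection_codes[] is filled in by i386_protection_init(), declared
 * later below.  It is indexed by the 3-bit VM_PROT_{READ,WRITE,EXECUTE}
 * combination; presumably, since i386 page tables carry no separate
 * execute permission, any combination including VM_PROT_WRITE maps to
 * PG_RW and the remaining combinations map to read-only.
 */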

struct pmap kernel_pmap_store;
LIST_HEAD(pmaplist, pmap);
static struct pmaplist allpmaps;
static struct mtx allpmaps_lock;
#ifdef SMP
static struct mtx lazypmap_lock;
#endif

vm_paddr_t avail_start;	/* PA of first available physical page */
vm_paddr_t avail_end;	/* PA of last available physical page */
vm_offset_t virtual_avail;	/* VA of first avail page (after kernel bss) */
vm_offset_t virtual_end;	/* VA of last avail page (end of kernel AS) */
static boolean_t pmap_initialized = FALSE;	/* Has pmap_init completed? */
static int pgeflag;		/* PG_G or-in */
static int pseflag;		/* PG_PS or-in */

static int nkpt;
vm_offset_t kernel_vm_end;
extern u_int32_t KERNend;

#ifdef PAE
static uma_zone_t pdptzone;
#endif

/*
 * Data for the pv entry allocation mechanism
 */
static uma_zone_t pvzone;
static struct vm_object pvzone_obj;
static int pv_entry_count = 0, pv_entry_max = 0, pv_entry_high_water = 0;
int pmap_pagedaemon_waken;

/*
 * All those kernel PT submaps that BSD is so fond of
 */
pt_entry_t *CMAP1 = 0;
static pt_entry_t *CMAP2, *CMAP3, *ptmmap;
caddr_t CADDR1 = 0, ptvmmap = 0;
static caddr_t CADDR2, CADDR3;
static struct mtx CMAPCADDR12_lock;
static pt_entry_t *msgbufmap;
struct msgbuf *msgbufp = 0;

/*
 * Crashdump maps.
 */
static pt_entry_t *pt_crashdumpmap;
static caddr_t crashdumpmap;

#ifdef SMP
extern pt_entry_t *SMPpt;
#endif
static pt_entry_t *PMAP1 = 0;
static pt_entry_t *PADDR1 = 0;

static PMAP_INLINE void	free_pv_entry(pv_entry_t pv);
static pv_entry_t get_pv_entry(void);
static void	i386_protection_init(void);
static void	pmap_clear_ptes(vm_page_t m, int bit)
    __always_inline;

static int pmap_remove_pte(pmap_t pmap, pt_entry_t *ptq, vm_offset_t sva);
static void pmap_remove_page(struct pmap *pmap, vm_offset_t va);
static int pmap_remove_entry(struct pmap *pmap, vm_page_t m,
					vm_offset_t va);
static void pmap_insert_entry(pmap_t pmap, vm_offset_t va,
		vm_page_t mpte, vm_page_t m);

static vm_page_t pmap_allocpte(pmap_t pmap, vm_offset_t va);

static vm_page_t _pmap_allocpte(pmap_t pmap, unsigned ptepindex);
static int pmap_unuse_pt(pmap_t, vm_offset_t, vm_page_t);
static vm_offset_t pmap_kmem_choose(vm_offset_t addr);
static void *pmap_pv_allocf(uma_zone_t zone, int bytes, u_int8_t *flags, int wait);
#ifdef PAE
static void *pmap_pdpt_allocf(uma_zone_t zone, int bytes, u_int8_t *flags, int wait);
#endif

static pd_entry_t pdir4mb;

CTASSERT(1 << PDESHIFT == sizeof(pd_entry_t));
CTASSERT(1 << PTESHIFT == sizeof(pt_entry_t));

/*
 * Move the kernel virtual free pointer to the next
 * 4MB.  This is used to help improve performance
 * by using a large (4MB) page for much of the kernel
 * (.text, .data, .bss).
 */
static vm_offset_t
pmap_kmem_choose(vm_offset_t addr)
{
	vm_offset_t newaddr = addr;

#ifdef I686_CPU_not	/* Problem seems to have gone away */
	/* Deal with un-resolved Pentium4 issues */
	if (cpu_class == CPUCLASS_686 &&
	    strcmp(cpu_vendor, "GenuineIntel") == 0 &&
	    (cpu_id & 0xf00) == 0xf00)
		return newaddr;
#endif
#ifndef DISABLE_PSE
	if (cpu_feature & CPUID_PSE)
		newaddr = (addr + (NBPDR - 1)) & ~(NBPDR - 1);
#endif
	return newaddr;
}
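
/*
 * Worked example of the round-up above (assuming non-PAE, where NBPDR
 * is 4MB): addr = 0xc0531000 becomes
 * (0xc0531000 + 0x3fffff) & ~0x3fffff == 0xc0800000,
 * the next 4MB boundary, so the bootstrap KVA cursor never lands in
 * the middle of the kernel's large page.
 */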

/*
 *	Bootstrap the system enough to run with virtual memory.
 *
 *	On the i386 this is called after mapping has already been enabled
 *	and just syncs the pmap module with what has already been done.
 *	[We can't call it easily with mapping off since the kernel is not
 *	mapped with PA == VA, hence we would have to relocate every address
 *	from the linked base (virtual) address "KERNBASE" to the actual
 *	(physical) address starting relative to 0]
 */
void
pmap_bootstrap(firstaddr, loadaddr)
	vm_paddr_t firstaddr;
	vm_paddr_t loadaddr;
{
	vm_offset_t va;
	pt_entry_t *pte;
	int i;

	avail_start = firstaddr;

	/*
	 * XXX The calculation of virtual_avail is wrong. It's NKPT*PAGE_SIZE too
	 * large. It should instead be correctly calculated in locore.s and
	 * not based on 'first' (which is a physical address, not a virtual
	 * address, for the start of unused physical memory). The kernel
	 * page tables are NOT double mapped and thus should not be included
	 * in this calculation.
	 */
	virtual_avail = (vm_offset_t) KERNBASE + firstaddr;
	virtual_avail = pmap_kmem_choose(virtual_avail);

	virtual_end = VM_MAX_KERNEL_ADDRESS;

	/*
	 * Initialize protection array.
	 */
	i386_protection_init();

	/*
	 * Initialize the kernel pmap (which is statically allocated).
	 */
	kernel_pmap->pm_pdir = (pd_entry_t *) (KERNBASE + (u_int)IdlePTD);
#ifdef PAE
	kernel_pmap->pm_pdpt = (pdpt_entry_t *) (KERNBASE + (u_int)IdlePDPT);
#endif
	kernel_pmap->pm_active = -1;	/* don't allow deactivation */
	TAILQ_INIT(&kernel_pmap->pm_pvlist);
	LIST_INIT(&allpmaps);
#ifdef SMP
	mtx_init(&lazypmap_lock, "lazypmap", NULL, MTX_SPIN);
#endif
	mtx_init(&allpmaps_lock, "allpmaps", NULL, MTX_SPIN);
	mtx_lock_spin(&allpmaps_lock);
	LIST_INSERT_HEAD(&allpmaps, kernel_pmap, pm_list);
	mtx_unlock_spin(&allpmaps_lock);
	nkpt = NKPT;

	/*
	 * Reserve some special page table entries/VA space for temporary
	 * mapping of pages.
	 */
#define	SYSMAP(c, p, v, n)	\
	v = (c)va; va += ((n)*PAGE_SIZE); p = pte; pte += (n);
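
	/*
	 * For reference, one invocation such as
	 *
	 *	SYSMAP(caddr_t, CMAP1, CADDR1, 1)
	 *
	 * expands to
	 *
	 *	CADDR1 = (caddr_t)va; va += (1 * PAGE_SIZE);
	 *	CMAP1 = pte; pte += 1;
	 *
	 * i.e. each use carves n pages out of the bootstrap VA cursor
	 * and records the matching kernel PTE slot(s).
	 */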

	va = virtual_avail;
	pte = vtopte(va);

	/*
	 * CMAP1/CMAP2 are used for zeroing and copying pages.
	 * CMAP3 is used for the idle process page zeroing.
	 */
	SYSMAP(caddr_t, CMAP1, CADDR1, 1)
	SYSMAP(caddr_t, CMAP2, CADDR2, 1)
	SYSMAP(caddr_t, CMAP3, CADDR3, 1)

	mtx_init(&CMAPCADDR12_lock, "CMAPCADDR12", NULL, MTX_DEF);

	/*
	 * Crashdump maps.
	 */
	SYSMAP(caddr_t, pt_crashdumpmap, crashdumpmap, MAXDUMPPGS);

	/*
	 * ptvmmap is used for reading arbitrary physical pages via /dev/mem.
	 * XXX ptmmap is not used.
	 */
	SYSMAP(caddr_t, ptmmap, ptvmmap, 1)

	/*
	 * msgbufp is used to map the system message buffer.
	 * XXX msgbufmap is not used.
	 */
	SYSMAP(struct msgbuf *, msgbufmap, msgbufp,
	       atop(round_page(MSGBUF_SIZE)))

	/*
	 * ptemap is used for pmap_pte_quick
	 */
	SYSMAP(pt_entry_t *, PMAP1, PADDR1, 1);

	virtual_avail = va;

	*CMAP1 = *CMAP2 = 0;
	for (i = 0; i < NKPT; i++)
		PTD[i] = 0;

	pgeflag = 0;
#ifndef DISABLE_PG_G
	if (cpu_feature & CPUID_PGE)
		pgeflag = PG_G;
#endif
#ifdef I686_CPU_not	/* Problem seems to have gone away */
	/* Deal with un-resolved Pentium4 issues */
	if (cpu_class == CPUCLASS_686 &&
	    strcmp(cpu_vendor, "GenuineIntel") == 0 &&
	    (cpu_id & 0xf00) == 0xf00) {
		printf("Warning: Pentium 4 cpu: PG_G disabled (global flag)\n");
		pgeflag = 0;
	}
#endif

/*
 * Initialize the 4MB page size flag
 */
	pseflag = 0;
/*
 * The 4MB page version of the initial
 * kernel page mapping.
 */
	pdir4mb = 0;

#ifndef DISABLE_PSE
	if (cpu_feature & CPUID_PSE)
		pseflag = PG_PS;
#endif
#ifdef I686_CPU_not	/* Problem seems to have gone away */
	/* Deal with un-resolved Pentium4 issues */
	if (cpu_class == CPUCLASS_686 &&
	    strcmp(cpu_vendor, "GenuineIntel") == 0 &&
	    (cpu_id & 0xf00) == 0xf00) {
		printf("Warning: Pentium 4 cpu: PG_PS disabled (4MB pages)\n");
		pseflag = 0;
	}
#endif
#ifndef DISABLE_PSE
	if (pseflag) {
		pd_entry_t ptditmp;
		/*
		 * Note that we have enabled PSE mode
		 */
		ptditmp = *(PTmap + i386_btop(KERNBASE));
		ptditmp &= ~(NBPDR - 1);
		ptditmp |= PG_V | PG_RW | PG_PS | PG_U | pgeflag;
		pdir4mb = ptditmp;
	}
#endif
#ifndef SMP
	/*
	 * Turn on PGE/PSE.  SMP does this later on since the
	 * 4K page tables are required for AP boot (for now).
	 * XXX fixme.
	 */
	pmap_set_opt();
#endif
#ifdef SMP
	if (cpu_apic_address == 0)
		panic("pmap_bootstrap: no local apic! (non-SMP hardware?)");

	/* local apic is mapped on last page */
	SMPpt[NPTEPG - 1] = (pt_entry_t)(PG_V | PG_RW | PG_N | pgeflag |
	    (cpu_apic_address & PG_FRAME));
#endif
	invltlb();
}

/*
 * Enable 4MB page mode for MP startup.  Turn on PG_G support.
 * The BSP runs this after all the APs have started up.
 */
void
pmap_set_opt(void)
{
	pt_entry_t *pte;
	vm_offset_t va, endva;

	if (pgeflag && (cpu_feature & CPUID_PGE)) {
		load_cr4(rcr4() | CR4_PGE);
		invltlb();		/* Insurance */
	}
#ifndef DISABLE_PSE
	if (pseflag && (cpu_feature & CPUID_PSE)) {
		load_cr4(rcr4() | CR4_PSE);
		invltlb();		/* Insurance */
	}
#endif
	if (PCPU_GET(cpuid) == 0) {
#ifndef DISABLE_PSE
		if (pdir4mb) {
			kernel_pmap->pm_pdir[KPTDI] = PTD[KPTDI] = pdir4mb;
			invltlb();	/* Insurance */
		}
#endif
		if (pgeflag) {
			/* Turn on PG_G for text, data, bss pages. */
			va = (vm_offset_t)btext;
#ifndef DISABLE_PSE
			if (pseflag && (cpu_feature & CPUID_PSE)) {
				if (va < KERNBASE + (1 << PDRSHIFT))
					va = KERNBASE + (1 << PDRSHIFT);
			}
#endif
			endva = KERNBASE + KERNend;
			while (va < endva) {
				pte = vtopte(va);
				if (*pte)
					*pte |= pgeflag;
				va += PAGE_SIZE;
			}
			invltlb();	/* Insurance */
		}
		/*
		 * We do not need to broadcast the invltlb here, because
		 * each AP does it the moment it is released from the boot
		 * lock.  See ap_init().
		 */
	}
}

static void *
pmap_pv_allocf(uma_zone_t zone, int bytes, u_int8_t *flags, int wait)
{
	*flags = UMA_SLAB_PRIV;
	return (void *)kmem_alloc(kernel_map, bytes);
}

#ifdef PAE
static void *
pmap_pdpt_allocf(uma_zone_t zone, int bytes, u_int8_t *flags, int wait)
{
	*flags = UMA_SLAB_PRIV;
	return (contigmalloc(PAGE_SIZE, NULL, 0, 0x0ULL, 0xffffffffULL, 1, 0));
}
#endif

/*
 *	Initialize the pmap module.
 *	Called by vm_init, to initialize any structures that the pmap
 *	system needs to map virtual memory.
 *	pmap_init has been enhanced to support, in a fairly consistent
 *	way, discontiguous physical memory.
 */
void
pmap_init(phys_start, phys_end)
	vm_paddr_t phys_start, phys_end;
{
	int i;
	int initial_pvs;

	/*
	 * Allocate memory for random pmap data structures.  Includes the
	 * pv_head_table.
	 */

	for (i = 0; i < vm_page_array_size; i++) {
		vm_page_t m;

		m = &vm_page_array[i];
		TAILQ_INIT(&m->md.pv_list);
		m->md.pv_list_count = 0;
	}

	/*
	 * init the pv free list
	 */
	initial_pvs = vm_page_array_size;
	if (initial_pvs < MINPV)
		initial_pvs = MINPV;
	pvzone = uma_zcreate("PV ENTRY", sizeof (struct pv_entry), NULL, NULL,
	    NULL, NULL, UMA_ALIGN_PTR, UMA_ZONE_VM | UMA_ZONE_NOFREE);
	uma_zone_set_allocf(pvzone, pmap_pv_allocf);
	uma_prealloc(pvzone, initial_pvs);

#ifdef PAE
	pdptzone = uma_zcreate("PDPT", NPGPTD * sizeof(pdpt_entry_t), NULL,
	    NULL, NULL, NULL, (NPGPTD * sizeof(pdpt_entry_t)) - 1,
	    UMA_ZONE_VM | UMA_ZONE_NOFREE);
	uma_zone_set_allocf(pdptzone, pmap_pdpt_allocf);
#endif

	/*
	 * Now it is safe to enable pv_table recording.
	 */
	pmap_initialized = TRUE;
}

/*
 * Initialize the address space (zone) for the pv_entries.  Set a
 * high water mark so that the system can recover from excessive
 * numbers of pv entries.
 */
void
pmap_init2()
{
	int shpgperproc = PMAP_SHPGPERPROC;

	TUNABLE_INT_FETCH("vm.pmap.shpgperproc", &shpgperproc);
	pv_entry_max = shpgperproc * maxproc + vm_page_array_size;
	TUNABLE_INT_FETCH("vm.pmap.pv_entries", &pv_entry_max);
	pv_entry_high_water = 9 * (pv_entry_max / 10);
	uma_zone_set_obj(pvzone, &pvzone_obj, pv_entry_max);
}
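
/*
 * Worked example of the sizing above, with illustrative numbers only:
 * given the default PMAP_SHPGPERPROC of 200, a hypothetical maxproc of
 * 1000 and vm_page_array_size of 65536 (256MB of 4K pages),
 * pv_entry_max = 200 * 1000 + 65536 = 265536 and
 * pv_entry_high_water = 9 * (265536 / 10) = 238977, the level at
 * which get_pv_entry() below wakes the pagedaemon.
 */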

/***************************************************
 * Low level helper routines.....
 ***************************************************/

#if defined(PMAP_DIAGNOSTIC)

/*
 * This code checks for non-writable yet modified pages,
 * which should be an invalid condition.
 */
static int
pmap_nw_modified(pt_entry_t ptea)
{
	int pte;

	pte = (int) ptea;

	if ((pte & (PG_M|PG_RW)) == PG_M)
		return 1;
	else
		return 0;
}
#endif

/*
 * This routine decides whether the modified bit of a given virtual
 * address should be tracked; addresses inside the kernel's clean
 * submap ([kmi.clean_sva, kmi.clean_eva)) are excluded.
 */
static PMAP_INLINE int
pmap_track_modified(vm_offset_t va)
{
	if ((va < kmi.clean_sva) || (va >= kmi.clean_eva))
		return 1;
	else
		return 0;
}

#ifdef I386_CPU
/*
 * i386 only has "invalidate everything" and no SMP to worry about.
 */
PMAP_INLINE void
pmap_invalidate_page(pmap_t pmap, vm_offset_t va)
{

	if (pmap == kernel_pmap || pmap->pm_active)
		invltlb();
}

PMAP_INLINE void
pmap_invalidate_range(pmap_t pmap, vm_offset_t sva, vm_offset_t eva)
{

	if (pmap == kernel_pmap || pmap->pm_active)
		invltlb();
}

PMAP_INLINE void
pmap_invalidate_all(pmap_t pmap)
{

	if (pmap == kernel_pmap || pmap->pm_active)
		invltlb();
}
#else /* !I386_CPU */
#ifdef SMP
/*
 * For SMP, these functions have to use the IPI mechanism for coherence.
 */
void
pmap_invalidate_page(pmap_t pmap, vm_offset_t va)
{
	u_int cpumask;
	u_int other_cpus;

	critical_enter();
	/*
	 * We need to disable interrupt preemption but MUST NOT have
	 * interrupts disabled here.
	 * XXX we may need to hold schedlock to get a coherent pm_active
	 */
	if (pmap->pm_active == -1 || pmap->pm_active == all_cpus) {
		invlpg(va);
		smp_invlpg(va);
	} else {
		cpumask = PCPU_GET(cpumask);
		other_cpus = PCPU_GET(other_cpus);
		if (pmap->pm_active & cpumask)
			invlpg(va);
		if (pmap->pm_active & other_cpus)
			smp_masked_invlpg(pmap->pm_active & other_cpus, va);
	}
	critical_exit();
}

void
pmap_invalidate_range(pmap_t pmap, vm_offset_t sva, vm_offset_t eva)
{
	u_int cpumask;
	u_int other_cpus;
	vm_offset_t addr;

	critical_enter();
	/*
	 * We need to disable interrupt preemption but MUST NOT have
	 * interrupts disabled here.
	 * XXX we may need to hold schedlock to get a coherent pm_active
	 */
	if (pmap->pm_active == -1 || pmap->pm_active == all_cpus) {
		for (addr = sva; addr < eva; addr += PAGE_SIZE)
			invlpg(addr);
		smp_invlpg_range(sva, eva);
	} else {
		cpumask = PCPU_GET(cpumask);
		other_cpus = PCPU_GET(other_cpus);
		if (pmap->pm_active & cpumask)
			for (addr = sva; addr < eva; addr += PAGE_SIZE)
				invlpg(addr);
		if (pmap->pm_active & other_cpus)
			smp_masked_invlpg_range(pmap->pm_active & other_cpus,
			    sva, eva);
	}
	critical_exit();
}

void
pmap_invalidate_all(pmap_t pmap)
{
	u_int cpumask;
	u_int other_cpus;

	critical_enter();
	/*
	 * We need to disable interrupt preemption but MUST NOT have
	 * interrupts disabled here.
	 * XXX we may need to hold schedlock to get a coherent pm_active
	 */
	if (pmap->pm_active == -1 || pmap->pm_active == all_cpus) {
		invltlb();
		smp_invltlb();
	} else {
		cpumask = PCPU_GET(cpumask);
		other_cpus = PCPU_GET(other_cpus);
		if (pmap->pm_active & cpumask)
			invltlb();
		if (pmap->pm_active & other_cpus)
			smp_masked_invltlb(pmap->pm_active & other_cpus);
	}
	critical_exit();
}
#else /* !SMP */
/*
 * Normal, non-SMP, 486+ invalidation functions.
 * We inline these within pmap.c for speed.
 */
PMAP_INLINE void
pmap_invalidate_page(pmap_t pmap, vm_offset_t va)
{

	if (pmap == kernel_pmap || pmap->pm_active)
		invlpg(va);
}

PMAP_INLINE void
pmap_invalidate_range(pmap_t pmap, vm_offset_t sva, vm_offset_t eva)
{
	vm_offset_t addr;

	if (pmap == kernel_pmap || pmap->pm_active)
		for (addr = sva; addr < eva; addr += PAGE_SIZE)
			invlpg(addr);
}

PMAP_INLINE void
pmap_invalidate_all(pmap_t pmap)
{

	if (pmap == kernel_pmap || pmap->pm_active)
		invltlb();
}
#endif /* !SMP */
#endif /* !I386_CPU */

/*
 * Are we current address space or kernel?
 */
static __inline int
pmap_is_current(pmap_t pmap)
{
	return (pmap == kernel_pmap ||
	    (pmap->pm_pdir[PTDPTDI] & PG_FRAME) == (PTDpde[0] & PG_FRAME));
}
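
/*
 * How the check above works (sketch): PTDpde[0] is the recursive page
 * directory entry of whichever address space is currently loaded, so
 * comparing its frame with this pmap's own self-referential slot
 * (pm_pdir[PTDPTDI]) tells us whether the MMU is using this pmap's
 * page tables right now.
 */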

/*
 * Super fast pmap_pte routine best used when scanning
 * the pv lists.  This eliminates many coarse-grained
 * invltlb calls.  Note that many of the pv list
 * scans are across different pmaps.  It is very wasteful
 * to do an entire invltlb for checking a single mapping.
 */
pt_entry_t *
pmap_pte_quick(pmap, va)
	register pmap_t pmap;
	vm_offset_t va;
{
	pd_entry_t newpf;
	pd_entry_t *pde;

	pde = pmap_pde(pmap, va);
	if (*pde & PG_PS)
		return (pde);
	if (*pde != 0) {
		/* are we current address space or kernel? */
		if (pmap_is_current(pmap))
			return vtopte(va);
		newpf = *pde & PG_FRAME;
		if (((*PMAP1) & PG_FRAME) != newpf) {
			*PMAP1 = newpf | PG_RW | PG_V;
			pmap_invalidate_page(kernel_pmap, (vm_offset_t)PADDR1);
		}
		return PADDR1 + (i386_btop(va) & (NPTEPG - 1));
	}
	return (0);
}
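
/*
 * Sketch of the PMAP1/PADDR1 window used above: PMAP1 is a reserved
 * kernel PTE and PADDR1 the page of KVA it maps, so pointing PMAP1 at
 * the foreign page table page (newpf) makes that page table readable
 * at PADDR1.  The final index selects one PTE within it: i386_btop(va)
 * is the virtual page number, and (vpn & (NPTEPG - 1)) is its slot in
 * its page table page; e.g. (non-PAE, NPTEPG = 1024) va = 0x0804a000
 * gives vpn 0x804a, slot 0x4a.
 */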

/*
 *	Routine:	pmap_extract
 *	Function:
 *		Extract the physical page address associated
 *		with the given map/virtual_address pair.
 */
vm_paddr_t
pmap_extract(pmap, va)
	register pmap_t pmap;
	vm_offset_t va;
{
	vm_paddr_t rtval;
	pt_entry_t *pte;
	pd_entry_t pde;

	if (pmap == 0)
		return 0;
	pde = pmap->pm_pdir[va >> PDRSHIFT];
	if (pde != 0) {
		if ((pde & PG_PS) != 0) {
			rtval = (pde & ~PDRMASK) | (va & PDRMASK);
			return rtval;
		}
		pte = pmap_pte_quick(pmap, va);
		rtval = ((*pte & PG_FRAME) | (va & PAGE_MASK));
		return rtval;
	}
	return 0;
}
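
/*
 * Example of the 4MB-page case above (assuming non-PAE, PDRMASK =
 * 0x3fffff): for pde = 0x00800083 (frame 0x00800000 plus
 * PG_PS|PG_RW|PG_V) and va = 0xc0a51123, rtval is
 * (0x00800083 & ~0x3fffff) | (0xc0a51123 & 0x3fffff)
 * == 0x00800000 | 0x251123 == 0x00a51123.
 */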

/*
 *	Routine:	pmap_extract_and_hold
 *	Function:
 *		Atomically extract and hold the physical page
 *		with the given pmap and virtual address pair
 *		if that mapping permits the given protection.
 */
vm_page_t
pmap_extract_and_hold(pmap_t pmap, vm_offset_t va, vm_prot_t prot)
{
	vm_paddr_t pa;
	vm_page_t m;

	m = NULL;
	mtx_lock(&Giant);
	if ((pa = pmap_extract(pmap, va)) != 0) {
		m = PHYS_TO_VM_PAGE(pa);
		vm_page_lock_queues();
		vm_page_hold(m);
		vm_page_unlock_queues();
	}
	mtx_unlock(&Giant);
	return (m);
}

/***************************************************
 * Low level mapping routines.....
 ***************************************************/

/*
 * Add a wired page to the kva.
 * Note: not SMP coherent.
 */
PMAP_INLINE void
pmap_kenter(vm_offset_t va, vm_paddr_t pa)
{
	pt_entry_t *pte;

	pte = vtopte(va);
	pte_store(pte, pa | PG_RW | PG_V | pgeflag);
}

/*
 * Remove a page from the kernel pagetables.
 * Note: not SMP coherent.
 */
PMAP_INLINE void
pmap_kremove(vm_offset_t va)
{
	pt_entry_t *pte;

	pte = vtopte(va);
	pte_clear(pte);
}

/*
 *	Used to map a range of physical addresses into kernel
 *	virtual address space.
 *
 *	The value passed in '*virt' is a suggested virtual address for
 *	the mapping. Architectures which can support a direct-mapped
 *	physical to virtual region can return the appropriate address
 *	within that region, leaving '*virt' unchanged. Other
 *	architectures should map the pages starting at '*virt' and
 *	update '*virt' with the first usable address after the mapped
 *	region.
 */
vm_offset_t
pmap_map(vm_offset_t *virt, vm_paddr_t start, vm_paddr_t end, int prot)
{
	vm_offset_t va, sva;

	va = sva = *virt;
	while (start < end) {
		pmap_kenter(va, start);
		va += PAGE_SIZE;
		start += PAGE_SIZE;
	}
	pmap_invalidate_range(kernel_pmap, sva, va);
	*virt = va;
	return (sva);
}
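
/*
 * Hypothetical usage sketch for pmap_map(): the caller supplies a KVA
 * cursor which, on i386 (no direct map), is advanced past the newly
 * entered pages.  suggested_kva and pa below are placeholders, not
 * names used elsewhere in this file:
 *
 *	vm_offset_t cursor, va;
 *
 *	cursor = suggested_kva;		// caller-chosen, page aligned
 *	va = pmap_map(&cursor, pa, pa + 3 * PAGE_SIZE,
 *	    VM_PROT_READ | VM_PROT_WRITE);
 *	// va == suggested_kva here, and cursor has moved 3 pages up.
 */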

/*
 * Add a list of wired pages to the kva.
 * This routine is only used for temporary
 * kernel mappings that do not need to have
 * page modification or references recorded.
 * Note that old mappings are simply written
 * over.  The page *must* be wired.
 * Note: SMP coherent.  Uses a ranged shootdown IPI.
 */
void
pmap_qenter(vm_offset_t sva, vm_page_t *m, int count)
{
	vm_offset_t va;

	va = sva;
	while (count-- > 0) {
		pmap_kenter(va, VM_PAGE_TO_PHYS(*m));
		va += PAGE_SIZE;
		m++;
	}
	pmap_invalidate_range(kernel_pmap, sva, va);
}

/*
 * This routine tears out page mappings from the
 * kernel -- it is meant only for temporary mappings.
 * Note: SMP coherent.  Uses a ranged shootdown IPI.
 */
void
pmap_qremove(vm_offset_t sva, int count)
{
	vm_offset_t va;

	va = sva;
	while (count-- > 0) {
		pmap_kremove(va);
		va += PAGE_SIZE;
	}
	pmap_invalidate_range(kernel_pmap, sva, va);
}
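
/*
 * The usual (illustrative) pairing of the two routines above, as in
 * callers like the buffer cache: map a batch of wired pages, use them,
 * then tear the mapping down, paying one ranged shootdown at each end
 * rather than one IPI per page:
 *
 *	pmap_qenter(kva, pages, npages);
 *	// ... access the pages through kva ...
 *	pmap_qremove(kva, npages);
 */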

/***************************************************
 * Page table page management routines.....
 ***************************************************/

/*
 * This routine unholds page table pages, and if the hold count
 * drops to zero, then it decrements the wire count.
 */
static int
_pmap_unwire_pte_hold(pmap_t pmap, vm_page_t m)
{

	while (vm_page_sleep_if_busy(m, FALSE, "pmuwpt"))
		vm_page_lock_queues();

	if (m->hold_count == 0) {
		vm_offset_t pteva;
		/*
		 * unmap the page table page
		 */
		pmap->pm_pdir[m->pindex] = 0;
		--pmap->pm_stats.resident_count;
		if (pmap_is_current(pmap)) {
			/*
			 * Do an invltlb to make the invalidated mapping
			 * take effect immediately.
			 */
			pteva = VM_MAXUSER_ADDRESS + i386_ptob(m->pindex);
			pmap_invalidate_page(pmap, pteva);
		}

		/*
		 * If the page is finally unwired, simply free it.
		 */
		--m->wire_count;
		if (m->wire_count == 0) {
			vm_page_busy(m);
			vm_page_free_zero(m);
			atomic_subtract_int(&cnt.v_wire_count, 1);
		}
		return 1;
	}
	return 0;
}

static PMAP_INLINE int
pmap_unwire_pte_hold(pmap_t pmap, vm_page_t m)
{
	vm_page_unhold(m);
	if (m->hold_count == 0)
		return _pmap_unwire_pte_hold(pmap, m);
	else
		return 0;
}

/*
 * After removing a page table entry, this routine is used to
 * conditionally free the page, and manage the hold/wire counts.
 */
static int
pmap_unuse_pt(pmap_t pmap, vm_offset_t va, vm_page_t mpte)
{

	if (va >= VM_MAXUSER_ADDRESS)
		return 0;

	return pmap_unwire_pte_hold(pmap, mpte);
}

void
pmap_pinit0(pmap)
	struct pmap *pmap;
{

	pmap->pm_pdir = (pd_entry_t *)(KERNBASE + (vm_offset_t)IdlePTD);
#ifdef PAE
	pmap->pm_pdpt = (pdpt_entry_t *)(KERNBASE + (vm_offset_t)IdlePDPT);
#endif
	pmap->pm_active = 0;
	PCPU_SET(curpmap, pmap);
	TAILQ_INIT(&pmap->pm_pvlist);
	bzero(&pmap->pm_stats, sizeof pmap->pm_stats);
	mtx_lock_spin(&allpmaps_lock);
	LIST_INSERT_HEAD(&allpmaps, pmap, pm_list);
	mtx_unlock_spin(&allpmaps_lock);
}

/*
 * Initialize a preallocated and zeroed pmap structure,
 * such as one in a vmspace structure.
 */
void
pmap_pinit(pmap)
	register struct pmap *pmap;
{
	vm_page_t ptdpg[NPGPTD];
	vm_paddr_t pa;
	int i;

	/*
	 * No need to allocate page table space yet but we do need a valid
	 * page directory table.
	 */
	if (pmap->pm_pdir == NULL) {
		pmap->pm_pdir = (pd_entry_t *)kmem_alloc_pageable(kernel_map,
		    NBPTD);
#ifdef PAE
		pmap->pm_pdpt = uma_zalloc(pdptzone, M_WAITOK | M_ZERO);
		KASSERT(((vm_offset_t)pmap->pm_pdpt &
		    ((NPGPTD * sizeof(pdpt_entry_t)) - 1)) == 0,
		    ("pmap_pinit: pdpt misaligned"));
		KASSERT(pmap_kextract((vm_offset_t)pmap->pm_pdpt) < (4ULL<<30),
		    ("pmap_pinit: pdpt above 4g"));
#endif
	}

	/*
	 * allocate object for the ptes
	 */
	if (pmap->pm_pteobj == NULL)
		pmap->pm_pteobj = vm_object_allocate(OBJT_DEFAULT, PTDPTDI +
		    NPGPTD);

	/*
	 * allocate the page directory page(s)
	 */
	for (i = 0; i < NPGPTD; i++) {
		VM_OBJECT_LOCK(pmap->pm_pteobj);
		ptdpg[i] = vm_page_grab(pmap->pm_pteobj, PTDPTDI + i,
		    VM_ALLOC_NORMAL | VM_ALLOC_RETRY | VM_ALLOC_WIRED |
		    VM_ALLOC_ZERO);
		vm_page_lock_queues();
		vm_page_flag_clear(ptdpg[i], PG_BUSY);
		ptdpg[i]->valid = VM_PAGE_BITS_ALL;
		vm_page_unlock_queues();
		VM_OBJECT_UNLOCK(pmap->pm_pteobj);
	}

	pmap_qenter((vm_offset_t)pmap->pm_pdir, ptdpg, NPGPTD);

	for (i = 0; i < NPGPTD; i++) {
		if ((ptdpg[i]->flags & PG_ZERO) == 0)
			bzero(pmap->pm_pdir + (i * NPDEPG), PAGE_SIZE);
	}

	mtx_lock_spin(&allpmaps_lock);
	LIST_INSERT_HEAD(&allpmaps, pmap, pm_list);
	mtx_unlock_spin(&allpmaps_lock);
	/* Wire in kernel global address entries. */
	/* XXX copies current process, does not fill in MPPTDI */
	bcopy(PTD + KPTDI, pmap->pm_pdir + KPTDI, nkpt * sizeof(pd_entry_t));
#ifdef SMP
	pmap->pm_pdir[MPPTDI] = PTD[MPPTDI];
#endif

	/* install self-referential address mapping entry(s) */
	for (i = 0; i < NPGPTD; i++) {
		pa = VM_PAGE_TO_PHYS(ptdpg[i]);
		pmap->pm_pdir[PTDPTDI + i] = pa | PG_V | PG_RW | PG_A | PG_M;
#ifdef PAE
		pmap->pm_pdpt[i] = pa | PG_V;
#endif
	}

	pmap->pm_active = 0;
	TAILQ_INIT(&pmap->pm_pvlist);
	bzero(&pmap->pm_stats, sizeof pmap->pm_stats);
}

/*
 * Wire in kernel global address entries.  To avoid a race condition
 * between pmap initialization and pmap_growkernel, this procedure
 * should be called after the vmspace is attached to the process
 * but before this pmap is activated.
 */
void
pmap_pinit2(pmap)
	struct pmap *pmap;
{
	/* XXX: Remove this stub when no longer called */
}

/*
 * This routine is called when the page table page backing a virtual
 * address is not resident; it allocates and maps the needed page
 * table page.
 */
static vm_page_t
_pmap_allocpte(pmap, ptepindex)
	pmap_t	pmap;
	unsigned ptepindex;
{
	vm_paddr_t ptepa;
	vm_page_t m;

	/*
	 * Find or fabricate a new pagetable page
	 */
	VM_OBJECT_LOCK(pmap->pm_pteobj);
	m = vm_page_grab(pmap->pm_pteobj, ptepindex,
	    VM_ALLOC_WIRED | VM_ALLOC_ZERO | VM_ALLOC_RETRY);
	if ((m->flags & PG_ZERO) == 0)
		pmap_zero_page(m);

	KASSERT(m->queue == PQ_NONE,
		("_pmap_allocpte: %p->queue != PQ_NONE", m));

	/*
	 * Increment the hold count for the page table page
	 * (denoting a new mapping.)
	 */
	m->hold_count++;

	/*
	 * Map the pagetable page into the process address space, if
	 * it isn't already there.
	 */

	pmap->pm_stats.resident_count++;

	ptepa = VM_PAGE_TO_PHYS(m);
	pmap->pm_pdir[ptepindex] =
		(pd_entry_t) (ptepa | PG_U | PG_RW | PG_V | PG_A | PG_M);

	vm_page_lock_queues();
	m->valid = VM_PAGE_BITS_ALL;
	vm_page_flag_clear(m, PG_ZERO);
	vm_page_wakeup(m);
	vm_page_unlock_queues();
	VM_OBJECT_UNLOCK(pmap->pm_pteobj);

	return m;
}

static vm_page_t
pmap_allocpte(pmap_t pmap, vm_offset_t va)
{
	unsigned ptepindex;
	pd_entry_t ptepa;
	vm_page_t m;

	/*
	 * Calculate pagetable page index
	 */
	ptepindex = va >> PDRSHIFT;

	/*
	 * Get the page directory entry
	 */
	ptepa = pmap->pm_pdir[ptepindex];

	/*
	 * This supports switching from a 4MB page to a
	 * normal 4K page.
	 */
	if (ptepa & PG_PS) {
		pmap->pm_pdir[ptepindex] = 0;
		ptepa = 0;
		pmap_invalidate_all(kernel_pmap);
	}

	/*
	 * If the page table page is mapped, we just increment the
	 * hold count, and activate it.
	 */
	if (ptepa) {
		m = PHYS_TO_VM_PAGE(ptepa);
		m->hold_count++;
		return m;
	}
	/*
	 * Here if the pte page isn't mapped, or if it has been deallocated.
	 */
	return _pmap_allocpte(pmap, ptepindex);
}
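
/*
 * Index arithmetic note for the two routines above (non-PAE):
 * ptepindex = va >> PDRSHIFT with PDRSHIFT = 22, so e.g.
 * va = 0x0804a000 falls under page directory entry 32 (0x20), and
 * every va in [0x08000000, 0x08400000) shares that one page table
 * page.
 */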

/***************************************************
 * Pmap allocation/deallocation routines.
 ***************************************************/

#ifdef SMP
/*
 * Deal with an SMP shootdown of other users of the pmap that we are
 * trying to dispose of.  This can be a bit hairy.
 */
static u_int *lazymask;
static u_int lazyptd;
static volatile u_int lazywait;

void pmap_lazyfix_action(void);

void
pmap_lazyfix_action(void)
{
	u_int mymask = PCPU_GET(cpumask);

	if (rcr3() == lazyptd)
		load_cr3(PCPU_GET(curpcb)->pcb_cr3);
	atomic_clear_int(lazymask, mymask);
	atomic_store_rel_int(&lazywait, 1);
}

static void
pmap_lazyfix_self(u_int mymask)
{

	if (rcr3() == lazyptd)
		load_cr3(PCPU_GET(curpcb)->pcb_cr3);
	atomic_clear_int(lazymask, mymask);
}

static void
pmap_lazyfix(pmap_t pmap)
{
	u_int mymask = PCPU_GET(cpumask);
	u_int mask;
	register u_int spins;

	while ((mask = pmap->pm_active) != 0) {
		spins = 50000000;
		mask = mask & -mask;	/* Find least significant set bit */
		mtx_lock_spin(&lazypmap_lock);
#ifdef PAE
		lazyptd = vtophys(pmap->pm_pdpt);
#else
		lazyptd = vtophys(pmap->pm_pdir);
#endif
		if (mask == mymask) {
			lazymask = &pmap->pm_active;
			pmap_lazyfix_self(mymask);
		} else {
			atomic_store_rel_int((u_int *)&lazymask,
			    (u_int)&pmap->pm_active);
			atomic_store_rel_int(&lazywait, 0);
			ipi_selected(mask, IPI_LAZYPMAP);
			while (lazywait == 0) {
				ia32_pause();
				if (--spins == 0)
					break;
			}
		}
		mtx_unlock_spin(&lazypmap_lock);
		if (spins == 0)
			printf("pmap_lazyfix: spun for 50000000\n");
	}
}

#else	/* SMP */

/*
 * Cleaning up on uniprocessor is easy.  For various reasons, we're
 * unlikely to have to even execute this code, including the fact
 * that the cleanup is deferred until the parent does a wait(2), which
 * means that another userland process has run.
 */
static void
pmap_lazyfix(pmap_t pmap)
{
	u_int cr3;

	cr3 = vtophys(pmap->pm_pdir);
	if (cr3 == rcr3()) {
		load_cr3(PCPU_GET(curpcb)->pcb_cr3);
		pmap->pm_active &= ~(PCPU_GET(cpumask));
	}
}
#endif	/* SMP */

/*
 * Release any resources held by the given physical map.
 * Called when a pmap initialized by pmap_pinit is being released.
 * Should only be called if the map contains no valid mappings.
 */
void
pmap_release(pmap_t pmap)
{
	vm_page_t m, ptdpg[NPGPTD];
	int i;

	KASSERT(pmap->pm_stats.resident_count == 0,
	    ("pmap_release: pmap resident count %ld != 0",
	    pmap->pm_stats.resident_count));

	pmap_lazyfix(pmap);
	mtx_lock_spin(&allpmaps_lock);
	LIST_REMOVE(pmap, pm_list);
	mtx_unlock_spin(&allpmaps_lock);

	for (i = 0; i < NPGPTD; i++)
		ptdpg[i] = PHYS_TO_VM_PAGE(pmap->pm_pdir[PTDPTDI + i]);

	bzero(pmap->pm_pdir + PTDPTDI, (nkpt + NPGPTD) *
	    sizeof(*pmap->pm_pdir));
#ifdef SMP
	pmap->pm_pdir[MPPTDI] = 0;
#endif

	pmap_qremove((vm_offset_t)pmap->pm_pdir, NPGPTD);

	vm_page_lock_queues();
	for (i = 0; i < NPGPTD; i++) {
		m = ptdpg[i];
#ifdef PAE
		KASSERT(VM_PAGE_TO_PHYS(m) == (pmap->pm_pdpt[i] & PG_FRAME),
		    ("pmap_release: got wrong ptd page"));
#endif
		m->wire_count--;
		atomic_subtract_int(&cnt.v_wire_count, 1);
		vm_page_busy(m);
		vm_page_free_zero(m);
	}
	vm_page_unlock_queues();
}

static int
kvm_size(SYSCTL_HANDLER_ARGS)
{
	unsigned long ksize = VM_MAX_KERNEL_ADDRESS - KERNBASE;

	return sysctl_handle_long(oidp, &ksize, 0, req);
}
SYSCTL_PROC(_vm, OID_AUTO, kvm_size, CTLTYPE_LONG|CTLFLAG_RD,
    0, 0, kvm_size, "IU", "Size of KVM");

static int
kvm_free(SYSCTL_HANDLER_ARGS)
{
	unsigned long kfree = VM_MAX_KERNEL_ADDRESS - kernel_vm_end;

	return sysctl_handle_long(oidp, &kfree, 0, req);
}
SYSCTL_PROC(_vm, OID_AUTO, kvm_free, CTLTYPE_LONG|CTLFLAG_RD,
    0, 0, kvm_free, "IU", "Amount of KVM free");

/*
 * grow the number of kernel page table entries, if needed
 */
void
pmap_growkernel(vm_offset_t addr)
{
	struct pmap *pmap;
	int s;
	vm_paddr_t ptppaddr;
	vm_page_t nkpg;
	pd_entry_t newpdir;
	pt_entry_t *pde;

	s = splhigh();
	mtx_assert(&kernel_map->system_mtx, MA_OWNED);
	if (kernel_vm_end == 0) {
		kernel_vm_end = KERNBASE;
		nkpt = 0;
		while (pdir_pde(PTD, kernel_vm_end)) {
			kernel_vm_end = (kernel_vm_end + PAGE_SIZE * NPTEPG) & ~(PAGE_SIZE * NPTEPG - 1);
			nkpt++;
		}
	}
	addr = roundup2(addr, PAGE_SIZE * NPTEPG);
	while (kernel_vm_end < addr) {
		if (pdir_pde(PTD, kernel_vm_end)) {
			kernel_vm_end = (kernel_vm_end + PAGE_SIZE * NPTEPG) & ~(PAGE_SIZE * NPTEPG - 1);
			continue;
		}

		/*
		 * This index is bogus, but out of the way
		 */
		nkpg = vm_page_alloc(NULL, nkpt,
		    VM_ALLOC_NOOBJ | VM_ALLOC_SYSTEM | VM_ALLOC_WIRED);
		if (!nkpg)
			panic("pmap_growkernel: no memory to grow kernel");

		nkpt++;

		pmap_zero_page(nkpg);
		ptppaddr = VM_PAGE_TO_PHYS(nkpg);
		newpdir = (pd_entry_t) (ptppaddr | PG_V | PG_RW | PG_A | PG_M);
		pdir_pde(PTD, kernel_vm_end) = newpdir;

		mtx_lock_spin(&allpmaps_lock);
		LIST_FOREACH(pmap, &allpmaps, pm_list) {
			pde = pmap_pde(pmap, kernel_vm_end);
			pde_store(pde, newpdir);
		}
		mtx_unlock_spin(&allpmaps_lock);
		kernel_vm_end = (kernel_vm_end + PAGE_SIZE * NPTEPG) & ~(PAGE_SIZE * NPTEPG - 1);
	}
	splx(s);
}
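
/*
 * Rounding note for the loop above (non-PAE): PAGE_SIZE * NPTEPG is
 * 4MB, the span of one page directory entry, so both roundup2() and
 * the (x + 4MB) & ~(4MB - 1) stepping advance kernel_vm_end one page
 * table page at a time, e.g. from 0xc1000000 to 0xc1400000.
 */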

/***************************************************
 * page management routines.
 ***************************************************/

/*
 * Free the pv_entry back to the free list.
 */
static PMAP_INLINE void
free_pv_entry(pv_entry_t pv)
{
	pv_entry_count--;
	uma_zfree(pvzone, pv);
}

/*
 * Get a new pv_entry, allocating a block from the system
 * when needed.  The memory allocation bypasses the malloc code
 * because of the possibility of allocations at interrupt time.
 */
static pv_entry_t
get_pv_entry(void)
{
	pv_entry_count++;
	if (pv_entry_high_water &&
		(pv_entry_count > pv_entry_high_water) &&
		(pmap_pagedaemon_waken == 0)) {
		pmap_pagedaemon_waken = 1;
		wakeup (&vm_pages_needed);
	}
	return uma_zalloc(pvzone, M_NOWAIT);
}

/*
 * If it is the first entry on the list, it is actually
 * in the header and we must copy the following entry up
 * to the header.  Otherwise we must search the list for
 * the entry.  In either case we free the now unused entry.
 */

static int
pmap_remove_entry(pmap_t pmap, vm_page_t m, vm_offset_t va)
{
	pv_entry_t pv;
	int rtval;
	int s;

	s = splvm();
	mtx_assert(&vm_page_queue_mtx, MA_OWNED);
	if (m->md.pv_list_count < pmap->pm_stats.resident_count) {
		TAILQ_FOREACH(pv, &m->md.pv_list, pv_list) {
			if (pmap == pv->pv_pmap && va == pv->pv_va)
				break;
		}
	} else {
		TAILQ_FOREACH(pv, &pmap->pm_pvlist, pv_plist) {
			if (va == pv->pv_va)
				break;
		}
	}

	rtval = 0;
	if (pv) {
		rtval = pmap_unuse_pt(pmap, va, pv->pv_ptem);
		TAILQ_REMOVE(&m->md.pv_list, pv, pv_list);
		m->md.pv_list_count--;
		if (TAILQ_FIRST(&m->md.pv_list) == NULL)
			vm_page_flag_clear(m, PG_WRITEABLE);

		TAILQ_REMOVE(&pmap->pm_pvlist, pv, pv_plist);
		free_pv_entry(pv);
	}

	splx(s);
	return rtval;
}

/*
 * Create a pv entry for page at pa for
 * (pmap, va).
 */
static void
pmap_insert_entry(pmap_t pmap, vm_offset_t va, vm_page_t mpte, vm_page_t m)
{

	int s;
	pv_entry_t pv;

	s = splvm();
	pv = get_pv_entry();
	pv->pv_va = va;
	pv->pv_pmap = pmap;
	pv->pv_ptem = mpte;

	vm_page_lock_queues();
	TAILQ_INSERT_TAIL(&pmap->pm_pvlist, pv, pv_plist);
	TAILQ_INSERT_TAIL(&m->md.pv_list, pv, pv_list);
	m->md.pv_list_count++;

	vm_page_unlock_queues();
	splx(s);
}

/*
 * pmap_remove_pte: do the things to unmap a page in a process
 */
static int
pmap_remove_pte(pmap_t pmap, pt_entry_t *ptq, vm_offset_t va)
{
	pt_entry_t oldpte;
	vm_page_t m, mpte;

	oldpte = pte_load_clear(ptq);
	if (oldpte & PG_W)
		pmap->pm_stats.wired_count -= 1;
	/*
	 * Machines that don't support invlpg also don't support
	 * PG_G.
	 */
	if (oldpte & PG_G)
		pmap_invalidate_page(kernel_pmap, va);
	pmap->pm_stats.resident_count -= 1;
	if (oldpte & PG_MANAGED) {
		m = PHYS_TO_VM_PAGE(oldpte);
		if (oldpte & PG_M) {
#if defined(PMAP_DIAGNOSTIC)
			if (pmap_nw_modified((pt_entry_t) oldpte)) {
				printf(
	"pmap_remove: modified page not writable: va: 0x%x, pte: 0x%x\n",
				    va, oldpte);
			}
#endif
			if (pmap_track_modified(va))
				vm_page_dirty(m);
		}
		if (oldpte & PG_A)
			vm_page_flag_set(m, PG_REFERENCED);
		return pmap_remove_entry(pmap, m, va);
	} else {
		mpte = PHYS_TO_VM_PAGE(*pmap_pde(pmap, va));
		return pmap_unuse_pt(pmap, va, mpte);
	}
}

/*
 * Remove a single page from a process address space.
 */
static void
pmap_remove_page(pmap_t pmap, vm_offset_t va)
{
	pt_entry_t *pte;

	if ((pte = pmap_pte_quick(pmap, va)) == NULL || *pte == 0)
		return;
	pmap_remove_pte(pmap, pte, va);
	pmap_invalidate_page(pmap, va);
}

/*
 *	Remove the given range of addresses from the specified map.
 *
 *	It is assumed that the start and end are properly
 *	rounded to the page size.
 */
void
pmap_remove(pmap_t pmap, vm_offset_t sva, vm_offset_t eva)
{
	vm_offset_t pdnxt;
	pd_entry_t ptpaddr;
	pt_entry_t *pte;
	int anyvalid;

	if (pmap == NULL)
		return;

	if (pmap->pm_stats.resident_count == 0)
		return;

	/*
	 * Special handling for removing a single page: a very common
	 * operation for which it is easy to short-circuit some code.
	 */
	if ((sva + PAGE_SIZE == eva) &&
	    ((pmap->pm_pdir[(sva >> PDRSHIFT)] & PG_PS) == 0)) {
		pmap_remove_page(pmap, sva);
		return;
	}

	anyvalid = 0;

	for (; sva < eva; sva = pdnxt) {
		unsigned pdirindex;

		/*
		 * Calculate index for next page table.
		 */
		pdnxt = (sva + NBPDR) & ~PDRMASK;
		if (pmap->pm_stats.resident_count == 0)
			break;

		pdirindex = sva >> PDRSHIFT;
		ptpaddr = pmap->pm_pdir[pdirindex];

		/*
		 * Weed out invalid mappings. Note: we assume that the page
		 * directory table is always allocated, and in kernel virtual.
		 */
		if (ptpaddr == 0)
			continue;

		/*
		 * Check for large page.
		 */
		if ((ptpaddr & PG_PS) != 0) {
			pmap->pm_pdir[pdirindex] = 0;
			pmap->pm_stats.resident_count -= NBPDR / PAGE_SIZE;
			anyvalid = 1;
			continue;
		}

		/*
		 * Limit our scan to either the end of the va represented
		 * by the current page table page, or to the end of the
		 * range being removed.
		 */
		if (pdnxt > eva)
			pdnxt = eva;

		for (; sva != pdnxt; sva += PAGE_SIZE) {
			if ((pte = pmap_pte_quick(pmap, sva)) == NULL ||
			    *pte == 0)
				continue;
			anyvalid = 1;
			if (pmap_remove_pte(pmap, pte, sva))
				break;
		}
	}

	if (anyvalid)
		pmap_invalidate_all(pmap);
}
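
/*
 * Stepping note for pmap_remove() above (non-PAE): pdnxt =
 * (sva + NBPDR) & ~PDRMASK is the first va of the next 4MB region,
 * e.g. sva = 0x08123000 gives pdnxt = 0x08400000; clamping pdnxt to
 * eva keeps the inner 4K loop within both the current page table page
 * and the caller's range.
 */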
1723
1724/*
1725 *	Routine:	pmap_remove_all
1726 *	Function:
1727 *		Removes this physical page from
1728 *		all physical maps in which it resides.
1729 *		Reflects back modify bits to the pager.
1730 *
1731 *	Notes:
1732 *		Original versions of this routine were very
1733 *		inefficient because they iteratively called
1734 *		pmap_remove (slow...)
1735 */
1736
1737void
1738pmap_remove_all(vm_page_t m)
1739{
1740	register pv_entry_t pv;
1741	pt_entry_t *pte, tpte;
1742	int s;
1743
1744#if defined(PMAP_DIAGNOSTIC)
1745	/*
1746	 * XXX This makes pmap_remove_all() illegal for non-managed pages!
1747	 */
1748	if (!pmap_initialized || (m->flags & PG_FICTITIOUS)) {
1749		panic("pmap_remove_all: illegal for unmanaged page, va: 0x%x",
1750		    VM_PAGE_TO_PHYS(m));
1751	}
1752#endif
1753	mtx_assert(&vm_page_queue_mtx, MA_OWNED);
1754	s = splvm();
1755	while ((pv = TAILQ_FIRST(&m->md.pv_list)) != NULL) {
1756		pv->pv_pmap->pm_stats.resident_count--;
1757		pte = pmap_pte_quick(pv->pv_pmap, pv->pv_va);
1758		tpte = pte_load_clear(pte);
1759		if (tpte & PG_W)
1760			pv->pv_pmap->pm_stats.wired_count--;
1761		if (tpte & PG_A)
1762			vm_page_flag_set(m, PG_REFERENCED);
1763
1764		/*
1765		 * Update the vm_page_t clean and reference bits.
1766		 */
1767		if (tpte & PG_M) {
1768#if defined(PMAP_DIAGNOSTIC)
1769			if (pmap_nw_modified((pt_entry_t) tpte)) {
1770				printf(
1771	"pmap_remove_all: modified page not writable: va: 0x%x, pte: 0x%x\n",
1772				    pv->pv_va, tpte);
1773			}
1774#endif
1775			if (pmap_track_modified(pv->pv_va))
1776				vm_page_dirty(m);
1777		}
1778		pmap_invalidate_page(pv->pv_pmap, pv->pv_va);
1779		TAILQ_REMOVE(&pv->pv_pmap->pm_pvlist, pv, pv_plist);
1780		TAILQ_REMOVE(&m->md.pv_list, pv, pv_list);
1781		m->md.pv_list_count--;
1782		pmap_unuse_pt(pv->pv_pmap, pv->pv_va, pv->pv_ptem);
1783		free_pv_entry(pv);
1784	}
1785	vm_page_flag_clear(m, PG_WRITEABLE);
1786	splx(s);
1787}
1788
1789/*
1790 *	Set the physical protection on the
1791 *	specified range of this map as requested.
1792 */
1793void
1794pmap_protect(pmap_t pmap, vm_offset_t sva, vm_offset_t eva, vm_prot_t prot)
1795{
1796	vm_offset_t pdnxt;
1797	pd_entry_t ptpaddr;
1798	int anychanged;
1799
1800	if (pmap == NULL)
1801		return;
1802
1803	if ((prot & VM_PROT_READ) == VM_PROT_NONE) {
1804		pmap_remove(pmap, sva, eva);
1805		return;
1806	}
1807
1808	if (prot & VM_PROT_WRITE)
1809		return;
1810
1811	anychanged = 0;
1812
1813	for (; sva < eva; sva = pdnxt) {
1814		unsigned pdirindex;
1815
1816		pdnxt = (sva + NBPDR) & ~PDRMASK;
1817
1818		pdirindex = sva >> PDRSHIFT;
1819		ptpaddr = pmap->pm_pdir[pdirindex];
1820
1821		/*
1822		 * Weed out invalid mappings. Note: we assume that the page
1823		 * directory table is always allocated, and in kernel virtual.
1824		 */
1825		if (ptpaddr == 0)
1826			continue;
1827
1828		/*
1829		 * Check for large page.
1830		 */
1831		if ((ptpaddr & PG_PS) != 0) {
1832			pmap->pm_pdir[pdirindex] &= ~(PG_M|PG_RW);
1833			pmap->pm_stats.resident_count -= NBPDR / PAGE_SIZE;
1834			anychanged = 1;
1835			continue;
1836		}
1837
1838		if (pdnxt > eva)
1839			pdnxt = eva;
1840
1841		for (; sva != pdnxt; sva += PAGE_SIZE) {
1842			pt_entry_t pbits;
1843			pt_entry_t *pte;
1844			vm_page_t m;
1845
1846			if ((pte = pmap_pte_quick(pmap, sva)) == NULL)
1847				continue;
1848			pbits = *pte;
1849			if (pbits & PG_MANAGED) {
1850				m = NULL;
1851				if (pbits & PG_A) {
1852					m = PHYS_TO_VM_PAGE(pbits);
1853					vm_page_flag_set(m, PG_REFERENCED);
1854					pbits &= ~PG_A;
1855				}
1856				if ((pbits & PG_M) != 0 &&
1857				    pmap_track_modified(sva)) {
1858					if (m == NULL)
1859						m = PHYS_TO_VM_PAGE(pbits);
1860					vm_page_dirty(m);
1861					pbits &= ~PG_M;
1862				}
1863			}
1864
1865			pbits &= ~PG_RW;
1866
1867			if (pbits != *pte) {
1868				pte_store(pte, pbits);
1869				anychanged = 1;
1870			}
1871		}
1872	}
1873	if (anychanged)
1874		pmap_invalidate_all(pmap);
1875}
1876
1877/*
1878 *	Insert the given physical page (p) at
1879 *	the specified virtual address (v) in the
1880 *	target physical map with the protection requested.
1881 *
1882 *	If specified, the page will be wired down, meaning
1883 *	that the related pte can not be reclaimed.
1884 *
1885 *	NB:  This is the only routine which MAY NOT lazy-evaluate
1886 *	or lose information.  That is, this routine must actually
1887 *	insert this page into the given map NOW.
1888 */
1889void
1890pmap_enter(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot,
1891	   boolean_t wired)
1892{
1893	vm_paddr_t pa;
1894	register pt_entry_t *pte;
1895	vm_paddr_t opa;
1896	pt_entry_t origpte, newpte;
1897	vm_page_t mpte;
1898
1899	if (pmap == NULL)
1900		return;
1901
1902	va &= PG_FRAME;
#ifdef PMAP_DIAGNOSTIC
	if (va > VM_MAX_KERNEL_ADDRESS)
		panic("pmap_enter: toobig");
	if ((va >= UPT_MIN_ADDRESS) && (va < UPT_MAX_ADDRESS))
		panic("pmap_enter: invalid to pmap_enter page table pages (va: 0x%x)", va);
#endif

	mpte = NULL;
	/*
	 * In the case that a page table page is not
	 * resident, we are creating it here.
	 */
	if (va < VM_MAXUSER_ADDRESS) {
		mpte = pmap_allocpte(pmap, va);
	}
#if 0 && defined(PMAP_DIAGNOSTIC)
	else {
		pd_entry_t *pdeaddr = pmap_pde(pmap, va);
		origpte = *pdeaddr;
		if ((origpte & PG_V) == 0) {
			panic("pmap_enter: invalid kernel page table page, pdir=%#jx, pde=%#jx, va=%#x",
				(uintmax_t)pmap->pm_pdir[PTDPTDI],
				(uintmax_t)origpte, va);
		}
	}
#endif

	pte = pmap_pte_quick(pmap, va);

	/*
	 * The page table page should be resident by now; panic if the
	 * page directory entry is still invalid.
	 */
	if (pte == NULL) {
		panic("pmap_enter: invalid page directory pdir=%#jx, va=%#x",
			(uintmax_t)pmap->pm_pdir[PTDPTDI], va);
	}

	pa = VM_PAGE_TO_PHYS(m) & PG_FRAME;
	origpte = *pte;
	opa = origpte & PG_FRAME;

	if (origpte & PG_PS)
		panic("pmap_enter: attempted pmap_enter on 4MB page");

	/*
	 * Mapping has not changed; this must be a protection or wiring
	 * change.
	 */
	if (origpte && (opa == pa)) {
		/*
		 * Wiring change, just update stats. We don't worry about
		 * wiring PT pages as they remain resident as long as there
		 * are valid mappings in them. Hence, if a user page is wired,
		 * the PT page will be also.
		 */
		if (wired && ((origpte & PG_W) == 0))
			pmap->pm_stats.wired_count++;
		else if (!wired && (origpte & PG_W))
			pmap->pm_stats.wired_count--;

#if defined(PMAP_DIAGNOSTIC)
		if (pmap_nw_modified((pt_entry_t) origpte)) {
			printf(
	"pmap_enter: modified page not writable: va: 0x%x, pte: 0x%x\n",
			    va, origpte);
		}
#endif

		/*
		 * Remove the extra pte reference
		 */
		if (mpte)
			mpte->hold_count--;

		if ((prot & VM_PROT_WRITE) && (origpte & PG_V)) {
			if ((origpte & PG_RW) == 0) {
				pte_store(pte, origpte | PG_RW);
				pmap_invalidate_page(pmap, va);
			}
			return;
		}

		/*
		 * We might be turning off write access to the page,
		 * so we go ahead and sense modify status.
		 */
		if (origpte & PG_MANAGED) {
			if ((origpte & PG_M) && pmap_track_modified(va)) {
				vm_page_t om;
				om = PHYS_TO_VM_PAGE(opa);
				vm_page_dirty(om);
			}
			pa |= PG_MANAGED;
		}
		goto validate;
	}
	/*
	 * Mapping has changed, invalidate old range and fall through to
	 * handle validating new mapping.
	 */
	if (opa) {
		int err;
		vm_page_lock_queues();
		err = pmap_remove_pte(pmap, pte, va);
		vm_page_unlock_queues();
		if (err)
			panic("pmap_enter: pte vanished, va: 0x%x", va);
	}

	/*
	 * Enter on the PV list if part of our managed memory. Note that we
	 * raise IPL while manipulating pv_table since pmap_enter can be
	 * called at interrupt time.
	 */
	if (pmap_initialized &&
	    (m->flags & (PG_FICTITIOUS|PG_UNMANAGED)) == 0) {
		pmap_insert_entry(pmap, va, mpte, m);
		pa |= PG_MANAGED;
	}

	/*
	 * Increment counters
	 */
	pmap->pm_stats.resident_count++;
	if (wired)
		pmap->pm_stats.wired_count++;

validate:
	/*
	 * Now validate mapping with desired protection/wiring.
	 */
	newpte = (pt_entry_t)(pa | pte_prot(pmap, prot) | PG_V);

	if (wired)
		newpte |= PG_W;
	if (va < VM_MAXUSER_ADDRESS)
		newpte |= PG_U;
	if (pmap == kernel_pmap)
		newpte |= pgeflag;

	/*
	 * If the mapping or permission bits are different, we need
	 * to update the pte.
	 */
	if ((origpte & ~(PG_M|PG_A)) != newpte) {
		pte_store(pte, newpte | PG_A);
		/*if (origpte)*/ {
			pmap_invalidate_page(pmap, va);
		}
	}
}
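
/*
 * Illustrative sketch (not built): a typical caller of pmap_enter(),
 * using the signature implemented above.  The variable names are
 * hypothetical.
 */
#if 0
	/* Enter a wired, read/write mapping for page "m" at "va". */
	pmap_enter(pmap, va, m, VM_PROT_READ | VM_PROT_WRITE, TRUE);
#endif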

/*
 * This code makes some *MAJOR* assumptions:
 * 1. The pmap is the current pmap and it exists.
 * 2. The mapping is not wired.
 * 3. Read access only.
 * 4. No page table pages are entered.
 * 5. The TLB flush is deferred to the calling procedure.
 * 6. The page IS managed.
 * It is *MUCH* faster than pmap_enter...
 */

vm_page_t
pmap_enter_quick(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_page_t mpte)
{
	pt_entry_t *pte;
	vm_paddr_t pa;

	/*
	 * In the case that a page table page is not
	 * resident, we are creating it here.
	 */
	if (va < VM_MAXUSER_ADDRESS) {
		unsigned ptepindex;
		pd_entry_t ptepa;

		/*
		 * Calculate pagetable page index
		 */
		ptepindex = va >> PDRSHIFT;
		if (mpte && (mpte->pindex == ptepindex)) {
			mpte->hold_count++;
		} else {
			/*
			 * Get the page directory entry
			 */
			ptepa = pmap->pm_pdir[ptepindex];

			/*
			 * If the page table page is mapped, we just increment
			 * the hold count, and activate it.
			 */
			if (ptepa) {
				if (ptepa & PG_PS)
					panic("pmap_enter_quick: unexpected mapping into 4MB page");
				mpte = PHYS_TO_VM_PAGE(ptepa);
				mpte->hold_count++;
			} else {
				mpte = _pmap_allocpte(pmap, ptepindex);
			}
		}
	} else {
		mpte = NULL;
	}

	/*
	 * This call to vtopte makes the assumption that we are
	 * entering the page into the current pmap.  In order to support
	 * quick entry into any pmap, one would likely use pmap_pte_quick.
	 * But that isn't as quick as vtopte.
	 */
	pte = vtopte(va);
	if (*pte) {
		if (mpte != NULL) {
			vm_page_lock_queues();
			pmap_unwire_pte_hold(pmap, mpte);
			vm_page_unlock_queues();
		}
		return (NULL);
	}

	/*
	 * Enter on the PV list if part of our managed memory. Note that we
	 * raise IPL while manipulating pv_table since pmap_enter can be
	 * called at interrupt time.
	 */
	if ((m->flags & (PG_FICTITIOUS|PG_UNMANAGED)) == 0)
		pmap_insert_entry(pmap, va, mpte, m);

	/*
	 * Increment counters
	 */
	pmap->pm_stats.resident_count++;

	pa = VM_PAGE_TO_PHYS(m);

	/*
	 * Now validate mapping with RO protection
	 */
	if (m->flags & (PG_FICTITIOUS|PG_UNMANAGED))
		pte_store(pte, pa | PG_V | PG_U);
	else
		pte_store(pte, pa | PG_V | PG_U | PG_MANAGED);

	return (mpte);
}
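
/*
 * Illustrative sketch (not built): callers thread the returned page
 * table page through consecutive calls, as pmap_prefault() below does,
 * so the pindex check above can avoid a lookup per page.  "n", "addrs"
 * and "pages" are hypothetical.
 */
#if 0
	mpte = NULL;
	for (i = 0; i < n; i++)
		mpte = pmap_enter_quick(pmap, addrs[i], pages[i], mpte);
#endif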

/*
 * Make a temporary mapping for a physical address.  This is only intended
 * to be used for panic dumps.
 */
void *
pmap_kenter_temporary(vm_offset_t pa, int i)
{
	vm_offset_t va;

	va = (vm_offset_t)crashdumpmap + (i * PAGE_SIZE);
	pmap_kenter(va, pa);
#ifndef I386_CPU
	invlpg(va);
#else
	invltlb();
#endif
	return ((void *)crashdumpmap);
}
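
/*
 * Illustrative sketch (hypothetical caller, not built): dump code maps
 * the i-th page of the region being dumped and writes it out through
 * the returned address.
 */
#if 0
	va = pmap_kenter_temporary(pa + i * PAGE_SIZE, i);
#endif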

/*
 * This code maps large physical mmap regions into the
 * processor address space.  Note that some shortcuts
 * are taken, but the code works.
 */
void
pmap_object_init_pt(pmap_t pmap, vm_offset_t addr,
		    vm_object_t object, vm_pindex_t pindex,
		    vm_size_t size)
{
	vm_page_t p;

	VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
	KASSERT(object->type == OBJT_DEVICE,
	    ("pmap_object_init_pt: non-device object"));
	if (pseflag &&
	    ((addr & (NBPDR - 1)) == 0) && ((size & (NBPDR - 1)) == 0)) {
		int i;
		vm_page_t m[1];
		unsigned int ptepindex;
		int npdes;
		pd_entry_t ptepa;

		if (pmap->pm_pdir[ptepindex = (addr >> PDRSHIFT)])
			return;
retry:
		p = vm_page_lookup(object, pindex);
		if (p != NULL) {
			vm_page_lock_queues();
			if (vm_page_sleep_if_busy(p, FALSE, "init4p"))
				goto retry;
		} else {
			p = vm_page_alloc(object, pindex, VM_ALLOC_NORMAL);
			if (p == NULL)
				return;
			m[0] = p;

			if (vm_pager_get_pages(object, m, 1, 0) != VM_PAGER_OK) {
				vm_page_lock_queues();
				vm_page_free(p);
				vm_page_unlock_queues();
				return;
			}

			p = vm_page_lookup(object, pindex);
			vm_page_lock_queues();
			vm_page_wakeup(p);
		}
		vm_page_unlock_queues();

		ptepa = VM_PAGE_TO_PHYS(p);
		if (ptepa & (NBPDR - 1))
			return;

		p->valid = VM_PAGE_BITS_ALL;

		pmap->pm_stats.resident_count += size >> PAGE_SHIFT;
		npdes = size >> PDRSHIFT;
		for (i = 0; i < npdes; i++) {
			pde_store(&pmap->pm_pdir[ptepindex],
			    ptepa | PG_U | PG_RW | PG_V | PG_PS);
			ptepa += NBPDR;
			ptepindex += 1;
		}
		pmap_invalidate_all(pmap);
	}
}
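
/*
 * Worked example for the 4MB-page path above (an illustration, not an
 * additional guarantee): with PDRSHIFT == 22 and NBPDR == 4MB, mapping
 * a 16MB device object at addr 0xd0000000 yields ptepindex ==
 * 0xd0000000 >> 22 == 832 and npdes == 16MB >> 22 == 4, so four
 * consecutive PDEs are stored with PG_PS set and no page table pages
 * are consumed.
 */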

/*
 * pmap_prefault provides a quick way of clustering
 * pagefaults into a process's address space.  It is a "cousin"
 * of pmap_object_init_pt, except it runs at page fault time instead
 * of mmap time.
 */
#define PFBAK 4
#define PFFOR 4
#define PAGEORDER_SIZE (PFBAK+PFFOR)

static int pmap_prefault_pageorder[] = {
	-1 * PAGE_SIZE, 1 * PAGE_SIZE,
	-2 * PAGE_SIZE, 2 * PAGE_SIZE,
	-3 * PAGE_SIZE, 3 * PAGE_SIZE,
	-4 * PAGE_SIZE, 4 * PAGE_SIZE
};
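
/*
 * For example, a fault at virtual address "fva" probes, in order,
 * fva-4K, fva+4K, fva-8K, fva+8K, ... out to fva +/- 16K, so the
 * resident neighbors closest to the fault are entered first.
 */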

void
pmap_prefault(pmap, addra, entry)
	pmap_t pmap;
	vm_offset_t addra;
	vm_map_entry_t entry;
{
	int i;
	vm_offset_t starta;
	vm_offset_t addr;
	vm_pindex_t pindex;
	vm_page_t m, mpte;
	vm_object_t object;

	if (!curthread || (pmap != vmspace_pmap(curthread->td_proc->p_vmspace)))
		return;

	object = entry->object.vm_object;

	starta = addra - PFBAK * PAGE_SIZE;
	if (starta < entry->start) {
		starta = entry->start;
	} else if (starta > addra) {
		starta = 0;
	}

	mpte = NULL;
	for (i = 0; i < PAGEORDER_SIZE; i++) {
		vm_object_t backing_object, lobject;
		pt_entry_t *pte;

		addr = addra + pmap_prefault_pageorder[i];
		if (addr > addra + (PFFOR * PAGE_SIZE))
			addr = 0;

		if (addr < starta || addr >= entry->end)
			continue;

		if ((*pmap_pde(pmap, addr)) == 0)
			continue;

		pte = vtopte(addr);
		if (*pte)
			continue;

		pindex = ((addr - entry->start) + entry->offset) >> PAGE_SHIFT;
		lobject = object;
		VM_OBJECT_LOCK(lobject);
		while ((m = vm_page_lookup(lobject, pindex)) == NULL &&
		    lobject->type == OBJT_DEFAULT &&
		    (backing_object = lobject->backing_object) != NULL) {
			if (lobject->backing_object_offset & PAGE_MASK)
				break;
			pindex += lobject->backing_object_offset >> PAGE_SHIFT;
			VM_OBJECT_LOCK(backing_object);
			VM_OBJECT_UNLOCK(lobject);
			lobject = backing_object;
		}
		VM_OBJECT_UNLOCK(lobject);
		/*
		 * Give up when a page is not in memory.
		 */
		if (m == NULL)
			break;
		vm_page_lock_queues();
		if (((m->valid & VM_PAGE_BITS_ALL) == VM_PAGE_BITS_ALL) &&
		    (m->busy == 0) &&
		    (m->flags & (PG_BUSY | PG_FICTITIOUS)) == 0) {
			if ((m->queue - m->pc) == PQ_CACHE) {
				vm_page_deactivate(m);
			}
			vm_page_busy(m);
			vm_page_unlock_queues();
			mpte = pmap_enter_quick(pmap, addr, m, mpte);
			vm_page_lock_queues();
			vm_page_wakeup(m);
		}
		vm_page_unlock_queues();
	}
}

/*
 *	Routine:	pmap_change_wiring
 *	Function:	Change the wiring attribute for a map/virtual-address
 *			pair.
 *	In/out conditions:
 *			The mapping must already exist in the pmap.
 */
void
pmap_change_wiring(pmap, va, wired)
	register pmap_t pmap;
	vm_offset_t va;
	boolean_t wired;
{
	register pt_entry_t *pte;

	if (pmap == NULL)
		return;

	pte = pmap_pte_quick(pmap, va);

	if (wired && !pmap_pte_w(pte))
		pmap->pm_stats.wired_count++;
	else if (!wired && pmap_pte_w(pte))
		pmap->pm_stats.wired_count--;

	/*
	 * Wiring is not a hardware characteristic so there is no need to
	 * invalidate the TLB.
	 */
	pmap_pte_set_w(pte, wired);
}
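
/*
 * Illustrative sketch (hypothetical caller, not built): wiring code
 * typically walks a range of an already-faulted mapping.
 */
#if 0
	for (va = start; va < end; va += PAGE_SIZE)
		pmap_change_wiring(vm_map_pmap(map), va, TRUE);
#endif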

/*
 *	Copy the range specified by src_addr/len
 *	from the source map to the range dst_addr/len
 *	in the destination map.
 *
 *	This routine is only advisory and need not do anything.
 */

void
pmap_copy(pmap_t dst_pmap, pmap_t src_pmap, vm_offset_t dst_addr, vm_size_t len,
	  vm_offset_t src_addr)
{
	vm_offset_t addr;
	vm_offset_t end_addr = src_addr + len;
	vm_offset_t pdnxt;
	vm_page_t m;

	if (dst_addr != src_addr)
		return;

	if (!pmap_is_current(src_pmap))
		return;

	for (addr = src_addr; addr < end_addr; addr = pdnxt) {
		pt_entry_t *src_pte, *dst_pte;
		vm_page_t dstmpte, srcmpte;
		pd_entry_t srcptepaddr;
		unsigned ptepindex;

		if (addr >= UPT_MIN_ADDRESS)
			panic("pmap_copy: invalid to pmap_copy page tables");

		/*
		 * Don't let optional prefaulting of pages make us go
		 * way below the low water mark of free pages or way
		 * above the high water mark of used pv entries.
		 */
		if (cnt.v_free_count < cnt.v_free_reserved ||
		    pv_entry_count > pv_entry_high_water)
			break;

		pdnxt = (addr + NBPDR) & ~PDRMASK;
		ptepindex = addr >> PDRSHIFT;

		srcptepaddr = src_pmap->pm_pdir[ptepindex];
		if (srcptepaddr == 0)
			continue;

		if (srcptepaddr & PG_PS) {
			if (dst_pmap->pm_pdir[ptepindex] == 0) {
				dst_pmap->pm_pdir[ptepindex] = srcptepaddr;
				dst_pmap->pm_stats.resident_count +=
				    NBPDR / PAGE_SIZE;
			}
			continue;
		}

		srcmpte = PHYS_TO_VM_PAGE(srcptepaddr);
		if (srcmpte->hold_count == 0 || (srcmpte->flags & PG_BUSY))
			continue;

		if (pdnxt > end_addr)
			pdnxt = end_addr;

		src_pte = vtopte(addr);
		while (addr < pdnxt) {
			pt_entry_t ptetemp;
			ptetemp = *src_pte;
			/*
			 * We only virtual-copy managed pages.
			 */
			if ((ptetemp & PG_MANAGED) != 0) {
				/*
				 * We have to check after allocpte for the
				 * pte still being around...  allocpte can
				 * block.
				 */
				dstmpte = pmap_allocpte(dst_pmap, addr);
				dst_pte = pmap_pte_quick(dst_pmap, addr);
				if ((*dst_pte == 0) && (ptetemp = *src_pte)) {
					/*
					 * Clear the modified and
					 * accessed (referenced) bits
					 * during the copy.
					 */
					m = PHYS_TO_VM_PAGE(ptetemp);
					*dst_pte = ptetemp & ~(PG_M | PG_A);
					dst_pmap->pm_stats.resident_count++;
					pmap_insert_entry(dst_pmap, addr,
						dstmpte, m);
				} else {
					vm_page_lock_queues();
					pmap_unwire_pte_hold(dst_pmap, dstmpte);
					vm_page_unlock_queues();
				}
				if (dstmpte->hold_count >= srcmpte->hold_count)
					break;
			}
			addr += PAGE_SIZE;
			src_pte++;
		}
	}
}
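
/*
 * Illustrative sketch (not built): fork is the classic caller.  Note
 * that the copy is purely advisory and that, in this implementation,
 * nothing happens unless dst_addr equals src_addr.
 */
#if 0
	pmap_copy(vmspace_pmap(dst_vm), vmspace_pmap(src_vm),
	    entry->start, entry->end - entry->start, entry->start);
#endif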

#ifdef SMP

/*
 *	pmap_zpi_switchin*()
 *
 *	These functions allow us to avoid doing IPIs altogether in certain
 *	temporary page-mapping situations (page zeroing).  Instead, to deal
 *	with being preempted and moved onto a different cpu, we invalidate
 *	the page when the scheduler switches us back in.  This does not occur
 *	very often, so we remain relatively optimal with very little effort.
 */
static void
pmap_zpi_switchin12(void)
{
	invlpg((u_int)CADDR1);
	invlpg((u_int)CADDR2);
}

static void
pmap_zpi_switchin2(void)
{
	invlpg((u_int)CADDR2);
}

static void
pmap_zpi_switchin3(void)
{
	invlpg((u_int)CADDR3);
}

#endif
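
/*
 * Outline of the temporary-mapping protocol used by the
 * pmap_zero_page*() and pmap_copy_page() routines below; CMAPn/CADDRn
 * stand for any of the CMAP/CADDR pte/va pairs:
 *
 *	*CMAPn = PG_V | PG_RW | pa | PG_A | PG_M;	map the page
 *	curthread->td_switchin = pmap_zpi_switchinN;	revalidate on switch
 *	invlpg((u_int)CADDRn);				flush the local TLB
 *	(access the page through CADDRn)
 *	curthread->td_switchin = NULL;
 *	*CMAPn = 0;					release the window
 */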

/*
 *	pmap_zero_page zeros the specified hardware page by mapping
 *	the page into KVM and using bzero to clear its contents.
 */
void
pmap_zero_page(vm_page_t m)
{

	mtx_lock(&CMAPCADDR12_lock);
	if (*CMAP2)
		panic("pmap_zero_page: CMAP2 busy");
	*CMAP2 = PG_V | PG_RW | VM_PAGE_TO_PHYS(m) | PG_A | PG_M;
#ifdef I386_CPU
	invltlb();
#else
#ifdef SMP
	curthread->td_switchin = pmap_zpi_switchin2;
#endif
	invlpg((u_int)CADDR2);
#endif
#if defined(I686_CPU)
	if (cpu_class == CPUCLASS_686)
		i686_pagezero(CADDR2);
	else
#endif
		bzero(CADDR2, PAGE_SIZE);
#ifdef SMP
	curthread->td_switchin = NULL;
#endif
	*CMAP2 = 0;
	mtx_unlock(&CMAPCADDR12_lock);
}

/*
 *	pmap_zero_page_area zeros the specified hardware page by mapping
 *	the page into KVM and using bzero to clear its contents.
 *
 *	off and size may not cover an area beyond a single hardware page.
 */
void
pmap_zero_page_area(vm_page_t m, int off, int size)
{

	mtx_lock(&CMAPCADDR12_lock);
	if (*CMAP2)
		panic("pmap_zero_page_area: CMAP2 busy");
	*CMAP2 = PG_V | PG_RW | VM_PAGE_TO_PHYS(m) | PG_A | PG_M;
#ifdef I386_CPU
	invltlb();
#else
#ifdef SMP
	curthread->td_switchin = pmap_zpi_switchin2;
#endif
	invlpg((u_int)CADDR2);
#endif
#if defined(I686_CPU)
	if (cpu_class == CPUCLASS_686 && off == 0 && size == PAGE_SIZE)
		i686_pagezero(CADDR2);
	else
#endif
		bzero((char *)CADDR2 + off, size);
#ifdef SMP
	curthread->td_switchin = NULL;
#endif
	*CMAP2 = 0;
	mtx_unlock(&CMAPCADDR12_lock);
}

/*
 *	pmap_zero_page_idle zeros the specified hardware page by mapping
 *	the page into KVM and using bzero to clear its contents.  This
 *	is intended to be called from the vm_pagezero process only and
 *	outside of Giant.
 */
void
pmap_zero_page_idle(vm_page_t m)
{

	if (*CMAP3)
		panic("pmap_zero_page_idle: CMAP3 busy");
	*CMAP3 = PG_V | PG_RW | VM_PAGE_TO_PHYS(m) | PG_A | PG_M;
#ifdef I386_CPU
	invltlb();
#else
#ifdef SMP
	curthread->td_switchin = pmap_zpi_switchin3;
#endif
	invlpg((u_int)CADDR3);
#endif
#if defined(I686_CPU)
	if (cpu_class == CPUCLASS_686)
		i686_pagezero(CADDR3);
	else
#endif
		bzero(CADDR3, PAGE_SIZE);
#ifdef SMP
	curthread->td_switchin = NULL;
#endif
	*CMAP3 = 0;
}

/*
 *	pmap_copy_page copies the specified (machine independent)
 *	page by mapping the page into virtual memory and using
 *	bcopy to copy the page, one machine dependent page at a
 *	time.
 */
void
pmap_copy_page(vm_page_t src, vm_page_t dst)
{

	mtx_lock(&CMAPCADDR12_lock);
	if (*CMAP1)
		panic("pmap_copy_page: CMAP1 busy");
	if (*CMAP2)
		panic("pmap_copy_page: CMAP2 busy");
	*CMAP1 = PG_V | VM_PAGE_TO_PHYS(src) | PG_A;
	*CMAP2 = PG_V | PG_RW | VM_PAGE_TO_PHYS(dst) | PG_A | PG_M;
#ifdef I386_CPU
	invltlb();
#else
#ifdef SMP
	curthread->td_switchin = pmap_zpi_switchin12;
#endif
	invlpg((u_int)CADDR1);
	invlpg((u_int)CADDR2);
#endif
	bcopy(CADDR1, CADDR2, PAGE_SIZE);
#ifdef SMP
	curthread->td_switchin = NULL;
#endif
	*CMAP1 = 0;
	*CMAP2 = 0;
	mtx_unlock(&CMAPCADDR12_lock);
}

/*
 * Returns true if the pmap's pv is one of the first
 * 16 pvs linked to from this page.  This count may
 * be changed upwards or downwards in the future; it
 * is only necessary that true be returned for a small
 * subset of pmaps for proper page aging.
 */
boolean_t
pmap_page_exists_quick(pmap, m)
	pmap_t pmap;
	vm_page_t m;
{
	pv_entry_t pv;
	int loops = 0;
	int s;

	if (!pmap_initialized || (m->flags & PG_FICTITIOUS))
		return FALSE;

	s = splvm();
	mtx_assert(&vm_page_queue_mtx, MA_OWNED);
	TAILQ_FOREACH(pv, &m->md.pv_list, pv_list) {
		if (pv->pv_pmap == pmap) {
			splx(s);
			return TRUE;
		}
		loops++;
		if (loops >= 16)
			break;
	}
	splx(s);
	return (FALSE);
}

#define PMAP_REMOVE_PAGES_CURPROC_ONLY
/*
 * Remove all pages from the specified address space; this aids process
 * exit speeds.  Also, this code is special-cased for the current process
 * only, but can have the more generic (and slightly slower) mode enabled.
 * This is much faster than pmap_remove in the case of running down an
 * entire address space.
 */
void
pmap_remove_pages(pmap, sva, eva)
	pmap_t pmap;
	vm_offset_t sva, eva;
{
	pt_entry_t *pte, tpte;
	vm_page_t m;
	pv_entry_t pv, npv;
	int s;

#ifdef PMAP_REMOVE_PAGES_CURPROC_ONLY
	if (!curthread || (pmap != vmspace_pmap(curthread->td_proc->p_vmspace))) {
		printf("warning: pmap_remove_pages called with non-current pmap\n");
		return;
	}
#endif
	mtx_assert(&vm_page_queue_mtx, MA_OWNED);
	s = splvm();
	for (pv = TAILQ_FIRST(&pmap->pm_pvlist); pv; pv = npv) {

		if (pv->pv_va >= eva || pv->pv_va < sva) {
			npv = TAILQ_NEXT(pv, pv_plist);
			continue;
		}

#ifdef PMAP_REMOVE_PAGES_CURPROC_ONLY
		pte = vtopte(pv->pv_va);
#else
		pte = pmap_pte_quick(pv->pv_pmap, pv->pv_va);
#endif
		tpte = *pte;

		if (tpte == 0) {
			printf("TPTE at %p IS ZERO @ VA %08x\n",
			    pte, pv->pv_va);
			panic("bad pte");
		}

		/*
		 * We cannot remove wired pages from a process's mapping
		 * at this time.
		 */
		if (tpte & PG_W) {
			npv = TAILQ_NEXT(pv, pv_plist);
			continue;
		}

		m = PHYS_TO_VM_PAGE(tpte);
		KASSERT(m->phys_addr == (tpte & PG_FRAME),
		    ("vm_page_t %p phys_addr mismatch %016jx %016jx",
		    m, (uintmax_t)m->phys_addr, (uintmax_t)tpte));

		KASSERT(m < &vm_page_array[vm_page_array_size],
			("pmap_remove_pages: bad tpte %#jx", (uintmax_t)tpte));

		pv->pv_pmap->pm_stats.resident_count--;

		pte_clear(pte);

		/*
		 * Update the vm_page_t clean and reference bits.
		 */
		if (tpte & PG_M) {
			vm_page_dirty(m);
		}

		npv = TAILQ_NEXT(pv, pv_plist);
		TAILQ_REMOVE(&pv->pv_pmap->pm_pvlist, pv, pv_plist);

		m->md.pv_list_count--;
		TAILQ_REMOVE(&m->md.pv_list, pv, pv_list);
		if (TAILQ_FIRST(&m->md.pv_list) == NULL) {
			vm_page_flag_clear(m, PG_WRITEABLE);
		}

		pmap_unuse_pt(pv->pv_pmap, pv->pv_va, pv->pv_ptem);
		free_pv_entry(pv);
	}
	splx(s);
	pmap_invalidate_all(pmap);
}
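
/*
 * Illustrative sketch (not built): exit/exec teardown is the intended
 * caller, running down every user mapping in one pass.  The page queues
 * lock is shown because the function asserts it above.
 */
#if 0
	vm_page_lock_queues();
	pmap_remove_pages(vmspace_pmap(vm), VM_MIN_ADDRESS,
	    VM_MAXUSER_ADDRESS);
	vm_page_unlock_queues();
#endif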

/*
 *	pmap_is_modified:
 *
 *	Return whether or not the specified physical page was modified
 *	in any physical maps.
 */
boolean_t
pmap_is_modified(vm_page_t m)
{
	pv_entry_t pv;
	pt_entry_t *pte;
	int s;

	if (!pmap_initialized || (m->flags & PG_FICTITIOUS))
		return FALSE;

	s = splvm();
	mtx_assert(&vm_page_queue_mtx, MA_OWNED);
	TAILQ_FOREACH(pv, &m->md.pv_list, pv_list) {
		/*
		 * Addresses in the clean submap are never tracked for
		 * modification; treat their ptes as never modified.
		 */
		if (!pmap_track_modified(pv->pv_va))
			continue;
#if defined(PMAP_DIAGNOSTIC)
		if (!pv->pv_pmap) {
			printf("Null pmap (tb) at va: 0x%x\n", pv->pv_va);
			continue;
		}
#endif
		pte = pmap_pte_quick(pv->pv_pmap, pv->pv_va);
		if (*pte & PG_M) {
			splx(s);
			return TRUE;
		}
	}
	splx(s);
	return (FALSE);
}

/*
 *	Clear the given bit in each of the given page's ptes.
 */
static __inline void
pmap_clear_ptes(vm_page_t m, int bit)
{
	register pv_entry_t pv;
	pt_entry_t pbits, *pte;
	int s;

	if (!pmap_initialized || (m->flags & PG_FICTITIOUS) ||
	    (bit == PG_RW && (m->flags & PG_WRITEABLE) == 0))
		return;

	s = splvm();
	mtx_assert(&vm_page_queue_mtx, MA_OWNED);
	/*
	 * Loop over all current mappings, setting/clearing as appropriate.
	 * If setting RO, do we need to clear the VAC?
	 */
	TAILQ_FOREACH(pv, &m->md.pv_list, pv_list) {
		/*
		 * Don't write protect pager mappings.
		 */
		if (bit == PG_RW) {
			if (!pmap_track_modified(pv->pv_va))
				continue;
		}

#if defined(PMAP_DIAGNOSTIC)
		if (!pv->pv_pmap) {
			printf("Null pmap (cb) at va: 0x%x\n", pv->pv_va);
			continue;
		}
#endif

		pte = pmap_pte_quick(pv->pv_pmap, pv->pv_va);
		pbits = *pte;
		if (pbits & bit) {
			if (bit == PG_RW) {
				if (pbits & PG_M) {
					vm_page_dirty(m);
				}
				pte_store(pte, pbits & ~(PG_M|PG_RW));
			} else {
				pte_store(pte, pbits & ~bit);
			}
			pmap_invalidate_page(pv->pv_pmap, pv->pv_va);
		}
	}
	if (bit == PG_RW)
		vm_page_flag_clear(m, PG_WRITEABLE);
	splx(s);
}

/*
 *	pmap_page_protect:
 *
 *	Lower the permission for all mappings to a given page.
 */
void
pmap_page_protect(vm_page_t m, vm_prot_t prot)
{
	if ((prot & VM_PROT_WRITE) == 0) {
		if (prot & (VM_PROT_READ | VM_PROT_EXECUTE)) {
			pmap_clear_ptes(m, PG_RW);
		} else {
			pmap_remove_all(m);
		}
	}
}

/*
 *	pmap_ts_referenced:
 *
 *	Return a count of reference bits for a page, clearing those bits.
 *	It is not necessary for every reference bit to be cleared, but it
 *	is necessary that 0 only be returned when there are truly no
 *	reference bits set.
 *
 *	XXX: The exact number of bits to check and clear is a matter that
 *	should be tested and standardized at some point in the future for
 *	optimal aging of shared pages.
 */
int
pmap_ts_referenced(vm_page_t m)
{
	register pv_entry_t pv, pvf, pvn;
	pt_entry_t *pte;
	pt_entry_t v;
	int s;
	int rtval = 0;

	if (!pmap_initialized || (m->flags & PG_FICTITIOUS))
		return (rtval);

	s = splvm();
	mtx_assert(&vm_page_queue_mtx, MA_OWNED);
	if ((pv = TAILQ_FIRST(&m->md.pv_list)) != NULL) {
		pvf = pv;
		do {
			pvn = TAILQ_NEXT(pv, pv_list);
			TAILQ_REMOVE(&m->md.pv_list, pv, pv_list);
			TAILQ_INSERT_TAIL(&m->md.pv_list, pv, pv_list);
			if (!pmap_track_modified(pv->pv_va))
				continue;
			pte = pmap_pte_quick(pv->pv_pmap, pv->pv_va);
			if (pte && ((v = pte_load(pte)) & PG_A) != 0) {
				pte_store(pte, v & ~PG_A);
				pmap_invalidate_page(pv->pv_pmap, pv->pv_va);
				rtval++;
				if (rtval > 4) {
					break;
				}
			}
		} while ((pv = pvn) != NULL && pv != pvf);
	}
	splx(s);

	return (rtval);
}
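
/*
 * For example, a page referenced through three of its mappings since
 * the last scan returns 3 here (assuming all three are in tracked
 * ranges), letting the pageout daemon weight page activity; an
 * unreferenced page returns 0.  This restates the contract above and
 * adds no extra guarantee.
 */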

/*
 *	Clear the modify bits on the specified physical page.
 */
void
pmap_clear_modify(vm_page_t m)
{
	pmap_clear_ptes(m, PG_M);
}

/*
 *	pmap_clear_reference:
 *
 *	Clear the reference bit on the specified physical page.
 */
void
pmap_clear_reference(vm_page_t m)
{
	pmap_clear_ptes(m, PG_A);
}

/*
 * Miscellaneous support routines follow
 */

static void
i386_protection_init()
{
	register int *kp, prot;

	kp = protection_codes;
	for (prot = 0; prot < 8; prot++) {
		switch (prot) {
		case VM_PROT_NONE | VM_PROT_NONE | VM_PROT_NONE:
			/*
			 * Read access is also 0. There isn't any execute bit,
			 * so just make it readable.
			 */
		case VM_PROT_READ | VM_PROT_NONE | VM_PROT_NONE:
		case VM_PROT_READ | VM_PROT_NONE | VM_PROT_EXECUTE:
		case VM_PROT_NONE | VM_PROT_NONE | VM_PROT_EXECUTE:
			*kp++ = 0;
			break;
		case VM_PROT_NONE | VM_PROT_WRITE | VM_PROT_NONE:
		case VM_PROT_NONE | VM_PROT_WRITE | VM_PROT_EXECUTE:
		case VM_PROT_READ | VM_PROT_WRITE | VM_PROT_NONE:
		case VM_PROT_READ | VM_PROT_WRITE | VM_PROT_EXECUTE:
			*kp++ = PG_RW;
			break;
		}
	}
}
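
/*
 * Worked example: VM_PROT_READ is 1 and VM_PROT_WRITE is 2, so
 * protection_codes[VM_PROT_READ | VM_PROT_WRITE] == protection_codes[3]
 * == PG_RW, while every combination lacking VM_PROT_WRITE maps to 0:
 * i386 has no execute bit, and any present page is readable.
 */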

/*
 * Map a set of physical memory pages into the kernel virtual
 * address space. Return a pointer to where it is mapped. This
 * routine is intended to be used for mapping device memory,
 * NOT real memory.
 */
void *
pmap_mapdev(pa, size)
	vm_paddr_t pa;
	vm_size_t size;
{
	vm_offset_t va, tmpva, offset;

	offset = pa & PAGE_MASK;
	size = roundup(offset + size, PAGE_SIZE);

	va = kmem_alloc_nofault(kernel_map, size);
	if (!va)
		panic("pmap_mapdev: Couldn't alloc kernel virtual memory");

	pa = pa & PG_FRAME;
	for (tmpva = va; size > 0; ) {
		pmap_kenter(tmpva, pa);
		size -= PAGE_SIZE;
		tmpva += PAGE_SIZE;
		pa += PAGE_SIZE;
	}
	pmap_invalidate_range(kernel_pmap, va, tmpva);
	return ((void *)(va + offset));
}

void
pmap_unmapdev(va, size)
	vm_offset_t va;
	vm_size_t size;
{
	vm_offset_t base, offset, tmpva;
	pt_entry_t *pte;

	base = va & PG_FRAME;
	offset = va & PAGE_MASK;
	size = roundup(offset + size, PAGE_SIZE);
	for (tmpva = base; tmpva < (base + size); tmpva += PAGE_SIZE) {
		pte = vtopte(tmpva);
		pte_clear(pte);
	}
	pmap_invalidate_range(kernel_pmap, va, tmpva);
	kmem_free(kernel_map, base, size);
}
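
/*
 * Illustrative sketch (hypothetical device address, not built): a
 * driver maps a 4KB register window, programs the device, and releases
 * the mapping.
 */
#if 0
	void *regs;

	regs = pmap_mapdev(0xfebf0000, 0x1000);
	/* ... access device registers through "regs" ... */
	pmap_unmapdev((vm_offset_t)regs, 0x1000);
#endif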

/*
 * Perform the pmap work for mincore.
 */
int
pmap_mincore(pmap, addr)
	pmap_t pmap;
	vm_offset_t addr;
{
	pt_entry_t *ptep, pte;
	vm_page_t m;
	int val = 0;

	ptep = pmap_pte_quick(pmap, addr);
	if (ptep == NULL) {
		return 0;
	}

	if ((pte = *ptep) != 0) {
		vm_paddr_t pa;

		val = MINCORE_INCORE;
		if ((pte & PG_MANAGED) == 0)
			return val;

		pa = pte & PG_FRAME;

		m = PHYS_TO_VM_PAGE(pa);

		/*
		 * Modified by us
		 */
		if (pte & PG_M)
			val |= MINCORE_MODIFIED|MINCORE_MODIFIED_OTHER;
		else {
			/*
			 * Modified by someone else
			 */
			vm_page_lock_queues();
			if (m->dirty || pmap_is_modified(m))
				val |= MINCORE_MODIFIED_OTHER;
			vm_page_unlock_queues();
		}
		/*
		 * Referenced by us
		 */
		if (pte & PG_A)
			val |= MINCORE_REFERENCED|MINCORE_REFERENCED_OTHER;
		else {
			/*
			 * Referenced by someone else
			 */
			vm_page_lock_queues();
			if ((m->flags & PG_REFERENCED) ||
			    pmap_ts_referenced(m)) {
				val |= MINCORE_REFERENCED_OTHER;
				vm_page_flag_set(m, PG_REFERENCED);
			}
			vm_page_unlock_queues();
		}
	}
	return val;
}
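
/*
 * For example, a resident page that this pmap has itself written and
 * referenced returns MINCORE_INCORE | MINCORE_MODIFIED |
 * MINCORE_MODIFIED_OTHER | MINCORE_REFERENCED | MINCORE_REFERENCED_OTHER,
 * per the PG_M and PG_A cases above.
 */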

void
pmap_activate(struct thread *td)
{
	struct proc *p = td->td_proc;
	pmap_t	pmap, oldpmap;
	u_int32_t cr3;

	critical_enter();
	pmap = vmspace_pmap(td->td_proc->p_vmspace);
	oldpmap = PCPU_GET(curpmap);
#if defined(SMP)
	atomic_clear_int(&oldpmap->pm_active, PCPU_GET(cpumask));
	atomic_set_int(&pmap->pm_active, PCPU_GET(cpumask));
#else
	oldpmap->pm_active &= ~1;
	pmap->pm_active |= 1;
#endif
#ifdef PAE
	cr3 = vtophys(pmap->pm_pdpt);
#else
	cr3 = vtophys(pmap->pm_pdir);
#endif
	/*
	 * XXXKSE this is wrong.
	 * pmap_activate is for the current thread on the current cpu.
	 */
	if (p->p_flag & P_SA) {
		/* Make sure all other cr3 entries are updated. */
		/* What if they are running?  XXXKSE (maybe abort them) */
		FOREACH_THREAD_IN_PROC(p, td) {
			td->td_pcb->pcb_cr3 = cr3;
		}
	} else {
		td->td_pcb->pcb_cr3 = cr3;
	}
	load_cr3(cr3);
	PCPU_SET(curpmap, pmap);
	critical_exit();
}

vm_offset_t
pmap_addr_hint(vm_object_t obj, vm_offset_t addr, vm_size_t size)
{

	if ((obj == NULL) || (size < NBPDR) || (obj->type != OBJT_DEVICE)) {
		return addr;
	}

	addr = (addr + (NBPDR - 1)) & ~(NBPDR - 1);
	return addr;
}

#if defined(PMAP_DEBUG)
int
pmap_pid_dump(int pid)
{
	pmap_t pmap;
	struct proc *p;
	int npte = 0;
	int index;

	sx_slock(&allproc_lock);
	LIST_FOREACH(p, &allproc, p_list) {
		if (p->p_pid != pid)
			continue;

		if (p->p_vmspace) {
			int i, j;
			index = 0;
			pmap = vmspace_pmap(p->p_vmspace);
			for (i = 0; i < NPDEPTD; i++) {
				pd_entry_t *pde;
				pt_entry_t *pte;
				vm_offset_t base = i << PDRSHIFT;

				pde = &pmap->pm_pdir[i];
				if (pde && pmap_pde_v(pde)) {
					for (j = 0; j < NPTEPG; j++) {
						vm_offset_t va = base + (j << PAGE_SHIFT);
						if (va >= (vm_offset_t) VM_MIN_KERNEL_ADDRESS) {
							if (index) {
								index = 0;
								printf("\n");
							}
							sx_sunlock(&allproc_lock);
							return npte;
						}
						pte = pmap_pte_quick(pmap, va);
						if (pte && pmap_pte_v(pte)) {
							pt_entry_t pa;
							vm_page_t m;
							pa = *pte;
							m = PHYS_TO_VM_PAGE(pa);
							printf("va: 0x%x, pt: 0x%x, h: %d, w: %d, f: 0x%x",
								va, pa, m->hold_count, m->wire_count, m->flags);
							npte++;
							index++;
							if (index >= 2) {
								index = 0;
								printf("\n");
							} else {
								printf(" ");
							}
						}
					}
				}
			}
		}
	}
	sx_sunlock(&allproc_lock);
	return npte;
}
#endif

#if defined(DEBUG)

static void	pads(pmap_t pm);
void		pmap_pvdump(vm_paddr_t pa);

/* print address space of pmap */
static void
pads(pm)
	pmap_t pm;
{
	int i, j;
	vm_offset_t va;
	pt_entry_t *ptep;

	if (pm == kernel_pmap)
		return;
	for (i = 0; i < NPDEPTD; i++)
		if (pm->pm_pdir[i])
			for (j = 0; j < NPTEPG; j++) {
				va = (i << PDRSHIFT) + (j << PAGE_SHIFT);
				if (pm == kernel_pmap && va < KERNBASE)
					continue;
				if (pm != kernel_pmap && va > UPT_MAX_ADDRESS)
					continue;
				ptep = pmap_pte_quick(pm, va);
				if (pmap_pte_v(ptep))
					printf("%x:%jx ", va, (uintmax_t)*ptep);
			}
}

void
pmap_pvdump(pa)
	vm_paddr_t pa;
{
	pv_entry_t pv;
	vm_page_t m;

	printf("pa %jx", (uintmax_t)pa);
	m = PHYS_TO_VM_PAGE(pa);
	TAILQ_FOREACH(pv, &m->md.pv_list, pv_list) {
		printf(" -> pmap %p, va %x", (void *)pv->pv_pmap, pv->pv_va);
		pads(pv->pv_pmap);
	}
	printf(" ");
}
#endif