pmap.c revision 44470
1/*
2 * Copyright (c) 1991 Regents of the University of California.
3 * All rights reserved.
4 * Copyright (c) 1994 John S. Dyson
5 * All rights reserved.
6 * Copyright (c) 1994 David Greenman
7 * All rights reserved.
8 *
9 * This code is derived from software contributed to Berkeley by
10 * the Systems Programming Group of the University of Utah Computer
11 * Science Department and William Jolitz of UUNET Technologies Inc.
12 *
13 * Redistribution and use in source and binary forms, with or without
14 * modification, are permitted provided that the following conditions
15 * are met:
16 * 1. Redistributions of source code must retain the above copyright
17 *    notice, this list of conditions and the following disclaimer.
18 * 2. Redistributions in binary form must reproduce the above copyright
19 *    notice, this list of conditions and the following disclaimer in the
20 *    documentation and/or other materials provided with the distribution.
21 * 3. All advertising materials mentioning features or use of this software
22 *    must display the following acknowledgement:
23 *	This product includes software developed by the University of
24 *	California, Berkeley and its contributors.
25 * 4. Neither the name of the University nor the names of its contributors
26 *    may be used to endorse or promote products derived from this software
27 *    without specific prior written permission.
28 *
29 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
30 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
31 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
32 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
33 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
34 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
35 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
36 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
37 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
38 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
39 * SUCH DAMAGE.
40 *
41 *	from:	@(#)pmap.c	7.7 (Berkeley)	5/12/91
42 *	$Id: pmap.c,v 1.223 1999/02/19 14:25:33 luoqi Exp $
43 */
44
45/*
46 *	Manages physical address maps.
47 *
48 *	In addition to hardware address maps, this
49 *	module is called upon to provide software-use-only
50 *	maps which may or may not be stored in the same
51 *	form as hardware maps.  These pseudo-maps are
52 *	used to store intermediate results from copy
53 *	operations to and from address spaces.
54 *
55 *	Since the information managed by this module is
56 *	also stored by the logical address mapping module,
57 *	this module may throw away valid virtual-to-physical
58 *	mappings at almost any time.  However, invalidations
59 *	of virtual-to-physical mappings must be done as
60 *	requested.
61 *
62 *	In order to cope with hardware architectures which
63 *	make virtual-to-physical map invalidations expensive,
64 *	this module may delay invalidation or protection-reduction
65 *	operations until such time as they are actually
66 *	necessary.  This module is given full information as
67 *	to which processors are currently using which maps,
68 *	and to when physical maps must be made correct.
69 */
70
71#include "opt_disable_pse.h"
72#include "opt_pmap.h"
73#include "opt_msgbuf.h"
74
75#include <sys/param.h>
76#include <sys/systm.h>
77#include <sys/proc.h>
78#include <sys/msgbuf.h>
79#include <sys/vmmeter.h>
80#include <sys/mman.h>
81
82#include <vm/vm.h>
83#include <vm/vm_param.h>
84#include <vm/vm_prot.h>
85#include <sys/lock.h>
86#include <vm/vm_kern.h>
87#include <vm/vm_page.h>
88#include <vm/vm_map.h>
89#include <vm/vm_object.h>
90#include <vm/vm_extern.h>
91#include <vm/vm_pageout.h>
92#include <vm/vm_pager.h>
93#include <vm/vm_zone.h>
94
95#include <sys/user.h>
96
97#include <machine/cputypes.h>
98#include <machine/md_var.h>
99#include <machine/specialreg.h>
100#if defined(SMP) || defined(APIC_IO)
101#include <machine/smp.h>
102#include <machine/apic.h>
103#endif /* SMP || APIC_IO */
104
105#define PMAP_KEEP_PDIRS
106#ifndef PMAP_SHPGPERPROC
107#define PMAP_SHPGPERPROC 200
108#endif
109
110#if defined(DIAGNOSTIC)
111#define PMAP_DIAGNOSTIC
112#endif
113
114#define MINPV 2048
115
116#if !defined(PMAP_DIAGNOSTIC)
117#define PMAP_INLINE __inline
118#else
119#define PMAP_INLINE
120#endif
121
122/*
123 * Get PDEs and PTEs for user/kernel address space
124 */
125#define	pmap_pde(m, v)	(&((m)->pm_pdir[(vm_offset_t)(v) >> PDRSHIFT]))
126#define pdir_pde(m, v) (m[(vm_offset_t)(v) >> PDRSHIFT])
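/*
 * Illustrative example: with PDRSHIFT = 22 on the i386, a va of
 * 0xbfbfe000 yields page directory index 0xbfbfe000 >> 22 == 0x2fe,
 * so pmap_pde(m, va) is &m->pm_pdir[0x2fe].  The remaining low 22
 * bits split into a 10-bit page table index and a 12-bit page offset.
 */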
127
128#define pmap_pde_v(pte)		((*(int *)pte & PG_V) != 0)
129#define pmap_pte_w(pte)		((*(int *)pte & PG_W) != 0)
130#define pmap_pte_m(pte)		((*(int *)pte & PG_M) != 0)
131#define pmap_pte_u(pte)		((*(int *)pte & PG_A) != 0)
132#define pmap_pte_v(pte)		((*(int *)pte & PG_V) != 0)
133
134#define pmap_pte_set_w(pte, v) ((v)?(*(int *)pte |= PG_W):(*(int *)pte &= ~PG_W))
135#define pmap_pte_set_prot(pte, v) ((*(int *)pte &= ~PG_PROT), (*(int *)pte |= (v)))
136
137/*
138 * Given a map and a machine independent protection code,
139 * convert to an i386 protection code.
140 */
141#define pte_prot(m, p)	(protection_codes[p])
142static int protection_codes[8];
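/*
 * A sketch of what i386_protection_init() (defined later in this
 * file) stores here: the i386 has no execute bit and cannot deny
 * reads on a present page, so each entry is essentially either 0
 * or PG_RW, depending on whether VM_PROT_WRITE was requested.
 */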
143
144#define	pa_index(pa)		atop((pa) - vm_first_phys)
145#define	pa_to_pvh(pa)		(&pv_table[pa_index(pa)])
146
147static struct pmap kernel_pmap_store;
148pmap_t kernel_pmap;
149extern pd_entry_t my_idlePTD;
150
151vm_offset_t avail_start;	/* PA of first available physical page */
152vm_offset_t avail_end;		/* PA of last available physical page */
153vm_offset_t virtual_avail;	/* VA of first avail page (after kernel bss) */
154vm_offset_t virtual_end;	/* VA of last avail page (end of kernel AS) */
155static boolean_t pmap_initialized = FALSE;	/* Has pmap_init completed? */
156static vm_offset_t vm_first_phys;
157static int pgeflag;		/* PG_G or-in */
158static int pseflag;		/* PG_PS or-in */
159static int pv_npg;
160
161static vm_object_t kptobj;
162
163static int nkpt;
164vm_offset_t kernel_vm_end;
165
166/*
167 * Data for the pv entry allocation mechanism
168 */
169static vm_zone_t pvzone;
170static struct vm_zone pvzone_store;
171static struct vm_object pvzone_obj;
172static int pv_entry_count=0, pv_entry_max=0, pv_entry_high_water=0;
173static int pmap_pagedaemon_waken = 0;
174static struct pv_entry *pvinit;
175
176/*
177 * All those kernel PT submaps that BSD is so fond of
178 */
179pt_entry_t *CMAP1 = 0;
180static pt_entry_t *CMAP2, *ptmmap;
181static pv_table_t *pv_table;
182caddr_t CADDR1 = 0, ptvmmap = 0;
183static caddr_t CADDR2;
184static pt_entry_t *msgbufmap;
185struct msgbuf *msgbufp=0;
186
187/*
188 *  PPro_vmtrr
189 */
190struct ppro_vmtrr PPro_vmtrr[NPPROVMTRR];
191
192/* AIO support */
193extern struct vmspace *aiovmspace;
194
195#ifdef SMP
196extern char prv_CPAGE1[], prv_CPAGE2[], prv_CPAGE3[];
197extern pt_entry_t *prv_CMAP1, *prv_CMAP2, *prv_CMAP3;
198extern pd_entry_t *IdlePTDS[];
199extern pt_entry_t SMP_prvpt[];
200#endif
201
202#ifdef SMP
203extern unsigned int prv_PPAGE1[];
204extern pt_entry_t *prv_PMAP1;
205#else
206static pt_entry_t *PMAP1 = 0;
207static unsigned *PADDR1 = 0;
208#endif
209
210static PMAP_INLINE void	free_pv_entry __P((pv_entry_t pv));
211static unsigned * get_ptbase __P((pmap_t pmap));
212static pv_entry_t get_pv_entry __P((void));
213static void	i386_protection_init __P((void));
214static void	pmap_changebit __P((vm_offset_t pa, int bit, boolean_t setem));
215
216static PMAP_INLINE int	pmap_is_managed __P((vm_offset_t pa));
217static void	pmap_remove_all __P((vm_offset_t pa));
218static vm_page_t pmap_enter_quick __P((pmap_t pmap, vm_offset_t va,
219				      vm_offset_t pa, vm_page_t mpte));
220static int pmap_remove_pte __P((struct pmap *pmap, unsigned *ptq,
221					vm_offset_t sva));
222static void pmap_remove_page __P((struct pmap *pmap, vm_offset_t va));
223static int pmap_remove_entry __P((struct pmap *pmap, pv_table_t *pv,
224					vm_offset_t va));
225static boolean_t pmap_testbit __P((vm_offset_t pa, int bit));
226static void pmap_insert_entry __P((pmap_t pmap, vm_offset_t va,
227		vm_page_t mpte, vm_offset_t pa));
228
229static vm_page_t pmap_allocpte __P((pmap_t pmap, vm_offset_t va));
230
231static int pmap_release_free_page __P((pmap_t pmap, vm_page_t p));
232static vm_page_t _pmap_allocpte __P((pmap_t pmap, unsigned ptepindex));
233static unsigned * pmap_pte_quick __P((pmap_t pmap, vm_offset_t va));
234static vm_page_t pmap_page_lookup __P((vm_object_t object, vm_pindex_t pindex));
235static int pmap_unuse_pt __P((pmap_t, vm_offset_t, vm_page_t));
236static vm_offset_t pmap_kmem_choose(vm_offset_t addr);
237void pmap_collect(void);
238
239static unsigned pdir4mb;
240
241/*
242 *	Routine:	pmap_pte
243 *	Function:
244 *		Extract the page table entry associated
245 *		with the given map/virtual_address pair.
246 */
247
248PMAP_INLINE unsigned *
249pmap_pte(pmap, va)
250	register pmap_t pmap;
251	vm_offset_t va;
252{
253	unsigned *pdeaddr;
254
255	if (pmap) {
256		pdeaddr = (unsigned *) pmap_pde(pmap, va);
257		if (*pdeaddr & PG_PS)
258			return pdeaddr;
259		if (*pdeaddr) {
260			return get_ptbase(pmap) + i386_btop(va);
261		}
262	}
263	return (0);
264}
265
266/*
267 * Move the kernel virtual free pointer to the next
268 * 4MB.  This is used to help improve performance
269 * by using a large (4MB) page for much of the kernel
270 * (.text, .data, .bss)
271 */
272static vm_offset_t
273pmap_kmem_choose(vm_offset_t addr) {
274	vm_offset_t newaddr = addr;
275#ifndef DISABLE_PSE
276	if (cpu_feature & CPUID_PSE) {
277		newaddr = (addr + (NBPDR - 1)) & ~(NBPDR - 1);
278	}
279#endif
280	return newaddr;
281}
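/*
 * E.g. (illustrative) with NBPDR = 4MB, an addr of 0xc0352000 rounds
 * up to 0xc0400000, so the kernel's PSE mapping can begin on a 4MB
 * boundary.
 */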
282
283/*
284 *	Bootstrap the system enough to run with virtual memory.
285 *
286 *	On the i386 this is called after mapping has already been enabled
287 *	and just syncs the pmap module with what has already been done.
288 *	[We can't call it easily with mapping off since the kernel is not
289 *	mapped with PA == VA, hence we would have to relocate every address
290 *	from the linked base (virtual) address "KERNBASE" to the actual
291 *	(physical) address starting relative to 0]
292 */
293void
294pmap_bootstrap(firstaddr, loadaddr)
295	vm_offset_t firstaddr;
296	vm_offset_t loadaddr;
297{
298	vm_offset_t va;
299	pt_entry_t *pte;
300#ifdef SMP
301	int i, j;
302#endif
303
304	avail_start = firstaddr;
305
306	/*
307	 * XXX The calculation of virtual_avail is wrong. It's NKPT*PAGE_SIZE too
308	 * large. It should instead be correctly calculated in locore.s and
309	 * not based on 'first' (which is a physical address, not a virtual
310	 * address, for the start of unused physical memory). The kernel
311	 * page tables are NOT double mapped and thus should not be included
312	 * in this calculation.
313	 */
314	virtual_avail = (vm_offset_t) KERNBASE + firstaddr;
315	virtual_avail = pmap_kmem_choose(virtual_avail);
316
317	virtual_end = VM_MAX_KERNEL_ADDRESS;
318
319	/*
320	 * Initialize protection array.
321	 */
322	i386_protection_init();
323
324	/*
325	 * The kernel's pmap is statically allocated so we don't have to use
326	 * pmap_create, which is unlikely to work correctly at this part of
327	 * the boot sequence (XXX and which no longer exists).
328	 */
329	kernel_pmap = &kernel_pmap_store;
330
331	kernel_pmap->pm_pdir = (pd_entry_t *) (KERNBASE + (u_int)IdlePTD);
332
333	kernel_pmap->pm_count = 1;
334	TAILQ_INIT(&kernel_pmap->pm_pvlist);
335	nkpt = NKPT;
336
337	/*
338	 * Reserve some special page table entries/VA space for temporary
339	 * mapping of pages.
340	 */
341#define	SYSMAP(c, p, v, n)	\
342	v = (c)va; va += ((n)*PAGE_SIZE); p = pte; pte += (n);
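/*
 * For example, SYSMAP(caddr_t, CMAP1, CADDR1, 1) below expands to:
 *	CADDR1 = (caddr_t)va; va += PAGE_SIZE; CMAP1 = pte; pte += 1;
 * i.e. it hands out one page of KVA (CADDR1) together with a pointer
 * to the pte (CMAP1) that maps it.
 */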
343
344	va = virtual_avail;
345	pte = (pt_entry_t *) pmap_pte(kernel_pmap, va);
346
347	/*
348	 * CMAP1/CMAP2 are used for zeroing and copying pages.
349	 */
350	SYSMAP(caddr_t, CMAP1, CADDR1, 1)
351	SYSMAP(caddr_t, CMAP2, CADDR2, 1)
352
353	/*
354	 * ptvmmap is used for reading arbitrary physical pages via /dev/mem.
355	 * XXX ptmmap is not used.
356	 */
357	SYSMAP(caddr_t, ptmmap, ptvmmap, 1)
358
359	/*
360	 * msgbufp is used to map the system message buffer.
361	 * XXX msgbufmap is not used.
362	 */
363	SYSMAP(struct msgbuf *, msgbufmap, msgbufp,
364	       atop(round_page(MSGBUF_SIZE)))
365
366#if !defined(SMP)
367	/*
368	 * ptemap is used for pmap_pte_quick
369	 */
370	SYSMAP(unsigned *, PMAP1, PADDR1, 1);
371#endif
372
373	virtual_avail = va;
374
375	*(int *) CMAP1 = *(int *) CMAP2 = 0;
376	*(int *) PTD = 0;
377
378
379	pgeflag = 0;
380#if !defined(SMP)
381	if (cpu_feature & CPUID_PGE) {
382		pgeflag = PG_G;
383	}
384#endif
385
386/*
387 * Initialize the 4MB page size flag
388 */
389	pseflag = 0;
390/*
391 * The 4MB page version of the initial
392 * kernel page mapping.
393 */
394	pdir4mb = 0;
395
396#if !defined(DISABLE_PSE)
397	if (cpu_feature & CPUID_PSE) {
398		unsigned ptditmp;
399		/*
400		 * Enable the PSE mode
401		 */
402		load_cr4(rcr4() | CR4_PSE);
403
404		/*
405		 * Note that we have enabled PSE mode
406		 */
407		pseflag = PG_PS;
408		ptditmp = *((unsigned *)PTmap + i386_btop(KERNBASE));
409		ptditmp &= ~(NBPDR - 1);
410		ptditmp |= PG_V | PG_RW | PG_PS | PG_U | pgeflag;
411		pdir4mb = ptditmp;
412		/*
413		 * We can do the mapping here for the single processor
414		 * case.  We simply ignore the old page table page from
415		 * now on.
416		 */
417#if !defined(SMP)
418		PTD[KPTDI] = (pd_entry_t) ptditmp;
419		kernel_pmap->pm_pdir[KPTDI] = (pd_entry_t) ptditmp;
420		invltlb();
421#endif
422	}
423#endif
424
425#ifdef SMP
426	if (cpu_apic_address == 0)
427		panic("pmap_bootstrap: no local apic!");
428
429	/* 0 = private page */
430	/* 1 = page table page */
431	/* 2 = local apic */
432	/* 16-31 = io apics */
433	SMP_prvpt[2] = (pt_entry_t)(PG_V | PG_RW | pgeflag |
434	    (cpu_apic_address & PG_FRAME));
435
436	for (i = 0; i < mp_napics; i++) {
437		for (j = 0; j < 16; j++) {
438			/* same page frame as a previous IO apic? */
439			if (((vm_offset_t)SMP_prvpt[j + 16] & PG_FRAME) ==
440			    (io_apic_address[0] & PG_FRAME)) {
441				ioapic[i] = (ioapic_t *)&SMP_ioapic[j * PAGE_SIZE];
442				break;
443			}
444			/* use this slot if available */
445			if (((vm_offset_t)SMP_prvpt[j + 16] & PG_FRAME) == 0) {
446				SMP_prvpt[j + 16] = (pt_entry_t)(PG_V | PG_RW |
447				    pgeflag | (io_apic_address[i] & PG_FRAME));
448				ioapic[i] = (ioapic_t *)&SMP_ioapic[j * PAGE_SIZE];
449				break;
450			}
451		}
452		if (j == 16)
453			panic("no space to map IO apic %d!", i);
454	}
455
456	/* BSP does this itself, AP's get it pre-set */
457	prv_CMAP1 = &SMP_prvpt[3 + UPAGES];
458	prv_CMAP2 = &SMP_prvpt[4 + UPAGES];
459	prv_CMAP3 = &SMP_prvpt[5 + UPAGES];
460	prv_PMAP1 = &SMP_prvpt[6 + UPAGES];
461#endif
462
463	invltlb();
464
465}
466
467void
468getmtrr()
469{
470	int i;
471
472	if (cpu_class == CPUCLASS_686) {
473		for(i = 0; i < NPPROVMTRR; i++) {
474			PPro_vmtrr[i].base = rdmsr(PPRO_VMTRRphysBase0 + i * 2);
475			PPro_vmtrr[i].mask = rdmsr(PPRO_VMTRRphysMask0 + i * 2);
476		}
477	}
478}
479
480void
481putmtrr()
482{
483	int i;
484
485	if (cpu_class == CPUCLASS_686) {
486		wbinvd();
487		for(i = 0; i < NPPROVMTRR; i++) {
488			wrmsr(PPRO_VMTRRphysBase0 + i * 2, PPro_vmtrr[i].base);
489			wrmsr(PPRO_VMTRRphysMask0 + i * 2, PPro_vmtrr[i].mask);
490		}
491	}
492}
493
494void
495pmap_setvidram(void)
496{
497#if 0
498	if (cpu_class == CPUCLASS_686) {
499		wbinvd();
500		/*
501		 * Set memory between 0-640K to be WB
502		 */
503		wrmsr(0x250, 0x0606060606060606LL);
504		wrmsr(0x258, 0x0606060606060606LL);
505		/*
506		 * Set normal, PC video memory to be WC
507		 */
508		wrmsr(0x259, 0x0101010101010101LL);
509	}
510#endif
511}
512
513void
514pmap_setdevram(unsigned long long basea, vm_offset_t sizea)
515{
516	int i, free, skip;
517	unsigned basepage, basepaget;
518	unsigned long long base;
519	unsigned long long mask;
520
521	if (cpu_class != CPUCLASS_686)
522		return;
523
524	free = -1;
525	skip = 0;
526	basea &= ~0xfff;
527	base = basea | 0x1;
528	mask = (long long) (0xfffffffffLL - ((long) sizea - 1)) | (long long) 0x800;
529	mask &= ~0x7ff;
530
531	basepage = (long long) (base >> 12);
532	for(i = 0; i < NPPROVMTRR; i++) {
533		PPro_vmtrr[i].base = rdmsr(PPRO_VMTRRphysBase0 + i * 2);
534		PPro_vmtrr[i].mask = rdmsr(PPRO_VMTRRphysMask0 + i * 2);
535		basepaget = (long long) (PPro_vmtrr[i].base >> 12);
536		if (basepage == basepaget)
537			skip = 1;
538		if ((PPro_vmtrr[i].mask & 0x800) == 0) {
539			if (free == -1)
540				free = i;
541		}
542	}
543
544	if (!skip && free != -1) {
545		wbinvd();
546		PPro_vmtrr[free].base = base;
547		PPro_vmtrr[free].mask = mask;
548		wrmsr(PPRO_VMTRRphysBase0 + free * 2, base);
549		wrmsr(PPRO_VMTRRphysMask0 + free * 2, mask);
550		printf(
551	"pmap: added WC mapping at page: 0x%x %x, size: %u mask: 0x%x %x\n",
552		    (u_int)(base >> 32), (u_int)base, sizea,
553		    (u_int)(mask >> 32), (u_int)mask);
554	}
555}
556
557/*
558 * Set 4mb pdir for mp startup, and global flags
559 */
560void
561pmap_set_opt(unsigned *pdir) {
562	int i;
563
564	if (pseflag && (cpu_feature & CPUID_PSE)) {
565		load_cr4(rcr4() | CR4_PSE);
566		if (pdir4mb) {
567			pdir[KPTDI] = pdir4mb;
568		}
569	}
570
571	if (pgeflag && (cpu_feature & CPUID_PGE)) {
572		load_cr4(rcr4() | CR4_PGE);
573		for(i = KPTDI; i < KPTDI + nkpt; i++) {
574			if (pdir[i]) {
575				pdir[i] |= PG_G;
576			}
577		}
578	}
579}
580
581/*
582 * Setup the PTD for the boot processor
583 */
584void
585pmap_set_opt_bsp(void)
586{
587	pmap_set_opt((unsigned *)kernel_pmap->pm_pdir);
588	pmap_set_opt((unsigned *)PTD);
589	invltlb();
590}
591
592/*
593 *	Initialize the pmap module.
594 *	Called by vm_init, to initialize any structures that the pmap
595 *	system needs to map virtual memory.
596 *	pmap_init has been enhanced to support discontiguous
597 *	physical memory in a fairly consistent way.
598 */
599void
600pmap_init(phys_start, phys_end)
601	vm_offset_t phys_start, phys_end;
602{
603	vm_offset_t addr;
604	vm_size_t s;
605	int i;
606	int initial_pvs;
607
608	/*
609	 * object for kernel page table pages
610	 */
611	kptobj = vm_object_allocate(OBJT_DEFAULT, NKPDE);
612
613	/*
614	 * calculate the number of pv_entries needed
615	 */
616	vm_first_phys = phys_avail[0];
617	for (i = 0; phys_avail[i + 1]; i += 2);
618	pv_npg = (phys_avail[(i - 2) + 1] - vm_first_phys) / PAGE_SIZE;
619
620	/*
621	 * Allocate memory for random pmap data structures.  Includes the
622	 * pv_head_table.
623	 */
624	s = (vm_size_t) (sizeof(pv_table_t) * pv_npg);
625	s = round_page(s);
626
627	addr = (vm_offset_t) kmem_alloc(kernel_map, s);
628	pv_table = (pv_table_t *) addr;
629	for(i = 0; i < pv_npg; i++) {
630		vm_offset_t pa;
631		TAILQ_INIT(&pv_table[i].pv_list);
632		pv_table[i].pv_list_count = 0;
633		pa = vm_first_phys + i * PAGE_SIZE;
634		pv_table[i].pv_vm_page = PHYS_TO_VM_PAGE(pa);
635	}
636
637	/*
638	 * init the pv free list
639	 */
640	initial_pvs = pv_npg;
641	if (initial_pvs < MINPV)
642		initial_pvs = MINPV;
643	pvzone = &pvzone_store;
644	pvinit = (struct pv_entry *) kmem_alloc(kernel_map,
645		initial_pvs * sizeof (struct pv_entry));
646	zbootinit(pvzone, "PV ENTRY", sizeof (struct pv_entry), pvinit, pv_npg);
647
648	/*
649	 * Now it is safe to enable pv_table recording.
650	 */
651	pmap_initialized = TRUE;
652}
653
654/*
655 * Initialize the address space (zone) for the pv_entries.  Set a
656 * high water mark so that the system can recover from excessive
657 * numbers of pv entries.
658 */
659void
660pmap_init2() {
661	pv_entry_max = PMAP_SHPGPERPROC * maxproc + pv_npg;
662	pv_entry_high_water = 9 * (pv_entry_max / 10);
663	zinitna(pvzone, &pvzone_obj, NULL, 0, pv_entry_max, ZONE_INTERRUPT, 1);
664}
665
666/*
667 *	Used to map a range of physical addresses into kernel
668 *	virtual address space.
669 *
670 *	For now, VM is already on; we only need to map the
671 *	specified memory.
672 */
673vm_offset_t
674pmap_map(virt, start, end, prot)
675	vm_offset_t virt;
676	vm_offset_t start;
677	vm_offset_t end;
678	int prot;
679{
680	while (start < end) {
681		pmap_enter(kernel_pmap, virt, start, prot, FALSE);
682		virt += PAGE_SIZE;
683		start += PAGE_SIZE;
684	}
685	return (virt);
686}
687
688
689/***************************************************
690 * Low level helper routines.....
691 ***************************************************/
692
693#if defined(PMAP_DIAGNOSTIC)
694
695/*
696 * This code checks for non-writeable/modified pages.
697 * This should be an invalid condition.
698 */
699static int
700pmap_nw_modified(pt_entry_t ptea) {
701	int pte;
702
703	pte = (int) ptea;
704
705	if ((pte & (PG_M|PG_RW)) == PG_M)
706		return 1;
707	else
708		return 0;
709}
710#endif
711
712
713/*
714 * this routine defines the region(s) of memory that should
715 * not be tested for the modified bit.
716 */
717static PMAP_INLINE int
718pmap_track_modified( vm_offset_t va) {
719	if ((va < clean_sva) || (va >= clean_eva))
720		return 1;
721	else
722		return 0;
723}
724
725static PMAP_INLINE void
726invltlb_1pg( vm_offset_t va) {
727#if defined(I386_CPU)
728	if (cpu_class == CPUCLASS_386) {
729		invltlb();
730	} else
731#endif
732	{
733		invlpg(va);
734	}
735}
736
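/*
 * get_ptbase() below relies on the i386 recursive-mapping trick: the
 * pde at index PTDPTDI points back at the page directory itself, so
 * the page tables of the current address space appear as one linear
 * 4MB array of ptes at PTmap.  A second slot (APTDpde/APTmap) gives
 * the same window onto a non-current pmap and is repointed on demand.
 */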
737static unsigned *
738get_ptbase(pmap)
739	pmap_t pmap;
740{
741	unsigned frame = (unsigned) pmap->pm_pdir[PTDPTDI] & PG_FRAME;
742
743	/* are we current address space or kernel? */
744	if (pmap == kernel_pmap || frame == (((unsigned) PTDpde) & PG_FRAME)) {
745		return (unsigned *) PTmap;
746	}
747	/* otherwise, we are alternate address space */
748	if (frame != (((unsigned) APTDpde) & PG_FRAME)) {
749		APTDpde = (pd_entry_t) (frame | PG_RW | PG_V);
750#if defined(SMP)
751		/* The page directory is not shared between CPUs */
752		cpu_invltlb();
753#else
754		invltlb();
755#endif
756	}
757	return (unsigned *) APTmap;
758}
759
760/*
761 * Super fast pmap_pte routine best used when scanning
762 * the pv lists.  This eliminates many coarse-grained
763 * invltlb calls.  Note that many of the pv list
764 * scans are across different pmaps.  It is very wasteful
765 * to do an entire invltlb for checking a single mapping.
766 */
767
768static unsigned *
769pmap_pte_quick(pmap, va)
770	register pmap_t pmap;
771	vm_offset_t va;
772{
773	unsigned pde, newpf;
774	if ((pde = (unsigned) pmap->pm_pdir[va >> PDRSHIFT]) != 0) {
775		unsigned frame = (unsigned) pmap->pm_pdir[PTDPTDI] & PG_FRAME;
776		unsigned index = i386_btop(va);
777		/* are we current address space or kernel? */
778		if ((pmap == kernel_pmap) ||
779			(frame == (((unsigned) PTDpde) & PG_FRAME))) {
780			return (unsigned *) PTmap + index;
781		}
782		newpf = pde & PG_FRAME;
783#ifdef SMP
784		if ( ((* (unsigned *) prv_PMAP1) & PG_FRAME) != newpf) {
785			* (unsigned *) prv_PMAP1 = newpf | PG_RW | PG_V;
786			cpu_invlpg(&prv_PPAGE1);
787		}
788		return prv_PPAGE1 + ((unsigned) index & (NPTEPG - 1));
789#else
790		if ( ((* (unsigned *) PMAP1) & PG_FRAME) != newpf) {
791			* (unsigned *) PMAP1 = newpf | PG_RW | PG_V;
792			invltlb_1pg((vm_offset_t) PADDR1);
793		}
794		return PADDR1 + ((unsigned) index & (NPTEPG - 1));
795#endif
796	}
797	return (0);
798}
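/*
 * The single private pte (PMAP1 or prv_PMAP1) acts as a one-entry
 * window: PADDR1 is a fixed KVA page whose backing frame is switched
 * to whichever page table is being inspected, so only that one page
 * needs an invlpg rather than a full invltlb.
 */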
799
800/*
801 *	Routine:	pmap_extract
802 *	Function:
803 *		Extract the physical page address associated
804 *		with the given map/virtual_address pair.
805 */
806vm_offset_t
807pmap_extract(pmap, va)
808	register pmap_t pmap;
809	vm_offset_t va;
810{
811	vm_offset_t rtval;
812	vm_offset_t pdirindex;
813	pdirindex = va >> PDRSHIFT;
814	if (pmap && (rtval = (unsigned) pmap->pm_pdir[pdirindex])) {
815		unsigned *pte;
816		if ((rtval & PG_PS) != 0) {
817			rtval &= ~(NBPDR - 1);
818			rtval |= va & (NBPDR - 1);
819			return rtval;
820		}
821		pte = get_ptbase(pmap) + i386_btop(va);
822		rtval = ((*pte & PG_FRAME) | (va & PAGE_MASK));
823		return rtval;
824	}
825	return 0;
826
827}
828
829/*
830 * determine if a page is managed (memory vs. device)
831 */
832static PMAP_INLINE int
833pmap_is_managed(pa)
834	vm_offset_t pa;
835{
836	int i;
837
838	if (!pmap_initialized)
839		return 0;
840
841	for (i = 0; phys_avail[i + 1]; i += 2) {
842		if (pa < phys_avail[i + 1] && pa >= phys_avail[i])
843			return 1;
844	}
845	return 0;
846}
847
848
849/***************************************************
850 * Low level mapping routines.....
851 ***************************************************/
852
853/*
854 * Add a list of wired pages to the kva.
855 * This routine is only used for temporary
856 * kernel mappings that do not need to have
857 * page modification or references recorded.
858 * Note that old mappings are simply written
859 * over.  The page *must* be wired.
860 */
861void
862pmap_qenter(va, m, count)
863	vm_offset_t va;
864	vm_page_t *m;
865	int count;
866{
867	int i;
868	register unsigned *pte;
869
870	for (i = 0; i < count; i++) {
871		vm_offset_t tva = va + i * PAGE_SIZE;
872		unsigned npte = VM_PAGE_TO_PHYS(m[i]) | PG_RW | PG_V | pgeflag;
873		unsigned opte;
874		pte = (unsigned *)vtopte(tva);
875		opte = *pte;
876		*pte = npte;
877		if (opte)
878			invltlb_1pg(tva);
879	}
880}
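/*
 * Typical (illustrative) use: map the pages backing a buffer into a
 * reserved KVA range with pmap_qenter(), operate on them, then tear
 * the range down with pmap_qremove().  No pv entries are created for
 * these transient mappings, so they are invisible to pmap_remove_all().
 */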
881
882/*
883 * this routine jerks page mappings from the
884 * kernel -- it is meant only for temporary mappings.
885 */
886void
887pmap_qremove(va, count)
888	vm_offset_t va;
889	int count;
890{
891	int i;
892	register unsigned *pte;
893
894	for (i = 0; i < count; i++) {
895		pte = (unsigned *)vtopte(va);
896		*pte = 0;
897		invltlb_1pg(va);
898		va += PAGE_SIZE;
899	}
900}
901
902/*
903 * add a wired page to the kva
904 * note that in order for the mapping to take effect -- you
905 * should do an invltlb after doing the pmap_kenter...
906 */
907PMAP_INLINE void
908pmap_kenter(va, pa)
909	vm_offset_t va;
910	register vm_offset_t pa;
911{
912	register unsigned *pte;
913	unsigned npte, opte;
914
915	npte = pa | PG_RW | PG_V | pgeflag;
916	pte = (unsigned *)vtopte(va);
917	opte = *pte;
918	*pte = npte;
919	if (opte)
920		invltlb_1pg(va);
921}
922
923/*
924 * remove a page from the kernel pagetables
925 */
926PMAP_INLINE void
927pmap_kremove(va)
928	vm_offset_t va;
929{
930	register unsigned *pte;
931
932	pte = (unsigned *)vtopte(va);
933	*pte = 0;
934	invltlb_1pg(va);
935}
936
937static vm_page_t
938pmap_page_lookup(object, pindex)
939	vm_object_t object;
940	vm_pindex_t pindex;
941{
942	vm_page_t m;
943retry:
944	m = vm_page_lookup(object, pindex);
945	if (m && vm_page_sleep_busy(m, FALSE, "pplookp"))
946		goto retry;
947	return m;
948}
949
950/*
951 * Create the UPAGES for a new process.
952 * This routine directly affects the fork perf for a process.
953 */
954void
955pmap_new_proc(p)
956	struct proc *p;
957{
958	int i, updateneeded;
959	vm_object_t upobj;
960	vm_page_t m;
961	struct user *up;
962	unsigned *ptek, oldpte;
963
964	/*
965	 * allocate object for the upages
966	 */
967	if ((upobj = p->p_upages_obj) == NULL) {
968		upobj = vm_object_allocate( OBJT_DEFAULT, UPAGES);
969		p->p_upages_obj = upobj;
970	}
971
972	/* get a kernel virtual address for the UPAGES for this proc */
973	if ((up = p->p_addr) == NULL) {
974		up = (struct user *) kmem_alloc_pageable(kernel_map,
975				UPAGES * PAGE_SIZE);
976#if !defined(MAX_PERF)
977		if (up == NULL)
978			panic("pmap_new_proc: u_map allocation failed");
979#endif
980		p->p_addr = up;
981	}
982
983	ptek = (unsigned *) vtopte((vm_offset_t) up);
984
985	updateneeded = 0;
986	for(i=0;i<UPAGES;i++) {
987		/*
988		 * Get a kernel stack page
989		 */
990		m = vm_page_grab(upobj, i, VM_ALLOC_NORMAL | VM_ALLOC_RETRY);
991
992		/*
993		 * Wire the page
994		 */
995		m->wire_count++;
996		cnt.v_wire_count++;
997
998		oldpte = *(ptek + i);
999		/*
1000		 * Enter the page into the kernel address space.
1001		 */
1002		*(ptek + i) = VM_PAGE_TO_PHYS(m) | PG_RW | PG_V | pgeflag;
1003		if (oldpte) {
1004			if ((oldpte & PG_G) || (cpu_class > CPUCLASS_386)) {
1005				invlpg((vm_offset_t) up + i * PAGE_SIZE);
1006			} else {
1007				updateneeded = 1;
1008			}
1009		}
1010
1011		vm_page_wakeup(m);
1012		vm_page_flag_clear(m, PG_ZERO);
1013		vm_page_flag_set(m, PG_MAPPED | PG_WRITEABLE);
1014		m->valid = VM_PAGE_BITS_ALL;
1015	}
1016	if (updateneeded)
1017		invltlb();
1018}
1019
1020/*
1021 * Dispose the UPAGES for a process that has exited.
1022 * This routine directly impacts the exit perf of a process.
1023 */
1024void
1025pmap_dispose_proc(p)
1026	struct proc *p;
1027{
1028	int i;
1029	vm_object_t upobj;
1030	vm_page_t m;
1031	unsigned *ptek, oldpte;
1032
1033	upobj = p->p_upages_obj;
1034
1035	ptek = (unsigned *) vtopte((vm_offset_t) p->p_addr);
1036	for(i=0;i<UPAGES;i++) {
1037
1038		if ((m = vm_page_lookup(upobj, i)) == NULL)
1039			panic("pmap_dispose_proc: upage already missing???");
1040
1041		vm_page_busy(m);
1042
1043		oldpte = *(ptek + i);
1044		*(ptek + i) = 0;
1045		if ((oldpte & PG_G) || (cpu_class > CPUCLASS_386))
1046			invlpg((vm_offset_t) p->p_addr + i * PAGE_SIZE);
1047		vm_page_unwire(m, 0);
1048		vm_page_free(m);
1049	}
1050
1051	if (cpu_class <= CPUCLASS_386)
1052		invltlb();
1053}
1054
1055/*
1056 * Allow the UPAGES for a process to be prejudicially paged out.
1057 */
1058void
1059pmap_swapout_proc(p)
1060	struct proc *p;
1061{
1062	int i;
1063	vm_object_t upobj;
1064	vm_page_t m;
1065
1066	upobj = p->p_upages_obj;
1067	/*
1068	 * let the upages be paged
1069	 */
1070	for(i=0;i<UPAGES;i++) {
1071		if ((m = vm_page_lookup(upobj, i)) == NULL)
1072			panic("pmap_swapout_proc: upage already missing???");
1073		vm_page_dirty(m);
1074		vm_page_unwire(m, 0);
1075		pmap_kremove( (vm_offset_t) p->p_addr + PAGE_SIZE * i);
1076	}
1077}
1078
1079/*
1080 * Bring the UPAGES for a specified process back in.
1081 */
1082void
1083pmap_swapin_proc(p)
1084	struct proc *p;
1085{
1086	int i,rv;
1087	vm_object_t upobj;
1088	vm_page_t m;
1089
1090	upobj = p->p_upages_obj;
1091	for(i=0;i<UPAGES;i++) {
1092
1093		m = vm_page_grab(upobj, i, VM_ALLOC_NORMAL | VM_ALLOC_RETRY);
1094
1095		pmap_kenter(((vm_offset_t) p->p_addr) + i * PAGE_SIZE,
1096			VM_PAGE_TO_PHYS(m));
1097
1098		if (m->valid != VM_PAGE_BITS_ALL) {
1099			rv = vm_pager_get_pages(upobj, &m, 1, 0);
1100#if !defined(MAX_PERF)
1101			if (rv != VM_PAGER_OK)
1102				panic("pmap_swapin_proc: cannot get upages for proc: %d\n", p->p_pid);
1103#endif
1104			m = vm_page_lookup(upobj, i);
1105			m->valid = VM_PAGE_BITS_ALL;
1106		}
1107
1108		vm_page_wire(m);
1109		vm_page_wakeup(m);
1110		vm_page_flag_set(m, PG_MAPPED | PG_WRITEABLE);
1111	}
1112}
1113
1114/***************************************************
1115 * Page table page management routines.....
1116 ***************************************************/
1117
1118/*
1119 * This routine unholds page table pages, and if the hold count
1120 * drops to zero, then it decrements the wire count.
1121 */
1122static int
1123_pmap_unwire_pte_hold(pmap_t pmap, vm_page_t m) {
1124
1125	while (vm_page_sleep_busy(m, FALSE, "pmuwpt"))
1126		;
1127
1128	if (m->hold_count == 0) {
1129		vm_offset_t pteva;
1130		/*
1131		 * unmap the page table page
1132		 */
1133		pmap->pm_pdir[m->pindex] = 0;
1134		--pmap->pm_stats.resident_count;
1135		if ((((unsigned)pmap->pm_pdir[PTDPTDI]) & PG_FRAME) ==
1136			(((unsigned) PTDpde) & PG_FRAME)) {
1137			/*
1138			 * Do a invltlb to make the invalidated mapping
1139			 * take effect immediately.
1140			 */
1141			pteva = UPT_MIN_ADDRESS + i386_ptob(m->pindex);
1142			invltlb_1pg(pteva);
1143		}
1144
1145		if (pmap->pm_ptphint == m)
1146			pmap->pm_ptphint = NULL;
1147
1148		/*
1149		 * If the page is finally unwired, simply free it.
1150		 */
1151		--m->wire_count;
1152		if (m->wire_count == 0) {
1153
1154			vm_page_flash(m);
1155			vm_page_busy(m);
1156			vm_page_free_zero(m);
1157			--cnt.v_wire_count;
1158		}
1159		return 1;
1160	}
1161	return 0;
1162}
1163
1164static PMAP_INLINE int
1165pmap_unwire_pte_hold(pmap_t pmap, vm_page_t m) {
1166	vm_page_unhold(m);
1167	if (m->hold_count == 0)
1168		return _pmap_unwire_pte_hold(pmap, m);
1169	else
1170		return 0;
1171}
1172
1173/*
1174 * After removing a page table entry, this routine is used to
1175 * conditionally free the page, and manage the hold/wire counts.
1176 */
1177static int
1178pmap_unuse_pt(pmap, va, mpte)
1179	pmap_t pmap;
1180	vm_offset_t va;
1181	vm_page_t mpte;
1182{
1183	unsigned ptepindex;
1184	if (va >= UPT_MIN_ADDRESS)
1185		return 0;
1186
1187	if (mpte == NULL) {
1188		ptepindex = (va >> PDRSHIFT);
1189		if (pmap->pm_ptphint &&
1190			(pmap->pm_ptphint->pindex == ptepindex)) {
1191			mpte = pmap->pm_ptphint;
1192		} else {
1193			mpte = pmap_page_lookup( pmap->pm_pteobj, ptepindex);
1194			pmap->pm_ptphint = mpte;
1195		}
1196	}
1197
1198	return pmap_unwire_pte_hold(pmap, mpte);
1199}
1200
1201#if !defined(SMP)
1202void
1203pmap_pinit0(pmap)
1204	struct pmap *pmap;
1205{
1206	pmap->pm_pdir =
1207		(pd_entry_t *)kmem_alloc_pageable(kernel_map, PAGE_SIZE);
1208	pmap_kenter((vm_offset_t) pmap->pm_pdir, (vm_offset_t) IdlePTD);
1209	pmap->pm_flags = 0;
1210	pmap->pm_count = 1;
1211	pmap->pm_ptphint = NULL;
1212	TAILQ_INIT(&pmap->pm_pvlist);
1213	bzero(&pmap->pm_stats, sizeof pmap->pm_stats);
1214}
1215#else
1216void
1217pmap_pinit0(pmap)
1218	struct pmap *pmap;
1219{
1220	pmap_pinit(pmap);
1221}
1222#endif
1223
1224/*
1225 * Initialize a preallocated and zeroed pmap structure,
1226 * such as one in a vmspace structure.
1227 */
1228void
1229pmap_pinit(pmap)
1230	register struct pmap *pmap;
1231{
1232	vm_page_t ptdpg;
1233
1234	/*
1235	 * No need to allocate page table space yet but we do need a valid
1236	 * page directory table.
1237	 */
1238	if (pmap->pm_pdir == NULL)
1239		pmap->pm_pdir =
1240			(pd_entry_t *)kmem_alloc_pageable(kernel_map, PAGE_SIZE);
1241
1242	/*
1243	 * allocate object for the ptes
1244	 */
1245	if (pmap->pm_pteobj == NULL)
1246		pmap->pm_pteobj = vm_object_allocate( OBJT_DEFAULT, PTDPTDI + 1);
1247
1248	/*
1249	 * allocate the page directory page
1250	 */
1251	ptdpg = vm_page_grab( pmap->pm_pteobj, PTDPTDI,
1252			VM_ALLOC_NORMAL | VM_ALLOC_RETRY);
1253
1254	ptdpg->wire_count = 1;
1255	++cnt.v_wire_count;
1256
1257
1258	vm_page_flag_clear(ptdpg, PG_MAPPED | PG_BUSY); /* not usually mapped*/
1259	ptdpg->valid = VM_PAGE_BITS_ALL;
1260
1261	pmap_kenter((vm_offset_t) pmap->pm_pdir, VM_PAGE_TO_PHYS(ptdpg));
1262	if ((ptdpg->flags & PG_ZERO) == 0)
1263		bzero(pmap->pm_pdir, PAGE_SIZE);
1264
1265	/* wire in kernel global address entries */
1266	/* XXX copies current process, does not fill in MPPTDI */
1267	bcopy(PTD + KPTDI, pmap->pm_pdir + KPTDI, nkpt * PTESIZE);
1268
1269	/* install self-referential address mapping entry */
1270	*(unsigned *) (pmap->pm_pdir + PTDPTDI) =
1271		VM_PAGE_TO_PHYS(ptdpg) | PG_V | PG_RW | PG_A | PG_M;
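	/*
	 * Once this pmap becomes the current address space, that entry
	 * makes its page directory visible at PTD and its page tables
	 * at PTmap (see get_ptbase() above) with no extra bookkeeping.
	 */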
1272
1273	pmap->pm_flags = 0;
1274	pmap->pm_count = 1;
1275	pmap->pm_ptphint = NULL;
1276	TAILQ_INIT(&pmap->pm_pvlist);
1277	bzero(&pmap->pm_stats, sizeof pmap->pm_stats);
1278}
1279
1280static int
1281pmap_release_free_page(pmap, p)
1282	struct pmap *pmap;
1283	vm_page_t p;
1284{
1285	unsigned *pde = (unsigned *) pmap->pm_pdir;
1286	/*
1287	 * This code optimizes the case of freeing non-busy
1288	 * page-table pages.  Those pages are zero now, and
1289	 * might as well be placed directly into the zero queue.
1290	 */
1291	if (vm_page_sleep_busy(p, FALSE, "pmaprl"))
1292		return 0;
1293
1294	vm_page_busy(p);
1295
1296	/*
1297	 * Remove the page table page from the processes address space.
1298	 */
1299	pde[p->pindex] = 0;
1300	pmap->pm_stats.resident_count--;
1301
1302#if !defined(MAX_PERF)
1303	if (p->hold_count)  {
1304		panic("pmap_release: freeing held page table page");
1305	}
1306#endif
1307	/*
1308	 * Page directory pages need to have the kernel
1309	 * stuff cleared, so they can go into the zero queue also.
1310	 */
1311	if (p->pindex == PTDPTDI) {
1312		bzero(pde + KPTDI, nkpt * PTESIZE);
1313#ifdef SMP
1314		pde[MPPTDI] = 0;
1315#endif
1316		pde[APTDPTDI] = 0;
1317		pmap_kremove((vm_offset_t) pmap->pm_pdir);
1318	}
1319
1320	if (pmap->pm_ptphint && (pmap->pm_ptphint->pindex == p->pindex))
1321		pmap->pm_ptphint = NULL;
1322
1323	p->wire_count--;
1324	cnt.v_wire_count--;
1325	vm_page_free_zero(p);
1326	return 1;
1327}
1328
1329/*
1330 * this routine is called if the page table page is not
1331 * mapped correctly.
1332 */
1333static vm_page_t
1334_pmap_allocpte(pmap, ptepindex)
1335	pmap_t	pmap;
1336	unsigned ptepindex;
1337{
1338	vm_offset_t pteva, ptepa;
1339	vm_page_t m;
1340
1341	/*
1342	 * Find or fabricate a new pagetable page
1343	 */
1344	m = vm_page_grab(pmap->pm_pteobj, ptepindex,
1345			VM_ALLOC_ZERO | VM_ALLOC_RETRY);
1346
1347	if (m->queue != PQ_NONE) {
1348		int s = splvm();
1349		vm_page_unqueue(m);
1350		splx(s);
1351	}
1352
1353	if (m->wire_count == 0)
1354		cnt.v_wire_count++;
1355	m->wire_count++;
1356
1357	/*
1358	 * Increment the hold count for the page table page
1359	 * (denoting a new mapping.)
1360	 */
1361	m->hold_count++;
1362
1363	/*
1364	 * Map the pagetable page into the process address space, if
1365	 * it isn't already there.
1366	 */
1367
1368	pmap->pm_stats.resident_count++;
1369
1370	ptepa = VM_PAGE_TO_PHYS(m);
1371	pmap->pm_pdir[ptepindex] =
1372		(pd_entry_t) (ptepa | PG_U | PG_RW | PG_V | PG_A | PG_M);
1373
1374	/*
1375	 * Set the page table hint
1376	 */
1377	pmap->pm_ptphint = m;
1378
1379	/*
1380	 * Try to use the new mapping, but if we cannot, then
1381	 * do it with the routine that maps the page explicitly.
1382	 */
1383	if ((m->flags & PG_ZERO) == 0) {
1384		if ((((unsigned)pmap->pm_pdir[PTDPTDI]) & PG_FRAME) ==
1385			(((unsigned) PTDpde) & PG_FRAME)) {
1386			pteva = UPT_MIN_ADDRESS + i386_ptob(ptepindex);
1387			bzero((caddr_t) pteva, PAGE_SIZE);
1388		} else {
1389			pmap_zero_page(ptepa);
1390		}
1391	}
1392
1393	m->valid = VM_PAGE_BITS_ALL;
1394	vm_page_flag_clear(m, PG_ZERO);
1395	vm_page_flag_set(m, PG_MAPPED);
1396	vm_page_wakeup(m);
1397
1398	return m;
1399}
1400
1401static vm_page_t
1402pmap_allocpte(pmap, va)
1403	pmap_t	pmap;
1404	vm_offset_t va;
1405{
1406	unsigned ptepindex;
1407	vm_offset_t ptepa;
1408	vm_page_t m;
1409
1410	/*
1411	 * Calculate pagetable page index
1412	 */
1413	ptepindex = va >> PDRSHIFT;
1414
1415	/*
1416	 * Get the page directory entry
1417	 */
1418	ptepa = (vm_offset_t) pmap->pm_pdir[ptepindex];
1419
1420	/*
1421	 * This supports switching from a 4MB page to a
1422	 * normal 4K page.
1423	 */
1424	if (ptepa & PG_PS) {
1425		pmap->pm_pdir[ptepindex] = 0;
1426		ptepa = 0;
1427		invltlb();
1428	}
1429
1430	/*
1431	 * If the page table page is mapped, we just increment the
1432	 * hold count, and activate it.
1433	 */
1434	if (ptepa) {
1435		/*
1436		 * In order to get the page table page, try the
1437		 * hint first.
1438		 */
1439		if (pmap->pm_ptphint &&
1440			(pmap->pm_ptphint->pindex == ptepindex)) {
1441			m = pmap->pm_ptphint;
1442		} else {
1443			m = pmap_page_lookup( pmap->pm_pteobj, ptepindex);
1444			pmap->pm_ptphint = m;
1445		}
1446		m->hold_count++;
1447		return m;
1448	}
1449	/*
1450	 * Here if the pte page isn't mapped, or if it has been deallocated.
1451	 */
1452	return _pmap_allocpte(pmap, ptepindex);
1453}
1454
1455
1456/***************************************************
1457 * Pmap allocation/deallocation routines.
1458 ***************************************************/
1459
1460/*
1461 * Release any resources held by the given physical map.
1462 * Called when a pmap initialized by pmap_pinit is being released.
1463 * Should only be called if the map contains no valid mappings.
1464 */
1465void
1466pmap_release(pmap)
1467	register struct pmap *pmap;
1468{
1469	vm_page_t p,n,ptdpg;
1470	vm_object_t object = pmap->pm_pteobj;
1471	int curgeneration;
1472
1473#if defined(DIAGNOSTIC)
1474	if (object->ref_count != 1)
1475		panic("pmap_release: pteobj reference count != 1");
1476#endif
1477
1478	ptdpg = NULL;
1479retry:
1480	curgeneration = object->generation;
1481	for (p = TAILQ_FIRST(&object->memq); p != NULL; p = n) {
1482		n = TAILQ_NEXT(p, listq);
1483		if (p->pindex == PTDPTDI) {
1484			ptdpg = p;
1485			continue;
1486		}
1487		while (!pmap_release_free_page(pmap, p)) {
1488			/* the page was busy; if the object changed, rescan */
1489			if (object->generation != curgeneration)
1490				goto retry;
1491		}
1492	}
1493
1494	if (ptdpg && !pmap_release_free_page(pmap, ptdpg))
1495		goto retry;
1496}
1497
1498/*
1499 * grow the number of kernel page table entries, if needed
1500 */
1501void
1502pmap_growkernel(vm_offset_t addr)
1503{
1504	struct proc *p;
1505	struct pmap *pmap;
1506	int s;
1507	vm_offset_t ptppaddr;
1508	vm_page_t nkpg;
1509#ifdef SMP
1510	int i;
1511#endif
1512	pd_entry_t newpdir;
1513
1514	s = splhigh();
1515	if (kernel_vm_end == 0) {
1516		kernel_vm_end = KERNBASE;
1517		nkpt = 0;
1518		while (pdir_pde(PTD, kernel_vm_end)) {
1519			kernel_vm_end = (kernel_vm_end + PAGE_SIZE * NPTEPG) & ~(PAGE_SIZE * NPTEPG - 1);
1520			nkpt++;
1521		}
1522	}
1523	addr = (addr + PAGE_SIZE * NPTEPG) & ~(PAGE_SIZE * NPTEPG - 1);
1524	while (kernel_vm_end < addr) {
1525		if (pdir_pde(PTD, kernel_vm_end)) {
1526			kernel_vm_end = (kernel_vm_end + PAGE_SIZE * NPTEPG) & ~(PAGE_SIZE * NPTEPG - 1);
1527			continue;
1528		}
1529
1530		/*
1531		 * This index is bogus, but out of the way
1532		 */
1533		nkpg = vm_page_alloc(kptobj, nkpt, VM_ALLOC_SYSTEM);
1534#if !defined(MAX_PERF)
1535		if (!nkpg)
1536			panic("pmap_growkernel: no memory to grow kernel");
1537#endif
1538
1539		nkpt++;
1540
1541		vm_page_wire(nkpg);
1542		ptppaddr = VM_PAGE_TO_PHYS(nkpg);
1543		pmap_zero_page(ptppaddr);
1544		newpdir = (pd_entry_t) (ptppaddr | PG_V | PG_RW | PG_A | PG_M);
1545		pdir_pde(PTD, kernel_vm_end) = newpdir;
1546
1547#ifdef SMP
1548		for (i = 0; i < mp_ncpus; i++) {
1549			if (IdlePTDS[i])
1550				pdir_pde(IdlePTDS[i], kernel_vm_end) = newpdir;
1551		}
1552#endif
1553
1554		for (p = allproc.lh_first; p != 0; p = p->p_list.le_next) {
1555			if (p->p_vmspace) {
1556				pmap = vmspace_pmap(p->p_vmspace);
1557				*pmap_pde(pmap, kernel_vm_end) = newpdir;
1558			}
1559		}
1560		if (aiovmspace != NULL) {
1561			pmap = vmspace_pmap(aiovmspace);
1562			*pmap_pde(pmap, kernel_vm_end) = newpdir;
1563		}
1564		*pmap_pde(kernel_pmap, kernel_vm_end) = newpdir;
1565		kernel_vm_end = (kernel_vm_end + PAGE_SIZE * NPTEPG) & ~(PAGE_SIZE * NPTEPG - 1);
1566	}
1567	splx(s);
1568}
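/*
 * Each iteration above commits one page table page, i.e. PAGE_SIZE *
 * NPTEPG == 4MB of KVA; the masking arithmetic rounds kernel_vm_end
 * up to the next 4MB boundary.
 */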
1569
1570/*
1571 *	Retire the given physical map from service.
1572 *	Should only be called if the map contains
1573 *	no valid mappings.
1574 */
1575void
1576pmap_destroy(pmap)
1577	register pmap_t pmap;
1578{
1579	int count;
1580
1581	if (pmap == NULL)
1582		return;
1583
1584	count = --pmap->pm_count;
1585	if (count == 0) {
1586		pmap_release(pmap);
1587#if !defined(MAX_PERF)
1588		panic("destroying a pmap is not yet implemented");
1589#endif
1590	}
1591}
1592
1593/*
1594 *	Add a reference to the specified pmap.
1595 */
1596void
1597pmap_reference(pmap)
1598	pmap_t pmap;
1599{
1600	if (pmap != NULL) {
1601		pmap->pm_count++;
1602	}
1603}
1604
1605/***************************************************
1606 * page management routines.
1607 ***************************************************/
1608
1609/*
1610 * free the pv_entry back to the free list
1611 */
1612static PMAP_INLINE void
1613free_pv_entry(pv)
1614	pv_entry_t pv;
1615{
1616	pv_entry_count--;
1617	zfreei(pvzone, pv);
1618}
1619
1620/*
1621 * get a new pv_entry, allocating a block from the system
1622 * when needed.
1623 * the memory allocation is performed bypassing the malloc code
1624 * because of the possibility of allocations at interrupt time.
1625 */
1626static pv_entry_t
1627get_pv_entry(void)
1628{
1629	pv_entry_count++;
1630	if (pv_entry_high_water &&
1631		(pv_entry_count > pv_entry_high_water) &&
1632		(pmap_pagedaemon_waken == 0)) {
1633		pmap_pagedaemon_waken = 1;
1634		wakeup (&vm_pages_needed);
1635	}
1636	return zalloci(pvzone);
1637}
1638
1639/*
1640 * This routine is very drastic, but can save the system
1641 * in a pinch.
1642 */
1643void
1644pmap_collect() {
1645	pv_table_t *ppv;
1646	int i;
1647	vm_offset_t pa;
1648	vm_page_t m;
1649	static int warningdone=0;
1650
1651	if (pmap_pagedaemon_waken == 0)
1652		return;
1653
1654	if (warningdone < 5) {
1655		printf("pmap_collect: collecting pv entries -- suggest increasing PMAP_SHPGPERPROC\n");
1656		warningdone++;
1657	}
1658
1659	for(i = 0; i < pv_npg; i++) {
1660		if ((ppv = &pv_table[i]) == 0)
1661			continue;
1662		m = ppv->pv_vm_page;
1663		if ((pa = VM_PAGE_TO_PHYS(m)) == 0)
1664			continue;
1665		if (m->wire_count || m->hold_count || m->busy ||
1666			(m->flags & PG_BUSY))
1667			continue;
1668		pmap_remove_all(pa);
1669	}
1670	pmap_pagedaemon_waken = 0;
1671}
1672
1673
1674/*
1675 * Remove the pv entry for the given (pmap, va) pair from the
1676 * page's pv list and from the pmap's pv list, then free the now
1677 * unused entry.  Whichever list is expected to be shorter is the
1678 * one that is searched.
1679 */
1680
1681static int
1682pmap_remove_entry(pmap, ppv, va)
1683	struct pmap *pmap;
1684	pv_table_t *ppv;
1685	vm_offset_t va;
1686{
1687	pv_entry_t pv;
1688	int rtval;
1689	int s;
1690
1691	s = splvm();
1692	if (ppv->pv_list_count < pmap->pm_stats.resident_count) {
1693		for (pv = TAILQ_FIRST(&ppv->pv_list);
1694			pv;
1695			pv = TAILQ_NEXT(pv, pv_list)) {
1696			if (pmap == pv->pv_pmap && va == pv->pv_va)
1697				break;
1698		}
1699	} else {
1700		for (pv = TAILQ_FIRST(&pmap->pm_pvlist);
1701			pv;
1702			pv = TAILQ_NEXT(pv, pv_plist)) {
1703			if (va == pv->pv_va)
1704				break;
1705		}
1706	}
1707
1708	rtval = 0;
1709	if (pv) {
1710
1711		rtval = pmap_unuse_pt(pmap, va, pv->pv_ptem);
1712		TAILQ_REMOVE(&ppv->pv_list, pv, pv_list);
1713		ppv->pv_list_count--;
1714		if (TAILQ_FIRST(&ppv->pv_list) == NULL)
1715			vm_page_flag_clear(ppv->pv_vm_page, PG_MAPPED | PG_WRITEABLE);
1716
1717		TAILQ_REMOVE(&pmap->pm_pvlist, pv, pv_plist);
1718		free_pv_entry(pv);
1719	}
1720
1721	splx(s);
1722	return rtval;
1723}
1724
1725/*
1726 * Create a pv entry for page at pa for
1727 * (pmap, va).
1728 */
1729static void
1730pmap_insert_entry(pmap, va, mpte, pa)
1731	pmap_t pmap;
1732	vm_offset_t va;
1733	vm_page_t mpte;
1734	vm_offset_t pa;
1735{
1736
1737	int s;
1738	pv_entry_t pv;
1739	pv_table_t *ppv;
1740
1741	s = splvm();
1742	pv = get_pv_entry();
1743	pv->pv_va = va;
1744	pv->pv_pmap = pmap;
1745	pv->pv_ptem = mpte;
1746
1747	TAILQ_INSERT_TAIL(&pmap->pm_pvlist, pv, pv_plist);
1748
1749	ppv = pa_to_pvh(pa);
1750	TAILQ_INSERT_TAIL(&ppv->pv_list, pv, pv_list);
1751	ppv->pv_list_count++;
1752
1753	splx(s);
1754}
1755
1756/*
1757 * pmap_remove_pte: do the things to unmap a page in a process
1758 */
1759static int
1760pmap_remove_pte(pmap, ptq, va)
1761	struct pmap *pmap;
1762	unsigned *ptq;
1763	vm_offset_t va;
1764{
1765	unsigned oldpte;
1766	pv_table_t *ppv;
1767
1768	oldpte = *ptq;
1769	*ptq = 0;
1770	if (oldpte & PG_W)
1771		pmap->pm_stats.wired_count -= 1;
1772	/*
1773	 * Machines that don't support invlpg, also don't support
1774	 * PG_G.
1775	 */
1776	if (oldpte & PG_G)
1777		invlpg(va);
1778	pmap->pm_stats.resident_count -= 1;
1779	if (oldpte & PG_MANAGED) {
1780		ppv = pa_to_pvh(oldpte);
1781		if (oldpte & PG_M) {
1782#if defined(PMAP_DIAGNOSTIC)
1783			if (pmap_nw_modified((pt_entry_t) oldpte)) {
1784				printf(
1785	"pmap_remove: modified page not writable: va: 0x%x, pte: 0x%x\n",
1786				    va, oldpte);
1787			}
1788#endif
1789			if (pmap_track_modified(va))
1790				vm_page_dirty(ppv->pv_vm_page);
1791		}
1792		if (oldpte & PG_A)
1793			vm_page_flag_set(ppv->pv_vm_page, PG_REFERENCED);
1794		return pmap_remove_entry(pmap, ppv, va);
1795	} else {
1796		return pmap_unuse_pt(pmap, va, NULL);
1797	}
1798
1799	return 0;
1800}
1801
1802/*
1803 * Remove a single page from a process address space
1804 */
1805static void
1806pmap_remove_page(pmap, va)
1807	struct pmap *pmap;
1808	register vm_offset_t va;
1809{
1810	register unsigned *ptq;
1811
1812	/*
1813	 * if there is no pte for this address, just skip it!!!
1814	 */
1815	if (*pmap_pde(pmap, va) == 0) {
1816		return;
1817	}
1818
1819	/*
1820	 * get a local va for mappings for this pmap.
1821	 */
1822	ptq = get_ptbase(pmap) + i386_btop(va);
1823	if (*ptq) {
1824		(void) pmap_remove_pte(pmap, ptq, va);
1825		invltlb_1pg(va);
1826	}
1827	return;
1828}
1829
1830/*
1831 *	Remove the given range of addresses from the specified map.
1832 *
1833 *	It is assumed that the start and end are properly
1834 *	rounded to the page size.
1835 */
1836void
1837pmap_remove(pmap, sva, eva)
1838	struct pmap *pmap;
1839	register vm_offset_t sva;
1840	register vm_offset_t eva;
1841{
1842	register unsigned *ptbase;
1843	vm_offset_t pdnxt;
1844	vm_offset_t ptpaddr;
1845	vm_offset_t sindex, eindex;
1846	int anyvalid;
1847
1848	if (pmap == NULL)
1849		return;
1850
1851	if (pmap->pm_stats.resident_count == 0)
1852		return;
1853
1854	/*
1855	 * special handling of removing one page.  a very
1856	 * common operation and easy to short circuit some
1857	 * code.
1858	 */
1859	if (((sva + PAGE_SIZE) == eva) &&
1860		(((unsigned) pmap->pm_pdir[(sva >> PDRSHIFT)] & PG_PS) == 0)) {
1861		pmap_remove_page(pmap, sva);
1862		return;
1863	}
1864
1865	anyvalid = 0;
1866
1867	/*
1868	 * Get a local virtual address for the mappings that are being
1869	 * worked with.
1870	 */
1871	ptbase = get_ptbase(pmap);
1872
1873	sindex = i386_btop(sva);
1874	eindex = i386_btop(eva);
1875
1876	for (; sindex < eindex; sindex = pdnxt) {
1877		unsigned pdirindex;
1878
1879		/*
1880		 * Calculate index for next page table.
1881		 */
1882		pdnxt = ((sindex + NPTEPG) & ~(NPTEPG - 1));
1883		if (pmap->pm_stats.resident_count == 0)
1884			break;
1885
1886		pdirindex = sindex / NPDEPG;
1887		if (((ptpaddr = (unsigned) pmap->pm_pdir[pdirindex]) & PG_PS) != 0) {
1888			pmap->pm_pdir[pdirindex] = 0;
1889			pmap->pm_stats.resident_count -= NBPDR / PAGE_SIZE;
1890			anyvalid++;
1891			continue;
1892		}
1893
1894		/*
1895		 * Weed out invalid mappings. Note: we assume that the page
1896		 * directory table is always allocated, and in kernel virtual.
1897		 */
1898		if (ptpaddr == 0)
1899			continue;
1900
1901		/*
1902		 * Limit our scan to either the end of the va represented
1903		 * by the current page table page, or to the end of the
1904		 * range being removed.
1905		 */
1906		if (pdnxt > eindex) {
1907			pdnxt = eindex;
1908		}
1909
1910		for ( ;sindex != pdnxt; sindex++) {
1911			vm_offset_t va;
1912			if (ptbase[sindex] == 0) {
1913				continue;
1914			}
1915			va = i386_ptob(sindex);
1916
1917			anyvalid++;
1918			if (pmap_remove_pte(pmap,
1919				ptbase + sindex, va))
1920				break;
1921		}
1922	}
1923
1924	if (anyvalid) {
1925		invltlb();
1926	}
1927}
1928
1929/*
1930 *	Routine:	pmap_remove_all
1931 *	Function:
1932 *		Removes this physical page from
1933 *		all physical maps in which it resides.
1934 *		Reflects back modify bits to the pager.
1935 *
1936 *	Notes:
1937 *		Original versions of this routine were very
1938 *		inefficient because they iteratively called
1939 *		pmap_remove (slow...)
1940 */
1941
1942static void
1943pmap_remove_all(pa)
1944	vm_offset_t pa;
1945{
1946	register pv_entry_t pv;
1947	pv_table_t *ppv;
1948	register unsigned *pte, tpte;
1949	int nmodify;
1950	int update_needed;
1951	int s;
1952
1953	nmodify = 0;
1954	update_needed = 0;
1955#if defined(PMAP_DIAGNOSTIC)
1956	/*
1957	 * XXX this makes pmap_page_protect(NONE) illegal for non-managed
1958	 * pages!
1959	 */
1960	if (!pmap_is_managed(pa)) {
1961		panic("pmap_page_protect: illegal for unmanaged page, va: 0x%x", pa);
1962	}
1963#endif
1964
1965	s = splvm();
1966	ppv = pa_to_pvh(pa);
1967	while ((pv = TAILQ_FIRST(&ppv->pv_list)) != NULL) {
1968		pte = pmap_pte_quick(pv->pv_pmap, pv->pv_va);
1969
1970		pv->pv_pmap->pm_stats.resident_count--;
1971
1972		tpte = *pte;
1973		*pte = 0;
1974		if (tpte & PG_W)
1975			pv->pv_pmap->pm_stats.wired_count--;
1976
1977		if (tpte & PG_A)
1978			vm_page_flag_set(ppv->pv_vm_page, PG_REFERENCED);
1979
1980		/*
1981		 * Update the vm_page_t clean and reference bits.
1982		 */
1983		if (tpte & PG_M) {
1984#if defined(PMAP_DIAGNOSTIC)
1985			if (pmap_nw_modified((pt_entry_t) tpte)) {
1986				printf(
1987	"pmap_remove_all: modified page not writable: va: 0x%x, pte: 0x%x\n",
1988				    pv->pv_va, tpte);
1989			}
1990#endif
1991			if (pmap_track_modified(pv->pv_va))
1992				vm_page_dirty(ppv->pv_vm_page);
1993		}
1994#ifdef SMP
1995		update_needed = 1;
1996#else
1997		if (!update_needed &&
1998			((!curproc || (vmspace_pmap(curproc->p_vmspace) == pv->pv_pmap)) ||
1999			(pv->pv_pmap == kernel_pmap))) {
2000			update_needed = 1;
2001		}
2002#endif
2003
2004		TAILQ_REMOVE(&pv->pv_pmap->pm_pvlist, pv, pv_plist);
2005		TAILQ_REMOVE(&ppv->pv_list, pv, pv_list);
2006		ppv->pv_list_count--;
2007		pmap_unuse_pt(pv->pv_pmap, pv->pv_va, pv->pv_ptem);
2008		free_pv_entry(pv);
2009	}
2010
2011	vm_page_flag_clear(ppv->pv_vm_page, PG_MAPPED | PG_WRITEABLE);
2012
2013	if (update_needed)
2014		invltlb();
2015
2016	splx(s);
2017	return;
2018}
2019
2020/*
2021 *	Set the physical protection on the
2022 *	specified range of this map as requested.
2023 */
2024void
2025pmap_protect(pmap_t pmap, vm_offset_t sva, vm_offset_t eva, vm_prot_t prot)
2026{
2027	register unsigned *ptbase;
2028	vm_offset_t pdnxt, ptpaddr;
2029	vm_pindex_t sindex, eindex;
2030	int anychanged;
2031
2032
2033	if (pmap == NULL)
2034		return;
2035
2036	if ((prot & VM_PROT_READ) == VM_PROT_NONE) {
2037		pmap_remove(pmap, sva, eva);
2038		return;
2039	}
2040
2041	if (prot & VM_PROT_WRITE)
2042		return;
2043
2044	anychanged = 0;
2045
2046	ptbase = get_ptbase(pmap);
2047
2048	sindex = i386_btop(sva);
2049	eindex = i386_btop(eva);
2050
2051	for (; sindex < eindex; sindex = pdnxt) {
2052
2053		unsigned pdirindex;
2054
2055		pdnxt = ((sindex + NPTEPG) & ~(NPTEPG - 1));
2056
2057		pdirindex = sindex / NPDEPG;
2058		if (((ptpaddr = (unsigned) pmap->pm_pdir[pdirindex]) & PG_PS) != 0) {
2059			(unsigned) pmap->pm_pdir[pdirindex] &= ~(PG_M|PG_RW);
2060			pmap->pm_stats.resident_count -= NBPDR / PAGE_SIZE;
2061			anychanged++;
2062			continue;
2063		}
2064
2065		/*
2066		 * Weed out invalid mappings. Note: we assume that the page
2067		 * directory table is always allocated, and in kernel virtual.
2068		 */
2069		if (ptpaddr == 0)
2070			continue;
2071
2072		if (pdnxt > eindex) {
2073			pdnxt = eindex;
2074		}
2075
2076		for (; sindex != pdnxt; sindex++) {
2077
2078			unsigned pbits;
2079			pv_table_t *ppv;
2080
2081			pbits = ptbase[sindex];
2082
2083			if (pbits & PG_MANAGED) {
2084				ppv = NULL;
2085				if (pbits & PG_A) {
2086					ppv = pa_to_pvh(pbits);
2087					vm_page_flag_set(ppv->pv_vm_page, PG_REFERENCED);
2088					pbits &= ~PG_A;
2089				}
2090				if (pbits & PG_M) {
2091					if (pmap_track_modified(i386_ptob(sindex))) {
2092						if (ppv == NULL)
2093							ppv = pa_to_pvh(pbits);
2094						vm_page_dirty(ppv->pv_vm_page);
2095						pbits &= ~PG_M;
2096					}
2097				}
2098			}
2099
2100			pbits &= ~PG_RW;
2101
2102			if (pbits != ptbase[sindex]) {
2103				ptbase[sindex] = pbits;
2104				anychanged = 1;
2105			}
2106		}
2107	}
2108	if (anychanged)
2109		invltlb();
2110}
2111
2112/*
2113 *	Insert the given physical page (p) at
2114 *	the specified virtual address (v) in the
2115 *	target physical map with the protection requested.
2116 *
2117 *	If specified, the page will be wired down, meaning
2118 *	that the related pte can not be reclaimed.
2119 *
2120 *	NB:  This is the only routine which MAY NOT lazy-evaluate
2121 *	or lose information.  That is, this routine must actually
2122 *	insert this page into the given map NOW.
2123 */
2124void
2125pmap_enter(pmap_t pmap, vm_offset_t va, vm_offset_t pa, vm_prot_t prot,
2126	   boolean_t wired)
2127{
2128	register unsigned *pte;
2129	vm_offset_t opa;
2130	vm_offset_t origpte, newpte;
2131	vm_page_t mpte;
2132
2133	if (pmap == NULL)
2134		return;
2135
2136	va &= PG_FRAME;
2137#ifdef PMAP_DIAGNOSTIC
2138	if (va > VM_MAX_KERNEL_ADDRESS)
2139		panic("pmap_enter: toobig");
2140	if ((va >= UPT_MIN_ADDRESS) && (va < UPT_MAX_ADDRESS))
2141		panic("pmap_enter: invalid to pmap_enter page table pages (va: 0x%x)", va);
2142#endif
2143
2144	mpte = NULL;
2145	/*
2146	 * In the case that a page table page is not
2147	 * resident, we are creating it here.
2148	 */
2149	if (va < UPT_MIN_ADDRESS) {
2150		mpte = pmap_allocpte(pmap, va);
2151	}
2152#if 0 && defined(PMAP_DIAGNOSTIC)
2153	else {
2154		vm_offset_t *pdeaddr = (vm_offset_t *)pmap_pde(pmap, va);
2155		if (((origpte = (vm_offset_t) *pdeaddr) & PG_V) == 0) {
2156			panic("pmap_enter: invalid kernel page table page(0), pdir=%p, pde=%p, va=%p\n",
2157				pmap->pm_pdir[PTDPTDI], origpte, va);
2158		}
2159		if (smp_active) {
2160			pdeaddr = (vm_offset_t *) IdlePTDS[cpuid];
2161			if (((newpte = pdeaddr[va >> PDRSHIFT]) & PG_V) == 0) {
2162				if ((vm_offset_t) my_idlePTD != (vm_offset_t) vtophys(pdeaddr))
2163					printf("pde mismatch: %x, %x\n", my_idlePTD, pdeaddr);
2164				printf("cpuid: %d, pdeaddr: 0x%x\n", cpuid, pdeaddr);
2165				panic("pmap_enter: invalid kernel page table page(1), pdir=%p, npde=%p, pde=%p, va=%p\n",
2166					pmap->pm_pdir[PTDPTDI], newpte, origpte, va);
2167			}
2168		}
2169	}
2170#endif
2171
2172	pte = pmap_pte(pmap, va);
2173
2174#if !defined(MAX_PERF)
2175	/*
2176	 * Page Directory table entry not valid, we need a new PT page
2177	 */
2178	if (pte == NULL) {
2179		panic("pmap_enter: invalid page directory, pdir=%p, va=0x%x\n",
2180			(void *)pmap->pm_pdir[PTDPTDI], va);
2181	}
2182#endif
2183
2184	origpte = *(vm_offset_t *)pte;
2185	pa &= PG_FRAME;
2186	opa = origpte & PG_FRAME;
2187
2188#if !defined(MAX_PERF)
2189	if (origpte & PG_PS)
2190		panic("pmap_enter: attempted pmap_enter on 4MB page");
2191#endif
2192
2193	/*
2194	 * Mapping has not changed, must be protection or wiring change.
2195	 */
2196	if (origpte && (opa == pa)) {
2197		/*
2198		 * Wiring change, just update stats. We don't worry about
2199		 * wiring PT pages as they remain resident as long as there
2200		 * are valid mappings in them. Hence, if a user page is wired,
2201		 * the PT page will be also.
2202		 */
2203		if (wired && ((origpte & PG_W) == 0))
2204			pmap->pm_stats.wired_count++;
2205		else if (!wired && (origpte & PG_W))
2206			pmap->pm_stats.wired_count--;
2207
2208#if defined(PMAP_DIAGNOSTIC)
2209		if (pmap_nw_modified((pt_entry_t) origpte)) {
2210			printf(
2211	"pmap_enter: modified page not writable: va: 0x%x, pte: 0x%x\n",
2212			    va, origpte);
2213		}
2214#endif
2215
2216		/*
2217		 * Remove extra pte reference
2218		 */
2219		if (mpte)
2220			mpte->hold_count--;
2221
2222		if ((prot & VM_PROT_WRITE) && (origpte & PG_V)) {
2223			if ((origpte & PG_RW) == 0) {
2224				*pte |= PG_RW;
2225				invltlb_1pg(va);
2226			}
2227			return;
2228		}
2229
2230		/*
2231		 * We might be turning off write access to the page,
2232		 * so we go ahead and sense modify status.
2233		 */
2234		if (origpte & PG_MANAGED) {
2235			if ((origpte & PG_M) && pmap_track_modified(va)) {
2236				pv_table_t *ppv;
2237				ppv = pa_to_pvh(opa);
2238				vm_page_dirty(ppv->pv_vm_page);
2239			}
2240			pa |= PG_MANAGED;
2241		}
2242		goto validate;
2243	}
2244	/*
2245	 * Mapping has changed, invalidate old range and fall through to
2246	 * handle validating new mapping.
2247	 */
2248	if (opa) {
2249		int err;
2250		err = pmap_remove_pte(pmap, pte, va);
2251#if !defined(MAX_PERF)
2252		if (err)
2253			panic("pmap_enter: pte vanished, va: 0x%x", va);
2254#endif
2255	}
2256
2257	/*
2258	 * Enter on the PV list if part of our managed memory.  Note that we
2259	 * raise IPL while manipulating pv_table since pmap_enter can be
2260	 * called at interrupt time.
2261	 */
2262	if (pmap_is_managed(pa)) {
2263		pmap_insert_entry(pmap, va, mpte, pa);
2264		pa |= PG_MANAGED;
2265	}
2266
2267	/*
2268	 * Increment counters
2269	 */
2270	pmap->pm_stats.resident_count++;
2271	if (wired)
2272		pmap->pm_stats.wired_count++;
2273
2274validate:
2275	/*
2276	 * Now validate mapping with desired protection/wiring.
2277	 */
2278	newpte = (vm_offset_t) (pa | pte_prot(pmap, prot) | PG_V);
2279
2280	if (wired)
2281		newpte |= PG_W;
2282	if (va < UPT_MIN_ADDRESS)
2283		newpte |= PG_U;
2284	if (pmap == kernel_pmap)
2285		newpte |= pgeflag;
2286
2287	/*
2288	 * if the mapping or permission bits are different, we need
2289	 * to update the pte.
2290	 */
2291	if ((origpte & ~(PG_M|PG_A)) != newpte) {
2292		*pte = newpte | PG_A;
2293		if (origpte)
2294			invltlb_1pg(va);
2295	}
2296}
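
/*
 * Illustrative sketch (not compiled): a typical wired mapping made
 * through pmap_enter().  The va/pa arguments are placeholders, not
 * addresses used elsewhere in this file.
 */
#if 0
static void
example_enter_wired(vm_offset_t va, vm_offset_t pa)
{
	/* map pa at va, read/write, wired, in the kernel pmap */
	pmap_enter(kernel_pmap, va, pa, VM_PROT_READ | VM_PROT_WRITE, TRUE);
}
#endif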
2297
2298/*
2299 * this code makes some *MAJOR* assumptions:
2300 * 1. The pmap is the current pmap, and it exists.
2301 * 2. Not wired.
2302 * 3. Read access.
2303 * 4. No page table pages.
2304 * 5. Tlbflush is deferred to calling procedure.
2305 * 6. Page IS managed.
2306 * but it is *MUCH* faster than pmap_enter...
2307 */
2308
2309static vm_page_t
2310pmap_enter_quick(pmap, va, pa, mpte)
2311	register pmap_t pmap;
2312	vm_offset_t va;
2313	register vm_offset_t pa;
2314	vm_page_t mpte;
2315{
2316	register unsigned *pte;
2317
2318	/*
2319	 * In the case that a page table page is not
2320	 * resident, we are creating it here.
2321	 */
2322	if (va < UPT_MIN_ADDRESS) {
2323		unsigned ptepindex;
2324		vm_offset_t ptepa;
2325
2326		/*
2327		 * Calculate pagetable page index
2328		 */
2329		ptepindex = va >> PDRSHIFT;
2330		if (mpte && (mpte->pindex == ptepindex)) {
2331			mpte->hold_count++;
2332		} else {
2333retry:
2334			/*
2335			 * Get the page directory entry
2336			 */
2337			ptepa = (vm_offset_t) pmap->pm_pdir[ptepindex];
2338
2339			/*
2340			 * If the page table page is mapped, we just increment
2341			 * the hold count, and activate it.
2342			 */
2343			if (ptepa) {
2344#if !defined(MAX_PERF)
2345				if (ptepa & PG_PS)
2346					panic("pmap_enter_quick: unexpected mapping into 4MB page");
2347#endif
2348				if (pmap->pm_ptphint &&
2349					(pmap->pm_ptphint->pindex == ptepindex)) {
2350					mpte = pmap->pm_ptphint;
2351				} else {
2352					mpte = pmap_page_lookup( pmap->pm_pteobj, ptepindex);
2353					pmap->pm_ptphint = mpte;
2354				}
2355				if (mpte == NULL)
2356					goto retry;
2357				mpte->hold_count++;
2358			} else {
2359				mpte = _pmap_allocpte(pmap, ptepindex);
2360			}
2361		}
2362	} else {
2363		mpte = NULL;
2364	}
2365
2366	/*
2367	 * This call to vtopte makes the assumption that we are
2368	 * entering the page into the current pmap.  In order to support
2369	 * quick entry into any pmap, one would likely use pmap_pte_quick.
2370	 * But that isn't as quick as vtopte.
2371	 */
2372	pte = (unsigned *)vtopte(va);
2373	if (*pte) {
2374		if (mpte)
2375			pmap_unwire_pte_hold(pmap, mpte);
2376		return 0;
2377	}
2378
2379	/*
2380	 * Enter on the PV list if part of our managed memory.  Note that we
2381	 * raise IPL while manipulating pv_table since pmap_enter can be
2382	 * called at interrupt time.
2383	 */
2384	pmap_insert_entry(pmap, va, mpte, pa);
2385
2386	/*
2387	 * Increment counters
2388	 */
2389	pmap->pm_stats.resident_count++;
2390
2391	/*
2392	 * Now validate mapping with RO protection
2393	 */
2394	*pte = pa | PG_V | PG_U | PG_MANAGED;
2395
2396	return mpte;
2397}
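
/*
 * Illustrative sketch (not compiled): what the vtopte() shortcut above
 * is assumed to expand to.  With the conventional i386 recursive page
 * table mapping, vtopte(va) is roughly PTmap + i386_btop(va); see
 * pmap.h for the authoritative definition.
 */
#if 0
static unsigned *
example_vtopte(vm_offset_t va)
{
	/* index the recursive page-table window by virtual page number */
	return ((unsigned *) PTmap + i386_btop(va));
}
#endif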
2398
2399#define MAX_INIT_PT (96)
2400/*
2401 * pmap_object_init_pt preloads the ptes for a given object
2402 * into the specified pmap.  This eliminates the blast of soft
2403 * faults on process startup and immediately after an mmap.
2404 */
2405void
2406pmap_object_init_pt(pmap, addr, object, pindex, size, limit)
2407	pmap_t pmap;
2408	vm_offset_t addr;
2409	vm_object_t object;
2410	vm_pindex_t pindex;
2411	vm_size_t size;
2412	int limit;
2413{
2414	vm_offset_t tmpidx;
2415	int psize;
2416	vm_page_t p, mpte;
2417	int objpgs;
2418
2419	if (!pmap)
2420		return;
2421
2422	/*
2423	 * This code maps large physical mmap regions into the
2424	 * processor address space.  Note that some shortcuts
2425	 * are taken, but the code works.
2426	 */
2427	if (pseflag &&
2428		(object->type == OBJT_DEVICE) &&
2429		((addr & (NBPDR - 1)) == 0) &&
2430		((size & (NBPDR - 1)) == 0) ) {
2431		int i;
2432		vm_page_t m[1];
2433		unsigned int ptepindex;
2434		int npdes;
2435		vm_offset_t ptepa;
2436
2437		if (pmap->pm_pdir[ptepindex = (addr >> PDRSHIFT)])
2438			return;
2439
2440retry:
2441		p = vm_page_lookup(object, pindex);
2442		if (p && vm_page_sleep_busy(p, FALSE, "init4p"))
2443			goto retry;
2444
2445		if (p == NULL) {
2446			p = vm_page_alloc(object, pindex, VM_ALLOC_NORMAL);
2447			if (p == NULL)
2448				return;
2449			m[0] = p;
2450
2451			if (vm_pager_get_pages(object, m, 1, 0) != VM_PAGER_OK) {
2452				vm_page_free(p);
2453				return;
2454			}
2455
2456			p = vm_page_lookup(object, pindex);
2457			vm_page_wakeup(p);
2458		}
2459
2460		ptepa = (vm_offset_t) VM_PAGE_TO_PHYS(p);
2461		if (ptepa & (NBPDR - 1)) {
2462			return;
2463		}
2464
2465		p->valid = VM_PAGE_BITS_ALL;
2466
2467		pmap->pm_stats.resident_count += size >> PAGE_SHIFT;
2468		npdes = size >> PDRSHIFT;
2469		for(i=0;i<npdes;i++) {
2470			pmap->pm_pdir[ptepindex] =
2471				(pd_entry_t) (ptepa | PG_U | PG_RW | PG_V | PG_PS);
2472			ptepa += NBPDR;
2473			ptepindex += 1;
2474		}
2475		vm_page_flag_set(p, PG_MAPPED);
2476		invltlb();
2477		return;
2478	}
2479
2480	psize = i386_btop(size);
2481
2482	if ((object->type != OBJT_VNODE) ||
2483		(limit && (psize > MAX_INIT_PT) &&
2484			(object->resident_page_count > MAX_INIT_PT))) {
2485		return;
2486	}
2487
2488	if (psize + pindex > object->size)
2489		psize = object->size - pindex;
2490
2491	mpte = NULL;
2492	/*
2493	 * if we are processing a major portion of the object, then scan the
2494	 * entire thing.
2495	 */
2496	if (psize > (object->size >> 2)) {
2497		objpgs = psize;
2498
2499		for (p = TAILQ_FIRST(&object->memq);
2500		    ((objpgs > 0) && (p != NULL));
2501		    p = TAILQ_NEXT(p, listq)) {
2502
2503			tmpidx = p->pindex;
2504			if (tmpidx < pindex) {
2505				continue;
2506			}
2507			tmpidx -= pindex;
2508			if (tmpidx >= psize) {
2509				continue;
2510			}
2511			if (((p->valid & VM_PAGE_BITS_ALL) == VM_PAGE_BITS_ALL) &&
2512				(p->busy == 0) &&
2513			    (p->flags & (PG_BUSY | PG_FICTITIOUS)) == 0) {
2514				if ((p->queue - p->pc) == PQ_CACHE)
2515					vm_page_deactivate(p);
2516				vm_page_busy(p);
2517				mpte = pmap_enter_quick(pmap,
2518					addr + i386_ptob(tmpidx),
2519					VM_PAGE_TO_PHYS(p), mpte);
2520				vm_page_flag_set(p, PG_MAPPED);
2521				vm_page_wakeup(p);
2522			}
2523			objpgs -= 1;
2524		}
2525	} else {
2526		/*
2527		 * else lookup the pages one-by-one.
2528		 */
2529		for (tmpidx = 0; tmpidx < psize; tmpidx += 1) {
2530			p = vm_page_lookup(object, tmpidx + pindex);
2531			if (p &&
2532			    ((p->valid & VM_PAGE_BITS_ALL) == VM_PAGE_BITS_ALL) &&
2533				(p->busy == 0) &&
2534			    (p->flags & (PG_BUSY | PG_FICTITIOUS)) == 0) {
2535				if ((p->queue - p->pc) == PQ_CACHE)
2536					vm_page_deactivate(p);
2537				vm_page_busy(p);
2538				mpte = pmap_enter_quick(pmap,
2539					addr + i386_ptob(tmpidx),
2540					VM_PAGE_TO_PHYS(p), mpte);
2541				vm_page_flag_set(p, PG_MAPPED);
2542				vm_page_wakeup(p);
2543			}
2544		}
2545	}
2546	return;
2547}
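
/*
 * Illustrative sketch (not compiled): the 4MB-page eligibility test and
 * PDE count used by the OBJT_DEVICE path above.  NBPDR is the span of
 * one page directory entry (4MB with PSE enabled), so both the start
 * address and the size must be NBPDR-aligned, and the region then
 * consumes size >> PDRSHIFT directory entries.
 */
#if 0
static int
example_npdes(vm_offset_t addr, vm_size_t size)
{
	if ((addr & (NBPDR - 1)) != 0 || (size & (NBPDR - 1)) != 0)
		return (0);			/* not eligible for PG_PS */
	return ((int)(size >> PDRSHIFT));	/* one PDE per 4MB */
}
#endif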
2548
2549/*
2550 * pmap_prefault provides a quick way of clustering
2551 * pagefaults into a processes address space.  It is a "cousin"
2552 * of pmap_object_init_pt, except it runs at page fault time instead
2553 * of mmap time.
2554 */
2555#define PFBAK 4
2556#define PFFOR 4
2557#define PAGEORDER_SIZE (PFBAK+PFFOR)
2558
2559static int pmap_prefault_pageorder[] = {
2560	-PAGE_SIZE, PAGE_SIZE,
2561	-2 * PAGE_SIZE, 2 * PAGE_SIZE,
2562	-3 * PAGE_SIZE, 3 * PAGE_SIZE,
2563	-4 * PAGE_SIZE, 4 * PAGE_SIZE
2564};
2565
2566void
2567pmap_prefault(pmap, addra, entry)
2568	pmap_t pmap;
2569	vm_offset_t addra;
2570	vm_map_entry_t entry;
2571{
2572	int i;
2573	vm_offset_t starta;
2574	vm_offset_t addr;
2575	vm_pindex_t pindex;
2576	vm_page_t m, mpte;
2577	vm_object_t object;
2578
2579	if (!curproc || (pmap != vmspace_pmap(curproc->p_vmspace)))
2580		return;
2581
2582	object = entry->object.vm_object;
2583
2584	starta = addra - PFBAK * PAGE_SIZE;
2585	if (starta < entry->start) {
2586		starta = entry->start;
2587	} else if (starta > addra) {
2588		starta = 0;
2589	}
2590
2591	mpte = NULL;
2592	for (i = 0; i < PAGEORDER_SIZE; i++) {
2593		vm_object_t lobject;
2594		unsigned *pte;
2595
2596		addr = addra + pmap_prefault_pageorder[i];
2597		if (addr > addra + (PFFOR * PAGE_SIZE))
2598			addr = 0;
2599
2600		if (addr < starta || addr >= entry->end)
2601			continue;
2602
2603		if ((*pmap_pde(pmap, addr)) == 0)
2604			continue;
2605
2606		pte = (unsigned *) vtopte(addr);
2607		if (*pte)
2608			continue;
2609
2610		pindex = ((addr - entry->start) + entry->offset) >> PAGE_SHIFT;
2611		lobject = object;
2612		for (m = vm_page_lookup(lobject, pindex);
2613		    (!m && (lobject->type == OBJT_DEFAULT) && (lobject->backing_object));
2614		    lobject = lobject->backing_object) {
2615			if (lobject->backing_object_offset & PAGE_MASK)
2616				break;
2617			pindex += (lobject->backing_object_offset >> PAGE_SHIFT);
2618			m = vm_page_lookup(lobject->backing_object, pindex);
2619		}
2620
2621		/*
2622		 * give up when a page is not in memory
2623		 */
2624		if (m == NULL)
2625			break;
2626
2627		if (((m->valid & VM_PAGE_BITS_ALL) == VM_PAGE_BITS_ALL) &&
2628			(m->busy == 0) &&
2629		    (m->flags & (PG_BUSY | PG_FICTITIOUS)) == 0) {
2630
2631			if ((m->queue - m->pc) == PQ_CACHE) {
2632				vm_page_deactivate(m);
2633			}
2634			vm_page_busy(m);
2635			mpte = pmap_enter_quick(pmap, addr,
2636				VM_PAGE_TO_PHYS(m), mpte);
2637			vm_page_flag_set(m, PG_MAPPED);
2638			vm_page_wakeup(m);
2639		}
2640	}
2641}
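
/*
 * Illustrative sketch (not compiled): the probe order generated from
 * pmap_prefault_pageorder[] above.  For a fault at addra the candidate
 * addresses alternate around it at increasing distance -- addra-4096,
 * addra+4096, addra-8192, addra+8192, and so on, PFBAK pages back and
 * PFFOR pages forward.
 */
#if 0
static void
example_prefault_order(vm_offset_t addra)
{
	vm_offset_t probe;
	int i;

	for (i = 0; i < PAGEORDER_SIZE; i++) {
		probe = addra + pmap_prefault_pageorder[i];
		/* probe is used only if it falls inside the map entry */
	}
}
#endif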
2642
2643/*
2644 *	Routine:	pmap_change_wiring
2645 *	Function:	Change the wiring attribute for a map/virtual-address
2646 *			pair.
2647 *	In/out conditions:
2648 *			The mapping must already exist in the pmap.
2649 */
2650void
2651pmap_change_wiring(pmap, va, wired)
2652	register pmap_t pmap;
2653	vm_offset_t va;
2654	boolean_t wired;
2655{
2656	register unsigned *pte;
2657
2658	if (pmap == NULL)
2659		return;
2660
2661	pte = pmap_pte(pmap, va);
2662
2663	if (wired && !pmap_pte_w(pte))
2664		pmap->pm_stats.wired_count++;
2665	else if (!wired && pmap_pte_w(pte))
2666		pmap->pm_stats.wired_count--;
2667
2668	/*
2669	 * Wiring is not a hardware characteristic so there is no need to
2670	 * invalidate TLB.
2671	 */
2672	pmap_pte_set_w(pte, wired);
2673}
2674
2675
2676
2677/*
2678 *	Copy the range specified by src_addr/len
2679 *	from the source map to the range dst_addr/len
2680 *	in the destination map.
2681 *
2682 *	This routine is only advisory and need not do anything.
2683 */
2684
2685void
2686pmap_copy(dst_pmap, src_pmap, dst_addr, len, src_addr)
2687	pmap_t dst_pmap, src_pmap;
2688	vm_offset_t dst_addr;
2689	vm_size_t len;
2690	vm_offset_t src_addr;
2691{
2692	vm_offset_t addr;
2693	vm_offset_t end_addr = src_addr + len;
2694	vm_offset_t pdnxt;
2695	unsigned src_frame, dst_frame;
2696
2697	if (dst_addr != src_addr)
2698		return;
2699
2700	src_frame = ((unsigned) src_pmap->pm_pdir[PTDPTDI]) & PG_FRAME;
2701	if (src_frame != (((unsigned) PTDpde) & PG_FRAME)) {
2702		return;
2703	}
2704
2705	dst_frame = ((unsigned) dst_pmap->pm_pdir[PTDPTDI]) & PG_FRAME;
2706	if (dst_frame != (((unsigned) APTDpde) & PG_FRAME)) {
2707		APTDpde = (pd_entry_t) (dst_frame | PG_RW | PG_V);
2708		invltlb();
2709	}
2710
2711	for(addr = src_addr; addr < end_addr; addr = pdnxt) {
2712		unsigned *src_pte, *dst_pte;
2713		vm_page_t dstmpte, srcmpte;
2714		vm_offset_t srcptepaddr;
2715		unsigned ptepindex;
2716
2717#if !defined(MAX_PERF)
2718		if (addr >= UPT_MIN_ADDRESS)
2719			panic("pmap_copy: invalid to pmap_copy page tables\n");
2720#endif
2721
2722		/*
2723		 * Don't let optional prefaulting of pages make us go
2724		 * way below the low water mark of free pages or way
2725		 * above high water mark of used pv entries.
2726		 */
2727		if (cnt.v_free_count < cnt.v_free_reserved ||
2728		    pv_entry_count > pv_entry_high_water)
2729			break;
2730
2731		pdnxt = ((addr + PAGE_SIZE*NPTEPG) & ~(PAGE_SIZE*NPTEPG - 1));
2732		ptepindex = addr >> PDRSHIFT;
2733
2734		srcptepaddr = (vm_offset_t) src_pmap->pm_pdir[ptepindex];
2735		if (srcptepaddr == 0)
2736			continue;
2737
2738		if (srcptepaddr & PG_PS) {
2739			if (dst_pmap->pm_pdir[ptepindex] == 0) {
2740				dst_pmap->pm_pdir[ptepindex] = (pd_entry_t) srcptepaddr;
2741				dst_pmap->pm_stats.resident_count += NBPDR / PAGE_SIZE;
2742			}
2743			continue;
2744		}
2745
2746		srcmpte = vm_page_lookup(src_pmap->pm_pteobj, ptepindex);
2747		if ((srcmpte == NULL) ||
2748			(srcmpte->hold_count == 0) || (srcmpte->flags & PG_BUSY))
2749			continue;
2750
2751		if (pdnxt > end_addr)
2752			pdnxt = end_addr;
2753
2754		src_pte = (unsigned *) vtopte(addr);
2755		dst_pte = (unsigned *) avtopte(addr);
2756		while (addr < pdnxt) {
2757			unsigned ptetemp;
2758			ptetemp = *src_pte;
2759			/*
2760			 * we only virtual copy managed pages
2761			 */
2762			if ((ptetemp & PG_MANAGED) != 0) {
2763				/*
2764				 * We have to check after allocpte for the
2765				 * pte still being around...  allocpte can
2766				 * block.
2767				 */
2768				dstmpte = pmap_allocpte(dst_pmap, addr);
2769				if ((*dst_pte == 0) && (ptetemp = *src_pte)) {
2770					/*
2771					 * Clear the modified and
2772					 * accessed (referenced) bits
2773					 * during the copy.
2774					 */
2775					*dst_pte = ptetemp & ~(PG_M | PG_A);
2776					dst_pmap->pm_stats.resident_count++;
2777					pmap_insert_entry(dst_pmap, addr,
2778						dstmpte,
2779						(ptetemp & PG_FRAME));
2780				} else {
2781					pmap_unwire_pte_hold(dst_pmap, dstmpte);
2782				}
2783				if (dstmpte->hold_count >= srcmpte->hold_count)
2784					break;
2785			}
2786			addr += PAGE_SIZE;
2787			src_pte++;
2788			dst_pte++;
2789		}
2790	}
2791}
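
/*
 * Illustrative sketch (not compiled): the rounding used for pdnxt in
 * pmap_copy() above.  PAGE_SIZE * NPTEPG equals NBPDR (4MB), so pdnxt
 * is simply the start of the next page-directory-sized region.
 */
#if 0
static vm_offset_t
example_pdnxt(vm_offset_t addr)
{
	return ((addr + NBPDR) & ~(NBPDR - 1));
}
#endif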
2792
2793/*
2794 *	Routine:	pmap_kernel
2795 *	Function:
2796 *		Returns the physical map handle for the kernel.
2797 */
2798pmap_t
2799pmap_kernel()
2800{
2801	return (kernel_pmap);
2802}
2803
2804/*
2805 *	pmap_zero_page zeros the specified (machine independent)
2806 *	page by mapping the page into virtual memory and using
2807 *	bzero to clear its contents, one machine dependent page
2808 *	at a time.
2809 */
2810void
2811pmap_zero_page(phys)
2812	vm_offset_t phys;
2813{
2814#ifdef SMP
2815#if !defined(MAX_PERF)
2816	if (*(int *) prv_CMAP3)
2817		panic("pmap_zero_page: prv_CMAP3 busy");
2818#endif
2819
2820	*(int *) prv_CMAP3 = PG_V | PG_RW | (phys & PG_FRAME) | PG_A | PG_M;
2821	cpu_invlpg(&prv_CPAGE3);
2822
2823#if defined(I686_CPU)
2824	if (cpu_class == CPUCLASS_686)
2825		i686_pagezero(&prv_CPAGE3);
2826	else
2827#endif
2828		bzero(&prv_CPAGE3, PAGE_SIZE);
2829
2830	*(int *) prv_CMAP3 = 0;
2831#else
2832#if !defined(MAX_PERF)
2833	if (*(int *) CMAP2)
2834		panic("pmap_zero_page: CMAP2 busy");
2835#endif
2836
2837	*(int *) CMAP2 = PG_V | PG_RW | (phys & PG_FRAME) | PG_A | PG_M;
2838	if (cpu_class == CPUCLASS_386) {
2839		invltlb();
2840	} else {
2841		invlpg((u_int)CADDR2);
2842	}
2843
2844#if defined(I686_CPU)
2845	if (cpu_class == CPUCLASS_686)
2846		i686_pagezero(CADDR2);
2847	else
2848#endif
2849		bzero(CADDR2, PAGE_SIZE);
2850	*(int *) CMAP2 = 0;
2851#endif
2852}
2853
2854/*
2855 *	pmap_copy_page copies the specified (machine independent)
2856 *	page by mapping the page into virtual memory and using
2857 *	bcopy to copy the page, one machine dependent page at a
2858 *	time.
2859 */
2860void
2861pmap_copy_page(src, dst)
2862	vm_offset_t src;
2863	vm_offset_t dst;
2864{
2865#ifdef SMP
2866#if !defined(MAX_PERF)
2867	if (*(int *) prv_CMAP1)
2868		panic("pmap_copy_page: prv_CMAP1 busy");
2869	if (*(int *) prv_CMAP2)
2870		panic("pmap_copy_page: prv_CMAP2 busy");
2871#endif
2872
2873	*(int *) prv_CMAP1 = PG_V | (src & PG_FRAME) | PG_A;
2874	*(int *) prv_CMAP2 = PG_V | PG_RW | (dst & PG_FRAME) | PG_A | PG_M;
2875
2876	cpu_invlpg(&prv_CPAGE1);
2877	cpu_invlpg(&prv_CPAGE2);
2878
2879	bcopy(&prv_CPAGE1, &prv_CPAGE2, PAGE_SIZE);
2880
2881	*(int *) prv_CMAP1 = 0;
2882	*(int *) prv_CMAP2 = 0;
2883#else
2884#if !defined(MAX_PERF)
2885	if (*(int *) CMAP1 || *(int *) CMAP2)
2886		panic("pmap_copy_page: CMAP busy");
2887#endif
2888
2889	*(int *) CMAP1 = PG_V | (src & PG_FRAME) | PG_A;
2890	*(int *) CMAP2 = PG_V | PG_RW | (dst & PG_FRAME) | PG_A | PG_M;
2891	if (cpu_class == CPUCLASS_386) {
2892		invltlb();
2893	} else {
2894		invlpg((u_int)CADDR1);
2895		invlpg((u_int)CADDR2);
2896	}
2897
2898	bcopy(CADDR1, CADDR2, PAGE_SIZE);
2899
2900	*(int *) CMAP1 = 0;
2901	*(int *) CMAP2 = 0;
2902#endif
2903}
2904
2905
2906/*
2907 *	Routine:	pmap_pageable
2908 *	Function:
2909 *		Make the specified pages (by pmap, offset)
2910 *		pageable (or not) as requested.
2911 *
2912 *		A page which is not pageable may not take
2913 *		a fault; therefore, its page table entry
2914 *		must remain valid for the duration.
2915 *
2916 *		This routine is merely advisory; pmap_enter
2917 *		will specify that these pages are to be wired
2918 *		down (or not) as appropriate.
2919 */
2920void
2921pmap_pageable(pmap, sva, eva, pageable)
2922	pmap_t pmap;
2923	vm_offset_t sva, eva;
2924	boolean_t pageable;
2925{
2926}
2927
2928/*
2929 * this routine returns true if a physical page resides
2930 * in the given pmap.
2931 */
2932boolean_t
2933pmap_page_exists(pmap, pa)
2934	pmap_t pmap;
2935	vm_offset_t pa;
2936{
2937	register pv_entry_t pv;
2938	pv_table_t *ppv;
2939	int s;
2940
2941	if (!pmap_is_managed(pa))
2942		return FALSE;
2943
2944	s = splvm();
2945
2946	ppv = pa_to_pvh(pa);
2947	/*
2948	 * Check current mappings, returning immediately if one is found.
2949	 */
2950	for (pv = TAILQ_FIRST(&ppv->pv_list);
2951		pv;
2952		pv = TAILQ_NEXT(pv, pv_list)) {
2953		if (pv->pv_pmap == pmap) {
2954			splx(s);
2955			return TRUE;
2956		}
2957	}
2958	splx(s);
2959	return (FALSE);
2960}
2961
2962#define PMAP_REMOVE_PAGES_CURPROC_ONLY
2963/*
2964 * Remove all pages from the specified address space;
2965 * this aids process exit speeds.  Also, this code is
2966 * special cased for the current process only, but it
2967 * can have the more generic (and slightly slower)
2968 * mode enabled.  This is much faster than pmap_remove
2969 * in the case of running down an entire address space.
2970 */
2971void
2972pmap_remove_pages(pmap, sva, eva)
2973	pmap_t pmap;
2974	vm_offset_t sva, eva;
2975{
2976	unsigned *pte, tpte;
2977	pv_table_t *ppv;
2978	pv_entry_t pv, npv;
2979	int s;
2980
2981#ifdef PMAP_REMOVE_PAGES_CURPROC_ONLY
2982	if (!curproc || (pmap != vmspace_pmap(curproc->p_vmspace))) {
2983		printf("warning: pmap_remove_pages called with non-current pmap\n");
2984		return;
2985	}
2986#endif
2987
2988	s = splvm();
2989	for(pv = TAILQ_FIRST(&pmap->pm_pvlist);
2990		pv;
2991		pv = npv) {
2992
2993		if (pv->pv_va >= eva || pv->pv_va < sva) {
2994			npv = TAILQ_NEXT(pv, pv_plist);
2995			continue;
2996		}
2997
2998#ifdef PMAP_REMOVE_PAGES_CURPROC_ONLY
2999		pte = (unsigned *)vtopte(pv->pv_va);
3000#else
3001		pte = pmap_pte_quick(pv->pv_pmap, pv->pv_va);
3002#endif
3003		tpte = *pte;
3004
3005/*
3006 * We cannot remove wired pages from a process' mapping at this time
3007 */
3008		if (tpte & PG_W) {
3009			npv = TAILQ_NEXT(pv, pv_plist);
3010			continue;
3011		}
3012		*pte = 0;
3013
3014		ppv = pa_to_pvh(tpte);
3015
3016		pv->pv_pmap->pm_stats.resident_count--;
3017
3018		/*
3019		 * Update the vm_page_t clean and reference bits.
3020		 */
3021		if (tpte & PG_M) {
3022			vm_page_dirty(ppv->pv_vm_page);
3023		}
3024
3025
3026		npv = TAILQ_NEXT(pv, pv_plist);
3027		TAILQ_REMOVE(&pv->pv_pmap->pm_pvlist, pv, pv_plist);
3028
3029		ppv->pv_list_count--;
3030		TAILQ_REMOVE(&ppv->pv_list, pv, pv_list);
3031		if (TAILQ_FIRST(&ppv->pv_list) == NULL) {
3032			vm_page_flag_clear(ppv->pv_vm_page, PG_MAPPED | PG_WRITEABLE);
3033		}
3034
3035		pmap_unuse_pt(pv->pv_pmap, pv->pv_va, pv->pv_ptem);
3036		free_pv_entry(pv);
3037	}
3038	splx(s);
3039	invltlb();
3040}
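
/*
 * Illustrative sketch (not compiled, hypothetical call site): running
 * down an exiting process's entire user address space in one pass, the
 * case this routine is optimized for.  VM_MAXUSER_ADDRESS is assumed
 * here to bound the user portion of the map.
 */
#if 0
static void
example_exit_teardown(struct proc *p)
{
	pmap_remove_pages(vmspace_pmap(p->p_vmspace), 0, VM_MAXUSER_ADDRESS);
}
#endif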
3041
3042/*
3043 * pmap_testbit tests bits in ptes.  Note that the
3044 * testbit/changebit routines are effectively inline,
3045 * and a lot of things compile-time evaluate.
3046 */
3047static boolean_t
3048pmap_testbit(pa, bit)
3049	register vm_offset_t pa;
3050	int bit;
3051{
3052	register pv_entry_t pv;
3053	pv_table_t *ppv;
3054	unsigned *pte;
3055	int s;
3056
3057	if (!pmap_is_managed(pa))
3058		return FALSE;
3059
3060	ppv = pa_to_pvh(pa);
3061	if (TAILQ_FIRST(&ppv->pv_list) == NULL)
3062		return FALSE;
3063
3064	s = splvm();
3065
3066	for (pv = TAILQ_FIRST(&ppv->pv_list);
3067		pv;
3068		pv = TAILQ_NEXT(pv, pv_list)) {
3069
3070		/*
3071		 * If the bit being tested is the modified or accessed
3072		 * bit, skip va's whose modified status is not tracked
3073		 * (see pmap_track_modified()).
3074		 */
3075		if (bit & (PG_A|PG_M)) {
3076			if (!pmap_track_modified(pv->pv_va))
3077				continue;
3078		}
3079
3080#if defined(PMAP_DIAGNOSTIC)
3081		if (!pv->pv_pmap) {
3082			printf("Null pmap (tb) at va: 0x%x\n", pv->pv_va);
3083			continue;
3084		}
3085#endif
3086		pte = pmap_pte_quick(pv->pv_pmap, pv->pv_va);
3087		if (*pte & bit) {
3088			splx(s);
3089			return TRUE;
3090		}
3091	}
3092	splx(s);
3093	return (FALSE);
3094}
3095
3096/*
3097 * this routine is used to modify bits in ptes
3098 */
3099static void
3100pmap_changebit(pa, bit, setem)
3101	vm_offset_t pa;
3102	int bit;
3103	boolean_t setem;
3104{
3105	register pv_entry_t pv;
3106	pv_table_t *ppv;
3107	register unsigned *pte;
3108	int changed;
3109	int s;
3110
3111	if (!pmap_is_managed(pa))
3112		return;
3113
3114	s = splvm();
3115	changed = 0;
3116	ppv = pa_to_pvh(pa);
3117
3118	/*
3119	 * Loop over all current mappings, setting/clearing as appropriate.
3120	 * If setting RO, do we need to clear the VAC?
3121	 */
3122	for (pv = TAILQ_FIRST(&ppv->pv_list);
3123		pv;
3124		pv = TAILQ_NEXT(pv, pv_list)) {
3125
3126		/*
3127		 * don't write protect pager mappings
3128		 */
3129		if (!setem && (bit == PG_RW)) {
3130			if (!pmap_track_modified(pv->pv_va))
3131				continue;
3132		}
3133
3134#if defined(PMAP_DIAGNOSTIC)
3135		if (!pv->pv_pmap) {
3136			printf("Null pmap (cb) at va: 0x%x\n", pv->pv_va);
3137			continue;
3138		}
3139#endif
3140
3141		pte = pmap_pte_quick(pv->pv_pmap, pv->pv_va);
3142
3143		if (setem) {
3144			*(int *)pte |= bit;
3145			changed = 1;
3146		} else {
3147			vm_offset_t pbits = *(vm_offset_t *)pte;
3148			if (pbits & bit) {
3149				changed = 1;
3150				if (bit == PG_RW) {
3151					if (pbits & PG_M) {
3152						vm_page_dirty(ppv->pv_vm_page);
3153					}
3154					*(int *)pte = pbits & ~(PG_M|PG_RW);
3155				} else {
3156					*(int *)pte = pbits & ~bit;
3157				}
3158			}
3159		}
3160	}
3161	splx(s);
3162	if (changed)
3163		invltlb();
3164}
3165
3166/*
3167 *      pmap_page_protect:
3168 *
3169 *      Lower the permission for all mappings to a given page.
3170 */
3171void
3172pmap_page_protect(vm_offset_t phys, vm_prot_t prot)
3173{
3174	if ((prot & VM_PROT_WRITE) == 0) {
3175		if (prot & (VM_PROT_READ | VM_PROT_EXECUTE)) {
3176			pmap_changebit(phys, PG_RW, FALSE);
3177		} else {
3178			pmap_remove_all(phys);
3179		}
3180	}
3181}
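
/*
 * Illustrative sketch (not compiled): the two behaviors of
 * pmap_page_protect() above -- write-protecting every mapping of a
 * page versus removing the mappings outright.
 */
#if 0
static void
example_page_protect(vm_page_t m)
{
	/* clear PG_RW in every mapping of the page */
	pmap_page_protect(VM_PAGE_TO_PHYS(m), VM_PROT_READ);
	/* or remove every mapping of the page */
	pmap_page_protect(VM_PAGE_TO_PHYS(m), VM_PROT_NONE);
}
#endif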
3182
3183vm_offset_t
3184pmap_phys_address(ppn)
3185	int ppn;
3186{
3187	return (i386_ptob(ppn));
3188}
3189
3190/*
3191 *	pmap_ts_referenced:
3192 *
3193 *	Return the count of reference bits for a page, clearing all of them.
3194 *
3195 */
3196int
3197pmap_ts_referenced(vm_offset_t pa)
3198{
3199	register pv_entry_t pv, pvf, pvn;
3200	pv_table_t *ppv;
3201	unsigned *pte;
3202	int s;
3203	int rtval = 0;
3204
3205	if (!pmap_is_managed(pa))
3206		return FALSE;
3207
3208	s = splvm();
3209
3210	ppv = pa_to_pvh(pa);
3211
3212	if (TAILQ_FIRST(&ppv->pv_list) == NULL) {
3213		splx(s);
3214		return 0;
3215	}
3216
3217	/*
3218	 * Loop over current mappings, counting and clearing PG_A as we go.
3219	 */
3220	pvf = 0;
3221	for (pv = TAILQ_FIRST(&ppv->pv_list); pv && pv != pvf; pv = pvn) {
3222		if (!pvf)
3223			pvf = pv;
3224		pvn = TAILQ_NEXT(pv, pv_list);
3225
3226		TAILQ_REMOVE(&ppv->pv_list, pv, pv_list);
3227		/*
3228		 * Skip va's whose referenced status is not tracked;
3229		 * such entries are simply requeued.
3231		 */
3232		if (!pmap_track_modified(pv->pv_va)) {
3233			TAILQ_INSERT_TAIL(&ppv->pv_list, pv, pv_list);
3234			continue;
3235		}
3236
3237		pte = pmap_pte_quick(pv->pv_pmap, pv->pv_va);
3238		if (pte == NULL) {
3239			TAILQ_INSERT_TAIL(&ppv->pv_list, pv, pv_list);
3240			continue;
3241		}
3242
3243		if (*pte & PG_A) {
3244			rtval++;
3245			*pte &= ~PG_A;
3246			if (rtval > 4) {
3247				TAILQ_INSERT_TAIL(&ppv->pv_list, pv, pv_list);
3248				break;
3249			}
3250		}
3251		TAILQ_INSERT_TAIL(&ppv->pv_list, pv, pv_list);
3252	}
3253
3254	splx(s);
3255	if (rtval) {
3256		invltlb();
3257	}
3258	return (rtval);
3259}
3260
3261/*
3262 *	pmap_is_modified:
3263 *
3264 *	Return whether or not the specified physical page was modified
3265 *	in any physical maps.
3266 */
3267boolean_t
3268pmap_is_modified(vm_offset_t pa)
3269{
3270	return pmap_testbit((pa), PG_M);
3271}
3272
3273/*
3274 *	Clear the modify bits on the specified physical page.
3275 */
3276void
3277pmap_clear_modify(vm_offset_t pa)
3278{
3279	pmap_changebit((pa), PG_M, FALSE);
3280}
3281
3282/*
3283 *	pmap_clear_reference:
3284 *
3285 *	Clear the reference bit on the specified physical page.
3286 */
3287void
3288pmap_clear_reference(vm_offset_t pa)
3289{
3290	pmap_changebit((pa), PG_A, FALSE);
3291}
3292
3293/*
3294 * Miscellaneous support routines follow
3295 */
3296
3297static void
3298i386_protection_init()
3299{
3300	register int *kp, prot;
3301
3302	kp = protection_codes;
3303	for (prot = 0; prot < 8; prot++) {
3304		switch (prot) {
3305		case VM_PROT_NONE | VM_PROT_NONE | VM_PROT_NONE:
3306			/*
3307			 * Read access is also 0. There isn't any execute bit,
3308			 * so just make it readable.
3309			 */
3310		case VM_PROT_READ | VM_PROT_NONE | VM_PROT_NONE:
3311		case VM_PROT_READ | VM_PROT_NONE | VM_PROT_EXECUTE:
3312		case VM_PROT_NONE | VM_PROT_NONE | VM_PROT_EXECUTE:
3313			*kp++ = 0;
3314			break;
3315		case VM_PROT_NONE | VM_PROT_WRITE | VM_PROT_NONE:
3316		case VM_PROT_NONE | VM_PROT_WRITE | VM_PROT_EXECUTE:
3317		case VM_PROT_READ | VM_PROT_WRITE | VM_PROT_NONE:
3318		case VM_PROT_READ | VM_PROT_WRITE | VM_PROT_EXECUTE:
3319			*kp++ = PG_RW;
3320			break;
3321		}
3322	}
3323}
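
/*
 * Illustrative sketch (not compiled): how the table built above is
 * consumed.  pmap_enter() uses pte_prot(pmap, prot), which is assumed
 * (see pmap.h) to reduce to an index into protection_codes[], yielding
 * either 0 (read-only) or PG_RW.
 */
#if 0
static unsigned
example_pte_prot(vm_prot_t prot)
{
	return (protection_codes[prot & 7]);	/* 0 or PG_RW */
}
#endif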
3324
3325/*
3326 * Map a set of physical memory pages into the kernel virtual
3327 * address space. Return a pointer to where it is mapped. This
3328 * routine is intended to be used for mapping device memory,
3329 * NOT real memory.
3330 */
3331void *
3332pmap_mapdev(pa, size)
3333	vm_offset_t pa;
3334	vm_size_t size;
3335{
3336	vm_offset_t va, tmpva;
3337	unsigned *pte;
3338
3339	size = roundup(size, PAGE_SIZE);
3340
3341	va = kmem_alloc_pageable(kernel_map, size);
3342#if !defined(MAX_PERF)
3343	if (!va)
3344		panic("pmap_mapdev: Couldn't alloc kernel virtual memory");
3345#endif
3346
3347	pa = pa & PG_FRAME;
3348	for (tmpva = va; size > 0;) {
3349		pte = (unsigned *)vtopte(tmpva);
3350		*pte = pa | PG_RW | PG_V | pgeflag;
3351		size -= PAGE_SIZE;
3352		tmpva += PAGE_SIZE;
3353		pa += PAGE_SIZE;
3354	}
3355	invltlb();
3356
3357	return ((void *) va);
3358}
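
/*
 * Illustrative sketch (not compiled): a typical pmap_mapdev() use,
 * giving the kernel a VA window onto device memory.  The physical
 * address and length below (the legacy VGA window) are placeholders.
 */
#if 0
static volatile char *example_vga;

static void
example_map_vga(void)
{
	example_vga = (volatile char *) pmap_mapdev(0xa0000, 64 * 1024);
}
#endif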
3359
3360/*
3361 * perform the pmap work for mincore
3362 */
3363int
3364pmap_mincore(pmap, addr)
3365	pmap_t pmap;
3366	vm_offset_t addr;
3367{
3368
3369	unsigned *ptep, pte;
3370	vm_page_t m;
3371	int val = 0;
3372
3373	ptep = pmap_pte(pmap, addr);
3374	if (ptep == 0) {
3375		return 0;
3376	}
3377
3378	if ((pte = *ptep) != 0) {
3379		pv_table_t *ppv;
3380		vm_offset_t pa;
3381
3382		val = MINCORE_INCORE;
3383		if ((pte & PG_MANAGED) == 0)
3384			return val;
3385
3386		pa = pte & PG_FRAME;
3387
3388		ppv = pa_to_pvh((pa & PG_FRAME));
3389		m = ppv->pv_vm_page;
3390
3391		/*
3392		 * Modified by us
3393		 */
3394		if (pte & PG_M)
3395			val |= MINCORE_MODIFIED|MINCORE_MODIFIED_OTHER;
3396		/*
3397		 * Modified by someone
3398		 */
3399		else if (m->dirty || pmap_is_modified(pa))
3400			val |= MINCORE_MODIFIED_OTHER;
3401		/*
3402		 * Referenced by us
3403		 */
3404		if (pte & PG_A)
3405			val |= MINCORE_REFERENCED|MINCORE_REFERENCED_OTHER;
3406
3407		/*
3408		 * Referenced by someone
3409		 */
3410		else if ((m->flags & PG_REFERENCED) || pmap_ts_referenced(pa)) {
3411			val |= MINCORE_REFERENCED_OTHER;
3412			vm_page_flag_set(m, PG_REFERENCED);
3413		}
3414	}
3415	return val;
3416}
3417
3418void
3419pmap_activate(struct proc *p)
3420{
3421#if defined(SWTCH_OPTIM_STATS)
3422	tlb_flush_count++;
3423#endif
3424	load_cr3(p->p_addr->u_pcb.pcb_cr3 =
3425		vtophys(vmspace_pmap(p->p_vmspace)->pm_pdir));
3426}
3427
3428vm_offset_t
3429pmap_addr_hint(vm_object_t obj, vm_offset_t addr, vm_size_t size) {
3430
3431	if ((obj == NULL) || (size < NBPDR) || (obj->type != OBJT_DEVICE)) {
3432		return addr;
3433	}
3434
3435	addr = (addr + (NBPDR - 1)) & ~(NBPDR - 1);
3436	return addr;
3437}
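
/*
 * Illustrative sketch (not compiled): the round-up performed above.
 * With NBPDR == 4MB, a hint of 0x00500000 becomes 0x00800000, so a
 * large OBJT_DEVICE mapping starts on a page directory boundary and
 * stays eligible for 4MB pages.
 */
#if 0
static vm_offset_t
example_round_pdr(vm_offset_t addr)
{
	return ((addr + (NBPDR - 1)) & ~(NBPDR - 1));
}
#endif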
3438
3439
3440#if defined(PMAP_DEBUG)
3441int pmap_pid_dump(int pid) {
3442	pmap_t pmap;
3443	struct proc *p;
3444	int npte = 0;
3445	int index;
3446	for (p = allproc.lh_first; p != NULL; p = p->p_list.le_next) {
3447		if (p->p_pid != pid)
3448			continue;
3449
3450		if (p->p_vmspace) {
3451			int i,j;
3452			index = 0;
3453			pmap = vmspace_pmap(p->p_vmspace);
3454			for(i=0;i<1024;i++) {
3455				pd_entry_t *pde;
3456				unsigned *pte;
3457				unsigned base = i << PDRSHIFT;
3458
3459				pde = &pmap->pm_pdir[i];
3460				if (pde && pmap_pde_v(pde)) {
3461					for(j=0;j<1024;j++) {
3462						unsigned va = base + (j << PAGE_SHIFT);
3463						if (va >= (vm_offset_t) VM_MIN_KERNEL_ADDRESS) {
3464							if (index) {
3465								index = 0;
3466								printf("\n");
3467							}
3468							return npte;
3469						}
3470						pte = pmap_pte_quick( pmap, va);
3471						if (pte && pmap_pte_v(pte)) {
3472							vm_offset_t pa;
3473							vm_page_t m;
3474							pa = *(int *)pte;
3475							m = PHYS_TO_VM_PAGE((pa & PG_FRAME));
3476							printf("va: 0x%x, pt: 0x%x, h: %d, w: %d, f: 0x%x",
3477								va, pa, m->hold_count, m->wire_count, m->flags);
3478							npte++;
3479							index++;
3480							if (index >= 2) {
3481								index = 0;
3482								printf("\n");
3483							} else {
3484								printf(" ");
3485							}
3486						}
3487					}
3488				}
3489			}
3490		}
3491	}
3492	return npte;
3493}
3494#endif
3495
3496#if defined(DEBUG)
3497
3498static void	pads __P((pmap_t pm));
3499void		pmap_pvdump __P((vm_offset_t pa));
3500
3501/* print address space of pmap */
3502static void
3503pads(pm)
3504	pmap_t pm;
3505{
3506	unsigned va, i, j;
3507	unsigned *ptep;
3508
3509	if (pm == kernel_pmap)
3510		return;
3511	for (i = 0; i < 1024; i++)
3512		if (pm->pm_pdir[i])
3513			for (j = 0; j < 1024; j++) {
3514				va = (i << PDRSHIFT) + (j << PAGE_SHIFT);
3515				if (pm == kernel_pmap && va < KERNBASE)
3516					continue;
3517				if (pm != kernel_pmap && va > UPT_MAX_ADDRESS)
3518					continue;
3519				ptep = pmap_pte_quick(pm, va);
3520				if (pmap_pte_v(ptep))
3521					printf("%x:%x ", va, *(int *) ptep);
3522			}
3523
3524}
3525
3526void
3527pmap_pvdump(pa)
3528	vm_offset_t pa;
3529{
3530	pv_table_t *ppv;
3531	register pv_entry_t pv;
3532
3533	printf("pa %x", pa);
3534	ppv = pa_to_pvh(pa);
3535	for (pv = TAILQ_FIRST(&ppv->pv_list);
3536		pv;
3537		pv = TAILQ_NEXT(pv, pv_list)) {
3538#ifdef used_to_be
3539		printf(" -> pmap %p, va %x, flags %x",
3540		    (void *)pv->pv_pmap, pv->pv_va, pv->pv_flags);
3541#endif
3542		printf(" -> pmap %p, va %x", (void *)pv->pv_pmap, pv->pv_va);
3543		pads(pv->pv_pmap);
3544	}
3545	printf(" ");
3546}
3547#endif
3548