pmap.c revision 45252
/*
 * Copyright (c) 1991 Regents of the University of California.
 * All rights reserved.
 * Copyright (c) 1994 John S. Dyson
 * All rights reserved.
 * Copyright (c) 1994 David Greenman
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * the Systems Programming Group of the University of Utah Computer
 * Science Department and William Jolitz of UUNET Technologies Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	from:	@(#)pmap.c	7.7 (Berkeley)	5/12/91
 *	$Id: pmap.c,v 1.225 1999/03/13 07:31:29 alc Exp $
 */

/*
 *	Manages physical address maps.
 *
 *	In addition to hardware address maps, this
 *	module is called upon to provide software-use-only
 *	maps which may or may not be stored in the same
 *	form as hardware maps.  These pseudo-maps are
 *	used to store intermediate results from copy
 *	operations to and from address spaces.
 *
 *	Since the information managed by this module is
 *	also stored by the logical address mapping module,
 *	this module may throw away valid virtual-to-physical
 *	mappings at almost any time.  However, invalidations
 *	of virtual-to-physical mappings must be done as
 *	requested.
 *
 *	In order to cope with hardware architectures which
 *	make virtual-to-physical map invalidates expensive,
 *	this module may delay invalidate or reduced protection
 *	operations until such time as they are actually
 *	necessary.  This module is given full information as
 *	to which processors are currently using which maps,
 *	and to when physical maps must be made correct.
 */

#include "opt_disable_pse.h"
#include "opt_pmap.h"
#include "opt_msgbuf.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/msgbuf.h>
#include <sys/vmmeter.h>
#include <sys/mman.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/vm_prot.h>
#include <sys/lock.h>
#include <vm/vm_kern.h>
#include <vm/vm_page.h>
#include <vm/vm_map.h>
#include <vm/vm_object.h>
#include <vm/vm_extern.h>
#include <vm/vm_pageout.h>
#include <vm/vm_pager.h>
#include <vm/vm_zone.h>

#include <sys/user.h>

#include <machine/cputypes.h>
#include <machine/md_var.h>
#include <machine/specialreg.h>
#if defined(SMP) || defined(APIC_IO)
#include <machine/smp.h>
#include <machine/apic.h>
#endif /* SMP || APIC_IO */

#define PMAP_KEEP_PDIRS
#ifndef PMAP_SHPGPERPROC
#define PMAP_SHPGPERPROC 200
#endif

#if defined(DIAGNOSTIC)
#define PMAP_DIAGNOSTIC
#endif

#define MINPV 2048

#if !defined(PMAP_DIAGNOSTIC)
#define PMAP_INLINE __inline
#else
#define PMAP_INLINE
#endif

/*
 * Get PDEs and PTEs for user/kernel address space
 */
#define	pmap_pde(m, v)	(&((m)->pm_pdir[(vm_offset_t)(v) >> PDRSHIFT]))
#define pdir_pde(m, v) (m[(vm_offset_t)(v) >> PDRSHIFT])

#define pmap_pde_v(pte)		((*(int *)pte & PG_V) != 0)
#define pmap_pte_w(pte)		((*(int *)pte & PG_W) != 0)
#define pmap_pte_m(pte)		((*(int *)pte & PG_M) != 0)
#define pmap_pte_u(pte)		((*(int *)pte & PG_A) != 0)
#define pmap_pte_v(pte)		((*(int *)pte & PG_V) != 0)

#define pmap_pte_set_w(pte, v) ((v)?(*(int *)pte |= PG_W):(*(int *)pte &= ~PG_W))
#define pmap_pte_set_prot(pte, v) ((*(int *)pte &= ~PG_PROT), (*(int *)pte |= (v)))

/*
 * Given a map and a machine independent protection code,
 * convert to an i386 protection code.
 */
#define pte_prot(m, p)	(protection_codes[p])
static int protection_codes[8];

#define	pa_index(pa)		atop((pa) - vm_first_phys)
#define	pa_to_pvh(pa)		(&pv_table[pa_index(pa)])

static struct pmap kernel_pmap_store;
pmap_t kernel_pmap;
extern pd_entry_t my_idlePTD;

vm_offset_t avail_start;	/* PA of first available physical page */
vm_offset_t avail_end;		/* PA of last available physical page */
vm_offset_t virtual_avail;	/* VA of first avail page (after kernel bss) */
vm_offset_t virtual_end;	/* VA of last avail page (end of kernel AS) */
static boolean_t pmap_initialized = FALSE;	/* Has pmap_init completed? */
static vm_offset_t vm_first_phys;
static int pgeflag;		/* PG_G or-in */
static int pseflag;		/* PG_PS or-in */
static int pv_npg;

static vm_object_t kptobj;

static int nkpt;
vm_offset_t kernel_vm_end;

/*
 * Data for the pv entry allocation mechanism
 */
static vm_zone_t pvzone;
static struct vm_zone pvzone_store;
static struct vm_object pvzone_obj;
static int pv_entry_count=0, pv_entry_max=0, pv_entry_high_water=0;
static int pmap_pagedaemon_waken = 0;
static struct pv_entry *pvinit;

/*
 * All those kernel PT submaps that BSD is so fond of
 */
pt_entry_t *CMAP1 = 0;
static pt_entry_t *CMAP2, *ptmmap;
static pv_table_t *pv_table;
caddr_t CADDR1 = 0, ptvmmap = 0;
static caddr_t CADDR2;
static pt_entry_t *msgbufmap;
struct msgbuf *msgbufp=0;

/*
 *  PPro_vmtrr
 */
struct ppro_vmtrr PPro_vmtrr[NPPROVMTRR];

/* AIO support */
extern struct vmspace *aiovmspace;

#ifdef SMP
extern char prv_CPAGE1[], prv_CPAGE2[], prv_CPAGE3[];
extern pt_entry_t *prv_CMAP1, *prv_CMAP2, *prv_CMAP3;
extern pd_entry_t *IdlePTDS[];
extern pt_entry_t SMP_prvpt[];
#endif

#ifdef SMP
extern unsigned int prv_PPAGE1[];
extern pt_entry_t *prv_PMAP1;
#else
static pt_entry_t *PMAP1 = 0;
static unsigned *PADDR1 = 0;
#endif

static PMAP_INLINE void	free_pv_entry __P((pv_entry_t pv));
static unsigned * get_ptbase __P((pmap_t pmap));
static pv_entry_t get_pv_entry __P((void));
static void	i386_protection_init __P((void));
static void	pmap_changebit __P((vm_offset_t pa, int bit, boolean_t setem));

static PMAP_INLINE int	pmap_is_managed __P((vm_offset_t pa));
static void	pmap_remove_all __P((vm_offset_t pa));
static vm_page_t pmap_enter_quick __P((pmap_t pmap, vm_offset_t va,
				      vm_offset_t pa, vm_page_t mpte));
static int pmap_remove_pte __P((struct pmap *pmap, unsigned *ptq,
					vm_offset_t sva));
static void pmap_remove_page __P((struct pmap *pmap, vm_offset_t va));
static int pmap_remove_entry __P((struct pmap *pmap, pv_table_t *pv,
					vm_offset_t va));
static boolean_t pmap_testbit __P((vm_offset_t pa, int bit));
static void pmap_insert_entry __P((pmap_t pmap, vm_offset_t va,
		vm_page_t mpte, vm_offset_t pa));

static vm_page_t pmap_allocpte __P((pmap_t pmap, vm_offset_t va));

static int pmap_release_free_page __P((pmap_t pmap, vm_page_t p));
static vm_page_t _pmap_allocpte __P((pmap_t pmap, unsigned ptepindex));
static unsigned * pmap_pte_quick __P((pmap_t pmap, vm_offset_t va));
static vm_page_t pmap_page_lookup __P((vm_object_t object, vm_pindex_t pindex));
static int pmap_unuse_pt __P((pmap_t, vm_offset_t, vm_page_t));
static vm_offset_t pmap_kmem_choose(vm_offset_t addr);
void pmap_collect(void);

static unsigned pdir4mb;

/*
 *	Routine:	pmap_pte
 *	Function:
 *		Extract the page table entry associated
 *		with the given map/virtual_address pair.
 */

PMAP_INLINE unsigned *
pmap_pte(pmap, va)
	register pmap_t pmap;
	vm_offset_t va;
{
	unsigned *pdeaddr;

	if (pmap) {
		pdeaddr = (unsigned *) pmap_pde(pmap, va);
		if (*pdeaddr & PG_PS)
			return pdeaddr;
		if (*pdeaddr) {
			return get_ptbase(pmap) + i386_btop(va);
		}
	}
	return (0);
}

/*
 * Move the kernel virtual free pointer to the next
 * 4MB.  This is used to help improve performance
 * by using a large (4MB) page for much of the kernel
 * (.text, .data, .bss)
 */
static vm_offset_t
pmap_kmem_choose(vm_offset_t addr) {
	vm_offset_t newaddr = addr;
#ifndef DISABLE_PSE
	if (cpu_feature & CPUID_PSE) {
		newaddr = (addr + (NBPDR - 1)) & ~(NBPDR - 1);
	}
#endif
	return newaddr;
}

/*
 *	Bootstrap the system enough to run with virtual memory.
 *
 *	On the i386 this is called after mapping has already been enabled
 *	and just syncs the pmap module with what has already been done.
 *	[We can't call it easily with mapping off since the kernel is not
 *	mapped with PA == VA, hence we would have to relocate every address
 *	from the linked base (virtual) address "KERNBASE" to the actual
 *	(physical) address starting relative to 0]
 */
void
pmap_bootstrap(firstaddr, loadaddr)
	vm_offset_t firstaddr;
	vm_offset_t loadaddr;
{
	vm_offset_t va;
	pt_entry_t *pte;
#ifdef SMP
	int i, j;
#endif

	avail_start = firstaddr;

	/*
	 * XXX The calculation of virtual_avail is wrong. It's NKPT*PAGE_SIZE too
	 * large. It should instead be correctly calculated in locore.s and
	 * not based on 'first' (which is a physical address, not a virtual
	 * address, for the start of unused physical memory). The kernel
	 * page tables are NOT double mapped and thus should not be included
	 * in this calculation.
	 */
	virtual_avail = (vm_offset_t) KERNBASE + firstaddr;
	virtual_avail = pmap_kmem_choose(virtual_avail);

	virtual_end = VM_MAX_KERNEL_ADDRESS;

	/*
	 * Initialize protection array.
	 */
	i386_protection_init();

	/*
	 * The kernel's pmap is statically allocated so we don't have to use
	 * pmap_create, which is unlikely to work correctly at this part of
	 * the boot sequence (XXX and which no longer exists).
	 */
	kernel_pmap = &kernel_pmap_store;

	kernel_pmap->pm_pdir = (pd_entry_t *) (KERNBASE + (u_int)IdlePTD);
	kernel_pmap->pm_count = 1;
	kernel_pmap->pm_active = -1;	/* don't allow deactivation */
	TAILQ_INIT(&kernel_pmap->pm_pvlist);
	nkpt = NKPT;

	/*
	 * Reserve some special page table entries/VA space for temporary
	 * mapping of pages.
	 */
#define	SYSMAP(c, p, v, n)	\
	v = (c)va; va += ((n)*PAGE_SIZE); p = pte; pte += (n);

	va = virtual_avail;
	pte = (pt_entry_t *) pmap_pte(kernel_pmap, va);

	/*
	 * CMAP1/CMAP2 are used for zeroing and copying pages.
	 */
	SYSMAP(caddr_t, CMAP1, CADDR1, 1)
	SYSMAP(caddr_t, CMAP2, CADDR2, 1)

	/*
	 * ptvmmap is used for reading arbitrary physical pages via /dev/mem.
	 * XXX ptmmap is not used.
	 */
	SYSMAP(caddr_t, ptmmap, ptvmmap, 1)

	/*
	 * msgbufp is used to map the system message buffer.
	 * XXX msgbufmap is not used.
	 */
	SYSMAP(struct msgbuf *, msgbufmap, msgbufp,
	       atop(round_page(MSGBUF_SIZE)))

#if !defined(SMP)
	/*
	 * ptemap is used for pmap_pte_quick
	 */
	SYSMAP(unsigned *, PMAP1, PADDR1, 1);
#endif

	virtual_avail = va;

	*(int *) CMAP1 = *(int *) CMAP2 = 0;
	*(int *) PTD = 0;


	pgeflag = 0;
#if !defined(SMP)
	if (cpu_feature & CPUID_PGE) {
		pgeflag = PG_G;
	}
#endif

/*
 * Initialize the 4MB page size flag
 */
	pseflag = 0;
/*
 * The 4MB page version of the initial
 * kernel page mapping.
 */
	pdir4mb = 0;

#if !defined(DISABLE_PSE)
	if (cpu_feature & CPUID_PSE) {
		unsigned ptditmp;
		/*
		 * Enable the PSE mode
		 */
		load_cr4(rcr4() | CR4_PSE);

		/*
		 * Note that we have enabled PSE mode
		 */
		pseflag = PG_PS;
		ptditmp = *((unsigned *)PTmap + i386_btop(KERNBASE));
		ptditmp &= ~(NBPDR - 1);
		ptditmp |= PG_V | PG_RW | PG_PS | PG_U | pgeflag;
		pdir4mb = ptditmp;
		/*
		 * We can do the mapping here for the single processor
		 * case.  We simply ignore the old page table page from
		 * now on.
		 */
#if !defined(SMP)
		PTD[KPTDI] = (pd_entry_t) ptditmp;
		kernel_pmap->pm_pdir[KPTDI] = (pd_entry_t) ptditmp;
		invltlb();
#endif
	}
#endif

#ifdef SMP
	if (cpu_apic_address == 0)
		panic("pmap_bootstrap: no local apic!");

	/* 0 = private page */
	/* 1 = page table page */
	/* 2 = local apic */
	/* 16-31 = io apics */
	SMP_prvpt[2] = (pt_entry_t)(PG_V | PG_RW | pgeflag |
	    (cpu_apic_address & PG_FRAME));

	for (i = 0; i < mp_napics; i++) {
		for (j = 0; j < 16; j++) {
			/* same page frame as a previous IO apic? */
			if (((vm_offset_t)SMP_prvpt[j + 16] & PG_FRAME) ==
			    (io_apic_address[i] & PG_FRAME)) {
				ioapic[i] = (ioapic_t *)&SMP_ioapic[j * PAGE_SIZE];
				break;
			}
			/* use this slot if available */
			if (((vm_offset_t)SMP_prvpt[j + 16] & PG_FRAME) == 0) {
				SMP_prvpt[j + 16] = (pt_entry_t)(PG_V | PG_RW |
				    pgeflag | (io_apic_address[i] & PG_FRAME));
				ioapic[i] = (ioapic_t *)&SMP_ioapic[j * PAGE_SIZE];
				break;
			}
		}
		if (j == 16)
			panic("no space to map IO apic %d!", i);
	}

	/* BSP does this itself, AP's get it pre-set */
	prv_CMAP1 = &SMP_prvpt[3 + UPAGES];
	prv_CMAP2 = &SMP_prvpt[4 + UPAGES];
	prv_CMAP3 = &SMP_prvpt[5 + UPAGES];
	prv_PMAP1 = &SMP_prvpt[6 + UPAGES];
#endif

	invltlb();

}

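/*
 * Read the current Pentium Pro variable-range MTRRs into the
 * PPro_vmtrr[] save area.
 */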
void
getmtrr()
{
	int i;

	if (cpu_class == CPUCLASS_686) {
		for (i = 0; i < NPPROVMTRR; i++) {
			PPro_vmtrr[i].base = rdmsr(PPRO_VMTRRphysBase0 + i * 2);
			PPro_vmtrr[i].mask = rdmsr(PPRO_VMTRRphysMask0 + i * 2);
		}
	}
}

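/*
 * Write the saved PPro_vmtrr[] values back into the Pentium Pro
 * variable-range MTRRs.
 */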
void
putmtrr()
{
	int i;

	if (cpu_class == CPUCLASS_686) {
		wbinvd();
		for (i = 0; i < NPPROVMTRR; i++) {
			wrmsr(PPRO_VMTRRphysBase0 + i * 2, PPro_vmtrr[i].base);
			wrmsr(PPRO_VMTRRphysMask0 + i * 2, PPro_vmtrr[i].mask);
		}
	}
}

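/*
 * Would mark low memory write-back and PC video memory write-combining
 * via the Pentium Pro fixed-range MTRRs; the body is currently
 * compiled out.
 */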
void
pmap_setvidram(void)
{
#if 0
	if (cpu_class == CPUCLASS_686) {
		wbinvd();
		/*
		 * Set memory between 0-640K to be WB
		 */
		wrmsr(0x250, 0x0606060606060606LL);
		wrmsr(0x258, 0x0606060606060606LL);
		/*
		 * Set normal, PC video memory to be WC
		 */
		wrmsr(0x259, 0x0101010101010101LL);
	}
#endif
}

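/*
 * Map a region of device memory write-combining by programming a free
 * Pentium Pro variable-range MTRR, unless some MTRR already covers the
 * region's base page.
 */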
void
pmap_setdevram(unsigned long long basea, vm_offset_t sizea)
{
	int i, free, skip;
	unsigned basepage, basepaget;
	unsigned long long base;
	unsigned long long mask;

	if (cpu_class != CPUCLASS_686)
		return;

	free = -1;
	skip = 0;
	basea &= ~0xfff;
	base = basea | 0x1;
	mask = (long long) (0xfffffffffLL - ((long) sizea - 1)) | (long long) 0x800;
	mask &= ~0x7ff;

	basepage = (long long) (base >> 12);
	for (i = 0; i < NPPROVMTRR; i++) {
		PPro_vmtrr[i].base = rdmsr(PPRO_VMTRRphysBase0 + i * 2);
		PPro_vmtrr[i].mask = rdmsr(PPRO_VMTRRphysMask0 + i * 2);
		basepaget = (long long) (PPro_vmtrr[i].base >> 12);
		if (basepage == basepaget)
			skip = 1;
		if ((PPro_vmtrr[i].mask & 0x800) == 0) {
			if (free == -1)
				free = i;
		}
	}

	if (!skip && free != -1) {
		wbinvd();
		PPro_vmtrr[free].base = base;
		PPro_vmtrr[free].mask = mask;
		wrmsr(PPRO_VMTRRphysBase0 + free * 2, base);
		wrmsr(PPRO_VMTRRphysMask0 + free * 2, mask);
		printf(
	"pmap: added WC mapping at page: 0x%x %x, size: %u mask: 0x%x %x\n",
		    (u_int)(base >> 32), (u_int)base, sizea,
		    (u_int)(mask >> 32), (u_int)mask);
	}
}

/*
 * Set 4mb pdir for mp startup, and global flags
 */
void
pmap_set_opt(unsigned *pdir) {
	int i;

	if (pseflag && (cpu_feature & CPUID_PSE)) {
		load_cr4(rcr4() | CR4_PSE);
		if (pdir4mb) {
			pdir[KPTDI] = pdir4mb;
		}
	}

	if (pgeflag && (cpu_feature & CPUID_PGE)) {
		load_cr4(rcr4() | CR4_PGE);
		for (i = KPTDI; i < KPTDI + nkpt; i++) {
			if (pdir[i]) {
				pdir[i] |= PG_G;
			}
		}
	}
}

/*
 * Setup the PTD for the boot processor
 */
void
pmap_set_opt_bsp(void)
{
	pmap_set_opt((unsigned *)kernel_pmap->pm_pdir);
	pmap_set_opt((unsigned *)PTD);
	invltlb();
}

/*
 *	Initialize the pmap module.
 *	Called by vm_init, to initialize any structures that the pmap
 *	system needs to map virtual memory.
 *	pmap_init has been enhanced to support discontiguous physical
 *	memory in a fairly consistent way.
 */
void
pmap_init(phys_start, phys_end)
	vm_offset_t phys_start, phys_end;
{
	vm_offset_t addr;
	vm_size_t s;
	int i;
	int initial_pvs;

	/*
	 * object for kernel page table pages
	 */
	kptobj = vm_object_allocate(OBJT_DEFAULT, NKPDE);

	/*
	 * calculate the number of pv_entries needed
	 */
	vm_first_phys = phys_avail[0];
	for (i = 0; phys_avail[i + 1]; i += 2);
	pv_npg = (phys_avail[(i - 2) + 1] - vm_first_phys) / PAGE_SIZE;

	/*
	 * Allocate memory for random pmap data structures.  Includes the
	 * pv_head_table.
	 */
	s = (vm_size_t) (sizeof(pv_table_t) * pv_npg);
	s = round_page(s);

	addr = (vm_offset_t) kmem_alloc(kernel_map, s);
	pv_table = (pv_table_t *) addr;
	for (i = 0; i < pv_npg; i++) {
		vm_offset_t pa;
		TAILQ_INIT(&pv_table[i].pv_list);
		pv_table[i].pv_list_count = 0;
		pa = vm_first_phys + i * PAGE_SIZE;
		pv_table[i].pv_vm_page = PHYS_TO_VM_PAGE(pa);
	}

	/*
	 * init the pv free list
	 */
	initial_pvs = pv_npg;
	if (initial_pvs < MINPV)
		initial_pvs = MINPV;
	pvzone = &pvzone_store;
	pvinit = (struct pv_entry *) kmem_alloc(kernel_map,
		initial_pvs * sizeof (struct pv_entry));
	zbootinit(pvzone, "PV ENTRY", sizeof (struct pv_entry), pvinit,
	    initial_pvs);

	/*
	 * Now it is safe to enable pv_table recording.
	 */
	pmap_initialized = TRUE;
}

/*
 * Initialize the address space (zone) for the pv_entries.  Set a
 * high water mark so that the system can recover from excessive
 * numbers of pv entries.
 */
void
pmap_init2() {
	pv_entry_max = PMAP_SHPGPERPROC * maxproc + pv_npg;
	pv_entry_high_water = 9 * (pv_entry_max / 10);
	zinitna(pvzone, &pvzone_obj, NULL, 0, pv_entry_max, ZONE_INTERRUPT, 1);
}

/*
 *	Used to map a range of physical addresses into kernel
 *	virtual address space.
 *
 *	For now, VM is already on, we only need to map the
 *	specified memory.
 */
vm_offset_t
pmap_map(virt, start, end, prot)
	vm_offset_t virt;
	vm_offset_t start;
	vm_offset_t end;
	int prot;
{
	while (start < end) {
		pmap_enter(kernel_pmap, virt, start, prot, FALSE);
		virt += PAGE_SIZE;
		start += PAGE_SIZE;
	}
	return (virt);
}


/***************************************************
 * Low level helper routines.....
 ***************************************************/

#if defined(PMAP_DIAGNOSTIC)

/*
 * This code checks for non-writeable/modified pages.
 * This should be an invalid condition.
 */
static int
pmap_nw_modified(pt_entry_t ptea) {
	int pte;

	pte = (int) ptea;

	if ((pte & (PG_M|PG_RW)) == PG_M)
		return 1;
	else
		return 0;
}
#endif


/*
 * this routine defines the region(s) of memory that should
 * not be tested for the modified bit.
 */
static PMAP_INLINE int
pmap_track_modified( vm_offset_t va) {
	if ((va < clean_sva) || (va >= clean_eva))
		return 1;
	else
		return 0;
}

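/*
 * Invalidate a single page mapping in the TLB.  The 386 has no
 * invlpg instruction, so a full TLB flush is done there instead.
 */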
static PMAP_INLINE void
invltlb_1pg( vm_offset_t va) {
#if defined(I386_CPU)
	if (cpu_class == CPUCLASS_386) {
		invltlb();
	} else
#endif
	{
		invlpg(va);
	}
}

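/*
 * Invalidate one page on the current CPU if this pmap is active
 * there; under SMP, any other CPUs using the pmap get a full
 * TLB flush.
 */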
static __inline void
pmap_TLB_invalidate(pmap_t pmap, vm_offset_t va)
{
#if defined(SMP)
	if (pmap->pm_active & (1 << cpuid))
		cpu_invlpg((void *)va);
	if (pmap->pm_active & other_cpus)
		smp_invltlb();
#else
	if (pmap->pm_active)
		invltlb_1pg(va);
#endif
}

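/*
 * Flush the entire TLB on every CPU on which this pmap is active.
 */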
static __inline void
pmap_TLB_invalidate_all(pmap_t pmap)
{
#if defined(SMP)
	if (pmap->pm_active & (1 << cpuid))
		cpu_invltlb();
	if (pmap->pm_active & other_cpus)
		smp_invltlb();
#else
	if (pmap->pm_active)
		invltlb();
#endif
}

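/*
 * Return the virtual address at which the given pmap's page tables
 * are mapped: the recursive PTmap for the kernel or current address
 * space, otherwise the alternate APTmap, (re)loading APTDpde first
 * if necessary.
 */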
static unsigned *
get_ptbase(pmap)
	pmap_t pmap;
{
	unsigned frame = (unsigned) pmap->pm_pdir[PTDPTDI] & PG_FRAME;

	/* are we current address space or kernel? */
	if (pmap == kernel_pmap || frame == (((unsigned) PTDpde) & PG_FRAME)) {
		return (unsigned *) PTmap;
	}
	/* otherwise, we are alternate address space */
	if (frame != (((unsigned) APTDpde) & PG_FRAME)) {
		APTDpde = (pd_entry_t) (frame | PG_RW | PG_V);
#if defined(SMP)
		/* The page directory is not shared between CPUs */
		cpu_invltlb();
#else
		invltlb();
#endif
	}
	return (unsigned *) APTmap;
}

/*
 * Super fast pmap_pte routine best used when scanning
 * the pv lists.  This eliminates many coarse-grained
 * invltlb calls.  Note that many of the pv list
 * scans are across different pmaps.  It is very wasteful
 * to do an entire invltlb for checking a single mapping.
 */

static unsigned *
pmap_pte_quick(pmap, va)
	register pmap_t pmap;
	vm_offset_t va;
{
	unsigned pde, newpf;
	if ((pde = (unsigned) pmap->pm_pdir[va >> PDRSHIFT]) != 0) {
		unsigned frame = (unsigned) pmap->pm_pdir[PTDPTDI] & PG_FRAME;
		unsigned index = i386_btop(va);
		/* are we current address space or kernel? */
		if ((pmap == kernel_pmap) ||
			(frame == (((unsigned) PTDpde) & PG_FRAME))) {
			return (unsigned *) PTmap + index;
		}
		newpf = pde & PG_FRAME;
#ifdef SMP
		if ( ((* (unsigned *) prv_PMAP1) & PG_FRAME) != newpf) {
			* (unsigned *) prv_PMAP1 = newpf | PG_RW | PG_V;
			cpu_invlpg(&prv_PPAGE1);
		}
		return prv_PPAGE1 + ((unsigned) index & (NPTEPG - 1));
#else
		if ( ((* (unsigned *) PMAP1) & PG_FRAME) != newpf) {
			* (unsigned *) PMAP1 = newpf | PG_RW | PG_V;
			invltlb_1pg((vm_offset_t) PADDR1);
		}
		return PADDR1 + ((unsigned) index & (NPTEPG - 1));
#endif
	}
	return (0);
}

/*
 *	Routine:	pmap_extract
 *	Function:
 *		Extract the physical page address associated
 *		with the given map/virtual_address pair.
 */
vm_offset_t
pmap_extract(pmap, va)
	register pmap_t pmap;
	vm_offset_t va;
{
	vm_offset_t rtval;
	vm_offset_t pdirindex;
	pdirindex = va >> PDRSHIFT;
	if (pmap && (rtval = (unsigned) pmap->pm_pdir[pdirindex])) {
		unsigned *pte;
		if ((rtval & PG_PS) != 0) {
			rtval &= ~(NBPDR - 1);
			rtval |= va & (NBPDR - 1);
			return rtval;
		}
		pte = get_ptbase(pmap) + i386_btop(va);
		rtval = ((*pte & PG_FRAME) | (va & PAGE_MASK));
		return rtval;
	}
	return 0;

}

/*
 * determine if a page is managed (memory vs. device)
 */
static PMAP_INLINE int
pmap_is_managed(pa)
	vm_offset_t pa;
{
	int i;

	if (!pmap_initialized)
		return 0;

	for (i = 0; phys_avail[i + 1]; i += 2) {
		if (pa < phys_avail[i + 1] && pa >= phys_avail[i])
			return 1;
	}
	return 0;
}


/***************************************************
 * Low level mapping routines.....
 ***************************************************/

/*
 * add a wired page to the kva
 * note that in order for the mapping to take effect -- you
 * should do an invltlb after doing the pmap_kenter...
 */
PMAP_INLINE void
pmap_kenter(va, pa)
	vm_offset_t va;
	register vm_offset_t pa;
{
	register unsigned *pte;
	unsigned npte, opte;

	npte = pa | PG_RW | PG_V | pgeflag;
	pte = (unsigned *)vtopte(va);
	opte = *pte;
	*pte = npte;
	if (opte)
		invltlb_1pg(va);
}

/*
 * remove a page from the kernel pagetables
 */
PMAP_INLINE void
pmap_kremove(va)
	vm_offset_t va;
{
	register unsigned *pte;

	pte = (unsigned *)vtopte(va);
	*pte = 0;
	invltlb_1pg(va);
}

/*
 * Add a list of wired pages to the kva
 * this routine is only used for temporary
 * kernel mappings that do not need to have
 * page modification or references recorded.
 * Note that old mappings are simply written
 * over.  The page *must* be wired.
 */
void
pmap_qenter(va, m, count)
	vm_offset_t va;
	vm_page_t *m;
	int count;
{
	int i;

	for (i = 0; i < count; i++) {
		vm_offset_t tva = va + i * PAGE_SIZE;
		pmap_kenter(tva, VM_PAGE_TO_PHYS(m[i]));
	}
}

/*
 * this routine jerks page mappings from the
 * kernel -- it is meant only for temporary mappings.
 */
void
pmap_qremove(va, count)
	vm_offset_t va;
	int count;
{
	int i;

	for (i = 0; i < count; i++) {
		pmap_kremove(va);
		va += PAGE_SIZE;
	}
}

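/*
 * Look up a page in the given object, sleeping until the page is
 * not busy.
 */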
static vm_page_t
pmap_page_lookup(object, pindex)
	vm_object_t object;
	vm_pindex_t pindex;
{
	vm_page_t m;
retry:
	m = vm_page_lookup(object, pindex);
	if (m && vm_page_sleep_busy(m, FALSE, "pplookp"))
		goto retry;
	return m;
}

/*
 * Create the UPAGES for a new process.
 * This routine directly affects the fork perf for a process.
 */
void
pmap_new_proc(p)
	struct proc *p;
{
	int i, updateneeded;
	vm_object_t upobj;
	vm_page_t m;
	struct user *up;
	unsigned *ptek, oldpte;

	/*
	 * allocate object for the upages
	 */
	if ((upobj = p->p_upages_obj) == NULL) {
		upobj = vm_object_allocate( OBJT_DEFAULT, UPAGES);
		p->p_upages_obj = upobj;
	}

	/* get a kernel virtual address for the UPAGES for this proc */
	if ((up = p->p_addr) == NULL) {
		up = (struct user *) kmem_alloc_pageable(kernel_map,
				UPAGES * PAGE_SIZE);
#if !defined(MAX_PERF)
		if (up == NULL)
			panic("pmap_new_proc: u_map allocation failed");
#endif
		p->p_addr = up;
	}

	ptek = (unsigned *) vtopte((vm_offset_t) up);

	updateneeded = 0;
	for (i = 0; i < UPAGES; i++) {
		/*
		 * Get a kernel stack page
		 */
		m = vm_page_grab(upobj, i, VM_ALLOC_NORMAL | VM_ALLOC_RETRY);

		/*
		 * Wire the page
		 */
		m->wire_count++;
		cnt.v_wire_count++;

		oldpte = *(ptek + i);
		/*
		 * Enter the page into the kernel address space.
		 */
		*(ptek + i) = VM_PAGE_TO_PHYS(m) | PG_RW | PG_V | pgeflag;
		if (oldpte) {
			if ((oldpte & PG_G) || (cpu_class > CPUCLASS_386)) {
				invlpg((vm_offset_t) up + i * PAGE_SIZE);
			} else {
				updateneeded = 1;
			}
		}

		vm_page_wakeup(m);
		vm_page_flag_clear(m, PG_ZERO);
		vm_page_flag_set(m, PG_MAPPED | PG_WRITEABLE);
		m->valid = VM_PAGE_BITS_ALL;
	}
	if (updateneeded)
		invltlb();
}

/*
 * Dispose the UPAGES for a process that has exited.
 * This routine directly impacts the exit perf of a process.
 */
void
pmap_dispose_proc(p)
	struct proc *p;
{
	int i;
	vm_object_t upobj;
	vm_page_t m;
	unsigned *ptek, oldpte;

	upobj = p->p_upages_obj;

	ptek = (unsigned *) vtopte((vm_offset_t) p->p_addr);
	for (i = 0; i < UPAGES; i++) {

		if ((m = vm_page_lookup(upobj, i)) == NULL)
			panic("pmap_dispose_proc: upage already missing???");

		vm_page_busy(m);

		oldpte = *(ptek + i);
		*(ptek + i) = 0;
		if ((oldpte & PG_G) || (cpu_class > CPUCLASS_386))
			invlpg((vm_offset_t) p->p_addr + i * PAGE_SIZE);
		vm_page_unwire(m, 0);
		vm_page_free(m);
	}

	if (cpu_class <= CPUCLASS_386)
		invltlb();
}

/*
 * Allow the UPAGES for a process to be prejudicially paged out.
 */
void
pmap_swapout_proc(p)
	struct proc *p;
{
	int i;
	vm_object_t upobj;
	vm_page_t m;

	upobj = p->p_upages_obj;
	/*
	 * let the upages be paged
	 */
	for (i = 0; i < UPAGES; i++) {
		if ((m = vm_page_lookup(upobj, i)) == NULL)
			panic("pmap_swapout_proc: upage already missing???");
		vm_page_dirty(m);
		vm_page_unwire(m, 0);
		pmap_kremove( (vm_offset_t) p->p_addr + PAGE_SIZE * i);
	}
}

/*
 * Bring the UPAGES for a specified process back in.
 */
void
pmap_swapin_proc(p)
	struct proc *p;
{
	int i,rv;
	vm_object_t upobj;
	vm_page_t m;

	upobj = p->p_upages_obj;
	for (i = 0; i < UPAGES; i++) {

		m = vm_page_grab(upobj, i, VM_ALLOC_NORMAL | VM_ALLOC_RETRY);

		pmap_kenter(((vm_offset_t) p->p_addr) + i * PAGE_SIZE,
			VM_PAGE_TO_PHYS(m));

		if (m->valid != VM_PAGE_BITS_ALL) {
			rv = vm_pager_get_pages(upobj, &m, 1, 0);
#if !defined(MAX_PERF)
			if (rv != VM_PAGER_OK)
				panic("pmap_swapin_proc: cannot get upages for proc: %d\n", p->p_pid);
#endif
			m = vm_page_lookup(upobj, i);
			m->valid = VM_PAGE_BITS_ALL;
		}

		vm_page_wire(m);
		vm_page_wakeup(m);
		vm_page_flag_set(m, PG_MAPPED | PG_WRITEABLE);
	}
}

/***************************************************
 * Page table page management routines.....
 ***************************************************/

/*
 * This routine unholds page table pages, and if the hold count
 * drops to zero, then it decrements the wire count.
 */
static int
_pmap_unwire_pte_hold(pmap_t pmap, vm_page_t m) {

	while (vm_page_sleep_busy(m, FALSE, "pmuwpt"))
		;

	if (m->hold_count == 0) {
		vm_offset_t pteva;
		/*
		 * unmap the page table page
		 */
		pmap->pm_pdir[m->pindex] = 0;
		--pmap->pm_stats.resident_count;
		if ((((unsigned)pmap->pm_pdir[PTDPTDI]) & PG_FRAME) ==
			(((unsigned) PTDpde) & PG_FRAME)) {
			/*
			 * Do an invltlb to make the invalidated mapping
			 * take effect immediately.
			 */
			pteva = UPT_MIN_ADDRESS + i386_ptob(m->pindex);
			invltlb_1pg(pteva);
		}

		if (pmap->pm_ptphint == m)
			pmap->pm_ptphint = NULL;

		/*
		 * If the page is finally unwired, simply free it.
		 */
		--m->wire_count;
		if (m->wire_count == 0) {

			vm_page_flash(m);
			vm_page_busy(m);
			vm_page_free_zero(m);
			--cnt.v_wire_count;
		}
		return 1;
	}
	return 0;
}

static PMAP_INLINE int
pmap_unwire_pte_hold(pmap_t pmap, vm_page_t m) {
	vm_page_unhold(m);
	if (m->hold_count == 0)
		return _pmap_unwire_pte_hold(pmap, m);
	else
		return 0;
}

/*
 * After removing a page table entry, this routine is used to
 * conditionally free the page, and manage the hold/wire counts.
 */
static int
pmap_unuse_pt(pmap, va, mpte)
	pmap_t pmap;
	vm_offset_t va;
	vm_page_t mpte;
{
	unsigned ptepindex;
	if (va >= UPT_MIN_ADDRESS)
		return 0;

	if (mpte == NULL) {
		ptepindex = (va >> PDRSHIFT);
		if (pmap->pm_ptphint &&
			(pmap->pm_ptphint->pindex == ptepindex)) {
			mpte = pmap->pm_ptphint;
		} else {
			mpte = pmap_page_lookup( pmap->pm_pteobj, ptepindex);
			pmap->pm_ptphint = mpte;
		}
	}

	return pmap_unwire_pte_hold(pmap, mpte);
}

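/*
 * Initialize the pmap for process 0.  The UP case simply maps the
 * boot-time page directory (IdlePTD); the SMP case does a full
 * pmap_pinit.
 */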
#if !defined(SMP)
void
pmap_pinit0(pmap)
	struct pmap *pmap;
{
	pmap->pm_pdir =
		(pd_entry_t *)kmem_alloc_pageable(kernel_map, PAGE_SIZE);
	pmap_kenter((vm_offset_t) pmap->pm_pdir, (vm_offset_t) IdlePTD);
	pmap->pm_count = 1;
	pmap->pm_active = 0;
	pmap->pm_ptphint = NULL;
	TAILQ_INIT(&pmap->pm_pvlist);
	bzero(&pmap->pm_stats, sizeof pmap->pm_stats);
}
#else
void
pmap_pinit0(pmap)
	struct pmap *pmap;
{
	pmap_pinit(pmap);
}
#endif

/*
 * Initialize a preallocated and zeroed pmap structure,
 * such as one in a vmspace structure.
 */
void
pmap_pinit(pmap)
	register struct pmap *pmap;
{
	vm_page_t ptdpg;

	/*
	 * No need to allocate page table space yet but we do need a valid
	 * page directory table.
	 */
	if (pmap->pm_pdir == NULL)
		pmap->pm_pdir =
			(pd_entry_t *)kmem_alloc_pageable(kernel_map, PAGE_SIZE);

	/*
	 * allocate object for the ptes
	 */
	if (pmap->pm_pteobj == NULL)
		pmap->pm_pteobj = vm_object_allocate( OBJT_DEFAULT, PTDPTDI + 1);

	/*
	 * allocate the page directory page
	 */
	ptdpg = vm_page_grab( pmap->pm_pteobj, PTDPTDI,
			VM_ALLOC_NORMAL | VM_ALLOC_RETRY);

	ptdpg->wire_count = 1;
	++cnt.v_wire_count;


	vm_page_flag_clear(ptdpg, PG_MAPPED | PG_BUSY); /* not usually mapped*/
	ptdpg->valid = VM_PAGE_BITS_ALL;

	pmap_kenter((vm_offset_t) pmap->pm_pdir, VM_PAGE_TO_PHYS(ptdpg));
	if ((ptdpg->flags & PG_ZERO) == 0)
		bzero(pmap->pm_pdir, PAGE_SIZE);

	/* wire in kernel global address entries */
	/* XXX copies current process, does not fill in MPPTDI */
	bcopy(PTD + KPTDI, pmap->pm_pdir + KPTDI, nkpt * PTESIZE);

	/* install self-referential address mapping entry */
	*(unsigned *) (pmap->pm_pdir + PTDPTDI) =
		VM_PAGE_TO_PHYS(ptdpg) | PG_V | PG_RW | PG_A | PG_M;

	pmap->pm_count = 1;
	pmap->pm_active = 0;
	pmap->pm_ptphint = NULL;
	TAILQ_INIT(&pmap->pm_pvlist);
	bzero(&pmap->pm_stats, sizeof pmap->pm_stats);
}

static int
pmap_release_free_page(pmap, p)
	struct pmap *pmap;
	vm_page_t p;
{
	unsigned *pde = (unsigned *) pmap->pm_pdir;
	/*
	 * This code optimizes the case of freeing non-busy
	 * page-table pages.  Those pages are zero now, and
	 * might as well be placed directly into the zero queue.
	 */
	if (vm_page_sleep_busy(p, FALSE, "pmaprl"))
		return 0;

	vm_page_busy(p);

	/*
	 * Remove the page table page from the processes address space.
	 */
	pde[p->pindex] = 0;
	pmap->pm_stats.resident_count--;

#if !defined(MAX_PERF)
	if (p->hold_count)  {
		panic("pmap_release: freeing held page table page");
	}
#endif
	/*
	 * Page directory pages need to have the kernel
	 * stuff cleared, so they can go into the zero queue also.
	 */
	if (p->pindex == PTDPTDI) {
		bzero(pde + KPTDI, nkpt * PTESIZE);
#ifdef SMP
		pde[MPPTDI] = 0;
#endif
		pde[APTDPTDI] = 0;
		pmap_kremove((vm_offset_t) pmap->pm_pdir);
	}

	if (pmap->pm_ptphint && (pmap->pm_ptphint->pindex == p->pindex))
		pmap->pm_ptphint = NULL;

	p->wire_count--;
	cnt.v_wire_count--;
	vm_page_free_zero(p);
	return 1;
}

/*
 * this routine is called if the page table page is not
 * mapped correctly.
 */
static vm_page_t
_pmap_allocpte(pmap, ptepindex)
	pmap_t	pmap;
	unsigned ptepindex;
{
	vm_offset_t pteva, ptepa;
	vm_page_t m;

	/*
	 * Find or fabricate a new pagetable page
	 */
	m = vm_page_grab(pmap->pm_pteobj, ptepindex,
			VM_ALLOC_ZERO | VM_ALLOC_RETRY);

	if (m->queue != PQ_NONE) {
		int s = splvm();
		vm_page_unqueue(m);
		splx(s);
	}

	if (m->wire_count == 0)
		cnt.v_wire_count++;
	m->wire_count++;

	/*
	 * Increment the hold count for the page table page
	 * (denoting a new mapping.)
	 */
	m->hold_count++;

	/*
	 * Map the pagetable page into the process address space, if
	 * it isn't already there.
	 */

	pmap->pm_stats.resident_count++;

	ptepa = VM_PAGE_TO_PHYS(m);
	pmap->pm_pdir[ptepindex] =
		(pd_entry_t) (ptepa | PG_U | PG_RW | PG_V | PG_A | PG_M);

	/*
	 * Set the page table hint
	 */
	pmap->pm_ptphint = m;

	/*
	 * Try to use the new mapping, but if we cannot, then
	 * do it with the routine that maps the page explicitly.
	 */
	if ((m->flags & PG_ZERO) == 0) {
		if ((((unsigned)pmap->pm_pdir[PTDPTDI]) & PG_FRAME) ==
			(((unsigned) PTDpde) & PG_FRAME)) {
			pteva = UPT_MIN_ADDRESS + i386_ptob(ptepindex);
			bzero((caddr_t) pteva, PAGE_SIZE);
		} else {
			pmap_zero_page(ptepa);
		}
	}

	m->valid = VM_PAGE_BITS_ALL;
	vm_page_flag_clear(m, PG_ZERO);
	vm_page_flag_set(m, PG_MAPPED);
	vm_page_wakeup(m);

	return m;
}

static vm_page_t
pmap_allocpte(pmap, va)
	pmap_t	pmap;
	vm_offset_t va;
{
	unsigned ptepindex;
	vm_offset_t ptepa;
	vm_page_t m;

	/*
	 * Calculate pagetable page index
	 */
	ptepindex = va >> PDRSHIFT;

	/*
	 * Get the page directory entry
	 */
	ptepa = (vm_offset_t) pmap->pm_pdir[ptepindex];

	/*
	 * This supports switching from a 4MB page to a
	 * normal 4K page.
	 */
	if (ptepa & PG_PS) {
		pmap->pm_pdir[ptepindex] = 0;
		ptepa = 0;
		invltlb();
	}

	/*
	 * If the page table page is mapped, we just increment the
	 * hold count, and activate it.
	 */
	if (ptepa) {
		/*
		 * In order to get the page table page, try the
		 * hint first.
		 */
		if (pmap->pm_ptphint &&
			(pmap->pm_ptphint->pindex == ptepindex)) {
			m = pmap->pm_ptphint;
		} else {
			m = pmap_page_lookup( pmap->pm_pteobj, ptepindex);
			pmap->pm_ptphint = m;
		}
		m->hold_count++;
		return m;
	}
	/*
	 * Here if the pte page isn't mapped, or if it has been deallocated.
	 */
	return _pmap_allocpte(pmap, ptepindex);
}


/***************************************************
 * Pmap allocation/deallocation routines.
 ***************************************************/

/*
 * Release any resources held by the given physical map.
 * Called when a pmap initialized by pmap_pinit is being released.
 * Should only be called if the map contains no valid mappings.
 */
void
pmap_release(pmap)
	register struct pmap *pmap;
{
	vm_page_t p,n,ptdpg;
	vm_object_t object = pmap->pm_pteobj;
	int curgeneration;

#if defined(DIAGNOSTIC)
	if (object->ref_count != 1)
		panic("pmap_release: pteobj reference count != 1");
#endif

	ptdpg = NULL;
retry:
	curgeneration = object->generation;
	for (p = TAILQ_FIRST(&object->memq); p != NULL; p = n) {
		n = TAILQ_NEXT(p, listq);
		if (p->pindex == PTDPTDI) {
			ptdpg = p;
			continue;
		}
		while (1) {
			if (pmap_release_free_page(pmap, p))
				break;
			if (object->generation != curgeneration)
				goto retry;
		}
	}

	if (ptdpg && !pmap_release_free_page(pmap, ptdpg))
		goto retry;
}

/*
 * grow the number of kernel page table entries, if needed
 */
void
pmap_growkernel(vm_offset_t addr)
{
	struct proc *p;
	struct pmap *pmap;
	int s;
	vm_offset_t ptppaddr;
	vm_page_t nkpg;
#ifdef SMP
	int i;
#endif
	pd_entry_t newpdir;

	s = splhigh();
	if (kernel_vm_end == 0) {
		kernel_vm_end = KERNBASE;
		nkpt = 0;
		while (pdir_pde(PTD, kernel_vm_end)) {
			kernel_vm_end = (kernel_vm_end + PAGE_SIZE * NPTEPG) & ~(PAGE_SIZE * NPTEPG - 1);
			nkpt++;
		}
	}
	addr = (addr + PAGE_SIZE * NPTEPG) & ~(PAGE_SIZE * NPTEPG - 1);
	while (kernel_vm_end < addr) {
		if (pdir_pde(PTD, kernel_vm_end)) {
			kernel_vm_end = (kernel_vm_end + PAGE_SIZE * NPTEPG) & ~(PAGE_SIZE * NPTEPG - 1);
			continue;
		}

		/*
		 * This index is bogus, but out of the way
		 */
		nkpg = vm_page_alloc(kptobj, nkpt, VM_ALLOC_SYSTEM);
#if !defined(MAX_PERF)
		if (!nkpg)
			panic("pmap_growkernel: no memory to grow kernel");
#endif

		nkpt++;

		vm_page_wire(nkpg);
		ptppaddr = VM_PAGE_TO_PHYS(nkpg);
		pmap_zero_page(ptppaddr);
		newpdir = (pd_entry_t) (ptppaddr | PG_V | PG_RW | PG_A | PG_M);
		pdir_pde(PTD, kernel_vm_end) = newpdir;

#ifdef SMP
		for (i = 0; i < mp_ncpus; i++) {
			if (IdlePTDS[i])
				pdir_pde(IdlePTDS[i], kernel_vm_end) = newpdir;
		}
#endif

		for (p = allproc.lh_first; p != 0; p = p->p_list.le_next) {
			if (p->p_vmspace) {
				pmap = vmspace_pmap(p->p_vmspace);
				*pmap_pde(pmap, kernel_vm_end) = newpdir;
			}
		}
		if (aiovmspace != NULL) {
			pmap = vmspace_pmap(aiovmspace);
			*pmap_pde(pmap, kernel_vm_end) = newpdir;
		}
		*pmap_pde(kernel_pmap, kernel_vm_end) = newpdir;
		kernel_vm_end = (kernel_vm_end + PAGE_SIZE * NPTEPG) & ~(PAGE_SIZE * NPTEPG - 1);
	}
	splx(s);
}

/*
 *	Retire the given physical map from service.
 *	Should only be called if the map contains
 *	no valid mappings.
 */
void
pmap_destroy(pmap)
	register pmap_t pmap;
{
	int count;

	if (pmap == NULL)
		return;

	count = --pmap->pm_count;
	if (count == 0) {
		pmap_release(pmap);
#if !defined(MAX_PERF)
		panic("destroying a pmap is not yet implemented");
#endif
	}
}

/*
 *	Add a reference to the specified pmap.
 */
void
pmap_reference(pmap)
	pmap_t pmap;
{
	if (pmap != NULL) {
		pmap->pm_count++;
	}
}

/***************************************************
 * page management routines.
 ***************************************************/

/*
 * free the pv_entry back to the free list
 */
static PMAP_INLINE void
free_pv_entry(pv)
	pv_entry_t pv;
{
	pv_entry_count--;
	zfreei(pvzone, pv);
}

/*
 * get a new pv_entry, allocating a block from the system
 * when needed.
 * the memory allocation is performed bypassing the malloc code
 * because of the possibility of allocations at interrupt time.
 */
static pv_entry_t
get_pv_entry(void)
{
	pv_entry_count++;
	if (pv_entry_high_water &&
		(pv_entry_count > pv_entry_high_water) &&
		(pmap_pagedaemon_waken == 0)) {
		pmap_pagedaemon_waken = 1;
		wakeup (&vm_pages_needed);
	}
	return zalloci(pvzone);
}

/*
 * This routine is very drastic, but can save the system
 * in a pinch.  It reclaims pv entries by removing all of the
 * mappings from pages that are not busy, held, or wired.
 */
void
pmap_collect() {
	pv_table_t *ppv;
	int i;
	vm_offset_t pa;
	vm_page_t m;
	static int warningdone=0;

	if (pmap_pagedaemon_waken == 0)
		return;

	if (warningdone < 5) {
		printf("pmap_collect: collecting pv entries -- suggest increasing PMAP_SHPGPERPROC\n");
		warningdone++;
	}

	for (i = 0; i < pv_npg; i++) {
		if ((ppv = &pv_table[i]) == 0)
			continue;
		m = ppv->pv_vm_page;
		if ((pa = VM_PAGE_TO_PHYS(m)) == 0)
			continue;
		if (m->wire_count || m->hold_count || m->busy ||
			(m->flags & PG_BUSY))
			continue;
		pmap_remove_all(pa);
	}
	pmap_pagedaemon_waken = 0;
}


/*
 * Remove the pv entry for (pmap, va) from the page's pv list and
 * from the pmap's pv list, searching whichever list is likely to
 * be shorter, and free the now unused entry.
 */

static int
pmap_remove_entry(pmap, ppv, va)
	struct pmap *pmap;
	pv_table_t *ppv;
	vm_offset_t va;
{
	pv_entry_t pv;
	int rtval;
	int s;

	s = splvm();
	if (ppv->pv_list_count < pmap->pm_stats.resident_count) {
		for (pv = TAILQ_FIRST(&ppv->pv_list);
			pv;
			pv = TAILQ_NEXT(pv, pv_list)) {
			if (pmap == pv->pv_pmap && va == pv->pv_va)
				break;
		}
	} else {
		for (pv = TAILQ_FIRST(&pmap->pm_pvlist);
			pv;
			pv = TAILQ_NEXT(pv, pv_plist)) {
			if (va == pv->pv_va)
				break;
		}
	}

	rtval = 0;
	if (pv) {

		rtval = pmap_unuse_pt(pmap, va, pv->pv_ptem);
		TAILQ_REMOVE(&ppv->pv_list, pv, pv_list);
		ppv->pv_list_count--;
		if (TAILQ_FIRST(&ppv->pv_list) == NULL)
			vm_page_flag_clear(ppv->pv_vm_page, PG_MAPPED | PG_WRITEABLE);

		TAILQ_REMOVE(&pmap->pm_pvlist, pv, pv_plist);
		free_pv_entry(pv);
	}

	splx(s);
	return rtval;
}

/*
 * Create a pv entry for page at pa for
 * (pmap, va).
 */
static void
pmap_insert_entry(pmap, va, mpte, pa)
	pmap_t pmap;
	vm_offset_t va;
	vm_page_t mpte;
	vm_offset_t pa;
{

	int s;
	pv_entry_t pv;
	pv_table_t *ppv;

	s = splvm();
	pv = get_pv_entry();
	pv->pv_va = va;
	pv->pv_pmap = pmap;
	pv->pv_ptem = mpte;

	TAILQ_INSERT_TAIL(&pmap->pm_pvlist, pv, pv_plist);

	ppv = pa_to_pvh(pa);
	TAILQ_INSERT_TAIL(&ppv->pv_list, pv, pv_list);
	ppv->pv_list_count++;

	splx(s);
}

/*
 * pmap_remove_pte: unmap a single page in a process, updating the
 * wired/resident counts, the dirty and referenced bits, and the pv lists
 */
static int
pmap_remove_pte(pmap, ptq, va)
	struct pmap *pmap;
	unsigned *ptq;
	vm_offset_t va;
{
	unsigned oldpte;
	pv_table_t *ppv;

	oldpte = *ptq;
	*ptq = 0;
	if (oldpte & PG_W)
		pmap->pm_stats.wired_count -= 1;
	/*
	 * Machines that don't support invlpg, also don't support
	 * PG_G.
	 */
	if (oldpte & PG_G)
		invlpg(va);
	pmap->pm_stats.resident_count -= 1;
	if (oldpte & PG_MANAGED) {
		ppv = pa_to_pvh(oldpte);
		if (oldpte & PG_M) {
#if defined(PMAP_DIAGNOSTIC)
			if (pmap_nw_modified((pt_entry_t) oldpte)) {
				printf(
	"pmap_remove: modified page not writable: va: 0x%x, pte: 0x%x\n",
				    va, oldpte);
			}
#endif
			if (pmap_track_modified(va))
				vm_page_dirty(ppv->pv_vm_page);
		}
		if (oldpte & PG_A)
			vm_page_flag_set(ppv->pv_vm_page, PG_REFERENCED);
		return pmap_remove_entry(pmap, ppv, va);
	} else {
		return pmap_unuse_pt(pmap, va, NULL);
	}

	return 0;
}

/*
 * Remove a single page from a process address space
 */
static void
pmap_remove_page(pmap, va)
	struct pmap *pmap;
	register vm_offset_t va;
{
	register unsigned *ptq;

	/*
	 * if there is no pte for this address, just skip it!!!
	 */
	if (*pmap_pde(pmap, va) == 0) {
		return;
	}

	/*
	 * get a local va for mappings for this pmap.
	 */
	ptq = get_ptbase(pmap) + i386_btop(va);
	if (*ptq) {
		(void) pmap_remove_pte(pmap, ptq, va);
		invltlb_1pg(va);
	}
	return;
}

/*
 *	Remove the given range of addresses from the specified map.
 *
 *	It is assumed that the start and end are properly
 *	rounded to the page size.
 */
void
pmap_remove(pmap, sva, eva)
	struct pmap *pmap;
	register vm_offset_t sva;
	register vm_offset_t eva;
{
	register unsigned *ptbase;
	vm_offset_t pdnxt;
	vm_offset_t ptpaddr;
	vm_offset_t sindex, eindex;
	int anyvalid;

	if (pmap == NULL)
		return;

	if (pmap->pm_stats.resident_count == 0)
		return;

	/*
	 * Special handling for removing a single page.  This is a very
	 * common operation, and it lets us short circuit a fair amount
	 * of code.
1876	 */
1877	if (((sva + PAGE_SIZE) == eva) &&
1878		(((unsigned) pmap->pm_pdir[(sva >> PDRSHIFT)] & PG_PS) == 0)) {
1879		pmap_remove_page(pmap, sva);
1880		return;
1881	}
1882
1883	anyvalid = 0;
1884
1885	/*
1886	 * Get a local virtual address for the mappings that are being
1887	 * worked with.
1888	 */
1889	ptbase = get_ptbase(pmap);
1890
1891	sindex = i386_btop(sva);
1892	eindex = i386_btop(eva);
1893
1894	for (; sindex < eindex; sindex = pdnxt) {
1895		unsigned pdirindex;
1896
1897		/*
1898		 * Calculate index for next page table.
1899		 */
1900		pdnxt = ((sindex + NPTEPG) & ~(NPTEPG - 1));
1901		if (pmap->pm_stats.resident_count == 0)
1902			break;
1903
1904		pdirindex = sindex / NPDEPG;
1905		if (((ptpaddr = (unsigned) pmap->pm_pdir[pdirindex]) & PG_PS) != 0) {
1906			pmap->pm_pdir[pdirindex] = 0;
1907			pmap->pm_stats.resident_count -= NBPDR / PAGE_SIZE;
1908			anyvalid++;
1909			continue;
1910		}
1911
1912		/*
1913		 * Weed out invalid mappings. Note: we assume that the page
1914		 * directory table is always allocated, and in kernel virtual.
1915		 */
1916		if (ptpaddr == 0)
1917			continue;
1918
1919		/*
1920		 * Limit our scan to either the end of the va represented
1921		 * by the current page table page, or to the end of the
1922		 * range being removed.
1923		 */
1924		if (pdnxt > eindex) {
1925			pdnxt = eindex;
1926		}
1927
1928		for ( ;sindex != pdnxt; sindex++) {
1929			vm_offset_t va;
1930			if (ptbase[sindex] == 0) {
1931				continue;
1932			}
1933			va = i386_ptob(sindex);
1934
1935			anyvalid++;
1936			if (pmap_remove_pte(pmap,
1937				ptbase + sindex, va))
1938				break;
1939		}
1940	}
1941
1942	if (anyvalid) {
1943		invltlb();
1944	}
1945}
1946
1947/*
1948 *	Routine:	pmap_remove_all
1949 *	Function:
1950 *		Removes this physical page from
1951 *		all physical maps in which it resides.
1952 *		Reflects back modify bits to the pager.
1953 *
1954 *	Notes:
1955 *		Original versions of this routine were very
1956 *		inefficient because they iteratively called
1957 *		pmap_remove (slow...)
1958 */
1959
1960static void
1961pmap_remove_all(pa)
1962	vm_offset_t pa;
1963{
1964	register pv_entry_t pv;
1965	pv_table_t *ppv;
1966	register unsigned *pte, tpte;
1967	int update_needed;
1968	int s;
1969
1970	update_needed = 0;
1971#if defined(PMAP_DIAGNOSTIC)
1972	/*
1973	 * XXX this makes pmap_page_protect(NONE) illegal for non-managed
1974	 * pages!
1975	 */
1976	if (!pmap_is_managed(pa)) {
1977		panic("pmap_page_protect: illegal for unmanaged page, va: 0x%x", pa);
1978	}
1979#endif
1980
1981	s = splvm();
1982	ppv = pa_to_pvh(pa);
1983	while ((pv = TAILQ_FIRST(&ppv->pv_list)) != NULL) {
1984		pte = pmap_pte_quick(pv->pv_pmap, pv->pv_va);
1985
1986		pv->pv_pmap->pm_stats.resident_count--;
1987
1988		tpte = *pte;
1989		*pte = 0;
1990		if (tpte & PG_W)
1991			pv->pv_pmap->pm_stats.wired_count--;
1992
1993		if (tpte & PG_A)
1994			vm_page_flag_set(ppv->pv_vm_page, PG_REFERENCED);
1995
1996		/*
1997		 * Update the vm_page_t clean and reference bits.
1998		 */
1999		if (tpte & PG_M) {
2000#if defined(PMAP_DIAGNOSTIC)
2001			if (pmap_nw_modified((pt_entry_t) tpte)) {
2002				printf(
2003	"pmap_remove_all: modified page not writable: va: 0x%x, pte: 0x%x\n",
2004				    pv->pv_va, tpte);
2005			}
2006#endif
2007			if (pmap_track_modified(pv->pv_va))
2008				vm_page_dirty(ppv->pv_vm_page);
2009		}
2010#ifdef SMP
2011		update_needed = 1;
2012#else
2013		if (!update_needed &&
2014			((!curproc || (vmspace_pmap(curproc->p_vmspace) == pv->pv_pmap)) ||
2015			(pv->pv_pmap == kernel_pmap))) {
2016			update_needed = 1;
2017		}
2018#endif
2019
2020		TAILQ_REMOVE(&pv->pv_pmap->pm_pvlist, pv, pv_plist);
2021		TAILQ_REMOVE(&ppv->pv_list, pv, pv_list);
2022		ppv->pv_list_count--;
2023		pmap_unuse_pt(pv->pv_pmap, pv->pv_va, pv->pv_ptem);
2024		free_pv_entry(pv);
2025	}
2026
2027	vm_page_flag_clear(ppv->pv_vm_page, PG_MAPPED | PG_WRITEABLE);
2028
2029	if (update_needed)
2030		invltlb();
2031
2032	splx(s);
2033	return;
2034}
2035
2036/*
2037 *	Set the physical protection on the
2038 *	specified range of this map as requested.
2039 */
2040void
2041pmap_protect(pmap_t pmap, vm_offset_t sva, vm_offset_t eva, vm_prot_t prot)
2042{
2043	register unsigned *ptbase;
2044	vm_offset_t pdnxt, ptpaddr;
2045	vm_pindex_t sindex, eindex;
2046	int anychanged;
2047
2048
2049	if (pmap == NULL)
2050		return;
2051
2052	if ((prot & VM_PROT_READ) == VM_PROT_NONE) {
2053		pmap_remove(pmap, sva, eva);
2054		return;
2055	}
2056
2057	if (prot & VM_PROT_WRITE)
2058		return;
2059
2060	anychanged = 0;
2061
2062	ptbase = get_ptbase(pmap);
2063
2064	sindex = i386_btop(sva);
2065	eindex = i386_btop(eva);
2066
2067	for (; sindex < eindex; sindex = pdnxt) {
2068
2069		unsigned pdirindex;
2070
2071		pdnxt = ((sindex + NPTEPG) & ~(NPTEPG - 1));
2072
2073		pdirindex = sindex / NPDEPG;
2074		if (((ptpaddr = (unsigned) pmap->pm_pdir[pdirindex]) & PG_PS) != 0) {
2075			(unsigned) pmap->pm_pdir[pdirindex] &= ~(PG_M|PG_RW);
2076			pmap->pm_stats.resident_count -= NBPDR / PAGE_SIZE;
2077			anychanged++;
2078			continue;
2079		}
2080
2081		/*
2082		 * Weed out invalid mappings. Note: we assume that the page
2083		 * directory table is always allocated, and in kernel virtual.
2084		 */
2085		if (ptpaddr == 0)
2086			continue;
2087
2088		if (pdnxt > eindex) {
2089			pdnxt = eindex;
2090		}
2091
2092		for (; sindex != pdnxt; sindex++) {
2093
2094			unsigned pbits;
2095			pv_table_t *ppv;
2096
2097			pbits = ptbase[sindex];
2098
2099			if (pbits & PG_MANAGED) {
2100				ppv = NULL;
2101				if (pbits & PG_A) {
2102					ppv = pa_to_pvh(pbits);
2103					vm_page_flag_set(ppv->pv_vm_page, PG_REFERENCED);
2104					pbits &= ~PG_A;
2105				}
2106				if (pbits & PG_M) {
2107					if (pmap_track_modified(i386_ptob(sindex))) {
2108						if (ppv == NULL)
2109							ppv = pa_to_pvh(pbits);
2110						vm_page_dirty(ppv->pv_vm_page);
2111						pbits &= ~PG_M;
2112					}
2113				}
2114			}
2115
2116			pbits &= ~PG_RW;
2117
2118			if (pbits != ptbase[sindex]) {
2119				ptbase[sindex] = pbits;
2120				anychanged = 1;
2121			}
2122		}
2123	}
2124	if (anychanged)
2125		invltlb();
2126}
2127
2128/*
2129 *	Insert the given physical page (p) at
2130 *	the specified virtual address (v) in the
2131 *	target physical map with the protection requested.
2132 *
2133 *	If specified, the page will be wired down, meaning
2134 *	that the related pte can not be reclaimed.
2135 *
2136 *	NB:  This is the only routine which MAY NOT lazy-evaluate
2137 *	or lose information.  That is, this routine must actually
2138 *	insert this page into the given map NOW.
2139 */
2140void
2141pmap_enter(pmap_t pmap, vm_offset_t va, vm_offset_t pa, vm_prot_t prot,
2142	   boolean_t wired)
2143{
2144	register unsigned *pte;
2145	vm_offset_t opa;
2146	vm_offset_t origpte, newpte;
2147	vm_page_t mpte;
2148
2149	if (pmap == NULL)
2150		return;
2151
2152	va &= PG_FRAME;
2153#ifdef PMAP_DIAGNOSTIC
2154	if (va > VM_MAX_KERNEL_ADDRESS)
2155		panic("pmap_enter: toobig");
2156	if ((va >= UPT_MIN_ADDRESS) && (va < UPT_MAX_ADDRESS))
2157		panic("pmap_enter: invalid to pmap_enter page table pages (va: 0x%x)", va);
2158#endif
2159
2160	mpte = NULL;
2161	/*
2162	 * In the case that a page table page is not
2163	 * resident, we are creating it here.
2164	 */
2165	if (va < UPT_MIN_ADDRESS) {
2166		mpte = pmap_allocpte(pmap, va);
2167	}
2168#if 0 && defined(PMAP_DIAGNOSTIC)
2169	else {
2170		vm_offset_t *pdeaddr = (vm_offset_t *)pmap_pde(pmap, va);
2171		if (((origpte = (vm_offset_t) *pdeaddr) & PG_V) == 0) {
2172			panic("pmap_enter: invalid kernel page table page(0), pdir=%p, pde=%p, va=%p\n",
2173				pmap->pm_pdir[PTDPTDI], origpte, va);
2174		}
2175		if (smp_active) {
2176			pdeaddr = (vm_offset_t *) IdlePTDS[cpuid];
2177			if (((newpte = pdeaddr[va >> PDRSHIFT]) & PG_V) == 0) {
2178				if ((vm_offset_t) my_idlePTD != (vm_offset_t) vtophys(pdeaddr))
2179					printf("pde mismatch: %x, %x\n", my_idlePTD, pdeaddr);
2180				printf("cpuid: %d, pdeaddr: 0x%x\n", cpuid, pdeaddr);
2181				panic("pmap_enter: invalid kernel page table page(1), pdir=%p, npde=%p, pde=%p, va=%p\n",
2182					pmap->pm_pdir[PTDPTDI], newpte, origpte, va);
2183			}
2184		}
2185	}
2186#endif
2187
2188	pte = pmap_pte(pmap, va);
2189
2190#if !defined(MAX_PERF)
2191	/*
2192	 * Invalid page directory entry: the PT page this mapping needs is missing.
2193	 */
2194	if (pte == NULL) {
2195		panic("pmap_enter: invalid page directory, pdir=%p, va=0x%x",
2196			(void *)pmap->pm_pdir[PTDPTDI], va);
2197	}
2198#endif
2199
2200	origpte = *(vm_offset_t *)pte;
2201	pa &= PG_FRAME;
2202	opa = origpte & PG_FRAME;
2203
2204#if !defined(MAX_PERF)
2205	if (origpte & PG_PS)
2206		panic("pmap_enter: attempted pmap_enter on 4MB page");
2207#endif
2208
2209	/*
2210	 * Mapping has not changed, must be protection or wiring change.
2211	 */
2212	if (origpte && (opa == pa)) {
2213		/*
2214		 * Wiring change, just update stats. We don't worry about
2215		 * wiring PT pages as they remain resident as long as there
2216		 * are valid mappings in them. Hence, if a user page is wired,
2217		 * the PT page will be also.
2218		 */
2219		if (wired && ((origpte & PG_W) == 0))
2220			pmap->pm_stats.wired_count++;
2221		else if (!wired && (origpte & PG_W))
2222			pmap->pm_stats.wired_count--;
2223
2224#if defined(PMAP_DIAGNOSTIC)
2225		if (pmap_nw_modified((pt_entry_t) origpte)) {
2226			printf(
2227	"pmap_enter: modified page not writable: va: 0x%x, pte: 0x%x\n",
2228			    va, origpte);
2229		}
2230#endif
2231
2232		/*
2233		 * Remove extra pte reference
2234		 */
2235		if (mpte)
2236			mpte->hold_count--;
2237
2238		if ((prot & VM_PROT_WRITE) && (origpte & PG_V)) {
2239			if ((origpte & PG_RW) == 0) {
2240				*pte |= PG_RW;
2241				invltlb_1pg(va);
2242			}
2243			return;
2244		}
2245
2246		/*
2247		 * We might be turning off write access to the page,
2248		 * so we go ahead and sense modify status.
2249		 */
2250		if (origpte & PG_MANAGED) {
2251			if ((origpte & PG_M) && pmap_track_modified(va)) {
2252				pv_table_t *ppv;
2253				ppv = pa_to_pvh(opa);
2254				vm_page_dirty(ppv->pv_vm_page);
2255			}
2256			pa |= PG_MANAGED;
2257		}
2258		goto validate;
2259	}
2260	/*
2261	 * Mapping has changed, invalidate old range and fall through to
2262	 * handle validating new mapping.
2263	 */
2264	if (opa) {
2265		int err;
2266		err = pmap_remove_pte(pmap, pte, va);
2267#if !defined(MAX_PERF)
2268		if (err)
2269			panic("pmap_enter: pte vanished, va: 0x%x", va);
2270#endif
2271	}
2272
2273	/*
2274	 * Enter on the PV list if part of our managed memory Note that we
2275	 * Enter on the PV list if part of our managed memory.  Note that we
2276	 * called at interrupt time.
2277	 */
2278	if (pmap_is_managed(pa)) {
2279		pmap_insert_entry(pmap, va, mpte, pa);
2280		pa |= PG_MANAGED;
2281	}
2282
2283	/*
2284	 * Increment counters
2285	 */
2286	pmap->pm_stats.resident_count++;
2287	if (wired)
2288		pmap->pm_stats.wired_count++;
2289
2290validate:
2291	/*
2292	 * Now validate mapping with desired protection/wiring.
2293	 */
2294	newpte = (vm_offset_t) (pa | pte_prot(pmap, prot) | PG_V);
2295
2296	if (wired)
2297		newpte |= PG_W;
2298	if (va < UPT_MIN_ADDRESS)
2299		newpte |= PG_U;
2300	if (pmap == kernel_pmap)
2301		newpte |= pgeflag;
2302
2303	/*
2304	 * if the mapping or permission bits are different, we need
2305	 * to update the pte.
2306	 */
2307	if ((origpte & ~(PG_M|PG_A)) != newpte) {
2308		*pte = newpte | PG_A;
2309		if (origpte)
2310			invltlb_1pg(va);
2311	}
2312}
2313
2314/*
2315 * this code makes some *MAJOR* assumptions:
2316 * 1. The given pmap is the current pmap and is non-NULL.
2317 * 2. Not wired.
2318 * 3. Read access.
2319 * 4. No page table pages.
2320 * 5. The TLB flush is deferred to the calling procedure.
2321 * 6. Page IS managed.
2322 * but is *MUCH* faster than pmap_enter...
2323 */
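
/*
 * Under those assumptions a caller clusters read-only entries,
 * threading the page table page through consecutive calls (sketch):
 *
 *	mpte = NULL;
 *	for each page p to be entered at va:
 *		mpte = pmap_enter_quick(pmap, va, VM_PAGE_TO_PHYS(p), mpte);
 *
 * See pmap_object_init_pt and pmap_prefault below for real users.
 */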
2324
2325static vm_page_t
2326pmap_enter_quick(pmap, va, pa, mpte)
2327	register pmap_t pmap;
2328	vm_offset_t va;
2329	register vm_offset_t pa;
2330	vm_page_t mpte;
2331{
2332	register unsigned *pte;
2333
2334	/*
2335	 * In the case that a page table page is not
2336	 * resident, we are creating it here.
2337	 */
2338	if (va < UPT_MIN_ADDRESS) {
2339		unsigned ptepindex;
2340		vm_offset_t ptepa;
2341
2342		/*
2343		 * Calculate pagetable page index
2344		 */
2345		ptepindex = va >> PDRSHIFT;
2346		if (mpte && (mpte->pindex == ptepindex)) {
2347			mpte->hold_count++;
2348		} else {
2349retry:
2350			/*
2351			 * Get the page directory entry
2352			 */
2353			ptepa = (vm_offset_t) pmap->pm_pdir[ptepindex];
2354
2355			/*
2356			 * If the page table page is mapped, we just increment
2357			 * the hold count, and activate it.
2358			 */
2359			if (ptepa) {
2360#if !defined(MAX_PERF)
2361				if (ptepa & PG_PS)
2362					panic("pmap_enter_quick: unexpected mapping into 4MB page");
2363#endif
2364				if (pmap->pm_ptphint &&
2365					(pmap->pm_ptphint->pindex == ptepindex)) {
2366					mpte = pmap->pm_ptphint;
2367				} else {
2368					mpte = pmap_page_lookup( pmap->pm_pteobj, ptepindex);
2369					pmap->pm_ptphint = mpte;
2370				}
2371				if (mpte == NULL)
2372					goto retry;
2373				mpte->hold_count++;
2374			} else {
2375				mpte = _pmap_allocpte(pmap, ptepindex);
2376			}
2377		}
2378	} else {
2379		mpte = NULL;
2380	}
2381
2382	/*
2383	 * This call to vtopte makes the assumption that we are
2384	 * entering the page into the current pmap.  In order to support
2385	 * quick entry into any pmap, one would likely use pmap_pte_quick.
2386	 * But that isn't as quick as vtopte.
2387	 */
2388	pte = (unsigned *)vtopte(va);
2389	if (*pte) {
2390		if (mpte)
2391			pmap_unwire_pte_hold(pmap, mpte);
2392		return NULL;
2393	}
2394
2395	/*
2396	 * Enter on the PV list if part of our managed memory.  Note that we
2397	 * raise IPL while manipulating pv_table since pmap_enter can be
2398	 * called at interrupt time.
2399	 */
2400	pmap_insert_entry(pmap, va, mpte, pa);
2401
2402	/*
2403	 * Increment counters
2404	 */
2405	pmap->pm_stats.resident_count++;
2406
2407	/*
2408	 * Now validate mapping with RO protection
2409	 */
2410	*pte = pa | PG_V | PG_U | PG_MANAGED;
2411
2412	return mpte;
2413}
2414
2415#define MAX_INIT_PT (96)
2416/*
2417 * pmap_object_init_pt preloads the ptes for a given object
2418 * into the specified pmap.  This eliminates the blast of soft
2419 * faults on process startup and immediately after an mmap.
2420 */
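
/*
 * Hypothetical example: preloading the first "size" bytes of a vnode
 * object that was just mmap()ed at "addr":
 *
 *	pmap_object_init_pt(pmap, addr, object, pindex, size, 1);
 *
 * A non-zero "limit" causes oversized objects (more than MAX_INIT_PT
 * resident pages) to be skipped rather than scanned.
 */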
2421void
2422pmap_object_init_pt(pmap, addr, object, pindex, size, limit)
2423	pmap_t pmap;
2424	vm_offset_t addr;
2425	vm_object_t object;
2426	vm_pindex_t pindex;
2427	vm_size_t size;
2428	int limit;
2429{
2430	vm_offset_t tmpidx;
2431	int psize;
2432	vm_page_t p, mpte;
2433	int objpgs;
2434
2435	if (!pmap)
2436		return;
2437
2438	/*
2439	 * This code maps large physical mmap regions into the
2440	 * processor address space.  Note that some shortcuts
2441	 * are taken, but the code works.
2442	 */
2443	if (pseflag &&
2444		(object->type == OBJT_DEVICE) &&
2445		((addr & (NBPDR - 1)) == 0) &&
2446		((size & (NBPDR - 1)) == 0) ) {
2447		int i;
2448		vm_page_t m[1];
2449		unsigned int ptepindex;
2450		int npdes;
2451		vm_offset_t ptepa;
2452
2453		if (pmap->pm_pdir[ptepindex = (addr >> PDRSHIFT)])
2454			return;
2455
2456retry:
2457		p = vm_page_lookup(object, pindex);
2458		if (p && vm_page_sleep_busy(p, FALSE, "init4p"))
2459			goto retry;
2460
2461		if (p == NULL) {
2462			p = vm_page_alloc(object, pindex, VM_ALLOC_NORMAL);
2463			if (p == NULL)
2464				return;
2465			m[0] = p;
2466
2467			if (vm_pager_get_pages(object, m, 1, 0) != VM_PAGER_OK) {
2468				vm_page_free(p);
2469				return;
2470			}
2471
2472			p = vm_page_lookup(object, pindex);
2473			vm_page_wakeup(p);
2474		}
2475
2476		ptepa = (vm_offset_t) VM_PAGE_TO_PHYS(p);
2477		if (ptepa & (NBPDR - 1)) {
2478			return;
2479		}
2480
2481		p->valid = VM_PAGE_BITS_ALL;
2482
2483		pmap->pm_stats.resident_count += size >> PAGE_SHIFT;
2484		npdes = size >> PDRSHIFT;
2485		for(i=0;i<npdes;i++) {
2486			pmap->pm_pdir[ptepindex] =
2487				(pd_entry_t) (ptepa | PG_U | PG_RW | PG_V | PG_PS);
2488			ptepa += NBPDR;
2489			ptepindex += 1;
2490		}
2491		vm_page_flag_set(p, PG_MAPPED);
2492		invltlb();
2493		return;
2494	}
2495
2496	psize = i386_btop(size);
2497
2498	if ((object->type != OBJT_VNODE) ||
2499		(limit && (psize > MAX_INIT_PT) &&
2500			(object->resident_page_count > MAX_INIT_PT))) {
2501		return;
2502	}
2503
2504	if (psize + pindex > object->size)
2505		psize = object->size - pindex;
2506
2507	mpte = NULL;
2508	/*
2509	 * if we are processing a major portion of the object, then scan the
2510	 * entire thing.
2511	 */
2512	if (psize > (object->size >> 2)) {
2513		objpgs = psize;
2514
2515		for (p = TAILQ_FIRST(&object->memq);
2516		    ((objpgs > 0) && (p != NULL));
2517		    p = TAILQ_NEXT(p, listq)) {
2518
2519			tmpidx = p->pindex;
2520			if (tmpidx < pindex) {
2521				continue;
2522			}
2523			tmpidx -= pindex;
2524			if (tmpidx >= psize) {
2525				continue;
2526			}
2527			if (((p->valid & VM_PAGE_BITS_ALL) == VM_PAGE_BITS_ALL) &&
2528				(p->busy == 0) &&
2529			    (p->flags & (PG_BUSY | PG_FICTITIOUS)) == 0) {
2530				if ((p->queue - p->pc) == PQ_CACHE)
2531					vm_page_deactivate(p);
2532				vm_page_busy(p);
2533				mpte = pmap_enter_quick(pmap,
2534					addr + i386_ptob(tmpidx),
2535					VM_PAGE_TO_PHYS(p), mpte);
2536				vm_page_flag_set(p, PG_MAPPED);
2537				vm_page_wakeup(p);
2538			}
2539			objpgs -= 1;
2540		}
2541	} else {
2542		/*
2543		 * else lookup the pages one-by-one.
2544		 */
2545		for (tmpidx = 0; tmpidx < psize; tmpidx += 1) {
2546			p = vm_page_lookup(object, tmpidx + pindex);
2547			if (p &&
2548			    ((p->valid & VM_PAGE_BITS_ALL) == VM_PAGE_BITS_ALL) &&
2549				(p->busy == 0) &&
2550			    (p->flags & (PG_BUSY | PG_FICTITIOUS)) == 0) {
2551				if ((p->queue - p->pc) == PQ_CACHE)
2552					vm_page_deactivate(p);
2553				vm_page_busy(p);
2554				mpte = pmap_enter_quick(pmap,
2555					addr + i386_ptob(tmpidx),
2556					VM_PAGE_TO_PHYS(p), mpte);
2557				vm_page_flag_set(p, PG_MAPPED);
2558				vm_page_wakeup(p);
2559			}
2560		}
2561	}
2562	return;
2563}
2564
2565/*
2566 * pmap_prefault provides a quick way of clustering
2567 * pagefaults into a process's address space.  It is a "cousin"
2568 * of pmap_object_init_pt, except it runs at page fault time instead
2569 * of mmap time.
2570 */
2571#define PFBAK 4
2572#define PFFOR 4
2573#define PAGEORDER_SIZE (PFBAK+PFFOR)
2574
2575static int pmap_prefault_pageorder[] = {
2576	-PAGE_SIZE, PAGE_SIZE,
2577	-2 * PAGE_SIZE, 2 * PAGE_SIZE,
2578	-3 * PAGE_SIZE, 3 * PAGE_SIZE,
2579	-4 * PAGE_SIZE, 4 * PAGE_SIZE
2580};
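
/*
 * The offsets above fan out from the faulting address nearest-first:
 * one page back and forward, then two, three and four pages out, so
 * the neighbors most likely to be touched next are entered first.
 */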
2581
2582void
2583pmap_prefault(pmap, addra, entry)
2584	pmap_t pmap;
2585	vm_offset_t addra;
2586	vm_map_entry_t entry;
2587{
2588	int i;
2589	vm_offset_t starta;
2590	vm_offset_t addr;
2591	vm_pindex_t pindex;
2592	vm_page_t m, mpte;
2593	vm_object_t object;
2594
2595	if (!curproc || (pmap != vmspace_pmap(curproc->p_vmspace)))
2596		return;
2597
2598	object = entry->object.vm_object;
2599
2600	starta = addra - PFBAK * PAGE_SIZE;
2601	if (starta < entry->start) {
2602		starta = entry->start;
2603	} else if (starta > addra) {
2604		starta = 0;
2605	}
2606
2607	mpte = NULL;
2608	for (i = 0; i < PAGEORDER_SIZE; i++) {
2609		vm_object_t lobject;
2610		unsigned *pte;
2611
2612		addr = addra + pmap_prefault_pageorder[i];
2613		if (addr > addra + (PFFOR * PAGE_SIZE))
2614			addr = 0;
2615
2616		if (addr < starta || addr >= entry->end)
2617			continue;
2618
2619		if ((*pmap_pde(pmap, addr)) == 0)
2620			continue;
2621
2622		pte = (unsigned *) vtopte(addr);
2623		if (*pte)
2624			continue;
2625
2626		pindex = ((addr - entry->start) + entry->offset) >> PAGE_SHIFT;
2627		lobject = object;
2628		for (m = vm_page_lookup(lobject, pindex);
2629		    (!m && (lobject->type == OBJT_DEFAULT) && (lobject->backing_object));
2630		    lobject = lobject->backing_object) {
2631			if (lobject->backing_object_offset & PAGE_MASK)
2632				break;
2633			pindex += (lobject->backing_object_offset >> PAGE_SHIFT);
2634			m = vm_page_lookup(lobject->backing_object, pindex);
2635		}
2636
2637		/*
2638		 * give-up when a page is not in memory
2639		 */
2640		if (m == NULL)
2641			break;
2642
2643		if (((m->valid & VM_PAGE_BITS_ALL) == VM_PAGE_BITS_ALL) &&
2644			(m->busy == 0) &&
2645		    (m->flags & (PG_BUSY | PG_FICTITIOUS)) == 0) {
2646
2647			if ((m->queue - m->pc) == PQ_CACHE) {
2648				vm_page_deactivate(m);
2649			}
2650			vm_page_busy(m);
2651			mpte = pmap_enter_quick(pmap, addr,
2652				VM_PAGE_TO_PHYS(m), mpte);
2653			vm_page_flag_set(m, PG_MAPPED);
2654			vm_page_wakeup(m);
2655		}
2656	}
2657}
2658
2659/*
2660 *	Routine:	pmap_change_wiring
2661 *	Function:	Change the wiring attribute for a map/virtual-address
2662 *			pair.
2663 *	In/out conditions:
2664 *			The mapping must already exist in the pmap.
2665 */
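
/*
 * Illustrative use (sketch): code unwiring a region clears the
 * attribute one mapping at a time,
 *
 *	pmap_change_wiring(pmap, va, FALSE);
 *
 * leaving the translation itself intact.
 */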
2666void
2667pmap_change_wiring(pmap, va, wired)
2668	register pmap_t pmap;
2669	vm_offset_t va;
2670	boolean_t wired;
2671{
2672	register unsigned *pte;
2673
2674	if (pmap == NULL)
2675		return;
2676
2677	pte = pmap_pte(pmap, va);
2678
2679	if (wired && !pmap_pte_w(pte))
2680		pmap->pm_stats.wired_count++;
2681	else if (!wired && pmap_pte_w(pte))
2682		pmap->pm_stats.wired_count--;
2683
2684	/*
2685	 * Wiring is not a hardware characteristic so there is no need to
2686	 * invalidate TLB.
2687	 */
2688	pmap_pte_set_w(pte, wired);
2689}
2690
2691
2692
2693/*
2694 *	Copy the range specified by src_addr/len
2695 *	from the source map to the range dst_addr/len
2696 *	in the destination map.
2697 *
2698 *	This routine is only advisory and need not do anything.
2699 */
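
/*
 * Sketch of the intended fork-time use (names hypothetical):
 *
 *	pmap_copy(new_pmap, old_pmap, start, end - start, start);
 *
 * Note that dst_addr must equal src_addr; any other combination
 * returns immediately, as an advisory routine may.
 */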
2700
2701void
2702pmap_copy(dst_pmap, src_pmap, dst_addr, len, src_addr)
2703	pmap_t dst_pmap, src_pmap;
2704	vm_offset_t dst_addr;
2705	vm_size_t len;
2706	vm_offset_t src_addr;
2707{
2708	vm_offset_t addr;
2709	vm_offset_t end_addr = src_addr + len;
2710	vm_offset_t pdnxt;
2711	unsigned src_frame, dst_frame;
2712
2713	if (dst_addr != src_addr)
2714		return;
2715
2716	src_frame = ((unsigned) src_pmap->pm_pdir[PTDPTDI]) & PG_FRAME;
2717	if (src_frame != (((unsigned) PTDpde) & PG_FRAME)) {
2718		return;
2719	}
2720
2721	dst_frame = ((unsigned) dst_pmap->pm_pdir[PTDPTDI]) & PG_FRAME;
2722	if (dst_frame != (((unsigned) APTDpde) & PG_FRAME)) {
2723		APTDpde = (pd_entry_t) (dst_frame | PG_RW | PG_V);
2724		invltlb();
2725	}
2726
2727	for(addr = src_addr; addr < end_addr; addr = pdnxt) {
2728		unsigned *src_pte, *dst_pte;
2729		vm_page_t dstmpte, srcmpte;
2730		vm_offset_t srcptepaddr;
2731		unsigned ptepindex;
2732
2733#if !defined(MAX_PERF)
2734		if (addr >= UPT_MIN_ADDRESS)
2735			panic("pmap_copy: invalid to pmap_copy page tables");
2736#endif
2737
2738		/*
2739		 * Don't let optional prefaulting of pages make us go
2740		 * way below the low water mark of free pages or way
2741		 * above high water mark of used pv entries.
2742		 */
2743		if (cnt.v_free_count < cnt.v_free_reserved ||
2744		    pv_entry_count > pv_entry_high_water)
2745			break;
2746
2747		pdnxt = ((addr + PAGE_SIZE*NPTEPG) & ~(PAGE_SIZE*NPTEPG - 1));
2748		ptepindex = addr >> PDRSHIFT;
2749
2750		srcptepaddr = (vm_offset_t) src_pmap->pm_pdir[ptepindex];
2751		if (srcptepaddr == 0)
2752			continue;
2753
2754		if (srcptepaddr & PG_PS) {
2755			if (dst_pmap->pm_pdir[ptepindex] == 0) {
2756				dst_pmap->pm_pdir[ptepindex] = (pd_entry_t) srcptepaddr;
2757				dst_pmap->pm_stats.resident_count += NBPDR / PAGE_SIZE;
2758			}
2759			continue;
2760		}
2761
2762		srcmpte = vm_page_lookup(src_pmap->pm_pteobj, ptepindex);
2763		if ((srcmpte == NULL) ||
2764			(srcmpte->hold_count == 0) || (srcmpte->flags & PG_BUSY))
2765			continue;
2766
2767		if (pdnxt > end_addr)
2768			pdnxt = end_addr;
2769
2770		src_pte = (unsigned *) vtopte(addr);
2771		dst_pte = (unsigned *) avtopte(addr);
2772		while (addr < pdnxt) {
2773			unsigned ptetemp;
2774			ptetemp = *src_pte;
2775			/*
2776			 * we only virtual copy managed pages
2777			 */
2778			if ((ptetemp & PG_MANAGED) != 0) {
2779				/*
2780				 * We have to check after allocpte for the
2781				 * pte still being around...  allocpte can
2782				 * block.
2783				 */
2784				dstmpte = pmap_allocpte(dst_pmap, addr);
2785				if ((*dst_pte == 0) && (ptetemp = *src_pte)) {
2786					/*
2787					 * Clear the modified and
2788					 * accessed (referenced) bits
2789					 * during the copy.
2790					 */
2791					*dst_pte = ptetemp & ~(PG_M | PG_A);
2792					dst_pmap->pm_stats.resident_count++;
2793					pmap_insert_entry(dst_pmap, addr,
2794						dstmpte,
2795						(ptetemp & PG_FRAME));
2796	 			} else {
2797					pmap_unwire_pte_hold(dst_pmap, dstmpte);
2798				}
2799				if (dstmpte->hold_count >= srcmpte->hold_count)
2800					break;
2801			}
2802			addr += PAGE_SIZE;
2803			src_pte++;
2804			dst_pte++;
2805		}
2806	}
2807}
2808
2809/*
2810 *	Routine:	pmap_kernel
2811 *	Function:
2812 *		Returns the physical map handle for the kernel.
2813 */
2814pmap_t
2815pmap_kernel()
2816{
2817	return (kernel_pmap);
2818}
2819
2820/*
2821 *	pmap_zero_page zeros the specified (machine independent)
2822 *	page by mapping the page into virtual memory and using
2823 *	bzero to clear its contents, one machine dependent page
2824 *	at a time.
2825 */
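
/*
 * The body below is a temporary-mapping window: point a reserved PTE
 * (CMAP2, or the per-cpu prv_CMAP3 on SMP) at the physical page,
 * invalidate that one TLB entry, clear through the matching VA, and
 * unmap again.  In outline:
 *
 *	*(int *) CMAP2 = PG_V | PG_RW | (phys & PG_FRAME);
 *	invlpg((u_int) CADDR2);
 *	bzero(CADDR2, PAGE_SIZE);
 *	*(int *) CMAP2 = 0;
 */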
2826void
2827pmap_zero_page(phys)
2828	vm_offset_t phys;
2829{
2830#ifdef SMP
2831#if !defined(MAX_PERF)
2832	if (*(int *) prv_CMAP3)
2833		panic("pmap_zero_page: prv_CMAP3 busy");
2834#endif
2835
2836	*(int *) prv_CMAP3 = PG_V | PG_RW | (phys & PG_FRAME) | PG_A | PG_M;
2837	cpu_invlpg(&prv_CPAGE3);
2838
2839#if defined(I686_CPU)
2840	if (cpu_class == CPUCLASS_686)
2841		i686_pagezero(&prv_CPAGE3);
2842	else
2843#endif
2844		bzero(&prv_CPAGE3, PAGE_SIZE);
2845
2846	*(int *) prv_CMAP3 = 0;
2847#else
2848#if !defined(MAX_PERF)
2849	if (*(int *) CMAP2)
2850		panic("pmap_zero_page: CMAP2 busy");
2851#endif
2852
2853	*(int *) CMAP2 = PG_V | PG_RW | (phys & PG_FRAME) | PG_A | PG_M;
2854	if (cpu_class == CPUCLASS_386) {
2855		invltlb();
2856	} else {
2857		invlpg((u_int)CADDR2);
2858	}
2859
2860#if defined(I686_CPU)
2861	if (cpu_class == CPUCLASS_686)
2862		i686_pagezero(CADDR2);
2863	else
2864#endif
2865		bzero(CADDR2, PAGE_SIZE);
2866	*(int *) CMAP2 = 0;
2867#endif
2868}
2869
2870/*
2871 *	pmap_copy_page copies the specified (machine independent)
2872 *	page by mapping the page into virtual memory and using
2873 *	bcopy to copy the page, one machine dependent page at a
2874 *	time.
2875 */
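
/*
 * Same temporary-window technique as pmap_zero_page, but with two
 * reserved PTEs: the source is mapped read-only (PG_V) and the
 * destination writable (PG_V | PG_RW), then a single PAGE_SIZE bcopy
 * runs between the two windows.
 */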
2876void
2877pmap_copy_page(src, dst)
2878	vm_offset_t src;
2879	vm_offset_t dst;
2880{
2881#ifdef SMP
2882#if !defined(MAX_PERF)
2883	if (*(int *) prv_CMAP1)
2884		panic("pmap_copy_page: prv_CMAP1 busy");
2885	if (*(int *) prv_CMAP2)
2886		panic("pmap_copy_page: prv_CMAP2 busy");
2887#endif
2888
2889	*(int *) prv_CMAP1 = PG_V | (src & PG_FRAME) | PG_A;
2890	*(int *) prv_CMAP2 = PG_V | PG_RW | (dst & PG_FRAME) | PG_A | PG_M;
2891
2892	cpu_invlpg(&prv_CPAGE1);
2893	cpu_invlpg(&prv_CPAGE2);
2894
2895	bcopy(&prv_CPAGE1, &prv_CPAGE2, PAGE_SIZE);
2896
2897	*(int *) prv_CMAP1 = 0;
2898	*(int *) prv_CMAP2 = 0;
2899#else
2900#if !defined(MAX_PERF)
2901	if (*(int *) CMAP1 || *(int *) CMAP2)
2902		panic("pmap_copy_page: CMAP busy");
2903#endif
2904
2905	*(int *) CMAP1 = PG_V | (src & PG_FRAME) | PG_A;
2906	*(int *) CMAP2 = PG_V | PG_RW | (dst & PG_FRAME) | PG_A | PG_M;
2907	if (cpu_class == CPUCLASS_386) {
2908		invltlb();
2909	} else {
2910		invlpg((u_int)CADDR1);
2911		invlpg((u_int)CADDR2);
2912	}
2913
2914	bcopy(CADDR1, CADDR2, PAGE_SIZE);
2915
2916	*(int *) CMAP1 = 0;
2917	*(int *) CMAP2 = 0;
2918#endif
2919}
2920
2921
2922/*
2923 *	Routine:	pmap_pageable
2924 *	Function:
2925 *		Make the specified pages (by pmap, offset)
2926 *		pageable (or not) as requested.
2927 *
2928 *		A page which is not pageable may not take
2929 *		a fault; therefore, its page table entry
2930 *		must remain valid for the duration.
2931 *
2932 *		This routine is merely advisory; pmap_enter
2933 *		will specify that these pages are to be wired
2934 *		down (or not) as appropriate.
2935 */
2936void
2937pmap_pageable(pmap, sva, eva, pageable)
2938	pmap_t pmap;
2939	vm_offset_t sva, eva;
2940	boolean_t pageable;
2941{
2942}
2943
2944/*
2945 * this routine returns true if a physical page resides
2946 * in the given pmap.
2947 */
2948boolean_t
2949pmap_page_exists(pmap, pa)
2950	pmap_t pmap;
2951	vm_offset_t pa;
2952{
2953	register pv_entry_t pv;
2954	pv_table_t *ppv;
2955	int s;
2956
2957	if (!pmap_is_managed(pa))
2958		return FALSE;
2959
2960	s = splvm();
2961
2962	ppv = pa_to_pvh(pa);
2963	/*
2964	 * Check current mappings, returning immediately if one is found.
2965	 */
2966	for (pv = TAILQ_FIRST(&ppv->pv_list);
2967		pv;
2968		pv = TAILQ_NEXT(pv, pv_list)) {
2969		if (pv->pv_pmap == pmap) {
2970			splx(s);
2971			return TRUE;
2972		}
2973	}
2974	splx(s);
2975	return (FALSE);
2976}
2977
2978#define PMAP_REMOVE_PAGES_CURPROC_ONLY
2979/*
2980 * Remove all pages from the specified address space;
2981 * this speeds up process exit.  Also, this code
2982 * is special cased for current process only, but
2983 * can have the more generic (and slightly slower)
2984 * mode enabled.  This is much faster than pmap_remove
2985 * in the case of running down an entire address space.
2986 */
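
/*
 * Typical call (sketch) when running down a dying user address space:
 *
 *	pmap_remove_pages(pmap, VM_MIN_ADDRESS, VM_MAXUSER_ADDRESS);
 *
 * Wired pages are deliberately skipped, so the caller is expected to
 * have unwired everything first.
 */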
2987void
2988pmap_remove_pages(pmap, sva, eva)
2989	pmap_t pmap;
2990	vm_offset_t sva, eva;
2991{
2992	unsigned *pte, tpte;
2993	pv_table_t *ppv;
2994	pv_entry_t pv, npv;
2995	int s;
2996
2997#ifdef PMAP_REMOVE_PAGES_CURPROC_ONLY
2998	if (!curproc || (pmap != vmspace_pmap(curproc->p_vmspace))) {
2999		printf("warning: pmap_remove_pages called with non-current pmap\n");
3000		return;
3001	}
3002#endif
3003
3004	s = splvm();
3005	for(pv = TAILQ_FIRST(&pmap->pm_pvlist);
3006		pv;
3007		pv = npv) {
3008
3009		if (pv->pv_va >= eva || pv->pv_va < sva) {
3010			npv = TAILQ_NEXT(pv, pv_plist);
3011			continue;
3012		}
3013
3014#ifdef PMAP_REMOVE_PAGES_CURPROC_ONLY
3015		pte = (unsigned *)vtopte(pv->pv_va);
3016#else
3017		pte = pmap_pte_quick(pv->pv_pmap, pv->pv_va);
3018#endif
3019		tpte = *pte;
3020
3021		/*
3022		 * We cannot remove wired pages from a process' mapping at this time.
3023		 */
3024		if (tpte & PG_W) {
3025			npv = TAILQ_NEXT(pv, pv_plist);
3026			continue;
3027		}
3028		*pte = 0;
3029
3030		ppv = pa_to_pvh(tpte);
3031
3032		pv->pv_pmap->pm_stats.resident_count--;
3033
3034		/*
3035		 * Update the vm_page_t clean and reference bits.
3036		 */
3037		if (tpte & PG_M) {
3038			vm_page_dirty(ppv->pv_vm_page);
3039		}
3040
3041
3042		npv = TAILQ_NEXT(pv, pv_plist);
3043		TAILQ_REMOVE(&pv->pv_pmap->pm_pvlist, pv, pv_plist);
3044
3045		ppv->pv_list_count--;
3046		TAILQ_REMOVE(&ppv->pv_list, pv, pv_list);
3047		if (TAILQ_FIRST(&ppv->pv_list) == NULL) {
3048			vm_page_flag_clear(ppv->pv_vm_page, PG_MAPPED | PG_WRITEABLE);
3049		}
3050
3051		pmap_unuse_pt(pv->pv_pmap, pv->pv_va, pv->pv_ptem);
3052		free_pv_entry(pv);
3053	}
3054	splx(s);
3055	invltlb();
3056}
3057
3058/*
3059 * pmap_testbit tests bits in pte's
3060 * note that the testbit/changebit routines are inline,
3061 * and a lot of things compile-time evaluate.
3062 */
3063static boolean_t
3064pmap_testbit(pa, bit)
3065	register vm_offset_t pa;
3066	int bit;
3067{
3068	register pv_entry_t pv;
3069	pv_table_t *ppv;
3070	unsigned *pte;
3071	int s;
3072
3073	if (!pmap_is_managed(pa))
3074		return FALSE;
3075
3076	ppv = pa_to_pvh(pa);
3077	if (TAILQ_FIRST(&ppv->pv_list) == NULL)
3078		return FALSE;
3079
3080	s = splvm();
3081
3082	for (pv = TAILQ_FIRST(&ppv->pv_list);
3083		pv;
3084		pv = TAILQ_NEXT(pv, pv_list)) {
3085
3086		/*
3087		 * If the bit being tested is PG_A or PG_M, only consider
3088		 * addresses whose modified/referenced state is tracked;
3089		 * skip pager mappings (cf. pmap_track_modified()).
3090		 */
3091		if (bit & (PG_A|PG_M)) {
3092			if (!pmap_track_modified(pv->pv_va))
3093				continue;
3094		}
3095
3096#if defined(PMAP_DIAGNOSTIC)
3097		if (!pv->pv_pmap) {
3098			printf("Null pmap (tb) at va: 0x%x\n", pv->pv_va);
3099			continue;
3100		}
3101#endif
3102		pte = pmap_pte_quick(pv->pv_pmap, pv->pv_va);
3103		if (*pte & bit) {
3104			splx(s);
3105			return TRUE;
3106		}
3107	}
3108	splx(s);
3109	return (FALSE);
3110}
3111
3112/*
3113 * this routine is used to modify bits in ptes
3114 */
3115static void
3116pmap_changebit(pa, bit, setem)
3117	vm_offset_t pa;
3118	int bit;
3119	boolean_t setem;
3120{
3121	register pv_entry_t pv;
3122	pv_table_t *ppv;
3123	register unsigned *pte;
3124	int changed;
3125	int s;
3126
3127	if (!pmap_is_managed(pa))
3128		return;
3129
3130	s = splvm();
3131	changed = 0;
3132	ppv = pa_to_pvh(pa);
3133
3134	/*
3135	 * Loop over all current mappings, setting/clearing as appropriate.
3136	 * If setting RO, do we need to clear the VAC?
3137	 */
3138	for (pv = TAILQ_FIRST(&ppv->pv_list);
3139		pv;
3140		pv = TAILQ_NEXT(pv, pv_list)) {
3141
3142		/*
3143		 * don't write protect pager mappings
3144		 */
3145		if (!setem && (bit == PG_RW)) {
3146			if (!pmap_track_modified(pv->pv_va))
3147				continue;
3148		}
3149
3150#if defined(PMAP_DIAGNOSTIC)
3151		if (!pv->pv_pmap) {
3152			printf("Null pmap (cb) at va: 0x%x\n", pv->pv_va);
3153			continue;
3154		}
3155#endif
3156
3157		pte = pmap_pte_quick(pv->pv_pmap, pv->pv_va);
3158
3159		if (setem) {
3160			*(int *)pte |= bit;
3161			changed = 1;
3162		} else {
3163			vm_offset_t pbits = *(vm_offset_t *)pte;
3164			if (pbits & bit) {
3165				changed = 1;
3166				if (bit == PG_RW) {
3167					if (pbits & PG_M) {
3168						vm_page_dirty(ppv->pv_vm_page);
3169					}
3170					*(int *)pte = pbits & ~(PG_M|PG_RW);
3171				} else {
3172					*(int *)pte = pbits & ~bit;
3173				}
3174			}
3175		}
3176	}
3177	splx(s);
3178	if (changed)
3179		invltlb();
3180}
3181
3182/*
3183 *      pmap_page_protect:
3184 *
3185 *      Lower the permission for all mappings to a given page.
3186 */
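
/*
 * Example (sketch): write-protecting every mapping of page "m", as a
 * pageout path might before laundering it:
 *
 *	pmap_page_protect(VM_PAGE_TO_PHYS(m), VM_PROT_READ);
 *
 * Passing VM_PROT_NONE removes all of the page's mappings outright.
 */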
3187void
3188pmap_page_protect(vm_offset_t phys, vm_prot_t prot)
3189{
3190	if ((prot & VM_PROT_WRITE) == 0) {
3191		if (prot & (VM_PROT_READ | VM_PROT_EXECUTE)) {
3192			pmap_changebit(phys, PG_RW, FALSE);
3193		} else {
3194			pmap_remove_all(phys);
3195		}
3196	}
3197}
3198
3199vm_offset_t
3200pmap_phys_address(ppn)
3201	int ppn;
3202{
3203	return (i386_ptob(ppn));
3204}
3205
3206/*
3207 *	pmap_ts_referenced:
3208 *
3209 *	Return the count of reference bits for a page, clearing all of them.
3210 *
3211 */
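
/*
 * Illustrative caller (sketch): page aging can sample activity with
 *
 *	refs = pmap_ts_referenced(VM_PAGE_TO_PHYS(m));
 *
 * The loop below stops early once several PG_A bits have been found,
 * so the result is a bounded estimate rather than an exact count.
 */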
3212int
3213pmap_ts_referenced(vm_offset_t pa)
3214{
3215	register pv_entry_t pv, pvf, pvn;
3216	pv_table_t *ppv;
3217	unsigned *pte;
3218	int s;
3219	int rtval = 0;
3220
3221	if (!pmap_is_managed(pa))
3222		return (rtval);
3223
3224	s = splvm();
3225
3226	ppv = pa_to_pvh(pa);
3227
3228	if ((pv = TAILQ_FIRST(&ppv->pv_list)) != NULL) {
3229
3230		pvf = pv;
3231
3232		do {
3233			pvn = TAILQ_NEXT(pv, pv_list);
3234
3235			TAILQ_REMOVE(&ppv->pv_list, pv, pv_list);
3236
3237			TAILQ_INSERT_TAIL(&ppv->pv_list, pv, pv_list);
3238
3239			if (!pmap_track_modified(pv->pv_va))
3240				continue;
3241
3242			pte = pmap_pte_quick(pv->pv_pmap, pv->pv_va);
3243
3244			if (pte && *pte & PG_A) {
3245				*pte &= ~PG_A;
3246				rtval++;
3247				if (rtval > 4) {
3248					break;
3249				}
3250			}
3251		} while ((pv = pvn) != NULL && pv != pvf);
3252
3253		if (rtval) {
3254			invltlb();
3255		}
3256	}
3257	splx(s);
3258
3259	return (rtval);
3260}
3261
3262/*
3263 *	pmap_is_modified:
3264 *
3265 *	Return whether or not the specified physical page was modified
3266 *	in any physical maps.
3267 */
3268boolean_t
3269pmap_is_modified(vm_offset_t pa)
3270{
3271	return pmap_testbit((pa), PG_M);
3272}
3273
3274/*
3275 *	Clear the modify bits on the specified physical page.
3276 */
3277void
3278pmap_clear_modify(vm_offset_t pa)
3279{
3280	pmap_changebit((pa), PG_M, FALSE);
3281}
3282
3283/*
3284 *	pmap_clear_reference:
3285 *
3286 *	Clear the reference bit on the specified physical page.
3287 */
3288void
3289pmap_clear_reference(vm_offset_t pa)
3290{
3291	pmap_changebit((pa), PG_A, FALSE);
3292}
3293
3294/*
3295 * Miscellaneous support routines follow
3296 */
3297
3298static void
3299i386_protection_init()
3300{
3301	register int *kp, prot;
3302
3303	kp = protection_codes;
3304	for (prot = 0; prot < 8; prot++) {
3305		switch (prot) {
3306		case VM_PROT_NONE | VM_PROT_NONE | VM_PROT_NONE:
3307			/*
3308			 * Read access is also 0. There isn't any execute bit,
3309			 * so just make it readable.
3310			 */
3311		case VM_PROT_READ | VM_PROT_NONE | VM_PROT_NONE:
3312		case VM_PROT_READ | VM_PROT_NONE | VM_PROT_EXECUTE:
3313		case VM_PROT_NONE | VM_PROT_NONE | VM_PROT_EXECUTE:
3314			*kp++ = 0;
3315			break;
3316		case VM_PROT_NONE | VM_PROT_WRITE | VM_PROT_NONE:
3317		case VM_PROT_NONE | VM_PROT_WRITE | VM_PROT_EXECUTE:
3318		case VM_PROT_READ | VM_PROT_WRITE | VM_PROT_NONE:
3319		case VM_PROT_READ | VM_PROT_WRITE | VM_PROT_EXECUTE:
3320			*kp++ = PG_RW;
3321			break;
3322		}
3323	}
3324}
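
/*
 * Sketch of how the table is consumed (pte_prot() is assumed to be
 * the macro that indexes it by the VM_PROT_* triple):
 *
 *	newpte = pa | protection_codes[prot & 7] | PG_V;
 *
 * Any combination containing VM_PROT_WRITE yields PG_RW; everything
 * else yields 0, since i386 PTEs have no execute bit.
 */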
3325
3326/*
3327 * Map a set of physical memory pages into the kernel virtual
3328 * address space. Return a pointer to where it is mapped. This
3329 * routine is intended to be used for mapping device memory,
3330 * NOT real memory.
3331 */
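
/*
 * Hypothetical example: mapping a 4KB device register window at a
 * firmware-assigned physical address:
 *
 *	regs = pmap_mapdev(0xfe000000, 0x1000);
 *
 * The size is rounded up to whole pages, and there is no unmapping
 * counterpart here, so such mappings are effectively permanent.
 */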
3332void *
3333pmap_mapdev(pa, size)
3334	vm_offset_t pa;
3335	vm_size_t size;
3336{
3337	vm_offset_t va, tmpva;
3338	unsigned *pte;
3339
3340	size = roundup(size, PAGE_SIZE);
3341
3342	va = kmem_alloc_pageable(kernel_map, size);
3343#if !defined(MAX_PERF)
3344	if (!va)
3345		panic("pmap_mapdev: Couldn't alloc kernel virtual memory");
3346#endif
3347
3348	pa = pa & PG_FRAME;
3349	for (tmpva = va; size > 0;) {
3350		pte = (unsigned *)vtopte(tmpva);
3351		*pte = pa | PG_RW | PG_V | pgeflag;
3352		size -= PAGE_SIZE;
3353		tmpva += PAGE_SIZE;
3354		pa += PAGE_SIZE;
3355	}
3356	invltlb();
3357
3358	return ((void *) va);
3359}
3360
3361/*
3362 * perform the pmap work for mincore
3363 */
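
/*
 * The result is a bitmask describing a single address (sketch):
 *
 *	if (pmap_mincore(pmap, addr) & MINCORE_INCORE)
 *		... the page is resident ...
 *
 * The MODIFIED/REFERENCED bits distinguish "by us" (taken from this
 * pmap's PTE) from "by someone" (taken from the vm_page and other
 * pmaps).
 */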
3364int
3365pmap_mincore(pmap, addr)
3366	pmap_t pmap;
3367	vm_offset_t addr;
3368{
3369
3370	unsigned *ptep, pte;
3371	vm_page_t m;
3372	int val = 0;
3373
3374	ptep = pmap_pte(pmap, addr);
3375	if (ptep == 0) {
3376		return 0;
3377	}
3378
3379	if ((pte = *ptep) != 0) {
3380		pv_table_t *ppv;
3381		vm_offset_t pa;
3382
3383		val = MINCORE_INCORE;
3384		if ((pte & PG_MANAGED) == 0)
3385			return val;
3386
3387		pa = pte & PG_FRAME;
3388
3389		ppv = pa_to_pvh((pa & PG_FRAME));
3390		m = ppv->pv_vm_page;
3391
3392		/*
3393		 * Modified by us
3394		 */
3395		if (pte & PG_M)
3396			val |= MINCORE_MODIFIED|MINCORE_MODIFIED_OTHER;
3397		/*
3398		 * Modified by someone
3399		 */
3400		else if (m->dirty || pmap_is_modified(pa))
3401			val |= MINCORE_MODIFIED_OTHER;
3402		/*
3403		 * Referenced by us
3404		 */
3405		if (pte & PG_A)
3406			val |= MINCORE_REFERENCED|MINCORE_REFERENCED_OTHER;
3407
3408		/*
3409		 * Referenced by someone
3410		 */
3411		else if ((m->flags & PG_REFERENCED) || pmap_ts_referenced(pa)) {
3412			val |= MINCORE_REFERENCED_OTHER;
3413			vm_page_flag_set(m, PG_REFERENCED);
3414		}
3415	}
3416	return val;
3417}
3418
3419void
3420pmap_activate(struct proc *p)
3421{
3422	pmap_t	pmap;
3423
3424	pmap = vmspace_pmap(p->p_vmspace);
3425#if defined(SMP)
3426	pmap->pm_active |= 1 << cpuid;
3427#else
3428	pmap->pm_active |= 1;
3429#endif
3430#if defined(SWTCH_OPTIM_STATS)
3431	tlb_flush_count++;
3432#endif
3433	load_cr3(p->p_addr->u_pcb.pcb_cr3 = vtophys(pmap->pm_pdir));
3434}
3435
3436vm_offset_t
3437pmap_addr_hint(vm_object_t obj, vm_offset_t addr, vm_size_t size)
3438{
3439	if ((obj == NULL) || (size < NBPDR) || (obj->type != OBJT_DEVICE)) {
3440		return addr;
3441	}
3442
3443	addr = (addr + (NBPDR - 1)) & ~(NBPDR - 1);
3444	return addr;
3445}
3446
3447
3448#if defined(PMAP_DEBUG)
3449int pmap_pid_dump(int pid) {
3450	pmap_t pmap;
3451	struct proc *p;
3452	int npte = 0;
3453	int index;
3454	for (p = allproc.lh_first; p != NULL; p = p->p_list.le_next) {
3455		if (p->p_pid != pid)
3456			continue;
3457
3458		if (p->p_vmspace) {
3459			int i,j;
3460			index = 0;
3461			pmap = vmspace_pmap(p->p_vmspace);
3462			for(i=0;i<1024;i++) {
3463				pd_entry_t *pde;
3464				unsigned *pte;
3465				unsigned base = i << PDRSHIFT;
3466
3467				pde = &pmap->pm_pdir[i];
3468				if (pde && pmap_pde_v(pde)) {
3469					for(j=0;j<1024;j++) {
3470						unsigned va = base + (j << PAGE_SHIFT);
3471						if (va >= (vm_offset_t) VM_MIN_KERNEL_ADDRESS) {
3472							if (index) {
3473								index = 0;
3474								printf("\n");
3475							}
3476							return npte;
3477						}
3478						pte = pmap_pte_quick( pmap, va);
3479						if (pte && pmap_pte_v(pte)) {
3480							vm_offset_t pa;
3481							vm_page_t m;
3482							pa = *(int *)pte;
3483							m = PHYS_TO_VM_PAGE((pa & PG_FRAME));
3484							printf("va: 0x%x, pt: 0x%x, h: %d, w: %d, f: 0x%x",
3485								va, pa, m->hold_count, m->wire_count, m->flags);
3486							npte++;
3487							index++;
3488							if (index >= 2) {
3489								index = 0;
3490								printf("\n");
3491							} else {
3492								printf(" ");
3493							}
3494						}
3495					}
3496				}
3497			}
3498		}
3499	}
3500	return npte;
3501}
3502#endif
3503
3504#if defined(DEBUG)
3505
3506static void	pads __P((pmap_t pm));
3507void		pmap_pvdump __P((vm_offset_t pa));
3508
3509/* print address space of pmap*/
3510static void
3511pads(pm)
3512	pmap_t pm;
3513{
3514	unsigned va, i, j;
3515	unsigned *ptep;
3516
3517	if (pm == kernel_pmap)
3518		return;
3519	for (i = 0; i < 1024; i++)
3520		if (pm->pm_pdir[i])
3521			for (j = 0; j < 1024; j++) {
3522				va = (i << PDRSHIFT) + (j << PAGE_SHIFT);
3523				if (pm == kernel_pmap && va < KERNBASE)
3524					continue;
3525				if (pm != kernel_pmap && va > UPT_MAX_ADDRESS)
3526					continue;
3527				ptep = pmap_pte_quick(pm, va);
3528				if (pmap_pte_v(ptep))
3529					printf("%x:%x ", va, *(int *) ptep);
3530			}
3531
3532}
3533
3534void
3535pmap_pvdump(pa)
3536	vm_offset_t pa;
3537{
3538	pv_table_t *ppv;
3539	register pv_entry_t pv;
3540
3541	printf("pa %x", pa);
3542	ppv = pa_to_pvh(pa);
3543	for (pv = TAILQ_FIRST(&ppv->pv_list);
3544		pv;
3545		pv = TAILQ_NEXT(pv, pv_list)) {
3546#ifdef used_to_be
3547		printf(" -> pmap %p, va %x, flags %x",
3548		    (void *)pv->pv_pmap, pv->pv_va, pv->pv_flags);
3549#endif
3550		printf(" -> pmap %p, va %x", (void *)pv->pv_pmap, pv->pv_va);
3551		pads(pv->pv_pmap);
3552	}
3553	printf(" ");
3554}
3555#endif
3556