1/*
2 * Copyright (c) 1991 Regents of the University of California.
3 * All rights reserved.
4 * Copyright (c) 1994 John S. Dyson
5 * All rights reserved.
6 * Copyright (c) 1994 David Greenman
7 * All rights reserved.
8 *
9 * This code is derived from software contributed to Berkeley by
10 * the Systems Programming Group of the University of Utah Computer
11 * Science Department and William Jolitz of UUNET Technologies Inc.
12 *
13 * Redistribution and use in source and binary forms, with or without
14 * modification, are permitted provided that the following conditions
15 * are met:
16 * 1. Redistributions of source code must retain the above copyright
17 *    notice, this list of conditions and the following disclaimer.
18 * 2. Redistributions in binary form must reproduce the above copyright
19 *    notice, this list of conditions and the following disclaimer in the
20 *    documentation and/or other materials provided with the distribution.
21 * 3. All advertising materials mentioning features or use of this software
22 *    must display the following acknowledgement:
23 *	This product includes software developed by the University of
24 *	California, Berkeley and its contributors.
25 * 4. Neither the name of the University nor the names of its contributors
26 *    may be used to endorse or promote products derived from this software
27 *    without specific prior written permission.
28 *
29 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
30 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
31 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
32 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
33 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
34 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
35 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
36 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
37 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
38 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
39 * SUCH DAMAGE.
40 *
41 *	from:	@(#)pmap.c	7.7 (Berkeley)	5/12/91
42 * $FreeBSD: head/sys/i386/i386/pmap.c 101197 2002-08-02 04:14:19Z alc $
43 */
44
45/*
46 *	Manages physical address maps.
47 *
48 *	In addition to hardware address maps, this
49 *	module is called upon to provide software-use-only
50 *	maps which may or may not be stored in the same
51 *	form as hardware maps.  These pseudo-maps are
52 *	used to store intermediate results from copy
53 *	operations to and from address spaces.
54 *
55 *	Since the information managed by this module is
56 *	also stored by the logical address mapping module,
57 *	this module may throw away valid virtual-to-physical
58 *	mappings at almost any time.  However, invalidations
59 *	of virtual-to-physical mappings must be done as
60 *	requested.
61 *
62 *	In order to cope with hardware architectures which
63 *	make virtual-to-physical map invalidates expensive,
64 *	this module may delay invalidation or reduced-protection
65 *	operations until they are actually
66 *	necessary.  This module is given full information as
67 *	to which processors are currently using which maps,
68 *	and to when physical maps must be made correct.
69 */
70
71#include "opt_pmap.h"
72#include "opt_msgbuf.h"
73#include "opt_kstack_pages.h"
74
75#include <sys/param.h>
76#include <sys/systm.h>
77#include <sys/kernel.h>
78#include <sys/lock.h>
79#include <sys/mman.h>
80#include <sys/msgbuf.h>
81#include <sys/mutex.h>
82#include <sys/proc.h>
83#include <sys/sx.h>
84#include <sys/user.h>
85#include <sys/vmmeter.h>
86#include <sys/sysctl.h>
87#ifdef SMP
88#include <sys/smp.h>
89#endif
90
91#include <vm/vm.h>
92#include <vm/vm_param.h>
93#include <vm/vm_kern.h>
94#include <vm/vm_page.h>
95#include <vm/vm_map.h>
96#include <vm/vm_object.h>
97#include <vm/vm_extern.h>
98#include <vm/vm_pageout.h>
99#include <vm/vm_pager.h>
100#include <vm/uma.h>
101
102#include <machine/cpu.h>
103#include <machine/cputypes.h>
104#include <machine/md_var.h>
105#include <machine/specialreg.h>
106#if defined(SMP) || defined(APIC_IO)
107#include <machine/smp.h>
108#include <machine/apic.h>
109#include <machine/segments.h>
110#include <machine/tss.h>
111#endif /* SMP || APIC_IO */
112
113#define PMAP_KEEP_PDIRS
114#ifndef PMAP_SHPGPERPROC
115#define PMAP_SHPGPERPROC 200
116#endif
117
118#if defined(DIAGNOSTIC)
119#define PMAP_DIAGNOSTIC
120#endif
121
122#define MINPV 2048
123
124#if !defined(PMAP_DIAGNOSTIC)
125#define PMAP_INLINE __inline
126#else
127#define PMAP_INLINE
128#endif
129
130/*
131 * Get PDEs and PTEs for user/kernel address space
132 */
133#define	pmap_pde(m, v)	(&((m)->pm_pdir[(vm_offset_t)(v) >> PDRSHIFT]))
134#define pdir_pde(m, v) (m[(vm_offset_t)(v) >> PDRSHIFT])
135
136#define pmap_pde_v(pte)		((*(int *)pte & PG_V) != 0)
137#define pmap_pte_w(pte)		((*(int *)pte & PG_W) != 0)
138#define pmap_pte_m(pte)		((*(int *)pte & PG_M) != 0)
139#define pmap_pte_u(pte)		((*(int *)pte & PG_A) != 0)
140#define pmap_pte_v(pte)		((*(int *)pte & PG_V) != 0)
141
142#define pmap_pte_set_w(pte, v) ((v)?(*(int *)pte |= PG_W):(*(int *)pte &= ~PG_W))
143#define pmap_pte_set_prot(pte, v) ((*(int *)pte &= ~PG_PROT), (*(int *)pte |= (v)))
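/*
 * Illustrative sketch (not part of the original source): how the macros
 * above decompose a 32-bit virtual address, assuming the usual i386
 * constants PDRSHIFT == 22, PAGE_SHIFT == 12, and NPTEPG == 1024:
 *
 *	va = 0xbfbfe123
 *	page directory index = va >> PDRSHIFT             = 0x2fe (766)
 *	page table index     = (va >> PAGE_SHIFT) & 0x3ff = 0x3fe (1022)
 *	byte offset          = va & PAGE_MASK             = 0x123
 */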
144
145/*
146 * Given a map and a machine independent protection code,
147 * convert to an i386 protection code.
148 */
149#define pte_prot(m, p)	(protection_codes[p])
150static int protection_codes[8];
151
152struct pmap kernel_pmap_store;
153LIST_HEAD(pmaplist, pmap);
154struct pmaplist allpmaps;
155
156vm_offset_t avail_start;	/* PA of first available physical page */
157vm_offset_t avail_end;		/* PA of last available physical page */
158vm_offset_t virtual_avail;	/* VA of first avail page (after kernel bss) */
159vm_offset_t virtual_end;	/* VA of last avail page (end of kernel AS) */
160static boolean_t pmap_initialized = FALSE;	/* Has pmap_init completed? */
161static int pgeflag;		/* PG_G or-in */
162static int pseflag;		/* PG_PS or-in */
163
164static vm_object_t kptobj;
165
166static int nkpt;
167vm_offset_t kernel_vm_end;
168extern u_int32_t KERNend;
169
170/*
171 * Data for the pv entry allocation mechanism
172 */
173static uma_zone_t pvzone;
174static struct vm_object pvzone_obj;
175static int pv_entry_count = 0, pv_entry_max = 0, pv_entry_high_water = 0;
176static int pmap_pagedaemon_waken = 0;
177
178/*
179 * All those kernel PT submaps that BSD is so fond of
180 */
181pt_entry_t *CMAP1 = 0;
182static pt_entry_t *CMAP2, *CMAP3, *ptmmap;
183caddr_t CADDR1 = 0, ptvmmap = 0;
184static caddr_t CADDR2, CADDR3;
185static pt_entry_t *msgbufmap;
186struct msgbuf *msgbufp = 0;
187
188/*
189 * Crashdump maps.
190 */
191static pt_entry_t *pt_crashdumpmap;
192static caddr_t crashdumpmap;
193
194#ifdef SMP
195extern pt_entry_t *SMPpt;
196#endif
197static pt_entry_t *PMAP1 = 0;
198static pt_entry_t *PADDR1 = 0;
199
200static PMAP_INLINE void	free_pv_entry(pv_entry_t pv);
201static pt_entry_t *get_ptbase(pmap_t pmap);
202static pv_entry_t get_pv_entry(void);
203static void	i386_protection_init(void);
204static __inline void	pmap_changebit(vm_page_t m, int bit, boolean_t setem);
205
206static void	pmap_remove_all(vm_page_t m);
207static vm_page_t pmap_enter_quick(pmap_t pmap, vm_offset_t va,
208				      vm_page_t m, vm_page_t mpte);
209static int pmap_remove_pte(pmap_t pmap, pt_entry_t *ptq, vm_offset_t sva);
210static void pmap_remove_page(struct pmap *pmap, vm_offset_t va);
211static int pmap_remove_entry(struct pmap *pmap, vm_page_t m,
212					vm_offset_t va);
213static boolean_t pmap_testbit(vm_page_t m, int bit);
214static void pmap_insert_entry(pmap_t pmap, vm_offset_t va,
215		vm_page_t mpte, vm_page_t m);
216
217static vm_page_t pmap_allocpte(pmap_t pmap, vm_offset_t va);
218
219static int pmap_release_free_page(pmap_t pmap, vm_page_t p);
220static vm_page_t _pmap_allocpte(pmap_t pmap, unsigned ptepindex);
221static pt_entry_t *pmap_pte_quick(pmap_t pmap, vm_offset_t va);
222static vm_page_t pmap_page_lookup(vm_object_t object, vm_pindex_t pindex);
223static int pmap_unuse_pt(pmap_t, vm_offset_t, vm_page_t);
224static vm_offset_t pmap_kmem_choose(vm_offset_t addr);
225static void *pmap_allocf(uma_zone_t zone, int bytes, u_int8_t *flags, int wait);
226
227static pd_entry_t pdir4mb;
228
229/*
230 *	Routine:	pmap_pte
231 *	Function:
232 *		Extract the page table entry associated
233 *		with the given map/virtual_address pair.
234 */
235
236PMAP_INLINE pt_entry_t *
237pmap_pte(pmap, va)
238	register pmap_t pmap;
239	vm_offset_t va;
240{
241	pd_entry_t *pdeaddr;
242
243	if (pmap) {
244		pdeaddr = pmap_pde(pmap, va);
245		if (*pdeaddr & PG_PS)
246			return pdeaddr;
247		if (*pdeaddr) {
248			return get_ptbase(pmap) + i386_btop(va);
249		}
250	}
251	return (0);
252}
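/*
 * Usage sketch (illustrative only): translating a VA by hand with
 * pmap_pte().  Note that for a 4MB (PG_PS) mapping the returned pointer
 * actually refers to the PDE, so callers must check PG_PS themselves.
 *
 *	pt_entry_t *pte = pmap_pte(kernel_pmap, va);
 *	if (pte != NULL && (*pte & PG_V))
 *		pa = (*pte & PG_FRAME) | (va & PAGE_MASK);
 */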
253
254/*
255 * Move the kernel virtual free pointer to the next
256 * 4MB.  This is used to help improve performance
257 * by using a large (4MB) page for much of the kernel
258 * (.text, .data, .bss)
259 */
260static vm_offset_t
261pmap_kmem_choose(vm_offset_t addr)
262{
263	vm_offset_t newaddr = addr;
264
265#ifndef DISABLE_PSE
266	if (cpu_feature & CPUID_PSE)
267		newaddr = (addr + (NBPDR - 1)) & ~(NBPDR - 1);
268#endif
269	return newaddr;
270}
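/*
 * Worked example (illustrative): with NBPDR == 4MB (0x400000), an addr
 * of 0xc0345000 becomes (0xc0345000 + 0x3fffff) & ~0x3fffff, which
 * rounds up to the next 4MB boundary, 0xc0400000.
 */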
271
272/*
273 *	Bootstrap the system enough to run with virtual memory.
274 *
275 *	On the i386 this is called after mapping has already been enabled
276 *	and just syncs the pmap module with what has already been done.
277 *	[We can't call it easily with mapping off since the kernel is not
278 *	mapped with PA == VA, hence we would have to relocate every address
279 *	from the linked base (virtual) address "KERNBASE" to the actual
280 *	(physical) address starting relative to 0]
281 */
282void
283pmap_bootstrap(firstaddr, loadaddr)
284	vm_offset_t firstaddr;
285	vm_offset_t loadaddr;
286{
287	vm_offset_t va;
288	pt_entry_t *pte;
289	int i;
290
291	avail_start = firstaddr;
292
293	/*
294	 * XXX The calculation of virtual_avail is wrong. It's NKPT*PAGE_SIZE too
295	 * large. It should instead be correctly calculated in locore.s and
296	 * not based on 'first' (which is a physical address, not a virtual
297	 * address, for the start of unused physical memory). The kernel
298	 * page tables are NOT double mapped and thus should not be included
299	 * in this calculation.
300	 */
301	virtual_avail = (vm_offset_t) KERNBASE + firstaddr;
302	virtual_avail = pmap_kmem_choose(virtual_avail);
303
304	virtual_end = VM_MAX_KERNEL_ADDRESS;
305
306	/*
307	 * Initialize protection array.
308	 */
309	i386_protection_init();
310
311	/*
312	 * Initialize the kernel pmap (which is statically allocated).
313	 */
314	kernel_pmap->pm_pdir = (pd_entry_t *) (KERNBASE + (u_int)IdlePTD);
315	kernel_pmap->pm_active = -1;	/* don't allow deactivation */
316	TAILQ_INIT(&kernel_pmap->pm_pvlist);
317	LIST_INIT(&allpmaps);
318	LIST_INSERT_HEAD(&allpmaps, kernel_pmap, pm_list);
319	nkpt = NKPT;
320
321	/*
322	 * Reserve some special page table entries/VA space for temporary
323	 * mapping of pages.
324	 */
325#define	SYSMAP(c, p, v, n)	\
326	v = (c)va; va += ((n)*PAGE_SIZE); p = pte; pte += (n);
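	/*
	 * For clarity (not in the original source): SYSMAP(caddr_t,
	 * CMAP1, CADDR1, 1) expands to
	 *
	 *	CADDR1 = (caddr_t)va; va += (1*PAGE_SIZE); CMAP1 = pte; pte += (1);
	 *
	 * i.e. it carves n pages of VA space out of 'va' and remembers
	 * the PTE that maps the first of them.
	 */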
327
328	va = virtual_avail;
329	pte = (pt_entry_t *) pmap_pte(kernel_pmap, va);
330
331	/*
332	 * CMAP1/CMAP2 are used for zeroing and copying pages.
333	 * CMAP3 is used for the idle process page zeroing.
334	 */
335	SYSMAP(caddr_t, CMAP1, CADDR1, 1)
336	SYSMAP(caddr_t, CMAP2, CADDR2, 1)
337	SYSMAP(caddr_t, CMAP3, CADDR3, 1)
338
339	/*
340	 * Crashdump maps.
341	 */
342	SYSMAP(caddr_t, pt_crashdumpmap, crashdumpmap, MAXDUMPPGS);
343
344	/*
345	 * ptvmmap is used for reading arbitrary physical pages via /dev/mem.
346	 * XXX ptmmap is not used.
347	 */
348	SYSMAP(caddr_t, ptmmap, ptvmmap, 1)
349
350	/*
351	 * msgbufp is used to map the system message buffer.
352	 * XXX msgbufmap is not used.
353	 */
354	SYSMAP(struct msgbuf *, msgbufmap, msgbufp,
355	       atop(round_page(MSGBUF_SIZE)))
356
357	/*
358	 * ptemap is used for pmap_pte_quick
359	 */
360	SYSMAP(pt_entry_t *, PMAP1, PADDR1, 1);
361
362	virtual_avail = va;
363
364	*CMAP1 = *CMAP2 = 0;
365	for (i = 0; i < NKPT; i++)
366		PTD[i] = 0;
367
368	pgeflag = 0;
369#ifndef DISABLE_PG_G
370	if (cpu_feature & CPUID_PGE)
371		pgeflag = PG_G;
372#endif
373
374/*
375 * Initialize the 4MB page size flag
376 */
377	pseflag = 0;
378/*
379 * The 4MB page version of the initial
380 * kernel page mapping.
381 */
382	pdir4mb = 0;
383
384#ifndef DISABLE_PSE
385	if (cpu_feature & CPUID_PSE) {
386		pd_entry_t ptditmp;
387		/*
388		 * Note that we have enabled PSE mode
389		 */
390		pseflag = PG_PS;
391		ptditmp = *(PTmap + i386_btop(KERNBASE));
392		ptditmp &= ~(NBPDR - 1);
393		ptditmp |= PG_V | PG_RW | PG_PS | PG_U | pgeflag;
394		pdir4mb = ptditmp;
395	}
396#endif
397#ifndef SMP
398	/*
399	 * Turn on PGE/PSE.  SMP does this later on since the
400	 * 4K page tables are required for AP boot (for now).
401	 * XXX fixme.
402	 */
403	pmap_set_opt();
404#endif
405#ifdef SMP
406	if (cpu_apic_address == 0)
407		panic("pmap_bootstrap: no local apic! (non-SMP hardware?)");
408
409	/* local apic is mapped on last page */
410	SMPpt[NPTEPG - 1] = (pt_entry_t)(PG_V | PG_RW | PG_N | pgeflag |
411	    (cpu_apic_address & PG_FRAME));
412#endif
413	invltlb();
414}
415
416/*
417 * Enable 4MB page mode for MP startup.  Turn on PG_G support.
418 * The BSP will run this after all the APs have started up.
419 */
420void
421pmap_set_opt(void)
422{
423	pt_entry_t *pte;
424	vm_offset_t va, endva;
425
426	if (pgeflag && (cpu_feature & CPUID_PGE)) {
427		load_cr4(rcr4() | CR4_PGE);
428		invltlb();		/* Insurance */
429	}
430#ifndef DISABLE_PSE
431	if (pseflag && (cpu_feature & CPUID_PSE)) {
432		load_cr4(rcr4() | CR4_PSE);
433		invltlb();		/* Insurance */
434	}
435#endif
436	if (PCPU_GET(cpuid) == 0) {
437#ifndef DISABLE_PSE
438		if (pdir4mb) {
439			kernel_pmap->pm_pdir[KPTDI] = PTD[KPTDI] = pdir4mb;
440			invltlb();	/* Insurance */
441		}
442#endif
443		if (pgeflag) {
444			/* Turn on PG_G for text, data, bss pages. */
445			va = (vm_offset_t)btext;
446#ifndef DISABLE_PSE
447			if (pseflag && (cpu_feature & CPUID_PSE)) {
448				if (va < KERNBASE + (1 << PDRSHIFT))
449					va = KERNBASE + (1 << PDRSHIFT);
450			}
451#endif
452			endva = KERNBASE + KERNend;
453			while (va < endva) {
454				pte = vtopte(va);
455				if (*pte)
456					*pte |= pgeflag;
457				va += PAGE_SIZE;
458			}
459			invltlb();	/* Insurance */
460		}
461		/*
462		 * We do not need to broadcast the invltlb here, because
463		 * each AP does it the moment it is released from the boot
464		 * lock.  See ap_init().
465		 */
466	}
467}
468
469void *
470pmap_allocf(uma_zone_t zone, int bytes, u_int8_t *flags, int wait)
471{
472	*flags = UMA_SLAB_PRIV;
473	return (void *)kmem_alloc(kernel_map, bytes);
474}
475
476/*
477 *	Initialize the pmap module.
478 *	Called by vm_init to initialize any structures that the pmap
479 *	system needs in order to map virtual memory.
480 *	pmap_init has been enhanced to support, in a fairly consistent
481 *	way, discontiguous physical memory.
482 */
483void
484pmap_init(phys_start, phys_end)
485	vm_offset_t phys_start, phys_end;
486{
487	int i;
488	int initial_pvs;
489
490	/*
491	 * object for kernel page table pages
492	 */
493	kptobj = vm_object_allocate(OBJT_DEFAULT, NKPDE);
494
495	/*
496	 * Allocate memory for random pmap data structures.  Includes the
497	 * pv_head_table.
498	 */
499
500	for(i = 0; i < vm_page_array_size; i++) {
501		vm_page_t m;
502
503		m = &vm_page_array[i];
504		TAILQ_INIT(&m->md.pv_list);
505		m->md.pv_list_count = 0;
506	}
507
508	/*
509	 * init the pv free list
510	 */
511	initial_pvs = vm_page_array_size;
512	if (initial_pvs < MINPV)
513		initial_pvs = MINPV;
514	pvzone = uma_zcreate("PV ENTRY", sizeof (struct pv_entry), NULL, NULL,
515	    NULL, NULL, UMA_ALIGN_PTR, UMA_ZONE_VM);
516	uma_zone_set_allocf(pvzone, pmap_allocf);
517	uma_prealloc(pvzone, initial_pvs);
518
519	/*
520	 * Now it is safe to enable pv_table recording.
521	 */
522	pmap_initialized = TRUE;
523}
524
525/*
526 * Initialize the address space (zone) for the pv_entries.  Set a
527 * high water mark so that the system can recover from excessive
528 * numbers of pv entries.
529 */
530void
531pmap_init2()
532{
533	int shpgperproc = PMAP_SHPGPERPROC;
534
535	TUNABLE_INT_FETCH("vm.pmap.shpgperproc", &shpgperproc);
536	pv_entry_max = shpgperproc * maxproc + vm_page_array_size;
537	TUNABLE_INT_FETCH("vm.pmap.pv_entries", &pv_entry_max);
538	pv_entry_high_water = 9 * (pv_entry_max / 10);
539	uma_zone_set_obj(pvzone, &pvzone_obj, pv_entry_max);
540}
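/*
 * The tunables fetched above can be set at boot time, e.g. in
 * /boot/loader.conf (illustrative values, not defaults):
 *
 *	vm.pmap.shpgperproc="400"
 *	vm.pmap.pv_entries="2000000"
 */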
541
542
543/***************************************************
544 * Low level helper routines.....
545 ***************************************************/
546
547#if defined(PMAP_DIAGNOSTIC)
548
549/*
550 * This code checks for pages that are modified but not writable,
551 * which should be an invalid condition.
552 */
553static int
554pmap_nw_modified(pt_entry_t ptea)
555{
556	int pte;
557
558	pte = (int) ptea;
559
560	if ((pte & (PG_M|PG_RW)) == PG_M)
561		return 1;
562	else
563		return 0;
564}
565#endif
566
567
568/*
569 * This routine reports whether a virtual address lies outside the
570 * clean submap and thus should be tracked for the modified bit.
571 */
572static PMAP_INLINE int
573pmap_track_modified(vm_offset_t va)
574{
575	if ((va < kmi.clean_sva) || (va >= kmi.clean_eva))
576		return 1;
577	else
578		return 0;
579}
580
581#ifdef I386_CPU
582/*
583 * i386 only has "invalidate everything" and no SMP to worry about.
584 */
585PMAP_INLINE void
586pmap_invalidate_page(pmap_t pmap, vm_offset_t va)
587{
588
589	if (pmap == kernel_pmap || pmap->pm_active)
590		invltlb();
591}
592
593PMAP_INLINE void
594pmap_invalidate_range(pmap_t pmap, vm_offset_t sva, vm_offset_t eva)
595{
596
597	if (pmap == kernel_pmap || pmap->pm_active)
598		invltlb();
599}
600
601PMAP_INLINE void
602pmap_invalidate_all(pmap_t pmap)
603{
604
605	if (pmap == kernel_pmap || pmap->pm_active)
606		invltlb();
607}
608#else /* !I386_CPU */
609#ifdef SMP
610/*
611 * For SMP, these functions have to use the IPI mechanism for coherence.
612 */
613void
614pmap_invalidate_page(pmap_t pmap, vm_offset_t va)
615{
616	u_int cpumask;
617	u_int other_cpus;
618
619	critical_enter();
620	/*
621	 * We need to disable interrupt preemption but MUST NOT have
622	 * interrupts disabled here.
623	 * XXX we may need to hold schedlock to get a coherent pm_active
624	 */
625	if (pmap->pm_active == -1 || pmap->pm_active == all_cpus) {
626		invlpg(va);
627		smp_invlpg(va);
628	} else {
629		cpumask = PCPU_GET(cpumask);
630		other_cpus = PCPU_GET(other_cpus);
631		if (pmap->pm_active & cpumask)
632			invlpg(va);
633		if (pmap->pm_active & other_cpus)
634			smp_masked_invlpg(pmap->pm_active & other_cpus, va);
635	}
636	critical_exit();
637}
638
639void
640pmap_invalidate_range(pmap_t pmap, vm_offset_t sva, vm_offset_t eva)
641{
642	u_int cpumask;
643	u_int other_cpus;
644	vm_offset_t addr;
645
646	critical_enter();
647	/*
648	 * We need to disable interrupt preemption but MUST NOT have
649	 * interrupts disabled here.
650	 * XXX we may need to hold schedlock to get a coherent pm_active
651	 */
652	if (pmap->pm_active == -1 || pmap->pm_active == all_cpus) {
653		for (addr = sva; addr < eva; addr += PAGE_SIZE)
654			invlpg(addr);
655		smp_invlpg_range(sva, eva);
656	} else {
657		cpumask = PCPU_GET(cpumask);
658		other_cpus = PCPU_GET(other_cpus);
659		if (pmap->pm_active & cpumask)
660			for (addr = sva; addr < eva; addr += PAGE_SIZE)
661				invlpg(addr);
662		if (pmap->pm_active & other_cpus)
663			smp_masked_invlpg_range(pmap->pm_active & other_cpus,
664			    sva, eva);
665	}
666	critical_exit();
667}
668
669void
670pmap_invalidate_all(pmap_t pmap)
671{
672	u_int cpumask;
673	u_int other_cpus;
674
675#ifdef SWTCH_OPTIM_STATS
676	tlb_flush_count++;
677#endif
678	critical_enter();
679	/*
680	 * We need to disable interrupt preemption but MUST NOT have
681	 * interrupts disabled here.
682	 * XXX we may need to hold schedlock to get a coherent pm_active
683	 */
684	if (pmap->pm_active == -1 || pmap->pm_active == all_cpus) {
685		invltlb();
686		smp_invltlb();
687	} else {
688		cpumask = PCPU_GET(cpumask);
689		other_cpus = PCPU_GET(other_cpus);
690		if (pmap->pm_active & cpumask)
691			invltlb();
692		if (pmap->pm_active & other_cpus)
693			smp_masked_invltlb(pmap->pm_active & other_cpus);
694	}
695	critical_exit();
696}
697#else /* !SMP */
698/*
699 * Normal, non-SMP, 486+ invalidation functions.
700 * We inline these within pmap.c for speed.
701 */
702PMAP_INLINE void
703pmap_invalidate_page(pmap_t pmap, vm_offset_t va)
704{
705
706	if (pmap == kernel_pmap || pmap->pm_active)
707		invlpg(va);
708}
709
710PMAP_INLINE void
711pmap_invalidate_range(pmap_t pmap, vm_offset_t sva, vm_offset_t eva)
712{
713	vm_offset_t addr;
714
715	if (pmap == kernel_pmap || pmap->pm_active)
716		for (addr = sva; addr < eva; addr += PAGE_SIZE)
717			invlpg(addr);
718}
719
720PMAP_INLINE void
721pmap_invalidate_all(pmap_t pmap)
722{
723
724	if (pmap == kernel_pmap || pmap->pm_active)
725		invltlb();
726}
727#endif /* !SMP */
728#endif /* !I386_CPU */
729
730/*
731 * Return an address which is the base of the virtual mapping of
732 * all the PTEs for the given pmap. Note this doesn't say that
733 * all the PTEs will be present or that the pages there are valid.
734 * The PTEs are made available by the recursive mapping trick.
735 * It will map in the alternate PTE space if needed.
736 */
737static pt_entry_t *
738get_ptbase(pmap)
739	pmap_t pmap;
740{
741	pd_entry_t frame;
742
743	/* are we current address space or kernel? */
744	if (pmap == kernel_pmap)
745		return PTmap;
746	frame = pmap->pm_pdir[PTDPTDI] & PG_FRAME;
747	if (frame == (PTDpde & PG_FRAME))
748		return PTmap;
749	/* otherwise, we are alternate address space */
750	if (frame != (APTDpde & PG_FRAME)) {
751		APTDpde = (pd_entry_t) (frame | PG_RW | PG_V);
752		pmap_invalidate_all(kernel_pmap);	/* XXX Bandaid */
753	}
754	return APTmap;
755}
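/*
 * Sketch of the recursive-mapping arithmetic used above (illustrative):
 * because the page directory is installed as its own page table at
 * slot PTDPTDI, every PTE of the current address space appears at
 *
 *	PTmap + i386_btop(va)
 *
 * and the alternate space's PTEs appear at APTmap + i386_btop(va) once
 * APTDpde has been pointed at the other pmap's page directory.
 */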
756
757/*
758 * Super fast pmap_pte routine best used when scanning
759 * the pv lists.  This eliminates many coarse-grained
760 * invltlb calls.  Note that many of the pv list
761 * scans are across different pmaps.  It is very wasteful
762 * to do an entire invltlb for checking a single mapping.
763 */
764
765static pt_entry_t *
766pmap_pte_quick(pmap, va)
767	register pmap_t pmap;
768	vm_offset_t va;
769{
770	pd_entry_t pde, newpf;
771	pde = pmap->pm_pdir[va >> PDRSHIFT];
772	if (pde != 0) {
773		pd_entry_t frame = pmap->pm_pdir[PTDPTDI] & PG_FRAME;
774		unsigned index = i386_btop(va);
775		/* are we current address space or kernel? */
776		if (pmap == kernel_pmap || frame == (PTDpde & PG_FRAME))
777			return PTmap + index;
778		newpf = pde & PG_FRAME;
779		if (((*PMAP1) & PG_FRAME) != newpf) {
780			*PMAP1 = newpf | PG_RW | PG_V;
781			pmap_invalidate_page(kernel_pmap, (vm_offset_t)PADDR1);
782		}
783		return PADDR1 + (index & (NPTEPG - 1));
784	}
785	return (0);
786}
787
788/*
789 *	Routine:	pmap_extract
790 *	Function:
791 *		Extract the physical page address associated
792 *		with the given map/virtual_address pair.
793 */
794vm_offset_t
795pmap_extract(pmap, va)
796	register pmap_t pmap;
797	vm_offset_t va;
798{
799	vm_offset_t rtval;	/* XXX FIXME */
800	vm_offset_t pdirindex;
801
802	if (pmap == 0)
803		return 0;
804	pdirindex = va >> PDRSHIFT;
805	rtval = pmap->pm_pdir[pdirindex];
806	if (rtval != 0) {
807		pt_entry_t *pte;
808		if ((rtval & PG_PS) != 0) {
809			rtval &= ~(NBPDR - 1);
810			rtval |= va & (NBPDR - 1);
811			return rtval;
812		}
813		pte = get_ptbase(pmap) + i386_btop(va);
814		rtval = ((*pte & PG_FRAME) | (va & PAGE_MASK));
815		return rtval;
816	}
817	return 0;
818
819}
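/*
 * Usage sketch (illustrative only; 'buf' is a hypothetical kernel
 * pointer): find the physical address backing a kernel virtual
 * address.  A return value of 0 means there is no valid mapping.
 *
 *	vm_offset_t pa = pmap_extract(kernel_pmap, (vm_offset_t)buf);
 */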
820
821/***************************************************
822 * Low level mapping routines.....
823 ***************************************************/
824
825/*
826 * Add a wired page to the kva.
827 * Note: not SMP coherent.
828 */
829PMAP_INLINE void
830pmap_kenter(vm_offset_t va, vm_offset_t pa)
831{
832	pt_entry_t *pte;
833
834	pte = vtopte(va);
835	*pte = pa | PG_RW | PG_V | pgeflag;
836}
837
838/*
839 * Remove a page from the kernel pagetables.
840 * Note: not SMP coherent.
841 */
842PMAP_INLINE void
843pmap_kremove(vm_offset_t va)
844{
845	pt_entry_t *pte;
846
847	pte = vtopte(va);
848	*pte = 0;
849}
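/*
 * Typical single-page pattern (illustrative sketch): since neither
 * routine above is SMP coherent, the caller supplies the invalidation.
 *
 *	pmap_kenter(va, pa);
 *	pmap_invalidate_page(kernel_pmap, va);
 *	... access the page through va ...
 *	pmap_kremove(va);
 *	pmap_invalidate_page(kernel_pmap, va);
 */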
850
851/*
852 *	Used to map a range of physical addresses into kernel
853 *	virtual address space.
854 *
855 *	The value passed in '*virt' is a suggested virtual address for
856 *	the mapping. Architectures which can support a direct-mapped
857 *	physical to virtual region can return the appropriate address
858 *	within that region, leaving '*virt' unchanged. Other
859 *	architectures should map the pages starting at '*virt' and
860 *	update '*virt' with the first usable address after the mapped
861 *	region.
862 */
863vm_offset_t
864pmap_map(vm_offset_t *virt, vm_offset_t start, vm_offset_t end, int prot)
865{
866	vm_offset_t va, sva;
867
868	va = sva = *virt;
869	while (start < end) {
870		pmap_kenter(va, start);
871		va += PAGE_SIZE;
872		start += PAGE_SIZE;
873	}
874	pmap_invalidate_range(kernel_pmap, sva, va);
875	*virt = va;
876	return (sva);
877}
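/*
 * Usage sketch (hypothetical values): map a physical range during
 * startup and advance the free-VA cursor.
 *
 *	vm_offset_t va = virtual_avail;
 *	caddr_t p = (caddr_t)pmap_map(&va, pa_start, pa_end,
 *	    VM_PROT_READ | VM_PROT_WRITE);
 *	virtual_avail = va;
 */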
878
879
880/*
881 * Add a list of wired pages to the kva.
882 * This routine is only used for temporary
883 * kernel mappings that do not need to have
884 * page modification or references recorded.
885 * Note that old mappings are simply written
886 * over.  The page *must* be wired.
887 * Note: SMP coherent.  Uses a ranged shootdown IPI.
888 */
889void
890pmap_qenter(vm_offset_t sva, vm_page_t *m, int count)
891{
892	vm_offset_t va;
893
894	va = sva;
895	while (count-- > 0) {
896		pmap_kenter(va, VM_PAGE_TO_PHYS(*m));
897		va += PAGE_SIZE;
898		m++;
899	}
900	pmap_invalidate_range(kernel_pmap, sva, va);
901}
902
903/*
904 * This routine tears out page mappings from the
905 * kernel -- it is meant only for temporary mappings.
906 * Note: SMP coherent.  Uses a ranged shootdown IPI.
907 */
908void
909pmap_qremove(vm_offset_t sva, int count)
910{
911	vm_offset_t va;
912
913	va = sva;
914	while (count-- > 0) {
915		pmap_kremove(va);
916		va += PAGE_SIZE;
917	}
918	pmap_invalidate_range(kernel_pmap, sva, va);
919}
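/*
 * Typical pattern (illustrative sketch; 'ma' holds wired pages and
 * NPAGES is a hypothetical count):
 *
 *	vm_page_t ma[NPAGES];
 *	...
 *	pmap_qenter(sva, ma, NPAGES);
 *	... operate on the pages through sva ...
 *	pmap_qremove(sva, NPAGES);
 *
 * pmap_new_thread() below uses exactly this to wire up a kernel stack.
 */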
920
921static vm_page_t
922pmap_page_lookup(vm_object_t object, vm_pindex_t pindex)
923{
924	vm_page_t m;
925
926retry:
927	m = vm_page_lookup(object, pindex);
928	if (m && vm_page_sleep_busy(m, FALSE, "pplookp"))
929		goto retry;
930	return m;
931}
932
933/*
934 * Create the kernel stack (including pcb for i386) for a new thread.
935 * This routine directly affects the fork performance for a process and
936 * the creation performance for a thread.
937 */
938void
939pmap_new_thread(struct thread *td)
940{
941	int i;
942	vm_page_t ma[KSTACK_PAGES];
943	vm_object_t ksobj;
944	vm_page_t m;
945	vm_offset_t ks;
946
947	/*
948	 * allocate object for the kstack
949	 */
950	ksobj = vm_object_allocate(OBJT_DEFAULT, KSTACK_PAGES);
951	td->td_kstack_obj = ksobj;
952
953	/* get a kernel virtual address for the kstack for this thread */
954#ifdef KSTACK_GUARD
955	ks = kmem_alloc_nofault(kernel_map, (KSTACK_PAGES + 1) * PAGE_SIZE);
956	if (ks == 0)
957		panic("pmap_new_thread: kstack allocation failed");
958	if (*vtopte(ks) != 0)
959		pmap_qremove(ks, 1);
960	ks += PAGE_SIZE;
961	td->td_kstack = ks;
962#else
963	/* get a kernel virtual address for the kstack for this thread */
964	ks = kmem_alloc_nofault(kernel_map, KSTACK_PAGES * PAGE_SIZE);
965	if (ks == 0)
966		panic("pmap_new_thread: kstack allocation failed");
967	td->td_kstack = ks;
968#endif
969	/*
970	 * For the length of the stack, link in a real page of ram for each
971	 * page of stack.
972	 */
973	for (i = 0; i < KSTACK_PAGES; i++) {
974		/*
975		 * Get a kernel stack page
976		 */
977		m = vm_page_grab(ksobj, i,
978		    VM_ALLOC_NORMAL | VM_ALLOC_RETRY | VM_ALLOC_WIRED);
979		ma[i] = m;
980
981		vm_page_wakeup(m);
982		vm_page_flag_clear(m, PG_ZERO);
983		m->valid = VM_PAGE_BITS_ALL;
984	}
985	pmap_qenter(ks, ma, KSTACK_PAGES);
986}
987
988/*
989 * Dispose of the kernel stack for a thread that has exited.
990 * This routine directly impacts the exit performance of a process and thread.
991 */
992void
993pmap_dispose_thread(td)
994	struct thread *td;
995{
996	int i;
997	vm_object_t ksobj;
998	vm_offset_t ks;
999	vm_page_t m;
1000
1001	ksobj = td->td_kstack_obj;
1002	ks = td->td_kstack;
1003	pmap_qremove(ks, KSTACK_PAGES);
1004	for (i = 0; i < KSTACK_PAGES; i++) {
1005		m = vm_page_lookup(ksobj, i);
1006		if (m == NULL)
1007			panic("pmap_dispose_thread: kstack already missing?");
1008		vm_page_lock_queues();
1009		vm_page_busy(m);
1010		vm_page_unwire(m, 0);
1011		vm_page_free(m);
1012		vm_page_unlock_queues();
1013	}
1014	/*
1015	 * Free the space that this stack was mapped to in the kernel
1016	 * address map.
1017	 */
1018#ifdef KSTACK_GUARD
1019	kmem_free(kernel_map, ks - PAGE_SIZE, (KSTACK_PAGES + 1) * PAGE_SIZE);
1020#else
1021	kmem_free(kernel_map, ks, KSTACK_PAGES * PAGE_SIZE);
1022#endif
1023	vm_object_deallocate(ksobj);
1024}
1025
1026/*
1027 * Allow the kernel stack for a thread to be prejudicially paged out.
1028 */
1029void
1030pmap_swapout_thread(td)
1031	struct thread *td;
1032{
1033	int i;
1034	vm_object_t ksobj;
1035	vm_offset_t ks;
1036	vm_page_t m;
1037
1038	ksobj = td->td_kstack_obj;
1039	ks = td->td_kstack;
1040	pmap_qremove(ks, KSTACK_PAGES);
1041	for (i = 0; i < KSTACK_PAGES; i++) {
1042		m = vm_page_lookup(ksobj, i);
1043		if (m == NULL)
1044			panic("pmap_swapout_thread: kstack already missing?");
1045		vm_page_lock_queues();
1046		vm_page_dirty(m);
1047		vm_page_unwire(m, 0);
1048		vm_page_unlock_queues();
1049	}
1050}
1051
1052/*
1053 * Bring the kernel stack for a specified thread back in.
1054 */
1055void
1056pmap_swapin_thread(td)
1057	struct thread *td;
1058{
1059	int i, rv;
1060	vm_page_t ma[KSTACK_PAGES];
1061	vm_object_t ksobj;
1062	vm_offset_t ks;
1063	vm_page_t m;
1064
1065	ksobj = td->td_kstack_obj;
1066	ks = td->td_kstack;
1067	for (i = 0; i < KSTACK_PAGES; i++) {
1068		m = vm_page_grab(ksobj, i, VM_ALLOC_NORMAL | VM_ALLOC_RETRY);
1069		if (m->valid != VM_PAGE_BITS_ALL) {
1070			rv = vm_pager_get_pages(ksobj, &m, 1, 0);
1071			if (rv != VM_PAGER_OK)
1072				panic("pmap_swapin_thread: cannot get kstack for proc: %d\n", td->td_proc->p_pid);
1073			m = vm_page_lookup(ksobj, i);
1074			m->valid = VM_PAGE_BITS_ALL;
1075		}
1076		ma[i] = m;
1077		vm_page_lock_queues();
1078		vm_page_wire(m);
1079		vm_page_wakeup(m);
1080		vm_page_unlock_queues();
1081	}
1082	pmap_qenter(ks, ma, KSTACK_PAGES);
1083}
1084
1085/***************************************************
1086 * Page table page management routines.....
1087 ***************************************************/
1088
1089/*
1090 * This routine unholds page table pages, and if the hold count
1091 * drops to zero, then it decrements the wire count.
1092 */
1093static int
1094_pmap_unwire_pte_hold(pmap_t pmap, vm_page_t m)
1095{
1096
1097	while (vm_page_sleep_busy(m, FALSE, "pmuwpt"))
1098		;
1099
1100	if (m->hold_count == 0) {
1101		vm_offset_t pteva;
1102		/*
1103		 * unmap the page table page
1104		 */
1105		pmap->pm_pdir[m->pindex] = 0;
1106		--pmap->pm_stats.resident_count;
1107		if ((pmap->pm_pdir[PTDPTDI] & PG_FRAME) ==
1108		    (PTDpde & PG_FRAME)) {
1109			/*
1110			 * Do an invltlb to make the invalidated mapping
1111			 * take effect immediately.
1112			 */
1113			pteva = VM_MAXUSER_ADDRESS + i386_ptob(m->pindex);
1114			pmap_invalidate_page(pmap, pteva);
1115		}
1116
1117		if (pmap->pm_ptphint == m)
1118			pmap->pm_ptphint = NULL;
1119
1120		/*
1121		 * If the page is finally unwired, simply free it.
1122		 */
1123		--m->wire_count;
1124		if (m->wire_count == 0) {
1125
1126			vm_page_flash(m);
1127			vm_page_busy(m);
1128			vm_page_free_zero(m);
1129			--cnt.v_wire_count;
1130		}
1131		return 1;
1132	}
1133	return 0;
1134}
1135
1136static PMAP_INLINE int
1137pmap_unwire_pte_hold(pmap_t pmap, vm_page_t m)
1138{
1139	vm_page_unhold(m);
1140	if (m->hold_count == 0)
1141		return _pmap_unwire_pte_hold(pmap, m);
1142	else
1143		return 0;
1144}
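/*
 * Reference-count sketch (not in the original source): a page table
 * page is created with wire_count 1 and gains one hold_count per PTE
 * installed in it (see pmap_allocpte()).  Removing a PTE drops one
 * hold; when hold_count reaches 0, _pmap_unwire_pte_hold() above
 * unmaps the page table page and frees it once its wire count hits 0.
 */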
1145
1146/*
1147 * After removing a page table entry, this routine is used to
1148 * conditionally free the page, and manage the hold/wire counts.
1149 */
1150static int
1151pmap_unuse_pt(pmap_t pmap, vm_offset_t va, vm_page_t mpte)
1152{
1153	unsigned ptepindex;
1154	if (va >= VM_MAXUSER_ADDRESS)
1155		return 0;
1156
1157	if (mpte == NULL) {
1158		ptepindex = (va >> PDRSHIFT);
1159		if (pmap->pm_ptphint &&
1160			(pmap->pm_ptphint->pindex == ptepindex)) {
1161			mpte = pmap->pm_ptphint;
1162		} else {
1163			mpte = pmap_page_lookup(pmap->pm_pteobj, ptepindex);
1164			pmap->pm_ptphint = mpte;
1165		}
1166	}
1167
1168	return pmap_unwire_pte_hold(pmap, mpte);
1169}
1170
1171void
1172pmap_pinit0(pmap)
1173	struct pmap *pmap;
1174{
1175	pmap->pm_pdir =
1176		(pd_entry_t *)kmem_alloc_pageable(kernel_map, PAGE_SIZE);
1177	pmap_kenter((vm_offset_t)pmap->pm_pdir, (vm_offset_t)IdlePTD);
1178#ifndef I386_CPU
1179	invlpg((vm_offset_t)pmap->pm_pdir);
1180#else
1181	invltlb();
1182#endif
1183	pmap->pm_ptphint = NULL;
1184	pmap->pm_active = 0;
1185	TAILQ_INIT(&pmap->pm_pvlist);
1186	bzero(&pmap->pm_stats, sizeof pmap->pm_stats);
1187	LIST_INSERT_HEAD(&allpmaps, pmap, pm_list);
1188}
1189
1190/*
1191 * Initialize a preallocated and zeroed pmap structure,
1192 * such as one in a vmspace structure.
1193 */
1194void
1195pmap_pinit(pmap)
1196	register struct pmap *pmap;
1197{
1198	vm_page_t ptdpg;
1199
1200	/*
1201	 * No need to allocate page table space yet but we do need a valid
1202	 * page directory table.
1203	 */
1204	if (pmap->pm_pdir == NULL)
1205		pmap->pm_pdir =
1206			(pd_entry_t *)kmem_alloc_pageable(kernel_map, PAGE_SIZE);
1207
1208	/*
1209	 * allocate object for the ptes
1210	 */
1211	if (pmap->pm_pteobj == NULL)
1212		pmap->pm_pteobj = vm_object_allocate(OBJT_DEFAULT, PTDPTDI + 1);
1213
1214	/*
1215	 * allocate the page directory page
1216	 */
1217	ptdpg = vm_page_grab(pmap->pm_pteobj, PTDPTDI,
1218			VM_ALLOC_NORMAL | VM_ALLOC_RETRY | VM_ALLOC_WIRED);
1219	vm_page_flag_clear(ptdpg, PG_MAPPED | PG_BUSY); /* not usually mapped */
1220	ptdpg->valid = VM_PAGE_BITS_ALL;
1221
1222	pmap_qenter((vm_offset_t) pmap->pm_pdir, &ptdpg, 1);
1223	if ((ptdpg->flags & PG_ZERO) == 0)
1224		bzero(pmap->pm_pdir, PAGE_SIZE);
1225
1226	LIST_INSERT_HEAD(&allpmaps, pmap, pm_list);
1227	/* Wire in kernel global address entries. */
1228	/* XXX copies current process, does not fill in MPPTDI */
1229	bcopy(PTD + KPTDI, pmap->pm_pdir + KPTDI, nkpt * PTESIZE);
1230#ifdef SMP
1231	pmap->pm_pdir[MPPTDI] = PTD[MPPTDI];
1232#endif
1233
1234	/* install self-referential address mapping entry */
1235	pmap->pm_pdir[PTDPTDI] =
1236		VM_PAGE_TO_PHYS(ptdpg) | PG_V | PG_RW | PG_A | PG_M;
1237
1238	pmap->pm_active = 0;
1239	pmap->pm_ptphint = NULL;
1240	TAILQ_INIT(&pmap->pm_pvlist);
1241	bzero(&pmap->pm_stats, sizeof pmap->pm_stats);
1242}
1243
1244/*
1245 * Wire in kernel global address entries.  To avoid a race condition
1246 * between pmap initialization and pmap_growkernel, this procedure
1247 * should be called after the vmspace is attached to the process
1248 * but before this pmap is activated.
1249 */
1250void
1251pmap_pinit2(pmap)
1252	struct pmap *pmap;
1253{
1254	/* XXX: Remove this stub when no longer called */
1255}
1256
1257static int
1258pmap_release_free_page(pmap_t pmap, vm_page_t p)
1259{
1260	pd_entry_t *pde = pmap->pm_pdir;
1261	/*
1262	 * This code optimizes the case of freeing non-busy
1263	 * page-table pages.  Those pages are zero now, and
1264	 * might as well be placed directly into the zero queue.
1265	 */
1266	if (vm_page_sleep_busy(p, FALSE, "pmaprl"))
1267		return 0;
1268
1269	vm_page_lock_queues();
1270	vm_page_busy(p);
1271
1272	/*
1273	 * Remove the page table page from the process's address space.
1274	 */
1275	pde[p->pindex] = 0;
1276	pmap->pm_stats.resident_count--;
1277
1278	if (p->hold_count)  {
1279		panic("pmap_release: freeing held page table page");
1280	}
1281	/*
1282	 * Page directory pages need to have the kernel
1283	 * stuff cleared, so they can go into the zero queue also.
1284	 */
1285	if (p->pindex == PTDPTDI) {
1286		bzero(pde + KPTDI, nkpt * PTESIZE);
1287#ifdef SMP
1288		pde[MPPTDI] = 0;
1289#endif
1290		pde[APTDPTDI] = 0;
1291		pmap_kremove((vm_offset_t) pmap->pm_pdir);
1292	}
1293
1294	if (pmap->pm_ptphint && (pmap->pm_ptphint->pindex == p->pindex))
1295		pmap->pm_ptphint = NULL;
1296
1297	p->wire_count--;
1298	cnt.v_wire_count--;
1299	vm_page_free_zero(p);
1300	vm_page_unlock_queues();
1301	return 1;
1302}
1303
1304/*
1305 * this routine is called if the page table page is not
1306 * mapped correctly.
1307 */
1308static vm_page_t
1309_pmap_allocpte(pmap, ptepindex)
1310	pmap_t	pmap;
1311	unsigned ptepindex;
1312{
1313	vm_offset_t pteva, ptepa;	/* XXXPA */
1314	vm_page_t m;
1315
1316	/*
1317	 * Find or fabricate a new pagetable page
1318	 */
1319	m = vm_page_grab(pmap->pm_pteobj, ptepindex,
1320			VM_ALLOC_ZERO | VM_ALLOC_RETRY);
1321
1322	KASSERT(m->queue == PQ_NONE,
1323		("_pmap_allocpte: %p->queue != PQ_NONE", m));
1324
1325	if (m->wire_count == 0)
1326		cnt.v_wire_count++;
1327	m->wire_count++;
1328
1329	/*
1330	 * Increment the hold count for the page table page
1331	 * (denoting a new mapping.)
1332	 */
1333	m->hold_count++;
1334
1335	/*
1336	 * Map the pagetable page into the process address space, if
1337	 * it isn't already there.
1338	 */
1339
1340	pmap->pm_stats.resident_count++;
1341
1342	ptepa = VM_PAGE_TO_PHYS(m);
1343	pmap->pm_pdir[ptepindex] =
1344		(pd_entry_t) (ptepa | PG_U | PG_RW | PG_V | PG_A | PG_M);
1345
1346	/*
1347	 * Set the page table hint
1348	 */
1349	pmap->pm_ptphint = m;
1350
1351	/*
1352	 * Try to use the new mapping, but if we cannot, then
1353	 * do it with the routine that maps the page explicitly.
1354	 */
1355	if ((m->flags & PG_ZERO) == 0) {
1356		if ((pmap->pm_pdir[PTDPTDI] & PG_FRAME) ==
1357		    (PTDpde & PG_FRAME)) {
1358			pteva = VM_MAXUSER_ADDRESS + i386_ptob(ptepindex);
1359			bzero((caddr_t) pteva, PAGE_SIZE);
1360		} else {
1361			pmap_zero_page(m);
1362		}
1363	}
1364
1365	m->valid = VM_PAGE_BITS_ALL;
1366	vm_page_flag_clear(m, PG_ZERO);
1367	vm_page_flag_set(m, PG_MAPPED);
1368	vm_page_wakeup(m);
1369
1370	return m;
1371}
1372
1373static vm_page_t
1374pmap_allocpte(pmap_t pmap, vm_offset_t va)
1375{
1376	unsigned ptepindex;
1377	pd_entry_t ptepa;
1378	vm_page_t m;
1379
1380	/*
1381	 * Calculate pagetable page index
1382	 */
1383	ptepindex = va >> PDRSHIFT;
1384
1385	/*
1386	 * Get the page directory entry
1387	 */
1388	ptepa = (vm_offset_t) pmap->pm_pdir[ptepindex];
1389
1390	/*
1391	 * This supports switching from a 4MB page to a
1392	 * normal 4K page.
1393	 */
1394	if (ptepa & PG_PS) {
1395		pmap->pm_pdir[ptepindex] = 0;
1396		ptepa = 0;
1397		pmap_invalidate_all(kernel_pmap);
1398	}
1399
1400	/*
1401	 * If the page table page is mapped, we just increment the
1402	 * hold count, and activate it.
1403	 */
1404	if (ptepa) {
1405		/*
1406		 * In order to get the page table page, try the
1407		 * hint first.
1408		 */
1409		if (pmap->pm_ptphint &&
1410			(pmap->pm_ptphint->pindex == ptepindex)) {
1411			m = pmap->pm_ptphint;
1412		} else {
1413			m = pmap_page_lookup(pmap->pm_pteobj, ptepindex);
1414			pmap->pm_ptphint = m;
1415		}
1416		m->hold_count++;
1417		return m;
1418	}
1419	/*
1420	 * Here if the pte page isn't mapped, or if it has been deallocated.
1421	 */
1422	return _pmap_allocpte(pmap, ptepindex);
1423}
1424
1425
1426/***************************************************
1427 * Pmap allocation/deallocation routines.
1428 ***************************************************/
1429
1430/*
1431 * Release any resources held by the given physical map.
1432 * Called when a pmap initialized by pmap_pinit is being released.
1433 * Should only be called if the map contains no valid mappings.
1434 */
1435void
1436pmap_release(pmap_t pmap)
1437{
1438	vm_page_t p,n,ptdpg;
1439	vm_object_t object = pmap->pm_pteobj;
1440	int curgeneration;
1441
1442#if defined(DIAGNOSTIC)
1443	if (object->ref_count != 1)
1444		panic("pmap_release: pteobj reference count != 1");
1445#endif
1446
1447	ptdpg = NULL;
1448	LIST_REMOVE(pmap, pm_list);
1449retry:
1450	curgeneration = object->generation;
1451	for (p = TAILQ_FIRST(&object->memq); p != NULL; p = n) {
1452		n = TAILQ_NEXT(p, listq);
1453		if (p->pindex == PTDPTDI) {
1454			ptdpg = p;
1455			continue;
1456		}
1457		while (!pmap_release_free_page(pmap, p)) {
1458			/* The object may have changed while we slept. */
1459			if (object->generation != curgeneration)
1460				goto retry;
1461		}
1462	}
1463
1464	if (ptdpg && !pmap_release_free_page(pmap, ptdpg))
1465		goto retry;
1466}
1467
1468static int
1469kvm_size(SYSCTL_HANDLER_ARGS)
1470{
1471	unsigned long ksize = VM_MAX_KERNEL_ADDRESS - KERNBASE;
1472
1473	return sysctl_handle_long(oidp, &ksize, 0, req);
1474}
1475SYSCTL_PROC(_vm, OID_AUTO, kvm_size, CTLTYPE_LONG|CTLFLAG_RD,
1476    0, 0, kvm_size, "IU", "Size of KVM");
1477
1478static int
1479kvm_free(SYSCTL_HANDLER_ARGS)
1480{
1481	unsigned long kfree = VM_MAX_KERNEL_ADDRESS - kernel_vm_end;
1482
1483	return sysctl_handle_long(oidp, &kfree, 0, req);
1484}
1485SYSCTL_PROC(_vm, OID_AUTO, kvm_free, CTLTYPE_LONG|CTLFLAG_RD,
1486    0, 0, kvm_free, "IU", "Amount of KVM free");
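/*
 * Both sysctls are read-only; from userland (illustrative):
 *
 *	$ sysctl vm.kvm_size vm.kvm_free
 */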
1487
1488/*
1489 * grow the number of kernel page table entries, if needed
1490 */
1491void
1492pmap_growkernel(vm_offset_t addr)
1493{
1494	struct pmap *pmap;
1495	int s;
1496	vm_offset_t ptppaddr;
1497	vm_page_t nkpg;
1498	pd_entry_t newpdir;
1499
1500	s = splhigh();
1501	if (kernel_vm_end == 0) {
1502		kernel_vm_end = KERNBASE;
1503		nkpt = 0;
1504		while (pdir_pde(PTD, kernel_vm_end)) {
1505			kernel_vm_end = (kernel_vm_end + PAGE_SIZE * NPTEPG) & ~(PAGE_SIZE * NPTEPG - 1);
1506			nkpt++;
1507		}
1508	}
1509	addr = (addr + PAGE_SIZE * NPTEPG) & ~(PAGE_SIZE * NPTEPG - 1);
1510	while (kernel_vm_end < addr) {
1511		if (pdir_pde(PTD, kernel_vm_end)) {
1512			kernel_vm_end = (kernel_vm_end + PAGE_SIZE * NPTEPG) & ~(PAGE_SIZE * NPTEPG - 1);
1513			continue;
1514		}
1515
1516		/*
1517		 * This index is bogus, but out of the way
1518		 */
1519		nkpg = vm_page_alloc(kptobj, nkpt,
1520				     VM_ALLOC_SYSTEM | VM_ALLOC_WIRED);
1521		if (!nkpg)
1522			panic("pmap_growkernel: no memory to grow kernel");
1523
1524		nkpt++;
1525
1526		pmap_zero_page(nkpg);
1527		ptppaddr = VM_PAGE_TO_PHYS(nkpg);
1528		newpdir = (pd_entry_t) (ptppaddr | PG_V | PG_RW | PG_A | PG_M);
1529		pdir_pde(PTD, kernel_vm_end) = newpdir;
1530
1531		LIST_FOREACH(pmap, &allpmaps, pm_list) {
1532			*pmap_pde(pmap, kernel_vm_end) = newpdir;
1533		}
1534		kernel_vm_end = (kernel_vm_end + PAGE_SIZE * NPTEPG) & ~(PAGE_SIZE * NPTEPG - 1);
1535	}
1536	splx(s);
1537}
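/*
 * Worked example (illustrative): PAGE_SIZE * NPTEPG is 4MB, so for
 * addr == 0xc0a12345 the rounding above yields
 * (0xc0a12345 + 0x400000) & ~0x3fffff == 0xc0c00000, the first 4MB
 * boundary past the requested address.
 */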
1538
1539
1540/***************************************************
1541 * page management routines.
1542 ***************************************************/
1543
1544/*
1545 * free the pv_entry back to the free list
1546 */
1547static PMAP_INLINE void
1548free_pv_entry(pv_entry_t pv)
1549{
1550	pv_entry_count--;
1551	uma_zfree(pvzone, pv);
1552}
1553
1554/*
1555 * get a new pv_entry, allocating a block from the system
1556 * when needed.
1557 * the memory allocation is performed bypassing the malloc code
1558 * because of the possibility of allocations at interrupt time.
1559 */
1560static pv_entry_t
1561get_pv_entry(void)
1562{
1563	pv_entry_count++;
1564	if (pv_entry_high_water &&
1565		(pv_entry_count > pv_entry_high_water) &&
1566		(pmap_pagedaemon_waken == 0)) {
1567		pmap_pagedaemon_waken = 1;
1568		wakeup (&vm_pages_needed);
1569	}
1570	return uma_zalloc(pvzone, M_NOWAIT);
1571}
1572
1573/*
1574 * This routine is very drastic, but can save the system
1575 * in a pinch.
1576 */
1577void
1578pmap_collect()
1579{
1580	int i;
1581	vm_page_t m;
1582	static int warningdone = 0;
1583
1584	if (pmap_pagedaemon_waken == 0)
1585		return;
1586
1587	if (warningdone < 5) {
1588		printf("pmap_collect: collecting pv entries -- suggest increasing PMAP_SHPGPERPROC\n");
1589		warningdone++;
1590	}
1591
1592	for(i = 0; i < vm_page_array_size; i++) {
1593		m = &vm_page_array[i];
1594		if (m->wire_count || m->hold_count || m->busy ||
1595		    (m->flags & (PG_BUSY | PG_UNMANAGED)))
1596			continue;
1597		pmap_remove_all(m);
1598	}
1599	pmap_pagedaemon_waken = 0;
1600}
1601
1602
1603/*
1604 * Remove the pv entry for the given (pmap, va) pair, searching
1605 * whichever of the page's pv list and the pmap's pv list is
1606 * expected to be shorter.  In either case we free the now
1607 * unused entry.
1608 */
1609
1610static int
1611pmap_remove_entry(pmap_t pmap, vm_page_t m, vm_offset_t va)
1612{
1613	pv_entry_t pv;
1614	int rtval;
1615	int s;
1616
1617	s = splvm();
1618	if (m->md.pv_list_count < pmap->pm_stats.resident_count) {
1619		TAILQ_FOREACH(pv, &m->md.pv_list, pv_list) {
1620			if (pmap == pv->pv_pmap && va == pv->pv_va)
1621				break;
1622		}
1623	} else {
1624		TAILQ_FOREACH(pv, &pmap->pm_pvlist, pv_plist) {
1625			if (va == pv->pv_va)
1626				break;
1627		}
1628	}
1629
1630	rtval = 0;
1631	if (pv) {
1632		rtval = pmap_unuse_pt(pmap, va, pv->pv_ptem);
1633		TAILQ_REMOVE(&m->md.pv_list, pv, pv_list);
1634		m->md.pv_list_count--;
1635		if (TAILQ_FIRST(&m->md.pv_list) == NULL)
1636			vm_page_flag_clear(m, PG_MAPPED | PG_WRITEABLE);
1637
1638		TAILQ_REMOVE(&pmap->pm_pvlist, pv, pv_plist);
1639		free_pv_entry(pv);
1640	}
1641
1642	splx(s);
1643	return rtval;
1644}
1645
1646/*
1647 * Create a pv entry for page at pa for
1648 * (pmap, va).
1649 */
1650static void
1651pmap_insert_entry(pmap_t pmap, vm_offset_t va, vm_page_t mpte, vm_page_t m)
1652{
1653
1654	int s;
1655	pv_entry_t pv;
1656
1657	s = splvm();
1658	pv = get_pv_entry();
1659	pv->pv_va = va;
1660	pv->pv_pmap = pmap;
1661	pv->pv_ptem = mpte;
1662
1663	TAILQ_INSERT_TAIL(&pmap->pm_pvlist, pv, pv_plist);
1664	TAILQ_INSERT_TAIL(&m->md.pv_list, pv, pv_list);
1665	m->md.pv_list_count++;
1666
1667	splx(s);
1668}
1669
1670/*
1671 * pmap_remove_pte: do the things to unmap a page in a process
1672 */
1673static int
1674pmap_remove_pte(pmap_t pmap, pt_entry_t *ptq, vm_offset_t va)
1675{
1676	pt_entry_t oldpte;
1677	vm_page_t m;
1678
1679	oldpte = atomic_readandclear_int(ptq);
1680	if (oldpte & PG_W)
1681		pmap->pm_stats.wired_count -= 1;
1682	/*
1683	 * Machines that don't support invlpg also don't support
1684	 * PG_G.
1685	 */
1686	if (oldpte & PG_G)
1687		pmap_invalidate_page(kernel_pmap, va);
1688	pmap->pm_stats.resident_count -= 1;
1689	if (oldpte & PG_MANAGED) {
1690		m = PHYS_TO_VM_PAGE(oldpte);
1691		if (oldpte & PG_M) {
1692#if defined(PMAP_DIAGNOSTIC)
1693			if (pmap_nw_modified((pt_entry_t) oldpte)) {
1694				printf(
1695	"pmap_remove: modified page not writable: va: 0x%x, pte: 0x%x\n",
1696				    va, oldpte);
1697			}
1698#endif
1699			if (pmap_track_modified(va))
1700				vm_page_dirty(m);
1701		}
1702		if (oldpte & PG_A)
1703			vm_page_flag_set(m, PG_REFERENCED);
1704		return pmap_remove_entry(pmap, m, va);
1705	} else {
1706		return pmap_unuse_pt(pmap, va, NULL);
1707	}
1708
1709	return 0;
1710}
1711
1712/*
1713 * Remove a single page from a process address space
1714 */
1715static void
1716pmap_remove_page(pmap_t pmap, vm_offset_t va)
1717{
1718	register pt_entry_t *ptq;
1719
1720	/*
1721	 * if there is no pte for this address, just skip it!!!
1722	 */
1723	if (*pmap_pde(pmap, va) == 0) {
1724		return;
1725	}
1726
1727	/*
1728	 * get a local va for mappings for this pmap.
1729	 */
1730	ptq = get_ptbase(pmap) + i386_btop(va);
1731	if (*ptq) {
1732		(void) pmap_remove_pte(pmap, ptq, va);
1733		pmap_invalidate_page(pmap, va);
1734	}
1735	return;
1736}
1737
1738/*
1739 *	Remove the given range of addresses from the specified map.
1740 *
1741 *	It is assumed that the start and end are properly
1742 *	rounded to the page size.
1743 */
1744void
1745pmap_remove(pmap_t pmap, vm_offset_t sva, vm_offset_t eva)
1746{
1747	register pt_entry_t *ptbase;
1748	vm_offset_t pdnxt;
1749	pd_entry_t ptpaddr;
1750	vm_offset_t sindex, eindex;
1751	int anyvalid;
1752
1753	if (pmap == NULL)
1754		return;
1755
1756	if (pmap->pm_stats.resident_count == 0)
1757		return;
1758
1759	/*
1760	 * Special handling for removing a single page: it is a very
1761	 * common operation, and it is easy to short-circuit some
1762	 * code for it.
1763	 */
1764	if ((sva + PAGE_SIZE == eva) &&
1765	    ((pmap->pm_pdir[(sva >> PDRSHIFT)] & PG_PS) == 0)) {
1766		pmap_remove_page(pmap, sva);
1767		return;
1768	}
1769
1770	anyvalid = 0;
1771
1772	/*
1773	 * Get a local virtual address for the mappings that are being
1774	 * worked with.
1775	 */
1776	ptbase = get_ptbase(pmap);
1777
1778	sindex = i386_btop(sva);
1779	eindex = i386_btop(eva);
1780
1781	for (; sindex < eindex; sindex = pdnxt) {
1782		unsigned pdirindex;
1783
1784		/*
1785		 * Calculate index for next page table.
1786		 */
1787		pdnxt = ((sindex + NPTEPG) & ~(NPTEPG - 1));
1788		if (pmap->pm_stats.resident_count == 0)
1789			break;
1790
1791		pdirindex = sindex / NPDEPG;
1792		ptpaddr = pmap->pm_pdir[pdirindex];
1793		if ((ptpaddr & PG_PS) != 0) {
1794			pmap->pm_pdir[pdirindex] = 0;
1795			pmap->pm_stats.resident_count -= NBPDR / PAGE_SIZE;
1796			anyvalid++;
1797			continue;
1798		}
1799
1800		/*
1801		 * Weed out invalid mappings. Note: we assume that the page
1802		 * directory table is always allocated, and in kernel virtual.
1803		 */
1804		if (ptpaddr == 0)
1805			continue;
1806
1807		/*
1808		 * Limit our scan to either the end of the va represented
1809		 * by the current page table page, or to the end of the
1810		 * range being removed.
1811		 */
1812		if (pdnxt > eindex) {
1813			pdnxt = eindex;
1814		}
1815
1816		for (; sindex != pdnxt; sindex++) {
1817			vm_offset_t va;
1818			if (ptbase[sindex] == 0) {
1819				continue;
1820			}
1821			va = i386_ptob(sindex);
1822
1823			anyvalid++;
1824			if (pmap_remove_pte(pmap,
1825				ptbase + sindex, va))
1826				break;
1827		}
1828	}
1829
1830	if (anyvalid)
1831		pmap_invalidate_all(pmap);
1832}
1833
1834/*
1835 *	Routine:	pmap_remove_all
1836 *	Function:
1837 *		Removes this physical page from
1838 *		all physical maps in which it resides.
1839 *		Reflects back modify bits to the pager.
1840 *
1841 *	Notes:
1842 *		Original versions of this routine were very
1843 *		inefficient because they iteratively called
1844 *		pmap_remove (slow...)
1845 */
1846
1847static void
1848pmap_remove_all(vm_page_t m)
1849{
1850	register pv_entry_t pv;
1851	pt_entry_t *pte, tpte;
1852	int s;
1853
1854#if defined(PMAP_DIAGNOSTIC)
1855	/*
1856	 * XXX this makes pmap_page_protect(NONE) illegal for non-managed
1857	 * pages!
1858	 */
1859	if (!pmap_initialized || (m->flags & PG_FICTITIOUS)) {
1860		panic("pmap_page_protect: illegal for unmanaged page, va: 0x%x", VM_PAGE_TO_PHYS(m));
1861	}
1862#endif
1863
1864	s = splvm();
1865	while ((pv = TAILQ_FIRST(&m->md.pv_list)) != NULL) {
1866		pv->pv_pmap->pm_stats.resident_count--;
1867
1868		pte = pmap_pte_quick(pv->pv_pmap, pv->pv_va);
1869
1870		tpte = atomic_readandclear_int(pte);
1871		if (tpte & PG_W)
1872			pv->pv_pmap->pm_stats.wired_count--;
1873
1874		if (tpte & PG_A)
1875			vm_page_flag_set(m, PG_REFERENCED);
1876
1877		/*
1878		 * Update the vm_page_t clean and reference bits.
1879		 */
1880		if (tpte & PG_M) {
1881#if defined(PMAP_DIAGNOSTIC)
1882			if (pmap_nw_modified((pt_entry_t) tpte)) {
1883				printf(
1884	"pmap_remove_all: modified page not writable: va: 0x%x, pte: 0x%x\n",
1885				    pv->pv_va, tpte);
1886			}
1887#endif
1888			if (pmap_track_modified(pv->pv_va))
1889				vm_page_dirty(m);
1890		}
1891		pmap_invalidate_page(pv->pv_pmap, pv->pv_va);
1892
1893		TAILQ_REMOVE(&pv->pv_pmap->pm_pvlist, pv, pv_plist);
1894		TAILQ_REMOVE(&m->md.pv_list, pv, pv_list);
1895		m->md.pv_list_count--;
1896		pmap_unuse_pt(pv->pv_pmap, pv->pv_va, pv->pv_ptem);
1897		free_pv_entry(pv);
1898	}
1899
1900	vm_page_flag_clear(m, PG_MAPPED | PG_WRITEABLE);
1901
1902	splx(s);
1903}
1904
1905/*
1906 *	Set the physical protection on the
1907 *	specified range of this map as requested.
1908 */
1909void
1910pmap_protect(pmap_t pmap, vm_offset_t sva, vm_offset_t eva, vm_prot_t prot)
1911{
1912	register pt_entry_t *ptbase;
1913	vm_offset_t pdnxt;
1914	pd_entry_t ptpaddr;
1915	vm_offset_t sindex, eindex;
1916	int anychanged;
1917
1918	if (pmap == NULL)
1919		return;
1920
1921	if ((prot & VM_PROT_READ) == VM_PROT_NONE) {
1922		pmap_remove(pmap, sva, eva);
1923		return;
1924	}
1925
1926	if (prot & VM_PROT_WRITE)
1927		return;
1928
1929	anychanged = 0;
1930
1931	ptbase = get_ptbase(pmap);
1932
1933	sindex = i386_btop(sva);
1934	eindex = i386_btop(eva);
1935
1936	for (; sindex < eindex; sindex = pdnxt) {
1937
1938		unsigned pdirindex;
1939
1940		pdnxt = ((sindex + NPTEPG) & ~(NPTEPG - 1));
1941
1942		pdirindex = sindex / NPDEPG;
1943		ptpaddr = pmap->pm_pdir[pdirindex];
1944		if ((ptpaddr & PG_PS) != 0) {
1945			pmap->pm_pdir[pdirindex] &= ~(PG_M|PG_RW);
1946			pmap->pm_stats.resident_count -= NBPDR / PAGE_SIZE;
1947			anychanged++;
1948			continue;
1949		}
1950
1951		/*
1952		 * Weed out invalid mappings. Note: we assume that the page
1953		 * directory table is always allocated, and in kernel virtual.
1954		 */
1955		if (ptpaddr == 0)
1956			continue;
1957
1958		if (pdnxt > eindex) {
1959			pdnxt = eindex;
1960		}
1961
1962		for (; sindex != pdnxt; sindex++) {
1963
1964			pt_entry_t pbits;
1965			vm_page_t m;
1966
1967			pbits = ptbase[sindex];
1968
1969			if (pbits & PG_MANAGED) {
1970				m = NULL;
1971				if (pbits & PG_A) {
1972					m = PHYS_TO_VM_PAGE(pbits);
1973					vm_page_flag_set(m, PG_REFERENCED);
1974					pbits &= ~PG_A;
1975				}
1976				if (pbits & PG_M) {
1977					if (pmap_track_modified(i386_ptob(sindex))) {
1978						if (m == NULL)
1979							m = PHYS_TO_VM_PAGE(pbits);
1980						vm_page_dirty(m);
1981						pbits &= ~PG_M;
1982					}
1983				}
1984			}
1985
1986			pbits &= ~PG_RW;
1987
1988			if (pbits != ptbase[sindex]) {
1989				ptbase[sindex] = pbits;
1990				anychanged = 1;
1991			}
1992		}
1993	}
1994	if (anychanged)
1995		pmap_invalidate_all(pmap);
1996}
1997
1998/*
1999 *	Insert the given physical page (p) at
2000 *	the specified virtual address (v) in the
2001 *	target physical map with the protection requested.
2002 *
2003 *	If specified, the page will be wired down, meaning
2004 *	that the related pte can not be reclaimed.
2005 *
2006 *	NB:  This is the only routine which MAY NOT lazy-evaluate
2007 *	or lose information.  That is, this routine must actually
2008 *	insert this page into the given map NOW.
2009 */
2010void
2011pmap_enter(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot,
2012	   boolean_t wired)
2013{
2014	vm_offset_t pa;
2015	register pt_entry_t *pte;
2016	vm_offset_t opa;
2017	pt_entry_t origpte, newpte;
2018	vm_page_t mpte;
2019
2020	if (pmap == NULL)
2021		return;
2022
2023	va &= PG_FRAME;
2024#ifdef PMAP_DIAGNOSTIC
2025	if (va > VM_MAX_KERNEL_ADDRESS)
2026		panic("pmap_enter: toobig");
2027	if ((va >= UPT_MIN_ADDRESS) && (va < UPT_MAX_ADDRESS))
2028		panic("pmap_enter: invalid to pmap_enter page table pages (va: 0x%x)", va);
2029#endif
2030
2031	mpte = NULL;
2032	/*
2033	 * In the case that a page table page is not
2034	 * resident, we are creating it here.
2035	 */
2036	if (va < VM_MAXUSER_ADDRESS) {
2037		mpte = pmap_allocpte(pmap, va);
2038	}
2039#if 0 && defined(PMAP_DIAGNOSTIC)
2040	else {
2041		pd_entry_t *pdeaddr = pmap_pde(pmap, va);
2042		origpte = *pdeaddr;
2043		if ((origpte & PG_V) == 0) {
2044			panic("pmap_enter: invalid kernel page table page, pdir=%p, pde=%p, va=%p\n",
2045				pmap->pm_pdir[PTDPTDI], origpte, va);
2046		}
2047	}
2048#endif
2049
2050	pte = pmap_pte(pmap, va);
2051
2052	/*
2053	 * Page Directory table entry not valid, we need a new PT page
2054	 */
2055	if (pte == NULL) {
2056		panic("pmap_enter: invalid page directory, pdir=%p, va=0x%x\n",
2057			(void *)pmap->pm_pdir[PTDPTDI], va);
2058	}
2059
2060	pa = VM_PAGE_TO_PHYS(m) & PG_FRAME;
2061	origpte = *(vm_offset_t *)pte;
2062	opa = origpte & PG_FRAME;
2063
2064	if (origpte & PG_PS)
2065		panic("pmap_enter: attempted pmap_enter on 4MB page");
2066
2067	/*
2068	 * Mapping has not changed, must be protection or wiring change.
2069	 */
2070	if (origpte && (opa == pa)) {
2071		/*
2072		 * Wiring change, just update stats. We don't worry about
2073		 * wiring PT pages as they remain resident as long as there
2074		 * are valid mappings in them. Hence, if a user page is wired,
2075		 * the PT page will be also.
2076		 */
2077		if (wired && ((origpte & PG_W) == 0))
2078			pmap->pm_stats.wired_count++;
2079		else if (!wired && (origpte & PG_W))
2080			pmap->pm_stats.wired_count--;
2081
2082#if defined(PMAP_DIAGNOSTIC)
2083		if (pmap_nw_modified((pt_entry_t) origpte)) {
2084			printf(
2085	"pmap_enter: modified page not writable: va: 0x%x, pte: 0x%x\n",
2086			    va, origpte);
2087		}
2088#endif
2089
2090		/*
2091		 * Remove extra pte reference
2092		 */
2093		if (mpte)
2094			mpte->hold_count--;
2095
2096		if ((prot & VM_PROT_WRITE) && (origpte & PG_V)) {
2097			if ((origpte & PG_RW) == 0) {
2098				*pte |= PG_RW;
2099				pmap_invalidate_page(pmap, va);
2100			}
2101			return;
2102		}
2103
2104		/*
2105		 * We might be turning off write access to the page,
2106		 * so we go ahead and sense modify status.
2107		 */
2108		if (origpte & PG_MANAGED) {
2109			if ((origpte & PG_M) && pmap_track_modified(va)) {
2110				vm_page_t om;
2111				om = PHYS_TO_VM_PAGE(opa);
2112				vm_page_dirty(om);
2113			}
2114			pa |= PG_MANAGED;
2115		}
2116		goto validate;
2117	}
2118	/*
2119	 * Mapping has changed, invalidate old range and fall through to
2120	 * handle validating new mapping.
2121	 */
2122	if (opa) {
2123		int err;
2124		err = pmap_remove_pte(pmap, pte, va);
2125		if (err)
2126			panic("pmap_enter: pte vanished, va: 0x%x", va);
2127	}
2128
2129	/*
2130	 * Enter on the PV list if part of our managed memory. Note that we
2131	 * raise IPL while manipulating pv_table since pmap_enter can be
2132	 * called at interrupt time.
2133	 */
2134	if (pmap_initialized &&
2135	    (m->flags & (PG_FICTITIOUS|PG_UNMANAGED)) == 0) {
2136		pmap_insert_entry(pmap, va, mpte, m);
2137		pa |= PG_MANAGED;
2138	}
2139
2140	/*
2141	 * Increment counters
2142	 */
2143	pmap->pm_stats.resident_count++;
2144	if (wired)
2145		pmap->pm_stats.wired_count++;
2146
2147validate:
2148	/*
2149	 * Now validate mapping with desired protection/wiring.
2150	 */
2151	newpte = (vm_offset_t) (pa | pte_prot(pmap, prot) | PG_V);
2152
2153	if (wired)
2154		newpte |= PG_W;
2155	if (va < VM_MAXUSER_ADDRESS)
2156		newpte |= PG_U;
2157	if (pmap == kernel_pmap)
2158		newpte |= pgeflag;
2159
2160	/*
2161	 * if the mapping or permission bits are different, we need
2162	 * to update the pte.
2163	 */
2164	if ((origpte & ~(PG_M|PG_A)) != newpte) {
2165		*pte = newpte | PG_A;
2166		/*if (origpte)*/ {
2167			pmap_invalidate_page(pmap, va);
2168		}
2169	}
2170}
2171
2172/*
2173 * this code makes some *MAJOR* assumptions:
2174 * 1. The pmap is the current pmap and exists.
2175 * 2. Not wired.
2176 * 3. Read access.
2177 * 4. No page table pages.
2178 * 5. The TLB flush is deferred to the calling procedure.
2179 * 6. Page IS managed.
2180 * but is *MUCH* faster than pmap_enter...
2181 */
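/*
 * The mpte argument and return value allow a caller that enters many
 * pages in sequence to reuse the page table page hold from the
 * previous call, e.g. (as pmap_object_init_pt() below does):
 *
 *	mpte = pmap_enter_quick(pmap, addr + i386_ptob(tmpidx), p, mpte);
 */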
2182
2183static vm_page_t
2184pmap_enter_quick(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_page_t mpte)
2185{
2186	pt_entry_t *pte;
2187	vm_offset_t pa;
2188
2189	/*
2190	 * In the case that a page table page is not
2191	 * resident, we are creating it here.
2192	 */
2193	if (va < VM_MAXUSER_ADDRESS) {
2194		unsigned ptepindex;
2195		pd_entry_t ptepa;
2196
2197		/*
2198		 * Calculate pagetable page index
2199		 */
2200		ptepindex = va >> PDRSHIFT;
2201		if (mpte && (mpte->pindex == ptepindex)) {
2202			mpte->hold_count++;
2203		} else {
2204retry:
2205			/*
2206			 * Get the page directory entry
2207			 */
2208			ptepa = pmap->pm_pdir[ptepindex];
2209
2210			/*
2211			 * If the page table page is mapped, we just increment
2212			 * the hold count, and activate it.
2213			 */
2214			if (ptepa) {
2215				if (ptepa & PG_PS)
2216					panic("pmap_enter_quick: unexpected mapping into 4MB page");
2217				if (pmap->pm_ptphint &&
2218					(pmap->pm_ptphint->pindex == ptepindex)) {
2219					mpte = pmap->pm_ptphint;
2220				} else {
2221					mpte = pmap_page_lookup(pmap->pm_pteobj, ptepindex);
2222					pmap->pm_ptphint = mpte;
2223				}
2224				if (mpte == NULL)
2225					goto retry;
2226				mpte->hold_count++;
2227			} else {
2228				mpte = _pmap_allocpte(pmap, ptepindex);
2229			}
2230		}
2231	} else {
2232		mpte = NULL;
2233	}
2234
2235	/*
2236	 * This call to vtopte makes the assumption that we are
2237	 * entering the page into the current pmap.  In order to support
2238	 * quick entry into any pmap, one would likely use pmap_pte_quick.
2239	 * But that isn't as quick as vtopte.
2240	 */
2241	pte = vtopte(va);
2242	if (*pte) {
2243		if (mpte)
2244			pmap_unwire_pte_hold(pmap, mpte);
2245		return NULL;
2246	}
2247
2248	/*
2249	 * Enter on the PV list if part of our managed memory. Note that we
2250	 * raise IPL while manipulating pv_table since pmap_enter can be
2251	 * called at interrupt time.
2252	 */
2253	if ((m->flags & (PG_FICTITIOUS|PG_UNMANAGED)) == 0)
2254		pmap_insert_entry(pmap, va, mpte, m);
2255
2256	/*
2257	 * Increment counters
2258	 */
2259	pmap->pm_stats.resident_count++;
2260
2261	pa = VM_PAGE_TO_PHYS(m);
2262
2263	/*
2264	 * Now validate mapping with RO protection
2265	 */
2266	if (m->flags & (PG_FICTITIOUS|PG_UNMANAGED))
2267		*pte = pa | PG_V | PG_U;
2268	else
2269		*pte = pa | PG_V | PG_U | PG_MANAGED;
2270
2271	return mpte;
2272}
2273
2274/*
2275 * Make a temporary mapping for a physical address.  This is only intended
2276 * to be used for panic dumps.
2277 */
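/*
 * Note that the i-th page is mapped at crashdumpmap + i * PAGE_SIZE,
 * while the return value is always the base of the crashdumpmap
 * window, not the address of the i-th mapping.
 */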
2278void *
2279pmap_kenter_temporary(vm_offset_t pa, int i)
2280{
2281	vm_offset_t va;
2282
2283	va = (vm_offset_t)crashdumpmap + (i * PAGE_SIZE);
2284	pmap_kenter(va, pa);
2285#ifndef I386_CPU
2286	invlpg(va);
2287#else
2288	invltlb();
2289#endif
2290	return ((void *)crashdumpmap);
2291}
2292
2293#define MAX_INIT_PT (96)
2294/*
2295 * pmap_object_init_pt preloads the ptes for a given object
2296 * into the specified pmap.  This eliminates the blast of soft
2297 * faults on process startup and immediately after an mmap.
2298 */
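/*
 * Illustrative only, assuming the usual mmap-time caller: preloading
 * the first pages of a new mapping might look roughly like
 *
 *	pmap_object_init_pt(vm_map_pmap(map), addr, object, pindex,
 *	    size, MAP_PREFAULT_PARTIAL);
 */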
2299void
2300pmap_object_init_pt(pmap_t pmap, vm_offset_t addr,
2301		    vm_object_t object, vm_pindex_t pindex,
2302		    vm_size_t size, int limit)
2303{
2304	vm_offset_t tmpidx;
2305	int psize;
2306	vm_page_t p, mpte;
2307	int objpgs;
2308
2309	if (pmap == NULL || object == NULL)
2310		return;
2311
2312	/*
2313	 * This code maps large physical mmap regions into the
2314	 * processor address space.  Note that some shortcuts
2315	 * are taken, but the code works.
2316	 */
2317	if (pseflag && (object->type == OBJT_DEVICE) &&
2318	    ((addr & (NBPDR - 1)) == 0) && ((size & (NBPDR - 1)) == 0)) {
2319		int i;
2320		vm_page_t m[1];
2321		unsigned int ptepindex;
2322		int npdes;
2323		pd_entry_t ptepa;
2324
2325		if (pmap->pm_pdir[ptepindex = (addr >> PDRSHIFT)])
2326			return;
2327
2328retry:
2329		p = vm_page_lookup(object, pindex);
2330		if (p && vm_page_sleep_busy(p, FALSE, "init4p"))
2331			goto retry;
2332
2333		if (p == NULL) {
2334			p = vm_page_alloc(object, pindex, VM_ALLOC_NORMAL);
2335			if (p == NULL)
2336				return;
2337			m[0] = p;
2338
2339			if (vm_pager_get_pages(object, m, 1, 0) != VM_PAGER_OK) {
2340				vm_page_lock_queues();
2341				vm_page_free(p);
2342				vm_page_unlock_queues();
2343				return;
2344			}
2345
2346			p = vm_page_lookup(object, pindex);
2347			vm_page_wakeup(p);
2348		}
2349
2350		ptepa = VM_PAGE_TO_PHYS(p);
2351		if (ptepa & (NBPDR - 1)) {
2352			return;
2353		}
2354
2355		p->valid = VM_PAGE_BITS_ALL;
2356
2357		pmap->pm_stats.resident_count += size >> PAGE_SHIFT;
2358		npdes = size >> PDRSHIFT;
2359		for (i = 0; i < npdes; i++) {
2360			pmap->pm_pdir[ptepindex] =
2361			    ptepa | PG_U | PG_RW | PG_V | PG_PS;
2362			ptepa += NBPDR;
2363			ptepindex += 1;
2364		}
2365		vm_page_flag_set(p, PG_MAPPED);
2366		pmap_invalidate_all(kernel_pmap);
2367		return;
2368	}
2369
2370	psize = i386_btop(size);
2371
2372	if ((object->type != OBJT_VNODE) ||
2373	    ((limit & MAP_PREFAULT_PARTIAL) && (psize > MAX_INIT_PT) &&
2374	     (object->resident_page_count > MAX_INIT_PT))) {
2375		return;
2376	}
2377
2378	if (psize + pindex > object->size) {
2379		if (object->size < pindex)
2380			return;
2381		psize = object->size - pindex;
2382	}
2383
2384	mpte = NULL;
2385	/*
2386	 * if we are processing a major portion of the object, then scan the
2387	 * entire thing.
2388	 */
2389	if (psize > (object->resident_page_count >> 2)) {
2390		objpgs = psize;
2391
2392		for (p = TAILQ_FIRST(&object->memq);
2393		    ((objpgs > 0) && (p != NULL));
2394		    p = TAILQ_NEXT(p, listq)) {
2395
2396			if (p->pindex < pindex || p->pindex - pindex >= psize) {
2397				continue;
2398			}
2399			tmpidx = p->pindex - pindex;
2400			/*
2401			 * don't allow an madvise to blow away our really
2402			 * free pages allocating pv entries.
2403			 */
2404			if ((limit & MAP_PREFAULT_MADVISE) &&
2405			    cnt.v_free_count < cnt.v_free_reserved) {
2406				break;
2407			}
2408			vm_page_lock_queues();
2409			if (((p->valid & VM_PAGE_BITS_ALL) == VM_PAGE_BITS_ALL) &&
2410				(p->busy == 0) &&
2411			    (p->flags & (PG_BUSY | PG_FICTITIOUS)) == 0) {
2412				if ((p->queue - p->pc) == PQ_CACHE)
2413					vm_page_deactivate(p);
2414				vm_page_busy(p);
2415				vm_page_unlock_queues();
2416				mpte = pmap_enter_quick(pmap,
2417					addr + i386_ptob(tmpidx), p, mpte);
2418				vm_page_lock_queues();
2419				vm_page_flag_set(p, PG_MAPPED);
2420				vm_page_wakeup(p);
2421			}
2422			vm_page_unlock_queues();
2423			objpgs -= 1;
2424		}
2425	} else {
2426		/*
2427		 * else lookup the pages one-by-one.
2428		 */
2429		for (tmpidx = 0; tmpidx < psize; tmpidx += 1) {
2430			/*
2431			 * don't allow an madvise to blow away our really
2432			 * free pages allocating pv entries.
2433			 */
2434			if ((limit & MAP_PREFAULT_MADVISE) &&
2435			    cnt.v_free_count < cnt.v_free_reserved) {
2436				break;
2437			}
2438			p = vm_page_lookup(object, tmpidx + pindex);
2439			if (p == NULL)
2440				continue;
2441			vm_page_lock_queues();
2442			if ((p->valid & VM_PAGE_BITS_ALL) == VM_PAGE_BITS_ALL &&
2443				(p->busy == 0) &&
2444			    (p->flags & (PG_BUSY | PG_FICTITIOUS)) == 0) {
2445				if ((p->queue - p->pc) == PQ_CACHE)
2446					vm_page_deactivate(p);
2447				vm_page_busy(p);
2448				vm_page_unlock_queues();
2449				mpte = pmap_enter_quick(pmap,
2450					addr + i386_ptob(tmpidx), p, mpte);
2451				vm_page_lock_queues();
2452				vm_page_flag_set(p, PG_MAPPED);
2453				vm_page_wakeup(p);
2454			}
2455			vm_page_unlock_queues();
2456		}
2457	}
2458	return;
2459}
2460
2461/*
2462 * pmap_prefault provides a quick way of clustering
2463 * page faults into a process's address space.  It is a "cousin"
2464 * of pmap_object_init_pt, except it runs at page fault time instead
2465 * of mmap time.
2466 */
2467#define PFBAK 4
2468#define PFFOR 4
2469#define PAGEORDER_SIZE (PFBAK+PFFOR)
2470
2471static int pmap_prefault_pageorder[] = {
2472	-PAGE_SIZE, PAGE_SIZE,
2473	-2 * PAGE_SIZE, 2 * PAGE_SIZE,
2474	-3 * PAGE_SIZE, 3 * PAGE_SIZE,
2475	-4 * PAGE_SIZE, 4 * PAGE_SIZE
2476};
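/*
 * The offsets above alternate outward from the faulting address:
 * -1, +1, -2, +2, ... pages, covering PFBAK pages behind and PFFOR
 * pages ahead of it.
 */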
2477
2478void
2479pmap_prefault(pmap, addra, entry)
2480	pmap_t pmap;
2481	vm_offset_t addra;
2482	vm_map_entry_t entry;
2483{
2484	int i;
2485	vm_offset_t starta;
2486	vm_offset_t addr;
2487	vm_pindex_t pindex;
2488	vm_page_t m, mpte;
2489	vm_object_t object;
2490
2491	if (!curthread || (pmap != vmspace_pmap(curthread->td_proc->p_vmspace)))
2492		return;
2493
2494	object = entry->object.vm_object;
2495
2496	starta = addra - PFBAK * PAGE_SIZE;
2497	if (starta < entry->start) {
2498		starta = entry->start;
2499	} else if (starta > addra) {
2500		starta = 0;
2501	}
2502
2503	mpte = NULL;
2504	for (i = 0; i < PAGEORDER_SIZE; i++) {
2505		vm_object_t lobject;
2506		pt_entry_t *pte;
2507
2508		addr = addra + pmap_prefault_pageorder[i];
2509		if (addr > addra + (PFFOR * PAGE_SIZE))
2510			addr = 0;
2511
2512		if (addr < starta || addr >= entry->end)
2513			continue;
2514
2515		if ((*pmap_pde(pmap, addr)) == 0)
2516			continue;
2517
2518		pte = vtopte(addr);
2519		if (*pte)
2520			continue;
2521
2522		pindex = ((addr - entry->start) + entry->offset) >> PAGE_SHIFT;
2523		lobject = object;
2524		for (m = vm_page_lookup(lobject, pindex);
2525		    (!m && (lobject->type == OBJT_DEFAULT) && (lobject->backing_object));
2526		    lobject = lobject->backing_object) {
2527			if (lobject->backing_object_offset & PAGE_MASK)
2528				break;
2529			pindex += (lobject->backing_object_offset >> PAGE_SHIFT);
2530			m = vm_page_lookup(lobject->backing_object, pindex);
2531		}
2532
2533		/*
2534		 * give-up when a page is not in memory
2535		 */
2536		if (m == NULL)
2537			break;
2538		vm_page_lock_queues();
2539		if (((m->valid & VM_PAGE_BITS_ALL) == VM_PAGE_BITS_ALL) &&
2540			(m->busy == 0) &&
2541		    (m->flags & (PG_BUSY | PG_FICTITIOUS)) == 0) {
2542
2543			if ((m->queue - m->pc) == PQ_CACHE) {
2544				vm_page_deactivate(m);
2545			}
2546			vm_page_busy(m);
2547			vm_page_unlock_queues();
2548			mpte = pmap_enter_quick(pmap, addr, m, mpte);
2549			vm_page_lock_queues();
2550			vm_page_flag_set(m, PG_MAPPED);
2551			vm_page_wakeup(m);
2552		}
2553		vm_page_unlock_queues();
2554	}
2555}
2556
2557/*
2558 *	Routine:	pmap_change_wiring
2559 *	Function:	Change the wiring attribute for a map/virtual-address
2560 *			pair.
2561 *	In/out conditions:
2562 *			The mapping must already exist in the pmap.
2563 */
2564void
2565pmap_change_wiring(pmap, va, wired)
2566	register pmap_t pmap;
2567	vm_offset_t va;
2568	boolean_t wired;
2569{
2570	register pt_entry_t *pte;
2571
2572	if (pmap == NULL)
2573		return;
2574
2575	pte = pmap_pte(pmap, va);
2576
2577	if (wired && !pmap_pte_w(pte))
2578		pmap->pm_stats.wired_count++;
2579	else if (!wired && pmap_pte_w(pte))
2580		pmap->pm_stats.wired_count--;
2581
2582	/*
2583	 * Wiring is not a hardware characteristic so there is no need to
2584	 * invalidate TLB.
2585	 */
2586	pmap_pte_set_w(pte, wired);
2587}
2588
2589
2590
2591/*
2592 *	Copy the range specified by src_addr/len
2593 *	from the source map to the range dst_addr/len
2594 *	in the destination map.
2595 *
2596 *	This routine is only advisory and need not do anything.
2597 */
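/*
 * In practice this is used at fork time to pre-copy a parent's
 * managed mappings into the child (an assumption about the caller;
 * the copy itself is purely an optimization).
 */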
2598
2599void
2600pmap_copy(pmap_t dst_pmap, pmap_t src_pmap, vm_offset_t dst_addr, vm_size_t len,
2601	  vm_offset_t src_addr)
2602{
2603	vm_offset_t addr;
2604	vm_offset_t end_addr = src_addr + len;
2605	vm_offset_t pdnxt;
2606	pd_entry_t src_frame, dst_frame;
2607	vm_page_t m;
2608
2609	if (dst_addr != src_addr)
2610		return;
2611
2612	src_frame = src_pmap->pm_pdir[PTDPTDI] & PG_FRAME;
2613	if (src_frame != (PTDpde & PG_FRAME))
2614		return;
2615
2616	dst_frame = dst_pmap->pm_pdir[PTDPTDI] & PG_FRAME;
2617	for (addr = src_addr; addr < end_addr; addr = pdnxt) {
2618		pt_entry_t *src_pte, *dst_pte;
2619		vm_page_t dstmpte, srcmpte;
2620		pd_entry_t srcptepaddr;
2621		unsigned ptepindex;
2622
2623		if (addr >= UPT_MIN_ADDRESS)
2624			panic("pmap_copy: invalid to pmap_copy page tables\n");
2625
2626		/*
2627		 * Don't let optional prefaulting of pages make us go
2628		 * way below the low water mark of free pages or way
2629		 * above high water mark of used pv entries.
2630		 */
2631		if (cnt.v_free_count < cnt.v_free_reserved ||
2632		    pv_entry_count > pv_entry_high_water)
2633			break;
2634
2635		pdnxt = ((addr + PAGE_SIZE*NPTEPG) & ~(PAGE_SIZE*NPTEPG - 1));
2636		ptepindex = addr >> PDRSHIFT;
2637
2638		srcptepaddr = src_pmap->pm_pdir[ptepindex];
2639		if (srcptepaddr == 0)
2640			continue;
2641
2642		if (srcptepaddr & PG_PS) {
2643			if (dst_pmap->pm_pdir[ptepindex] == 0) {
2644				dst_pmap->pm_pdir[ptepindex] = srcptepaddr;
2645				dst_pmap->pm_stats.resident_count += NBPDR / PAGE_SIZE;
2646			}
2647			continue;
2648		}
2649
2650		srcmpte = vm_page_lookup(src_pmap->pm_pteobj, ptepindex);
2651		if ((srcmpte == NULL) ||
2652		    (srcmpte->hold_count == 0) || (srcmpte->flags & PG_BUSY))
2653			continue;
2654
2655		if (pdnxt > end_addr)
2656			pdnxt = end_addr;
2657
2658		/*
2659		 * Have to recheck this before every avtopte() call below
2660		 * in case we have blocked and something else used APTDpde.
2661		 */
2662		if (dst_frame != (APTDpde & PG_FRAME)) {
2663			APTDpde = dst_frame | PG_RW | PG_V;
2664			pmap_invalidate_all(kernel_pmap); /* XXX Bandaid */
2665		}
2666		src_pte = vtopte(addr);
2667		dst_pte = avtopte(addr);
2668		while (addr < pdnxt) {
2669			pt_entry_t ptetemp;
2670			ptetemp = *src_pte;
2671			/*
2672			 * we only virtual copy managed pages
2673			 */
2674			if ((ptetemp & PG_MANAGED) != 0) {
2675				/*
2676				 * We have to check after allocpte for the
2677				 * pte still being around...  allocpte can
2678				 * block.
2679				 */
2680				dstmpte = pmap_allocpte(dst_pmap, addr);
2681				if ((*dst_pte == 0) && (ptetemp = *src_pte)) {
2682					/*
2683					 * Clear the modified and
2684					 * accessed (referenced) bits
2685					 * during the copy.
2686					 */
2687					m = PHYS_TO_VM_PAGE(ptetemp);
2688					*dst_pte = ptetemp & ~(PG_M | PG_A);
2689					dst_pmap->pm_stats.resident_count++;
2690					pmap_insert_entry(dst_pmap, addr,
2691						dstmpte, m);
2692	 			} else {
2693					pmap_unwire_pte_hold(dst_pmap, dstmpte);
2694				}
2695				if (dstmpte->hold_count >= srcmpte->hold_count)
2696					break;
2697			}
2698			addr += PAGE_SIZE;
2699			src_pte++;
2700			dst_pte++;
2701		}
2702	}
2703}
2704
2705#ifdef SMP
2706
2707/*
2708 *	pmap_zpi_switchin*()
2709 *
2710 *	These functions allow us to avoid doing IPIs altogether in certain
2711 *	temporary page-mapping situations (page zeroing).  Instead, to deal
2712 *	with being preempted and moved onto a different cpu, we invalidate
2713 *	the page when the scheduler switches us in.  This does not occur
2714 *	very often, so we remain relatively optimal with very little effort.
2715 */
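/*
 * The usage pattern (see pmap_zero_page() below) brackets each use of
 * a temporary mapping:
 *
 *	curthread->td_switchin = pmap_zpi_switchin2;
 *	... access CADDR2 ...
 *	curthread->td_switchin = NULL;
 */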
2716static void
2717pmap_zpi_switchin12(void)
2718{
2719	invlpg((u_int)CADDR1);
2720	invlpg((u_int)CADDR2);
2721}
2722
2723static void
2724pmap_zpi_switchin2(void)
2725{
2726	invlpg((u_int)CADDR2);
2727}
2728
2729static void
2730pmap_zpi_switchin3(void)
2731{
2732	invlpg((u_int)CADDR3);
2733}
2734
2735#endif
2736
2737/*
2738 *	pmap_zero_page zeros the specified hardware page by mapping
2739 *	the page into KVM and using bzero to clear its contents.
2740 */
2741void
2742pmap_zero_page(vm_page_t m)
2743{
2744	vm_offset_t phys;
2745
2746	phys = VM_PAGE_TO_PHYS(m);
2747	if (*CMAP2)
2748		panic("pmap_zero_page: CMAP2 busy");
2749	*CMAP2 = PG_V | PG_RW | phys | PG_A | PG_M;
2750#ifdef I386_CPU
2751	invltlb();
2752#else
2753#ifdef SMP
2754	curthread->td_switchin = pmap_zpi_switchin2;
2755#endif
2756	invlpg((u_int)CADDR2);
2757#endif
2758#if defined(I686_CPU)
2759	if (cpu_class == CPUCLASS_686)
2760		i686_pagezero(CADDR2);
2761	else
2762#endif
2763		bzero(CADDR2, PAGE_SIZE);
2764#ifdef SMP
2765	curthread->td_switchin = NULL;
2766#endif
2767	*CMAP2 = 0;
2768}
2769
2770/*
2771 *	pmap_zero_page_area zeros the specified hardware page by mapping
2772 *	the page into KVM and using bzero to clear its contents.
2773 *
2774 *	off and size may not cover an area beyond a single hardware page.
2775 */
2776void
2777pmap_zero_page_area(vm_page_t m, int off, int size)
2778{
2779	vm_offset_t phys;
2780
2781	phys = VM_PAGE_TO_PHYS(m);
2782	if (*CMAP2)
2783		panic("pmap_zero_page_area: CMAP2 busy");
2784	*CMAP2 = PG_V | PG_RW | phys | PG_A | PG_M;
2785#ifdef I386_CPU
2786	invltlb();
2787#else
2788#ifdef SMP
2789	curthread->td_switchin = pmap_zpi_switchin2;
2790#endif
2791	invlpg((u_int)CADDR2);
2792#endif
2793#if defined(I686_CPU)
2794	if (cpu_class == CPUCLASS_686 && off == 0 && size == PAGE_SIZE)
2795		i686_pagezero(CADDR2);
2796	else
2797#endif
2798		bzero((char *)CADDR2 + off, size);
2799#ifdef SMP
2800	curthread->td_switchin = NULL;
2801#endif
2802	*CMAP2 = 0;
2803}
2804
2805/*
2806 *	pmap_zero_page_idle zeros the specified hardware page by mapping
2807 *	the page into KVM and using bzero to clear its contents.  This
2808 *	is intended to be called from the vm_pagezero process only and
2809 *	outside of Giant.
2810 */
2811void
2812pmap_zero_page_idle(vm_page_t m)
2813{
2814	vm_offset_t phys;
2815
2816	phys = VM_PAGE_TO_PHYS(m);
2817	if (*CMAP3)
2818		panic("pmap_zero_page_idle: CMAP3 busy");
2819	*CMAP3 = PG_V | PG_RW | phys | PG_A | PG_M;
2820#ifdef I386_CPU
2821	invltlb();
2822#else
2823#ifdef SMP
2824	curthread->td_switchin = pmap_zpi_switchin3;
2825#endif
2826	invlpg((u_int)CADDR3);
2827#endif
2828#if defined(I686_CPU)
2829	if (cpu_class == CPUCLASS_686)
2830		i686_pagezero(CADDR3);
2831	else
2832#endif
2833		bzero(CADDR3, PAGE_SIZE);
2834#ifdef SMP
2835	curthread->td_switchin = NULL;
2836#endif
2837	*CMAP3 = 0;
2838}
2839
2840/*
2841 *	pmap_copy_page copies the specified (machine independent)
2842 *	page by mapping the page into virtual memory and using
2843 *	bcopy to copy the page, one machine dependent page at a
2844 *	time.
2845 */
2846void
2847pmap_copy_page(vm_page_t src, vm_page_t dst)
2848{
2849
2850	if (*CMAP1)
2851		panic("pmap_copy_page: CMAP1 busy");
2852	if (*CMAP2)
2853		panic("pmap_copy_page: CMAP2 busy");
2854	*CMAP1 = PG_V | VM_PAGE_TO_PHYS(src) | PG_A;
2855	*CMAP2 = PG_V | PG_RW | VM_PAGE_TO_PHYS(dst) | PG_A | PG_M;
2856#ifdef I386_CPU
2857	invltlb();
2858#else
2859#ifdef SMP
2860	curthread->td_switchin = pmap_zpi_switchin12;
2861#endif
2862	invlpg((u_int)CADDR1);
2863	invlpg((u_int)CADDR2);
2864#endif
2865	bcopy(CADDR1, CADDR2, PAGE_SIZE);
2866#ifdef SMP
2867	curthread->td_switchin = NULL;
2868#endif
2869	*CMAP1 = 0;
2870	*CMAP2 = 0;
2871}
2872
2873
2874/*
2875 *	Routine:	pmap_pageable
2876 *	Function:
2877 *		Make the specified pages (by pmap, offset)
2878 *		pageable (or not) as requested.
2879 *
2880 *		A page which is not pageable may not take
2881 *		a fault; therefore, its page table entry
2882 *		must remain valid for the duration.
2883 *
2884 *		This routine is merely advisory; pmap_enter
2885 *		will specify that these pages are to be wired
2886 *		down (or not) as appropriate.
2887 */
2888void
2889pmap_pageable(pmap, sva, eva, pageable)
2890	pmap_t pmap;
2891	vm_offset_t sva, eva;
2892	boolean_t pageable;
2893{
2894}
2895
2896/*
2897 * Returns true if the pmap's pv is one of the first
2898 * 16 pvs linked to from this page.  This count may
2899 * be changed upwards or downwards in the future; it
2900 * is only necessary that true be returned for a small
2901 * subset of pmaps for proper page aging.
2902 */
2903boolean_t
2904pmap_page_exists_quick(pmap, m)
2905	pmap_t pmap;
2906	vm_page_t m;
2907{
2908	pv_entry_t pv;
2909	int loops = 0;
2910	int s;
2911
2912	if (!pmap_initialized || (m->flags & PG_FICTITIOUS))
2913		return FALSE;
2914
2915	s = splvm();
2916
2917	TAILQ_FOREACH(pv, &m->md.pv_list, pv_list) {
2918		if (pv->pv_pmap == pmap) {
2919			splx(s);
2920			return TRUE;
2921		}
2922		loops++;
2923		if (loops >= 16)
2924			break;
2925	}
2926	splx(s);
2927	return (FALSE);
2928}
2929
2930#define PMAP_REMOVE_PAGES_CURPROC_ONLY
2931/*
2932 * Remove all pages from the specified address space;
2933 * this aids process exit speed.  Also, this code is
2934 * special-cased for the current process only, but it
2935 * can have the more generic (and slightly slower)
2936 * mode enabled.  This is much faster than pmap_remove
2937 * in the case of running down an entire address space.
2938 */
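/*
 * This is normally invoked while tearing down an address space at
 * process exit (an assumption about the caller; note the
 * PMAP_REMOVE_PAGES_CURPROC_ONLY check below).
 */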
2939void
2940pmap_remove_pages(pmap, sva, eva)
2941	pmap_t pmap;
2942	vm_offset_t sva, eva;
2943{
2944	pt_entry_t *pte, tpte;
2945	vm_page_t m;
2946	pv_entry_t pv, npv;
2947	int s;
2948
2949#ifdef PMAP_REMOVE_PAGES_CURPROC_ONLY
2950	if (!curthread || (pmap != vmspace_pmap(curthread->td_proc->p_vmspace))) {
2951		printf("warning: pmap_remove_pages called with non-current pmap\n");
2952		return;
2953	}
2954#endif
2955
2956	s = splvm();
2957	for (pv = TAILQ_FIRST(&pmap->pm_pvlist); pv; pv = npv) {
2958
2959		if (pv->pv_va >= eva || pv->pv_va < sva) {
2960			npv = TAILQ_NEXT(pv, pv_plist);
2961			continue;
2962		}
2963
2964#ifdef PMAP_REMOVE_PAGES_CURPROC_ONLY
2965		pte = vtopte(pv->pv_va);
2966#else
2967		pte = pmap_pte_quick(pv->pv_pmap, pv->pv_va);
2968#endif
2969		tpte = *pte;
2970
2971		if (tpte == 0) {
2972			printf("TPTE at %p  IS ZERO @ VA %08x\n",
2973							pte, pv->pv_va);
2974			panic("bad pte");
2975		}
2976
2977/*
2978 * We cannot remove wired pages from a process' mapping at this time
2979 */
2980		if (tpte & PG_W) {
2981			npv = TAILQ_NEXT(pv, pv_plist);
2982			continue;
2983		}
2984
2985		m = PHYS_TO_VM_PAGE(tpte);
2986		KASSERT(m->phys_addr == (tpte & PG_FRAME),
2987		    ("vm_page_t %p phys_addr mismatch %08x %08x",
2988		    m, m->phys_addr, tpte));
2989
2990		KASSERT(m < &vm_page_array[vm_page_array_size],
2991			("pmap_remove_pages: bad tpte %x", tpte));
2992
2993		pv->pv_pmap->pm_stats.resident_count--;
2994
2995		*pte = 0;
2996
2997		/*
2998		 * Update the vm_page_t clean and reference bits.
2999		 */
3000		if (tpte & PG_M) {
3001			vm_page_dirty(m);
3002		}
3003
3004		npv = TAILQ_NEXT(pv, pv_plist);
3005		TAILQ_REMOVE(&pv->pv_pmap->pm_pvlist, pv, pv_plist);
3006
3007		m->md.pv_list_count--;
3008		TAILQ_REMOVE(&m->md.pv_list, pv, pv_list);
3009		if (TAILQ_FIRST(&m->md.pv_list) == NULL) {
3010			vm_page_flag_clear(m, PG_MAPPED | PG_WRITEABLE);
3011		}
3012
3013		pmap_unuse_pt(pv->pv_pmap, pv->pv_va, pv->pv_ptem);
3014		free_pv_entry(pv);
3015	}
3016	splx(s);
3017	pmap_invalidate_all(pmap);
3018}
3019
3020/*
3021 * pmap_testbit tests bits in PTEs.  Note that the
3022 * testbit/changebit routines are inline, so a lot of
3023 * things compile-time evaluate.
3024 */
3025static boolean_t
3026pmap_testbit(m, bit)
3027	vm_page_t m;
3028	int bit;
3029{
3030	pv_entry_t pv;
3031	pt_entry_t *pte;
3032	int s;
3033
3034	if (!pmap_initialized || (m->flags & PG_FICTITIOUS))
3035		return FALSE;
3036
3037	if (TAILQ_FIRST(&m->md.pv_list) == NULL)
3038		return FALSE;
3039
3040	s = splvm();
3041
3042	TAILQ_FOREACH(pv, &m->md.pv_list, pv_list) {
3043		/*
3044		 * If the bit being tested is the modified or accessed
3045		 * bit, then only consider mappings whose modify status
3046		 * we actually track (see pmap_track_modified()).
3047		 */
3048		if (bit & (PG_A|PG_M)) {
3049			if (!pmap_track_modified(pv->pv_va))
3050				continue;
3051		}
3052
3053#if defined(PMAP_DIAGNOSTIC)
3054		if (!pv->pv_pmap) {
3055			printf("Null pmap (tb) at va: 0x%x\n", pv->pv_va);
3056			continue;
3057		}
3058#endif
3059		pte = pmap_pte_quick(pv->pv_pmap, pv->pv_va);
3060		if (*pte & bit) {
3061			splx(s);
3062			return TRUE;
3063		}
3064	}
3065	splx(s);
3066	return (FALSE);
3067}
3068
3069/*
3070 * this routine is used to modify bits in ptes
3071 */
3072static __inline void
3073pmap_changebit(vm_page_t m, int bit, boolean_t setem)
3074{
3075	register pv_entry_t pv;
3076	register pt_entry_t *pte;
3077	int s;
3078
3079	if (!pmap_initialized || (m->flags & PG_FICTITIOUS))
3080		return;
3081
3082	s = splvm();
3083
3084	/*
3085	 * Loop over all current mappings, setting/clearing as appropriate.
3086	 * If setting RO, do we need to clear the VAC?
3087	 */
3088	TAILQ_FOREACH(pv, &m->md.pv_list, pv_list) {
3089		/*
3090		 * don't write protect pager mappings
3091		 */
3092		if (!setem && (bit == PG_RW)) {
3093			if (!pmap_track_modified(pv->pv_va))
3094				continue;
3095		}
3096
3097#if defined(PMAP_DIAGNOSTIC)
3098		if (!pv->pv_pmap) {
3099			printf("Null pmap (cb) at va: 0x%x\n", pv->pv_va);
3100			continue;
3101		}
3102#endif
3103
3104		pte = pmap_pte_quick(pv->pv_pmap, pv->pv_va);
3105
3106		if (setem) {
3107			*pte |= bit;
3108			pmap_invalidate_page(pv->pv_pmap, pv->pv_va);
3109		} else {
3110			pt_entry_t pbits = *pte;
3111			if (pbits & bit) {
3112				if (bit == PG_RW) {
3113					if (pbits & PG_M) {
3114						vm_page_dirty(m);
3115					}
3116					*pte = pbits & ~(PG_M|PG_RW);
3117				} else {
3118					*pte = pbits & ~bit;
3119				}
3120				pmap_invalidate_page(pv->pv_pmap, pv->pv_va);
3121			}
3122		}
3123	}
3124	splx(s);
3125}
3126
3127/*
3128 *	pmap_page_protect:
3129 *
3130 *	Lower the permission for all mappings to a given page.
3131 */
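/*
 * For example, pmap_page_protect(m, VM_PROT_READ) write-protects
 * every mapping of m, while pmap_page_protect(m, VM_PROT_NONE)
 * removes the mappings entirely.
 */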
3132void
3133pmap_page_protect(vm_page_t m, vm_prot_t prot)
3134{
3135	if ((prot & VM_PROT_WRITE) == 0) {
3136		if (prot & (VM_PROT_READ | VM_PROT_EXECUTE)) {
3137			pmap_changebit(m, PG_RW, FALSE);
3138		} else {
3139			pmap_remove_all(m);
3140		}
3141	}
3142}
3143
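/*
 * Convert a (VM) page number into its physical address.
 */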
3144vm_offset_t
3145pmap_phys_address(ppn)
3146	int ppn;
3147{
3148	return (i386_ptob(ppn));
3149}
3150
3151/*
3152 *	pmap_ts_referenced:
3153 *
3154 *	Return a count of reference bits for a page, clearing those bits.
3155 *	It is not necessary for every reference bit to be cleared, but it
3156 *	is necessary that 0 only be returned when there are truly no
3157 *	reference bits set.
3158 *
3159 *	XXX: The exact number of bits to check and clear is a matter that
3160 *	should be tested and standardized at some point in the future for
3161 *	optimal aging of shared pages.
3162 */
3163int
3164pmap_ts_referenced(vm_page_t m)
3165{
3166	register pv_entry_t pv, pvf, pvn;
3167	pt_entry_t *pte;
3168	int s;
3169	int rtval = 0;
3170
3171	if (!pmap_initialized || (m->flags & PG_FICTITIOUS))
3172		return (rtval);
3173
3174	s = splvm();
3175
3176	if ((pv = TAILQ_FIRST(&m->md.pv_list)) != NULL) {
3177
3178		pvf = pv;
3179
3180		do {
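			/*
			 * Move each pv to the tail of the list so that
			 * successive calls scan the mappings in round-robin
			 * order; give up after clearing a handful of PG_A
			 * bits (see the XXX above).
			 */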
3181			pvn = TAILQ_NEXT(pv, pv_list);
3182
3183			TAILQ_REMOVE(&m->md.pv_list, pv, pv_list);
3184
3185			TAILQ_INSERT_TAIL(&m->md.pv_list, pv, pv_list);
3186
3187			if (!pmap_track_modified(pv->pv_va))
3188				continue;
3189
3190			pte = pmap_pte_quick(pv->pv_pmap, pv->pv_va);
3191
3192			if (pte && (*pte & PG_A)) {
3193				*pte &= ~PG_A;
3194
3195				pmap_invalidate_page(pv->pv_pmap, pv->pv_va);
3196
3197				rtval++;
3198				if (rtval > 4) {
3199					break;
3200				}
3201			}
3202		} while ((pv = pvn) != NULL && pv != pvf);
3203	}
3204	splx(s);
3205
3206	return (rtval);
3207}
3208
3209/*
3210 *	pmap_is_modified:
3211 *
3212 *	Return whether or not the specified physical page was modified
3213 *	in any physical maps.
3214 */
3215boolean_t
3216pmap_is_modified(vm_page_t m)
3217{
3218	return pmap_testbit(m, PG_M);
3219}
3220
3221/*
3222 *	Clear the modify bits on the specified physical page.
3223 */
3224void
3225pmap_clear_modify(vm_page_t m)
3226{
3227	pmap_changebit(m, PG_M, FALSE);
3228}
3229
3230/*
3231 *	pmap_clear_reference:
3232 *
3233 *	Clear the reference bit on the specified physical page.
3234 */
3235void
3236pmap_clear_reference(vm_page_t m)
3237{
3238	pmap_changebit(m, PG_A, FALSE);
3239}
3240
3241/*
3242 * Miscellaneous support routines follow
3243 */
3244
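/*
 * Initialize protection_codes[], which is indexed by a VM_PROT_*
 * combination (0-7) and yields the corresponding i386 PTE protection
 * bits; e.g. pte_prot(pmap, VM_PROT_READ | VM_PROT_WRITE) == PG_RW.
 */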
3245static void
3246i386_protection_init()
3247{
3248	register int *kp, prot;
3249
3250	kp = protection_codes;
3251	for (prot = 0; prot < 8; prot++) {
3252		switch (prot) {
3253		case VM_PROT_NONE | VM_PROT_NONE | VM_PROT_NONE:
3254			/*
3255			 * Read access is also 0. There isn't any execute bit,
3256			 * so just make it readable.
3257			 */
3258		case VM_PROT_READ | VM_PROT_NONE | VM_PROT_NONE:
3259		case VM_PROT_READ | VM_PROT_NONE | VM_PROT_EXECUTE:
3260		case VM_PROT_NONE | VM_PROT_NONE | VM_PROT_EXECUTE:
3261			*kp++ = 0;
3262			break;
3263		case VM_PROT_NONE | VM_PROT_WRITE | VM_PROT_NONE:
3264		case VM_PROT_NONE | VM_PROT_WRITE | VM_PROT_EXECUTE:
3265		case VM_PROT_READ | VM_PROT_WRITE | VM_PROT_NONE:
3266		case VM_PROT_READ | VM_PROT_WRITE | VM_PROT_EXECUTE:
3267			*kp++ = PG_RW;
3268			break;
3269		}
3270	}
3271}
3272
3273/*
3274 * Map a set of physical memory pages into the kernel virtual
3275 * address space. Return a pointer to where it is mapped. This
3276 * routine is intended to be used for mapping device memory,
3277 * NOT real memory.
3278 */
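/*
 * Illustrative only: a driver might map its device's register window
 * with something like
 *
 *	regs = pmap_mapdev(rman_get_start(res), rman_get_size(res));
 *
 * and unmap it later with pmap_unmapdev().
 */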
3279void *
3280pmap_mapdev(pa, size)
3281	vm_offset_t pa;
3282	vm_size_t size;
3283{
3284	vm_offset_t va, tmpva, offset;
3285	pt_entry_t *pte;
3286
3287	offset = pa & PAGE_MASK;
3288	size = roundup(offset + size, PAGE_SIZE);
3289
3290	GIANT_REQUIRED;
3291
3292	va = kmem_alloc_pageable(kernel_map, size);
3293	if (!va)
3294		panic("pmap_mapdev: Couldn't alloc kernel virtual memory");
3295
3296	pa = pa & PG_FRAME;
3297	for (tmpva = va; size > 0; ) {
3298		pte = vtopte(tmpva);
3299		*pte = pa | PG_RW | PG_V | pgeflag;
3300		size -= PAGE_SIZE;
3301		tmpva += PAGE_SIZE;
3302		pa += PAGE_SIZE;
3303	}
3304	pmap_invalidate_range(kernel_pmap, va, tmpva);
3305	return ((void *)(va + offset));
3306}
3307
3308void
3309pmap_unmapdev(va, size)
3310	vm_offset_t va;
3311	vm_size_t size;
3312{
3313	vm_offset_t base, offset, tmpva;
3314	pt_entry_t *pte;
3315
3316	base = va & PG_FRAME;
3317	offset = va & PAGE_MASK;
3318	size = roundup(offset + size, PAGE_SIZE);
3319	for (tmpva = base; tmpva < (base + size); tmpva += PAGE_SIZE) {
3320		pte = vtopte(tmpva);
3321		*pte = 0;
3322	}
3323	pmap_invalidate_range(kernel_pmap, va, tmpva);
3324	kmem_free(kernel_map, base, size);
3325}
3326
3327/*
3328 * perform the pmap work for mincore
3329 */
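/*
 * Returns 0 if the page is not resident; otherwise, a combination of
 * MINCORE_INCORE, MINCORE_MODIFIED{,_OTHER}, and
 * MINCORE_REFERENCED{,_OTHER} describing the page's state.
 */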
3330int
3331pmap_mincore(pmap, addr)
3332	pmap_t pmap;
3333	vm_offset_t addr;
3334{
3335	pt_entry_t *ptep, pte;
3336	vm_page_t m;
3337	int val = 0;
3338
3339	ptep = pmap_pte(pmap, addr);
3340	if (ptep == 0) {
3341		return 0;
3342	}
3343
3344	if ((pte = *ptep) != 0) {
3345		vm_offset_t pa;
3346
3347		val = MINCORE_INCORE;
3348		if ((pte & PG_MANAGED) == 0)
3349			return val;
3350
3351		pa = pte & PG_FRAME;
3352
3353		m = PHYS_TO_VM_PAGE(pa);
3354
3355		/*
3356		 * Modified by us
3357		 */
3358		if (pte & PG_M)
3359			val |= MINCORE_MODIFIED|MINCORE_MODIFIED_OTHER;
3360		/*
3361		 * Modified by someone
3362		 */
3363		else if (m->dirty || pmap_is_modified(m))
3364			val |= MINCORE_MODIFIED_OTHER;
3365		/*
3366		 * Referenced by us
3367		 */
3368		if (pte & PG_A)
3369			val |= MINCORE_REFERENCED|MINCORE_REFERENCED_OTHER;
3370
3371		/*
3372		 * Referenced by someone
3373		 */
3374		else if ((m->flags & PG_REFERENCED) || pmap_ts_referenced(m)) {
3375			val |= MINCORE_REFERENCED_OTHER;
3376			vm_page_flag_set(m, PG_REFERENCED);
3377		}
3378	}
3379	return val;
3380}
3381
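/*
 * Make the given thread's pmap the active pmap on this CPU by
 * marking it active and loading its page directory into %cr3.
 */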
3382void
3383pmap_activate(struct thread *td)
3384{
3385	struct proc *p = td->td_proc;
3386	pmap_t	pmap;
3387	u_int32_t  cr3;
3388
3389	pmap = vmspace_pmap(td->td_proc->p_vmspace);
3390#if defined(SMP)
3391	pmap->pm_active |= PCPU_GET(cpumask);
3392#else
3393	pmap->pm_active |= 1;
3394#endif
3395	cr3 = vtophys(pmap->pm_pdir);
3396	/* XXXKSE this is wrong.
3397	 * pmap_activate is for the current thread on the current cpu
3398	 */
3399	if (p->p_flag & P_KSES) {
3400		/* Make sure all other cr3 entries are updated. */
3401		/* what if they are running?  XXXKSE (maybe abort them) */
3402		FOREACH_THREAD_IN_PROC(p, td) {
3403			td->td_pcb->pcb_cr3 = cr3;
3404		}
3405	} else {
3406		td->td_pcb->pcb_cr3 = cr3;
3407	}
3408	load_cr3(cr3);
3409#ifdef SWTCH_OPTIM_STATS
3410	tlb_flush_count++;
3411#endif
3412}
3413
3414vm_offset_t
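/*
 * Suggest an address for a mapping of the given object: large device
 * mappings are rounded up to a 4MB (NBPDR) boundary so that
 * pmap_object_init_pt() above can use PG_PS superpage mappings.
 */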
3415pmap_addr_hint(vm_object_t obj, vm_offset_t addr, vm_size_t size)
3416{
3417
3418	if ((obj == NULL) || (size < NBPDR) || (obj->type != OBJT_DEVICE)) {
3419		return addr;
3420	}
3421
3422	addr = (addr + (NBPDR - 1)) & ~(NBPDR - 1);
3423	return addr;
3424}
3425
3426
3427#if defined(PMAP_DEBUG)
3428pmap_pid_dump(int pid)
3429{
3430	pmap_t pmap;
3431	struct proc *p;
3432	int npte = 0;
3433	int index;
3434
3435	sx_slock(&allproc_lock);
3436	LIST_FOREACH(p, &allproc, p_list) {
3437		if (p->p_pid != pid)
3438			continue;
3439
3440		if (p->p_vmspace) {
3441			int i,j;
3442			index = 0;
3443			pmap = vmspace_pmap(p->p_vmspace);
3444			for (i = 0; i < NPDEPG; i++) {
3445				pd_entry_t *pde;
3446				pt_entry_t *pte;
3447				vm_offset_t base = i << PDRSHIFT;
3448
3449				pde = &pmap->pm_pdir[i];
3450				if (pde && pmap_pde_v(pde)) {
3451					for (j = 0; j < NPTEPG; j++) {
3452						vm_offset_t va = base + (j << PAGE_SHIFT);
3453						if (va >= (vm_offset_t) VM_MIN_KERNEL_ADDRESS) {
3454							if (index) {
3455								index = 0;
3456								printf("\n");
3457							}
3458							sx_sunlock(&allproc_lock);
3459							return npte;
3460						}
3461						pte = pmap_pte_quick(pmap, va);
3462						if (pte && pmap_pte_v(pte)) {
3463							pt_entry_t pa;
3464							vm_page_t m;
3465							pa = *pte;
3466							m = PHYS_TO_VM_PAGE(pa);
3467							printf("va: 0x%x, pt: 0x%x, h: %d, w: %d, f: 0x%x",
3468								va, pa, m->hold_count, m->wire_count, m->flags);
3469							npte++;
3470							index++;
3471							if (index >= 2) {
3472								index = 0;
3473								printf("\n");
3474							} else {
3475								printf(" ");
3476							}
3477						}
3478					}
3479				}
3480			}
3481		}
3482	}
3483	sx_sunlock(&allproc_lock);
3484	return npte;
3485}
3486#endif
3487
3488#if defined(DEBUG)
3489
3490static void	pads(pmap_t pm);
3491void		pmap_pvdump(vm_offset_t pa);
3492
3493/* print address space of pmap*/
3494static void
3495pads(pm)
3496	pmap_t pm;
3497{
3498	int i, j;
3499	vm_offset_t va;
3500	pt_entry_t *ptep;
3501
3502	if (pm == kernel_pmap)
3503		return;
3504	for (i = 0; i < NPDEPG; i++)
3505		if (pm->pm_pdir[i])
3506			for (j = 0; j < NPTEPG; j++) {
3507				va = (i << PDRSHIFT) + (j << PAGE_SHIFT);
3508				if (pm == kernel_pmap && va < KERNBASE)
3509					continue;
3510				if (pm != kernel_pmap && va > UPT_MAX_ADDRESS)
3511					continue;
3512				ptep = pmap_pte_quick(pm, va);
3513				if (pmap_pte_v(ptep))
3514					printf("%x:%x ", va, *ptep);
3515			}
3516
3517}
3518
3519void
3520pmap_pvdump(pa)
3521	vm_offset_t pa;
3522{
3523	pv_entry_t pv;
3524	vm_page_t m;
3525
3526	printf("pa %x", pa);
3527	m = PHYS_TO_VM_PAGE(pa);
3528	TAILQ_FOREACH(pv, &m->md.pv_list, pv_list) {
3529		printf(" -> pmap %p, va %x", (void *)pv->pv_pmap, pv->pv_va);
3530		pads(pv->pv_pmap);
3531	}
3532	printf(" ");
3533}
3534#endif
3535