/*
 * Copyright (c) 1991 Regents of the University of California.
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * the Systems Programming Group of the University of Utah Computer
 * Science Department and William Jolitz of UUNET Technologies Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	from:	@(#)pmap.c	7.7 (Berkeley)	5/12/91
 *	$Id: pmap.c,v 1.12 1994/01/14 16:23:37 davidg Exp $
 */

/*
 * Derived from hp300 version by Mike Hibler, this version by William
 * Jolitz uses a recursive map [a pde points to the page directory] to
 * map the page tables using the pagetables themselves. This is done to
 * reduce the impact on kernel virtual memory for lots of sparse address
 * space, and to reduce the cost of memory to each process.
 *
 *	Derived from: hp300/@(#)pmap.c	7.1 (Berkeley) 12/5/90
 */
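/*
 * A rough sketch of the recursive mapping, for illustration only (the
 * authoritative definitions live in the pmap headers): one PDE slot,
 * PTDPTDI, is made to point at the page directory page itself.  With
 * 4K pages and 1024-entry tables, that slot turns the 4MB of virtual
 * space beginning at (PTDPTDI << PD_SHIFT) into a linear window onto
 * every page table page, so that, approximately,
 *
 *	vtopte(va) ~= &PTmap[i386_btop(va)]
 *
 * and the page directory itself reappears inside that window, with no
 * separate kernel virtual allocation needed per page table.
 */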
/*
 * Major modifications by John S. Dyson primarily to support
 * pageable page tables, eliminating pmap_attributes,
 * discontiguous memory pages, and using more efficient string
 * instructions. Jan 13, 1994.
 */

/*
 *	Manages physical address maps.
 *
 *	In addition to hardware address maps, this
 *	module is called upon to provide software-use-only
 *	maps which may or may not be stored in the same
 *	form as hardware maps.  These pseudo-maps are
 *	used to store intermediate results from copy
 *	operations to and from address spaces.
 *
 *	Since the information managed by this module is
 *	also stored by the logical address mapping module,
 *	this module may throw away valid virtual-to-physical
 *	mappings at almost any time.  However, invalidations
 *	of virtual-to-physical mappings must be done as
 *	requested.
 *
 *	In order to cope with hardware architectures which
 *	make virtual-to-physical map invalidates expensive,
 *	this module may delay invalidate or reduced protection
 *	operations until such time as they are actually
 *	necessary.  This module is given full information as
 *	to which processors are currently using which maps,
 *	and to when physical maps must be made correct.
 */

#include "param.h"
#include "systm.h"
#include "proc.h"
#include "malloc.h"
#include "user.h"
#include "i386/include/cpufunc.h"

#include "vm/vm.h"
#include "vm/vm_kern.h"
#include "vm/vm_page.h"

#include "i386/isa/isa.h"

/*
 * Allocate various and sundry SYSMAPs used in the days of old VM
 * and not yet converted.  XXX.
 */
#define BSDVM_COMPAT	1

#ifdef DEBUG
struct {
	int kernel;	/* entering kernel mapping */
	int user;	/* entering user mapping */
	int ptpneeded;	/* needed to allocate a PT page */
	int pwchange;	/* no mapping change, just wiring or protection */
	int wchange;	/* no mapping change, just wiring */
	int mchange;	/* was mapped but mapping to different page */
	int managed;	/* a managed page */
	int firstpv;	/* first mapping for this PA */
	int secondpv;	/* second mapping for this PA */
	int ci;		/* cache inhibited */
	int unmanaged;	/* not a managed page */
	int flushes;	/* cache flushes */
} enter_stats;
struct {
	int calls;
	int removes;
	int pvfirst;
	int pvsearch;
	int ptinvalid;
	int uflushes;
	int sflushes;
} remove_stats;

int debugmap = 0;
int pmapdebug = 0 /* 0xffff */;
#define PDB_FOLLOW	0x0001
#define PDB_INIT	0x0002
#define PDB_ENTER	0x0004
#define PDB_REMOVE	0x0008
#define PDB_CREATE	0x0010
#define PDB_PTPAGE	0x0020
#define PDB_CACHE	0x0040
#define PDB_BITS	0x0080
#define PDB_COLLECT	0x0100
#define PDB_PROTECT	0x0200
#define PDB_PDRTAB	0x0400
#define PDB_PARANOIA	0x2000
#define PDB_WIRING	0x4000
#define PDB_PVDUMP	0x8000

int pmapvacflush = 0;
#define	PVF_ENTER	0x01
#define	PVF_REMOVE	0x02
#define	PVF_PROTECT	0x04
#define	PVF_TOTAL	0x80
#endif

/*
 * Get PDEs and PTEs for user/kernel address space
 */
#define	pmap_pde(m, v)	(&((m)->pm_pdir[((vm_offset_t)(v) >> PD_SHIFT)&1023]))
#define pdir_pde(m, v) (m[((vm_offset_t)(v) >> PD_SHIFT)&1023])

#define pmap_pte_pa(pte)	(*(int *)(pte) & PG_FRAME)

#define pmap_pde_v(pte)		((pte)->pd_v)
#define pmap_pte_w(pte)		((pte)->pg_w)
/* #define pmap_pte_ci(pte)	((pte)->pg_ci) */
#define pmap_pte_m(pte)		((pte)->pg_m)
#define pmap_pte_u(pte)		((pte)->pg_u)
#define pmap_pte_v(pte)		((pte)->pg_v)
#define pmap_pte_set_w(pte, v)		((pte)->pg_w = (v))
#define pmap_pte_set_prot(pte, v)	((pte)->pg_prot = (v))
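/*
 * Worked example of the index arithmetic above (illustrative only,
 * assuming the usual PD_SHIFT == 22, PG_SHIFT == 12 layout): for
 * va == 0x08049000,
 *
 *	pde index = 0x08049000 >> 22          = 0x20
 *	pte index = (0x08049000 >> 12) & 1023 = 0x49
 *
 * so pmap_pde(m, va) selects pm_pdir[0x20], and the PTE for va is
 * entry 0x49 within the page table that PDE points to.
 */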

/*
 * Given a map and a machine independent protection code,
 * convert to an i386 protection code.
 */
#define pte_prot(m, p)	(protection_codes[p])
int	protection_codes[8];

struct pmap	kernel_pmap_store;
pmap_t		kernel_pmap;

vm_offset_t	phys_avail[6];	/* 2 entries + 1 null */
vm_offset_t	avail_start;	/* PA of first available physical page */
vm_offset_t	avail_end;	/* PA of last available physical page */
vm_size_t	mem_size;	/* memory size in bytes */
vm_offset_t	virtual_avail;  /* VA of first avail page (after kernel bss)*/
vm_offset_t	virtual_end;	/* VA of last avail page (end of kernel AS) */
int		i386pagesperpage;	/* PAGE_SIZE / I386_PAGE_SIZE */
boolean_t	pmap_initialized = FALSE;	/* Has pmap_init completed? */
vm_offset_t	vm_first_phys, vm_last_phys;

boolean_t	pmap_testbit();
void		pmap_clear_modify();
void		i386_protection_init();

#if BSDVM_COMPAT
#include "msgbuf.h"

/*
 * All those kernel PT submaps that BSD is so fond of
 */
struct pte	*CMAP1, *CMAP2, *mmap;
caddr_t		CADDR1, CADDR2, vmmap;
struct pte	*msgbufmap;
struct msgbuf	*msgbufp;
#endif

struct vm_map *pmap_fmap(pmap_t pmap);
void init_pv_entries(int);

/*
 *	Routine:	pmap_pte
 *	Function:
 *		Extract the page table entry associated
 *		with the given map/virtual_address pair.
 * [ what about induced faults -wfj]
 */

struct pte *
pmap_pte(pmap, va)
	register pmap_t	pmap;
	vm_offset_t va;
{

	if (pmap && pmap_pde_v(pmap_pde(pmap, va))) {
		/* are we current address space or kernel? */
		if (pmap->pm_pdir[PTDPTDI].pd_pfnum == PTDpde.pd_pfnum
			|| pmap == kernel_pmap)
			return ((struct pte *) vtopte(va));

		/* otherwise, we are alternate address space */
		else {
			if (pmap->pm_pdir[PTDPTDI].pd_pfnum
				!= APTDpde.pd_pfnum) {
				APTDpde = pmap->pm_pdir[PTDPTDI];
				tlbflush();
			}
			return((struct pte *) avtopte(va));
		}
	}
	return(0);
}
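/*
 * Typical (illustrative) use of pmap_pte(); a sketch, not taken
 * verbatim from any caller in this file:
 *
 *	struct pte *pte = pmap_pte(pmap, va);
 *	if (pte && pmap_pte_v(pte))
 *		pa = pmap_pte_pa(pte) | (va & ~PG_FRAME);
 *
 * Note that looking at a non-current pmap reloads APTDpde and costs a
 * TLB flush, so callers that walk many entries use get_pt_entry()
 * below, which hands back the whole PTmap/APTmap window instead.
 */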

/*
 *	Routine:	pmap_extract
 *	Function:
 *		Extract the physical page address associated
 *		with the given map/virtual_address pair.
 */

vm_offset_t
pmap_extract(pmap, va)
	register pmap_t	pmap;
	vm_offset_t va;
{
	struct pde save;
	vm_offset_t pa;
	int s;

	s = splhigh();
	if (pmap && pmap_pde_v(pmap_pde(pmap, va))) {
		/* are we current address space or kernel? */
		if (pmap->pm_pdir[PTDPTDI].pd_pfnum == PTDpde.pd_pfnum
			|| pmap == kernel_pmap) {
			pa = *(int *) vtopte(va);
		/* otherwise, we are alternate address space */
		} else {
			if (pmap->pm_pdir[PTDPTDI].pd_pfnum
				!= APTDpde.pd_pfnum) {
				save = APTDpde;
				APTDpde = pmap->pm_pdir[PTDPTDI];
				tlbflush();
				pa = *(int *) avtopte(va);
				APTDpde = save;
				tlbflush();
			} else {
				tlbflush();
				pa = *(int *) avtopte(va);
			}
		}
		pa = (pa & PG_FRAME) | (va & ~PG_FRAME);
		splx(s);
		return pa;
	}
	splx(s);
	return 0;

}
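/*
 * Example use (a sketch, mirroring pmap_pinit() below): translate a
 * kernel virtual address into the physical address backing it:
 *
 *	pa = pmap_extract(kernel_pmap, (vm_offset_t) pmap->pm_pdir);
 */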

int
pmap_is_managed(pa)
	vm_offset_t pa;
{
	int i;

	if (!pmap_initialized)
		return 0;

	for (i = 0; phys_avail[i + 1]; i += 2) {
		if (pa >= phys_avail[i] && pa < phys_avail[i + 1])
			return 1;
	}
	return 0;
}

/*
 * increment/decrement pmap wiring count
 */
void
pmap_use_pt(pmap, va, use)
	pmap_t pmap;
	vm_offset_t va;
	int use;
{
	vm_offset_t pt, pa;
	pv_entry_t pv;
	vm_page_t m;

	if (va >= VM_MAX_ADDRESS)
		return;

	pt = trunc_page(vtopte(va));
	pa = pmap_extract(pmap, pt);
	if (pa == 0) {
		printf("Warning pmap_use_pt pte paging failure\n");
	}
	if (!pa || !pmap_is_managed(pa))
		return;
	pv = pa_to_pvh(pa);

	m = PHYS_TO_VM_PAGE(pa);
	if (use) {
		vm_page_wire(m);
	} else {
		vm_page_unwire(m);
	}
}

/* [ macro again?, should I force kstack into user map here? -wfj ] */
void
pmap_activate(pmap, pcbp)
	register pmap_t pmap;
	struct pcb *pcbp;
{
	PMAP_ACTIVATE(pmap, pcbp);
}

/*
 *	Bootstrap the system enough to run with virtual memory.
 *	Map the kernel's code and data, and allocate the system page table.
 *
 *	On the I386 this is called after mapping has already been enabled
 *	and just syncs the pmap module with what has already been done.
 *	[We can't call it easily with mapping off since the kernel is not
 *	mapped with PA == VA, hence we would have to relocate every address
 *	from the linked base (virtual) address "KERNBASE" to the actual
 *	(physical) address starting relative to 0]
 */

#define DMAPAGES 8
void
pmap_bootstrap(firstaddr, loadaddr)
	vm_offset_t firstaddr;
	vm_offset_t loadaddr;
{
#if BSDVM_COMPAT
	vm_offset_t va;
	struct pte *pte;
#endif
	extern vm_offset_t maxmem, physmem;
	extern int IdlePTD;

	avail_start = firstaddr + DMAPAGES*NBPG;
	avail_end = maxmem << PG_SHIFT;

	/* XXX: allow for msgbuf */
	avail_end -= i386_round_page(sizeof(struct msgbuf));

	mem_size = physmem << PG_SHIFT;
	virtual_avail = (vm_offset_t) KERNBASE + avail_start;
	virtual_end = VM_MAX_KERNEL_ADDRESS;
	i386pagesperpage = PAGE_SIZE / NBPG;

	/*
	 * Initialize protection array.
	 */
	i386_protection_init();

	/*
	 * The kernel's pmap is statically allocated so we don't
	 * have to use pmap_create, which is unlikely to work
	 * correctly at this part of the boot sequence.
	 */
	kernel_pmap = &kernel_pmap_store;

	kernel_pmap->pm_pdir = (pd_entry_t *)(KERNBASE + IdlePTD);

	simple_lock_init(&kernel_pmap->pm_lock);
	kernel_pmap->pm_count = 1;

#if BSDVM_COMPAT
	/*
	 * Allocate all the submaps we need
	 */
#define	SYSMAP(c, p, v, n)	\
	v = (c)va; va += ((n)*NBPG); p = pte; pte += (n);
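/*
 * For illustration, SYSMAP(caddr_t, CMAP1, CADDR1, 1) below expands
 * roughly to:
 *
 *	CADDR1 = (caddr_t) va;  va += 1 * NBPG;
 *	CMAP1  = pte;           pte += 1;
 *
 * i.e. it reserves one page of kernel virtual space and remembers the
 * PTE that maps it, so the page can later be pointed at arbitrary
 * physical memory (see pmap_copy_page()).
 */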

	va = virtual_avail;
	pte = pmap_pte(kernel_pmap, va);

	SYSMAP(caddr_t		,CMAP1		,CADDR1	   ,1		)
	SYSMAP(caddr_t		,CMAP2		,CADDR2	   ,1		)
	SYSMAP(caddr_t		,mmap		,vmmap	   ,1		)
	SYSMAP(struct msgbuf *	,msgbufmap	,msgbufp   ,1		)
	virtual_avail = va;
#endif
	/*
	 * reserve special hunk of memory for use by bus dma as a bounce
	 * buffer (contiguous virtual *and* physical memory). for now,
	 * assume vm does not use memory beneath hole, and we know that
	 * the bootstrap uses top 32k of base memory. -wfj
	 */
	{
		extern vm_offset_t isaphysmem;
		isaphysmem = va;

		virtual_avail = pmap_map(va, firstaddr,
				firstaddr + DMAPAGES*NBPG, VM_PROT_ALL);
	}

	*(int *)PTD = 0;
	load_cr3(rcr3());

}

/*
 *	Initialize the pmap module.
 *	Called by vm_init, to initialize any structures that the pmap
 *	system needs to map virtual memory.
 */
void
pmap_init(phys_start, phys_end)
	vm_offset_t	phys_start, phys_end;
{
	vm_offset_t	addr, addr2;
	vm_size_t	npg, s;
	int		rv;
	int i;
	extern int KPTphys;
	extern int IdlePTD;

	/*
	 * Now that kernel map has been allocated, we can mark as
	 * unavailable regions which we have mapped in locore.
	 */
	addr = atdevbase;
	(void) vm_map_find(kernel_map, NULL, (vm_offset_t) 0,
			   &addr, (0x100000-0xa0000), FALSE);

	addr = (vm_offset_t) KERNBASE + IdlePTD;
	vm_object_reference(kernel_object);
	(void) vm_map_find(kernel_map, kernel_object, addr,
			   &addr, (4 + NKPT) * NBPG, FALSE);


	/*
	 * calculate the number of pv_entries needed
	 */
	vm_first_phys = phys_avail[0];
	for (i = 0; phys_avail[i + 1]; i += 2) ;
	npg = (phys_avail[(i - 2) + 1] - vm_first_phys) / NBPG;

	/*
	 * Allocate memory for random pmap data structures.  Includes the
	 * pv_head_table.
	 */
	s = (vm_size_t) (sizeof(struct pv_entry) * npg);
	s = i386_round_page(s);
	addr = (vm_offset_t) kmem_alloc(kernel_map, s);
	pv_table = (pv_entry_t) addr;

	/*
	 * init the pv free list
	 */
	init_pv_entries(npg);
	/*
	 * Now it is safe to enable pv_table recording.
	 */
	pmap_initialized = TRUE;
}

/*
 *	Used to map a range of physical addresses into kernel
 *	virtual address space.
 *
 *	For now, VM is already on, we only need to map the
 *	specified memory.
 */
vm_offset_t
pmap_map(virt, start, end, prot)
	vm_offset_t	virt;
	vm_offset_t	start;
	vm_offset_t	end;
	int		prot;
{
	while (start < end) {
		pmap_enter(kernel_pmap, virt, start, prot, FALSE);
		virt += PAGE_SIZE;
		start += PAGE_SIZE;
	}
	return(virt);
}
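/*
 * Example (as used by pmap_bootstrap() above for the ISA bounce-buffer
 * region): map DMAPAGES pages of physical memory starting at firstaddr
 * into the kernel at va, and get back the next free virtual address:
 *
 *	virtual_avail = pmap_map(va, firstaddr,
 *			firstaddr + DMAPAGES*NBPG, VM_PROT_ALL);
 */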

/*
 *	Create and return a physical map.
 *
 *	If the size specified for the map
 *	is zero, the map is an actual physical
 *	map, and may be referenced by the
 *	hardware.
 *
 *	If the size specified is non-zero,
 *	the map will be used in software only, and
 *	is bounded by that size.
 *
 * [ just allocate a ptd and mark it uninitialized -- should we track
 *   with a table which process has which ptd? -wfj ]
 */

pmap_t
pmap_create(size)
	vm_size_t	size;
{
	register pmap_t pmap;

	/*
	 * Software use map does not need a pmap
	 */
	if (size)
		return(NULL);

	/* XXX: is it ok to wait here? */
	pmap = (pmap_t) malloc(sizeof *pmap, M_VMPMAP, M_WAITOK);
#ifdef notifwewait
	if (pmap == NULL)
		panic("pmap_create: cannot allocate a pmap");
#endif
	bzero(pmap, sizeof(*pmap));
	pmap_pinit(pmap);
	return (pmap);
}

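/*
 * A small free-list cache of page-directory pages.  vm_get_pmap()
 * hands back a zeroed page (reusing a cached one when available) and
 * vm_put_pmap() returns it, keeping at most PMAPCACHESIZE pages around
 * so that pmap creation/destruction does not always go through
 * kmem_alloc()/kmem_free().
 */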
struct pmaplist {
	struct pmaplist *next;
};

#define PMAPCACHESIZE 4
struct pmaplist *vm_pmaplist;
int pmapcount;

void *
vm_get_pmap() {
	struct pmaplist *rtval;
	int s;
	if (vm_pmaplist) {
		rtval = vm_pmaplist;
		vm_pmaplist = rtval->next;
		--pmapcount;
		bzero(rtval, ctob(1));
		return rtval;
	}
	rtval = (struct pmaplist *)kmem_alloc(kernel_map, ctob(1));
	bzero(rtval, ctob(1));
	return rtval;
}

void
vm_put_pmap(up)
	struct pmaplist *up;
{
	if (pmapcount > PMAPCACHESIZE) {
		int s;
		kmem_free(kernel_map, up, ctob(1));
	} else {
		up->next = vm_pmaplist;
		vm_pmaplist = up;
		++pmapcount;
	}
}

/*
 * Initialize a preallocated and zeroed pmap structure,
 * such as one in a vmspace structure.
 */
void
pmap_pinit(pmap)
	register struct pmap *pmap;
{
	/*
	 * No need to allocate page table space yet but we do need a
	 * valid page directory table.
	 */
	pmap->pm_pdir = (pd_entry_t *) vm_get_pmap();

	/* wire in kernel global address entries */
	bcopy(PTD+KPTDI, pmap->pm_pdir+KPTDI, NKPT*PTESIZE);

	/* install self-referential address mapping entry */
	*(int *)(pmap->pm_pdir+PTDPTDI) =
		((int)pmap_extract(kernel_pmap, (vm_offset_t)pmap->pm_pdir)) | PG_V | PG_KW;

	pmap->pm_count = 1;
	simple_lock_init(&pmap->pm_lock);
}

/*
 *	Retire the given physical map from service.
 *	Should only be called if the map contains
 *	no valid mappings.
 */
void
pmap_destroy(pmap)
	register pmap_t pmap;
{
	int count;

	if (pmap == NULL)
		return;

	simple_lock(&pmap->pm_lock);
	count = --pmap->pm_count;
	simple_unlock(&pmap->pm_lock);
	if (count == 0) {
		pmap_release(pmap);
		free((caddr_t)pmap, M_VMPMAP);
	}
}

/*
 * Release any resources held by the given physical map.
 * Called when a pmap initialized by pmap_pinit is being released.
 * Should only be called if the map contains no valid mappings.
 */
void
pmap_release(pmap)
	register struct pmap *pmap;
{
	vm_put_pmap((struct pmaplist *) pmap->pm_pdir);
}

/*
 *	Add a reference to the specified pmap.
 */
void
pmap_reference(pmap)
	pmap_t	pmap;
{
	if (pmap != NULL) {
		simple_lock(&pmap->pm_lock);
		pmap->pm_count++;
		simple_unlock(&pmap->pm_lock);
	}
}

#define PV_FREELIST_MIN ((NBPG / sizeof (struct pv_entry)) / 2)

/*
 * Data for the pv entry allocation mechanism
 */
int pv_freelistcnt;
pv_entry_t pv_freelist;
vm_offset_t pvva;
int npvvapg;

/*
 * free the pv_entry back to the free list
 */
inline static void
free_pv_entry(pv)
	pv_entry_t pv;
{
	if (!pv) return;
	++pv_freelistcnt;
	pv->pv_next = pv_freelist;
	pv_freelist = pv;
}

/*
 * get a new pv_entry, allocating a block from the system
 * when needed.
 * the memory allocation is performed bypassing the malloc code
 * because of the possibility of allocations at interrupt time.
 */
static pv_entry_t
get_pv_entry()
{
	pv_entry_t tmp;
	/*
	 * get more pv_entry pages if needed
	 */
	while (pv_freelistcnt < PV_FREELIST_MIN || pv_freelist == 0) {
		/*
		 * do we have any pre-allocated map-pages left?
		 */
		if (npvvapg) {
			vm_page_t m;
			/*
			 * we do this to keep recursion away
			 */
			pv_freelistcnt += PV_FREELIST_MIN;
			/*
			 * allocate a physical page out of the vm system
			 */
			if (m = vm_page_alloc(kernel_object, pvva-vm_map_min(kernel_map))) {
				int newentries;
				int i;
				pv_entry_t entry;
				newentries = (NBPG/sizeof (struct pv_entry));
				/*
				 * wire the page
				 */
				vm_page_wire(m);
				m->flags &= ~PG_BUSY;
				/*
				 * let the kernel see it
				 */
				pmap_enter(vm_map_pmap(kernel_map), pvva,
					VM_PAGE_TO_PHYS(m), VM_PROT_DEFAULT,1);

				entry = (pv_entry_t) pvva;
				/*
				 * update the allocation pointers
				 */
				pvva += NBPG;
				--npvvapg;

				/*
				 * free the entries into the free list
				 */
				for (i = 0; i < newentries; i++) {
					free_pv_entry(entry);
					entry++;
				}
			}
			pv_freelistcnt -= PV_FREELIST_MIN;
		}
	}

	/*
	 * get a pv_entry off of the free list
	 */
	--pv_freelistcnt;
	tmp = pv_freelist;
	if (!tmp)
		panic("get_pv_entry: cannot get a pv_entry_t");
	pv_freelist = tmp->pv_next;
	bzero(tmp, sizeof *tmp);
	return tmp;
}

/*
 * init the pv_entry allocation system
 */
#define PVSPERPAGE 16
void
init_pv_entries(npg)
	int npg;
{
	/*
	 * allocate enough kvm space for PVSPERPAGE entries per page (lots)
	 * kvm space is fairly cheap, be generous!!!  (the system can panic
	 * if this is too small.)
	 */
	npvvapg = ((npg*PVSPERPAGE) * sizeof(struct pv_entry) + NBPG - 1)/NBPG;
	pvva = kmem_alloc_pageable(kernel_map, npvvapg * NBPG);
	/*
	 * get the first batch of entries
	 */
	free_pv_entry(get_pv_entry());
}

static pt_entry_t *
get_pt_entry(pmap)
	pmap_t pmap;
{
	pt_entry_t *ptp;
	/* are we current address space or kernel? */
	if (pmap->pm_pdir[PTDPTDI].pd_pfnum == PTDpde.pd_pfnum
		|| pmap == kernel_pmap)
		ptp=PTmap;

	/* otherwise, we are alternate address space */
	else {
		if (pmap->pm_pdir[PTDPTDI].pd_pfnum
			!= APTDpde.pd_pfnum) {
			APTDpde = pmap->pm_pdir[PTDPTDI];
			tlbflush();
		}
		ptp=APTmap;
	}
	return ptp;
}

/*
 * If it is the first entry on the list, it is actually
 * in the header and we must copy the following entry up
 * to the header.  Otherwise we must search the list for
 * the entry.  In either case we free the now unused entry.
 */
void
pmap_remove_entry(pmap, pv, va)
	struct pmap *pmap;
	pv_entry_t pv;
	vm_offset_t va;
{
	pv_entry_t npv;
	int s;
	int wired;
	s = splhigh();
	if (pmap == pv->pv_pmap && va == pv->pv_va) {
		npv = pv->pv_next;
		if (npv) {
			*pv = *npv;
			free_pv_entry(npv);
		} else {
			pv->pv_pmap = NULL;
		}
	} else {
		for (npv = pv->pv_next; npv; npv = npv->pv_next) {
			if (pmap == npv->pv_pmap && va == npv->pv_va) {
				break;
			}
			pv = npv;
		}
		if (npv) {
			pv->pv_next = npv->pv_next;
			free_pv_entry(npv);
		}
	}
	splx(s);
}

/*
 *	Remove the given range of addresses from the specified map.
 *
 *	It is assumed that the start and end are properly
 *	rounded to the page size.
 */
void
pmap_remove(pmap, sva, eva)
	struct pmap *pmap;
	register vm_offset_t sva;
	register vm_offset_t eva;
{
	register pt_entry_t *ptp,*ptq;
	vm_offset_t pa;
	register pv_entry_t pv;
	int s;
	vm_offset_t asva;

	if (pmap == NULL)
		return;

	s = splbio();

	ptp = get_pt_entry(pmap);


	/* this is essential since we must check the PDE(sva) for presence */
	while (sva <= eva && !pmap_pde_v(pmap_pde(pmap, sva)))
		sva = (sva & PD_MASK) + (1<<PD_SHIFT);
	sva = i386_btop(sva);
	eva = i386_btop(eva);

	for (; sva < eva; sva++) {
		/*
		 * Weed out invalid mappings.
		 * Note: we assume that the page directory table is
		 * always allocated, and in kernel virtual.
		 */

		if (!pmap_pde_v(pmap_pde(pmap, i386_ptob(sva))))
			{
			/* We can race ahead here, straight to next pde.. */
			sva = sva & ~((NBPG/PTESIZE) - 1);
			sva = sva + NBPG/PTESIZE - 1;
			continue;
			}

		ptq=ptp+sva;


		/*
		 * search for page table entries
		 */
		if (!pmap_pte_v(ptq)) {
			vm_offset_t nscan = ((sva + (NBPG/PTESIZE)) & ~((NBPG/PTESIZE) - 1)) - sva;
			if ((nscan + sva) > eva)
				nscan = eva - sva;
			if (nscan) {
				int found;

				asm("xorl %%eax,%%eax;cld;repe;scasl;jz 1f;incl %%eax;1:;"
					:"=D"(ptq),"=a"(found)
					:"c"(nscan),"0"(ptq)
					:"cx");
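				/*
				 * What the asm above does, expressed as a
				 * plain C sketch (EDI=ptq, ECX=nscan,
				 * EAX=0 going in):
				 *
				 *	found = 0;
				 *	while (nscan-- > 0)
				 *		if (*ptq++ != 0) {
				 *			found = 1;
				 *			break;
				 *		}
				 *
				 * i.e. "repe; scasl" skips the run of zero
				 * (invalid) PTEs and leaves ptq just past
				 * the first valid one, if any.
				 */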

				if (found)
					ptq -= 1;

				sva = ptq - ptp;
			}
			if (sva >= eva)
				goto endofloop;
		}


		if (!(sva & 0x3ff)) /* Only check once in a while */
		    {
		    	if (!pmap_pde_v(pmap_pde(pmap, i386_ptob(sva)))) {
			/* We can race ahead here, straight to next pde.. */
					sva = sva & ~((NBPG/PTESIZE) - 1);
					sva = sva + NBPG/PTESIZE - 1;
					continue;
				}
		    }

		if (!pmap_pte_v(ptq))
			continue;

		/*
		 * Update statistics
		 */
		if (pmap_pte_w(ptq))
			pmap->pm_stats.wired_count--;
		pmap->pm_stats.resident_count--;

		pa = pmap_pte_pa(ptq);


		/*
		 * Invalidate the PTEs.
		 * XXX: should cluster them up and invalidate as many
		 * as possible at once.
		 */
		*(int *)ptq = 0;

		/*
		 * Remove from the PV table (raise IPL since we
		 * may be called at interrupt time).
		 */
		if (!pmap_is_managed(pa))
			continue;
		pv = pa_to_pvh(pa);
		asva = i386_ptob(sva);
		pmap_remove_entry(pmap, pv, asva);
		pmap_use_pt(pmap, asva, 0);
	}
endofloop:
	tlbflush();
	splx(s);
}

/*
 *	Routine:	pmap_remove_all
 *	Function:
 *		Removes this physical page from
 *		all physical maps in which it resides.
 *		Reflects back modify bits to the pager.
 */
void
pmap_remove_all(pa)
	vm_offset_t pa;
{
	register pv_entry_t pv, npv;
	register pt_entry_t *pte, *ptp;
	vm_offset_t va;
	struct pmap *pmap;
	struct map *map;
	int s;

	/*
	 * Not one of ours
	 */
	if (!pmap_is_managed(pa))
		return;

	pa = i386_trunc_page(pa);
	pv = pa_to_pvh(pa);
	while (pv->pv_pmap != NULL) {
		s = splhigh();
		pmap = pv->pv_pmap;
		ptp = get_pt_entry(pmap);
		va = i386_btop(pv->pv_va);
		pte = ptp + va;
		if (pmap_pte_w(pte))
			pmap->pm_stats.wired_count--;
		if (pmap_pte_v(pte))
			pmap->pm_stats.resident_count--;


		*(int *)pte = 0;
		pmap_use_pt(pmap, pv->pv_va, 0);

		npv = pv->pv_next;
		if (npv) {
			*pv = *npv;
			free_pv_entry(npv);
		} else {
			pv->pv_pmap = NULL;
		}

		splx(s);
	}

	tlbflush();
}


/*
 *	Set the physical protection on the
 *	specified range of this map as requested.
 */
void
pmap_protect(pmap, sva, eva, prot)
	register pmap_t	pmap;
	vm_offset_t	sva, eva;
	vm_prot_t	prot;
{
	register pt_entry_t *pte;
	register vm_offset_t va;
	int i386prot;
	register pt_entry_t *ptp;
	int reqactivate = 0;
	int evap = i386_btop(eva);
	int s;

	if (pmap == NULL)
		return;

	if ((prot & VM_PROT_READ) == VM_PROT_NONE) {
		pmap_remove(pmap, sva, eva);
		return;
	}
	if (prot & VM_PROT_WRITE)
		return;

	s = splbio();
	ptp = get_pt_entry(pmap);

	for (va = sva; va < eva; va += PAGE_SIZE) {
		/*
		 * Page table page is not allocated.
		 * Skip it, we don't want to force allocation
		 * of unnecessary PTE pages just to set the protection.
		 */
		if (!pmap_pde_v(pmap_pde(pmap, va))) {
			/* XXX: avoid address wrap around */
			if (va >= i386_trunc_pdr((vm_offset_t)-1))
				break;
			va = i386_round_pdr(va + PAGE_SIZE) - PAGE_SIZE;
			continue;
		}

		pte = ptp + i386_btop(va);

		/*
		 * scan for a non-empty pte
		 */
		{
			int found=0;
			int svap = pte - ptp;
			vm_offset_t nscan =
				((svap + (NBPG/PTESIZE)) & ~((NBPG/PTESIZE) - 1)) - svap;
			if (nscan + svap > evap)
				nscan = evap - svap;
			if (nscan) {
				asm("xorl %%eax,%%eax;cld;repe;scasl;jz 1f;incl %%eax;1:;"
					:"=D"(pte),"=a"(found)
					:"c"(nscan),"0"(pte):"cx");
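				/*
				 * Same "repe; scasl" idiom as in
				 * pmap_remove() above: skip the run of
				 * zero PTEs, leaving pte just past the
				 * first non-zero entry (if one was found
				 * before the end of this PT page).
				 */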

				pte -= 1;
				svap = pte - ptp;

			}
			if (svap >= evap)
				goto endofloop;
			va = i386_ptob(svap);
			if (!found)
				continue;
		}


		/*
		 * Page not valid.  Again, skip it.
		 * Should we do this?  Or set protection anyway?
		 */
		if (!pmap_pte_v(pte))
			continue;

		i386prot = pte_prot(pmap, prot);
		if (va < UPT_MAX_ADDRESS)
			i386prot |= PG_RW /*PG_u*/;
		if (i386prot != pte->pg_prot) {
			reqactivate = 1;
			pmap_pte_set_prot(pte, i386prot);
		}
	}
endofloop:
	tlbflush();
	splx(s);
}

/*
 *	Insert the given physical page (p) at
 *	the specified virtual address (v) in the
 *	target physical map with the protection requested.
 *
 *	If specified, the page will be wired down, meaning
 *	that the related pte can not be reclaimed.
 *
 *	NB:  This is the only routine which MAY NOT lazy-evaluate
 *	or lose information.  That is, this routine must actually
 *	insert this page into the given map NOW.
 */
void
pmap_enter(pmap, va, pa, prot, wired)
	register pmap_t pmap;
	vm_offset_t va;
	register vm_offset_t pa;
	vm_prot_t prot;
	boolean_t wired;
{
	register pt_entry_t *pte;
	register int npte;
	vm_offset_t opa;
	boolean_t cacheable = TRUE;
	boolean_t checkpv = TRUE;
	int s,s1;

	if (pmap == NULL)
		return;

	va = trunc_page(va);
	pa = trunc_page(pa);
	if (va > VM_MAX_KERNEL_ADDRESS) panic("pmap_enter: toobig");

	/*
	 * Page Directory table entry not valid, we need a new PT page
	 */
	if (!pmap_pde_v(pmap_pde(pmap, va))) {
		pg("ptdi %x, va %x", pmap->pm_pdir[PTDPTDI], va);
	}

	s1 = splbio();
	pte = pmap_pte(pmap, va);
	opa = pmap_pte_pa(pte);

	/*
	 * Mapping has not changed, must be protection or wiring change.
	 */
	if (opa == pa) {
		/*
		 * Wiring change, just update stats.
		 * We don't worry about wiring PT pages as they remain
		 * resident as long as there are valid mappings in them.
		 * Hence, if a user page is wired, the PT page will be also.
		 */
		if ((wired && !pmap_pte_w(pte)) || (!wired && pmap_pte_w(pte))) {
			if (wired)
				pmap->pm_stats.wired_count++;
			else
				pmap->pm_stats.wired_count--;
		}
		goto validate;
	}

	/*
	 * Mapping has changed, invalidate old range and fall through to
	 * handle validating new mapping.
	 */
	if (opa) {
		pmap_remove(pmap, va, va + PAGE_SIZE);
	}

	/*
	 * Enter on the PV list if part of our managed memory
	 * Note that we raise IPL while manipulating pv_table
	 * since pmap_enter can be called at interrupt time.
	 */
	if (pmap_is_managed(pa)) {
		register pv_entry_t pv, npv;
		int s;

		pv = pa_to_pvh(pa);
		s = splhigh();
		/*
		 * No entries yet, use header as the first entry
		 */
		if (pv->pv_pmap == NULL) {
			pv->pv_va = va;
			pv->pv_pmap = pmap;
			pv->pv_next = NULL;
		}
		/*
		 * There is at least one other VA mapping this page.
		 * Place this entry after the header.
		 */
		else {
			npv = get_pv_entry();
			npv->pv_va = va;
			npv->pv_pmap = pmap;
			npv->pv_next = pv->pv_next;
			pv->pv_next = npv;
		}
		splx(s);
	}

	pmap_use_pt(pmap, va, 1);

	/*
	 * Assumption: if it is not part of our managed memory
	 * then it must be device memory which may be volatile.
	 */
	if (pmap_initialized) {
		checkpv = cacheable = FALSE;
	}

	/*
	 * Increment counters
	 */
	pmap->pm_stats.resident_count++;
	if (wired)
		pmap->pm_stats.wired_count++;

validate:
	/*
	 * Now validate mapping with desired protection/wiring.
	 */
	npte = (pa & PG_FRAME) | pte_prot(pmap, prot) | PG_V;

	/*
	 * When forking (copy-on-write, etc):
	 * A process will turn off write permissions for any of its writable
	 * pages.  If the data (object) is only referred to by one process, the
	 * processes map is modified directly as opposed to using the
	 * object manipulation routine.  When using pmap_protect, the
	 * modified bits are not kept in the vm_page_t data structure.
	 * Therefore, when using pmap_enter in vm_fault to bring back
	 * writability of a page, there has been no memory of the
	 * modified or referenced bits except at the pte level.
	 * this clause supports the carryover of the modified and
	 * used (referenced) bits.
	 */
	if (pa == opa)
		npte |= *(int *)pte & (PG_M|PG_U);

	if (wired)
		npte |= PG_W;
	if (va < UPT_MIN_ADDRESS)
		npte |= PG_u;
	else if (va < UPT_MAX_ADDRESS)
		npte |= PG_u | PG_RW;

	if (npte != *(int *)pte) {
		*(int *)pte = npte;
		tlbflush();
	}
	splx(s1);
}

/*
 *      pmap_page_protect:
 *
 *      Lower the permission for all mappings to a given page.
 */
void
pmap_page_protect(phys, prot)
        vm_offset_t     phys;
        vm_prot_t       prot;
{
	void pmap_copy_on_write();
        switch (prot) {
        case VM_PROT_READ:
        case VM_PROT_READ|VM_PROT_EXECUTE:
                pmap_copy_on_write(phys);
                break;
        case VM_PROT_ALL:
                break;
        default:
                pmap_remove_all(phys);
                break;
        }
}

/*
 *	Routine:	pmap_change_wiring
 *	Function:	Change the wiring attribute for a map/virtual-address
 *			pair.
 *	In/out conditions:
 *			The mapping must already exist in the pmap.
 */
void
pmap_change_wiring(pmap, va, wired)
	register pmap_t	pmap;
	vm_offset_t	va;
	boolean_t	wired;
{
	register pt_entry_t *pte;
	int s;

	if (pmap == NULL)
		return;

	s = splbio();
	pte = pmap_pte(pmap, va);
	if ((wired && !pmap_pte_w(pte)) || (!wired && pmap_pte_w(pte))) {
		if (wired)
			pmap->pm_stats.wired_count++;
		else
			pmap->pm_stats.wired_count--;
	}
	/*
	 * Wiring is not a hardware characteristic so there is no need
	 * to invalidate TLB.
	 */
	pmap_pte_set_w(pte, wired);
	/*
	 * When unwiring, set the modified bit in the pte -- could have
	 * been changed by the kernel
	 */
	if (!wired)
		pmap_pte_m(pte) = 1;
	splx(s);
}



/*
 *	Copy the range specified by src_addr/len
 *	from the source map to the range dst_addr/len
 *	in the destination map.
 *
 *	This routine is only advisory and need not do anything.
 */
void
pmap_copy(dst_pmap, src_pmap, dst_addr, len, src_addr)
	pmap_t		dst_pmap;
	pmap_t		src_pmap;
	vm_offset_t	dst_addr;
	vm_size_t	len;
	vm_offset_t	src_addr;
{
}

/*
 *	Require that all active physical maps contain no
 *	incorrect entries NOW.  [This update includes
 *	forcing updates of any address map caching.]
 *
 *	Generally used to insure that a thread about
 *	to run will see a semantically correct world.
 */
void
pmap_update()
{
	tlbflush();
}


void
pmap_collect(pmap)
	pmap_t		pmap;
{
}

/*
 *	Routine:	pmap_kernel
 *	Function:
 *		Returns the physical map handle for the kernel.
 */
pmap_t
pmap_kernel()
{
    	return (kernel_pmap);
}

/*
 *	pmap_zero_page zeros the specified (machine independent)
 *	page by mapping the page into virtual memory and using
 *	bzero to clear its contents, one machine dependent page
 *	at a time.
 */
void
pmap_zero_page(phys)
	vm_offset_t phys;
{
	phys >>= PG_SHIFT;
	clearseg(phys);
}

/*
 *	pmap_copy_page copies the specified (machine independent)
 *	page by mapping the page into virtual memory and using
 *	bcopy to copy the page, one machine dependent page at a
 *	time.
 */
void
pmap_copy_page(src, dst)
	vm_offset_t src;
	vm_offset_t dst;
{

	src >>= PG_SHIFT;
	dst >>= PG_SHIFT;

	*(int *)CMAP1 = PG_V | PG_KW | ctob(src);
	*(int *)CMAP2 = PG_V | PG_KW | ctob(dst);
	load_cr3(rcr3());
#if __GNUC__ > 1
	memcpy(CADDR2, CADDR1, NBPG);
#else
	bcopy(CADDR1, CADDR2, NBPG);
#endif
}


/*
 *	Routine:	pmap_pageable
 *	Function:
 *		Make the specified pages (by pmap, offset)
 *		pageable (or not) as requested.
 *
 *		A page which is not pageable may not take
 *		a fault; therefore, its page table entry
 *		must remain valid for the duration.
 *
 *		This routine is merely advisory; pmap_enter
 *		will specify that these pages are to be wired
 *		down (or not) as appropriate.
 */
void
pmap_pageable(pmap, sva, eva, pageable)
	pmap_t		pmap;
	vm_offset_t	sva, eva;
	boolean_t	pageable;
{
}

boolean_t
pmap_page_exists(pmap, pa)
	pmap_t pmap;
	vm_offset_t pa;
{
	register pv_entry_t pv;
	register int *pte;
	int s;

	if (!pmap_is_managed(pa))
		return FALSE;

	pv = pa_to_pvh(pa);
	s = splhigh();

	/*
	 * Not found, check current mappings returning
	 * immediately if found.
	 */
	if (pv->pv_pmap != NULL) {
		for (; pv; pv = pv->pv_next) {
			if (pv->pv_pmap == pmap) {
				splx(s);
				return TRUE;
			}
		}
	}
	splx(s);
	return(FALSE);
}

inline boolean_t
pmap_testbit(pa, bit)
	register vm_offset_t pa;
	int bit;
{
	register pv_entry_t pv;
	register int *pte;
	int s;

	if (!pmap_is_managed(pa))
		return FALSE;

	pv = pa_to_pvh(pa);
	s = splhigh();

	/*
	 * Not found, check current mappings returning
	 * immediately if found.
	 */
	if (pv->pv_pmap != NULL) {
		for (; pv; pv = pv->pv_next) {
			if (bit & PG_M ) {
				if (pv->pv_va >= USRSTACK) {
					if (pv->pv_va < USRSTACK+(UPAGES*NBPG)) {
						splx(s);
						return TRUE;
					}
					else if (pv->pv_va < UPT_MAX_ADDRESS) {
						splx(s);
						return FALSE;
					}
				}
			}
			pte = (int *) pmap_pte(pv->pv_pmap, pv->pv_va);
			if (*pte & bit) {
				splx(s);
				return TRUE;
			}
		}
	}
	splx(s);
	return(FALSE);
}

inline void
pmap_changebit(pa, bit, setem)
	vm_offset_t pa;
	int bit;
	boolean_t setem;
{
	register pv_entry_t pv;
	register int *pte, npte;
	vm_offset_t va;
	int s;
	int reqactivate = 0;

	if (!pmap_is_managed(pa))
		return;

	pv = pa_to_pvh(pa);
	s = splhigh();

	/*
	 * Loop over all current mappings setting/clearing as appropriate
	 * If setting RO do we need to clear the VAC?
	 */
	if (pv->pv_pmap != NULL) {
		for (; pv; pv = pv->pv_next) {
			va = pv->pv_va;

			/*
			 * XXX don't write protect pager mappings
			 */
			if (bit == PG_RO) {
				extern vm_offset_t pager_sva, pager_eva;

				if (va >= pager_sva && va < pager_eva)
					continue;
			}

			pte = (int *) pmap_pte(pv->pv_pmap, va);
			if (setem)
				npte = *pte | bit;
			else
				npte = *pte & ~bit;
			if (*pte != npte) {
				*pte = npte;
				tlbflush();
			}
		}
	}
	splx(s);
}

/*
 *	Set the modify bit in a page (USED TO FORCE UPAGES MODIFIED)
 */
void
pmap_set_modify(pa)
	vm_offset_t	pa;
{
	pmap_changebit(pa, PG_M, TRUE);
}

/*
 *	Clear the modify bits on the specified physical page.
 */

void
pmap_clear_modify(pa)
	vm_offset_t	pa;
{
	pmap_changebit(pa, PG_M, FALSE);
}

/*
 *	pmap_clear_reference:
 *
 *	Clear the reference bit on the specified physical page.
 */

void
pmap_clear_reference(pa)
	vm_offset_t	pa;
{
	pmap_changebit(pa, PG_U, FALSE);
}

/*
 *	pmap_clear_cached_attributes
 *
 *	Clear the cached attributes for a specified physical page
 *
 */

void
pmap_clear_cached_attributes(pa)
	vm_offset_t	pa;
{
}

/*
 *	pmap_is_referenced:
 *
 *	Return whether or not the specified physical page is referenced
 *	by any physical maps.
 */

boolean_t
pmap_is_referenced(pa)
	vm_offset_t	pa;
{
	return(pmap_testbit(pa, PG_U));
}

/*
 *	pmap_is_modified:
 *
 *	Return whether or not the specified physical page is modified
 *	by any physical maps.
 */

boolean_t
pmap_is_modified(pa)
	vm_offset_t	pa;
{

	return(pmap_testbit(pa, PG_M));
}

/*
 *	Routine:	pmap_copy_on_write
 *	Function:
 *		Remove write privileges from all
 *		physical maps for this physical page.
 */
void
pmap_copy_on_write(pa)
	vm_offset_t pa;
{
	pmap_changebit(pa, PG_RO, TRUE);
}


vm_offset_t
pmap_phys_address(ppn)
	int ppn;
{
	return(i386_ptob(ppn));
}

/*
 * Miscellaneous support routines follow
 */

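/*
 * Build protection_codes[], the table consulted by pte_prot() above.
 * In effect it reduces the eight VM_PROT_{READ,WRITE,EXECUTE}
 * combinations to three i386 possibilities:
 *
 *	no access                  -> 0
 *	read and/or execute only   -> PG_RO
 *	anything including write   -> PG_RW
 */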
void
i386_protection_init()
{
	register int *kp, prot;

	kp = protection_codes;
	for (prot = 0; prot < 8; prot++) {
		switch (prot) {
		case VM_PROT_NONE | VM_PROT_NONE | VM_PROT_NONE:
			*kp++ = 0;
			break;
		case VM_PROT_READ | VM_PROT_NONE | VM_PROT_NONE:
		case VM_PROT_READ | VM_PROT_NONE | VM_PROT_EXECUTE:
		case VM_PROT_NONE | VM_PROT_NONE | VM_PROT_EXECUTE:
			*kp++ = PG_RO;
			break;
		case VM_PROT_NONE | VM_PROT_WRITE | VM_PROT_NONE:
		case VM_PROT_NONE | VM_PROT_WRITE | VM_PROT_EXECUTE:
		case VM_PROT_READ | VM_PROT_WRITE | VM_PROT_NONE:
		case VM_PROT_READ | VM_PROT_WRITE | VM_PROT_EXECUTE:
			*kp++ = PG_RW;
			break;
		}
	}
}

#ifdef DEBUG
void
pmap_pvdump(pa)
	vm_offset_t pa;
{
	register pv_entry_t pv;

	printf("pa %x", pa);
	for (pv = pa_to_pvh(pa); pv; pv = pv->pv_next) {
		printf(" -> pmap %x, va %x, flags %x",
		       pv->pv_pmap, pv->pv_va, pv->pv_flags);
		pads(pv->pv_pmap);
	}
	printf(" ");
}

#ifdef notyet
void
pmap_check_wiring(str, va)
	char *str;
	vm_offset_t va;
{
	vm_map_entry_t entry;
	register int count, *pte;

	va = trunc_page(va);
	if (!pmap_pde_v(pmap_pde(kernel_pmap, va)) ||
	    !pmap_pte_v(pmap_pte(kernel_pmap, va)))
		return;

	if (!vm_map_lookup_entry(pt_map, va, &entry)) {
		pg("wired_check: entry for %x not found\n", va);
		return;
	}
	count = 0;
	for (pte = (int *)va; pte < (int *)(va+PAGE_SIZE); pte++)
		if (*pte)
			count++;
	if (entry->wired_count != count)
		pg("*%s*: %x: w%d/a%d\n",
		       str, va, entry->wired_count, count);
}
#endif

/* print address space of pmap*/
void
pads(pm)
	pmap_t pm;
{
	unsigned va, i, j;
	struct pte *ptep;

	if (pm == kernel_pmap) return;
	for (i = 0; i < 1024; i++)
		if (pm->pm_pdir[i].pd_v)
			for (j = 0; j < 1024 ; j++) {
				va = (i<<PD_SHIFT)+(j<<PG_SHIFT);
				if (pm == kernel_pmap && va < KERNBASE)
						continue;
				if (pm != kernel_pmap && va > UPT_MAX_ADDRESS)
						continue;
				ptep = pmap_pte(pm, va);
				if (pmap_pte_v(ptep))
					printf("%x:%x ", va, *(int *)ptep);
			} ;

}
#endif