pmap.c revision 1124
1/*
2 * Copyright (c) 1991 Regents of the University of California.
3 * All rights reserved.
4 *
5 * This code is derived from software contributed to Berkeley by
6 * the Systems Programming Group of the University of Utah Computer
7 * Science Department and William Jolitz of UUNET Technologies Inc.
8 *
9 * Redistribution and use in source and binary forms, with or without
10 * modification, are permitted provided that the following conditions
11 * are met:
12 * 1. Redistributions of source code must retain the above copyright
13 *    notice, this list of conditions and the following disclaimer.
14 * 2. Redistributions in binary form must reproduce the above copyright
15 *    notice, this list of conditions and the following disclaimer in the
16 *    documentation and/or other materials provided with the distribution.
17 * 3. All advertising materials mentioning features or use of this software
18 *    must display the following acknowledgement:
19 *	This product includes software developed by the University of
20 *	California, Berkeley and its contributors.
21 * 4. Neither the name of the University nor the names of its contributors
22 *    may be used to endorse or promote products derived from this software
23 *    without specific prior written permission.
24 *
25 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
26 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
27 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
28 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
29 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
30 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
31 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
32 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
33 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
34 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
35 * SUCH DAMAGE.
36 *
37 *	from:	@(#)pmap.c	7.7 (Berkeley)	5/12/91
38 *	$Id: pmap.c,v 1.16 1994/01/31 23:47:27 davidg Exp $
39 */
40
41/*
42 * Derived from hp300 version by Mike Hibler, this version by William
43 * Jolitz uses a recursive map [a pde points to the page directory] to
44 * map the page tables using the pagetables themselves. This is done to
45 * reduce the impact on kernel virtual memory for lots of sparse address
46 * space, and to reduce the cost of memory to each process.
47 *
48 *	Derived from: hp300/@(#)pmap.c	7.1 (Berkeley) 12/5/90
49 */
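/*
 * (A rough sketch, assuming the usual i386 pmap constants: with the
 * recursive PDE installed at slot PTDPTDI, the page tables of the current
 * address space appear as one linear array at PTmap, so vtopte(va) is in
 * effect &PTmap[i386_btop(va)], and avtopte() does the same through the
 * alternate APTD slot for a non-current address space.)
 */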
50/*
51 * Major modifications by John S. Dyson primarily to support
52 * pageable page tables, eliminating pmap_attributes,
53 * discontiguous memory pages, and using more efficient string
54 * instructions. Jan 13, 1994.
55 */
56
57/*
58 *	Manages physical address maps.
59 *
60 *	In addition to hardware address maps, this
61 *	module is called upon to provide software-use-only
62 *	maps which may or may not be stored in the same
63 *	form as hardware maps.  These pseudo-maps are
64 *	used to store intermediate results from copy
65 *	operations to and from address spaces.
66 *
67 *	Since the information managed by this module is
68 *	also stored by the logical address mapping module,
69 *	this module may throw away valid virtual-to-physical
70 *	mappings at almost any time.  However, invalidations
71 *	of virtual-to-physical mappings must be done as
72 *	requested.
73 *
74 *	In order to cope with hardware architectures which
75 *	make virtual-to-physical map invalidates expensive,
76 *	this module may delay invalidate or reduced protection
77 *	operations until such time as they are actually
78 *	necessary.  This module is given full information as
79 *	to which processors are currently using which maps,
80 *	and to when physical maps must be made correct.
81 */
82
83#include "param.h"
84#include "systm.h"
85#include "proc.h"
86#include "malloc.h"
87#include "user.h"
88#include "i386/include/cpufunc.h"
89
90#include "vm/vm.h"
91#include "vm/vm_kern.h"
92#include "vm/vm_page.h"
93
94#include "i386/isa/isa.h"
95
96/*
97 * Allocate various and sundry SYSMAPs used in the days of old VM
98 * and not yet converted.  XXX.
99 */
100#define BSDVM_COMPAT	1
101
102#ifdef DEBUG
103struct {
104	int kernel;	/* entering kernel mapping */
105	int user;	/* entering user mapping */
106	int ptpneeded;	/* needed to allocate a PT page */
107	int pwchange;	/* no mapping change, just wiring or protection */
108	int wchange;	/* no mapping change, just wiring */
109	int mchange;	/* was mapped but mapping to different page */
110	int managed;	/* a managed page */
111	int firstpv;	/* first mapping for this PA */
112	int secondpv;	/* second mapping for this PA */
113	int ci;		/* cache inhibited */
114	int unmanaged;	/* not a managed page */
115	int flushes;	/* cache flushes */
116} enter_stats;
117struct {
118	int calls;
119	int removes;
120	int pvfirst;
121	int pvsearch;
122	int ptinvalid;
123	int uflushes;
124	int sflushes;
125} remove_stats;
126
127int debugmap = 0;
128int pmapdebug = 0 /* 0xffff */;
129#define PDB_FOLLOW	0x0001
130#define PDB_INIT	0x0002
131#define PDB_ENTER	0x0004
132#define PDB_REMOVE	0x0008
133#define PDB_CREATE	0x0010
134#define PDB_PTPAGE	0x0020
135#define PDB_CACHE	0x0040
136#define PDB_BITS	0x0080
137#define PDB_COLLECT	0x0100
138#define PDB_PROTECT	0x0200
139#define PDB_PDRTAB	0x0400
140#define PDB_PARANOIA	0x2000
141#define PDB_WIRING	0x4000
142#define PDB_PVDUMP	0x8000
143
144int pmapvacflush = 0;
145#define	PVF_ENTER	0x01
146#define	PVF_REMOVE	0x02
147#define	PVF_PROTECT	0x04
148#define	PVF_TOTAL	0x80
149#endif
150
151/*
152 * Get PDEs and PTEs for user/kernel address space
153 */
154#define	pmap_pde(m, v)	(&((m)->pm_pdir[((vm_offset_t)(v) >> PD_SHIFT)&1023]))
155#define pdir_pde(m, v) (m[((vm_offset_t)(v) >> PD_SHIFT)&1023])
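/*
 * pmap_pde() indexes the 1024-entry page directory by the top ten bits of
 * the virtual address (PD_SHIFT is 22 on the i386, so each PDE covers a
 * 4MB region).
 */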
156
157#define pmap_pte_pa(pte)	(*(int *)(pte) & PG_FRAME)
158
159#define pmap_pde_v(pte)		((pte)->pd_v)
160#define pmap_pte_w(pte)		((pte)->pg_w)
161/* #define pmap_pte_ci(pte)	((pte)->pg_ci) */
162#define pmap_pte_m(pte)		((pte)->pg_m)
163#define pmap_pte_u(pte)		((pte)->pg_u)
164#define pmap_pte_v(pte)		((pte)->pg_v)
165#define pmap_pte_set_w(pte, v)		((pte)->pg_w = (v))
166#define pmap_pte_set_prot(pte, v)	((pte)->pg_prot = (v))
167
168/*
169 * Given a map and a machine independent protection code,
170 * convert to an i386 protection code.
171 */
172#define pte_prot(m, p)	(protection_codes[p])
173int	protection_codes[8];
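/* indexed by a VM_PROT_{READ,WRITE,EXECUTE} combination; filled in by i386_protection_init() */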
174
175struct pmap	kernel_pmap_store;
176pmap_t		kernel_pmap;
177
178vm_offset_t	phys_avail[6];	/* 2 (start, end) regions + a null pair to end the list */
179vm_offset_t    	avail_start;	/* PA of first available physical page */
180vm_offset_t	avail_end;	/* PA of last available physical page */
181vm_size_t	mem_size;	/* memory size in bytes */
182vm_offset_t	virtual_avail;  /* VA of first avail page (after kernel bss)*/
183vm_offset_t	virtual_end;	/* VA of last avail page (end of kernel AS) */
184int		i386pagesperpage;	/* PAGE_SIZE / I386_PAGE_SIZE */
185boolean_t	pmap_initialized = FALSE;	/* Has pmap_init completed? */
186vm_offset_t	vm_first_phys, vm_last_phys;
187
188static inline boolean_t pmap_testbit();
189static inline void pmap_changebit();
190static inline int pmap_is_managed();
191static inline void *vm_get_pmap();
192static inline void vm_put_pmap();
193static inline void pmap_use_pt();
194inline struct pte *pmap_pte();
195static inline pv_entry_t get_pv_entry();
196void pmap_alloc_pv_entry();
197void		pmap_clear_modify();
198void		i386_protection_init();
199
200#if BSDVM_COMPAT
201#include "msgbuf.h"
202
203/*
204 * All those kernel PT submaps that BSD is so fond of
205 */
206struct pte	*CMAP1, *CMAP2, *mmap;
207caddr_t		CADDR1, CADDR2, vmmap;
208struct pte	*msgbufmap;
209struct msgbuf	*msgbufp;
210#endif
211
212struct vm_map *pmap_fmap(pmap_t pmap);
213void init_pv_entries(int);
214
215/*
216 *	Routine:	pmap_pte
217 *	Function:
218 *		Extract the page table entry associated
219 *		with the given map/virtual_address pair.
220 * [ what about induced faults -wfj]
221 */
222
223inline struct pte *
224pmap_pte(pmap, va)
225	register pmap_t	pmap;
226	vm_offset_t va;
227{
228
229	if (pmap && pmap_pde_v(pmap_pde(pmap, va))) {
230		/* are we current address space or kernel? */
231		if (pmap->pm_pdir[PTDPTDI].pd_pfnum == PTDpde.pd_pfnum
232			|| pmap == kernel_pmap)
233			return ((struct pte *) vtopte(va));
234
235		/* otherwise, we are alternate address space */
236		else {
237			if (pmap->pm_pdir[PTDPTDI].pd_pfnum
238				!= APTDpde.pd_pfnum) {
239				APTDpde = pmap->pm_pdir[PTDPTDI];
240				tlbflush();
241			}
242			return((struct pte *) avtopte(va));
243		}
244	}
245	return(0);
246}
247
248/*
249 *	Routine:	pmap_extract
250 *	Function:
251 *		Extract the physical page address associated
252 *		with the given map/virtual_address pair.
253 */
254
255vm_offset_t
256pmap_extract(pmap, va)
257	register pmap_t	pmap;
258	vm_offset_t va;
259{
260	struct pde save;
261	vm_offset_t pa;
262	int s;
263
264	s = splhigh();
265	if (pmap && pmap_pde_v(pmap_pde(pmap, va))) {
266		/* are we current address space or kernel? */
267		if (pmap->pm_pdir[PTDPTDI].pd_pfnum == PTDpde.pd_pfnum
268			|| pmap == kernel_pmap) {
269			pa = *(int *) vtopte(va);
270		/* otherwise, we are alternate address space */
271		} else {
272			if (pmap->pm_pdir[PTDPTDI].pd_pfnum
273				!= APTDpde.pd_pfnum) {
274				save = APTDpde;
275				APTDpde = pmap->pm_pdir[PTDPTDI];
276				tlbflush();
277				pa = *(int *) avtopte(va);
278				APTDpde = save;
279				tlbflush();
280			} else {
281				tlbflush();
282				pa = *(int *) avtopte(va);
283			}
284		}
285		pa = (pa & PG_FRAME) | (va & ~PG_FRAME);
286		splx(s);
287		return pa;
288	}
289	splx(s);
290	return 0;
291
292}
293
294static inline int
295pmap_is_managed(pa)
296	vm_offset_t pa;
297{
298	int i;
299
300	if (!pmap_initialized)
301		return 0;
302
303	for (i = 0; phys_avail[i + 1]; i += 2) {
304		if (pa >= phys_avail[i] && pa < phys_avail[i + 1])
305			return 1;
306	}
307	return 0;
308}
309
310/*
311 * wire/unwire the page table page that maps the given virtual address
312 */
313static inline void
314pmap_use_pt(pmap, va, use)
315	pmap_t pmap;
316	vm_offset_t va;
317	int use;
318{
319	vm_offset_t pt, pa;
320	pv_entry_t pv;
321	vm_page_t m;
322
323	if (va >= VM_MAX_ADDRESS)
324		return;
325
326	pt = i386_trunc_page(vtopte(va));
327	pa = pmap_extract(pmap, pt);
328	if (pa == 0) {
329		printf("Warning pmap_use_pt pte paging failure\n");
330	}
331	if (!pa || !pmap_is_managed(pa))
332		return;
333	pv = pa_to_pvh(pa);
334
335	m = PHYS_TO_VM_PAGE(pa);
336	if (use) {
337		vm_page_wire(m);
338	} else {
339		vm_page_unwire(m);
340	}
341}
342
343/* [ macro again?, should I force kstack into user map here? -wfj ] */
344void
345pmap_activate(pmap, pcbp)
346	register pmap_t pmap;
347	struct pcb *pcbp;
348{
349	PMAP_ACTIVATE(pmap, pcbp);
350}
351
352/*
353 *	Bootstrap the system enough to run with virtual memory.
354 *	Map the kernel's code and data, and allocate the system page table.
355 *
356 *	On the I386 this is called after mapping has already been enabled
357 *	and just syncs the pmap module with what has already been done.
358 *	[We can't call it easily with mapping off since the kernel is not
359 *	mapped with PA == VA, hence we would have to relocate every address
360 *	from the linked base (virtual) address "KERNBASE" to the actual
361 *	(physical) address starting relative to 0]
362 */
363
364#define DMAPAGES 8
365void
366pmap_bootstrap(firstaddr, loadaddr)
367	vm_offset_t firstaddr;
368	vm_offset_t loadaddr;
369{
370#if BSDVM_COMPAT
371	vm_offset_t va;
372	struct pte *pte;
373#endif
374	extern int IdlePTD;
375
376	avail_start = firstaddr + DMAPAGES*NBPG;
377
378	virtual_avail = (vm_offset_t) KERNBASE + avail_start;
379	virtual_end = VM_MAX_KERNEL_ADDRESS;
380	i386pagesperpage = PAGE_SIZE / NBPG;
381
382	/*
383	 * Initialize protection array.
384	 */
385	i386_protection_init();
386
387	/*
388	 * The kernel's pmap is statically allocated so we don't
389	 * have to use pmap_create, which is unlikely to work
390	 * correctly at this part of the boot sequence.
391	 */
392	kernel_pmap = &kernel_pmap_store;
393
394	kernel_pmap->pm_pdir = (pd_entry_t *)(KERNBASE + IdlePTD);
395
396	simple_lock_init(&kernel_pmap->pm_lock);
397	kernel_pmap->pm_count = 1;
398
399#if BSDVM_COMPAT
400	/*
401	 * Allocate all the submaps we need
402	 */
403#define	SYSMAP(c, p, v, n)	\
404	v = (c)va; va += ((n)*NBPG); p = pte; pte += (n);
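/*
 * For example, SYSMAP(caddr_t, CMAP1, CADDR1, 1) expands to roughly
 *	CADDR1 = (caddr_t)va; va += NBPG; CMAP1 = pte; pte += 1;
 * i.e. it hands out one page of kernel VA and remembers the pte that maps it.
 */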
405
406	va = virtual_avail;
407	pte = pmap_pte(kernel_pmap, va);
408
409	SYSMAP(caddr_t		,CMAP1		,CADDR1	   ,1		)
410	SYSMAP(caddr_t		,CMAP2		,CADDR2	   ,1		)
411	SYSMAP(caddr_t		,mmap		,vmmap	   ,1		)
412	SYSMAP(struct msgbuf *	,msgbufmap	,msgbufp   ,1		)
413	virtual_avail = va;
414#endif
415	/*
416	 * reserve special hunk of memory for use by bus dma as a bounce
417	 * buffer (contiguous virtual *and* physical memory). for now,
418	 * assume vm does not use memory beneath hole, and we know that
419	 * the bootstrap uses top 32k of base memory. -wfj
420	 */
421	{
422		extern vm_offset_t isaphysmem;
423		isaphysmem = va;
424
425		virtual_avail = pmap_map(va, firstaddr,
426				firstaddr + DMAPAGES*NBPG, VM_PROT_ALL);
427	}
428
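	/*
	 * Clear page directory entry 0 (presumably the low mapping left over
	 * from locore) and flush the TLB.
	 */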
429	*(int *)PTD = 0;
430	tlbflush();
431
432}
433
434/*
435 *	Initialize the pmap module.
436 *	Called by vm_init, to initialize any structures that the pmap
437 *	system needs to map virtual memory.
438 */
439void
440pmap_init(phys_start, phys_end)
441	vm_offset_t	phys_start, phys_end;
442{
443	vm_offset_t	addr, addr2;
444	vm_size_t	npg, s;
445	int		rv;
446	int i;
447	extern int KPTphys;
448	extern int IdlePTD;
449
450	/*
451	 * Now that kernel map has been allocated, we can mark as
452	 * unavailable regions which we have mapped in locore.
453	 */
454	addr = atdevbase;
455	(void) vm_map_find(kernel_map, NULL, (vm_offset_t) 0,
456			   &addr, (0x100000-0xa0000), FALSE);
457
458	addr = (vm_offset_t) KERNBASE + IdlePTD;
459	vm_object_reference(kernel_object);
460	(void) vm_map_find(kernel_map, kernel_object, addr,
461			   &addr, (4 + NKPT) * NBPG, FALSE);
462
463
464	/*
465	 * calculate the number of pv_entries needed
466	 */
467	vm_first_phys = phys_avail[0];
468	for (i = 0; phys_avail[i + 1]; i += 2) ;
469	npg = (phys_avail[(i - 2) + 1] - vm_first_phys) / NBPG;
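	/* npg now spans from the first managed page to the end of the last phys_avail range */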
470
471	/*
472	 * Allocate memory for random pmap data structures.  Includes the
473	 * pv_head_table.
474	 */
475	s = (vm_size_t) (sizeof(struct pv_entry) * npg);
476	s = i386_round_page(s);
477	addr = (vm_offset_t) kmem_alloc(kernel_map, s);
478	pv_table = (pv_entry_t) addr;
479
480	/*
481	 * init the pv free list
482	 */
483	init_pv_entries(npg);
484	/*
485	 * Now it is safe to enable pv_table recording.
486	 */
487	pmap_initialized = TRUE;
488}
489
490/*
491 *	Used to map a range of physical addresses into kernel
492 *	virtual address space.
493 *
494 *	For now, VM is already on, we only need to map the
495 *	specified memory.
496 */
497vm_offset_t
498pmap_map(virt, start, end, prot)
499	vm_offset_t	virt;
500	vm_offset_t	start;
501	vm_offset_t	end;
502	int		prot;
503{
504	while (start < end) {
505		pmap_enter(kernel_pmap, virt, start, prot, FALSE);
506		virt += PAGE_SIZE;
507		start += PAGE_SIZE;
508	}
509	return(virt);
510}
511
512/*
513 *	Create and return a physical map.
514 *
515 *	If the size specified for the map
516 *	is zero, the map is an actual physical
517 *	map, and may be referenced by the
518 *	hardware.
519 *
520 *	If the size specified is non-zero,
521 *	the map will be used in software only, and
522 *	is bounded by that size.
523 *
524 * [ just allocate a ptd and mark it uninitialized -- should we track
525 *   with a table which process has which ptd? -wfj ]
526 */
527
528pmap_t
529pmap_create(size)
530	vm_size_t	size;
531{
532	register pmap_t pmap;
533
534	/*
535	 * Software use map does not need a pmap
536	 */
537	if (size)
538		return(NULL);
539
540	/* XXX: is it ok to wait here? */
541	pmap = (pmap_t) malloc(sizeof *pmap, M_VMPMAP, M_WAITOK);
542#ifdef notifwewait
543	if (pmap == NULL)
544		panic("pmap_create: cannot allocate a pmap");
545#endif
546	bzero(pmap, sizeof(*pmap));
547	pmap_pinit(pmap);
548	return (pmap);
549}
550
551
552struct pmaplist {
553	struct pmaplist *next;
554};
555
556static inline void *
557vm_get_pmap()
558{
559	struct pmaplist *rtval;
560
561	rtval = (struct pmaplist *)kmem_alloc(kernel_map, ctob(1));
562	bzero(rtval, ctob(1));
563	return rtval;
564}
565
566static inline void
567vm_put_pmap(up)
568	struct pmaplist *up;
569{
570	kmem_free(kernel_map, up, ctob(1));
571}
572
573/*
574 * Initialize a preallocated and zeroed pmap structure,
575 * such as one in a vmspace structure.
576 */
577void
578pmap_pinit(pmap)
579	register struct pmap *pmap;
580{
581	/*
582	 * No need to allocate page table space yet but we do need a
583	 * valid page directory table.
584	 */
585	pmap->pm_pdir = (pd_entry_t *) vm_get_pmap();
586
587	/* wire in kernel global address entries */
588	bcopy(PTD+KPTDI, pmap->pm_pdir+KPTDI, NKPT*PTESIZE);
589
590	/* install self-referential address mapping entry */
591	*(int *)(pmap->pm_pdir+PTDPTDI) =
592		((int)pmap_extract(kernel_pmap, (vm_offset_t)pmap->pm_pdir)) | PG_V | PG_KW;
593
594	pmap->pm_count = 1;
595	simple_lock_init(&pmap->pm_lock);
596}
597
598/*
599 *	Retire the given physical map from service.
600 *	Should only be called if the map contains
601 *	no valid mappings.
602 */
603void
604pmap_destroy(pmap)
605	register pmap_t pmap;
606{
607	int count;
608
609	if (pmap == NULL)
610		return;
611
612	simple_lock(&pmap->pm_lock);
613	count = --pmap->pm_count;
614	simple_unlock(&pmap->pm_lock);
615	if (count == 0) {
616		pmap_release(pmap);
617		free((caddr_t)pmap, M_VMPMAP);
618	}
619}
620
621/*
622 * Release any resources held by the given physical map.
623 * Called when a pmap initialized by pmap_pinit is being released.
624 * Should only be called if the map contains no valid mappings.
625 */
626void
627pmap_release(pmap)
628	register struct pmap *pmap;
629{
630	vm_put_pmap((struct pmaplist *) pmap->pm_pdir);
631}
632
633/*
634 *	Add a reference to the specified pmap.
635 */
636void
637pmap_reference(pmap)
638	pmap_t	pmap;
639{
640	if (pmap != NULL) {
641		simple_lock(&pmap->pm_lock);
642		pmap->pm_count++;
643		simple_unlock(&pmap->pm_lock);
644	}
645}
646
647#define PV_FREELIST_MIN ((NBPG / sizeof (struct pv_entry)) / 2)
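/* refill threshold: keep at least half a page worth of pv entries on the free list */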
648
649/*
650 * Data for the pv entry allocation mechanism
651 */
652int pv_freelistcnt;
653pv_entry_t pv_freelist;
654vm_offset_t pvva;
655int npvvapg;
656
657/*
658 * free the pv_entry back to the free list
659 */
660inline static void
661free_pv_entry(pv)
662	pv_entry_t pv;
663{
664	if (!pv) return;
665	++pv_freelistcnt;
666	pv->pv_next = pv_freelist;
667	pv_freelist = pv;
668}
669
670/*
671 * get a new pv_entry, allocating a block from the system
672 * when needed.
673 * the memory allocation is performed bypassing the malloc code
674 * because of the possibility of allocations at interrupt time.
675 */
676static inline pv_entry_t
677get_pv_entry()
678{
679	pv_entry_t tmp;
680
681	/*
682	 * get more pv_entry pages if needed
683	 */
684	while (pv_freelistcnt < PV_FREELIST_MIN || pv_freelist == 0) {
685		pmap_alloc_pv_entry();
686	}
687
688	/*
689	 * get a pv_entry off of the free list
690	 */
691	--pv_freelistcnt;
692	tmp = pv_freelist;
693	pv_freelist = tmp->pv_next;
694	tmp->pv_pmap = 0;
695	tmp->pv_va = 0;
696	tmp->pv_next = 0;
697	return tmp;
698}
699
700void
701pmap_alloc_pv_entry()
702{
703	/*
704	 * do we have any pre-allocated map-pages left?
705	 */
706	if (npvvapg) {
707		vm_page_t m;
708		/*
709		 * temporarily inflate the free count so the pmap_enter() below cannot recurse back into this routine
710		 */
711		pv_freelistcnt += PV_FREELIST_MIN;
712		/*
713		 * allocate a physical page out of the vm system
714		 */
715		if ((m = vm_page_alloc(kernel_object, pvva-vm_map_min(kernel_map)))) {
716			int newentries;
717			int i;
718			pv_entry_t entry;
719			newentries = (NBPG/sizeof (struct pv_entry));
720			/*
721			 * wire the page
722			 */
723			vm_page_wire(m);
724			m->flags &= ~PG_BUSY;
725			/*
726			 * let the kernel see it
727			 */
728			pmap_enter(vm_map_pmap(kernel_map), pvva,
729				VM_PAGE_TO_PHYS(m), VM_PROT_DEFAULT,1);
730
731			entry = (pv_entry_t) pvva;
732			/*
733			 * update the allocation pointers
734			 */
735			pvva += NBPG;
736			--npvvapg;
737
738			/*
739			 * free the entries into the free list
740			 */
741			for (i = 0; i < newentries; i++) {
742				free_pv_entry(entry);
743				entry++;
744			}
745		}
746		pv_freelistcnt -= PV_FREELIST_MIN;
747	}
748	if (!pv_freelist)
749		panic("get_pv_entry: cannot get a pv_entry_t");
750}
751
752
753
754/*
755 * init the pv_entry allocation system
756 */
757#define PVSPERPAGE 16
758void
759init_pv_entries(npg)
760	int npg;
761{
762	/*
763	 * allocate enough kvm space for PVSPERPAGE entries per page (lots)
764	 * kvm space is fairly cheap, be generous!!!  (the system can panic
765	 * if this is too small.)
766	 */
767	npvvapg = ((npg*PVSPERPAGE) * sizeof(struct pv_entry) + NBPG - 1)/NBPG;
768	pvva = kmem_alloc_pageable(kernel_map, npvvapg * NBPG);
769	/*
770	 * get the first batch of entries
771	 */
772	free_pv_entry(get_pv_entry());
773}
774
775static pt_entry_t *
776get_pt_entry(pmap)
777	pmap_t pmap;
778{
779	pt_entry_t *ptp;
780	/* are we current address space or kernel? */
781	if (pmap->pm_pdir[PTDPTDI].pd_pfnum == PTDpde.pd_pfnum
782		|| pmap == kernel_pmap)
783		ptp=PTmap;
784
785	/* otherwise, we are alternate address space */
786	else {
787		if (pmap->pm_pdir[PTDPTDI].pd_pfnum
788			!= APTDpde.pd_pfnum) {
789			APTDpde = pmap->pm_pdir[PTDPTDI];
790			tlbflush();
791		}
792		ptp=APTmap;
793	     }
794	return ptp;
795}
796
797/*
798 * If it is the first entry on the list, it is actually
799 * in the header and we must copy the following entry up
800 * to the header.  Otherwise we must search the list for
801 * the entry.  In either case we free the now unused entry.
802 */
803void
804pmap_remove_entry(pmap, pv, va)
805	struct pmap *pmap;
806	pv_entry_t pv;
807	vm_offset_t va;
808{
809	pv_entry_t npv;
810	int s;
811	int wired;
812	s = splhigh();
813	if (pmap == pv->pv_pmap && va == pv->pv_va) {
814		npv = pv->pv_next;
815		if (npv) {
816			*pv = *npv;
817			free_pv_entry(npv);
818		} else {
819			pv->pv_pmap = NULL;
820		}
821	} else {
822		for (npv = pv->pv_next; npv; npv = npv->pv_next) {
823			if (pmap == npv->pv_pmap && va == npv->pv_va) {
824				break;
825			}
826			pv = npv;
827		}
828		if (npv) {
829			pv->pv_next = npv->pv_next;
830			free_pv_entry(npv);
831		}
832	}
833	splx(s);
834}
835
836/*
837 *	Remove the given range of addresses from the specified map.
838 *
839 *	It is assumed that the start and end are properly
840 *	rounded to the page size.
841 */
842void
843pmap_remove(pmap, sva, eva)
844	struct pmap *pmap;
845	register vm_offset_t sva;
846	register vm_offset_t eva;
847{
848	register pt_entry_t *ptp,*ptq;
849	vm_offset_t pa;
850	register pv_entry_t pv;
851	vm_offset_t asva, va;
852	vm_page_t m;
853	int oldpte;
854
855	if (pmap == NULL)
856		return;
857
858	ptp = get_pt_entry(pmap);
859
860
861	/* this is essential since we must check the PDE(sva) for presence */
862	while (sva <= eva && !pmap_pde_v(pmap_pde(pmap, sva)))
863		sva = (sva & PD_MASK) + (1<<PD_SHIFT);
864	sva = i386_btop(sva);
865	eva = i386_btop(eva);
866
867	for (; sva < eva; sva++) {
868		/*
869		 * Weed out invalid mappings.
870		 * Note: we assume that the page directory table is
871	 	 * always allocated, and in kernel virtual.
872		 */
873
874		if (!pmap_pde_v(pmap_pde(pmap, i386_ptob(sva))))
875			{
876			/* We can race ahead here, straight to next pde.. */
877			sva = sva & ~((NBPG/PTESIZE) - 1);
878			sva = sva + NBPG/PTESIZE - 1;
879			continue;
880			}
881
882		ptq=ptp+sva;
883
884
885		/*
886		 * search for page table entries
887		 */
888		if (!pmap_pte_v(ptq)) {
889			vm_offset_t nscan = ((sva + (NBPG/PTESIZE)) & ~((NBPG/PTESIZE) - 1)) - sva;
890			if ((nscan + sva) > eva)
891				nscan = eva - sva;
892			if (nscan) {
893				int found;
894
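				/*
				 * repe/scasl scans up to nscan ptes for a
				 * non-zero entry; afterwards %edi points just
				 * past the last pte compared and "found" is
				 * non-zero iff that last pte was non-zero.
				 */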
895				asm("xorl %%eax,%%eax;cld;repe;scasl;jz 1f;incl %%eax;1:;"
896					:"=D"(ptq),"=a"(found)
897					:"c"(nscan),"0"(ptq)
898					:"cx");
899
900				if (found)
901					ptq -= 1;
902
903				sva = ptq - ptp;
904			}
905			if (sva >= eva)
906				goto endofloop;
907		}
908
909
910		if (!(sva & 0x3ff)) /* re-check the PDE only at page-directory (1024-pte) boundaries */
911 		    {
912		    	if (!pmap_pde_v(pmap_pde(pmap, i386_ptob(sva)))) {
913			/* We can race ahead here, straight to next pde.. */
914					sva = sva & ~((NBPG/PTESIZE) - 1);
915					sva = sva + NBPG/PTESIZE - 1;
916					continue;
917				}
918		    }
919
920		if (!pmap_pte_v(ptq))
921			continue;
922
923		/*
924		 * Update statistics
925		 */
926		if (pmap_pte_w(ptq))
927			pmap->pm_stats.wired_count--;
928		pmap->pm_stats.resident_count--;
929
930		pa = pmap_pte_pa(ptq);
931		oldpte = *(int *) ptq;
932
933		/*
934		 * Invalidate the PTEs.
935		 * XXX: should cluster them up and invalidate as many
936		 * as possible at once.
937		 */
938		*(int *)ptq = 0;
939
940		/*
941		 * Remove from the PV table (raise IPL since we
942		 * may be called at interrupt time).
943		 */
944		if (!pmap_is_managed(pa))
945			continue;
946
947		va = i386_ptob(sva);
948
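		/*
		 * Propagate a set modified bit to the vm_page (clear PG_CLEAN)
		 * for mappings outside the USRSTACK..UPT_MAX_ADDRESS window;
		 * the U pages themselves are always treated as dirty.
		 */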
949		if (((oldpte & PG_M) && (va < USRSTACK || va > UPT_MAX_ADDRESS))
950			|| (va >= USRSTACK && va < USRSTACK+(UPAGES*NBPG))) {
951			m = PHYS_TO_VM_PAGE(pa);
952			m->flags &= ~PG_CLEAN;
953		}
954
955		pv = pa_to_pvh(pa);
956		asva = i386_ptob(sva);
957		pmap_remove_entry(pmap, pv, asva);
958		pmap_use_pt(pmap, asva, 0);
959	}
960endofloop:
961	tlbflush();
962}
963
964/*
965 *	Routine:	pmap_remove_all
966 *	Function:
967 *		Removes this physical page from
968 *		all physical maps in which it resides.
969 *		Reflects back modify bits to the pager.
970 */
971void
972pmap_remove_all(pa)
973	vm_offset_t pa;
974{
975	register pv_entry_t pv, npv;
976	register pt_entry_t *pte, *ptp;
977	vm_offset_t va;
978	struct pmap *pmap;
979	struct map *map;
980	vm_page_t m;
981	int s;
982
983	/*
984	 * Not one of ours
985	 */
986	if (!pmap_is_managed(pa))
987		return;
988
989	pa = i386_trunc_page(pa);
990	pv = pa_to_pvh(pa);
991	m = PHYS_TO_VM_PAGE(pa);
992
993	while (pv->pv_pmap != NULL) {
994		s = splhigh();
995		pmap = pv->pv_pmap;
996		ptp = get_pt_entry(pmap);
997		va = i386_btop(pv->pv_va);
998		pte = ptp + va;
999		if (pmap_pte_w(pte))
1000			pmap->pm_stats.wired_count--;
1001		if (pmap_pte_v(pte))
1002			pmap->pm_stats.resident_count--;
1003
1004		if (((*(int *)pte & PG_M) && (pv->pv_va < USRSTACK || pv->pv_va > UPT_MAX_ADDRESS))
1005			|| (pv->pv_va >= USRSTACK && pv->pv_va < USRSTACK+(UPAGES*NBPG))) {
1006			m->flags &= ~PG_CLEAN;
1007		}
1008
1009		*(int *)pte = 0;
1010		pmap_use_pt(pmap, pv->pv_va, 0);
1011
1012		npv = pv->pv_next;
1013		if (npv) {
1014			*pv = *npv;
1015			free_pv_entry(npv);
1016		} else {
1017			pv->pv_pmap = NULL;
1018		}
1019
1020		splx(s);
1021	}
1022
1023	tlbflush();
1024}
1025
1026
1027/*
1028 *	Set the physical protection on the
1029 *	specified range of this map as requested.
1030 */
1031void
1032pmap_protect(pmap, sva, eva, prot)
1033	register pmap_t	pmap;
1034	vm_offset_t	sva, eva;
1035	vm_prot_t	prot;
1036{
1037	register pt_entry_t *pte;
1038	register vm_offset_t va;
1039	int i386prot;
1040	register pt_entry_t *ptp;
1041	int reqactivate = 0;
1042	int evap = i386_btop(eva);
1043	int s;
1044
1045	if (pmap == NULL)
1046		return;
1047
1048	if ((prot & VM_PROT_READ) == VM_PROT_NONE) {
1049		pmap_remove(pmap, sva, eva);
1050		return;
1051	}
1052	if (prot & VM_PROT_WRITE)
1053		return;
1054
1055	ptp = get_pt_entry(pmap);
1056
1057	for (va = sva; va < eva; va += PAGE_SIZE) {
1058		/*
1059		 * Page table page is not allocated.
1060		 * Skip it, we don't want to force allocation
1061		 * of unnecessary PTE pages just to set the protection.
1062		 */
1063		if (!pmap_pde_v(pmap_pde(pmap, va))) {
1064			/* XXX: avoid address wrap around */
1065			if (va >= i386_trunc_pdr((vm_offset_t)-1))
1066				break;
1067			va = i386_round_pdr(va + PAGE_SIZE) - PAGE_SIZE;
1068			continue;
1069		}
1070
1071		pte = ptp + i386_btop(va);
1072
1073		/*
1074		 * scan for a non-empty pte
1075		 */
1076		{
1077			int found=0;
1078			int svap = pte - ptp;
1079			vm_offset_t nscan =
1080				((svap + (NBPG/PTESIZE)) & ~((NBPG/PTESIZE) - 1)) - svap;
1081			if (nscan + svap > evap)
1082				nscan = evap - svap;
1083			if (nscan) {
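				/*
				 * as in pmap_remove(): repe/scasl scans up to
				 * nscan ptes for a non-zero entry, leaving
				 * "found" non-zero iff the last pte compared
				 * was non-zero.
				 */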
1084				asm("xorl %%eax,%%eax;cld;repe;scasl;jz 1f;incl %%eax;1:;"
1085					:"=D"(pte),"=a"(found)
1086					:"c"(nscan),"0"(pte):"cx");
1087
1088				pte -= 1;
1089				svap = pte - ptp;
1090
1091			}
1092			if (svap >= evap)
1093				goto endofloop;
1094			va = i386_ptob(svap);
1095			if (!found)
1096				continue;
1097		}
1098
1099
1100		/*
1101		 * Page not valid.  Again, skip it.
1102		 * Should we do this?  Or set protection anyway?
1103		 */
1104		if (!pmap_pte_v(pte))
1105			continue;
1106
1107		i386prot = pte_prot(pmap, prot);
1108		if (va < UPT_MAX_ADDRESS)
1109			i386prot |= PG_RW /*PG_u*/;
1110		if (i386prot != pte->pg_prot) {
1111			reqactivate = 1;
1112			pmap_pte_set_prot(pte, i386prot);
1113		}
1114	}
1115endofloop:
1116	tlbflush();
1117}
1118
1119/*
1120 *	Insert the given physical page (p) at
1121 *	the specified virtual address (v) in the
1122 *	target physical map with the protection requested.
1123 *
1124 *	If specified, the page will be wired down, meaning
1125 *	that the related pte can not be reclaimed.
1126 *
1127 *	NB:  This is the only routine which MAY NOT lazy-evaluate
1128 *	or lose information.  That is, this routine must actually
1129 *	insert this page into the given map NOW.
1130 */
1131void
1132pmap_enter(pmap, va, pa, prot, wired)
1133	register pmap_t pmap;
1134	vm_offset_t va;
1135	register vm_offset_t pa;
1136	vm_prot_t prot;
1137	boolean_t wired;
1138{
1139	register pt_entry_t *pte;
1140	register int npte;
1141	vm_offset_t opa;
1142	boolean_t cacheable = TRUE;
1143	boolean_t checkpv = TRUE;
1144
1145	if (pmap == NULL)
1146		return;
1147
1148	va = i386_trunc_page(va);
1149	pa = i386_trunc_page(pa);
1150	if (va > VM_MAX_KERNEL_ADDRESS) panic("pmap_enter: toobig");
1151
1152	/*
1153	 * Page Directory table entry not valid, we need a new PT page
1154	 */
1155	if (!pmap_pde_v(pmap_pde(pmap, va))) {
1156		pg("ptdi %x, va %x", pmap->pm_pdir[PTDPTDI], va);
1157	}
1158
1159	pte = pmap_pte(pmap, va);
1160	opa = pmap_pte_pa(pte);
1161
1162	/*
1163	 * Mapping has not changed, must be protection or wiring change.
1164	 */
1165	if (opa == pa) {
1166		/*
1167		 * Wiring change, just update stats.
1168		 * We don't worry about wiring PT pages as they remain
1169		 * resident as long as there are valid mappings in them.
1170		 * Hence, if a user page is wired, the PT page will be also.
1171		 */
1172		if ((wired && !pmap_pte_w(pte)) || (!wired && pmap_pte_w(pte))) {
1173			if (wired)
1174				pmap->pm_stats.wired_count++;
1175			else
1176				pmap->pm_stats.wired_count--;
1177		}
1178		goto validate;
1179	}
1180
1181	/*
1182	 * Mapping has changed, invalidate old range and fall through to
1183	 * handle validating new mapping.
1184	 */
1185	if (opa) {
1186		pmap_remove(pmap, va, va + PAGE_SIZE);
1187	}
1188
1189	/*
1190	 * Enter on the PV list if part of our managed memory
1191	 * Note that we raise IPL while manipulating pv_table
1192	 * since pmap_enter can be called at interrupt time.
1193	 */
1194	if (pmap_is_managed(pa)) {
1195		register pv_entry_t pv, npv;
1196		int s;
1197
1198		pv = pa_to_pvh(pa);
1199		s = splhigh();
1200		/*
1201		 * No entries yet, use header as the first entry
1202		 */
1203		if (pv->pv_pmap == NULL) {
1204			pv->pv_va = va;
1205			pv->pv_pmap = pmap;
1206			pv->pv_next = NULL;
1207		}
1208		/*
1209		 * There is at least one other VA mapping this page.
1210		 * Place this entry after the header.
1211		 */
1212		else {
1213			npv = get_pv_entry();
1214			npv->pv_va = va;
1215			npv->pv_pmap = pmap;
1216			npv->pv_next = pv->pv_next;
1217			pv->pv_next = npv;
1218		}
1219		splx(s);
1220	}
1221
1222	pmap_use_pt(pmap, va, 1);
1223
1224	/*
1225	 * Assumption: if it is not part of our managed memory
1226	 * then it must be device memory, which may be volatile.
1227	 */
1228	if (pmap_initialized) {
1229		checkpv = cacheable = FALSE;
1230	}
1231
1232	/*
1233	 * Increment counters
1234	 */
1235	pmap->pm_stats.resident_count++;
1236	if (wired)
1237		pmap->pm_stats.wired_count++;
1238
1239validate:
1240	/*
1241	 * Now validate mapping with desired protection/wiring.
1242	 */
1243	npte = (pa & PG_FRAME) | pte_prot(pmap, prot) | PG_V;
1244
1245	/*
1246	 * When forking (copy-on-write, etc):
1247	 * A process will turn off write permissions for any of its writable
1248	 * pages.  If the data (object) is only referred to by one process, the
1249	 * process's map is modified directly as opposed to using the
1250	 * object manipulation routine.  When using pmap_protect, the
1251	 * modified bits are not kept in the vm_page_t data structure.
1252	 * Therefore, when using pmap_enter in vm_fault to bring back
1253	 * writability of a page, there has been no memory of the
1254	 * modified or referenced bits except at the pte level.
1255	 * This clause supports the carryover of the modified and
1256	 * used (referenced) bits.
1257	 */
1258	if (pa == opa)
1259		npte |= *(int *)pte & (PG_M|PG_U);
1260
1261	if (wired)
1262		npte |= PG_W;
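	/*
	 * Mappings below the page-table window are user pages (PG_u); entries
	 * within the page-table window also get PG_u and are forced read/write,
	 * presumably so the recursively mapped page tables can always be
	 * updated through that window.
	 */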
1263	if (va < UPT_MIN_ADDRESS)
1264		npte |= PG_u;
1265	else if (va < UPT_MAX_ADDRESS)
1266		npte |= PG_u | PG_RW;
1267
1268	if (npte != *(int *)pte) {
1269		*(int *)pte = npte;
1270		tlbflush();
1271	}
1272}
1273
1274/*
1275 *      pmap_page_protect:
1276 *
1277 *      Lower the permission for all mappings to a given page.
1278 */
1279void
1280pmap_page_protect(phys, prot)
1281        vm_offset_t     phys;
1282        vm_prot_t       prot;
1283{
1284	void pmap_copy_on_write();
1285        switch (prot) {
1286        case VM_PROT_READ:
1287        case VM_PROT_READ|VM_PROT_EXECUTE:
1288                pmap_copy_on_write(phys);
1289                break;
1290        case VM_PROT_ALL:
1291                break;
1292        default:
1293                pmap_remove_all(phys);
1294                break;
1295        }
1296}
1297
1298/*
1299 *	Routine:	pmap_change_wiring
1300 *	Function:	Change the wiring attribute for a map/virtual-address
1301 *			pair.
1302 *	In/out conditions:
1303 *			The mapping must already exist in the pmap.
1304 */
1305void
1306pmap_change_wiring(pmap, va, wired)
1307	register pmap_t	pmap;
1308	vm_offset_t	va;
1309	boolean_t	wired;
1310{
1311	register pt_entry_t *pte;
1312
1313	if (pmap == NULL)
1314		return;
1315
1316	pte = pmap_pte(pmap, va);
1317	if ((wired && !pmap_pte_w(pte)) || (!wired && pmap_pte_w(pte))) {
1318		if (wired)
1319			pmap->pm_stats.wired_count++;
1320		else
1321			pmap->pm_stats.wired_count--;
1322	}
1323	/*
1324	 * Wiring is not a hardware characteristic so there is no need
1325	 * to invalidate TLB.
1326	 */
1327	pmap_pte_set_w(pte, wired);
1328	/*
1329 	 * When unwiring, set the modified bit in the pte -- could have
1330	 * been changed by the kernel
1331 	 */
1332	if (!wired)
1333		pmap_pte_m(pte) = 1;
1334}
1335
1336
1337
1338/*
1339 *	Copy the range specified by src_addr/len
1340 *	from the source map to the range dst_addr/len
1341 *	in the destination map.
1342 *
1343 *	This routine is only advisory and need not do anything.
1344 */
1345void
1346pmap_copy(dst_pmap, src_pmap, dst_addr, len, src_addr)
1347	pmap_t		dst_pmap;
1348	pmap_t		src_pmap;
1349	vm_offset_t	dst_addr;
1350	vm_size_t	len;
1351	vm_offset_t	src_addr;
1352{
1353}
1354
1355/*
1356 *	Require that all active physical maps contain no
1357 *	incorrect entries NOW.  [This update includes
1358 *	forcing updates of any address map caching.]
1359 *
1360 *	Generally used to ensure that a thread about
1361 *	to run will see a semantically correct world.
1362 */
1363void
1364pmap_update()
1365{
1366	tlbflush();
1367}
1368
1369/*
1370 *	Routine:	pmap_kernel
1371 *	Function:
1372 *		Returns the physical map handle for the kernel.
1373 */
1374pmap_t
1375pmap_kernel()
1376{
1377    	return (kernel_pmap);
1378}
1379
1380/*
1381 *	pmap_zero_page zeros the specified (machine independent)
1382 *	page by mapping the page into virtual memory and using
1383 *	bzero to clear its contents, one machine dependent page
1384 *	at a time.
1385 */
1386void
1387pmap_zero_page(phys)
1388	vm_offset_t phys;
1389{
1390	*(int *)CMAP2 = PG_V | PG_KW | i386_trunc_page(phys);
1391	tlbflush();
1392	bzero(CADDR2,NBPG);
1393}
1394
1395/*
1396 *	pmap_copy_page copies the specified (machine independent)
1397 *	page by mapping the page into virtual memory and using
1398 *	bcopy to copy the page, one machine dependent page at a
1399 *	time.
1400 */
1401void
1402pmap_copy_page(src, dst)
1403	vm_offset_t src;
1404	vm_offset_t dst;
1405{
1406	*(int *)CMAP1 = PG_V | PG_KW | i386_trunc_page(src);
1407	*(int *)CMAP2 = PG_V | PG_KW | i386_trunc_page(dst);
1408	tlbflush();
1409
1410#if __GNUC__ > 1
1411	memcpy(CADDR2, CADDR1, NBPG);
1412#else
1413	bcopy(CADDR1, CADDR2, NBPG);
1414#endif
1415}
1416
1417
1418/*
1419 *	Routine:	pmap_pageable
1420 *	Function:
1421 *		Make the specified pages (by pmap, offset)
1422 *		pageable (or not) as requested.
1423 *
1424 *		A page which is not pageable may not take
1425 *		a fault; therefore, its page table entry
1426 *		must remain valid for the duration.
1427 *
1428 *		This routine is merely advisory; pmap_enter
1429 *		will specify that these pages are to be wired
1430 *		down (or not) as appropriate.
1431 */
1432void
1433pmap_pageable(pmap, sva, eva, pageable)
1434	pmap_t		pmap;
1435	vm_offset_t	sva, eva;
1436	boolean_t	pageable;
1437{
1438}
1439
1440boolean_t
1441pmap_page_exists(pmap, pa)
1442	pmap_t pmap;
1443	vm_offset_t pa;
1444{
1445	register pv_entry_t pv;
1446	register int *pte;
1447	int s;
1448
1449	if (!pmap_is_managed(pa))
1450		return FALSE;
1451
1452	pv = pa_to_pvh(pa);
1453	s = splhigh();
1454
1455	/*
1456	 * Not found, check current mappings returning
1457	 * immediately if found.
1458	 */
1459	if (pv->pv_pmap != NULL) {
1460		for (; pv; pv = pv->pv_next) {
1461			if (pv->pv_pmap == pmap) {
1462				splx(s);
1463				return TRUE;
1464			}
1465		}
1466	}
1467	splx(s);
1468	return(FALSE);
1469}
1470
1471static inline boolean_t
1472pmap_testbit(pa, bit)
1473	register vm_offset_t pa;
1474	int bit;
1475{
1476	register pv_entry_t pv;
1477	register int *pte;
1478	int s;
1479
1480	if (!pmap_is_managed(pa))
1481		return FALSE;
1482
1483	pv = pa_to_pvh(pa);
1484	s = splhigh();
1485
1486	/*
1487	 * Not found, check current mappings returning
1488	 * immediately if found.
1489	 */
1490	if (pv->pv_pmap != NULL) {
1491		for (; pv; pv = pv->pv_next) {
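			/*
			 * Modified-bit queries are special-cased near the top
			 * of the user address space: the U pages are always
			 * reported modified, while addresses from there up to
			 * the page-table region yield an immediate FALSE.
			 */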
1492			if (bit & PG_M ) {
1493				if (pv->pv_va >= USRSTACK) {
1494					if (pv->pv_va < USRSTACK+(UPAGES*NBPG)) {
1495						splx(s);
1496						return TRUE;
1497					}
1498					else if (pv->pv_va < UPT_MAX_ADDRESS) {
1499						splx(s);
1500						return FALSE;
1501					}
1502				}
1503			}
1504			pte = (int *) pmap_pte(pv->pv_pmap, pv->pv_va);
1505			if (*pte & bit) {
1506				splx(s);
1507				return TRUE;
1508			}
1509		}
1510	}
1511	splx(s);
1512	return(FALSE);
1513}
1514
1515static inline void
1516pmap_changebit(pa, bit, setem)
1517	vm_offset_t pa;
1518	int bit;
1519	boolean_t setem;
1520{
1521	register pv_entry_t pv;
1522	register int *pte, npte;
1523	vm_offset_t va;
1524	int s;
1525	int reqactivate = 0;
1526
1527	if (!pmap_is_managed(pa))
1528		return;
1529
1530	pv = pa_to_pvh(pa);
1531	s = splhigh();
1532
1533	/*
1534	 * Loop over all current mappings setting/clearing as appropriate
1535	 * If setting RO do we need to clear the VAC?
1536	 */
1537	if (pv->pv_pmap != NULL) {
1538		for (; pv; pv = pv->pv_next) {
1539			va = pv->pv_va;
1540
1541                        /*
1542                         * XXX don't write protect pager mappings
1543                         */
1544                        if (bit == PG_RO) {
1545                                extern vm_offset_t pager_sva, pager_eva;
1546
1547                                if (va >= pager_sva && va < pager_eva)
1548                                        continue;
1549                        }
1550
1551			pte = (int *) pmap_pte(pv->pv_pmap, va);
1552			if (setem)
1553				npte = *pte | bit;
1554			else
1555				npte = *pte & ~bit;
1556			if (*pte != npte) {
1557				*pte = npte;
1558				tlbflush();
1559			}
1560		}
1561	}
1562	splx(s);
1563}
1564
1565/*
1566 *	Clear the modify bits on the specified physical page.
1567 */
1568
1569void
1570pmap_clear_modify(pa)
1571	vm_offset_t	pa;
1572{
1573	pmap_changebit(pa, PG_M, FALSE);
1574}
1575
1576/*
1577 *	pmap_clear_reference:
1578 *
1579 *	Clear the reference bit on the specified physical page.
1580 */
1581
1582void
1583pmap_clear_reference(pa)
1584	vm_offset_t	pa;
1585{
1586	pmap_changebit(pa, PG_U, FALSE);
1587}
1588
1589/*
1590 *	pmap_is_referenced:
1591 *
1592 *	Return whether or not the specified physical page is referenced
1593 *	by any physical maps.
1594 */
1595
1596boolean_t
1597pmap_is_referenced(pa)
1598	vm_offset_t	pa;
1599{
1600	return(pmap_testbit(pa, PG_U));
1601}
1602
1603/*
1604 *	pmap_is_modified:
1605 *
1606 *	Return whether or not the specified physical page is modified
1607 *	by any physical maps.
1608 */
1609
1610boolean_t
1611pmap_is_modified(pa)
1612	vm_offset_t	pa;
1613{
1614
1615	return(pmap_testbit(pa, PG_M));
1616}
1617
1618/*
1619 *	Routine:	pmap_copy_on_write
1620 *	Function:
1621 *		Remove write privileges from all
1622 *		physical maps for this physical page.
1623 */
1624void
1625pmap_copy_on_write(pa)
1626	vm_offset_t pa;
1627{
1628	pmap_changebit(pa, PG_RO, TRUE);
1629}
1630
1631
1632vm_offset_t
1633pmap_phys_address(ppn)
1634	int ppn;
1635{
1636	return(i386_ptob(ppn));
1637}
1638
1639/*
1640 * Miscellaneous support routines follow
1641 */
1642
1643void
1644i386_protection_init()
1645{
1646	register int *kp, prot;
1647
1648	kp = protection_codes;
1649	for (prot = 0; prot < 8; prot++) {
1650		switch (prot) {
1651		case VM_PROT_NONE | VM_PROT_NONE | VM_PROT_NONE:
1652			*kp++ = 0;
1653			break;
1654		case VM_PROT_READ | VM_PROT_NONE | VM_PROT_NONE:
1655		case VM_PROT_READ | VM_PROT_NONE | VM_PROT_EXECUTE:
1656		case VM_PROT_NONE | VM_PROT_NONE | VM_PROT_EXECUTE:
1657			*kp++ = PG_RO;
1658			break;
1659		case VM_PROT_NONE | VM_PROT_WRITE | VM_PROT_NONE:
1660		case VM_PROT_NONE | VM_PROT_WRITE | VM_PROT_EXECUTE:
1661		case VM_PROT_READ | VM_PROT_WRITE | VM_PROT_NONE:
1662		case VM_PROT_READ | VM_PROT_WRITE | VM_PROT_EXECUTE:
1663			*kp++ = PG_RW;
1664			break;
1665		}
1666	}
1667}
1668
1669#ifdef DEBUG
1670void
1671pmap_pvdump(pa)
1672	vm_offset_t pa;
1673{
1674	register pv_entry_t pv;
1675
1676	printf("pa %x", pa);
1677	for (pv = pa_to_pvh(pa); pv; pv = pv->pv_next) {
1678		printf(" -> pmap %x, va %x, flags %x",
1679		       pv->pv_pmap, pv->pv_va, pv->pv_flags);
1680		pads(pv->pv_pmap);
1681	}
1682	printf(" ");
1683}
1684
1685/* print address space of pmap*/
1686void
1687pads(pm)
1688	pmap_t pm;
1689{
1690	unsigned va, i, j;
1691	struct pte *ptep;
1692
1693	if (pm == kernel_pmap) return;
1694	for (i = 0; i < 1024; i++)
1695		if (pm->pm_pdir[i].pd_v)
1696			for (j = 0; j < 1024 ; j++) {
1697				va = (i<<PD_SHIFT)+(j<<PG_SHIFT);
1698				if (pm == kernel_pmap && va < KERNBASE)
1699						continue;
1700				if (pm != kernel_pmap && va > UPT_MAX_ADDRESS)
1701						continue;
1702				ptep = pmap_pte(pm, va);
1703				if (pmap_pte_v(ptep))
1704					printf("%x:%x ", va, *(int *)ptep);
1705			}
1706
1707}
1708#endif
1709