1/*
2 * Copyright (c) 1991 Regents of the University of California.
3 * All rights reserved.
4 * Copyright (c) 1994 John S. Dyson
5 * All rights reserved.
6 * Copyright (c) 1994 David Greenman
7 * All rights reserved.
8 *
9 * This code is derived from software contributed to Berkeley by
10 * the Systems Programming Group of the University of Utah Computer
11 * Science Department and William Jolitz of UUNET Technologies Inc.
12 *
13 * Redistribution and use in source and binary forms, with or without
14 * modification, are permitted provided that the following conditions
15 * are met:
16 * 1. Redistributions of source code must retain the above copyright
17 *    notice, this list of conditions and the following disclaimer.
18 * 2. Redistributions in binary form must reproduce the above copyright
19 *    notice, this list of conditions and the following disclaimer in the
20 *    documentation and/or other materials provided with the distribution.
21 * 3. All advertising materials mentioning features or use of this software
22 *    must display the following acknowledgement:
23 *	This product includes software developed by the University of
24 *	California, Berkeley and its contributors.
25 * 4. Neither the name of the University nor the names of its contributors
26 *    may be used to endorse or promote products derived from this software
27 *    without specific prior written permission.
28 *
29 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
30 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
31 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
32 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
33 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
34 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
35 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
36 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
37 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
38 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
39 * SUCH DAMAGE.
40 *
41 *	from:	@(#)pmap.c	7.7 (Berkeley)	5/12/91
42 *	$Id: pmap.c,v 1.88 1996/05/02 22:24:58 phk Exp $
43 */
44
45/*
46 * Derived from hp300 version by Mike Hibler, this version by William
47 * Jolitz uses a recursive map [a pde points to the page directory] to
48 * map the page tables using the pagetables themselves. This is done to
49 * reduce the impact on kernel virtual memory for lots of sparse address
50 * space, and to reduce the cost of memory to each process.
51 *
52 *	Derived from: hp300/@(#)pmap.c	7.1 (Berkeley) 12/5/90
53 */
54
55/*
56 *	Manages physical address maps.
57 *
58 *	In addition to hardware address maps, this
59 *	module is called upon to provide software-use-only
60 *	maps which may or may not be stored in the same
61 *	form as hardware maps.  These pseudo-maps are
62 *	used to store intermediate results from copy
63 *	operations to and from address spaces.
64 *
65 *	Since the information managed by this module is
66 *	also stored by the logical address mapping module,
67 *	this module may throw away valid virtual-to-physical
68 *	mappings at almost any time.  However, invalidations
69 *	of virtual-to-physical mappings must be done as
70 *	requested.
71 *
72 *	In order to cope with hardware architectures which
73 *	make virtual-to-physical map invalidates expensive,
74 *	this module may delay invalidate or reduced protection
75 *	operations until such time as they are actually
76 *	necessary.  This module is given full information as
77 *	to which processors are currently using which maps,
78 *	and to when physical maps must be made correct.
79 */
80
81#include <sys/param.h>
82#include <sys/systm.h>
83#include <sys/proc.h>
84#include <sys/malloc.h>
85#include <sys/msgbuf.h>
86#include <sys/queue.h>
87#include <sys/vmmeter.h>
88
89#include <vm/vm.h>
90#include <vm/vm_param.h>
91#include <vm/vm_prot.h>
92#include <vm/lock.h>
93#include <vm/vm_kern.h>
94#include <vm/vm_page.h>
95#include <vm/vm_map.h>
96#include <vm/vm_object.h>
97#include <vm/vm_extern.h>
98
99#include <machine/pcb.h>
100#include <machine/cputypes.h>
101#include <machine/md_var.h>
102
103#include <i386/isa/isa.h>
104
105#define PMAP_KEEP_PDIRS
106
107#if defined(DIAGNOSTIC)
108#define PMAP_DIAGNOSTIC
109#endif
110
111static void	init_pv_entries __P((int));
112
113/*
114 * Get PDEs and PTEs for user/kernel address space
115 */
116#define	pmap_pde(m, v)	(&((m)->pm_pdir[((vm_offset_t)(v) >> PDRSHIFT)&(NPDEPG-1)]))
117#define pdir_pde(m, v) (m[((vm_offset_t)(v) >> PDRSHIFT)&(NPDEPG-1)])
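/*
 * Illustrative sketch (kept under #if 0, not compiled): because of the
 * recursive mapping described at the top of this file, every page table
 * appears inside the linear PTmap window, so the pte for any virtual
 * address can be found with simple index arithmetic.  This is essentially
 * what the vtopte() macro from the pmap header does; the helper name below
 * is made up for the example.
 */
#if 0
static pt_entry_t *
example_vtopte(vm_offset_t va)
{
	/* index the recursive PTmap window by virtual page number */
	return (PTmap + i386_btop(va));
}
#endif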
118
119#define pmap_pte_pa(pte)	(*(int *)(pte) & PG_FRAME)
120
121#define pmap_pde_v(pte)		((*(int *)pte & PG_V) != 0)
122#define pmap_pte_w(pte)		((*(int *)pte & PG_W) != 0)
123#define pmap_pte_m(pte)		((*(int *)pte & PG_M) != 0)
124#define pmap_pte_u(pte)		((*(int *)pte & PG_A) != 0)
125#define pmap_pte_v(pte)		((*(int *)pte & PG_V) != 0)
126
127#define pmap_pte_set_w(pte, v) ((v)?(*(int *)pte |= PG_W):(*(int *)pte &= ~PG_W))
128#define pmap_pte_set_prot(pte, v) ((*(int *)pte &= ~PG_PROT), (*(int *)pte |= (v)))
129
130/*
131 * Given a map and a machine independent protection code,
132 * convert to an i386 protection code.
133 */
134#define pte_prot(m, p)	(protection_codes[p])
135static int protection_codes[8];
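/*
 * i386_protection_init() (at the end of this file) fills protection_codes[]
 * so that any combination containing VM_PROT_WRITE maps to PG_RW and every
 * other combination maps to 0 (read-only); e.g.
 * pte_prot(pmap, VM_PROT_READ | VM_PROT_WRITE) yields PG_RW.
 */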
136
137static struct pmap kernel_pmap_store;
138pmap_t kernel_pmap;
139
140vm_offset_t avail_start;	/* PA of first available physical page */
141vm_offset_t avail_end;		/* PA of last available physical page */
142vm_offset_t virtual_avail;	/* VA of first avail page (after kernel bss) */
143vm_offset_t virtual_end;	/* VA of last avail page (end of kernel AS) */
144static boolean_t pmap_initialized = FALSE;	/* Has pmap_init completed? */
145static vm_offset_t vm_first_phys;
146
147static int nkpt;
148
149extern vm_offset_t clean_sva, clean_eva;
150extern int cpu_class;
151
152/*
153 * All those kernel PT submaps that BSD is so fond of
154 */
155pt_entry_t *CMAP1;
156static pt_entry_t *CMAP2, *ptmmap;
157static pv_entry_t pv_table;
158caddr_t CADDR1, ptvmmap;
159static caddr_t CADDR2;
160static pt_entry_t *msgbufmap;
161struct msgbuf *msgbufp;
162
163static void	free_pv_entry __P((pv_entry_t pv));
164pt_entry_t *
165		get_ptbase __P((pmap_t pmap));
166static pv_entry_t
167		get_pv_entry __P((void));
168static void	i386_protection_init __P((void));
169static void	pmap_alloc_pv_entry __P((void));
170static void	pmap_changebit __P((vm_offset_t pa, int bit, boolean_t setem));
171static void	pmap_enter_quick __P((pmap_t pmap, vm_offset_t va,
172				      vm_offset_t pa));
173static int	pmap_is_managed __P((vm_offset_t pa));
174static void	pmap_remove_all __P((vm_offset_t pa));
175static void pmap_remove_page __P((struct pmap *pmap, vm_offset_t va));
176static __inline void pmap_remove_entry __P((struct pmap *pmap, pv_entry_t pv,
177					vm_offset_t va));
178static void pmap_remove_pte __P((struct pmap *pmap, pt_entry_t *ptq,
179					vm_offset_t sva));
180static boolean_t
181		pmap_testbit __P((vm_offset_t pa, int bit));
182static void *	pmap_getpdir __P((void));
183
184
185#if defined(PMAP_DIAGNOSTIC)
186
187/*
188 * This code checks for pages that are marked modified but not writable,
189 * which should be an invalid condition.
190 */
191static int
192pmap_nw_modified(pt_entry_t ptea) {
193	int pte;
194
195	pte = (int) ptea;
196
197	if ((pte & (PG_M|PG_RW)) == PG_M)
198		return 1;
199	else
200		return 0;
201}
202#endif
203
204/*
205 * The routines below are finer-grained versions of pmap_update().  They
206 * eliminate the gratuitous TLB flushes on i486 and later CPUs.
207 */
208static __inline void
209pmap_update_1pg( vm_offset_t va) {
210#if defined(I386_CPU)
211	if (cpu_class == CPUCLASS_386)
212		pmap_update();
213	else
214#endif
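		/* hand-encoded "invlpg (%eax)": flush the single TLB entry for va */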
215		__asm __volatile(".byte 0xf,0x1,0x38": :"a" (va));
216}
217
218static __inline void
219pmap_update_2pg( vm_offset_t va1, vm_offset_t va2) {
220#if defined(I386_CPU)
221	if (cpu_class == CPUCLASS_386) {
222		pmap_update();
223	} else
224#endif
225	{
226		__asm __volatile(".byte 0xf,0x1,0x38": :"a" (va1));
227		__asm __volatile(".byte 0xf,0x1,0x38": :"a" (va2));
228	}
229}
230
231/*
232 *	Routine:	pmap_pte
233 *	Function:
234 *		Extract the page table entry associated
235 *		with the given map/virtual_address pair.
236 * [ what about induced faults -wfj]
237 */
238
239__inline pt_entry_t * __pure
240pmap_pte(pmap, va)
241	register pmap_t pmap;
242	vm_offset_t va;
243{
244
245	if (pmap && *pmap_pde(pmap, va)) {
246		vm_offset_t frame = (int) pmap->pm_pdir[PTDPTDI] & PG_FRAME;
247
248		/* are we current address space or kernel? */
249		if ((pmap == kernel_pmap) || (frame == ((int) PTDpde & PG_FRAME)))
250			return ((pt_entry_t *) vtopte(va));
251		/* otherwise, we are alternate address space */
252		else {
253			if (frame != ((int) APTDpde & PG_FRAME)) {
254				APTDpde = pmap->pm_pdir[PTDPTDI];
255				pmap_update();
256			}
257			return ((pt_entry_t *) avtopte(va));
258		}
259	}
260	return (0);
261}
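/*
 * Usage sketch (not compiled): look up a pte with pmap_pte() and test its
 * valid bit before using the frame -- essentially what pmap_extract() below
 * does.  The helper name is made up for the example.
 */
#if 0
static vm_offset_t
example_lookup(pmap_t pmap, vm_offset_t va)
{
	pt_entry_t *pte = pmap_pte(pmap, va);

	if (pte == NULL || !pmap_pte_v(pte))
		return (0);
	return (pmap_pte_pa(pte) | (va & ~PG_FRAME));
}
#endif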
262
263/*
264 *	Routine:	pmap_extract
265 *	Function:
266 *		Extract the physical page address associated
267 *		with the given map/virtual_address pair.
268 */
269
270vm_offset_t
271pmap_extract(pmap, va)
272	register pmap_t pmap;
273	vm_offset_t va;
274{
275	vm_offset_t pa;
276
277	if (pmap && *pmap_pde(pmap, va)) {
278		vm_offset_t frame = (int) pmap->pm_pdir[PTDPTDI] & PG_FRAME;
279
280		/* are we current address space or kernel? */
281		if ((pmap == kernel_pmap)
282		    || (frame == ((int) PTDpde & PG_FRAME))) {
283			pa = *(int *) vtopte(va);
284			/* otherwise, we are alternate address space */
285		} else {
286			if (frame != ((int) APTDpde & PG_FRAME)) {
287				APTDpde = pmap->pm_pdir[PTDPTDI];
288				pmap_update();
289			}
290			pa = *(int *) avtopte(va);
291		}
292		return ((pa & PG_FRAME) | (va & ~PG_FRAME));
293	}
294	return 0;
295
296}
297
298/*
299 * determine if a page is managed (memory vs. device)
300 */
301static __inline int
302pmap_is_managed(pa)
303	vm_offset_t pa;
304{
305	int i;
306
307	if (!pmap_initialized)
308		return 0;
309
310	for (i = 0; phys_avail[i + 1]; i += 2) {
311		if (pa < phys_avail[i + 1] && pa >= phys_avail[i])
312			return 1;
313	}
314	return 0;
315}
316
317vm_page_t
318pmap_use_pt(pmap, va)
319	pmap_t pmap;
320	vm_offset_t va;
321{
322	vm_offset_t ptepa;
323	vm_page_t mpte;
324
325	if (va >= UPT_MIN_ADDRESS)
326		return NULL;
327
328	ptepa = ((vm_offset_t) *pmap_pde(pmap, va)) & PG_FRAME;
329#if defined(PMAP_DIAGNOSTIC)
330	if (!ptepa)
331		panic("pmap_use_pt: pagetable page missing, va: 0x%x", va);
332#endif
333
334	mpte = PHYS_TO_VM_PAGE(ptepa);
335	++mpte->hold_count;
336	return mpte;
337}
338
339#if !defined(PMAP_DIAGNOSTIC)
340__inline
341#endif
342void
343pmap_unuse_pt(pmap, va, mpte)
344	pmap_t pmap;
345	vm_offset_t va;
346	vm_page_t mpte;
347{
348	if (va >= UPT_MIN_ADDRESS)
349		return;
350
351	if (mpte == NULL) {
352		vm_offset_t ptepa;
353		ptepa = ((vm_offset_t) *pmap_pde(pmap, va)) & PG_FRAME;
354#if defined(PMAP_DIAGNOSTIC)
355		if (!ptepa)
356			panic("pmap_unuse_pt: pagetable page missing, va: 0x%x", va);
357#endif
358		mpte = PHYS_TO_VM_PAGE(ptepa);
359	}
360
361#if defined(PMAP_DIAGNOSTIC)
362	if (mpte->hold_count == 0) {
363		panic("pmap_unuse_pt: hold count already 0, va: 0x%x", va);
364	}
365#endif
366
367	vm_page_unhold(mpte);
368
369	if ((mpte->hold_count == 0) &&
370	    (mpte->wire_count == 0) &&
371	    (pmap != kernel_pmap) &&
372	    (va < KPT_MIN_ADDRESS)) {
373/*
374 * We don't free page-table-pages anymore because it can have a negative
375 * impact on perf at times.  Now we just deactivate, and it'll get cleaned
376 * up if needed...  Also, if the page ends up getting used, it will fault
377 * back into the process address space and be reactivated.
378 */
379#if defined(PMAP_FREE_OLD_PTES)
380		pmap_page_protect(VM_PAGE_TO_PHYS(mpte), VM_PROT_NONE);
381		vm_page_free(mpte);
382#else
383		mpte->dirty = 0;
384		vm_page_deactivate(mpte);
385#endif
386	}
387}
388
389/*
390 *	Bootstrap the system enough to run with virtual memory.
391 *
392 *	On the i386 this is called after mapping has already been enabled
393 *	and just syncs the pmap module with what has already been done.
394 *	[We can't call it easily with mapping off since the kernel is not
395 *	mapped with PA == VA, hence we would have to relocate every address
396 *	from the linked base (virtual) address "KERNBASE" to the actual
397 *	(physical) address starting relative to 0]
398 */
399void
400pmap_bootstrap(firstaddr, loadaddr)
401	vm_offset_t firstaddr;
402	vm_offset_t loadaddr;
403{
404	vm_offset_t va;
405	pt_entry_t *pte;
406
407	avail_start = firstaddr;
408
409	/*
410	 * XXX The calculation of virtual_avail is wrong. It's NKPT*PAGE_SIZE too
411	 * large. It should instead be correctly calculated in locore.s and
412	 * not based on 'first' (which is a physical address, not a virtual
413	 * address, for the start of unused physical memory). The kernel
414	 * page tables are NOT double mapped and thus should not be included
415	 * in this calculation.
416	 */
417	virtual_avail = (vm_offset_t) KERNBASE + firstaddr;
418	virtual_end = VM_MAX_KERNEL_ADDRESS;
419
420	/*
421	 * Initialize protection array.
422	 */
423	i386_protection_init();
424
425	/*
426	 * The kernel's pmap is statically allocated so we don't have to use
427	 * pmap_create, which is unlikely to work correctly at this part of
428	 * the boot sequence (XXX and which no longer exists).
429	 */
430	kernel_pmap = &kernel_pmap_store;
431
432	kernel_pmap->pm_pdir = (pd_entry_t *) (KERNBASE + IdlePTD);
433
434	kernel_pmap->pm_count = 1;
435	nkpt = NKPT;
436
437	/*
438	 * Reserve some special page table entries/VA space for temporary
439	 * mapping of pages.
440	 */
441#define	SYSMAP(c, p, v, n)	\
442	v = (c)va; va += ((n)*PAGE_SIZE); p = pte; pte += (n);
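	/*
	 * For example, SYSMAP(caddr_t, CMAP1, CADDR1, 1) below expands to:
	 *	CADDR1 = (caddr_t)va; va += ((1)*PAGE_SIZE); CMAP1 = pte; pte += (1);
	 * i.e. it hands out one page of KVA and remembers the pte mapping it.
	 */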
443
444	va = virtual_avail;
445	pte = pmap_pte(kernel_pmap, va);
446
447	/*
448	 * CMAP1/CMAP2 are used for zeroing and copying pages.
449	 */
450	SYSMAP(caddr_t, CMAP1, CADDR1, 1)
451	SYSMAP(caddr_t, CMAP2, CADDR2, 1)
452
453	/*
454	 * ptmmap is used for reading arbitrary physical pages via /dev/mem.
455	 */
456	SYSMAP(caddr_t, ptmmap, ptvmmap, 1)
457
458	/*
459	 * msgbufmap is used to map the system message buffer.
460	 */
461	SYSMAP(struct msgbuf *, msgbufmap, msgbufp, 1)
462
463	virtual_avail = va;
464
465	*(int *) CMAP1 = *(int *) CMAP2 = *(int *) PTD = 0;
466	pmap_update();
467}
468
469/*
470 *	Initialize the pmap module.
471 *	Called by vm_init, to initialize any structures that the pmap
472 *	system needs to map virtual memory.
473 *	pmap_init has been enhanced to support, in a fairly consistent
474 *	way, discontiguous physical memory.
475 */
476void
477pmap_init(phys_start, phys_end)
478	vm_offset_t phys_start, phys_end;
479{
480	vm_offset_t addr;
481	vm_size_t npg, s;
482	int i;
483
484	/*
485	 * calculate the number of pv_entries needed
486	 */
487	vm_first_phys = phys_avail[0];
488	for (i = 0; phys_avail[i + 1]; i += 2);
489	npg = (phys_avail[(i - 2) + 1] - vm_first_phys) / PAGE_SIZE;
490
491	/*
492	 * Allocate memory for random pmap data structures.  Includes the
493	 * pv_head_table.
494	 */
495	s = (vm_size_t) (sizeof(struct pv_entry) * npg);
496	s = round_page(s);
497	addr = (vm_offset_t) kmem_alloc(kernel_map, s);
498	pv_table = (pv_entry_t) addr;
499
500	/*
501	 * init the pv free list
502	 */
503	init_pv_entries(npg);
504	/*
505	 * Now it is safe to enable pv_table recording.
506	 */
507	pmap_initialized = TRUE;
508}
509
510/*
511 *	Used to map a range of physical addresses into kernel
512 *	virtual address space.
513 *
514 *	For now, VM is already on, we only need to map the
515 *	specified memory.
516 */
517vm_offset_t
518pmap_map(virt, start, end, prot)
519	vm_offset_t virt;
520	vm_offset_t start;
521	vm_offset_t end;
522	int prot;
523{
524	while (start < end) {
525		pmap_enter(kernel_pmap, virt, start, prot, FALSE);
526		virt += PAGE_SIZE;
527		start += PAGE_SIZE;
528	}
529	return (virt);
530}
531
532#if defined(PMAP_KEEP_PDIRS)
533int nfreepdir;
534caddr_t *pdirlist;
535#define NFREEPDIR 3
536
537static void *
538pmap_getpdir() {
539	caddr_t *pdir;
540	if (pdirlist) {
541		--nfreepdir;
542		pdir = pdirlist;
543		pdirlist = (caddr_t *) *pdir;
544		*pdir = 0;
545#if 0 /* Not needed anymore */
546		bzero( (caddr_t) pdir, PAGE_SIZE);
547#endif
548	} else {
549		pdir = (caddr_t *) kmem_alloc(kernel_map, PAGE_SIZE);
550	}
551
552	return (void *) pdir;
553}
554
555static void
556pmap_freepdir(void *pdir) {
557	if (nfreepdir > NFREEPDIR) {
558		kmem_free(kernel_map, (vm_offset_t) pdir, PAGE_SIZE);
559	} else {
560		int i;
561		pt_entry_t *s;
562		s = (pt_entry_t *) pdir;
563
564		/*
565		 * remove wired in kernel mappings
566		 */
567		bzero(s + KPTDI, nkpt * PTESIZE);
568		s[APTDPTDI] = 0;
569		s[PTDPTDI] = 0;
570
571#if defined(PMAP_DIAGNOSTIC)
572		for (i = 0; i < PAGE_SIZE / 4; i++, s++) {
573			if (*s) {
574				printf("pmap_freepdir: index %d not zero: %lx\n", i, *s);
575			}
576		}
577#endif
578		* (caddr_t *) pdir = (caddr_t) pdirlist;
579		pdirlist = (caddr_t *) pdir;
580		++nfreepdir;
581	}
582}
583#endif
584
585/*
586 * Initialize a preallocated and zeroed pmap structure,
587 * such as one in a vmspace structure.
588 */
589void
590pmap_pinit(pmap)
591	register struct pmap *pmap;
592{
593	/*
594	 * No need to allocate page table space yet but we do need a valid
595	 * page directory table.
596	 */
597
598#if defined(PMAP_KEEP_PDIRS)
599	pmap->pm_pdir = pmap_getpdir();
600#else
601	pmap->pm_pdir = (pd_entry_t *) kmem_alloc(kernel_map, PAGE_SIZE);
602#endif
603
604	/* wire in kernel global address entries */
605	bcopy(PTD + KPTDI, pmap->pm_pdir + KPTDI, nkpt * PTESIZE);
606
607	/* install self-referential address mapping entry */
608	*(int *) (pmap->pm_pdir + PTDPTDI) =
609	    ((int) pmap_kextract((vm_offset_t) pmap->pm_pdir)) | PG_V | PG_RW;
610
611	pmap->pm_count = 1;
612}
613
614/*
615 * grow the number of kernel page table entries, if needed
616 */
617
618static vm_page_t nkpg;
619vm_offset_t kernel_vm_end;
620
621void
622pmap_growkernel(vm_offset_t addr)
623{
624	struct proc *p;
625	struct pmap *pmap;
626	int s;
627
628	s = splhigh();
629	if (kernel_vm_end == 0) {
630		kernel_vm_end = KERNBASE;
631		nkpt = 0;
632		while (pdir_pde(PTD, kernel_vm_end)) {
633			kernel_vm_end = (kernel_vm_end + NBPDR) & ~(NBPDR-1);
634			++nkpt;
635		}
636	}
637	addr = (addr + NBPDR) & ~(NBPDR - 1);
638	while (kernel_vm_end < addr) {
639		if (pdir_pde(PTD, kernel_vm_end)) {
640			kernel_vm_end = (kernel_vm_end + NBPDR) & ~(NBPDR-1);
641			continue;
642		}
643		++nkpt;
644		if (!nkpg) {
645			nkpg = vm_page_alloc(kernel_object, 0, VM_ALLOC_SYSTEM);
646			if (!nkpg)
647				panic("pmap_growkernel: no memory to grow kernel");
648			vm_page_wire(nkpg);
649			vm_page_remove(nkpg);
650			pmap_zero_page(VM_PAGE_TO_PHYS(nkpg));
651		}
652		pdir_pde(PTD, kernel_vm_end) = (pd_entry_t) (VM_PAGE_TO_PHYS(nkpg) | PG_V | PG_RW);
653		nkpg = NULL;
654
655		for (p = allproc.lh_first; p != 0; p = p->p_list.le_next) {
656			if (p->p_vmspace) {
657				pmap = &p->p_vmspace->vm_pmap;
658				*pmap_pde(pmap, kernel_vm_end) = pdir_pde(PTD, kernel_vm_end);
659			}
660		}
661		*pmap_pde(kernel_pmap, kernel_vm_end) = pdir_pde(PTD, kernel_vm_end);
662		kernel_vm_end = (kernel_vm_end + NBPDR) & ~(NBPDR-1);
663	}
664	splx(s);
665}
666
667/*
668 *	Retire the given physical map from service.
669 *	Should only be called if the map contains
670 *	no valid mappings.
671 */
672void
673pmap_destroy(pmap)
674	register pmap_t pmap;
675{
676	int count;
677
678	if (pmap == NULL)
679		return;
680
681	count = --pmap->pm_count;
682	if (count == 0) {
683		pmap_release(pmap);
684		free((caddr_t) pmap, M_VMPMAP);
685	}
686}
687
688/*
689 * Release any resources held by the given physical map.
690 * Called when a pmap initialized by pmap_pinit is being released.
691 * Should only be called if the map contains no valid mappings.
692 */
693void
694pmap_release(pmap)
695	register struct pmap *pmap;
696{
697#if defined(PMAP_KEEP_PDIRS)
698	pmap_freepdir( (void *)pmap->pm_pdir);
699#else
700	kmem_free(kernel_map, (vm_offset_t) pmap->pm_pdir, PAGE_SIZE);
701#endif
702}
703
704/*
705 *	Add a reference to the specified pmap.
706 */
707void
708pmap_reference(pmap)
709	pmap_t pmap;
710{
711	if (pmap != NULL) {
712		pmap->pm_count++;
713	}
714}
715
716#define PV_FREELIST_MIN ((PAGE_SIZE / sizeof (struct pv_entry)) / 2)
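/*
 * Refill threshold for get_pv_entry(): half a page worth of pv_entry
 * structures.  pmap_alloc_pv_entry() also temporarily credits this many
 * entries to pv_freelistcnt while it runs, to keep itself from recursing.
 */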
717
718/*
719 * Data for the pv entry allocation mechanism
720 */
721static int pv_freelistcnt;
722static pv_entry_t pv_freelist;
723static vm_offset_t pvva;
724static int npvvapg;
725
726/*
727 * free the pv_entry back to the free list
728 */
729static __inline void
730free_pv_entry(pv)
731	pv_entry_t pv;
732{
733	if (!pv)
734		return;
735	++pv_freelistcnt;
736	pv->pv_next = pv_freelist;
737	pv_freelist = pv;
738}
739
740/*
741 * get a new pv_entry, allocating a block from the system
742 * when needed.
743 * the memory allocation is performed bypassing the malloc code
744 * because of the possibility of allocations at interrupt time.
745 */
746static __inline pv_entry_t
747get_pv_entry()
748{
749	pv_entry_t tmp;
750
751	/*
752	 * get more pv_entry pages if needed
753	 */
754	if (pv_freelistcnt < PV_FREELIST_MIN || pv_freelist == 0) {
755		pmap_alloc_pv_entry();
756	}
757	/*
758	 * get a pv_entry off of the free list
759	 */
760	--pv_freelistcnt;
761	tmp = pv_freelist;
762	pv_freelist = tmp->pv_next;
763	return tmp;
764}
765
766/*
767 * this *strange* allocation routine *statistically* eliminates the
768 * *possibility* of a malloc failure (*FATAL*) for a pv_entry_t data structure.
769 * also -- this code is MUCH MUCH faster than the malloc equiv...
770 */
771static void
772pmap_alloc_pv_entry()
773{
774	/*
775	 * do we have any pre-allocated map-pages left?
776	 */
777	if (npvvapg) {
778		vm_page_t m;
779
780		/*
781		 * we do this to keep recursion away
782		 */
783		pv_freelistcnt += PV_FREELIST_MIN;
784		/*
785		 * allocate a physical page out of the vm system
786		 */
787		m = vm_page_alloc(kernel_object,
788		    OFF_TO_IDX(pvva - vm_map_min(kernel_map)),
789		    VM_ALLOC_INTERRUPT);
790		if (m) {
791			int newentries;
792			int i;
793			pv_entry_t entry;
794
795			newentries = (PAGE_SIZE / sizeof(struct pv_entry));
796			/*
797			 * wire the page
798			 */
799			vm_page_wire(m);
800			m->flags &= ~PG_BUSY;
801			/*
802			 * let the kernel see it
803			 */
804			pmap_kenter(pvva, VM_PAGE_TO_PHYS(m));
805
806			entry = (pv_entry_t) pvva;
807			/*
808			 * update the allocation pointers
809			 */
810			pvva += PAGE_SIZE;
811			--npvvapg;
812
813			/*
814			 * free the entries into the free list
815			 */
816			for (i = 0; i < newentries; i++) {
817				free_pv_entry(entry);
818				entry++;
819			}
820		}
821		pv_freelistcnt -= PV_FREELIST_MIN;
822	}
823	if (!pv_freelist)
824		panic("get_pv_entry: cannot get a pv_entry_t");
825}
826
827
828
829/*
830 * init the pv_entry allocation system
831 */
832#define PVSPERPAGE 64
833void
834init_pv_entries(npg)
835	int npg;
836{
837	/*
838	 * allocate enough kvm space for PVSPERPAGE entries per page (lots)
839	 * kvm space is fairly cheap, be generous!!!  (the system can panic if
840	 * this is too small.)
841	 */
842	npvvapg = btoc((npg * PVSPERPAGE) * sizeof(struct pv_entry));
843	pvva = kmem_alloc_pageable(kernel_map, npvvapg * PAGE_SIZE);
844	/*
845	 * get the first batch of entries
846	 */
847	free_pv_entry(get_pv_entry());
848}
849
850__inline pt_entry_t *
851get_ptbase(pmap)
852	pmap_t pmap;
853{
854	vm_offset_t frame = (int) pmap->pm_pdir[PTDPTDI] & PG_FRAME;
855
856	/* are we current address space or kernel? */
857	if (pmap == kernel_pmap || frame == ((int) PTDpde & PG_FRAME)) {
858		return PTmap;
859	}
860	/* otherwise, we are alternate address space */
861	if (frame != ((int) APTDpde & PG_FRAME)) {
862		APTDpde = pmap->pm_pdir[PTDPTDI];
863		pmap_update();
864	}
865	return APTmap;
866}
867
868/*
869 * If it is the first entry on the list, it is actually
870 * in the header and we must copy the following entry up
871 * to the header.  Otherwise we must search the list for
872 * the entry.  In either case we free the now unused entry.
873 */
874static __inline void
875pmap_remove_entry(pmap, pv, va)
876	struct pmap *pmap;
877	pv_entry_t pv;
878	vm_offset_t va;
879{
880	pv_entry_t npv;
881	int s;
882	s = splhigh();
883	if (pmap == pv->pv_pmap && va == pv->pv_va) {
884		pmap_unuse_pt(pmap, va, pv->pv_ptem);
885		npv = pv->pv_next;
886		if (npv) {
887			*pv = *npv;
888			free_pv_entry(npv);
889		} else {
890			pv->pv_pmap = NULL;
891		}
892	} else {
893		for (npv = pv->pv_next; npv; (pv = npv, npv = pv->pv_next)) {
894			if (pmap == npv->pv_pmap && va == npv->pv_va) {
895				pmap_unuse_pt(pmap, va, npv->pv_ptem);
896				pv->pv_next = npv->pv_next;
897				free_pv_entry(npv);
898				break;
899			}
900		}
901	}
902	splx(s);
903}
904
905/*
906 * pmap_remove_pte: do the things to unmap a page in a process
907 */
908static void
909pmap_remove_pte(pmap, ptq, sva)
910	struct pmap *pmap;
911	pt_entry_t *ptq;
912	vm_offset_t sva;
913{
914	pt_entry_t oldpte;
915	vm_offset_t pa;
916	pv_entry_t pv;
917
918	oldpte = *ptq;
919	if (((int)oldpte) & PG_W)
920		pmap->pm_stats.wired_count--;
921	pmap->pm_stats.resident_count--;
922
923	pa = ((vm_offset_t)oldpte) & PG_FRAME;
924	if (pmap_is_managed(pa)) {
925		if ((int) oldpte & PG_M) {
926#if defined(PMAP_DIAGNOSTIC)
927			if (pmap_nw_modified(oldpte)) {
928				printf("pmap_remove: modified page not writable: va: 0x%lx, pte: 0x%lx\n", sva, (int) oldpte);
929			}
930#endif
931
932			if (sva < USRSTACK + (UPAGES * PAGE_SIZE) ||
933			    (sva >= KERNBASE && (sva < clean_sva || sva >= clean_eva))) {
934				PHYS_TO_VM_PAGE(pa)->dirty = VM_PAGE_BITS_ALL;
935			}
936		}
937		pv = pa_to_pvh(pa);
938		pmap_remove_entry(pmap, pv, sva);
939	} else {
940		pmap_unuse_pt(pmap, sva, NULL);
941	}
942
943	*ptq = 0;
944	return;
945}
946
947/*
948 * Remove a single page from a process address space
949 */
950static __inline void
951pmap_remove_page(pmap, va)
952	struct pmap *pmap;
953	register vm_offset_t va;
954{
955	register pt_entry_t *ptbase, *ptq;
956	/*
957	 * if there is no pte for this address, just skip it!!!
958	 */
959	if (*pmap_pde(pmap, va) == 0)
960		return;
961	/*
962	 * get a local va for mappings for this pmap.
963	 */
964	ptbase = get_ptbase(pmap);
965	ptq = ptbase + i386_btop(va);
966	if (*ptq) {
967		pmap_remove_pte(pmap, ptq, va);
968		pmap_update_1pg(va);
969	}
970	return;
971}
972
973/*
974 *	Remove the given range of addresses from the specified map.
975 *
976 *	It is assumed that the start and end are properly
977 *	rounded to the page size.
978 */
979void
980pmap_remove(pmap, sva, eva)
981	struct pmap *pmap;
982	register vm_offset_t sva;
983	register vm_offset_t eva;
984{
985	register pt_entry_t *ptbase;
986	vm_offset_t pdnxt;
987	vm_offset_t ptpaddr;
988	vm_offset_t sindex, eindex;
989	vm_page_t mpte;
990
991	if (pmap == NULL)
992		return;
993
994	/*
995	 * special handling of removing one page.  a very
996	 * common operation and easy to short circuit some
997	 * code.
998	 */
999	if ((sva + PAGE_SIZE) == eva) {
1000		pmap_remove_page(pmap, sva);
1001		return;
1002	}
1003
1004	/*
1005	 * Get a local virtual address for the mappings that are being
1006	 * worked with.
1007	 */
1008	ptbase = get_ptbase(pmap);
1009
1010	sindex = i386_btop(sva);
1011	eindex = i386_btop(eva);
1012
1013	for (; sindex < eindex; sindex = pdnxt) {
1014
1015		/*
1016		 * Calculate index for next page table.
1017		 */
1018		pdnxt = ((sindex + NPTEPG) & ~(NPTEPG - 1));
1019		ptpaddr = (vm_offset_t) *pmap_pde(pmap, i386_ptob(sindex));
1020
1021		/*
1022		 * Weed out invalid mappings. Note: we assume that the page
1023		 * directory table is always allocated, and in kernel virtual.
1024		 */
1025		if (ptpaddr == 0)
1026			continue;
1027
1028		/*
1029		 * get the vm_page_t for the page table page
1030		 */
1031		mpte = PHYS_TO_VM_PAGE(ptpaddr);
1032
1033		/*
1034		 * if the pte isn't wired or held, just skip it.
1035		 */
1036		if ((mpte->hold_count == 0) && (mpte->wire_count == 0))
1037			continue;
1038
1039		/*
1040		 * Limit our scan to either the end of the va represented
1041		 * by the current page table page, or to the end of the
1042		 * range being removed.
1043		 */
1044		if (pdnxt > eindex) {
1045			pdnxt = eindex;
1046		}
1047
1048		for ( ;sindex != pdnxt; sindex++) {
1049			if (ptbase[sindex] == 0)
1050				continue;
1051			pmap_remove_pte(pmap, ptbase + sindex, i386_ptob(sindex));
1052			if (mpte->hold_count == 0 && mpte->wire_count == 0)
1053				break;
1054		}
1055	}
1056	pmap_update();
1057}
1058
1059/*
1060 *	Routine:	pmap_remove_all
1061 *	Function:
1062 *		Removes this physical page from
1063 *		all physical maps in which it resides.
1064 *		Reflects back modify bits to the pager.
1065 *
1066 *	Notes:
1067 *		Original versions of this routine were very
1068 *		inefficient because they iteratively called
1069 *		pmap_remove (slow...)
1070 */
1071static void
1072pmap_remove_all(pa)
1073	vm_offset_t pa;
1074{
1075	register pv_entry_t pv, opv, npv;
1076	register pt_entry_t *pte, *ptbase;
1077	vm_offset_t va;
1078	struct pmap *pmap;
1079	vm_page_t m;
1080	int s;
1081	int anyvalid = 0;
1082
1083#if defined(PMAP_DIAGNOSTIC)
1084	/*
1085	 * XXX this makes pmap_page_protect(NONE) illegal for non-managed
1086	 * pages!
1087	 */
1088	if (!pmap_is_managed(pa)) {
1089		panic("pmap_page_protect: illegal for unmanaged page, va: 0x%lx", pa);
1090	}
1091#endif
1092
1093	pa = pa & PG_FRAME;
1094	opv = pa_to_pvh(pa);
1095	if (opv->pv_pmap == NULL)
1096		return;
1097
1098	m = PHYS_TO_VM_PAGE(pa);
1099	s = splhigh();
1100	pv = opv;
1101	while (pv && ((pmap = pv->pv_pmap) != NULL)) {
1102		int tpte;
1103		ptbase = get_ptbase(pmap);
1104		va = pv->pv_va;
1105		pte = ptbase + i386_btop(va);
1106		if ((tpte = (int) *pte) != 0) {
1107			*pte = 0;
1108			if (tpte & PG_W)
1109				pmap->pm_stats.wired_count--;
1110			pmap->pm_stats.resident_count--;
1111			anyvalid = 1;
1112
1113			/*
1114			 * Update the vm_page_t clean and reference bits.
1115			 */
1116			if ((tpte & PG_M) != 0) {
1117#if defined(PMAP_DIAGNOSTIC)
1118				if (pmap_nw_modified((pt_entry_t) tpte)) {
1119					printf("pmap_remove_all: modified page not writable: va: 0x%lx, pte: 0x%lx\n", va, tpte);
1120				}
1121#endif
1122				if (va < USRSTACK + (UPAGES * PAGE_SIZE) ||
1123				    (va >= KERNBASE && (va < clean_sva || va >= clean_eva))) {
1124					m->dirty = VM_PAGE_BITS_ALL;
1125				}
1126			}
1127		}
1128		pv = pv->pv_next;
1129	}
1130
1131	if (opv->pv_pmap != NULL) {
1132		pmap_unuse_pt(opv->pv_pmap, opv->pv_va, opv->pv_ptem);
1133		for (pv = opv->pv_next; pv; pv = npv) {
1134			npv = pv->pv_next;
1135			pmap_unuse_pt(pv->pv_pmap, pv->pv_va, pv->pv_ptem);
1136			free_pv_entry(pv);
1137		}
1138	}
1139
1140	opv->pv_pmap = NULL;
1141	opv->pv_next = NULL;
1142
1143	splx(s);
1144	if (anyvalid)
1145		pmap_update();
1146}
1147
1148
1149/*
1150 *	Set the physical protection on the
1151 *	specified range of this map as requested.
1152 */
1153void
1154pmap_protect(pmap, sva, eva, prot)
1155	register pmap_t pmap;
1156	vm_offset_t sva, eva;
1157	vm_prot_t prot;
1158{
1159	register pt_entry_t *pte;
1160	register pt_entry_t *ptbase;
1161	vm_offset_t pdnxt;
1162	vm_offset_t ptpaddr;
1163	vm_offset_t sindex, eindex;
1164	vm_page_t mpte;
1165	int anychanged;
1166
1167
1168	if (pmap == NULL)
1169		return;
1170
1171	if ((prot & VM_PROT_READ) == VM_PROT_NONE) {
1172		pmap_remove(pmap, sva, eva);
1173		return;
1174	}
1175	if (prot & VM_PROT_WRITE)
1176		return;
1177
1178	anychanged = 0;
1179
1180	ptbase = get_ptbase(pmap);
1181
1182	sindex = i386_btop(sva);
1183	eindex = i386_btop(eva);
1184
1185	for (; sindex < eindex; sindex = pdnxt) {
1186		int pbits;
1187
1188		pdnxt = ((sindex + NPTEPG) & ~(NPTEPG - 1));
1189		ptpaddr = (vm_offset_t) *pmap_pde(pmap, i386_ptob(sindex));
1190
1191		/*
1192		 * Weed out invalid mappings. Note: we assume that the page
1193		 * directory table is always allocated, and in kernel virtual.
1194		 */
1195		if (ptpaddr == 0)
1196			continue;
1197
1198		mpte = PHYS_TO_VM_PAGE(ptpaddr);
1199
1200		if ((mpte->hold_count == 0) && (mpte->wire_count == 0))
1201			continue;
1202
1203		if (pdnxt > eindex) {
1204			pdnxt = eindex;
1205		}
1206
1207		for (; sindex != pdnxt; sindex++) {
1208			if (ptbase[sindex] == 0)
1209				continue;
1210			pte = ptbase + sindex;
1211			pbits = *(int *)pte;
1212			if (pbits & PG_RW) {
1213				if (pbits & PG_M) {
1214					vm_page_t m;
1215					vm_offset_t pa = pbits & PG_FRAME;
1216					m = PHYS_TO_VM_PAGE(pa);
1217					m->dirty = VM_PAGE_BITS_ALL;
1218				}
1219				*(int *)pte &= ~(PG_M|PG_RW);
1220				anychanged=1;
1221			}
1222		}
1223	}
1224	if (anychanged)
1225		pmap_update();
1226}
1227
1228/*
1229 *	Insert the given physical page (p) at
1230 *	the specified virtual address (v) in the
1231 *	target physical map with the protection requested.
1232 *
1233 *	If specified, the page will be wired down, meaning
1234 *	that the related pte can not be reclaimed.
1235 *
1236 *	NB:  This is the only routine which MAY NOT lazy-evaluate
1237 *	or lose information.  That is, this routine must actually
1238 *	insert this page into the given map NOW.
1239 */
1240void
1241pmap_enter(pmap, va, pa, prot, wired)
1242	register pmap_t pmap;
1243	vm_offset_t va;
1244	register vm_offset_t pa;
1245	vm_prot_t prot;
1246	boolean_t wired;
1247{
1248	register pt_entry_t *pte;
1249	vm_offset_t opa;
1250	register pv_entry_t pv, npv;
1251	vm_offset_t origpte, newpte;
1252
1253	if (pmap == NULL)
1254		return;
1255
1256	pv = NULL;
1257
1258	va = va & PG_FRAME;
1259	if (va > VM_MAX_KERNEL_ADDRESS)
1260		panic("pmap_enter: toobig");
1261
1262	/*
1263	 * In the case that a page table page is not
1264	 * resident, we are creating it here.
1265	 */
1266	if ((va < VM_MIN_KERNEL_ADDRESS) &&
1267		(curproc != NULL) &&
1268		(pmap->pm_map->pmap == pmap)) {
1269		vm_offset_t v;
1270
1271		v = (vm_offset_t) vtopte(va);
1272		/* Fault the pte only if needed: */
1273		if (*((int *)vtopte(v)) == 0)
1274			(void) vm_fault(pmap->pm_map,
1275				trunc_page(v), VM_PROT_WRITE, FALSE);
1276	}
1277
1278	/*
1279	 * Page Directory table entry not valid, we need a new PT page
1280	 */
1281	pte = pmap_pte(pmap, va);
1282	if (pte == NULL) {
1283		printf("kernel page directory invalid pdir=%p, va=0x%lx\n",
1284			pmap->pm_pdir[PTDPTDI], va);
1285		panic("invalid kernel page directory");
1286	}
1287
1288	origpte = *(vm_offset_t *)pte;
1289	opa = origpte & PG_FRAME;
1290
1291	pa = pa & PG_FRAME;
1292
1293	/*
1294	 * Mapping has not changed, must be protection or wiring change.
1295	 */
1296	if (opa == pa) {
1297		/*
1298		 * Wiring change, just update stats. We don't worry about
1299		 * wiring PT pages as they remain resident as long as there
1300		 * are valid mappings in them. Hence, if a user page is wired,
1301		 * the PT page will be also.
1302		 */
1303		if (wired && ((origpte & PG_W) == 0))
1304			pmap->pm_stats.wired_count++;
1305		else if (!wired && (origpte & PG_W))
1306			pmap->pm_stats.wired_count--;
1307
1308#if defined(PMAP_DIAGNOSTIC)
1309		if (pmap_nw_modified((pt_entry_t) origpte)) {
1310			printf("pmap_enter: modified page not writable: va: 0x%lx, pte: 0x%lx\n", va, origpte);
1311		}
1312#endif
1313
1314		/*
1315		 * We might be turning off write access to the page,
1316		 * so we go ahead and sense modify status.
1317		 */
1318		if (origpte & PG_M) {
1319			vm_page_t m;
1320			m = PHYS_TO_VM_PAGE(pa);
1321			m->dirty = VM_PAGE_BITS_ALL;
1322		}
1323		goto validate;
1324	}
1325	/*
1326	 * Mapping has changed, invalidate old range and fall through to
1327	 * handle validating new mapping.
1328	 */
1329	if (opa) {
1330		pmap_remove_page(pmap, va);
1331		opa = 0;
1332		origpte = 0;
1333	}
1334	/*
1335	 * Enter on the PV list if part of our managed memory.  Note that we
1336	 * raise IPL while manipulating pv_table since pmap_enter can be
1337	 * called at interrupt time.
1338	 */
1339	if (pmap_is_managed(pa)) {
1340		int s;
1341
1342		pv = pa_to_pvh(pa);
1343		s = splhigh();
1344		/*
1345		 * No entries yet, use header as the first entry
1346		 */
1347		if (pv->pv_pmap == NULL) {
1348			pv->pv_va = va;
1349			pv->pv_pmap = pmap;
1350			pv->pv_next = NULL;
1351			pv->pv_ptem = NULL;
1352		}
1353		/*
1354		 * There is at least one other VA mapping this page. Place
1355		 * this entry after the header.
1356		 */
1357		else {
1358			npv = get_pv_entry();
1359			npv->pv_va = va;
1360			npv->pv_pmap = pmap;
1361			npv->pv_next = pv->pv_next;
1362			pv->pv_next = npv;
1363			pv = npv;
1364			pv->pv_ptem = NULL;
1365		}
1366		splx(s);
1367	}
1368
1369	/*
1370	 * Increment counters
1371	 */
1372	pmap->pm_stats.resident_count++;
1373	if (wired)
1374		pmap->pm_stats.wired_count++;
1375
1376validate:
1377	/*
1378	 * Now validate mapping with desired protection/wiring.
1379	 */
1380	newpte = (vm_offset_t) (pa | pte_prot(pmap, prot) | PG_V);
1381
1382	if (wired)
1383		newpte |= PG_W;
1384	if (va < UPT_MIN_ADDRESS)
1385		newpte |= PG_U;
1386	else if (va < UPT_MAX_ADDRESS)
1387		newpte |= PG_U | PG_RW;
1388
1389	/*
1390	 * if the mapping or permission bits are different, we need
1391	 * to update the pte.
1392	 */
1393	if ((origpte & ~(PG_M|PG_A)) != newpte) {
1394		*pte = (pt_entry_t) newpte;
1395		if (origpte)
1396			pmap_update_1pg(va);
1397	}
1398
1399	if (origpte == 0) {
1400		vm_page_t mpte;
1401		mpte = pmap_use_pt(pmap, va);
1402		if (pv)
1403			pv->pv_ptem = mpte;
1404	}
1405}
1406
1407/*
1408 * Add a list of wired pages to the kva.
1409 * This routine is only used for temporary
1410 * kernel mappings that do not need to have
1411 * page modification or references recorded.
1412 * Note that old mappings are simply written
1413 * over.  The page *must* be wired.
1414 */
1415void
1416pmap_qenter(va, m, count)
1417	vm_offset_t va;
1418	vm_page_t *m;
1419	int count;
1420{
1421	int i;
1422	register pt_entry_t *pte;
1423
1424	for (i = 0; i < count; i++) {
1425		vm_offset_t tva = va + i * PAGE_SIZE;
1426		pt_entry_t npte = (pt_entry_t) ((int) (VM_PAGE_TO_PHYS(m[i]) | PG_RW | PG_V));
1427		pt_entry_t opte;
1428		pte = vtopte(tva);
1429		opte = *pte;
1430		*pte = npte;
1431		if (opte) pmap_update_1pg(tva);
1432	}
1433}
1434/*
1435 * this routine jerks page mappings from the
1436 * kernel -- it is meant only for temporary mappings.
1437 */
1438void
1439pmap_qremove(va, count)
1440	vm_offset_t va;
1441	int count;
1442{
1443	int i;
1444	register pt_entry_t *pte;
1445
1446	for (i = 0; i < count; i++) {
1447		vm_offset_t tva = va + i * PAGE_SIZE;
1448		pte = vtopte(tva);
1449		*pte = 0;
1450		pmap_update_1pg(tva);
1451	}
1452}
1453
1454/*
1455 * add a wired page to the kva.
1456 * Note that in order for the mapping to take effect, you
1457 * should do a pmap_update after doing the pmap_kenter...
1458 */
1459void
1460pmap_kenter(va, pa)
1461	vm_offset_t va;
1462	register vm_offset_t pa;
1463{
1464	register pt_entry_t *pte;
1465	pt_entry_t npte, opte;
1466
1467	npte = (pt_entry_t) ((int) (pa | PG_RW | PG_V));
1468	pte = vtopte(va);
1469	opte = *pte;
1470	*pte = npte;
1471	if (opte) pmap_update_1pg(va);
1472}
1473
1474/*
1475 * remove a page from the kernel pagetables
1476 */
1477void
1478pmap_kremove(va)
1479	vm_offset_t va;
1480{
1481	register pt_entry_t *pte;
1482
1483	pte = vtopte(va);
1484	*pte = (pt_entry_t) 0;
1485	pmap_update_1pg(va);
1486}
1487
1488/*
1489 * this code makes some *MAJOR* assumptions:
1490 * 1. The pmap is the current pmap, and it exists.
1491 * 2. Not wired.
1492 * 3. Read access.
1493 * 4. No page table pages.
1494 * 5. Tlbflush is deferred to calling procedure.
1495 * 6. Page IS managed.
1496 * but is *MUCH* faster than pmap_enter...
1497 */
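/*
 * In this file the callers that satisfy these assumptions are
 * pmap_object_init_pt() and pmap_prefault() below; both hold the page
 * (vm_page_hold) across the call.
 */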
1498
1499static void
1500pmap_enter_quick(pmap, va, pa)
1501	register pmap_t pmap;
1502	vm_offset_t va;
1503	register vm_offset_t pa;
1504{
1505	register pt_entry_t *pte;
1506	register pv_entry_t pv, npv;
1507	int s;
1508
1509	/*
1510	 * Enter on the PV list if part of our managed memory.  Note that we
1511	 * raise IPL while manipulating pv_table since pmap_enter can be
1512	 * called at interrupt time.
1513	 */
1514
1515	pte = vtopte(va);
1516	/* a fault on the page table might occur here */
1517	if (*pte) {
1518		pmap_remove_page(pmap, va);
1519	}
1520
1521	pv = pa_to_pvh(pa);
1522	s = splhigh();
1523	/*
1524	 * No entries yet, use header as the first entry
1525	 */
1526	if (pv->pv_pmap == NULL) {
1527		pv->pv_pmap = pmap;
1528		pv->pv_va = va;
1529		pv->pv_next = NULL;
1530	}
1531	/*
1532	 * There is at least one other VA mapping this page. Place this entry
1533	 * after the header.
1534	 */
1535	else {
1536		npv = get_pv_entry();
1537		npv->pv_va = va;
1538		npv->pv_pmap = pmap;
1539		npv->pv_next = pv->pv_next;
1540		pv->pv_next = npv;
1541		pv = npv;
1542	}
1543	splx(s);
1544	pv->pv_ptem = pmap_use_pt(pmap, va);
1545
1546	/*
1547	 * Increment counters
1548	 */
1549	pmap->pm_stats.resident_count++;
1550
1551	/*
1552	 * Now validate mapping with RO protection
1553	 */
1554	*pte = (pt_entry_t) ((int) (pa | PG_V | PG_U));
1555
1556	return;
1557}
1558
1559#define MAX_INIT_PT (96)
1560/*
1561 * pmap_object_init_pt preloads the ptes for a given object
1562 * into the specified pmap.  This eliminates the blast of soft
1563 * faults on process startup and immediately after an mmap.
1564 */
1565void
1566pmap_object_init_pt(pmap, addr, object, pindex, size)
1567	pmap_t pmap;
1568	vm_offset_t addr;
1569	vm_object_t object;
1570	vm_pindex_t pindex;
1571	vm_size_t size;
1572{
1573	vm_offset_t tmpidx;
1574	int psize;
1575	vm_page_t p;
1576	int objpgs;
1577
1578	psize = (size >> PAGE_SHIFT);
1579
1580	if (!pmap || (object->type != OBJT_VNODE) ||
1581		((psize > MAX_INIT_PT) &&
1582			(object->resident_page_count > MAX_INIT_PT))) {
1583		return;
1584	}
1585
1586	/*
1587	 * remove any already used mappings
1588	 */
1589	pmap_remove( pmap, trunc_page(addr), round_page(addr + size));
1590
1591	/*
1592	 * if we are processing a major portion of the object, then scan the
1593	 * entire thing.
1594	 */
1595	if (psize > (object->size >> 2)) {
1596		objpgs = psize;
1597
1598		for (p = object->memq.tqh_first;
1599		    ((objpgs > 0) && (p != NULL));
1600		    p = p->listq.tqe_next) {
1601
1602			tmpidx = p->pindex;
1603			if (tmpidx < pindex) {
1604				continue;
1605			}
1606			tmpidx -= pindex;
1607			if (tmpidx >= psize) {
1608				continue;
1609			}
1610			if (((p->valid & VM_PAGE_BITS_ALL) == VM_PAGE_BITS_ALL) &&
1611			    (p->busy == 0) &&
1612			    (p->flags & (PG_BUSY | PG_FICTITIOUS)) == 0) {
1613				if (p->queue == PQ_CACHE)
1614					vm_page_deactivate(p);
1615				vm_page_hold(p);
1616				p->flags |= PG_MAPPED;
1617				pmap_enter_quick(pmap,
1618					addr + (tmpidx << PAGE_SHIFT),
1619					VM_PAGE_TO_PHYS(p));
1620				vm_page_unhold(p);
1621			}
1622			objpgs -= 1;
1623		}
1624	} else {
1625		/*
1626		 * else lookup the pages one-by-one.
1627		 */
1628		for (tmpidx = 0; tmpidx < psize; tmpidx += 1) {
1629			p = vm_page_lookup(object, tmpidx + pindex);
1630			if (p && (p->busy == 0) &&
1631			    ((p->valid & VM_PAGE_BITS_ALL) == VM_PAGE_BITS_ALL) &&
1632			    (p->flags & (PG_BUSY | PG_FICTITIOUS)) == 0) {
1633				if (p->queue == PQ_CACHE)
1634					vm_page_deactivate(p);
1635				vm_page_hold(p);
1636				p->flags |= PG_MAPPED;
1637				pmap_enter_quick(pmap,
1638					addr + (tmpidx << PAGE_SHIFT),
1639					VM_PAGE_TO_PHYS(p));
1640				vm_page_unhold(p);
1641			}
1642		}
1643	}
1644	return;
1645}
1646
1647/*
1648 * pmap_prefault provides a quick way of clustering
1649 * page faults into a process's address space.  It is a "cousin"
1650 * of pmap_object_init_pt, except it runs at page fault time instead
1651 * of mmap time.
1652 */
1653#define PFBAK 2
1654#define PFFOR 2
1655#define PAGEORDER_SIZE (PFBAK+PFFOR)
1656
1657static int pmap_prefault_pageorder[] = {
1658	-PAGE_SIZE, PAGE_SIZE, -2 * PAGE_SIZE, 2 * PAGE_SIZE
1659};
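/*
 * With the offsets above, pmap_prefault() probes the pages at addra - 1,
 * addra + 1, addra - 2 and addra + 2 pages -- PFBAK pages behind and PFFOR
 * pages ahead of the faulting address, nearest first.
 */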
1660
1661void
1662pmap_prefault(pmap, addra, entry, object)
1663	pmap_t pmap;
1664	vm_offset_t addra;
1665	vm_map_entry_t entry;
1666	vm_object_t object;
1667{
1668	int i;
1669	vm_offset_t starta;
1670	vm_offset_t addr;
1671	vm_pindex_t pindex;
1672	vm_page_t m;
1673
1674	if (entry->object.vm_object != object)
1675		return;
1676
1677	if (!curproc || (pmap != &curproc->p_vmspace->vm_pmap))
1678		return;
1679
1680	starta = addra - PFBAK * PAGE_SIZE;
1681	if (starta < entry->start) {
1682		starta = entry->start;
1683	} else if (starta > addra) {
1684		starta = 0;
1685	}
1686
1687	for (i = 0; i < PAGEORDER_SIZE; i++) {
1688		vm_object_t lobject;
1689		pt_entry_t *pte;
1690
1691		addr = addra + pmap_prefault_pageorder[i];
1692		if (addr < starta || addr >= entry->end)
1693			continue;
1694
1695		pte = vtopte(addr);
1696		if (*pte)
1697			continue;
1698
1699		pindex = ((addr - entry->start) + entry->offset) >> PAGE_SHIFT;
1700		lobject = object;
1701		for (m = vm_page_lookup(lobject, pindex);
1702		    (!m && (lobject->type == OBJT_DEFAULT) && (lobject->backing_object));
1703		    lobject = lobject->backing_object) {
1704			if (lobject->backing_object_offset & PAGE_MASK)
1705				break;
1706			pindex += (lobject->backing_object_offset >> PAGE_SHIFT);
1707			m = vm_page_lookup(lobject->backing_object, pindex);
1708		}
1709
1710		/*
1711		 * give up when a page is not in memory
1712		 */
1713		if (m == NULL)
1714			break;
1715
1716		if (((m->valid & VM_PAGE_BITS_ALL) == VM_PAGE_BITS_ALL) &&
1717		    (m->busy == 0) &&
1718		    (m->flags & (PG_BUSY | PG_FICTITIOUS)) == 0) {
1719
1720			if (m->queue == PQ_CACHE) {
1721				if ((cnt.v_free_count + cnt.v_cache_count) <
1722					cnt.v_free_min)
1723					break;
1724				vm_page_deactivate(m);
1725			}
1726			vm_page_hold(m);
1727			m->flags |= PG_MAPPED;
1728			pmap_enter_quick(pmap, addr, VM_PAGE_TO_PHYS(m));
1729			vm_page_unhold(m);
1730
1731		}
1732	}
1733}
1734
1735/*
1736 *	Routine:	pmap_change_wiring
1737 *	Function:	Change the wiring attribute for a map/virtual-address
1738 *			pair.
1739 *	In/out conditions:
1740 *			The mapping must already exist in the pmap.
1741 */
1742void
1743pmap_change_wiring(pmap, va, wired)
1744	register pmap_t pmap;
1745	vm_offset_t va;
1746	boolean_t wired;
1747{
1748	register pt_entry_t *pte;
1749
1750	if (pmap == NULL)
1751		return;
1752
1753	pte = pmap_pte(pmap, va);
1754
1755	if (wired && !pmap_pte_w(pte))
1756		pmap->pm_stats.wired_count++;
1757	else if (!wired && pmap_pte_w(pte))
1758		pmap->pm_stats.wired_count--;
1759
1760	/*
1761	 * Wiring is not a hardware characteristic so there is no need to
1762	 * invalidate TLB.
1763	 */
1764	pmap_pte_set_w(pte, wired);
1765}
1766
1767
1768
1769/*
1770 *	Copy the range specified by src_addr/len
1771 *	from the source map to the range dst_addr/len
1772 *	in the destination map.
1773 *
1774 *	This routine is only advisory and need not do anything.
1775 */
1776void
1777pmap_copy(dst_pmap, src_pmap, dst_addr, len, src_addr)
1778	pmap_t dst_pmap, src_pmap;
1779	vm_offset_t dst_addr;
1780	vm_size_t len;
1781	vm_offset_t src_addr;
1782{
1783}
1784
1785/*
1786 *	Routine:	pmap_kernel
1787 *	Function:
1788 *		Returns the physical map handle for the kernel.
1789 */
1790pmap_t
1791pmap_kernel()
1792{
1793	return (kernel_pmap);
1794}
1795
1796/*
1797 *	pmap_zero_page zeros the specified (machine independent)
1798 *	page by mapping the page into virtual memory and using
1799 *	bzero to clear its contents, one machine dependent page
1800 *	at a time.
1801 */
1802void
1803pmap_zero_page(phys)
1804	vm_offset_t phys;
1805{
1806	if (*(int *) CMAP2)
1807		panic("pmap_zero_page: CMAP busy");
1808
1809	*(int *) CMAP2 = PG_V | PG_RW | (phys & PG_FRAME);
1810	bzero(CADDR2, PAGE_SIZE);
1811
1812	*(int *) CMAP2 = 0;
1813	pmap_update_1pg((vm_offset_t) CADDR2);
1814}
1815
1816/*
1817 *	pmap_copy_page copies the specified (machine independent)
1818 *	page by mapping the page into virtual memory and using
1819 *	bcopy to copy the page, one machine dependent page at a
1820 *	time.
1821 */
1822void
1823pmap_copy_page(src, dst)
1824	vm_offset_t src;
1825	vm_offset_t dst;
1826{
1827	if (*(int *) CMAP1 || *(int *) CMAP2)
1828		panic("pmap_copy_page: CMAP busy");
1829
1830	*(int *) CMAP1 = PG_V | PG_RW | (src & PG_FRAME);
1831	*(int *) CMAP2 = PG_V | PG_RW | (dst & PG_FRAME);
1832
1833#if __GNUC__ > 1
1834	memcpy(CADDR2, CADDR1, PAGE_SIZE);
1835#else
1836	bcopy(CADDR1, CADDR2, PAGE_SIZE);
1837#endif
1838	*(int *) CMAP1 = 0;
1839	*(int *) CMAP2 = 0;
1840	pmap_update_2pg( (vm_offset_t) CADDR1, (vm_offset_t) CADDR2);
1841}
1842
1843
1844/*
1845 *	Routine:	pmap_pageable
1846 *	Function:
1847 *		Make the specified pages (by pmap, offset)
1848 *		pageable (or not) as requested.
1849 *
1850 *		A page which is not pageable may not take
1851 *		a fault; therefore, its page table entry
1852 *		must remain valid for the duration.
1853 *
1854 *		This routine is merely advisory; pmap_enter
1855 *		will specify that these pages are to be wired
1856 *		down (or not) as appropriate.
1857 */
1858void
1859pmap_pageable(pmap, sva, eva, pageable)
1860	pmap_t pmap;
1861	vm_offset_t sva, eva;
1862	boolean_t pageable;
1863{
1864}
1865
1866/*
1867 * this routine returns true if a physical page resides
1868 * in the given pmap.
1869 */
1870boolean_t
1871pmap_page_exists(pmap, pa)
1872	pmap_t pmap;
1873	vm_offset_t pa;
1874{
1875	register pv_entry_t pv;
1876	int s;
1877
1878	if (!pmap_is_managed(pa))
1879		return FALSE;
1880
1881	pv = pa_to_pvh(pa);
1882	s = splhigh();
1883
1884	/*
1885	 * Not found, check current mappings returning immediately if found.
1886	 */
1887	if (pv->pv_pmap != NULL) {
1888		for (; pv; pv = pv->pv_next) {
1889			if (pv->pv_pmap == pmap) {
1890				splx(s);
1891				return TRUE;
1892			}
1893		}
1894	}
1895	splx(s);
1896	return (FALSE);
1897}
1898
1899/*
1900 * pmap_testbit tests bits in pte's
1901 * note that the testbit/changebit routines are inline,
1902 * and a lot of things compile-time evaluate.
1903 */
1904static __inline boolean_t
1905pmap_testbit(pa, bit)
1906	register vm_offset_t pa;
1907	int bit;
1908{
1909	register pv_entry_t pv;
1910	pt_entry_t *pte;
1911	int s;
1912
1913	if (!pmap_is_managed(pa))
1914		return FALSE;
1915
1916	pv = pa_to_pvh(pa);
1917	s = splhigh();
1918
1919	/*
1920	 * Not found, check current mappings returning immediately if found.
1921	 */
1922	if (pv->pv_pmap != NULL) {
1923		for (; pv; pv = pv->pv_next) {
1924			/*
1925			 * if the bit being tested is the modified bit, then
1926			 * mark UPAGES as always modified, and ptes as never
1927			 * modified.
1928			 */
1929			if (bit & (PG_A|PG_M)) {
1930				if ((pv->pv_va >= clean_sva) && (pv->pv_va < clean_eva)) {
1931					continue;
1932				}
1933			}
1934			if (!pv->pv_pmap) {
1935#if defined(PMAP_DIAGNOSTIC)
1936				printf("Null pmap (tb) at va: 0x%lx\n", pv->pv_va);
1937#endif
1938				continue;
1939			}
1940			pte = pmap_pte(pv->pv_pmap, pv->pv_va);
1941			if ((int) *pte & bit) {
1942				splx(s);
1943				return TRUE;
1944			}
1945		}
1946	}
1947	splx(s);
1948	return (FALSE);
1949}
1950
1951/*
1952 * this routine is used to modify bits in ptes
1953 */
1954static __inline void
1955pmap_changebit(pa, bit, setem)
1956	vm_offset_t pa;
1957	int bit;
1958	boolean_t setem;
1959{
1960	register pv_entry_t pv;
1961	register pt_entry_t *pte;
1962	vm_offset_t va;
1963	int s;
1964
1965	if (!pmap_is_managed(pa))
1966		return;
1967
1968	pv = pa_to_pvh(pa);
1969	s = splhigh();
1970
1971	/*
1972	 * Loop over all current mappings, setting/clearing as appropriate.
1973	 * If setting RO, do we need to clear the VAC?
1974	 */
1975	if (pv->pv_pmap != NULL) {
1976		for (; pv; pv = pv->pv_next) {
1977			va = pv->pv_va;
1978
1979			/*
1980			 * don't write protect pager mappings
1981			 */
1982			if (!setem && (bit == PG_RW)) {
1983				if (va >= clean_sva && va < clean_eva)
1984					continue;
1985			}
1986			if (!pv->pv_pmap) {
1987#if defined(PMAP_DIAGNOSTIC)
1988				printf("Null pmap (cb) at va: 0x%lx\n", va);
1989#endif
1990				continue;
1991			}
1992
1993			pte = pmap_pte(pv->pv_pmap, va);
1994			if (setem) {
1995				*(int *)pte |= bit;
1996			} else {
1997				if (bit == PG_RW) {
1998					vm_offset_t pbits = *(vm_offset_t *)pte;
1999					if (pbits & PG_M) {
2000						vm_page_t m;
2001						vm_offset_t pa = pbits & PG_FRAME;
2002						m = PHYS_TO_VM_PAGE(pa);
2003						m->dirty = VM_PAGE_BITS_ALL;
2004					}
2005					*(int *)pte &= ~(PG_M|PG_RW);
2006				} else {
2007					*(int *)pte &= ~bit;
2008				}
2009			}
2010		}
2011	}
2012	splx(s);
2013	pmap_update();
2014}
2015
2016/*
2017 *      pmap_page_protect:
2018 *
2019 *      Lower the permission for all mappings to a given page.
2020 */
2021void
2022pmap_page_protect(phys, prot)
2023	vm_offset_t phys;
2024	vm_prot_t prot;
2025{
2026	if ((prot & VM_PROT_WRITE) == 0) {
2027		if (prot & (VM_PROT_READ | VM_PROT_EXECUTE))
2028			pmap_changebit(phys, PG_RW, FALSE);
2029		else
2030			pmap_remove_all(phys);
2031	}
2032}
2033
2034vm_offset_t
2035pmap_phys_address(ppn)
2036	int ppn;
2037{
2038	return (i386_ptob(ppn));
2039}
2040
2041/*
2042 *	pmap_is_referenced:
2043 *
2044 *	Return whether or not the specified physical page was referenced
2045 *	by any physical maps.
2046 */
2047boolean_t
2048pmap_is_referenced(vm_offset_t pa)
2049{
2050	return pmap_testbit((pa), PG_A);
2051}
2052
2053/*
2054 *	pmap_is_modified:
2055 *
2056 *	Return whether or not the specified physical page was modified
2057 *	in any physical maps.
2058 */
2059boolean_t
2060pmap_is_modified(vm_offset_t pa)
2061{
2062	return pmap_testbit((pa), PG_M);
2063}
2064
2065/*
2066 *	Clear the modify bits on the specified physical page.
2067 */
2068void
2069pmap_clear_modify(vm_offset_t pa)
2070{
2071	pmap_changebit((pa), PG_M, FALSE);
2072}
2073
2074/*
2075 *	pmap_clear_reference:
2076 *
2077 *	Clear the reference bit on the specified physical page.
2078 */
2079void
2080pmap_clear_reference(vm_offset_t pa)
2081{
2082	pmap_changebit((pa), PG_A, FALSE);
2083}
2084
2085/*
2086 * Miscellaneous support routines follow
2087 */
2088
2089static void
2090i386_protection_init()
2091{
2092	register int *kp, prot;
2093
2094	kp = protection_codes;
2095	for (prot = 0; prot < 8; prot++) {
2096		switch (prot) {
2097		case VM_PROT_NONE | VM_PROT_NONE | VM_PROT_NONE:
2098			/*
2099			 * Read access is also 0. There isn't any execute bit,
2100			 * so just make it readable.
2101			 */
2102		case VM_PROT_READ | VM_PROT_NONE | VM_PROT_NONE:
2103		case VM_PROT_READ | VM_PROT_NONE | VM_PROT_EXECUTE:
2104		case VM_PROT_NONE | VM_PROT_NONE | VM_PROT_EXECUTE:
2105			*kp++ = 0;
2106			break;
2107		case VM_PROT_NONE | VM_PROT_WRITE | VM_PROT_NONE:
2108		case VM_PROT_NONE | VM_PROT_WRITE | VM_PROT_EXECUTE:
2109		case VM_PROT_READ | VM_PROT_WRITE | VM_PROT_NONE:
2110		case VM_PROT_READ | VM_PROT_WRITE | VM_PROT_EXECUTE:
2111			*kp++ = PG_RW;
2112			break;
2113		}
2114	}
2115}
2116
2117/*
2118 * Map a set of physical memory pages into the kernel virtual
2119 * address space. Return a pointer to where it is mapped. This
2120 * routine is intended to be used for mapping device memory,
2121 * NOT real memory. The non-cacheable bits are set on each
2122 * mapped page.
2123 */
2124void *
2125pmap_mapdev(pa, size)
2126	vm_offset_t pa;
2127	vm_size_t size;
2128{
2129	vm_offset_t va, tmpva;
2130	pt_entry_t *pte;
2131
2132	size = roundup(size, PAGE_SIZE);
2133
2134	va = kmem_alloc_pageable(kernel_map, size);
2135	if (!va)
2136		panic("pmap_mapdev: Couldn't alloc kernel virtual memory");
2137
2138	pa = pa & PG_FRAME;
2139	for (tmpva = va; size > 0;) {
2140		pte = vtopte(tmpva);
2141		*pte = (pt_entry_t) ((int) (pa | PG_RW | PG_V | PG_N));
2142		size -= PAGE_SIZE;
2143		tmpva += PAGE_SIZE;
2144		pa += PAGE_SIZE;
2145	}
2146	pmap_update();
2147
2148	return ((void *) va);
2149}
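/*
 * Usage sketch (not compiled): a driver could map a device's registers and
 * touch them through the uncached (PG_N) mapping like this.  The physical
 * address, size and helper name are made up for the example.
 */
#if 0
static void
example_map_device(void)
{
	volatile char *regs;

	regs = (volatile char *) pmap_mapdev((vm_offset_t) 0xf0000000, 0x1000);
	regs[0] = 1;
}
#endif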
2150
2151#if defined(PMAP_DEBUG)
2152pmap_pid_dump(int pid) {
2153	pmap_t pmap;
2154	struct proc *p;
2155	int npte = 0;
2156	int index;
2157	for (p = allproc.lh_first; p != NULL; p = p->p_list.le_next) {
2158		if (p->p_pid != pid)
2159			continue;
2160
2161		if (p->p_vmspace) {
2162			int i,j;
2163			index = 0;
2164			pmap = &p->p_vmspace->vm_pmap;
2165			for(i=0;i<1024;i++) {
2166				pd_entry_t *pde;
2167				pt_entry_t *pte;
2168				unsigned base = i << PD_SHIFT;
2169
2170				pde = &pmap->pm_pdir[i];
2171				if (pde && pmap_pde_v(pde)) {
2172					for(j=0;j<1024;j++) {
2173						unsigned va = base + (j << PAGE_SHIFT);
2174						if (va >= (vm_offset_t) VM_MIN_KERNEL_ADDRESS) {
2175							if (index) {
2176								index = 0;
2177								printf("\n");
2178							}
2179							return npte;
2180						}
2181						pte = pmap_pte( pmap, va);
2182						if (pte && pmap_pte_v(pte)) {
2183							vm_offset_t pa;
2184							vm_page_t m;
2185							pa = *(int *)pte;
2186							m = PHYS_TO_VM_PAGE((pa & PG_FRAME));
2187							printf("va: 0x%x, pt: 0x%x, h: %d, w: %d, f: 0x%x",
2188								va, pa, m->hold_count, m->wire_count, m->flags);
2189							npte++;
2190							index++;
2191							if (index >= 2) {
2192								index = 0;
2193								printf("\n");
2194							} else {
2195								printf(" ");
2196							}
2197						}
2198					}
2199				}
2200			}
2201		}
2202	}
2203	return npte;
2204}
2205#endif
2206
2207#if defined(DEBUG)
2208
2209static void	pads __P((pmap_t pm));
2210static void	pmap_pvdump __P((vm_offset_t pa));
2211
2212/* print address space of pmap */
2213static void
2214pads(pm)
2215	pmap_t pm;
2216{
2217	unsigned va, i, j;
2218	pt_entry_t *ptep;
2219
2220	if (pm == kernel_pmap)
2221		return;
2222	for (i = 0; i < 1024; i++)
2223		if (pm->pm_pdir[i])
2224			for (j = 0; j < 1024; j++) {
2225				va = (i << PDRSHIFT) + (j << PAGE_SHIFT);
2226				if (pm == kernel_pmap && va < KERNBASE)
2227					continue;
2228				if (pm != kernel_pmap && va > UPT_MAX_ADDRESS)
2229					continue;
2230				ptep = pmap_pte(pm, va);
2231				if (pmap_pte_v(ptep))
2232					printf("%x:%x ", va, *(int *) ptep);
2233			}
2234
2235}
2236
2237static void
2238pmap_pvdump(pa)
2239	vm_offset_t pa;
2240{
2241	register pv_entry_t pv;
2242
2243	printf("pa %x", pa);
2244	for (pv = pa_to_pvh(pa); pv; pv = pv->pv_next) {
2245#ifdef used_to_be
2246		printf(" -> pmap %x, va %x, flags %x",
2247		    pv->pv_pmap, pv->pv_va, pv->pv_flags);
2248#endif
2249		printf(" -> pmap %x, va %x",
2250		    pv->pv_pmap, pv->pv_va);
2251		pads(pv->pv_pmap);
2252	}
2253	printf(" ");
2254}
2255#endif
2256