pmap.c revision 66463
1/*
2 * Copyright (c) 1991 Regents of the University of California.
3 * All rights reserved.
4 * Copyright (c) 1994 John S. Dyson
5 * All rights reserved.
6 * Copyright (c) 1994 David Greenman
7 * All rights reserved.
8 * Copyright (c) 1998,2000 Doug Rabson
9 * All rights reserved.
10 *
11 * This code is derived from software contributed to Berkeley by
12 * the Systems Programming Group of the University of Utah Computer
13 * Science Department and William Jolitz of UUNET Technologies Inc.
14 *
15 * Redistribution and use in source and binary forms, with or without
16 * modification, are permitted provided that the following conditions
17 * are met:
18 * 1. Redistributions of source code must retain the above copyright
19 *    notice, this list of conditions and the following disclaimer.
20 * 2. Redistributions in binary form must reproduce the above copyright
21 *    notice, this list of conditions and the following disclaimer in the
22 *    documentation and/or other materials provided with the distribution.
23 * 3. All advertising materials mentioning features or use of this software
24 *    must display the following acknowledgement:
25 *	This product includes software developed by the University of
26 *	California, Berkeley and its contributors.
27 * 4. Neither the name of the University nor the names of its contributors
28 *    may be used to endorse or promote products derived from this software
29 *    without specific prior written permission.
30 *
31 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
32 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
33 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
34 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
35 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
36 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
37 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
38 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
39 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
40 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
41 * SUCH DAMAGE.
42 *
43 *	from:	@(#)pmap.c	7.7 (Berkeley)	5/12/91
44 *	from:	i386 Id: pmap.c,v 1.193 1998/04/19 15:22:48 bde Exp
45 *		with some ideas from NetBSD's alpha pmap
46 * $FreeBSD: head/sys/ia64/ia64/pmap.c 66463 2000-09-29 16:52:50Z dfr $
47 */
48
49/*
50 *	Manages physical address maps.
51 *
52 *	In addition to hardware address maps, this
53 *	module is called upon to provide software-use-only
54 *	maps which may or may not be stored in the same
55 *	form as hardware maps.  These pseudo-maps are
56 *	used to store intermediate results from copy
57 *	operations to and from address spaces.
58 *
59 *	Since the information managed by this module is
60 *	also stored by the logical address mapping module,
61 *	this module may throw away valid virtual-to-physical
62 *	mappings at almost any time.  However, invalidations
63 *	of virtual-to-physical mappings must be done as
64 *	requested.
65 *
66 *	In order to cope with hardware architectures which
67 *	make virtual-to-physical map invalidates expensive,
68 *	this module may delay invalidate or reduced protection
69 *	operations until such time as they are actually
70 *	necessary.  This module is given full information as
71 *	to which processors are currently using which maps,
72 *	and to when physical maps must be made correct.
73 */
74
75/*
76 * Following the Linux model, region IDs are allocated in groups of
77 * eight so that a single region ID can be used for as many RRs as we
78 * want by encoding the RR number into the low bits of the ID.
79 *
80 * We reserve region ID 0 for the kernel and allocate the remaining
81 * IDs for user pmaps.
82 *
83 * Regions 0..4
84 *	User virtually mapped
85 *
86 * Region 5
87 *	Kernel virtually mapped
88 *
89 * Region 6
90 *	Kernel physically mapped uncacheable
91 *
92 * Region 7
93 *	Kernel physically mapped cacheable
94 */
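/*
 * For example, a user pmap handed base ID 8 under this scheme would use
 * IDs 8..15, one per region register: 8|0 for rr0, 8|1 for rr1, and so
 * on (the kernel's group 0 works the same way, which is how region 5
 * gets region ID 5 in pmap_bootstrap()).  This is illustrative only;
 * the user region ID allocator itself is not part of this file.
 */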
95
96#include <sys/param.h>
97#include <sys/systm.h>
98#include <sys/proc.h>
99#include <sys/msgbuf.h>
100#include <sys/vmmeter.h>
101#include <sys/mman.h>
102
103#include <vm/vm.h>
104#include <vm/vm_param.h>
105#include <sys/lock.h>
106#include <vm/vm_kern.h>
107#include <vm/vm_page.h>
108#include <vm/vm_map.h>
109#include <vm/vm_object.h>
110#include <vm/vm_extern.h>
111#include <vm/vm_pageout.h>
112#include <vm/vm_pager.h>
113#include <vm/vm_zone.h>
114
115#include <sys/user.h>
116
117#include <machine/md_var.h>
118
119#ifndef PMAP_SHPGPERPROC
120#define PMAP_SHPGPERPROC 200
121#endif
122
123#if defined(DIAGNOSTIC)
124#define PMAP_DIAGNOSTIC
125#endif
126
127#define MINPV 2048
128
129#if 0
130#define PMAP_DIAGNOSTIC
131#define PMAP_DEBUG
132#endif
133
134#if !defined(PMAP_DIAGNOSTIC)
135#define PMAP_INLINE __inline
136#else
137#define PMAP_INLINE
138#endif
139
140#if 0
141
142static void
143pmap_break(void)
144{
145}
146
147/* #define PMAP_DEBUG_VA(va) if ((va) == 0x120058000) pmap_break(); else */
148
149#endif
150
151#ifndef PMAP_DEBUG_VA
152#define PMAP_DEBUG_VA(va) do {} while(0)
153#endif
154
155/*
156 * Get PDEs and PTEs for user/kernel address space
157 */
158#define pmap_pte_w(pte)		((pte)->pte_ig & PTE_IG_WIRED)
159#define pmap_pte_managed(pte)	((pte)->pte_ig & PTE_IG_MANAGED)
160#define pmap_pte_v(pte)		((pte)->pte_p)
161#define pmap_pte_pa(pte)	(((pte)->pte_ppn) << 12)
162#define pmap_pte_prot(pte)	(((pte)->pte_ar << 2) | (pte)->pte_pl)
163
164#define pmap_pte_set_w(pte, v) ((v)?((pte)->pte_ig |= PTE_IG_WIRED) \
165				:((pte)->pte_ig &= ~PTE_IG_WIRED))
166#define pmap_pte_set_prot(pte, v) do {		\
167    (pte)->pte_ar = v >> 2;			\
168    (pte)->pte_pl = v & 3;			\
169} while (0)
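/*
 * Notes on the encodings above: pte_ppn is a page frame number kept in
 * 4K units (hence the shift by 12 in pmap_pte_pa), and the combined
 * protection value handled by pmap_pte_prot/pmap_pte_set_prot packs the
 * access-rights field in the upper bits with the privilege level in the
 * low two bits -- the same (ar << 2) | pl form used by the
 * protection_codes[] table below.  PTE_IG_WIRED and PTE_IG_MANAGED live
 * in the pte's software-only "ignored" bits.
 */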
170
171/*
172 * Given a map and a machine independent protection code,
173 * convert to an ia64 protection code.
174 */
175#define pte_prot(m, p)		(protection_codes[m == pmap_kernel() ? 0 : 1][p])
176int	protection_codes[2][8];
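/*
 * For instance, pte_prot(pmap, VM_PROT_READ | VM_PROT_EXECUTE) for a
 * user pmap evaluates to (PTE_AR_RX << 2) | PTE_PL_USER, as filled in
 * by ia64_protection_init() below.
 */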
177
178/*
179 * Return non-zero if this pmap is currently active
180 */
181#define pmap_isactive(pmap)	(pmap->pm_active)
182
183/*
184 * Statically allocated kernel pmap
185 */
186static struct pmap kernel_pmap_store;
187pmap_t kernel_pmap;
188
189vm_offset_t avail_start;	/* PA of first available physical page */
190vm_offset_t avail_end;		/* PA of last available physical page */
191vm_offset_t virtual_avail;	/* VA of first avail page (after kernel bss) */
192vm_offset_t virtual_end;	/* VA of last avail page (end of kernel AS) */
193static boolean_t pmap_initialized = FALSE;	/* Has pmap_init completed? */
194
195
196vm_offset_t kernel_vm_end;
197
198/*
199 * Data for the ASN allocator
200 */
201static int pmap_maxasn;
202static int pmap_nextasn = 0;
203static u_int pmap_current_asngen = 1;
204static pmap_t pmap_active = 0;
205
206/*
207 * Data for the pv entry allocation mechanism
208 */
209static vm_zone_t pvzone;
210static struct vm_zone pvzone_store;
211static struct vm_object pvzone_obj;
212static vm_zone_t pvbootzone;
213static struct vm_zone pvbootzone_store;
214static int pv_entry_count=0, pv_entry_max=0, pv_entry_high_water=0;
215static int pmap_pagedaemon_waken = 0;
216static struct pv_entry *pvinit;
217static struct pv_entry *pvbootinit;
218
219static PMAP_INLINE void	free_pv_entry __P((pv_entry_t pv));
220static pv_entry_t get_pv_entry __P((void));
221static void	ia64_protection_init __P((void));
222static void	pmap_changebit __P((vm_page_t m, int bit, boolean_t setem));
223
224static void	pmap_remove_all __P((vm_page_t m));
225static void	pmap_enter_quick __P((pmap_t pmap, vm_offset_t va, vm_page_t m));
226static boolean_t pmap_is_referenced __P((vm_page_t m));
227
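/*
 * Steal raw physical memory from phys_avail[], a list of (start, end)
 * physical address pairs terminated by zero entries.  Banks too small
 * to satisfy the request are discarded.  The memory is returned through
 * the region 7 cacheable identity mapping, so it is usable before any
 * of the pmap data structures have been set up.
 */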
228vm_offset_t
229pmap_steal_memory(vm_size_t size)
230{
231	vm_size_t bank_size;
232	vm_offset_t pa, va;
233
234	size = round_page(size);
235
236	bank_size = phys_avail[1] - phys_avail[0];
237	while (size > bank_size) {
238		int i;
239		for (i = 0; phys_avail[i+2]; i+= 2) {
240			phys_avail[i] = phys_avail[i+2];
241			phys_avail[i+1] = phys_avail[i+3];
242		}
243		phys_avail[i] = 0;
244		phys_avail[i+1] = 0;
245		if (!phys_avail[0])
246			panic("pmap_steal_memory: out of memory");
247		bank_size = phys_avail[1] - phys_avail[0];
248	}
249
250	pa = phys_avail[0];
251	phys_avail[0] += size;
252
253	va = IA64_PHYS_TO_RR7(pa);
254	bzero((caddr_t) va, size);
255	return va;
256}
257
258/*
259 *	Bootstrap the system enough to run with virtual memory.
260 */
261void
262pmap_bootstrap()
263{
264	int i;
265	int boot_pvs;
266
267	/*
268	 * Setup ASNs
269	 */
270	pmap_nextasn = 0;
271	pmap_maxasn = 0;
272	pmap_current_asngen = 1;
273
274	avail_start = phys_avail[0];
275	for (i = 0; phys_avail[i+2]; i+= 2) ;
276	avail_end = phys_avail[i+1];
277
278	virtual_avail = IA64_RR_BASE(5);
279	virtual_end = IA64_RR_BASE(6)-1;
280
281	/*
282	 * Initialize protection array.
283	 */
284	ia64_protection_init();
285
286	/*
287	 * The kernel's pmap is statically allocated so we don't have to use
288	 * pmap_create, which is unlikely to work correctly at this part of
289	 * the boot sequence (XXX and which no longer exists).
290	 */
291	kernel_pmap = &kernel_pmap_store;
292	kernel_pmap->pm_count = 1;
293	kernel_pmap->pm_active = 1;
294	kernel_pmap->pm_asn = 0;
295	kernel_pmap->pm_asngen = pmap_current_asngen;
296	pmap_nextasn = 1;
297	TAILQ_INIT(&kernel_pmap->pm_pvlist);
298
299	/*
300	 * Region 5 is mapped via the vhpt.
301	 */
302	ia64_set_rr(IA64_RR_BASE(5),
303		    (5 << 8) | (PAGE_SHIFT << 2) | 1);
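	/*
	 * The value written above encodes region ID 5 in the bits from 8
	 * up, a preferred page size of PAGE_SHIFT in bits 2..7 and, via
	 * bit 0, enables the VHPT walker for the region.
	 */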
304
305	/*
306	 * Region 6 is direct mapped UC and region 7 is direct mapped
307	 * WB (cacheable).  The details of this are controlled by the
308	 * Alt {I,D}TLB handlers.  Here we just make sure that they have
309	 * the largest possible page size to minimise TLB usage.
310	 */
311#if 0
312	ia64_set_rr(IA64_RR_BASE(6), (6 << 8) | (28 << 2));
313	ia64_set_rr(IA64_RR_BASE(7), (7 << 8) | (28 << 2));
314#endif
315
316	/*
317	 * We need some PVs to cope with pmap_kenter() calls prior to
318	 * pmap_init(). This is all a bit flaky and needs to be
319	 * rethought, probably by avoiding the zone allocator
320	 * entirely.
321	 */
322  	boot_pvs = 32768;
323	pvbootzone = &pvbootzone_store;
324	pvbootinit = (struct pv_entry *)
325		pmap_steal_memory(boot_pvs * sizeof (struct pv_entry));
326	zbootinit(pvbootzone, "PV ENTRY", sizeof (struct pv_entry),
327		  pvbootinit, boot_pvs);
328
329	/*
330	 * Set up proc0's PCB.
331	 */
332#if 0
333	proc0.p_addr->u_pcb.pcb_hw.apcb_asn = 0;
334#endif
335}
336
337/*
338 *	Initialize the pmap module.
339 *	Called by vm_init, to initialize any structures that the pmap
340 *	system needs to map virtual memory.
341 *	pmap_init has been enhanced to support discontiguous physical
342 *	memory in a fairly consistent way.
343 */
344void
345pmap_init(phys_start, phys_end)
346	vm_offset_t phys_start, phys_end;
347{
348	int i;
349	int initial_pvs;
350
351	/*
352	 * Allocate memory for random pmap data structures.  Includes the
353	 * pv_head_table.
354	 */
355
356	for(i = 0; i < vm_page_array_size; i++) {
357		vm_page_t m;
358
359		m = &vm_page_array[i];
360		TAILQ_INIT(&m->md.pv_list);
361		m->md.pv_list_count = 0;
362 	}
363
364	/*
365	 * init the pv free list
366	 */
367	initial_pvs = vm_page_array_size;
368	if (initial_pvs < MINPV)
369		initial_pvs = MINPV;
370	pvzone = &pvzone_store;
371	pvinit = (struct pv_entry *) kmem_alloc(kernel_map,
372		initial_pvs * sizeof (struct pv_entry));
373	zbootinit(pvzone, "PV ENTRY", sizeof (struct pv_entry), pvinit,
374		  vm_page_array_size);
375
376	/*
377	 * Now it is safe to enable pv_table recording.
378	 */
379	pmap_initialized = TRUE;
380}
381
382/*
383 * Initialize the address space (zone) for the pv_entries.  Set a
384 * high water mark so that the system can recover from excessive
385 * numbers of pv entries.
386 */
387void
388pmap_init2()
389{
390	pv_entry_max = PMAP_SHPGPERPROC * maxproc + vm_page_array_size;
391	pv_entry_high_water = 9 * (pv_entry_max / 10);
392	zinitna(pvzone, &pvzone_obj, NULL, 0, pv_entry_max, ZONE_INTERRUPT, 1);
393}
394
395
396/***************************************************
397 * Manipulate TLBs for a pmap
398 ***************************************************/
399
400static void
401pmap_invalidate_asn(pmap_t pmap)
402{
403	pmap->pm_asngen = 0;
404}
405
406static void
407pmap_invalidate_page(pmap_t pmap, vm_offset_t va)
408{
409#if 0
410	if (pmap_isactive(pmap)) {
411		IA64_TBIS(va);
412		ia64_pal_imb();		/* XXX overkill? */
413	} else
414		pmap_invalidate_asn(pmap);
415#endif
416}
417
418static void
419pmap_invalidate_all(pmap_t pmap)
420{
421#if 0
422	if (pmap_isactive(pmap)) {
423		IA64_TBIA();
424		ia64_pal_imb();		/* XXX overkill? */
425	} else
426		pmap_invalidate_asn(pmap);
427#endif
428}
429
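/*
 * Allocate an address space number for a pmap.  Each pmap is stamped
 * with an (ASN, generation) pair; a pmap whose pm_asngen no longer
 * matches pmap_current_asngen simply receives a fresh ASN the next time
 * it is activated.  When the ASN space is exhausted a new generation is
 * started, which flushes the TLB and implicitly invalidates every other
 * pmap's ASN.  The body below is currently compiled out.
 */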
430static void
431pmap_get_asn(pmap_t pmap)
432{
433#if 0
434	if (pmap->pm_asngen != pmap_current_asngen) {
435		if (pmap_nextasn > pmap_maxasn) {
436			/*
437			 * Start a new ASN generation.
438			 *
439			 * Invalidate all per-process mappings and I-cache
440			 */
441			pmap_nextasn = 0;
442			pmap_current_asngen++;
443
444			if (pmap_current_asngen == 0) {
445				/*
446				 * Clear the pm_asngen of all pmaps.
447				 * This is safe since it is only called from
448				 * pmap_activate after it has deactivated
449				 * the old pmap.
450				 */
451				struct proc *p;
452				pmap_t tpmap;
453
454#ifdef PMAP_DIAGNOSTIC
455				printf("pmap_get_asn: generation rollover\n");
456#endif
457				pmap_current_asngen = 1;
458				LIST_FOREACH(p, &allproc, p_list) {
459					if (p->p_vmspace) {
460						tpmap = vmspace_pmap(p->p_vmspace);
461						tpmap->pm_asngen = 0;
462					}
463				}
464			}
465
466			/*
467			 * Since we are about to start re-using ASNs, we must
468			 * clear out the TLB and the I-cache since they are tagged
469			 * with the ASN.
470			 */
471			IA64_TBIAP();
472			ia64_pal_imb();	/* XXX overkill? */
473		}
474		pmap->pm_asn = pmap_nextasn++;
475		pmap->pm_asngen = pmap_current_asngen;
476	}
477#endif
478}
479
480/***************************************************
481 * Low level helper routines.....
482 ***************************************************/
483
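/*
 * The VHPT is a hash table of long-format PTEs indexed by
 * ia64_thash(va).  Each slot doubles as the hardware-visible translation
 * and the head of a software collision chain: pte_chain holds the
 * physical address of the next pv_entry whose mapping hashes to the same
 * slot.  When the hardware slot itself is rewritten, the tag is
 * invalidated first and written back last, with memory fences in
 * between, so the VHPT walker never consumes a half-updated entry.
 */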
484/*
485 * Install a pte into the VHPT
486 */
487static PMAP_INLINE void
488pmap_install_pte(struct ia64_lpte *vhpte, struct ia64_lpte *pte)
489{
490	u_int64_t *vhp, *p;
491
492	/* invalidate the pte */
493	atomic_set_64(&vhpte->pte_tag, 1L << 63);
494	ia64_mf();			/* make sure everyone sees */
495
496	vhp = (u_int64_t *) vhpte;
497	p = (u_int64_t *) pte;
498
499	vhp[0] = p[0];
500	vhp[1] = p[1];
501	vhp[2] = p[2];			/* install the tag, clearing ti */
502
503	ia64_mf();
504}
505
506/*
507 * Compare essential parts of pte.
508 */
509static PMAP_INLINE int
510pmap_equal_pte(struct ia64_lpte *pte1, struct ia64_lpte *pte2)
511{
512	return *(u_int64_t *) pte1 == *(u_int64_t *) pte2;
513}
514
515/*
516 * this routine defines the region(s) of memory that should
517 * not be tested for the modified bit.
518 */
519static PMAP_INLINE int
520pmap_track_modified(vm_offset_t va)
521{
522	if ((va < clean_sva) || (va >= clean_eva))
523		return 1;
524	else
525		return 0;
526}
527
528/*
529 * Create the UPAGES for a new process.
530 * This routine directly affects the fork perf for a process.
531 */
532void
533pmap_new_proc(struct proc *p)
534{
535	int i;
536	vm_object_t upobj;
537	vm_page_t m;
538	struct user *up;
539
540	/*
541	 * allocate object for the upages
542	 */
543	if ((upobj = p->p_upages_obj) == NULL) {
544		upobj = vm_object_allocate( OBJT_DEFAULT, UPAGES);
545		p->p_upages_obj = upobj;
546	}
547
548	/* get a kernel virtual address for the UPAGES for this proc */
549	if ((up = p->p_addr) == NULL) {
550		up = (struct user *) kmem_alloc_nofault(kernel_map,
551				UPAGES * PAGE_SIZE);
552		if (up == NULL)
553			panic("pmap_new_proc: u_map allocation failed");
554		p->p_addr = up;
555	}
556
557	for(i=0;i<UPAGES;i++) {
558		/*
559		 * Get a kernel stack page
560		 */
561		m = vm_page_grab(upobj, i, VM_ALLOC_NORMAL | VM_ALLOC_RETRY);
562
563		/*
564		 * Wire the page
565		 */
566		m->wire_count++;
567		cnt.v_wire_count++;
568
569		pmap_kenter(((vm_offset_t) p->p_addr) + i * PAGE_SIZE,
570			VM_PAGE_TO_PHYS(m));
571
572		pmap_invalidate_page(kernel_pmap,
573				     (vm_offset_t)up + i * PAGE_SIZE);
574
575		vm_page_wakeup(m);
576		vm_page_flag_clear(m, PG_ZERO);
577		vm_page_flag_set(m, PG_MAPPED | PG_WRITEABLE);
578		m->valid = VM_PAGE_BITS_ALL;
579	}
580}
581
582/*
583 * Dispose the UPAGES for a process that has exited.
584 * This routine directly impacts the exit perf of a process.
585 */
586void
587pmap_dispose_proc(p)
588	struct proc *p;
589{
590	int i;
591	vm_object_t upobj;
592	vm_page_t m;
593
594	upobj = p->p_upages_obj;
595
596	for(i=0;i<UPAGES;i++) {
597
598		if ((m = vm_page_lookup(upobj, i)) == NULL)
599			panic("pmap_dispose_proc: upage already missing???");
600
601		vm_page_busy(m);
602
603		pmap_kremove((vm_offset_t)p->p_addr + PAGE_SIZE * i);
604
605		vm_page_unwire(m, 0);
606		vm_page_free(m);
607	}
608}
609
610/*
611 * Allow the UPAGES for a process to be prejudicially paged out.
612 */
613void
614pmap_swapout_proc(p)
615	struct proc *p;
616{
617	int i;
618	vm_object_t upobj;
619	vm_page_t m;
620
621	/*
622	 * Make sure we aren't fpcurproc.
623	 */
624	ia64_fpstate_save(p, 1);
625
626	upobj = p->p_upages_obj;
627	/*
628	 * let the upages be paged
629	 */
630	for(i=0;i<UPAGES;i++) {
631		if ((m = vm_page_lookup(upobj, i)) == NULL)
632			panic("pmap_swapout_proc: upage already missing???");
633		vm_page_dirty(m);
634		vm_page_unwire(m, 0);
635		pmap_kremove((vm_offset_t)p->p_addr + PAGE_SIZE * i);
636	}
637}
638
639/*
640 * Bring the UPAGES for a specified process back in.
641 */
642void
643pmap_swapin_proc(p)
644	struct proc *p;
645{
646	int i,rv;
647	vm_object_t upobj;
648	vm_page_t m;
649
650	upobj = p->p_upages_obj;
651	for(i=0;i<UPAGES;i++) {
652
653		m = vm_page_grab(upobj, i, VM_ALLOC_NORMAL | VM_ALLOC_RETRY);
654
655		pmap_kenter(((vm_offset_t) p->p_addr) + i * PAGE_SIZE,
656			VM_PAGE_TO_PHYS(m));
657
658		if (m->valid != VM_PAGE_BITS_ALL) {
659			rv = vm_pager_get_pages(upobj, &m, 1, 0);
660			if (rv != VM_PAGER_OK)
661				panic("pmap_swapin_proc: cannot get upages for proc: %d\n", p->p_pid);
662			m = vm_page_lookup(upobj, i);
663			m->valid = VM_PAGE_BITS_ALL;
664		}
665
666		vm_page_wire(m);
667		vm_page_wakeup(m);
668		vm_page_flag_set(m, PG_MAPPED | PG_WRITEABLE);
669	}
670
671	/*
672	 * The pcb may be at a different physical address now so cache the
673	 * new address.
674	 */
675	p->p_md.md_pcbpaddr = (void*) vtophys((vm_offset_t) &p->p_addr->u_pcb);
676}
677
678/***************************************************
679 * Page table page management routines.....
680 ***************************************************/
681
682void
683pmap_pinit0(pmap)
684	struct pmap *pmap;
685{
686	/*
687	 * kernel_pmap is the same as any other pmap.
688	 */
689	pmap_pinit(pmap);
690	pmap->pm_flags = 0;
691	pmap->pm_count = 1;
692	pmap->pm_ptphint = NULL;
693	pmap->pm_active = 0;
694	pmap->pm_asn = 0;
695	pmap->pm_asngen = 0;
696	TAILQ_INIT(&pmap->pm_pvlist);
697	bzero(&pmap->pm_stats, sizeof pmap->pm_stats);
698}
699
700/*
701 * Initialize a preallocated and zeroed pmap structure,
702 * such as one in a vmspace structure.
703 */
704void
705pmap_pinit(pmap)
706	register struct pmap *pmap;
707{
708	pmap->pm_flags = 0;
709	pmap->pm_count = 1;
710	pmap->pm_ptphint = NULL;
711	pmap->pm_active = 0;
712	pmap->pm_asn = 0;
713	pmap->pm_asngen = 0;
714	TAILQ_INIT(&pmap->pm_pvlist);
715	bzero(&pmap->pm_stats, sizeof pmap->pm_stats);
716}
717
718/*
719 * Wire in kernel global address entries.  To avoid a race condition
720 * between pmap initialization and pmap_growkernel, this procedure
721 * should be called after the vmspace is attached to the process
722 * but before this pmap is activated.
723 */
724void
725pmap_pinit2(pmap)
726	struct pmap *pmap;
727{
728}
729
730/***************************************************
731 * Pmap allocation/deallocation routines.
732 ***************************************************/
733
734/*
735 * Release any resources held by the given physical map.
736 * Called when a pmap initialized by pmap_pinit is being released.
737 * Should only be called if the map contains no valid mappings.
738 */
739void
740pmap_release(pmap_t pmap)
741{
742	/*
743	 * XXX The i386 pmap checks its pte object's reference count here;
744	 * this pmap has no pte object, so there is nothing to check.
745	 */
746}
747
748/*
749 * grow the number of kernel page table entries, if needed
750 */
751void
752pmap_growkernel(vm_offset_t addr)
753{
754}
755
756/*
757 *	Retire the given physical map from service.
758 *	Should only be called if the map contains
759 *	no valid mappings.
760 */
761void
762pmap_destroy(pmap_t pmap)
763{
764	int count;
765
766	if (pmap == NULL)
767		return;
768
769	count = --pmap->pm_count;
770	if (count == 0) {
771		pmap_release(pmap);
772		panic("destroying a pmap is not yet implemented");
773	}
774}
775
776/*
777 *	Add a reference to the specified pmap.
778 */
779void
780pmap_reference(pmap_t pmap)
781{
782	if (pmap != NULL) {
783		pmap->pm_count++;
784	}
785}
786
787/***************************************************
788 * page management routines.
789 ***************************************************/
790
791/*
792 * free the pv_entry back to the free list
793 */
794static PMAP_INLINE void
795free_pv_entry(pv_entry_t pv)
796{
797	pv_entry_count--;
798	zfreei(pvzone, pv);
799}
800
801/*
802 * get a new pv_entry, allocating a block from the system
803 * when needed.
804 * the memory allocation is performed bypassing the malloc code
805 * because of the possibility of allocations at interrupt time.
806 */
807static pv_entry_t
808get_pv_entry(void)
809{
810	if (!pvinit)
811		return zalloci(pvbootzone);
812
813	pv_entry_count++;
814	if (pv_entry_high_water &&
815		(pv_entry_count > pv_entry_high_water) &&
816		(pmap_pagedaemon_waken == 0)) {
817		pmap_pagedaemon_waken = 1;
818		wakeup (&vm_pages_needed);
819	}
820	return zalloci(pvzone);
821}
822
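/*
 * A new mapping is linked at the head of its collision chain.  The
 * hardware slot is only overwritten if it does not currently hold a
 * valid translation, so an existing mapping cached in the slot is left
 * undisturbed.
 */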
823/*
824 * Add a pv_entry to the VHPT.
825 */
826static void
827pmap_enter_vhpt(pv_entry_t pv)
828{
829	struct ia64_lpte *vhpte;
830
831	vhpte = (struct ia64_lpte *) ia64_thash(pv->pv_va);
832
833	pv->pv_pte.pte_chain = vhpte->pte_chain;
834	vhpte->pte_chain = ia64_tpa((vm_offset_t) pv);
835
836	if (!vhpte->pte_p && pv->pv_pte.pte_p)
837		pmap_install_pte(vhpte, &pv->pv_pte);
838	else
839		ia64_mf();
840}
841
842/*
843 * Update VHPT after pv->pv_pte has changed.
844 */
845static void
846pmap_update_vhpt(pv_entry_t pv)
847{
848	struct ia64_lpte *vhpte;
849
850	vhpte = (struct ia64_lpte *) ia64_thash(pv->pv_va);
851
852	if ((!vhpte->pte_p || vhpte->pte_tag == pv->pv_pte.pte_tag)
853	    && pv->pv_pte.pte_p)
854		pmap_install_pte(vhpte, &pv->pv_pte);
855}
856
857/*
858 * Remove a pv_entry from the VHPT; returns zero on success, non-zero if not found.
859 */
860static int
861pmap_remove_vhpt(pv_entry_t pv)
862{
863	struct ia64_lpte *pte;
864	struct ia64_lpte *lpte;
865	struct ia64_lpte *vhpte;
866	u_int64_t tag;
867
868	vhpte = (struct ia64_lpte *) ia64_thash(pv->pv_va);
869
870	/*
871	 * If the VHPTE is invalid, there can't be a collision chain.
872	 */
873	if (!vhpte->pte_p)
874		return 1;
875
876	lpte = vhpte;
877	pte = (struct ia64_lpte *) IA64_PHYS_TO_RR7(vhpte->pte_chain);
878	tag = ia64_ttag(pv->pv_va);
879
880	while (pte->pte_tag != tag) {
881		lpte = pte;
882		if (pte->pte_chain)
883			pte = (struct ia64_lpte *) IA64_PHYS_TO_RR7(pte->pte_chain);
884		else
885			return 1; /* error here? */
886	}
887
888	/*
889	 * Snip this pv_entry out of the collision chain.
890	 */
891	lpte->pte_chain = pte->pte_chain;
892
893	/*
894	 * If the VHPTE matches as well, change it to map the first
895	 * element from the chain if there is one.
896	 */
897	if (vhpte->pte_tag == tag) {
898		if (vhpte->pte_chain) {
899			pte = (struct ia64_lpte *)
900				IA64_PHYS_TO_RR7(vhpte->pte_chain);
901			pmap_install_pte(vhpte, pte);
902		} else {
903			vhpte->pte_p = 0;
904			ia64_mf();
905		}
906	}
907
908	return 0;
909}
910
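/*
 * Mapping life cycle: pmap_make_pv() allocates a pv_entry carrying an
 * invalid pte and links it into the VHPT and onto the pmap's pv list;
 * pmap_set_pv() then fills in the physical address and protection and,
 * for managed pages, adds the entry to the page's pv list;
 * pmap_remove_pv() undoes all of the above and frees the entry.
 */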
911/*
912 * Make a pv_entry_t which maps the given virtual address. The pte
913 * will be initialised with pte_p = 0. The function pmap_set_pv()
914 * should be called to change the value of the pte.
915 * Must be called at splvm().
916 */
917static pv_entry_t
918pmap_make_pv(pmap_t pmap, vm_offset_t va)
919{
920	pv_entry_t pv;
921
922	pv = get_pv_entry();
923	bzero(pv, sizeof(*pv));
924	pv->pv_va = va;
925	pv->pv_pmap = pmap;
926
927	pv->pv_pte.pte_p = 0;		/* invalid for now */
928	pv->pv_pte.pte_ma = PTE_MA_WB;	/* cacheable, write-back */
929	pv->pv_pte.pte_a = 0;
930	pv->pv_pte.pte_d = 0;
931	pv->pv_pte.pte_pl = 0;		/* privilege level 0 */
932	pv->pv_pte.pte_ar = 3;		/* read/write/execute */
933	pv->pv_pte.pte_ppn = 0;		/* physical address */
934	pv->pv_pte.pte_ed = 0;
935	pv->pv_pte.pte_ig = 0;
936
937	pv->pv_pte.pte_ps = PAGE_SHIFT;	/* page size */
938	pv->pv_pte.pte_key = 0;		/* protection key */
939
940	pv->pv_pte.pte_tag = ia64_ttag(va);
941
942	pmap_enter_vhpt(pv);
943
944	TAILQ_INSERT_TAIL(&pmap->pm_pvlist, pv, pv_plist);
945	pmap->pm_stats.resident_count++;
946
947	return pv;
948}
949
950/*
951 * Initialise a pv_entry_t with a given physical address and
952 * protection code. If the passed vm_page_t is non-zero, the entry is
953 * added to its list of mappings.
954 * Must be called at splvm().
955 */
956static void
957pmap_set_pv(pmap_t pmap, pv_entry_t pv, vm_offset_t pa,
958	    int prot, vm_page_t m)
959{
960	if (pv->pv_pte.pte_p && pv->pv_pte.pte_ig & PTE_IG_MANAGED) {
961		vm_offset_t opa = pv->pv_pte.pte_ppn << 12;
962		vm_page_t om = PHYS_TO_VM_PAGE(opa);
963
964		TAILQ_REMOVE(&om->md.pv_list, pv, pv_list);
965		om->md.pv_list_count--;
966
967		if (TAILQ_FIRST(&om->md.pv_list) == NULL)
968			vm_page_flag_clear(om, PG_MAPPED | PG_WRITEABLE);
969	}
970
971	pv->pv_pte.pte_p = 1;		/* set to valid */
972
973	/*
974	 * Only track access/modify for managed pages.
975	 */
976	if (m) {
977		pv->pv_pte.pte_a = 0;
978		pv->pv_pte.pte_d = 0;
979	} else {
980		pv->pv_pte.pte_a = 1;
981		pv->pv_pte.pte_d = 1;
982	}
983
984	pv->pv_pte.pte_pl = prot & 3;	/* privilege level */
985	pv->pv_pte.pte_ar = prot >> 2;	/* access rights */
986	pv->pv_pte.pte_ppn = pa >> 12;	/* physical address */
987
988	if (m) {
989		pv->pv_pte.pte_ig |= PTE_IG_MANAGED;
990		TAILQ_INSERT_TAIL(&m->md.pv_list, pv, pv_list);
991		m->md.pv_list_count++;
992	}
993
994	/*
995	 * Update the VHPT entry if it needs to change.
996	 */
997	pmap_update_vhpt(pv);
998}
999
1000/*
1001 * Remove a mapping represented by a particular pv_entry_t. If the
1002 * passed vm_page_t is non-zero, then the entry is removed from it.
1003 * Must be called at splvm().
1004 */
1005static int
1006pmap_remove_pv(pmap_t pmap, pv_entry_t pv, vm_page_t m)
1007{
1008	int rtval;
1009
1010	/*
1011	 * First remove from the VHPT.
1012	 */
1013	rtval = pmap_remove_vhpt(pv);
1014	if (!rtval)
1015		return rtval;
1016
1017	if (m) {
1018		TAILQ_REMOVE(&m->md.pv_list, pv, pv_list);
1019		m->md.pv_list_count--;
1020
1021		if (TAILQ_FIRST(&m->md.pv_list) == NULL)
1022			vm_page_flag_clear(m, PG_MAPPED | PG_WRITEABLE);
1023	}
1024
1025	TAILQ_REMOVE(&pmap->pm_pvlist, pv, pv_plist);
1026	pmap->pm_stats.resident_count--;
1027
1028	free_pv_entry(pv);
1029
1030	return (rtval);
1031}
1032
1033/*
1034 * Find a pv given a pmap and virtual address.
1035 */
1036static pv_entry_t
1037pmap_find_pv(pmap_t pmap, vm_offset_t va)
1038{
1039	struct ia64_lpte *pte;
1040	u_int64_t tag;
1041
1042	pte = (struct ia64_lpte *) ia64_thash(va);
1043	if (!pte->pte_chain)
1044		return 0;
1045
1046	tag = ia64_ttag(va);
1047	pte = (struct ia64_lpte *) IA64_PHYS_TO_RR7(pte->pte_chain);
1048
1049	while (pte->pte_tag != tag) {
1050		if (pte->pte_chain)
1051			pte = (struct ia64_lpte *) IA64_PHYS_TO_RR7(pte->pte_chain);
1052		else
1053			return 0;
1054	}
1055
1056	return (pv_entry_t) pte;	/* XXX wrong va */
1057}
1058
1059/*
1060 *	Routine:	pmap_extract
1061 *	Function:
1062 *		Extract the physical page address associated
1063 *		with the given map/virtual_address pair.
1064 */
1065vm_offset_t
1066pmap_extract(pmap, va)
1067	register pmap_t pmap;
1068	vm_offset_t va;
1069{
1070	pv_entry_t pv = pmap_find_pv(pmap, va);
1071	if (pv)
1072		return pmap_pte_pa(&pv->pv_pte);
1073	else
1074		return 0;
1075}
1076
1077/***************************************************
1078 * Low level mapping routines.....
1079 ***************************************************/
1080
1081/*
1082 * Add a list of wired pages to the kva;
1083 * this routine is only used for temporary
1084 * kernel mappings that do not need to have
1085 * page modification or references recorded.
1086 * Note that old mappings are simply written
1087 * over.  The page *must* be wired.
1088 */
1089void
1090pmap_qenter(vm_offset_t va, vm_page_t *m, int count)
1091{
1092	int i, inval;
1093	pv_entry_t pv;
1094
1095	for (i = 0; i < count; i++) {
1096		vm_offset_t tva = va + i * PAGE_SIZE;
1097		pv = pmap_find_pv(kernel_pmap, tva);
1098		inval = 0;
1099		if (!pv)
1100			pv = pmap_make_pv(kernel_pmap, tva);
1101		else
1102			inval = 1;
1103
1104		PMAP_DEBUG_VA(va);
1105		pmap_set_pv(kernel_pmap, pv,
1106			    VM_PAGE_TO_PHYS(m[i]),
1107			    (PTE_AR_RWX<<2) | PTE_PL_KERN, 0);
1108		if (inval)
1109			pmap_invalidate_page(kernel_pmap, tva);
1110	}
1111}
1112
1113/*
1114 * this routine jerks page mappings from the
1115 * kernel -- it is meant only for temporary mappings.
1116 */
1117void
1118pmap_qremove(va, count)
1119	vm_offset_t va;
1120	int count;
1121{
1122	int i;
1123	pv_entry_t pv;
1124
1125	for (i = 0; i < count; i++) {
1126		pv = pmap_find_pv(kernel_pmap, va);
1127		PMAP_DEBUG_VA(va);
1128		if (pv) {
1129			pmap_remove_pv(kernel_pmap, pv, 0);
1130			pmap_invalidate_page(kernel_pmap, va);
1131		}
1132		va += PAGE_SIZE;
1133	}
1134}
1135
1136/*
1137 * Add a wired page to the kva.
1138 */
1139void
1140pmap_kenter(vm_offset_t va, vm_offset_t pa)
1141{
1142	pv_entry_t pv;
1143
1144	pv = pmap_find_pv(kernel_pmap, va);
1145	if (!pv)
1146		pv = pmap_make_pv(kernel_pmap, va);
1147	pmap_set_pv(kernel_pmap, pv,
1148		    pa, (PTE_AR_RWX<<2) | PTE_PL_KERN, 0);
1149}
1150
1151/*
1152 * Remove a page from the kva
1153 */
1154void
1155pmap_kremove(vm_offset_t va)
1156{
1157	pv_entry_t pv;
1158
1159	pv = pmap_find_pv(kernel_pmap, va);
1160	if (pv)
1161		pmap_remove_pv(kernel_pmap, pv, 0);
1162}
1163
1164/*
1165 *	Used to map a range of physical addresses into kernel
1166 *	virtual address space.
1167 *
1168 *	For now, VM is already on, we only need to map the
1169 *	specified memory.
1170 */
1171vm_offset_t
1172pmap_map(vm_offset_t virt, vm_offset_t start, vm_offset_t end, int prot)
1173{
1174	/*
1175	 * XXX We should really try to use larger pagesizes here to
1176	 * cut down the number of PVs used.
1177	 */
1178	while (start < end) {
1179		pmap_kenter(virt, start);
1180		virt += PAGE_SIZE;
1181		start += PAGE_SIZE;
1182	}
1183	return (virt);
1184}
1185
1186/*
1187 * This routine is very drastic, but can save the system
1188 * in a pinch.
1189 */
1190void
1191pmap_collect()
1192{
1193	int i;
1194	vm_page_t m;
1195	static int warningdone=0;
1196
1197	if (pmap_pagedaemon_waken == 0)
1198		return;
1199
1200	if (warningdone < 5) {
1201		printf("pmap_collect: collecting pv entries -- suggest increasing PMAP_SHPGPERPROC\n");
1202		warningdone++;
1203	}
1204
1205	for(i = 0; i < vm_page_array_size; i++) {
1206		m = &vm_page_array[i];
1207		if (m->wire_count || m->hold_count || m->busy ||
1208		    (m->flags & PG_BUSY))
1209			continue;
1210		pmap_remove_all(m);
1211	}
1212	pmap_pagedaemon_waken = 0;
1213}
1214
1215/*
1216 * Remove a single page from a process address space
1217 */
1218static void
1219pmap_remove_page(pmap_t pmap, vm_offset_t va)
1220{
1221	pv_entry_t pv;
1222	vm_page_t m;
1223	int rtval;
1224	int s;
1225
1226	s = splvm();
1227
1228	pv = pmap_find_pv(pmap, va);
1229
1230	rtval = 0;
1231	if (pv) {
1232		m = PHYS_TO_VM_PAGE(pmap_pte_pa(&pv->pv_pte));
1233		rtval = pmap_remove_pv(pmap, pv, m);
1234	}
1235
1236	splx(s);
1237	return;
1238}
1239
1240/*
1241 *	Remove the given range of addresses from the specified map.
1242 *
1243 *	It is assumed that the start and end are properly
1244 *	rounded to the page size.
1245 */
1246void
1247pmap_remove(pmap_t pmap, vm_offset_t sva, vm_offset_t eva)
1248{
1249	vm_offset_t va, nva;
1250
1251	if (pmap == NULL)
1252		return;
1253
1254	if (pmap->pm_stats.resident_count == 0)
1255		return;
1256
1257	/*
1258	 * special handling of removing one page.  a very
1259	 * common operation and easy to short circuit some
1260	 * code.
1261	 */
1262	if (sva + PAGE_SIZE == eva) {
1263		pmap_remove_page(pmap, sva);
1264		return;
1265	}
1266
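	/*
	 * Either probe each page address in the range individually or
	 * walk the pmap's pv list and filter on the address range.
	 */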
1267	if (atop(eva - sva) > pmap->pm_stats.resident_count) {
1268		for (va = sva; va < eva; va = nva) {
1269			pmap_remove_page(pmap, va);
1270			nva = va + PAGE_SIZE;
1271		}
1272	} else {
1273		pv_entry_t pv, pvnext;
1274		int s;
1275
1276		s = splvm();
1277		for (pv = TAILQ_FIRST(&pmap->pm_pvlist);
1278			pv;
1279			pv = pvnext) {
1280			pvnext = TAILQ_NEXT(pv, pv_plist);
1281			if (pv->pv_va >= sva && pv->pv_va < eva) {
1282				vm_page_t m = PHYS_TO_VM_PAGE(pmap_pte_pa(&pv->pv_pte));
1283				pmap_remove_pv(pmap, pv, m);
1284			}
1285		}
1286		splx(s);
1287	}
1288}
1289
1290/*
1291 *	Routine:	pmap_remove_all
1292 *	Function:
1293 *		Removes this physical page from
1294 *		all physical maps in which it resides.
1295 *		Reflects back modify bits to the pager.
1296 *
1297 *	Notes:
1298 *		Original versions of this routine were very
1299 *		inefficient because they iteratively called
1300 *		pmap_remove (slow...)
1301 */
1302
1303static void
1304pmap_remove_all(vm_page_t m)
1305{
1306	register pv_entry_t pv;
1307	int nmodify;
1308	int s;
1309
1310	nmodify = 0;
1311#if defined(PMAP_DIAGNOSTIC)
1312	/*
1313	 * XXX this makes pmap_page_protect(NONE) illegal for non-managed
1314	 * pages!
1315	 */
1316	if (!pmap_initialized || (m->flags & PG_FICTITIOUS)) {
1317		panic("pmap_page_protect: illegal for unmanaged page, va: 0x%lx", VM_PAGE_TO_PHYS(m));
1318	}
1319#endif
1320
1321	s = splvm();
1322
1323	while ((pv = TAILQ_FIRST(&m->md.pv_list)) != NULL) {
1324		vm_page_t om = PHYS_TO_VM_PAGE(pmap_pte_pa(&pv->pv_pte));
1325		pmap_remove_pv(pv->pv_pmap, pv, om);
1326		pmap_invalidate_page(pv->pv_pmap, pv->pv_va);
1327	}
1328
1329	vm_page_flag_clear(m, PG_MAPPED | PG_WRITEABLE);
1330
1331	splx(s);
1332	return;
1333}
1334
1335/*
1336 *	Set the physical protection on the
1337 *	specified range of this map as requested.
1338 */
1339void
1340pmap_protect(pmap_t pmap, vm_offset_t sva, vm_offset_t eva, vm_prot_t prot)
1341{
1342	pv_entry_t pv;
1343	int newprot;
1344
1345	if (pmap == NULL)
1346		return;
1347
1348	if ((prot & VM_PROT_READ) == VM_PROT_NONE) {
1349		pmap_remove(pmap, sva, eva);
1350		return;
1351	}
1352
1353	if (prot & VM_PROT_WRITE)
1354		return;
1355
1356	newprot = pte_prot(pmap, prot);
1357
1358	if ((sva & PAGE_MASK) || (eva & PAGE_MASK))
1359		panic("pmap_protect: unaligned addresses");
1360
1361	while (sva < eva) {
1362		/*
1363		 * If page is invalid, skip this page
1364		 */
1365		pv = pmap_find_pv(pmap, sva);
1366		if (!pv) {
1367			sva += PAGE_SIZE;
1368			continue;
1369		}
1370
1371		if (pmap_pte_prot(&pv->pv_pte) != newprot) {
1372			pmap_pte_set_prot(&pv->pv_pte, newprot);
1373			pmap_update_vhpt(pv);
1374			pmap_invalidate_page(pmap, sva);
1375		}
1376
1377		sva += PAGE_SIZE;
1378	}
1379}
1380
1381/*
1382 *	Insert the given physical page (p) at
1383 *	the specified virtual address (v) in the
1384 *	target physical map with the protection requested.
1385 *
1386 *	If specified, the page will be wired down, meaning
1387 *	that the related pte can not be reclaimed.
1388 *
1389 *	NB:  This is the only routine which MAY NOT lazy-evaluate
1390 *	or lose information.  That is, this routine must actually
1391 *	insert this page into the given map NOW.
1392 */
1393void
1394pmap_enter(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot,
1395	   boolean_t wired)
1396{
1397	vm_offset_t pa;
1398	pv_entry_t pv;
1399	vm_offset_t opa;
1400	struct ia64_lpte origpte;
1401	int managed;
1402
1403	if (pmap == NULL)
1404		return;
1405
1406	va &= ~PAGE_MASK;
1407#ifdef PMAP_DIAGNOSTIC
1408	if (va > VM_MAX_KERNEL_ADDRESS)
1409		panic("pmap_enter: toobig");
1410#endif
1411
1412	pv = pmap_find_pv(pmap, va);
1413	if (!pv)
1414		pv = pmap_make_pv(pmap, va);
1415
1416	origpte = pv->pv_pte;
1417	if (origpte.pte_p)
1418		opa = pmap_pte_pa(&origpte);
1419	else
1420		opa = 0;
1421
1422	pa = VM_PAGE_TO_PHYS(m) & ~PAGE_MASK;
1423	managed = 0;
1424
1425	/*
1426	 * Mapping has not changed, must be protection or wiring change.
1427	 */
1428	if (origpte.pte_p && (opa == pa)) {
1429		/*
1430		 * Wiring change, just update stats. We don't worry about
1431		 * wiring PT pages as they remain resident as long as there
1432		 * are valid mappings in them. Hence, if a user page is wired,
1433		 * the PT page will be also.
1434		 */
1435		if (wired && ((origpte.pte_ig & PTE_IG_WIRED) == 0))
1436			pmap->pm_stats.wired_count++;
1437		else if (!wired && (origpte.pte_ig & PTE_IG_WIRED))
1438			pmap->pm_stats.wired_count--;
1439
1440		managed = origpte.pte_ig & PTE_IG_MANAGED;
1441		goto validate;
1442	}  else {
1443		/*
1444		 * Mapping has changed, invalidate old range and fall
1445		 * through to handle validating new mapping.
1446		 */
1447	}
1448
1449	/*
1450	 * Increment counters
1451	 */
1452	if (wired)
1453		pmap->pm_stats.wired_count++;
1454
1455validate:
1456	/*
1457	 * Now validate mapping with desired protection/wiring.
1458	 * This enters the pv_entry_t on the page's list if necessary.
1459	 */
1460	pmap_set_pv(pmap, pv, pa, pte_prot(pmap, prot), m);
1461
1462	if (wired)
1463		pv->pv_pte.pte_ig |= PTE_IG_WIRED;
1464
1465	/*
1466	 * if the mapping or permission bits are different, we need
1467	 * to invalidate the page.
1468	 */
1469	if (!pmap_equal_pte(&origpte, &pv->pv_pte)) {
1470		PMAP_DEBUG_VA(va);
1471		if (origpte.pte_p)
1472			pmap_invalidate_page(pmap, va);
1473	}
1474}
1475
1476/*
1477 * this code makes some *MAJOR* assumptions:
1478 * 1. Current pmap & pmap exists.
1479 * 2. Not wired.
1480 * 3. Read access.
1481 * 4. No page table pages.
1482 * 5. Tlbflush is deferred to calling procedure.
1483 * 6. Page IS managed.
1484 * but is *MUCH* faster than pmap_enter...
1485 */
1486
1487static void
1488pmap_enter_quick(pmap_t pmap, vm_offset_t va, vm_page_t m)
1489{
1490	pv_entry_t pv;
1491	int s;
1492
1493	s = splvm();
1494
1495	pv = pmap_find_pv(pmap, va);
1496	if (!pv)
1497		pv = pmap_make_pv(pmap, va);
1498
1499	/*
1500	 * Enter on the PV list if part of our managed memory. Note that we
1501	 * raise IPL while manipulating pv_table since pmap_enter can be
1502	 * called at interrupt time.
1503	 */
1504	PMAP_DEBUG_VA(va);
1505	pmap_set_pv(pmap, pv, VM_PAGE_TO_PHYS(m),
1506		    (PTE_AR_R << 2) | PTE_PL_USER, m);
1507
1508	splx(s);
1509}
1510
1511/*
1512 * Make temporary mapping for a physical address. This is called
1513 * during dump.
1514 */
1515void *
1516pmap_kenter_temporary(vm_offset_t pa)
1517{
1518	return (void *) IA64_PHYS_TO_RR7(pa);
1519}
1520
1521#define MAX_INIT_PT (96)
1522/*
1523 * pmap_object_init_pt preloads the ptes for a given object
1524 * into the specified pmap.  This eliminates the blast of soft
1525 * faults on process startup and immediately after an mmap.
1526 */
1527void
1528pmap_object_init_pt(pmap_t pmap, vm_offset_t addr,
1529		    vm_object_t object, vm_pindex_t pindex,
1530		    vm_size_t size, int limit)
1531{
1532	vm_offset_t tmpidx;
1533	int psize;
1534	vm_page_t p;
1535	int objpgs;
1536
1537	if (pmap == NULL || object == NULL)
1538		return;
1539
1540	psize = ia64_btop(size);
1541
1542	if ((object->type != OBJT_VNODE) ||
1543		(limit && (psize > MAX_INIT_PT) &&
1544			(object->resident_page_count > MAX_INIT_PT))) {
1545		return;
1546	}
1547
1548	if (psize + pindex > object->size)
1549		psize = object->size - pindex;
1550
1551	/*
1552	 * if we are processing a major portion of the object, then scan the
1553	 * entire thing.
1554	 */
1555	if (psize > (object->resident_page_count >> 2)) {
1556		objpgs = psize;
1557
1558		for (p = TAILQ_FIRST(&object->memq);
1559		    ((objpgs > 0) && (p != NULL));
1560		    p = TAILQ_NEXT(p, listq)) {
1561
1562			tmpidx = p->pindex;
1563			if (tmpidx < pindex) {
1564				continue;
1565			}
1566			tmpidx -= pindex;
1567			if (tmpidx >= psize) {
1568				continue;
1569			}
1570			if (((p->valid & VM_PAGE_BITS_ALL) == VM_PAGE_BITS_ALL) &&
1571			    (p->flags & (PG_BUSY | PG_FICTITIOUS)) == 0) {
1572				if ((p->queue - p->pc) == PQ_CACHE)
1573					vm_page_deactivate(p);
1574				vm_page_busy(p);
1575				pmap_enter_quick(pmap,
1576						 addr + ia64_ptob(tmpidx), p);
1577				vm_page_flag_set(p, PG_MAPPED);
1578				vm_page_wakeup(p);
1579			}
1580			objpgs -= 1;
1581		}
1582	} else {
1583		/*
1584		 * else lookup the pages one-by-one.
1585		 */
1586		for (tmpidx = 0; tmpidx < psize; tmpidx += 1) {
1587			p = vm_page_lookup(object, tmpidx + pindex);
1588			if (p &&
1589			    ((p->valid & VM_PAGE_BITS_ALL) == VM_PAGE_BITS_ALL) &&
1590			    (p->flags & (PG_BUSY | PG_FICTITIOUS)) == 0) {
1591				if ((p->queue - p->pc) == PQ_CACHE)
1592					vm_page_deactivate(p);
1593				vm_page_busy(p);
1594				pmap_enter_quick(pmap,
1595						 addr + ia64_ptob(tmpidx), p);
1596				vm_page_flag_set(p, PG_MAPPED);
1597				vm_page_wakeup(p);
1598			}
1599		}
1600	}
1601	return;
1602}
1603
1604/*
1605 * pmap_prefault provides a quick way of clustering
1606 * pagefaults into a process's address space.  It is a "cousin"
1607 * of pmap_object_init_pt, except it runs at page fault time instead
1608 * of mmap time.
1609 */
1610#define PFBAK 4
1611#define PFFOR 4
1612#define PAGEORDER_SIZE (PFBAK+PFFOR)
1613
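/*
 * Offsets, relative to the faulting address, at which pmap_prefault
 * probes for resident pages; nearer neighbours are tried first.
 */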
1614static int pmap_prefault_pageorder[] = {
1615	-PAGE_SIZE, PAGE_SIZE,
1616	-2 * PAGE_SIZE, 2 * PAGE_SIZE,
1617	-3 * PAGE_SIZE, 3 * PAGE_SIZE,
1618	-4 * PAGE_SIZE, 4 * PAGE_SIZE
1619};
1620
1621void
1622pmap_prefault(pmap, addra, entry)
1623	pmap_t pmap;
1624	vm_offset_t addra;
1625	vm_map_entry_t entry;
1626{
1627	int i;
1628	vm_offset_t starta;
1629	vm_offset_t addr;
1630	vm_pindex_t pindex;
1631	vm_page_t m, mpte;
1632	vm_object_t object;
1633
1634	if (!curproc || (pmap != vmspace_pmap(curproc->p_vmspace)))
1635		return;
1636
1637	object = entry->object.vm_object;
1638
1639	starta = addra - PFBAK * PAGE_SIZE;
1640	if (starta < entry->start) {
1641		starta = entry->start;
1642	} else if (starta > addra) {
1643		starta = 0;
1644	}
1645
1646	mpte = NULL;
1647	for (i = 0; i < PAGEORDER_SIZE; i++) {
1648		vm_object_t lobject;
1649		pv_entry_t pv;
1650
1651		addr = addra + pmap_prefault_pageorder[i];
1652		if (addr > addra + (PFFOR * PAGE_SIZE))
1653			addr = 0;
1654
1655		if (addr < starta || addr >= entry->end)
1656			continue;
1657
1658		pv = pmap_find_pv(pmap, addr);
1659		if (pv)
1660			continue;
1661
1662		pindex = ((addr - entry->start) + entry->offset) >> PAGE_SHIFT;
1663		lobject = object;
1664		for (m = vm_page_lookup(lobject, pindex);
1665		    (!m && (lobject->type == OBJT_DEFAULT) && (lobject->backing_object));
1666		    lobject = lobject->backing_object) {
1667			if (lobject->backing_object_offset & PAGE_MASK)
1668				break;
1669			pindex += (lobject->backing_object_offset >> PAGE_SHIFT);
1670			m = vm_page_lookup(lobject->backing_object, pindex);
1671		}
1672
1673		/*
1674		 * give-up when a page is not in memory
1675		 */
1676		if (m == NULL)
1677			break;
1678
1679		if (((m->valid & VM_PAGE_BITS_ALL) == VM_PAGE_BITS_ALL) &&
1680		    (m->flags & (PG_BUSY | PG_FICTITIOUS)) == 0) {
1681
1682			if ((m->queue - m->pc) == PQ_CACHE) {
1683				vm_page_deactivate(m);
1684			}
1685			vm_page_busy(m);
1686			pmap_enter_quick(pmap, addr, m);
1687			vm_page_flag_set(m, PG_MAPPED);
1688			vm_page_wakeup(m);
1689		}
1690	}
1691}
1692
1693/*
1694 *	Routine:	pmap_change_wiring
1695 *	Function:	Change the wiring attribute for a map/virtual-address
1696 *			pair.
1697 *	In/out conditions:
1698 *			The mapping must already exist in the pmap.
1699 */
1700void
1701pmap_change_wiring(pmap, va, wired)
1702	register pmap_t pmap;
1703	vm_offset_t va;
1704	boolean_t wired;
1705{
1706	pv_entry_t pv;
1707
1708	if (pmap == NULL)
1709		return;
1710
1711	pv = pmap_find_pv(pmap, va);
1712
1713	if (wired && !pmap_pte_w(&pv->pv_pte))
1714		pmap->pm_stats.wired_count++;
1715	else if (!wired && pmap_pte_w(&pv->pv_pte))
1716		pmap->pm_stats.wired_count--;
1717
1718	/*
1719	 * Wiring is not a hardware characteristic so there is no need to
1720	 * invalidate TLB.
1721	 */
1722	pmap_pte_set_w(&pv->pv_pte, wired);
1723}
1724
1725
1726
1727/*
1728 *	Copy the range specified by src_addr/len
1729 *	from the source map to the range dst_addr/len
1730 *	in the destination map.
1731 *
1732 *	This routine is only advisory and need not do anything.
1733 */
1734
1735void
1736pmap_copy(pmap_t dst_pmap, pmap_t src_pmap, vm_offset_t dst_addr, vm_size_t len,
1737	  vm_offset_t src_addr)
1738{
1739}
1740
1741/*
1742 *	Routine:	pmap_kernel
1743 *	Function:
1744 *		Returns the physical map handle for the kernel.
1745 */
1746pmap_t
1747pmap_kernel()
1748{
1749	return (kernel_pmap);
1750}
1751
1752/*
1753 *	pmap_zero_page zeros the specified hardware page by
1754 *	mapping it into virtual memory and using bzero to clear
1755 *	its contents.
1756 */
1757
1758void
1759pmap_zero_page(vm_offset_t pa)
1760{
1761	vm_offset_t va = IA64_PHYS_TO_RR7(pa);
1762	bzero((caddr_t) va, PAGE_SIZE);
1763}
1764
1765
1766/*
1767 *	pmap_zero_page_area zeros the specified hardware page by
1768 *	mapping it into virtual memory and using bzero to clear
1769 *	its contents.
1770 *
1771 *	off and size must reside within a single page.
1772 */
1773
1774void
1775pmap_zero_page_area(vm_offset_t pa, int off, int size)
1776{
1777	vm_offset_t va = IA64_PHYS_TO_RR7(pa);
1778	bzero((char *)(caddr_t)va + off, size);
1779}
1780
1781/*
1782 *	pmap_copy_page copies the specified (machine independent)
1783 *	page by mapping the page into virtual memory and using
1784 *	bcopy to copy the page, one machine dependent page at a
1785 *	time.
1786 */
1787void
1788pmap_copy_page(vm_offset_t src, vm_offset_t dst)
1789{
1790	src = IA64_PHYS_TO_RR7(src);
1791	dst = IA64_PHYS_TO_RR7(dst);
1792	bcopy((caddr_t) src, (caddr_t) dst, PAGE_SIZE);
1793}
1794
1795
1796/*
1797 *	Routine:	pmap_pageable
1798 *	Function:
1799 *		Make the specified pages (by pmap, offset)
1800 *		pageable (or not) as requested.
1801 *
1802 *		A page which is not pageable may not take
1803 *		a fault; therefore, its page table entry
1804 *		must remain valid for the duration.
1805 *
1806 *		This routine is merely advisory; pmap_enter
1807 *		will specify that these pages are to be wired
1808 *		down (or not) as appropriate.
1809 */
1810void
1811pmap_pageable(pmap, sva, eva, pageable)
1812	pmap_t pmap;
1813	vm_offset_t sva, eva;
1814	boolean_t pageable;
1815{
1816}
1817
1818/*
1819 * this routine returns true if a physical page resides
1820 * in the given pmap.
1821 */
1822boolean_t
1823pmap_page_exists(pmap, m)
1824	pmap_t pmap;
1825	vm_page_t m;
1826{
1827	register pv_entry_t pv;
1828	int s;
1829
1830	if (!pmap_initialized || (m->flags & PG_FICTITIOUS))
1831		return FALSE;
1832
1833	s = splvm();
1834
1835	/*
1836	 * Not found, check current mappings returning immediately if found.
1837	 */
1838	for (pv = TAILQ_FIRST(&m->md.pv_list);
1839		pv;
1840		pv = TAILQ_NEXT(pv, pv_list)) {
1841		if (pv->pv_pmap == pmap) {
1842			splx(s);
1843			return TRUE;
1844		}
1845	}
1846	splx(s);
1847	return (FALSE);
1848}
1849
1850#define PMAP_REMOVE_PAGES_CURPROC_ONLY
1851/*
1852 * Remove all pages from the specified address space;
1853 * this aids process exit speeds.  Also, this code
1854 * is special cased for current process only, but
1855 * can have the more generic (and slightly slower)
1856 * mode enabled.  This is much faster than pmap_remove
1857 * in the case of running down an entire address space.
1858 */
1859void
1860pmap_remove_pages(pmap, sva, eva)
1861	pmap_t pmap;
1862	vm_offset_t sva, eva;
1863{
1864	pv_entry_t pv, npv;
1865	int s;
1866
1867#ifdef PMAP_REMOVE_PAGES_CURPROC_ONLY
1868	if (!curproc || (pmap != vmspace_pmap(curproc->p_vmspace))) {
1869		printf("warning: pmap_remove_pages called with non-current pmap\n");
1870		return;
1871	}
1872#endif
1873
1874	s = splvm();
1875	for (pv = TAILQ_FIRST(&pmap->pm_pvlist);
1876		pv;
1877		pv = npv) {
1878		vm_page_t m;
1879
1880		npv = TAILQ_NEXT(pv, pv_plist);
1881
1882		if (pv->pv_va >= eva || pv->pv_va < sva) {
1883			continue;
1884		}
1885
1886/*
1887 * We cannot remove wired pages from a process' mapping at this time
1888 */
1889		if (pv->pv_pte.pte_ig & PTE_IG_WIRED) {
1890			continue;
1891		}
1892
1893		PMAP_DEBUG_VA(pv->pv_va);
1894
1895		m = PHYS_TO_VM_PAGE(pmap_pte_pa(&pv->pv_pte));
1896		pmap_remove_pv(pmap, pv, m);
1897	}
1898	splx(s);
1899
1900	pmap_invalidate_all(pmap);
1901}
1902
1903/*
1904 * this routine is used to modify bits in ptes
1905 */
1906static void
1907pmap_changebit(vm_page_t m, int bit, boolean_t setem)
1908{
1909#if 0
1910	pv_entry_t pv;
1911	int changed;
1912	int s;
1913
1914	if (!pmap_initialized || (m->flags & PG_FICTITIOUS))
1915		return;
1916
1917	s = splvm();
1918	changed = 0;
1919
1920	/*
1921	 * Loop over all current mappings setting/clearing as appropos If
1922	 * setting RO do we need to clear the VAC?
1923	 */
1924	for (pv = TAILQ_FIRST(&m->md.pv_list);
1925		pv;
1926		pv = TAILQ_NEXT(pv, pv_list)) {
1927
1928		/*
1929		 * don't write protect pager mappings
1930		 */
1931		if (!setem && bit == (PG_UWE|PG_KWE)) {
1932			if (!pmap_track_modified(pv->pv_va))
1933				continue;
1934		}
1935
1936#if defined(PMAP_DIAGNOSTIC)
1937		if (!pv->pv_pmap) {
1938			printf("Null pmap (cb) at va: 0x%lx\n", pv->pv_va);
1939			continue;
1940		}
1941#endif
1942
1943		pte = pmap_lev3pte(pv->pv_pmap, pv->pv_va);
1944
1945		changed = 0;
1946		if (setem) {
1947			*pte |= bit;
1948			changed = 1;
1949		} else {
1950			pt_entry_t pbits = *pte;
1951			if (pbits & bit) {
1952				changed = 1;
1953				*pte = pbits & ~bit;
1954			}
1955		}
1956		if (changed)
1957			pmap_invalidate_page(pv->pv_pmap, pv->pv_va);
1958	}
1959	splx(s);
1960#endif
1961}
1962
1963/*
1964 *      pmap_page_protect:
1965 *
1966 *      Lower the permission for all mappings to a given page.
1967 */
1968void
1969pmap_page_protect(vm_page_t m, vm_prot_t prot)
1970{
1971#if 0
1972	if ((prot & VM_PROT_WRITE) == 0) {
1973		if (prot & (VM_PROT_READ | VM_PROT_EXECUTE)) {
1974			pmap_changebit(m, PG_KWE|PG_UWE, FALSE);
1975		} else {
1976			pmap_remove_all(m);
1977		}
1978	}
1979#endif
1980}
1981
1982vm_offset_t
1983pmap_phys_address(ppn)
1984	int ppn;
1985{
1986	return (ia64_ptob(ppn));
1987}
1988
1989/*
1990 *	pmap_ts_referenced:
1991 *
1992 *	Return the count of reference bits for a page, clearing all of them.
1993 *
1994 */
1995int
1996pmap_ts_referenced(vm_page_t m)
1997{
1998	pv_entry_t pv;
1999	int count = 0;
2000
2001	if (!pmap_initialized || (m->flags & PG_FICTITIOUS))
2002		return 0;
2003
2004	for (pv = TAILQ_FIRST(&m->md.pv_list);
2005		pv;
2006		pv = TAILQ_NEXT(pv, pv_list)) {
2007		if (pv->pv_pte.pte_a) {
2008			count++;
2009			pv->pv_pte.pte_a = 0;
2010			pmap_update_vhpt(pv);
2011		}
2012	}
2013
2014	return count;
2015}
2016
2017/*
2018 *	pmap_is_referenced:
2019 *
2020 *	Return whether or not the specified physical page was referenced
2021 *	in any physical maps.
2022 */
2023static boolean_t
2024pmap_is_referenced(vm_page_t m)
2025{
2026	pv_entry_t pv;
2027
2028	if (!pmap_initialized || (m->flags & PG_FICTITIOUS))
2029		return FALSE;
2030
2031	for (pv = TAILQ_FIRST(&m->md.pv_list);
2032		pv;
2033		pv = TAILQ_NEXT(pv, pv_list)) {
2034		if (pv->pv_pte.pte_a) {
2035			return 1;
2036		}
2037	}
2038
2039	return 0;
2040}
2041
2042/*
2043 *	pmap_is_modified:
2044 *
2045 *	Return whether or not the specified physical page was modified
2046 *	in any physical maps.
2047 */
2048boolean_t
2049pmap_is_modified(vm_page_t m)
2050{
2051	pv_entry_t pv;
2052
2053	if (!pmap_initialized || (m->flags & PG_FICTITIOUS))
2054		return FALSE;
2055
2056	for (pv = TAILQ_FIRST(&m->md.pv_list);
2057		pv;
2058		pv = TAILQ_NEXT(pv, pv_list)) {
2059		if (pv->pv_pte.pte_d) {
2060			return 1;
2061		}
2062	}
2063
2064	return 0;
2065}
2066
2067/*
2068 *	Clear the modify bits on the specified physical page.
2069 */
2070void
2071pmap_clear_modify(vm_page_t m)
2072{
2073	pv_entry_t pv;
2074
2075	if (!pmap_initialized || (m->flags & PG_FICTITIOUS))
2076		return;
2077
2078	for (pv = TAILQ_FIRST(&m->md.pv_list);
2079		pv;
2080		pv = TAILQ_NEXT(pv, pv_list)) {
2081		if (pv->pv_pte.pte_d) {
2082			pv->pv_pte.pte_d = 0;
2083			pmap_update_vhpt(pv);
2084		}
2085	}
2086}
2087
2088/*
2089 *	pmap_clear_reference:
2090 *
2091 *	Clear the reference bit on the specified physical page.
2092 */
2093void
2094pmap_clear_reference(vm_page_t m)
2095{
2096	pv_entry_t pv;
2097
2098	if (!pmap_initialized || (m->flags & PG_FICTITIOUS))
2099		return;
2100
2101	for (pv = TAILQ_FIRST(&m->md.pv_list);
2102		pv;
2103		pv = TAILQ_NEXT(pv, pv_list)) {
2104		if (pv->pv_pte.pte_a) {
2105			pv->pv_pte.pte_a = 0;
2106			pmap_update_vhpt(pv);
2107		}
2108	}
2109}
2110
2111/*
2112 * Miscellaneous support routines follow
2113 */
2114
2115static void
2116ia64_protection_init()
2117{
2118	int prot, *kp, *up;
2119
2120	kp = protection_codes[0];
2121	up = protection_codes[1];
2122
2123	for (prot = 0; prot < 8; prot++) {
2124		switch (prot) {
2125		case VM_PROT_NONE | VM_PROT_NONE | VM_PROT_NONE:
2126			*kp++ = (PTE_AR_R << 2) | PTE_PL_KERN;
2127			*up++ = (PTE_AR_R << 2) | PTE_PL_KERN;
2128			break;
2129
2130		case VM_PROT_NONE | VM_PROT_NONE | VM_PROT_EXECUTE:
2131			*kp++ = (PTE_AR_X_RX << 2) | PTE_PL_KERN;
2132			*up++ = (PTE_AR_X_RX << 2) | PTE_PL_USER;
2133			break;
2134
2135		case VM_PROT_NONE | VM_PROT_WRITE | VM_PROT_NONE:
2136			*kp++ = (PTE_AR_RW << 2) | PTE_PL_KERN;
2137			*up++ = (PTE_AR_RW << 2) | PTE_PL_USER;
2138			break;
2139
2140		case VM_PROT_NONE | VM_PROT_WRITE | VM_PROT_EXECUTE:
2141			*kp++ = (PTE_AR_RWX << 2) | PTE_PL_KERN;
2142			*up++ = (PTE_AR_RWX << 2) | PTE_PL_USER;
2143			break;
2144
2145		case VM_PROT_READ | VM_PROT_NONE | VM_PROT_NONE:
2146			*kp++ = (PTE_AR_R << 2) | PTE_PL_KERN;
2147			*up++ = (PTE_AR_R << 2) | PTE_PL_USER;
2148			break;
2149
2150		case VM_PROT_READ | VM_PROT_NONE | VM_PROT_EXECUTE:
2151			*kp++ = (PTE_AR_RX << 2) | PTE_PL_KERN;
2152			*up++ = (PTE_AR_RX << 2) | PTE_PL_USER;
2153			break;
2154
2155		case VM_PROT_READ | VM_PROT_WRITE | VM_PROT_NONE:
2156			*kp++ = (PTE_AR_RW << 2) | PTE_PL_KERN;
2157			*up++ = (PTE_AR_RW << 2) | PTE_PL_USER;
2158			break;
2159
2160		case VM_PROT_READ | VM_PROT_WRITE | VM_PROT_EXECUTE:
2161			*kp++ = (PTE_AR_RWX << 2) | PTE_PL_KERN;
2162			*up++ = (PTE_AR_RWX << 2) | PTE_PL_USER;
2163			break;
2164		}
2165	}
2166}
2167
2168/*
2169 * Map a set of physical memory pages into the kernel virtual
2170 * address space. Return a pointer to where it is mapped. This
2171 * routine is intended to be used for mapping device memory,
2172 * NOT real memory.
2173 */
2174void *
2175pmap_mapdev(pa, size)
2176	vm_offset_t pa;
2177	vm_size_t size;
2178{
2179	return (void*) IA64_PHYS_TO_RR6(pa);
2180}
2181
2182/*
2183 * perform the pmap work for mincore
2184 */
2185int
2186pmap_mincore(pmap, addr)
2187	pmap_t pmap;
2188	vm_offset_t addr;
2189{
2190	pv_entry_t pv;
2191	struct ia64_lpte *pte;
2192	int val = 0;
2193
2194	pv = pmap_find_pv(pmap, addr);
2195	if (pv == 0) {
2196		return 0;
2197	}
2198	pte = &pv->pv_pte;
2199
2200	if (pmap_pte_v(pte)) {
2201		vm_page_t m;
2202		vm_offset_t pa;
2203
2204		val = MINCORE_INCORE;
2205		if ((pte->pte_ig & PTE_IG_MANAGED) == 0)
2206			return val;
2207
2208		pa = pmap_pte_pa(pte);
2209
2210		m = PHYS_TO_VM_PAGE(pa);
2211
2212		/*
2213		 * Modified by us
2214		 */
2215		if (pte->pte_d)
2216			val |= MINCORE_MODIFIED|MINCORE_MODIFIED_OTHER;
2217		/*
2218		 * Modified by someone
2219		 */
2220		else if (pmap_is_modified(m))
2221			val |= MINCORE_MODIFIED_OTHER;
2222		/*
2223		 * Referenced by us
2224		 */
2225		if (pte->pte_a)
2226			val |= MINCORE_REFERENCED|MINCORE_REFERENCED_OTHER;
2227
2228		/*
2229		 * Referenced by someone
2230		 */
2231		else if (pmap_ts_referenced(m)) {
2232			val |= MINCORE_REFERENCED_OTHER;
2233			vm_page_flag_set(m, PG_REFERENCED);
2234		}
2235	}
2236	return val;
2237}
2238
2239void
2240pmap_activate(struct proc *p)
2241{
2242	pmap_t pmap;
2243
2244	pmap = vmspace_pmap(p->p_vmspace);
2245
2246	if (pmap_active && pmap != pmap_active) {
2247		pmap_active->pm_active = 0;
2248		pmap_active = 0;
2249	}
2250
2251	if (pmap->pm_asngen != pmap_current_asngen)
2252		pmap_get_asn(pmap);
2253
2254	pmap_active = pmap;
2255	pmap->pm_active = 1;	/* XXX use bitmap for SMP */
2256
2257#if 0
2258	p->p_addr->u_pcb.pcb_hw.apcb_asn = pmap->pm_asn;
2259#endif
2260
2261	if (p == curproc) {
2262#if 0
2263		ia64_pal_swpctx((u_long)p->p_md.md_pcbpaddr);
2264#endif
2265	}
2266}
2267
2268void
2269pmap_deactivate(struct proc *p)
2270{
2271	pmap_t pmap;
2272	pmap = vmspace_pmap(p->p_vmspace);
2273	pmap->pm_active = 0;
2274	pmap_active = 0;
2275}
2276
2277vm_offset_t
2278pmap_addr_hint(vm_object_t obj, vm_offset_t addr, vm_size_t size)
2279{
2280
2281	return addr;
2282}
2283
2284#if 0
2285#if defined(PMAP_DEBUG)
2286pmap_pid_dump(int pid)
2287{
2288	pmap_t pmap;
2289	struct proc *p;
2290	int npte = 0;
2291	int index;
2292	LIST_FOREACH(p, &allproc, p_list) {
2293		if (p->p_pid != pid)
2294			continue;
2295
2296		if (p->p_vmspace) {
2297			int i,j;
2298			index = 0;
2299			pmap = vmspace_pmap(p->p_vmspace);
2300			for(i=0;i<1024;i++) {
2301				pd_entry_t *pde;
2302				pt_entry_t *pte;
2303				unsigned base = i << PDRSHIFT;
2304
2305				pde = &pmap->pm_pdir[i];
2306				if (pde && pmap_pde_v(pde)) {
2307					for(j=0;j<1024;j++) {
2308						unsigned va = base + (j << PAGE_SHIFT);
2309						if (va >= (vm_offset_t) VM_MIN_KERNEL_ADDRESS) {
2310							if (index) {
2311								index = 0;
2312								printf("\n");
2313							}
2314							return npte;
2315						}
2316						pte = pmap_pte_quick( pmap, va);
2317						if (pte && pmap_pte_v(pte)) {
2318							vm_offset_t pa;
2319							vm_page_t m;
2320							pa = *(int *)pte;
2321							m = PHYS_TO_VM_PAGE(pa);
2322							printf("va: 0x%x, pt: 0x%x, h: %d, w: %d, f: 0x%x",
2323								va, pa, m->hold_count, m->wire_count, m->flags);
2324							npte++;
2325							index++;
2326							if (index >= 2) {
2327								index = 0;
2328								printf("\n");
2329							} else {
2330								printf(" ");
2331							}
2332						}
2333					}
2334				}
2335			}
2336		}
2337	}
2338	return npte;
2339}
2340#endif
2341
2342#if defined(DEBUG)
2343
2344static void	pads __P((pmap_t pm));
2345static void	pmap_pvdump __P((vm_page_t m));
2346
2347/* print address space of pmap*/
2348static void
2349pads(pm)
2350	pmap_t pm;
2351{
2352        int i, j;
2353	vm_offset_t va;
2354	pt_entry_t *ptep;
2355
2356	if (pm == kernel_pmap)
2357		return;
2358	for (i = 0; i < 1024; i++)
2359		if (pm->pm_pdir[i])
2360			for (j = 0; j < 1024; j++) {
2361				va = (i << PDRSHIFT) + (j << PAGE_SHIFT);
2362				if (pm == kernel_pmap && va < KERNBASE)
2363					continue;
2364				if (pm != kernel_pmap && va > UPT_MAX_ADDRESS)
2365					continue;
2366				ptep = pmap_pte_quick(pm, va);
2367				if (pmap_pte_v(ptep))
2368					printf("%x:%x ", va, *(int *) ptep);
2369			};
2370
2371}
2372
2373static void
2374pmap_pvdump(pa)
2375	vm_offset_t pa;
2376{
2377	pv_entry_t pv;
2378
2379	printf("pa %x", pa);
2380	m = PHYS_TO_VM_PAGE(pa);
2381	for (pv = TAILQ_FIRST(&m->md.pv_list);
2382		pv;
2383		pv = TAILQ_NEXT(pv, pv_list)) {
2384		printf(" -> pmap %x, va %x",
2385		    pv->pv_pmap, pv->pv_va);
2386		pads(pv->pv_pmap);
2387	}
2388	printf(" ");
2389}
2390#endif
2391#endif
2392