/*-
 * Copyright (c) 2001 Jake Burkholder.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD: head/sys/sparc64/sparc64/pmap.c 80709 2001-07-31 06:05:05Z jake $
 */

/*
 * Manages physical address maps.
 *
 * In addition to hardware address maps, this module is called upon to
 * provide software-use-only maps which may or may not be stored in the
 * same form as hardware maps.  These pseudo-maps are used to store
 * intermediate results from copy operations to and from address spaces.
 *
 * Since the information managed by this module is also stored by the
 * logical address mapping module, this module may throw away valid virtual
 * to physical mappings at almost any time.  However, invalidations of
 * mappings must be done as requested.
 *
 * In order to cope with hardware architectures which make virtual to
 * physical map invalidates expensive, this module may delay invalidate or
 * reduced protection operations until such time as they are actually
 * necessary.  This module is given full information as to which processors
 * are currently using which maps, and as to when physical maps must be made
 * correct.
 */

#include <sys/param.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/systm.h>
#include <sys/vmmeter.h>

#include <dev/ofw/openfirm.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/vm_kern.h>
#include <vm/vm_page.h>
#include <vm/vm_map.h>
#include <vm/vm_object.h>
#include <vm/vm_extern.h>
#include <vm/vm_pageout.h>
#include <vm/vm_pager.h>
#include <vm/vm_zone.h>

#include <machine/frame.h>
#include <machine/pv.h>
#include <machine/tlb.h>
#include <machine/tte.h>
#include <machine/tsb.h>

#define	PMAP_DEBUG

#define	PMAP_LOCK(pm)
#define	PMAP_UNLOCK(pm)

#define	dcache_global_flush(pa)
#define	icache_global_flush(pa)

struct mem_region {
	vm_offset_t mr_start;
	vm_offset_t mr_size;
};

struct ofw_map {
	vm_offset_t om_start;
	vm_offset_t om_size;
	u_long	om_tte;
};

/*
 * Virtual address of message buffer.
 */
struct msgbuf *msgbufp;

/*
 * Physical addresses of first and last available physical page.
 */
vm_offset_t avail_start;
vm_offset_t avail_end;

/*
 * Map of physical memory regions.
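 * Each region is a pair of start and end addresses; the array is
 * terminated by an entry of zeroes.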
 */
vm_offset_t phys_avail[10];

/*
 * First and last available kernel virtual addresses.
 */
vm_offset_t virtual_avail;
vm_offset_t virtual_end;
vm_offset_t kernel_vm_end;

/*
 * Kernel pmap handle and associated storage.
 */
pmap_t kernel_pmap;
static struct pmap kernel_pmap_store;

/*
 * Map of free and in use hardware contexts and index of first potentially
 * free context.
 */
static char pmap_context_map[PMAP_CONTEXT_MAX];
static u_int pmap_context_base;

/*
 * Virtual addresses of free space for temporary mappings.  Used for copying
 * and zeroing physical pages.
 */
static vm_offset_t CADDR1;
static vm_offset_t CADDR2;

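/*
 * Return non-zero if modifications at the given virtual address should be
 * tracked.  Addresses inside the clean submap are excluded.
 */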
static __inline int
pmap_track_modified(vm_offset_t va)
{
	return ((va < clean_sva) || (va >= clean_eva));
}

/*
 * Manipulate tte bits of all virtual to physical mappings for the given page.
 */
static void pmap_bit_clear(vm_page_t m, u_long bits);
static void pmap_bit_set(vm_page_t m, u_long bits);
static int pmap_bit_test(vm_page_t m, u_long bits);

static void pmap_local_remove_all(vm_page_t m);
static void pmap_global_remove_all(vm_page_t m);

/*
 * Allocate and free hardware context numbers.
 */
static u_int pmap_context_alloc(void);
static void pmap_context_destroy(u_int i);

/*
 * Allocate physical memory for use in pmap_bootstrap.
 */
static vm_offset_t pmap_bootstrap_alloc(vm_size_t size);

/*
 * Quick sort callout for comparing memory regions.
 */
static int mr_cmp(const void *a, const void *b);
static int
mr_cmp(const void *a, const void *b)
{
	return ((const struct mem_region *)a)->mr_start -
	    ((const struct mem_region *)b)->mr_start;
}

/*
 * Bootstrap the system enough to run with virtual memory.
 */
void
pmap_bootstrap(vm_offset_t skpa, vm_offset_t ekva)
{
	struct mem_region mra[8];
	ihandle_t pmem;
	struct pmap *pm;
	vm_offset_t pa;
	vm_offset_t va;
	struct tte tte;
	int sz;
	int i;
	int j;

	/*
	 * Find out what physical memory is available from the prom and
	 * initialize the phys_avail array.
	 */
	if ((pmem = OF_finddevice("/memory")) == -1)
		panic("pmap_bootstrap: finddevice /memory");
	if ((sz = OF_getproplen(pmem, "available")) == -1)
		panic("pmap_bootstrap: getproplen /memory/available");
	if (sizeof(phys_avail) < sz)
		panic("pmap_bootstrap: phys_avail too small");
	bzero(mra, sz);
	if (OF_getprop(pmem, "available", mra, sz) == -1)
		panic("pmap_bootstrap: getprop /memory/available");
	sz /= sizeof(*mra);
	qsort(mra, sz, sizeof *mra, mr_cmp);
	for (i = 0, j = 0; i < sz; i++, j += 2) {
		phys_avail[j] = mra[i].mr_start;
		phys_avail[j + 1] = mra[i].mr_start + mra[i].mr_size;
	}

	/*
	 * Initialize the kernel pmap (which is statically allocated).
	 */
	pm = &kernel_pmap_store;
	pm->pm_context = TLB_CTX_KERNEL;
	pm->pm_active = ~0;
	pm->pm_count = 1;
	kernel_pmap = pm;

	/*
	 * Allocate the kernel tsb and lock it in the tlb.
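	 * The tsb is mapped with locked 4 meg dtlb entries so that accesses
	 * to it from the tlb miss handlers cannot themselves miss.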
	 */
	pa = pmap_bootstrap_alloc(TSB_KERNEL_SIZE);
	if (pa & PAGE_MASK_4M)
		panic("pmap_bootstrap: tsb unaligned\n");
	tsb_kernel_phys = pa;
	for (i = 0; i < TSB_KERNEL_PAGES; i++) {
		va = TSB_KERNEL_MIN_ADDRESS + i * PAGE_SIZE_4M;
		tte.tte_tag = TT_CTX(TLB_CTX_KERNEL) | TT_VA(va);
		tte.tte_data = TD_V | TD_4M | TD_VA_LOW(va) | TD_PA(pa) |
		    TD_MOD | TD_REF | TD_TSB | TD_L | TD_CP | TD_P | TD_W;
		tlb_store_slot(TLB_DTLB, va, tte, TLB_SLOT_TSB_KERNEL_MIN + i);
	}
	bzero((void *)va, TSB_KERNEL_SIZE);
	stxa(AA_IMMU_TSB, ASI_IMMU,
	    (va >> (STTE_SHIFT - TTE_SHIFT)) | TSB_SIZE_REG);
	stxa(AA_DMMU_TSB, ASI_DMMU,
	    (va >> (STTE_SHIFT - TTE_SHIFT)) | TSB_SIZE_REG);
	membar(Sync);

	/*
	 * Calculate the first and last available physical addresses.
	 */
	avail_start = phys_avail[0];
	for (i = 0; phys_avail[i + 2] != 0; i += 2)
		;
	avail_end = phys_avail[i + 1];

	/*
	 * Allocate physical memory for the heads of the stte alias chains.
	 */
	sz = round_page(((avail_end - avail_start) >> PAGE_SHIFT) *
	    sizeof (vm_offset_t));
	pv_table = pmap_bootstrap_alloc(sz);
	/* XXX */
	avail_start += sz;
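	/*
	 * The table is not mapped yet, so zero it with physical address
	 * stores.
	 */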
	for (i = 0; i < sz; i += sizeof(vm_offset_t))
		stxp(pv_table + i, 0);

	/*
	 * Set the start and end of kva.  The kernel is loaded at the first
	 * available 4 meg super page, so round up to the end of the page.
	 */
	virtual_avail = roundup(ekva, PAGE_SIZE_4M);
	virtual_end = VM_MAX_KERNEL_ADDRESS;

	/*
	 * Allocate virtual address space for copying and zeroing pages of
	 * physical memory.
	 */
	CADDR1 = virtual_avail;
	virtual_avail += PAGE_SIZE;
	CADDR2 = virtual_avail;
	virtual_avail += PAGE_SIZE;
}

/*
 * Allocate a block of physical memory directly from the phys_avail map.
 * Can only be called from pmap_bootstrap before avail_start and avail_end
 * are calculated.
 */
static vm_offset_t
pmap_bootstrap_alloc(vm_size_t size)
{
	vm_offset_t pa;
	int i;

	size = round_page(size);
	for (i = 0; phys_avail[i] != 0; i += 2) {
		if (phys_avail[i + 1] - phys_avail[i] < size)
			continue;
		pa = phys_avail[i];
		phys_avail[i] += size;
		return (pa);
	}
	panic("pmap_bootstrap_alloc");
}

/*
 * Allocate a hardware context number from the context map.
 */
static u_int
pmap_context_alloc(void)
{
	u_int i;

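	/*
	 * Scan round-robin starting at the context after the last one
	 * allocated.  The mask arithmetic assumes that PMAP_CONTEXT_MAX is
	 * a power of two.
	 */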
	i = pmap_context_base;
	do {
		if (pmap_context_map[i] == 0) {
			pmap_context_map[i] = 1;
			pmap_context_base = (i + 1) & (PMAP_CONTEXT_MAX - 1);
			return (i);
		}
	} while ((i = (i + 1) & (PMAP_CONTEXT_MAX - 1)) != pmap_context_base);
	panic("pmap_context_alloc");
}

/*
 * Free a hardware context number back to the context map.
 */
static void
pmap_context_destroy(u_int i)
{

	pmap_context_map[i] = 0;
}

/*
 * Map a range of physical addresses into kernel virtual address space.
 *
 * The value passed in *virt is a suggested virtual address for the mapping.
 * Architectures which can support a direct-mapped physical to virtual region
 * can return the appropriate address within that region, leaving '*virt'
 * unchanged.  We cannot and therefore do not; *virt is updated with the
 * first usable address after the mapped region.
 */
vm_offset_t
pmap_map(vm_offset_t *virt, vm_offset_t pa_start, vm_offset_t pa_end, int prot)
{
	vm_offset_t sva;
	vm_offset_t va;

	sva = *virt;
	va = sva;
	for (; pa_start < pa_end; pa_start += PAGE_SIZE, va += PAGE_SIZE)
		pmap_kenter(va, pa_start);
	*virt = va;
	return (sva);
}

/*
 * Map a wired page into kernel virtual address space.
 */
void
pmap_kenter(vm_offset_t va, vm_offset_t pa)
{
	struct tte tte;

	tte.tte_tag = TT_CTX(TLB_CTX_KERNEL) | TT_VA(va);
	tte.tte_data = TD_V | TD_8K | TD_VA_LOW(va) | TD_PA(pa) |
	    TD_MOD | TD_REF | TD_CP | TD_P | TD_W;
	tsb_tte_enter_kernel(va, tte);
}

/*
 * Remove a wired page from kernel virtual address space.
 */
void
pmap_kremove(vm_offset_t va)
{
	tsb_remove_kernel(va);
}

/*
 * Map a list of wired pages into kernel virtual address space.  This is
 * intended for temporary mappings which do not need page modification or
 * references recorded.  Existing mappings in the region are overwritten.
 */
void
pmap_qenter(vm_offset_t va, vm_page_t *m, int count)
{
	int i;

	for (i = 0; i < count; i++, va += PAGE_SIZE)
		pmap_kenter(va, VM_PAGE_TO_PHYS(m[i]));
}

/*
 * Remove page mappings from kernel virtual address space.  Intended for
 * temporary mappings entered by pmap_qenter.
 */
void
pmap_qremove(vm_offset_t va, int count)
{
	int i;

	for (i = 0; i < count; i++, va += PAGE_SIZE)
		pmap_kremove(va);
}

/*
 * Map the given physical page at the specified virtual address in the
 * target pmap with the protection requested.  If specified the page
 * will be wired down.
 */
void
pmap_enter(pmap_t pm, vm_offset_t va, vm_page_t m, vm_prot_t prot,
	   boolean_t wired)
{
	struct stte *stp;
	struct tte tte;
	vm_offset_t pa;

	pa = VM_PAGE_TO_PHYS(m);
	tte.tte_tag = TT_CTX(pm->pm_context) | TT_VA(va);
	tte.tte_data = TD_V | TD_8K | TD_VA_LOW(va) | TD_PA(pa) |
	    TD_CP | TD_CV;
	if (pm->pm_context == TLB_CTX_KERNEL)
		tte.tte_data |= TD_P;
	if (wired == TRUE) {
		tte.tte_data |= TD_REF;
		if (prot & VM_PROT_WRITE)
			tte.tte_data |= TD_MOD;
	}
	if (prot & VM_PROT_WRITE)
		tte.tte_data |= TD_W;
	if (prot & VM_PROT_EXECUTE) {
		tte.tte_data |= TD_EXEC;
		icache_global_flush(&pa);
	}

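	/*
	 * Kernel mappings are entered directly into the kernel tsb and are
	 * not tracked on the pv lists.
	 */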
	if (pm == kernel_pmap) {
		tsb_tte_enter_kernel(va, tte);
		return;
	}

	PMAP_LOCK(pm);
	if ((stp = tsb_stte_lookup(pm, va)) != NULL) {
		pv_remove_virt(stp);
		tsb_stte_remove(stp);
		pv_insert(pm, pa, va, stp);
		stp->st_tte = tte;
	} else {
		tsb_tte_enter(pm, va, tte);
	}
	PMAP_UNLOCK(pm);
}

/*
 * Initialize the pmap module.
 */
void
pmap_init(vm_offset_t phys_start, vm_offset_t phys_end)
{
}

void
pmap_init2(void)
{
}

/*
 * Initialize the pmap associated with process 0.
 */
void
pmap_pinit0(pmap_t pm)
{

	pm = &kernel_pmap_store;
	pm->pm_context = pmap_context_alloc();
	pm->pm_active = 0;
	pm->pm_count = 1;
	bzero(&pm->pm_stats, sizeof(pm->pm_stats));
}

/*
 * Initialize a preallocated and zeroed pmap structure.
 */
void
pmap_pinit(pmap_t pm)
{
	struct stte *stp;

	pm->pm_context = pmap_context_alloc();
	pm->pm_active = 0;
	pm->pm_count = 1;
	stp = &pm->pm_stte;
	stp->st_tte = tsb_page_alloc(pm, (vm_offset_t)tsb_base(0));
	bzero(&pm->pm_stats, sizeof(pm->pm_stats));
}

void
pmap_pinit2(pmap_t pmap)
{
}

/*
 * Grow the number of kernel page table entries.  Unneeded.
 */
void
pmap_growkernel(vm_offset_t addr)
{
}

/*
 * Zero a page of physical memory by temporarily mapping it into the tlb.
 */
void
pmap_zero_page(vm_offset_t pa)
{
	struct tte tte;
	vm_offset_t va;

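	/*
	 * Enter a temporary locked mapping for the page, zero it through
	 * that mapping, and demap it again.
	 */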
	va = CADDR2;
	tte.tte_tag = TT_CTX(TLB_CTX_KERNEL) | TT_VA(va);
	tte.tte_data = TD_V | TD_8K | TD_PA(pa) | TD_L | TD_CP | TD_P | TD_W;
	tlb_store(TLB_DTLB, va, tte);
	bzero((void *)va, PAGE_SIZE);
	tlb_page_demap(TLB_DTLB, TLB_CTX_KERNEL, va);
}

/*
 * Make the specified page pageable (or not).  Unneeded.
 */
void
pmap_pageable(pmap_t pmap, vm_offset_t sva, vm_offset_t eva,
	      boolean_t pageable)
{
}

/*
 * Create the kernel stack and user structure for a new process.  This
 * routine directly affects the performance of fork().
 */
void
pmap_new_proc(struct proc *p)
{
	struct user *u;
	vm_object_t o;
	vm_page_t m;
	u_int i;

	if ((o = p->p_upages_obj) == NULL) {
		o = vm_object_allocate(OBJT_DEFAULT, UPAGES);
		p->p_upages_obj = o;
	}
	if ((u = p->p_addr) == NULL) {
		u = (struct user *)kmem_alloc_nofault(kernel_map,
		    UPAGES * PAGE_SIZE);
		KASSERT(u != NULL, ("pmap_new_proc: u area\n"));
		p->p_addr = u;
	}
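	/*
	 * Grab and wire a page for each part of the u area and enter it
	 * into the kernel address space.
	 */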
	for (i = 0; i < UPAGES; i++) {
		m = vm_page_grab(o, i, VM_ALLOC_NORMAL | VM_ALLOC_RETRY);
		m->wire_count++;
		cnt.v_wire_count++;

		pmap_kenter((vm_offset_t)u + i * PAGE_SIZE,
		    VM_PAGE_TO_PHYS(m));

		vm_page_wakeup(m);
		vm_page_flag_clear(m, PG_ZERO);
		vm_page_flag_set(m, PG_MAPPED | PG_WRITEABLE);
		m->valid = VM_PAGE_BITS_ALL;
	}
}

void
pmap_page_protect(vm_page_t m, vm_prot_t prot)
{

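	/*
	 * Downgrading to read-only or execute-only just clears the write bit
	 * in all mappings; revoking all access removes the mappings entirely.
	 */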
	if (m->flags & PG_FICTITIOUS || prot & VM_PROT_WRITE)
		return;
	if (prot & (VM_PROT_READ | VM_PROT_EXECUTE))
		pmap_bit_clear(m, TD_W);
	else
		pmap_global_remove_all(m);
}

void
pmap_clear_modify(vm_page_t m)
{

	if (m->flags & PG_FICTITIOUS)
		return;
	pmap_bit_clear(m, TD_MOD);
}

static void
pmap_bit_clear(vm_page_t m, u_long bits)
{
	vm_offset_t pstp;
	vm_offset_t pvh;
	vm_offset_t pa;
	vm_offset_t va;
	struct tte tte;

	pa = VM_PAGE_TO_PHYS(m);
	pvh = pv_lookup(pa);
	PV_LOCK();
#ifdef notyet
restart:
#endif
	for (pstp = pv_get_first(pvh); pstp != 0; pstp = pv_get_next(pstp)) {
		tte = pv_get_tte(pstp);
		KASSERT(TD_PA(tte.tte_data) == pa,
		    ("pmap_bit_clear: corrupt alias chain"));
		if ((tte.tte_data & bits) == 0)
			continue;
		va = tte_get_va(tte);
		if (bits == TD_W && !pmap_track_modified(va))
			continue;
		if (bits == TD_W && tte.tte_data & TD_MOD) {
			vm_page_dirty(m);
			bits |= TD_MOD;
		}
		pv_bit_clear(pstp, bits);
#ifdef notyet
		generation = pv_generation;
		PV_UNLOCK();
		/* XXX pass function and parameter to ipi call */
		ipi_all(IPI_TLB_PAGE_DEMAP);
		PV_LOCK();
		if (generation != pv_generation)
			goto restart;
#else
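		/* No cross-cpu shootdown yet; demap only the local tlb. */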
		tlb_page_demap(TLB_DTLB, tte_get_ctx(tte), va);
#endif
	}
	PV_UNLOCK();
}

static void
pmap_bit_set(vm_page_t m, u_long bits)
{
	vm_offset_t pstp;
	vm_offset_t pvh;
	vm_offset_t pa;
	struct tte tte;

	pa = VM_PAGE_TO_PHYS(m);
	pvh = pv_lookup(pa);
	PV_LOCK();
#ifdef notyet
restart:
#endif
	for (pstp = pv_get_first(pvh); pstp != 0; pstp = pv_get_next(pstp)) {
		tte = pv_get_tte(pstp);
		KASSERT(TD_PA(tte.tte_data) == pa,
		    ("pmap_bit_set: corrupt alias chain"));
		if (tte.tte_data & bits)
			continue;
		pv_bit_set(pstp, bits);
#ifdef notyet
		generation = pv_generation;
		PV_UNLOCK();
		/* XXX pass function and parameter to ipi call */
		ipi_all(IPI_TLB_PAGE_DEMAP);
		PV_LOCK();
		if (generation != pv_generation)
			goto restart;
#else
		tlb_page_demap(TLB_DTLB, tte_get_ctx(tte), tte_get_va(tte));
#endif
	}
	PV_UNLOCK();
}

static int
pmap_bit_test(vm_page_t m, u_long bits)
{
	vm_offset_t pstp;
	vm_offset_t pvh;
	vm_offset_t pa;

	pa = VM_PAGE_TO_PHYS(m);
	pvh = pv_lookup(pa);
	PV_LOCK();
	for (pstp = pv_get_first(pvh); pstp != 0; pstp = pv_get_next(pstp)) {
		if (pv_bit_test(pstp, bits)) {
			PV_UNLOCK();
			return (1);
		}
	}
	PV_UNLOCK();
	return (0);
}

static void
pmap_global_remove_all(vm_page_t m)
{
	vm_offset_t pstp;
	vm_offset_t pvh;
	vm_offset_t pa;

	printf("pmap_global_remove_all\n");
	pa = VM_PAGE_TO_PHYS(m);
	pvh = pv_lookup(pa);
	pv_dump(pvh);
	PV_LOCK();
	printf("pmap_global_remove_all: for\n");
	for (pstp = pv_get_first(pvh); pstp != 0; pstp = pv_get_next(pstp))
		pv_bit_clear(pstp, TD_V);
	printf("pmap_global_remove_all: done for\n");
	PV_UNLOCK();
	pmap_local_remove_all(m);
	pv_dump(pvh);
	PV_LOCK();
	printf("pmap_global_remove_all: while\n");
	while ((pstp = pv_get_first(pvh)) != 0) {
		pv_dump(pvh);
		pv_remove_phys(pstp);
	}
	printf("pmap_global_remove_all: done while\n");
	PV_UNLOCK();
	printf("pmap_global_remove_all: done\n");
}

static void
pmap_local_remove_all(vm_page_t m)
{
	vm_offset_t pstp;
	vm_offset_t pvh;
	vm_offset_t pa;
	struct tte tte;

	pa = VM_PAGE_TO_PHYS(m);
	pvh = pv_lookup(pa);
	PV_LOCK();
	printf("pmap_local_remove_all: for\n");
	for (pstp = pv_get_first(pvh); pstp != 0; pstp = pv_get_next(pstp)) {
		tte = pv_get_tte(pstp);
		tsb_tte_local_remove(&tte);
	}
	printf("pmap_local_remove_all: done for\n");
	PV_UNLOCK();
}

void
pmap_activate(struct proc *p)
{
	TODO;
}

vm_offset_t
pmap_addr_hint(vm_object_t object, vm_offset_t va, vm_size_t size)
{
	TODO;
	return (0);
}

void
pmap_change_wiring(pmap_t pmap, vm_offset_t va, boolean_t wired)
{
	TODO;
}

void
pmap_collect(void)
{
	TODO;
}

void
pmap_copy(pmap_t dst_pmap, pmap_t src_pmap, vm_offset_t dst_addr,
	  vm_size_t len, vm_offset_t src_addr)
{
	TODO;
}

void
pmap_copy_page(vm_offset_t src, vm_offset_t dst)
{
	TODO;
}

void
pmap_zero_page_area(vm_offset_t pa, int off, int size)
{
	TODO;
}

vm_offset_t
pmap_extract(pmap_t pmap, vm_offset_t va)
{
	TODO;
	return (0);
}

boolean_t
pmap_is_modified(vm_page_t m)
{
	TODO;
	return (0);
}

void
pmap_clear_reference(vm_page_t m)
{
	TODO;
}

int
pmap_ts_referenced(vm_page_t m)
{
	TODO;
	return (0);
}

vm_offset_t
pmap_kextract(vm_offset_t va)
{
	TODO;
	return (0);
}

int
pmap_mincore(pmap_t pmap, vm_offset_t addr)
{
	TODO;
	return (0);
}

void
pmap_object_init_pt(pmap_t pmap, vm_offset_t addr, vm_object_t object,
		    vm_pindex_t pindex, vm_size_t size, int limit)
{
	TODO;
}

boolean_t
pmap_page_exists(pmap_t pmap, vm_page_t m)
{
	TODO;
	return (0);
}

void
pmap_prefault(pmap_t pmap, vm_offset_t va, vm_map_entry_t entry)
{
	TODO;
}

void
pmap_protect(pmap_t pmap, vm_offset_t sva, vm_offset_t eva, vm_prot_t prot)
{
	TODO;
}

vm_offset_t
pmap_phys_address(int ppn)
{
	TODO;
	return (0);
}

void
pmap_reference(pmap_t pm)
{
	if (pm != NULL)
		pm->pm_count++;
}

void
pmap_release(pmap_t pmap)
{
	TODO;
}

void
pmap_remove(pmap_t pmap, vm_offset_t sva, vm_offset_t eva)
{
	TODO;
}

void
pmap_remove_pages(pmap_t pmap, vm_offset_t sva, vm_offset_t eva)
{
	TODO;
}

void
pmap_swapin_proc(struct proc *p)
{
	TODO;
}

void
pmap_swapout_proc(struct proc *p)
{
	TODO;
}
