pmap.c revision 108245
1/*
2 * Copyright (c) 1991 Regents of the University of California.
3 * All rights reserved.
4 * Copyright (c) 1994 John S. Dyson
5 * All rights reserved.
6 * Copyright (c) 1994 David Greenman
7 * All rights reserved.
8 *
9 * This code is derived from software contributed to Berkeley by
10 * the Systems Programming Group of the University of Utah Computer
11 * Science Department and William Jolitz of UUNET Technologies Inc.
12 *
13 * Redistribution and use in source and binary forms, with or without
14 * modification, are permitted provided that the following conditions
15 * are met:
16 * 1. Redistributions of source code must retain the above copyright
17 *    notice, this list of conditions and the following disclaimer.
18 * 2. Redistributions in binary form must reproduce the above copyright
19 *    notice, this list of conditions and the following disclaimer in the
20 *    documentation and/or other materials provided with the distribution.
21 * 3. All advertising materials mentioning features or use of this software
22 *    must display the following acknowledgement:
23 *      This product includes software developed by the University of
24 *      California, Berkeley and its contributors.
25 * 4. Neither the name of the University nor the names of its contributors
26 *    may be used to endorse or promote products derived from this software
27 *    without specific prior written permission.
28 *
29 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
30 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
31 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
32 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
33 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
34 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
35 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
36 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
37 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
38 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
39 * SUCH DAMAGE.
40 *
41 *      from:   @(#)pmap.c      7.7 (Berkeley)  5/12/91
42 * $FreeBSD: head/sys/sparc64/sparc64/pmap.c 108245 2002-12-23 23:39:57Z jake $
43 */
44
45/*
46 * Manages physical address maps.
47 *
48 * In addition to hardware address maps, this module is called upon to
49 * provide software-use-only maps which may or may not be stored in the
50 * same form as hardware maps.  These pseudo-maps are used to store
51 * intermediate results from copy operations to and from address spaces.
52 *
53 * Since the information managed by this module is also stored by the
54 * logical address mapping module, this module may throw away valid virtual
55 * to physical mappings at almost any time.  However, invalidations of
56 * mappings must be done as requested.
57 *
58 * In order to cope with hardware architectures which make virtual to
59 * physical map invalidates expensive, this module may delay invalidate
60 * or reduced protection operations until such time as they are actually
61 * necessary.  This module is given full information as to which processors
62 * are currently using which maps, and to when physical maps must be made
63 * correct.
64 */
65
66#include "opt_msgbuf.h"
67#include "opt_pmap.h"
68
69#include <sys/param.h>
70#include <sys/kernel.h>
71#include <sys/ktr.h>
72#include <sys/lock.h>
73#include <sys/msgbuf.h>
74#include <sys/mutex.h>
75#include <sys/proc.h>
76#include <sys/smp.h>
77#include <sys/sysctl.h>
78#include <sys/systm.h>
79#include <sys/vmmeter.h>
80
81#include <dev/ofw/openfirm.h>
82
83#include <vm/vm.h>
84#include <vm/vm_param.h>
85#include <vm/vm_kern.h>
86#include <vm/vm_page.h>
87#include <vm/vm_map.h>
88#include <vm/vm_object.h>
89#include <vm/vm_extern.h>
90#include <vm/vm_pageout.h>
91#include <vm/vm_pager.h>
92#include <vm/uma.h>
93
94#include <machine/cache.h>
95#include <machine/frame.h>
96#include <machine/instr.h>
97#include <machine/md_var.h>
98#include <machine/metadata.h>
99#include <machine/ofw_mem.h>
100#include <machine/smp.h>
101#include <machine/tlb.h>
102#include <machine/tte.h>
103#include <machine/tsb.h>
104
105#define	PMAP_DEBUG
106
107#ifndef	PMAP_SHPGPERPROC
108#define	PMAP_SHPGPERPROC	200
109#endif
110
111/*
112 * Virtual and physical address of message buffer.
113 */
114struct msgbuf *msgbufp;
115vm_offset_t msgbuf_phys;
116
117/*
118 * Physical addresses of first and last available physical page.
119 */
120vm_offset_t avail_start;
121vm_offset_t avail_end;
122
123int pmap_pagedaemon_waken;
124
125/*
126 * Map of physical memory regions.
127 */
128vm_offset_t phys_avail[128];
129static struct ofw_mem_region mra[128];
130struct ofw_mem_region sparc64_memreg[128];
131int sparc64_nmemreg;
132static struct ofw_map translations[128];
133static int translations_size;
134
135static vm_offset_t pmap_idle_map;
136static vm_offset_t pmap_temp_map_1;
137static vm_offset_t pmap_temp_map_2;
138
139/*
140 * First and last available kernel virtual addresses.
141 */
142vm_offset_t virtual_avail;
143vm_offset_t virtual_end;
144vm_offset_t kernel_vm_end;
145
146vm_offset_t vm_max_kernel_address;
147
148static vm_offset_t crashdumpmap;
149
150/*
151 * Kernel pmap.
152 */
153struct pmap kernel_pmap_store;
154
155/*
156 * Allocate physical memory for use in pmap_bootstrap.
157 */
158static vm_offset_t pmap_bootstrap_alloc(vm_size_t size);
159
160extern int tl1_immu_miss_patch_1[];
161extern int tl1_immu_miss_patch_2[];
162extern int tl1_dmmu_miss_patch_1[];
163extern int tl1_dmmu_miss_patch_2[];
164extern int tl1_dmmu_prot_patch_1[];
165extern int tl1_dmmu_prot_patch_2[];
166
167/*
168 * If a user pmap is processed with pmap_remove and the resident count
169 * drops to 0, there are no more pages to remove, so we need not
170 * continue.
171 */
172#define	PMAP_REMOVE_DONE(pm) \
173	((pm) != kernel_pmap && (pm)->pm_stats.resident_count == 0)
174
175/*
176 * The threshold (in bytes) above which tsb_foreach() is used in pmap_remove()
177 * and pmap_protect() instead of trying each virtual address.
178 */
179#define	PMAP_TSB_THRESH	((TSB_SIZE / 2) * PAGE_SIZE)
180
181#ifdef PMAP_STATS
182static long pmap_enter_nupdate;
183static long pmap_enter_nreplace;
184static long pmap_enter_nnew;
185static long pmap_ncache_enter;
186static long pmap_ncache_enter_c;
187static long pmap_ncache_enter_cc;
188static long pmap_ncache_enter_nc;
189static long pmap_ncache_remove;
190static long pmap_ncache_remove_c;
191static long pmap_ncache_remove_cc;
192static long pmap_ncache_remove_nc;
193static long pmap_niflush;
194
195SYSCTL_NODE(_debug, OID_AUTO, pmap_stats, CTLFLAG_RD, 0, "Statistics");
196SYSCTL_LONG(_debug_pmap_stats, OID_AUTO, pmap_enter_nupdate, CTLFLAG_RD,
197    &pmap_enter_nupdate, 0, "Number of pmap_enter() updates");
198SYSCTL_LONG(_debug_pmap_stats, OID_AUTO, pmap_enter_nreplace, CTLFLAG_RD,
199    &pmap_enter_nreplace, 0, "Number of pmap_enter() replacements");
200SYSCTL_LONG(_debug_pmap_stats, OID_AUTO, pmap_enter_nnew, CTLFLAG_RD,
201    &pmap_enter_nnew, 0, "Number of pmap_enter() additions");
202SYSCTL_LONG(_debug_pmap_stats, OID_AUTO, pmap_ncache_enter, CTLFLAG_RD,
203    &pmap_ncache_enter, 0, "Number of pmap_cache_enter() calls");
204SYSCTL_LONG(_debug_pmap_stats, OID_AUTO, pmap_ncache_enter_c, CTLFLAG_RD,
205    &pmap_ncache_enter_c, 0, "Number of pmap_cache_enter() cacheable");
206SYSCTL_LONG(_debug_pmap_stats, OID_AUTO, pmap_ncache_enter_cc, CTLFLAG_RD,
207    &pmap_ncache_enter_cc, 0, "Number of pmap_cache_enter() change color");
208SYSCTL_LONG(_debug_pmap_stats, OID_AUTO, pmap_ncache_enter_nc, CTLFLAG_RD,
209    &pmap_ncache_enter_nc, 0, "Number of pmap_cache_enter() noncacheable");
210SYSCTL_LONG(_debug_pmap_stats, OID_AUTO, pmap_ncache_remove, CTLFLAG_RD,
211    &pmap_ncache_remove, 0, "Number of pmap_cache_remove() calls");
212SYSCTL_LONG(_debug_pmap_stats, OID_AUTO, pmap_ncache_remove_c, CTLFLAG_RD,
213    &pmap_ncache_remove_c, 0, "Number of pmap_cache_remove() cacheable");
214SYSCTL_LONG(_debug_pmap_stats, OID_AUTO, pmap_ncache_remove_cc, CTLFLAG_RD,
215    &pmap_ncache_remove_cc, 0, "Number of pmap_cache_remove() change color");
216SYSCTL_LONG(_debug_pmap_stats, OID_AUTO, pmap_ncache_remove_nc, CTLFLAG_RD,
217    &pmap_ncache_remove_nc, 0, "Number of pmap_cache_remove() noncacheable");
218SYSCTL_LONG(_debug_pmap_stats, OID_AUTO, pmap_niflush, CTLFLAG_RD,
219    &pmap_niflush, 0, "Number of pmap I$ flushes");
220
221#define	PMAP_STATS_INC(var)	atomic_add_long(&var, 1)
222#else
223#define	PMAP_STATS_INC(var)
224#endif
225
226/*
227 * Quick sort callout for comparing memory regions.
228 */
229static int mr_cmp(const void *a, const void *b);
230static int om_cmp(const void *a, const void *b);
231static int
232mr_cmp(const void *a, const void *b)
233{
234	const struct ofw_mem_region *mra;
235	const struct ofw_mem_region *mrb;
236
237	mra = a;
238	mrb = b;
239	if (mra->mr_start < mrb->mr_start)
240		return (-1);
241	else if (mra->mr_start > mrb->mr_start)
242		return (1);
243	else
244		return (0);
245}
246static int
247om_cmp(const void *a, const void *b)
248{
249	const struct ofw_map *oma;
250	const struct ofw_map *omb;
251
252	oma = a;
253	omb = b;
254	if (oma->om_start < omb->om_start)
255		return (-1);
256	else if (oma->om_start > omb->om_start)
257		return (1);
258	else
259		return (0);
260}
261
262/*
263 * Bootstrap the system enough to run with virtual memory.
264 */
265void
266pmap_bootstrap(vm_offset_t ekva)
267{
268	struct pmap *pm;
269	struct tte *tp;
270	vm_offset_t off;
271	vm_offset_t pa;
272	vm_offset_t va;
273	vm_size_t physsz;
274	vm_size_t virtsz;
275	ihandle_t pmem;
276	ihandle_t vmem;
277	int sz;
278	int i;
279	int j;
280
281	/*
282	 * Find out what physical memory is available from the prom and
283	 * initialize the phys_avail array.  This must be done before
284	 * pmap_bootstrap_alloc is called.
285	 */
286	if ((pmem = OF_finddevice("/memory")) == -1)
287		panic("pmap_bootstrap: finddevice /memory");
288	if ((sz = OF_getproplen(pmem, "available")) == -1)
289		panic("pmap_bootstrap: getproplen /memory/available");
290	if (sizeof(phys_avail) < sz)
291		panic("pmap_bootstrap: phys_avail too small");
292	if (sizeof(mra) < sz)
293		panic("pmap_bootstrap: mra too small");
294	bzero(mra, sz);
295	if (OF_getprop(pmem, "available", mra, sz) == -1)
296		panic("pmap_bootstrap: getprop /memory/available");
297	sz /= sizeof(*mra);
298	CTR0(KTR_PMAP, "pmap_bootstrap: physical memory");
299	qsort(mra, sz, sizeof (*mra), mr_cmp);
300	physsz = 0;
301	for (i = 0, j = 0; i < sz; i++, j += 2) {
302		CTR2(KTR_PMAP, "start=%#lx size=%#lx", mra[i].mr_start,
303		    mra[i].mr_size);
304		phys_avail[j] = mra[i].mr_start;
305		phys_avail[j + 1] = mra[i].mr_start + mra[i].mr_size;
306		physsz += mra[i].mr_size;
307	}
308	physmem = btoc(physsz);
309
310	/*
311	 * Calculate the size of kernel virtual memory, and the size and mask
312	 * for the kernel tsb.
313	 */
314	virtsz = roundup(physsz, PAGE_SIZE_4M << (PAGE_SHIFT - TTE_SHIFT));
315	vm_max_kernel_address = VM_MIN_KERNEL_ADDRESS + virtsz;
316	tsb_kernel_size = virtsz >> (PAGE_SHIFT - TTE_SHIFT);
317	tsb_kernel_mask = (tsb_kernel_size >> TTE_SHIFT) - 1;
318
319	/*
320	 * Allocate the kernel tsb and lock it in the tlb.
321	 */
322	pa = pmap_bootstrap_alloc(tsb_kernel_size);
323	if (pa & PAGE_MASK_4M)
324		panic("pmap_bootstrap: tsb unaligned\n");
325	tsb_kernel_phys = pa;
326	tsb_kernel = (struct tte *)(VM_MIN_KERNEL_ADDRESS - tsb_kernel_size);
327	pmap_map_tsb();
328	bzero(tsb_kernel, tsb_kernel_size);
329
330	/*
331	 * Allocate the message buffer.
332	 */
333	msgbuf_phys = pmap_bootstrap_alloc(MSGBUF_SIZE);
334
335	/*
336	 * Patch the virtual address and the tsb mask into the trap table.
337	 */
338
339#define	SETHI(rd, imm22) \
340	(EIF_OP(IOP_FORM2) | EIF_F2_RD(rd) | EIF_F2_OP2(INS0_SETHI) | \
341	    EIF_IMM((imm22) >> 10, 22))
342#define	OR_R_I_R(rd, imm13, rs1) \
343	(EIF_OP(IOP_MISC) | EIF_F3_RD(rd) | EIF_F3_OP3(INS2_OR) | \
344	    EIF_F3_RS1(rs1) | EIF_F3_I(1) | EIF_IMM(imm13, 13))
345
346#define	PATCH(addr) do { \
347	if (addr[0] != SETHI(IF_F2_RD(addr[0]), 0x0) || \
348	    addr[1] != OR_R_I_R(IF_F3_RD(addr[1]), 0x0, IF_F3_RS1(addr[1])) || \
349	    addr[2] != SETHI(IF_F2_RD(addr[2]), 0x0)) \
350		panic("pmap_bootstrap: patched instructions have changed"); \
351	addr[0] |= EIF_IMM((tsb_kernel_mask) >> 10, 22); \
352	addr[1] |= EIF_IMM(tsb_kernel_mask, 10); \
353	addr[2] |= EIF_IMM(((vm_offset_t)tsb_kernel) >> 10, 22); \
354	flush(addr); \
355	flush(addr + 1); \
356	flush(addr + 2); \
357} while (0)
358
359	PATCH(tl1_immu_miss_patch_1);
360	PATCH(tl1_immu_miss_patch_2);
361	PATCH(tl1_dmmu_miss_patch_1);
362	PATCH(tl1_dmmu_miss_patch_2);
363	PATCH(tl1_dmmu_prot_patch_1);
364	PATCH(tl1_dmmu_prot_patch_2);
365
366	/*
367	 * Enter fake 8k pages for the 4MB kernel pages, so that
368	 * pmap_kextract() will work for them.
369	 */
370	for (i = 0; i < kernel_tlb_slots; i++) {
371		pa = kernel_tlbs[i].te_pa;
372		va = kernel_tlbs[i].te_va;
373		for (off = 0; off < PAGE_SIZE_4M; off += PAGE_SIZE) {
374			tp = tsb_kvtotte(va + off);
375			tp->tte_vpn = TV_VPN(va + off, TS_8K);
376			tp->tte_data = TD_V | TD_8K | TD_PA(pa + off) |
377			    TD_REF | TD_SW | TD_CP | TD_CV | TD_P | TD_W;
378		}
379	}
380
381	/*
382	 * Set the start and end of kva.  The kernel is loaded at the first
383	 * available 4 meg super page, so round up to the end of the page.
384	 */
385	virtual_avail = roundup2(ekva, PAGE_SIZE_4M);
386	virtual_end = vm_max_kernel_address;
387	kernel_vm_end = vm_max_kernel_address;
388
389	/*
390	 * Allocate kva space for temporary mappings.
391	 */
392	pmap_idle_map = virtual_avail;
393	virtual_avail += PAGE_SIZE * DCACHE_COLORS;
394	pmap_temp_map_1 = virtual_avail;
395	virtual_avail += PAGE_SIZE * DCACHE_COLORS;
396	pmap_temp_map_2 = virtual_avail;
397	virtual_avail += PAGE_SIZE * DCACHE_COLORS;
398
399	/*
400	 * Allocate virtual address space for the message buffer.
401	 */
402	msgbufp = (struct msgbuf *)virtual_avail;
403	virtual_avail += round_page(MSGBUF_SIZE);
404
405	/*
406	 * Allocate virtual address space to map pages during a kernel dump.
407	 */
408	crashdumpmap = virtual_avail;
409	virtual_avail += MAXDUMPPGS * PAGE_SIZE;
410
411	/*
412	 * Allocate a kernel stack with guard page for thread0 and map it into
413	 * the kernel tsb.
414	 */
415	pa = pmap_bootstrap_alloc(KSTACK_PAGES * PAGE_SIZE);
416	kstack0_phys = pa;
417	kstack0 = virtual_avail + (KSTACK_GUARD_PAGES * PAGE_SIZE);
418	virtual_avail += (KSTACK_PAGES + KSTACK_GUARD_PAGES) * PAGE_SIZE;
419	for (i = 0; i < KSTACK_PAGES; i++) {
420		pa = kstack0_phys + i * PAGE_SIZE;
421		va = kstack0 + i * PAGE_SIZE;
422		tp = tsb_kvtotte(va);
423		tp->tte_vpn = TV_VPN(va, TS_8K);
424		tp->tte_data = TD_V | TD_8K | TD_PA(pa) | TD_REF | TD_SW |
425		    TD_CP | TD_CV | TD_P | TD_W;
426	}
427
428	/*
429	 * Calculate the first and last available physical addresses.
430	 */
431	avail_start = phys_avail[0];
432	for (i = 0; phys_avail[i + 2] != 0; i += 2)
433		;
434	avail_end = phys_avail[i + 1];
435	Maxmem = sparc64_btop(avail_end);
436
437	/*
438	 * Add the prom mappings to the kernel tsb.
439	 */
440	if ((vmem = OF_finddevice("/virtual-memory")) == -1)
441		panic("pmap_bootstrap: finddevice /virtual-memory");
442	if ((sz = OF_getproplen(vmem, "translations")) == -1)
443		panic("pmap_bootstrap: getproplen translations");
444	if (sizeof(translations) < sz)
445		panic("pmap_bootstrap: translations too small");
446	bzero(translations, sz);
447	if (OF_getprop(vmem, "translations", translations, sz) == -1)
448		panic("pmap_bootstrap: getprop /virtual-memory/translations");
449	sz /= sizeof(*translations);
450	translations_size = sz;
451	CTR0(KTR_PMAP, "pmap_bootstrap: translations");
452	qsort(translations, sz, sizeof (*translations), om_cmp);
453	for (i = 0; i < sz; i++) {
454		CTR3(KTR_PMAP,
455		    "translation: start=%#lx size=%#lx tte=%#lx",
456		    translations[i].om_start, translations[i].om_size,
457		    translations[i].om_tte);
458		if (translations[i].om_start < VM_MIN_PROM_ADDRESS ||
459		    translations[i].om_start > VM_MAX_PROM_ADDRESS)
460			continue;
461		for (off = 0; off < translations[i].om_size;
462		    off += PAGE_SIZE) {
463			va = translations[i].om_start + off;
464			tp = tsb_kvtotte(va);
465			tp->tte_vpn = TV_VPN(va, TS_8K);
466			tp->tte_data =
467			    ((translations[i].om_tte &
468			      ~(TD_SOFT_MASK << TD_SOFT_SHIFT)) | TD_EXEC) +
469			    off;
470		}
471	}
472
473	/*
474	 * Get the available physical memory ranges from /memory/reg. These
475	 * are only used for kernel dumps, but it may not be wise to do prom
476	 * calls in that situation.
477	 */
478	if ((sz = OF_getproplen(pmem, "reg")) == -1)
479		panic("pmap_bootstrap: getproplen /memory/reg");
480	if (sizeof(sparc64_memreg) < sz)
481		panic("pmap_bootstrap: sparc64_memreg too small");
482	if (OF_getprop(pmem, "reg", sparc64_memreg, sz) == -1)
483		panic("pmap_bootstrap: getprop /memory/reg");
484	sparc64_nmemreg = sz / sizeof(*sparc64_memreg);
485
486	/*
487	 * Initialize the kernel pmap (which is statically allocated).
488	 */
489	pm = kernel_pmap;
490	for (i = 0; i < MAXCPU; i++)
491		pm->pm_context[i] = TLB_CTX_KERNEL;
492	pm->pm_active = ~0;
493
494	/* XXX flush all non-locked tlb entries */
495}
496
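/*
 * Map the kernel tsb with locked 4mb dtlb entries and set the secondary
 * context register to the kernel context.
 */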
497void
498pmap_map_tsb(void)
499{
500	vm_offset_t va;
501	vm_offset_t pa;
502	u_long data;
503	u_long s;
504	int i;
505
506	s = intr_disable();
507
508	/*
509	 * Map the 4mb tsb pages.
510	 */
511	for (i = 0; i < tsb_kernel_size; i += PAGE_SIZE_4M) {
512		va = (vm_offset_t)tsb_kernel + i;
513		pa = tsb_kernel_phys + i;
514		data = TD_V | TD_4M | TD_PA(pa) | TD_L | TD_CP | TD_CV |
515		    TD_P | TD_W;
516		/* XXX - cheetah */
517		stxa(AA_DMMU_TAR, ASI_DMMU, TLB_TAR_VA(va) |
518		    TLB_TAR_CTX(TLB_CTX_KERNEL));
519		stxa_sync(0, ASI_DTLB_DATA_IN_REG, data);
520	}
521
522	/*
523	 * Set the secondary context to be the kernel context (needed for
524	 * fp block operations in the kernel and the cache code).
525	 */
526	stxa(AA_DMMU_SCXR, ASI_DMMU, TLB_CTX_KERNEL);
527	membar(Sync);
528
529	intr_restore(s);
530}
531
532/*
533 * Allocate a physical page of memory directly from the phys_avail map.
534 * Can only be called from pmap_bootstrap before avail start and end are
535 * calculated.
536 */
537static vm_offset_t
538pmap_bootstrap_alloc(vm_size_t size)
539{
540	vm_offset_t pa;
541	int i;
542
543	size = round_page(size);
544	for (i = 0; phys_avail[i + 1] != 0; i += 2) {
545		if (phys_avail[i + 1] - phys_avail[i] < size)
546			continue;
547		pa = phys_avail[i];
548		phys_avail[i] += size;
549		return (pa);
550	}
551	panic("pmap_bootstrap_alloc");
552}
553
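/*
 * Flush all non-locked user entries from the tlbs when the per-cpu context
 * numbers wrap, so that the context numbers may be reused.
 */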
554void
555pmap_context_rollover(void)
556{
557	u_long data;
558	u_long tag;
559	int i;
560
561	mtx_assert(&sched_lock, MA_OWNED);
562	CTR0(KTR_PMAP, "pmap_context_rollover");
563	for (i = 0; i < tlb_dtlb_entries; i++) {
564		/* XXX - cheetah */
565		data = ldxa(TLB_DAR_SLOT(i), ASI_DTLB_DATA_ACCESS_REG);
566		tag = ldxa(TLB_DAR_SLOT(i), ASI_DTLB_TAG_READ_REG);
567		if ((data & TD_V) != 0 && (data & TD_L) == 0 &&
568		    TLB_TAR_CTX(tag) != TLB_CTX_KERNEL)
569			stxa_sync(TLB_DAR_SLOT(i), ASI_DTLB_DATA_ACCESS_REG, 0);
570		data = ldxa(TLB_DAR_SLOT(i), ASI_ITLB_DATA_ACCESS_REG);
571		tag = ldxa(TLB_DAR_SLOT(i), ASI_ITLB_TAG_READ_REG);
572		if ((data & TD_V) != 0 && (data & TD_L) == 0 &&
573		    TLB_TAR_CTX(tag) != TLB_CTX_KERNEL)
574			stxa_sync(TLB_DAR_SLOT(i), ASI_ITLB_DATA_ACCESS_REG, 0);
575	}
576	PCPU_SET(tlb_ctx, PCPU_GET(tlb_ctx_min));
577}
578
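/*
 * Allocate a tlb context number for a user pmap, triggering a rollover when
 * this cpu's context numbers are exhausted.
 */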
579static __inline u_int
580pmap_context_alloc(void)
581{
582	u_int context;
583
584	mtx_assert(&sched_lock, MA_OWNED);
585	context = PCPU_GET(tlb_ctx);
586	if (context + 1 == PCPU_GET(tlb_ctx_max))
587		pmap_context_rollover();
588	else
589		PCPU_SET(tlb_ctx, context + 1);
590	return (context);
591}
592
593/*
594 * Initialize the pmap module.
595 */
596void
597pmap_init(vm_offset_t phys_start, vm_offset_t phys_end)
598{
599	vm_offset_t addr;
600	vm_size_t size;
601	int result;
602	int i;
603
604	for (i = 0; i < vm_page_array_size; i++) {
605		vm_page_t m;
606
607		m = &vm_page_array[i];
608		TAILQ_INIT(&m->md.tte_list);
609		m->md.color = DCACHE_COLOR(VM_PAGE_TO_PHYS(m));
610		m->md.flags = 0;
611		m->md.pmap = NULL;
612	}
613
614	for (i = 0; i < translations_size; i++) {
615		addr = translations[i].om_start;
616		size = translations[i].om_size;
617		if (addr < VM_MIN_PROM_ADDRESS || addr > VM_MAX_PROM_ADDRESS)
618			continue;
619		result = vm_map_find(kernel_map, NULL, 0, &addr, size, TRUE,
620		    VM_PROT_ALL, VM_PROT_ALL, 0);
621		if (result != KERN_SUCCESS || addr != translations[i].om_start)
622			panic("pmap_init: vm_map_find");
623	}
624}
625
626/*
627 * Initialize the address space (zone) for the pv_entries.  Set a
628 * high water mark so that the system can recover from excessive
629 * numbers of pv entries.
630 */
631void
632pmap_init2(void)
633{
634}
635
636/*
637 * Extract the physical page address associated with the given
638 * map/virtual_address pair.
639 */
640vm_offset_t
641pmap_extract(pmap_t pm, vm_offset_t va)
642{
643	struct tte *tp;
644
645	if (pm == kernel_pmap)
646		return (pmap_kextract(va));
647	tp = tsb_tte_lookup(pm, va);
648	if (tp == NULL)
649		return (0);
650	else
651		return (TTE_GET_PA(tp) | (va & TTE_GET_PAGE_MASK(tp)));
652}
653
654/*
655 * Extract the physical page address associated with the given kernel virtual
656 * address.
657 */
658vm_offset_t
659pmap_kextract(vm_offset_t va)
660{
661	struct tte *tp;
662
663	if (va >= VM_MIN_DIRECT_ADDRESS)
664		return (TLB_DIRECT_TO_PHYS(va));
665	tp = tsb_kvtotte(va);
666	if ((tp->tte_data & TD_V) == 0)
667		return (0);
668	return (TTE_GET_PA(tp) | (va & TTE_GET_PAGE_MASK(tp)));
669}
670
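/*
 * Account for a new virtual mapping of a page.  Returns 1 if the mapping may
 * be entered cacheable, or 0 if it (and all other mappings of the page) must
 * be non-cacheable to avoid illegal aliases in the virtually indexed data
 * cache.
 */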
671int
672pmap_cache_enter(vm_page_t m, vm_offset_t va)
673{
674	struct tte *tp;
675	int color;
676
677	PMAP_STATS_INC(pmap_ncache_enter);
678
679	/*
680	 * Find the color for this virtual address and note the added mapping.
681	 */
682	color = DCACHE_COLOR(va);
683	m->md.colors[color]++;
684
685	/*
686	 * If all existing mappings have the same color, the mapping is
687	 * cacheable.
688	 */
689	if (m->md.color == color) {
690		KASSERT(m->md.colors[DCACHE_OTHER_COLOR(color)] == 0,
691		    ("pmap_cache_enter: cacheable, mappings of other color"));
692		PMAP_STATS_INC(pmap_ncache_enter_c);
693		return (1);
694	}
695
696	/*
697	 * If there are no mappings of the other color, and the page still has
698	 * the wrong color, this must be a new mapping.  Change the color to
699	 * match the new mapping, which is cacheable.  We must flush the page
700	 * from the cache now.
701	 */
702	if (m->md.colors[DCACHE_OTHER_COLOR(color)] == 0) {
703		KASSERT(m->md.colors[color] == 1,
704		    ("pmap_cache_enter: changing color, not new mapping"));
705		dcache_page_inval(VM_PAGE_TO_PHYS(m));
706		m->md.color = color;
707		PMAP_STATS_INC(pmap_ncache_enter_cc);
708		return (1);
709	}
710
711	PMAP_STATS_INC(pmap_ncache_enter_nc);
712
713	/*
714	 * If the mapping is already non-cacheable, just return.
715	 */
716	if (m->md.color == -1)
717		return (0);
718
719	/*
720	 * Mark all mappings as uncacheable, flush any lines with the other
721	 * color out of the dcache, and set the color to none (-1).
722	 */
723	TAILQ_FOREACH(tp, &m->md.tte_list, tte_link) {
724		atomic_clear_long(&tp->tte_data, TD_CV);
725		tlb_page_demap(TTE_GET_PMAP(tp), TTE_GET_VA(tp));
726	}
727	dcache_page_inval(VM_PAGE_TO_PHYS(m));
728	m->md.color = -1;
729	return (0);
730}
731
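/*
 * Account for the removal of a virtual mapping of a page, re-enabling caching
 * for the remaining mappings when the last mapping of a conflicting color is
 * removed.
 */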
732void
733pmap_cache_remove(vm_page_t m, vm_offset_t va)
734{
735	struct tte *tp;
736	int color;
737
738	CTR3(KTR_PMAP, "pmap_cache_remove: m=%p va=%#lx c=%d", m, va,
739	    m->md.colors[DCACHE_COLOR(va)]);
740	KASSERT(m->md.colors[DCACHE_COLOR(va)] > 0,
741	    ("pmap_cache_remove: no mappings %d <= 0",
742	    m->md.colors[DCACHE_COLOR(va)]));
743	PMAP_STATS_INC(pmap_ncache_remove);
744
745	/*
746	 * Find the color for this virtual address and note the removal of
747	 * the mapping.
748	 */
749	color = DCACHE_COLOR(va);
750	m->md.colors[color]--;
751
752	/*
753	 * If the page is cacheable, just return and keep the same color, even
754	 * if there are no longer any mappings.
755	 */
756	if (m->md.color != -1) {
757		PMAP_STATS_INC(pmap_ncache_remove_c);
758		return;
759	}
760
761	KASSERT(m->md.colors[DCACHE_OTHER_COLOR(color)] != 0,
762	    ("pmap_cache_remove: uncacheable, no mappings of other color"));
763
764	/*
765	 * If the page is not cacheable (color is -1), and the number of
766	 * mappings for this color is not zero, just return.  There are
767	 * mappings of the other color still, so remain non-cacheable.
768	 */
769	if (m->md.colors[color] != 0) {
770		PMAP_STATS_INC(pmap_ncache_remove_nc);
771		return;
772	}
773
774	PMAP_STATS_INC(pmap_ncache_remove_cc);
775
776	/*
777	 * The number of mappings for this color is now zero.  Recache the
778	 * other colored mappings, and change the page color to the other
779	 * color.  There should be no lines in the data cache for this page,
780	 * so flushing should not be needed.
781	 */
782	TAILQ_FOREACH(tp, &m->md.tte_list, tte_link) {
783		atomic_set_long(&tp->tte_data, TD_CV);
784		tlb_page_demap(TTE_GET_PMAP(tp), TTE_GET_VA(tp));
785	}
786	m->md.color = DCACHE_OTHER_COLOR(color);
787}
788
789/*
790 * Map a wired page into kernel virtual address space.
791 */
792void
793pmap_kenter(vm_offset_t va, vm_offset_t pa)
794{
795	vm_offset_t ova;
796	struct tte *tp;
797	vm_page_t om;
798	vm_page_t m;
799	u_long data;
800
801	tp = tsb_kvtotte(va);
802	m = PHYS_TO_VM_PAGE(pa);
803	CTR4(KTR_PMAP, "pmap_kenter: va=%#lx pa=%#lx tp=%p data=%#lx",
804	    va, pa, tp, tp->tte_data);
805	if ((tp->tte_data & TD_V) != 0) {
806		om = PHYS_TO_VM_PAGE(TTE_GET_PA(tp));
807		ova = TTE_GET_VA(tp);
808		TAILQ_REMOVE(&om->md.tte_list, tp, tte_link);
809		pmap_cache_remove(om, ova);
810		if (va != ova)
811			tlb_page_demap(kernel_pmap, ova);
812	}
813	data = TD_V | TD_8K | TD_PA(pa) | TD_REF | TD_SW | TD_CP | TD_P | TD_W;
814	if (pmap_cache_enter(m, va) != 0)
815		data |= TD_CV;
816	tp->tte_vpn = TV_VPN(va, TS_8K);
817	tp->tte_data = data;
818	TAILQ_INSERT_TAIL(&m->md.tte_list, tp, tte_link);
819}
820
821/*
822 * Map a wired page into kernel virtual address space. This additionally
823 * takes a flag argument which is OR'ed into the TTE data. This is used by
824 * bus_space_map().
825 * NOTE: if the mapping is non-cacheable, it's the caller's responsibility
826 * to flush entries that might still be in the cache, if applicable.
827 */
828void
829pmap_kenter_flags(vm_offset_t va, vm_offset_t pa, u_long flags)
830{
831	struct tte *tp;
832
833	tp = tsb_kvtotte(va);
834	CTR4(KTR_PMAP, "pmap_kenter_flags: va=%#lx pa=%#lx tp=%p data=%#lx",
835	    va, pa, tp, tp->tte_data);
836	tp->tte_vpn = TV_VPN(va, TS_8K);
837	tp->tte_data = TD_V | TD_8K | TD_PA(pa) | TD_REF | TD_P | flags;
838}
839
840/*
841 * Make a temporary mapping for a physical address.  This is only intended
842 * to be used for panic dumps. Caching issues can be ignored completely here,
843 * because pages mapped this way are only read.
844 */
845void *
846pmap_kenter_temporary(vm_offset_t pa, int i)
847{
848	struct tte *tp;
849	vm_offset_t va;
850
851	va = crashdumpmap + i * PAGE_SIZE;
852	tlb_page_demap(kernel_pmap, va);
853	tp = tsb_kvtotte(va);
854	tp->tte_vpn = TV_VPN(va, TS_8K);
855	tp->tte_data = TD_V | TD_8K | TD_PA(pa) | TD_REF | TD_CP | TD_CV | TD_P;
856	return ((void *)crashdumpmap);
857}
858
859/*
860 * Remove a wired page from kernel virtual address space.
861 */
862void
863pmap_kremove(vm_offset_t va)
864{
865	struct tte *tp;
866	vm_page_t m;
867
868	tp = tsb_kvtotte(va);
869	CTR3(KTR_PMAP, "pmap_kremove: va=%#lx tp=%p data=%#lx", va, tp,
870	    tp->tte_data);
871	m = PHYS_TO_VM_PAGE(TTE_GET_PA(tp));
872	TAILQ_REMOVE(&m->md.tte_list, tp, tte_link);
873	pmap_cache_remove(m, va);
874	TTE_ZERO(tp);
875}
876
877/*
878 * Inverse of pmap_kenter_flags, used by bus_space_unmap().
879 */
880void
881pmap_kremove_flags(vm_offset_t va)
882{
883	struct tte *tp;
884
885	tp = tsb_kvtotte(va);
886	CTR3(KTR_PMAP, "pmap_kremove: va=%#lx tp=%p data=%#lx", va, tp,
887	    tp->tte_data);
888	TTE_ZERO(tp);
889}
890
891/*
892 * Map a range of physical addresses into kernel virtual address space.
893 *
894 * The value passed in *virt is a suggested virtual address for the mapping.
895 * Architectures which can support a direct-mapped physical to virtual region
896 * can return the appropriate address within that region, leaving '*virt'
897 * unchanged.
898 */
899vm_offset_t
900pmap_map(vm_offset_t *virt, vm_offset_t pa_start, vm_offset_t pa_end, int prot)
901{
902	struct tte *tp;
903	vm_offset_t sva;
904	vm_offset_t va;
905	vm_offset_t pa;
906
907	pa = pa_start;
908	sva = *virt;
909	va = sva;
910	for (; pa < pa_end; pa += PAGE_SIZE, va += PAGE_SIZE) {
911		tp = tsb_kvtotte(va);
912		tp->tte_vpn = TV_VPN(va, TS_8K);
913		tp->tte_data = TD_V | TD_8K | TD_PA(pa) | TD_REF | TD_SW |
914		    TD_CP | TD_CV | TD_P | TD_W;
915	}
916	tlb_range_demap(kernel_pmap, sva, sva + (pa_end - pa_start) - 1);
917	*virt = va;
918	return (sva);
919}
920
921/*
922 * Map a list of wired pages into kernel virtual address space.  This is
923 * intended for temporary mappings which do not need page modification or
924 * references recorded.  Existing mappings in the region are overwritten.
925 */
926void
927pmap_qenter(vm_offset_t sva, vm_page_t *m, int count)
928{
929	vm_offset_t va;
930
931	va = sva;
932	while (count-- > 0) {
933		pmap_kenter(va, VM_PAGE_TO_PHYS(*m));
934		va += PAGE_SIZE;
935		m++;
936	}
937	tlb_range_demap(kernel_pmap, sva, va);
938}
939
940/*
941 * Remove page mappings from kernel virtual address space.  Intended for
942 * temporary mappings entered by pmap_qenter.
943 */
944void
945pmap_qremove(vm_offset_t sva, int count)
946{
947	vm_offset_t va;
948
949	va = sva;
950	while (count-- > 0) {
951		pmap_kremove(va);
952		va += PAGE_SIZE;
953	}
954	tlb_range_demap(kernel_pmap, sva, va);
955}
956
957#ifndef KSTACK_MAX_PAGES
958#define KSTACK_MAX_PAGES 32
959#endif
960
961/*
962 * Create the kernel stack and pcb for a new thread.
963 * This routine directly affects the fork performance of a process and
964 * the creation performance of a thread.
965 */
966void
967pmap_new_thread(struct thread *td, int pages)
968{
969	vm_page_t ma[KSTACK_MAX_PAGES];
970	vm_object_t ksobj;
971	vm_offset_t ks;
972	vm_page_t m;
973	u_int i;
974
975	/* Bounds check */
976	if (pages <= 1)
977		pages = KSTACK_PAGES;
978	else if (pages > KSTACK_MAX_PAGES)
979		pages = KSTACK_MAX_PAGES;
980
981	/*
982	 * Allocate object for the kstack,
983	 */
984	ksobj = vm_object_allocate(OBJT_DEFAULT, pages);
985	td->td_kstack_obj = ksobj;
986
987	/*
988	 * Get a kernel virtual address for the kstack for this thread.
989	 */
990	ks = kmem_alloc_nofault(kernel_map,
991	   (pages + KSTACK_GUARD_PAGES) * PAGE_SIZE);
992	if (ks == 0)
993		panic("pmap_new_thread: kstack allocation failed");
994	if (KSTACK_GUARD_PAGES != 0) {
995		tlb_page_demap(kernel_pmap, ks);
996		ks += KSTACK_GUARD_PAGES * PAGE_SIZE;
997	}
998	td->td_kstack = ks;
999
1000	/*
1001	 * Knowing the number of pages allocated is useful when you
1002	 * want to deallocate them.
1003	 */
1004	td->td_kstack_pages = pages;
1005
1006	for (i = 0; i < pages; i++) {
1007		/*
1008		 * Get a kernel stack page.
1009		 */
1010		m = vm_page_grab(ksobj, i,
1011		    VM_ALLOC_NORMAL | VM_ALLOC_RETRY | VM_ALLOC_WIRED);
1012		ma[i] = m;
1013
1014		vm_page_wakeup(m);
1015		vm_page_flag_clear(m, PG_ZERO);
1016		m->valid = VM_PAGE_BITS_ALL;
1017	}
1018
1019	/*
1020	 * Enter the pages into the kernel address space.
1021	 */
1022	pmap_qenter(ks, ma, pages);
1023}
1024
1025/*
1026 * Dispose the kernel stack for a thread that has exited.
1027 * This routine directly impacts the exit performance of a process and thread.
1028 */
1029void
1030pmap_dispose_thread(struct thread *td)
1031{
1032	vm_object_t ksobj;
1033	vm_offset_t ks;
1034	vm_page_t m;
1035	int i;
1036	int pages;
1037
1038	pages = td->td_kstack_pages;
1039	ksobj = td->td_kstack_obj;
1040	ks = td->td_kstack;
1041	for (i = 0; i < pages ; i++) {
1042		m = vm_page_lookup(ksobj, i);
1043		if (m == NULL)
1044			panic("pmap_dispose_thread: kstack already missing?");
1045		vm_page_lock_queues();
1046		vm_page_busy(m);
1047		vm_page_unwire(m, 0);
1048		vm_page_free(m);
1049		vm_page_unlock_queues();
1050	}
1051	pmap_qremove(ks, pages);
1052	kmem_free(kernel_map, ks - (KSTACK_GUARD_PAGES * PAGE_SIZE),
1053	    (pages + KSTACK_GUARD_PAGES) * PAGE_SIZE);
1054	vm_object_deallocate(ksobj);
1055}
1056
1057/*
1058 * Set up a variable sized alternate kstack.
1059 */
1060void
1061pmap_new_altkstack(struct thread *td, int pages)
1062{
1063	/* shuffle the original stack */
1064	td->td_altkstack_obj = td->td_kstack_obj;
1065	td->td_altkstack = td->td_kstack;
1066	td->td_altkstack_pages = td->td_kstack_pages;
1067
1068	pmap_new_thread(td, pages);
1069}
1070
1071void
1072pmap_dispose_altkstack(struct thread *td)
1073{
1074	pmap_dispose_thread(td);
1075
1076	/* restore the original kstack */
1077	td->td_kstack = td->td_altkstack;
1078	td->td_kstack_obj = td->td_altkstack_obj;
1079	td->td_kstack_pages = td->td_altkstack_pages;
1080	td->td_altkstack = 0;
1081	td->td_altkstack_obj = NULL;
1082	td->td_altkstack_pages = 0;
1083}
1084
1085/*
1086 * Allow the kernel stack for a thread to be prejudicially paged out.
1087 */
1088void
1089pmap_swapout_thread(struct thread *td)
1090{
1091	vm_object_t ksobj;
1092	vm_offset_t ks;
1093	vm_page_t m;
1094	int i;
1095	int pages;
1096
1097	pages = td->td_kstack_pages;
1098	ksobj = td->td_kstack_obj;
1099	ks = (vm_offset_t)td->td_kstack;
1100	for (i = 0; i < pages; i++) {
1101		m = vm_page_lookup(ksobj, i);
1102		if (m == NULL)
1103			panic("pmap_swapout_thread: kstack already missing?");
1104		vm_page_lock_queues();
1105		vm_page_dirty(m);
1106		vm_page_unwire(m, 0);
1107		vm_page_unlock_queues();
1108	}
1109	pmap_qremove(ks, pages);
1110}
1111
1112/*
1113 * Bring the kernel stack for a specified thread back in.
1114 */
1115void
1116pmap_swapin_thread(struct thread *td)
1117{
1118	vm_page_t ma[KSTACK_MAX_PAGES];
1119	vm_object_t ksobj;
1120	vm_offset_t ks;
1121	vm_page_t m;
1122	int rv;
1123	int i;
1124	int pages;
1125
1126	pages = td->td_kstack_pages;
1127	ksobj = td->td_kstack_obj;
1128	ks = td->td_kstack;
1129	for (i = 0; i < pages; i++) {
1130		m = vm_page_grab(ksobj, i, VM_ALLOC_NORMAL | VM_ALLOC_RETRY);
1131		if (m->valid != VM_PAGE_BITS_ALL) {
1132			rv = vm_pager_get_pages(ksobj, &m, 1, 0);
1133			if (rv != VM_PAGER_OK)
1134				panic("pmap_swapin_thread: cannot get kstack");
1135			m = vm_page_lookup(ksobj, i);
1136			m->valid = VM_PAGE_BITS_ALL;
1137		}
1138		ma[i] = m;
1139		vm_page_lock_queues();
1140		vm_page_wire(m);
1141		vm_page_wakeup(m);
1142		vm_page_unlock_queues();
1143	}
1144	pmap_qenter(ks, ma, pages);
1145}
1146
1147/*
1148 * Initialize the pmap associated with process 0.
1149 */
1150void
1151pmap_pinit0(pmap_t pm)
1152{
1153	int i;
1154
1155	for (i = 0; i < MAXCPU; i++)
1156		pm->pm_context[i] = 0;
1157	pm->pm_active = 0;
1158	pm->pm_tsb = NULL;
1159	pm->pm_tsb_obj = NULL;
1160	bzero(&pm->pm_stats, sizeof(pm->pm_stats));
1161}
1162
1163/*
1164 * Initialize a preallocated and zeroed pmap structure, such as one in a
1165 * vmspace structure.
1166 */
1167void
1168pmap_pinit(pmap_t pm)
1169{
1170	vm_page_t ma[TSB_PAGES];
1171	vm_page_t m;
1172	int i;
1173
1174	/*
1175	 * Allocate kva space for the tsb.
1176	 */
1177	if (pm->pm_tsb == NULL) {
1178		pm->pm_tsb = (struct tte *)kmem_alloc_pageable(kernel_map,
1179		    TSB_BSIZE);
1180	}
1181
1182	/*
1183	 * Allocate an object for it.
1184	 */
1185	if (pm->pm_tsb_obj == NULL)
1186		pm->pm_tsb_obj = vm_object_allocate(OBJT_DEFAULT, TSB_PAGES);
1187
1188	for (i = 0; i < TSB_PAGES; i++) {
1189		m = vm_page_grab(pm->pm_tsb_obj, i,
1190		    VM_ALLOC_RETRY | VM_ALLOC_ZERO);
1191		if ((m->flags & PG_ZERO) == 0)
1192			pmap_zero_page(m);
1193
1194		m->wire_count++;
1195		cnt.v_wire_count++;
1196
1197		vm_page_flag_clear(m, PG_BUSY);
1198		m->valid = VM_PAGE_BITS_ALL;
1199		m->md.pmap = pm;
1200
1201		ma[i] = m;
1202	}
1203	pmap_qenter((vm_offset_t)pm->pm_tsb, ma, TSB_PAGES);
1204
1205	for (i = 0; i < MAXCPU; i++)
1206		pm->pm_context[i] = -1;
1207	pm->pm_active = 0;
1208	bzero(&pm->pm_stats, sizeof(pm->pm_stats));
1209}
1210
1211void
1212pmap_pinit2(pmap_t pmap)
1213{
1214	/* XXX: Remove this stub when no longer called */
1215}
1216
1217/*
1218 * Release any resources held by the given physical map.
1219 * Called when a pmap initialized by pmap_pinit is being released.
1220 * Should only be called if the map contains no valid mappings.
1221 */
1222void
1223pmap_release(pmap_t pm)
1224{
1225	vm_object_t obj;
1226	vm_page_t m;
1227
1228	CTR2(KTR_PMAP, "pmap_release: ctx=%#x tsb=%p",
1229	    pm->pm_context[PCPU_GET(cpuid)], pm->pm_tsb);
1230	obj = pm->pm_tsb_obj;
1231	KASSERT(obj->ref_count == 1, ("pmap_release: tsbobj ref count != 1"));
1232	KASSERT(pmap_resident_count(pm) == 0,
1233	    ("pmap_release: resident pages %ld != 0",
1234	    pmap_resident_count(pm)));
1235	while (!TAILQ_EMPTY(&obj->memq)) {
1236		m = TAILQ_FIRST(&obj->memq);
1237		vm_page_lock_queues();
1238		if (vm_page_sleep_if_busy(m, FALSE, "pmaprl"))
1239			continue;
1240		vm_page_busy(m);
1241		KASSERT(m->hold_count == 0,
1242		    ("pmap_release: freeing held tsb page"));
1243		m->md.pmap = NULL;
1244		m->wire_count--;
1245		cnt.v_wire_count--;
1246		vm_page_free_zero(m);
1247		vm_page_unlock_queues();
1248	}
1249	pmap_qremove((vm_offset_t)pm->pm_tsb, TSB_PAGES);
1250}
1251
1252/*
1253 * Grow the number of kernel page table entries.  Unneeded.
1254 */
1255void
1256pmap_growkernel(vm_offset_t addr)
1257{
1258
1259	panic("pmap_growkernel: can't grow kernel");
1260}
1261
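/*
 * Remove a single tte, recording the reference and modify status of the page
 * and updating the pmap statistics.  Used by pmap_remove() and pmap_enter();
 * returns 0 once the pmap has no resident pages left, so the caller need not
 * continue.
 */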
1262int
1263pmap_remove_tte(struct pmap *pm, struct pmap *pm2, struct tte *tp,
1264		vm_offset_t va)
1265{
1266	vm_page_t m;
1267	u_long data;
1268
1269	data = atomic_readandclear_long(&tp->tte_data);
1270	m = PHYS_TO_VM_PAGE(TD_PA(data));
1271	TAILQ_REMOVE(&m->md.tte_list, tp, tte_link);
1272	if ((data & TD_WIRED) != 0)
1273		pm->pm_stats.wired_count--;
1274	if ((data & TD_PV) != 0) {
1275		if ((data & TD_W) != 0 && pmap_track_modified(pm, va))
1276			vm_page_dirty(m);
1277		if ((data & TD_REF) != 0)
1278			vm_page_flag_set(m, PG_REFERENCED);
1279		if (TAILQ_EMPTY(&m->md.tte_list))
1280			vm_page_flag_clear(m, PG_WRITEABLE);
1281		pm->pm_stats.resident_count--;
1282	}
1283	pmap_cache_remove(m, va);
1284	TTE_ZERO(tp);
1285	if (PMAP_REMOVE_DONE(pm))
1286		return (0);
1287	return (1);
1288}
1289
1290/*
1291 * Remove the given range of addresses from the specified map.
1292 */
1293void
1294pmap_remove(pmap_t pm, vm_offset_t start, vm_offset_t end)
1295{
1296	struct tte *tp;
1297	vm_offset_t va;
1298
1299	CTR3(KTR_PMAP, "pmap_remove: ctx=%#lx start=%#lx end=%#lx",
1300	    pm->pm_context[PCPU_GET(cpuid)], start, end);
1301	if (PMAP_REMOVE_DONE(pm))
1302		return;
1303	if (end - start > PMAP_TSB_THRESH) {
1304		tsb_foreach(pm, NULL, start, end, pmap_remove_tte);
1305		tlb_context_demap(pm);
1306	} else {
1307		for (va = start; va < end; va += PAGE_SIZE) {
1308			if ((tp = tsb_tte_lookup(pm, va)) != NULL) {
1309				if (!pmap_remove_tte(pm, NULL, tp, va))
1310					break;
1311			}
1312		}
1313		tlb_range_demap(pm, start, end - 1);
1314	}
1315}
1316
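/*
 * Remove all managed mappings of the given page.
 */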
1317void
1318pmap_remove_all(vm_page_t m)
1319{
1320	struct pmap *pm;
1321	struct tte *tpn;
1322	struct tte *tp;
1323	vm_offset_t va;
1324
1325	KASSERT((m->flags & (PG_FICTITIOUS|PG_UNMANAGED)) == 0,
1326	   ("pmap_remove_all: illegal for unmanaged page %#lx",
1327	   VM_PAGE_TO_PHYS(m)));
1328	for (tp = TAILQ_FIRST(&m->md.tte_list); tp != NULL; tp = tpn) {
1329		tpn = TAILQ_NEXT(tp, tte_link);
1330		if ((tp->tte_data & TD_PV) == 0)
1331			continue;
1332		pm = TTE_GET_PMAP(tp);
1333		va = TTE_GET_VA(tp);
1334		if ((tp->tte_data & TD_WIRED) != 0)
1335			pm->pm_stats.wired_count--;
1336		if ((tp->tte_data & TD_REF) != 0)
1337			vm_page_flag_set(m, PG_REFERENCED);
1338		if ((tp->tte_data & TD_W) != 0 &&
1339		    pmap_track_modified(pm, va))
1340			vm_page_dirty(m);
1341		tp->tte_data &= ~TD_V;
1342		tlb_page_demap(pm, va);
1343		TAILQ_REMOVE(&m->md.tte_list, tp, tte_link);
1344		pm->pm_stats.resident_count--;
1345		pmap_cache_remove(m, va);
1346		TTE_ZERO(tp);
1347	}
1348	vm_page_flag_clear(m, PG_WRITEABLE);
1349}
1350
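/*
 * Strip the write and reference bits from a single tte, transferring the
 * modify and reference status to the page.  Used by pmap_protect().
 */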
1351int
1352pmap_protect_tte(struct pmap *pm, struct pmap *pm2, struct tte *tp,
1353		 vm_offset_t va)
1354{
1355	u_long data;
1356	vm_page_t m;
1357
1358	data = atomic_clear_long(&tp->tte_data, TD_REF | TD_SW | TD_W);
1359	if ((data & TD_PV) != 0) {
1360		m = PHYS_TO_VM_PAGE(TD_PA(data));
1361		if ((data & TD_REF) != 0)
1362			vm_page_flag_set(m, PG_REFERENCED);
1363		if ((data & TD_W) != 0 && pmap_track_modified(pm, va))
1364			vm_page_dirty(m);
1365	}
1366	return (0);
1367}
1368
1369/*
1370 * Set the physical protection on the specified range of this map as requested.
1371 */
1372void
1373pmap_protect(pmap_t pm, vm_offset_t sva, vm_offset_t eva, vm_prot_t prot)
1374{
1375	vm_offset_t va;
1376	struct tte *tp;
1377
1378	CTR4(KTR_PMAP, "pmap_protect: ctx=%#lx sva=%#lx eva=%#lx prot=%#lx",
1379	    pm->pm_context[PCPU_GET(cpuid)], sva, eva, prot);
1380
1381	if ((prot & VM_PROT_READ) == VM_PROT_NONE) {
1382		pmap_remove(pm, sva, eva);
1383		return;
1384	}
1385
1386	if (prot & VM_PROT_WRITE)
1387		return;
1388
1389	if (eva - sva > PMAP_TSB_THRESH) {
1390		tsb_foreach(pm, NULL, sva, eva, pmap_protect_tte);
1391		tlb_context_demap(pm);
1392	} else {
1393		for (va = sva; va < eva; va += PAGE_SIZE) {
1394			if ((tp = tsb_tte_lookup(pm, va)) != NULL)
1395				pmap_protect_tte(pm, NULL, tp, va);
1396		}
1397		tlb_range_demap(pm, sva, eva - 1);
1398	}
1399}
1400
1401/*
1402 * Map the given physical page at the specified virtual address in the
1403 * target pmap with the protection requested.  If specified the page
1404 * will be wired down.
1405 */
1406void
1407pmap_enter(pmap_t pm, vm_offset_t va, vm_page_t m, vm_prot_t prot,
1408	   boolean_t wired)
1409{
1410	struct tte *tp;
1411	vm_offset_t pa;
1412	u_long data;
1413
1414	pa = VM_PAGE_TO_PHYS(m);
1415	CTR6(KTR_PMAP,
1416	    "pmap_enter: ctx=%p m=%p va=%#lx pa=%#lx prot=%#x wired=%d",
1417	    pm->pm_context[PCPU_GET(cpuid)], m, va, pa, prot, wired);
1418
1419	/*
1420	 * If there is an existing mapping, and the physical address has not
1421	 * changed, this must be a protection or wiring change.
1422	 */
1423	if ((tp = tsb_tte_lookup(pm, va)) != NULL && TTE_GET_PA(tp) == pa) {
1424		CTR0(KTR_PMAP, "pmap_enter: update");
1425		PMAP_STATS_INC(pmap_enter_nupdate);
1426
1427		/*
1428		 * Wiring change, just update stats.
1429		 */
1430		if (wired) {
1431			if ((tp->tte_data & TD_WIRED) == 0) {
1432				tp->tte_data |= TD_WIRED;
1433				pm->pm_stats.wired_count++;
1434			}
1435		} else {
1436			if ((tp->tte_data & TD_WIRED) != 0) {
1437				tp->tte_data &= ~TD_WIRED;
1438				pm->pm_stats.wired_count--;
1439			}
1440		}
1441
1442		/*
1443		 * Save the old bits and clear the ones we're interested in.
1444		 */
1445		data = tp->tte_data;
1446		tp->tte_data &= ~(TD_EXEC | TD_SW | TD_W);
1447
1448		/*
1449		 * If we're turning off write permissions, sense modify status.
1450		 */
1451		if ((prot & VM_PROT_WRITE) != 0) {
1452			tp->tte_data |= TD_SW;
1453			if (wired) {
1454				tp->tte_data |= TD_W;
1455			}
1456		} else if ((data & TD_W) != 0 &&
1457		    pmap_track_modified(pm, va)) {
1458			vm_page_dirty(m);
1459		}
1460
1461		/*
1462		 * If we're turning on execute permissions, flush the icache.
1463		 */
1464		if ((prot & VM_PROT_EXECUTE) != 0) {
1465			if ((data & TD_EXEC) == 0) {
1466				PMAP_STATS_INC(pmap_niflush);
1467				icache_page_inval(pa);
1468			}
1469			tp->tte_data |= TD_EXEC;
1470		}
1471
1472		/*
1473		 * Delete the old mapping.
1474		 */
1475		tlb_page_demap(pm, TTE_GET_VA(tp));
1476	} else {
1477		/*
1478		 * If there is an existing mapping, but it's for a different
1479		 * physical address, delete the old mapping.
1480		 */
1481		if (tp != NULL) {
1482			CTR0(KTR_PMAP, "pmap_enter: replace");
1483			PMAP_STATS_INC(pmap_enter_nreplace);
1484			vm_page_lock_queues();
1485			pmap_remove_tte(pm, NULL, tp, va);
1486			vm_page_unlock_queues();
1487			tlb_page_demap(pm, va);
1488		} else {
1489			CTR0(KTR_PMAP, "pmap_enter: new");
1490			PMAP_STATS_INC(pmap_enter_nnew);
1491		}
1492
1493		/*
1494		 * Now set up the data and install the new mapping.
1495		 */
1496		data = TD_V | TD_8K | TD_PA(pa) | TD_CP;
1497		if (pm == kernel_pmap)
1498			data |= TD_P;
1499		if (prot & VM_PROT_WRITE)
1500			data |= TD_SW;
1501		if (prot & VM_PROT_EXECUTE) {
1502			data |= TD_EXEC;
1503			PMAP_STATS_INC(pmap_niflush);
1504			icache_page_inval(pa);
1505		}
1506
1507		/*
1508		 * If it's wired, update stats.  We also don't need reference or
1509		 * modify tracking for wired mappings, so set the bits now.
1510		 */
1511		if (wired) {
1512			pm->pm_stats.wired_count++;
1513			data |= TD_REF | TD_WIRED;
1514			if ((prot & VM_PROT_WRITE) != 0)
1515				data |= TD_W;
1516		}
1517
1518		tsb_tte_enter(pm, m, va, TS_8K, data);
1519	}
1520}
1521
1522void
1523pmap_object_init_pt(pmap_t pm, vm_offset_t addr, vm_object_t object,
1524		    vm_pindex_t pindex, vm_size_t size, int limit)
1525{
1526	/* XXX */
1527}
1528
1529void
1530pmap_prefault(pmap_t pm, vm_offset_t va, vm_map_entry_t entry)
1531{
1532	/* XXX */
1533}
1534
1535/*
1536 * Change the wiring attribute for a map/virtual-address pair.
1537 * The mapping must already exist in the pmap.
1538 */
1539void
1540pmap_change_wiring(pmap_t pm, vm_offset_t va, boolean_t wired)
1541{
1542	struct tte *tp;
1543	u_long data;
1544
1545	if ((tp = tsb_tte_lookup(pm, va)) != NULL) {
1546		if (wired) {
1547			data = atomic_set_long(&tp->tte_data, TD_WIRED);
1548			if ((data & TD_WIRED) == 0)
1549				pm->pm_stats.wired_count++;
1550		} else {
1551			data = atomic_clear_long(&tp->tte_data, TD_WIRED);
1552			if ((data & TD_WIRED) != 0)
1553				pm->pm_stats.wired_count--;
1554		}
1555	}
1556}
1557
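/*
 * Copy a single tte into the destination pmap, unless a mapping already
 * exists at the given virtual address.  Used by pmap_copy().
 */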
1558static int
1559pmap_copy_tte(pmap_t src_pmap, pmap_t dst_pmap, struct tte *tp, vm_offset_t va)
1560{
1561	vm_page_t m;
1562	u_long data;
1563
1564	if (tsb_tte_lookup(dst_pmap, va) == NULL) {
1565		data = tp->tte_data &
1566		    ~(TD_PV | TD_REF | TD_SW | TD_CV | TD_W);
1567		m = PHYS_TO_VM_PAGE(TTE_GET_PA(tp));
1568		tsb_tte_enter(dst_pmap, m, va, TS_8K, data);
1569	}
1570	return (1);
1571}
1572
1573void
1574pmap_copy(pmap_t dst_pmap, pmap_t src_pmap, vm_offset_t dst_addr,
1575	  vm_size_t len, vm_offset_t src_addr)
1576{
1577	struct tte *tp;
1578	vm_offset_t va;
1579
1580	if (dst_addr != src_addr)
1581		return;
1582	if (len > PMAP_TSB_THRESH) {
1583		tsb_foreach(src_pmap, dst_pmap, src_addr, src_addr + len,
1584		    pmap_copy_tte);
1585		tlb_context_demap(dst_pmap);
1586	} else {
1587		for (va = src_addr; va < src_addr + len; va += PAGE_SIZE) {
1588			if ((tp = tsb_tte_lookup(src_pmap, va)) != NULL)
1589				pmap_copy_tte(src_pmap, dst_pmap, tp, va);
1590		}
1591		tlb_range_demap(dst_pmap, src_addr, src_addr + len - 1);
1592	}
1593}
1594
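/*
 * Zero a page of physical memory, using a physical asi access, the direct
 * map or a temporary mapping depending on the cache color of the page.
 */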
1595void
1596pmap_zero_page(vm_page_t m)
1597{
1598	vm_offset_t pa;
1599	vm_offset_t va;
1600	struct tte *tp;
1601
1602	pa = VM_PAGE_TO_PHYS(m);
1603	if (m->md.color == -1)
1604		aszero(ASI_PHYS_USE_EC, pa, PAGE_SIZE);
1605	else if (m->md.color == DCACHE_COLOR(pa)) {
1606		va = TLB_PHYS_TO_DIRECT(pa);
1607		bzero((void *)va, PAGE_SIZE);
1608	} else {
1609		va = pmap_temp_map_1 + (m->md.color * PAGE_SIZE);
1610		tp = tsb_kvtotte(va);
1611		tp->tte_data = TD_V | TD_8K | TD_PA(pa) | TD_CP | TD_CV | TD_W;
1612		tp->tte_vpn = TV_VPN(va, TS_8K);
1613		bzero((void *)va, PAGE_SIZE);
1614		tlb_page_demap(kernel_pmap, va);
1615	}
1616}
1617
1618void
1619pmap_zero_page_area(vm_page_t m, int off, int size)
1620{
1621	vm_offset_t pa;
1622	vm_offset_t va;
1623	struct tte *tp;
1624
1625	KASSERT(off + size <= PAGE_SIZE, ("pmap_zero_page_area: bad off/size"));
1626	pa = VM_PAGE_TO_PHYS(m);
1627	if (m->md.color == -1)
1628		aszero(ASI_PHYS_USE_EC, pa + off, size);
1629	else if (m->md.color == DCACHE_COLOR(pa)) {
1630		va = TLB_PHYS_TO_DIRECT(pa);
1631		bzero((void *)(va + off), size);
1632	} else {
1633		va = pmap_temp_map_1 + (m->md.color * PAGE_SIZE);
1634		tp = tsb_kvtotte(va);
1635		tp->tte_data = TD_V | TD_8K | TD_PA(pa) | TD_CP | TD_CV | TD_W;
1636		tp->tte_vpn = TV_VPN(va, TS_8K);
1637		bzero((void *)(va + off), size);
1638		tlb_page_demap(kernel_pmap, va);
1639	}
1640}
1641
1642void
1643pmap_zero_page_idle(vm_page_t m)
1644{
1645	vm_offset_t pa;
1646	vm_offset_t va;
1647	struct tte *tp;
1648
1649	pa = VM_PAGE_TO_PHYS(m);
1650	if (m->md.color == -1)
1651		aszero(ASI_PHYS_USE_EC, pa, PAGE_SIZE);
1652	else if (m->md.color == DCACHE_COLOR(pa)) {
1653		va = TLB_PHYS_TO_DIRECT(pa);
1654		bzero((void *)va, PAGE_SIZE);
1655	} else {
1656		va = pmap_idle_map + (m->md.color * PAGE_SIZE);
1657		tp = tsb_kvtotte(va);
1658		tp->tte_data = TD_V | TD_8K | TD_PA(pa) | TD_CP | TD_CV | TD_W;
1659		tp->tte_vpn = TV_VPN(va, TS_8K);
1660		bzero((void *)va, PAGE_SIZE);
1661		tlb_page_demap(kernel_pmap, va);
1662	}
1663}
1664
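/*
 * Copy a page of physical memory, choosing between physical asi accesses,
 * the direct map and temporary mappings based on the cache colors of the
 * source and destination pages.
 */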
1665void
1666pmap_copy_page(vm_page_t msrc, vm_page_t mdst)
1667{
1668	vm_offset_t pdst;
1669	vm_offset_t psrc;
1670	vm_offset_t vdst;
1671	vm_offset_t vsrc;
1672	struct tte *tp;
1673
1674	pdst = VM_PAGE_TO_PHYS(mdst);
1675	psrc = VM_PAGE_TO_PHYS(msrc);
1676	if (msrc->md.color == -1 && mdst->md.color == -1)
1677		ascopy(ASI_PHYS_USE_EC, psrc, pdst, PAGE_SIZE);
1678	else if (msrc->md.color == DCACHE_COLOR(psrc) &&
1679	    mdst->md.color == DCACHE_COLOR(pdst)) {
1680		vdst = TLB_PHYS_TO_DIRECT(pdst);
1681		vsrc = TLB_PHYS_TO_DIRECT(psrc);
1682		bcopy((void *)vsrc, (void *)vdst, PAGE_SIZE);
1683	} else if (msrc->md.color == -1) {
1684		if (mdst->md.color == DCACHE_COLOR(pdst)) {
1685			vdst = TLB_PHYS_TO_DIRECT(pdst);
1686			ascopyfrom(ASI_PHYS_USE_EC, psrc, (void *)vdst,
1687			    PAGE_SIZE);
1688		} else {
1689			vdst = pmap_temp_map_1 + (mdst->md.color * PAGE_SIZE);
1690			tp = tsb_kvtotte(vdst);
1691			tp->tte_data =
1692			    TD_V | TD_8K | TD_PA(pdst) | TD_CP | TD_CV | TD_W;
1693			tp->tte_vpn = TV_VPN(vdst, TS_8K);
1694			ascopyfrom(ASI_PHYS_USE_EC, psrc, (void *)vdst,
1695			    PAGE_SIZE);
1696			tlb_page_demap(kernel_pmap, vdst);
1697		}
1698	} else if (mdst->md.color == -1) {
1699		if (msrc->md.color == DCACHE_COLOR(psrc)) {
1700			vsrc = TLB_PHYS_TO_DIRECT(psrc);
1701			ascopyto((void *)vsrc, ASI_PHYS_USE_EC, pdst,
1702			    PAGE_SIZE);
1703		} else {
1704			vsrc = pmap_temp_map_1 + (msrc->md.color * PAGE_SIZE);
1705			tp = tsb_kvtotte(vsrc);
1706			tp->tte_data =
1707			    TD_V | TD_8K | TD_PA(psrc) | TD_CP | TD_CV | TD_W;
1708			tp->tte_vpn = TV_VPN(vsrc, TS_8K);
1709			ascopyto((void *)vsrc, ASI_PHYS_USE_EC, pdst,
1710			    PAGE_SIZE);
1711			tlb_page_demap(kernel_pmap, vsrc);
1712		}
1713	} else {
1714		vdst = pmap_temp_map_1 + (mdst->md.color * PAGE_SIZE);
1715		tp = tsb_kvtotte(vdst);
1716		tp->tte_data =
1717		    TD_V | TD_8K | TD_PA(pdst) | TD_CP | TD_CV | TD_W;
1718		tp->tte_vpn = TV_VPN(vdst, TS_8K);
1719		vsrc = pmap_temp_map_2 + (msrc->md.color * PAGE_SIZE);
1720		tp = tsb_kvtotte(vsrc);
1721		tp->tte_data =
1722		    TD_V | TD_8K | TD_PA(psrc) | TD_CP | TD_CV | TD_W;
1723		tp->tte_vpn = TV_VPN(vsrc, TS_8K);
1724		bcopy((void *)vsrc, (void *)vdst, PAGE_SIZE);
1725		tlb_page_demap(kernel_pmap, vdst);
1726		tlb_page_demap(kernel_pmap, vsrc);
1727	}
1728}
1729
1730/*
1731 * Returns true if the pmap's pv is one of the first
1732 * 16 pvs linked to from this page.  This count may
1733 * be changed upwards or downwards in the future; it
1734 * is only necessary that true be returned for a small
1735 * subset of pmaps for proper page aging.
1736 */
1737boolean_t
1738pmap_page_exists_quick(pmap_t pm, vm_page_t m)
1739{
1740	struct tte *tp;
1741	int loops;
1742
1743	if ((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) != 0)
1744		return (FALSE);
1745	loops = 0;
1746	TAILQ_FOREACH(tp, &m->md.tte_list, tte_link) {
1747		if ((tp->tte_data & TD_PV) == 0)
1748			continue;
1749		if (TTE_GET_PMAP(tp) == pm)
1750			return (TRUE);
1751		if (++loops >= 16)
1752			break;
1753	}
1754	return (FALSE);
1755}
1756
1757/*
1758 * Remove all pages from the specified address space; this aids process exit
1759 * speeds.  This is much faster than pmap_remove in the case of running down
1760 * an entire address space.  Only works for the current pmap.
1761 */
1762void
1763pmap_remove_pages(pmap_t pm, vm_offset_t sva, vm_offset_t eva)
1764{
1765}
1766
1767/*
1768 * Lower the permission for all mappings to a given page.
1769 */
1770void
1771pmap_page_protect(vm_page_t m, vm_prot_t prot)
1772{
1773
1774	if ((prot & VM_PROT_WRITE) == 0) {
1775		if (prot & (VM_PROT_READ | VM_PROT_EXECUTE))
1776			pmap_clear_write(m);
1777		else
1778			pmap_remove_all(m);
1779	}
1780}
1781
1782vm_offset_t
1783pmap_phys_address(int ppn)
1784{
1785
1786	return (sparc64_ptob(ppn));
1787}
1788
1789/*
1790 *	pmap_ts_referenced:
1791 *
1792 *	Return a count of reference bits for a page, clearing those bits.
1793 *	It is not necessary for every reference bit to be cleared, but it
1794 *	is necessary that 0 only be returned when there are truly no
1795 *	reference bits set.
1796 *
1797 *	XXX: The exact number of bits to check and clear is a matter that
1798 *	should be tested and standardized at some point in the future for
1799 *	optimal aging of shared pages.
1800 */
1801
1802int
1803pmap_ts_referenced(vm_page_t m)
1804{
1805	struct tte *tpf;
1806	struct tte *tpn;
1807	struct tte *tp;
1808	u_long data;
1809	int count;
1810
1811	if ((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) != 0)
1812		return (0);
1813	count = 0;
1814	if ((tp = TAILQ_FIRST(&m->md.tte_list)) != NULL) {
1815		tpf = tp;
1816		do {
1817			tpn = TAILQ_NEXT(tp, tte_link);
1818			TAILQ_REMOVE(&m->md.tte_list, tp, tte_link);
1819			TAILQ_INSERT_TAIL(&m->md.tte_list, tp, tte_link);
1820			if ((tp->tte_data & TD_PV) == 0 ||
1821			    !pmap_track_modified(TTE_GET_PMAP(tp),
1822			     TTE_GET_VA(tp)))
1823				continue;
1824			data = atomic_clear_long(&tp->tte_data, TD_REF);
1825			if ((data & TD_REF) != 0 && ++count > 4)
1826				break;
1827		} while ((tp = tpn) != NULL && tp != tpf);
1828	}
1829	return (count);
1830}
1831
1832boolean_t
1833pmap_is_modified(vm_page_t m)
1834{
1835	struct tte *tp;
1836
1837	if ((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) != 0)
1838		return FALSE;
1839	TAILQ_FOREACH(tp, &m->md.tte_list, tte_link) {
1840		if ((tp->tte_data & TD_PV) == 0 ||
1841		    !pmap_track_modified(TTE_GET_PMAP(tp), TTE_GET_VA(tp)))
1842			continue;
1843		if ((tp->tte_data & TD_W) != 0)
1844			return (TRUE);
1845	}
1846	return (FALSE);
1847}
1848
1849void
1850pmap_clear_modify(vm_page_t m)
1851{
1852	struct tte *tp;
1853	u_long data;
1854
1855	if ((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) != 0)
1856		return;
1857	TAILQ_FOREACH(tp, &m->md.tte_list, tte_link) {
1858		if ((tp->tte_data & TD_PV) == 0)
1859			continue;
1860		data = atomic_clear_long(&tp->tte_data, TD_W);
1861		if ((data & TD_W) != 0)
1862			tlb_page_demap(TTE_GET_PMAP(tp), TTE_GET_VA(tp));
1863	}
1864}
1865
1866void
1867pmap_clear_reference(vm_page_t m)
1868{
1869	struct tte *tp;
1870	u_long data;
1871
1872	if ((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) != 0)
1873		return;
1874	TAILQ_FOREACH(tp, &m->md.tte_list, tte_link) {
1875		if ((tp->tte_data & TD_PV) == 0)
1876			continue;
1877		data = atomic_clear_long(&tp->tte_data, TD_REF);
1878		if ((data & TD_REF) != 0)
1879			tlb_page_demap(TTE_GET_PMAP(tp), TTE_GET_VA(tp));
1880	}
1881}
1882
1883void
1884pmap_clear_write(vm_page_t m)
1885{
1886	struct tte *tp;
1887	u_long data;
1888
1889	if ((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) != 0 ||
1890	    (m->flags & PG_WRITEABLE) == 0)
1891		return;
1892	TAILQ_FOREACH(tp, &m->md.tte_list, tte_link) {
1893		if ((tp->tte_data & TD_PV) == 0)
1894			continue;
1895		data = atomic_clear_long(&tp->tte_data, TD_SW | TD_W);
1896		if ((data & TD_W) != 0) {
1897			if (pmap_track_modified(TTE_GET_PMAP(tp),
1898			    TTE_GET_VA(tp)))
1899				vm_page_dirty(m);
1900			tlb_page_demap(TTE_GET_PMAP(tp), TTE_GET_VA(tp));
1901		}
1902	}
1903	vm_page_flag_clear(m, PG_WRITEABLE);
1904}
1905
1906int
1907pmap_mincore(pmap_t pm, vm_offset_t addr)
1908{
1909	/* TODO; */
1910	return (0);
1911}
1912
1913/*
1914 * Activate a user pmap.  The pmap must be activated before its address space
1915 * can be accessed in any way.
1916 */
1917void
1918pmap_activate(struct thread *td)
1919{
1920	struct vmspace *vm;
1921	vm_offset_t tsb;
1922	u_long context;
1923	pmap_t pm;
1924
1925	vm = td->td_proc->p_vmspace;
1926	pm = &vm->vm_pmap;
1927	tsb = (vm_offset_t)pm->pm_tsb;
1928
1929	KASSERT(pm->pm_active == 0, ("pmap_activate: pmap already active?"));
1930	KASSERT(pm->pm_context[PCPU_GET(cpuid)] != 0,
1931	    ("pmap_activate: activating nucleus context?"));
1932
1933	mtx_lock_spin(&sched_lock);
1934	stxa(AA_DMMU_TSB, ASI_DMMU, tsb);
1935	stxa(AA_IMMU_TSB, ASI_IMMU, tsb);
1936	membar(Sync);
1937	context = pmap_context_alloc();
1938	pm->pm_context[PCPU_GET(cpuid)] = context;
1939	pm->pm_active |= PCPU_GET(cpumask);
1940	PCPU_SET(vmspace, vm);
1941	stxa(AA_DMMU_PCXR, ASI_DMMU, context);
1942	membar(Sync);
1943	mtx_unlock_spin(&sched_lock);
1944}
1945
1946vm_offset_t
1947pmap_addr_hint(vm_object_t object, vm_offset_t va, vm_size_t size)
1948{
1949
1950	return (va);
1951}
1952