pmap.c revision 127875
1/*
2 * Copyright (c) 1991 Regents of the University of California.
3 * All rights reserved.
4 * Copyright (c) 1994 John S. Dyson
5 * All rights reserved.
6 * Copyright (c) 1994 David Greenman
7 * All rights reserved.
8 *
9 * This code is derived from software contributed to Berkeley by
10 * the Systems Programming Group of the University of Utah Computer
11 * Science Department and William Jolitz of UUNET Technologies Inc.
12 *
13 * Redistribution and use in source and binary forms, with or without
14 * modification, are permitted provided that the following conditions
15 * are met:
16 * 1. Redistributions of source code must retain the above copyright
17 *    notice, this list of conditions and the following disclaimer.
18 * 2. Redistributions in binary form must reproduce the above copyright
19 *    notice, this list of conditions and the following disclaimer in the
20 *    documentation and/or other materials provided with the distribution.
21 * 3. All advertising materials mentioning features or use of this software
22 *    must display the following acknowledgement:
23 *      This product includes software developed by the University of
24 *      California, Berkeley and its contributors.
25 * 4. Neither the name of the University nor the names of its contributors
26 *    may be used to endorse or promote products derived from this software
27 *    without specific prior written permission.
28 *
29 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
30 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
31 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
32 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
33 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
34 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
35 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
36 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
37 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
38 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
39 * SUCH DAMAGE.
40 *
41 *      from:   @(#)pmap.c      7.7 (Berkeley)  5/12/91
42 * $FreeBSD: head/sys/sparc64/sparc64/pmap.c 127875 2004-04-05 04:08:00Z alc $
43 */
44
45/*
46 * Manages physical address maps.
47 *
48 * In addition to hardware address maps, this module is called upon to
49 * provide software-use-only maps which may or may not be stored in the
50 * same form as hardware maps.  These pseudo-maps are used to store
51 * intermediate results from copy operations to and from address spaces.
52 *
53 * Since the information managed by this module is also stored by the
54 * logical address mapping module, this module may throw away valid virtual
55 * to physical mappings at almost any time.  However, invalidations of
56 * mappings must be done as requested.
57 *
58 * In order to cope with hardware architectures which make virtual to
59 * physical map invalidates expensive, this module may delay invalidate or
60 * reduced protection operations until such time as they are actually
61 * necessary.  This module is given full information as to which processors
62 * are currently using which maps, and to when physical maps must be made
63 * correct.
64 */
65
66#include "opt_kstack_pages.h"
67#include "opt_msgbuf.h"
68#include "opt_pmap.h"
69
70#include <sys/param.h>
71#include <sys/kernel.h>
72#include <sys/ktr.h>
73#include <sys/lock.h>
74#include <sys/msgbuf.h>
75#include <sys/mutex.h>
76#include <sys/proc.h>
77#include <sys/smp.h>
78#include <sys/sysctl.h>
79#include <sys/systm.h>
80#include <sys/vmmeter.h>
81
82#include <dev/ofw/openfirm.h>
83
84#include <vm/vm.h>
85#include <vm/vm_param.h>
86#include <vm/vm_kern.h>
87#include <vm/vm_page.h>
88#include <vm/vm_map.h>
89#include <vm/vm_object.h>
90#include <vm/vm_extern.h>
91#include <vm/vm_pageout.h>
92#include <vm/vm_pager.h>
93
94#include <machine/cache.h>
95#include <machine/frame.h>
96#include <machine/instr.h>
97#include <machine/md_var.h>
98#include <machine/metadata.h>
99#include <machine/ofw_mem.h>
100#include <machine/smp.h>
101#include <machine/tlb.h>
102#include <machine/tte.h>
103#include <machine/tsb.h>
104
105#define	PMAP_DEBUG
106
107#ifndef	PMAP_SHPGPERPROC
108#define	PMAP_SHPGPERPROC	200
109#endif
110
111/*
112 * Virtual and physical address of message buffer.
113 */
114struct msgbuf *msgbufp;
115vm_paddr_t msgbuf_phys;
116
117/*
118 * Physical address of the last available physical page.
119 */
120vm_paddr_t avail_end;
121
122int pmap_pagedaemon_waken;
123
124/*
125 * Map of physical memory regions.
126 */
127vm_paddr_t phys_avail[128];
128static struct ofw_mem_region mra[128];
129struct ofw_mem_region sparc64_memreg[128];
130int sparc64_nmemreg;
131static struct ofw_map translations[128];
132static int translations_size;
133
134static vm_offset_t pmap_idle_map;
135static vm_offset_t pmap_temp_map_1;
136static vm_offset_t pmap_temp_map_2;
137
138/*
139 * First and last available kernel virtual addresses.
140 */
141vm_offset_t virtual_avail;
142vm_offset_t virtual_end;
143vm_offset_t kernel_vm_end;
144
145vm_offset_t vm_max_kernel_address;
146
147/*
148 * Kernel pmap.
149 */
150struct pmap kernel_pmap_store;
151
152/*
153 * Allocate physical memory for use in pmap_bootstrap.
154 */
155static vm_paddr_t pmap_bootstrap_alloc(vm_size_t size);
156
157extern int tl1_immu_miss_patch_1[];
158extern int tl1_immu_miss_patch_2[];
159extern int tl1_dmmu_miss_patch_1[];
160extern int tl1_dmmu_miss_patch_2[];
161extern int tl1_dmmu_prot_patch_1[];
162extern int tl1_dmmu_prot_patch_2[];
163
164/*
165 * If a user pmap is processed with pmap_remove and the
166 * resident count drops to 0, there are no more pages to remove, so we
167 * need not continue.
168 */
169#define	PMAP_REMOVE_DONE(pm) \
170	((pm) != kernel_pmap && (pm)->pm_stats.resident_count == 0)
171
172/*
173 * The threshold (in bytes) above which tsb_foreach() is used in pmap_remove()
174 * and pmap_protect() instead of trying each virtual address.
175 */
176#define	PMAP_TSB_THRESH	((TSB_SIZE / 2) * PAGE_SIZE)
177
178SYSCTL_NODE(_debug, OID_AUTO, pmap_stats, CTLFLAG_RD, 0, "");
179
180PMAP_STATS_VAR(pmap_nenter);
181PMAP_STATS_VAR(pmap_nenter_update);
182PMAP_STATS_VAR(pmap_nenter_replace);
183PMAP_STATS_VAR(pmap_nenter_new);
184PMAP_STATS_VAR(pmap_nkenter);
185PMAP_STATS_VAR(pmap_nkenter_oc);
186PMAP_STATS_VAR(pmap_nkenter_stupid);
187PMAP_STATS_VAR(pmap_nkremove);
188PMAP_STATS_VAR(pmap_nqenter);
189PMAP_STATS_VAR(pmap_nqremove);
190PMAP_STATS_VAR(pmap_ncache_enter);
191PMAP_STATS_VAR(pmap_ncache_enter_c);
192PMAP_STATS_VAR(pmap_ncache_enter_oc);
193PMAP_STATS_VAR(pmap_ncache_enter_cc);
194PMAP_STATS_VAR(pmap_ncache_enter_coc);
195PMAP_STATS_VAR(pmap_ncache_enter_nc);
196PMAP_STATS_VAR(pmap_ncache_enter_cnc);
197PMAP_STATS_VAR(pmap_ncache_remove);
198PMAP_STATS_VAR(pmap_ncache_remove_c);
199PMAP_STATS_VAR(pmap_ncache_remove_oc);
200PMAP_STATS_VAR(pmap_ncache_remove_cc);
201PMAP_STATS_VAR(pmap_ncache_remove_coc);
202PMAP_STATS_VAR(pmap_ncache_remove_nc);
203PMAP_STATS_VAR(pmap_nzero_page);
204PMAP_STATS_VAR(pmap_nzero_page_c);
205PMAP_STATS_VAR(pmap_nzero_page_oc);
206PMAP_STATS_VAR(pmap_nzero_page_nc);
207PMAP_STATS_VAR(pmap_nzero_page_area);
208PMAP_STATS_VAR(pmap_nzero_page_area_c);
209PMAP_STATS_VAR(pmap_nzero_page_area_oc);
210PMAP_STATS_VAR(pmap_nzero_page_area_nc);
211PMAP_STATS_VAR(pmap_nzero_page_idle);
212PMAP_STATS_VAR(pmap_nzero_page_idle_c);
213PMAP_STATS_VAR(pmap_nzero_page_idle_oc);
214PMAP_STATS_VAR(pmap_nzero_page_idle_nc);
215PMAP_STATS_VAR(pmap_ncopy_page);
216PMAP_STATS_VAR(pmap_ncopy_page_c);
217PMAP_STATS_VAR(pmap_ncopy_page_oc);
218PMAP_STATS_VAR(pmap_ncopy_page_nc);
219PMAP_STATS_VAR(pmap_ncopy_page_dc);
220PMAP_STATS_VAR(pmap_ncopy_page_doc);
221PMAP_STATS_VAR(pmap_ncopy_page_sc);
222PMAP_STATS_VAR(pmap_ncopy_page_soc);
223
224PMAP_STATS_VAR(pmap_nnew_thread);
225PMAP_STATS_VAR(pmap_nnew_thread_oc);
226
227/*
228 * Quick sort callout for comparing memory regions.
229 */
230static int mr_cmp(const void *a, const void *b);
231static int om_cmp(const void *a, const void *b);
232static int
233mr_cmp(const void *a, const void *b)
234{
235	const struct ofw_mem_region *mra;
236	const struct ofw_mem_region *mrb;
237
238	mra = a;
239	mrb = b;
240	if (mra->mr_start < mrb->mr_start)
241		return (-1);
242	else if (mra->mr_start > mrb->mr_start)
243		return (1);
244	else
245		return (0);
246}
247static int
248om_cmp(const void *a, const void *b)
249{
250	const struct ofw_map *oma;
251	const struct ofw_map *omb;
252
253	oma = a;
254	omb = b;
255	if (oma->om_start < omb->om_start)
256		return (-1);
257	else if (oma->om_start > omb->om_start)
258		return (1);
259	else
260		return (0);
261}
262
263/*
264 * Bootstrap the system enough to run with virtual memory.
265 */
266void
267pmap_bootstrap(vm_offset_t ekva)
268{
269	struct pmap *pm;
270	struct tte *tp;
271	vm_offset_t off;
272	vm_offset_t va;
273	vm_paddr_t pa;
274	vm_size_t physsz;
275	vm_size_t virtsz;
276	ihandle_t pmem;
277	ihandle_t vmem;
278	int sz;
279	int i;
280	int j;
281
282	/*
283	 * Find out what physical memory is available from the prom and
284	 * initialize the phys_avail array.  This must be done before
285	 * pmap_bootstrap_alloc is called.
286	 */
287	if ((pmem = OF_finddevice("/memory")) == -1)
288		panic("pmap_bootstrap: finddevice /memory");
289	if ((sz = OF_getproplen(pmem, "available")) == -1)
290		panic("pmap_bootstrap: getproplen /memory/available");
291	if (sizeof(phys_avail) < sz)
292		panic("pmap_bootstrap: phys_avail too small");
293	if (sizeof(mra) < sz)
294		panic("pmap_bootstrap: mra too small");
295	bzero(mra, sz);
296	if (OF_getprop(pmem, "available", mra, sz) == -1)
297		panic("pmap_bootstrap: getprop /memory/available");
298	sz /= sizeof(*mra);
299	CTR0(KTR_PMAP, "pmap_bootstrap: physical memory");
300	qsort(mra, sz, sizeof (*mra), mr_cmp);
301	physsz = 0;
302	getenv_quad("hw.physmem", &physmem);
303	for (i = 0, j = 0; i < sz; i++, j += 2) {
304		CTR2(KTR_PMAP, "start=%#lx size=%#lx", mra[i].mr_start,
305		    mra[i].mr_size);
306		if (physmem != 0 && btoc(physsz + mra[i].mr_size) >= physmem) {
307			if (btoc(physsz) < physmem) {
308				phys_avail[j] = mra[i].mr_start;
309				phys_avail[j + 1] = mra[i].mr_start +
310				    (ctob(physmem) - physsz);
311				physsz = ctob(physmem);
312			}
313			break;
314		}
315		phys_avail[j] = mra[i].mr_start;
316		phys_avail[j + 1] = mra[i].mr_start + mra[i].mr_size;
317		physsz += mra[i].mr_size;
318	}
319	physmem = btoc(physsz);
320
321	/*
322	 * Calculate the size of kernel virtual memory, and the size and mask
323	 * for the kernel tsb.
324	 */
325	virtsz = roundup(physsz, PAGE_SIZE_4M << (PAGE_SHIFT - TTE_SHIFT));
326	vm_max_kernel_address = VM_MIN_KERNEL_ADDRESS + virtsz;
327	tsb_kernel_size = virtsz >> (PAGE_SHIFT - TTE_SHIFT);
328	tsb_kernel_mask = (tsb_kernel_size >> TTE_SHIFT) - 1;
329
330	/*
331	 * Allocate the kernel tsb and lock it in the tlb.
332	 */
333	pa = pmap_bootstrap_alloc(tsb_kernel_size);
334	if (pa & PAGE_MASK_4M)
335		panic("pmap_bootstrap: tsb unaligned\n");
336	tsb_kernel_phys = pa;
337	tsb_kernel = (struct tte *)(VM_MIN_KERNEL_ADDRESS - tsb_kernel_size);
338	pmap_map_tsb();
339	bzero(tsb_kernel, tsb_kernel_size);
340
341	/*
342	 * Allocate and map the message buffer.
343	 */
344	msgbuf_phys = pmap_bootstrap_alloc(MSGBUF_SIZE);
345	msgbufp = (struct msgbuf *)TLB_PHYS_TO_DIRECT(msgbuf_phys);
346
347	/*
348	 * Patch the virtual address and the tsb mask into the trap table.
349	 */
350
351#define	SETHI(rd, imm22) \
352	(EIF_OP(IOP_FORM2) | EIF_F2_RD(rd) | EIF_F2_OP2(INS0_SETHI) | \
353	    EIF_IMM((imm22) >> 10, 22))
354#define	OR_R_I_R(rd, imm13, rs1) \
355	(EIF_OP(IOP_MISC) | EIF_F3_RD(rd) | EIF_F3_OP3(INS2_OR) | \
356	    EIF_F3_RS1(rs1) | EIF_F3_I(1) | EIF_IMM(imm13, 13))
357
358#define	PATCH(addr) do { \
359	if (addr[0] != SETHI(IF_F2_RD(addr[0]), 0x0) || \
360	    addr[1] != OR_R_I_R(IF_F3_RD(addr[1]), 0x0, IF_F3_RS1(addr[1])) || \
361	    addr[2] != SETHI(IF_F2_RD(addr[2]), 0x0)) \
362		panic("pmap_boostrap: patched instructions have changed"); \
363	addr[0] |= EIF_IMM((tsb_kernel_mask) >> 10, 22); \
364	addr[1] |= EIF_IMM(tsb_kernel_mask, 10); \
365	addr[2] |= EIF_IMM(((vm_offset_t)tsb_kernel) >> 10, 22); \
366	flush(addr); \
367	flush(addr + 1); \
368	flush(addr + 2); \
369} while (0)
370
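	/*
	 * Each patch site consists of a sethi/or pair which is rewritten to
	 * load tsb_kernel_mask, followed by a sethi which is rewritten to
	 * load the upper bits of the tsb_kernel address; the filled-in
	 * instructions are then flushed from the instruction cache.
	 */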
371	PATCH(tl1_immu_miss_patch_1);
372	PATCH(tl1_immu_miss_patch_2);
373	PATCH(tl1_dmmu_miss_patch_1);
374	PATCH(tl1_dmmu_miss_patch_2);
375	PATCH(tl1_dmmu_prot_patch_1);
376	PATCH(tl1_dmmu_prot_patch_2);
377
378	/*
379	 * Enter fake 8k pages for the 4MB kernel pages, so that
380	 * pmap_kextract() will work for them.
381	 */
382	for (i = 0; i < kernel_tlb_slots; i++) {
383		pa = kernel_tlbs[i].te_pa;
384		va = kernel_tlbs[i].te_va;
385		for (off = 0; off < PAGE_SIZE_4M; off += PAGE_SIZE) {
386			tp = tsb_kvtotte(va + off);
387			tp->tte_vpn = TV_VPN(va + off, TS_8K);
388			tp->tte_data = TD_V | TD_8K | TD_PA(pa + off) |
389			    TD_REF | TD_SW | TD_CP | TD_CV | TD_P | TD_W;
390		}
391	}
392
393	/*
394	 * Set the start and end of kva.  The kernel is loaded at the first
395	 * available 4 meg super page, so round up to the end of the page.
396	 */
397	virtual_avail = roundup2(ekva, PAGE_SIZE_4M);
398	virtual_end = vm_max_kernel_address;
399	kernel_vm_end = vm_max_kernel_address;
400
401	/*
402	 * Allocate kva space for temporary mappings.
403	 */
404	pmap_idle_map = virtual_avail;
405	virtual_avail += PAGE_SIZE * DCACHE_COLORS;
406	pmap_temp_map_1 = virtual_avail;
407	virtual_avail += PAGE_SIZE * DCACHE_COLORS;
408	pmap_temp_map_2 = virtual_avail;
409	virtual_avail += PAGE_SIZE * DCACHE_COLORS;
410
411	/*
412	 * Allocate a kernel stack with guard page for thread0 and map it into
413	 * the kernel tsb.  We must ensure that the virtual address is coloured
414	 * properly, since we're allocating from phys_avail so the memory won't
415	 * have an associated vm_page_t.
416	 */
417	pa = pmap_bootstrap_alloc(roundup(KSTACK_PAGES, DCACHE_COLORS) *
418	    PAGE_SIZE);
419	kstack0_phys = pa;
420	virtual_avail += roundup(KSTACK_GUARD_PAGES, DCACHE_COLORS) *
421	    PAGE_SIZE;
422	kstack0 = virtual_avail;
423	virtual_avail += roundup(KSTACK_PAGES, DCACHE_COLORS) * PAGE_SIZE;
424	KASSERT(DCACHE_COLOR(kstack0) == DCACHE_COLOR(kstack0_phys),
425	    ("pmap_bootstrap: kstack0 miscoloured"));
426	for (i = 0; i < KSTACK_PAGES; i++) {
427		pa = kstack0_phys + i * PAGE_SIZE;
428		va = kstack0 + i * PAGE_SIZE;
429		tp = tsb_kvtotte(va);
430		tp->tte_vpn = TV_VPN(va, TS_8K);
431		tp->tte_data = TD_V | TD_8K | TD_PA(pa) | TD_REF | TD_SW |
432		    TD_CP | TD_CV | TD_P | TD_W;
433	}
434
435	/*
436	 * Calculate the last available physical address.
437	 */
438	for (i = 0; phys_avail[i + 2] != 0; i += 2)
439		;
440	avail_end = phys_avail[i + 1];
441	Maxmem = sparc64_btop(avail_end);
442
443	/*
444	 * Add the prom mappings to the kernel tsb.
445	 */
446	if ((vmem = OF_finddevice("/virtual-memory")) == -1)
447		panic("pmap_bootstrap: finddevice /virtual-memory");
448	if ((sz = OF_getproplen(vmem, "translations")) == -1)
449		panic("pmap_bootstrap: getproplen translations");
450	if (sizeof(translations) < sz)
451		panic("pmap_bootstrap: translations too small");
452	bzero(translations, sz);
453	if (OF_getprop(vmem, "translations", translations, sz) == -1)
454		panic("pmap_bootstrap: getprop /virtual-memory/translations");
455	sz /= sizeof(*translations);
456	translations_size = sz;
457	CTR0(KTR_PMAP, "pmap_bootstrap: translations");
458	qsort(translations, sz, sizeof (*translations), om_cmp);
459	for (i = 0; i < sz; i++) {
460		CTR3(KTR_PMAP,
461		    "translation: start=%#lx size=%#lx tte=%#lx",
462		    translations[i].om_start, translations[i].om_size,
463		    translations[i].om_tte);
464		if (translations[i].om_start < VM_MIN_PROM_ADDRESS ||
465		    translations[i].om_start > VM_MAX_PROM_ADDRESS)
466			continue;
467		for (off = 0; off < translations[i].om_size;
468		    off += PAGE_SIZE) {
469			va = translations[i].om_start + off;
470			tp = tsb_kvtotte(va);
471			tp->tte_vpn = TV_VPN(va, TS_8K);
472			tp->tte_data =
473			    ((translations[i].om_tte &
474			      ~(TD_SOFT_MASK << TD_SOFT_SHIFT)) | TD_EXEC) +
475			    off;
476		}
477	}
478
479	/*
480	 * Get the available physical memory ranges from /memory/reg. These
481	 * are only used for kernel dumps, but it may not be wise to do prom
482	 * calls in that situation.
483	 */
484	if ((sz = OF_getproplen(pmem, "reg")) == -1)
485		panic("pmap_bootstrap: getproplen /memory/reg");
486	if (sizeof(sparc64_memreg) < sz)
487		panic("pmap_bootstrap: sparc64_memreg too small");
488	if (OF_getprop(pmem, "reg", sparc64_memreg, sz) == -1)
489		panic("pmap_bootstrap: getprop /memory/reg");
490	sparc64_nmemreg = sz / sizeof(*sparc64_memreg);
491
492	/*
493	 * Initialize the kernel pmap (which is statically allocated).
494	 */
495	pm = kernel_pmap;
496	for (i = 0; i < MAXCPU; i++)
497		pm->pm_context[i] = TLB_CTX_KERNEL;
498	pm->pm_active = ~0;
499
500	/* XXX flush all non-locked tlb entries */
501}
502
503void
504pmap_map_tsb(void)
505{
506	vm_offset_t va;
507	vm_paddr_t pa;
508	u_long data;
509	u_long s;
510	int i;
511
512	s = intr_disable();
513
514	/*
515	 * Map the 4mb tsb pages.
516	 */
517	for (i = 0; i < tsb_kernel_size; i += PAGE_SIZE_4M) {
518		va = (vm_offset_t)tsb_kernel + i;
519		pa = tsb_kernel_phys + i;
520		data = TD_V | TD_4M | TD_PA(pa) | TD_L | TD_CP | TD_CV |
521		    TD_P | TD_W;
522		/* XXX - cheetah */
523		stxa(AA_DMMU_TAR, ASI_DMMU, TLB_TAR_VA(va) |
524		    TLB_TAR_CTX(TLB_CTX_KERNEL));
525		stxa_sync(0, ASI_DTLB_DATA_IN_REG, data);
526	}
527
528	/*
529	 * Set the secondary context to be the kernel context (needed for
530	 * fp block operations in the kernel and the cache code).
531	 */
532	stxa(AA_DMMU_SCXR, ASI_DMMU, TLB_CTX_KERNEL);
533	membar(Sync);
534
535	intr_restore(s);
536}
537
538/*
539 * Allocate a block of physical memory directly from the phys_avail map.
540 * Can only be called from pmap_bootstrap before avail start and end are
541 * calculated.
542 */
543static vm_paddr_t
544pmap_bootstrap_alloc(vm_size_t size)
545{
546	vm_paddr_t pa;
547	int i;
548
549	size = round_page(size);
550	for (i = 0; phys_avail[i + 1] != 0; i += 2) {
551		if (phys_avail[i + 1] - phys_avail[i] < size)
552			continue;
553		pa = phys_avail[i];
554		phys_avail[i] += size;
555		return (pa);
556	}
557	panic("pmap_bootstrap_alloc");
558}
559
560/*
561 * Initialize the pmap module.
562 */
563void
564pmap_init(void)
565{
566	vm_offset_t addr;
567	vm_size_t size;
568	int result;
569	int i;
570
571	for (i = 0; i < vm_page_array_size; i++) {
572		vm_page_t m;
573
574		m = &vm_page_array[i];
575		TAILQ_INIT(&m->md.tte_list);
576		m->md.color = DCACHE_COLOR(VM_PAGE_TO_PHYS(m));
577		m->md.flags = 0;
578		m->md.pmap = NULL;
579	}
580
581	for (i = 0; i < translations_size; i++) {
582		addr = translations[i].om_start;
583		size = translations[i].om_size;
584		if (addr < VM_MIN_PROM_ADDRESS || addr > VM_MAX_PROM_ADDRESS)
585			continue;
586		result = vm_map_find(kernel_map, NULL, 0, &addr, size, FALSE,
587		    VM_PROT_ALL, VM_PROT_ALL, 0);
588		if (result != KERN_SUCCESS || addr != translations[i].om_start)
589			panic("pmap_init: vm_map_find");
590	}
591}
592
593/*
594 * Initialize the address space (zone) for the pv_entries.  Set a
595 * high water mark so that the system can recover from excessive
596 * numbers of pv entries.
597 */
598void
599pmap_init2(void)
600{
601}
602
603/*
604 * Extract the physical page address associated with the given
605 * map/virtual_address pair.
606 */
607vm_paddr_t
608pmap_extract(pmap_t pm, vm_offset_t va)
609{
610	struct tte *tp;
611
612	if (pm == kernel_pmap)
613		return (pmap_kextract(va));
614	tp = tsb_tte_lookup(pm, va);
615	if (tp == NULL)
616		return (0);
617	else
618		return (TTE_GET_PA(tp) | (va & TTE_GET_PAGE_MASK(tp)));
619}
620
621/*
622 * Atomically extract and hold the physical page with the given
623 * pmap and virtual address pair if that mapping permits the given
624 * protection.
625 */
626vm_page_t
627pmap_extract_and_hold(pmap_t pmap, vm_offset_t va, vm_prot_t prot)
628{
629	vm_paddr_t pa;
630	vm_page_t m;
631
632	m = NULL;
633	mtx_lock(&Giant);
634	if ((pa = pmap_extract(pmap, va)) != 0) {
635		m = PHYS_TO_VM_PAGE(pa);
636		vm_page_lock_queues();
637		vm_page_hold(m);
638		vm_page_unlock_queues();
639	}
640	mtx_unlock(&Giant);
641	return (m);
642}
643
644/*
645 * Extract the physical page address associated with the given kernel virtual
646 * address.
647 */
648vm_paddr_t
649pmap_kextract(vm_offset_t va)
650{
651	struct tte *tp;
652
653	if (va >= VM_MIN_DIRECT_ADDRESS)
654		return (TLB_DIRECT_TO_PHYS(va));
655	tp = tsb_kvtotte(va);
656	if ((tp->tte_data & TD_V) == 0)
657		return (0);
658	return (TTE_GET_PA(tp) | (va & TTE_GET_PAGE_MASK(tp)));
659}
660
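/*
 * Track a new mapping of the given page at the given virtual address for
 * data cache coherency.  Returns 1 if the mapping may be entered cacheable
 * (TD_CV), or 0 if it must be entered uncacheable because mappings of a
 * conflicting color exist.
 */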
661int
662pmap_cache_enter(vm_page_t m, vm_offset_t va)
663{
664	struct tte *tp;
665	int color;
666
667	mtx_assert(&vm_page_queue_mtx, MA_OWNED);
668	KASSERT((m->flags & PG_FICTITIOUS) == 0,
669	    ("pmap_cache_enter: fake page"));
670	PMAP_STATS_INC(pmap_ncache_enter);
671
672	/*
673	 * Find the color for this virtual address and note the added mapping.
674	 */
675	color = DCACHE_COLOR(va);
676	m->md.colors[color]++;
677
678	/*
679	 * If all existing mappings have the same color, the mapping is
680	 * cacheable.
681	 */
682	if (m->md.color == color) {
683		KASSERT(m->md.colors[DCACHE_OTHER_COLOR(color)] == 0,
684		    ("pmap_cache_enter: cacheable, mappings of other color"));
685		if (m->md.color == DCACHE_COLOR(VM_PAGE_TO_PHYS(m)))
686			PMAP_STATS_INC(pmap_ncache_enter_c);
687		else
688			PMAP_STATS_INC(pmap_ncache_enter_oc);
689		return (1);
690	}
691
692	/*
693	 * If there are no mappings of the other color, and the page still has
694	 * the wrong color, this must be a new mapping.  Change the color to
695	 * match the new mapping, which is cacheable.  We must flush the page
696	 * from the cache now.
697	 */
698	if (m->md.colors[DCACHE_OTHER_COLOR(color)] == 0) {
699		KASSERT(m->md.colors[color] == 1,
700		    ("pmap_cache_enter: changing color, not new mapping"));
701		dcache_page_inval(VM_PAGE_TO_PHYS(m));
702		m->md.color = color;
703		if (m->md.color == DCACHE_COLOR(VM_PAGE_TO_PHYS(m)))
704			PMAP_STATS_INC(pmap_ncache_enter_cc);
705		else
706			PMAP_STATS_INC(pmap_ncache_enter_coc);
707		return (1);
708	}
709
710	/*
711	 * If the mapping is already non-cacheable, just return.
712	 */
713	if (m->md.color == -1) {
714		PMAP_STATS_INC(pmap_ncache_enter_nc);
715		return (0);
716	}
717
718	PMAP_STATS_INC(pmap_ncache_enter_cnc);
719
720	/*
721	 * Mark all mappings as uncacheable, flush any lines with the other
722	 * color out of the dcache, and set the color to none (-1).
723	 */
724	TAILQ_FOREACH(tp, &m->md.tte_list, tte_link) {
725		atomic_clear_long(&tp->tte_data, TD_CV);
726		tlb_page_demap(TTE_GET_PMAP(tp), TTE_GET_VA(tp));
727	}
728	dcache_page_inval(VM_PAGE_TO_PHYS(m));
729	m->md.color = -1;
730	return (0);
731}
732
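/*
 * Note the removal of a mapping of the given page at the given virtual
 * address.  If the page was uncacheable and the last mapping of the
 * conflicting color has now gone away, make the remaining mappings
 * cacheable again and switch the page to the other color.
 */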
733void
734pmap_cache_remove(vm_page_t m, vm_offset_t va)
735{
736	struct tte *tp;
737	int color;
738
739	mtx_assert(&vm_page_queue_mtx, MA_OWNED);
740	CTR3(KTR_PMAP, "pmap_cache_remove: m=%p va=%#lx c=%d", m, va,
741	    m->md.colors[DCACHE_COLOR(va)]);
742	KASSERT((m->flags & PG_FICTITIOUS) == 0,
743	    ("pmap_cache_remove: fake page"));
744	KASSERT(m->md.colors[DCACHE_COLOR(va)] > 0,
745	    ("pmap_cache_remove: no mappings %d <= 0",
746	    m->md.colors[DCACHE_COLOR(va)]));
747	PMAP_STATS_INC(pmap_ncache_remove);
748
749	/*
750	 * Find the color for this virtual address and note the removal of
751	 * the mapping.
752	 */
753	color = DCACHE_COLOR(va);
754	m->md.colors[color]--;
755
756	/*
757	 * If the page is cacheable, just return and keep the same color, even
758	 * if there are no longer any mappings.
759	 */
760	if (m->md.color != -1) {
761		if (m->md.color == DCACHE_COLOR(VM_PAGE_TO_PHYS(m)))
762			PMAP_STATS_INC(pmap_ncache_remove_c);
763		else
764			PMAP_STATS_INC(pmap_ncache_remove_oc);
765		return;
766	}
767
768	KASSERT(m->md.colors[DCACHE_OTHER_COLOR(color)] != 0,
769	    ("pmap_cache_remove: uncacheable, no mappings of other color"));
770
771	/*
772	 * If the page is not cacheable (color is -1), and the number of
773	 * mappings for this color is not zero, just return.  There are
774	 * mappings of the other color still, so remain non-cacheable.
775	 */
776	if (m->md.colors[color] != 0) {
777		PMAP_STATS_INC(pmap_ncache_remove_nc);
778		return;
779	}
780
781	/*
782	 * The number of mappings for this color is now zero.  Recache the
783	 * other colored mappings, and change the page color to the other
784	 * color.  There should be no lines in the data cache for this page,
785	 * so flushing should not be needed.
786	 */
787	TAILQ_FOREACH(tp, &m->md.tte_list, tte_link) {
788		atomic_set_long(&tp->tte_data, TD_CV);
789		tlb_page_demap(TTE_GET_PMAP(tp), TTE_GET_VA(tp));
790	}
791	m->md.color = DCACHE_OTHER_COLOR(color);
792
793	if (m->md.color == DCACHE_COLOR(VM_PAGE_TO_PHYS(m)))
794		PMAP_STATS_INC(pmap_ncache_remove_cc);
795	else
796		PMAP_STATS_INC(pmap_ncache_remove_coc);
797}
798
799/*
800 * Map a wired page into kernel virtual address space.
801 */
802void
803pmap_kenter(vm_offset_t va, vm_page_t m)
804{
805	vm_offset_t ova;
806	struct tte *tp;
807	vm_page_t om;
808	u_long data;
809
810	mtx_assert(&vm_page_queue_mtx, MA_OWNED);
811	PMAP_STATS_INC(pmap_nkenter);
812	tp = tsb_kvtotte(va);
813	CTR4(KTR_PMAP, "pmap_kenter: va=%#lx pa=%#lx tp=%p data=%#lx",
814	    va, VM_PAGE_TO_PHYS(m), tp, tp->tte_data);
815	if (m->pc != DCACHE_COLOR(va)) {
816		CTR6(KTR_CT2,
817	"pmap_kenter: off colour va=%#lx pa=%#lx o=%p oc=%#lx ot=%d pi=%#lx",
818		    va, VM_PAGE_TO_PHYS(m), m->object,
819		    m->object ? m->object->pg_color : -1,
820		    m->object ? m->object->type : -1,
821		    m->pindex);
822		PMAP_STATS_INC(pmap_nkenter_oc);
823	}
824	if ((tp->tte_data & TD_V) != 0) {
825		om = PHYS_TO_VM_PAGE(TTE_GET_PA(tp));
826		ova = TTE_GET_VA(tp);
827		if (m == om && va == ova) {
828			PMAP_STATS_INC(pmap_nkenter_stupid);
829			return;
830		}
831		TAILQ_REMOVE(&om->md.tte_list, tp, tte_link);
832		pmap_cache_remove(om, ova);
833		if (va != ova)
834			tlb_page_demap(kernel_pmap, ova);
835	}
836	data = TD_V | TD_8K | VM_PAGE_TO_PHYS(m) | TD_REF | TD_SW | TD_CP |
837	    TD_P | TD_W;
838	if (pmap_cache_enter(m, va) != 0)
839		data |= TD_CV;
840	tp->tte_vpn = TV_VPN(va, TS_8K);
841	tp->tte_data = data;
842	TAILQ_INSERT_TAIL(&m->md.tte_list, tp, tte_link);
843}
844
845/*
846 * Map a wired page into kernel virtual address space. This additionally
847 * takes a flag argument which is or'ed to the TTE data. This is used by
848 * bus_space_map().
849 * NOTE: if the mapping is non-cacheable, it's the caller's responsibility
850 * to flush entries that might still be in the cache, if applicable.
851 */
852void
853pmap_kenter_flags(vm_offset_t va, vm_paddr_t pa, u_long flags)
854{
855	struct tte *tp;
856
857	tp = tsb_kvtotte(va);
858	CTR4(KTR_PMAP, "pmap_kenter_flags: va=%#lx pa=%#lx tp=%p data=%#lx",
859	    va, pa, tp, tp->tte_data);
860	tp->tte_vpn = TV_VPN(va, TS_8K);
861	tp->tte_data = TD_V | TD_8K | TD_PA(pa) | TD_REF | TD_P | flags;
862}
863
864/*
865 * Remove a wired page from kernel virtual address space.
866 */
867void
868pmap_kremove(vm_offset_t va)
869{
870	struct tte *tp;
871	vm_page_t m;
872
873	mtx_assert(&vm_page_queue_mtx, MA_OWNED);
874	PMAP_STATS_INC(pmap_nkremove);
875	tp = tsb_kvtotte(va);
876	CTR3(KTR_PMAP, "pmap_kremove: va=%#lx tp=%p data=%#lx", va, tp,
877	    tp->tte_data);
878	if ((tp->tte_data & TD_V) == 0)
879		return;
880	m = PHYS_TO_VM_PAGE(TTE_GET_PA(tp));
881	TAILQ_REMOVE(&m->md.tte_list, tp, tte_link);
882	pmap_cache_remove(m, va);
883	TTE_ZERO(tp);
884}
885
886/*
887 * Inverse of pmap_kenter_flags, used by bus_space_unmap().
888 */
889void
890pmap_kremove_flags(vm_offset_t va)
891{
892	struct tte *tp;
893
894	tp = tsb_kvtotte(va);
895	CTR3(KTR_PMAP, "pmap_kremove: va=%#lx tp=%p data=%#lx", va, tp,
896	    tp->tte_data);
897	TTE_ZERO(tp);
898}
899
900/*
901 * Map a range of physical addresses into kernel virtual address space.
902 *
903 * The value passed in *virt is a suggested virtual address for the mapping.
904 * Architectures which can support a direct-mapped physical to virtual region
905 * can return the appropriate address within that region, leaving '*virt'
906 * unchanged.
907 */
908vm_offset_t
909pmap_map(vm_offset_t *virt, vm_paddr_t start, vm_paddr_t end, int prot)
910{
911
912	return (TLB_PHYS_TO_DIRECT(start));
913}
914
915/*
916 * Map a list of wired pages into kernel virtual address space.  This is
917 * intended for temporary mappings which do not need page modification or
918 * references recorded.  Existing mappings in the region are overwritten.
919 */
920void
921pmap_qenter(vm_offset_t sva, vm_page_t *m, int count)
922{
923	vm_offset_t va;
924	int locked;
925
926	PMAP_STATS_INC(pmap_nqenter);
927	va = sva;
928	if (!(locked = mtx_owned(&vm_page_queue_mtx)))
929		vm_page_lock_queues();
930	while (count-- > 0) {
931		pmap_kenter(va, *m);
932		va += PAGE_SIZE;
933		m++;
934	}
935	if (!locked)
936		vm_page_unlock_queues();
937	tlb_range_demap(kernel_pmap, sva, va);
938}
939
940/*
941 * Remove page mappings from kernel virtual address space.  Intended for
942 * temporary mappings entered by pmap_qenter.
943 */
944void
945pmap_qremove(vm_offset_t sva, int count)
946{
947	vm_offset_t va;
948	int locked;
949
950	PMAP_STATS_INC(pmap_nqremove);
951	va = sva;
952	if (!(locked = mtx_owned(&vm_page_queue_mtx)))
953		vm_page_lock_queues();
954	while (count-- > 0) {
955		pmap_kremove(va);
956		va += PAGE_SIZE;
957	}
958	if (!locked)
959		vm_page_unlock_queues();
960	tlb_range_demap(kernel_pmap, sva, va);
961}
962
963/*
964 * Initialize the pmap associated with process 0.
965 */
966void
967pmap_pinit0(pmap_t pm)
968{
969	int i;
970
971	for (i = 0; i < MAXCPU; i++)
972		pm->pm_context[i] = 0;
973	pm->pm_active = 0;
974	pm->pm_tsb = NULL;
975	pm->pm_tsb_obj = NULL;
976	bzero(&pm->pm_stats, sizeof(pm->pm_stats));
977}
978
979/*
980 * Initialize a preallocated and zeroed pmap structure, such as one in a
981 * vmspace structure.
982 */
983void
984pmap_pinit(pmap_t pm)
985{
986	vm_page_t ma[TSB_PAGES];
987	vm_page_t m;
988	int i;
989
990	/*
991	 * Allocate kva space for the tsb.
992	 */
993	if (pm->pm_tsb == NULL) {
994		pm->pm_tsb = (struct tte *)kmem_alloc_pageable(kernel_map,
995		    TSB_BSIZE);
996	}
997
998	/*
999	 * Allocate an object for it.
1000	 */
1001	if (pm->pm_tsb_obj == NULL)
1002		pm->pm_tsb_obj = vm_object_allocate(OBJT_DEFAULT, TSB_PAGES);
1003
1004	VM_OBJECT_LOCK(pm->pm_tsb_obj);
1005	for (i = 0; i < TSB_PAGES; i++) {
1006		m = vm_page_grab(pm->pm_tsb_obj, i,
1007		    VM_ALLOC_RETRY | VM_ALLOC_WIRED | VM_ALLOC_ZERO);
1008		if ((m->flags & PG_ZERO) == 0)
1009			pmap_zero_page(m);
1010
1011		vm_page_lock_queues();
1012		vm_page_flag_clear(m, PG_BUSY);
1013		m->valid = VM_PAGE_BITS_ALL;
1014		m->md.pmap = pm;
1015		vm_page_unlock_queues();
1016
1017		ma[i] = m;
1018	}
1019	VM_OBJECT_UNLOCK(pm->pm_tsb_obj);
1020	pmap_qenter((vm_offset_t)pm->pm_tsb, ma, TSB_PAGES);
1021
1022	for (i = 0; i < MAXCPU; i++)
1023		pm->pm_context[i] = -1;
1024	pm->pm_active = 0;
1025	bzero(&pm->pm_stats, sizeof(pm->pm_stats));
1026}
1027
1028/*
1029 * Release any resources held by the given physical map.
1030 * Called when a pmap initialized by pmap_pinit is being released.
1031 * Should only be called if the map contains no valid mappings.
1032 */
1033void
1034pmap_release(pmap_t pm)
1035{
1036	vm_object_t obj;
1037	vm_page_t m;
1038
1039	CTR2(KTR_PMAP, "pmap_release: ctx=%#x tsb=%p",
1040	    pm->pm_context[PCPU_GET(cpuid)], pm->pm_tsb);
1041	KASSERT(pmap_resident_count(pm) == 0,
1042	    ("pmap_release: resident pages %ld != 0",
1043	    pmap_resident_count(pm)));
1044	obj = pm->pm_tsb_obj;
1045	VM_OBJECT_LOCK(obj);
1046	KASSERT(obj->ref_count == 1, ("pmap_release: tsbobj ref count != 1"));
1047	while (!TAILQ_EMPTY(&obj->memq)) {
1048		m = TAILQ_FIRST(&obj->memq);
1049		vm_page_lock_queues();
1050		if (vm_page_sleep_if_busy(m, FALSE, "pmaprl"))
1051			continue;
1052		vm_page_busy(m);
1053		KASSERT(m->hold_count == 0,
1054		    ("pmap_release: freeing held tsb page"));
1055		m->md.pmap = NULL;
1056		m->wire_count--;
1057		atomic_subtract_int(&cnt.v_wire_count, 1);
1058		vm_page_free_zero(m);
1059		vm_page_unlock_queues();
1060	}
1061	VM_OBJECT_UNLOCK(obj);
1062	pmap_qremove((vm_offset_t)pm->pm_tsb, TSB_PAGES);
1063}
1064
1065/*
1066 * Grow the number of kernel page table entries.  Unneeded.
1067 */
1068void
1069pmap_growkernel(vm_offset_t addr)
1070{
1071
1072	panic("pmap_growkernel: can't grow kernel");
1073}
1074
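/*
 * Tear down a single tte; used by pmap_remove() directly and as a
 * tsb_foreach() callback.  The reference and modify bits are transferred to
 * the vm_page and the pmap statistics are updated.  Returns 0 once a user
 * pmap's resident count has dropped to zero, so that the caller may stop
 * early.
 */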
1075int
1076pmap_remove_tte(struct pmap *pm, struct pmap *pm2, struct tte *tp,
1077		vm_offset_t va)
1078{
1079	vm_page_t m;
1080	u_long data;
1081
1082	mtx_assert(&vm_page_queue_mtx, MA_OWNED);
1083	data = atomic_readandclear_long(&tp->tte_data);
1084	if ((data & TD_FAKE) == 0) {
1085		m = PHYS_TO_VM_PAGE(TD_PA(data));
1086		TAILQ_REMOVE(&m->md.tte_list, tp, tte_link);
1087		if ((data & TD_WIRED) != 0)
1088			pm->pm_stats.wired_count--;
1089		if ((data & TD_PV) != 0) {
1090			if ((data & TD_W) != 0 && pmap_track_modified(pm, va))
1091				vm_page_dirty(m);
1092			if ((data & TD_REF) != 0)
1093				vm_page_flag_set(m, PG_REFERENCED);
1094			if (TAILQ_EMPTY(&m->md.tte_list))
1095				vm_page_flag_clear(m, PG_WRITEABLE);
1096			pm->pm_stats.resident_count--;
1097		}
1098		pmap_cache_remove(m, va);
1099	}
1100	TTE_ZERO(tp);
1101	if (PMAP_REMOVE_DONE(pm))
1102		return (0);
1103	return (1);
1104}
1105
1106/*
1107 * Remove the given range of addresses from the specified map.
1108 */
1109void
1110pmap_remove(pmap_t pm, vm_offset_t start, vm_offset_t end)
1111{
1112	struct tte *tp;
1113	vm_offset_t va;
1114
1115	mtx_assert(&vm_page_queue_mtx, MA_OWNED);
1116	CTR3(KTR_PMAP, "pmap_remove: ctx=%#lx start=%#lx end=%#lx",
1117	    pm->pm_context[PCPU_GET(cpuid)], start, end);
1118	if (PMAP_REMOVE_DONE(pm))
1119		return;
1120	if (end - start > PMAP_TSB_THRESH) {
1121		tsb_foreach(pm, NULL, start, end, pmap_remove_tte);
1122		tlb_context_demap(pm);
1123	} else {
1124		for (va = start; va < end; va += PAGE_SIZE) {
1125			if ((tp = tsb_tte_lookup(pm, va)) != NULL) {
1126				if (!pmap_remove_tte(pm, NULL, tp, va))
1127					break;
1128			}
1129		}
1130		tlb_range_demap(pm, start, end - 1);
1131	}
1132}
1133
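/*
 * Remove all managed mappings of the given page, transferring the reference
 * and modify bits to the vm_page and clearing its writeable flag.
 */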
1134void
1135pmap_remove_all(vm_page_t m)
1136{
1137	struct pmap *pm;
1138	struct tte *tpn;
1139	struct tte *tp;
1140	vm_offset_t va;
1141
1142	mtx_assert(&vm_page_queue_mtx, MA_OWNED);
1143	for (tp = TAILQ_FIRST(&m->md.tte_list); tp != NULL; tp = tpn) {
1144		tpn = TAILQ_NEXT(tp, tte_link);
1145		if ((tp->tte_data & TD_PV) == 0)
1146			continue;
1147		pm = TTE_GET_PMAP(tp);
1148		va = TTE_GET_VA(tp);
1149		if ((tp->tte_data & TD_WIRED) != 0)
1150			pm->pm_stats.wired_count--;
1151		if ((tp->tte_data & TD_REF) != 0)
1152			vm_page_flag_set(m, PG_REFERENCED);
1153		if ((tp->tte_data & TD_W) != 0 &&
1154		    pmap_track_modified(pm, va))
1155			vm_page_dirty(m);
1156		tp->tte_data &= ~TD_V;
1157		tlb_page_demap(pm, va);
1158		TAILQ_REMOVE(&m->md.tte_list, tp, tte_link);
1159		pm->pm_stats.resident_count--;
1160		pmap_cache_remove(m, va);
1161		TTE_ZERO(tp);
1162	}
1163	vm_page_flag_clear(m, PG_WRITEABLE);
1164}
1165
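/*
 * Strip write access from a single tte; used by pmap_protect() directly and
 * as a tsb_foreach() callback.  The old reference and modify bits are
 * transferred to the vm_page before being cleared.
 */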
1166int
1167pmap_protect_tte(struct pmap *pm, struct pmap *pm2, struct tte *tp,
1168		 vm_offset_t va)
1169{
1170	u_long data;
1171	vm_page_t m;
1172
1173	data = atomic_clear_long(&tp->tte_data, TD_REF | TD_SW | TD_W);
1174	if ((data & TD_PV) != 0) {
1175		m = PHYS_TO_VM_PAGE(TD_PA(data));
1176		if ((data & TD_REF) != 0)
1177			vm_page_flag_set(m, PG_REFERENCED);
1178		if ((data & TD_W) != 0 && pmap_track_modified(pm, va))
1179			vm_page_dirty(m);
1180	}
1181	return (1);
1182}
1183
1184/*
1185 * Set the physical protection on the specified range of this map as requested.
1186 */
1187void
1188pmap_protect(pmap_t pm, vm_offset_t sva, vm_offset_t eva, vm_prot_t prot)
1189{
1190	vm_offset_t va;
1191	struct tte *tp;
1192
1193	CTR4(KTR_PMAP, "pmap_protect: ctx=%#lx sva=%#lx eva=%#lx prot=%#lx",
1194	    pm->pm_context[PCPU_GET(cpuid)], sva, eva, prot);
1195
1196	if ((prot & VM_PROT_READ) == VM_PROT_NONE) {
1197		pmap_remove(pm, sva, eva);
1198		return;
1199	}
1200
1201	if (prot & VM_PROT_WRITE)
1202		return;
1203
1204	if (eva - sva > PMAP_TSB_THRESH) {
1205		tsb_foreach(pm, NULL, sva, eva, pmap_protect_tte);
1206		tlb_context_demap(pm);
1207	} else {
1208		for (va = sva; va < eva; va += PAGE_SIZE) {
1209			if ((tp = tsb_tte_lookup(pm, va)) != NULL)
1210				pmap_protect_tte(pm, NULL, tp, va);
1211		}
1212		tlb_range_demap(pm, sva, eva - 1);
1213	}
1214}
1215
1216/*
1217 * Map the given physical page at the specified virtual address in the
1218 * target pmap with the protection requested.  If specified the page
1219 * will be wired down.
1220 */
1221void
1222pmap_enter(pmap_t pm, vm_offset_t va, vm_page_t m, vm_prot_t prot,
1223	   boolean_t wired)
1224{
1225	struct tte *tp;
1226	vm_paddr_t pa;
1227	u_long data;
1228	int i;
1229
1230	PMAP_STATS_INC(pmap_nenter);
1231	pa = VM_PAGE_TO_PHYS(m);
1232
1233	/*
1234	 * If this is a fake page from the device_pager, but it covers actual
1235	 * physical memory, convert to the real backing page.
1236	 */
1237	if ((m->flags & PG_FICTITIOUS) != 0) {
1238		for (i = 0; phys_avail[i + 1] != 0; i += 2) {
1239			if (pa >= phys_avail[i] && pa <= phys_avail[i + 1]) {
1240				m = PHYS_TO_VM_PAGE(pa);
1241				break;
1242			}
1243		}
1244	}
1245
1246	CTR6(KTR_PMAP,
1247	    "pmap_enter: ctx=%p m=%p va=%#lx pa=%#lx prot=%#x wired=%d",
1248	    pm->pm_context[PCPU_GET(cpuid)], m, va, pa, prot, wired);
1249
1250	/*
1251	 * If there is an existing mapping, and the physical address has not
1252	 * changed, this must be a protection or wiring change.
1253	 */
1254	if ((tp = tsb_tte_lookup(pm, va)) != NULL && TTE_GET_PA(tp) == pa) {
1255		CTR0(KTR_PMAP, "pmap_enter: update");
1256		PMAP_STATS_INC(pmap_nenter_update);
1257
1258		/*
1259		 * Wiring change, just update stats.
1260		 */
1261		if (wired) {
1262			if ((tp->tte_data & TD_WIRED) == 0) {
1263				tp->tte_data |= TD_WIRED;
1264				pm->pm_stats.wired_count++;
1265			}
1266		} else {
1267			if ((tp->tte_data & TD_WIRED) != 0) {
1268				tp->tte_data &= ~TD_WIRED;
1269				pm->pm_stats.wired_count--;
1270			}
1271		}
1272
1273		/*
1274		 * Save the old bits and clear the ones we're interested in.
1275		 */
1276		data = tp->tte_data;
1277		tp->tte_data &= ~(TD_EXEC | TD_SW | TD_W);
1278
1279		/*
1280		 * If we're turning off write permissions, sense modify status.
1281		 */
1282		if ((prot & VM_PROT_WRITE) != 0) {
1283			tp->tte_data |= TD_SW;
1284			if (wired) {
1285				tp->tte_data |= TD_W;
1286			}
1287		} else if ((data & TD_W) != 0 &&
1288		    pmap_track_modified(pm, va)) {
1289			vm_page_dirty(m);
1290		}
1291
1292		/*
1293		 * If we're turning on execute permissions, flush the icache.
1294		 */
1295		if ((prot & VM_PROT_EXECUTE) != 0) {
1296			if ((data & TD_EXEC) == 0) {
1297				icache_page_inval(pa);
1298			}
1299			tp->tte_data |= TD_EXEC;
1300		}
1301
1302		/*
1303		 * Delete the old mapping.
1304		 */
1305		tlb_page_demap(pm, TTE_GET_VA(tp));
1306	} else {
1307		/*
1308		 * If there is an existing mapping, but it's for a different
1309		 * physical address, delete the old mapping.
1310		 */
1311		if (tp != NULL) {
1312			CTR0(KTR_PMAP, "pmap_enter: replace");
1313			PMAP_STATS_INC(pmap_nenter_replace);
1314			vm_page_lock_queues();
1315			pmap_remove_tte(pm, NULL, tp, va);
1316			vm_page_unlock_queues();
1317			tlb_page_demap(pm, va);
1318		} else {
1319			CTR0(KTR_PMAP, "pmap_enter: new");
1320			PMAP_STATS_INC(pmap_nenter_new);
1321		}
1322
1323		/*
1324		 * Now set up the data and install the new mapping.
1325		 */
1326		data = TD_V | TD_8K | TD_PA(pa);
1327		if (pm == kernel_pmap)
1328			data |= TD_P;
1329		if (prot & VM_PROT_WRITE)
1330			data |= TD_SW;
1331		if (prot & VM_PROT_EXECUTE) {
1332			data |= TD_EXEC;
1333			icache_page_inval(pa);
1334		}
1335
1336		/*
1337		 * If it's wired, update stats.  We also don't need reference or
1338		 * modify tracking for wired mappings, so set the bits now.
1339		 */
1340		if (wired) {
1341			pm->pm_stats.wired_count++;
1342			data |= TD_REF | TD_WIRED;
1343			if ((prot & VM_PROT_WRITE) != 0)
1344				data |= TD_W;
1345		}
1346
1347		tsb_tte_enter(pm, m, va, TS_8K, data);
1348	}
1349}
1350
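/*
 * Enter a read-only, executable mapping for the given page; implemented in
 * terms of pmap_enter().
 */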
1351vm_page_t
1352pmap_enter_quick(pmap_t pm, vm_offset_t va, vm_page_t m, vm_page_t mpte)
1353{
1354
1355	pmap_enter(pm, va, m, VM_PROT_READ | VM_PROT_EXECUTE, FALSE);
1356	return (NULL);
1357}
1358
1359void
1360pmap_object_init_pt(pmap_t pm, vm_offset_t addr, vm_object_t object,
1361		    vm_pindex_t pindex, vm_size_t size)
1362{
1363
1364	VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
1365	KASSERT(object->type == OBJT_DEVICE,
1366	    ("pmap_object_init_pt: non-device object"));
1367}
1368
1369/*
1370 * Change the wiring attribute for a map/virtual-address pair.
1371 * The mapping must already exist in the pmap.
1372 */
1373void
1374pmap_change_wiring(pmap_t pm, vm_offset_t va, boolean_t wired)
1375{
1376	struct tte *tp;
1377	u_long data;
1378
1379	if ((tp = tsb_tte_lookup(pm, va)) != NULL) {
1380		if (wired) {
1381			data = atomic_set_long(&tp->tte_data, TD_WIRED);
1382			if ((data & TD_WIRED) == 0)
1383				pm->pm_stats.wired_count++;
1384		} else {
1385			data = atomic_clear_long(&tp->tte_data, TD_WIRED);
1386			if ((data & TD_WIRED) != 0)
1387				pm->pm_stats.wired_count--;
1388		}
1389	}
1390}
1391
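/*
 * tsb_foreach() callback for pmap_copy(): copy a single tte into the
 * destination pmap as a clean, read-only mapping, unless the destination
 * already has a mapping at that address.  Fake (device) mappings are
 * skipped.
 */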
1392static int
1393pmap_copy_tte(pmap_t src_pmap, pmap_t dst_pmap, struct tte *tp, vm_offset_t va)
1394{
1395	vm_page_t m;
1396	u_long data;
1397
1398	if ((tp->tte_data & TD_FAKE) != 0)
1399		return (1);
1400	if (tsb_tte_lookup(dst_pmap, va) == NULL) {
1401		data = tp->tte_data &
1402		    ~(TD_PV | TD_REF | TD_SW | TD_CV | TD_W);
1403		m = PHYS_TO_VM_PAGE(TTE_GET_PA(tp));
1404		tsb_tte_enter(dst_pmap, m, va, TS_8K, data);
1405	}
1406	return (1);
1407}
1408
1409void
1410pmap_copy(pmap_t dst_pmap, pmap_t src_pmap, vm_offset_t dst_addr,
1411	  vm_size_t len, vm_offset_t src_addr)
1412{
1413	struct tte *tp;
1414	vm_offset_t va;
1415
1416	if (dst_addr != src_addr)
1417		return;
1418	if (len > PMAP_TSB_THRESH) {
1419		tsb_foreach(src_pmap, dst_pmap, src_addr, src_addr + len,
1420		    pmap_copy_tte);
1421		tlb_context_demap(dst_pmap);
1422	} else {
1423		for (va = src_addr; va < src_addr + len; va += PAGE_SIZE) {
1424			if ((tp = tsb_tte_lookup(src_pmap, va)) != NULL)
1425				pmap_copy_tte(src_pmap, dst_pmap, tp, va);
1426		}
1427		tlb_range_demap(dst_pmap, src_addr, src_addr + len - 1);
1428	}
1429}
1430
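/*
 * Zero a page of physical memory.  Uses a physical address ASI access if the
 * page is uncacheable, the direct map if the page's color matches its
 * physical color, and a correctly colored temporary mapping otherwise.
 */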
1431void
1432pmap_zero_page(vm_page_t m)
1433{
1434	struct tte *tp;
1435	vm_offset_t va;
1436	vm_paddr_t pa;
1437
1438	KASSERT((m->flags & PG_FICTITIOUS) == 0,
1439	    ("pmap_zero_page: fake page"));
1440	PMAP_STATS_INC(pmap_nzero_page);
1441	pa = VM_PAGE_TO_PHYS(m);
1442	if (m->md.color == -1) {
1443		PMAP_STATS_INC(pmap_nzero_page_nc);
1444		aszero(ASI_PHYS_USE_EC, pa, PAGE_SIZE);
1445	} else if (m->md.color == DCACHE_COLOR(pa)) {
1446		PMAP_STATS_INC(pmap_nzero_page_c);
1447		va = TLB_PHYS_TO_DIRECT(pa);
1448		cpu_block_zero((void *)va, PAGE_SIZE);
1449	} else {
1450		PMAP_STATS_INC(pmap_nzero_page_oc);
1451		va = pmap_temp_map_1 + (m->md.color * PAGE_SIZE);
1452		tp = tsb_kvtotte(va);
1453		tp->tte_data = TD_V | TD_8K | TD_PA(pa) | TD_CP | TD_CV | TD_W;
1454		tp->tte_vpn = TV_VPN(va, TS_8K);
1455		cpu_block_zero((void *)va, PAGE_SIZE);
1456		tlb_page_demap(kernel_pmap, va);
1457	}
1458}
1459
1460void
1461pmap_zero_page_area(vm_page_t m, int off, int size)
1462{
1463	struct tte *tp;
1464	vm_offset_t va;
1465	vm_paddr_t pa;
1466
1467	KASSERT((m->flags & PG_FICTITIOUS) == 0,
1468	    ("pmap_zero_page_area: fake page"));
1469	KASSERT(off + size <= PAGE_SIZE, ("pmap_zero_page_area: bad off/size"));
1470	PMAP_STATS_INC(pmap_nzero_page_area);
1471	pa = VM_PAGE_TO_PHYS(m);
1472	if (m->md.color == -1) {
1473		PMAP_STATS_INC(pmap_nzero_page_area_nc);
1474		aszero(ASI_PHYS_USE_EC, pa + off, size);
1475	} else if (m->md.color == DCACHE_COLOR(pa)) {
1476		PMAP_STATS_INC(pmap_nzero_page_area_c);
1477		va = TLB_PHYS_TO_DIRECT(pa);
1478		bzero((void *)(va + off), size);
1479	} else {
1480		PMAP_STATS_INC(pmap_nzero_page_area_oc);
1481		va = pmap_temp_map_1 + (m->md.color * PAGE_SIZE);
1482		tp = tsb_kvtotte(va);
1483		tp->tte_data = TD_V | TD_8K | TD_PA(pa) | TD_CP | TD_CV | TD_W;
1484		tp->tte_vpn = TV_VPN(va, TS_8K);
1485		bzero((void *)(va + off), size);
1486		tlb_page_demap(kernel_pmap, va);
1487	}
1488}
1489
1490void
1491pmap_zero_page_idle(vm_page_t m)
1492{
1493	struct tte *tp;
1494	vm_offset_t va;
1495	vm_paddr_t pa;
1496
1497	KASSERT((m->flags & PG_FICTITIOUS) == 0,
1498	    ("pmap_zero_page_idle: fake page"));
1499	PMAP_STATS_INC(pmap_nzero_page_idle);
1500	pa = VM_PAGE_TO_PHYS(m);
1501	if (m->md.color == -1) {
1502		PMAP_STATS_INC(pmap_nzero_page_idle_nc);
1503		aszero(ASI_PHYS_USE_EC, pa, PAGE_SIZE);
1504	} else if (m->md.color == DCACHE_COLOR(pa)) {
1505		PMAP_STATS_INC(pmap_nzero_page_idle_c);
1506		va = TLB_PHYS_TO_DIRECT(pa);
1507		cpu_block_zero((void *)va, PAGE_SIZE);
1508	} else {
1509		PMAP_STATS_INC(pmap_nzero_page_idle_oc);
1510		va = pmap_idle_map + (m->md.color * PAGE_SIZE);
1511		tp = tsb_kvtotte(va);
1512		tp->tte_data = TD_V | TD_8K | TD_PA(pa) | TD_CP | TD_CV | TD_W;
1513		tp->tte_vpn = TV_VPN(va, TS_8K);
1514		cpu_block_zero((void *)va, PAGE_SIZE);
1515		tlb_page_demap(kernel_pmap, va);
1516	}
1517}
1518
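/*
 * Copy the contents of one page of physical memory to another, choosing
 * between physical address ASI accesses, the direct map and temporary
 * mappings depending on the cache colors of the source and destination
 * pages.
 */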
1519void
1520pmap_copy_page(vm_page_t msrc, vm_page_t mdst)
1521{
1522	vm_offset_t vdst;
1523	vm_offset_t vsrc;
1524	vm_paddr_t pdst;
1525	vm_paddr_t psrc;
1526	struct tte *tp;
1527
1528	KASSERT((mdst->flags & PG_FICTITIOUS) == 0,
1529	    ("pmap_copy_page: fake dst page"));
1530	KASSERT((msrc->flags & PG_FICTITIOUS) == 0,
1531	    ("pmap_copy_page: fake src page"));
1532	PMAP_STATS_INC(pmap_ncopy_page);
1533	pdst = VM_PAGE_TO_PHYS(mdst);
1534	psrc = VM_PAGE_TO_PHYS(msrc);
1535	if (msrc->md.color == -1 && mdst->md.color == -1) {
1536		PMAP_STATS_INC(pmap_ncopy_page_nc);
1537		ascopy(ASI_PHYS_USE_EC, psrc, pdst, PAGE_SIZE);
1538	} else if (msrc->md.color == DCACHE_COLOR(psrc) &&
1539	    mdst->md.color == DCACHE_COLOR(pdst)) {
1540		PMAP_STATS_INC(pmap_ncopy_page_c);
1541		vdst = TLB_PHYS_TO_DIRECT(pdst);
1542		vsrc = TLB_PHYS_TO_DIRECT(psrc);
1543		cpu_block_copy((void *)vsrc, (void *)vdst, PAGE_SIZE);
1544	} else if (msrc->md.color == -1) {
1545		if (mdst->md.color == DCACHE_COLOR(pdst)) {
1546			PMAP_STATS_INC(pmap_ncopy_page_dc);
1547			vdst = TLB_PHYS_TO_DIRECT(pdst);
1548			ascopyfrom(ASI_PHYS_USE_EC, psrc, (void *)vdst,
1549			    PAGE_SIZE);
1550		} else {
1551			PMAP_STATS_INC(pmap_ncopy_page_doc);
1552			vdst = pmap_temp_map_1 + (mdst->md.color * PAGE_SIZE);
1553			tp = tsb_kvtotte(vdst);
1554			tp->tte_data =
1555			    TD_V | TD_8K | TD_PA(pdst) | TD_CP | TD_CV | TD_W;
1556			tp->tte_vpn = TV_VPN(vdst, TS_8K);
1557			ascopyfrom(ASI_PHYS_USE_EC, psrc, (void *)vdst,
1558			    PAGE_SIZE);
1559			tlb_page_demap(kernel_pmap, vdst);
1560		}
1561	} else if (mdst->md.color == -1) {
1562		if (msrc->md.color == DCACHE_COLOR(psrc)) {
1563			PMAP_STATS_INC(pmap_ncopy_page_sc);
1564			vsrc = TLB_PHYS_TO_DIRECT(psrc);
1565			ascopyto((void *)vsrc, ASI_PHYS_USE_EC, pdst,
1566			    PAGE_SIZE);
1567		} else {
1568			PMAP_STATS_INC(pmap_ncopy_page_soc);
1569			vsrc = pmap_temp_map_1 + (msrc->md.color * PAGE_SIZE);
1570			tp = tsb_kvtotte(vsrc);
1571			tp->tte_data =
1572			    TD_V | TD_8K | TD_PA(psrc) | TD_CP | TD_CV | TD_W;
1573			tp->tte_vpn = TV_VPN(vsrc, TS_8K);
1574			ascopyto((void *)vsrc, ASI_PHYS_USE_EC, pdst,
1575			    PAGE_SIZE);
1576			tlb_page_demap(kernel_pmap, vsrc);
1577		}
1578	} else {
1579		PMAP_STATS_INC(pmap_ncopy_page_oc);
1580		vdst = pmap_temp_map_1 + (mdst->md.color * PAGE_SIZE);
1581		tp = tsb_kvtotte(vdst);
1582		tp->tte_data =
1583		    TD_V | TD_8K | TD_PA(pdst) | TD_CP | TD_CV | TD_W;
1584		tp->tte_vpn = TV_VPN(vdst, TS_8K);
1585		vsrc = pmap_temp_map_2 + (msrc->md.color * PAGE_SIZE);
1586		tp = tsb_kvtotte(vsrc);
1587		tp->tte_data =
1588		    TD_V | TD_8K | TD_PA(psrc) | TD_CP | TD_CV | TD_W;
1589		tp->tte_vpn = TV_VPN(vsrc, TS_8K);
1590		cpu_block_copy((void *)vsrc, (void *)vdst, PAGE_SIZE);
1591		tlb_page_demap(kernel_pmap, vdst);
1592		tlb_page_demap(kernel_pmap, vsrc);
1593	}
1594}
1595
1596/*
1597 * Returns true if the pmap's pv is one of the first
1598 * 16 pvs linked to from this page.  This count may
1599 * be changed upwards or downwards in the future; it
1600 * is only necessary that true be returned for a small
1601 * subset of pmaps for proper page aging.
1602 */
1603boolean_t
1604pmap_page_exists_quick(pmap_t pm, vm_page_t m)
1605{
1606	struct tte *tp;
1607	int loops;
1608
1609	mtx_assert(&vm_page_queue_mtx, MA_OWNED);
1610	if ((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) != 0)
1611		return (FALSE);
1612	loops = 0;
1613	TAILQ_FOREACH(tp, &m->md.tte_list, tte_link) {
1614		if ((tp->tte_data & TD_PV) == 0)
1615			continue;
1616		if (TTE_GET_PMAP(tp) == pm)
1617			return (TRUE);
1618		if (++loops >= 16)
1619			break;
1620	}
1621	return (FALSE);
1622}
1623
1624/*
1625 * Remove all pages from the specified address space; this aids process exit
1626 * speeds.  This is much faster than pmap_remove in the case of running down
1627 * an entire address space.  Only works for the current pmap.
1628 */
1629void
1630pmap_remove_pages(pmap_t pm, vm_offset_t sva, vm_offset_t eva)
1631{
1632}
1633
1634/*
1635 * Lower the permission for all mappings to a given page.
1636 */
1637void
1638pmap_page_protect(vm_page_t m, vm_prot_t prot)
1639{
1640
1641	KASSERT((m->flags & PG_FICTITIOUS) == 0,
1642	    ("pmap_page_protect: fake page"));
1643	if ((prot & VM_PROT_WRITE) == 0) {
1644		if (prot & (VM_PROT_READ | VM_PROT_EXECUTE))
1645			pmap_clear_write(m);
1646		else
1647			pmap_remove_all(m);
1648	}
1649}
1650
1651/*
1652 *	pmap_ts_referenced:
1653 *
1654 *	Return a count of reference bits for a page, clearing those bits.
1655 *	It is not necessary for every reference bit to be cleared, but it
1656 *	is necessary that 0 only be returned when there are truly no
1657 *	reference bits set.
1658 *
1659 *	XXX: The exact number of bits to check and clear is a matter that
1660 *	should be tested and standardized at some point in the future for
1661 *	optimal aging of shared pages.
1662 */
1663
1664int
1665pmap_ts_referenced(vm_page_t m)
1666{
1667	struct tte *tpf;
1668	struct tte *tpn;
1669	struct tte *tp;
1670	u_long data;
1671	int count;
1672
1673	mtx_assert(&vm_page_queue_mtx, MA_OWNED);
1674	if ((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) != 0)
1675		return (0);
1676	count = 0;
1677	if ((tp = TAILQ_FIRST(&m->md.tte_list)) != NULL) {
1678		tpf = tp;
1679		do {
1680			tpn = TAILQ_NEXT(tp, tte_link);
1681			TAILQ_REMOVE(&m->md.tte_list, tp, tte_link);
1682			TAILQ_INSERT_TAIL(&m->md.tte_list, tp, tte_link);
1683			if ((tp->tte_data & TD_PV) == 0 ||
1684			    !pmap_track_modified(TTE_GET_PMAP(tp),
1685			     TTE_GET_VA(tp)))
1686				continue;
1687			data = atomic_clear_long(&tp->tte_data, TD_REF);
1688			if ((data & TD_REF) != 0 && ++count > 4)
1689				break;
1690		} while ((tp = tpn) != NULL && tp != tpf);
1691	}
1692	return (count);
1693}
1694
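/*
 * Return whether the modify bit is set in any managed mapping of the given
 * page.
 */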
1695boolean_t
1696pmap_is_modified(vm_page_t m)
1697{
1698	struct tte *tp;
1699
1700	mtx_assert(&vm_page_queue_mtx, MA_OWNED);
1701	if ((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) != 0)
1702		return (FALSE);
1703	TAILQ_FOREACH(tp, &m->md.tte_list, tte_link) {
1704		if ((tp->tte_data & TD_PV) == 0 ||
1705		    !pmap_track_modified(TTE_GET_PMAP(tp), TTE_GET_VA(tp)))
1706			continue;
1707		if ((tp->tte_data & TD_W) != 0)
1708			return (TRUE);
1709	}
1710	return (FALSE);
1711}
1712
1713/*
1714 *	pmap_is_prefaultable:
1715 *
1716 *	Return whether or not the specified virtual address is eligible
1717 *	for prefault.
1718 */
1719boolean_t
1720pmap_is_prefaultable(pmap_t pmap, vm_offset_t addr)
1721{
1722
1723	return (FALSE);
1724}
1725
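/*
 * Clear the modify bit in all managed mappings of the given page.
 */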
1726void
1727pmap_clear_modify(vm_page_t m)
1728{
1729	struct tte *tp;
1730	u_long data;
1731
1732	mtx_assert(&vm_page_queue_mtx, MA_OWNED);
1733	if ((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) != 0)
1734		return;
1735	TAILQ_FOREACH(tp, &m->md.tte_list, tte_link) {
1736		if ((tp->tte_data & TD_PV) == 0)
1737			continue;
1738		data = atomic_clear_long(&tp->tte_data, TD_W);
1739		if ((data & TD_W) != 0)
1740			tlb_page_demap(TTE_GET_PMAP(tp), TTE_GET_VA(tp));
1741	}
1742}
1743
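/*
 * Clear the reference bit in all managed mappings of the given page.
 */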
1744void
1745pmap_clear_reference(vm_page_t m)
1746{
1747	struct tte *tp;
1748	u_long data;
1749
1750	mtx_assert(&vm_page_queue_mtx, MA_OWNED);
1751	if ((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) != 0)
1752		return;
1753	TAILQ_FOREACH(tp, &m->md.tte_list, tte_link) {
1754		if ((tp->tte_data & TD_PV) == 0)
1755			continue;
1756		data = atomic_clear_long(&tp->tte_data, TD_REF);
1757		if ((data & TD_REF) != 0)
1758			tlb_page_demap(TTE_GET_PMAP(tp), TTE_GET_VA(tp));
1759	}
1760}
1761
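/*
 * Remove write access from all managed mappings of the given page, saving
 * any modified state in the vm_page, and clear the page's writeable flag.
 */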
1762void
1763pmap_clear_write(vm_page_t m)
1764{
1765	struct tte *tp;
1766	u_long data;
1767
1768	mtx_assert(&vm_page_queue_mtx, MA_OWNED);
1769	if ((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) != 0 ||
1770	    (m->flags & PG_WRITEABLE) == 0)
1771		return;
1772	TAILQ_FOREACH(tp, &m->md.tte_list, tte_link) {
1773		if ((tp->tte_data & TD_PV) == 0)
1774			continue;
1775		data = atomic_clear_long(&tp->tte_data, TD_SW | TD_W);
1776		if ((data & TD_W) != 0) {
1777			if (pmap_track_modified(TTE_GET_PMAP(tp),
1778			    TTE_GET_VA(tp)))
1779				vm_page_dirty(m);
1780			tlb_page_demap(TTE_GET_PMAP(tp), TTE_GET_VA(tp));
1781		}
1782	}
1783	vm_page_flag_clear(m, PG_WRITEABLE);
1784}
1785
1786int
1787pmap_mincore(pmap_t pm, vm_offset_t addr)
1788{
1789	/* TODO; */
1790	return (0);
1791}
1792
1793/*
1794 * Activate a user pmap.  The pmap must be activated before its address space
1795 * can be accessed in any way.
1796 */
1797void
1798pmap_activate(struct thread *td)
1799{
1800	struct vmspace *vm;
1801	struct pmap *pm;
1802	int context;
1803
1804	vm = td->td_proc->p_vmspace;
1805	pm = vmspace_pmap(vm);
1806
1807	mtx_lock_spin(&sched_lock);
1808
1809	context = PCPU_GET(tlb_ctx);
1810	if (context == PCPU_GET(tlb_ctx_max)) {
1811		tlb_flush_user();
1812		context = PCPU_GET(tlb_ctx_min);
1813	}
1814	PCPU_SET(tlb_ctx, context + 1);
1815
1816	pm->pm_context[PCPU_GET(cpuid)] = context;
1817	pm->pm_active |= PCPU_GET(cpumask);
1818	PCPU_SET(vmspace, vm);
1819
1820	stxa(AA_DMMU_TSB, ASI_DMMU, pm->pm_tsb);
1821	stxa(AA_IMMU_TSB, ASI_IMMU, pm->pm_tsb);
1822	stxa(AA_DMMU_PCXR, ASI_DMMU, context);
1823	membar(Sync);
1824
1825	mtx_unlock_spin(&sched_lock);
1826}
1827
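/*
 * Return the preferred virtual address for the given mapping; no adjustment
 * is made here, so the caller's hint is returned unchanged.
 */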
1828vm_offset_t
1829pmap_addr_hint(vm_object_t object, vm_offset_t va, vm_size_t size)
1830{
1831
1832	return (va);
1833}
1834