mmu_oea64.c revision 277157
/*-
 * Copyright (c) 2001 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Matt Thomas <matt@3am-software.com> of Allegro Networks, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */
/*-
 * Copyright (C) 1995, 1996 Wolfgang Solfrank.
 * Copyright (C) 1995, 1996 TooLs GmbH.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by TooLs GmbH.
 * 4. The name of TooLs GmbH may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY TOOLS GMBH ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL TOOLS GMBH BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
 * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
 * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * $NetBSD: pmap.c,v 1.28 2000/03/26 20:42:36 kleink Exp $
 */
/*-
 * Copyright (C) 2001 Benno Rice.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY Benno Rice ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL TOOLS GMBH BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
 * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
 * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: head/sys/powerpc/aim/mmu_oea64.c 277157 2015-01-14 02:18:29Z nwhitehorn $");

/*
 * Manages physical address maps.
 *
 * Since the information managed by this module is also stored by the
 * logical address mapping module, this module may throw away valid virtual
 * to physical mappings at almost any time.  However, invalidations of
 * mappings must be done as requested.
 *
 * In order to cope with hardware architectures which make virtual to
 * physical map invalidates expensive, this module may delay invalidate or
 * reduced protection operations until such time as they are actually
 * necessary.  This module is given full information as to which processors
 * are currently using which maps, and to when physical maps must be made
 * correct.
 */

#include "opt_compat.h"
#include "opt_kstack_pages.h"

#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/conf.h>
#include <sys/queue.h>
#include <sys/cpuset.h>
#include <sys/kerneldump.h>
#include <sys/ktr.h>
#include <sys/lock.h>
#include <sys/msgbuf.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/rwlock.h>
#include <sys/sched.h>
#include <sys/sysctl.h>
#include <sys/systm.h>
#include <sys/vmmeter.h>

#include <sys/kdb.h>

#include <dev/ofw/openfirm.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/vm_kern.h>
#include <vm/vm_page.h>
#include <vm/vm_map.h>
#include <vm/vm_object.h>
#include <vm/vm_extern.h>
#include <vm/vm_pageout.h>
#include <vm/uma.h>

#include <machine/_inttypes.h>
#include <machine/cpu.h>
#include <machine/platform.h>
#include <machine/frame.h>
#include <machine/md_var.h>
#include <machine/psl.h>
#include <machine/bat.h>
#include <machine/hid.h>
#include <machine/pte.h>
#include <machine/sr.h>
#include <machine/trap.h>
#include <machine/mmuvar.h>

#include "mmu_oea64.h"
#include "mmu_if.h"
#include "moea64_if.h"

void moea64_release_vsid(uint64_t vsid);
uintptr_t moea64_get_unique_vsid(void);

#define DISABLE_TRANS(msr)	msr = mfmsr(); mtmsr(msr & ~PSL_DR)
#define ENABLE_TRANS(msr)	mtmsr(msr)
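
/*
 * DISABLE_TRANS() saves the current MSR and clears PSL_DR, so the loads
 * and stores that follow use real (physical) addresses; ENABLE_TRANS()
 * restores the saved MSR.  These bracket early-boot code that must touch
 * memory before the page tables are usable.
 */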

#define	VSID_MAKE(sr, hash)	((sr) | (((hash) & 0xfffff) << 4))
#define	VSID_TO_HASH(vsid)	(((vsid) >> 4) & 0xfffff)
#define	VSID_HASH_MASK		0x0000007fffffffffULL
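
/*
 * VSID_MAKE() packs a 20-bit allocator hash into bits 4..23, above the
 * 4-bit segment register number; VSID_TO_HASH() extracts it again.
 * VSID_HASH_MASK selects the low 39 bits of a VSID, the part that
 * participates in the PTEG hash computed in va_to_pteg() below.
 */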

/*
 * Locking semantics:
 * -- Read lock: if no modifications are being made to either the PVO lists
 *    or page table or if any modifications being made result in internal
 *    changes (e.g. wiring, protection) such that the existence of the PVOs
 *    is unchanged and they remain associated with the same pmap (in which
 *    case the changes should be protected by the pmap lock)
 * -- Write lock: required if PTEs/PVOs are being inserted or removed.
 */

#define LOCK_TABLE_RD() rw_rlock(&moea64_table_lock)
#define UNLOCK_TABLE_RD() rw_runlock(&moea64_table_lock)
#define LOCK_TABLE_WR() rw_wlock(&moea64_table_lock)
#define UNLOCK_TABLE_WR() rw_wunlock(&moea64_table_lock)
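
/*
 * A minimal sketch of the pattern the semantics above imply (matching,
 * e.g., moea64_unwire() below): in-place updates take the read lock plus
 * the pmap lock, while PVO insertion or removal takes the write lock:
 *
 *	LOCK_TABLE_RD();
 *	PMAP_LOCK(pm);
 *	pvo = moea64_pvo_find_va(pm, va);
 *	... modify pvo in place; its existence and pmap are unchanged ...
 *	PMAP_UNLOCK(pm);
 *	UNLOCK_TABLE_RD();
 */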

struct ofw_map {
	cell_t	om_va;
	cell_t	om_len;
	uint64_t om_pa;
	cell_t	om_mode;
};

extern unsigned char _etext[];
extern unsigned char _end[];

extern int ofw_real_mode;

/*
 * Map of physical memory regions.
 */
static struct	mem_region *regions;
static struct	mem_region *pregions;
static u_int	phys_avail_count;
static int	regions_sz, pregions_sz;

extern void bs_remap_earlyboot(void);

/*
 * Lock for the pteg and pvo tables.
 */
struct rwlock	moea64_table_lock;
struct mtx	moea64_slb_mutex;

/*
 * PTEG data.
 */
u_int		moea64_pteg_count;
u_int		moea64_pteg_mask;

/*
 * PVO data.
 */
struct	pvo_head *moea64_pvo_table;		/* pvo entries by pteg index */

uma_zone_t	moea64_upvo_zone; /* zone for pvo entries for unmanaged pages */
uma_zone_t	moea64_mpvo_zone; /* zone for pvo entries for managed pages */

#define	BPVO_POOL_SIZE	327680
static struct	pvo_entry *moea64_bpvo_pool;
static int	moea64_bpvo_pool_index = 0;
SYSCTL_INT(_machdep, OID_AUTO, moea64_allocated_bpvo_entries, CTLFLAG_RD,
    &moea64_bpvo_pool_index, 0, "");

#define	VSID_NBPW	(sizeof(u_int32_t) * 8)
#ifdef __powerpc64__
#define	NVSIDS		(NPMAPS * 16)
#define VSID_HASHMASK	0xffffffffUL
#else
#define NVSIDS		NPMAPS
#define VSID_HASHMASK	0xfffffUL
#endif
static u_int	moea64_vsid_bitmap[NVSIDS / VSID_NBPW];
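/* One allocation bit per candidate VSID, VSID_NBPW (32) bits per word. */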

static boolean_t moea64_initialized = FALSE;

/*
 * Statistics.
 */
u_int	moea64_pte_valid = 0;
u_int	moea64_pte_overflow = 0;
u_int	moea64_pvo_entries = 0;
u_int	moea64_pvo_enter_calls = 0;
u_int	moea64_pvo_remove_calls = 0;
SYSCTL_INT(_machdep, OID_AUTO, moea64_pte_valid, CTLFLAG_RD,
    &moea64_pte_valid, 0, "");
SYSCTL_INT(_machdep, OID_AUTO, moea64_pte_overflow, CTLFLAG_RD,
    &moea64_pte_overflow, 0, "");
SYSCTL_INT(_machdep, OID_AUTO, moea64_pvo_entries, CTLFLAG_RD,
    &moea64_pvo_entries, 0, "");
SYSCTL_INT(_machdep, OID_AUTO, moea64_pvo_enter_calls, CTLFLAG_RD,
    &moea64_pvo_enter_calls, 0, "");
SYSCTL_INT(_machdep, OID_AUTO, moea64_pvo_remove_calls, CTLFLAG_RD,
    &moea64_pvo_remove_calls, 0, "");

vm_offset_t	moea64_scratchpage_va[2];
struct pvo_entry *moea64_scratchpage_pvo[2];
uintptr_t	moea64_scratchpage_pte[2];
struct	mtx	moea64_scratchpage_mtx;

uint64_t 	moea64_large_page_mask = 0;
uint64_t	moea64_large_page_size = 0;
int		moea64_large_page_shift = 0;

/*
 * PVO calls.
 */
static int	moea64_pvo_enter(mmu_t, pmap_t, uma_zone_t, struct pvo_head *,
		    vm_offset_t, vm_offset_t, uint64_t, int, int8_t);
static void	moea64_pvo_remove(mmu_t, struct pvo_entry *);
static struct	pvo_entry *moea64_pvo_find_va(pmap_t, vm_offset_t);

/*
 * Utility routines.
 */
static boolean_t	moea64_query_bit(mmu_t, vm_page_t, u_int64_t);
static u_int		moea64_clear_bit(mmu_t, vm_page_t, u_int64_t);
static void		moea64_kremove(mmu_t, vm_offset_t);
static void		moea64_syncicache(mmu_t, pmap_t pmap, vm_offset_t va,
			    vm_offset_t pa, vm_size_t sz);

/*
 * Kernel MMU interface
 */
void moea64_clear_modify(mmu_t, vm_page_t);
void moea64_copy_page(mmu_t, vm_page_t, vm_page_t);
void moea64_copy_pages(mmu_t mmu, vm_page_t *ma, vm_offset_t a_offset,
    vm_page_t *mb, vm_offset_t b_offset, int xfersize);
int moea64_enter(mmu_t, pmap_t, vm_offset_t, vm_page_t, vm_prot_t,
    u_int flags, int8_t psind);
void moea64_enter_object(mmu_t, pmap_t, vm_offset_t, vm_offset_t, vm_page_t,
    vm_prot_t);
void moea64_enter_quick(mmu_t, pmap_t, vm_offset_t, vm_page_t, vm_prot_t);
vm_paddr_t moea64_extract(mmu_t, pmap_t, vm_offset_t);
vm_page_t moea64_extract_and_hold(mmu_t, pmap_t, vm_offset_t, vm_prot_t);
void moea64_init(mmu_t);
boolean_t moea64_is_modified(mmu_t, vm_page_t);
boolean_t moea64_is_prefaultable(mmu_t, pmap_t, vm_offset_t);
boolean_t moea64_is_referenced(mmu_t, vm_page_t);
int moea64_ts_referenced(mmu_t, vm_page_t);
vm_offset_t moea64_map(mmu_t, vm_offset_t *, vm_paddr_t, vm_paddr_t, int);
boolean_t moea64_page_exists_quick(mmu_t, pmap_t, vm_page_t);
int moea64_page_wired_mappings(mmu_t, vm_page_t);
void moea64_pinit(mmu_t, pmap_t);
void moea64_pinit0(mmu_t, pmap_t);
void moea64_protect(mmu_t, pmap_t, vm_offset_t, vm_offset_t, vm_prot_t);
void moea64_qenter(mmu_t, vm_offset_t, vm_page_t *, int);
void moea64_qremove(mmu_t, vm_offset_t, int);
void moea64_release(mmu_t, pmap_t);
void moea64_remove(mmu_t, pmap_t, vm_offset_t, vm_offset_t);
void moea64_remove_pages(mmu_t, pmap_t);
void moea64_remove_all(mmu_t, vm_page_t);
void moea64_remove_write(mmu_t, vm_page_t);
void moea64_unwire(mmu_t, pmap_t, vm_offset_t, vm_offset_t);
void moea64_zero_page(mmu_t, vm_page_t);
void moea64_zero_page_area(mmu_t, vm_page_t, int, int);
void moea64_zero_page_idle(mmu_t, vm_page_t);
void moea64_activate(mmu_t, struct thread *);
void moea64_deactivate(mmu_t, struct thread *);
void *moea64_mapdev(mmu_t, vm_paddr_t, vm_size_t);
void *moea64_mapdev_attr(mmu_t, vm_offset_t, vm_size_t, vm_memattr_t);
void moea64_unmapdev(mmu_t, vm_offset_t, vm_size_t);
vm_paddr_t moea64_kextract(mmu_t, vm_offset_t);
void moea64_page_set_memattr(mmu_t, vm_page_t m, vm_memattr_t ma);
void moea64_kenter_attr(mmu_t, vm_offset_t, vm_offset_t, vm_memattr_t ma);
void moea64_kenter(mmu_t, vm_offset_t, vm_paddr_t);
boolean_t moea64_dev_direct_mapped(mmu_t, vm_paddr_t, vm_size_t);
static void moea64_sync_icache(mmu_t, pmap_t, vm_offset_t, vm_size_t);
void moea64_dumpsys_map(mmu_t mmu, vm_paddr_t pa, size_t sz,
    void **va);
void moea64_scan_init(mmu_t mmu);

static mmu_method_t moea64_methods[] = {
	MMUMETHOD(mmu_clear_modify,	moea64_clear_modify),
	MMUMETHOD(mmu_copy_page,	moea64_copy_page),
	MMUMETHOD(mmu_copy_pages,	moea64_copy_pages),
	MMUMETHOD(mmu_enter,		moea64_enter),
	MMUMETHOD(mmu_enter_object,	moea64_enter_object),
	MMUMETHOD(mmu_enter_quick,	moea64_enter_quick),
	MMUMETHOD(mmu_extract,		moea64_extract),
	MMUMETHOD(mmu_extract_and_hold,	moea64_extract_and_hold),
	MMUMETHOD(mmu_init,		moea64_init),
	MMUMETHOD(mmu_is_modified,	moea64_is_modified),
	MMUMETHOD(mmu_is_prefaultable,	moea64_is_prefaultable),
	MMUMETHOD(mmu_is_referenced,	moea64_is_referenced),
	MMUMETHOD(mmu_ts_referenced,	moea64_ts_referenced),
	MMUMETHOD(mmu_map,     		moea64_map),
	MMUMETHOD(mmu_page_exists_quick,moea64_page_exists_quick),
	MMUMETHOD(mmu_page_wired_mappings,moea64_page_wired_mappings),
	MMUMETHOD(mmu_pinit,		moea64_pinit),
	MMUMETHOD(mmu_pinit0,		moea64_pinit0),
	MMUMETHOD(mmu_protect,		moea64_protect),
	MMUMETHOD(mmu_qenter,		moea64_qenter),
	MMUMETHOD(mmu_qremove,		moea64_qremove),
	MMUMETHOD(mmu_release,		moea64_release),
	MMUMETHOD(mmu_remove,		moea64_remove),
	MMUMETHOD(mmu_remove_pages,	moea64_remove_pages),
	MMUMETHOD(mmu_remove_all,      	moea64_remove_all),
	MMUMETHOD(mmu_remove_write,	moea64_remove_write),
	MMUMETHOD(mmu_sync_icache,	moea64_sync_icache),
	MMUMETHOD(mmu_unwire,		moea64_unwire),
	MMUMETHOD(mmu_zero_page,       	moea64_zero_page),
	MMUMETHOD(mmu_zero_page_area,	moea64_zero_page_area),
	MMUMETHOD(mmu_zero_page_idle,	moea64_zero_page_idle),
	MMUMETHOD(mmu_activate,		moea64_activate),
	MMUMETHOD(mmu_deactivate,      	moea64_deactivate),
	MMUMETHOD(mmu_page_set_memattr,	moea64_page_set_memattr),

	/* Internal interfaces */
	MMUMETHOD(mmu_mapdev,		moea64_mapdev),
	MMUMETHOD(mmu_mapdev_attr,	moea64_mapdev_attr),
	MMUMETHOD(mmu_unmapdev,		moea64_unmapdev),
	MMUMETHOD(mmu_kextract,		moea64_kextract),
	MMUMETHOD(mmu_kenter,		moea64_kenter),
	MMUMETHOD(mmu_kenter_attr,	moea64_kenter_attr),
	MMUMETHOD(mmu_dev_direct_mapped,moea64_dev_direct_mapped),
	MMUMETHOD(mmu_scan_init,	moea64_scan_init),
	MMUMETHOD(mmu_dumpsys_map,	moea64_dumpsys_map),

	{ 0, 0 }
};

MMU_DEF(oea64_mmu, "mmu_oea64_base", moea64_methods, 0);

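/*
 * Compute the primary PTEG index for a (VSID, address) pair: the low
 * bits of the VSID are XORed with the virtual page index of the address
 * (shifted by the large-page shift for large mappings), then masked down
 * to the number of PTE groups actually allocated.
 */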
static __inline u_int
va_to_pteg(uint64_t vsid, vm_offset_t addr, int large)
{
	uint64_t hash;
	int shift;

	shift = large ? moea64_large_page_shift : ADDR_PIDX_SHFT;
	hash = (vsid & VSID_HASH_MASK) ^ (((uint64_t)addr & ADDR_PIDX) >>
	    shift);
	return (hash & moea64_pteg_mask);
}

static __inline struct pvo_head *
vm_page_to_pvoh(vm_page_t m)
{

	return (&m->md.mdpg_pvoh);
}

static __inline void
moea64_pte_create(struct lpte *pt, uint64_t vsid, vm_offset_t va,
    uint64_t pte_lo, int flags)
{

	/*
	 * Construct a PTE.  Default to IMB initially.  Valid bit only gets
	 * set when the real pte is set in memory.
	 *
	 * Note: Don't set the valid bit for correct operation of tlb update.
	 */
	pt->pte_hi = (vsid << LPTE_VSID_SHIFT) |
	    (((uint64_t)(va & ADDR_PIDX) >> ADDR_API_SHFT64) & LPTE_API);

	if (flags & PVO_LARGE)
		pt->pte_hi |= LPTE_BIG;

	pt->pte_lo = pte_lo;
}

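/*
 * Translate a vm_memattr_t into the PowerPC WIMG storage attribute bits
 * (W = write-through, I = cache-inhibited, M = memory coherence required,
 * G = guarded).  Addresses outside the known physical memory regions are
 * assumed to be device space and default to cache-inhibited and guarded.
 */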
static __inline uint64_t
moea64_calc_wimg(vm_offset_t pa, vm_memattr_t ma)
{
	uint64_t pte_lo;
	int i;

	if (ma != VM_MEMATTR_DEFAULT) {
		switch (ma) {
		case VM_MEMATTR_UNCACHEABLE:
			return (LPTE_I | LPTE_G);
		case VM_MEMATTR_WRITE_COMBINING:
		case VM_MEMATTR_WRITE_BACK:
		case VM_MEMATTR_PREFETCHABLE:
			return (LPTE_I);
		case VM_MEMATTR_WRITE_THROUGH:
			return (LPTE_W | LPTE_M);
		}
	}

	/*
	 * Assume the page is cache inhibited and access is guarded unless
	 * it's in our available memory array.
	 */
	pte_lo = LPTE_I | LPTE_G;
	for (i = 0; i < pregions_sz; i++) {
		if ((pa >= pregions[i].mr_start) &&
		    (pa < (pregions[i].mr_start + pregions[i].mr_size))) {
			pte_lo &= ~(LPTE_I | LPTE_G);
			pte_lo |= LPTE_M;
			break;
		}
	}

	return pte_lo;
}

/*
 * Quick sort callout for comparing memory regions.
 */
static int	om_cmp(const void *a, const void *b);

static int
om_cmp(const void *a, const void *b)
{
	const struct	ofw_map *mapa;
	const struct	ofw_map *mapb;

	mapa = a;
	mapb = b;
	if (mapa->om_pa < mapb->om_pa)
		return (-1);
	else if (mapa->om_pa > mapb->om_pa)
		return (1);
	else
		return (0);
}

static void
moea64_add_ofw_mappings(mmu_t mmup, phandle_t mmu, size_t sz)
{
	struct ofw_map	translations[sz/(4*sizeof(cell_t))]; /*>= 4 cells per */
	pcell_t		acells, trans_cells[sz/sizeof(cell_t)];
	register_t	msr;
	vm_offset_t	off;
	vm_paddr_t	pa_base;
	int		i, j;

	bzero(translations, sz);
	OF_getprop(OF_finddevice("/"), "#address-cells", &acells,
	    sizeof(acells));
	if (OF_getprop(mmu, "translations", trans_cells, sz) == -1)
		panic("moea64_bootstrap: can't get ofw translations");

	CTR0(KTR_PMAP, "moea64_add_ofw_mappings: translations");
	sz /= sizeof(cell_t);
	for (i = 0, j = 0; i < sz; j++) {
		translations[j].om_va = trans_cells[i++];
		translations[j].om_len = trans_cells[i++];
		translations[j].om_pa = trans_cells[i++];
		if (acells == 2) {
			translations[j].om_pa <<= 32;
			translations[j].om_pa |= trans_cells[i++];
		}
		translations[j].om_mode = trans_cells[i++];
	}
	KASSERT(i == sz, ("Translations map has incorrect cell count (%d/%zd)",
	    i, sz));

	sz = j;
	qsort(translations, sz, sizeof (*translations), om_cmp);

	for (i = 0; i < sz; i++) {
		pa_base = translations[i].om_pa;
	      #ifndef __powerpc64__
		if ((translations[i].om_pa >> 32) != 0)
			panic("OFW translations above 32-bit boundary!");
	      #endif

		if (pa_base % PAGE_SIZE)
			panic("OFW translation not page-aligned (phys)!");
		if (translations[i].om_va % PAGE_SIZE)
			panic("OFW translation not page-aligned (virt)!");

		CTR3(KTR_PMAP, "translation: pa=%#zx va=%#x len=%#x",
		    pa_base, translations[i].om_va, translations[i].om_len);

		/* Now enter the pages for this mapping */

		DISABLE_TRANS(msr);
		for (off = 0; off < translations[i].om_len; off += PAGE_SIZE) {
			/* If this address is direct-mapped, skip remapping */
			if (hw_direct_map && translations[i].om_va == pa_base &&
			    moea64_calc_wimg(pa_base + off, VM_MEMATTR_DEFAULT)
			    == LPTE_M)
				continue;

			if (moea64_pvo_find_va(kernel_pmap,
			    translations[i].om_va + off) != NULL)
				continue;

			moea64_kenter(mmup, translations[i].om_va + off,
			    pa_base + off);
		}
		ENABLE_TRANS(msr);
	}
}

#ifdef __powerpc64__
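/*
 * Probe for large page support.  The IBM 970 ("G5") family ships with
 * large pages disabled, so the disable bit in HID4 is cleared first;
 * every case handled here ends up with the 16 MB large page size.
 */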
static void
moea64_probe_large_page(void)
{
	uint16_t pvr = mfpvr() >> 16;

	switch (pvr) {
	case IBM970:
	case IBM970FX:
	case IBM970MP:
		powerpc_sync(); isync();
		mtspr(SPR_HID4, mfspr(SPR_HID4) & ~HID4_970_DISABLE_LG_PG);
		powerpc_sync(); isync();

		/* FALLTHROUGH */
	default:
		moea64_large_page_size = 0x1000000; /* 16 MB */
		moea64_large_page_shift = 24;
	}

	moea64_large_page_mask = moea64_large_page_size - 1;
}

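/*
 * Install a kernel SLB entry for the segment containing 'va' so that the
 * range can be touched during bootstrap without taking an SLB fault.  If
 * the segment is already present in the per-CPU SLB cache, do nothing.
 */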
static void
moea64_bootstrap_slb_prefault(vm_offset_t va, int large)
{
	struct slb *cache;
	struct slb entry;
	uint64_t esid, slbe;
	uint64_t i;

	cache = PCPU_GET(slb);
	esid = va >> ADDR_SR_SHFT;
	slbe = (esid << SLBE_ESID_SHIFT) | SLBE_VALID;

	for (i = 0; i < 64; i++) {
		if (cache[i].slbe == (slbe | i))
			return;
	}

	entry.slbe = slbe;
	entry.slbv = KERNEL_VSID(esid) << SLBV_VSID_SHIFT;
	if (large)
		entry.slbv |= SLBV_L;

	slb_insert_kernel(entry.slbe, entry.slbv);
}
#endif

static void
moea64_setup_direct_map(mmu_t mmup, vm_offset_t kernelstart,
    vm_offset_t kernelend)
{
	register_t msr;
	vm_paddr_t pa;
	vm_offset_t size, off;
	uint64_t pte_lo;
	int i;

	if (moea64_large_page_size == 0)
		hw_direct_map = 0;

	DISABLE_TRANS(msr);
	if (hw_direct_map) {
		LOCK_TABLE_WR();
		PMAP_LOCK(kernel_pmap);
		for (i = 0; i < pregions_sz; i++) {
		  for (pa = pregions[i].mr_start; pa < pregions[i].mr_start +
		     pregions[i].mr_size; pa += moea64_large_page_size) {
			pte_lo = LPTE_M;

			/*
			 * Set memory access as guarded if prefetch within
			 * the page could exit the available physmem area.
			 */
			if (pa & moea64_large_page_mask) {
				pa &= moea64_large_page_mask;
				pte_lo |= LPTE_G;
			}
			if (pa + moea64_large_page_size >
			    pregions[i].mr_start + pregions[i].mr_size)
				pte_lo |= LPTE_G;

			moea64_pvo_enter(mmup, kernel_pmap, moea64_upvo_zone,
				    NULL, pa, pa, pte_lo,
				    PVO_WIRED | PVO_LARGE, 0);
		  }
		}
		PMAP_UNLOCK(kernel_pmap);
		UNLOCK_TABLE_WR();
	} else {
		size = sizeof(struct pvo_head) * moea64_pteg_count;
		off = (vm_offset_t)(moea64_pvo_table);
		for (pa = off; pa < off + size; pa += PAGE_SIZE)
			moea64_kenter(mmup, pa, pa);
		size = BPVO_POOL_SIZE*sizeof(struct pvo_entry);
		off = (vm_offset_t)(moea64_bpvo_pool);
		for (pa = off; pa < off + size; pa += PAGE_SIZE)
			moea64_kenter(mmup, pa, pa);

		/*
		 * Map certain important things, like ourselves.
		 *
		 * NOTE: We do not map the exception vector space. That code is
		 * used only in real mode, and leaving it unmapped allows us to
		 * catch NULL pointer dereferences, instead of making NULL a
		 * valid
		 * address.
		 */

		for (pa = kernelstart & ~PAGE_MASK; pa < kernelend;
		    pa += PAGE_SIZE)
			moea64_kenter(mmup, pa, pa);
	}
	ENABLE_TRANS(msr);

	/*
	 * Allow user to override unmapped_buf_allowed for testing.
	 * XXXKIB Only direct map implementation was tested.
	 */
	if (!TUNABLE_INT_FETCH("vfs.unmapped_buf_allowed",
	    &unmapped_buf_allowed))
		unmapped_buf_allowed = hw_direct_map;
}

void
moea64_early_bootstrap(mmu_t mmup, vm_offset_t kernelstart, vm_offset_t kernelend)
{
	int		i, j;
	vm_size_t	physsz, hwphyssz;

#ifndef __powerpc64__
	/* We don't have a direct map since there is no BAT */
	hw_direct_map = 0;

	/* Make sure battable is zero, since we have no BAT */
	for (i = 0; i < 16; i++) {
		battable[i].batu = 0;
		battable[i].batl = 0;
	}
#else
	moea64_probe_large_page();

	/* Use a direct map if we have large page support */
	if (moea64_large_page_size > 0)
		hw_direct_map = 1;
	else
		hw_direct_map = 0;
#endif

	/* Get physical memory regions from firmware */
	mem_regions(&pregions, &pregions_sz, &regions, &regions_sz);
	CTR0(KTR_PMAP, "moea64_bootstrap: physical memory");

	if (sizeof(phys_avail)/sizeof(phys_avail[0]) < regions_sz)
		panic("moea64_bootstrap: phys_avail too small");

	phys_avail_count = 0;
	physsz = 0;
	hwphyssz = 0;
	TUNABLE_ULONG_FETCH("hw.physmem", (u_long *) &hwphyssz);
	for (i = 0, j = 0; i < regions_sz; i++, j += 2) {
		CTR3(KTR_PMAP, "region: %#zx - %#zx (%#zx)",
		    regions[i].mr_start, regions[i].mr_start +
		    regions[i].mr_size, regions[i].mr_size);
		if (hwphyssz != 0 &&
		    (physsz + regions[i].mr_size) >= hwphyssz) {
			if (physsz < hwphyssz) {
				phys_avail[j] = regions[i].mr_start;
				phys_avail[j + 1] = regions[i].mr_start +
				    hwphyssz - physsz;
				physsz = hwphyssz;
				phys_avail_count++;
			}
			break;
		}
		phys_avail[j] = regions[i].mr_start;
		phys_avail[j + 1] = regions[i].mr_start + regions[i].mr_size;
		phys_avail_count++;
		physsz += regions[i].mr_size;
	}

	/* Check for overlap with the kernel and exception vectors */
	for (j = 0; j < 2*phys_avail_count; j+=2) {
		if (phys_avail[j] < EXC_LAST)
			phys_avail[j] += EXC_LAST;

		if (kernelstart >= phys_avail[j] &&
		    kernelstart < phys_avail[j+1]) {
			if (kernelend < phys_avail[j+1]) {
				phys_avail[2*phys_avail_count] =
				    (kernelend & ~PAGE_MASK) + PAGE_SIZE;
				phys_avail[2*phys_avail_count + 1] =
				    phys_avail[j+1];
				phys_avail_count++;
			}

			phys_avail[j+1] = kernelstart & ~PAGE_MASK;
		}

		if (kernelend >= phys_avail[j] &&
		    kernelend < phys_avail[j+1]) {
			if (kernelstart > phys_avail[j]) {
				phys_avail[2*phys_avail_count] = phys_avail[j];
				phys_avail[2*phys_avail_count + 1] =
				    kernelstart & ~PAGE_MASK;
				phys_avail_count++;
			}

			phys_avail[j] = (kernelend & ~PAGE_MASK) + PAGE_SIZE;
		}
	}

	physmem = btoc(physsz);

#ifdef PTEGCOUNT
	moea64_pteg_count = PTEGCOUNT;
#else
	moea64_pteg_count = 0x1000;

	while (moea64_pteg_count < physmem)
		moea64_pteg_count <<= 1;

	moea64_pteg_count >>= 1;
#endif /* PTEGCOUNT */
}

void
moea64_mid_bootstrap(mmu_t mmup, vm_offset_t kernelstart, vm_offset_t kernelend)
{
	vm_size_t	size;
	register_t	msr;
	int		i;

	/*
	 * Set PTEG mask
	 */
	moea64_pteg_mask = moea64_pteg_count - 1;

	/*
	 * Allocate pv/overflow lists.
	 */
	size = sizeof(struct pvo_head) * moea64_pteg_count;

	moea64_pvo_table = (struct pvo_head *)moea64_bootstrap_alloc(size,
	    PAGE_SIZE);
	CTR1(KTR_PMAP, "moea64_bootstrap: PVO table at %p", moea64_pvo_table);

	DISABLE_TRANS(msr);
	for (i = 0; i < moea64_pteg_count; i++)
		LIST_INIT(&moea64_pvo_table[i]);
	ENABLE_TRANS(msr);

	/*
	 * Initialize the lock that synchronizes access to the pteg and pvo
	 * tables.
	 */
	rw_init_flags(&moea64_table_lock, "pmap tables", RW_RECURSE);
	mtx_init(&moea64_slb_mutex, "SLB table", NULL, MTX_DEF);

	/*
	 * Initialise the unmanaged pvo pool.
	 */
	moea64_bpvo_pool = (struct pvo_entry *)moea64_bootstrap_alloc(
		BPVO_POOL_SIZE*sizeof(struct pvo_entry), 0);
	moea64_bpvo_pool_index = 0;

	/*
	 * Make sure kernel vsid is allocated as well as VSID 0.
	 */
	#ifndef __powerpc64__
	moea64_vsid_bitmap[(KERNEL_VSIDBITS & (NVSIDS - 1)) / VSID_NBPW]
		|= 1 << (KERNEL_VSIDBITS % VSID_NBPW);
	moea64_vsid_bitmap[0] |= 1;
	#endif

	/*
	 * Initialize the kernel pmap (which is statically allocated).
	 */
	#ifdef __powerpc64__
	for (i = 0; i < 64; i++) {
		pcpup->pc_slb[i].slbv = 0;
		pcpup->pc_slb[i].slbe = 0;
	}
	#else
	for (i = 0; i < 16; i++)
		kernel_pmap->pm_sr[i] = EMPTY_SEGMENT + i;
	#endif

	kernel_pmap->pmap_phys = kernel_pmap;
	CPU_FILL(&kernel_pmap->pm_active);
	RB_INIT(&kernel_pmap->pmap_pvo);

	PMAP_LOCK_INIT(kernel_pmap);

	/*
	 * Now map in all the other buffers we allocated earlier
	 */

	moea64_setup_direct_map(mmup, kernelstart, kernelend);
}

void
moea64_late_bootstrap(mmu_t mmup, vm_offset_t kernelstart, vm_offset_t kernelend)
{
	ihandle_t	mmui;
	phandle_t	chosen;
	phandle_t	mmu;
	ssize_t		sz;
	int		i;
	vm_offset_t	pa, va;
	void		*dpcpu;

	/*
	 * Set up the Open Firmware pmap and add its mappings if not in real
	 * mode.
	 */

	chosen = OF_finddevice("/chosen");
	if (!ofw_real_mode && chosen != -1 &&
	    OF_getprop(chosen, "mmu", &mmui, 4) != -1) {
		mmu = OF_instance_to_package(mmui);
		if (mmu == -1 ||
		    (sz = OF_getproplen(mmu, "translations")) == -1)
			sz = 0;
		if (sz > 6144 /* tmpstksz - 2 KB headroom */)
			panic("moea64_bootstrap: too many ofw translations");

		if (sz > 0)
			moea64_add_ofw_mappings(mmup, mmu, sz);
	}

	/*
	 * Calculate the last available physical address.
	 */
	for (i = 0; phys_avail[i + 2] != 0; i += 2)
		;
	Maxmem = powerpc_btop(phys_avail[i + 1]);

	/*
	 * Initialize MMU and remap early physical mappings
	 */
	MMU_CPU_BOOTSTRAP(mmup,0);
	mtmsr(mfmsr() | PSL_DR | PSL_IR);
	pmap_bootstrapped++;
	bs_remap_earlyboot();

	/*
	 * Set the start and end of kva.
	 */
	virtual_avail = VM_MIN_KERNEL_ADDRESS;
	virtual_end = VM_MAX_SAFE_KERNEL_ADDRESS;

	/*
	 * Map the entire KVA range into the SLB. We must not fault there.
	 */
	#ifdef __powerpc64__
	for (va = virtual_avail; va < virtual_end; va += SEGMENT_LENGTH)
		moea64_bootstrap_slb_prefault(va, 0);
	#endif

	/*
	 * Figure out how far we can extend virtual_end into segment 16
	 * without running into existing mappings. Segment 16 is guaranteed
	 * to contain neither RAM nor devices (at least on Apple hardware),
	 * but will generally contain some OFW mappings we should not
	 * step on.
	 */

	#ifndef __powerpc64__	/* KVA is in high memory on PPC64 */
	PMAP_LOCK(kernel_pmap);
	while (virtual_end < VM_MAX_KERNEL_ADDRESS &&
	    moea64_pvo_find_va(kernel_pmap, virtual_end+1) == NULL)
		virtual_end += PAGE_SIZE;
	PMAP_UNLOCK(kernel_pmap);
	#endif

	/*
	 * Allocate a kernel stack with a guard page for thread0 and map it
	 * into the kernel page map.
	 */
	pa = moea64_bootstrap_alloc(KSTACK_PAGES * PAGE_SIZE, PAGE_SIZE);
	va = virtual_avail + KSTACK_GUARD_PAGES * PAGE_SIZE;
	virtual_avail = va + KSTACK_PAGES * PAGE_SIZE;
	CTR2(KTR_PMAP, "moea64_bootstrap: kstack0 at %#x (%#x)", pa, va);
	thread0.td_kstack = va;
	thread0.td_kstack_pages = KSTACK_PAGES;
	for (i = 0; i < KSTACK_PAGES; i++) {
		moea64_kenter(mmup, va, pa);
		pa += PAGE_SIZE;
		va += PAGE_SIZE;
	}

	/*
	 * Allocate virtual address space for the message buffer.
	 */
	pa = msgbuf_phys = moea64_bootstrap_alloc(msgbufsize, PAGE_SIZE);
	msgbufp = (struct msgbuf *)virtual_avail;
	va = virtual_avail;
	virtual_avail += round_page(msgbufsize);
	while (va < virtual_avail) {
		moea64_kenter(mmup, va, pa);
		pa += PAGE_SIZE;
		va += PAGE_SIZE;
	}

	/*
	 * Allocate virtual address space for the dynamic percpu area.
	 */
	pa = moea64_bootstrap_alloc(DPCPU_SIZE, PAGE_SIZE);
	dpcpu = (void *)virtual_avail;
	va = virtual_avail;
	virtual_avail += DPCPU_SIZE;
	while (va < virtual_avail) {
		moea64_kenter(mmup, va, pa);
		pa += PAGE_SIZE;
		va += PAGE_SIZE;
	}
	dpcpu_init(dpcpu, 0);

	/*
	 * Allocate some things for page zeroing. We put this directly
	 * in the page table, marked with LPTE_LOCKED, to avoid any
	 * of the PVO book-keeping or other parts of the VM system
	 * from even knowing that this hack exists.
	 */

	if (!hw_direct_map) {
		mtx_init(&moea64_scratchpage_mtx, "pvo zero page", NULL,
		    MTX_DEF);
		for (i = 0; i < 2; i++) {
			moea64_scratchpage_va[i] = (virtual_end+1) - PAGE_SIZE;
			virtual_end -= PAGE_SIZE;

			moea64_kenter(mmup, moea64_scratchpage_va[i], 0);

			moea64_scratchpage_pvo[i] = moea64_pvo_find_va(
			    kernel_pmap, (vm_offset_t)moea64_scratchpage_va[i]);
			LOCK_TABLE_RD();
			moea64_scratchpage_pte[i] = MOEA64_PVO_TO_PTE(
			    mmup, moea64_scratchpage_pvo[i]);
			moea64_scratchpage_pvo[i]->pvo_pte.lpte.pte_hi
			    |= LPTE_LOCKED;
			MOEA64_PTE_CHANGE(mmup, moea64_scratchpage_pte[i],
			    &moea64_scratchpage_pvo[i]->pvo_pte.lpte,
			    moea64_scratchpage_pvo[i]->pvo_vpn);
			UNLOCK_TABLE_RD();
		}
	}
}

/*
 * Activate a user pmap.  The pmap must be activated before its address
 * space can be accessed in any way.
 */
void
moea64_activate(mmu_t mmu, struct thread *td)
{
	pmap_t	pm;

	pm = &td->td_proc->p_vmspace->vm_pmap;
	CPU_SET(PCPU_GET(cpuid), &pm->pm_active);

	#ifdef __powerpc64__
	PCPU_SET(userslb, pm->pm_slb);
	#else
	PCPU_SET(curpmap, pm->pmap_phys);
	#endif
}

void
moea64_deactivate(mmu_t mmu, struct thread *td)
{
	pmap_t	pm;

	pm = &td->td_proc->p_vmspace->vm_pmap;
	CPU_CLR(PCPU_GET(cpuid), &pm->pm_active);
	#ifdef __powerpc64__
	PCPU_SET(userslb, NULL);
	#else
	PCPU_SET(curpmap, NULL);
	#endif
}

void
moea64_unwire(mmu_t mmu, pmap_t pm, vm_offset_t sva, vm_offset_t eva)
{
	struct	pvo_entry key, *pvo;
	uintptr_t pt;

	LOCK_TABLE_RD();
	PMAP_LOCK(pm);
	key.pvo_vaddr = sva;
	for (pvo = RB_NFIND(pvo_tree, &pm->pmap_pvo, &key);
	    pvo != NULL && PVO_VADDR(pvo) < eva;
	    pvo = RB_NEXT(pvo_tree, &pm->pmap_pvo, pvo)) {
		pt = MOEA64_PVO_TO_PTE(mmu, pvo);
		if ((pvo->pvo_vaddr & PVO_WIRED) == 0)
			panic("moea64_unwire: pvo %p is missing PVO_WIRED",
			    pvo);
		pvo->pvo_vaddr &= ~PVO_WIRED;
		if ((pvo->pvo_pte.lpte.pte_hi & LPTE_WIRED) == 0)
			panic("moea64_unwire: pte %p is missing LPTE_WIRED",
			    &pvo->pvo_pte.lpte);
		pvo->pvo_pte.lpte.pte_hi &= ~LPTE_WIRED;
		if (pt != -1) {
			/*
			 * The PTE's wired attribute is not a hardware
			 * feature, so there is no need to invalidate any TLB
			 * entries.
			 */
			MOEA64_PTE_CHANGE(mmu, pt, &pvo->pvo_pte.lpte,
			    pvo->pvo_vpn);
		}
		pm->pm_stats.wired_count--;
	}
	UNLOCK_TABLE_RD();
	PMAP_UNLOCK(pm);
}

/*
 * This goes through and sets the physical address of our
 * special scratch PTE to the PA we want to zero or copy. Because
 * of locking issues (this can get called in pvo_enter() by
 * the UMA allocator), we can't use most other utility functions here
 */

static __inline
void moea64_set_scratchpage_pa(mmu_t mmup, int which, vm_offset_t pa) {

	KASSERT(!hw_direct_map, ("Using OEA64 scratchpage with a direct map!"));
	mtx_assert(&moea64_scratchpage_mtx, MA_OWNED);

	moea64_scratchpage_pvo[which]->pvo_pte.lpte.pte_lo &=
	    ~(LPTE_WIMG | LPTE_RPGN);
	moea64_scratchpage_pvo[which]->pvo_pte.lpte.pte_lo |=
	    moea64_calc_wimg(pa, VM_MEMATTR_DEFAULT) | (uint64_t)pa;
	MOEA64_PTE_CHANGE(mmup, moea64_scratchpage_pte[which],
	    &moea64_scratchpage_pvo[which]->pvo_pte.lpte,
	    moea64_scratchpage_pvo[which]->pvo_vpn);
	isync();
}

void
moea64_copy_page(mmu_t mmu, vm_page_t msrc, vm_page_t mdst)
{
	vm_offset_t	dst;
	vm_offset_t	src;

	dst = VM_PAGE_TO_PHYS(mdst);
	src = VM_PAGE_TO_PHYS(msrc);

	if (hw_direct_map) {
		bcopy((void *)src, (void *)dst, PAGE_SIZE);
	} else {
		mtx_lock(&moea64_scratchpage_mtx);

		moea64_set_scratchpage_pa(mmu, 0, src);
		moea64_set_scratchpage_pa(mmu, 1, dst);

		bcopy((void *)moea64_scratchpage_va[0],
		    (void *)moea64_scratchpage_va[1], PAGE_SIZE);

		mtx_unlock(&moea64_scratchpage_mtx);
	}
}

static inline void
moea64_copy_pages_dmap(mmu_t mmu, vm_page_t *ma, vm_offset_t a_offset,
    vm_page_t *mb, vm_offset_t b_offset, int xfersize)
{
	void *a_cp, *b_cp;
	vm_offset_t a_pg_offset, b_pg_offset;
	int cnt;

	while (xfersize > 0) {
		a_pg_offset = a_offset & PAGE_MASK;
		cnt = min(xfersize, PAGE_SIZE - a_pg_offset);
		a_cp = (char *)VM_PAGE_TO_PHYS(ma[a_offset >> PAGE_SHIFT]) +
		    a_pg_offset;
		b_pg_offset = b_offset & PAGE_MASK;
		cnt = min(cnt, PAGE_SIZE - b_pg_offset);
		b_cp = (char *)VM_PAGE_TO_PHYS(mb[b_offset >> PAGE_SHIFT]) +
		    b_pg_offset;
		bcopy(a_cp, b_cp, cnt);
		a_offset += cnt;
		b_offset += cnt;
		xfersize -= cnt;
	}
}

static inline void
moea64_copy_pages_nodmap(mmu_t mmu, vm_page_t *ma, vm_offset_t a_offset,
    vm_page_t *mb, vm_offset_t b_offset, int xfersize)
{
	void *a_cp, *b_cp;
	vm_offset_t a_pg_offset, b_pg_offset;
	int cnt;

	mtx_lock(&moea64_scratchpage_mtx);
	while (xfersize > 0) {
		a_pg_offset = a_offset & PAGE_MASK;
		cnt = min(xfersize, PAGE_SIZE - a_pg_offset);
		moea64_set_scratchpage_pa(mmu, 0,
		    VM_PAGE_TO_PHYS(ma[a_offset >> PAGE_SHIFT]));
		a_cp = (char *)moea64_scratchpage_va[0] + a_pg_offset;
		b_pg_offset = b_offset & PAGE_MASK;
		cnt = min(cnt, PAGE_SIZE - b_pg_offset);
		moea64_set_scratchpage_pa(mmu, 1,
		    VM_PAGE_TO_PHYS(mb[b_offset >> PAGE_SHIFT]));
		b_cp = (char *)moea64_scratchpage_va[1] + b_pg_offset;
		bcopy(a_cp, b_cp, cnt);
		a_offset += cnt;
		b_offset += cnt;
		xfersize -= cnt;
	}
	mtx_unlock(&moea64_scratchpage_mtx);
}

1173190681Snwhitehornvoid
1174248280Skibmoea64_copy_pages(mmu_t mmu, vm_page_t *ma, vm_offset_t a_offset,
1175248280Skib    vm_page_t *mb, vm_offset_t b_offset, int xfersize)
1176248280Skib{
1177248280Skib
1178248280Skib	if (hw_direct_map) {
1179248280Skib		moea64_copy_pages_dmap(mmu, ma, a_offset, mb, b_offset,
1180248280Skib		    xfersize);
1181248280Skib	} else {
1182248280Skib		moea64_copy_pages_nodmap(mmu, ma, a_offset, mb, b_offset,
1183248280Skib		    xfersize);
1184248280Skib	}
1185248280Skib}
1186248280Skib
1187248280Skibvoid
1188190681Snwhitehornmoea64_zero_page_area(mmu_t mmu, vm_page_t m, int off, int size)
1189190681Snwhitehorn{
1190190681Snwhitehorn	vm_offset_t pa = VM_PAGE_TO_PHYS(m);
1191190681Snwhitehorn
1192190681Snwhitehorn	if (size + off > PAGE_SIZE)
1193190681Snwhitehorn		panic("moea64_zero_page_area: size + off > PAGE_SIZE");
1194190681Snwhitehorn
1195209975Snwhitehorn	if (hw_direct_map) {
1196209975Snwhitehorn		bzero((caddr_t)pa + off, size);
1197209975Snwhitehorn	} else {
1198209975Snwhitehorn		mtx_lock(&moea64_scratchpage_mtx);
1199216174Snwhitehorn		moea64_set_scratchpage_pa(mmu, 0, pa);
1200209975Snwhitehorn		bzero((caddr_t)moea64_scratchpage_va[0] + off, size);
1201209975Snwhitehorn		mtx_unlock(&moea64_scratchpage_mtx);
1202209975Snwhitehorn	}
1203190681Snwhitehorn}
1204190681Snwhitehorn
1205204269Snwhitehorn/*
1206204269Snwhitehorn * Zero a page of physical memory, mapping it temporarily if necessary.
1207204269Snwhitehorn */
1208190681Snwhitehornvoid
1209204269Snwhitehornmoea64_zero_page(mmu_t mmu, vm_page_t m)
1210204269Snwhitehorn{
1211204269Snwhitehorn	vm_offset_t pa = VM_PAGE_TO_PHYS(m);
1212209975Snwhitehorn	vm_offset_t va, off;
1213204269Snwhitehorn
1214209975Snwhitehorn	if (!hw_direct_map) {
1215209975Snwhitehorn		mtx_lock(&moea64_scratchpage_mtx);
1216204269Snwhitehorn
1217216174Snwhitehorn		moea64_set_scratchpage_pa(mmu, 0, pa);
1218209975Snwhitehorn		va = moea64_scratchpage_va[0];
1219209975Snwhitehorn	} else {
1220209975Snwhitehorn		va = pa;
1221209975Snwhitehorn	}
1222209975Snwhitehorn
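	/*
	 * dcbz zeroes an entire data cache block at a time, so stepping
	 * by cacheline_size touches every block of the page exactly once.
	 */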
1223204269Snwhitehorn	for (off = 0; off < PAGE_SIZE; off += cacheline_size)
1224209975Snwhitehorn		__asm __volatile("dcbz 0,%0" :: "r"(va + off));
1225209975Snwhitehorn
1226209975Snwhitehorn	if (!hw_direct_map)
1227209975Snwhitehorn		mtx_unlock(&moea64_scratchpage_mtx);
1228204269Snwhitehorn}
1229204269Snwhitehorn
1230204269Snwhitehornvoid
1231190681Snwhitehornmoea64_zero_page_idle(mmu_t mmu, vm_page_t m)
1232190681Snwhitehorn{
1233190681Snwhitehorn
1234190681Snwhitehorn	moea64_zero_page(mmu, m);
1235190681Snwhitehorn}
1236190681Snwhitehorn
1237190681Snwhitehorn/*
1238190681Snwhitehorn * Map the given physical page at the specified virtual address in the
1239190681Snwhitehorn * target pmap with the protection requested.  If specified the page
1240190681Snwhitehorn * will be wired down.
1241190681Snwhitehorn */
1242233957Snwhitehorn
1243269728Skibint
1244190681Snwhitehornmoea64_enter(mmu_t mmu, pmap_t pmap, vm_offset_t va, vm_page_t m,
1245269728Skib    vm_prot_t prot, u_int flags, int8_t psind)
1246190681Snwhitehorn{
1247190681Snwhitehorn	struct		pvo_head *pvo_head;
1248190681Snwhitehorn	uma_zone_t	zone;
1249190681Snwhitehorn	uint64_t	pte_lo;
1250190681Snwhitehorn	u_int		pvo_flags;
1251190681Snwhitehorn	int		error;
1252190681Snwhitehorn
1253269388Salc	if ((m->oflags & VPO_UNMANAGED) == 0 && !vm_page_xbusied(m))
1254269388Salc		VM_OBJECT_ASSERT_LOCKED(m->object);
1255269388Salc
1256269388Salc	if ((m->oflags & VPO_UNMANAGED) != 0 || !moea64_initialized) {
1257235689Snwhitehorn		pvo_head = NULL;
1258190681Snwhitehorn		zone = moea64_upvo_zone;
1259190681Snwhitehorn		pvo_flags = 0;
1260190681Snwhitehorn	} else {
1261190681Snwhitehorn		pvo_head = vm_page_to_pvoh(m);
1262190681Snwhitehorn		zone = moea64_mpvo_zone;
1263190681Snwhitehorn		pvo_flags = PVO_MANAGED;
1264190681Snwhitehorn	}
1265190681Snwhitehorn
1266213307Snwhitehorn	pte_lo = moea64_calc_wimg(VM_PAGE_TO_PHYS(m), pmap_page_get_memattr(m));
1267190681Snwhitehorn
1268190681Snwhitehorn	if (prot & VM_PROT_WRITE) {
1269190681Snwhitehorn		pte_lo |= LPTE_BW;
1270208810Salc		if (pmap_bootstrapped &&
1271224746Skib		    (m->oflags & VPO_UNMANAGED) == 0)
1272225418Skib			vm_page_aflag_set(m, PGA_WRITEABLE);
1273190681Snwhitehorn	} else
1274190681Snwhitehorn		pte_lo |= LPTE_BR;
1275190681Snwhitehorn
1276217341Snwhitehorn	if ((prot & VM_PROT_EXECUTE) == 0)
1277217341Snwhitehorn		pte_lo |= LPTE_NOEXEC;
1278190681Snwhitehorn
1279269728Skib	if ((flags & PMAP_ENTER_WIRED) != 0)
1280190681Snwhitehorn		pvo_flags |= PVO_WIRED;
1281190681Snwhitehorn
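	/*
	 * Retry until the PVO insertion succeeds or fails for a reason
	 * other than resource shortage; sleepable callers wait for the
	 * VM system to reclaim memory between attempts.
	 */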
1282269728Skib	for (;;) {
1283269728Skib		LOCK_TABLE_WR();
1284269728Skib		PMAP_LOCK(pmap);
1285269728Skib		error = moea64_pvo_enter(mmu, pmap, zone, pvo_head, va,
1286269728Skib		    VM_PAGE_TO_PHYS(m), pte_lo, pvo_flags, psind);
1287269728Skib		PMAP_UNLOCK(pmap);
1288269728Skib		UNLOCK_TABLE_WR();
1289269728Skib		if (error != ENOMEM)
1290269728Skib			break;
1291269728Skib		if ((flags & PMAP_ENTER_NOSLEEP) != 0)
1292269728Skib			return (KERN_RESOURCE_SHORTAGE);
1293269728Skib		VM_OBJECT_ASSERT_UNLOCKED(m->object);
1294269728Skib		VM_WAIT;
1295269728Skib	}
1296190681Snwhitehorn
1297190681Snwhitehorn	/*
1298190681Snwhitehorn	 * Flush the page from the instruction cache if this page is
1299190681Snwhitehorn	 * mapped executable and cacheable.
1300190681Snwhitehorn	 */
1301233949Snwhitehorn	if (pmap != kernel_pmap && !(m->aflags & PGA_EXECUTABLE) &&
1302233949Snwhitehorn	    (pte_lo & (LPTE_I | LPTE_G | LPTE_NOEXEC)) == 0) {
1303233949Snwhitehorn		vm_page_aflag_set(m, PGA_EXECUTABLE);
1304216174Snwhitehorn		moea64_syncicache(mmu, pmap, va, VM_PAGE_TO_PHYS(m), PAGE_SIZE);
1305233949Snwhitehorn	}
1306269728Skib	return (KERN_SUCCESS);
1307190681Snwhitehorn}
1308190681Snwhitehorn
1309190681Snwhitehornstatic void
1310216174Snwhitehornmoea64_syncicache(mmu_t mmu, pmap_t pmap, vm_offset_t va, vm_offset_t pa,
1311216174Snwhitehorn    vm_size_t sz)
1312190681Snwhitehorn{
1313204042Snwhitehorn
1314190681Snwhitehorn	/*
1315190681Snwhitehorn	 * This is much trickier than on older systems because
1316190681Snwhitehorn	 * we can't sync the icache on physical addresses directly
1317190681Snwhitehorn	 * without a direct map. Instead we check a couple of cases
1318190681Snwhitehorn	 * where the memory is already mapped in and, failing that,
1319190681Snwhitehorn	 * use the same trick we use for page zeroing to create
1320190681Snwhitehorn	 * a temporary mapping for this physical address.
1321190681Snwhitehorn	 */
1322190681Snwhitehorn
1323190681Snwhitehorn	if (!pmap_bootstrapped) {
1324190681Snwhitehorn		/*
1325190681Snwhitehorn		 * If PMAP is not bootstrapped, we are likely to be
1326190681Snwhitehorn		 * in real mode.
1327190681Snwhitehorn		 */
1328198341Smarcel		__syncicache((void *)pa, sz);
1329190681Snwhitehorn	} else if (pmap == kernel_pmap) {
1330198341Smarcel		__syncicache((void *)va, sz);
1331209975Snwhitehorn	} else if (hw_direct_map) {
1332209975Snwhitehorn		__syncicache((void *)pa, sz);
1333190681Snwhitehorn	} else {
1334190681Snwhitehorn		/* Use the scratch page to set up a temp mapping */
1335190681Snwhitehorn
1336190681Snwhitehorn		mtx_lock(&moea64_scratchpage_mtx);
1337190681Snwhitehorn
1338216174Snwhitehorn		moea64_set_scratchpage_pa(mmu, 1, pa & ~ADDR_POFF);
1339204042Snwhitehorn		__syncicache((void *)(moea64_scratchpage_va[1] +
1340204042Snwhitehorn		    (va & ADDR_POFF)), sz);
1341190681Snwhitehorn
1342190681Snwhitehorn		mtx_unlock(&moea64_scratchpage_mtx);
1343190681Snwhitehorn	}
1344190681Snwhitehorn}
1345190681Snwhitehorn
1346190681Snwhitehorn/*
1347190681Snwhitehorn * Maps a sequence of resident pages belonging to the same object.
1348190681Snwhitehorn * The sequence begins with the given page m_start.  This page is
1349190681Snwhitehorn * mapped at the given virtual address start.  Each subsequent page is
1350190681Snwhitehorn * mapped at a virtual address that is offset from start by the same
1351190681Snwhitehorn * amount as the page is offset from m_start within the object.  The
1352190681Snwhitehorn * last page in the sequence is the page with the largest offset from
1353190681Snwhitehorn * m_start that can be mapped at a virtual address less than the given
1354190681Snwhitehorn * virtual address end.  Not every virtual page between start and end
1355190681Snwhitehorn * is mapped; only those for which a resident page exists with the
1356190681Snwhitehorn * corresponding offset from m_start are mapped.
1357190681Snwhitehorn */
1358190681Snwhitehornvoid
1359190681Snwhitehornmoea64_enter_object(mmu_t mmu, pmap_t pm, vm_offset_t start, vm_offset_t end,
1360190681Snwhitehorn    vm_page_t m_start, vm_prot_t prot)
1361190681Snwhitehorn{
1362190681Snwhitehorn	vm_page_t m;
1363190681Snwhitehorn	vm_pindex_t diff, psize;
1364190681Snwhitehorn
1365250884Sattilio	VM_OBJECT_ASSERT_LOCKED(m_start->object);
1366250884Sattilio
1367190681Snwhitehorn	psize = atop(end - start);
1368190681Snwhitehorn	m = m_start;
1369190681Snwhitehorn	while (m != NULL && (diff = m->pindex - m_start->pindex) < psize) {
1370233957Snwhitehorn		moea64_enter(mmu, pm, start + ptoa(diff), m, prot &
1371269728Skib		    (VM_PROT_READ | VM_PROT_EXECUTE), PMAP_ENTER_NOSLEEP, 0);
1372190681Snwhitehorn		m = TAILQ_NEXT(m, listq);
1373190681Snwhitehorn	}
1374190681Snwhitehorn}
1375190681Snwhitehorn
1376190681Snwhitehornvoid
1377190681Snwhitehornmoea64_enter_quick(mmu_t mmu, pmap_t pm, vm_offset_t va, vm_page_t m,
1378190681Snwhitehorn    vm_prot_t prot)
1379190681Snwhitehorn{
1380207796Salc
1381269728Skib	moea64_enter(mmu, pm, va, m, prot & (VM_PROT_READ | VM_PROT_EXECUTE),
1382269728Skib	    PMAP_ENTER_NOSLEEP, 0);
1383190681Snwhitehorn}
1384190681Snwhitehorn
1385190681Snwhitehornvm_paddr_t
1386190681Snwhitehornmoea64_extract(mmu_t mmu, pmap_t pm, vm_offset_t va)
1387190681Snwhitehorn{
1388190681Snwhitehorn	struct	pvo_entry *pvo;
1389190681Snwhitehorn	vm_paddr_t pa;
1390190681Snwhitehorn
1391190681Snwhitehorn	PMAP_LOCK(pm);
1392209975Snwhitehorn	pvo = moea64_pvo_find_va(pm, va);
1393190681Snwhitehorn	if (pvo == NULL)
1394190681Snwhitehorn		pa = 0;
1395190681Snwhitehorn	else
1396209975Snwhitehorn		pa = (pvo->pvo_pte.lpte.pte_lo & LPTE_RPGN) |
1397209975Snwhitehorn		    (va - PVO_VADDR(pvo));
1398190681Snwhitehorn	PMAP_UNLOCK(pm);
1399190681Snwhitehorn	return (pa);
1400190681Snwhitehorn}
1401190681Snwhitehorn
1402190681Snwhitehorn/*
1403190681Snwhitehorn * Atomically extract and hold the physical page with the given
1404190681Snwhitehorn * pmap and virtual address pair if that mapping permits the given
1405190681Snwhitehorn * protection.
1406190681Snwhitehorn */
1407190681Snwhitehornvm_page_t
1408190681Snwhitehornmoea64_extract_and_hold(mmu_t mmu, pmap_t pmap, vm_offset_t va, vm_prot_t prot)
1409190681Snwhitehorn{
1410190681Snwhitehorn	struct	pvo_entry *pvo;
1411190681Snwhitehorn	vm_page_t m;
1412207410Skmacy	vm_paddr_t pa;
1413190681Snwhitehorn
1414190681Snwhitehorn	m = NULL;
1415207410Skmacy	pa = 0;
1416190681Snwhitehorn	PMAP_LOCK(pmap);
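	/*
	 * vm_page_pa_tryrelock() may have to drop the pmap lock to take
	 * the page lock for a new physical address; when it does, it
	 * returns nonzero and the PVO lookup must be retried.
	 */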
1417207410Skmacyretry:
1418209975Snwhitehorn	pvo = moea64_pvo_find_va(pmap, va & ~ADDR_POFF);
1419190681Snwhitehorn	if (pvo != NULL && (pvo->pvo_pte.lpte.pte_hi & LPTE_VALID) &&
1420190681Snwhitehorn	    ((pvo->pvo_pte.lpte.pte_lo & LPTE_PP) == LPTE_RW ||
1421190681Snwhitehorn	     (prot & VM_PROT_WRITE) == 0)) {
1422235689Snwhitehorn		if (vm_page_pa_tryrelock(pmap,
1423207410Skmacy			pvo->pvo_pte.lpte.pte_lo & LPTE_RPGN, &pa))
1424207410Skmacy			goto retry;
1425190681Snwhitehorn		m = PHYS_TO_VM_PAGE(pvo->pvo_pte.lpte.pte_lo & LPTE_RPGN);
1426190681Snwhitehorn		vm_page_hold(m);
1427190681Snwhitehorn	}
1428207410Skmacy	PA_UNLOCK_COND(pa);
1429190681Snwhitehorn	PMAP_UNLOCK(pmap);
1430190681Snwhitehorn	return (m);
1431190681Snwhitehorn}
1432190681Snwhitehorn
1433216174Snwhitehornstatic mmu_t installed_mmu;
1434216174Snwhitehorn
1435190681Snwhitehornstatic void *
1436190681Snwhitehornmoea64_uma_page_alloc(uma_zone_t zone, int bytes, u_int8_t *flags, int wait)
1437190681Snwhitehorn{
1438190681Snwhitehorn	/*
1439190681Snwhitehorn	 * This entire routine is a horrible hack to avoid bothering kmem
1440190681Snwhitehorn	 * for new KVA addresses. Because this can get called from inside
1441190681Snwhitehorn	 * kmem allocation routines, calling kmem for a new address here
1442190681Snwhitehorn	 * can lead to recursively locking non-recursive mutexes.
1443190681Snwhitehorn	 */
1444190681Snwhitehorn	vm_offset_t va;
1446190681Snwhitehorn	vm_page_t m;
1447190681Snwhitehorn	int pflags, needed_lock;
1448190681Snwhitehorn
1449190681Snwhitehorn	*flags = UMA_SLAB_PRIV;
1450190681Snwhitehorn	needed_lock = !PMAP_LOCKED(kernel_pmap);
1451243040Skib	pflags = malloc2vm_flags(wait) | VM_ALLOC_WIRED;
1452190681Snwhitehorn
1453190681Snwhitehorn	for (;;) {
1454228522Salc		m = vm_page_alloc(NULL, 0, pflags | VM_ALLOC_NOOBJ);
1455190681Snwhitehorn		if (m == NULL) {
1456190681Snwhitehorn			if (wait & M_NOWAIT)
1457190681Snwhitehorn				return (NULL);
1458190681Snwhitehorn			VM_WAIT;
1459190681Snwhitehorn		} else
1460190681Snwhitehorn			break;
1461190681Snwhitehorn	}
1462190681Snwhitehorn
1463204128Snwhitehorn	va = VM_PAGE_TO_PHYS(m);
1464190681Snwhitehorn
1465233529Snwhitehorn	LOCK_TABLE_WR();
1466233529Snwhitehorn	if (needed_lock)
1467233529Snwhitehorn		PMAP_LOCK(kernel_pmap);
1468233529Snwhitehorn
1469216174Snwhitehorn	moea64_pvo_enter(installed_mmu, kernel_pmap, moea64_upvo_zone,
1470269728Skib	    NULL, va, VM_PAGE_TO_PHYS(m), LPTE_M, PVO_WIRED | PVO_BOOTSTRAP,
1471269728Skib	    0);
1472190681Snwhitehorn
1473190681Snwhitehorn	if (needed_lock)
1474190681Snwhitehorn		PMAP_UNLOCK(kernel_pmap);
1475233529Snwhitehorn	UNLOCK_TABLE_WR();
1476198378Snwhitehorn
1477190681Snwhitehorn	if ((wait & M_ZERO) && (m->flags & PG_ZERO) == 0)
1478190681Snwhitehorn		bzero((void *)va, PAGE_SIZE);
1479190681Snwhitehorn
1480190681Snwhitehorn	return ((void *)va);
1481190681Snwhitehorn}
1482190681Snwhitehorn
1483230767Skibextern int elf32_nxstack;
1484230767Skib
1485190681Snwhitehornvoid
1486190681Snwhitehornmoea64_init(mmu_t mmu)
1487190681Snwhitehorn{
1488190681Snwhitehorn
1489190681Snwhitehorn	CTR0(KTR_PMAP, "moea64_init");
1490190681Snwhitehorn
1491190681Snwhitehorn	moea64_upvo_zone = uma_zcreate("UPVO entry", sizeof (struct pvo_entry),
1492190681Snwhitehorn	    NULL, NULL, NULL, NULL, UMA_ALIGN_PTR,
1493190681Snwhitehorn	    UMA_ZONE_VM | UMA_ZONE_NOFREE);
1494190681Snwhitehorn	moea64_mpvo_zone = uma_zcreate("MPVO entry", sizeof(struct pvo_entry),
1495190681Snwhitehorn	    NULL, NULL, NULL, NULL, UMA_ALIGN_PTR,
1496190681Snwhitehorn	    UMA_ZONE_VM | UMA_ZONE_NOFREE);
1497190681Snwhitehorn
1498190681Snwhitehorn	if (!hw_direct_map) {
1499216174Snwhitehorn		installed_mmu = mmu;
1500190681Snwhitehorn		uma_zone_set_allocf(moea64_upvo_zone,moea64_uma_page_alloc);
1501190681Snwhitehorn		uma_zone_set_allocf(moea64_mpvo_zone,moea64_uma_page_alloc);
1502190681Snwhitehorn	}
1503190681Snwhitehorn
1504230779Skib#ifdef COMPAT_FREEBSD32
1505230767Skib	elf32_nxstack = 1;
1506230779Skib#endif
1507230767Skib
1508190681Snwhitehorn	moea64_initialized = TRUE;
1509190681Snwhitehorn}
1510190681Snwhitehorn
1511190681Snwhitehornboolean_t
1512207155Salcmoea64_is_referenced(mmu_t mmu, vm_page_t m)
1513207155Salc{
1514207155Salc
1515224746Skib	KASSERT((m->oflags & VPO_UNMANAGED) == 0,
1516208574Salc	    ("moea64_is_referenced: page %p is not managed", m));
1517216174Snwhitehorn	return (moea64_query_bit(mmu, m, LPTE_REF));
1518207155Salc}
1519207155Salc
1520207155Salcboolean_t
1521190681Snwhitehornmoea64_is_modified(mmu_t mmu, vm_page_t m)
1522190681Snwhitehorn{
1523190681Snwhitehorn
1524224746Skib	KASSERT((m->oflags & VPO_UNMANAGED) == 0,
1525208504Salc	    ("moea64_is_modified: page %p is not managed", m));
1526208504Salc
1527208504Salc	/*
1528254138Sattilio	 * If the page is not exclusive busied, then PGA_WRITEABLE cannot be
1529225418Skib	 * concurrently set while the object is locked.  Thus, if PGA_WRITEABLE
1530208504Salc	 * is clear, no PTEs can have LPTE_CHG set.
1531208504Salc	 */
1532255503Snwhitehorn	VM_OBJECT_ASSERT_LOCKED(m->object);
1533254138Sattilio	if (!vm_page_xbusied(m) && (m->aflags & PGA_WRITEABLE) == 0)
1534190681Snwhitehorn		return (FALSE);
1535216174Snwhitehorn	return (moea64_query_bit(mmu, m, LPTE_CHG));
1536190681Snwhitehorn}
1537190681Snwhitehorn
1538214617Salcboolean_t
1539214617Salcmoea64_is_prefaultable(mmu_t mmu, pmap_t pmap, vm_offset_t va)
1540214617Salc{
1541214617Salc	struct pvo_entry *pvo;
1542214617Salc	boolean_t rv;
1543214617Salc
1544214617Salc	PMAP_LOCK(pmap);
1545214617Salc	pvo = moea64_pvo_find_va(pmap, va & ~ADDR_POFF);
1546214617Salc	rv = pvo == NULL || (pvo->pvo_pte.lpte.pte_hi & LPTE_VALID) == 0;
1547214617Salc	PMAP_UNLOCK(pmap);
1548214617Salc	return (rv);
1549214617Salc}
1550214617Salc
1551190681Snwhitehornvoid
1552190681Snwhitehornmoea64_clear_modify(mmu_t mmu, vm_page_t m)
1553190681Snwhitehorn{
1554190681Snwhitehorn
1555224746Skib	KASSERT((m->oflags & VPO_UNMANAGED) == 0,
1556208504Salc	    ("moea64_clear_modify: page %p is not managed", m));
1557248084Sattilio	VM_OBJECT_ASSERT_WLOCKED(m->object);
1558254138Sattilio	KASSERT(!vm_page_xbusied(m),
1559254138Sattilio	    ("moea64_clear_modify: page %p is exclusive busied", m));
1560208504Salc
1561208504Salc	/*
1562225418Skib	 * If the page is not PGA_WRITEABLE, then no PTEs can have LPTE_CHG
1563208504Salc	 * set.  If the object containing the page is locked and the page is
1564254138Sattilio	 * not exclusive busied, then PGA_WRITEABLE cannot be concurrently set.
1565208504Salc	 */
1566225418Skib	if ((m->aflags & PGA_WRITEABLE) == 0)
1567190681Snwhitehorn		return;
1568216174Snwhitehorn	moea64_clear_bit(mmu, m, LPTE_CHG);
1569190681Snwhitehorn}
1570190681Snwhitehorn
1571190681Snwhitehorn/*
1572190681Snwhitehorn * Clear the write and modified bits in each of the given page's mappings.
1573190681Snwhitehorn */
1574190681Snwhitehornvoid
1575190681Snwhitehornmoea64_remove_write(mmu_t mmu, vm_page_t m)
1576190681Snwhitehorn{
1577190681Snwhitehorn	struct	pvo_entry *pvo;
1578216174Snwhitehorn	uintptr_t pt;
1579190681Snwhitehorn	pmap_t	pmap;
1580233434Snwhitehorn	uint64_t lo = 0;
1581190681Snwhitehorn
1582224746Skib	KASSERT((m->oflags & VPO_UNMANAGED) == 0,
1583208175Salc	    ("moea64_remove_write: page %p is not managed", m));
1584208175Salc
1585208175Salc	/*
1586254138Sattilio	 * If the page is not exclusive busied, then PGA_WRITEABLE cannot be
1587254138Sattilio	 * set by another thread while the object is locked.  Thus,
1588254138Sattilio	 * if PGA_WRITEABLE is clear, no page table entries need updating.
1589208175Salc	 */
1590248084Sattilio	VM_OBJECT_ASSERT_WLOCKED(m->object);
1591254138Sattilio	if (!vm_page_xbusied(m) && (m->aflags & PGA_WRITEABLE) == 0)
1592190681Snwhitehorn		return;
1593216174Snwhitehorn	powerpc_sync();
1594233529Snwhitehorn	LOCK_TABLE_RD();
1595190681Snwhitehorn	LIST_FOREACH(pvo, vm_page_to_pvoh(m), pvo_vlink) {
1596190681Snwhitehorn		pmap = pvo->pvo_pmap;
1597190681Snwhitehorn		PMAP_LOCK(pmap);
1598190681Snwhitehorn		if ((pvo->pvo_pte.lpte.pte_lo & LPTE_PP) != LPTE_BR) {
1599216174Snwhitehorn			pt = MOEA64_PVO_TO_PTE(mmu, pvo);
1600190681Snwhitehorn			pvo->pvo_pte.lpte.pte_lo &= ~LPTE_PP;
1601190681Snwhitehorn			pvo->pvo_pte.lpte.pte_lo |= LPTE_BR;
1602216174Snwhitehorn			if (pt != -1) {
1603216174Snwhitehorn				MOEA64_PTE_SYNCH(mmu, pt, &pvo->pvo_pte.lpte);
1604190681Snwhitehorn				lo |= pvo->pvo_pte.lpte.pte_lo;
1605190681Snwhitehorn				pvo->pvo_pte.lpte.pte_lo &= ~LPTE_CHG;
1606216174Snwhitehorn				MOEA64_PTE_CHANGE(mmu, pt,
1607216174Snwhitehorn				    &pvo->pvo_pte.lpte, pvo->pvo_vpn);
1608209975Snwhitehorn				if (pvo->pvo_pmap == kernel_pmap)
1609209975Snwhitehorn					isync();
1610190681Snwhitehorn			}
1611190681Snwhitehorn		}
1612233530Snwhitehorn		if ((lo & LPTE_CHG) != 0)
1613233530Snwhitehorn			vm_page_dirty(m);
1614190681Snwhitehorn		PMAP_UNLOCK(pmap);
1615190681Snwhitehorn	}
1616233529Snwhitehorn	UNLOCK_TABLE_RD();
1617225418Skib	vm_page_aflag_clear(m, PGA_WRITEABLE);
1618190681Snwhitehorn}
1619190681Snwhitehorn
1620190681Snwhitehorn/*
1621190681Snwhitehorn *	moea64_ts_referenced:
1622190681Snwhitehorn *
1623190681Snwhitehorn *	Return a count of reference bits for a page, clearing those bits.
1624190681Snwhitehorn *	It is not necessary for every reference bit to be cleared, but it
1625190681Snwhitehorn *	is necessary that 0 only be returned when there are truly no
1626190681Snwhitehorn *	reference bits set.
1627190681Snwhitehorn *
1628190681Snwhitehorn *	XXX: The exact number of bits to check and clear is a matter that
1629190681Snwhitehorn *	should be tested and standardized at some point in the future for
1630190681Snwhitehorn *	optimal aging of shared pages.
1631190681Snwhitehorn */
1632238357Salcint
1633190681Snwhitehornmoea64_ts_referenced(mmu_t mmu, vm_page_t m)
1634190681Snwhitehorn{
1635190681Snwhitehorn
1636224746Skib	KASSERT((m->oflags & VPO_UNMANAGED) == 0,
1637208990Salc	    ("moea64_ts_referenced: page %p is not managed", m));
1638216174Snwhitehorn	return (moea64_clear_bit(mmu, m, LPTE_REF));
1639190681Snwhitehorn}
1640190681Snwhitehorn
1641190681Snwhitehorn/*
1642213307Snwhitehorn * Modify the WIMG settings of all mappings for a page.
1643213307Snwhitehorn */
1644213307Snwhitehornvoid
1645213307Snwhitehornmoea64_page_set_memattr(mmu_t mmu, vm_page_t m, vm_memattr_t ma)
1646213307Snwhitehorn{
1647213307Snwhitehorn	struct	pvo_entry *pvo;
1648213335Snwhitehorn	struct  pvo_head *pvo_head;
1649216174Snwhitehorn	uintptr_t pt;
1650213307Snwhitehorn	pmap_t	pmap;
1651213307Snwhitehorn	uint64_t lo;
1652213307Snwhitehorn
1653224746Skib	if ((m->oflags & VPO_UNMANAGED) != 0) {
1654213335Snwhitehorn		m->md.mdpg_cache_attrs = ma;
1655213335Snwhitehorn		return;
1656213335Snwhitehorn	}
1657213335Snwhitehorn
1658213335Snwhitehorn	pvo_head = vm_page_to_pvoh(m);
1659213307Snwhitehorn	lo = moea64_calc_wimg(VM_PAGE_TO_PHYS(m), ma);
1660233529Snwhitehorn	LOCK_TABLE_RD();
1661213335Snwhitehorn	LIST_FOREACH(pvo, pvo_head, pvo_vlink) {
1662213307Snwhitehorn		pmap = pvo->pvo_pmap;
1663213307Snwhitehorn		PMAP_LOCK(pmap);
1664216174Snwhitehorn		pt = MOEA64_PVO_TO_PTE(mmu, pvo);
1665213307Snwhitehorn		pvo->pvo_pte.lpte.pte_lo &= ~LPTE_WIMG;
1666213307Snwhitehorn		pvo->pvo_pte.lpte.pte_lo |= lo;
1667216174Snwhitehorn		if (pt != -1) {
1668216174Snwhitehorn			MOEA64_PTE_CHANGE(mmu, pt, &pvo->pvo_pte.lpte,
1669213307Snwhitehorn			    pvo->pvo_vpn);
1670213307Snwhitehorn			if (pvo->pvo_pmap == kernel_pmap)
1671213307Snwhitehorn				isync();
1672213307Snwhitehorn		}
1673213307Snwhitehorn		PMAP_UNLOCK(pmap);
1674213307Snwhitehorn	}
1675233529Snwhitehorn	UNLOCK_TABLE_RD();
1676213307Snwhitehorn	m->md.mdpg_cache_attrs = ma;
1677213307Snwhitehorn}
1678213307Snwhitehorn
1679213307Snwhitehorn/*
1680190681Snwhitehorn * Map a wired page into kernel virtual address space.
1681190681Snwhitehorn */
1682190681Snwhitehornvoid
1683213307Snwhitehornmoea64_kenter_attr(mmu_t mmu, vm_offset_t va, vm_offset_t pa, vm_memattr_t ma)
1684190681Snwhitehorn{
1685190681Snwhitehorn	uint64_t	pte_lo;
1686190681Snwhitehorn	int		error;
1687190681Snwhitehorn
1688213307Snwhitehorn	pte_lo = moea64_calc_wimg(pa, ma);
1689190681Snwhitehorn
1690233529Snwhitehorn	LOCK_TABLE_WR();
1691190681Snwhitehorn	PMAP_LOCK(kernel_pmap);
1692216174Snwhitehorn	error = moea64_pvo_enter(mmu, kernel_pmap, moea64_upvo_zone,
1693269728Skib	    NULL, va, pa, pte_lo, PVO_WIRED, 0);
1694233529Snwhitehorn	PMAP_UNLOCK(kernel_pmap);
1695233529Snwhitehorn	UNLOCK_TABLE_WR();
1696190681Snwhitehorn
1697190681Snwhitehorn	if (error != 0 && error != ENOENT)
1698209975Snwhitehorn		panic("moea64_kenter_attr: failed to enter va %#zx pa %#zx: %d",
1698209975Snwhitehorn		    va,
1699190681Snwhitehorn		    pa, error);
1700190681Snwhitehorn}
1701190681Snwhitehorn
1702213307Snwhitehornvoid
1703236019Srajmoea64_kenter(mmu_t mmu, vm_offset_t va, vm_paddr_t pa)
1704213307Snwhitehorn{
1705213307Snwhitehorn
1706213307Snwhitehorn	moea64_kenter_attr(mmu, va, pa, VM_MEMATTR_DEFAULT);
1707213307Snwhitehorn}
1708213307Snwhitehorn
1709190681Snwhitehorn/*
1710190681Snwhitehorn * Extract the physical page address associated with the given kernel virtual
1711190681Snwhitehorn * address.
1712190681Snwhitehorn */
1713236019Srajvm_paddr_t
1714190681Snwhitehornmoea64_kextract(mmu_t mmu, vm_offset_t va)
1715190681Snwhitehorn{
1716190681Snwhitehorn	struct		pvo_entry *pvo;
1717190681Snwhitehorn	vm_paddr_t pa;
1718190681Snwhitehorn
1719205370Snwhitehorn	/*
1720205370Snwhitehorn	 * Shortcut the direct-mapped case when applicable.  We never put
1721205370Snwhitehorn	 * anything but 1:1 mappings below VM_MIN_KERNEL_ADDRESS.
1722205370Snwhitehorn	 */
1723205370Snwhitehorn	if (va < VM_MIN_KERNEL_ADDRESS)
1724205370Snwhitehorn		return (va);
1725205370Snwhitehorn
1726190681Snwhitehorn	PMAP_LOCK(kernel_pmap);
1727209975Snwhitehorn	pvo = moea64_pvo_find_va(kernel_pmap, va);
1728209975Snwhitehorn	KASSERT(pvo != NULL, ("moea64_kextract: no addr found for %#" PRIxPTR,
1729209975Snwhitehorn	    va));
1730223471Sandreast	pa = (pvo->pvo_pte.lpte.pte_lo & LPTE_RPGN) | (va - PVO_VADDR(pvo));
1731190681Snwhitehorn	PMAP_UNLOCK(kernel_pmap);
1732190681Snwhitehorn	return (pa);
1733190681Snwhitehorn}
1734190681Snwhitehorn
1735190681Snwhitehorn/*
1736190681Snwhitehorn * Remove a wired page from kernel virtual address space.
1737190681Snwhitehorn */
1738190681Snwhitehornvoid
1739190681Snwhitehornmoea64_kremove(mmu_t mmu, vm_offset_t va)
1740190681Snwhitehorn{
1741190681Snwhitehorn	moea64_remove(mmu, kernel_pmap, va, va + PAGE_SIZE);
1742190681Snwhitehorn}
1743190681Snwhitehorn
1744190681Snwhitehorn/*
1745190681Snwhitehorn * Map a range of physical addresses into kernel virtual address space.
1746190681Snwhitehorn *
1747190681Snwhitehorn * The value passed in *virt is a suggested virtual address for the mapping.
1748190681Snwhitehorn * Architectures which can support a direct-mapped physical to virtual region
1749190681Snwhitehorn * can return the appropriate address within that region, leaving '*virt'
1750190681Snwhitehorn * unchanged.  We cannot and therefore do not; *virt is updated with the
1751190681Snwhitehorn * first usable address after the mapped region.
1752190681Snwhitehorn */
1753190681Snwhitehornvm_offset_t
1754236019Srajmoea64_map(mmu_t mmu, vm_offset_t *virt, vm_paddr_t pa_start,
1755236019Sraj    vm_paddr_t pa_end, int prot)
1756190681Snwhitehorn{
1757190681Snwhitehorn	vm_offset_t	sva, va;
1758190681Snwhitehorn
1759190681Snwhitehorn	sva = *virt;
1760190681Snwhitehorn	va = sva;
1761190681Snwhitehorn	for (; pa_start < pa_end; pa_start += PAGE_SIZE, va += PAGE_SIZE)
1762190681Snwhitehorn		moea64_kenter(mmu, va, pa_start);
1763190681Snwhitehorn	*virt = va;
1764190681Snwhitehorn
1765190681Snwhitehorn	return (sva);
1766190681Snwhitehorn}
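
/*
 * Illustrative caller sketch for moea64_map(); not compiled, and the
 * helper name and arguments here are hypothetical.  It demonstrates the
 * contract that the return value is the start of the mapping while the
 * virtual-address hint advances past the mapped region.
 */
#if 0
static vm_offset_t
example_map_two_pages(mmu_t mmu, vm_offset_t hint, vm_paddr_t pa)
{
	vm_offset_t va, sva;

	va = hint;
	sva = moea64_map(mmu, &va, pa, pa + 2 * PAGE_SIZE, VM_PROT_READ);
	/* sva == hint, and va has advanced to sva + 2 * PAGE_SIZE. */
	return (sva);
}
#endif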
1767190681Snwhitehorn
1768190681Snwhitehorn/*
1769190681Snwhitehorn * Returns true if the pmap's pv is one of the first
1770190681Snwhitehorn * 16 pvs linked to from this page.  This count may
1771190681Snwhitehorn * be changed upwards or downwards in the future; it
1772190681Snwhitehorn * is only necessary that true be returned for a small
1773190681Snwhitehorn * subset of pmaps for proper page aging.
1774190681Snwhitehorn */
1775190681Snwhitehornboolean_t
1776190681Snwhitehornmoea64_page_exists_quick(mmu_t mmu, pmap_t pmap, vm_page_t m)
1777190681Snwhitehorn{
1778190681Snwhitehorn        int loops;
1779190681Snwhitehorn	struct pvo_entry *pvo;
1780208990Salc	boolean_t rv;
1781190681Snwhitehorn
1782224746Skib	KASSERT((m->oflags & VPO_UNMANAGED) == 0,
1783208990Salc	    ("moea64_page_exists_quick: page %p is not managed", m));
1784190681Snwhitehorn	loops = 0;
1785208990Salc	rv = FALSE;
1786233529Snwhitehorn	LOCK_TABLE_RD();
1787190681Snwhitehorn	LIST_FOREACH(pvo, vm_page_to_pvoh(m), pvo_vlink) {
1788208990Salc		if (pvo->pvo_pmap == pmap) {
1789208990Salc			rv = TRUE;
1790208990Salc			break;
1791208990Salc		}
1792190681Snwhitehorn		if (++loops >= 16)
1793190681Snwhitehorn			break;
1794190681Snwhitehorn	}
1795233529Snwhitehorn	UNLOCK_TABLE_RD();
1796208990Salc	return (rv);
1797190681Snwhitehorn}
1798190681Snwhitehorn
1799190681Snwhitehorn/*
1800190681Snwhitehorn * Return the number of managed mappings to the given physical page
1801190681Snwhitehorn * that are wired.
1802190681Snwhitehorn */
1803190681Snwhitehornint
1804190681Snwhitehornmoea64_page_wired_mappings(mmu_t mmu, vm_page_t m)
1805190681Snwhitehorn{
1806190681Snwhitehorn	struct pvo_entry *pvo;
1807190681Snwhitehorn	int count;
1808190681Snwhitehorn
1809190681Snwhitehorn	count = 0;
1810224746Skib	if ((m->oflags & VPO_UNMANAGED) != 0)
1811190681Snwhitehorn		return (count);
1812233529Snwhitehorn	LOCK_TABLE_RD();
1813190681Snwhitehorn	LIST_FOREACH(pvo, vm_page_to_pvoh(m), pvo_vlink)
1814190681Snwhitehorn		if ((pvo->pvo_vaddr & PVO_WIRED) != 0)
1815190681Snwhitehorn			count++;
1816233529Snwhitehorn	UNLOCK_TABLE_RD();
1817190681Snwhitehorn	return (count);
1818190681Snwhitehorn}
1819190681Snwhitehorn
1820209975Snwhitehornstatic uintptr_t	moea64_vsidcontext;
1821190681Snwhitehorn
1822209975Snwhitehornuintptr_t
1823209975Snwhitehornmoea64_get_unique_vsid(void)
1823209975Snwhitehorn{
1824209975Snwhitehorn	u_int entropy;
1825209975Snwhitehorn	register_t hash;
1826209975Snwhitehorn	uint32_t mask;
1827209975Snwhitehorn	int i;
1828190681Snwhitehorn
1829190681Snwhitehorn	entropy = 0;
1830190681Snwhitehorn	__asm __volatile("mftb %0" : "=r"(entropy));
1831190681Snwhitehorn
1832211967Snwhitehorn	mtx_lock(&moea64_slb_mutex);
1833209975Snwhitehorn	for (i = 0; i < NVSIDS; i += VSID_NBPW) {
1834209975Snwhitehorn		u_int	n;
1835190681Snwhitehorn
1836190681Snwhitehorn		/*
1837190681Snwhitehorn		 * Create a new value by multiplying by a prime and adding in
1838190681Snwhitehorn		 * entropy from the timebase register.  This is to make the
1839190681Snwhitehorn		 * VSID more random so that the PT hash function collides
1840190681Snwhitehorn		 * less often.  (Note that the prime causes gcc to do shifts
1841190681Snwhitehorn		 * instead of a multiply.)
1842190681Snwhitehorn		 */
1843190681Snwhitehorn		moea64_vsidcontext = (moea64_vsidcontext * 0x1105) + entropy;
1844209975Snwhitehorn		hash = moea64_vsidcontext & (NVSIDS - 1);
1845190681Snwhitehorn		if (hash == 0)		/* 0 is special, avoid it */
1846190681Snwhitehorn			continue;
1847190681Snwhitehorn		n = hash >> 5;
1848190681Snwhitehorn		mask = 1 << (hash & (VSID_NBPW - 1));
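		/*
		 * Worked example (illustrative): a hash of 0x123 (291)
		 * selects bitmap word n = 291 >> 5 = 9 and the bit
		 * mask = 1 << (291 & 31) = 1 << 3 within that word.
		 */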
1849209975Snwhitehorn		hash = (moea64_vsidcontext & VSID_HASHMASK);
1850190681Snwhitehorn		if (moea64_vsid_bitmap[n] & mask) {	/* collision? */
1851190681Snwhitehorn			/* anything free in this bucket? */
1852190681Snwhitehorn			if (moea64_vsid_bitmap[n] == 0xffffffff) {
1853190681Snwhitehorn				entropy = (moea64_vsidcontext >> 20);
1854190681Snwhitehorn				continue;
1855190681Snwhitehorn			}
1856212322Snwhitehorn			i = ffs(~moea64_vsid_bitmap[n]) - 1;
1857190681Snwhitehorn			mask = 1 << i;
1858209975Snwhitehorn			hash &= VSID_HASHMASK & ~(VSID_NBPW - 1);
1859190681Snwhitehorn			hash |= i;
1860190681Snwhitehorn		}
1861212322Snwhitehorn		KASSERT(!(moea64_vsid_bitmap[n] & mask),
1862212331Snwhitehorn		    ("Allocating in-use VSID %#zx\n", hash));
1863190681Snwhitehorn		moea64_vsid_bitmap[n] |= mask;
1864211967Snwhitehorn		mtx_unlock(&moea64_slb_mutex);
1865209975Snwhitehorn		return (hash);
1866190681Snwhitehorn	}
1867190681Snwhitehorn
1868211967Snwhitehorn	mtx_unlock(&moea64_slb_mutex);
1869209975Snwhitehorn	panic("%s: out of segments",__func__);
1870190681Snwhitehorn}
1871190681Snwhitehorn
1872209975Snwhitehorn#ifdef __powerpc64__
1873209975Snwhitehornvoid
1874209975Snwhitehornmoea64_pinit(mmu_t mmu, pmap_t pmap)
1875209975Snwhitehorn{
1876254667Skib
1877235689Snwhitehorn	RB_INIT(&pmap->pmap_pvo);
1878209975Snwhitehorn
1879212715Snwhitehorn	pmap->pm_slb_tree_root = slb_alloc_tree();
1880209975Snwhitehorn	pmap->pm_slb = slb_alloc_user_cache();
1881212722Snwhitehorn	pmap->pm_slb_len = 0;
1882209975Snwhitehorn}
1883209975Snwhitehorn#else
1884209975Snwhitehornvoid
1885209975Snwhitehornmoea64_pinit(mmu_t mmu, pmap_t pmap)
1886209975Snwhitehorn{
1887209975Snwhitehorn	int	i;
1888212308Snwhitehorn	uint32_t hash;
1889209975Snwhitehorn
1890235689Snwhitehorn	RB_INIT(&pmap->pmap_pvo);
1891209975Snwhitehorn
1892209975Snwhitehorn	if (pmap_bootstrapped)
1893209975Snwhitehorn		pmap->pmap_phys = (pmap_t)moea64_kextract(mmu,
1894209975Snwhitehorn		    (vm_offset_t)pmap);
1895209975Snwhitehorn	else
1896209975Snwhitehorn		pmap->pmap_phys = pmap;
1897209975Snwhitehorn
1898209975Snwhitehorn	/*
1899209975Snwhitehorn	 * Allocate some segment registers for this pmap.
1900209975Snwhitehorn	 */
1901209975Snwhitehorn	hash = moea64_get_unique_vsid();
1902209975Snwhitehorn
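	/*
	 * Each of the sixteen 256MB segments gets its own VSID derived
	 * from the per-pmap hash and the segment index.
	 */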
1903209975Snwhitehorn	for (i = 0; i < 16; i++)
1904209975Snwhitehorn		pmap->pm_sr[i] = VSID_MAKE(i, hash);
1905212308Snwhitehorn
1906212308Snwhitehorn	KASSERT(pmap->pm_sr[0] != 0, ("moea64_pinit: pm_sr[0] = 0"));
1907209975Snwhitehorn}
1908209975Snwhitehorn#endif
1909209975Snwhitehorn
1910190681Snwhitehorn/*
1911190681Snwhitehorn * Initialize the pmap associated with process 0.
1912190681Snwhitehorn */
1913190681Snwhitehornvoid
1914190681Snwhitehornmoea64_pinit0(mmu_t mmu, pmap_t pm)
1915190681Snwhitehorn{
1916254667Skib
1917254667Skib	PMAP_LOCK_INIT(pm);
1918190681Snwhitehorn	moea64_pinit(mmu, pm);
1919190681Snwhitehorn	bzero(&pm->pm_stats, sizeof(pm->pm_stats));
1920190681Snwhitehorn}
1921190681Snwhitehorn
1922190681Snwhitehorn/*
1923190681Snwhitehorn * Set the physical protection on the specified range of this map as requested.
1924190681Snwhitehorn */
1925233011Snwhitehornstatic void
1926233011Snwhitehornmoea64_pvo_protect(mmu_t mmu,  pmap_t pm, struct pvo_entry *pvo, vm_prot_t prot)
1927233011Snwhitehorn{
1928233011Snwhitehorn	uintptr_t pt;
1929233949Snwhitehorn	struct	vm_page *pg;
1930233436Snwhitehorn	uint64_t oldlo;
1931233011Snwhitehorn
1932233529Snwhitehorn	PMAP_LOCK_ASSERT(pm, MA_OWNED);
1933233529Snwhitehorn
1934233011Snwhitehorn	/*
1935233011Snwhitehorn	 * Grab the PTE pointer before we diddle with the cached PTE
1936233011Snwhitehorn	 * copy.
1937233011Snwhitehorn	 */
1938233011Snwhitehorn	pt = MOEA64_PVO_TO_PTE(mmu, pvo);
1939233011Snwhitehorn
1940233011Snwhitehorn	/*
1941233011Snwhitehorn	 * Change the protection of the page.
1942233011Snwhitehorn	 */
1943233436Snwhitehorn	oldlo = pvo->pvo_pte.lpte.pte_lo;
1944233011Snwhitehorn	pvo->pvo_pte.lpte.pte_lo &= ~LPTE_PP;
1945233011Snwhitehorn	pvo->pvo_pte.lpte.pte_lo &= ~LPTE_NOEXEC;
1946233011Snwhitehorn	if ((prot & VM_PROT_EXECUTE) == 0)
1947233011Snwhitehorn		pvo->pvo_pte.lpte.pte_lo |= LPTE_NOEXEC;
1948233436Snwhitehorn	if (prot & VM_PROT_WRITE)
1949233436Snwhitehorn		pvo->pvo_pte.lpte.pte_lo |= LPTE_BW;
1950233436Snwhitehorn	else
1951233436Snwhitehorn		pvo->pvo_pte.lpte.pte_lo |= LPTE_BR;
1952233011Snwhitehorn
1953233949Snwhitehorn	pg = PHYS_TO_VM_PAGE(pvo->pvo_pte.lpte.pte_lo & LPTE_RPGN);
1954233949Snwhitehorn
1955233011Snwhitehorn	/*
1956233011Snwhitehorn	 * If the PVO is in the page table, update that pte as well.
1957233011Snwhitehorn	 */
1958234155Snwhitehorn	if (pt != -1)
1959233011Snwhitehorn		MOEA64_PTE_CHANGE(mmu, pt, &pvo->pvo_pte.lpte,
1960233011Snwhitehorn		    pvo->pvo_vpn);
1961234155Snwhitehorn	if (pm != kernel_pmap && pg != NULL && !(pg->aflags & PGA_EXECUTABLE) &&
1962234155Snwhitehorn	    (pvo->pvo_pte.lpte.pte_lo & (LPTE_I | LPTE_G | LPTE_NOEXEC)) == 0) {
1963234155Snwhitehorn		if ((pg->oflags & VPO_UNMANAGED) == 0)
1964233949Snwhitehorn			vm_page_aflag_set(pg, PGA_EXECUTABLE);
1965234155Snwhitehorn		moea64_syncicache(mmu, pm, PVO_VADDR(pvo),
1966234155Snwhitehorn		    pvo->pvo_pte.lpte.pte_lo & LPTE_RPGN, PAGE_SIZE);
1967233011Snwhitehorn	}
1968233434Snwhitehorn
1969233434Snwhitehorn	/*
1970233436Snwhitehorn	 * Update vm about the REF/CHG bits if the page is managed and we have
1971233436Snwhitehorn	 * removed write access.
1972233434Snwhitehorn	 */
1973233436Snwhitehorn	if ((pvo->pvo_vaddr & PVO_MANAGED) == PVO_MANAGED &&
1974253272Snwhitehorn	    (oldlo & LPTE_PP) != LPTE_BR && !(prot & VM_PROT_WRITE)) {
1975233434Snwhitehorn		if (pg != NULL) {
1976233434Snwhitehorn			if (pvo->pvo_pte.lpte.pte_lo & LPTE_CHG)
1977233434Snwhitehorn				vm_page_dirty(pg);
1978233434Snwhitehorn			if (pvo->pvo_pte.lpte.pte_lo & LPTE_REF)
1979233434Snwhitehorn				vm_page_aflag_set(pg, PGA_REFERENCED);
1980233434Snwhitehorn		}
1981233434Snwhitehorn	}
1982233011Snwhitehorn}
1983233011Snwhitehorn
1984190681Snwhitehornvoid
1985190681Snwhitehornmoea64_protect(mmu_t mmu, pmap_t pm, vm_offset_t sva, vm_offset_t eva,
1986190681Snwhitehorn    vm_prot_t prot)
1987190681Snwhitehorn{
1988235689Snwhitehorn	struct	pvo_entry *pvo, *tpvo, key;
1989190681Snwhitehorn
1990233011Snwhitehorn	CTR4(KTR_PMAP, "moea64_protect: pm=%p sva=%#x eva=%#x prot=%#x", pm,
1991233011Snwhitehorn	    sva, eva, prot);
1992190681Snwhitehorn
1993190681Snwhitehorn	KASSERT(pm == &curproc->p_vmspace->vm_pmap || pm == kernel_pmap,
1994190681Snwhitehorn	    ("moea64_protect: non current pmap"));
1995190681Snwhitehorn
1996190681Snwhitehorn	if ((prot & VM_PROT_READ) == VM_PROT_NONE) {
1997190681Snwhitehorn		moea64_remove(mmu, pm, sva, eva);
1998190681Snwhitehorn		return;
1999190681Snwhitehorn	}
2000190681Snwhitehorn
2001233529Snwhitehorn	LOCK_TABLE_RD();
2002190681Snwhitehorn	PMAP_LOCK(pm);
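	/*
	 * RB_NFIND returns the lowest-addressed PVO at or above sva;
	 * walking with RB_NEXT until PVO_VADDR() reaches eva visits
	 * exactly the mappings inside [sva, eva).
	 */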
2003235689Snwhitehorn	key.pvo_vaddr = sva;
2004235689Snwhitehorn	for (pvo = RB_NFIND(pvo_tree, &pm->pmap_pvo, &key);
2005235689Snwhitehorn	    pvo != NULL && PVO_VADDR(pvo) < eva; pvo = tpvo) {
2006235689Snwhitehorn		tpvo = RB_NEXT(pvo_tree, &pm->pmap_pvo, pvo);
2007235689Snwhitehorn		moea64_pvo_protect(mmu, pm, pvo, prot);
2008190681Snwhitehorn	}
2009233529Snwhitehorn	UNLOCK_TABLE_RD();
2010190681Snwhitehorn	PMAP_UNLOCK(pm);
2011190681Snwhitehorn}
2012190681Snwhitehorn
2013190681Snwhitehorn/*
2014190681Snwhitehorn * Map a list of wired pages into kernel virtual address space.  This is
2015190681Snwhitehorn * intended for temporary mappings which do not need page modification or
2016190681Snwhitehorn * references recorded.  Existing mappings in the region are overwritten.
2017190681Snwhitehorn */
2018190681Snwhitehornvoid
2019190681Snwhitehornmoea64_qenter(mmu_t mmu, vm_offset_t va, vm_page_t *m, int count)
2020190681Snwhitehorn{
2021190681Snwhitehorn	while (count-- > 0) {
2022190681Snwhitehorn		moea64_kenter(mmu, va, VM_PAGE_TO_PHYS(*m));
2023190681Snwhitehorn		va += PAGE_SIZE;
2024190681Snwhitehorn		m++;
2025190681Snwhitehorn	}
2026190681Snwhitehorn}
2027190681Snwhitehorn
2028190681Snwhitehorn/*
2029190681Snwhitehorn * Remove page mappings from kernel virtual address space.  Intended for
2030190681Snwhitehorn * temporary mappings entered by moea64_qenter.
2031190681Snwhitehorn */
2032190681Snwhitehornvoid
2033190681Snwhitehornmoea64_qremove(mmu_t mmu, vm_offset_t va, int count)
2034190681Snwhitehorn{
2035190681Snwhitehorn	while (count-- > 0) {
2036190681Snwhitehorn		moea64_kremove(mmu, va);
2037190681Snwhitehorn		va += PAGE_SIZE;
2038190681Snwhitehorn	}
2039190681Snwhitehorn}
2040190681Snwhitehorn
2041190681Snwhitehornvoid
2042209975Snwhitehornmoea64_release_vsid(uint64_t vsid)
2043209975Snwhitehorn{
2044212044Snwhitehorn	int idx, mask;
2045209975Snwhitehorn
2046212044Snwhitehorn	mtx_lock(&moea64_slb_mutex);
2047212044Snwhitehorn	idx = vsid & (NVSIDS-1);
2048212044Snwhitehorn	mask = 1 << (idx % VSID_NBPW);
2049212044Snwhitehorn	idx /= VSID_NBPW;
2050212308Snwhitehorn	KASSERT(moea64_vsid_bitmap[idx] & mask,
2051212308Snwhitehorn	    ("Freeing unallocated VSID %#jx", vsid));
2052212044Snwhitehorn	moea64_vsid_bitmap[idx] &= ~mask;
2053212044Snwhitehorn	mtx_unlock(&moea64_slb_mutex);
2054209975Snwhitehorn}
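
/*
 * Worked example (illustrative): releasing a VSID whose low bits are
 * 0x123 (291) computes mask = 1 << (291 % 32) = 1 << 3 and word index
 * 291 / 32 = 9, clearing exactly the bit that moea64_get_unique_vsid()
 * set for hash 0x123.
 */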
2055209975Snwhitehorn
2056209975Snwhitehorn
2057209975Snwhitehornvoid
2058190681Snwhitehornmoea64_release(mmu_t mmu, pmap_t pmap)
2059190681Snwhitehorn{
2060190681Snwhitehorn
2061190681Snwhitehorn	/*
2062209975Snwhitehorn	 * Free segment registers' VSIDs
2063190681Snwhitehorn	 */
2064209975Snwhitehorn    #ifdef __powerpc64__
2065212715Snwhitehorn	slb_free_tree(pmap);
2066209975Snwhitehorn	slb_free_user_cache(pmap->pm_slb);
2067209975Snwhitehorn    #else
2068212308Snwhitehorn	KASSERT(pmap->pm_sr[0] != 0, ("moea64_release: pm_sr[0] = 0"));
2069190681Snwhitehorn
2070212308Snwhitehorn	moea64_release_vsid(VSID_TO_HASH(pmap->pm_sr[0]));
2071209975Snwhitehorn    #endif
2072190681Snwhitehorn}
2073190681Snwhitehorn
2074190681Snwhitehorn/*
2075233017Snwhitehorn * Remove all pages mapped by the specified pmap
2076233017Snwhitehorn */
2077233017Snwhitehornvoid
2078233017Snwhitehornmoea64_remove_pages(mmu_t mmu, pmap_t pm)
2079233017Snwhitehorn{
2080233017Snwhitehorn	struct	pvo_entry *pvo, *tpvo;
2081233017Snwhitehorn
2082233529Snwhitehorn	LOCK_TABLE_WR();
2083233017Snwhitehorn	PMAP_LOCK(pm);
2084235689Snwhitehorn	RB_FOREACH_SAFE(pvo, pvo_tree, &pm->pmap_pvo, tpvo) {
2085233434Snwhitehorn		if (!(pvo->pvo_vaddr & PVO_WIRED))
2086233434Snwhitehorn			moea64_pvo_remove(mmu, pvo);
2087233434Snwhitehorn	}
2088233529Snwhitehorn	UNLOCK_TABLE_WR();
2089233017Snwhitehorn	PMAP_UNLOCK(pm);
2090233017Snwhitehorn}
2091233017Snwhitehorn
2092233017Snwhitehorn/*
2093190681Snwhitehorn * Remove the given range of addresses from the specified map.
2094190681Snwhitehorn */
2095190681Snwhitehornvoid
2096190681Snwhitehornmoea64_remove(mmu_t mmu, pmap_t pm, vm_offset_t sva, vm_offset_t eva)
2097190681Snwhitehorn{
2098235689Snwhitehorn	struct	pvo_entry *pvo, *tpvo, key;
2099190681Snwhitehorn
2100233011Snwhitehorn	/*
2101233011Snwhitehorn	 * Perform an unsynchronized read of the resident count.  This is
2101233011Snwhitehorn	 * safe: callers serialize against changes to this range, so a
2101233011Snwhitehorn	 * zero count reliably means there is nothing to remove.
2102233011Snwhitehorn	 */
2103233011Snwhitehorn	if (pm->pm_stats.resident_count == 0)
2104233011Snwhitehorn		return;
2105233011Snwhitehorn
2106233529Snwhitehorn	LOCK_TABLE_WR();
2107190681Snwhitehorn	PMAP_LOCK(pm);
2108235689Snwhitehorn	key.pvo_vaddr = sva;
2109235689Snwhitehorn	for (pvo = RB_NFIND(pvo_tree, &pm->pmap_pvo, &key);
2110235689Snwhitehorn	    pvo != NULL && PVO_VADDR(pvo) < eva; pvo = tpvo) {
2111235689Snwhitehorn		tpvo = RB_NEXT(pvo_tree, &pm->pmap_pvo, pvo);
2112235689Snwhitehorn		moea64_pvo_remove(mmu, pvo);
2113190681Snwhitehorn	}
2114233529Snwhitehorn	UNLOCK_TABLE_WR();
2115190681Snwhitehorn	PMAP_UNLOCK(pm);
2116190681Snwhitehorn}
2117190681Snwhitehorn
2118190681Snwhitehorn/*
2119190681Snwhitehorn * Remove the physical page from all pmaps in which it resides.
2120190681Snwhitehorn * moea64_pvo_remove() reflects PTE changes back to the vm_page.
2121190681Snwhitehorn */
2122190681Snwhitehornvoid
2123190681Snwhitehornmoea64_remove_all(mmu_t mmu, vm_page_t m)
2124190681Snwhitehorn{
2125190681Snwhitehorn	struct	pvo_entry *pvo, *next_pvo;
2126190681Snwhitehorn	pmap_t	pmap;
2127190681Snwhitehorn
2128233529Snwhitehorn	LOCK_TABLE_WR();
2129233949Snwhitehorn	LIST_FOREACH_SAFE(pvo, vm_page_to_pvoh(m), pvo_vlink, next_pvo) {
2130190681Snwhitehorn		pmap = pvo->pvo_pmap;
2131190681Snwhitehorn		PMAP_LOCK(pmap);
2132216174Snwhitehorn		moea64_pvo_remove(mmu, pvo);
2133190681Snwhitehorn		PMAP_UNLOCK(pmap);
2134190681Snwhitehorn	}
2135233529Snwhitehorn	UNLOCK_TABLE_WR();
2136233434Snwhitehorn	if ((m->aflags & PGA_WRITEABLE) && moea64_is_modified(mmu, m))
2137204042Snwhitehorn		vm_page_dirty(m);
2138225418Skib	vm_page_aflag_clear(m, PGA_WRITEABLE);
2139233949Snwhitehorn	vm_page_aflag_clear(m, PGA_EXECUTABLE);
2140190681Snwhitehorn}
2141190681Snwhitehorn
2142190681Snwhitehorn/*
2143190681Snwhitehorn * Allocate a physical page of memory directly from the phys_avail map.
2144190681Snwhitehorn * Can only be called from moea64_bootstrap before avail start and end are
2145190681Snwhitehorn * calculated.
2146190681Snwhitehorn */
2147216174Snwhitehornvm_offset_t
2148190681Snwhitehornmoea64_bootstrap_alloc(vm_size_t size, u_int align)
2149190681Snwhitehorn{
2150190681Snwhitehorn	vm_offset_t	s, e;
2151190681Snwhitehorn	int		i, j;
2152190681Snwhitehorn
2153190681Snwhitehorn	size = round_page(size);
2154190681Snwhitehorn	for (i = 0; phys_avail[i + 1] != 0; i += 2) {
2155190681Snwhitehorn		if (align != 0)
2156190681Snwhitehorn			s = (phys_avail[i] + align - 1) & ~(align - 1);
2157190681Snwhitehorn		else
2158190681Snwhitehorn			s = phys_avail[i];
2159190681Snwhitehorn		e = s + size;
2160190681Snwhitehorn
2161190681Snwhitehorn		if (s < phys_avail[i] || e > phys_avail[i + 1])
2162190681Snwhitehorn			continue;
2163190681Snwhitehorn
2164215159Snwhitehorn		if (s + size > platform_real_maxaddr())
2165215159Snwhitehorn			continue;
2166215159Snwhitehorn
2167190681Snwhitehorn		if (s == phys_avail[i]) {
2168190681Snwhitehorn			phys_avail[i] += size;
2169190681Snwhitehorn		} else if (e == phys_avail[i + 1]) {
2170190681Snwhitehorn			phys_avail[i + 1] -= size;
2171190681Snwhitehorn		} else {
2172190681Snwhitehorn			for (j = phys_avail_count * 2; j > i; j -= 2) {
2173190681Snwhitehorn				phys_avail[j] = phys_avail[j - 2];
2174190681Snwhitehorn				phys_avail[j + 1] = phys_avail[j - 1];
2175190681Snwhitehorn			}
2176190681Snwhitehorn
2177190681Snwhitehorn			phys_avail[i + 3] = phys_avail[i + 1];
2178190681Snwhitehorn			phys_avail[i + 1] = s;
2179190681Snwhitehorn			phys_avail[i + 2] = e;
2180190681Snwhitehorn			phys_avail_count++;
2181190681Snwhitehorn		}
2182190681Snwhitehorn
2183190681Snwhitehorn		return (s);
2184190681Snwhitehorn	}
2185190681Snwhitehorn	panic("moea64_bootstrap_alloc: could not allocate memory");
2186190681Snwhitehorn}
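
/*
 * Worked example for the phys_avail splitting above (illustrative
 * addresses): if an entry holds [0x100000, 0x900000) and a 0x2000-byte
 * allocation lands at s = 0x200000, neither boundary case applies, so
 * the entry is split in place into [0x100000, 0x200000) and
 * [0x202000, 0x900000) and phys_avail_count grows by one.
 */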
2187190681Snwhitehorn
2188190681Snwhitehornstatic int
2189216174Snwhitehornmoea64_pvo_enter(mmu_t mmu, pmap_t pm, uma_zone_t zone,
2190216174Snwhitehorn    struct pvo_head *pvo_head, vm_offset_t va, vm_offset_t pa,
2191269728Skib    uint64_t pte_lo, int flags, int8_t psind __unused)
2192190681Snwhitehorn{
2193190681Snwhitehorn	struct	 pvo_entry *pvo;
2194269365Salc	uintptr_t pt;
2195190681Snwhitehorn	uint64_t vsid;
2196190681Snwhitehorn	int	 first;
2197190681Snwhitehorn	u_int	 ptegidx;
2198190681Snwhitehorn	int	 i;
2199190681Snwhitehorn	int      bootstrap;
2200190681Snwhitehorn
2201190681Snwhitehorn	/*
2202190681Snwhitehorn	 * One nasty thing that can happen here is that the UMA calls to
2203190681Snwhitehorn	 * allocate new PVOs need to map more memory, which calls pvo_enter(),
2204190681Snwhitehorn	 * which calls UMA...
2205190681Snwhitehorn	 *
2206190681Snwhitehorn	 * We break the loop by detecting recursion and allocating out of
2207190681Snwhitehorn	 * the bootstrap pool.
2208190681Snwhitehorn	 */
2209190681Snwhitehorn
2210190681Snwhitehorn	first = 0;
2211190681Snwhitehorn	bootstrap = (flags & PVO_BOOTSTRAP);
2212190681Snwhitehorn
2213190681Snwhitehorn	if (!moea64_initialized)
2214190681Snwhitehorn		bootstrap = 1;
2215190681Snwhitehorn
2216233529Snwhitehorn	PMAP_LOCK_ASSERT(pm, MA_OWNED);
2217233529Snwhitehorn	rw_assert(&moea64_table_lock, RA_WLOCKED);
2218233529Snwhitehorn
2219190681Snwhitehorn	/*
2220190681Snwhitehorn	 * Compute the PTE Group index.
2221190681Snwhitehorn	 */
2222190681Snwhitehorn	va &= ~ADDR_POFF;
2223190681Snwhitehorn	vsid = va_to_vsid(pm, va);
2224209975Snwhitehorn	ptegidx = va_to_pteg(vsid, va, flags & PVO_LARGE);
2225190681Snwhitehorn
2226190681Snwhitehorn	/*
2227190681Snwhitehorn	 * Remove any existing mapping for this page.  Reuse the pvo entry if
2228190681Snwhitehorn	 * there is a mapping.
2229190681Snwhitehorn	 */
2230212363Snwhitehorn	moea64_pvo_enter_calls++;
2231212363Snwhitehorn
2232190681Snwhitehorn	LIST_FOREACH(pvo, &moea64_pvo_table[ptegidx], pvo_olink) {
2233190681Snwhitehorn		if (pvo->pvo_pmap == pm && PVO_VADDR(pvo) == va) {
2234190681Snwhitehorn			if ((pvo->pvo_pte.lpte.pte_lo & LPTE_RPGN) == pa &&
2235217341Snwhitehorn			    (pvo->pvo_pte.lpte.pte_lo & (LPTE_NOEXEC | LPTE_PP))
2236217341Snwhitehorn			    == (pte_lo & (LPTE_NOEXEC | LPTE_PP))) {
2237269365Salc				/*
2238269365Salc				 * The physical page and protection are not
2239269365Salc				 * changing.  Instead, this may be a request
2240269365Salc				 * to change the mapping's wired attribute.
2241269365Salc				 */
2242269365Salc				pt = -1;
2243269365Salc				if ((flags & PVO_WIRED) != 0 &&
2244269365Salc				    (pvo->pvo_vaddr & PVO_WIRED) == 0) {
2245269365Salc					pt = MOEA64_PVO_TO_PTE(mmu, pvo);
2246269365Salc					pvo->pvo_vaddr |= PVO_WIRED;
2247269365Salc					pvo->pvo_pte.lpte.pte_hi |= LPTE_WIRED;
2248269365Salc					pm->pm_stats.wired_count++;
2249269365Salc				} else if ((flags & PVO_WIRED) == 0 &&
2250269365Salc				    (pvo->pvo_vaddr & PVO_WIRED) != 0) {
2251269365Salc					pt = MOEA64_PVO_TO_PTE(mmu, pvo);
2252269365Salc					pvo->pvo_vaddr &= ~PVO_WIRED;
2253269365Salc					pvo->pvo_pte.lpte.pte_hi &= ~LPTE_WIRED;
2254269365Salc					pm->pm_stats.wired_count--;
2255269365Salc				}
2256209975Snwhitehorn			    	if (!(pvo->pvo_pte.lpte.pte_hi & LPTE_VALID)) {
2257269365Salc					KASSERT(pt == -1,
2258269365Salc					    ("moea64_pvo_enter: valid pt"));
2259209975Snwhitehorn					/* Re-insert if spilled */
2260216174Snwhitehorn					i = MOEA64_PTE_INSERT(mmu, ptegidx,
2261209975Snwhitehorn					    &pvo->pvo_pte.lpte);
2262209975Snwhitehorn					if (i >= 0)
2263209975Snwhitehorn						PVO_PTEGIDX_SET(pvo, i);
2264209975Snwhitehorn					moea64_pte_overflow--;
2265269365Salc				} else if (pt != -1) {
2266269365Salc					/*
2267269365Salc					 * The PTE's wired attribute is not a
2268269365Salc					 * hardware feature, so there is no
2269269365Salc					 * need to invalidate any TLB entries.
2270269365Salc					 */
2271269365Salc					MOEA64_PTE_CHANGE(mmu, pt,
2272269365Salc					    &pvo->pvo_pte.lpte, pvo->pvo_vpn);
2273209975Snwhitehorn				}
2274190681Snwhitehorn				return (0);
2275190681Snwhitehorn			}
2276216174Snwhitehorn			moea64_pvo_remove(mmu, pvo);
2277190681Snwhitehorn			break;
2278190681Snwhitehorn		}
2279190681Snwhitehorn	}
2280190681Snwhitehorn
2281190681Snwhitehorn	/*
2282190681Snwhitehorn	 * If we aren't overwriting a mapping, try to allocate.
2283190681Snwhitehorn	 */
2284190681Snwhitehorn	if (bootstrap) {
2285190681Snwhitehorn		if (moea64_bpvo_pool_index >= BPVO_POOL_SIZE) {
2286209975Snwhitehorn			panic("moea64_pvo_enter: bpvo pool exhausted, %d, %d, %zd",
2287190681Snwhitehorn			      moea64_bpvo_pool_index, BPVO_POOL_SIZE,
2288190681Snwhitehorn			      BPVO_POOL_SIZE * sizeof(struct pvo_entry));
2289190681Snwhitehorn		}
2290190681Snwhitehorn		pvo = &moea64_bpvo_pool[moea64_bpvo_pool_index];
2291190681Snwhitehorn		moea64_bpvo_pool_index++;
2292190681Snwhitehorn		bootstrap = 1;
2293190681Snwhitehorn	} else {
2294190681Snwhitehorn		pvo = uma_zalloc(zone, M_NOWAIT);
2295190681Snwhitehorn	}
2296190681Snwhitehorn
2297233529Snwhitehorn	if (pvo == NULL)
2298190681Snwhitehorn		return (ENOMEM);
2299190681Snwhitehorn
2300190681Snwhitehorn	moea64_pvo_entries++;
2301190681Snwhitehorn	pvo->pvo_vaddr = va;
2302209975Snwhitehorn	pvo->pvo_vpn = (uint64_t)((va & ADDR_PIDX) >> ADDR_PIDX_SHFT)
2303209975Snwhitehorn	    | (vsid << 16);
2304190681Snwhitehorn	pvo->pvo_pmap = pm;
2305190681Snwhitehorn	LIST_INSERT_HEAD(&moea64_pvo_table[ptegidx], pvo, pvo_olink);
2306190681Snwhitehorn	pvo->pvo_vaddr &= ~ADDR_POFF;
2307190681Snwhitehorn
2308190681Snwhitehorn	if (flags & PVO_WIRED)
2309190681Snwhitehorn		pvo->pvo_vaddr |= PVO_WIRED;
2310235689Snwhitehorn	if (pvo_head != NULL)
2311190681Snwhitehorn		pvo->pvo_vaddr |= PVO_MANAGED;
2312190681Snwhitehorn	if (bootstrap)
2313190681Snwhitehorn		pvo->pvo_vaddr |= PVO_BOOTSTRAP;
2314209975Snwhitehorn	if (flags & PVO_LARGE)
2315209975Snwhitehorn		pvo->pvo_vaddr |= PVO_LARGE;
2316190681Snwhitehorn
2317190681Snwhitehorn	moea64_pte_create(&pvo->pvo_pte.lpte, vsid, va,
2318209975Snwhitehorn	    (uint64_t)(pa) | pte_lo, flags);
2319190681Snwhitehorn
2320190681Snwhitehorn	/*
2321228412Snwhitehorn	 * Add to pmap list
2322228412Snwhitehorn	 */
2323235689Snwhitehorn	RB_INSERT(pvo_tree, &pm->pmap_pvo, pvo);
2324228412Snwhitehorn
2325228412Snwhitehorn	/*
2326190681Snwhitehorn	 * Remember whether the list was empty, in which case this mapping
2327190681Snwhitehorn	 * is the first one for the page.
2328190681Snwhitehorn	 */
2329235689Snwhitehorn	if (pvo_head != NULL) {
2330235689Snwhitehorn		if (LIST_FIRST(pvo_head) == NULL)
2331235689Snwhitehorn			first = 1;
2332235689Snwhitehorn		LIST_INSERT_HEAD(pvo_head, pvo, pvo_vlink);
2333235689Snwhitehorn	}
2334190681Snwhitehorn
2335209975Snwhitehorn	if (pvo->pvo_vaddr & PVO_WIRED) {
2336209975Snwhitehorn		pvo->pvo_pte.lpte.pte_hi |= LPTE_WIRED;
2337190681Snwhitehorn		pm->pm_stats.wired_count++;
2338209975Snwhitehorn	}
2339190681Snwhitehorn	pm->pm_stats.resident_count++;
2340190681Snwhitehorn
2341190681Snwhitehorn	/*
2342190681Snwhitehorn	 * We hope this succeeds but it isn't required.
2343190681Snwhitehorn	 */
2344216174Snwhitehorn	i = MOEA64_PTE_INSERT(mmu, ptegidx, &pvo->pvo_pte.lpte);
2345190681Snwhitehorn	if (i >= 0) {
2346190681Snwhitehorn		PVO_PTEGIDX_SET(pvo, i);
2347190681Snwhitehorn	} else {
2348190681Snwhitehorn		panic("moea64_pvo_enter: overflow");
2349190681Snwhitehorn		/* NOTREACHED */
2350190681Snwhitehorn	}
2351190681Snwhitehorn
2352204042Snwhitehorn	if (pm == kernel_pmap)
2353204042Snwhitehorn		isync();
2354204042Snwhitehorn
2355209975Snwhitehorn#ifdef __powerpc64__
2356209975Snwhitehorn	/*
2357209975Snwhitehorn	 * Make sure all our bootstrap mappings are in the SLB as soon
2358209975Snwhitehorn	 * as virtual memory is switched on.
2359209975Snwhitehorn	 */
2360209975Snwhitehorn	if (!pmap_bootstrapped)
2361209975Snwhitehorn		moea64_bootstrap_slb_prefault(va, flags & PVO_LARGE);
2362209975Snwhitehorn#endif
2363209975Snwhitehorn
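	/*
	 * ENOENT is not an error here: it tells the caller that this was
	 * the first mapping entered for the page (and callers such as
	 * moea64_kenter_attr() treat it as success).
	 */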
2364190681Snwhitehorn	return (first ? ENOENT : 0);
2365190681Snwhitehorn}
2366190681Snwhitehorn
2367190681Snwhitehornstatic void
2368216174Snwhitehornmoea64_pvo_remove(mmu_t mmu, struct pvo_entry *pvo)
2369190681Snwhitehorn{
2370233949Snwhitehorn	struct	vm_page *pg;
2371216174Snwhitehorn	uintptr_t pt;
2372190681Snwhitehorn
2373233529Snwhitehorn	PMAP_LOCK_ASSERT(pvo->pvo_pmap, MA_OWNED);
2374233529Snwhitehorn	rw_assert(&moea64_table_lock, RA_WLOCKED);
2375233529Snwhitehorn
2376190681Snwhitehorn	/*
2377190681Snwhitehorn	 * If there is an active pte entry, we need to deactivate it (and
2378190681Snwhitehorn	 * save the ref & cfg bits).
2379190681Snwhitehorn	 * save the ref & chg bits).
	pt = MOEA64_PVO_TO_PTE(mmu, pvo);
	if (pt != -1) {
		MOEA64_PTE_UNSET(mmu, pt, &pvo->pvo_pte.lpte, pvo->pvo_vpn);
		PVO_PTEGIDX_CLR(pvo);
	} else {
		moea64_pte_overflow--;
	}

	/*
	 * Update our statistics.
	 */
	pvo->pvo_pmap->pm_stats.resident_count--;
	if (pvo->pvo_vaddr & PVO_WIRED)
		pvo->pvo_pmap->pm_stats.wired_count--;

	/*
	 * Remove this PVO from the pmap list.
	 */
	RB_REMOVE(pvo_tree, &pvo->pvo_pmap->pmap_pvo, pvo);

	/*
	 * Remove this PVO from the overflow list; unless it was allocated
	 * at bootstrap time, it is returned to the pool at the end of this
	 * function.
	 */
	LIST_REMOVE(pvo, pvo_olink);

	/*
	 * Update vm about the REF/CHG bits if the page is managed.
	 */
	pg = PHYS_TO_VM_PAGE(pvo->pvo_pte.lpte.pte_lo & LPTE_RPGN);

	if ((pvo->pvo_vaddr & PVO_MANAGED) == PVO_MANAGED && pg != NULL) {
		LIST_REMOVE(pvo, pvo_vlink);
		if ((pvo->pvo_pte.lpte.pte_lo & LPTE_PP) != LPTE_BR) {
			if (pvo->pvo_pte.lpte.pte_lo & LPTE_CHG)
				vm_page_dirty(pg);
			if (pvo->pvo_pte.lpte.pte_lo & LPTE_REF)
				vm_page_aflag_set(pg, PGA_REFERENCED);
			if (LIST_EMPTY(vm_page_to_pvoh(pg)))
				vm_page_aflag_clear(pg, PGA_WRITEABLE);
		}
		if (LIST_EMPTY(vm_page_to_pvoh(pg)))
			vm_page_aflag_clear(pg, PGA_EXECUTABLE);
	}

	moea64_pvo_entries--;
	moea64_pvo_remove_calls++;

	if (!(pvo->pvo_vaddr & PVO_BOOTSTRAP))
		uma_zfree((pvo->pvo_vaddr & PVO_MANAGED) ? moea64_mpvo_zone :
		    moea64_upvo_zone, pvo);
}

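/*
 * Look up the PVO for a virtual address in the given pmap's RB tree,
 * which is keyed on the page-aligned virtual address.  A hedged sketch
 * of typical use (callers vary; see moea64_sync_icache() below):
 *
 *	PMAP_LOCK(pm);
 *	pvo = moea64_pvo_find_va(pm, va);
 *	if (pvo != NULL)
 *		... inspect pvo->pvo_pte.lpte ...
 *	PMAP_UNLOCK(pm);
 */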
static struct pvo_entry *
moea64_pvo_find_va(pmap_t pm, vm_offset_t va)
{
	struct pvo_entry key;

	key.pvo_vaddr = va & ~ADDR_POFF;
	return (RB_FIND(pvo_tree, &pm->pmap_pvo, &key));
}

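/*
 * Test a REF/CHG bit for a page in two passes: first check the copy of
 * the bits cached in each PVO (cheap), then sync the bits back from any
 * valid hardware PTE and re-test.  powerpc_sync() is issued before the
 * second pass so that pending REF/CHG updates have reached the page
 * table.
 */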
static boolean_t
moea64_query_bit(mmu_t mmu, vm_page_t m, u_int64_t ptebit)
{
	struct	pvo_entry *pvo;
	uintptr_t pt;

	LOCK_TABLE_RD();
	LIST_FOREACH(pvo, vm_page_to_pvoh(m), pvo_vlink) {
		/*
		 * See if we saved the bit off.  If so, return success.
		 */
		if (pvo->pvo_pte.lpte.pte_lo & ptebit) {
			UNLOCK_TABLE_RD();
			return (TRUE);
		}
	}

	/*
	 * No luck, now go through the hard part of looking at the PTEs
	 * themselves.  Sync so that any pending REF/CHG bits are flushed to
	 * the PTEs.
	 */
	powerpc_sync();
	LIST_FOREACH(pvo, vm_page_to_pvoh(m), pvo_vlink) {

		/*
		 * See if this pvo has a valid PTE.  If so, fetch the
		 * REF/CHG bits from the valid PTE.  If the appropriate
		 * ptebit is set, return success.
		 */
		PMAP_LOCK(pvo->pvo_pmap);
		pt = MOEA64_PVO_TO_PTE(mmu, pvo);
		if (pt != -1) {
			MOEA64_PTE_SYNCH(mmu, pt, &pvo->pvo_pte.lpte);
			if (pvo->pvo_pte.lpte.pte_lo & ptebit) {
				PMAP_UNLOCK(pvo->pvo_pmap);
				UNLOCK_TABLE_RD();
				return (TRUE);
			}
		}
		PMAP_UNLOCK(pvo->pvo_pmap);
	}

	UNLOCK_TABLE_RD();
	return (FALSE);
}

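/*
 * Clear a REF/CHG bit in every mapping of a page, both in the cached
 * PVO copy and in any valid hardware PTE.  Returns the number of
 * mappings in which the bit was found set, which callers can use as a
 * "was referenced/modified" count.
 */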
static u_int
moea64_clear_bit(mmu_t mmu, vm_page_t m, u_int64_t ptebit)
{
	u_int	count;
	struct	pvo_entry *pvo;
	uintptr_t pt;

	/*
	 * Sync so that any pending REF/CHG bits are flushed to the PTEs (so
	 * we can reset the right ones).  Note that since the pvo entries and
	 * list heads are accessed via BAT0 and are never placed in the page
	 * table, we don't have to worry about further accesses setting the
	 * REF/CHG bits.
	 */
	powerpc_sync();

	/*
	 * For each pvo entry, clear the pvo's ptebit.  If this pvo has a
	 * valid pte, clear the ptebit from the valid pte as well.
	 */
	count = 0;
	LOCK_TABLE_RD();
	LIST_FOREACH(pvo, vm_page_to_pvoh(m), pvo_vlink) {
		PMAP_LOCK(pvo->pvo_pmap);
		pt = MOEA64_PVO_TO_PTE(mmu, pvo);
		if (pt != -1) {
			MOEA64_PTE_SYNCH(mmu, pt, &pvo->pvo_pte.lpte);
			if (pvo->pvo_pte.lpte.pte_lo & ptebit) {
				count++;
				MOEA64_PTE_CLEAR(mmu, pt, &pvo->pvo_pte.lpte,
				    pvo->pvo_vpn, ptebit);
			}
		}
		pvo->pvo_pte.lpte.pte_lo &= ~ptebit;
		PMAP_UNLOCK(pvo->pvo_pmap);
	}

	UNLOCK_TABLE_RD();
	return (count);
}

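/*
 * Check whether [pa, pa + size) is covered by 1:1 (va == pa) mappings
 * in the kernel pmap.  Despite the boolean_t return type, this follows
 * the existing convention of returning 0 when every page in the range
 * has such a mapping and EFAULT otherwise.
 */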
boolean_t
moea64_dev_direct_mapped(mmu_t mmu, vm_paddr_t pa, vm_size_t size)
{
	struct pvo_entry *pvo, key;
	vm_offset_t ppa;
	int error = 0;

	PMAP_LOCK(kernel_pmap);
	key.pvo_vaddr = ppa = pa & ~ADDR_POFF;
	for (pvo = RB_FIND(pvo_tree, &kernel_pmap->pmap_pvo, &key);
	    ppa < pa + size; ppa += PAGE_SIZE,
	    pvo = RB_NEXT(pvo_tree, &kernel_pmap->pmap_pvo, pvo)) {
		if (pvo == NULL ||
		    (pvo->pvo_pte.lpte.pte_lo & LPTE_RPGN) != ppa) {
			error = EFAULT;
			break;
		}
	}
	PMAP_UNLOCK(kernel_pmap);

	return (error);
}

/*
 * Map a set of physical memory pages into the kernel virtual
 * address space. Return a pointer to where it is mapped. This
 * routine is intended to be used for mapping device memory,
 * NOT real memory.
 */
void *
moea64_mapdev_attr(mmu_t mmu, vm_offset_t pa, vm_size_t size, vm_memattr_t ma)
{
	vm_offset_t va, tmpva, ppa, offset;

	ppa = trunc_page(pa);
	offset = pa & PAGE_MASK;
	size = roundup2(offset + size, PAGE_SIZE);

	va = kva_alloc(size);

	if (va == 0)
		panic("moea64_mapdev: Couldn't alloc kernel virtual memory");

	for (tmpva = va; size > 0;) {
		moea64_kenter_attr(mmu, tmpva, ppa, ma);
		size -= PAGE_SIZE;
		tmpva += PAGE_SIZE;
		ppa += PAGE_SIZE;
	}

	return ((void *)(va + offset));
}

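/*
 * A hedged usage sketch for the mapdev/unmapdev pair; the physical
 * address and size below are made up for illustration, and the sketch
 * assumes VM_MEMATTR_UNCACHEABLE is available on this platform:
 *
 *	void *regs;
 *
 *	regs = moea64_mapdev_attr(mmu, 0xf0000000, PAGE_SIZE,
 *	    VM_MEMATTR_UNCACHEABLE);
 *	... device register accesses through 'regs' ...
 *	moea64_unmapdev(mmu, (vm_offset_t)regs, PAGE_SIZE);
 */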
void *
moea64_mapdev(mmu_t mmu, vm_paddr_t pa, vm_size_t size)
{

	return (moea64_mapdev_attr(mmu, pa, size, VM_MEMATTR_DEFAULT));
}

void
moea64_unmapdev(mmu_t mmu, vm_offset_t va, vm_size_t size)
{
	vm_offset_t base, offset;

	base = trunc_page(va);
	offset = va & PAGE_MASK;
	size = roundup2(offset + size, PAGE_SIZE);

	kva_free(base, size);
}

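/*
 * Make instruction storage coherent for a range that has just been
 * written (e.g. by a debugger inserting a breakpoint): for each
 * resident page, compute the physical address and let
 * moea64_syncicache() flush the data cache and invalidate the
 * instruction cache.  Cache-inhibited (LPTE_I) mappings are skipped.
 * The per-page limit is computed as round_page(va + 1) so that a
 * page-aligned va still makes forward progress.
 */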
void
moea64_sync_icache(mmu_t mmu, pmap_t pm, vm_offset_t va, vm_size_t sz)
{
	struct pvo_entry *pvo;
	vm_offset_t lim;
	vm_paddr_t pa;
	vm_size_t len;

	PMAP_LOCK(pm);
	while (sz > 0) {
		lim = round_page(va + 1);
		len = MIN(lim - va, sz);
		pvo = moea64_pvo_find_va(pm, va & ~ADDR_POFF);
		if (pvo != NULL && !(pvo->pvo_pte.lpte.pte_lo & LPTE_I)) {
			pa = (pvo->pvo_pte.lpte.pte_lo & LPTE_RPGN) |
			    (va & ADDR_POFF);
			moea64_syncicache(mmu, pm, va, pa, len);
		}
		va += len;
		sz -= len;
	}
	PMAP_UNLOCK(pm);
}

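/*
 * Kernel dumps on this pmap operate on physical addresses, so
 * "mapping" a chunk for dumpsys() is the identity transformation.
 */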
void
moea64_dumpsys_map(mmu_t mmu, vm_paddr_t pa, size_t sz, void **va)
{

	*va = (void *)pa;
}

extern struct dump_pa dump_map[PHYS_AVAIL_SZ + 1];

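/*
 * Populate dump_map for dumpsys().  For full dumps, the map simply
 * mirrors the physical memory regions.  For minidumps, three virtual
 * segments are described: kernel .data/.bss, the message buffer, and
 * one contiguous run of mapped kernel VM, stopping short of the buffer
 * cache, which is deliberately not dumped.
 */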
void
moea64_scan_init(mmu_t mmu)
{
	struct pvo_entry *pvo;
	vm_offset_t va;
	int i;

	if (!do_minidump) {
		/* Initialize phys. segments for dumpsys(). */
		memset(&dump_map, 0, sizeof(dump_map));
		mem_regions(&pregions, &pregions_sz, &regions, &regions_sz);
		for (i = 0; i < pregions_sz; i++) {
			dump_map[i].pa_start = pregions[i].mr_start;
			dump_map[i].pa_size = pregions[i].mr_size;
		}
		return;
	}

	/* Virtual segments for minidumps: */
	memset(&dump_map, 0, sizeof(dump_map));

	/* 1st: kernel .data and .bss. */
	dump_map[0].pa_start = trunc_page((uintptr_t)_etext);
	dump_map[0].pa_size = round_page((uintptr_t)_end) -
	    dump_map[0].pa_start;

	/* 2nd: msgbuf and tables (see pmap_bootstrap()). */
	dump_map[1].pa_start = (vm_paddr_t)msgbufp->msg_ptr;
	dump_map[1].pa_size = round_page(msgbufp->msg_size);

	/* 3rd: kernel VM. */
	va = dump_map[1].pa_start + dump_map[1].pa_size;
	/* Find start of next chunk (from va). */
	while (va < virtual_end) {
		/* Don't dump the buffer cache. */
		if (va >= kmi.buffer_sva && va < kmi.buffer_eva) {
			va = kmi.buffer_eva;
			continue;
		}
		pvo = moea64_pvo_find_va(kernel_pmap, va & ~ADDR_POFF);
		if (pvo != NULL && (pvo->pvo_pte.lpte.pte_hi & LPTE_VALID))
			break;
		va += PAGE_SIZE;
	}
	if (va < virtual_end) {
		dump_map[2].pa_start = va;
		va += PAGE_SIZE;
		/* Find last page in chunk. */
		while (va < virtual_end) {
			/* Don't run into the buffer cache. */
			if (va == kmi.buffer_sva)
				break;
			pvo = moea64_pvo_find_va(kernel_pmap, va & ~ADDR_POFF);
			if (pvo == NULL ||
			    !(pvo->pvo_pte.lpte.pte_hi & LPTE_VALID))
				break;
			va += PAGE_SIZE;
		}
		dump_map[2].pa_size = va - dump_map[2].pa_start;
	}
}