/*-
 * Copyright (c) 2008-2015 Nathan Whitehorn
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: stable/11/sys/powerpc/aim/mmu_oea64.c 327785 2018-01-10 20:39:26Z markj $");

/*
 * Manages physical address maps.
 *
 * Since the information managed by this module is also stored by the
 * logical address mapping module, this module may throw away valid virtual
 * to physical mappings at almost any time.  However, invalidations of
 * mappings must be done as requested.
 *
 * In order to cope with hardware architectures which make virtual to
 * physical map invalidates expensive, this module may delay invalidate or
 * reduced protection operations until such time as they are actually
 * necessary.  This module is given full information as to which processors
 * are currently using which maps, and to when physical maps must be made
 * correct.
 */

#include "opt_compat.h"
#include "opt_kstack_pages.h"

#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/conf.h>
#include <sys/queue.h>
#include <sys/cpuset.h>
#include <sys/kerneldump.h>
#include <sys/ktr.h>
#include <sys/lock.h>
#include <sys/msgbuf.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/rwlock.h>
#include <sys/sched.h>
#include <sys/sysctl.h>
#include <sys/systm.h>
#include <sys/vmmeter.h>
#include <sys/smp.h>

#include <sys/kdb.h>

#include <dev/ofw/openfirm.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/vm_kern.h>
#include <vm/vm_page.h>
#include <vm/vm_map.h>
#include <vm/vm_object.h>
#include <vm/vm_extern.h>
#include <vm/vm_pageout.h>
#include <vm/uma.h>

#include <machine/_inttypes.h>
#include <machine/cpu.h>
#include <machine/platform.h>
#include <machine/frame.h>
#include <machine/md_var.h>
#include <machine/psl.h>
#include <machine/bat.h>
#include <machine/hid.h>
#include <machine/pte.h>
#include <machine/sr.h>
#include <machine/trap.h>
#include <machine/mmuvar.h>

#include "mmu_oea64.h"
#include "mmu_if.h"
#include "moea64_if.h"

void moea64_release_vsid(uint64_t vsid);
uintptr_t moea64_get_unique_vsid(void);

#define DISABLE_TRANS(msr)	msr = mfmsr(); mtmsr(msr & ~PSL_DR)
#define ENABLE_TRANS(msr)	mtmsr(msr)
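
/*
 * Illustrative usage of the translation toggles above, mirroring how the
 * bootstrap and OFW-mapping code later in this file uses them (sketch only):
 *
 *	register_t msr;
 *
 *	DISABLE_TRANS(msr);
 *	... touch physical memory with data translation off ...
 *	ENABLE_TRANS(msr);
 */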

#define	VSID_MAKE(sr, hash)	((sr) | (((hash) & 0xfffff) << 4))
#define	VSID_TO_HASH(vsid)	(((vsid) >> 4) & 0xfffff)
#define	VSID_HASH_MASK		0x0000007fffffffffULL

/*
 * Locking semantics:
 *
 * There are two locks of interest: the page locks and the pmap locks, which
 * protect their individual PVO lists and are locked in that order. The contents
 * of all PVO entries are protected by the locks of their respective pmaps.
 * The pmap of any PVO is guaranteed not to change so long as the PVO is linked
 * into any list.
 *
 */

#define PV_LOCK_COUNT	PA_LOCK_COUNT*3
static struct mtx_padalign pv_lock[PV_LOCK_COUNT];

#define PV_LOCKPTR(pa)	((struct mtx *)(&pv_lock[pa_index(pa) % PV_LOCK_COUNT]))
#define PV_LOCK(pa)		mtx_lock(PV_LOCKPTR(pa))
#define PV_UNLOCK(pa)		mtx_unlock(PV_LOCKPTR(pa))
#define PV_LOCKASSERT(pa) 	mtx_assert(PV_LOCKPTR(pa), MA_OWNED)
#define PV_PAGE_LOCK(m)		PV_LOCK(VM_PAGE_TO_PHYS(m))
#define PV_PAGE_UNLOCK(m)	PV_UNLOCK(VM_PAGE_TO_PHYS(m))
#define PV_PAGE_LOCKASSERT(m)	PV_LOCKASSERT(VM_PAGE_TO_PHYS(m))
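
/*
 * Illustrative sketch of the lock order described above for a page-centric
 * operation (the page lock first, then the pmap lock of each mapping); the
 * loop body here is only an example, not a new interface:
 *
 *	PV_PAGE_LOCK(m);
 *	LIST_FOREACH(pvo, vm_page_to_pvoh(m), pvo_vlink) {
 *		PMAP_LOCK(pvo->pvo_pmap);
 *		... examine or modify the PVO ...
 *		PMAP_UNLOCK(pvo->pvo_pmap);
 *	}
 *	PV_PAGE_UNLOCK(m);
 */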

struct ofw_map {
	cell_t	om_va;
	cell_t	om_len;
	uint64_t om_pa;
	cell_t	om_mode;
};

extern unsigned char _etext[];
extern unsigned char _end[];

/*
 * Map of physical memory regions.
 */
static struct	mem_region *regions;
static struct	mem_region *pregions;
static u_int	phys_avail_count;
static int	regions_sz, pregions_sz;

extern void bs_remap_earlyboot(void);

/*
 * Lock for the SLB tables.
 */
struct mtx	moea64_slb_mutex;

/*
 * PTEG data.
 */
u_int		moea64_pteg_count;
u_int		moea64_pteg_mask;

/*
 * PVO data.
 */

uma_zone_t	moea64_pvo_zone; /* zone for pvo entries */

static struct	pvo_entry *moea64_bpvo_pool;
static int	moea64_bpvo_pool_index = 0;
static int	moea64_bpvo_pool_size = 327680;
TUNABLE_INT("machdep.moea64_bpvo_pool_size", &moea64_bpvo_pool_size);
SYSCTL_INT(_machdep, OID_AUTO, moea64_allocated_bpvo_entries, CTLFLAG_RD,
    &moea64_bpvo_pool_index, 0, "");

#define	VSID_NBPW	(sizeof(u_int32_t) * 8)
#ifdef __powerpc64__
#define	NVSIDS		(NPMAPS * 16)
#define VSID_HASHMASK	0xffffffffUL
#else
#define NVSIDS		NPMAPS
#define VSID_HASHMASK	0xfffffUL
#endif
static u_int	moea64_vsid_bitmap[NVSIDS / VSID_NBPW];

static boolean_t moea64_initialized = FALSE;

/*
 * Statistics.
 */
u_int	moea64_pte_valid = 0;
u_int	moea64_pte_overflow = 0;
u_int	moea64_pvo_entries = 0;
u_int	moea64_pvo_enter_calls = 0;
u_int	moea64_pvo_remove_calls = 0;
SYSCTL_INT(_machdep, OID_AUTO, moea64_pte_valid, CTLFLAG_RD,
    &moea64_pte_valid, 0, "");
SYSCTL_INT(_machdep, OID_AUTO, moea64_pte_overflow, CTLFLAG_RD,
    &moea64_pte_overflow, 0, "");
SYSCTL_INT(_machdep, OID_AUTO, moea64_pvo_entries, CTLFLAG_RD,
    &moea64_pvo_entries, 0, "");
SYSCTL_INT(_machdep, OID_AUTO, moea64_pvo_enter_calls, CTLFLAG_RD,
    &moea64_pvo_enter_calls, 0, "");
SYSCTL_INT(_machdep, OID_AUTO, moea64_pvo_remove_calls, CTLFLAG_RD,
    &moea64_pvo_remove_calls, 0, "");

vm_offset_t	moea64_scratchpage_va[2];
struct pvo_entry *moea64_scratchpage_pvo[2];
struct	mtx	moea64_scratchpage_mtx;

uint64_t	moea64_large_page_mask = 0;
uint64_t	moea64_large_page_size = 0;
int		moea64_large_page_shift = 0;

/*
 * PVO calls.
 */
static int	moea64_pvo_enter(mmu_t mmu, struct pvo_entry *pvo,
		    struct pvo_head *pvo_head);
static void	moea64_pvo_remove_from_pmap(mmu_t mmu, struct pvo_entry *pvo);
static void	moea64_pvo_remove_from_page(mmu_t mmu, struct pvo_entry *pvo);
static struct	pvo_entry *moea64_pvo_find_va(pmap_t, vm_offset_t);

/*
 * Utility routines.
 */
static boolean_t	moea64_query_bit(mmu_t, vm_page_t, uint64_t);
static u_int		moea64_clear_bit(mmu_t, vm_page_t, uint64_t);
static void		moea64_kremove(mmu_t, vm_offset_t);
static void		moea64_syncicache(mmu_t, pmap_t pmap, vm_offset_t va,
			    vm_paddr_t pa, vm_size_t sz);
static void		moea64_pmap_init_qpages(void);

/*
 * Kernel MMU interface
 */
void moea64_clear_modify(mmu_t, vm_page_t);
void moea64_copy_page(mmu_t, vm_page_t, vm_page_t);
void moea64_copy_pages(mmu_t mmu, vm_page_t *ma, vm_offset_t a_offset,
    vm_page_t *mb, vm_offset_t b_offset, int xfersize);
int moea64_enter(mmu_t, pmap_t, vm_offset_t, vm_page_t, vm_prot_t,
    u_int flags, int8_t psind);
void moea64_enter_object(mmu_t, pmap_t, vm_offset_t, vm_offset_t, vm_page_t,
    vm_prot_t);
void moea64_enter_quick(mmu_t, pmap_t, vm_offset_t, vm_page_t, vm_prot_t);
vm_paddr_t moea64_extract(mmu_t, pmap_t, vm_offset_t);
vm_page_t moea64_extract_and_hold(mmu_t, pmap_t, vm_offset_t, vm_prot_t);
void moea64_init(mmu_t);
boolean_t moea64_is_modified(mmu_t, vm_page_t);
boolean_t moea64_is_prefaultable(mmu_t, pmap_t, vm_offset_t);
boolean_t moea64_is_referenced(mmu_t, vm_page_t);
int moea64_ts_referenced(mmu_t, vm_page_t);
vm_offset_t moea64_map(mmu_t, vm_offset_t *, vm_paddr_t, vm_paddr_t, int);
boolean_t moea64_page_exists_quick(mmu_t, pmap_t, vm_page_t);
void moea64_page_init(mmu_t, vm_page_t);
int moea64_page_wired_mappings(mmu_t, vm_page_t);
void moea64_pinit(mmu_t, pmap_t);
void moea64_pinit0(mmu_t, pmap_t);
void moea64_protect(mmu_t, pmap_t, vm_offset_t, vm_offset_t, vm_prot_t);
void moea64_qenter(mmu_t, vm_offset_t, vm_page_t *, int);
void moea64_qremove(mmu_t, vm_offset_t, int);
void moea64_release(mmu_t, pmap_t);
void moea64_remove(mmu_t, pmap_t, vm_offset_t, vm_offset_t);
void moea64_remove_pages(mmu_t, pmap_t);
void moea64_remove_all(mmu_t, vm_page_t);
void moea64_remove_write(mmu_t, vm_page_t);
void moea64_unwire(mmu_t, pmap_t, vm_offset_t, vm_offset_t);
void moea64_zero_page(mmu_t, vm_page_t);
void moea64_zero_page_area(mmu_t, vm_page_t, int, int);
void moea64_zero_page_idle(mmu_t, vm_page_t);
void moea64_activate(mmu_t, struct thread *);
void moea64_deactivate(mmu_t, struct thread *);
void *moea64_mapdev(mmu_t, vm_paddr_t, vm_size_t);
void *moea64_mapdev_attr(mmu_t, vm_paddr_t, vm_size_t, vm_memattr_t);
void moea64_unmapdev(mmu_t, vm_offset_t, vm_size_t);
vm_paddr_t moea64_kextract(mmu_t, vm_offset_t);
void moea64_page_set_memattr(mmu_t, vm_page_t m, vm_memattr_t ma);
void moea64_kenter_attr(mmu_t, vm_offset_t, vm_paddr_t, vm_memattr_t ma);
void moea64_kenter(mmu_t, vm_offset_t, vm_paddr_t);
boolean_t moea64_dev_direct_mapped(mmu_t, vm_paddr_t, vm_size_t);
static void moea64_sync_icache(mmu_t, pmap_t, vm_offset_t, vm_size_t);
void moea64_dumpsys_map(mmu_t mmu, vm_paddr_t pa, size_t sz,
    void **va);
void moea64_scan_init(mmu_t mmu);
vm_offset_t moea64_quick_enter_page(mmu_t mmu, vm_page_t m);
void moea64_quick_remove_page(mmu_t mmu, vm_offset_t addr);

static mmu_method_t moea64_methods[] = {
	MMUMETHOD(mmu_clear_modify,	moea64_clear_modify),
	MMUMETHOD(mmu_copy_page,	moea64_copy_page),
	MMUMETHOD(mmu_copy_pages,	moea64_copy_pages),
	MMUMETHOD(mmu_enter,		moea64_enter),
	MMUMETHOD(mmu_enter_object,	moea64_enter_object),
	MMUMETHOD(mmu_enter_quick,	moea64_enter_quick),
	MMUMETHOD(mmu_extract,		moea64_extract),
	MMUMETHOD(mmu_extract_and_hold,	moea64_extract_and_hold),
	MMUMETHOD(mmu_init,		moea64_init),
	MMUMETHOD(mmu_is_modified,	moea64_is_modified),
	MMUMETHOD(mmu_is_prefaultable,	moea64_is_prefaultable),
	MMUMETHOD(mmu_is_referenced,	moea64_is_referenced),
	MMUMETHOD(mmu_ts_referenced,	moea64_ts_referenced),
	MMUMETHOD(mmu_map,     		moea64_map),
	MMUMETHOD(mmu_page_exists_quick,moea64_page_exists_quick),
	MMUMETHOD(mmu_page_init,	moea64_page_init),
	MMUMETHOD(mmu_page_wired_mappings,moea64_page_wired_mappings),
	MMUMETHOD(mmu_pinit,		moea64_pinit),
	MMUMETHOD(mmu_pinit0,		moea64_pinit0),
	MMUMETHOD(mmu_protect,		moea64_protect),
	MMUMETHOD(mmu_qenter,		moea64_qenter),
	MMUMETHOD(mmu_qremove,		moea64_qremove),
	MMUMETHOD(mmu_release,		moea64_release),
	MMUMETHOD(mmu_remove,		moea64_remove),
	MMUMETHOD(mmu_remove_pages,	moea64_remove_pages),
	MMUMETHOD(mmu_remove_all,      	moea64_remove_all),
	MMUMETHOD(mmu_remove_write,	moea64_remove_write),
	MMUMETHOD(mmu_sync_icache,	moea64_sync_icache),
	MMUMETHOD(mmu_unwire,		moea64_unwire),
	MMUMETHOD(mmu_zero_page,       	moea64_zero_page),
	MMUMETHOD(mmu_zero_page_area,	moea64_zero_page_area),
	MMUMETHOD(mmu_zero_page_idle,	moea64_zero_page_idle),
	MMUMETHOD(mmu_activate,		moea64_activate),
	MMUMETHOD(mmu_deactivate,      	moea64_deactivate),
	MMUMETHOD(mmu_page_set_memattr,	moea64_page_set_memattr),
	MMUMETHOD(mmu_quick_enter_page, moea64_quick_enter_page),
	MMUMETHOD(mmu_quick_remove_page, moea64_quick_remove_page),

	/* Internal interfaces */
	MMUMETHOD(mmu_mapdev,		moea64_mapdev),
	MMUMETHOD(mmu_mapdev_attr,	moea64_mapdev_attr),
	MMUMETHOD(mmu_unmapdev,		moea64_unmapdev),
	MMUMETHOD(mmu_kextract,		moea64_kextract),
	MMUMETHOD(mmu_kenter,		moea64_kenter),
	MMUMETHOD(mmu_kenter_attr,	moea64_kenter_attr),
	MMUMETHOD(mmu_dev_direct_mapped,moea64_dev_direct_mapped),
	MMUMETHOD(mmu_scan_init,	moea64_scan_init),
	MMUMETHOD(mmu_dumpsys_map,	moea64_dumpsys_map),

	{ 0, 0 }
};

MMU_DEF(oea64_mmu, "mmu_oea64_base", moea64_methods, 0);

static struct pvo_head *
vm_page_to_pvoh(vm_page_t m)
{

	mtx_assert(PV_LOCKPTR(VM_PAGE_TO_PHYS(m)), MA_OWNED);
	return (&m->md.mdpg_pvoh);
}

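/*
 * Allocate a PVO entry.  Before the VM system (and therefore the UMA zone)
 * is available, or when explicitly requested, entries are carved from the
 * statically sized bootstrap pool, which panics the system when exhausted.
 */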
static struct pvo_entry *
alloc_pvo_entry(int bootstrap)
{
	struct pvo_entry *pvo;

	if (!moea64_initialized || bootstrap) {
		if (moea64_bpvo_pool_index >= moea64_bpvo_pool_size) {
			panic("moea64_enter: bpvo pool exhausted, %d, %d, %zd",
			      moea64_bpvo_pool_index, moea64_bpvo_pool_size,
			      moea64_bpvo_pool_size * sizeof(struct pvo_entry));
		}
		pvo = &moea64_bpvo_pool[
		    atomic_fetchadd_int(&moea64_bpvo_pool_index, 1)];
		bzero(pvo, sizeof(*pvo));
		pvo->pvo_vaddr = PVO_BOOTSTRAP;
	} else {
		pvo = uma_zalloc(moea64_pvo_zone, M_NOWAIT);
		bzero(pvo, sizeof(*pvo));
	}

	return (pvo);
}


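/*
 * Bind a newly allocated PVO to a pmap and virtual address: derive the VSID
 * for the address, record the virtual page number (VPN) and compute the slot
 * of the primary PTE group the mapping hashes to.
 */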
static void
init_pvo_entry(struct pvo_entry *pvo, pmap_t pmap, vm_offset_t va)
{
	uint64_t vsid;
	uint64_t hash;
	int shift;

	PMAP_LOCK_ASSERT(pmap, MA_OWNED);

	pvo->pvo_pmap = pmap;
	va &= ~ADDR_POFF;
	pvo->pvo_vaddr |= va;
	vsid = va_to_vsid(pmap, va);
	pvo->pvo_vpn = (uint64_t)((va & ADDR_PIDX) >> ADDR_PIDX_SHFT)
	    | (vsid << 16);

	shift = (pvo->pvo_vaddr & PVO_LARGE) ? moea64_large_page_shift :
	    ADDR_PIDX_SHFT;
	hash = (vsid & VSID_HASH_MASK) ^ (((uint64_t)va & ADDR_PIDX) >> shift);
	pvo->pvo_pte.slot = (hash & moea64_pteg_mask) << 3;
}

static void
free_pvo_entry(struct pvo_entry *pvo)
{

	if (!(pvo->pvo_vaddr & PVO_BOOTSTRAP))
		uma_zfree(moea64_pvo_zone, pvo);
}

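/*
 * Build the architected 64-bit PTE image (struct lpte) for a PVO: the
 * abbreviated VPN, valid bit and large/wired/hash-function flags go in the
 * high doubleword; the physical address (with WIMG attributes) and the page
 * protection (BW/BR, no-execute) go in the low doubleword.
 */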
void
moea64_pte_from_pvo(const struct pvo_entry *pvo, struct lpte *lpte)
{

	lpte->pte_hi = (pvo->pvo_vpn >> (ADDR_API_SHFT64 - ADDR_PIDX_SHFT)) &
	    LPTE_AVPN_MASK;
	lpte->pte_hi |= LPTE_VALID;

	if (pvo->pvo_vaddr & PVO_LARGE)
		lpte->pte_hi |= LPTE_BIG;
	if (pvo->pvo_vaddr & PVO_WIRED)
		lpte->pte_hi |= LPTE_WIRED;
	if (pvo->pvo_vaddr & PVO_HID)
		lpte->pte_hi |= LPTE_HID;

	lpte->pte_lo = pvo->pvo_pte.pa; /* Includes WIMG bits */
	if (pvo->pvo_pte.prot & VM_PROT_WRITE)
		lpte->pte_lo |= LPTE_BW;
	else
		lpte->pte_lo |= LPTE_BR;

	if (!(pvo->pvo_pte.prot & VM_PROT_EXECUTE))
		lpte->pte_lo |= LPTE_NOEXEC;
}

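/*
 * Compute the WIMG (Write-through, cache-Inhibited, Memory-coherent, Guarded)
 * storage attributes for a physical address, either from an explicit
 * vm_memattr_t or, for the default attribute, from whether the address falls
 * within known physical memory.
 */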
static __inline uint64_t
moea64_calc_wimg(vm_paddr_t pa, vm_memattr_t ma)
{
	uint64_t pte_lo;
	int i;

	if (ma != VM_MEMATTR_DEFAULT) {
		switch (ma) {
		case VM_MEMATTR_UNCACHEABLE:
			return (LPTE_I | LPTE_G);
		case VM_MEMATTR_CACHEABLE:
			return (LPTE_M);
		case VM_MEMATTR_WRITE_COMBINING:
		case VM_MEMATTR_WRITE_BACK:
		case VM_MEMATTR_PREFETCHABLE:
			return (LPTE_I);
		case VM_MEMATTR_WRITE_THROUGH:
			return (LPTE_W | LPTE_M);
		}
	}

	/*
	 * Assume the page is cache inhibited and access is guarded unless
	 * it's in our available memory array.
	 */
	pte_lo = LPTE_I | LPTE_G;
	for (i = 0; i < pregions_sz; i++) {
		if ((pa >= pregions[i].mr_start) &&
		    (pa < (pregions[i].mr_start + pregions[i].mr_size))) {
			pte_lo &= ~(LPTE_I | LPTE_G);
			pte_lo |= LPTE_M;
			break;
		}
	}

	return pte_lo;
}

/*
 * Quick sort callout for comparing memory regions.
 */
static int	om_cmp(const void *a, const void *b);

static int
om_cmp(const void *a, const void *b)
{
	const struct	ofw_map *mapa;
	const struct	ofw_map *mapb;

	mapa = a;
	mapb = b;
	if (mapa->om_pa < mapb->om_pa)
		return (-1);
	else if (mapa->om_pa > mapb->om_pa)
		return (1);
	else
		return (0);
}

static void
moea64_add_ofw_mappings(mmu_t mmup, phandle_t mmu, size_t sz)
{
	struct ofw_map	translations[sz/(4*sizeof(cell_t))]; /*>= 4 cells per */
	pcell_t		acells, trans_cells[sz/sizeof(cell_t)];
	struct pvo_entry *pvo;
	register_t	msr;
	vm_offset_t	off;
	vm_paddr_t	pa_base;
	int		i, j;

	bzero(translations, sz);
	OF_getencprop(OF_finddevice("/"), "#address-cells", &acells,
	    sizeof(acells));
	if (OF_getencprop(mmu, "translations", trans_cells, sz) == -1)
		panic("moea64_bootstrap: can't get ofw translations");

	CTR0(KTR_PMAP, "moea64_add_ofw_mappings: translations");
	sz /= sizeof(cell_t);
	for (i = 0, j = 0; i < sz; j++) {
		translations[j].om_va = trans_cells[i++];
		translations[j].om_len = trans_cells[i++];
		translations[j].om_pa = trans_cells[i++];
		if (acells == 2) {
			translations[j].om_pa <<= 32;
			translations[j].om_pa |= trans_cells[i++];
		}
		translations[j].om_mode = trans_cells[i++];
	}
	KASSERT(i == sz, ("Translations map has incorrect cell count (%d/%zd)",
	    i, sz));

	sz = j;
	qsort(translations, sz, sizeof (*translations), om_cmp);

	for (i = 0; i < sz; i++) {
		pa_base = translations[i].om_pa;
	      #ifndef __powerpc64__
		if ((translations[i].om_pa >> 32) != 0)
			panic("OFW translations above 32-bit boundary!");
	      #endif

		if (pa_base % PAGE_SIZE)
			panic("OFW translation not page-aligned (phys)!");
		if (translations[i].om_va % PAGE_SIZE)
			panic("OFW translation not page-aligned (virt)!");

		CTR3(KTR_PMAP, "translation: pa=%#zx va=%#x len=%#x",
		    pa_base, translations[i].om_va, translations[i].om_len);

		/* Now enter the pages for this mapping */

		DISABLE_TRANS(msr);
		for (off = 0; off < translations[i].om_len; off += PAGE_SIZE) {
			/* If this address is direct-mapped, skip remapping */
			if (hw_direct_map && translations[i].om_va == pa_base &&
			    moea64_calc_wimg(pa_base + off, VM_MEMATTR_DEFAULT)
			    == LPTE_M)
				continue;

			PMAP_LOCK(kernel_pmap);
			pvo = moea64_pvo_find_va(kernel_pmap,
			    translations[i].om_va + off);
			PMAP_UNLOCK(kernel_pmap);
			if (pvo != NULL)
				continue;

			moea64_kenter(mmup, translations[i].om_va + off,
			    pa_base + off);
		}
		ENABLE_TRANS(msr);
	}
}

#ifdef __powerpc64__
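/*
 * Probe for large (16 MB) page support.  On 970-class CPUs the feature is
 * first enabled by clearing the large-page disable bit in HID4.
 */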
static void
moea64_probe_large_page(void)
{
	uint16_t pvr = mfpvr() >> 16;

	switch (pvr) {
	case IBM970:
	case IBM970FX:
	case IBM970MP:
		powerpc_sync(); isync();
		mtspr(SPR_HID4, mfspr(SPR_HID4) & ~HID4_970_DISABLE_LG_PG);
		powerpc_sync(); isync();

		/* FALLTHROUGH */
	default:
		moea64_large_page_size = 0x1000000; /* 16 MB */
		moea64_large_page_shift = 24;
	}

	moea64_large_page_mask = moea64_large_page_size - 1;
}

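/*
 * Pre-install a kernel SLB entry for the segment containing 'va' (optionally
 * marked as a large-page segment) so that accesses to it during bootstrap do
 * not take SLB faults; see the KVA prefault loop in moea64_late_bootstrap().
 */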
static void
moea64_bootstrap_slb_prefault(vm_offset_t va, int large)
{
	struct slb *cache;
	struct slb entry;
	uint64_t esid, slbe;
	uint64_t i;

	cache = PCPU_GET(slb);
	esid = va >> ADDR_SR_SHFT;
	slbe = (esid << SLBE_ESID_SHIFT) | SLBE_VALID;

	for (i = 0; i < 64; i++) {
		if (cache[i].slbe == (slbe | i))
			return;
	}

	entry.slbe = slbe;
	entry.slbv = KERNEL_VSID(esid) << SLBV_VSID_SHIFT;
	if (large)
		entry.slbv |= SLBV_L;

	slb_insert_kernel(entry.slbe, entry.slbv);
}
#endif

static void
moea64_setup_direct_map(mmu_t mmup, vm_offset_t kernelstart,
    vm_offset_t kernelend)
{
	struct pvo_entry *pvo;
	register_t msr;
	vm_paddr_t pa;
	vm_offset_t size, off;
	uint64_t pte_lo;
	int i;

	if (moea64_large_page_size == 0)
		hw_direct_map = 0;

	DISABLE_TRANS(msr);
	if (hw_direct_map) {
		PMAP_LOCK(kernel_pmap);
		for (i = 0; i < pregions_sz; i++) {
		  for (pa = pregions[i].mr_start; pa < pregions[i].mr_start +
		     pregions[i].mr_size; pa += moea64_large_page_size) {
			pte_lo = LPTE_M;

			pvo = alloc_pvo_entry(1 /* bootstrap */);
			pvo->pvo_vaddr |= PVO_WIRED | PVO_LARGE;
			init_pvo_entry(pvo, kernel_pmap, pa);

			/*
			 * Set memory access as guarded if prefetch within
			 * the page could exit the available physmem area.
			 */
			if (pa & moea64_large_page_mask) {
				pa &= moea64_large_page_mask;
				pte_lo |= LPTE_G;
			}
			if (pa + moea64_large_page_size >
			    pregions[i].mr_start + pregions[i].mr_size)
				pte_lo |= LPTE_G;

			pvo->pvo_pte.prot = VM_PROT_READ | VM_PROT_WRITE |
			    VM_PROT_EXECUTE;
			pvo->pvo_pte.pa = pa | pte_lo;
			moea64_pvo_enter(mmup, pvo, NULL);
		  }
		}
		PMAP_UNLOCK(kernel_pmap);
	} else {
		size = moea64_bpvo_pool_size*sizeof(struct pvo_entry);
		off = (vm_offset_t)(moea64_bpvo_pool);
		for (pa = off; pa < off + size; pa += PAGE_SIZE)
			moea64_kenter(mmup, pa, pa);

		/*
		 * Map certain important things, like ourselves.
		 *
		 * NOTE: We do not map the exception vector space. That code is
		 * used only in real mode, and leaving it unmapped allows us to
		 * catch NULL pointer dereferences, instead of making NULL a valid
		 * address.
		 */

		for (pa = kernelstart & ~PAGE_MASK; pa < kernelend;
		    pa += PAGE_SIZE)
			moea64_kenter(mmup, pa, pa);
	}
	ENABLE_TRANS(msr);

	/*
	 * Allow user to override unmapped_buf_allowed for testing.
	 * XXXKIB Only direct map implementation was tested.
	 */
	if (!TUNABLE_INT_FETCH("vfs.unmapped_buf_allowed",
	    &unmapped_buf_allowed))
		unmapped_buf_allowed = hw_direct_map;
}

void
moea64_early_bootstrap(mmu_t mmup, vm_offset_t kernelstart, vm_offset_t kernelend)
{
	int		i, j;
	vm_size_t	physsz, hwphyssz;

#ifndef __powerpc64__
	/* We don't have a direct map since there is no BAT */
	hw_direct_map = 0;

	/* Make sure battable is zero, since we have no BAT */
	for (i = 0; i < 16; i++) {
		battable[i].batu = 0;
		battable[i].batl = 0;
	}
#else
	moea64_probe_large_page();

	/* Use a direct map if we have large page support */
	if (moea64_large_page_size > 0)
		hw_direct_map = 1;
	else
		hw_direct_map = 0;
#endif

	/* Get physical memory regions from firmware */
	mem_regions(&pregions, &pregions_sz, &regions, &regions_sz);
	CTR0(KTR_PMAP, "moea64_bootstrap: physical memory");

	if (sizeof(phys_avail)/sizeof(phys_avail[0]) < regions_sz)
		panic("moea64_bootstrap: phys_avail too small");

	phys_avail_count = 0;
	physsz = 0;
	hwphyssz = 0;
	TUNABLE_ULONG_FETCH("hw.physmem", (u_long *) &hwphyssz);
	for (i = 0, j = 0; i < regions_sz; i++, j += 2) {
		CTR3(KTR_PMAP, "region: %#zx - %#zx (%#zx)",
		    regions[i].mr_start, regions[i].mr_start +
		    regions[i].mr_size, regions[i].mr_size);
		if (hwphyssz != 0 &&
		    (physsz + regions[i].mr_size) >= hwphyssz) {
			if (physsz < hwphyssz) {
				phys_avail[j] = regions[i].mr_start;
				phys_avail[j + 1] = regions[i].mr_start +
				    hwphyssz - physsz;
				physsz = hwphyssz;
				phys_avail_count++;
			}
			break;
		}
		phys_avail[j] = regions[i].mr_start;
		phys_avail[j + 1] = regions[i].mr_start + regions[i].mr_size;
		phys_avail_count++;
		physsz += regions[i].mr_size;
	}

	/* Check for overlap with the kernel and exception vectors */
	for (j = 0; j < 2*phys_avail_count; j+=2) {
		if (phys_avail[j] < EXC_LAST)
			phys_avail[j] += EXC_LAST;

		if (kernelstart >= phys_avail[j] &&
		    kernelstart < phys_avail[j+1]) {
			if (kernelend < phys_avail[j+1]) {
				phys_avail[2*phys_avail_count] =
				    (kernelend & ~PAGE_MASK) + PAGE_SIZE;
				phys_avail[2*phys_avail_count + 1] =
				    phys_avail[j+1];
				phys_avail_count++;
			}

			phys_avail[j+1] = kernelstart & ~PAGE_MASK;
		}

		if (kernelend >= phys_avail[j] &&
		    kernelend < phys_avail[j+1]) {
			if (kernelstart > phys_avail[j]) {
				phys_avail[2*phys_avail_count] = phys_avail[j];
				phys_avail[2*phys_avail_count + 1] =
				    kernelstart & ~PAGE_MASK;
				phys_avail_count++;
			}

			phys_avail[j] = (kernelend & ~PAGE_MASK) + PAGE_SIZE;
		}
	}

	physmem = btoc(physsz);

#ifdef PTEGCOUNT
	moea64_pteg_count = PTEGCOUNT;
#else
	moea64_pteg_count = 0x1000;

	while (moea64_pteg_count < physmem)
		moea64_pteg_count <<= 1;

	moea64_pteg_count >>= 1;
#endif /* PTEGCOUNT */
}

void
moea64_mid_bootstrap(mmu_t mmup, vm_offset_t kernelstart, vm_offset_t kernelend)
{
	int		i;

	/*
	 * Set PTEG mask
	 */
	moea64_pteg_mask = moea64_pteg_count - 1;

	/*
	 * Initialize SLB table lock and page locks
	 */
	mtx_init(&moea64_slb_mutex, "SLB table", NULL, MTX_DEF);
	for (i = 0; i < PV_LOCK_COUNT; i++)
		mtx_init(&pv_lock[i], "page pv", NULL, MTX_DEF);

	/*
	 * Initialise the bootstrap pvo pool.
	 */
	moea64_bpvo_pool = (struct pvo_entry *)moea64_bootstrap_alloc(
		moea64_bpvo_pool_size*sizeof(struct pvo_entry), 0);
	moea64_bpvo_pool_index = 0;

	/*
	 * Make sure kernel vsid is allocated as well as VSID 0.
	 */
	#ifndef __powerpc64__
	moea64_vsid_bitmap[(KERNEL_VSIDBITS & (NVSIDS - 1)) / VSID_NBPW]
		|= 1 << (KERNEL_VSIDBITS % VSID_NBPW);
	moea64_vsid_bitmap[0] |= 1;
	#endif

	/*
	 * Initialize the kernel pmap (which is statically allocated).
	 */
	#ifdef __powerpc64__
	for (i = 0; i < 64; i++) {
		pcpup->pc_slb[i].slbv = 0;
		pcpup->pc_slb[i].slbe = 0;
	}
	#else
	for (i = 0; i < 16; i++)
		kernel_pmap->pm_sr[i] = EMPTY_SEGMENT + i;
	#endif

	kernel_pmap->pmap_phys = kernel_pmap;
	CPU_FILL(&kernel_pmap->pm_active);
	RB_INIT(&kernel_pmap->pmap_pvo);

	PMAP_LOCK_INIT(kernel_pmap);

	/*
	 * Now map in all the other buffers we allocated earlier
	 */

	moea64_setup_direct_map(mmup, kernelstart, kernelend);
}

void
moea64_late_bootstrap(mmu_t mmup, vm_offset_t kernelstart, vm_offset_t kernelend)
{
	ihandle_t	mmui;
	phandle_t	chosen;
	phandle_t	mmu;
	ssize_t		sz;
	int		i;
	vm_offset_t	pa, va;
	void		*dpcpu;

	/*
	 * Set up the Open Firmware pmap and add its mappings if not in real
	 * mode.
	 */

	chosen = OF_finddevice("/chosen");
	if (chosen != -1 && OF_getencprop(chosen, "mmu", &mmui, 4) != -1) {
		mmu = OF_instance_to_package(mmui);
		if (mmu == -1 ||
		    (sz = OF_getproplen(mmu, "translations")) == -1)
			sz = 0;
		if (sz > 6144 /* tmpstksz - 2 KB headroom */)
			panic("moea64_bootstrap: too many ofw translations");

		if (sz > 0)
			moea64_add_ofw_mappings(mmup, mmu, sz);
	}

	/*
	 * Calculate the last available physical address.
	 */
	for (i = 0; phys_avail[i + 2] != 0; i += 2)
		;
	Maxmem = powerpc_btop(phys_avail[i + 1]);

	/*
	 * Initialize MMU and remap early physical mappings
	 */
	MMU_CPU_BOOTSTRAP(mmup,0);
	mtmsr(mfmsr() | PSL_DR | PSL_IR);
	pmap_bootstrapped++;
	bs_remap_earlyboot();

	/*
	 * Set the start and end of kva.
	 */
	virtual_avail = VM_MIN_KERNEL_ADDRESS;
	virtual_end = VM_MAX_SAFE_KERNEL_ADDRESS;

	/*
	 * Map the entire KVA range into the SLB. We must not fault there.
	 */
	#ifdef __powerpc64__
	for (va = virtual_avail; va < virtual_end; va += SEGMENT_LENGTH)
		moea64_bootstrap_slb_prefault(va, 0);
	#endif

	/*
	 * Figure out how far we can extend virtual_end into segment 16
	 * without running into existing mappings. Segment 16 is guaranteed
	 * to contain neither RAM nor devices (at least on Apple hardware),
	 * but will generally contain some OFW mappings we should not
	 * step on.
	 */

	#ifndef __powerpc64__	/* KVA is in high memory on PPC64 */
	PMAP_LOCK(kernel_pmap);
	while (virtual_end < VM_MAX_KERNEL_ADDRESS &&
	    moea64_pvo_find_va(kernel_pmap, virtual_end+1) == NULL)
		virtual_end += PAGE_SIZE;
	PMAP_UNLOCK(kernel_pmap);
	#endif

	/*
	 * Allocate a kernel stack with a guard page for thread0 and map it
	 * into the kernel page map.
	 */
	pa = moea64_bootstrap_alloc(kstack_pages * PAGE_SIZE, PAGE_SIZE);
	va = virtual_avail + KSTACK_GUARD_PAGES * PAGE_SIZE;
	virtual_avail = va + kstack_pages * PAGE_SIZE;
	CTR2(KTR_PMAP, "moea64_bootstrap: kstack0 at %#x (%#x)", pa, va);
	thread0.td_kstack = va;
	thread0.td_kstack_pages = kstack_pages;
	for (i = 0; i < kstack_pages; i++) {
		moea64_kenter(mmup, va, pa);
		pa += PAGE_SIZE;
		va += PAGE_SIZE;
	}

	/*
	 * Allocate virtual address space for the message buffer.
	 */
	pa = msgbuf_phys = moea64_bootstrap_alloc(msgbufsize, PAGE_SIZE);
	msgbufp = (struct msgbuf *)virtual_avail;
	va = virtual_avail;
	virtual_avail += round_page(msgbufsize);
	while (va < virtual_avail) {
		moea64_kenter(mmup, va, pa);
		pa += PAGE_SIZE;
		va += PAGE_SIZE;
	}

	/*
	 * Allocate virtual address space for the dynamic percpu area.
	 */
	pa = moea64_bootstrap_alloc(DPCPU_SIZE, PAGE_SIZE);
	dpcpu = (void *)virtual_avail;
	va = virtual_avail;
	virtual_avail += DPCPU_SIZE;
	while (va < virtual_avail) {
		moea64_kenter(mmup, va, pa);
		pa += PAGE_SIZE;
		va += PAGE_SIZE;
	}
	dpcpu_init(dpcpu, 0);

	/*
	 * Allocate some things for page zeroing. We put this directly
	 * in the page table and use MOEA64_PTE_REPLACE to prevent any
	 * of the PVO book-keeping or other parts of the VM system
	 * from even knowing that this hack exists.
	 */

	if (!hw_direct_map) {
		mtx_init(&moea64_scratchpage_mtx, "pvo zero page", NULL,
		    MTX_DEF);
		for (i = 0; i < 2; i++) {
			moea64_scratchpage_va[i] = (virtual_end+1) - PAGE_SIZE;
			virtual_end -= PAGE_SIZE;

			moea64_kenter(mmup, moea64_scratchpage_va[i], 0);

			PMAP_LOCK(kernel_pmap);
			moea64_scratchpage_pvo[i] = moea64_pvo_find_va(
			    kernel_pmap, (vm_offset_t)moea64_scratchpage_va[i]);
			PMAP_UNLOCK(kernel_pmap);
		}
	}
}

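/*
 * Allocate a per-CPU quick-map KVA page, its backing PVO and its lock once
 * the kernel VA allocator is running; none of this is needed when a hardware
 * direct map is available.
 */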
static void
moea64_pmap_init_qpages(void)
{
	struct pcpu *pc;
	int i;

	if (hw_direct_map)
		return;

	CPU_FOREACH(i) {
		pc = pcpu_find(i);
		pc->pc_qmap_addr = kva_alloc(PAGE_SIZE);
		if (pc->pc_qmap_addr == 0)
			panic("pmap_init_qpages: unable to allocate KVA");
		PMAP_LOCK(kernel_pmap);
		pc->pc_qmap_pvo = moea64_pvo_find_va(kernel_pmap, pc->pc_qmap_addr);
		PMAP_UNLOCK(kernel_pmap);
		mtx_init(&pc->pc_qmap_lock, "qmap lock", NULL, MTX_DEF);
	}
}

SYSINIT(qpages_init, SI_SUB_CPU, SI_ORDER_ANY, moea64_pmap_init_qpages, NULL);

/*
 * Activate a user pmap.  This mostly involves setting some non-CPU
 * state.
 */
void
moea64_activate(mmu_t mmu, struct thread *td)
{
	pmap_t	pm;

	pm = &td->td_proc->p_vmspace->vm_pmap;
	CPU_SET(PCPU_GET(cpuid), &pm->pm_active);

	#ifdef __powerpc64__
	PCPU_SET(userslb, pm->pm_slb);
	__asm __volatile("slbmte %0, %1; isync" ::
	    "r"(td->td_pcb->pcb_cpu.aim.usr_vsid), "r"(USER_SLB_SLBE));
	#else
	PCPU_SET(curpmap, pm->pmap_phys);
	mtsrin(USER_SR << ADDR_SR_SHFT, td->td_pcb->pcb_cpu.aim.usr_vsid);
	#endif
}

void
moea64_deactivate(mmu_t mmu, struct thread *td)
{
	pmap_t	pm;

	__asm __volatile("isync; slbie %0" :: "r"(USER_ADDR));

	pm = &td->td_proc->p_vmspace->vm_pmap;
	CPU_CLR(PCPU_GET(cpuid), &pm->pm_active);
	#ifdef __powerpc64__
	PCPU_SET(userslb, NULL);
	#else
	PCPU_SET(curpmap, NULL);
	#endif
}

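/*
 * Clear the wired flag on every mapping in [sva, eva) of the given pmap,
 * pushing any referenced/changed bits found in the hardware PTE back to the
 * vm_page before they are lost.
 */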
1048190681Snwhitehornvoid
1049268591Salcmoea64_unwire(mmu_t mmu, pmap_t pm, vm_offset_t sva, vm_offset_t eva)
1050268591Salc{
1051268591Salc	struct	pvo_entry key, *pvo;
1052279252Snwhitehorn	vm_page_t m;
1053279252Snwhitehorn	int64_t	refchg;
1054268591Salc
1055279252Snwhitehorn	key.pvo_vaddr = sva;
1056268591Salc	PMAP_LOCK(pm);
1057268591Salc	for (pvo = RB_NFIND(pvo_tree, &pm->pmap_pvo, &key);
1058268591Salc	    pvo != NULL && PVO_VADDR(pvo) < eva;
1059268591Salc	    pvo = RB_NEXT(pvo_tree, &pm->pmap_pvo, pvo)) {
1060268591Salc		if ((pvo->pvo_vaddr & PVO_WIRED) == 0)
1061268591Salc			panic("moea64_unwire: pvo %p is missing PVO_WIRED",
1062268591Salc			    pvo);
1063268591Salc		pvo->pvo_vaddr &= ~PVO_WIRED;
1064279252Snwhitehorn		refchg = MOEA64_PTE_REPLACE(mmu, pvo, 0 /* No invalidation */);
1065279252Snwhitehorn		if ((pvo->pvo_vaddr & PVO_MANAGED) &&
1066279252Snwhitehorn		    (pvo->pvo_pte.prot & VM_PROT_WRITE)) {
1067279252Snwhitehorn			if (refchg < 0)
1068279252Snwhitehorn				refchg = LPTE_CHG;
1069279252Snwhitehorn			m = PHYS_TO_VM_PAGE(pvo->pvo_pte.pa & LPTE_RPGN);
1070279252Snwhitehorn
1071279252Snwhitehorn			refchg |= atomic_readandclear_32(&m->md.mdpg_attrs);
1072279252Snwhitehorn			if (refchg & LPTE_CHG)
1073279252Snwhitehorn				vm_page_dirty(m);
1074279252Snwhitehorn			if (refchg & LPTE_REF)
1075279252Snwhitehorn				vm_page_aflag_set(m, PGA_REFERENCED);
1076268591Salc		}
1077268591Salc		pm->pm_stats.wired_count--;
1078268591Salc	}
1079268591Salc	PMAP_UNLOCK(pm);
1080268591Salc}
1081268591Salc
1082190681Snwhitehorn/*
1083190681Snwhitehorn * This goes through and sets the physical address of our
1084190681Snwhitehorn * special scratch PTE to the PA we want to zero or copy. Because
1085190681Snwhitehorn * of locking issues (this can get called in pvo_enter() by
1086190681Snwhitehorn * the UMA allocator), we can't use most other utility functions here
1087190681Snwhitehorn */
1088190681Snwhitehorn
1089190681Snwhitehornstatic __inline
1090285148Sjhibbitsvoid moea64_set_scratchpage_pa(mmu_t mmup, int which, vm_paddr_t pa) {
1091204694Snwhitehorn
1092209975Snwhitehorn	KASSERT(!hw_direct_map, ("Using OEA64 scratchpage with a direct map!"));
1093204268Snwhitehorn	mtx_assert(&moea64_scratchpage_mtx, MA_OWNED);
1094204268Snwhitehorn
1095279252Snwhitehorn	moea64_scratchpage_pvo[which]->pvo_pte.pa =
1096213307Snwhitehorn	    moea64_calc_wimg(pa, VM_MEMATTR_DEFAULT) | (uint64_t)pa;
1097279252Snwhitehorn	MOEA64_PTE_REPLACE(mmup, moea64_scratchpage_pvo[which],
1098279252Snwhitehorn	    MOEA64_PTE_INVALIDATE);
1099216383Snwhitehorn	isync();
1100190681Snwhitehorn}
1101190681Snwhitehorn
1102190681Snwhitehornvoid
1103190681Snwhitehornmoea64_copy_page(mmu_t mmu, vm_page_t msrc, vm_page_t mdst)
1104190681Snwhitehorn{
1105190681Snwhitehorn	vm_offset_t	dst;
1106190681Snwhitehorn	vm_offset_t	src;
1107190681Snwhitehorn
1108190681Snwhitehorn	dst = VM_PAGE_TO_PHYS(mdst);
1109190681Snwhitehorn	src = VM_PAGE_TO_PHYS(msrc);
1110190681Snwhitehorn
1111209975Snwhitehorn	if (hw_direct_map) {
1112234156Snwhitehorn		bcopy((void *)src, (void *)dst, PAGE_SIZE);
1113209975Snwhitehorn	} else {
1114209975Snwhitehorn		mtx_lock(&moea64_scratchpage_mtx);
1115190681Snwhitehorn
1116216174Snwhitehorn		moea64_set_scratchpage_pa(mmu, 0, src);
1117216174Snwhitehorn		moea64_set_scratchpage_pa(mmu, 1, dst);
1118190681Snwhitehorn
1119234156Snwhitehorn		bcopy((void *)moea64_scratchpage_va[0],
1120209975Snwhitehorn		    (void *)moea64_scratchpage_va[1], PAGE_SIZE);
1121190681Snwhitehorn
1122209975Snwhitehorn		mtx_unlock(&moea64_scratchpage_mtx);
1123209975Snwhitehorn	}
1124190681Snwhitehorn}
1125190681Snwhitehorn
1126248280Skibstatic inline void
1127248280Skibmoea64_copy_pages_dmap(mmu_t mmu, vm_page_t *ma, vm_offset_t a_offset,
1128248280Skib    vm_page_t *mb, vm_offset_t b_offset, int xfersize)
1129248280Skib{
1130248280Skib	void *a_cp, *b_cp;
1131248280Skib	vm_offset_t a_pg_offset, b_pg_offset;
1132248280Skib	int cnt;
1133248280Skib
1134248280Skib	while (xfersize > 0) {
1135248280Skib		a_pg_offset = a_offset & PAGE_MASK;
1136248280Skib		cnt = min(xfersize, PAGE_SIZE - a_pg_offset);
1137248280Skib		a_cp = (char *)VM_PAGE_TO_PHYS(ma[a_offset >> PAGE_SHIFT]) +
1138248280Skib		    a_pg_offset;
1139248280Skib		b_pg_offset = b_offset & PAGE_MASK;
1140248280Skib		cnt = min(cnt, PAGE_SIZE - b_pg_offset);
1141248280Skib		b_cp = (char *)VM_PAGE_TO_PHYS(mb[b_offset >> PAGE_SHIFT]) +
1142248280Skib		    b_pg_offset;
1143248280Skib		bcopy(a_cp, b_cp, cnt);
1144248280Skib		a_offset += cnt;
1145248280Skib		b_offset += cnt;
1146248280Skib		xfersize -= cnt;
1147248280Skib	}
1148248280Skib}
1149248280Skib
1150248280Skibstatic inline void
1151248280Skibmoea64_copy_pages_nodmap(mmu_t mmu, vm_page_t *ma, vm_offset_t a_offset,
1152248280Skib    vm_page_t *mb, vm_offset_t b_offset, int xfersize)
1153248280Skib{
1154248280Skib	void *a_cp, *b_cp;
1155248280Skib	vm_offset_t a_pg_offset, b_pg_offset;
1156248280Skib	int cnt;
1157248280Skib
1158248280Skib	mtx_lock(&moea64_scratchpage_mtx);
1159248280Skib	while (xfersize > 0) {
1160248280Skib		a_pg_offset = a_offset & PAGE_MASK;
1161248280Skib		cnt = min(xfersize, PAGE_SIZE - a_pg_offset);
1162248280Skib		moea64_set_scratchpage_pa(mmu, 0,
1163248280Skib		    VM_PAGE_TO_PHYS(ma[a_offset >> PAGE_SHIFT]));
1164248280Skib		a_cp = (char *)moea64_scratchpage_va[0] + a_pg_offset;
1165248280Skib		b_pg_offset = b_offset & PAGE_MASK;
1166248280Skib		cnt = min(cnt, PAGE_SIZE - b_pg_offset);
1167248280Skib		moea64_set_scratchpage_pa(mmu, 1,
1168248280Skib		    VM_PAGE_TO_PHYS(mb[b_offset >> PAGE_SHIFT]));
1169248280Skib		b_cp = (char *)moea64_scratchpage_va[1] + b_pg_offset;
1170248280Skib		bcopy(a_cp, b_cp, cnt);
1171248280Skib		a_offset += cnt;
1172248280Skib		b_offset += cnt;
1173248280Skib		xfersize -= cnt;
1174248280Skib	}
1175248280Skib	mtx_unlock(&moea64_scratchpage_mtx);
1176248280Skib}
1177248280Skib
1178190681Snwhitehornvoid
1179248280Skibmoea64_copy_pages(mmu_t mmu, vm_page_t *ma, vm_offset_t a_offset,
1180248280Skib    vm_page_t *mb, vm_offset_t b_offset, int xfersize)
1181248280Skib{
1182248280Skib
1183248280Skib	if (hw_direct_map) {
1184248280Skib		moea64_copy_pages_dmap(mmu, ma, a_offset, mb, b_offset,
1185248280Skib		    xfersize);
1186248280Skib	} else {
1187248280Skib		moea64_copy_pages_nodmap(mmu, ma, a_offset, mb, b_offset,
1188248280Skib		    xfersize);
1189248280Skib	}
1190248280Skib}
1191248280Skib
1192248280Skibvoid
1193190681Snwhitehornmoea64_zero_page_area(mmu_t mmu, vm_page_t m, int off, int size)
1194190681Snwhitehorn{
1195285148Sjhibbits	vm_paddr_t pa = VM_PAGE_TO_PHYS(m);
1196190681Snwhitehorn
1197190681Snwhitehorn	if (size + off > PAGE_SIZE)
1198190681Snwhitehorn		panic("moea64_zero_page: size + off > PAGE_SIZE");
1199190681Snwhitehorn
1200209975Snwhitehorn	if (hw_direct_map) {
1201209975Snwhitehorn		bzero((caddr_t)pa + off, size);
1202209975Snwhitehorn	} else {
1203209975Snwhitehorn		mtx_lock(&moea64_scratchpage_mtx);
1204216174Snwhitehorn		moea64_set_scratchpage_pa(mmu, 0, pa);
1205209975Snwhitehorn		bzero((caddr_t)moea64_scratchpage_va[0] + off, size);
1206209975Snwhitehorn		mtx_unlock(&moea64_scratchpage_mtx);
1207209975Snwhitehorn	}
1208190681Snwhitehorn}
1209190681Snwhitehorn
1210204269Snwhitehorn/*
1211204269Snwhitehorn * Zero a page of physical memory by temporarily mapping it
1212204269Snwhitehorn */
1213190681Snwhitehornvoid
1214204269Snwhitehornmoea64_zero_page(mmu_t mmu, vm_page_t m)
1215204269Snwhitehorn{
1216285148Sjhibbits	vm_paddr_t pa = VM_PAGE_TO_PHYS(m);
1217209975Snwhitehorn	vm_offset_t va, off;
1218204269Snwhitehorn
1219209975Snwhitehorn	if (!hw_direct_map) {
1220209975Snwhitehorn		mtx_lock(&moea64_scratchpage_mtx);
1221204269Snwhitehorn
1222216174Snwhitehorn		moea64_set_scratchpage_pa(mmu, 0, pa);
1223209975Snwhitehorn		va = moea64_scratchpage_va[0];
1224209975Snwhitehorn	} else {
1225209975Snwhitehorn		va = pa;
1226209975Snwhitehorn	}
1227209975Snwhitehorn
1228204269Snwhitehorn	for (off = 0; off < PAGE_SIZE; off += cacheline_size)
1229209975Snwhitehorn		__asm __volatile("dcbz 0,%0" :: "r"(va + off));
1230209975Snwhitehorn
1231209975Snwhitehorn	if (!hw_direct_map)
1232209975Snwhitehorn		mtx_unlock(&moea64_scratchpage_mtx);
1233204269Snwhitehorn}
1234204269Snwhitehorn
1235204269Snwhitehornvoid
1236190681Snwhitehornmoea64_zero_page_idle(mmu_t mmu, vm_page_t m)
1237190681Snwhitehorn{
1238190681Snwhitehorn
1239190681Snwhitehorn	moea64_zero_page(mmu, m);
1240190681Snwhitehorn}
1241190681Snwhitehorn
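/*
 * Establish a short-lived per-CPU kernel mapping for an arbitrary page
 * and return its virtual address; with a hardware direct map the
 * physical address is returned unchanged.  The thread stays pinned and
 * the per-CPU qmap lock stays held until moea64_quick_remove_page() is
 * called, so callers must not sleep in between.  A minimal
 * (hypothetical) usage sketch:
 *
 *	vm_offset_t qva;
 *
 *	qva = moea64_quick_enter_page(mmu, m);
 *	bcopy((void *)qva, buf, PAGE_SIZE);
 *	moea64_quick_remove_page(mmu, qva);
 */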
1242286296Sjahvm_offset_t
1243286296Sjahmoea64_quick_enter_page(mmu_t mmu, vm_page_t m)
1244286296Sjah{
1245286296Sjah	struct pvo_entry *pvo;
1246286296Sjah	vm_paddr_t pa = VM_PAGE_TO_PHYS(m);
1247286296Sjah
1248286296Sjah	if (hw_direct_map)
1249286296Sjah		return (pa);
1250286296Sjah
1251286296Sjah	/*
1252286296Sjah	 * MOEA64_PTE_REPLACE does some locking, so we can't just grab
1253286296Sjah	 * a critical section and access the PCPU data like on i386.
1254286296Sjah	 * Instead, pin the thread and grab the PCPU lock to prevent
1255286296Sjah	 * a preempting thread from using the same PCPU data.
1256286296Sjah	 */
1257286296Sjah	sched_pin();
1258286296Sjah
1259286296Sjah	mtx_assert(PCPU_PTR(qmap_lock), MA_NOTOWNED);
1260286296Sjah	pvo = PCPU_GET(qmap_pvo);
1261286296Sjah
1262286296Sjah	mtx_lock(PCPU_PTR(qmap_lock));
1263286296Sjah	pvo->pvo_pte.pa = moea64_calc_wimg(pa, pmap_page_get_memattr(m)) |
1264286296Sjah	    (uint64_t)pa;
1265286296Sjah	MOEA64_PTE_REPLACE(mmu, pvo, MOEA64_PTE_INVALIDATE);
1266286296Sjah	isync();
1267286296Sjah
1268286296Sjah	return (PCPU_GET(qmap_addr));
1269286296Sjah}
1270286296Sjah
1271286296Sjahvoid
1272286296Sjahmoea64_quick_remove_page(mmu_t mmu, vm_offset_t addr)
1273286296Sjah{
1274286296Sjah	if (hw_direct_map)
1275286296Sjah		return;
1276286296Sjah
1277286296Sjah	mtx_assert(PCPU_PTR(qmap_lock), MA_OWNED);
1278286296Sjah	KASSERT(PCPU_GET(qmap_addr) == addr,
1279286296Sjah	    ("moea64_quick_remove_page: invalid address"));
1280286296Sjah	mtx_unlock(PCPU_PTR(qmap_lock));
1281286296Sjah	sched_unpin();
1282286296Sjah}
1283286296Sjah
1284190681Snwhitehorn/*
1285190681Snwhitehorn * Map the given physical page at the specified virtual address in the
1286190681Snwhitehorn * target pmap with the protection requested.  If specified the page
1287190681Snwhitehorn * will be wired down.
1288190681Snwhitehorn */
1289233957Snwhitehorn
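/*
 * Returns KERN_SUCCESS once the mapping is established.  If PVO
 * insertion fails with ENOMEM and PMAP_ENTER_NOSLEEP was requested,
 * KERN_RESOURCE_SHORTAGE is returned instead; otherwise the routine
 * waits for free pages and retries.
 */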
1290269728Skibint
1291190681Snwhitehornmoea64_enter(mmu_t mmu, pmap_t pmap, vm_offset_t va, vm_page_t m,
1292269728Skib    vm_prot_t prot, u_int flags, int8_t psind)
1293190681Snwhitehorn{
1294279252Snwhitehorn	struct		pvo_entry *pvo, *oldpvo;
1295190681Snwhitehorn	struct		pvo_head *pvo_head;
1296190681Snwhitehorn	uint64_t	pte_lo;
1297190681Snwhitehorn	int		error;
1298190681Snwhitehorn
1299269388Salc	if ((m->oflags & VPO_UNMANAGED) == 0 && !vm_page_xbusied(m))
1300269388Salc		VM_OBJECT_ASSERT_LOCKED(m->object);
1301269388Salc
1302279252Snwhitehorn	pvo = alloc_pvo_entry(0);
1303279252Snwhitehorn	pvo->pvo_pmap = NULL; /* to be filled in later */
1304279252Snwhitehorn	pvo->pvo_pte.prot = prot;
1305279252Snwhitehorn
1306279252Snwhitehorn	pte_lo = moea64_calc_wimg(VM_PAGE_TO_PHYS(m), pmap_page_get_memattr(m));
1307279252Snwhitehorn	pvo->pvo_pte.pa = VM_PAGE_TO_PHYS(m) | pte_lo;
1308279252Snwhitehorn
1309279252Snwhitehorn	if ((flags & PMAP_ENTER_WIRED) != 0)
1310279252Snwhitehorn		pvo->pvo_vaddr |= PVO_WIRED;
1311279252Snwhitehorn
1312269388Salc	if ((m->oflags & VPO_UNMANAGED) != 0 || !moea64_initialized) {
1313235689Snwhitehorn		pvo_head = NULL;
1314190681Snwhitehorn	} else {
1315279252Snwhitehorn		pvo_head = &m->md.mdpg_pvoh;
1316279252Snwhitehorn		pvo->pvo_vaddr |= PVO_MANAGED;
1317190681Snwhitehorn	}
1318279252Snwhitehorn
1319279252Snwhitehorn	for (;;) {
1320279252Snwhitehorn		PV_PAGE_LOCK(m);
1321279252Snwhitehorn		PMAP_LOCK(pmap);
1322279252Snwhitehorn		if (pvo->pvo_pmap == NULL)
1323279252Snwhitehorn			init_pvo_entry(pvo, pmap, va);
1324279252Snwhitehorn		if (prot & VM_PROT_WRITE)
1325279252Snwhitehorn			if (pmap_bootstrapped &&
1326279252Snwhitehorn			    (m->oflags & VPO_UNMANAGED) == 0)
1327279252Snwhitehorn				vm_page_aflag_set(m, PGA_WRITEABLE);
1328190681Snwhitehorn
1329279252Snwhitehorn		oldpvo = moea64_pvo_find_va(pmap, va);
1330279252Snwhitehorn		if (oldpvo != NULL) {
1331279252Snwhitehorn			if (oldpvo->pvo_vaddr == pvo->pvo_vaddr &&
1332279252Snwhitehorn			    oldpvo->pvo_pte.pa == pvo->pvo_pte.pa &&
1333279252Snwhitehorn			    oldpvo->pvo_pte.prot == prot) {
1334279252Snwhitehorn				/* Identical mapping already exists */
1335279252Snwhitehorn				error = 0;
1336190681Snwhitehorn
1337279252Snwhitehorn				/* If not in page table, reinsert it */
1338279252Snwhitehorn				if (MOEA64_PTE_SYNCH(mmu, oldpvo) < 0) {
1339279252Snwhitehorn					moea64_pte_overflow--;
1340279252Snwhitehorn					MOEA64_PTE_INSERT(mmu, oldpvo);
1341279252Snwhitehorn				}
1342190681Snwhitehorn
1343279252Snwhitehorn				/* Then just clean up and go home */
1344279252Snwhitehorn				PV_PAGE_UNLOCK(m);
1345279252Snwhitehorn				PMAP_UNLOCK(pmap);
1346279252Snwhitehorn				free_pvo_entry(pvo);
1347279252Snwhitehorn				break;
1348279252Snwhitehorn			}
1349190681Snwhitehorn
1350279252Snwhitehorn			/* Otherwise, need to kill it first */
1351279252Snwhitehorn			KASSERT(oldpvo->pvo_pmap == pmap, ("pmap of old "
1352279252Snwhitehorn			    "mapping does not match new mapping"));
1353279252Snwhitehorn			moea64_pvo_remove_from_pmap(mmu, oldpvo);
1354279252Snwhitehorn		}
1355279252Snwhitehorn		error = moea64_pvo_enter(mmu, pvo, pvo_head);
1356279252Snwhitehorn		PV_PAGE_UNLOCK(m);
1357279252Snwhitehorn		PMAP_UNLOCK(pmap);
1358190681Snwhitehorn
1359279252Snwhitehorn		/* Free any dead pages */
1360279252Snwhitehorn		if (oldpvo != NULL) {
1361279252Snwhitehorn			PV_LOCK(oldpvo->pvo_pte.pa & LPTE_RPGN);
1362279252Snwhitehorn			moea64_pvo_remove_from_page(mmu, oldpvo);
1363279252Snwhitehorn			PV_UNLOCK(oldpvo->pvo_pte.pa & LPTE_RPGN);
1364279252Snwhitehorn			free_pvo_entry(oldpvo);
1365279252Snwhitehorn		}
1366279252Snwhitehorn
1367269728Skib		if (error != ENOMEM)
1368269728Skib			break;
1369269728Skib		if ((flags & PMAP_ENTER_NOSLEEP) != 0)
1370269728Skib			return (KERN_RESOURCE_SHORTAGE);
1371269728Skib		VM_OBJECT_ASSERT_UNLOCKED(m->object);
1372269728Skib		VM_WAIT;
1373269728Skib	}
1374190681Snwhitehorn
1375190681Snwhitehorn	/*
1376190681Snwhitehorn	 * Flush the page from the instruction cache if this page is
1377190681Snwhitehorn	 * mapped executable and cacheable.
1378190681Snwhitehorn	 */
1379233949Snwhitehorn	if (pmap != kernel_pmap && !(m->aflags & PGA_EXECUTABLE) &&
1380233949Snwhitehorn	    (pte_lo & (LPTE_I | LPTE_G | LPTE_NOEXEC)) == 0) {
1381233949Snwhitehorn		vm_page_aflag_set(m, PGA_EXECUTABLE);
1382216174Snwhitehorn		moea64_syncicache(mmu, pmap, va, VM_PAGE_TO_PHYS(m), PAGE_SIZE);
1383233949Snwhitehorn	}
1384269728Skib	return (KERN_SUCCESS);
1385190681Snwhitehorn}
1386190681Snwhitehorn
1387190681Snwhitehornstatic void
1388285148Sjhibbitsmoea64_syncicache(mmu_t mmu, pmap_t pmap, vm_offset_t va, vm_paddr_t pa,
1389216174Snwhitehorn    vm_size_t sz)
1390190681Snwhitehorn{
1391204042Snwhitehorn
1392190681Snwhitehorn	/*
1393190681Snwhitehorn	 * This is much trickier than on older systems because
1394190681Snwhitehorn	 * we can't sync the icache on physical addresses directly
1395190681Snwhitehorn	 * without a direct map. Instead we check a couple of cases
1396190681Snwhitehorn	 * where the memory is already mapped in and, failing that,
1397190681Snwhitehorn	 * use the same trick we use for page zeroing to create
1398190681Snwhitehorn	 * a temporary mapping for this physical address.
1399190681Snwhitehorn	 */
1400190681Snwhitehorn
1401190681Snwhitehorn	if (!pmap_bootstrapped) {
1402190681Snwhitehorn		/*
1403190681Snwhitehorn		 * If PMAP is not bootstrapped, we are likely to be
1404190681Snwhitehorn		 * in real mode.
1405190681Snwhitehorn		 */
1406198341Smarcel		__syncicache((void *)pa, sz);
1407190681Snwhitehorn	} else if (pmap == kernel_pmap) {
1408198341Smarcel		__syncicache((void *)va, sz);
1409209975Snwhitehorn	} else if (hw_direct_map) {
1410209975Snwhitehorn		__syncicache((void *)pa, sz);
1411190681Snwhitehorn	} else {
1412190681Snwhitehorn		/* Use the scratch page to set up a temp mapping */
1413190681Snwhitehorn
1414190681Snwhitehorn		mtx_lock(&moea64_scratchpage_mtx);
1415190681Snwhitehorn
1416216174Snwhitehorn		moea64_set_scratchpage_pa(mmu, 1, pa & ~ADDR_POFF);
1417204042Snwhitehorn		__syncicache((void *)(moea64_scratchpage_va[1] +
1418204042Snwhitehorn		    (va & ADDR_POFF)), sz);
1419190681Snwhitehorn
1420190681Snwhitehorn		mtx_unlock(&moea64_scratchpage_mtx);
1421190681Snwhitehorn	}
1422190681Snwhitehorn}
1423190681Snwhitehorn
1424190681Snwhitehorn/*
1425190681Snwhitehorn * Maps a sequence of resident pages belonging to the same object.
1426190681Snwhitehorn * The sequence begins with the given page m_start.  This page is
1427190681Snwhitehorn * mapped at the given virtual address start.  Each subsequent page is
1428190681Snwhitehorn * mapped at a virtual address that is offset from start by the same
1429190681Snwhitehorn * amount as the page is offset from m_start within the object.  The
1430190681Snwhitehorn * last page in the sequence is the page with the largest offset from
1431190681Snwhitehorn * m_start that can be mapped at a virtual address less than the given
1432190681Snwhitehorn * virtual address end.  Not every virtual page between start and end
1433190681Snwhitehorn * is mapped; only those for which a resident page exists with the
1434190681Snwhitehorn * corresponding offset from m_start are mapped.
1435190681Snwhitehorn */
1436190681Snwhitehornvoid
1437190681Snwhitehornmoea64_enter_object(mmu_t mmu, pmap_t pm, vm_offset_t start, vm_offset_t end,
1438190681Snwhitehorn    vm_page_t m_start, vm_prot_t prot)
1439190681Snwhitehorn{
1440190681Snwhitehorn	vm_page_t m;
1441190681Snwhitehorn	vm_pindex_t diff, psize;
1442190681Snwhitehorn
1443250884Sattilio	VM_OBJECT_ASSERT_LOCKED(m_start->object);
1444250884Sattilio
1445190681Snwhitehorn	psize = atop(end - start);
1446190681Snwhitehorn	m = m_start;
1447190681Snwhitehorn	while (m != NULL && (diff = m->pindex - m_start->pindex) < psize) {
1448233957Snwhitehorn		moea64_enter(mmu, pm, start + ptoa(diff), m, prot &
1449269728Skib		    (VM_PROT_READ | VM_PROT_EXECUTE), PMAP_ENTER_NOSLEEP, 0);
1450190681Snwhitehorn		m = TAILQ_NEXT(m, listq);
1451190681Snwhitehorn	}
1452190681Snwhitehorn}
1453190681Snwhitehorn
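/*
 * Enter a read- and/or execute-only mapping for a single page without
 * sleeping.  Used for speculative prefaulting, where failing to enter
 * the mapping is acceptable.
 */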
1454190681Snwhitehornvoid
1455190681Snwhitehornmoea64_enter_quick(mmu_t mmu, pmap_t pm, vm_offset_t va, vm_page_t m,
1456190681Snwhitehorn    vm_prot_t prot)
1457190681Snwhitehorn{
1458207796Salc
1459269728Skib	moea64_enter(mmu, pm, va, m, prot & (VM_PROT_READ | VM_PROT_EXECUTE),
1460269728Skib	    PMAP_ENTER_NOSLEEP, 0);
1461190681Snwhitehorn}
1462190681Snwhitehorn
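/*
 * Extract the physical address associated with the given virtual
 * address in the given pmap, or 0 if the address is not mapped.
 */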
1463190681Snwhitehornvm_paddr_t
1464190681Snwhitehornmoea64_extract(mmu_t mmu, pmap_t pm, vm_offset_t va)
1465190681Snwhitehorn{
1466190681Snwhitehorn	struct	pvo_entry *pvo;
1467190681Snwhitehorn	vm_paddr_t pa;
1468190681Snwhitehorn
1469190681Snwhitehorn	PMAP_LOCK(pm);
1470209975Snwhitehorn	pvo = moea64_pvo_find_va(pm, va);
1471190681Snwhitehorn	if (pvo == NULL)
1472190681Snwhitehorn		pa = 0;
1473190681Snwhitehorn	else
1474279252Snwhitehorn		pa = (pvo->pvo_pte.pa & LPTE_RPGN) | (va - PVO_VADDR(pvo));
1475190681Snwhitehorn	PMAP_UNLOCK(pm);
1476279252Snwhitehorn
1477190681Snwhitehorn	return (pa);
1478190681Snwhitehorn}
1479190681Snwhitehorn
1480190681Snwhitehorn/*
1481190681Snwhitehorn * Atomically extract and hold the physical page with the given
1482190681Snwhitehorn * pmap and virtual address pair if that mapping permits the given
1483190681Snwhitehorn * protection.
1484190681Snwhitehorn */
1485190681Snwhitehornvm_page_t
1486190681Snwhitehornmoea64_extract_and_hold(mmu_t mmu, pmap_t pmap, vm_offset_t va, vm_prot_t prot)
1487190681Snwhitehorn{
1488190681Snwhitehorn	struct	pvo_entry *pvo;
1489190681Snwhitehorn	vm_page_t m;
1490207410Skmacy	vm_paddr_t pa;
1491190681Snwhitehorn
1492190681Snwhitehorn	m = NULL;
1493207410Skmacy	pa = 0;
1494190681Snwhitehorn	PMAP_LOCK(pmap);
1495207410Skmacyretry:
1496209975Snwhitehorn	pvo = moea64_pvo_find_va(pmap, va & ~ADDR_POFF);
1497279252Snwhitehorn	if (pvo != NULL && (pvo->pvo_pte.prot & prot) == prot) {
1498235689Snwhitehorn		if (vm_page_pa_tryrelock(pmap,
1499279252Snwhitehorn		    pvo->pvo_pte.pa & LPTE_RPGN, &pa))
1500207410Skmacy			goto retry;
1501279252Snwhitehorn		m = PHYS_TO_VM_PAGE(pvo->pvo_pte.pa & LPTE_RPGN);
1502190681Snwhitehorn		vm_page_hold(m);
1503190681Snwhitehorn	}
1504207410Skmacy	PA_UNLOCK_COND(pa);
1505190681Snwhitehorn	PMAP_UNLOCK(pmap);
1506190681Snwhitehorn	return (m);
1507190681Snwhitehorn}
1508190681Snwhitehorn
1509216174Snwhitehornstatic mmu_t installed_mmu;
1510216174Snwhitehorn
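/*
 * Back-end page allocator for the PVO UMA zone on systems without a
 * direct map: allocate a physical page and hand-roll a wired 1:1
 * kernel mapping for it, bypassing kmem entirely.
 */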
1511190681Snwhitehornstatic void *
1512280957Srstonemoea64_uma_page_alloc(uma_zone_t zone, vm_size_t bytes, uint8_t *flags,
1513280957Srstone    int wait)
1514190681Snwhitehorn{
1515279252Snwhitehorn	struct pvo_entry *pvo;
1516279252Snwhitehorn	vm_offset_t va;
1517279252Snwhitehorn	vm_page_t m;
1518327785Smarkj	int needed_lock;
1519279252Snwhitehorn
1520190681Snwhitehorn	/*
1521190681Snwhitehorn	 * This entire routine is a horrible hack to avoid bothering kmem
1522190681Snwhitehorn	 * for new KVA addresses. Because this can get called from inside
1523190681Snwhitehorn	 * kmem allocation routines, calling kmem for a new address here
1524190681Snwhitehorn	 * can lead to multiply locking non-recursive mutexes.
1525190681Snwhitehorn	 */
1526190681Snwhitehorn
1527190681Snwhitehorn	*flags = UMA_SLAB_PRIV;
1528190681Snwhitehorn	needed_lock = !PMAP_LOCKED(kernel_pmap);
1529190681Snwhitehorn
1530327785Smarkj	m = vm_page_alloc(NULL, 0,
1531327785Smarkj	    malloc2vm_flags(wait) | VM_ALLOC_WIRED | VM_ALLOC_NOOBJ);
1532327785Smarkj	if (m == NULL)
1533327785Smarkj		return (NULL);
1534190681Snwhitehorn
1535204128Snwhitehorn	va = VM_PAGE_TO_PHYS(m);
1536190681Snwhitehorn
1537279252Snwhitehorn	pvo = alloc_pvo_entry(1 /* bootstrap */);
1538279252Snwhitehorn
1539279252Snwhitehorn	pvo->pvo_pte.prot = VM_PROT_READ | VM_PROT_WRITE;
1540279252Snwhitehorn	pvo->pvo_pte.pa = VM_PAGE_TO_PHYS(m) | LPTE_M;
1541279252Snwhitehorn
1542233529Snwhitehorn	if (needed_lock)
1543233529Snwhitehorn		PMAP_LOCK(kernel_pmap);
1544233529Snwhitehorn
1545279252Snwhitehorn	init_pvo_entry(pvo, kernel_pmap, va);
1546279252Snwhitehorn	pvo->pvo_vaddr |= PVO_WIRED;
1547190681Snwhitehorn
1548279252Snwhitehorn	moea64_pvo_enter(installed_mmu, pvo, NULL);
1549279252Snwhitehorn
1550190681Snwhitehorn	if (needed_lock)
1551190681Snwhitehorn		PMAP_UNLOCK(kernel_pmap);
1552198378Snwhitehorn
1553190681Snwhitehorn	if ((wait & M_ZERO) && (m->flags & PG_ZERO) == 0)
1554190681Snwhitehorn                bzero((void *)va, PAGE_SIZE);
1555190681Snwhitehorn
1556190681Snwhitehorn	return (void *)va;
1557190681Snwhitehorn}
1558190681Snwhitehorn
1559230767Skibextern int elf32_nxstack;
1560230767Skib
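/*
 * Late pmap initialization: create the UMA zone used for PVO entries
 * and, if there is no direct map, install the custom page allocator
 * for it.
 */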
1561190681Snwhitehornvoid
1562190681Snwhitehornmoea64_init(mmu_t mmu)
1563190681Snwhitehorn{
1564190681Snwhitehorn
1565190681Snwhitehorn	CTR0(KTR_PMAP, "moea64_init");
1566190681Snwhitehorn
1567279252Snwhitehorn	moea64_pvo_zone = uma_zcreate("UPVO entry", sizeof (struct pvo_entry),
1568190681Snwhitehorn	    NULL, NULL, NULL, NULL, UMA_ALIGN_PTR,
1569190681Snwhitehorn	    UMA_ZONE_VM | UMA_ZONE_NOFREE);
1570190681Snwhitehorn
1571190681Snwhitehorn	if (!hw_direct_map) {
1572216174Snwhitehorn		installed_mmu = mmu;
1573279252Snwhitehorn		uma_zone_set_allocf(moea64_pvo_zone, moea64_uma_page_alloc);
1574190681Snwhitehorn	}
1575190681Snwhitehorn
1576230779Skib#ifdef COMPAT_FREEBSD32
1577230767Skib	elf32_nxstack = 1;
1578230779Skib#endif
1579230767Skib
1580190681Snwhitehorn	moea64_initialized = TRUE;
1581190681Snwhitehorn}
1582190681Snwhitehorn
1583190681Snwhitehornboolean_t
1584207155Salcmoea64_is_referenced(mmu_t mmu, vm_page_t m)
1585207155Salc{
1586207155Salc
1587224746Skib	KASSERT((m->oflags & VPO_UNMANAGED) == 0,
1588208574Salc	    ("moea64_is_referenced: page %p is not managed", m));
1589279252Snwhitehorn
1590279252Snwhitehorn	return (moea64_query_bit(mmu, m, LPTE_REF));
1591207155Salc}
1592207155Salc
1593207155Salcboolean_t
1594190681Snwhitehornmoea64_is_modified(mmu_t mmu, vm_page_t m)
1595190681Snwhitehorn{
1596190681Snwhitehorn
1597224746Skib	KASSERT((m->oflags & VPO_UNMANAGED) == 0,
1598208504Salc	    ("moea64_is_modified: page %p is not managed", m));
1599208504Salc
1600208504Salc	/*
1601254138Sattilio	 * If the page is not exclusive busied, then PGA_WRITEABLE cannot be
1602225418Skib	 * concurrently set while the object is locked.  Thus, if PGA_WRITEABLE
1603208504Salc	 * is clear, no PTEs can have LPTE_CHG set.
1604208504Salc	 */
1605255503Snwhitehorn	VM_OBJECT_ASSERT_LOCKED(m->object);
1606254138Sattilio	if (!vm_page_xbusied(m) && (m->aflags & PGA_WRITEABLE) == 0)
1607190681Snwhitehorn		return (FALSE);
1608216174Snwhitehorn	return (moea64_query_bit(mmu, m, LPTE_CHG));
1609190681Snwhitehorn}
1610190681Snwhitehorn
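/*
 * Return TRUE if no mapping currently exists at the given address,
 * i.e. the address is a candidate for prefaulting.
 */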
1611214617Salcboolean_t
1612214617Salcmoea64_is_prefaultable(mmu_t mmu, pmap_t pmap, vm_offset_t va)
1613214617Salc{
1614214617Salc	struct pvo_entry *pvo;
1615279252Snwhitehorn	boolean_t rv = TRUE;
1616214617Salc
1617214617Salc	PMAP_LOCK(pmap);
1618214617Salc	pvo = moea64_pvo_find_va(pmap, va & ~ADDR_POFF);
1619279252Snwhitehorn	if (pvo != NULL)
1620279252Snwhitehorn		rv = FALSE;
1621214617Salc	PMAP_UNLOCK(pmap);
1622214617Salc	return (rv);
1623214617Salc}
1624214617Salc
1625190681Snwhitehornvoid
1626190681Snwhitehornmoea64_clear_modify(mmu_t mmu, vm_page_t m)
1627190681Snwhitehorn{
1628190681Snwhitehorn
1629224746Skib	KASSERT((m->oflags & VPO_UNMANAGED) == 0,
1630208504Salc	    ("moea64_clear_modify: page %p is not managed", m));
1631248084Sattilio	VM_OBJECT_ASSERT_WLOCKED(m->object);
1632254138Sattilio	KASSERT(!vm_page_xbusied(m),
1633254138Sattilio	    ("moea64_clear_modify: page %p is exclusive busied", m));
1634208504Salc
1635208504Salc	/*
1636225418Skib	 * If the page is not PGA_WRITEABLE, then no PTEs can have LPTE_CHG
1637208504Salc	 * set.  If the object containing the page is locked and the page is
1638254138Sattilio	 * not exclusive busied, then PGA_WRITEABLE cannot be concurrently set.
1639208504Salc	 */
1640225418Skib	if ((m->aflags & PGA_WRITEABLE) == 0)
1641190681Snwhitehorn		return;
1642216174Snwhitehorn	moea64_clear_bit(mmu, m, LPTE_CHG);
1643190681Snwhitehorn}
1644190681Snwhitehorn
1645190681Snwhitehorn/*
1646190681Snwhitehorn * Clear the write and modified bits in each of the given page's mappings.
1647190681Snwhitehorn */
1648190681Snwhitehornvoid
1649190681Snwhitehornmoea64_remove_write(mmu_t mmu, vm_page_t m)
1650190681Snwhitehorn{
1651190681Snwhitehorn	struct	pvo_entry *pvo;
1652279252Snwhitehorn	int64_t	refchg, ret;
1653190681Snwhitehorn	pmap_t	pmap;
1654190681Snwhitehorn
1655224746Skib	KASSERT((m->oflags & VPO_UNMANAGED) == 0,
1656208175Salc	    ("moea64_remove_write: page %p is not managed", m));
1657208175Salc
1658208175Salc	/*
1659254138Sattilio	 * If the page is not exclusive busied, then PGA_WRITEABLE cannot be
1660254138Sattilio	 * set by another thread while the object is locked.  Thus,
1661254138Sattilio	 * if PGA_WRITEABLE is clear, no page table entries need updating.
1662208175Salc	 */
1663248084Sattilio	VM_OBJECT_ASSERT_WLOCKED(m->object);
1664254138Sattilio	if (!vm_page_xbusied(m) && (m->aflags & PGA_WRITEABLE) == 0)
1665190681Snwhitehorn		return;
1666216174Snwhitehorn	powerpc_sync();
1667279252Snwhitehorn	PV_PAGE_LOCK(m);
1668279252Snwhitehorn	refchg = 0;
1669190681Snwhitehorn	LIST_FOREACH(pvo, vm_page_to_pvoh(m), pvo_vlink) {
1670190681Snwhitehorn		pmap = pvo->pvo_pmap;
1671190681Snwhitehorn		PMAP_LOCK(pmap);
1672279252Snwhitehorn		if (!(pvo->pvo_vaddr & PVO_DEAD) &&
1673279252Snwhitehorn		    (pvo->pvo_pte.prot & VM_PROT_WRITE)) {
1674279252Snwhitehorn			pvo->pvo_pte.prot &= ~VM_PROT_WRITE;
1675279252Snwhitehorn			ret = MOEA64_PTE_REPLACE(mmu, pvo,
1676279252Snwhitehorn			    MOEA64_PTE_PROT_UPDATE);
1677279252Snwhitehorn			if (ret < 0)
1678279252Snwhitehorn				ret = LPTE_CHG;
1679279252Snwhitehorn			refchg |= ret;
1680279252Snwhitehorn			if (pvo->pvo_pmap == kernel_pmap)
1681279252Snwhitehorn				isync();
1682190681Snwhitehorn		}
1683190681Snwhitehorn		PMAP_UNLOCK(pmap);
1684190681Snwhitehorn	}
1685279252Snwhitehorn	if ((refchg | atomic_readandclear_32(&m->md.mdpg_attrs)) & LPTE_CHG)
1686279252Snwhitehorn		vm_page_dirty(m);
1687225418Skib	vm_page_aflag_clear(m, PGA_WRITEABLE);
1688279252Snwhitehorn	PV_PAGE_UNLOCK(m);
1689190681Snwhitehorn}
1690190681Snwhitehorn
1691190681Snwhitehorn/*
1692190681Snwhitehorn *	moea64_ts_referenced:
1693190681Snwhitehorn *
1694190681Snwhitehorn *	Return a count of reference bits for a page, clearing those bits.
1695190681Snwhitehorn *	It is not necessary for every reference bit to be cleared, but it
1696190681Snwhitehorn *	is necessary that 0 only be returned when there are truly no
1697190681Snwhitehorn *	reference bits set.
1698190681Snwhitehorn *
1699190681Snwhitehorn *	XXX: The exact number of bits to check and clear is a matter that
1700190681Snwhitehorn *	should be tested and standardized at some point in the future for
1701190681Snwhitehorn *	optimal aging of shared pages.
1702190681Snwhitehorn */
1703238357Salcint
1704190681Snwhitehornmoea64_ts_referenced(mmu_t mmu, vm_page_t m)
1705190681Snwhitehorn{
1706190681Snwhitehorn
1707224746Skib	KASSERT((m->oflags & VPO_UNMANAGED) == 0,
1708208990Salc	    ("moea64_ts_referenced: page %p is not managed", m));
1709216174Snwhitehorn	return (moea64_clear_bit(mmu, m, LPTE_REF));
1710190681Snwhitehorn}
1711190681Snwhitehorn
1712190681Snwhitehorn/*
1713213307Snwhitehorn * Modify the WIMG settings of all mappings for a page.
1714213307Snwhitehorn */
1715213307Snwhitehornvoid
1716213307Snwhitehornmoea64_page_set_memattr(mmu_t mmu, vm_page_t m, vm_memattr_t ma)
1717213307Snwhitehorn{
1718213307Snwhitehorn	struct	pvo_entry *pvo;
1719279252Snwhitehorn	int64_t	refchg;
1720213307Snwhitehorn	pmap_t	pmap;
1721213307Snwhitehorn	uint64_t lo;
1722213307Snwhitehorn
1723224746Skib	if ((m->oflags & VPO_UNMANAGED) != 0) {
1724213335Snwhitehorn		m->md.mdpg_cache_attrs = ma;
1725213335Snwhitehorn		return;
1726213335Snwhitehorn	}
1727213335Snwhitehorn
1728213307Snwhitehorn	lo = moea64_calc_wimg(VM_PAGE_TO_PHYS(m), ma);
1729279252Snwhitehorn
1730279252Snwhitehorn	PV_PAGE_LOCK(m);
1731279252Snwhitehorn	LIST_FOREACH(pvo, vm_page_to_pvoh(m), pvo_vlink) {
1732213307Snwhitehorn		pmap = pvo->pvo_pmap;
1733213307Snwhitehorn		PMAP_LOCK(pmap);
1734279252Snwhitehorn		if (!(pvo->pvo_vaddr & PVO_DEAD)) {
1735279252Snwhitehorn			pvo->pvo_pte.pa &= ~LPTE_WIMG;
1736279252Snwhitehorn			pvo->pvo_pte.pa |= lo;
1737279252Snwhitehorn			refchg = MOEA64_PTE_REPLACE(mmu, pvo,
1738279252Snwhitehorn			    MOEA64_PTE_INVALIDATE);
1739279252Snwhitehorn			if (refchg < 0)
1740279252Snwhitehorn				refchg = (pvo->pvo_pte.prot & VM_PROT_WRITE) ?
1741279252Snwhitehorn				    LPTE_CHG : 0;
1742279252Snwhitehorn			if ((pvo->pvo_vaddr & PVO_MANAGED) &&
1743279252Snwhitehorn			    (pvo->pvo_pte.prot & VM_PROT_WRITE)) {
1744279252Snwhitehorn				refchg |=
1745279252Snwhitehorn				    atomic_readandclear_32(&m->md.mdpg_attrs);
1746279252Snwhitehorn				if (refchg & LPTE_CHG)
1747279252Snwhitehorn					vm_page_dirty(m);
1748279252Snwhitehorn				if (refchg & LPTE_REF)
1749279252Snwhitehorn					vm_page_aflag_set(m, PGA_REFERENCED);
1750279252Snwhitehorn			}
1751213307Snwhitehorn			if (pvo->pvo_pmap == kernel_pmap)
1752213307Snwhitehorn				isync();
1753213307Snwhitehorn		}
1754213307Snwhitehorn		PMAP_UNLOCK(pmap);
1755213307Snwhitehorn	}
1756213307Snwhitehorn	m->md.mdpg_cache_attrs = ma;
1757279252Snwhitehorn	PV_PAGE_UNLOCK(m);
1758213307Snwhitehorn}
1759213307Snwhitehorn
1760213307Snwhitehorn/*
1761190681Snwhitehorn * Map a wired page into kernel virtual address space.
1762190681Snwhitehorn */
1763190681Snwhitehornvoid
1764285148Sjhibbitsmoea64_kenter_attr(mmu_t mmu, vm_offset_t va, vm_paddr_t pa, vm_memattr_t ma)
1765190681Snwhitehorn{
1766190681Snwhitehorn	int		error;
1767279252Snwhitehorn	struct pvo_entry *pvo, *oldpvo;
1768190681Snwhitehorn
1769279252Snwhitehorn	pvo = alloc_pvo_entry(0);
1770279252Snwhitehorn	pvo->pvo_pte.prot = VM_PROT_READ | VM_PROT_WRITE | VM_PROT_EXECUTE;
1771279252Snwhitehorn	pvo->pvo_pte.pa = (pa & ~ADDR_POFF) | moea64_calc_wimg(pa, ma);
1772279252Snwhitehorn	pvo->pvo_vaddr |= PVO_WIRED;
1773190681Snwhitehorn
1774190681Snwhitehorn	PMAP_LOCK(kernel_pmap);
1775279252Snwhitehorn	oldpvo = moea64_pvo_find_va(kernel_pmap, va);
1776279252Snwhitehorn	if (oldpvo != NULL)
1777279252Snwhitehorn		moea64_pvo_remove_from_pmap(mmu, oldpvo);
1778279252Snwhitehorn	init_pvo_entry(pvo, kernel_pmap, va);
1779279252Snwhitehorn	error = moea64_pvo_enter(mmu, pvo, NULL);
1780233529Snwhitehorn	PMAP_UNLOCK(kernel_pmap);
1781190681Snwhitehorn
1782279252Snwhitehorn	/* Free any dead pages */
1783279252Snwhitehorn	if (oldpvo != NULL) {
1784279252Snwhitehorn		PV_LOCK(oldpvo->pvo_pte.pa & LPTE_RPGN);
1785279252Snwhitehorn		moea64_pvo_remove_from_page(mmu, oldpvo);
1786279252Snwhitehorn		PV_UNLOCK(oldpvo->pvo_pte.pa & LPTE_RPGN);
1787279252Snwhitehorn		free_pvo_entry(oldpvo);
1788279252Snwhitehorn	}
1789279252Snwhitehorn
1790190681Snwhitehorn	if (error != 0 && error != ENOENT)
1791209975Snwhitehorn		panic("moea64_kenter: failed to enter va %#zx pa %#zx: %d", va,
1792190681Snwhitehorn		    pa, error);
1793190681Snwhitehorn}
1794190681Snwhitehorn
1795213307Snwhitehornvoid
1796236019Srajmoea64_kenter(mmu_t mmu, vm_offset_t va, vm_paddr_t pa)
1797213307Snwhitehorn{
1798213307Snwhitehorn
1799213307Snwhitehorn	moea64_kenter_attr(mmu, va, pa, VM_MEMATTR_DEFAULT);
1800213307Snwhitehorn}
1801213307Snwhitehorn
1802190681Snwhitehorn/*
1803190681Snwhitehorn * Extract the physical page address associated with the given kernel virtual
1804190681Snwhitehorn * address.
1805190681Snwhitehorn */
1806236019Srajvm_paddr_t
1807190681Snwhitehornmoea64_kextract(mmu_t mmu, vm_offset_t va)
1808190681Snwhitehorn{
1809190681Snwhitehorn	struct		pvo_entry *pvo;
1810190681Snwhitehorn	vm_paddr_t pa;
1811190681Snwhitehorn
1812205370Snwhitehorn	/*
1813205370Snwhitehorn	 * Shortcut the direct-mapped case when applicable.  We never put
1814205370Snwhitehorn	 * anything but 1:1 mappings below VM_MIN_KERNEL_ADDRESS.
1815205370Snwhitehorn	 */
1816205370Snwhitehorn	if (va < VM_MIN_KERNEL_ADDRESS)
1817205370Snwhitehorn		return (va);
1818205370Snwhitehorn
1819190681Snwhitehorn	PMAP_LOCK(kernel_pmap);
1820209975Snwhitehorn	pvo = moea64_pvo_find_va(kernel_pmap, va);
1821209975Snwhitehorn	KASSERT(pvo != NULL, ("moea64_kextract: no addr found for %#" PRIxPTR,
1822209975Snwhitehorn	    va));
1823279252Snwhitehorn	pa = (pvo->pvo_pte.pa & LPTE_RPGN) | (va - PVO_VADDR(pvo));
1824190681Snwhitehorn	PMAP_UNLOCK(kernel_pmap);
1825190681Snwhitehorn	return (pa);
1826190681Snwhitehorn}
1827190681Snwhitehorn
1828190681Snwhitehorn/*
1829190681Snwhitehorn * Remove a wired page from kernel virtual address space.
1830190681Snwhitehorn */
1831190681Snwhitehornvoid
1832190681Snwhitehornmoea64_kremove(mmu_t mmu, vm_offset_t va)
1833190681Snwhitehorn{
1834190681Snwhitehorn	moea64_remove(mmu, kernel_pmap, va, va + PAGE_SIZE);
1835190681Snwhitehorn}
1836190681Snwhitehorn
1837190681Snwhitehorn/*
1838190681Snwhitehorn * Map a range of physical addresses into kernel virtual address space.
1839190681Snwhitehorn *
1840190681Snwhitehorn * The value passed in *virt is a suggested virtual address for the mapping.
1841190681Snwhitehorn * Architectures which can support a direct-mapped physical to virtual region
1842190681Snwhitehorn * can return the appropriate address within that region, leaving '*virt'
1843279252Snwhitehorn * unchanged.  Other architectures should map the pages starting at '*virt' and
1844279252Snwhitehorn * update '*virt' with the first usable address after the mapped region.
1845190681Snwhitehorn */
1846190681Snwhitehornvm_offset_t
1847236019Srajmoea64_map(mmu_t mmu, vm_offset_t *virt, vm_paddr_t pa_start,
1848236019Sraj    vm_paddr_t pa_end, int prot)
1849190681Snwhitehorn{
1850190681Snwhitehorn	vm_offset_t	sva, va;
1851190681Snwhitehorn
1852279252Snwhitehorn	if (hw_direct_map) {
1853279252Snwhitehorn		/*
1854279252Snwhitehorn		 * Check if every page in the region is covered by the direct
1855279252Snwhitehorn		 * map. The direct map covers all of physical memory. Use
1856279252Snwhitehorn		 * moea64_calc_wimg() as a shortcut to see if the page is in
1857279252Snwhitehorn		 * physical memory as a way to see if the direct map covers it.
1858279252Snwhitehorn		 */
1859279252Snwhitehorn		for (va = pa_start; va < pa_end; va += PAGE_SIZE)
1860279252Snwhitehorn			if (moea64_calc_wimg(va, VM_MEMATTR_DEFAULT) != LPTE_M)
1861279252Snwhitehorn				break;
1862279252Snwhitehorn		if (va == pa_end)
1863279252Snwhitehorn			return (pa_start);
1864279252Snwhitehorn	}
1865190681Snwhitehorn	sva = *virt;
1866190681Snwhitehorn	va = sva;
1867279252Snwhitehorn	/* XXX respect prot argument */
1868190681Snwhitehorn	for (; pa_start < pa_end; pa_start += PAGE_SIZE, va += PAGE_SIZE)
1869190681Snwhitehorn		moea64_kenter(mmu, va, pa_start);
1870190681Snwhitehorn	*virt = va;
1871190681Snwhitehorn
1872190681Snwhitehorn	return (sva);
1873190681Snwhitehorn}
1874190681Snwhitehorn
1875190681Snwhitehorn/*
1876190681Snwhitehorn * Returns true if the pmap's pv is one of the first
1877190681Snwhitehorn * 16 pvs linked to from this page.  This count may
1878190681Snwhitehorn * be changed upwards or downwards in the future; it
1879190681Snwhitehorn * is only necessary that true be returned for a small
1880190681Snwhitehorn * subset of pmaps for proper page aging.
1881190681Snwhitehorn */
1882190681Snwhitehornboolean_t
1883190681Snwhitehornmoea64_page_exists_quick(mmu_t mmu, pmap_t pmap, vm_page_t m)
1884190681Snwhitehorn{
1885190681Snwhitehorn	int loops;
1886190681Snwhitehorn	struct pvo_entry *pvo;
1887208990Salc	boolean_t rv;
1888190681Snwhitehorn
1889224746Skib	KASSERT((m->oflags & VPO_UNMANAGED) == 0,
1890208990Salc	    ("moea64_page_exists_quick: page %p is not managed", m));
1891190681Snwhitehorn	loops = 0;
1892208990Salc	rv = FALSE;
1893279252Snwhitehorn	PV_PAGE_LOCK(m);
1894190681Snwhitehorn	LIST_FOREACH(pvo, vm_page_to_pvoh(m), pvo_vlink) {
1895279252Snwhitehorn		if (!(pvo->pvo_vaddr & PVO_DEAD) && pvo->pvo_pmap == pmap) {
1896208990Salc			rv = TRUE;
1897208990Salc			break;
1898208990Salc		}
1899190681Snwhitehorn		if (++loops >= 16)
1900190681Snwhitehorn			break;
1901190681Snwhitehorn	}
1902279252Snwhitehorn	PV_PAGE_UNLOCK(m);
1903208990Salc	return (rv);
1904190681Snwhitehorn}
1905190681Snwhitehorn
1906323968Smarkjvoid
1907323968Smarkjmoea64_page_init(mmu_t mmu __unused, vm_page_t m)
1908323968Smarkj{
1909323968Smarkj
1910323968Smarkj	m->md.mdpg_attrs = 0;
1911323968Smarkj	m->md.mdpg_cache_attrs = VM_MEMATTR_DEFAULT;
1912323968Smarkj	LIST_INIT(&m->md.mdpg_pvoh);
1913323968Smarkj}
1914323968Smarkj
1915190681Snwhitehorn/*
1916190681Snwhitehorn * Return the number of managed mappings to the given physical page
1917190681Snwhitehorn * that are wired.
1918190681Snwhitehorn */
1919190681Snwhitehornint
1920190681Snwhitehornmoea64_page_wired_mappings(mmu_t mmu, vm_page_t m)
1921190681Snwhitehorn{
1922190681Snwhitehorn	struct pvo_entry *pvo;
1923190681Snwhitehorn	int count;
1924190681Snwhitehorn
1925190681Snwhitehorn	count = 0;
1926224746Skib	if ((m->oflags & VPO_UNMANAGED) != 0)
1927190681Snwhitehorn		return (count);
1928279252Snwhitehorn	PV_PAGE_LOCK(m);
1929190681Snwhitehorn	LIST_FOREACH(pvo, vm_page_to_pvoh(m), pvo_vlink)
1930279252Snwhitehorn		if ((pvo->pvo_vaddr & (PVO_DEAD | PVO_WIRED)) == PVO_WIRED)
1931190681Snwhitehorn			count++;
1932279252Snwhitehorn	PV_PAGE_UNLOCK(m);
1933190681Snwhitehorn	return (count);
1934190681Snwhitehorn}
1935190681Snwhitehorn
1936209975Snwhitehornstatic uintptr_t	moea64_vsidcontext;
1937190681Snwhitehorn
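/*
 * Allocate a previously unused VSID, mixing in timebase entropy so that
 * the page table hash spreads well.  Panics if the VSID space is
 * exhausted.
 */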
1938209975Snwhitehornuintptr_t
1939209975Snwhitehornmoea64_get_unique_vsid(void)
{
1940209975Snwhitehorn	u_int entropy;
1941209975Snwhitehorn	register_t hash;
1942209975Snwhitehorn	uint32_t mask;
1943209975Snwhitehorn	int i;
1944190681Snwhitehorn
1945190681Snwhitehorn	entropy = 0;
1946190681Snwhitehorn	__asm __volatile("mftb %0" : "=r"(entropy));
1947190681Snwhitehorn
1948211967Snwhitehorn	mtx_lock(&moea64_slb_mutex);
1949209975Snwhitehorn	for (i = 0; i < NVSIDS; i += VSID_NBPW) {
1950209975Snwhitehorn		u_int	n;
1951190681Snwhitehorn
1952190681Snwhitehorn		/*
1953190681Snwhitehorn		 * Create a new value by multiplying by a prime and adding in
1954190681Snwhitehorn		 * entropy from the timebase register.  This is to make the
1955190681Snwhitehorn		 * VSID more random so that the PT hash function collides
1956190681Snwhitehorn		 * less often.  (Note that the prime causes gcc to do shifts
1957190681Snwhitehorn		 * instead of a multiply.)
1958190681Snwhitehorn		 */
1959190681Snwhitehorn		moea64_vsidcontext = (moea64_vsidcontext * 0x1105) + entropy;
1960209975Snwhitehorn		hash = moea64_vsidcontext & (NVSIDS - 1);
1961190681Snwhitehorn		if (hash == 0)		/* 0 is special, avoid it */
1962190681Snwhitehorn			continue;
1963190681Snwhitehorn		n = hash >> 5;
1964190681Snwhitehorn		mask = 1 << (hash & (VSID_NBPW - 1));
1965209975Snwhitehorn		hash = (moea64_vsidcontext & VSID_HASHMASK);
1966190681Snwhitehorn		if (moea64_vsid_bitmap[n] & mask) {	/* collision? */
1967190681Snwhitehorn			/* anything free in this bucket? */
1968190681Snwhitehorn			if (moea64_vsid_bitmap[n] == 0xffffffff) {
1969190681Snwhitehorn				entropy = (moea64_vsidcontext >> 20);
1970190681Snwhitehorn				continue;
1971190681Snwhitehorn			}
1972212322Snwhitehorn			i = ffs(~moea64_vsid_bitmap[n]) - 1;
1973190681Snwhitehorn			mask = 1 << i;
1974298433Spfg			hash &= rounddown2(VSID_HASHMASK, VSID_NBPW);
1975190681Snwhitehorn			hash |= i;
1976190681Snwhitehorn		}
1977279940Snwhitehorn		if (hash == VSID_VRMA)	/* also special, avoid this too */
1978279940Snwhitehorn			continue;
1979212322Snwhitehorn		KASSERT(!(moea64_vsid_bitmap[n] & mask),
1980212331Snwhitehorn		    ("Allocating in-use VSID %#zx\n", hash));
1981190681Snwhitehorn		moea64_vsid_bitmap[n] |= mask;
1982211967Snwhitehorn		mtx_unlock(&moea64_slb_mutex);
1983209975Snwhitehorn		return (hash);
1984190681Snwhitehorn	}
1985190681Snwhitehorn
1986211967Snwhitehorn	mtx_unlock(&moea64_slb_mutex);
1987209975Snwhitehorn	panic("%s: out of segments",__func__);
1988190681Snwhitehorn}
1989190681Snwhitehorn
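/*
 * Initialize a pmap: set up its PVO tree and allocate an address-space
 * context (an SLB tree and user SLB cache on powerpc64, or a set of 16
 * segment register VSIDs on 32-bit implementations).
 */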
1990209975Snwhitehorn#ifdef __powerpc64__
1991209975Snwhitehornvoid
1992209975Snwhitehornmoea64_pinit(mmu_t mmu, pmap_t pmap)
1993209975Snwhitehorn{
1994254667Skib
1995235689Snwhitehorn	RB_INIT(&pmap->pmap_pvo);
1996209975Snwhitehorn
1997212715Snwhitehorn	pmap->pm_slb_tree_root = slb_alloc_tree();
1998209975Snwhitehorn	pmap->pm_slb = slb_alloc_user_cache();
1999212722Snwhitehorn	pmap->pm_slb_len = 0;
2000209975Snwhitehorn}
2001209975Snwhitehorn#else
2002209975Snwhitehornvoid
2003209975Snwhitehornmoea64_pinit(mmu_t mmu, pmap_t pmap)
2004209975Snwhitehorn{
2005209975Snwhitehorn	int	i;
2006212308Snwhitehorn	uint32_t hash;
2007209975Snwhitehorn
2008235689Snwhitehorn	RB_INIT(&pmap->pmap_pvo);
2009209975Snwhitehorn
2010209975Snwhitehorn	if (pmap_bootstrapped)
2011209975Snwhitehorn		pmap->pmap_phys = (pmap_t)moea64_kextract(mmu,
2012209975Snwhitehorn		    (vm_offset_t)pmap);
2013209975Snwhitehorn	else
2014209975Snwhitehorn		pmap->pmap_phys = pmap;
2015209975Snwhitehorn
2016209975Snwhitehorn	/*
2017209975Snwhitehorn	 * Allocate some segment registers for this pmap.
2018209975Snwhitehorn	 */
2019209975Snwhitehorn	hash = moea64_get_unique_vsid();
2020209975Snwhitehorn
2021209975Snwhitehorn	for (i = 0; i < 16; i++)
2022209975Snwhitehorn		pmap->pm_sr[i] = VSID_MAKE(i, hash);
2023212308Snwhitehorn
2024212308Snwhitehorn	KASSERT(pmap->pm_sr[0] != 0, ("moea64_pinit: pm_sr[0] = 0"));
2025209975Snwhitehorn}
2026209975Snwhitehorn#endif
2027209975Snwhitehorn
2028190681Snwhitehorn/*
2029190681Snwhitehorn * Initialize the pmap associated with process 0.
2030190681Snwhitehorn */
2031190681Snwhitehornvoid
2032190681Snwhitehornmoea64_pinit0(mmu_t mmu, pmap_t pm)
2033190681Snwhitehorn{
2034254667Skib
2035254667Skib	PMAP_LOCK_INIT(pm);
2036190681Snwhitehorn	moea64_pinit(mmu, pm);
2037190681Snwhitehorn	bzero(&pm->pm_stats, sizeof(pm->pm_stats));
2038190681Snwhitehorn}
2039190681Snwhitehorn
2040190681Snwhitehorn/*
2041190681Snwhitehorn * Set the physical protection on the specified range of this map as requested.
2042190681Snwhitehorn */
2043233011Snwhitehornstatic void
2044233011Snwhitehornmoea64_pvo_protect(mmu_t mmu, pmap_t pm, struct pvo_entry *pvo, vm_prot_t prot)
2045233011Snwhitehorn{
2046279252Snwhitehorn	struct vm_page *pg;
2047279252Snwhitehorn	vm_prot_t oldprot;
2048279252Snwhitehorn	int32_t refchg;
2049233011Snwhitehorn
2050233529Snwhitehorn	PMAP_LOCK_ASSERT(pm, MA_OWNED);
2051233529Snwhitehorn
2052233011Snwhitehorn	/*
2053279252Snwhitehorn	 * Change the protection of the page.
2054233011Snwhitehorn	 */
2055279252Snwhitehorn	oldprot = pvo->pvo_pte.prot;
2056279252Snwhitehorn	pvo->pvo_pte.prot = prot;
2057279252Snwhitehorn	pg = PHYS_TO_VM_PAGE(pvo->pvo_pte.pa & LPTE_RPGN);
2058233011Snwhitehorn
2059233011Snwhitehorn	/*
2060279252Snwhitehorn	 * If the PVO is in the page table, update mapping
2061233011Snwhitehorn	 */
2062279252Snwhitehorn	refchg = MOEA64_PTE_REPLACE(mmu, pvo, MOEA64_PTE_PROT_UPDATE);
2063279252Snwhitehorn	if (refchg < 0)
2064279252Snwhitehorn		refchg = (oldprot & VM_PROT_WRITE) ? LPTE_CHG : 0;
2065233011Snwhitehorn
2066234155Snwhitehorn	if (pm != kernel_pmap && pg != NULL && !(pg->aflags & PGA_EXECUTABLE) &&
2067279252Snwhitehorn	    (pvo->pvo_pte.pa & (LPTE_I | LPTE_G | LPTE_NOEXEC)) == 0) {
2068234155Snwhitehorn		if ((pg->oflags & VPO_UNMANAGED) == 0)
2069233949Snwhitehorn			vm_page_aflag_set(pg, PGA_EXECUTABLE);
2070234155Snwhitehorn		moea64_syncicache(mmu, pm, PVO_VADDR(pvo),
2071279252Snwhitehorn		    pvo->pvo_pte.pa & LPTE_RPGN, PAGE_SIZE);
2072233011Snwhitehorn	}
2073233434Snwhitehorn
2074233434Snwhitehorn	/*
2075233436Snwhitehorn	 * Update vm about the REF/CHG bits if the page is managed and we have
2076233436Snwhitehorn	 * removed write access.
2077233434Snwhitehorn	 */
2078279252Snwhitehorn	if (pg != NULL && (pvo->pvo_vaddr & PVO_MANAGED) &&
2079279252Snwhitehorn	    (oldprot & VM_PROT_WRITE)) {
2080279252Snwhitehorn		refchg |= atomic_readandclear_32(&pg->md.mdpg_attrs);
2081279252Snwhitehorn		if (refchg & LPTE_CHG)
2082279252Snwhitehorn			vm_page_dirty(pg);
2083279252Snwhitehorn		if (refchg & LPTE_REF)
2084279252Snwhitehorn			vm_page_aflag_set(pg, PGA_REFERENCED);
2085233434Snwhitehorn	}
2086233011Snwhitehorn}
2087233011Snwhitehorn
2088190681Snwhitehornvoid
2089190681Snwhitehornmoea64_protect(mmu_t mmu, pmap_t pm, vm_offset_t sva, vm_offset_t eva,
2090190681Snwhitehorn    vm_prot_t prot)
2091190681Snwhitehorn{
2092235689Snwhitehorn	struct	pvo_entry *pvo, *tpvo, key;
2093190681Snwhitehorn
2094233011Snwhitehorn	CTR4(KTR_PMAP, "moea64_protect: pm=%p sva=%#x eva=%#x prot=%#x", pm,
2095233011Snwhitehorn	    sva, eva, prot);
2096190681Snwhitehorn
2097190681Snwhitehorn	KASSERT(pm == &curproc->p_vmspace->vm_pmap || pm == kernel_pmap,
2098190681Snwhitehorn	    ("moea64_protect: non current pmap"));
2099190681Snwhitehorn
2100190681Snwhitehorn	if ((prot & VM_PROT_READ) == VM_PROT_NONE) {
2101190681Snwhitehorn		moea64_remove(mmu, pm, sva, eva);
2102190681Snwhitehorn		return;
2103190681Snwhitehorn	}
2104190681Snwhitehorn
2105190681Snwhitehorn	PMAP_LOCK(pm);
2106235689Snwhitehorn	key.pvo_vaddr = sva;
2107235689Snwhitehorn	for (pvo = RB_NFIND(pvo_tree, &pm->pmap_pvo, &key);
2108235689Snwhitehorn	    pvo != NULL && PVO_VADDR(pvo) < eva; pvo = tpvo) {
2109235689Snwhitehorn		tpvo = RB_NEXT(pvo_tree, &pm->pmap_pvo, pvo);
2110235689Snwhitehorn		moea64_pvo_protect(mmu, pm, pvo, prot);
2111190681Snwhitehorn	}
2112190681Snwhitehorn	PMAP_UNLOCK(pm);
2113190681Snwhitehorn}
2114190681Snwhitehorn
2115190681Snwhitehorn/*
2116190681Snwhitehorn * Map a list of wired pages into kernel virtual address space.  This is
2117190681Snwhitehorn * intended for temporary mappings which do not need page modification or
2118190681Snwhitehorn * references recorded.  Existing mappings in the region are overwritten.
2119190681Snwhitehorn */
2120190681Snwhitehornvoid
2121190681Snwhitehornmoea64_qenter(mmu_t mmu, vm_offset_t va, vm_page_t *m, int count)
2122190681Snwhitehorn{
2123190681Snwhitehorn	while (count-- > 0) {
2124190681Snwhitehorn		moea64_kenter(mmu, va, VM_PAGE_TO_PHYS(*m));
2125190681Snwhitehorn		va += PAGE_SIZE;
2126190681Snwhitehorn		m++;
2127190681Snwhitehorn	}
2128190681Snwhitehorn}
2129190681Snwhitehorn
2130190681Snwhitehorn/*
2131190681Snwhitehorn * Remove page mappings from kernel virtual address space.  Intended for
2132190681Snwhitehorn * temporary mappings entered by moea64_qenter.
2133190681Snwhitehorn */
2134190681Snwhitehornvoid
2135190681Snwhitehornmoea64_qremove(mmu_t mmu, vm_offset_t va, int count)
2136190681Snwhitehorn{
2137190681Snwhitehorn	while (count-- > 0) {
2138190681Snwhitehorn		moea64_kremove(mmu, va);
2139190681Snwhitehorn		va += PAGE_SIZE;
2140190681Snwhitehorn	}
2141190681Snwhitehorn}
2142190681Snwhitehorn
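/*
 * Return a VSID allocated by moea64_get_unique_vsid() to the free pool.
 */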
2143190681Snwhitehornvoid
2144209975Snwhitehornmoea64_release_vsid(uint64_t vsid)
2145209975Snwhitehorn{
2146212044Snwhitehorn	int idx, mask;
2147209975Snwhitehorn
2148212044Snwhitehorn	mtx_lock(&moea64_slb_mutex);
2149212044Snwhitehorn	idx = vsid & (NVSIDS-1);
2150212044Snwhitehorn	mask = 1 << (idx % VSID_NBPW);
2151212044Snwhitehorn	idx /= VSID_NBPW;
2152212308Snwhitehorn	KASSERT(moea64_vsid_bitmap[idx] & mask,
2153212308Snwhitehorn	    ("Freeing unallocated VSID %#jx", vsid));
2154212044Snwhitehorn	moea64_vsid_bitmap[idx] &= ~mask;
2155212044Snwhitehorn	mtx_unlock(&moea64_slb_mutex);
2156209975Snwhitehorn}
2157209975Snwhitehorn
2158209975Snwhitehorn
2159209975Snwhitehornvoid
2160190681Snwhitehornmoea64_release(mmu_t mmu, pmap_t pmap)
2161190681Snwhitehorn{
2162190681Snwhitehorn
2163190681Snwhitehorn	/*
2164209975Snwhitehorn	 * Free segment registers' VSIDs
2165190681Snwhitehorn	 */
2166209975Snwhitehorn    #ifdef __powerpc64__
2167212715Snwhitehorn	slb_free_tree(pmap);
2168209975Snwhitehorn	slb_free_user_cache(pmap->pm_slb);
2169209975Snwhitehorn    #else
2170212308Snwhitehorn	KASSERT(pmap->pm_sr[0] != 0, ("moea64_release: pm_sr[0] = 0"));
2171190681Snwhitehorn
2172212308Snwhitehorn	moea64_release_vsid(VSID_TO_HASH(pmap->pm_sr[0]));
2173209975Snwhitehorn    #endif
2174190681Snwhitehorn}
2175190681Snwhitehorn
2176190681Snwhitehorn/*
2177233017Snwhitehorn * Remove all pages mapped by the specified pmap
2178233017Snwhitehorn */
2179233017Snwhitehornvoid
2180233017Snwhitehornmoea64_remove_pages(mmu_t mmu, pmap_t pm)
2181233017Snwhitehorn{
2182279252Snwhitehorn	struct pvo_entry *pvo, *tpvo;
2183279252Snwhitehorn	struct pvo_tree tofree;
2184233017Snwhitehorn
2185279252Snwhitehorn	RB_INIT(&tofree);
2186279252Snwhitehorn
2187233017Snwhitehorn	PMAP_LOCK(pm);
2188235689Snwhitehorn	RB_FOREACH_SAFE(pvo, pvo_tree, &pm->pmap_pvo, tpvo) {
2189279252Snwhitehorn		if (pvo->pvo_vaddr & PVO_WIRED)
2190279252Snwhitehorn			continue;
2191279252Snwhitehorn
2192279252Snwhitehorn		/*
2193279252Snwhitehorn		 * For locking reasons, remove this from the page table and
2194279252Snwhitehorn		 * pmap, but save delinking from the vm_page for a second
2195279252Snwhitehorn		 * pass
2196279252Snwhitehorn		 */
2197279252Snwhitehorn		moea64_pvo_remove_from_pmap(mmu, pvo);
2198279252Snwhitehorn		RB_INSERT(pvo_tree, &tofree, pvo);
2199233434Snwhitehorn	}
2200233017Snwhitehorn	PMAP_UNLOCK(pm);
2201279252Snwhitehorn
2202279252Snwhitehorn	RB_FOREACH_SAFE(pvo, pvo_tree, &tofree, tpvo) {
2203279252Snwhitehorn		PV_LOCK(pvo->pvo_pte.pa & LPTE_RPGN);
2204279252Snwhitehorn		moea64_pvo_remove_from_page(mmu, pvo);
2205279252Snwhitehorn		PV_UNLOCK(pvo->pvo_pte.pa & LPTE_RPGN);
2206279252Snwhitehorn		RB_REMOVE(pvo_tree, &tofree, pvo);
2207279252Snwhitehorn		free_pvo_entry(pvo);
2208279252Snwhitehorn	}
2209233017Snwhitehorn}
2210233017Snwhitehorn
2211233017Snwhitehorn/*
2212190681Snwhitehorn * Remove the given range of addresses from the specified map.
2213190681Snwhitehorn */
2214190681Snwhitehornvoid
2215190681Snwhitehornmoea64_remove(mmu_t mmu, pmap_t pm, vm_offset_t sva, vm_offset_t eva)
2216190681Snwhitehorn{
2217279252Snwhitehorn	struct  pvo_entry *pvo, *tpvo, key;
2218279252Snwhitehorn	struct pvo_tree tofree;
2219190681Snwhitehorn
2220233011Snwhitehorn	/*
2221233011Snwhitehorn	 * Perform an unsynchronized read.  This is, however, safe.
2222233011Snwhitehorn	 */
2223233011Snwhitehorn	if (pm->pm_stats.resident_count == 0)
2224233011Snwhitehorn		return;
2225233011Snwhitehorn
2226279252Snwhitehorn	key.pvo_vaddr = sva;
2227279252Snwhitehorn
2228279252Snwhitehorn	RB_INIT(&tofree);
2229279252Snwhitehorn
2230190681Snwhitehorn	PMAP_LOCK(pm);
2231235689Snwhitehorn	for (pvo = RB_NFIND(pvo_tree, &pm->pmap_pvo, &key);
2232235689Snwhitehorn	    pvo != NULL && PVO_VADDR(pvo) < eva; pvo = tpvo) {
2233235689Snwhitehorn		tpvo = RB_NEXT(pvo_tree, &pm->pmap_pvo, pvo);
2234279252Snwhitehorn
2235279252Snwhitehorn		/*
2236279252Snwhitehorn		 * For locking reasons, remove this from the page table and
2237279252Snwhitehorn		 * pmap, but save delinking from the vm_page for a second
2238279252Snwhitehorn		 * pass
2239279252Snwhitehorn		 */
2240279252Snwhitehorn		moea64_pvo_remove_from_pmap(mmu, pvo);
2241279252Snwhitehorn		RB_INSERT(pvo_tree, &tofree, pvo);
2242190681Snwhitehorn	}
2243190681Snwhitehorn	PMAP_UNLOCK(pm);
2244279252Snwhitehorn
2245279252Snwhitehorn	RB_FOREACH_SAFE(pvo, pvo_tree, &tofree, tpvo) {
2246279252Snwhitehorn		PV_LOCK(pvo->pvo_pte.pa & LPTE_RPGN);
2247279252Snwhitehorn		moea64_pvo_remove_from_page(mmu, pvo);
2248279252Snwhitehorn		PV_UNLOCK(pvo->pvo_pte.pa & LPTE_RPGN);
2249279252Snwhitehorn		RB_REMOVE(pvo_tree, &tofree, pvo);
2250279252Snwhitehorn		free_pvo_entry(pvo);
2251279252Snwhitehorn	}
2252190681Snwhitehorn}
2253190681Snwhitehorn
2254190681Snwhitehorn/*
2255190681Snwhitehorn * Remove physical page from all pmaps in which it resides. moea64_pvo_remove()
2256190681Snwhitehorn * will reflect changes in pte's back to the vm_page.
2257190681Snwhitehorn */
2258190681Snwhitehornvoid
2259190681Snwhitehornmoea64_remove_all(mmu_t mmu, vm_page_t m)
2260190681Snwhitehorn{
2261190681Snwhitehorn	struct	pvo_entry *pvo, *next_pvo;
2262279252Snwhitehorn	struct	pvo_head freequeue;
2263279252Snwhitehorn	int	wasdead;
2264190681Snwhitehorn	pmap_t	pmap;
2265190681Snwhitehorn
2266279252Snwhitehorn	LIST_INIT(&freequeue);
2267279252Snwhitehorn
2268279252Snwhitehorn	PV_PAGE_LOCK(m);
2269233949Snwhitehorn	LIST_FOREACH_SAFE(pvo, vm_page_to_pvoh(m), pvo_vlink, next_pvo) {
2270190681Snwhitehorn		pmap = pvo->pvo_pmap;
2271190681Snwhitehorn		PMAP_LOCK(pmap);
2272279252Snwhitehorn		wasdead = (pvo->pvo_vaddr & PVO_DEAD);
2273279252Snwhitehorn		if (!wasdead)
2274279252Snwhitehorn			moea64_pvo_remove_from_pmap(mmu, pvo);
2275279252Snwhitehorn		moea64_pvo_remove_from_page(mmu, pvo);
2276279252Snwhitehorn		if (!wasdead)
2277279252Snwhitehorn			LIST_INSERT_HEAD(&freequeue, pvo, pvo_vlink);
2278190681Snwhitehorn		PMAP_UNLOCK(pmap);
2279279252Snwhitehorn
2280190681Snwhitehorn	}
2281279252Snwhitehorn	KASSERT(!pmap_page_is_mapped(m), ("Page still has mappings"));
2282279252Snwhitehorn	KASSERT(!(m->aflags & PGA_WRITEABLE), ("Page still writable"));
2283279252Snwhitehorn	PV_PAGE_UNLOCK(m);
2284279252Snwhitehorn
2285279252Snwhitehorn	/* Clean up UMA allocations */
2286279252Snwhitehorn	LIST_FOREACH_SAFE(pvo, &freequeue, pvo_vlink, next_pvo)
2287279252Snwhitehorn		free_pvo_entry(pvo);
2288190681Snwhitehorn}
2289190681Snwhitehorn
2290190681Snwhitehorn/*
2291190681Snwhitehorn * Allocate a physical page of memory directly from the phys_avail map.
2292190681Snwhitehorn * Can only be called from moea64_bootstrap before avail start and end are
2293190681Snwhitehorn * calculated.
2294190681Snwhitehorn */
2295216174Snwhitehornvm_offset_t
2296190681Snwhitehornmoea64_bootstrap_alloc(vm_size_t size, u_int align)
2297190681Snwhitehorn{
2298190681Snwhitehorn	vm_offset_t	s, e;
2299190681Snwhitehorn	int		i, j;
2300190681Snwhitehorn
2301190681Snwhitehorn	size = round_page(size);
2302190681Snwhitehorn	for (i = 0; phys_avail[i + 1] != 0; i += 2) {
2303190681Snwhitehorn		if (align != 0)
2304298433Spfg			s = roundup2(phys_avail[i], align);
2305190681Snwhitehorn		else
2306190681Snwhitehorn			s = phys_avail[i];
2307190681Snwhitehorn		e = s + size;
2308190681Snwhitehorn
2309190681Snwhitehorn		if (s < phys_avail[i] || e > phys_avail[i + 1])
2310190681Snwhitehorn			continue;
2311190681Snwhitehorn
2312215159Snwhitehorn		if (s + size > platform_real_maxaddr())
2313215159Snwhitehorn			continue;
2314215159Snwhitehorn
2315190681Snwhitehorn		if (s == phys_avail[i]) {
2316190681Snwhitehorn			phys_avail[i] += size;
2317190681Snwhitehorn		} else if (e == phys_avail[i + 1]) {
2318190681Snwhitehorn			phys_avail[i + 1] -= size;
2319190681Snwhitehorn		} else {
2320190681Snwhitehorn			for (j = phys_avail_count * 2; j > i; j -= 2) {
2321190681Snwhitehorn				phys_avail[j] = phys_avail[j - 2];
2322190681Snwhitehorn				phys_avail[j + 1] = phys_avail[j - 1];
2323190681Snwhitehorn			}
2324190681Snwhitehorn
2325190681Snwhitehorn			phys_avail[i + 3] = phys_avail[i + 1];
2326190681Snwhitehorn			phys_avail[i + 1] = s;
2327190681Snwhitehorn			phys_avail[i + 2] = e;
2328190681Snwhitehorn			phys_avail_count++;
2329190681Snwhitehorn		}
2330190681Snwhitehorn
2331190681Snwhitehorn		return (s);
2332190681Snwhitehorn	}
2333190681Snwhitehorn	panic("moea64_bootstrap_alloc: could not allocate memory");
2334190681Snwhitehorn}
2335190681Snwhitehorn
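/*
 * Insert a fully initialized PVO into its pmap's tree, onto the page's
 * PVO list (if pvo_head is non-NULL) and into the hardware page table.
 * Called with the pmap (and, for managed pages, the PV) lock held.
 * Returns ENOENT if this is the first mapping of the page, 0 otherwise.
 */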
2336190681Snwhitehornstatic int
2337279252Snwhitehornmoea64_pvo_enter(mmu_t mmu, struct pvo_entry *pvo, struct pvo_head *pvo_head)
2338190681Snwhitehorn{
2339279252Snwhitehorn	int first = 0, err;
2340190681Snwhitehorn
2341279252Snwhitehorn	PMAP_LOCK_ASSERT(pvo->pvo_pmap, MA_OWNED);
2342279252Snwhitehorn	KASSERT(moea64_pvo_find_va(pvo->pvo_pmap, PVO_VADDR(pvo)) == NULL,
2343279252Snwhitehorn	    ("Existing mapping for VA %#jx", (uintmax_t)PVO_VADDR(pvo)));
2344190681Snwhitehorn
2345212363Snwhitehorn	moea64_pvo_enter_calls++;
2346212363Snwhitehorn
2347190681Snwhitehorn	/*
2348228412Snwhitehorn	 * Add to pmap list
2349228412Snwhitehorn	 */
2350279252Snwhitehorn	RB_INSERT(pvo_tree, &pvo->pvo_pmap->pmap_pvo, pvo);
2351228412Snwhitehorn
2352228412Snwhitehorn	/*
2353190681Snwhitehorn	 * Remember if the list was empty and therefore will be the first
2354190681Snwhitehorn	 * item.
2355190681Snwhitehorn	 */
2356235689Snwhitehorn	if (pvo_head != NULL) {
2357235689Snwhitehorn		if (LIST_FIRST(pvo_head) == NULL)
2358235689Snwhitehorn			first = 1;
2359235689Snwhitehorn		LIST_INSERT_HEAD(pvo_head, pvo, pvo_vlink);
2360235689Snwhitehorn	}
2361190681Snwhitehorn
2362279252Snwhitehorn	if (pvo->pvo_vaddr & PVO_WIRED)
2363279252Snwhitehorn		pvo->pvo_pmap->pm_stats.wired_count++;
2364279252Snwhitehorn	pvo->pvo_pmap->pm_stats.resident_count++;
2365190681Snwhitehorn
2366190681Snwhitehorn	/*
2367279252Snwhitehorn	 * Insert it into the hardware page table
2368190681Snwhitehorn	 */
2369279252Snwhitehorn	err = MOEA64_PTE_INSERT(mmu, pvo);
2370279252Snwhitehorn	if (err != 0) {
2371190681Snwhitehorn		panic("moea64_pvo_enter: overflow");
2372190681Snwhitehorn	}
2373190681Snwhitehorn
2374279252Snwhitehorn	moea64_pvo_entries++;
2375279252Snwhitehorn
2376279252Snwhitehorn	if (pvo->pvo_pmap == kernel_pmap)
2377204042Snwhitehorn		isync();
2378204042Snwhitehorn
2379209975Snwhitehorn#ifdef __powerpc64__
2380209975Snwhitehorn	/*
2381209975Snwhitehorn	 * Make sure all our bootstrap mappings are in the SLB as soon
2382209975Snwhitehorn	 * as virtual memory is switched on.
2383209975Snwhitehorn	 */
2384209975Snwhitehorn	if (!pmap_bootstrapped)
2385279252Snwhitehorn		moea64_bootstrap_slb_prefault(PVO_VADDR(pvo),
2386279252Snwhitehorn		    pvo->pvo_vaddr & PVO_LARGE);
2387209975Snwhitehorn#endif
2388209975Snwhitehorn
2389190681Snwhitehorn	return (first ? ENOENT : 0);
2390190681Snwhitehorn}
2391190681Snwhitehorn
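/*
 * First half of PVO teardown: evict the mapping from the page table,
 * unlink the PVO from its pmap and mark it dead, pushing any REF/CHG
 * bits back to the vm_page.  Delinking from the page itself is left to
 * moea64_pvo_remove_from_page().
 */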
2392190681Snwhitehornstatic void
2393279252Snwhitehornmoea64_pvo_remove_from_pmap(mmu_t mmu, struct pvo_entry *pvo)
2394190681Snwhitehorn{
2395233949Snwhitehorn	struct	vm_page *pg;
2396279252Snwhitehorn	int32_t refchg;
2397190681Snwhitehorn
2398279252Snwhitehorn	KASSERT(pvo->pvo_pmap != NULL, ("Trying to remove PVO with no pmap"));
2399233529Snwhitehorn	PMAP_LOCK_ASSERT(pvo->pvo_pmap, MA_OWNED);
2400279252Snwhitehorn	KASSERT(!(pvo->pvo_vaddr & PVO_DEAD), ("Trying to remove dead PVO"));
2401233529Snwhitehorn
2402190681Snwhitehorn	/*
2403279252Snwhitehorn	 * If there is an active pte entry, we need to deactivate it
2404190681Snwhitehorn	 */
2405279252Snwhitehorn	refchg = MOEA64_PTE_UNSET(mmu, pvo);
2406279252Snwhitehorn	if (refchg < 0) {
2407279252Snwhitehorn		/*
2408279252Snwhitehorn		 * If it was evicted from the page table, be pessimistic and
2409279252Snwhitehorn		 * dirty the page.
2410279252Snwhitehorn		 */
2411279252Snwhitehorn		if (pvo->pvo_pte.prot & VM_PROT_WRITE)
2412279252Snwhitehorn			refchg = LPTE_CHG;
2413279252Snwhitehorn		else
2414279252Snwhitehorn			refchg = 0;
2415190681Snwhitehorn	}
2416190681Snwhitehorn
2417190681Snwhitehorn	/*
2418190681Snwhitehorn	 * Update our statistics.
2419190681Snwhitehorn	 */
2420190681Snwhitehorn	pvo->pvo_pmap->pm_stats.resident_count--;
2421204042Snwhitehorn	if (pvo->pvo_vaddr & PVO_WIRED)
2422190681Snwhitehorn		pvo->pvo_pmap->pm_stats.wired_count--;
2423190681Snwhitehorn
2424190681Snwhitehorn	/*
2425235689Snwhitehorn	 * Remove this PVO from the pmap list.
2426233529Snwhitehorn	 */
2427235689Snwhitehorn	RB_REMOVE(pvo_tree, &pvo->pvo_pmap->pmap_pvo, pvo);
2428233529Snwhitehorn
2429233529Snwhitehorn	/*
2430279252Snwhitehorn	 * Mark this for the next sweep
2431233529Snwhitehorn	 */
2432279252Snwhitehorn	pvo->pvo_vaddr |= PVO_DEAD;
2433233529Snwhitehorn
2434279252Snwhitehorn	/* Send RC bits to VM */
2435279252Snwhitehorn	if ((pvo->pvo_vaddr & PVO_MANAGED) &&
2436279252Snwhitehorn	    (pvo->pvo_pte.prot & VM_PROT_WRITE)) {
2437279252Snwhitehorn		pg = PHYS_TO_VM_PAGE(pvo->pvo_pte.pa & LPTE_RPGN);
2438279252Snwhitehorn		if (pg != NULL) {
2439279252Snwhitehorn			refchg |= atomic_readandclear_32(&pg->md.mdpg_attrs);
2440279252Snwhitehorn			if (refchg & LPTE_CHG)
2441279252Snwhitehorn				vm_page_dirty(pg);
2442279252Snwhitehorn			if (refchg & LPTE_REF)
2443279252Snwhitehorn				vm_page_aflag_set(pg, PGA_REFERENCED);
2444279252Snwhitehorn		}
2445279252Snwhitehorn	}
2446279252Snwhitehorn}
2447279252Snwhitehorn
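/*
 * Second half of PVO teardown: with the PV lock held, unlink the dead
 * PVO from its page and clear PGA_WRITEABLE/PGA_EXECUTABLE once the
 * last mapping of the page is gone.
 */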
static void
moea64_pvo_remove_from_page(mmu_t mmu, struct pvo_entry *pvo)
{
	struct	vm_page *pg;

	KASSERT(pvo->pvo_vaddr & PVO_DEAD, ("Trying to delink live page"));

	/* Use NULL pmaps as a sentinel for races in page deletion */
	if (pvo->pvo_pmap == NULL)
		return;
	pvo->pvo_pmap = NULL;

	/*
	 * Update the VM layer about page writability/executability if managed.
	 */
	PV_LOCKASSERT(pvo->pvo_pte.pa & LPTE_RPGN);
	pg = PHYS_TO_VM_PAGE(pvo->pvo_pte.pa & LPTE_RPGN);

	if ((pvo->pvo_vaddr & PVO_MANAGED) && pg != NULL) {
		LIST_REMOVE(pvo, pvo_vlink);
		if (LIST_EMPTY(vm_page_to_pvoh(pg)))
			vm_page_aflag_clear(pg, PGA_WRITEABLE | PGA_EXECUTABLE);
	}

	moea64_pvo_entries--;
	moea64_pvo_remove_calls++;
}

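/*
 * Look up the PVO for a virtual address in the given pmap's red-black tree.
 * Returns NULL if no mapping exists.  The caller must hold the pmap lock.
 */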
static struct pvo_entry *
moea64_pvo_find_va(pmap_t pm, vm_offset_t va)
{
	struct pvo_entry key;

	PMAP_LOCK_ASSERT(pm, MA_OWNED);

	key.pvo_vaddr = va & ~ADDR_POFF;
	return (RB_FIND(pvo_tree, &pm->pmap_pvo, &key));
}

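/*
 * Check whether the given REF/CHG bit (ptebit) is set for a page, either in
 * the page's cached attributes or in any of its mappings' PTEs.  Bits
 * harvested from the page table are folded back into m->md.mdpg_attrs, and
 * TRUE is returned as soon as one mapping shows the bit set.
 */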
static boolean_t
moea64_query_bit(mmu_t mmu, vm_page_t m, uint64_t ptebit)
{
	struct	pvo_entry *pvo;
	int64_t ret;
	boolean_t rv;

	/*
	 * See if this bit is stored in the page already.
	 */
	if (m->md.mdpg_attrs & ptebit)
		return (TRUE);

	/*
	 * Examine each PTE.  Sync so that any pending REF/CHG bits are
	 * flushed to the PTEs.
	 */
	rv = FALSE;
	powerpc_sync();
	PV_PAGE_LOCK(m);
	LIST_FOREACH(pvo, vm_page_to_pvoh(m), pvo_vlink) {
		ret = 0;

		/*
		 * See if this pvo has a valid PTE.  If so, fetch the
		 * REF/CHG bits from the valid PTE.  If the appropriate
		 * ptebit is set, return success.
		 */
		PMAP_LOCK(pvo->pvo_pmap);
		if (!(pvo->pvo_vaddr & PVO_DEAD))
			ret = MOEA64_PTE_SYNCH(mmu, pvo);
		PMAP_UNLOCK(pvo->pvo_pmap);

		if (ret > 0) {
			atomic_set_32(&m->md.mdpg_attrs,
			    ret & (LPTE_CHG | LPTE_REF));
			if (ret & ptebit) {
				rv = TRUE;
				break;
			}
		}
	}
	PV_PAGE_UNLOCK(m);

	return (rv);
}

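/*
 * Clear the given REF/CHG bit in every PTE mapping the page as well as in
 * the page's cached attributes.  Returns the number of mappings in which the
 * bit was found set (and therefore cleared).
 */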
static u_int
moea64_clear_bit(mmu_t mmu, vm_page_t m, u_int64_t ptebit)
{
	u_int	count;
	struct	pvo_entry *pvo;
	int64_t ret;

	/*
	 * Sync so that any pending REF/CHG bits are flushed to the PTEs (so
	 * we can reset the right ones).
	 */
	powerpc_sync();

	/*
	 * For each pvo entry, clear the pte's ptebit.
	 */
	count = 0;
	PV_PAGE_LOCK(m);
	LIST_FOREACH(pvo, vm_page_to_pvoh(m), pvo_vlink) {
		ret = 0;

		PMAP_LOCK(pvo->pvo_pmap);
		if (!(pvo->pvo_vaddr & PVO_DEAD))
			ret = MOEA64_PTE_CLEAR(mmu, pvo, ptebit);
		PMAP_UNLOCK(pvo->pvo_pmap);

		if (ret > 0 && (ret & ptebit))
			count++;
	}
	atomic_clear_32(&m->md.mdpg_attrs, ptebit);
	PV_PAGE_UNLOCK(m);

	return (count);
}

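/*
 * Check whether every page in the physical range [pa, pa + size) has a
 * kernel mapping at the identical virtual address (a direct, VA == PA
 * mapping).  Despite the boolean_t return type, the result is really 0 on
 * success and EFAULT otherwise.
 */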
boolean_t
moea64_dev_direct_mapped(mmu_t mmu, vm_paddr_t pa, vm_size_t size)
{
	struct pvo_entry *pvo, key;
	vm_offset_t ppa;
	int error = 0;

	PMAP_LOCK(kernel_pmap);
	key.pvo_vaddr = ppa = pa & ~ADDR_POFF;
	for (pvo = RB_FIND(pvo_tree, &kernel_pmap->pmap_pvo, &key);
	    ppa < pa + size; ppa += PAGE_SIZE,
	    pvo = RB_NEXT(pvo_tree, &kernel_pmap->pmap_pvo, pvo)) {
		if (pvo == NULL || (pvo->pvo_pte.pa & LPTE_RPGN) != ppa) {
			error = EFAULT;
			break;
		}
	}
	PMAP_UNLOCK(kernel_pmap);

	return (error);
}

/*
 * Map a set of physical memory pages into the kernel virtual
 * address space. Return a pointer to where it is mapped. This
 * routine is intended to be used for mapping device memory,
 * NOT real memory.
 */
void *
moea64_mapdev_attr(mmu_t mmu, vm_paddr_t pa, vm_size_t size, vm_memattr_t ma)
{
	vm_offset_t va, tmpva, ppa, offset;

	ppa = trunc_page(pa);
	offset = pa & PAGE_MASK;
	size = roundup2(offset + size, PAGE_SIZE);

	va = kva_alloc(size);

	if (!va)
		panic("moea64_mapdev: Couldn't alloc kernel virtual memory");

	for (tmpva = va; size > 0;) {
		moea64_kenter_attr(mmu, tmpva, ppa, ma);
		size -= PAGE_SIZE;
		tmpva += PAGE_SIZE;
		ppa += PAGE_SIZE;
	}

	return ((void *)(va + offset));
}

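/*
 * Convenience wrapper around moea64_mapdev_attr() using the default memory
 * attributes.
 */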
void *
moea64_mapdev(mmu_t mmu, vm_paddr_t pa, vm_size_t size)
{

	return moea64_mapdev_attr(mmu, pa, size, VM_MEMATTR_DEFAULT);
}

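/*
 * Tear down a device mapping created by moea64_mapdev() by releasing its
 * kernel virtual address range.
 */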
void
moea64_unmapdev(mmu_t mmu, vm_offset_t va, vm_size_t size)
{
	vm_offset_t base, offset;

	base = trunc_page(va);
	offset = va & PAGE_MASK;
	size = roundup2(offset + size, PAGE_SIZE);

	kva_free(base, size);
}

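/*
 * Bring the instruction cache into sync with memory for the given range,
 * one page at a time, skipping pages that are unmapped or mapped
 * cache-inhibited (LPTE_I).
 */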
void
moea64_sync_icache(mmu_t mmu, pmap_t pm, vm_offset_t va, vm_size_t sz)
{
	struct pvo_entry *pvo;
	vm_offset_t lim;
	vm_paddr_t pa;
	vm_size_t len;

	PMAP_LOCK(pm);
	while (sz > 0) {
		/* Advance to the next page boundary even if va is aligned. */
		lim = round_page(va + 1);
		len = MIN(lim - va, sz);
		pvo = moea64_pvo_find_va(pm, va & ~ADDR_POFF);
		if (pvo != NULL && !(pvo->pvo_pte.pa & LPTE_I)) {
			pa = (pvo->pvo_pte.pa & LPTE_RPGN) | (va & ADDR_POFF);
			moea64_syncicache(mmu, pm, va, pa, len);
		}
		va += len;
		sz -= len;
	}
	PMAP_UNLOCK(pm);
}

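/*
 * dumpsys() mapping hook: no temporary mapping is set up here; the physical
 * address is handed back unchanged.
 */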
void
moea64_dumpsys_map(mmu_t mmu, vm_paddr_t pa, size_t sz, void **va)
{

	*va = (void *)pa;
}

extern struct dump_pa dump_map[PHYS_AVAIL_SZ + 1];

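/*
 * Populate dump_map with the regions dumpsys() will write.  Full dumps
 * record the physical memory regions; minidumps record the kernel's virtual
 * segments: .data/.bss, the message buffer and bootstrap tables, and the
 * first mapped chunk of kernel VM past them (excluding the buffer cache).
 */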
void
moea64_scan_init(mmu_t mmu)
{
	struct pvo_entry *pvo;
	vm_offset_t va;
	int i;

	if (!do_minidump) {
		/* Initialize phys. segments for dumpsys(). */
		memset(&dump_map, 0, sizeof(dump_map));
		mem_regions(&pregions, &pregions_sz, &regions, &regions_sz);
		for (i = 0; i < pregions_sz; i++) {
			dump_map[i].pa_start = pregions[i].mr_start;
			dump_map[i].pa_size = pregions[i].mr_size;
		}
		return;
	}

	/* Virtual segments for minidumps: */
	memset(&dump_map, 0, sizeof(dump_map));

	/* 1st: kernel .data and .bss. */
	dump_map[0].pa_start = trunc_page((uintptr_t)_etext);
	dump_map[0].pa_size = round_page((uintptr_t)_end) -
	    dump_map[0].pa_start;

	/* 2nd: msgbuf and tables (see pmap_bootstrap()). */
	dump_map[1].pa_start = (vm_paddr_t)msgbufp->msg_ptr;
	dump_map[1].pa_size = round_page(msgbufp->msg_size);

	/* 3rd: kernel VM. */
	va = dump_map[1].pa_start + dump_map[1].pa_size;
	/* Find start of next chunk (from va). */
	while (va < virtual_end) {
		/* Don't dump the buffer cache. */
		if (va >= kmi.buffer_sva && va < kmi.buffer_eva) {
			va = kmi.buffer_eva;
			continue;
		}
		pvo = moea64_pvo_find_va(kernel_pmap, va & ~ADDR_POFF);
		if (pvo != NULL && !(pvo->pvo_vaddr & PVO_DEAD))
			break;
		va += PAGE_SIZE;
	}
	if (va < virtual_end) {
		dump_map[2].pa_start = va;
		va += PAGE_SIZE;
		/* Find last page in chunk. */
		while (va < virtual_end) {
			/* Don't run into the buffer cache. */
			if (va == kmi.buffer_sva)
				break;
			pvo = moea64_pvo_find_va(kernel_pmap, va & ~ADDR_POFF);
			/* The chunk ends at the first unmapped or dead page. */
			if (pvo == NULL || (pvo->pvo_vaddr & PVO_DEAD))
				break;
			va += PAGE_SIZE;
		}
		dump_map[2].pa_size = va - dump_map[2].pa_start;
	}
}
