/*-
 * Copyright (c) 2001 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Matt Thomas <matt@3am-software.com> of Allegro Networks, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *        This product includes software developed by the NetBSD
 *        Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */
/*-
 * Copyright (C) 1995, 1996 Wolfgang Solfrank.
 * Copyright (C) 1995, 1996 TooLs GmbH.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by TooLs GmbH.
 * 4. The name of TooLs GmbH may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY TOOLS GMBH ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL TOOLS GMBH BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
 * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
 * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * $NetBSD: pmap.c,v 1.28 2000/03/26 20:42:36 kleink Exp $
 */
/*-
 * Copyright (C) 2001 Benno Rice.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY Benno Rice ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL TOOLS GMBH BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
 * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
 * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: head/sys/powerpc/aim/mmu_oea64.c 238357 2012-07-10 22:10:21Z alc $");

/*
 * Manages physical address maps.
 *
 * In addition to hardware address maps, this module is called upon to
 * provide software-use-only maps which may or may not be stored in the
 * same form as hardware maps.  These pseudo-maps are used to store
 * intermediate results from copy operations to and from address spaces.
 *
 * Since the information managed by this module is also stored by the
 * logical address mapping module, this module may throw away valid virtual
 * to physical mappings at almost any time.  However, invalidations of
 * mappings must be done as requested.
 *
 * In order to cope with hardware architectures which make virtual to
 * physical map invalidates expensive, this module may delay invalidate
 * or reduced protection operations until such time as they are actually
 * necessary.  This module is given full information as to which processors
 * are currently using which maps, and to when physical maps must be made
 * correct.
 */

#include "opt_compat.h"
#include "opt_kstack_pages.h"

#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/queue.h>
#include <sys/cpuset.h>
#include <sys/ktr.h>
#include <sys/lock.h>
#include <sys/msgbuf.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/rwlock.h>
#include <sys/sched.h>
#include <sys/sysctl.h>
#include <sys/systm.h>
#include <sys/vmmeter.h>

#include <sys/kdb.h>

#include <dev/ofw/openfirm.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/vm_kern.h>
#include <vm/vm_page.h>
#include <vm/vm_map.h>
#include <vm/vm_object.h>
#include <vm/vm_extern.h>
#include <vm/vm_pageout.h>
#include <vm/vm_pager.h>
#include <vm/uma.h>

#include <machine/_inttypes.h>
#include <machine/cpu.h>
#include <machine/platform.h>
#include <machine/frame.h>
#include <machine/md_var.h>
#include <machine/psl.h>
#include <machine/bat.h>
#include <machine/hid.h>
#include <machine/pte.h>
#include <machine/sr.h>
#include <machine/trap.h>
#include <machine/mmuvar.h>

#include "mmu_oea64.h"
#include "mmu_if.h"
#include "moea64_if.h"

void moea64_release_vsid(uint64_t vsid);
uintptr_t moea64_get_unique_vsid(void);

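/*
 * These bracket code that must run with data translation off (PSL_DR
 * clear), e.g. early-boot stores by physical address; the caller supplies
 * the register_t used to save and restore the MSR.
 */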
#define DISABLE_TRANS(msr)	msr = mfmsr(); mtmsr(msr & ~PSL_DR)
#define ENABLE_TRANS(msr)	mtmsr(msr)

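/*
 * VSID layout implied by the macros below (an interpretation, not spelled
 * out in the original): the low 4 bits select the segment register and a
 * 20-bit hash sits in bits 4..23, so VSID_TO_HASH() inverts VSID_MAKE().
 */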
#define	VSID_MAKE(sr, hash)	((sr) | (((hash) & 0xfffff) << 4))
#define	VSID_TO_HASH(vsid)	(((vsid) >> 4) & 0xfffff)
#define	VSID_HASH_MASK		0x0000007fffffffffULL

/*
 * Locking semantics:
 * -- Read lock: sufficient if no modifications are being made to either the
 *    PVO lists or the page table, or if any modifications being made are
 *    internal changes (e.g. wiring, protection) such that the PVOs continue
 *    to exist and remain associated with the same pmap (in which case the
 *    changes should be protected by the pmap lock).
 * -- Write lock: required if PTEs/PVOs are being inserted or removed.
 */

#define LOCK_TABLE_RD() rw_rlock(&moea64_table_lock)
#define UNLOCK_TABLE_RD() rw_runlock(&moea64_table_lock)
#define LOCK_TABLE_WR() rw_wlock(&moea64_table_lock)
#define UNLOCK_TABLE_WR() rw_wunlock(&moea64_table_lock)
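
/*
 * Illustrative read-side pattern (a sketch, not a verbatim caller):
 *
 *	LOCK_TABLE_RD();
 *	PMAP_LOCK(pm);
 *	pvo = moea64_pvo_find_va(pm, va & ~ADDR_POFF);
 *	... inspect, but do not insert or remove, PVOs/PTEs ...
 *	PMAP_UNLOCK(pm);
 *	UNLOCK_TABLE_RD();
 */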
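/*
 * One entry of the Open Firmware "translations" property, as retrieved in
 * moea64_add_ofw_mappings(); each field is a 32-bit cell.
 */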
struct ofw_map {
	cell_t	om_va;
	cell_t	om_len;
	cell_t	om_pa_hi;
	cell_t	om_pa_lo;
	cell_t	om_mode;
};

/*
 * Map of physical memory regions.
 */
static struct	mem_region *regions;
static struct	mem_region *pregions;
static u_int	phys_avail_count;
static int	regions_sz, pregions_sz;

extern void bs_remap_earlyboot(void);

/*
 * Lock for the pteg and pvo tables.
 */
struct rwlock	moea64_table_lock;
struct mtx	moea64_slb_mutex;

/*
 * PTEG data.
 */
u_int		moea64_pteg_count;
u_int		moea64_pteg_mask;

/*
 * PVO data.
 */
struct	pvo_head *moea64_pvo_table;		/* pvo entries by pteg index */

uma_zone_t	moea64_upvo_zone; /* zone for pvo entries for unmanaged pages */
uma_zone_t	moea64_mpvo_zone; /* zone for pvo entries for managed pages */

#define	BPVO_POOL_SIZE	327680
static struct	pvo_entry *moea64_bpvo_pool;
static int	moea64_bpvo_pool_index = 0;

#define	VSID_NBPW	(sizeof(u_int32_t) * 8)
#ifdef __powerpc64__
#define	NVSIDS		(NPMAPS * 16)
#define VSID_HASHMASK	0xffffffffUL
#else
#define NVSIDS		NPMAPS
#define VSID_HASHMASK	0xfffffUL
#endif
static u_int	moea64_vsid_bitmap[NVSIDS / VSID_NBPW];

static boolean_t moea64_initialized = FALSE;

/*
 * Statistics.
 */
u_int	moea64_pte_valid = 0;
u_int	moea64_pte_overflow = 0;
u_int	moea64_pvo_entries = 0;
u_int	moea64_pvo_enter_calls = 0;
u_int	moea64_pvo_remove_calls = 0;
SYSCTL_INT(_machdep, OID_AUTO, moea64_pte_valid, CTLFLAG_RD,
    &moea64_pte_valid, 0, "");
SYSCTL_INT(_machdep, OID_AUTO, moea64_pte_overflow, CTLFLAG_RD,
    &moea64_pte_overflow, 0, "");
SYSCTL_INT(_machdep, OID_AUTO, moea64_pvo_entries, CTLFLAG_RD,
    &moea64_pvo_entries, 0, "");
SYSCTL_INT(_machdep, OID_AUTO, moea64_pvo_enter_calls, CTLFLAG_RD,
    &moea64_pvo_enter_calls, 0, "");
SYSCTL_INT(_machdep, OID_AUTO, moea64_pvo_remove_calls, CTLFLAG_RD,
    &moea64_pvo_remove_calls, 0, "");

vm_offset_t	moea64_scratchpage_va[2];
struct pvo_entry *moea64_scratchpage_pvo[2];
uintptr_t	moea64_scratchpage_pte[2];
struct	mtx	moea64_scratchpage_mtx;

uint64_t 	moea64_large_page_mask = 0;
int		moea64_large_page_size = 0;
int		moea64_large_page_shift = 0;

/*
 * PVO calls.
 */
static int	moea64_pvo_enter(mmu_t, pmap_t, uma_zone_t, struct pvo_head *,
		    vm_offset_t, vm_offset_t, uint64_t, int);
static void	moea64_pvo_remove(mmu_t, struct pvo_entry *);
static struct	pvo_entry *moea64_pvo_find_va(pmap_t, vm_offset_t);

/*
 * Utility routines.
 */
static boolean_t	moea64_query_bit(mmu_t, vm_page_t, u_int64_t);
static u_int		moea64_clear_bit(mmu_t, vm_page_t, u_int64_t);
static void		moea64_kremove(mmu_t, vm_offset_t);
static void		moea64_syncicache(mmu_t, pmap_t pmap, vm_offset_t va,
			    vm_offset_t pa, vm_size_t sz);

/*
 * Kernel MMU interface
 */
void moea64_change_wiring(mmu_t, pmap_t, vm_offset_t, boolean_t);
void moea64_clear_modify(mmu_t, vm_page_t);
void moea64_clear_reference(mmu_t, vm_page_t);
void moea64_copy_page(mmu_t, vm_page_t, vm_page_t);
void moea64_enter(mmu_t, pmap_t, vm_offset_t, vm_page_t, vm_prot_t, boolean_t);
void moea64_enter_object(mmu_t, pmap_t, vm_offset_t, vm_offset_t, vm_page_t,
    vm_prot_t);
void moea64_enter_quick(mmu_t, pmap_t, vm_offset_t, vm_page_t, vm_prot_t);
vm_paddr_t moea64_extract(mmu_t, pmap_t, vm_offset_t);
vm_page_t moea64_extract_and_hold(mmu_t, pmap_t, vm_offset_t, vm_prot_t);
void moea64_init(mmu_t);
boolean_t moea64_is_modified(mmu_t, vm_page_t);
boolean_t moea64_is_prefaultable(mmu_t, pmap_t, vm_offset_t);
boolean_t moea64_is_referenced(mmu_t, vm_page_t);
int moea64_ts_referenced(mmu_t, vm_page_t);
vm_offset_t moea64_map(mmu_t, vm_offset_t *, vm_paddr_t, vm_paddr_t, int);
boolean_t moea64_page_exists_quick(mmu_t, pmap_t, vm_page_t);
int moea64_page_wired_mappings(mmu_t, vm_page_t);
void moea64_pinit(mmu_t, pmap_t);
void moea64_pinit0(mmu_t, pmap_t);
void moea64_protect(mmu_t, pmap_t, vm_offset_t, vm_offset_t, vm_prot_t);
void moea64_qenter(mmu_t, vm_offset_t, vm_page_t *, int);
void moea64_qremove(mmu_t, vm_offset_t, int);
void moea64_release(mmu_t, pmap_t);
void moea64_remove(mmu_t, pmap_t, vm_offset_t, vm_offset_t);
void moea64_remove_pages(mmu_t, pmap_t);
void moea64_remove_all(mmu_t, vm_page_t);
void moea64_remove_write(mmu_t, vm_page_t);
void moea64_zero_page(mmu_t, vm_page_t);
void moea64_zero_page_area(mmu_t, vm_page_t, int, int);
void moea64_zero_page_idle(mmu_t, vm_page_t);
void moea64_activate(mmu_t, struct thread *);
void moea64_deactivate(mmu_t, struct thread *);
void *moea64_mapdev(mmu_t, vm_paddr_t, vm_size_t);
void *moea64_mapdev_attr(mmu_t, vm_offset_t, vm_size_t, vm_memattr_t);
void moea64_unmapdev(mmu_t, vm_offset_t, vm_size_t);
vm_paddr_t moea64_kextract(mmu_t, vm_offset_t);
void moea64_page_set_memattr(mmu_t, vm_page_t m, vm_memattr_t ma);
void moea64_kenter_attr(mmu_t, vm_offset_t, vm_offset_t, vm_memattr_t ma);
void moea64_kenter(mmu_t, vm_offset_t, vm_paddr_t);
boolean_t moea64_dev_direct_mapped(mmu_t, vm_paddr_t, vm_size_t);
static void moea64_sync_icache(mmu_t, pmap_t, vm_offset_t, vm_size_t);

static mmu_method_t moea64_methods[] = {
	MMUMETHOD(mmu_change_wiring,	moea64_change_wiring),
	MMUMETHOD(mmu_clear_modify,	moea64_clear_modify),
	MMUMETHOD(mmu_clear_reference,	moea64_clear_reference),
	MMUMETHOD(mmu_copy_page,	moea64_copy_page),
	MMUMETHOD(mmu_enter,		moea64_enter),
	MMUMETHOD(mmu_enter_object,	moea64_enter_object),
	MMUMETHOD(mmu_enter_quick,	moea64_enter_quick),
	MMUMETHOD(mmu_extract,		moea64_extract),
	MMUMETHOD(mmu_extract_and_hold,	moea64_extract_and_hold),
	MMUMETHOD(mmu_init,		moea64_init),
	MMUMETHOD(mmu_is_modified,	moea64_is_modified),
	MMUMETHOD(mmu_is_prefaultable,	moea64_is_prefaultable),
	MMUMETHOD(mmu_is_referenced,	moea64_is_referenced),
	MMUMETHOD(mmu_ts_referenced,	moea64_ts_referenced),
	MMUMETHOD(mmu_map,     		moea64_map),
	MMUMETHOD(mmu_page_exists_quick,moea64_page_exists_quick),
	MMUMETHOD(mmu_page_wired_mappings,moea64_page_wired_mappings),
	MMUMETHOD(mmu_pinit,		moea64_pinit),
	MMUMETHOD(mmu_pinit0,		moea64_pinit0),
	MMUMETHOD(mmu_protect,		moea64_protect),
	MMUMETHOD(mmu_qenter,		moea64_qenter),
	MMUMETHOD(mmu_qremove,		moea64_qremove),
	MMUMETHOD(mmu_release,		moea64_release),
	MMUMETHOD(mmu_remove,		moea64_remove),
	MMUMETHOD(mmu_remove_pages,	moea64_remove_pages),
	MMUMETHOD(mmu_remove_all,      	moea64_remove_all),
	MMUMETHOD(mmu_remove_write,	moea64_remove_write),
	MMUMETHOD(mmu_sync_icache,	moea64_sync_icache),
	MMUMETHOD(mmu_zero_page,       	moea64_zero_page),
	MMUMETHOD(mmu_zero_page_area,	moea64_zero_page_area),
	MMUMETHOD(mmu_zero_page_idle,	moea64_zero_page_idle),
	MMUMETHOD(mmu_activate,		moea64_activate),
	MMUMETHOD(mmu_deactivate,      	moea64_deactivate),
	MMUMETHOD(mmu_page_set_memattr,	moea64_page_set_memattr),

	/* Internal interfaces */
	MMUMETHOD(mmu_mapdev,		moea64_mapdev),
	MMUMETHOD(mmu_mapdev_attr,	moea64_mapdev_attr),
	MMUMETHOD(mmu_unmapdev,		moea64_unmapdev),
	MMUMETHOD(mmu_kextract,		moea64_kextract),
	MMUMETHOD(mmu_kenter,		moea64_kenter),
	MMUMETHOD(mmu_kenter_attr,	moea64_kenter_attr),
	MMUMETHOD(mmu_dev_direct_mapped,moea64_dev_direct_mapped),

	{ 0, 0 }
};

MMU_DEF(oea64_mmu, "mmu_oea64_base", moea64_methods, 0);

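/*
 * Compute the primary PTEG index for (vsid, addr): XOR the low VSID bits
 * with the virtual page number and mask the result down to the table size.
 * Large mappings hash on the large-page number instead of the 4 KB one.
 */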
static __inline u_int
va_to_pteg(uint64_t vsid, vm_offset_t addr, int large)
{
	uint64_t hash;
	int shift;

	shift = large ? moea64_large_page_shift : ADDR_PIDX_SHFT;
	hash = (vsid & VSID_HASH_MASK) ^ (((uint64_t)addr & ADDR_PIDX) >>
	    shift);
	return (hash & moea64_pteg_mask);
}

static __inline struct pvo_head *
vm_page_to_pvoh(vm_page_t m)
{

	return (&m->md.mdpg_pvoh);
}

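/*
 * Fill in the software image of a PTE: the VSID occupies the upper bits
 * of pte_hi with the abbreviated page index (API) below it; LPTE_BIG
 * marks a large-page mapping.
 */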
static __inline void
moea64_pte_create(struct lpte *pt, uint64_t vsid, vm_offset_t va,
    uint64_t pte_lo, int flags)
{

	/*
	 * Construct a PTE.  Default to IMB initially.  Valid bit only gets
	 * set when the real pte is set in memory.
	 *
	 * Note: Don't set the valid bit for correct operation of tlb update.
	 */
	pt->pte_hi = (vsid << LPTE_VSID_SHIFT) |
	    (((uint64_t)(va & ADDR_PIDX) >> ADDR_API_SHFT64) & LPTE_API);

	if (flags & PVO_LARGE)
		pt->pte_hi |= LPTE_BIG;

	pt->pte_lo = pte_lo;
}

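/*
 * Translate a vm_memattr_t to PowerPC WIMG storage-attribute bits
 * (W = write-through, I = caching-inhibited, M = memory coherence
 * required, G = guarded).
 */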
static __inline uint64_t
moea64_calc_wimg(vm_offset_t pa, vm_memattr_t ma)
{
	uint64_t pte_lo;
	int i;

	if (ma != VM_MEMATTR_DEFAULT) {
		switch (ma) {
		case VM_MEMATTR_UNCACHEABLE:
			return (LPTE_I | LPTE_G);
		case VM_MEMATTR_WRITE_COMBINING:
		case VM_MEMATTR_WRITE_BACK:
		case VM_MEMATTR_PREFETCHABLE:
			return (LPTE_I);
		case VM_MEMATTR_WRITE_THROUGH:
			return (LPTE_W | LPTE_M);
		}
	}

	/*
	 * Assume the page is cache inhibited and access is guarded unless
	 * it's in our available memory array.
	 */
	pte_lo = LPTE_I | LPTE_G;
	for (i = 0; i < pregions_sz; i++) {
		if ((pa >= pregions[i].mr_start) &&
		    (pa < (pregions[i].mr_start + pregions[i].mr_size))) {
			pte_lo &= ~(LPTE_I | LPTE_G);
			pte_lo |= LPTE_M;
			break;
		}
	}

	return pte_lo;
}

/*
 * Quick sort callout for comparing memory regions.
 */
static int	om_cmp(const void *a, const void *b);

static int
om_cmp(const void *a, const void *b)
{
	const struct	ofw_map *mapa;
	const struct	ofw_map *mapb;

	mapa = a;
	mapb = b;
	if (mapa->om_pa_hi < mapb->om_pa_hi)
		return (-1);
	else if (mapa->om_pa_hi > mapb->om_pa_hi)
		return (1);
	else if (mapa->om_pa_lo < mapb->om_pa_lo)
		return (-1);
	else if (mapa->om_pa_lo > mapb->om_pa_lo)
		return (1);
	else
		return (0);
}

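/*
 * Re-enter each range described by the firmware's "translations" property
 * into the kernel pmap, skipping pages the kernel has already mapped.
 */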
static void
moea64_add_ofw_mappings(mmu_t mmup, phandle_t mmu, size_t sz)
{
	struct ofw_map	translations[sz/sizeof(struct ofw_map)];
	register_t	msr;
	vm_offset_t	off;
	vm_paddr_t	pa_base;
	int		i;

	bzero(translations, sz);
	if (OF_getprop(mmu, "translations", translations, sz) == -1)
		panic("moea64_bootstrap: can't get ofw translations");

	CTR0(KTR_PMAP, "moea64_add_ofw_mappings: translations");
	sz /= sizeof(*translations);
	qsort(translations, sz, sizeof (*translations), om_cmp);

	for (i = 0; i < sz; i++) {
		CTR3(KTR_PMAP, "translation: pa=%#x va=%#x len=%#x",
		    (uint32_t)(translations[i].om_pa_lo), translations[i].om_va,
		    translations[i].om_len);

		if (translations[i].om_pa_lo % PAGE_SIZE)
			panic("OFW translation not page-aligned!");

		pa_base = translations[i].om_pa_lo;

	      #ifdef __powerpc64__
		pa_base += (vm_offset_t)translations[i].om_pa_hi << 32;
	      #else
		if (translations[i].om_pa_hi)
			panic("OFW translations above 32-bit boundary!");
	      #endif

		/* Now enter the pages for this mapping */

		DISABLE_TRANS(msr);
		for (off = 0; off < translations[i].om_len; off += PAGE_SIZE) {
			if (moea64_pvo_find_va(kernel_pmap,
			    translations[i].om_va + off) != NULL)
				continue;

			moea64_kenter(mmup, translations[i].om_va + off,
			    pa_base + off);
		}
		ENABLE_TRANS(msr);
	}
}

#ifdef __powerpc64__
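/*
 * Probe for 16 MB large-page support. On the 970 family the feature is
 * gated by a HID4 disable bit, which is cleared here before use; Cell
 * supports large pages directly. Other CPUs are assumed to lack support.
 */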
static void
moea64_probe_large_page(void)
{
	uint16_t pvr = mfpvr() >> 16;

	switch (pvr) {
	case IBM970:
	case IBM970FX:
	case IBM970MP:
		powerpc_sync(); isync();
		mtspr(SPR_HID4, mfspr(SPR_HID4) & ~HID4_970_DISABLE_LG_PG);
		powerpc_sync(); isync();

		/* FALLTHROUGH */
	case IBMCELLBE:
		moea64_large_page_size = 0x1000000; /* 16 MB */
		moea64_large_page_shift = 24;
		break;
	default:
		moea64_large_page_size = 0;
	}

	moea64_large_page_mask = moea64_large_page_size - 1;
}

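/*
 * Preload an SLB entry for 'va' in the per-CPU kernel SLB cache so early
 * boot code can touch the address without taking an SLB fault.
 */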
static void
moea64_bootstrap_slb_prefault(vm_offset_t va, int large)
{
	struct slb *cache;
	struct slb entry;
	uint64_t esid, slbe;
	uint64_t i;

	cache = PCPU_GET(slb);
	esid = va >> ADDR_SR_SHFT;
	slbe = (esid << SLBE_ESID_SHIFT) | SLBE_VALID;

	for (i = 0; i < 64; i++) {
		if (cache[i].slbe == (slbe | i))
			return;
	}

	entry.slbe = slbe;
	entry.slbv = KERNEL_VSID(esid) << SLBV_VSID_SHIFT;
	if (large)
		entry.slbv |= SLBV_L;

	slb_insert_kernel(entry.slbe, entry.slbv);
}
#endif

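/*
 * With large-page support, cover all of physical memory with wired 16 MB
 * direct mappings; otherwise identity-map only the structures the pmap
 * needs (the PVO table and pool, and the kernel itself) before the page
 * table is fully live.
 */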
static void
moea64_setup_direct_map(mmu_t mmup, vm_offset_t kernelstart,
    vm_offset_t kernelend)
{
	register_t msr;
	vm_paddr_t pa;
	vm_offset_t size, off;
	uint64_t pte_lo;
	int i;

	if (moea64_large_page_size == 0)
		hw_direct_map = 0;

	DISABLE_TRANS(msr);
	if (hw_direct_map) {
		LOCK_TABLE_WR();
		PMAP_LOCK(kernel_pmap);
		for (i = 0; i < pregions_sz; i++) {
		  for (pa = pregions[i].mr_start; pa < pregions[i].mr_start +
		     pregions[i].mr_size; pa += moea64_large_page_size) {
			pte_lo = LPTE_M;

			/*
			 * Set memory access as guarded if prefetch within
			 * the page could exit the available physmem area.
			 */
			if (pa & moea64_large_page_mask) {
				pa &= moea64_large_page_mask;
				pte_lo |= LPTE_G;
			}
			if (pa + moea64_large_page_size >
			    pregions[i].mr_start + pregions[i].mr_size)
				pte_lo |= LPTE_G;

			moea64_pvo_enter(mmup, kernel_pmap, moea64_upvo_zone,
				    NULL, pa, pa, pte_lo,
				    PVO_WIRED | PVO_LARGE);
		  }
		}
		PMAP_UNLOCK(kernel_pmap);
		UNLOCK_TABLE_WR();
	} else {
		size = sizeof(struct pvo_head) * moea64_pteg_count;
		off = (vm_offset_t)(moea64_pvo_table);
		for (pa = off; pa < off + size; pa += PAGE_SIZE)
			moea64_kenter(mmup, pa, pa);
		size = BPVO_POOL_SIZE*sizeof(struct pvo_entry);
		off = (vm_offset_t)(moea64_bpvo_pool);
		for (pa = off; pa < off + size; pa += PAGE_SIZE)
			moea64_kenter(mmup, pa, pa);

		/*
		 * Map certain important things, like ourselves.
		 *
		 * NOTE: We do not map the exception vector space. That code is
		 * used only in real mode, and leaving it unmapped allows us to
		 * catch NULL pointer dereferences, instead of making NULL a valid
		 * address.
		 */

		for (pa = kernelstart & ~PAGE_MASK; pa < kernelend;
		    pa += PAGE_SIZE)
			moea64_kenter(mmup, pa, pa);
	}
	ENABLE_TRANS(msr);
}

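/*
 * First phase of bootstrap: discover physical memory from the firmware,
 * carve the kernel image and exception vectors out of phys_avail[], and
 * size the page table from the amount of physical memory (unless a
 * PTEGCOUNT override is configured).
 */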
void
moea64_early_bootstrap(mmu_t mmup, vm_offset_t kernelstart, vm_offset_t kernelend)
{
	int		i, j;
	vm_size_t	physsz, hwphyssz;

#ifndef __powerpc64__
	/* We don't have a direct map since there is no BAT */
	hw_direct_map = 0;

	/* Make sure battable is zero, since we have no BAT */
	for (i = 0; i < 16; i++) {
		battable[i].batu = 0;
		battable[i].batl = 0;
	}
#else
	moea64_probe_large_page();

	/* Use a direct map if we have large page support */
	if (moea64_large_page_size > 0)
		hw_direct_map = 1;
	else
		hw_direct_map = 0;
#endif

	/* Get physical memory regions from firmware */
	mem_regions(&pregions, &pregions_sz, &regions, &regions_sz);
	CTR0(KTR_PMAP, "moea64_bootstrap: physical memory");

	if (sizeof(phys_avail)/sizeof(phys_avail[0]) < regions_sz)
		panic("moea64_bootstrap: phys_avail too small");

	phys_avail_count = 0;
	physsz = 0;
	hwphyssz = 0;
	TUNABLE_ULONG_FETCH("hw.physmem", (u_long *) &hwphyssz);
	for (i = 0, j = 0; i < regions_sz; i++, j += 2) {
		CTR3(KTR_PMAP, "region: %#x - %#x (%#x)", regions[i].mr_start,
		    regions[i].mr_start + regions[i].mr_size,
		    regions[i].mr_size);
		if (hwphyssz != 0 &&
		    (physsz + regions[i].mr_size) >= hwphyssz) {
			if (physsz < hwphyssz) {
				phys_avail[j] = regions[i].mr_start;
				phys_avail[j + 1] = regions[i].mr_start +
				    hwphyssz - physsz;
				physsz = hwphyssz;
				phys_avail_count++;
			}
			break;
		}
		phys_avail[j] = regions[i].mr_start;
		phys_avail[j + 1] = regions[i].mr_start + regions[i].mr_size;
		phys_avail_count++;
		physsz += regions[i].mr_size;
	}

	/* Check for overlap with the kernel and exception vectors */
	for (j = 0; j < 2*phys_avail_count; j+=2) {
		if (phys_avail[j] < EXC_LAST)
			phys_avail[j] += EXC_LAST;

		if (kernelstart >= phys_avail[j] &&
		    kernelstart < phys_avail[j+1]) {
			if (kernelend < phys_avail[j+1]) {
				phys_avail[2*phys_avail_count] =
				    (kernelend & ~PAGE_MASK) + PAGE_SIZE;
				phys_avail[2*phys_avail_count + 1] =
				    phys_avail[j+1];
				phys_avail_count++;
			}

			phys_avail[j+1] = kernelstart & ~PAGE_MASK;
		}

		if (kernelend >= phys_avail[j] &&
		    kernelend < phys_avail[j+1]) {
			if (kernelstart > phys_avail[j]) {
				phys_avail[2*phys_avail_count] = phys_avail[j];
				phys_avail[2*phys_avail_count + 1] =
				    kernelstart & ~PAGE_MASK;
				phys_avail_count++;
			}

			phys_avail[j] = (kernelend & ~PAGE_MASK) + PAGE_SIZE;
		}
	}

	physmem = btoc(physsz);

#ifdef PTEGCOUNT
	moea64_pteg_count = PTEGCOUNT;
#else
	moea64_pteg_count = 0x1000;

	while (moea64_pteg_count < physmem)
		moea64_pteg_count <<= 1;

	moea64_pteg_count >>= 1;
#endif /* PTEGCOUNT */
}

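/*
 * Second phase of bootstrap: allocate and initialize the PVO table, the
 * locks protecting it, and the statically allocated kernel pmap, then
 * enter the mappings the pmap itself needs via moea64_setup_direct_map().
 */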
void
moea64_mid_bootstrap(mmu_t mmup, vm_offset_t kernelstart, vm_offset_t kernelend)
{
	vm_size_t	size;
	register_t	msr;
	int		i;

	/*
	 * Set PTEG mask
	 */
	moea64_pteg_mask = moea64_pteg_count - 1;

	/*
	 * Allocate pv/overflow lists.
	 */
	size = sizeof(struct pvo_head) * moea64_pteg_count;

	moea64_pvo_table = (struct pvo_head *)moea64_bootstrap_alloc(size,
	    PAGE_SIZE);
	CTR1(KTR_PMAP, "moea64_bootstrap: PVO table at %p", moea64_pvo_table);

	DISABLE_TRANS(msr);
	for (i = 0; i < moea64_pteg_count; i++)
		LIST_INIT(&moea64_pvo_table[i]);
	ENABLE_TRANS(msr);

	/*
	 * Initialize the lock that synchronizes access to the pteg and pvo
	 * tables.
	 */
	rw_init_flags(&moea64_table_lock, "pmap tables", RW_RECURSE);
	mtx_init(&moea64_slb_mutex, "SLB table", NULL, MTX_DEF);

	/*
	 * Initialise the unmanaged pvo pool.
	 */
	moea64_bpvo_pool = (struct pvo_entry *)moea64_bootstrap_alloc(
		BPVO_POOL_SIZE*sizeof(struct pvo_entry), 0);
	moea64_bpvo_pool_index = 0;

	/*
	 * Make sure kernel vsid is allocated as well as VSID 0.
	 */
	#ifndef __powerpc64__
	moea64_vsid_bitmap[(KERNEL_VSIDBITS & (NVSIDS - 1)) / VSID_NBPW]
		|= 1 << (KERNEL_VSIDBITS % VSID_NBPW);
	moea64_vsid_bitmap[0] |= 1;
	#endif

	/*
	 * Initialize the kernel pmap (which is statically allocated).
	 */
	#ifdef __powerpc64__
	for (i = 0; i < 64; i++) {
		pcpup->pc_slb[i].slbv = 0;
		pcpup->pc_slb[i].slbe = 0;
	}
	#else
	for (i = 0; i < 16; i++)
		kernel_pmap->pm_sr[i] = EMPTY_SEGMENT + i;
	#endif

	kernel_pmap->pmap_phys = kernel_pmap;
	CPU_FILL(&kernel_pmap->pm_active);
	RB_INIT(&kernel_pmap->pmap_pvo);

	PMAP_LOCK_INIT(kernel_pmap);

	/*
	 * Now map in all the other buffers we allocated earlier
	 */

	moea64_setup_direct_map(mmup, kernelstart, kernelend);
}

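/*
 * Final phase of bootstrap, after which translation is enabled: add the
 * firmware's mappings, establish the KVA range, and map thread0's stack,
 * the message buffer, the per-CPU area and, if there is no direct map,
 * the two locked scratch pages used for zeroing and copying.
 */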
void
moea64_late_bootstrap(mmu_t mmup, vm_offset_t kernelstart, vm_offset_t kernelend)
{
	ihandle_t	mmui;
	phandle_t	chosen;
	phandle_t	mmu;
	size_t		sz;
	int		i;
	vm_offset_t	pa, va;
	void		*dpcpu;

	/*
	 * Set up the Open Firmware pmap and add its mappings if not in real
	 * mode.
	 */

	chosen = OF_finddevice("/chosen");
	if (chosen != -1 && OF_getprop(chosen, "mmu", &mmui, 4) != -1) {
	    mmu = OF_instance_to_package(mmui);
	    if (mmu == -1 || (sz = OF_getproplen(mmu, "translations")) == -1)
		sz = 0;
	    if (sz > 6144 /* tmpstksz - 2 KB headroom */)
		panic("moea64_bootstrap: too many ofw translations");

	    if (sz > 0)
		moea64_add_ofw_mappings(mmup, mmu, sz);
	}

	/*
	 * Calculate the last available physical address.
	 */
	for (i = 0; phys_avail[i + 2] != 0; i += 2)
		;
	Maxmem = powerpc_btop(phys_avail[i + 1]);

	/*
	 * Initialize MMU and remap early physical mappings
	 */
	MMU_CPU_BOOTSTRAP(mmup,0);
	mtmsr(mfmsr() | PSL_DR | PSL_IR);
	pmap_bootstrapped++;
	bs_remap_earlyboot();

	/*
	 * Set the start and end of kva.
	 */
	virtual_avail = VM_MIN_KERNEL_ADDRESS;
	virtual_end = VM_MAX_SAFE_KERNEL_ADDRESS;

	/*
	 * Map the entire KVA range into the SLB. We must not fault there.
	 */
	#ifdef __powerpc64__
	for (va = virtual_avail; va < virtual_end; va += SEGMENT_LENGTH)
		moea64_bootstrap_slb_prefault(va, 0);
	#endif

	/*
	 * Figure out how far we can extend virtual_end into segment 16
	 * without running into existing mappings. Segment 16 is guaranteed
	 * to contain neither RAM nor devices (at least on Apple hardware),
	 * but will generally contain some OFW mappings we should not
	 * step on.
	 */

	#ifndef __powerpc64__	/* KVA is in high memory on PPC64 */
	PMAP_LOCK(kernel_pmap);
	while (virtual_end < VM_MAX_KERNEL_ADDRESS &&
	    moea64_pvo_find_va(kernel_pmap, virtual_end+1) == NULL)
		virtual_end += PAGE_SIZE;
	PMAP_UNLOCK(kernel_pmap);
	#endif

	/*
	 * Allocate a kernel stack with a guard page for thread0 and map it
	 * into the kernel page map.
	 */
	pa = moea64_bootstrap_alloc(KSTACK_PAGES * PAGE_SIZE, PAGE_SIZE);
	va = virtual_avail + KSTACK_GUARD_PAGES * PAGE_SIZE;
	virtual_avail = va + KSTACK_PAGES * PAGE_SIZE;
	CTR2(KTR_PMAP, "moea64_bootstrap: kstack0 at %#x (%#x)", pa, va);
	thread0.td_kstack = va;
	thread0.td_kstack_pages = KSTACK_PAGES;
	for (i = 0; i < KSTACK_PAGES; i++) {
		moea64_kenter(mmup, va, pa);
		pa += PAGE_SIZE;
		va += PAGE_SIZE;
	}

	/*
	 * Allocate virtual address space for the message buffer.
	 */
	pa = msgbuf_phys = moea64_bootstrap_alloc(msgbufsize, PAGE_SIZE);
	msgbufp = (struct msgbuf *)virtual_avail;
	va = virtual_avail;
	virtual_avail += round_page(msgbufsize);
	while (va < virtual_avail) {
		moea64_kenter(mmup, va, pa);
		pa += PAGE_SIZE;
		va += PAGE_SIZE;
	}

	/*
	 * Allocate virtual address space for the dynamic percpu area.
	 */
	pa = moea64_bootstrap_alloc(DPCPU_SIZE, PAGE_SIZE);
	dpcpu = (void *)virtual_avail;
	va = virtual_avail;
	virtual_avail += DPCPU_SIZE;
	while (va < virtual_avail) {
		moea64_kenter(mmup, va, pa);
		pa += PAGE_SIZE;
		va += PAGE_SIZE;
	}
	dpcpu_init(dpcpu, 0);

	/*
	 * Allocate some things for page zeroing. We put this directly
	 * in the page table, marked with LPTE_LOCKED, to keep the
	 * PVO book-keeping and other parts of the VM system
	 * from even knowing that this hack exists.
	 */

	if (!hw_direct_map) {
		mtx_init(&moea64_scratchpage_mtx, "pvo zero page", NULL,
		    MTX_DEF);
		for (i = 0; i < 2; i++) {
			moea64_scratchpage_va[i] = (virtual_end+1) - PAGE_SIZE;
			virtual_end -= PAGE_SIZE;

			moea64_kenter(mmup, moea64_scratchpage_va[i], 0);

			moea64_scratchpage_pvo[i] = moea64_pvo_find_va(
			    kernel_pmap, (vm_offset_t)moea64_scratchpage_va[i]);
			LOCK_TABLE_RD();
			moea64_scratchpage_pte[i] = MOEA64_PVO_TO_PTE(
			    mmup, moea64_scratchpage_pvo[i]);
			moea64_scratchpage_pvo[i]->pvo_pte.lpte.pte_hi
			    |= LPTE_LOCKED;
			MOEA64_PTE_CHANGE(mmup, moea64_scratchpage_pte[i],
			    &moea64_scratchpage_pvo[i]->pvo_pte.lpte,
			    moea64_scratchpage_pvo[i]->pvo_vpn);
			UNLOCK_TABLE_RD();
		}
	}
}

/*
 * Activate a user pmap.  The pmap must be activated before its address
 * space can be accessed in any way.
 */
void
moea64_activate(mmu_t mmu, struct thread *td)
{
	pmap_t	pm;

	pm = &td->td_proc->p_vmspace->vm_pmap;
	CPU_SET(PCPU_GET(cpuid), &pm->pm_active);

	#ifdef __powerpc64__
	PCPU_SET(userslb, pm->pm_slb);
	#else
	PCPU_SET(curpmap, pm->pmap_phys);
	#endif
}

void
moea64_deactivate(mmu_t mmu, struct thread *td)
{
	pmap_t	pm;

	pm = &td->td_proc->p_vmspace->vm_pmap;
	CPU_CLR(PCPU_GET(cpuid), &pm->pm_active);
	#ifdef __powerpc64__
	PCPU_SET(userslb, NULL);
	#else
	PCPU_SET(curpmap, NULL);
	#endif
}

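/*
 * Change the wiring attribute of the mapping at 'va': adjust the pmap's
 * wired count and the PVO_WIRED/LPTE_WIRED flags, pushing the change into
 * the page table (and inserting the PTE if a newly wired page is not
 * currently resident there).
 */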
void
moea64_change_wiring(mmu_t mmu, pmap_t pm, vm_offset_t va, boolean_t wired)
{
	struct	pvo_entry *pvo;
	uintptr_t pt;
	uint64_t vsid;
	int	i, ptegidx;

	LOCK_TABLE_WR();
	PMAP_LOCK(pm);
	pvo = moea64_pvo_find_va(pm, va & ~ADDR_POFF);

	if (pvo != NULL) {
		pt = MOEA64_PVO_TO_PTE(mmu, pvo);

		if (wired) {
			if ((pvo->pvo_vaddr & PVO_WIRED) == 0)
				pm->pm_stats.wired_count++;
			pvo->pvo_vaddr |= PVO_WIRED;
			pvo->pvo_pte.lpte.pte_hi |= LPTE_WIRED;
		} else {
			if ((pvo->pvo_vaddr & PVO_WIRED) != 0)
				pm->pm_stats.wired_count--;
			pvo->pvo_vaddr &= ~PVO_WIRED;
			pvo->pvo_pte.lpte.pte_hi &= ~LPTE_WIRED;
		}

		if (pt != -1) {
			/* Update wiring flag in page table. */
			MOEA64_PTE_CHANGE(mmu, pt, &pvo->pvo_pte.lpte,
			    pvo->pvo_vpn);
		} else if (wired) {
			/*
			 * If we are wiring the page, and it wasn't in the
			 * page table before, add it.
			 */
			vsid = PVO_VSID(pvo);
			ptegidx = va_to_pteg(vsid, PVO_VADDR(pvo),
			    pvo->pvo_vaddr & PVO_LARGE);

			i = MOEA64_PTE_INSERT(mmu, ptegidx, &pvo->pvo_pte.lpte);

			if (i >= 0) {
				PVO_PTEGIDX_CLR(pvo);
				PVO_PTEGIDX_SET(pvo, i);
			}
		}

	}
	UNLOCK_TABLE_WR();
	PMAP_UNLOCK(pm);
}

/*
 * This goes through and sets the physical address of our
 * special scratch PTE to the PA we want to zero or copy. Because
 * of locking issues (this can get called in pvo_enter() by
 * the UMA allocator), we can't use most other utility functions here
 */

static __inline
void moea64_set_scratchpage_pa(mmu_t mmup, int which, vm_offset_t pa) {

	KASSERT(!hw_direct_map, ("Using OEA64 scratchpage with a direct map!"));
	mtx_assert(&moea64_scratchpage_mtx, MA_OWNED);

	moea64_scratchpage_pvo[which]->pvo_pte.lpte.pte_lo &=
	    ~(LPTE_WIMG | LPTE_RPGN);
	moea64_scratchpage_pvo[which]->pvo_pte.lpte.pte_lo |=
	    moea64_calc_wimg(pa, VM_MEMATTR_DEFAULT) | (uint64_t)pa;
	MOEA64_PTE_CHANGE(mmup, moea64_scratchpage_pte[which],
	    &moea64_scratchpage_pvo[which]->pvo_pte.lpte,
	    moea64_scratchpage_pvo[which]->pvo_vpn);
	isync();
}

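/*
 * Copy a page of physical memory, through the direct map when one exists
 * and otherwise via the scratch mappings (source in slot 0, destination
 * in slot 1).
 */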
void
moea64_copy_page(mmu_t mmu, vm_page_t msrc, vm_page_t mdst)
{
	vm_offset_t	dst;
	vm_offset_t	src;

	dst = VM_PAGE_TO_PHYS(mdst);
	src = VM_PAGE_TO_PHYS(msrc);

	if (hw_direct_map) {
		bcopy((void *)src, (void *)dst, PAGE_SIZE);
	} else {
		mtx_lock(&moea64_scratchpage_mtx);

		moea64_set_scratchpage_pa(mmu, 0, src);
		moea64_set_scratchpage_pa(mmu, 1, dst);

		bcopy((void *)moea64_scratchpage_va[0],
		    (void *)moea64_scratchpage_va[1], PAGE_SIZE);

		mtx_unlock(&moea64_scratchpage_mtx);
	}
}

void
moea64_zero_page_area(mmu_t mmu, vm_page_t m, int off, int size)
{
	vm_offset_t pa = VM_PAGE_TO_PHYS(m);

	if (size + off > PAGE_SIZE)
		panic("moea64_zero_page: size + off > PAGE_SIZE");

	if (hw_direct_map) {
		bzero((caddr_t)pa + off, size);
	} else {
		mtx_lock(&moea64_scratchpage_mtx);
		moea64_set_scratchpage_pa(mmu, 0, pa);
		bzero((caddr_t)moea64_scratchpage_va[0] + off, size);
		mtx_unlock(&moea64_scratchpage_mtx);
	}
}

/*
 * Zero a page of physical memory by temporarily mapping it
 */
void
moea64_zero_page(mmu_t mmu, vm_page_t m)
{
	vm_offset_t pa = VM_PAGE_TO_PHYS(m);
	vm_offset_t va, off;

	if (!hw_direct_map) {
		mtx_lock(&moea64_scratchpage_mtx);

		moea64_set_scratchpage_pa(mmu, 0, pa);
		va = moea64_scratchpage_va[0];
	} else {
		va = pa;
	}

	for (off = 0; off < PAGE_SIZE; off += cacheline_size)
		__asm __volatile("dcbz 0,%0" :: "r"(va + off));

	if (!hw_direct_map)
		mtx_unlock(&moea64_scratchpage_mtx);
}

void
moea64_zero_page_idle(mmu_t mmu, vm_page_t m)
{

	moea64_zero_page(mmu, m);
}

/*
 * Map the given physical page at the specified virtual address in the
 * target pmap with the protection requested.  If specified the page
 * will be wired down.
 */

void
moea64_enter(mmu_t mmu, pmap_t pmap, vm_offset_t va, vm_page_t m,
    vm_prot_t prot, boolean_t wired)
{
	struct		pvo_head *pvo_head;
	uma_zone_t	zone;
	vm_page_t	pg;
	uint64_t	pte_lo;
	u_int		pvo_flags;
	int		error;

	if (!moea64_initialized) {
		pvo_head = NULL;
		pg = NULL;
		zone = moea64_upvo_zone;
		pvo_flags = 0;
	} else {
		pvo_head = vm_page_to_pvoh(m);
		pg = m;
		zone = moea64_mpvo_zone;
		pvo_flags = PVO_MANAGED;
	}

	KASSERT((m->oflags & (VPO_UNMANAGED | VPO_BUSY)) != 0 ||
	    VM_OBJECT_LOCKED(m->object),
	    ("moea64_enter: page %p is not busy", m));

	/* XXX change the pvo head for fake pages */
	if ((m->oflags & VPO_UNMANAGED) != 0) {
		pvo_flags &= ~PVO_MANAGED;
		pvo_head = NULL;
		zone = moea64_upvo_zone;
	}

	pte_lo = moea64_calc_wimg(VM_PAGE_TO_PHYS(m), pmap_page_get_memattr(m));

	if (prot & VM_PROT_WRITE) {
		pte_lo |= LPTE_BW;
		if (pmap_bootstrapped &&
		    (m->oflags & VPO_UNMANAGED) == 0)
			vm_page_aflag_set(m, PGA_WRITEABLE);
	} else
		pte_lo |= LPTE_BR;

	if ((prot & VM_PROT_EXECUTE) == 0)
		pte_lo |= LPTE_NOEXEC;

	if (wired)
		pvo_flags |= PVO_WIRED;

	LOCK_TABLE_WR();
	PMAP_LOCK(pmap);
	error = moea64_pvo_enter(mmu, pmap, zone, pvo_head, va,
	    VM_PAGE_TO_PHYS(m), pte_lo, pvo_flags);
	PMAP_UNLOCK(pmap);
	UNLOCK_TABLE_WR();

	/*
	 * Flush the page from the instruction cache if this page is
	 * mapped executable and cacheable.
	 */
	if (pmap != kernel_pmap && !(m->aflags & PGA_EXECUTABLE) &&
	    (pte_lo & (LPTE_I | LPTE_G | LPTE_NOEXEC)) == 0) {
1231233949Snwhitehorn		vm_page_aflag_set(m, PGA_EXECUTABLE);
1232216174Snwhitehorn		moea64_syncicache(mmu, pmap, va, VM_PAGE_TO_PHYS(m), PAGE_SIZE);
1233233949Snwhitehorn	}
1234190681Snwhitehorn}
1235190681Snwhitehorn
1236190681Snwhitehornstatic void
1237216174Snwhitehornmoea64_syncicache(mmu_t mmu, pmap_t pmap, vm_offset_t va, vm_offset_t pa,
1238216174Snwhitehorn    vm_size_t sz)
1239190681Snwhitehorn{
1240204042Snwhitehorn
1241190681Snwhitehorn	/*
1242190681Snwhitehorn	 * This is much trickier than on older systems because
1243190681Snwhitehorn	 * we can't sync the icache on physical addresses directly
1244190681Snwhitehorn	 * without a direct map. Instead we check a couple of cases
1245190681Snwhitehorn	 * where the memory is already mapped in and, failing that,
1246190681Snwhitehorn	 * use the same trick we use for page zeroing to create
1247190681Snwhitehorn	 * a temporary mapping for this physical address.
1248190681Snwhitehorn	 */
1249190681Snwhitehorn
1250190681Snwhitehorn	if (!pmap_bootstrapped) {
1251190681Snwhitehorn		/*
1252190681Snwhitehorn		 * If PMAP is not bootstrapped, we are likely to be
1253190681Snwhitehorn		 * in real mode.
1254190681Snwhitehorn		 */
1255198341Smarcel		__syncicache((void *)pa, sz);
1256190681Snwhitehorn	} else if (pmap == kernel_pmap) {
1257198341Smarcel		__syncicache((void *)va, sz);
1258209975Snwhitehorn	} else if (hw_direct_map) {
1259209975Snwhitehorn		__syncicache((void *)pa, sz);
1260190681Snwhitehorn	} else {
1261190681Snwhitehorn		/* Use the scratch page to set up a temp mapping */
1262190681Snwhitehorn
1263190681Snwhitehorn		mtx_lock(&moea64_scratchpage_mtx);
1264190681Snwhitehorn
1265216174Snwhitehorn		moea64_set_scratchpage_pa(mmu, 1, pa & ~ADDR_POFF);
1266204042Snwhitehorn		__syncicache((void *)(moea64_scratchpage_va[1] +
1267204042Snwhitehorn		    (va & ADDR_POFF)), sz);
1268190681Snwhitehorn
1269190681Snwhitehorn		mtx_unlock(&moea64_scratchpage_mtx);
1270190681Snwhitehorn	}
1271190681Snwhitehorn}
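
/*
 * For reference, the conventional PowerPC sequence behind __syncicache()
 * is, for each cache block in the range (an outline of the technique,
 * not a quote of the actual implementation):
 *
 *	dcbst	0,r	# push the data cache block out to memory
 *	sync		# wait until the store has been performed
 *	icbi	0,r	# invalidate any stale icache copy of the block
 *
 * followed by a final sync and isync once the whole range is done.
 */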
1272190681Snwhitehorn
1273190681Snwhitehorn/*
1274190681Snwhitehorn * Maps a sequence of resident pages belonging to the same object.
1275190681Snwhitehorn * The sequence begins with the given page m_start.  This page is
1276190681Snwhitehorn * mapped at the given virtual address start.  Each subsequent page is
1277190681Snwhitehorn * mapped at a virtual address that is offset from start by the same
1278190681Snwhitehorn * amount as the page is offset from m_start within the object.  The
1279190681Snwhitehorn * last page in the sequence is the page with the largest offset from
1280190681Snwhitehorn * m_start that can be mapped at a virtual address less than the given
1281190681Snwhitehorn * virtual address end.  Not every virtual page between start and end
1282190681Snwhitehorn * is mapped; only those for which a resident page exists with the
1283190681Snwhitehorn * corresponding offset from m_start are mapped.
1284190681Snwhitehorn */
1285190681Snwhitehornvoid
1286190681Snwhitehornmoea64_enter_object(mmu_t mmu, pmap_t pm, vm_offset_t start, vm_offset_t end,
1287190681Snwhitehorn    vm_page_t m_start, vm_prot_t prot)
1288190681Snwhitehorn{
1289190681Snwhitehorn	vm_page_t m;
1290190681Snwhitehorn	vm_pindex_t diff, psize;
1291190681Snwhitehorn
1292190681Snwhitehorn	psize = atop(end - start);
1293190681Snwhitehorn	m = m_start;
1294190681Snwhitehorn	while (m != NULL && (diff = m->pindex - m_start->pindex) < psize) {
1295233957Snwhitehorn		moea64_enter(mmu, pm, start + ptoa(diff), m, prot &
1296190681Snwhitehorn		    (VM_PROT_READ | VM_PROT_EXECUTE), FALSE);
1297190681Snwhitehorn		m = TAILQ_NEXT(m, listq);
1298190681Snwhitehorn	}
1299190681Snwhitehorn}
1300190681Snwhitehorn
1301190681Snwhitehornvoid
1302190681Snwhitehornmoea64_enter_quick(mmu_t mmu, pmap_t pm, vm_offset_t va, vm_page_t m,
1303190681Snwhitehorn    vm_prot_t prot)
1304190681Snwhitehorn{
1305207796Salc
1306233957Snwhitehorn	moea64_enter(mmu, pm, va, m,
1307216174Snwhitehorn	    prot & (VM_PROT_READ | VM_PROT_EXECUTE), FALSE);
1308190681Snwhitehorn}
1309190681Snwhitehorn
1310190681Snwhitehornvm_paddr_t
1311190681Snwhitehornmoea64_extract(mmu_t mmu, pmap_t pm, vm_offset_t va)
1312190681Snwhitehorn{
1313190681Snwhitehorn	struct	pvo_entry *pvo;
1314190681Snwhitehorn	vm_paddr_t pa;
1315190681Snwhitehorn
1316190681Snwhitehorn	PMAP_LOCK(pm);
1317209975Snwhitehorn	pvo = moea64_pvo_find_va(pm, va);
1318190681Snwhitehorn	if (pvo == NULL)
1319190681Snwhitehorn		pa = 0;
1320190681Snwhitehorn	else
1321209975Snwhitehorn		pa = (pvo->pvo_pte.lpte.pte_lo & LPTE_RPGN) |
1322209975Snwhitehorn		    (va - PVO_VADDR(pvo));
1323190681Snwhitehorn	PMAP_UNLOCK(pm);
1324190681Snwhitehorn	return (pa);
1325190681Snwhitehorn}
1326190681Snwhitehorn
1327190681Snwhitehorn/*
1328190681Snwhitehorn * Atomically extract and hold the physical page with the given
1329190681Snwhitehorn * pmap and virtual address pair if that mapping permits the given
1330190681Snwhitehorn * protection.
1331190681Snwhitehorn */
1332190681Snwhitehornvm_page_t
1333190681Snwhitehornmoea64_extract_and_hold(mmu_t mmu, pmap_t pmap, vm_offset_t va, vm_prot_t prot)
1334190681Snwhitehorn{
1335190681Snwhitehorn	struct	pvo_entry *pvo;
1336190681Snwhitehorn	vm_page_t m;
1337207410Skmacy	vm_paddr_t pa;
1338190681Snwhitehorn
1339190681Snwhitehorn	m = NULL;
1340207410Skmacy	pa = 0;
1341190681Snwhitehorn	PMAP_LOCK(pmap);
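	/*
	 * vm_page_pa_tryrelock() may drop the pmap lock in order to take
	 * the page lock; when it reports that, the PVO looked up below
	 * may be stale and the lookup must be retried.
	 */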
1342207410Skmacyretry:
1343209975Snwhitehorn	pvo = moea64_pvo_find_va(pmap, va & ~ADDR_POFF);
1344190681Snwhitehorn	if (pvo != NULL && (pvo->pvo_pte.lpte.pte_hi & LPTE_VALID) &&
1345190681Snwhitehorn	    ((pvo->pvo_pte.lpte.pte_lo & LPTE_PP) == LPTE_RW ||
1346190681Snwhitehorn	     (prot & VM_PROT_WRITE) == 0)) {
1347235689Snwhitehorn		if (vm_page_pa_tryrelock(pmap,
1348207410Skmacy			pvo->pvo_pte.lpte.pte_lo & LPTE_RPGN, &pa))
1349207410Skmacy			goto retry;
1350190681Snwhitehorn		m = PHYS_TO_VM_PAGE(pvo->pvo_pte.lpte.pte_lo & LPTE_RPGN);
1351190681Snwhitehorn		vm_page_hold(m);
1352190681Snwhitehorn	}
1353207410Skmacy	PA_UNLOCK_COND(pa);
1354190681Snwhitehorn	PMAP_UNLOCK(pmap);
1355190681Snwhitehorn	return (m);
1356190681Snwhitehorn}
1357190681Snwhitehorn
1358216174Snwhitehornstatic mmu_t installed_mmu;
1359216174Snwhitehorn
1360190681Snwhitehornstatic void *
1361190681Snwhitehornmoea64_uma_page_alloc(uma_zone_t zone, int bytes, u_int8_t *flags, int wait)
1362190681Snwhitehorn{
1363190681Snwhitehorn	/*
1364190681Snwhitehorn	 * This entire routine is a horrible hack to avoid bothering kmem
1365190681Snwhitehorn	 * for new KVA addresses. Because this can get called from inside
1366190681Snwhitehorn	 * kmem allocation routines, calling kmem for a new address here
1367190681Snwhitehorn	 * can lead to multiply locking non-recursive mutexes.
1368190681Snwhitehorn	 */
1369190681Snwhitehorn	vm_offset_t va;
1370190681Snwhitehorn
1371190681Snwhitehorn	vm_page_t m;
1372190681Snwhitehorn	int pflags, needed_lock;
1373190681Snwhitehorn
1374190681Snwhitehorn	*flags = UMA_SLAB_PRIV;
1375190681Snwhitehorn	needed_lock = !PMAP_LOCKED(kernel_pmap);
1376190681Snwhitehorn
1377190681Snwhitehorn	if ((wait & (M_NOWAIT|M_USE_RESERVE)) == M_NOWAIT)
1378190681Snwhitehorn		pflags = VM_ALLOC_INTERRUPT | VM_ALLOC_WIRED;
1379190681Snwhitehorn	else
1380190681Snwhitehorn		pflags = VM_ALLOC_SYSTEM | VM_ALLOC_WIRED;
1381190681Snwhitehorn	if (wait & M_ZERO)
1382190681Snwhitehorn		pflags |= VM_ALLOC_ZERO;
1383190681Snwhitehorn
1384190681Snwhitehorn	for (;;) {
1385228522Salc		m = vm_page_alloc(NULL, 0, pflags | VM_ALLOC_NOOBJ);
1386190681Snwhitehorn		if (m == NULL) {
1387190681Snwhitehorn			if (wait & M_NOWAIT)
1388190681Snwhitehorn				return (NULL);
1389190681Snwhitehorn			VM_WAIT;
1390190681Snwhitehorn		} else
1391190681Snwhitehorn			break;
1392190681Snwhitehorn	}
1393190681Snwhitehorn
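	/*
	 * Use the physical address as the virtual address too: the PVO
	 * entered below creates a wired 1:1 kernel mapping of the page.
	 */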
1394204128Snwhitehorn	va = VM_PAGE_TO_PHYS(m);
1395190681Snwhitehorn
1396233529Snwhitehorn	LOCK_TABLE_WR();
1397233529Snwhitehorn	if (needed_lock)
1398233529Snwhitehorn		PMAP_LOCK(kernel_pmap);
1399233529Snwhitehorn
1400216174Snwhitehorn	moea64_pvo_enter(installed_mmu, kernel_pmap, moea64_upvo_zone,
1401235689Snwhitehorn	    NULL, va, VM_PAGE_TO_PHYS(m), LPTE_M, PVO_WIRED | PVO_BOOTSTRAP);
1402190681Snwhitehorn
1403190681Snwhitehorn	if (needed_lock)
1404190681Snwhitehorn		PMAP_UNLOCK(kernel_pmap);
1405233529Snwhitehorn	UNLOCK_TABLE_WR();
1406198378Snwhitehorn
1407190681Snwhitehorn	if ((wait & M_ZERO) && (m->flags & PG_ZERO) == 0)
1408190681Snwhitehorn                bzero((void *)va, PAGE_SIZE);
1409190681Snwhitehorn
1410190681Snwhitehorn	return ((void *)va);
1411190681Snwhitehorn}
1412190681Snwhitehorn
1413230767Skibextern int elf32_nxstack;
1414230767Skib
1415190681Snwhitehornvoid
1416190681Snwhitehornmoea64_init(mmu_t mmu)
1417190681Snwhitehorn{
1418190681Snwhitehorn
1419190681Snwhitehorn	CTR0(KTR_PMAP, "moea64_init");
1420190681Snwhitehorn
1421190681Snwhitehorn	moea64_upvo_zone = uma_zcreate("UPVO entry", sizeof (struct pvo_entry),
1422190681Snwhitehorn	    NULL, NULL, NULL, NULL, UMA_ALIGN_PTR,
1423190681Snwhitehorn	    UMA_ZONE_VM | UMA_ZONE_NOFREE);
1424190681Snwhitehorn	moea64_mpvo_zone = uma_zcreate("MPVO entry", sizeof(struct pvo_entry),
1425190681Snwhitehorn	    NULL, NULL, NULL, NULL, UMA_ALIGN_PTR,
1426190681Snwhitehorn	    UMA_ZONE_VM | UMA_ZONE_NOFREE);
1427190681Snwhitehorn
1428190681Snwhitehorn	if (!hw_direct_map) {
1429216174Snwhitehorn		installed_mmu = mmu;
1430190681Snwhitehorn		uma_zone_set_allocf(moea64_upvo_zone,moea64_uma_page_alloc);
1431190681Snwhitehorn		uma_zone_set_allocf(moea64_mpvo_zone,moea64_uma_page_alloc);
1432190681Snwhitehorn	}
1433190681Snwhitehorn
1434230779Skib#ifdef COMPAT_FREEBSD32
1435230767Skib	elf32_nxstack = 1;
1436230779Skib#endif
1437230767Skib
1438190681Snwhitehorn	moea64_initialized = TRUE;
1439190681Snwhitehorn}
1440190681Snwhitehorn
1441190681Snwhitehornboolean_t
1442207155Salcmoea64_is_referenced(mmu_t mmu, vm_page_t m)
1443207155Salc{
1444207155Salc
1445224746Skib	KASSERT((m->oflags & VPO_UNMANAGED) == 0,
1446208574Salc	    ("moea64_is_referenced: page %p is not managed", m));
1447216174Snwhitehorn	return (moea64_query_bit(mmu, m, LPTE_REF));
1448207155Salc}
1449207155Salc
1450207155Salcboolean_t
1451190681Snwhitehornmoea64_is_modified(mmu_t mmu, vm_page_t m)
1452190681Snwhitehorn{
1453190681Snwhitehorn
1454224746Skib	KASSERT((m->oflags & VPO_UNMANAGED) == 0,
1455208504Salc	    ("moea64_is_modified: page %p is not managed", m));
1456208504Salc
1457208504Salc	/*
1458225418Skib	 * If the page is not VPO_BUSY, then PGA_WRITEABLE cannot be
1459225418Skib	 * concurrently set while the object is locked.  Thus, if PGA_WRITEABLE
1460208504Salc	 * is clear, no PTEs can have LPTE_CHG set.
1461208504Salc	 */
1462208504Salc	VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED);
1463208504Salc	if ((m->oflags & VPO_BUSY) == 0 &&
1464225418Skib	    (m->aflags & PGA_WRITEABLE) == 0)
1465190681Snwhitehorn		return (FALSE);
1466216174Snwhitehorn	return (moea64_query_bit(mmu, m, LPTE_CHG));
1467190681Snwhitehorn}
1468190681Snwhitehorn
1469214617Salcboolean_t
1470214617Salcmoea64_is_prefaultable(mmu_t mmu, pmap_t pmap, vm_offset_t va)
1471214617Salc{
1472214617Salc	struct pvo_entry *pvo;
1473214617Salc	boolean_t rv;
1474214617Salc
1475214617Salc	PMAP_LOCK(pmap);
1476214617Salc	pvo = moea64_pvo_find_va(pmap, va & ~ADDR_POFF);
1477214617Salc	rv = pvo == NULL || (pvo->pvo_pte.lpte.pte_hi & LPTE_VALID) == 0;
1478214617Salc	PMAP_UNLOCK(pmap);
1479214617Salc	return (rv);
1480214617Salc}
1481214617Salc
1482190681Snwhitehornvoid
1483190681Snwhitehornmoea64_clear_reference(mmu_t mmu, vm_page_t m)
1484190681Snwhitehorn{
1485190681Snwhitehorn
1486224746Skib	KASSERT((m->oflags & VPO_UNMANAGED) == 0,
1487208504Salc	    ("moea64_clear_reference: page %p is not managed", m));
1488216174Snwhitehorn	moea64_clear_bit(mmu, m, LPTE_REF);
1489190681Snwhitehorn}
1490190681Snwhitehorn
1491190681Snwhitehornvoid
1492190681Snwhitehornmoea64_clear_modify(mmu_t mmu, vm_page_t m)
1493190681Snwhitehorn{
1494190681Snwhitehorn
1495224746Skib	KASSERT((m->oflags & VPO_UNMANAGED) == 0,
1496208504Salc	    ("moea64_clear_modify: page %p is not managed", m));
1497208504Salc	VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED);
1498208504Salc	KASSERT((m->oflags & VPO_BUSY) == 0,
1499208504Salc	    ("moea64_clear_modify: page %p is busy", m));
1500208504Salc
1501208504Salc	/*
1502225418Skib	 * If the page is not PGA_WRITEABLE, then no PTEs can have LPTE_CHG
1503208504Salc	 * set.  If the object containing the page is locked and the page is
1504225418Skib	 * not VPO_BUSY, then PGA_WRITEABLE cannot be concurrently set.
1505208504Salc	 */
1506225418Skib	if ((m->aflags & PGA_WRITEABLE) == 0)
1507190681Snwhitehorn		return;
1508216174Snwhitehorn	moea64_clear_bit(mmu, m, LPTE_CHG);
1509190681Snwhitehorn}
1510190681Snwhitehorn
1511190681Snwhitehorn/*
1512190681Snwhitehorn * Clear the write and modified bits in each of the given page's mappings.
1513190681Snwhitehorn */
1514190681Snwhitehornvoid
1515190681Snwhitehornmoea64_remove_write(mmu_t mmu, vm_page_t m)
1516190681Snwhitehorn{
1517190681Snwhitehorn	struct	pvo_entry *pvo;
1518216174Snwhitehorn	uintptr_t pt;
1519190681Snwhitehorn	pmap_t	pmap;
1520233434Snwhitehorn	uint64_t lo = 0;
1521190681Snwhitehorn
1522224746Skib	KASSERT((m->oflags & VPO_UNMANAGED) == 0,
1523208175Salc	    ("moea64_remove_write: page %p is not managed", m));
1524208175Salc
1525208175Salc	/*
1526225418Skib	 * If the page is not VPO_BUSY, then PGA_WRITEABLE cannot be set by
1527225418Skib	 * another thread while the object is locked.  Thus, if PGA_WRITEABLE
1528208175Salc	 * is clear, no page table entries need updating.
1529208175Salc	 */
1530208175Salc	VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED);
1531208175Salc	if ((m->oflags & VPO_BUSY) == 0 &&
1532225418Skib	    (m->aflags & PGA_WRITEABLE) == 0)
1533190681Snwhitehorn		return;
1534216174Snwhitehorn	powerpc_sync();
1535233529Snwhitehorn	LOCK_TABLE_RD();
1536190681Snwhitehorn	LIST_FOREACH(pvo, vm_page_to_pvoh(m), pvo_vlink) {
1537190681Snwhitehorn		pmap = pvo->pvo_pmap;
1538190681Snwhitehorn		PMAP_LOCK(pmap);
1539190681Snwhitehorn		if ((pvo->pvo_pte.lpte.pte_lo & LPTE_PP) != LPTE_BR) {
1540216174Snwhitehorn			pt = MOEA64_PVO_TO_PTE(mmu, pvo);
1541190681Snwhitehorn			pvo->pvo_pte.lpte.pte_lo &= ~LPTE_PP;
1542190681Snwhitehorn			pvo->pvo_pte.lpte.pte_lo |= LPTE_BR;
1543216174Snwhitehorn			if (pt != -1) {
1544216174Snwhitehorn				MOEA64_PTE_SYNCH(mmu, pt, &pvo->pvo_pte.lpte);
1545190681Snwhitehorn				lo |= pvo->pvo_pte.lpte.pte_lo;
1546190681Snwhitehorn				pvo->pvo_pte.lpte.pte_lo &= ~LPTE_CHG;
1547216174Snwhitehorn				MOEA64_PTE_CHANGE(mmu, pt,
1548216174Snwhitehorn				    &pvo->pvo_pte.lpte, pvo->pvo_vpn);
1549209975Snwhitehorn				if (pvo->pvo_pmap == kernel_pmap)
1550209975Snwhitehorn					isync();
1551190681Snwhitehorn			}
1552190681Snwhitehorn		}
1553233530Snwhitehorn		if ((lo & LPTE_CHG) != 0)
1554233530Snwhitehorn			vm_page_dirty(m);
1555190681Snwhitehorn		PMAP_UNLOCK(pmap);
1556190681Snwhitehorn	}
1557233529Snwhitehorn	UNLOCK_TABLE_RD();
1558225418Skib	vm_page_aflag_clear(m, PGA_WRITEABLE);
1559190681Snwhitehorn}
1560190681Snwhitehorn
1561190681Snwhitehorn/*
1562190681Snwhitehorn *	moea64_ts_referenced:
1563190681Snwhitehorn *
1564190681Snwhitehorn *	Return a count of reference bits for a page, clearing those bits.
1565190681Snwhitehorn *	It is not necessary for every reference bit to be cleared, but it
1566190681Snwhitehorn *	is necessary that 0 only be returned when there are truly no
1567190681Snwhitehorn *	reference bits set.
1568190681Snwhitehorn *
1569190681Snwhitehorn *	XXX: The exact number of bits to check and clear is a matter that
1570190681Snwhitehorn *	should be tested and standardized at some point in the future for
1571190681Snwhitehorn *	optimal aging of shared pages.
1572190681Snwhitehorn */
1573238357Salcint
1574190681Snwhitehornmoea64_ts_referenced(mmu_t mmu, vm_page_t m)
1575190681Snwhitehorn{
1576190681Snwhitehorn
1577224746Skib	KASSERT((m->oflags & VPO_UNMANAGED) == 0,
1578208990Salc	    ("moea64_ts_referenced: page %p is not managed", m));
1579216174Snwhitehorn	return (moea64_clear_bit(mmu, m, LPTE_REF));
1580190681Snwhitehorn}
1581190681Snwhitehorn
1582190681Snwhitehorn/*
1583213307Snwhitehorn * Modify the WIMG (storage control) settings of all mappings for a page.
1584213307Snwhitehorn */
1585213307Snwhitehornvoid
1586213307Snwhitehornmoea64_page_set_memattr(mmu_t mmu, vm_page_t m, vm_memattr_t ma)
1587213307Snwhitehorn{
1588213307Snwhitehorn	struct	pvo_entry *pvo;
1589213335Snwhitehorn	struct  pvo_head *pvo_head;
1590216174Snwhitehorn	uintptr_t pt;
1591213307Snwhitehorn	pmap_t	pmap;
1592213307Snwhitehorn	uint64_t lo;
1593213307Snwhitehorn
1594224746Skib	if ((m->oflags & VPO_UNMANAGED) != 0) {
1595213335Snwhitehorn		m->md.mdpg_cache_attrs = ma;
1596213335Snwhitehorn		return;
1597213335Snwhitehorn	}
1598213335Snwhitehorn
1599213335Snwhitehorn	pvo_head = vm_page_to_pvoh(m);
1600213307Snwhitehorn	lo = moea64_calc_wimg(VM_PAGE_TO_PHYS(m), ma);
1601233529Snwhitehorn	LOCK_TABLE_RD();
1602213335Snwhitehorn	LIST_FOREACH(pvo, pvo_head, pvo_vlink) {
1603213307Snwhitehorn		pmap = pvo->pvo_pmap;
1604213307Snwhitehorn		PMAP_LOCK(pmap);
1605216174Snwhitehorn		pt = MOEA64_PVO_TO_PTE(mmu, pvo);
1606213307Snwhitehorn		pvo->pvo_pte.lpte.pte_lo &= ~LPTE_WIMG;
1607213307Snwhitehorn		pvo->pvo_pte.lpte.pte_lo |= lo;
1608216174Snwhitehorn		if (pt != -1) {
1609216174Snwhitehorn			MOEA64_PTE_CHANGE(mmu, pt, &pvo->pvo_pte.lpte,
1610213307Snwhitehorn			    pvo->pvo_vpn);
1611213307Snwhitehorn			if (pvo->pvo_pmap == kernel_pmap)
1612213307Snwhitehorn				isync();
1613213307Snwhitehorn		}
1614213307Snwhitehorn		PMAP_UNLOCK(pmap);
1615213307Snwhitehorn	}
1616233529Snwhitehorn	UNLOCK_TABLE_RD();
1617213307Snwhitehorn	m->md.mdpg_cache_attrs = ma;
1618213307Snwhitehorn}
1619213307Snwhitehorn
1620213307Snwhitehorn/*
1621190681Snwhitehorn * Map a wired page into kernel virtual address space.
1622190681Snwhitehorn */
1623190681Snwhitehornvoid
1624213307Snwhitehornmoea64_kenter_attr(mmu_t mmu, vm_offset_t va, vm_offset_t pa, vm_memattr_t ma)
1625190681Snwhitehorn{
1626190681Snwhitehorn	uint64_t	pte_lo;
1627190681Snwhitehorn	int		error;
1628190681Snwhitehorn
1629213307Snwhitehorn	pte_lo = moea64_calc_wimg(pa, ma);
1630190681Snwhitehorn
1631233529Snwhitehorn	LOCK_TABLE_WR();
1632190681Snwhitehorn	PMAP_LOCK(kernel_pmap);
1633216174Snwhitehorn	error = moea64_pvo_enter(mmu, kernel_pmap, moea64_upvo_zone,
1634235689Snwhitehorn	    NULL, va, pa, pte_lo, PVO_WIRED);
1635233529Snwhitehorn	PMAP_UNLOCK(kernel_pmap);
1636233529Snwhitehorn	UNLOCK_TABLE_WR();
1637190681Snwhitehorn
1638190681Snwhitehorn	if (error != 0 && error != ENOENT)
1639209975Snwhitehorn		panic("moea64_kenter: failed to enter va %#zx pa %#zx: %d", va,
1640190681Snwhitehorn		    pa, error);
1641190681Snwhitehorn}
1642190681Snwhitehorn
1643213307Snwhitehornvoid
1644236019Srajmoea64_kenter(mmu_t mmu, vm_offset_t va, vm_paddr_t pa)
1645213307Snwhitehorn{
1646213307Snwhitehorn
1647213307Snwhitehorn	moea64_kenter_attr(mmu, va, pa, VM_MEMATTR_DEFAULT);
1648213307Snwhitehorn}
1649213307Snwhitehorn
1650190681Snwhitehorn/*
1651190681Snwhitehorn * Extract the physical page address associated with the given kernel virtual
1652190681Snwhitehorn * address.
1653190681Snwhitehorn */
1654236019Srajvm_paddr_t
1655190681Snwhitehornmoea64_kextract(mmu_t mmu, vm_offset_t va)
1656190681Snwhitehorn{
1657190681Snwhitehorn	struct		pvo_entry *pvo;
1658190681Snwhitehorn	vm_paddr_t pa;
1659190681Snwhitehorn
1660205370Snwhitehorn	/*
1661205370Snwhitehorn	 * Shortcut the direct-mapped case when applicable.  We never put
1662205370Snwhitehorn	 * anything but 1:1 mappings below VM_MIN_KERNEL_ADDRESS.
1663205370Snwhitehorn	 */
1664205370Snwhitehorn	if (va < VM_MIN_KERNEL_ADDRESS)
1665205370Snwhitehorn		return (va);
1666205370Snwhitehorn
1667190681Snwhitehorn	PMAP_LOCK(kernel_pmap);
1668209975Snwhitehorn	pvo = moea64_pvo_find_va(kernel_pmap, va);
1669209975Snwhitehorn	KASSERT(pvo != NULL, ("moea64_kextract: no addr found for %#" PRIxPTR,
1670209975Snwhitehorn	    va));
1671223471Sandreast	pa = (pvo->pvo_pte.lpte.pte_lo & LPTE_RPGN) | (va - PVO_VADDR(pvo));
1672190681Snwhitehorn	PMAP_UNLOCK(kernel_pmap);
1673190681Snwhitehorn	return (pa);
1674190681Snwhitehorn}
1675190681Snwhitehorn
1676190681Snwhitehorn/*
1677190681Snwhitehorn * Remove a wired page from kernel virtual address space.
1678190681Snwhitehorn */
1679190681Snwhitehornvoid
1680190681Snwhitehornmoea64_kremove(mmu_t mmu, vm_offset_t va)
1681190681Snwhitehorn{
1682190681Snwhitehorn	moea64_remove(mmu, kernel_pmap, va, va + PAGE_SIZE);
1683190681Snwhitehorn}
1684190681Snwhitehorn
1685190681Snwhitehorn/*
1686190681Snwhitehorn * Map a range of physical addresses into kernel virtual address space.
1687190681Snwhitehorn *
1688190681Snwhitehorn * The value passed in *virt is a suggested virtual address for the mapping.
1689190681Snwhitehorn * Architectures which can support a direct-mapped physical to virtual region
1690190681Snwhitehorn * can return the appropriate address within that region, leaving '*virt'
1691190681Snwhitehorn * unchanged.  We cannot and therefore do not; *virt is updated with the
1692190681Snwhitehorn * first usable address after the mapped region.
1693190681Snwhitehorn */
1694190681Snwhitehornvm_offset_t
1695236019Srajmoea64_map(mmu_t mmu, vm_offset_t *virt, vm_paddr_t pa_start,
1696236019Sraj    vm_paddr_t pa_end, int prot)
1697190681Snwhitehorn{
1698190681Snwhitehorn	vm_offset_t	sva, va;
1699190681Snwhitehorn
1700190681Snwhitehorn	sva = *virt;
1701190681Snwhitehorn	va = sva;
1702190681Snwhitehorn	for (; pa_start < pa_end; pa_start += PAGE_SIZE, va += PAGE_SIZE)
1703190681Snwhitehorn		moea64_kenter(mmu, va, pa_start);
1704190681Snwhitehorn	*virt = va;
1705190681Snwhitehorn
1706190681Snwhitehorn	return (sva);
1707190681Snwhitehorn}
1708190681Snwhitehorn
1709190681Snwhitehorn/*
1710190681Snwhitehorn * Returns true if the pmap's pv is one of the first
1711190681Snwhitehorn * 16 pvs linked to from this page.  This count may
1712190681Snwhitehorn * be changed upwards or downwards in the future; it
1713190681Snwhitehorn * is only necessary that true be returned for a small
1714190681Snwhitehorn * subset of pmaps for proper page aging.
1715190681Snwhitehorn */
1716190681Snwhitehornboolean_t
1717190681Snwhitehornmoea64_page_exists_quick(mmu_t mmu, pmap_t pmap, vm_page_t m)
1718190681Snwhitehorn{
1719190681Snwhitehorn        int loops;
1720190681Snwhitehorn	struct pvo_entry *pvo;
1721208990Salc	boolean_t rv;
1722190681Snwhitehorn
1723224746Skib	KASSERT((m->oflags & VPO_UNMANAGED) == 0,
1724208990Salc	    ("moea64_page_exists_quick: page %p is not managed", m));
1725190681Snwhitehorn	loops = 0;
1726208990Salc	rv = FALSE;
1727233529Snwhitehorn	LOCK_TABLE_RD();
1728190681Snwhitehorn	LIST_FOREACH(pvo, vm_page_to_pvoh(m), pvo_vlink) {
1729208990Salc		if (pvo->pvo_pmap == pmap) {
1730208990Salc			rv = TRUE;
1731208990Salc			break;
1732208990Salc		}
1733190681Snwhitehorn		if (++loops >= 16)
1734190681Snwhitehorn			break;
1735190681Snwhitehorn	}
1736233529Snwhitehorn	UNLOCK_TABLE_RD();
1737208990Salc	return (rv);
1738190681Snwhitehorn}
1739190681Snwhitehorn
1740190681Snwhitehorn/*
1741190681Snwhitehorn * Return the number of managed mappings to the given physical page
1742190681Snwhitehorn * that are wired.
1743190681Snwhitehorn */
1744190681Snwhitehornint
1745190681Snwhitehornmoea64_page_wired_mappings(mmu_t mmu, vm_page_t m)
1746190681Snwhitehorn{
1747190681Snwhitehorn	struct pvo_entry *pvo;
1748190681Snwhitehorn	int count;
1749190681Snwhitehorn
1750190681Snwhitehorn	count = 0;
1751224746Skib	if ((m->oflags & VPO_UNMANAGED) != 0)
1752190681Snwhitehorn		return (count);
1753233529Snwhitehorn	LOCK_TABLE_RD();
1754190681Snwhitehorn	LIST_FOREACH(pvo, vm_page_to_pvoh(m), pvo_vlink)
1755190681Snwhitehorn		if ((pvo->pvo_vaddr & PVO_WIRED) != 0)
1756190681Snwhitehorn			count++;
1757233529Snwhitehorn	UNLOCK_TABLE_RD();
1758190681Snwhitehorn	return (count);
1759190681Snwhitehorn}
1760190681Snwhitehorn
1761209975Snwhitehornstatic uintptr_t	moea64_vsidcontext;
1762190681Snwhitehorn
1763209975Snwhitehornuintptr_t
1764209975Snwhitehornmoea64_get_unique_vsid(void)
{
1765209975Snwhitehorn	u_int entropy;
1766209975Snwhitehorn	register_t hash;
1767209975Snwhitehorn	uint32_t mask;
1768209975Snwhitehorn	int i;
1769190681Snwhitehorn
1770190681Snwhitehorn	entropy = 0;
1771190681Snwhitehorn	__asm __volatile("mftb %0" : "=r"(entropy));
1772190681Snwhitehorn
1773211967Snwhitehorn	mtx_lock(&moea64_slb_mutex);
1774209975Snwhitehorn	for (i = 0; i < NVSIDS; i += VSID_NBPW) {
1775209975Snwhitehorn		u_int	n;
1776190681Snwhitehorn
1777190681Snwhitehorn		/*
1778190681Snwhitehorn		 * Create a new value by multiplying by a prime and adding in
1779190681Snwhitehorn		 * entropy from the timebase register.  This is to make the
1780190681Snwhitehorn		 * VSID more random so that the PT hash function collides
1781190681Snwhitehorn		 * less often.  (Note that the prime causes gcc to do shifts
1782190681Snwhitehorn		 * instead of a multiply.)
1783190681Snwhitehorn		 */
1784190681Snwhitehorn		moea64_vsidcontext = (moea64_vsidcontext * 0x1105) + entropy;
1785209975Snwhitehorn		hash = moea64_vsidcontext & (NVSIDS - 1);
1786190681Snwhitehorn		if (hash == 0)		/* 0 is special, avoid it */
1787190681Snwhitehorn			continue;
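		/*
		 * moea64_vsid_bitmap is an array of 32-bit words: word
		 * hash >> 5 holds the bit numbered hash & (VSID_NBPW - 1)
		 * for this candidate VSID.
		 */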
1788190681Snwhitehorn		n = hash >> 5;
1789190681Snwhitehorn		mask = 1 << (hash & (VSID_NBPW - 1));
1790209975Snwhitehorn		hash = (moea64_vsidcontext & VSID_HASHMASK);
1791190681Snwhitehorn		if (moea64_vsid_bitmap[n] & mask) {	/* collision? */
1792190681Snwhitehorn			/* anything free in this bucket? */
1793190681Snwhitehorn			if (moea64_vsid_bitmap[n] == 0xffffffff) {
1794190681Snwhitehorn				entropy = (moea64_vsidcontext >> 20);
1795190681Snwhitehorn				continue;
1796190681Snwhitehorn			}
1797212322Snwhitehorn			i = ffs(~moea64_vsid_bitmap[n]) - 1;
1798190681Snwhitehorn			mask = 1 << i;
1799209975Snwhitehorn			hash &= VSID_HASHMASK & ~(VSID_NBPW - 1);
1800190681Snwhitehorn			hash |= i;
1801190681Snwhitehorn		}
1802212322Snwhitehorn		KASSERT(!(moea64_vsid_bitmap[n] & mask),
1803212331Snwhitehorn		    ("Allocating in-use VSID %#zx\n", hash));
1804190681Snwhitehorn		moea64_vsid_bitmap[n] |= mask;
1805211967Snwhitehorn		mtx_unlock(&moea64_slb_mutex);
1806209975Snwhitehorn		return (hash);
1807190681Snwhitehorn	}
1808190681Snwhitehorn
1809211967Snwhitehorn	mtx_unlock(&moea64_slb_mutex);
1810209975Snwhitehorn	panic("%s: out of segments",__func__);
1811190681Snwhitehorn}
1812190681Snwhitehorn
1813209975Snwhitehorn#ifdef __powerpc64__
1814209975Snwhitehornvoid
1815209975Snwhitehornmoea64_pinit(mmu_t mmu, pmap_t pmap)
1816209975Snwhitehorn{
1817209975Snwhitehorn	PMAP_LOCK_INIT(pmap);
1818235689Snwhitehorn	RB_INIT(&pmap->pmap_pvo);
1819209975Snwhitehorn
1820212715Snwhitehorn	pmap->pm_slb_tree_root = slb_alloc_tree();
1821209975Snwhitehorn	pmap->pm_slb = slb_alloc_user_cache();
1822212722Snwhitehorn	pmap->pm_slb_len = 0;
1823209975Snwhitehorn}
1824209975Snwhitehorn#else
1825209975Snwhitehornvoid
1826209975Snwhitehornmoea64_pinit(mmu_t mmu, pmap_t pmap)
1827209975Snwhitehorn{
1828209975Snwhitehorn	int	i;
1829212308Snwhitehorn	uint32_t hash;
1830209975Snwhitehorn
1831209975Snwhitehorn	PMAP_LOCK_INIT(pmap);
1832235689Snwhitehorn	RB_INIT(&pmap->pmap_pvo);
1833209975Snwhitehorn
1834209975Snwhitehorn	if (pmap_bootstrapped)
1835209975Snwhitehorn		pmap->pmap_phys = (pmap_t)moea64_kextract(mmu,
1836209975Snwhitehorn		    (vm_offset_t)pmap);
1837209975Snwhitehorn	else
1838209975Snwhitehorn		pmap->pmap_phys = pmap;
1839209975Snwhitehorn
1840209975Snwhitehorn	/*
1841209975Snwhitehorn	 * Allocate some segment registers for this pmap.
1842209975Snwhitehorn	 */
1843209975Snwhitehorn	hash = moea64_get_unique_vsid();
1844209975Snwhitehorn
1845209975Snwhitehorn	for (i = 0; i < 16; i++)
1846209975Snwhitehorn		pmap->pm_sr[i] = VSID_MAKE(i, hash);
1847212308Snwhitehorn
1848212308Snwhitehorn	KASSERT(pmap->pm_sr[0] != 0, ("moea64_pinit: pm_sr[0] = 0"));
1849209975Snwhitehorn}
1850209975Snwhitehorn#endif
1851209975Snwhitehorn
1852190681Snwhitehorn/*
1853190681Snwhitehorn * Initialize the pmap associated with process 0.
1854190681Snwhitehorn */
1855190681Snwhitehornvoid
1856190681Snwhitehornmoea64_pinit0(mmu_t mmu, pmap_t pm)
1857190681Snwhitehorn{
1858190681Snwhitehorn	moea64_pinit(mmu, pm);
1859190681Snwhitehorn	bzero(&pm->pm_stats, sizeof(pm->pm_stats));
1860190681Snwhitehorn}
1861190681Snwhitehorn
1862190681Snwhitehorn/*
1863190681Snwhitehorn * Set the physical protection on the specified range of this map as requested.
1864190681Snwhitehorn */
1865233011Snwhitehornstatic void
1866233011Snwhitehornmoea64_pvo_protect(mmu_t mmu,  pmap_t pm, struct pvo_entry *pvo, vm_prot_t prot)
1867233011Snwhitehorn{
1868233011Snwhitehorn	uintptr_t pt;
1869233949Snwhitehorn	struct	vm_page *pg;
1870233436Snwhitehorn	uint64_t oldlo;
1871233011Snwhitehorn
1872233529Snwhitehorn	PMAP_LOCK_ASSERT(pm, MA_OWNED);
1873233529Snwhitehorn
1874233011Snwhitehorn	/*
1875233011Snwhitehorn	 * Grab the PTE pointer before we diddle with the cached PTE
1876233011Snwhitehorn	 * copy.
1877233011Snwhitehorn	 */
1878233011Snwhitehorn	pt = MOEA64_PVO_TO_PTE(mmu, pvo);
1879233011Snwhitehorn
1880233011Snwhitehorn	/*
1881233011Snwhitehorn	 * Change the protection of the page.
1882233011Snwhitehorn	 */
1883233436Snwhitehorn	oldlo = pvo->pvo_pte.lpte.pte_lo;
1884233011Snwhitehorn	pvo->pvo_pte.lpte.pte_lo &= ~LPTE_PP;
1885233011Snwhitehorn	pvo->pvo_pte.lpte.pte_lo &= ~LPTE_NOEXEC;
1886233011Snwhitehorn	if ((prot & VM_PROT_EXECUTE) == 0)
1887233011Snwhitehorn		pvo->pvo_pte.lpte.pte_lo |= LPTE_NOEXEC;
1888233436Snwhitehorn	if (prot & VM_PROT_WRITE)
1889233436Snwhitehorn		pvo->pvo_pte.lpte.pte_lo |= LPTE_BW;
1890233436Snwhitehorn	else
1891233436Snwhitehorn		pvo->pvo_pte.lpte.pte_lo |= LPTE_BR;
1892233011Snwhitehorn
1893233949Snwhitehorn	pg = PHYS_TO_VM_PAGE(pvo->pvo_pte.lpte.pte_lo & LPTE_RPGN);
1894233949Snwhitehorn
1895233011Snwhitehorn	/*
1896233011Snwhitehorn	 * If the PVO is in the page table, update that pte as well.
1897233011Snwhitehorn	 */
1898234155Snwhitehorn	if (pt != -1)
1899233011Snwhitehorn		MOEA64_PTE_CHANGE(mmu, pt, &pvo->pvo_pte.lpte,
1900233011Snwhitehorn		    pvo->pvo_vpn);
1901234155Snwhitehorn	if (pm != kernel_pmap && pg != NULL && !(pg->aflags & PGA_EXECUTABLE) &&
1902234155Snwhitehorn	    (pvo->pvo_pte.lpte.pte_lo & (LPTE_I | LPTE_G | LPTE_NOEXEC)) == 0) {
1903234155Snwhitehorn		if ((pg->oflags & VPO_UNMANAGED) == 0)
1904233949Snwhitehorn			vm_page_aflag_set(pg, PGA_EXECUTABLE);
1905234155Snwhitehorn		moea64_syncicache(mmu, pm, PVO_VADDR(pvo),
1906234155Snwhitehorn		    pvo->pvo_pte.lpte.pte_lo & LPTE_RPGN, PAGE_SIZE);
1907233011Snwhitehorn	}
1908233434Snwhitehorn
1909233434Snwhitehorn	/*
1910233436Snwhitehorn	 * Update vm about the REF/CHG bits if the page is managed and we have
1911233436Snwhitehorn	 * removed write access.
1912233434Snwhitehorn	 */
1913233436Snwhitehorn	if ((pvo->pvo_vaddr & PVO_MANAGED) == PVO_MANAGED &&
1914233436Snwhitehorn	    (oldlo & LPTE_PP) != LPTE_BR && !(prot & VM_PROT_WRITE)) {
1915233434Snwhitehorn		if (pg != NULL) {
1916233434Snwhitehorn			if (pvo->pvo_pte.lpte.pte_lo & LPTE_CHG)
1917233434Snwhitehorn				vm_page_dirty(pg);
1918233434Snwhitehorn			if (pvo->pvo_pte.lpte.pte_lo & LPTE_REF)
1919233434Snwhitehorn				vm_page_aflag_set(pg, PGA_REFERENCED);
1920233434Snwhitehorn		}
1921233434Snwhitehorn	}
1922233011Snwhitehorn}
1923233011Snwhitehorn
1924190681Snwhitehornvoid
1925190681Snwhitehornmoea64_protect(mmu_t mmu, pmap_t pm, vm_offset_t sva, vm_offset_t eva,
1926190681Snwhitehorn    vm_prot_t prot)
1927190681Snwhitehorn{
1928235689Snwhitehorn	struct	pvo_entry *pvo, *tpvo, key;
1929190681Snwhitehorn
1930233011Snwhitehorn	CTR4(KTR_PMAP, "moea64_protect: pm=%p sva=%#x eva=%#x prot=%#x", pm,
1931233011Snwhitehorn	    sva, eva, prot);
1932190681Snwhitehorn
1933190681Snwhitehorn	KASSERT(pm == &curproc->p_vmspace->vm_pmap || pm == kernel_pmap,
1934190681Snwhitehorn	    ("moea64_protect: non current pmap"));
1935190681Snwhitehorn
1936190681Snwhitehorn	if ((prot & VM_PROT_READ) == VM_PROT_NONE) {
1937190681Snwhitehorn		moea64_remove(mmu, pm, sva, eva);
1938190681Snwhitehorn		return;
1939190681Snwhitehorn	}
1940190681Snwhitehorn
1941233529Snwhitehorn	LOCK_TABLE_RD();
1942190681Snwhitehorn	PMAP_LOCK(pm);
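	/*
	 * The pmap's PVOs are kept in an RB tree sorted by virtual
	 * address; RB_NFIND returns the first entry at or above the key,
	 * so this walk visits exactly the mappings in [sva, eva).
	 */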
1943235689Snwhitehorn	key.pvo_vaddr = sva;
1944235689Snwhitehorn	for (pvo = RB_NFIND(pvo_tree, &pm->pmap_pvo, &key);
1945235689Snwhitehorn	    pvo != NULL && PVO_VADDR(pvo) < eva; pvo = tpvo) {
1946235689Snwhitehorn		tpvo = RB_NEXT(pvo_tree, &pm->pmap_pvo, pvo);
1947235689Snwhitehorn		moea64_pvo_protect(mmu, pm, pvo, prot);
1948190681Snwhitehorn	}
1949233529Snwhitehorn	UNLOCK_TABLE_RD();
1950190681Snwhitehorn	PMAP_UNLOCK(pm);
1951190681Snwhitehorn}
1952190681Snwhitehorn
1953190681Snwhitehorn/*
1954190681Snwhitehorn * Map a list of wired pages into kernel virtual address space.  This is
1955190681Snwhitehorn * intended for temporary mappings which do not need page modification or
1956190681Snwhitehorn * references recorded.  Existing mappings in the region are overwritten.
1957190681Snwhitehorn */
1958190681Snwhitehornvoid
1959190681Snwhitehornmoea64_qenter(mmu_t mmu, vm_offset_t va, vm_page_t *m, int count)
1960190681Snwhitehorn{
1961190681Snwhitehorn	while (count-- > 0) {
1962190681Snwhitehorn		moea64_kenter(mmu, va, VM_PAGE_TO_PHYS(*m));
1963190681Snwhitehorn		va += PAGE_SIZE;
1964190681Snwhitehorn		m++;
1965190681Snwhitehorn	}
1966190681Snwhitehorn}
1967190681Snwhitehorn
1968190681Snwhitehorn/*
1969190681Snwhitehorn * Remove page mappings from kernel virtual address space.  Intended for
1970190681Snwhitehorn * temporary mappings entered by moea64_qenter.
1971190681Snwhitehorn */
1972190681Snwhitehornvoid
1973190681Snwhitehornmoea64_qremove(mmu_t mmu, vm_offset_t va, int count)
1974190681Snwhitehorn{
1975190681Snwhitehorn	while (count-- > 0) {
1976190681Snwhitehorn		moea64_kremove(mmu, va);
1977190681Snwhitehorn		va += PAGE_SIZE;
1978190681Snwhitehorn	}
1979190681Snwhitehorn}
1980190681Snwhitehorn
1981190681Snwhitehornvoid
1982209975Snwhitehornmoea64_release_vsid(uint64_t vsid)
1983209975Snwhitehorn{
1984212044Snwhitehorn	int idx, mask;
1985209975Snwhitehorn
1986212044Snwhitehorn	mtx_lock(&moea64_slb_mutex);
1987212044Snwhitehorn	idx = vsid & (NVSIDS-1);
1988212044Snwhitehorn	mask = 1 << (idx % VSID_NBPW);
1989212044Snwhitehorn	idx /= VSID_NBPW;
1990212308Snwhitehorn	KASSERT(moea64_vsid_bitmap[idx] & mask,
1991212308Snwhitehorn	    ("Freeing unallocated VSID %#jx", vsid));
1992212044Snwhitehorn	moea64_vsid_bitmap[idx] &= ~mask;
1993212044Snwhitehorn	mtx_unlock(&moea64_slb_mutex);
1994209975Snwhitehorn}
1995209975Snwhitehorn
1996209975Snwhitehorn
1997209975Snwhitehornvoid
1998190681Snwhitehornmoea64_release(mmu_t mmu, pmap_t pmap)
1999190681Snwhitehorn{
2000190681Snwhitehorn
2001190681Snwhitehorn	/*
2002209975Snwhitehorn	 * Free segment registers' VSIDs
2003190681Snwhitehorn	 */
2004209975Snwhitehorn#ifdef __powerpc64__
2005212715Snwhitehorn	slb_free_tree(pmap);
2006209975Snwhitehorn	slb_free_user_cache(pmap->pm_slb);
2007209975Snwhitehorn#else
2008212308Snwhitehorn	KASSERT(pmap->pm_sr[0] != 0, ("moea64_release: pm_sr[0] = 0"));
2009190681Snwhitehorn
2010212308Snwhitehorn	moea64_release_vsid(VSID_TO_HASH(pmap->pm_sr[0]));
2011209975Snwhitehorn#endif
2012209975Snwhitehorn
2013190681Snwhitehorn	PMAP_LOCK_DESTROY(pmap);
2014190681Snwhitehorn}
2015190681Snwhitehorn
2016190681Snwhitehorn/*
2017233017Snwhitehorn * Remove all pages mapped by the specified pmap
2018233017Snwhitehorn */
2019233017Snwhitehornvoid
2020233017Snwhitehornmoea64_remove_pages(mmu_t mmu, pmap_t pm)
2021233017Snwhitehorn{
2022233017Snwhitehorn	struct	pvo_entry *pvo, *tpvo;
2023233017Snwhitehorn
2024233529Snwhitehorn	LOCK_TABLE_WR();
2025233017Snwhitehorn	PMAP_LOCK(pm);
2026235689Snwhitehorn	RB_FOREACH_SAFE(pvo, pvo_tree, &pm->pmap_pvo, tpvo) {
2027233434Snwhitehorn		if (!(pvo->pvo_vaddr & PVO_WIRED))
2028233434Snwhitehorn			moea64_pvo_remove(mmu, pvo);
2029233434Snwhitehorn	}
2030233529Snwhitehorn	UNLOCK_TABLE_WR();
2031233017Snwhitehorn	PMAP_UNLOCK(pm);
2032233017Snwhitehorn}
2033233017Snwhitehorn
2034233017Snwhitehorn/*
2035190681Snwhitehorn * Remove the given range of addresses from the specified map.
2036190681Snwhitehorn */
2037190681Snwhitehornvoid
2038190681Snwhitehornmoea64_remove(mmu_t mmu, pmap_t pm, vm_offset_t sva, vm_offset_t eva)
2039190681Snwhitehorn{
2040235689Snwhitehorn	struct	pvo_entry *pvo, *tpvo, key;
2041190681Snwhitehorn
2042233011Snwhitehorn	/*
2043233011Snwhitehorn	 * Perform an unsynchronized read.  This is, however, safe.
2044233011Snwhitehorn	 */
2045233011Snwhitehorn	if (pm->pm_stats.resident_count == 0)
2046233011Snwhitehorn		return;
2047233011Snwhitehorn
2048233529Snwhitehorn	LOCK_TABLE_WR();
2049190681Snwhitehorn	PMAP_LOCK(pm);
2050235689Snwhitehorn	key.pvo_vaddr = sva;
2051235689Snwhitehorn	for (pvo = RB_NFIND(pvo_tree, &pm->pmap_pvo, &key);
2052235689Snwhitehorn	    pvo != NULL && PVO_VADDR(pvo) < eva; pvo = tpvo) {
2053235689Snwhitehorn		tpvo = RB_NEXT(pvo_tree, &pm->pmap_pvo, pvo);
2054235689Snwhitehorn		moea64_pvo_remove(mmu, pvo);
2055190681Snwhitehorn	}
2056233529Snwhitehorn	UNLOCK_TABLE_WR();
2057190681Snwhitehorn	PMAP_UNLOCK(pm);
2058190681Snwhitehorn}
2059190681Snwhitehorn
2060190681Snwhitehorn/*
2061190681Snwhitehorn * Remove physical page from all pmaps in which it resides. moea64_pvo_remove()
2062190681Snwhitehorn * will reflect changes in pte's back to the vm_page.
2063190681Snwhitehorn */
2064190681Snwhitehornvoid
2065190681Snwhitehornmoea64_remove_all(mmu_t mmu, vm_page_t m)
2066190681Snwhitehorn{
2067190681Snwhitehorn	struct	pvo_entry *pvo, *next_pvo;
2068190681Snwhitehorn	pmap_t	pmap;
2069190681Snwhitehorn
2070233529Snwhitehorn	LOCK_TABLE_WR();
2071233949Snwhitehorn	LIST_FOREACH_SAFE(pvo, vm_page_to_pvoh(m), pvo_vlink, next_pvo) {
2072190681Snwhitehorn		pmap = pvo->pvo_pmap;
2073190681Snwhitehorn		PMAP_LOCK(pmap);
2074216174Snwhitehorn		moea64_pvo_remove(mmu, pvo);
2075190681Snwhitehorn		PMAP_UNLOCK(pmap);
2076190681Snwhitehorn	}
2077233529Snwhitehorn	UNLOCK_TABLE_WR();
2078233434Snwhitehorn	if ((m->aflags & PGA_WRITEABLE) && moea64_is_modified(mmu, m))
2079204042Snwhitehorn		vm_page_dirty(m);
2080225418Skib	vm_page_aflag_clear(m, PGA_WRITEABLE);
2081233949Snwhitehorn	vm_page_aflag_clear(m, PGA_EXECUTABLE);
2082190681Snwhitehorn}
2083190681Snwhitehorn
2084190681Snwhitehorn/*
2085190681Snwhitehorn * Allocate a physical page of memory directly from the phys_avail map.
2086190681Snwhitehorn * Can only be called from moea64_bootstrap before avail start and end are
2087190681Snwhitehorn * calculated.
2088190681Snwhitehorn */
2089216174Snwhitehornvm_offset_t
2090190681Snwhitehornmoea64_bootstrap_alloc(vm_size_t size, u_int align)
2091190681Snwhitehorn{
2092190681Snwhitehorn	vm_offset_t	s, e;
2093190681Snwhitehorn	int		i, j;
2094190681Snwhitehorn
2095190681Snwhitehorn	size = round_page(size);
2096190681Snwhitehorn	for (i = 0; phys_avail[i + 1] != 0; i += 2) {
2097190681Snwhitehorn		if (align != 0)
2098190681Snwhitehorn			s = (phys_avail[i] + align - 1) & ~(align - 1);
2099190681Snwhitehorn		else
2100190681Snwhitehorn			s = phys_avail[i];
2101190681Snwhitehorn		e = s + size;
2102190681Snwhitehorn
2103190681Snwhitehorn		if (s < phys_avail[i] || e > phys_avail[i + 1])
2104190681Snwhitehorn			continue;
2105190681Snwhitehorn
2106215159Snwhitehorn		if (s + size > platform_real_maxaddr())
2107215159Snwhitehorn			continue;
2108215159Snwhitehorn
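		/*
		 * Carve the allocation out of phys_avail: trim it off the
		 * front of the entry, trim it off the back, or split the
		 * entry in two when it lands in the middle.
		 */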
2109190681Snwhitehorn		if (s == phys_avail[i]) {
2110190681Snwhitehorn			phys_avail[i] += size;
2111190681Snwhitehorn		} else if (e == phys_avail[i + 1]) {
2112190681Snwhitehorn			phys_avail[i + 1] -= size;
2113190681Snwhitehorn		} else {
2114190681Snwhitehorn			for (j = phys_avail_count * 2; j > i; j -= 2) {
2115190681Snwhitehorn				phys_avail[j] = phys_avail[j - 2];
2116190681Snwhitehorn				phys_avail[j + 1] = phys_avail[j - 1];
2117190681Snwhitehorn			}
2118190681Snwhitehorn
2119190681Snwhitehorn			phys_avail[i + 3] = phys_avail[i + 1];
2120190681Snwhitehorn			phys_avail[i + 1] = s;
2121190681Snwhitehorn			phys_avail[i + 2] = e;
2122190681Snwhitehorn			phys_avail_count++;
2123190681Snwhitehorn		}
2124190681Snwhitehorn
2125190681Snwhitehorn		return (s);
2126190681Snwhitehorn	}
2127190681Snwhitehorn	panic("moea64_bootstrap_alloc: could not allocate memory");
2128190681Snwhitehorn}
2129190681Snwhitehorn
2130190681Snwhitehornstatic int
2131216174Snwhitehornmoea64_pvo_enter(mmu_t mmu, pmap_t pm, uma_zone_t zone,
2132216174Snwhitehorn    struct pvo_head *pvo_head, vm_offset_t va, vm_offset_t pa,
2133216174Snwhitehorn    uint64_t pte_lo, int flags)
2134190681Snwhitehorn{
2135190681Snwhitehorn	struct	 pvo_entry *pvo;
2136190681Snwhitehorn	uint64_t vsid;
2137190681Snwhitehorn	int	 first;
2138190681Snwhitehorn	u_int	 ptegidx;
2139190681Snwhitehorn	int	 i;
2140190681Snwhitehorn	int      bootstrap;
2141190681Snwhitehorn
2142190681Snwhitehorn	/*
2143190681Snwhitehorn	 * One nasty thing that can happen here is that the UMA calls to
2144190681Snwhitehorn	 * allocate new PVOs need to map more memory, which calls pvo_enter(),
2145190681Snwhitehorn	 * which calls UMA...
2146190681Snwhitehorn	 *
2147190681Snwhitehorn	 * We break the loop by detecting recursion and allocating out of
2148190681Snwhitehorn	 * the bootstrap pool.
2149190681Snwhitehorn	 */
2150190681Snwhitehorn
2151190681Snwhitehorn	first = 0;
2152190681Snwhitehorn	bootstrap = (flags & PVO_BOOTSTRAP);
2153190681Snwhitehorn
2154190681Snwhitehorn	if (!moea64_initialized)
2155190681Snwhitehorn		bootstrap = 1;
2156190681Snwhitehorn
2157233529Snwhitehorn	PMAP_LOCK_ASSERT(pm, MA_OWNED);
2158233529Snwhitehorn	rw_assert(&moea64_table_lock, RA_WLOCKED);
2159233529Snwhitehorn
2160190681Snwhitehorn	/*
2161190681Snwhitehorn	 * Compute the PTE Group index.
2162190681Snwhitehorn	 */
2163190681Snwhitehorn	va &= ~ADDR_POFF;
2164190681Snwhitehorn	vsid = va_to_vsid(pm, va);
2165209975Snwhitehorn	ptegidx = va_to_pteg(vsid, va, flags & PVO_LARGE);
2166190681Snwhitehorn
2167190681Snwhitehorn	/*
2168190681Snwhitehorn	 * If an identical mapping already exists for this page, keep it
2169190681Snwhitehorn	 * (re-inserting its PTE if it was spilled); otherwise remove it.
2170190681Snwhitehorn	 */
2171212363Snwhitehorn	moea64_pvo_enter_calls++;
2172212363Snwhitehorn
2173190681Snwhitehorn	LIST_FOREACH(pvo, &moea64_pvo_table[ptegidx], pvo_olink) {
2174190681Snwhitehorn		if (pvo->pvo_pmap == pm && PVO_VADDR(pvo) == va) {
2175190681Snwhitehorn			if ((pvo->pvo_pte.lpte.pte_lo & LPTE_RPGN) == pa &&
2176217341Snwhitehorn			    (pvo->pvo_pte.lpte.pte_lo & (LPTE_NOEXEC | LPTE_PP))
2177217341Snwhitehorn			    == (pte_lo & (LPTE_NOEXEC | LPTE_PP))) {
2178209975Snwhitehorn			    	if (!(pvo->pvo_pte.lpte.pte_hi & LPTE_VALID)) {
2179209975Snwhitehorn					/* Re-insert if spilled */
2180216174Snwhitehorn					i = MOEA64_PTE_INSERT(mmu, ptegidx,
2181209975Snwhitehorn					    &pvo->pvo_pte.lpte);
2182209975Snwhitehorn					if (i >= 0)
2183209975Snwhitehorn						PVO_PTEGIDX_SET(pvo, i);
2184209975Snwhitehorn					moea64_pte_overflow--;
2185209975Snwhitehorn				}
2186190681Snwhitehorn				return (0);
2187190681Snwhitehorn			}
2188216174Snwhitehorn			moea64_pvo_remove(mmu, pvo);
2189190681Snwhitehorn			break;
2190190681Snwhitehorn		}
2191190681Snwhitehorn	}
2192190681Snwhitehorn
2193190681Snwhitehorn	/*
2194190681Snwhitehorn	 * If we aren't overwriting a mapping, try to allocate.
2195190681Snwhitehorn	 */
2196190681Snwhitehorn	if (bootstrap) {
2197190681Snwhitehorn		if (moea64_bpvo_pool_index >= BPVO_POOL_SIZE) {
2198209975Snwhitehorn			panic("moea64_enter: bpvo pool exhausted, %d, %d, %zd",
2199190681Snwhitehorn			      moea64_bpvo_pool_index, BPVO_POOL_SIZE,
2200190681Snwhitehorn			      BPVO_POOL_SIZE * sizeof(struct pvo_entry));
2201190681Snwhitehorn		}
2202190681Snwhitehorn		pvo = &moea64_bpvo_pool[moea64_bpvo_pool_index];
2203190681Snwhitehorn		moea64_bpvo_pool_index++;
2204190681Snwhitehorn		bootstrap = 1;
2205190681Snwhitehorn	} else {
2206190681Snwhitehorn		pvo = uma_zalloc(zone, M_NOWAIT);
2207190681Snwhitehorn	}
2208190681Snwhitehorn
2209233529Snwhitehorn	if (pvo == NULL)
2210190681Snwhitehorn		return (ENOMEM);
2211190681Snwhitehorn
2212190681Snwhitehorn	moea64_pvo_entries++;
2213190681Snwhitehorn	pvo->pvo_vaddr = va;
2214209975Snwhitehorn	pvo->pvo_vpn = (uint64_t)((va & ADDR_PIDX) >> ADDR_PIDX_SHFT)
2215209975Snwhitehorn	    | (vsid << 16);
2216190681Snwhitehorn	pvo->pvo_pmap = pm;
2217190681Snwhitehorn	LIST_INSERT_HEAD(&moea64_pvo_table[ptegidx], pvo, pvo_olink);
2218190681Snwhitehorn	pvo->pvo_vaddr &= ~ADDR_POFF;
2219190681Snwhitehorn
2220190681Snwhitehorn	if (flags & PVO_WIRED)
2221190681Snwhitehorn		pvo->pvo_vaddr |= PVO_WIRED;
2222235689Snwhitehorn	if (pvo_head != NULL)
2223190681Snwhitehorn		pvo->pvo_vaddr |= PVO_MANAGED;
2224190681Snwhitehorn	if (bootstrap)
2225190681Snwhitehorn		pvo->pvo_vaddr |= PVO_BOOTSTRAP;
2226209975Snwhitehorn	if (flags & PVO_LARGE)
2227209975Snwhitehorn		pvo->pvo_vaddr |= PVO_LARGE;
2228190681Snwhitehorn
2229190681Snwhitehorn	moea64_pte_create(&pvo->pvo_pte.lpte, vsid, va,
2230209975Snwhitehorn	    (uint64_t)(pa) | pte_lo, flags);
2231190681Snwhitehorn
2232190681Snwhitehorn	/*
2233228412Snwhitehorn	 * Add to pmap list
2234228412Snwhitehorn	 */
2235235689Snwhitehorn	RB_INSERT(pvo_tree, &pm->pmap_pvo, pvo);
2236228412Snwhitehorn
2237228412Snwhitehorn	/*
2238190681Snwhitehorn	 * Remember if the list was empty and therefore will be the first
2239190681Snwhitehorn	 * item.
2240190681Snwhitehorn	 */
2241235689Snwhitehorn	if (pvo_head != NULL) {
2242235689Snwhitehorn		if (LIST_FIRST(pvo_head) == NULL)
2243235689Snwhitehorn			first = 1;
2244235689Snwhitehorn		LIST_INSERT_HEAD(pvo_head, pvo, pvo_vlink);
2245235689Snwhitehorn	}
2246190681Snwhitehorn
2247209975Snwhitehorn	if (pvo->pvo_vaddr & PVO_WIRED) {
2248209975Snwhitehorn		pvo->pvo_pte.lpte.pte_hi |= LPTE_WIRED;
2249190681Snwhitehorn		pm->pm_stats.wired_count++;
2250209975Snwhitehorn	}
2251190681Snwhitehorn	pm->pm_stats.resident_count++;
2252190681Snwhitehorn
2253190681Snwhitehorn	/*
2254190681Snwhitehorn	 * We hope this succeeds but it isn't required.
2255190681Snwhitehorn	 */
2256216174Snwhitehorn	i = MOEA64_PTE_INSERT(mmu, ptegidx, &pvo->pvo_pte.lpte);
2257190681Snwhitehorn	if (i >= 0) {
2258190681Snwhitehorn		PVO_PTEGIDX_SET(pvo, i);
2259190681Snwhitehorn	} else {
2260190681Snwhitehorn		panic("moea64_pvo_enter: overflow");
2261190681Snwhitehorn		moea64_pte_overflow++;
2262190681Snwhitehorn	}
2263190681Snwhitehorn
2264204042Snwhitehorn	if (pm == kernel_pmap)
2265204042Snwhitehorn		isync();
2266204042Snwhitehorn
2267209975Snwhitehorn#ifdef __powerpc64__
2268209975Snwhitehorn	/*
2269209975Snwhitehorn	 * Make sure all our bootstrap mappings are in the SLB as soon
2270209975Snwhitehorn	 * as virtual memory is switched on.
2271209975Snwhitehorn	 */
2272209975Snwhitehorn	if (!pmap_bootstrapped)
2273209975Snwhitehorn		moea64_bootstrap_slb_prefault(va, flags & PVO_LARGE);
2274209975Snwhitehorn#endif
2275209975Snwhitehorn
2276190681Snwhitehorn	return (first ? ENOENT : 0);
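	/* ENOENT tells the caller that this was the page's first mapping. */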
2277190681Snwhitehorn}
2278190681Snwhitehorn
2279190681Snwhitehornstatic void
2280216174Snwhitehornmoea64_pvo_remove(mmu_t mmu, struct pvo_entry *pvo)
2281190681Snwhitehorn{
2282233949Snwhitehorn	struct	vm_page *pg;
2283216174Snwhitehorn	uintptr_t pt;
2284190681Snwhitehorn
2285233529Snwhitehorn	PMAP_LOCK_ASSERT(pvo->pvo_pmap, MA_OWNED);
2286233529Snwhitehorn	rw_assert(&moea64_table_lock, RA_WLOCKED);
2287233529Snwhitehorn
2288190681Snwhitehorn	/*
2289190681Snwhitehorn	 * If there is an active pte entry, we need to deactivate it (and
2290190681Snwhitehorn	 * save the ref & chg bits).
2291190681Snwhitehorn	 */
2292216174Snwhitehorn	pt = MOEA64_PVO_TO_PTE(mmu, pvo);
2293216174Snwhitehorn	if (pt != -1) {
2294216174Snwhitehorn		MOEA64_PTE_UNSET(mmu, pt, &pvo->pvo_pte.lpte, pvo->pvo_vpn);
2295190681Snwhitehorn		PVO_PTEGIDX_CLR(pvo);
2296190681Snwhitehorn	} else {
2297190681Snwhitehorn		moea64_pte_overflow--;
2298190681Snwhitehorn	}
2299190681Snwhitehorn
2300190681Snwhitehorn	/*
2301190681Snwhitehorn	 * Update our statistics.
2302190681Snwhitehorn	 */
2303190681Snwhitehorn	pvo->pvo_pmap->pm_stats.resident_count--;
2304204042Snwhitehorn	if (pvo->pvo_vaddr & PVO_WIRED)
2305190681Snwhitehorn		pvo->pvo_pmap->pm_stats.wired_count--;
2306190681Snwhitehorn
2307190681Snwhitehorn	/*
2308235689Snwhitehorn	 * Remove this PVO from the pmap list.
2309233529Snwhitehorn	 */
2310235689Snwhitehorn	RB_REMOVE(pvo_tree, &pvo->pvo_pmap->pmap_pvo, pvo);
2311233529Snwhitehorn
2312233529Snwhitehorn	/*
2313233529Snwhitehorn	 * Remove this from the overflow list and return it to the pool
2314233529Snwhitehorn	 * if we aren't going to reuse it.
2315233529Snwhitehorn	 */
2316233529Snwhitehorn	LIST_REMOVE(pvo, pvo_olink);
2317233529Snwhitehorn
2318233529Snwhitehorn	/*
2319233434Snwhitehorn	 * Update vm about the REF/CHG bits if the page is managed.
2320190681Snwhitehorn	 */
2321233949Snwhitehorn	pg = PHYS_TO_VM_PAGE(pvo->pvo_pte.lpte.pte_lo & LPTE_RPGN);
2322233949Snwhitehorn
2323234155Snwhitehorn	if ((pvo->pvo_vaddr & PVO_MANAGED) == PVO_MANAGED && pg != NULL) {
2324235689Snwhitehorn		LIST_REMOVE(pvo, pvo_vlink);
2325234155Snwhitehorn		if ((pvo->pvo_pte.lpte.pte_lo & LPTE_PP) != LPTE_BR) {
2326233434Snwhitehorn			if (pvo->pvo_pte.lpte.pte_lo & LPTE_CHG)
2327233434Snwhitehorn				vm_page_dirty(pg);
2328233434Snwhitehorn			if (pvo->pvo_pte.lpte.pte_lo & LPTE_REF)
2329233434Snwhitehorn				vm_page_aflag_set(pg, PGA_REFERENCED);
2330233529Snwhitehorn			if (LIST_EMPTY(vm_page_to_pvoh(pg)))
2331233529Snwhitehorn				vm_page_aflag_clear(pg, PGA_WRITEABLE);
2332190681Snwhitehorn		}
2333234155Snwhitehorn		if (LIST_EMPTY(vm_page_to_pvoh(pg)))
2334234155Snwhitehorn			vm_page_aflag_clear(pg, PGA_EXECUTABLE);
2335190681Snwhitehorn	}
2336190681Snwhitehorn
2337212363Snwhitehorn	moea64_pvo_entries--;
2338212363Snwhitehorn	moea64_pvo_remove_calls++;
2339212363Snwhitehorn
2340190681Snwhitehorn	if (!(pvo->pvo_vaddr & PVO_BOOTSTRAP))
2341204042Snwhitehorn		uma_zfree((pvo->pvo_vaddr & PVO_MANAGED) ? moea64_mpvo_zone :
2342190681Snwhitehorn		    moea64_upvo_zone, pvo);
2343190681Snwhitehorn}
2344190681Snwhitehorn
2345190681Snwhitehornstatic struct pvo_entry *
2346209975Snwhitehornmoea64_pvo_find_va(pmap_t pm, vm_offset_t va)
2347190681Snwhitehorn{
2348235689Snwhitehorn	struct pvo_entry key;
2349190681Snwhitehorn
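	/*
	 * The pmap's RB tree compares entries on PVO_VADDR() alone, so a
	 * stack key holding the page-aligned VA suffices for the lookup.
	 */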
2350235689Snwhitehorn	key.pvo_vaddr = va & ~ADDR_POFF;
2351235689Snwhitehorn	return (RB_FIND(pvo_tree, &pm->pmap_pvo, &key));
2352190681Snwhitehorn}
2353190681Snwhitehorn
2354190681Snwhitehornstatic boolean_t
2355216174Snwhitehornmoea64_query_bit(mmu_t mmu, vm_page_t m, u_int64_t ptebit)
2356190681Snwhitehorn{
2357190681Snwhitehorn	struct	pvo_entry *pvo;
2358216174Snwhitehorn	uintptr_t pt;
2359190681Snwhitehorn
2360233529Snwhitehorn	LOCK_TABLE_RD();
2361190681Snwhitehorn	LIST_FOREACH(pvo, vm_page_to_pvoh(m), pvo_vlink) {
2362190681Snwhitehorn		/*
2363233434Snwhitehorn		 * See if we saved the bit off.  If so, return success.
2364190681Snwhitehorn		 */
2365190681Snwhitehorn		if (pvo->pvo_pte.lpte.pte_lo & ptebit) {
2366233529Snwhitehorn			UNLOCK_TABLE_RD();
2367190681Snwhitehorn			return (TRUE);
2368190681Snwhitehorn		}
2369190681Snwhitehorn	}
2370190681Snwhitehorn
2371190681Snwhitehorn	/*
2372190681Snwhitehorn	 * No luck, now go through the hard part of looking at the PTEs
2373190681Snwhitehorn	 * themselves.  Sync so that any pending REF/CHG bits are flushed to
2374190681Snwhitehorn	 * the PTEs.
2375190681Snwhitehorn	 */
2376216174Snwhitehorn	powerpc_sync();
2377190681Snwhitehorn	LIST_FOREACH(pvo, vm_page_to_pvoh(m), pvo_vlink) {
2378190681Snwhitehorn
2379190681Snwhitehorn		/*
2380190681Snwhitehorn		 * See if this pvo has a valid PTE.  if so, fetch the
2381190681Snwhitehorn		 * REF/CHG bits from the valid PTE.  If the appropriate
2382233434Snwhitehorn		 * ptebit is set, return success.
2383190681Snwhitehorn		 */
2384233529Snwhitehorn		PMAP_LOCK(pvo->pvo_pmap);
2385216174Snwhitehorn		pt = MOEA64_PVO_TO_PTE(mmu, pvo);
2386216174Snwhitehorn		if (pt != -1) {
2387216174Snwhitehorn			MOEA64_PTE_SYNCH(mmu, pt, &pvo->pvo_pte.lpte);
2388190681Snwhitehorn			if (pvo->pvo_pte.lpte.pte_lo & ptebit) {
2389233529Snwhitehorn				PMAP_UNLOCK(pvo->pvo_pmap);
2390233529Snwhitehorn				UNLOCK_TABLE_RD();
2391190681Snwhitehorn				return (TRUE);
2392190681Snwhitehorn			}
2393190681Snwhitehorn		}
2394233529Snwhitehorn		PMAP_UNLOCK(pvo->pvo_pmap);
2395190681Snwhitehorn	}
2396190681Snwhitehorn
2397233529Snwhitehorn	UNLOCK_TABLE_RD();
2398190681Snwhitehorn	return (FALSE);
2399190681Snwhitehorn}
2400190681Snwhitehorn
2401190681Snwhitehornstatic u_int
2402216174Snwhitehornmoea64_clear_bit(mmu_t mmu, vm_page_t m, u_int64_t ptebit)
2403190681Snwhitehorn{
2404190681Snwhitehorn	u_int	count;
2405190681Snwhitehorn	struct	pvo_entry *pvo;
2406216174Snwhitehorn	uintptr_t pt;
2407190681Snwhitehorn
2408190681Snwhitehorn	/*
2409190681Snwhitehorn	 * Sync so that any pending REF/CHG bits are flushed to the PTEs (so
2410190681Snwhitehorn	 * we can reset the right ones).  Note that the pvo entries and
2411190681Snwhitehorn	 * list heads are not mapped by the PTEs being examined here, so
2412190681Snwhitehorn	 * walking them cannot itself set the
2413190681Snwhitehorn	 * REF/CHG bits.
2414190681Snwhitehorn	 */
	powerpc_sync();

	/*
	 * For each pvo entry, clear the pvo's ptebit.  If this pvo has a
	 * valid pte, clear the ptebit from the valid pte.
	 */
	count = 0;
	LOCK_TABLE_RD();
	LIST_FOREACH(pvo, vm_page_to_pvoh(m), pvo_vlink) {
		PMAP_LOCK(pvo->pvo_pmap);
		pt = MOEA64_PVO_TO_PTE(mmu, pvo);
		if (pt != -1) {
			MOEA64_PTE_SYNCH(mmu, pt, &pvo->pvo_pte.lpte);
			if (pvo->pvo_pte.lpte.pte_lo & ptebit) {
				count++;
				MOEA64_PTE_CLEAR(mmu, pt, &pvo->pvo_pte.lpte,
				    pvo->pvo_vpn, ptebit);
			}
		}
		pvo->pvo_pte.lpte.pte_lo &= ~ptebit;
		PMAP_UNLOCK(pvo->pvo_pmap);
	}

	UNLOCK_TABLE_RD();
	return (count);
}

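/*
 * Check whether the given range of physical memory is mapped 1:1
 * (virtual address equal to physical address) in the kernel pmap.
 * Return 0 if every page in the range is, or EFAULT otherwise.
 */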
boolean_t
moea64_dev_direct_mapped(mmu_t mmu, vm_paddr_t pa, vm_size_t size)
{
	struct pvo_entry *pvo, key;
	vm_offset_t ppa;
	int error = 0;

	PMAP_LOCK(kernel_pmap);
	key.pvo_vaddr = ppa = pa & ~ADDR_POFF;
	for (pvo = RB_FIND(pvo_tree, &kernel_pmap->pmap_pvo, &key);
	    ppa < pa + size; ppa += PAGE_SIZE,
	    pvo = RB_NEXT(pvo_tree, &kernel_pmap->pmap_pvo, pvo)) {
		if (pvo == NULL ||
		    (pvo->pvo_pte.lpte.pte_lo & LPTE_RPGN) != ppa) {
			error = EFAULT;
			break;
		}
	}
	PMAP_UNLOCK(kernel_pmap);

	return (error);
}

/*
 * Map a set of physical memory pages into the kernel virtual
 * address space. Return a pointer to where it is mapped. This
 * routine is intended to be used for mapping device memory,
 * NOT real memory.
 */
void *
moea64_mapdev_attr(mmu_t mmu, vm_offset_t pa, vm_size_t size, vm_memattr_t ma)
{
	vm_offset_t va, tmpva, ppa, offset;

	ppa = trunc_page(pa);
	offset = pa & PAGE_MASK;
	size = roundup2(offset + size, PAGE_SIZE);

	va = kmem_alloc_nofault(kernel_map, size);

	if (!va)
		panic("moea64_mapdev: Couldn't alloc kernel virtual memory");

	for (tmpva = va; size > 0;) {
		moea64_kenter_attr(mmu, tmpva, ppa, ma);
		size -= PAGE_SIZE;
		tmpva += PAGE_SIZE;
		ppa += PAGE_SIZE;
	}

	return ((void *)(va + offset));
}

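/*
 * As above, but with the default memory attribute.
 */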
void *
moea64_mapdev(mmu_t mmu, vm_paddr_t pa, vm_size_t size)
{

	return (moea64_mapdev_attr(mmu, pa, size, VM_MEMATTR_DEFAULT));
}

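/*
 * Unmap a region previously mapped with moea64_mapdev(), releasing
 * the kernel virtual address range that backed it.
 */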
void
moea64_unmapdev(mmu_t mmu, vm_offset_t va, vm_size_t size)
{
	vm_offset_t base, offset;

	base = trunc_page(va);
	offset = va & PAGE_MASK;
	size = roundup2(offset + size, PAGE_SIZE);

	kmem_free(kernel_map, base, size);
}

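/*
 * A minimal usage sketch of the mapdev/unmapdev pair above.  The
 * example function, the MOEA64_MAPDEV_EXAMPLE guard, and the register
 * layout are hypothetical and for illustration only: map one page of
 * device registers cache-inhibited, read the first 32-bit register,
 * and tear the mapping down again.
 */
#ifdef MOEA64_MAPDEV_EXAMPLE
static uint32_t
moea64_mapdev_example(mmu_t mmu, vm_paddr_t pa)
{
	volatile uint32_t *regs;
	uint32_t val;

	/* Map one page of registers with the uncacheable attribute. */
	regs = moea64_mapdev_attr(mmu, pa, PAGE_SIZE,
	    VM_MEMATTR_UNCACHEABLE);
	val = regs[0];		/* Hypothetical: read the first register. */
	moea64_unmapdev(mmu, (vm_offset_t)regs, PAGE_SIZE);
	return (val);
}
#endif

/*
 * Synchronize the instruction cache with the data cache over a range
 * of the given pmap's address space, one page at a time, skipping
 * pages that are unmapped or mapped cache-inhibited.
 */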
void
moea64_sync_icache(mmu_t mmu, pmap_t pm, vm_offset_t va, vm_size_t sz)
{
	struct pvo_entry *pvo;
	vm_offset_t lim;
	vm_paddr_t pa;
	vm_size_t len;

	PMAP_LOCK(pm);
	while (sz > 0) {
		/* End of this page; va + 1 keeps len nonzero when va is aligned. */
		lim = round_page(va + 1);
		len = MIN(lim - va, sz);
		pvo = moea64_pvo_find_va(pm, va & ~ADDR_POFF);
		if (pvo != NULL && !(pvo->pvo_pte.lpte.pte_lo & LPTE_I)) {
			pa = (pvo->pvo_pte.lpte.pte_lo & LPTE_RPGN) |
			    (va & ADDR_POFF);
			moea64_syncicache(mmu, pm, va, pa, len);
		}
		va += len;
		sz -= len;
	}
	PMAP_UNLOCK(pm);
}