/*-
 * Copyright (c) 2001 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Matt Thomas <matt@3am-software.com> of Allegro Networks, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *        This product includes software developed by the NetBSD
 *        Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */
/*-
 * Copyright (C) 1995, 1996 Wolfgang Solfrank.
 * Copyright (C) 1995, 1996 TooLs GmbH.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by TooLs GmbH.
 * 4. The name of TooLs GmbH may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY TOOLS GMBH ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL TOOLS GMBH BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
 * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
 * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * $NetBSD: pmap.c,v 1.28 2000/03/26 20:42:36 kleink Exp $
 */
/*-
 * Copyright (C) 2001 Benno Rice.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY Benno Rice ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL TOOLS GMBH BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
 * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
 * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: head/sys/powerpc/aim/mmu_oea64.c 228412 2011-12-11 17:19:48Z nwhitehorn $");

/*
 * Manages physical address maps.
 *
 * In addition to hardware address maps, this module is called upon to
 * provide software-use-only maps which may or may not be stored in the
 * same form as hardware maps.  These pseudo-maps are used to store
 * intermediate results from copy operations to and from address spaces.
 *
 * Since the information managed by this module is also stored by the
 * logical address mapping module, this module may throw away valid virtual
 * to physical mappings at almost any time.  However, invalidations of
 * mappings must be done as requested.
 *
 * In order to cope with hardware architectures which make virtual to
 * physical map invalidates expensive, this module may delay invalidate
 * reduced protection operations until such time as they are actually
 * necessary.  This module is given full information as to which processors
 * are currently using which maps, and to when physical maps must be made
 * correct.
 */

#include "opt_kstack_pages.h"

#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/queue.h>
#include <sys/cpuset.h>
#include <sys/ktr.h>
#include <sys/lock.h>
#include <sys/msgbuf.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/sched.h>
#include <sys/sysctl.h>
#include <sys/systm.h>
#include <sys/vmmeter.h>

#include <sys/kdb.h>

#include <dev/ofw/openfirm.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/vm_kern.h>
#include <vm/vm_page.h>
#include <vm/vm_map.h>
#include <vm/vm_object.h>
#include <vm/vm_extern.h>
#include <vm/vm_pageout.h>
#include <vm/vm_pager.h>
#include <vm/uma.h>

#include <machine/_inttypes.h>
#include <machine/cpu.h>
#include <machine/platform.h>
#include <machine/frame.h>
#include <machine/md_var.h>
#include <machine/psl.h>
#include <machine/bat.h>
#include <machine/hid.h>
#include <machine/pte.h>
#include <machine/sr.h>
#include <machine/trap.h>
#include <machine/mmuvar.h>

#include "mmu_oea64.h"
#include "mmu_if.h"
#include "moea64_if.h"

void moea64_release_vsid(uint64_t vsid);
uintptr_t moea64_get_unique_vsid(void);

#define DISABLE_TRANS(msr)	msr = mfmsr(); mtmsr(msr & ~PSL_DR)
#define ENABLE_TRANS(msr)	mtmsr(msr)
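
/*
 * Usage sketch (illustrative only): these macros bracket code that must
 * run with data relocation off, saving and restoring the MSR:
 *
 *	register_t msr;
 *
 *	DISABLE_TRANS(msr);
 *	... access physical addresses directly ...
 *	ENABLE_TRANS(msr);
 *
 * Note that DISABLE_TRANS() expands to two statements, so it cannot be
 * the unbraced body of an if or a loop.
 */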

#define	VSID_MAKE(sr, hash)	((sr) | (((hash) & 0xfffff) << 4))
#define	VSID_TO_HASH(vsid)	(((vsid) >> 4) & 0xfffff)
#define	VSID_HASH_MASK		0x0000007fffffffffULL
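
/*
 * Illustrative identity: VSID_TO_HASH(VSID_MAKE(sr, h)) == h for any
 * 20-bit hash h, assuming the segment register number sr fits in the
 * low four bits; VSID_MAKE() packs the hash into bits 4..23.
 */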

#define LOCK_TABLE() mtx_lock(&moea64_table_mutex)
#define UNLOCK_TABLE() mtx_unlock(&moea64_table_mutex);
#define ASSERT_TABLE_LOCK() mtx_assert(&moea64_table_mutex, MA_OWNED)

struct ofw_map {
	cell_t	om_va;
	cell_t	om_len;
	cell_t	om_pa_hi;
	cell_t	om_pa_lo;
	cell_t	om_mode;
};
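
/*
 * Each entry of the firmware "translations" property is five 32-bit
 * cells laid out as above: virtual address, length, the high and low
 * words of the physical address, and a mode word.
 * moea64_add_ofw_mappings() below reassembles om_pa_hi/om_pa_lo into a
 * full 64-bit physical address on powerpc64.
 */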

/*
 * Map of physical memory regions.
 */
static struct	mem_region *regions;
static struct	mem_region *pregions;
static u_int	phys_avail_count;
static int	regions_sz, pregions_sz;

extern void bs_remap_earlyboot(void);

/*
 * Lock for the pteg and pvo tables.
 */
struct mtx	moea64_table_mutex;
struct mtx	moea64_slb_mutex;

/*
 * PTEG data.
 */
u_int		moea64_pteg_count;
u_int		moea64_pteg_mask;

/*
 * PVO data.
 */
struct	pvo_head *moea64_pvo_table;		/* pvo entries by pteg index */
struct	pvo_head moea64_pvo_kunmanaged =	/* list of unmanaged pages */
    LIST_HEAD_INITIALIZER(moea64_pvo_kunmanaged);

uma_zone_t	moea64_upvo_zone; /* zone for pvo entries for unmanaged pages */
uma_zone_t	moea64_mpvo_zone; /* zone for pvo entries for managed pages */

#define	BPVO_POOL_SIZE	327680
static struct	pvo_entry *moea64_bpvo_pool;
static int	moea64_bpvo_pool_index = 0;

#define	VSID_NBPW	(sizeof(u_int32_t) * 8)
#ifdef __powerpc64__
#define	NVSIDS		(NPMAPS * 16)
#define VSID_HASHMASK	0xffffffffUL
#else
#define NVSIDS		NPMAPS
#define VSID_HASHMASK	0xfffffUL
#endif
static u_int	moea64_vsid_bitmap[NVSIDS / VSID_NBPW];

static boolean_t moea64_initialized = FALSE;

/*
 * Statistics.
 */
u_int	moea64_pte_valid = 0;
u_int	moea64_pte_overflow = 0;
u_int	moea64_pvo_entries = 0;
u_int	moea64_pvo_enter_calls = 0;
u_int	moea64_pvo_remove_calls = 0;
SYSCTL_INT(_machdep, OID_AUTO, moea64_pte_valid, CTLFLAG_RD,
    &moea64_pte_valid, 0, "");
SYSCTL_INT(_machdep, OID_AUTO, moea64_pte_overflow, CTLFLAG_RD,
    &moea64_pte_overflow, 0, "");
SYSCTL_INT(_machdep, OID_AUTO, moea64_pvo_entries, CTLFLAG_RD,
    &moea64_pvo_entries, 0, "");
SYSCTL_INT(_machdep, OID_AUTO, moea64_pvo_enter_calls, CTLFLAG_RD,
    &moea64_pvo_enter_calls, 0, "");
SYSCTL_INT(_machdep, OID_AUTO, moea64_pvo_remove_calls, CTLFLAG_RD,
    &moea64_pvo_remove_calls, 0, "");

vm_offset_t	moea64_scratchpage_va[2];
struct pvo_entry *moea64_scratchpage_pvo[2];
uintptr_t	moea64_scratchpage_pte[2];
struct	mtx	moea64_scratchpage_mtx;

uint64_t 	moea64_large_page_mask = 0;
int		moea64_large_page_size = 0;
int		moea64_large_page_shift = 0;

/*
 * PVO calls.
 */
static int	moea64_pvo_enter(mmu_t, pmap_t, uma_zone_t, struct pvo_head *,
		    vm_offset_t, vm_offset_t, uint64_t, int);
static void	moea64_pvo_remove(mmu_t, struct pvo_entry *);
static struct	pvo_entry *moea64_pvo_find_va(pmap_t, vm_offset_t);

/*
 * Utility routines.
 */
static void		moea64_enter_locked(mmu_t, pmap_t, vm_offset_t,
			    vm_page_t, vm_prot_t, boolean_t);
static boolean_t	moea64_query_bit(mmu_t, vm_page_t, u_int64_t);
static u_int		moea64_clear_bit(mmu_t, vm_page_t, u_int64_t);
static void		moea64_kremove(mmu_t, vm_offset_t);
static void		moea64_syncicache(mmu_t, pmap_t pmap, vm_offset_t va,
			    vm_offset_t pa, vm_size_t sz);

/*
 * Kernel MMU interface
 */
void moea64_change_wiring(mmu_t, pmap_t, vm_offset_t, boolean_t);
void moea64_clear_modify(mmu_t, vm_page_t);
void moea64_clear_reference(mmu_t, vm_page_t);
void moea64_copy_page(mmu_t, vm_page_t, vm_page_t);
void moea64_enter(mmu_t, pmap_t, vm_offset_t, vm_page_t, vm_prot_t, boolean_t);
void moea64_enter_object(mmu_t, pmap_t, vm_offset_t, vm_offset_t, vm_page_t,
    vm_prot_t);
void moea64_enter_quick(mmu_t, pmap_t, vm_offset_t, vm_page_t, vm_prot_t);
vm_paddr_t moea64_extract(mmu_t, pmap_t, vm_offset_t);
vm_page_t moea64_extract_and_hold(mmu_t, pmap_t, vm_offset_t, vm_prot_t);
void moea64_init(mmu_t);
boolean_t moea64_is_modified(mmu_t, vm_page_t);
boolean_t moea64_is_prefaultable(mmu_t, pmap_t, vm_offset_t);
boolean_t moea64_is_referenced(mmu_t, vm_page_t);
boolean_t moea64_ts_referenced(mmu_t, vm_page_t);
vm_offset_t moea64_map(mmu_t, vm_offset_t *, vm_offset_t, vm_offset_t, int);
boolean_t moea64_page_exists_quick(mmu_t, pmap_t, vm_page_t);
int moea64_page_wired_mappings(mmu_t, vm_page_t);
void moea64_pinit(mmu_t, pmap_t);
void moea64_pinit0(mmu_t, pmap_t);
void moea64_protect(mmu_t, pmap_t, vm_offset_t, vm_offset_t, vm_prot_t);
void moea64_qenter(mmu_t, vm_offset_t, vm_page_t *, int);
void moea64_qremove(mmu_t, vm_offset_t, int);
void moea64_release(mmu_t, pmap_t);
void moea64_remove(mmu_t, pmap_t, vm_offset_t, vm_offset_t);
void moea64_remove_all(mmu_t, vm_page_t);
void moea64_remove_write(mmu_t, vm_page_t);
void moea64_zero_page(mmu_t, vm_page_t);
void moea64_zero_page_area(mmu_t, vm_page_t, int, int);
void moea64_zero_page_idle(mmu_t, vm_page_t);
void moea64_activate(mmu_t, struct thread *);
void moea64_deactivate(mmu_t, struct thread *);
void *moea64_mapdev(mmu_t, vm_offset_t, vm_size_t);
void *moea64_mapdev_attr(mmu_t, vm_offset_t, vm_size_t, vm_memattr_t);
void moea64_unmapdev(mmu_t, vm_offset_t, vm_size_t);
vm_offset_t moea64_kextract(mmu_t, vm_offset_t);
void moea64_page_set_memattr(mmu_t, vm_page_t m, vm_memattr_t ma);
void moea64_kenter_attr(mmu_t, vm_offset_t, vm_offset_t, vm_memattr_t ma);
void moea64_kenter(mmu_t, vm_offset_t, vm_offset_t);
boolean_t moea64_dev_direct_mapped(mmu_t, vm_offset_t, vm_size_t);
static void moea64_sync_icache(mmu_t, pmap_t, vm_offset_t, vm_size_t);

static mmu_method_t moea64_methods[] = {
	MMUMETHOD(mmu_change_wiring,	moea64_change_wiring),
	MMUMETHOD(mmu_clear_modify,	moea64_clear_modify),
	MMUMETHOD(mmu_clear_reference,	moea64_clear_reference),
	MMUMETHOD(mmu_copy_page,	moea64_copy_page),
	MMUMETHOD(mmu_enter,		moea64_enter),
	MMUMETHOD(mmu_enter_object,	moea64_enter_object),
	MMUMETHOD(mmu_enter_quick,	moea64_enter_quick),
	MMUMETHOD(mmu_extract,		moea64_extract),
	MMUMETHOD(mmu_extract_and_hold,	moea64_extract_and_hold),
	MMUMETHOD(mmu_init,		moea64_init),
	MMUMETHOD(mmu_is_modified,	moea64_is_modified),
	MMUMETHOD(mmu_is_prefaultable,	moea64_is_prefaultable),
	MMUMETHOD(mmu_is_referenced,	moea64_is_referenced),
	MMUMETHOD(mmu_ts_referenced,	moea64_ts_referenced),
	MMUMETHOD(mmu_map,     		moea64_map),
	MMUMETHOD(mmu_page_exists_quick,moea64_page_exists_quick),
	MMUMETHOD(mmu_page_wired_mappings,moea64_page_wired_mappings),
	MMUMETHOD(mmu_pinit,		moea64_pinit),
	MMUMETHOD(mmu_pinit0,		moea64_pinit0),
	MMUMETHOD(mmu_protect,		moea64_protect),
	MMUMETHOD(mmu_qenter,		moea64_qenter),
	MMUMETHOD(mmu_qremove,		moea64_qremove),
	MMUMETHOD(mmu_release,		moea64_release),
	MMUMETHOD(mmu_remove,		moea64_remove),
	MMUMETHOD(mmu_remove_all,      	moea64_remove_all),
	MMUMETHOD(mmu_remove_write,	moea64_remove_write),
	MMUMETHOD(mmu_sync_icache,	moea64_sync_icache),
	MMUMETHOD(mmu_zero_page,       	moea64_zero_page),
	MMUMETHOD(mmu_zero_page_area,	moea64_zero_page_area),
	MMUMETHOD(mmu_zero_page_idle,	moea64_zero_page_idle),
	MMUMETHOD(mmu_activate,		moea64_activate),
	MMUMETHOD(mmu_deactivate,      	moea64_deactivate),
	MMUMETHOD(mmu_page_set_memattr,	moea64_page_set_memattr),

	/* Internal interfaces */
	MMUMETHOD(mmu_mapdev,		moea64_mapdev),
	MMUMETHOD(mmu_mapdev_attr,	moea64_mapdev_attr),
	MMUMETHOD(mmu_unmapdev,		moea64_unmapdev),
	MMUMETHOD(mmu_kextract,		moea64_kextract),
	MMUMETHOD(mmu_kenter,		moea64_kenter),
	MMUMETHOD(mmu_kenter_attr,	moea64_kenter_attr),
	MMUMETHOD(mmu_dev_direct_mapped,moea64_dev_direct_mapped),

	{ 0, 0 }
};

MMU_DEF(oea64_mmu, "mmu_oea64_base", moea64_methods, 0);

static __inline u_int
va_to_pteg(uint64_t vsid, vm_offset_t addr, int large)
{
	uint64_t hash;
	int shift;

	shift = large ? moea64_large_page_shift : ADDR_PIDX_SHFT;
	hash = (vsid & VSID_HASH_MASK) ^ (((uint64_t)addr & ADDR_PIDX) >>
	    shift);
	return (hash & moea64_pteg_mask);
}
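
/*
 * Worked example (illustrative): with 4 KB pages the shift is
 * ADDR_PIDX_SHFT, so the hash mixes the VSID with the page-index bits
 * of the effective address; the byte offset within a page never affects
 * which PTEG is selected. With 16 MB large pages the shift becomes
 * moea64_large_page_shift (24), so all bits below the large-page
 * boundary are likewise ignored.
 */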

static __inline struct pvo_head *
vm_page_to_pvoh(vm_page_t m)
{

	return (&m->md.mdpg_pvoh);
}

static __inline void
moea64_attr_clear(vm_page_t m, u_int64_t ptebit)
{

	mtx_assert(&vm_page_queue_mtx, MA_OWNED);
	m->md.mdpg_attrs &= ~ptebit;
}

static __inline u_int64_t
moea64_attr_fetch(vm_page_t m)
{

	return (m->md.mdpg_attrs);
}

static __inline void
moea64_attr_save(vm_page_t m, u_int64_t ptebit)
{

	mtx_assert(&vm_page_queue_mtx, MA_OWNED);
	m->md.mdpg_attrs |= ptebit;
}

static __inline void
moea64_pte_create(struct lpte *pt, uint64_t vsid, vm_offset_t va,
    uint64_t pte_lo, int flags)
{

	ASSERT_TABLE_LOCK();

	/*
	 * Construct a PTE.  Default to IMB initially.  Valid bit only gets
	 * set when the real pte is set in memory.
	 *
	 * Note: Don't set the valid bit for correct operation of tlb update.
	 */
	pt->pte_hi = (vsid << LPTE_VSID_SHIFT) |
	    (((uint64_t)(va & ADDR_PIDX) >> ADDR_API_SHFT64) & LPTE_API);

	if (flags & PVO_LARGE)
		pt->pte_hi |= LPTE_BIG;

	pt->pte_lo = pte_lo;
}

static __inline uint64_t
moea64_calc_wimg(vm_offset_t pa, vm_memattr_t ma)
{
	uint64_t pte_lo;
	int i;

	if (ma != VM_MEMATTR_DEFAULT) {
		switch (ma) {
		case VM_MEMATTR_UNCACHEABLE:
			return (LPTE_I | LPTE_G);
		case VM_MEMATTR_WRITE_COMBINING:
		case VM_MEMATTR_WRITE_BACK:
		case VM_MEMATTR_PREFETCHABLE:
			return (LPTE_I);
		case VM_MEMATTR_WRITE_THROUGH:
			return (LPTE_W | LPTE_M);
		}
	}

	/*
	 * Assume the page is cache inhibited and access is guarded unless
	 * it's in our available memory array.
	 */
	pte_lo = LPTE_I | LPTE_G;
	for (i = 0; i < pregions_sz; i++) {
		if ((pa >= pregions[i].mr_start) &&
		    (pa < (pregions[i].mr_start + pregions[i].mr_size))) {
			pte_lo &= ~(LPTE_I | LPTE_G);
			pte_lo |= LPTE_M;
			break;
		}
	}

	return (pte_lo);
}
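
/*
 * Summary of the WIMG policy above: UNCACHEABLE yields I|G;
 * WRITE_COMBINING, WRITE_BACK and PREFETCHABLE yield I; WRITE_THROUGH
 * yields W|M; and VM_MEMATTR_DEFAULT yields M for addresses covered by
 * a known physical memory region and I|G for everything else.
 */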

/*
 * Quick sort callout for comparing memory regions.
 */
static int	om_cmp(const void *a, const void *b);

static int
om_cmp(const void *a, const void *b)
{
	const struct	ofw_map *mapa;
	const struct	ofw_map *mapb;

	mapa = a;
	mapb = b;
	if (mapa->om_pa_hi < mapb->om_pa_hi)
		return (-1);
	else if (mapa->om_pa_hi > mapb->om_pa_hi)
		return (1);
	else if (mapa->om_pa_lo < mapb->om_pa_lo)
		return (-1);
	else if (mapa->om_pa_lo > mapb->om_pa_lo)
		return (1);
	else
		return (0);
}

static void
moea64_add_ofw_mappings(mmu_t mmup, phandle_t mmu, size_t sz)
{
	struct ofw_map	translations[sz/sizeof(struct ofw_map)];
	register_t	msr;
	vm_offset_t	off;
	vm_paddr_t	pa_base;
	int		i;

	bzero(translations, sz);
	if (OF_getprop(mmu, "translations", translations, sz) == -1)
		panic("moea64_bootstrap: can't get ofw translations");

	CTR0(KTR_PMAP, "moea64_add_ofw_mappings: translations");
	sz /= sizeof(*translations);
	qsort(translations, sz, sizeof (*translations), om_cmp);

	for (i = 0; i < sz; i++) {
		CTR3(KTR_PMAP, "translation: pa=%#x va=%#x len=%#x",
		    (uint32_t)(translations[i].om_pa_lo), translations[i].om_va,
		    translations[i].om_len);

		if (translations[i].om_pa_lo % PAGE_SIZE)
			panic("OFW translation not page-aligned!");

		pa_base = translations[i].om_pa_lo;

	      #ifdef __powerpc64__
		pa_base += (vm_offset_t)translations[i].om_pa_hi << 32;
	      #else
		if (translations[i].om_pa_hi)
			panic("OFW translations above 32-bit boundary!");
	      #endif

		/* Now enter the pages for this mapping */

		DISABLE_TRANS(msr);
		for (off = 0; off < translations[i].om_len; off += PAGE_SIZE) {
			if (moea64_pvo_find_va(kernel_pmap,
			    translations[i].om_va + off) != NULL)
				continue;

			moea64_kenter(mmup, translations[i].om_va + off,
			    pa_base + off);
		}
		ENABLE_TRANS(msr);
	}
}

#ifdef __powerpc64__
static void
moea64_probe_large_page(void)
{
	uint16_t pvr = mfpvr() >> 16;

	switch (pvr) {
	case IBM970:
	case IBM970FX:
	case IBM970MP:
		powerpc_sync(); isync();
		mtspr(SPR_HID4, mfspr(SPR_HID4) & ~HID4_970_DISABLE_LG_PG);
		powerpc_sync(); isync();

		/* FALLTHROUGH */
	case IBMCELLBE:
		moea64_large_page_size = 0x1000000; /* 16 MB */
		moea64_large_page_shift = 24;
		break;
	default:
		moea64_large_page_size = 0;
	}

	moea64_large_page_mask = moea64_large_page_size - 1;
}

static void
moea64_bootstrap_slb_prefault(vm_offset_t va, int large)
{
	struct slb *cache;
	struct slb entry;
	uint64_t esid, slbe;
	uint64_t i;

	cache = PCPU_GET(slb);
	esid = va >> ADDR_SR_SHFT;
	slbe = (esid << SLBE_ESID_SHIFT) | SLBE_VALID;

	for (i = 0; i < 64; i++) {
		if (cache[i].slbe == (slbe | i))
			return;
	}

	entry.slbe = slbe;
	entry.slbv = KERNEL_VSID(esid) << SLBV_VSID_SHIFT;
	if (large)
		entry.slbv |= SLBV_L;

	slb_insert_kernel(entry.slbe, entry.slbv);
}
#endif

static void
moea64_setup_direct_map(mmu_t mmup, vm_offset_t kernelstart,
    vm_offset_t kernelend)
{
	register_t msr;
	vm_paddr_t pa;
	vm_offset_t size, off;
	uint64_t pte_lo;
	int i;

	if (moea64_large_page_size == 0)
		hw_direct_map = 0;

	DISABLE_TRANS(msr);
	if (hw_direct_map) {
		PMAP_LOCK(kernel_pmap);
		for (i = 0; i < pregions_sz; i++) {
		  for (pa = pregions[i].mr_start; pa < pregions[i].mr_start +
		     pregions[i].mr_size; pa += moea64_large_page_size) {
			pte_lo = LPTE_M;

			/*
			 * Set memory access as guarded if prefetch within
			 * the page could exit the available physmem area.
			 */
			if (pa & moea64_large_page_mask) {
				pa &= moea64_large_page_mask;
				pte_lo |= LPTE_G;
			}
			if (pa + moea64_large_page_size >
			    pregions[i].mr_start + pregions[i].mr_size)
				pte_lo |= LPTE_G;

			moea64_pvo_enter(mmup, kernel_pmap, moea64_upvo_zone,
				    &moea64_pvo_kunmanaged, pa, pa,
				    pte_lo, PVO_WIRED | PVO_LARGE);
		  }
		}
		PMAP_UNLOCK(kernel_pmap);
	} else {
		size = sizeof(struct pvo_head) * moea64_pteg_count;
		off = (vm_offset_t)(moea64_pvo_table);
		for (pa = off; pa < off + size; pa += PAGE_SIZE)
			moea64_kenter(mmup, pa, pa);
		size = BPVO_POOL_SIZE*sizeof(struct pvo_entry);
		off = (vm_offset_t)(moea64_bpvo_pool);
		for (pa = off; pa < off + size; pa += PAGE_SIZE)
			moea64_kenter(mmup, pa, pa);

		/*
		 * Map certain important things, like ourselves.
		 *
		 * NOTE: We do not map the exception vector space. That code is
		 * used only in real mode, and leaving it unmapped allows us to
		 * catch NULL pointer dereferences, instead of making NULL a valid
		 * address.
		 */

		for (pa = kernelstart & ~PAGE_MASK; pa < kernelend;
		    pa += PAGE_SIZE)
			moea64_kenter(mmup, pa, pa);
	}
	ENABLE_TRANS(msr);
}

void
moea64_early_bootstrap(mmu_t mmup, vm_offset_t kernelstart, vm_offset_t kernelend)
{
	int		i, j;
	vm_size_t	physsz, hwphyssz;

#ifndef __powerpc64__
	/* We don't have a direct map since there is no BAT */
	hw_direct_map = 0;

	/* Make sure battable is zero, since we have no BAT */
	for (i = 0; i < 16; i++) {
		battable[i].batu = 0;
		battable[i].batl = 0;
	}
#else
	moea64_probe_large_page();

	/* Use a direct map if we have large page support */
	if (moea64_large_page_size > 0)
		hw_direct_map = 1;
	else
		hw_direct_map = 0;
#endif

	/* Get physical memory regions from firmware */
	mem_regions(&pregions, &pregions_sz, &regions, &regions_sz);
	CTR0(KTR_PMAP, "moea64_bootstrap: physical memory");

	if (sizeof(phys_avail)/sizeof(phys_avail[0]) < regions_sz)
		panic("moea64_bootstrap: phys_avail too small");

	phys_avail_count = 0;
	physsz = 0;
	hwphyssz = 0;
	TUNABLE_ULONG_FETCH("hw.physmem", (u_long *) &hwphyssz);
	for (i = 0, j = 0; i < regions_sz; i++, j += 2) {
		CTR3(KTR_PMAP, "region: %#x - %#x (%#x)", regions[i].mr_start,
		    regions[i].mr_start + regions[i].mr_size,
		    regions[i].mr_size);
		if (hwphyssz != 0 &&
		    (physsz + regions[i].mr_size) >= hwphyssz) {
			if (physsz < hwphyssz) {
				phys_avail[j] = regions[i].mr_start;
				phys_avail[j + 1] = regions[i].mr_start +
				    hwphyssz - physsz;
				physsz = hwphyssz;
				phys_avail_count++;
			}
			break;
		}
		phys_avail[j] = regions[i].mr_start;
		phys_avail[j + 1] = regions[i].mr_start + regions[i].mr_size;
		phys_avail_count++;
		physsz += regions[i].mr_size;
	}

	/* Check for overlap with the kernel and exception vectors */
	for (j = 0; j < 2*phys_avail_count; j+=2) {
		if (phys_avail[j] < EXC_LAST)
			phys_avail[j] += EXC_LAST;

		if (kernelstart >= phys_avail[j] &&
		    kernelstart < phys_avail[j+1]) {
			if (kernelend < phys_avail[j+1]) {
				phys_avail[2*phys_avail_count] =
				    (kernelend & ~PAGE_MASK) + PAGE_SIZE;
				phys_avail[2*phys_avail_count + 1] =
				    phys_avail[j+1];
				phys_avail_count++;
			}

			phys_avail[j+1] = kernelstart & ~PAGE_MASK;
		}

		if (kernelend >= phys_avail[j] &&
		    kernelend < phys_avail[j+1]) {
			if (kernelstart > phys_avail[j]) {
				phys_avail[2*phys_avail_count] = phys_avail[j];
				phys_avail[2*phys_avail_count + 1] =
				    kernelstart & ~PAGE_MASK;
				phys_avail_count++;
			}

			phys_avail[j] = (kernelend & ~PAGE_MASK) + PAGE_SIZE;
		}
	}

	physmem = btoc(physsz);

#ifdef PTEGCOUNT
	moea64_pteg_count = PTEGCOUNT;
#else
	moea64_pteg_count = 0x1000;

	while (moea64_pteg_count < physmem)
		moea64_pteg_count <<= 1;

	moea64_pteg_count >>= 1;
#endif /* PTEGCOUNT */
}

void
moea64_mid_bootstrap(mmu_t mmup, vm_offset_t kernelstart, vm_offset_t kernelend)
{
	vm_size_t	size;
	register_t	msr;
	int		i;

	/*
	 * Set PTEG mask
	 */
	moea64_pteg_mask = moea64_pteg_count - 1;

	/*
	 * Allocate pv/overflow lists.
	 */
	size = sizeof(struct pvo_head) * moea64_pteg_count;

	moea64_pvo_table = (struct pvo_head *)moea64_bootstrap_alloc(size,
	    PAGE_SIZE);
	CTR1(KTR_PMAP, "moea64_bootstrap: PVO table at %p", moea64_pvo_table);

	DISABLE_TRANS(msr);
	for (i = 0; i < moea64_pteg_count; i++)
		LIST_INIT(&moea64_pvo_table[i]);
	ENABLE_TRANS(msr);

	/*
	 * Initialize the lock that synchronizes access to the pteg and pvo
	 * tables.
	 */
	mtx_init(&moea64_table_mutex, "pmap table", NULL, MTX_DEF |
	    MTX_RECURSE);
	mtx_init(&moea64_slb_mutex, "SLB table", NULL, MTX_DEF);

	/*
	 * Initialise the unmanaged pvo pool.
	 */
	moea64_bpvo_pool = (struct pvo_entry *)moea64_bootstrap_alloc(
		BPVO_POOL_SIZE*sizeof(struct pvo_entry), 0);
	moea64_bpvo_pool_index = 0;

	/*
	 * Make sure kernel vsid is allocated as well as VSID 0.
	 */
	#ifndef __powerpc64__
	moea64_vsid_bitmap[(KERNEL_VSIDBITS & (NVSIDS - 1)) / VSID_NBPW]
		|= 1 << (KERNEL_VSIDBITS % VSID_NBPW);
	moea64_vsid_bitmap[0] |= 1;
	#endif

	/*
	 * Initialize the kernel pmap (which is statically allocated).
	 */
	#ifdef __powerpc64__
	for (i = 0; i < 64; i++) {
		pcpup->pc_slb[i].slbv = 0;
		pcpup->pc_slb[i].slbe = 0;
	}
	#else
	for (i = 0; i < 16; i++)
		kernel_pmap->pm_sr[i] = EMPTY_SEGMENT + i;
	#endif

	kernel_pmap->pmap_phys = kernel_pmap;
	CPU_FILL(&kernel_pmap->pm_active);
	LIST_INIT(&kernel_pmap->pmap_pvo);

	PMAP_LOCK_INIT(kernel_pmap);

	/*
	 * Now map in all the other buffers we allocated earlier
	 */

	moea64_setup_direct_map(mmup, kernelstart, kernelend);
}

void
moea64_late_bootstrap(mmu_t mmup, vm_offset_t kernelstart, vm_offset_t kernelend)
{
	ihandle_t	mmui;
	phandle_t	chosen;
	phandle_t	mmu;
	size_t		sz;
	int		i;
	vm_offset_t	pa, va;
	void		*dpcpu;

	/*
	 * Set up the Open Firmware pmap and add its mappings if not in real
	 * mode.
	 */

	chosen = OF_finddevice("/chosen");
	if (chosen != -1 && OF_getprop(chosen, "mmu", &mmui, 4) != -1) {
	    mmu = OF_instance_to_package(mmui);
	    if (mmu == -1 || (sz = OF_getproplen(mmu, "translations")) == -1)
		sz = 0;
	    if (sz > 6144 /* tmpstksz - 2 KB headroom */)
		panic("moea64_bootstrap: too many ofw translations");

	    if (sz > 0)
		moea64_add_ofw_mappings(mmup, mmu, sz);
	}

	/*
	 * Calculate the last available physical address.
	 */
	for (i = 0; phys_avail[i + 2] != 0; i += 2)
		;
	Maxmem = powerpc_btop(phys_avail[i + 1]);

	/*
	 * Initialize MMU and remap early physical mappings
	 */
	MMU_CPU_BOOTSTRAP(mmup,0);
	mtmsr(mfmsr() | PSL_DR | PSL_IR);
	pmap_bootstrapped++;
	bs_remap_earlyboot();

	/*
	 * Set the start and end of kva.
	 */
	virtual_avail = VM_MIN_KERNEL_ADDRESS;
	virtual_end = VM_MAX_SAFE_KERNEL_ADDRESS;

	/*
	 * Map the entire KVA range into the SLB. We must not fault there.
	 */
	#ifdef __powerpc64__
	for (va = virtual_avail; va < virtual_end; va += SEGMENT_LENGTH)
		moea64_bootstrap_slb_prefault(va, 0);
	#endif

	/*
	 * Figure out how far we can extend virtual_end into segment 16
	 * without running into existing mappings. Segment 16 is guaranteed
	 * to contain neither RAM nor devices (at least on Apple hardware),
	 * but will generally contain some OFW mappings we should not
	 * step on.
	 */

	#ifndef __powerpc64__	/* KVA is in high memory on PPC64 */
	PMAP_LOCK(kernel_pmap);
	while (virtual_end < VM_MAX_KERNEL_ADDRESS &&
	    moea64_pvo_find_va(kernel_pmap, virtual_end+1) == NULL)
		virtual_end += PAGE_SIZE;
	PMAP_UNLOCK(kernel_pmap);
	#endif

	/*
	 * Allocate a kernel stack with a guard page for thread0 and map it
	 * into the kernel page map.
	 */
	pa = moea64_bootstrap_alloc(KSTACK_PAGES * PAGE_SIZE, PAGE_SIZE);
	va = virtual_avail + KSTACK_GUARD_PAGES * PAGE_SIZE;
	virtual_avail = va + KSTACK_PAGES * PAGE_SIZE;
	CTR2(KTR_PMAP, "moea64_bootstrap: kstack0 at %#x (%#x)", pa, va);
	thread0.td_kstack = va;
	thread0.td_kstack_pages = KSTACK_PAGES;
	for (i = 0; i < KSTACK_PAGES; i++) {
		moea64_kenter(mmup, va, pa);
		pa += PAGE_SIZE;
		va += PAGE_SIZE;
	}

	/*
	 * Allocate virtual address space for the message buffer.
	 */
	pa = msgbuf_phys = moea64_bootstrap_alloc(msgbufsize, PAGE_SIZE);
	msgbufp = (struct msgbuf *)virtual_avail;
	va = virtual_avail;
	virtual_avail += round_page(msgbufsize);
	while (va < virtual_avail) {
		moea64_kenter(mmup, va, pa);
		pa += PAGE_SIZE;
		va += PAGE_SIZE;
	}

	/*
	 * Allocate virtual address space for the dynamic percpu area.
	 */
	pa = moea64_bootstrap_alloc(DPCPU_SIZE, PAGE_SIZE);
	dpcpu = (void *)virtual_avail;
	va = virtual_avail;
	virtual_avail += DPCPU_SIZE;
	while (va < virtual_avail) {
		moea64_kenter(mmup, va, pa);
		pa += PAGE_SIZE;
		va += PAGE_SIZE;
	}
	dpcpu_init(dpcpu, 0);

	/*
	 * Allocate some things for page zeroing. We put this directly
	 * in the page table, marked with LPTE_LOCKED, to avoid any
	 * of the PVO book-keeping or other parts of the VM system
	 * from even knowing that this hack exists.
	 */

	if (!hw_direct_map) {
		mtx_init(&moea64_scratchpage_mtx, "pvo zero page", NULL,
		    MTX_DEF);
		for (i = 0; i < 2; i++) {
			moea64_scratchpage_va[i] = (virtual_end+1) - PAGE_SIZE;
			virtual_end -= PAGE_SIZE;

			moea64_kenter(mmup, moea64_scratchpage_va[i], 0);

			moea64_scratchpage_pvo[i] = moea64_pvo_find_va(
			    kernel_pmap, (vm_offset_t)moea64_scratchpage_va[i]);
			LOCK_TABLE();
			moea64_scratchpage_pte[i] = MOEA64_PVO_TO_PTE(
			    mmup, moea64_scratchpage_pvo[i]);
			moea64_scratchpage_pvo[i]->pvo_pte.lpte.pte_hi
			    |= LPTE_LOCKED;
			MOEA64_PTE_CHANGE(mmup, moea64_scratchpage_pte[i],
			    &moea64_scratchpage_pvo[i]->pvo_pte.lpte,
			    moea64_scratchpage_pvo[i]->pvo_vpn);
			UNLOCK_TABLE();
		}
	}
}

/*
 * Activate a user pmap.  The pmap must be activated before its address
 * space can be accessed in any way.
 */
void
moea64_activate(mmu_t mmu, struct thread *td)
{
	pmap_t	pm;

	pm = &td->td_proc->p_vmspace->vm_pmap;
	CPU_SET(PCPU_GET(cpuid), &pm->pm_active);

	#ifdef __powerpc64__
	PCPU_SET(userslb, pm->pm_slb);
	#else
	PCPU_SET(curpmap, pm->pmap_phys);
	#endif
}

void
moea64_deactivate(mmu_t mmu, struct thread *td)
{
	pmap_t	pm;

	pm = &td->td_proc->p_vmspace->vm_pmap;
	CPU_CLR(PCPU_GET(cpuid), &pm->pm_active);
	#ifdef __powerpc64__
	PCPU_SET(userslb, NULL);
	#else
	PCPU_SET(curpmap, NULL);
	#endif
}

void
moea64_change_wiring(mmu_t mmu, pmap_t pm, vm_offset_t va, boolean_t wired)
{
	struct	pvo_entry *pvo;
	uintptr_t pt;
	uint64_t vsid;
	int	i, ptegidx;

	PMAP_LOCK(pm);
	pvo = moea64_pvo_find_va(pm, va & ~ADDR_POFF);

	if (pvo != NULL) {
		LOCK_TABLE();
		pt = MOEA64_PVO_TO_PTE(mmu, pvo);

		if (wired) {
			if ((pvo->pvo_vaddr & PVO_WIRED) == 0)
				pm->pm_stats.wired_count++;
			pvo->pvo_vaddr |= PVO_WIRED;
			pvo->pvo_pte.lpte.pte_hi |= LPTE_WIRED;
		} else {
			if ((pvo->pvo_vaddr & PVO_WIRED) != 0)
				pm->pm_stats.wired_count--;
			pvo->pvo_vaddr &= ~PVO_WIRED;
			pvo->pvo_pte.lpte.pte_hi &= ~LPTE_WIRED;
		}

		if (pt != -1) {
			/* Update wiring flag in page table. */
			MOEA64_PTE_CHANGE(mmu, pt, &pvo->pvo_pte.lpte,
			    pvo->pvo_vpn);
		} else if (wired) {
			/*
			 * If we are wiring the page, and it wasn't in the
			 * page table before, add it.
			 */
			vsid = PVO_VSID(pvo);
			ptegidx = va_to_pteg(vsid, PVO_VADDR(pvo),
			    pvo->pvo_vaddr & PVO_LARGE);

			i = MOEA64_PTE_INSERT(mmu, ptegidx, &pvo->pvo_pte.lpte);

			if (i >= 0) {
				PVO_PTEGIDX_CLR(pvo);
				PVO_PTEGIDX_SET(pvo, i);
			}
		}

		UNLOCK_TABLE();
	}
	PMAP_UNLOCK(pm);
}

/*
 * This goes through and sets the physical address of our
 * special scratch PTE to the PA we want to zero or copy. Because
 * of locking issues (this can get called in pvo_enter() by
 * the UMA allocator), we can't use most other utility functions here
 */

static __inline void
moea64_set_scratchpage_pa(mmu_t mmup, int which, vm_offset_t pa)
{

	KASSERT(!hw_direct_map, ("Using OEA64 scratchpage with a direct map!"));
	mtx_assert(&moea64_scratchpage_mtx, MA_OWNED);

	moea64_scratchpage_pvo[which]->pvo_pte.lpte.pte_lo &=
	    ~(LPTE_WIMG | LPTE_RPGN);
	moea64_scratchpage_pvo[which]->pvo_pte.lpte.pte_lo |=
	    moea64_calc_wimg(pa, VM_MEMATTR_DEFAULT) | (uint64_t)pa;
	MOEA64_PTE_CHANGE(mmup, moea64_scratchpage_pte[which],
	    &moea64_scratchpage_pvo[which]->pvo_pte.lpte,
	    moea64_scratchpage_pvo[which]->pvo_vpn);
	isync();
}
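
/*
 * Usage sketch (illustrative): callers serialize on
 * moea64_scratchpage_mtx and then retarget one of the two scratch
 * slots at the physical page of interest:
 *
 *	mtx_lock(&moea64_scratchpage_mtx);
 *	moea64_set_scratchpage_pa(mmu, 0, pa);
 *	bzero((caddr_t)moea64_scratchpage_va[0], PAGE_SIZE);
 *	mtx_unlock(&moea64_scratchpage_mtx);
 *
 * moea64_copy_page() and moea64_zero_page() below follow this pattern
 * when no direct map is available.
 */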

void
moea64_copy_page(mmu_t mmu, vm_page_t msrc, vm_page_t mdst)
{
	vm_offset_t	dst;
	vm_offset_t	src;

	dst = VM_PAGE_TO_PHYS(mdst);
	src = VM_PAGE_TO_PHYS(msrc);

	if (hw_direct_map) {
		kcopy((void *)src, (void *)dst, PAGE_SIZE);
	} else {
		mtx_lock(&moea64_scratchpage_mtx);

		moea64_set_scratchpage_pa(mmu, 0, src);
		moea64_set_scratchpage_pa(mmu, 1, dst);

		kcopy((void *)moea64_scratchpage_va[0],
		    (void *)moea64_scratchpage_va[1], PAGE_SIZE);

		mtx_unlock(&moea64_scratchpage_mtx);
	}
}

void
moea64_zero_page_area(mmu_t mmu, vm_page_t m, int off, int size)
{
	vm_offset_t pa = VM_PAGE_TO_PHYS(m);

	if (size + off > PAGE_SIZE)
		panic("moea64_zero_page_area: size + off > PAGE_SIZE");
1132190681Snwhitehorn
1133209975Snwhitehorn	if (hw_direct_map) {
1134209975Snwhitehorn		bzero((caddr_t)pa + off, size);
1135209975Snwhitehorn	} else {
1136209975Snwhitehorn		mtx_lock(&moea64_scratchpage_mtx);
1137216174Snwhitehorn		moea64_set_scratchpage_pa(mmu, 0, pa);
1138209975Snwhitehorn		bzero((caddr_t)moea64_scratchpage_va[0] + off, size);
1139209975Snwhitehorn		mtx_unlock(&moea64_scratchpage_mtx);
1140209975Snwhitehorn	}
1141190681Snwhitehorn}
1142190681Snwhitehorn
1143204269Snwhitehorn/*
1144204269Snwhitehorn * Zero a page of physical memory by temporarily mapping it
1145204269Snwhitehorn */
1146190681Snwhitehornvoid
1147204269Snwhitehornmoea64_zero_page(mmu_t mmu, vm_page_t m)
1148204269Snwhitehorn{
1149204269Snwhitehorn	vm_offset_t pa = VM_PAGE_TO_PHYS(m);
1150209975Snwhitehorn	vm_offset_t va, off;
1151204269Snwhitehorn
1152209975Snwhitehorn	if (!hw_direct_map) {
1153209975Snwhitehorn		mtx_lock(&moea64_scratchpage_mtx);
1154204269Snwhitehorn
1155216174Snwhitehorn		moea64_set_scratchpage_pa(mmu, 0, pa);
1156209975Snwhitehorn		va = moea64_scratchpage_va[0];
1157209975Snwhitehorn	} else {
1158209975Snwhitehorn		va = pa;
1159209975Snwhitehorn	}
1160209975Snwhitehorn
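	/*
	 * dcbz zeroes an entire cache block without first fetching it
	 * from memory, so step through the page one cache line at a time.
	 */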
1161204269Snwhitehorn	for (off = 0; off < PAGE_SIZE; off += cacheline_size)
1162209975Snwhitehorn		__asm __volatile("dcbz 0,%0" :: "r"(va + off));
1163209975Snwhitehorn
1164209975Snwhitehorn	if (!hw_direct_map)
1165209975Snwhitehorn		mtx_unlock(&moea64_scratchpage_mtx);
1166204269Snwhitehorn}
1167204269Snwhitehorn
1168204269Snwhitehornvoid
1169190681Snwhitehornmoea64_zero_page_idle(mmu_t mmu, vm_page_t m)
1170190681Snwhitehorn{
1171190681Snwhitehorn
1172190681Snwhitehorn	moea64_zero_page(mmu, m);
1173190681Snwhitehorn}
1174190681Snwhitehorn
1175190681Snwhitehorn/*
1176190681Snwhitehorn * Map the given physical page at the specified virtual address in the
1177190681Snwhitehorn * target pmap with the protection requested.  If specified, the page
1178190681Snwhitehorn * will be wired down.
1179190681Snwhitehorn */
1180190681Snwhitehornvoid
1181190681Snwhitehornmoea64_enter(mmu_t mmu, pmap_t pmap, vm_offset_t va, vm_page_t m,
1182190681Snwhitehorn    vm_prot_t prot, boolean_t wired)
1183190681Snwhitehorn{
1184190681Snwhitehorn
1185190681Snwhitehorn	vm_page_lock_queues();
1186190681Snwhitehorn	PMAP_LOCK(pmap);
1187216174Snwhitehorn	moea64_enter_locked(mmu, pmap, va, m, prot, wired);
1188190681Snwhitehorn	vm_page_unlock_queues();
1189190681Snwhitehorn	PMAP_UNLOCK(pmap);
1190190681Snwhitehorn}
1191190681Snwhitehorn
1192190681Snwhitehorn/*
1193190681Snwhitehorn * Map the given physical page at the specified virtual address in the
1194190681Snwhitehorn * target pmap with the protection requested.  If specified, the page
1195190681Snwhitehorn * will be wired down.
1196190681Snwhitehorn *
1197190681Snwhitehorn * The page queues and pmap must be locked.
1198190681Snwhitehorn */
1199190681Snwhitehorn
1200190681Snwhitehornstatic void
1201216174Snwhitehornmoea64_enter_locked(mmu_t mmu, pmap_t pmap, vm_offset_t va, vm_page_t m,
1202216174Snwhitehorn    vm_prot_t prot, boolean_t wired)
1203190681Snwhitehorn{
1204190681Snwhitehorn	struct		pvo_head *pvo_head;
1205190681Snwhitehorn	uma_zone_t	zone;
1206190681Snwhitehorn	vm_page_t	pg;
1207190681Snwhitehorn	uint64_t	pte_lo;
1208190681Snwhitehorn	u_int		pvo_flags;
1209190681Snwhitehorn	int		error;
1210190681Snwhitehorn
1211190681Snwhitehorn	if (!moea64_initialized) {
1212190681Snwhitehorn		pvo_head = &moea64_pvo_kunmanaged;
1213190681Snwhitehorn		pg = NULL;
1214190681Snwhitehorn		zone = moea64_upvo_zone;
1215190681Snwhitehorn		pvo_flags = 0;
1216190681Snwhitehorn	} else {
1217190681Snwhitehorn		pvo_head = vm_page_to_pvoh(m);
1218190681Snwhitehorn		pg = m;
1219190681Snwhitehorn		zone = moea64_mpvo_zone;
1220190681Snwhitehorn		pvo_flags = PVO_MANAGED;
1221190681Snwhitehorn	}
1222190681Snwhitehorn
1223190681Snwhitehorn	if (pmap_bootstrapped)
1224190681Snwhitehorn		mtx_assert(&vm_page_queue_mtx, MA_OWNED);
1225190681Snwhitehorn	PMAP_LOCK_ASSERT(pmap, MA_OWNED);
1226224746Skib	KASSERT((m->oflags & (VPO_UNMANAGED | VPO_BUSY)) != 0 ||
1227224746Skib	    VM_OBJECT_LOCKED(m->object),
1228208175Salc	    ("moea64_enter_locked: page %p is not busy", m));
1229190681Snwhitehorn
1230190681Snwhitehorn	/* XXX change the pvo head for fake pages */
1231224746Skib	if ((m->oflags & VPO_UNMANAGED) != 0) {
1232190681Snwhitehorn		pvo_flags &= ~PVO_MANAGED;
1233190681Snwhitehorn		pvo_head = &moea64_pvo_kunmanaged;
1234190681Snwhitehorn		zone = moea64_upvo_zone;
1235190681Snwhitehorn	}
1236190681Snwhitehorn
1237213307Snwhitehorn	pte_lo = moea64_calc_wimg(VM_PAGE_TO_PHYS(m), pmap_page_get_memattr(m));
1238190681Snwhitehorn
1239190681Snwhitehorn	if (prot & VM_PROT_WRITE) {
1240190681Snwhitehorn		pte_lo |= LPTE_BW;
1241208810Salc		if (pmap_bootstrapped &&
1242224746Skib		    (m->oflags & VPO_UNMANAGED) == 0)
1243225418Skib			vm_page_aflag_set(m, PGA_WRITEABLE);
1244190681Snwhitehorn	} else
1245190681Snwhitehorn		pte_lo |= LPTE_BR;
1246190681Snwhitehorn
1247217341Snwhitehorn	if ((prot & VM_PROT_EXECUTE) == 0)
1248217341Snwhitehorn		pte_lo |= LPTE_NOEXEC;
1249190681Snwhitehorn
1250190681Snwhitehorn	if (wired)
1251190681Snwhitehorn		pvo_flags |= PVO_WIRED;
1252190681Snwhitehorn
1253216174Snwhitehorn	error = moea64_pvo_enter(mmu, pmap, zone, pvo_head, va,
1254216174Snwhitehorn	    VM_PAGE_TO_PHYS(m), pte_lo, pvo_flags);
1255190681Snwhitehorn
1256190681Snwhitehorn	/*
1257190681Snwhitehorn	 * Flush the page from the instruction cache if this page is
1258190681Snwhitehorn	 * mapped executable and cacheable.
1259190681Snwhitehorn	 */
1260216174Snwhitehorn	if ((pte_lo & (LPTE_I | LPTE_G | LPTE_NOEXEC)) == 0)
1261216174Snwhitehorn		moea64_syncicache(mmu, pmap, va, VM_PAGE_TO_PHYS(m), PAGE_SIZE);
1262190681Snwhitehorn}
1263190681Snwhitehorn
1264190681Snwhitehornstatic void
1265216174Snwhitehornmoea64_syncicache(mmu_t mmu, pmap_t pmap, vm_offset_t va, vm_offset_t pa,
1266216174Snwhitehorn    vm_size_t sz)
1267190681Snwhitehorn{
1268204042Snwhitehorn
1269190681Snwhitehorn	/*
1270190681Snwhitehorn	 * This is much trickier than on older systems because
1271190681Snwhitehorn	 * we can't sync the icache on physical addresses directly
1272190681Snwhitehorn	 * without a direct map. Instead we check a couple of cases
1273190681Snwhitehorn	 * where the memory is already mapped in and, failing that,
1274190681Snwhitehorn	 * use the same trick we use for page zeroing to create
1275190681Snwhitehorn	 * a temporary mapping for this physical address.
1276190681Snwhitehorn	 */
1277190681Snwhitehorn
1278190681Snwhitehorn	if (!pmap_bootstrapped) {
1279190681Snwhitehorn		/*
1280190681Snwhitehorn		 * If PMAP is not bootstrapped, we are likely to be
1281190681Snwhitehorn		 * in real mode.
1282190681Snwhitehorn		 */
1283198341Smarcel		__syncicache((void *)pa, sz);
1284190681Snwhitehorn	} else if (pmap == kernel_pmap) {
1285198341Smarcel		__syncicache((void *)va, sz);
1286209975Snwhitehorn	} else if (hw_direct_map) {
1287209975Snwhitehorn		__syncicache((void *)pa, sz);
1288190681Snwhitehorn	} else {
1289190681Snwhitehorn		/* Use the scratch page to set up a temp mapping */
1290190681Snwhitehorn
1291190681Snwhitehorn		mtx_lock(&moea64_scratchpage_mtx);
1292190681Snwhitehorn
1293216174Snwhitehorn		moea64_set_scratchpage_pa(mmu, 1, pa & ~ADDR_POFF);
1294204042Snwhitehorn		__syncicache((void *)(moea64_scratchpage_va[1] +
1295204042Snwhitehorn		    (va & ADDR_POFF)), sz);
1296190681Snwhitehorn
1297190681Snwhitehorn		mtx_unlock(&moea64_scratchpage_mtx);
1298190681Snwhitehorn	}
1299190681Snwhitehorn}
1300190681Snwhitehorn
1301190681Snwhitehorn/*
1302190681Snwhitehorn * Maps a sequence of resident pages belonging to the same object.
1303190681Snwhitehorn * The sequence begins with the given page m_start.  This page is
1304190681Snwhitehorn * mapped at the given virtual address start.  Each subsequent page is
1305190681Snwhitehorn * mapped at a virtual address that is offset from start by the same
1306190681Snwhitehorn * amount as the page is offset from m_start within the object.  The
1307190681Snwhitehorn * last page in the sequence is the page with the largest offset from
1308190681Snwhitehorn * m_start that can be mapped at a virtual address less than the given
1309190681Snwhitehorn * virtual address end.  Not every virtual page between start and end
1310190681Snwhitehorn * is mapped; only those for which a resident page exists with the
1311190681Snwhitehorn * corresponding offset from m_start are mapped.
1312190681Snwhitehorn */
1313190681Snwhitehornvoid
1314190681Snwhitehornmoea64_enter_object(mmu_t mmu, pmap_t pm, vm_offset_t start, vm_offset_t end,
1315190681Snwhitehorn    vm_page_t m_start, vm_prot_t prot)
1316190681Snwhitehorn{
1317190681Snwhitehorn	vm_page_t m;
1318190681Snwhitehorn	vm_pindex_t diff, psize;
1319190681Snwhitehorn
1320190681Snwhitehorn	psize = atop(end - start);
1321190681Snwhitehorn	m = m_start;
1322208574Salc	vm_page_lock_queues();
1323190681Snwhitehorn	PMAP_LOCK(pm);
1324190681Snwhitehorn	while (m != NULL && (diff = m->pindex - m_start->pindex) < psize) {
1325216174Snwhitehorn		moea64_enter_locked(mmu, pm, start + ptoa(diff), m, prot &
1326190681Snwhitehorn		    (VM_PROT_READ | VM_PROT_EXECUTE), FALSE);
1327190681Snwhitehorn		m = TAILQ_NEXT(m, listq);
1328190681Snwhitehorn	}
1329208574Salc	vm_page_unlock_queues();
1330190681Snwhitehorn	PMAP_UNLOCK(pm);
1331190681Snwhitehorn}
1332190681Snwhitehorn
1333190681Snwhitehornvoid
1334190681Snwhitehornmoea64_enter_quick(mmu_t mmu, pmap_t pm, vm_offset_t va, vm_page_t m,
1335190681Snwhitehorn    vm_prot_t prot)
1336190681Snwhitehorn{
1337207796Salc
1338207796Salc	vm_page_lock_queues();
1339190681Snwhitehorn	PMAP_LOCK(pm);
1340216174Snwhitehorn	moea64_enter_locked(mmu, pm, va, m,
1341216174Snwhitehorn	    prot & (VM_PROT_READ | VM_PROT_EXECUTE), FALSE);
1342207796Salc	vm_page_unlock_queues();
1343190681Snwhitehorn	PMAP_UNLOCK(pm);
1344190681Snwhitehorn}
1345190681Snwhitehorn
1346190681Snwhitehornvm_paddr_t
1347190681Snwhitehornmoea64_extract(mmu_t mmu, pmap_t pm, vm_offset_t va)
1348190681Snwhitehorn{
1349190681Snwhitehorn	struct	pvo_entry *pvo;
1350190681Snwhitehorn	vm_paddr_t pa;
1351190681Snwhitehorn
1352190681Snwhitehorn	PMAP_LOCK(pm);
1353209975Snwhitehorn	pvo = moea64_pvo_find_va(pm, va);
1354190681Snwhitehorn	if (pvo == NULL)
1355190681Snwhitehorn		pa = 0;
1356190681Snwhitehorn	else
1357209975Snwhitehorn		pa = (pvo->pvo_pte.lpte.pte_lo & LPTE_RPGN) |
1358209975Snwhitehorn		    (va - PVO_VADDR(pvo));
1359190681Snwhitehorn	PMAP_UNLOCK(pm);
1360190681Snwhitehorn	return (pa);
1361190681Snwhitehorn}
1362190681Snwhitehorn
1363190681Snwhitehorn/*
1364190681Snwhitehorn * Atomically extract and hold the physical page with the given
1365190681Snwhitehorn * pmap and virtual address pair if that mapping permits the given
1366190681Snwhitehorn * protection.
1367190681Snwhitehorn */
1368190681Snwhitehornvm_page_t
1369190681Snwhitehornmoea64_extract_and_hold(mmu_t mmu, pmap_t pmap, vm_offset_t va, vm_prot_t prot)
1370190681Snwhitehorn{
1371190681Snwhitehorn	struct	pvo_entry *pvo;
1372190681Snwhitehorn	vm_page_t m;
1373207410Skmacy	vm_paddr_t pa;
1374190681Snwhitehorn
1375190681Snwhitehorn	m = NULL;
1376207410Skmacy	pa = 0;
1377190681Snwhitehorn	PMAP_LOCK(pmap);
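	/*
	 * vm_page_pa_tryrelock() takes the page lock for the new PA and
	 * returns nonzero if it had to drop the pmap lock to do so, in
	 * which case the PVO must be looked up again.
	 */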
1378207410Skmacyretry:
1379209975Snwhitehorn	pvo = moea64_pvo_find_va(pmap, va & ~ADDR_POFF);
1380190681Snwhitehorn	if (pvo != NULL && (pvo->pvo_pte.lpte.pte_hi & LPTE_VALID) &&
1381190681Snwhitehorn	    ((pvo->pvo_pte.lpte.pte_lo & LPTE_PP) == LPTE_RW ||
1382190681Snwhitehorn	     (prot & VM_PROT_WRITE) == 0)) {
1383207410Skmacy		if (vm_page_pa_tryrelock(pmap,
1384207410Skmacy			pvo->pvo_pte.lpte.pte_lo & LPTE_RPGN, &pa))
1385207410Skmacy			goto retry;
1386190681Snwhitehorn		m = PHYS_TO_VM_PAGE(pvo->pvo_pte.lpte.pte_lo & LPTE_RPGN);
1387190681Snwhitehorn		vm_page_hold(m);
1388190681Snwhitehorn	}
1389207410Skmacy	PA_UNLOCK_COND(pa);
1390190681Snwhitehorn	PMAP_UNLOCK(pmap);
1391190681Snwhitehorn	return (m);
1392190681Snwhitehorn}
1393190681Snwhitehorn
1394216174Snwhitehornstatic mmu_t installed_mmu;
1395216174Snwhitehorn
1396190681Snwhitehornstatic void *
1397190681Snwhitehornmoea64_uma_page_alloc(uma_zone_t zone, int bytes, u_int8_t *flags, int wait)
1398190681Snwhitehorn{
1399190681Snwhitehorn	/*
1400190681Snwhitehorn	 * This entire routine is a horrible hack to avoid bothering kmem
1401190681Snwhitehorn	 * for new KVA addresses. Because this can get called from inside
1402190681Snwhitehorn	 * kmem allocation routines, calling kmem for a new address here
1403190681Snwhitehorn	 * can lead to recursively acquiring non-recursive mutexes.
1404190681Snwhitehorn	 */
1405190681Snwhitehorn	static vm_pindex_t color;
1406190681Snwhitehorn	vm_offset_t va;
1408190681Snwhitehorn	vm_page_t m;
1409190681Snwhitehorn	int pflags, needed_lock;
1410190681Snwhitehorn
1411190681Snwhitehorn	*flags = UMA_SLAB_PRIV;
1412190681Snwhitehorn	needed_lock = !PMAP_LOCKED(kernel_pmap);
1413190681Snwhitehorn
1417190681Snwhitehorn	if ((wait & (M_NOWAIT | M_USE_RESERVE)) == M_NOWAIT)
1418190681Snwhitehorn		pflags = VM_ALLOC_INTERRUPT | VM_ALLOC_WIRED;
1419190681Snwhitehorn	else
1420190681Snwhitehorn		pflags = VM_ALLOC_SYSTEM | VM_ALLOC_WIRED;
1421190681Snwhitehorn	if (wait & M_ZERO)
1422190681Snwhitehorn		pflags |= VM_ALLOC_ZERO;
1423190681Snwhitehorn
1424190681Snwhitehorn	for (;;) {
1425190681Snwhitehorn		m = vm_page_alloc(NULL, color++, pflags | VM_ALLOC_NOOBJ);
1426190681Snwhitehorn		if (m == NULL) {
1427190681Snwhitehorn			if (wait & M_NOWAIT)
1428190681Snwhitehorn				return (NULL);
1429190681Snwhitehorn			VM_WAIT;
1430190681Snwhitehorn		} else
1431190681Snwhitehorn			break;
1432190681Snwhitehorn	}
1433190681Snwhitehorn
	/*
	 * Take the kernel pmap lock only now that a page is in hand, so
	 * that we neither sleep in VM_WAIT nor return early on allocation
	 * failure while holding it.
	 */
1414190681Snwhitehorn	if (needed_lock)
1415190681Snwhitehorn		PMAP_LOCK(kernel_pmap);
1433190681Snwhitehorn
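	/*
	 * Use the physical address as the virtual address too: the
	 * pvo_enter() below installs the matching 1:1 mapping in the
	 * kernel pmap, standing in for the missing direct map.
	 */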
1434204128Snwhitehorn	va = VM_PAGE_TO_PHYS(m);
1435190681Snwhitehorn
1436216174Snwhitehorn	moea64_pvo_enter(installed_mmu, kernel_pmap, moea64_upvo_zone,
1437204128Snwhitehorn	    &moea64_pvo_kunmanaged, va, VM_PAGE_TO_PHYS(m), LPTE_M,
1438198378Snwhitehorn	    PVO_WIRED | PVO_BOOTSTRAP);
1439190681Snwhitehorn
1440190681Snwhitehorn	if (needed_lock)
1441190681Snwhitehorn		PMAP_UNLOCK(kernel_pmap);
1442198378Snwhitehorn
1443190681Snwhitehorn	if ((wait & M_ZERO) && (m->flags & PG_ZERO) == 0)
1444190681Snwhitehorn		bzero((void *)va, PAGE_SIZE);
1445190681Snwhitehorn
1446190681Snwhitehorn	return ((void *)va);
1447190681Snwhitehorn}
1448190681Snwhitehorn
1449190681Snwhitehornvoid
1450190681Snwhitehornmoea64_init(mmu_t mmu)
1451190681Snwhitehorn{
1452190681Snwhitehorn
1453190681Snwhitehorn	CTR0(KTR_PMAP, "moea64_init");
1454190681Snwhitehorn
1455190681Snwhitehorn	moea64_upvo_zone = uma_zcreate("UPVO entry", sizeof (struct pvo_entry),
1456190681Snwhitehorn	    NULL, NULL, NULL, NULL, UMA_ALIGN_PTR,
1457190681Snwhitehorn	    UMA_ZONE_VM | UMA_ZONE_NOFREE);
1458190681Snwhitehorn	moea64_mpvo_zone = uma_zcreate("MPVO entry", sizeof(struct pvo_entry),
1459190681Snwhitehorn	    NULL, NULL, NULL, NULL, UMA_ALIGN_PTR,
1460190681Snwhitehorn	    UMA_ZONE_VM | UMA_ZONE_NOFREE);
1461190681Snwhitehorn
1462190681Snwhitehorn	if (!hw_direct_map) {
1463216174Snwhitehorn		installed_mmu = mmu;
1464190681Snwhitehorn		uma_zone_set_allocf(moea64_upvo_zone,moea64_uma_page_alloc);
1465190681Snwhitehorn		uma_zone_set_allocf(moea64_mpvo_zone,moea64_uma_page_alloc);
1466190681Snwhitehorn	}
1467190681Snwhitehorn
1468190681Snwhitehorn	moea64_initialized = TRUE;
1469190681Snwhitehorn}
1470190681Snwhitehorn
1471190681Snwhitehornboolean_t
1472207155Salcmoea64_is_referenced(mmu_t mmu, vm_page_t m)
1473207155Salc{
1474207155Salc
1475224746Skib	KASSERT((m->oflags & VPO_UNMANAGED) == 0,
1476208574Salc	    ("moea64_is_referenced: page %p is not managed", m));
1477216174Snwhitehorn	return (moea64_query_bit(mmu, m, LPTE_REF));
1478207155Salc}
1479207155Salc
1480207155Salcboolean_t
1481190681Snwhitehornmoea64_is_modified(mmu_t mmu, vm_page_t m)
1482190681Snwhitehorn{
1483190681Snwhitehorn
1484224746Skib	KASSERT((m->oflags & VPO_UNMANAGED) == 0,
1485208504Salc	    ("moea64_is_modified: page %p is not managed", m));
1486208504Salc
1487208504Salc	/*
1488225418Skib	 * If the page is not VPO_BUSY, then PGA_WRITEABLE cannot be
1489225418Skib	 * concurrently set while the object is locked.  Thus, if PGA_WRITEABLE
1490208504Salc	 * is clear, no PTEs can have LPTE_CHG set.
1491208504Salc	 */
1492208504Salc	VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED);
1493208504Salc	if ((m->oflags & VPO_BUSY) == 0 &&
1494225418Skib	    (m->aflags & PGA_WRITEABLE) == 0)
1495190681Snwhitehorn		return (FALSE);
1496216174Snwhitehorn	return (moea64_query_bit(mmu, m, LPTE_CHG));
1497190681Snwhitehorn}
1498190681Snwhitehorn
1499214617Salcboolean_t
1500214617Salcmoea64_is_prefaultable(mmu_t mmu, pmap_t pmap, vm_offset_t va)
1501214617Salc{
1502214617Salc	struct pvo_entry *pvo;
1503214617Salc	boolean_t rv;
1504214617Salc
1505214617Salc	PMAP_LOCK(pmap);
1506214617Salc	pvo = moea64_pvo_find_va(pmap, va & ~ADDR_POFF);
1507214617Salc	rv = pvo == NULL || (pvo->pvo_pte.lpte.pte_hi & LPTE_VALID) == 0;
1508214617Salc	PMAP_UNLOCK(pmap);
1509214617Salc	return (rv);
1510214617Salc}
1511214617Salc
1512190681Snwhitehornvoid
1513190681Snwhitehornmoea64_clear_reference(mmu_t mmu, vm_page_t m)
1514190681Snwhitehorn{
1515190681Snwhitehorn
1516224746Skib	KASSERT((m->oflags & VPO_UNMANAGED) == 0,
1517208504Salc	    ("moea64_clear_reference: page %p is not managed", m));
1518216174Snwhitehorn	moea64_clear_bit(mmu, m, LPTE_REF);
1519190681Snwhitehorn}
1520190681Snwhitehorn
1521190681Snwhitehornvoid
1522190681Snwhitehornmoea64_clear_modify(mmu_t mmu, vm_page_t m)
1523190681Snwhitehorn{
1524190681Snwhitehorn
1525224746Skib	KASSERT((m->oflags & VPO_UNMANAGED) == 0,
1526208504Salc	    ("moea64_clear_modify: page %p is not managed", m));
1527208504Salc	VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED);
1528208504Salc	KASSERT((m->oflags & VPO_BUSY) == 0,
1529208504Salc	    ("moea64_clear_modify: page %p is busy", m));
1530208504Salc
1531208504Salc	/*
1532225418Skib	 * If the page is not PGA_WRITEABLE, then no PTEs can have LPTE_CHG
1533208504Salc	 * set.  If the object containing the page is locked and the page is
1534225418Skib	 * not VPO_BUSY, then PGA_WRITEABLE cannot be concurrently set.
1535208504Salc	 */
1536225418Skib	if ((m->aflags & PGA_WRITEABLE) == 0)
1537190681Snwhitehorn		return;
1538216174Snwhitehorn	moea64_clear_bit(mmu, m, LPTE_CHG);
1539190681Snwhitehorn}
1540190681Snwhitehorn
1541190681Snwhitehorn/*
1542190681Snwhitehorn * Clear the write and modified bits in each of the given page's mappings.
1543190681Snwhitehorn */
1544190681Snwhitehornvoid
1545190681Snwhitehornmoea64_remove_write(mmu_t mmu, vm_page_t m)
1546190681Snwhitehorn{
1547190681Snwhitehorn	struct	pvo_entry *pvo;
1548216174Snwhitehorn	uintptr_t pt;
1549190681Snwhitehorn	pmap_t	pmap;
1550190681Snwhitehorn	uint64_t lo;
1551190681Snwhitehorn
1552224746Skib	KASSERT((m->oflags & VPO_UNMANAGED) == 0,
1553208175Salc	    ("moea64_remove_write: page %p is not managed", m));
1554208175Salc
1555208175Salc	/*
1556225418Skib	 * If the page is not VPO_BUSY, then PGA_WRITEABLE cannot be set by
1557225418Skib	 * another thread while the object is locked.  Thus, if PGA_WRITEABLE
1558208175Salc	 * is clear, no page table entries need updating.
1559208175Salc	 */
1560208175Salc	VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED);
1561208175Salc	if ((m->oflags & VPO_BUSY) == 0 &&
1562225418Skib	    (m->aflags & PGA_WRITEABLE) == 0)
1563190681Snwhitehorn		return;
1564207796Salc	vm_page_lock_queues();
1565190681Snwhitehorn	lo = moea64_attr_fetch(m);
1566216174Snwhitehorn	powerpc_sync();
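	/*
	 * Downgrade every writable mapping of the page to read-only,
	 * collecting the CHG bits from the hardware PTEs along the way so
	 * that the page can be dirtied once, below.
	 */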
1567190681Snwhitehorn	LIST_FOREACH(pvo, vm_page_to_pvoh(m), pvo_vlink) {
1568190681Snwhitehorn		pmap = pvo->pvo_pmap;
1569190681Snwhitehorn		PMAP_LOCK(pmap);
1570205370Snwhitehorn		LOCK_TABLE();
1571190681Snwhitehorn		if ((pvo->pvo_pte.lpte.pte_lo & LPTE_PP) != LPTE_BR) {
1572216174Snwhitehorn			pt = MOEA64_PVO_TO_PTE(mmu, pvo);
1573190681Snwhitehorn			pvo->pvo_pte.lpte.pte_lo &= ~LPTE_PP;
1574190681Snwhitehorn			pvo->pvo_pte.lpte.pte_lo |= LPTE_BR;
1575216174Snwhitehorn			if (pt != -1) {
1576216174Snwhitehorn				MOEA64_PTE_SYNCH(mmu, pt, &pvo->pvo_pte.lpte);
1577190681Snwhitehorn				lo |= pvo->pvo_pte.lpte.pte_lo;
1578190681Snwhitehorn				pvo->pvo_pte.lpte.pte_lo &= ~LPTE_CHG;
1579216174Snwhitehorn				MOEA64_PTE_CHANGE(mmu, pt,
1580216174Snwhitehorn				    &pvo->pvo_pte.lpte, pvo->pvo_vpn);
1581209975Snwhitehorn				if (pvo->pvo_pmap == kernel_pmap)
1582209975Snwhitehorn					isync();
1583190681Snwhitehorn			}
1584190681Snwhitehorn		}
1585205370Snwhitehorn		UNLOCK_TABLE();
1586190681Snwhitehorn		PMAP_UNLOCK(pmap);
1587190681Snwhitehorn	}
1588190681Snwhitehorn	if ((lo & LPTE_CHG) != 0) {
1589190681Snwhitehorn		moea64_attr_clear(m, LPTE_CHG);
1590190681Snwhitehorn		vm_page_dirty(m);
1591190681Snwhitehorn	}
1592225418Skib	vm_page_aflag_clear(m, PGA_WRITEABLE);
1593207796Salc	vm_page_unlock_queues();
1594190681Snwhitehorn}
1595190681Snwhitehorn
1596190681Snwhitehorn/*
1597190681Snwhitehorn *	moea64_ts_referenced:
1598190681Snwhitehorn *
1599190681Snwhitehorn *	Return a count of reference bits for a page, clearing those bits.
1600190681Snwhitehorn *	It is not necessary for every reference bit to be cleared, but it
1601190681Snwhitehorn *	is necessary that 0 only be returned when there are truly no
1602190681Snwhitehorn *	reference bits set.
1603190681Snwhitehorn *
1604190681Snwhitehorn *	XXX: The exact number of bits to check and clear is a matter that
1605190681Snwhitehorn *	should be tested and standardized at some point in the future for
1606190681Snwhitehorn *	optimal aging of shared pages.
1607190681Snwhitehorn */
1608190681Snwhitehornboolean_t
1609190681Snwhitehornmoea64_ts_referenced(mmu_t mmu, vm_page_t m)
1610190681Snwhitehorn{
1611190681Snwhitehorn
1612224746Skib	KASSERT((m->oflags & VPO_UNMANAGED) == 0,
1613208990Salc	    ("moea64_ts_referenced: page %p is not managed", m));
1614216174Snwhitehorn	return (moea64_clear_bit(mmu, m, LPTE_REF));
1615190681Snwhitehorn}
1616190681Snwhitehorn
1617190681Snwhitehorn/*
1618213307Snwhitehorn * Modify the WIMG settings of all mappings for a page.
1619213307Snwhitehorn */
1620213307Snwhitehornvoid
1621213307Snwhitehornmoea64_page_set_memattr(mmu_t mmu, vm_page_t m, vm_memattr_t ma)
1622213307Snwhitehorn{
1623213307Snwhitehorn	struct	pvo_entry *pvo;
1624213335Snwhitehorn	struct  pvo_head *pvo_head;
1625216174Snwhitehorn	uintptr_t pt;
1626213307Snwhitehorn	pmap_t	pmap;
1627213307Snwhitehorn	uint64_t lo;
1628213307Snwhitehorn
1629224746Skib	if ((m->oflags & VPO_UNMANAGED) != 0) {
1630213335Snwhitehorn		m->md.mdpg_cache_attrs = ma;
1631213335Snwhitehorn		return;
1632213335Snwhitehorn	}
1633213335Snwhitehorn
1634213307Snwhitehorn	vm_page_lock_queues();
1635213335Snwhitehorn	pvo_head = vm_page_to_pvoh(m);
1636213307Snwhitehorn	lo = moea64_calc_wimg(VM_PAGE_TO_PHYS(m), ma);
1637213335Snwhitehorn	LIST_FOREACH(pvo, pvo_head, pvo_vlink) {
1638213307Snwhitehorn		pmap = pvo->pvo_pmap;
1639213307Snwhitehorn		PMAP_LOCK(pmap);
1640213307Snwhitehorn		LOCK_TABLE();
1641216174Snwhitehorn		pt = MOEA64_PVO_TO_PTE(mmu, pvo);
1642213307Snwhitehorn		pvo->pvo_pte.lpte.pte_lo &= ~LPTE_WIMG;
1643213307Snwhitehorn		pvo->pvo_pte.lpte.pte_lo |= lo;
1644216174Snwhitehorn		if (pt != -1) {
1645216174Snwhitehorn			MOEA64_PTE_CHANGE(mmu, pt, &pvo->pvo_pte.lpte,
1646213307Snwhitehorn			    pvo->pvo_vpn);
1647213307Snwhitehorn			if (pvo->pvo_pmap == kernel_pmap)
1648213307Snwhitehorn				isync();
1649213307Snwhitehorn		}
1650213307Snwhitehorn		UNLOCK_TABLE();
1651213307Snwhitehorn		PMAP_UNLOCK(pmap);
1652213307Snwhitehorn	}
1653213307Snwhitehorn	m->md.mdpg_cache_attrs = ma;
1654213307Snwhitehorn	vm_page_unlock_queues();
1655213307Snwhitehorn}
1656213307Snwhitehorn
1657213307Snwhitehorn/*
1658190681Snwhitehorn * Map a wired page into kernel virtual address space.
1659190681Snwhitehorn */
1660190681Snwhitehornvoid
1661213307Snwhitehornmoea64_kenter_attr(mmu_t mmu, vm_offset_t va, vm_offset_t pa, vm_memattr_t ma)
1662190681Snwhitehorn{
1663190681Snwhitehorn	uint64_t	pte_lo;
1664190681Snwhitehorn	int		error;
1665190681Snwhitehorn
1666213307Snwhitehorn	pte_lo = moea64_calc_wimg(pa, ma);
1667190681Snwhitehorn
1668190681Snwhitehorn	PMAP_LOCK(kernel_pmap);
1669216174Snwhitehorn	error = moea64_pvo_enter(mmu, kernel_pmap, moea64_upvo_zone,
1670217341Snwhitehorn	    &moea64_pvo_kunmanaged, va, pa, pte_lo, PVO_WIRED);
1671190681Snwhitehorn
1672190681Snwhitehorn	if (error != 0 && error != ENOENT)
1673209975Snwhitehorn		panic("moea64_kenter: failed to enter va %#zx pa %#zx: %d", va,
1674190681Snwhitehorn		    pa, error);
1675190681Snwhitehorn
1676190681Snwhitehorn	/*
1677190681Snwhitehorn	 * Flush the memory from the instruction cache.
1678190681Snwhitehorn	 */
1679216174Snwhitehorn	if ((pte_lo & (LPTE_I | LPTE_G)) == 0)
1680190681Snwhitehorn		__syncicache((void *)va, PAGE_SIZE);
1681190681Snwhitehorn	PMAP_UNLOCK(kernel_pmap);
1682190681Snwhitehorn}
1683190681Snwhitehorn
1684213307Snwhitehornvoid
1685213307Snwhitehornmoea64_kenter(mmu_t mmu, vm_offset_t va, vm_offset_t pa)
1686213307Snwhitehorn{
1687213307Snwhitehorn
1688213307Snwhitehorn	moea64_kenter_attr(mmu, va, pa, VM_MEMATTR_DEFAULT);
1689213307Snwhitehorn}
1690213307Snwhitehorn
1691190681Snwhitehorn/*
1692190681Snwhitehorn * Extract the physical page address associated with the given kernel virtual
1693190681Snwhitehorn * address.
1694190681Snwhitehorn */
1695190681Snwhitehornvm_offset_t
1696190681Snwhitehornmoea64_kextract(mmu_t mmu, vm_offset_t va)
1697190681Snwhitehorn{
1698190681Snwhitehorn	struct		pvo_entry *pvo;
1699190681Snwhitehorn	vm_paddr_t pa;
1700190681Snwhitehorn
1701205370Snwhitehorn	/*
1702205370Snwhitehorn	 * Shortcut the direct-mapped case when applicable.  We never put
1703205370Snwhitehorn	 * anything but 1:1 mappings below VM_MIN_KERNEL_ADDRESS.
1704205370Snwhitehorn	 */
1705205370Snwhitehorn	if (va < VM_MIN_KERNEL_ADDRESS)
1706205370Snwhitehorn		return (va);
1707205370Snwhitehorn
1708190681Snwhitehorn	PMAP_LOCK(kernel_pmap);
1709209975Snwhitehorn	pvo = moea64_pvo_find_va(kernel_pmap, va);
1710209975Snwhitehorn	KASSERT(pvo != NULL, ("moea64_kextract: no addr found for %#" PRIxPTR,
1711209975Snwhitehorn	    va));
1712223471Sandreast	pa = (pvo->pvo_pte.lpte.pte_lo & LPTE_RPGN) | (va - PVO_VADDR(pvo));
1713190681Snwhitehorn	PMAP_UNLOCK(kernel_pmap);
1714190681Snwhitehorn	return (pa);
1715190681Snwhitehorn}
1716190681Snwhitehorn
1717190681Snwhitehorn/*
1718190681Snwhitehorn * Remove a wired page from kernel virtual address space.
1719190681Snwhitehorn */
1720190681Snwhitehornvoid
1721190681Snwhitehornmoea64_kremove(mmu_t mmu, vm_offset_t va)
1722190681Snwhitehorn{
1723190681Snwhitehorn	moea64_remove(mmu, kernel_pmap, va, va + PAGE_SIZE);
1724190681Snwhitehorn}
1725190681Snwhitehorn
1726190681Snwhitehorn/*
1727190681Snwhitehorn * Map a range of physical addresses into kernel virtual address space.
1728190681Snwhitehorn *
1729190681Snwhitehorn * The value passed in *virt is a suggested virtual address for the mapping.
1730190681Snwhitehorn * Architectures which can support a direct-mapped physical to virtual region
1731190681Snwhitehorn * can return the appropriate address within that region, leaving '*virt'
1732190681Snwhitehorn * unchanged.  We cannot and therefore do not; *virt is updated with the
1733190681Snwhitehorn * first usable address after the mapped region.
1734190681Snwhitehorn */
1735190681Snwhitehornvm_offset_t
1736190681Snwhitehornmoea64_map(mmu_t mmu, vm_offset_t *virt, vm_offset_t pa_start,
1737190681Snwhitehorn    vm_offset_t pa_end, int prot)
1738190681Snwhitehorn{
1739190681Snwhitehorn	vm_offset_t	sva, va;
1740190681Snwhitehorn
1741190681Snwhitehorn	sva = *virt;
1742190681Snwhitehorn	va = sva;
1743190681Snwhitehorn	for (; pa_start < pa_end; pa_start += PAGE_SIZE, va += PAGE_SIZE)
1744190681Snwhitehorn		moea64_kenter(mmu, va, pa_start);
1745190681Snwhitehorn	*virt = va;
1746190681Snwhitehorn
1747190681Snwhitehorn	return (sva);
1748190681Snwhitehorn}
1749190681Snwhitehorn
1750190681Snwhitehorn/*
1751190681Snwhitehorn * Returns true if the pmap's pv is one of the first
1752190681Snwhitehorn * 16 pvs linked to from this page.  This count may
1753190681Snwhitehorn * be changed upwards or downwards in the future; it
1754190681Snwhitehorn * is only necessary that true be returned for a small
1755190681Snwhitehorn * subset of pmaps for proper page aging.
1756190681Snwhitehorn */
1757190681Snwhitehornboolean_t
1758190681Snwhitehornmoea64_page_exists_quick(mmu_t mmu, pmap_t pmap, vm_page_t m)
1759190681Snwhitehorn{
1760190681Snwhitehorn        int loops;
1761190681Snwhitehorn	struct pvo_entry *pvo;
1762208990Salc	boolean_t rv;
1763190681Snwhitehorn
1764224746Skib	KASSERT((m->oflags & VPO_UNMANAGED) == 0,
1765208990Salc	    ("moea64_page_exists_quick: page %p is not managed", m));
1766190681Snwhitehorn	loops = 0;
1767208990Salc	rv = FALSE;
1768208990Salc	vm_page_lock_queues();
1769190681Snwhitehorn	LIST_FOREACH(pvo, vm_page_to_pvoh(m), pvo_vlink) {
1770208990Salc		if (pvo->pvo_pmap == pmap) {
1771208990Salc			rv = TRUE;
1772208990Salc			break;
1773208990Salc		}
1774190681Snwhitehorn		if (++loops >= 16)
1775190681Snwhitehorn			break;
1776190681Snwhitehorn	}
1777208990Salc	vm_page_unlock_queues();
1778208990Salc	return (rv);
1779190681Snwhitehorn}
1780190681Snwhitehorn
1781190681Snwhitehorn/*
1782190681Snwhitehorn * Return the number of managed mappings to the given physical page
1783190681Snwhitehorn * that are wired.
1784190681Snwhitehorn */
1785190681Snwhitehornint
1786190681Snwhitehornmoea64_page_wired_mappings(mmu_t mmu, vm_page_t m)
1787190681Snwhitehorn{
1788190681Snwhitehorn	struct pvo_entry *pvo;
1789190681Snwhitehorn	int count;
1790190681Snwhitehorn
1791190681Snwhitehorn	count = 0;
1792224746Skib	if ((m->oflags & VPO_UNMANAGED) != 0)
1793190681Snwhitehorn		return (count);
1794207796Salc	vm_page_lock_queues();
1795190681Snwhitehorn	LIST_FOREACH(pvo, vm_page_to_pvoh(m), pvo_vlink)
1796190681Snwhitehorn		if ((pvo->pvo_vaddr & PVO_WIRED) != 0)
1797190681Snwhitehorn			count++;
1798207796Salc	vm_page_unlock_queues();
1799190681Snwhitehorn	return (count);
1800190681Snwhitehorn}
1801190681Snwhitehorn
1802209975Snwhitehornstatic uintptr_t	moea64_vsidcontext;
1803190681Snwhitehorn
1804209975Snwhitehornuintptr_t
1805209975Snwhitehornmoea64_get_unique_vsid(void)
1805209975Snwhitehorn{
1806209975Snwhitehorn	u_int entropy;
1807209975Snwhitehorn	register_t hash;
1808209975Snwhitehorn	uint32_t mask;
1809209975Snwhitehorn	int i;
1810190681Snwhitehorn
1811190681Snwhitehorn	entropy = 0;
1812190681Snwhitehorn	__asm __volatile("mftb %0" : "=r"(entropy));
1813190681Snwhitehorn
1814211967Snwhitehorn	mtx_lock(&moea64_slb_mutex);
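	/*
	 * moea64_vsid_bitmap tracks allocation in 32-bit words of
	 * VSID_NBPW entries each; 'n' selects the word and 'mask' the bit
	 * within it for each candidate hash tried below.
	 */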
1815209975Snwhitehorn	for (i = 0; i < NVSIDS; i += VSID_NBPW) {
1816209975Snwhitehorn		u_int	n;
1817190681Snwhitehorn
1818190681Snwhitehorn		/*
1819190681Snwhitehorn		 * Create a new value by mutiplying by a prime and adding in
1820190681Snwhitehorn		 * Create a new value by multiplying by a prime and adding in
1821190681Snwhitehorn		 * VSID more random so that the PT hash function collides
1822190681Snwhitehorn		 * less often.  (Note that the prime casues gcc to do shifts
1823190681Snwhitehorn		 * less often.  (Note that the prime causes gcc to do shifts
1824190681Snwhitehorn		 */
1825190681Snwhitehorn		moea64_vsidcontext = (moea64_vsidcontext * 0x1105) + entropy;
1826209975Snwhitehorn		hash = moea64_vsidcontext & (NVSIDS - 1);
1827190681Snwhitehorn		if (hash == 0)		/* 0 is special, avoid it */
1828190681Snwhitehorn			continue;
1829190681Snwhitehorn		n = hash >> 5;
1830190681Snwhitehorn		mask = 1 << (hash & (VSID_NBPW - 1));
1831209975Snwhitehorn		hash = (moea64_vsidcontext & VSID_HASHMASK);
1832190681Snwhitehorn		if (moea64_vsid_bitmap[n] & mask) {	/* collision? */
1833190681Snwhitehorn			/* anything free in this bucket? */
1834190681Snwhitehorn			if (moea64_vsid_bitmap[n] == 0xffffffff) {
1835190681Snwhitehorn				entropy = (moea64_vsidcontext >> 20);
1836190681Snwhitehorn				continue;
1837190681Snwhitehorn			}
1838212322Snwhitehorn			i = ffs(~moea64_vsid_bitmap[n]) - 1;
1839190681Snwhitehorn			mask = 1 << i;
1840209975Snwhitehorn			hash &= VSID_HASHMASK & ~(VSID_NBPW - 1);
1841190681Snwhitehorn			hash |= i;
1842190681Snwhitehorn		}
1843212322Snwhitehorn		KASSERT(!(moea64_vsid_bitmap[n] & mask),
1844212331Snwhitehorn		    ("Allocating in-use VSID %#zx\n", hash));
1845190681Snwhitehorn		moea64_vsid_bitmap[n] |= mask;
1846211967Snwhitehorn		mtx_unlock(&moea64_slb_mutex);
1847209975Snwhitehorn		return (hash);
1848190681Snwhitehorn	}
1849190681Snwhitehorn
1850211967Snwhitehorn	mtx_unlock(&moea64_slb_mutex);
1851209975Snwhitehorn	panic("%s: out of segments",__func__);
1852190681Snwhitehorn}
1853190681Snwhitehorn
1854209975Snwhitehorn#ifdef __powerpc64__
1855209975Snwhitehornvoid
1856209975Snwhitehornmoea64_pinit(mmu_t mmu, pmap_t pmap)
1857209975Snwhitehorn{
1858209975Snwhitehorn	PMAP_LOCK_INIT(pmap);
1859228412Snwhitehorn	LIST_INIT(&pmap->pmap_pvo);
1860209975Snwhitehorn
1861212715Snwhitehorn	pmap->pm_slb_tree_root = slb_alloc_tree();
1862209975Snwhitehorn	pmap->pm_slb = slb_alloc_user_cache();
1863212722Snwhitehorn	pmap->pm_slb_len = 0;
1864209975Snwhitehorn}
1865209975Snwhitehorn#else
1866209975Snwhitehornvoid
1867209975Snwhitehornmoea64_pinit(mmu_t mmu, pmap_t pmap)
1868209975Snwhitehorn{
1869209975Snwhitehorn	int	i;
1870212308Snwhitehorn	uint32_t hash;
1871209975Snwhitehorn
1872209975Snwhitehorn	PMAP_LOCK_INIT(pmap);
1873228412Snwhitehorn	LIST_INIT(&pmap->pmap_pvo);
1874209975Snwhitehorn
1875209975Snwhitehorn	if (pmap_bootstrapped)
1876209975Snwhitehorn		pmap->pmap_phys = (pmap_t)moea64_kextract(mmu,
1877209975Snwhitehorn		    (vm_offset_t)pmap);
1878209975Snwhitehorn	else
1879209975Snwhitehorn		pmap->pmap_phys = pmap;
1880209975Snwhitehorn
1881209975Snwhitehorn	/*
1882209975Snwhitehorn	 * Allocate some segment registers for this pmap.
1883209975Snwhitehorn	 */
1884209975Snwhitehorn	hash = moea64_get_unique_vsid();
1885209975Snwhitehorn
1886209975Snwhitehorn	for (i = 0; i < 16; i++)
1887209975Snwhitehorn		pmap->pm_sr[i] = VSID_MAKE(i, hash);
1888212308Snwhitehorn
1889212308Snwhitehorn	KASSERT(pmap->pm_sr[0] != 0, ("moea64_pinit: pm_sr[0] = 0"));
1890209975Snwhitehorn}
1891209975Snwhitehorn#endif
1892209975Snwhitehorn
1893190681Snwhitehorn/*
1894190681Snwhitehorn * Initialize the pmap associated with process 0.
1895190681Snwhitehorn */
1896190681Snwhitehornvoid
1897190681Snwhitehornmoea64_pinit0(mmu_t mmu, pmap_t pm)
1898190681Snwhitehorn{
1899190681Snwhitehorn	moea64_pinit(mmu, pm);
1900190681Snwhitehorn	bzero(&pm->pm_stats, sizeof(pm->pm_stats));
1901190681Snwhitehorn}
1902190681Snwhitehorn
1903190681Snwhitehorn/*
1904190681Snwhitehorn * Set the physical protection on the specified range of this map as requested.
1905190681Snwhitehorn */
1906190681Snwhitehornvoid
1907190681Snwhitehornmoea64_protect(mmu_t mmu, pmap_t pm, vm_offset_t sva, vm_offset_t eva,
1908190681Snwhitehorn    vm_prot_t prot)
1909190681Snwhitehorn{
1910190681Snwhitehorn	struct	pvo_entry *pvo;
1911216174Snwhitehorn	uintptr_t pt;
1912190681Snwhitehorn
1913190681Snwhitehorn	CTR4(KTR_PMAP, "moea64_protect: pm=%p sva=%#x eva=%#x prot=%#x", pm, sva,
1914190681Snwhitehorn	    eva, prot);
1915190681Snwhitehorn
1917190681Snwhitehorn	KASSERT(pm == &curproc->p_vmspace->vm_pmap || pm == kernel_pmap,
1918190681Snwhitehorn	    ("moea64_protect: non current pmap"));
1919190681Snwhitehorn
1920190681Snwhitehorn	if ((prot & VM_PROT_READ) == VM_PROT_NONE) {
1921190681Snwhitehorn		moea64_remove(mmu, pm, sva, eva);
1922190681Snwhitehorn		return;
1923190681Snwhitehorn	}
1924190681Snwhitehorn
1925190681Snwhitehorn	vm_page_lock_queues();
1926190681Snwhitehorn	PMAP_LOCK(pm);
1927190681Snwhitehorn	for (; sva < eva; sva += PAGE_SIZE) {
1928209975Snwhitehorn		pvo = moea64_pvo_find_va(pm, sva);
1929190681Snwhitehorn		if (pvo == NULL)
1930190681Snwhitehorn			continue;
1931190681Snwhitehorn
1932190681Snwhitehorn		/*
1933190681Snwhitehorn		 * Grab the PTE pointer before we diddle with the cached PTE
1934190681Snwhitehorn		 * copy.
1935190681Snwhitehorn		 */
1936190681Snwhitehorn		LOCK_TABLE();
1937216174Snwhitehorn		pt = MOEA64_PVO_TO_PTE(mmu, pvo);
1938190681Snwhitehorn
1939190681Snwhitehorn		/*
1940190681Snwhitehorn		 * Change the protection of the page.
1941190681Snwhitehorn		 */
1942190681Snwhitehorn		pvo->pvo_pte.lpte.pte_lo &= ~LPTE_PP;
1943190681Snwhitehorn		pvo->pvo_pte.lpte.pte_lo |= LPTE_BR;
1944190681Snwhitehorn		pvo->pvo_pte.lpte.pte_lo &= ~LPTE_NOEXEC;
1945190681Snwhitehorn		if ((prot & VM_PROT_EXECUTE) == 0)
1946190681Snwhitehorn			pvo->pvo_pte.lpte.pte_lo |= LPTE_NOEXEC;
1947190681Snwhitehorn
1948190681Snwhitehorn		/*
1949190681Snwhitehorn		 * If the PVO is in the page table, update that pte as well.
1950190681Snwhitehorn		 */
1951216174Snwhitehorn		if (pt != -1) {
1952216174Snwhitehorn			MOEA64_PTE_CHANGE(mmu, pt, &pvo->pvo_pte.lpte,
1953216174Snwhitehorn			    pvo->pvo_vpn);
1954190681Snwhitehorn			if ((pvo->pvo_pte.lpte.pte_lo &
1955190681Snwhitehorn			    (LPTE_I | LPTE_G | LPTE_NOEXEC)) == 0) {
1956216174Snwhitehorn				moea64_syncicache(mmu, pm, sva,
1957198341Smarcel				    pvo->pvo_pte.lpte.pte_lo & LPTE_RPGN,
1958198341Smarcel				    PAGE_SIZE);
1959190681Snwhitehorn			}
1960190681Snwhitehorn		}
1961190681Snwhitehorn		UNLOCK_TABLE();
1962190681Snwhitehorn	}
1963190681Snwhitehorn	vm_page_unlock_queues();
1964190681Snwhitehorn	PMAP_UNLOCK(pm);
1965190681Snwhitehorn}
1966190681Snwhitehorn
1967190681Snwhitehorn/*
1968190681Snwhitehorn * Map a list of wired pages into kernel virtual address space.  This is
1969190681Snwhitehorn * intended for temporary mappings which do not need page modification or
1970190681Snwhitehorn * references recorded.  Existing mappings in the region are overwritten.
1971190681Snwhitehorn */
1972190681Snwhitehornvoid
1973190681Snwhitehornmoea64_qenter(mmu_t mmu, vm_offset_t va, vm_page_t *m, int count)
1974190681Snwhitehorn{
1975190681Snwhitehorn	while (count-- > 0) {
1976190681Snwhitehorn		moea64_kenter(mmu, va, VM_PAGE_TO_PHYS(*m));
1977190681Snwhitehorn		va += PAGE_SIZE;
1978190681Snwhitehorn		m++;
1979190681Snwhitehorn	}
1980190681Snwhitehorn}
1981190681Snwhitehorn
1982190681Snwhitehorn/*
1983190681Snwhitehorn * Remove page mappings from kernel virtual address space.  Intended for
1984190681Snwhitehorn * temporary mappings entered by moea64_qenter.
1985190681Snwhitehorn */
1986190681Snwhitehornvoid
1987190681Snwhitehornmoea64_qremove(mmu_t mmu, vm_offset_t va, int count)
1988190681Snwhitehorn{
1989190681Snwhitehorn	while (count-- > 0) {
1990190681Snwhitehorn		moea64_kremove(mmu, va);
1991190681Snwhitehorn		va += PAGE_SIZE;
1992190681Snwhitehorn	}
1993190681Snwhitehorn}
1994190681Snwhitehorn
1995190681Snwhitehornvoid
1996209975Snwhitehornmoea64_release_vsid(uint64_t vsid)
1997209975Snwhitehorn{
1998212044Snwhitehorn	int idx, mask;
1999209975Snwhitehorn
2000212044Snwhitehorn	mtx_lock(&moea64_slb_mutex);
2001212044Snwhitehorn	idx = vsid & (NVSIDS-1);
2002212044Snwhitehorn	mask = 1 << (idx % VSID_NBPW);
2003212044Snwhitehorn	idx /= VSID_NBPW;
2004212308Snwhitehorn	KASSERT(moea64_vsid_bitmap[idx] & mask,
2005212308Snwhitehorn	    ("Freeing unallocated VSID %#jx", vsid));
2006212044Snwhitehorn	moea64_vsid_bitmap[idx] &= ~mask;
2007212044Snwhitehorn	mtx_unlock(&moea64_slb_mutex);
2008209975Snwhitehorn}
2009209975Snwhitehorn
2010209975Snwhitehorn
2011209975Snwhitehornvoid
2012190681Snwhitehornmoea64_release(mmu_t mmu, pmap_t pmap)
2013190681Snwhitehorn{
2014190681Snwhitehorn
2015190681Snwhitehorn	/*
2016209975Snwhitehorn	 * Free segment registers' VSIDs
2017190681Snwhitehorn	 */
2018209975Snwhitehorn#ifdef __powerpc64__
2019212715Snwhitehorn	slb_free_tree(pmap);
2020209975Snwhitehorn	slb_free_user_cache(pmap->pm_slb);
2021209975Snwhitehorn#else
2022212308Snwhitehorn	KASSERT(pmap->pm_sr[0] != 0, ("moea64_release: pm_sr[0] = 0"));
2023190681Snwhitehorn
2024212308Snwhitehorn	moea64_release_vsid(VSID_TO_HASH(pmap->pm_sr[0]));
2025209975Snwhitehorn#endif
2026209975Snwhitehorn
2027190681Snwhitehorn	PMAP_LOCK_DESTROY(pmap);
2028190681Snwhitehorn}
2029190681Snwhitehorn
2030190681Snwhitehorn/*
2031190681Snwhitehorn * Remove the given range of addresses from the specified map.
2032190681Snwhitehorn */
2033190681Snwhitehornvoid
2034190681Snwhitehornmoea64_remove(mmu_t mmu, pmap_t pm, vm_offset_t sva, vm_offset_t eva)
2035190681Snwhitehorn{
2036190681Snwhitehorn	struct	pvo_entry *pvo;
2037190681Snwhitehorn
2038190681Snwhitehorn	vm_page_lock_queues();
2039190681Snwhitehorn	PMAP_LOCK(pm);
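	/*
	 * For a small range, probe each page's PVO directly; for a large
	 * one, it is cheaper to walk the pmap's PVO list and filter by
	 * address.
	 */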
2040228412Snwhitehorn	if ((eva - sva) / PAGE_SIZE < 10) {
2041228412Snwhitehorn		for (; sva < eva; sva += PAGE_SIZE) {
2042228412Snwhitehorn			pvo = moea64_pvo_find_va(pm, sva);
2043228412Snwhitehorn			if (pvo != NULL)
2044228412Snwhitehorn				moea64_pvo_remove(mmu, pvo);
2045228412Snwhitehorn		}
2046228412Snwhitehorn	} else {
2047228412Snwhitehorn		LIST_FOREACH(pvo, &pm->pmap_pvo, pvo_plink) {
2048228412Snwhitehorn			if (PVO_VADDR(pvo) < sva || PVO_VADDR(pvo) >= eva)
2049228412Snwhitehorn				continue;
2050216174Snwhitehorn			moea64_pvo_remove(mmu, pvo);
2051228412Snwhitehorn		}
2052190681Snwhitehorn	}
2053190681Snwhitehorn	vm_page_unlock_queues();
2054190681Snwhitehorn	PMAP_UNLOCK(pm);
2055190681Snwhitehorn}
2056190681Snwhitehorn
2057190681Snwhitehorn/*
2058190681Snwhitehorn * Remove physical page from all pmaps in which it resides. moea64_pvo_remove()
2059190681Snwhitehorn * will reflect changes in the PTEs back to the vm_page.
2060190681Snwhitehorn */
2061190681Snwhitehornvoid
2062190681Snwhitehornmoea64_remove_all(mmu_t mmu, vm_page_t m)
2063190681Snwhitehorn{
2064190681Snwhitehorn	struct  pvo_head *pvo_head;
2065190681Snwhitehorn	struct	pvo_entry *pvo, *next_pvo;
2066190681Snwhitehorn	pmap_t	pmap;
2067190681Snwhitehorn
2068207796Salc	vm_page_lock_queues();
2069190681Snwhitehorn	pvo_head = vm_page_to_pvoh(m);
2070190681Snwhitehorn	for (pvo = LIST_FIRST(pvo_head); pvo != NULL; pvo = next_pvo) {
2071190681Snwhitehorn		next_pvo = LIST_NEXT(pvo, pvo_vlink);
2072190681Snwhitehorn
2073190681Snwhitehorn		pmap = pvo->pvo_pmap;
2074190681Snwhitehorn		PMAP_LOCK(pmap);
2075216174Snwhitehorn		moea64_pvo_remove(mmu, pvo);
2076190681Snwhitehorn		PMAP_UNLOCK(pmap);
2077190681Snwhitehorn	}
2078225418Skib	if ((m->aflags & PGA_WRITEABLE) && moea64_is_modified(mmu, m)) {
2079204042Snwhitehorn		moea64_attr_clear(m, LPTE_CHG);
2080204042Snwhitehorn		vm_page_dirty(m);
2081204042Snwhitehorn	}
2082225418Skib	vm_page_aflag_clear(m, PGA_WRITEABLE);
2083207796Salc	vm_page_unlock_queues();
2084190681Snwhitehorn}
2085190681Snwhitehorn
2086190681Snwhitehorn/*
2087190681Snwhitehorn * Allocate a physical page of memory directly from the phys_avail map.
2088190681Snwhitehorn * Can only be called from moea64_bootstrap before avail start and end are
2089190681Snwhitehorn * calculated.
2090190681Snwhitehorn */
2091216174Snwhitehornvm_offset_t
2092190681Snwhitehornmoea64_bootstrap_alloc(vm_size_t size, u_int align)
2093190681Snwhitehorn{
2094190681Snwhitehorn	vm_offset_t	s, e;
2095190681Snwhitehorn	int		i, j;
2096190681Snwhitehorn
2097190681Snwhitehorn	size = round_page(size);
2098190681Snwhitehorn	for (i = 0; phys_avail[i + 1] != 0; i += 2) {
2099190681Snwhitehorn		if (align != 0)
2100190681Snwhitehorn			s = (phys_avail[i] + align - 1) & ~(align - 1);
2101190681Snwhitehorn		else
2102190681Snwhitehorn			s = phys_avail[i];
2103190681Snwhitehorn		e = s + size;
2104190681Snwhitehorn
2105190681Snwhitehorn		if (s < phys_avail[i] || e > phys_avail[i + 1])
2106190681Snwhitehorn			continue;
2107190681Snwhitehorn
2108215159Snwhitehorn		if (s + size > platform_real_maxaddr())
2109215159Snwhitehorn			continue;
2110215159Snwhitehorn
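		/*
		 * Carve the allocation out of this phys_avail region:
		 * shave it off the front or the back if it abuts an edge,
		 * otherwise split the region in two around it.
		 */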
2111190681Snwhitehorn		if (s == phys_avail[i]) {
2112190681Snwhitehorn			phys_avail[i] += size;
2113190681Snwhitehorn		} else if (e == phys_avail[i + 1]) {
2114190681Snwhitehorn			phys_avail[i + 1] -= size;
2115190681Snwhitehorn		} else {
2116190681Snwhitehorn			for (j = phys_avail_count * 2; j > i; j -= 2) {
2117190681Snwhitehorn				phys_avail[j] = phys_avail[j - 2];
2118190681Snwhitehorn				phys_avail[j + 1] = phys_avail[j - 1];
2119190681Snwhitehorn			}
2120190681Snwhitehorn
2121190681Snwhitehorn			phys_avail[i + 3] = phys_avail[i + 1];
2122190681Snwhitehorn			phys_avail[i + 1] = s;
2123190681Snwhitehorn			phys_avail[i + 2] = e;
2124190681Snwhitehorn			phys_avail_count++;
2125190681Snwhitehorn		}
2126190681Snwhitehorn
2127190681Snwhitehorn		return (s);
2128190681Snwhitehorn	}
2129190681Snwhitehorn	panic("moea64_bootstrap_alloc: could not allocate memory");
2130190681Snwhitehorn}
2131190681Snwhitehorn
2132190681Snwhitehornstatic int
2133216174Snwhitehornmoea64_pvo_enter(mmu_t mmu, pmap_t pm, uma_zone_t zone,
2134216174Snwhitehorn    struct pvo_head *pvo_head, vm_offset_t va, vm_offset_t pa,
2135216174Snwhitehorn    uint64_t pte_lo, int flags)
2136190681Snwhitehorn{
2137190681Snwhitehorn	struct	 pvo_entry *pvo;
2138190681Snwhitehorn	uint64_t vsid;
2139190681Snwhitehorn	int	 first;
2140190681Snwhitehorn	u_int	 ptegidx;
2141190681Snwhitehorn	int	 i;
2142190681Snwhitehorn	int      bootstrap;
2143190681Snwhitehorn
2144190681Snwhitehorn	/*
2145190681Snwhitehorn	 * One nasty thing that can happen here is that the UMA calls to
2146190681Snwhitehorn	 * allocate new PVOs need to map more memory, which calls pvo_enter(),
2147190681Snwhitehorn	 * which calls UMA...
2148190681Snwhitehorn	 *
2149190681Snwhitehorn	 * We break the loop by detecting recursion and allocating out of
2150190681Snwhitehorn	 * the bootstrap pool.
2151190681Snwhitehorn	 */
2152190681Snwhitehorn
2153190681Snwhitehorn	first = 0;
2154190681Snwhitehorn	bootstrap = (flags & PVO_BOOTSTRAP);
2155190681Snwhitehorn
2156190681Snwhitehorn	if (!moea64_initialized)
2157190681Snwhitehorn		bootstrap = 1;
2158190681Snwhitehorn
2159190681Snwhitehorn	/*
2160190681Snwhitehorn	 * Compute the PTE Group index.
2161190681Snwhitehorn	 */
2162190681Snwhitehorn	va &= ~ADDR_POFF;
2163190681Snwhitehorn	vsid = va_to_vsid(pm, va);
2164209975Snwhitehorn	ptegidx = va_to_pteg(vsid, va, flags & PVO_LARGE);
2165190681Snwhitehorn
2166190681Snwhitehorn	/*
2167190681Snwhitehorn	 * Remove any existing mapping for this page.  Reuse the pvo entry if
2168190681Snwhitehorn	 * there is a mapping.
2169190681Snwhitehorn	 */
2170198378Snwhitehorn	LOCK_TABLE();
2171190681Snwhitehorn
2172212363Snwhitehorn	moea64_pvo_enter_calls++;
2173212363Snwhitehorn
2174190681Snwhitehorn	LIST_FOREACH(pvo, &moea64_pvo_table[ptegidx], pvo_olink) {
2175190681Snwhitehorn		if (pvo->pvo_pmap == pm && PVO_VADDR(pvo) == va) {
2176190681Snwhitehorn			if ((pvo->pvo_pte.lpte.pte_lo & LPTE_RPGN) == pa &&
2177217341Snwhitehorn			    (pvo->pvo_pte.lpte.pte_lo & (LPTE_NOEXEC | LPTE_PP))
2178217341Snwhitehorn			    == (pte_lo & (LPTE_NOEXEC | LPTE_PP))) {
2179209975Snwhitehorn				if (!(pvo->pvo_pte.lpte.pte_hi & LPTE_VALID)) {
2180209975Snwhitehorn					/* Re-insert if spilled */
2181216174Snwhitehorn					i = MOEA64_PTE_INSERT(mmu, ptegidx,
2182209975Snwhitehorn					    &pvo->pvo_pte.lpte);
2183209975Snwhitehorn					if (i >= 0)
2184209975Snwhitehorn						PVO_PTEGIDX_SET(pvo, i);
2185209975Snwhitehorn					moea64_pte_overflow--;
2186209975Snwhitehorn				}
2187198378Snwhitehorn				UNLOCK_TABLE();
2188190681Snwhitehorn				return (0);
2189190681Snwhitehorn			}
2190216174Snwhitehorn			moea64_pvo_remove(mmu, pvo);
2191190681Snwhitehorn			break;
2192190681Snwhitehorn		}
2193190681Snwhitehorn	}
2194190681Snwhitehorn
2195190681Snwhitehorn	/*
2196190681Snwhitehorn	 * If we aren't overwriting a mapping, try to allocate.
2197190681Snwhitehorn	 */
2198190681Snwhitehorn	if (bootstrap) {
2199190681Snwhitehorn		if (moea64_bpvo_pool_index >= BPVO_POOL_SIZE) {
2200209975Snwhitehorn			panic("moea64_pvo_enter: bpvo pool exhausted, %d, %d, %zd",
2201190681Snwhitehorn			      moea64_bpvo_pool_index, BPVO_POOL_SIZE,
2202190681Snwhitehorn			      BPVO_POOL_SIZE * sizeof(struct pvo_entry));
2203190681Snwhitehorn		}
2204190681Snwhitehorn		pvo = &moea64_bpvo_pool[moea64_bpvo_pool_index];
2205190681Snwhitehorn		moea64_bpvo_pool_index++;
2206190681Snwhitehorn		bootstrap = 1;
2207190681Snwhitehorn	} else {
2208198378Snwhitehorn		/*
2209204719Snwhitehorn		 * Note: drop the table lock around the UMA allocation in
2210198378Snwhitehorn		 * case the UMA allocator needs to manipulate the page
2211198378Snwhitehorn		 * table. The mapping we are working with is already
2212198378Snwhitehorn		 * protected by the PMAP lock.
2213198378Snwhitehorn		 */
2214198378Snwhitehorn		UNLOCK_TABLE();
2215190681Snwhitehorn		pvo = uma_zalloc(zone, M_NOWAIT);
2216198378Snwhitehorn		LOCK_TABLE();
2217190681Snwhitehorn	}
2218190681Snwhitehorn
2219190681Snwhitehorn	if (pvo == NULL) {
2220198378Snwhitehorn		UNLOCK_TABLE();
2221190681Snwhitehorn		return (ENOMEM);
2222190681Snwhitehorn	}
2223190681Snwhitehorn
2224190681Snwhitehorn	moea64_pvo_entries++;
2225190681Snwhitehorn	pvo->pvo_vaddr = va;
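	/*
	 * The virtual page number used for invalidations is the VSID
	 * shifted above the 16-bit page index taken from the EA.
	 */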
2226209975Snwhitehorn	pvo->pvo_vpn = (uint64_t)((va & ADDR_PIDX) >> ADDR_PIDX_SHFT)
2227209975Snwhitehorn	    | (vsid << 16);
2228190681Snwhitehorn	pvo->pvo_pmap = pm;
2229190681Snwhitehorn	LIST_INSERT_HEAD(&moea64_pvo_table[ptegidx], pvo, pvo_olink);
2230190681Snwhitehorn	pvo->pvo_vaddr &= ~ADDR_POFF;
2231190681Snwhitehorn
2232190681Snwhitehorn	if (flags & PVO_WIRED)
2233190681Snwhitehorn		pvo->pvo_vaddr |= PVO_WIRED;
2234190681Snwhitehorn	if (pvo_head != &moea64_pvo_kunmanaged)
2235190681Snwhitehorn		pvo->pvo_vaddr |= PVO_MANAGED;
2236190681Snwhitehorn	if (bootstrap)
2237190681Snwhitehorn		pvo->pvo_vaddr |= PVO_BOOTSTRAP;
2238209975Snwhitehorn	if (flags & PVO_LARGE)
2239209975Snwhitehorn		pvo->pvo_vaddr |= PVO_LARGE;
2240190681Snwhitehorn
2241190681Snwhitehorn	moea64_pte_create(&pvo->pvo_pte.lpte, vsid, va,
2242209975Snwhitehorn	    (uint64_t)(pa) | pte_lo, flags);
2243190681Snwhitehorn
2244190681Snwhitehorn	/*
2245228412Snwhitehorn	 * Add to pmap list
2246228412Snwhitehorn	 */
2247228412Snwhitehorn	LIST_INSERT_HEAD(&pm->pmap_pvo, pvo, pvo_plink);
2248228412Snwhitehorn
2249228412Snwhitehorn	/*
2250190681Snwhitehorn	 * Remember if the list was empty and therefore will be the first
2251190681Snwhitehorn	 * item.
2252190681Snwhitehorn	 */
2253190681Snwhitehorn	if (LIST_FIRST(pvo_head) == NULL)
2254190681Snwhitehorn		first = 1;
2255190681Snwhitehorn	LIST_INSERT_HEAD(pvo_head, pvo, pvo_vlink);
2256190681Snwhitehorn
2257209975Snwhitehorn	if (pvo->pvo_vaddr & PVO_WIRED) {
2258209975Snwhitehorn		pvo->pvo_pte.lpte.pte_hi |= LPTE_WIRED;
2259190681Snwhitehorn		pm->pm_stats.wired_count++;
2260209975Snwhitehorn	}
2261190681Snwhitehorn	pm->pm_stats.resident_count++;
2262190681Snwhitehorn
2263190681Snwhitehorn	/*
2264190681Snwhitehorn	 * We hope this succeeds but it isn't required.
2265190681Snwhitehorn	 */
2266216174Snwhitehorn	i = MOEA64_PTE_INSERT(mmu, ptegidx, &pvo->pvo_pte.lpte);
2267190681Snwhitehorn	if (i >= 0) {
2268190681Snwhitehorn		PVO_PTEGIDX_SET(pvo, i);
2269190681Snwhitehorn	} else {
2270190681Snwhitehorn		panic("moea64_pvo_enter: overflow");
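		/* NOTREACHED */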
2271190681Snwhitehorn		moea64_pte_overflow++;
2272190681Snwhitehorn	}
2273190681Snwhitehorn
2274204042Snwhitehorn	if (pm == kernel_pmap)
2275204042Snwhitehorn		isync();
2276204042Snwhitehorn
2277198378Snwhitehorn	UNLOCK_TABLE();
2278190681Snwhitehorn
2279209975Snwhitehorn#ifdef __powerpc64__
2280209975Snwhitehorn	/*
2281209975Snwhitehorn	 * Make sure all our bootstrap mappings are in the SLB as soon
2282209975Snwhitehorn	 * as virtual memory is switched on.
2283209975Snwhitehorn	 */
2284209975Snwhitehorn	if (!pmap_bootstrapped)
2285209975Snwhitehorn		moea64_bootstrap_slb_prefault(va, flags & PVO_LARGE);
2286209975Snwhitehorn#endif
2287209975Snwhitehorn
2288190681Snwhitehorn	return (first ? ENOENT : 0);
2289190681Snwhitehorn}
2290190681Snwhitehorn
2291190681Snwhitehornstatic void
2292216174Snwhitehornmoea64_pvo_remove(mmu_t mmu, struct pvo_entry *pvo)
2293190681Snwhitehorn{
2294216174Snwhitehorn	uintptr_t pt;
2295190681Snwhitehorn
2296190681Snwhitehorn	/*
2297190681Snwhitehorn	 * If there is an active pte entry, we need to deactivate it (and
2298190681Snwhitehorn	 * save the ref & cfg bits).
2299190681Snwhitehorn	 * save the ref & chg bits).
2300190681Snwhitehorn	LOCK_TABLE();
2301216174Snwhitehorn	pt = MOEA64_PVO_TO_PTE(mmu, pvo);
2302216174Snwhitehorn	if (pt != -1) {
2303216174Snwhitehorn		MOEA64_PTE_UNSET(mmu, pt, &pvo->pvo_pte.lpte, pvo->pvo_vpn);
2304190681Snwhitehorn		PVO_PTEGIDX_CLR(pvo);
2305190681Snwhitehorn	} else {
2306190681Snwhitehorn		moea64_pte_overflow--;
2307190681Snwhitehorn	}
2308190681Snwhitehorn
2309190681Snwhitehorn	/*
2310190681Snwhitehorn	 * Update our statistics.
2311190681Snwhitehorn	 */
2312190681Snwhitehorn	pvo->pvo_pmap->pm_stats.resident_count--;
2313204042Snwhitehorn	if (pvo->pvo_vaddr & PVO_WIRED)
2314190681Snwhitehorn		pvo->pvo_pmap->pm_stats.wired_count--;
2315190681Snwhitehorn
2316190681Snwhitehorn	/*
2317190681Snwhitehorn	 * Save the REF/CHG bits into their cache if the page is managed.
2318190681Snwhitehorn	 */
2319224746Skib	if ((pvo->pvo_vaddr & PVO_MANAGED) == PVO_MANAGED) {
2320190681Snwhitehorn		struct	vm_page *pg;
2321190681Snwhitehorn
2322190681Snwhitehorn		pg = PHYS_TO_VM_PAGE(pvo->pvo_pte.lpte.pte_lo & LPTE_RPGN);
2323190681Snwhitehorn		if (pg != NULL) {
2324190681Snwhitehorn			moea64_attr_save(pg, pvo->pvo_pte.lpte.pte_lo &
2325190681Snwhitehorn			    (LPTE_REF | LPTE_CHG));
2326190681Snwhitehorn		}
2327190681Snwhitehorn	}
2328190681Snwhitehorn
2329190681Snwhitehorn	/*
2330228412Snwhitehorn	 * Remove this PVO from the PV and pmap lists.
2331190681Snwhitehorn	 */
2332190681Snwhitehorn	LIST_REMOVE(pvo, pvo_vlink);
2333228412Snwhitehorn	LIST_REMOVE(pvo, pvo_plink);
2334190681Snwhitehorn
2335190681Snwhitehorn	/*
2336190681Snwhitehorn	 * Remove this from the overflow list and return it to the pool
2337190681Snwhitehorn	 * if we aren't going to reuse it.
2338190681Snwhitehorn	 */
2339190681Snwhitehorn	LIST_REMOVE(pvo, pvo_olink);
2340212363Snwhitehorn
2341212363Snwhitehorn	moea64_pvo_entries--;
2342212363Snwhitehorn	moea64_pvo_remove_calls++;
2343212363Snwhitehorn
2344204694Snwhitehorn	UNLOCK_TABLE();
2345204694Snwhitehorn
2346190681Snwhitehorn	if (!(pvo->pvo_vaddr & PVO_BOOTSTRAP))
2347204042Snwhitehorn		uma_zfree((pvo->pvo_vaddr & PVO_MANAGED) ? moea64_mpvo_zone :
2348190681Snwhitehorn		    moea64_upvo_zone, pvo);
2349190681Snwhitehorn}
2350190681Snwhitehorn
2351190681Snwhitehornstatic struct pvo_entry *
2352209975Snwhitehornmoea64_pvo_find_va(pmap_t pm, vm_offset_t va)
2353190681Snwhitehorn{
2354190681Snwhitehorn	struct		pvo_entry *pvo;
2355190681Snwhitehorn	int		ptegidx;
2356190681Snwhitehorn	uint64_t	vsid;
2357209975Snwhitehorn	#ifdef __powerpc64__
2358212715Snwhitehorn	uint64_t	slbv;
2359190681Snwhitehorn
2360212715Snwhitehorn	if (pm == kernel_pmap) {
2361212715Snwhitehorn		slbv = kernel_va_to_slbv(va);
2362212715Snwhitehorn	} else {
2363212715Snwhitehorn		struct slb *slb;
2364212715Snwhitehorn		slb = user_va_to_slb_entry(pm, va);
2365212715Snwhitehorn		/* The page is not mapped if the segment isn't */
2366212715Snwhitehorn		if (slb == NULL)
2367212715Snwhitehorn			return (NULL);
2368212715Snwhitehorn		slbv = slb->slbv;
2369212715Snwhitehorn	}
2370209975Snwhitehorn
2371212715Snwhitehorn	vsid = (slbv & SLBV_VSID_MASK) >> SLBV_VSID_SHIFT;
2372212715Snwhitehorn	if (slbv & SLBV_L)
2373209975Snwhitehorn		va &= ~moea64_large_page_mask;
2374209975Snwhitehorn	else
2375209975Snwhitehorn		va &= ~ADDR_POFF;
2376212715Snwhitehorn	ptegidx = va_to_pteg(vsid, va, slbv & SLBV_L);
2377209975Snwhitehorn	#else
2378190681Snwhitehorn	va &= ~ADDR_POFF;
2379190681Snwhitehorn	vsid = va_to_vsid(pm, va);
2380209975Snwhitehorn	ptegidx = va_to_pteg(vsid, va, 0);
2381209975Snwhitehorn	#endif
2382190681Snwhitehorn
2383190681Snwhitehorn	LOCK_TABLE();
2384190681Snwhitehorn	LIST_FOREACH(pvo, &moea64_pvo_table[ptegidx], pvo_olink) {
2385209975Snwhitehorn		if (pvo->pvo_pmap == pm && PVO_VADDR(pvo) == va)
2386190681Snwhitehorn			break;
2387190681Snwhitehorn	}
2388190681Snwhitehorn	UNLOCK_TABLE();
2389190681Snwhitehorn
2390190681Snwhitehorn	return (pvo);
2391190681Snwhitehorn}
2392190681Snwhitehorn
2393190681Snwhitehornstatic boolean_t
2394216174Snwhitehornmoea64_query_bit(mmu_t mmu, vm_page_t m, u_int64_t ptebit)
2395190681Snwhitehorn{
2396190681Snwhitehorn	struct	pvo_entry *pvo;
2397216174Snwhitehorn	uintptr_t pt;
2398190681Snwhitehorn
2399190681Snwhitehorn	if (moea64_attr_fetch(m) & ptebit)
2400190681Snwhitehorn		return (TRUE);
2401190681Snwhitehorn
2402208574Salc	vm_page_lock_queues();
2403205370Snwhitehorn
2404190681Snwhitehorn	LIST_FOREACH(pvo, vm_page_to_pvoh(m), pvo_vlink) {
2405190681Snwhitehorn
2406190681Snwhitehorn		/*
2407190681Snwhitehorn		 * See if we saved the bit off.  If so, cache it and return
2408190681Snwhitehorn		 * success.
2409190681Snwhitehorn		 */
2410190681Snwhitehorn		if (pvo->pvo_pte.lpte.pte_lo & ptebit) {
2411190681Snwhitehorn			moea64_attr_save(m, ptebit);
2412208574Salc			vm_page_unlock_queues();
2413190681Snwhitehorn			return (TRUE);
2414190681Snwhitehorn		}
2415190681Snwhitehorn	}
2416190681Snwhitehorn
	/*
	 * No luck; now go through the hard part of looking at the PTEs
	 * themselves.  Sync so that any pending REF/CHG bits are flushed to
	 * the PTEs.
	 */
	powerpc_sync();
	LIST_FOREACH(pvo, vm_page_to_pvoh(m), pvo_vlink) {

		/*
		 * See if this pvo has a valid PTE.  If so, fetch the
		 * REF/CHG bits from the valid PTE.  If the appropriate
		 * ptebit is set, cache it and return success.
		 */
		LOCK_TABLE();
		pt = MOEA64_PVO_TO_PTE(mmu, pvo);
		if (pt != -1) {
			MOEA64_PTE_SYNCH(mmu, pt, &pvo->pvo_pte.lpte);
			if (pvo->pvo_pte.lpte.pte_lo & ptebit) {
				UNLOCK_TABLE();

				moea64_attr_save(m, ptebit);
				vm_page_unlock_queues();
				return (TRUE);
			}
		}
		UNLOCK_TABLE();
	}

	vm_page_unlock_queues();
	return (FALSE);
}

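/*
 * Clear 'ptebit' in every mapping of page 'm', in both the cached PTE
 * images and the hardware page table.  Returns the number of mappings in
 * which the bit was found set.
 */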
static u_int
moea64_clear_bit(mmu_t mmu, vm_page_t m, u_int64_t ptebit)
{
	u_int	count;
	struct	pvo_entry *pvo;
	uintptr_t pt;

	vm_page_lock_queues();

	/*
	 * Clear the cached value.
	 */
	moea64_attr_clear(m, ptebit);

	/*
	 * Sync so that any pending REF/CHG bits are flushed to the PTEs (so
	 * we can reset the right ones).  Note that since the pvo entries and
	 * list heads are accessed via BAT0 and are never placed in the page
	 * table, we don't have to worry about further accesses setting the
	 * REF/CHG bits.
	 */
	powerpc_sync();

	/*
	 * For each pvo entry, clear the pvo's ptebit.  If this pvo has a
	 * valid pte, clear the ptebit from the valid pte as well.
	 */
	count = 0;
	LIST_FOREACH(pvo, vm_page_to_pvoh(m), pvo_vlink) {

		LOCK_TABLE();
		pt = MOEA64_PVO_TO_PTE(mmu, pvo);
		if (pt != -1) {
			MOEA64_PTE_SYNCH(mmu, pt, &pvo->pvo_pte.lpte);
			if (pvo->pvo_pte.lpte.pte_lo & ptebit) {
				count++;
				MOEA64_PTE_CLEAR(mmu, pt, &pvo->pvo_pte.lpte,
				    pvo->pvo_vpn, ptebit);
			}
		}
		pvo->pvo_pte.lpte.pte_lo &= ~ptebit;
		UNLOCK_TABLE();
	}

	vm_page_unlock_queues();
	return (count);
}

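/*
 * Determine whether the physical range [pa, pa + size) is mapped 1:1 in
 * the kernel pmap.  Returns 0 if every page is mapped at its own physical
 * address and EFAULT otherwise.
 */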
boolean_t
moea64_dev_direct_mapped(mmu_t mmu, vm_offset_t pa, vm_size_t size)
{
	struct pvo_entry *pvo;
	vm_offset_t ppa;
	int error = 0;

	PMAP_LOCK(kernel_pmap);
	for (ppa = pa & ~ADDR_POFF; ppa < pa + size; ppa += PAGE_SIZE) {
		pvo = moea64_pvo_find_va(kernel_pmap, ppa);
		if (pvo == NULL ||
		    (pvo->pvo_pte.lpte.pte_lo & LPTE_RPGN) != ppa) {
			error = EFAULT;
			break;
		}
	}
	PMAP_UNLOCK(kernel_pmap);

	return (error);
}

/*
 * Map a set of physical memory pages into the kernel virtual
 * address space.  Return a pointer to where it is mapped.  This
 * routine is intended to be used for mapping device memory,
 * NOT real memory.
 */
void *
moea64_mapdev_attr(mmu_t mmu, vm_offset_t pa, vm_size_t size, vm_memattr_t ma)
{
	vm_offset_t va, tmpva, ppa, offset;

	ppa = trunc_page(pa);
	offset = pa & PAGE_MASK;
	size = roundup(offset + size, PAGE_SIZE);

	va = kmem_alloc_nofault(kernel_map, size);

	if (!va)
		panic("moea64_mapdev: Couldn't alloc kernel virtual memory");

	for (tmpva = va; size > 0;) {
		moea64_kenter_attr(mmu, tmpva, ppa, ma);
		size -= PAGE_SIZE;
		tmpva += PAGE_SIZE;
		ppa += PAGE_SIZE;
	}

	return ((void *)(va + offset));
}

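/* Map device memory with the default memory attribute. */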
void *
moea64_mapdev(mmu_t mmu, vm_offset_t pa, vm_size_t size)
{

	return (moea64_mapdev_attr(mmu, pa, size, VM_MEMATTR_DEFAULT));
}

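/* Release a KVA range previously allocated by moea64_mapdev(). */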
void
moea64_unmapdev(mmu_t mmu, vm_offset_t va, vm_size_t size)
{
	vm_offset_t base, offset;

	base = trunc_page(va);
	offset = va & PAGE_MASK;
	size = roundup(offset + size, PAGE_SIZE);

	kmem_free(kernel_map, base, size);
}

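/*
 * Synchronize the instruction cache with the contents of [va, va + sz)
 * in pmap 'pm', one page at a time, skipping any page that is unmapped
 * or mapped cache-inhibited.
 */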
void
moea64_sync_icache(mmu_t mmu, pmap_t pm, vm_offset_t va, vm_size_t sz)
{
	struct pvo_entry *pvo;
	vm_offset_t lim;
	vm_paddr_t pa;
	vm_size_t len;

	PMAP_LOCK(pm);
	while (sz > 0) {
		/*
		 * Round to the first page boundary strictly above 'va' so
		 * that 'len' is never zero, even when 'va' is already
		 * page-aligned; round_page(va) alone would loop forever in
		 * that case.
		 */
		lim = round_page(va + 1);
		len = MIN(lim - va, sz);
		pvo = moea64_pvo_find_va(pm, va & ~ADDR_POFF);
		if (pvo != NULL && !(pvo->pvo_pte.lpte.pte_lo & LPTE_I)) {
			pa = (pvo->pvo_pte.lpte.pte_lo & LPTE_RPGN) |
			    (va & ADDR_POFF);
			moea64_syncicache(mmu, pm, va, pa, len);
		}
		va += len;
		sz -= len;
	}
	PMAP_UNLOCK(pm);
}