mmu_oea64.c revision 215067
/*-
 * Copyright (c) 2001 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Matt Thomas <matt@3am-software.com> of Allegro Networks, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *        This product includes software developed by the NetBSD
 *        Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */
/*-
 * Copyright (C) 1995, 1996 Wolfgang Solfrank.
 * Copyright (C) 1995, 1996 TooLs GmbH.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by TooLs GmbH.
 * 4. The name of TooLs GmbH may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY TOOLS GMBH ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL TOOLS GMBH BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
 * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
 * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * $NetBSD: pmap.c,v 1.28 2000/03/26 20:42:36 kleink Exp $
 */
/*-
 * Copyright (C) 2001 Benno Rice.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY Benno Rice ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL TOOLS GMBH BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
 * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
 * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: head/sys/powerpc/aim/mmu_oea64.c 215067 2010-11-09 23:53:47Z nwhitehorn $");

/*
 * Manages physical address maps.
 *
 * In addition to hardware address maps, this module is called upon to
 * provide software-use-only maps which may or may not be stored in the
 * same form as hardware maps.  These pseudo-maps are used to store
 * intermediate results from copy operations to and from address spaces.
 *
 * Since the information managed by this module is also stored by the
 * logical address mapping module, this module may throw away valid virtual
 * to physical mappings at almost any time.  However, invalidations of
 * mappings must be done as requested.
 *
 * In order to cope with hardware architectures which make virtual to
 * physical map invalidates expensive, this module may delay invalidate
 * reduced protection operations until such time as they are actually
 * necessary.  This module is given full information as to which processors
 * are currently using which maps, and to when physical maps must be made
 * correct.
 */

#include "opt_kstack_pages.h"

#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/lock.h>
#include <sys/msgbuf.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/sysctl.h>
#include <sys/systm.h>
#include <sys/vmmeter.h>

#include <sys/kdb.h>

#include <dev/ofw/openfirm.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/vm_kern.h>
#include <vm/vm_page.h>
#include <vm/vm_map.h>
#include <vm/vm_object.h>
#include <vm/vm_extern.h>
#include <vm/vm_pageout.h>
#include <vm/vm_pager.h>
#include <vm/uma.h>

#include <machine/_inttypes.h>
#include <machine/cpu.h>
#include <machine/platform.h>
#include <machine/frame.h>
#include <machine/md_var.h>
#include <machine/psl.h>
#include <machine/bat.h>
#include <machine/hid.h>
#include <machine/pte.h>
#include <machine/sr.h>
#include <machine/trap.h>
#include <machine/mmuvar.h>

#include "mmu_if.h"

#define	MOEA_DEBUG

#define TODO	panic("%s: not implemented", __func__);
void moea64_release_vsid(uint64_t vsid);
uintptr_t moea64_get_unique_vsid(void);

static __inline register_t
cntlzd(volatile register_t a) {
	register_t b;
	__asm ("cntlzd %0, %1" : "=r"(b) : "r"(a));
	return b;
}

#define	PTESYNC()	__asm __volatile("ptesync");
#define	TLBSYNC()	__asm __volatile("tlbsync; ptesync");
#define	SYNC()		__asm __volatile("sync");
#define	EIEIO()		__asm __volatile("eieio");

/*
 * The tlbie instruction must be executed in 64-bit mode
 * so we have to twiddle MSR[SF] around every invocation.
 * Just to add to the fun, exceptions must be off as well
 * so that we can't trap in 64-bit mode. What a pain.
 */
struct mtx	tlbie_mutex;

static __inline void
TLBIE(uint64_t vpn) {
#ifndef __powerpc64__
	register_t vpn_hi, vpn_lo;
	register_t msr;
	register_t scratch;
#endif

	vpn <<= ADDR_PIDX_SHFT;
	vpn &= ~(0xffffULL << 48);

	mtx_lock_spin(&tlbie_mutex);
#ifdef __powerpc64__
	__asm __volatile("\
	    ptesync; \
	    tlbie %0; \
	    eieio; \
	    tlbsync; \
	    ptesync;"
	:: "r"(vpn) : "memory");
#else
	vpn_hi = (uint32_t)(vpn >> 32);
	vpn_lo = (uint32_t)vpn;

	__asm __volatile("\
	    mfmsr %0; \
	    mr %1, %0; \
	    insrdi %1,%5,1,0; \
	    mtmsrd %1; isync; \
	    ptesync; \
	    \
	    sld %1,%2,%4; \
	    or %1,%1,%3; \
	    tlbie %1; \
	    \
	    mtmsrd %0; isync; \
	    eieio; \
	    tlbsync; \
	    ptesync;"
	: "=r"(msr), "=r"(scratch) : "r"(vpn_hi), "r"(vpn_lo), "r"(32), "r"(1)
	    : "memory");
#endif
	mtx_unlock_spin(&tlbie_mutex);
}
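
/*
 * Illustrative usage (editorial note, not from the original sources):
 * callers pass a virtual page number, e.g. TLBIE(pvo->pvo_vpn). The spin
 * mutex above serializes invocations, since only one tlbie sequence may
 * be in flight at a time (see the tlbie_mutex comment in
 * moea64_bootstrap() below).
 */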

#define DISABLE_TRANS(msr)	msr = mfmsr(); mtmsr(msr & ~PSL_DR); isync()
#define ENABLE_TRANS(msr)	mtmsr(msr); isync()
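
/*
 * Typical bracketing pattern, as used in moea64_bootstrap() below:
 *
 *	register_t msr;
 *
 *	DISABLE_TRANS(msr);
 *	bzero((void *)moea64_pteg_table, size);
 *	ENABLE_TRANS(msr);
 *
 * Only data relocation (MSR[DR]) is toggled; instruction relocation is
 * left alone.
 */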

#define	VSID_MAKE(sr, hash)	((sr) | (((hash) & 0xfffff) << 4))
#define	VSID_TO_HASH(vsid)	(((vsid) >> 4) & 0xfffff)
#define	VSID_HASH_MASK		0x0000007fffffffffULL
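
/*
 * Example: VSID_MAKE(sr, hash) places the low 20 bits of 'hash' in bits
 * 4-23, above the 4-bit 'sr' field, so VSID_TO_HASH() recovers it:
 * VSID_TO_HASH(VSID_MAKE(0, 0x12345)) == 0x12345.
 */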

#define	PVO_PTEGIDX_MASK	0x007UL		/* which PTEG slot */
#define	PVO_PTEGIDX_VALID	0x008UL		/* slot is valid */
#define	PVO_WIRED		0x010UL		/* PVO entry is wired */
#define	PVO_MANAGED		0x020UL		/* PVO entry is managed */
#define	PVO_BOOTSTRAP		0x080UL		/* PVO entry allocated during
						   bootstrap */
#define PVO_FAKE		0x100UL		/* fictitious phys page */
#define PVO_LARGE		0x200UL		/* large page */
#define	PVO_VADDR(pvo)		((pvo)->pvo_vaddr & ~ADDR_POFF)
#define PVO_ISFAKE(pvo)		((pvo)->pvo_vaddr & PVO_FAKE)
#define	PVO_PTEGIDX_GET(pvo)	((pvo)->pvo_vaddr & PVO_PTEGIDX_MASK)
#define	PVO_PTEGIDX_ISSET(pvo)	((pvo)->pvo_vaddr & PVO_PTEGIDX_VALID)
#define	PVO_PTEGIDX_CLR(pvo)	\
	((void)((pvo)->pvo_vaddr &= ~(PVO_PTEGIDX_VALID|PVO_PTEGIDX_MASK)))
#define	PVO_PTEGIDX_SET(pvo, i)	\
	((void)((pvo)->pvo_vaddr |= (i)|PVO_PTEGIDX_VALID))
#define	PVO_VSID(pvo)		((pvo)->pvo_vpn >> 16)
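
/*
 * These flags live in the low-order (sub-page) bits of pvo_vaddr, which
 * PVO_VADDR() masks off. For example, a PVO sitting in PTEG slot 5 has
 * (PVO_PTEGIDX_VALID | 5) set in its low bits; PVO_PTEGIDX_GET() then
 * yields 5.
 */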

#define	MOEA_PVO_CHECK(pvo)

#define LOCK_TABLE() mtx_lock(&moea64_table_mutex)
#define UNLOCK_TABLE() mtx_unlock(&moea64_table_mutex);
#define ASSERT_TABLE_LOCK() mtx_assert(&moea64_table_mutex, MA_OWNED)

struct ofw_map {
	cell_t	om_va;
	cell_t	om_len;
	cell_t	om_pa_hi;
	cell_t	om_pa_lo;
	cell_t	om_mode;
};

/*
 * Map of physical memory regions.
 */
static struct	mem_region *regions;
static struct	mem_region *pregions;
static u_int	phys_avail_count;
static int	regions_sz, pregions_sz;

extern struct pmap ofw_pmap;

extern void bs_remap_earlyboot(void);


/*
 * Lock for the pteg and pvo tables.
 */
struct mtx	moea64_table_mutex;
struct mtx	moea64_slb_mutex;

/*
 * PTEG data.
 */
static struct	lpteg *moea64_pteg_table;
u_int		moea64_pteg_count;
u_int		moea64_pteg_mask;

/*
 * PVO data.
 */
struct	pvo_head *moea64_pvo_table;		/* pvo entries by pteg index */
struct	pvo_head moea64_pvo_kunmanaged =	/* list of unmanaged pages */
    LIST_HEAD_INITIALIZER(moea64_pvo_kunmanaged);

uma_zone_t	moea64_upvo_zone; /* zone for pvo entries for unmanaged pages */
uma_zone_t	moea64_mpvo_zone; /* zone for pvo entries for managed pages */

#define	BPVO_POOL_SIZE	327680
static struct	pvo_entry *moea64_bpvo_pool;
static int	moea64_bpvo_pool_index = 0;

#define	VSID_NBPW	(sizeof(u_int32_t) * 8)
#ifdef __powerpc64__
#define	NVSIDS		(NPMAPS * 16)
#define VSID_HASHMASK	0xffffffffUL
#else
#define NVSIDS		NPMAPS
#define VSID_HASHMASK	0xfffffUL
#endif
static u_int	moea64_vsid_bitmap[NVSIDS / VSID_NBPW];

static boolean_t moea64_initialized = FALSE;

/*
 * Statistics.
 */
u_int	moea64_pte_valid = 0;
u_int	moea64_pte_overflow = 0;
u_int	moea64_pvo_entries = 0;
u_int	moea64_pvo_enter_calls = 0;
u_int	moea64_pvo_remove_calls = 0;
SYSCTL_INT(_machdep, OID_AUTO, moea64_pte_valid, CTLFLAG_RD,
    &moea64_pte_valid, 0, "");
SYSCTL_INT(_machdep, OID_AUTO, moea64_pte_overflow, CTLFLAG_RD,
    &moea64_pte_overflow, 0, "");
SYSCTL_INT(_machdep, OID_AUTO, moea64_pvo_entries, CTLFLAG_RD,
    &moea64_pvo_entries, 0, "");
SYSCTL_INT(_machdep, OID_AUTO, moea64_pvo_enter_calls, CTLFLAG_RD,
    &moea64_pvo_enter_calls, 0, "");
SYSCTL_INT(_machdep, OID_AUTO, moea64_pvo_remove_calls, CTLFLAG_RD,
    &moea64_pvo_remove_calls, 0, "");

vm_offset_t	moea64_scratchpage_va[2];
uint64_t	moea64_scratchpage_vpn[2];
struct	lpte 	*moea64_scratchpage_pte[2];
struct	mtx	moea64_scratchpage_mtx;

uint64_t 	moea64_large_page_mask = 0;
int		moea64_large_page_size = 0;
int		moea64_large_page_shift = 0;

/*
 * Allocate physical memory for use in moea64_bootstrap.
 */
static vm_offset_t	moea64_bootstrap_alloc(vm_size_t, u_int);

/*
 * PTE calls.
 */
static int		moea64_pte_insert(u_int, struct lpte *);

/*
 * PVO calls.
 */
static int	moea64_pvo_enter(pmap_t, uma_zone_t, struct pvo_head *,
		    vm_offset_t, vm_offset_t, uint64_t, int);
static void	moea64_pvo_remove(struct pvo_entry *);
static struct	pvo_entry *moea64_pvo_find_va(pmap_t, vm_offset_t);
static struct	lpte *moea64_pvo_to_pte(const struct pvo_entry *);

/*
 * Utility routines.
 */
static void		moea64_bootstrap(mmu_t mmup,
			    vm_offset_t kernelstart, vm_offset_t kernelend);
static void		moea64_cpu_bootstrap(mmu_t, int ap);
static void		moea64_enter_locked(pmap_t, vm_offset_t, vm_page_t,
			    vm_prot_t, boolean_t);
static boolean_t	moea64_query_bit(vm_page_t, u_int64_t);
static u_int		moea64_clear_bit(vm_page_t, u_int64_t);
static void		moea64_kremove(mmu_t, vm_offset_t);
static void		moea64_syncicache(pmap_t pmap, vm_offset_t va,
			    vm_offset_t pa, vm_size_t sz);
static void		tlbia(void);
#ifdef __powerpc64__
static void		slbia(void);
#endif

/*
 * Kernel MMU interface
 */
void moea64_change_wiring(mmu_t, pmap_t, vm_offset_t, boolean_t);
void moea64_clear_modify(mmu_t, vm_page_t);
void moea64_clear_reference(mmu_t, vm_page_t);
void moea64_copy_page(mmu_t, vm_page_t, vm_page_t);
void moea64_enter(mmu_t, pmap_t, vm_offset_t, vm_page_t, vm_prot_t, boolean_t);
void moea64_enter_object(mmu_t, pmap_t, vm_offset_t, vm_offset_t, vm_page_t,
    vm_prot_t);
void moea64_enter_quick(mmu_t, pmap_t, vm_offset_t, vm_page_t, vm_prot_t);
vm_paddr_t moea64_extract(mmu_t, pmap_t, vm_offset_t);
vm_page_t moea64_extract_and_hold(mmu_t, pmap_t, vm_offset_t, vm_prot_t);
void moea64_init(mmu_t);
boolean_t moea64_is_modified(mmu_t, vm_page_t);
boolean_t moea64_is_prefaultable(mmu_t, pmap_t, vm_offset_t);
boolean_t moea64_is_referenced(mmu_t, vm_page_t);
boolean_t moea64_ts_referenced(mmu_t, vm_page_t);
vm_offset_t moea64_map(mmu_t, vm_offset_t *, vm_offset_t, vm_offset_t, int);
boolean_t moea64_page_exists_quick(mmu_t, pmap_t, vm_page_t);
int moea64_page_wired_mappings(mmu_t, vm_page_t);
void moea64_pinit(mmu_t, pmap_t);
void moea64_pinit0(mmu_t, pmap_t);
void moea64_protect(mmu_t, pmap_t, vm_offset_t, vm_offset_t, vm_prot_t);
void moea64_qenter(mmu_t, vm_offset_t, vm_page_t *, int);
void moea64_qremove(mmu_t, vm_offset_t, int);
void moea64_release(mmu_t, pmap_t);
void moea64_remove(mmu_t, pmap_t, vm_offset_t, vm_offset_t);
void moea64_remove_all(mmu_t, vm_page_t);
void moea64_remove_write(mmu_t, vm_page_t);
void moea64_zero_page(mmu_t, vm_page_t);
void moea64_zero_page_area(mmu_t, vm_page_t, int, int);
void moea64_zero_page_idle(mmu_t, vm_page_t);
void moea64_activate(mmu_t, struct thread *);
void moea64_deactivate(mmu_t, struct thread *);
void *moea64_mapdev(mmu_t, vm_offset_t, vm_size_t);
void *moea64_mapdev_attr(mmu_t, vm_offset_t, vm_size_t, vm_memattr_t);
void moea64_unmapdev(mmu_t, vm_offset_t, vm_size_t);
vm_offset_t moea64_kextract(mmu_t, vm_offset_t);
void moea64_page_set_memattr(mmu_t, vm_page_t m, vm_memattr_t ma);
void moea64_kenter_attr(mmu_t, vm_offset_t, vm_offset_t, vm_memattr_t ma);
void moea64_kenter(mmu_t, vm_offset_t, vm_offset_t);
boolean_t moea64_dev_direct_mapped(mmu_t, vm_offset_t, vm_size_t);
static void moea64_sync_icache(mmu_t, pmap_t, vm_offset_t, vm_size_t);

static mmu_method_t moea64_methods[] = {
	MMUMETHOD(mmu_change_wiring,	moea64_change_wiring),
	MMUMETHOD(mmu_clear_modify,	moea64_clear_modify),
	MMUMETHOD(mmu_clear_reference,	moea64_clear_reference),
	MMUMETHOD(mmu_copy_page,	moea64_copy_page),
	MMUMETHOD(mmu_enter,		moea64_enter),
	MMUMETHOD(mmu_enter_object,	moea64_enter_object),
	MMUMETHOD(mmu_enter_quick,	moea64_enter_quick),
	MMUMETHOD(mmu_extract,		moea64_extract),
	MMUMETHOD(mmu_extract_and_hold,	moea64_extract_and_hold),
	MMUMETHOD(mmu_init,		moea64_init),
	MMUMETHOD(mmu_is_modified,	moea64_is_modified),
	MMUMETHOD(mmu_is_prefaultable,	moea64_is_prefaultable),
	MMUMETHOD(mmu_is_referenced,	moea64_is_referenced),
	MMUMETHOD(mmu_ts_referenced,	moea64_ts_referenced),
	MMUMETHOD(mmu_map,     		moea64_map),
	MMUMETHOD(mmu_page_exists_quick,moea64_page_exists_quick),
	MMUMETHOD(mmu_page_wired_mappings,moea64_page_wired_mappings),
	MMUMETHOD(mmu_pinit,		moea64_pinit),
	MMUMETHOD(mmu_pinit0,		moea64_pinit0),
	MMUMETHOD(mmu_protect,		moea64_protect),
	MMUMETHOD(mmu_qenter,		moea64_qenter),
	MMUMETHOD(mmu_qremove,		moea64_qremove),
	MMUMETHOD(mmu_release,		moea64_release),
	MMUMETHOD(mmu_remove,		moea64_remove),
	MMUMETHOD(mmu_remove_all,      	moea64_remove_all),
	MMUMETHOD(mmu_remove_write,	moea64_remove_write),
	MMUMETHOD(mmu_sync_icache,	moea64_sync_icache),
	MMUMETHOD(mmu_zero_page,       	moea64_zero_page),
	MMUMETHOD(mmu_zero_page_area,	moea64_zero_page_area),
	MMUMETHOD(mmu_zero_page_idle,	moea64_zero_page_idle),
	MMUMETHOD(mmu_activate,		moea64_activate),
	MMUMETHOD(mmu_deactivate,      	moea64_deactivate),
	MMUMETHOD(mmu_page_set_memattr,	moea64_page_set_memattr),

	/* Internal interfaces */
	MMUMETHOD(mmu_bootstrap,       	moea64_bootstrap),
	MMUMETHOD(mmu_cpu_bootstrap,   	moea64_cpu_bootstrap),
	MMUMETHOD(mmu_mapdev,		moea64_mapdev),
	MMUMETHOD(mmu_mapdev_attr,	moea64_mapdev_attr),
	MMUMETHOD(mmu_unmapdev,		moea64_unmapdev),
	MMUMETHOD(mmu_kextract,		moea64_kextract),
	MMUMETHOD(mmu_kenter,		moea64_kenter),
	MMUMETHOD(mmu_kenter_attr,	moea64_kenter_attr),
	MMUMETHOD(mmu_dev_direct_mapped,moea64_dev_direct_mapped),

	{ 0, 0 }
};

MMU_DEF(oea64_mmu, MMU_TYPE_G5, moea64_methods, 0);

static __inline u_int
va_to_pteg(uint64_t vsid, vm_offset_t addr, int large)
{
	uint64_t hash;
	int shift;

	shift = large ? moea64_large_page_shift : ADDR_PIDX_SHFT;
	hash = (vsid & VSID_HASH_MASK) ^ (((uint64_t)addr & ADDR_PIDX) >>
	    shift);
	return (hash & moea64_pteg_mask);
}
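
/*
 * The value above is the primary PTEG index; the secondary group is its
 * complement, obtained with "ptegidx ^ moea64_pteg_mask" (see the
 * LPTE_HID handling in moea64_bootstrap() below).
 */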

static __inline struct pvo_head *
vm_page_to_pvoh(vm_page_t m)
{

	return (&m->md.mdpg_pvoh);
}

static __inline void
moea64_attr_clear(vm_page_t m, u_int64_t ptebit)
{

	mtx_assert(&vm_page_queue_mtx, MA_OWNED);
	m->md.mdpg_attrs &= ~ptebit;
}

static __inline u_int64_t
moea64_attr_fetch(vm_page_t m)
{

	return (m->md.mdpg_attrs);
}

static __inline void
moea64_attr_save(vm_page_t m, u_int64_t ptebit)
{

	mtx_assert(&vm_page_queue_mtx, MA_OWNED);
	m->md.mdpg_attrs |= ptebit;
}

static __inline void
moea64_pte_create(struct lpte *pt, uint64_t vsid, vm_offset_t va,
    uint64_t pte_lo, int flags)
{

	ASSERT_TABLE_LOCK();

	/*
	 * Construct a PTE.  Default to IMB initially.  Valid bit only gets
	 * set when the real pte is set in memory.
	 *
	 * Note: Don't set the valid bit for correct operation of tlb update.
	 */
	pt->pte_hi = (vsid << LPTE_VSID_SHIFT) |
	    (((uint64_t)(va & ADDR_PIDX) >> ADDR_API_SHFT64) & LPTE_API);

	if (flags & PVO_LARGE)
		pt->pte_hi |= LPTE_BIG;

	pt->pte_lo = pte_lo;
}

static __inline void
moea64_pte_synch(struct lpte *pt, struct lpte *pvo_pt)
{

	ASSERT_TABLE_LOCK();

	pvo_pt->pte_lo |= pt->pte_lo & (LPTE_REF | LPTE_CHG);
}

static __inline void
moea64_pte_clear(struct lpte *pt, uint64_t vpn, u_int64_t ptebit)
{
	ASSERT_TABLE_LOCK();

	/*
	 * As shown in Section 7.6.3.2.3
	 */
	pt->pte_lo &= ~ptebit;
	TLBIE(vpn);
}

static __inline void
moea64_pte_set(struct lpte *pt, struct lpte *pvo_pt)
{

	ASSERT_TABLE_LOCK();
	pvo_pt->pte_hi |= LPTE_VALID;

	/*
	 * Update the PTE as defined in section 7.6.3.1.
	 * Note that the REF/CHG bits are from pvo_pt and thus should have
	 * been saved so this routine can restore them (if desired).
	 */
	pt->pte_lo = pvo_pt->pte_lo;
	EIEIO();
	pt->pte_hi = pvo_pt->pte_hi;
	PTESYNC();
	moea64_pte_valid++;
}

static __inline void
moea64_pte_unset(struct lpte *pt, struct lpte *pvo_pt, uint64_t vpn)
{
	ASSERT_TABLE_LOCK();
	pvo_pt->pte_hi &= ~LPTE_VALID;

	/*
	 * Force the reg & chg bits back into the PTEs.
	 */
	SYNC();

	/*
	 * Invalidate the pte.
	 */
	pt->pte_hi &= ~LPTE_VALID;
	TLBIE(vpn);

	/*
	 * Save the reg & chg bits.
	 */
	moea64_pte_synch(pt, pvo_pt);
	moea64_pte_valid--;
}

static __inline void
moea64_pte_change(struct lpte *pt, struct lpte *pvo_pt, uint64_t vpn)
{

	/*
	 * Invalidate the PTE
	 */
	moea64_pte_unset(pt, pvo_pt, vpn);
	moea64_pte_set(pt, pvo_pt);
}
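
/*
 * Ordering note: moea64_pte_set() fills in pte_lo, issues eieio, and only
 * then writes pte_hi with LPTE_VALID, so the hardware never walks a
 * half-formed entry; moea64_pte_unset() clears LPTE_VALID and issues a
 * tlbie before the slot is reused. moea64_pte_change() is simply the
 * composition of the two.
 */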

static __inline uint64_t
moea64_calc_wimg(vm_offset_t pa, vm_memattr_t ma)
{
	uint64_t pte_lo;
	int i;

	if (ma != VM_MEMATTR_DEFAULT) {
		switch (ma) {
		case VM_MEMATTR_UNCACHEABLE:
			return (LPTE_I | LPTE_G);
		case VM_MEMATTR_WRITE_COMBINING:
		case VM_MEMATTR_WRITE_BACK:
		case VM_MEMATTR_PREFETCHABLE:
			return (LPTE_I);
		case VM_MEMATTR_WRITE_THROUGH:
			return (LPTE_W | LPTE_M);
		}
	}

	/*
	 * Assume the page is cache inhibited and access is guarded unless
	 * it's in our available memory array.
	 */
	pte_lo = LPTE_I | LPTE_G;
	for (i = 0; i < pregions_sz; i++) {
		if ((pa >= pregions[i].mr_start) &&
		    (pa < (pregions[i].mr_start + pregions[i].mr_size))) {
			pte_lo &= ~(LPTE_I | LPTE_G);
			pte_lo |= LPTE_M;
			break;
		}
	}

	return pte_lo;
}
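
/*
 * In practice, addresses inside a known physical memory region come back
 * as WIMG = M (coherent, cacheable) while device registers and any other
 * unlisted physical addresses come back as WIMG = I | G, unless the
 * caller requests a specific attribute via 'ma'.
 */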

/*
 * Quick sort callout for comparing memory regions.
 */
static int	mr_cmp(const void *a, const void *b);
static int	om_cmp(const void *a, const void *b);

static int
mr_cmp(const void *a, const void *b)
{
	const struct	mem_region *regiona;
	const struct	mem_region *regionb;

	regiona = a;
	regionb = b;
	if (regiona->mr_start < regionb->mr_start)
		return (-1);
	else if (regiona->mr_start > regionb->mr_start)
		return (1);
	else
		return (0);
}

static int
om_cmp(const void *a, const void *b)
{
	const struct	ofw_map *mapa;
	const struct	ofw_map *mapb;

	mapa = a;
	mapb = b;
	if (mapa->om_pa_hi < mapb->om_pa_hi)
		return (-1);
	else if (mapa->om_pa_hi > mapb->om_pa_hi)
		return (1);
	else if (mapa->om_pa_lo < mapb->om_pa_lo)
		return (-1);
	else if (mapa->om_pa_lo > mapb->om_pa_lo)
		return (1);
	else
		return (0);
}

static void
moea64_cpu_bootstrap(mmu_t mmup, int ap)
{
	int i = 0;
	#ifdef __powerpc64__
	struct slb *slb = PCPU_GET(slb);
	#endif

	/*
	 * Initialize segment registers and MMU
	 */

	mtmsr(mfmsr() & ~PSL_DR & ~PSL_IR); isync();

	/*
	 * Install kernel SLB entries
	 */

	#ifdef __powerpc64__
		slbia();

		for (i = 0; i < 64; i++) {
			if (!(slb[i].slbe & SLBE_VALID))
				continue;

			__asm __volatile ("slbmte %0, %1" ::
			    "r"(slb[i].slbv), "r"(slb[i].slbe));
		}
	#else
		for (i = 0; i < 16; i++)
			mtsrin(i << ADDR_SR_SHFT, kernel_pmap->pm_sr[i]);
	#endif

	/*
	 * Install page table
	 */

	__asm __volatile ("ptesync; mtsdr1 %0; isync"
	    :: "r"((uintptr_t)moea64_pteg_table
		     | (64 - cntlzd(moea64_pteg_mask >> 11))));
	tlbia();
}
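
/*
 * A sketch of the SDR1 value written above, assuming a naturally-aligned
 * table of 2^n PTEGs: the upper bits are the physical base of
 * moea64_pteg_table and the low bits are HTABSIZE, the number of hash
 * bits beyond the architectural minimum of 11. Since moea64_pteg_mask is
 * 2^n - 1, "64 - cntlzd(moea64_pteg_mask >> 11)" evaluates to n - 11.
 */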

static void
moea64_add_ofw_mappings(mmu_t mmup, phandle_t mmu, size_t sz)
{
	struct ofw_map	translations[sz/sizeof(struct ofw_map)];
	register_t	msr;
	vm_offset_t	off;
	vm_paddr_t	pa_base;
	int		i, ofw_mappings;

	bzero(translations, sz);
	if (OF_getprop(mmu, "translations", translations, sz) == -1)
		panic("moea64_bootstrap: can't get ofw translations");

	CTR0(KTR_PMAP, "moea64_add_ofw_mappings: translations");
	sz /= sizeof(*translations);
	qsort(translations, sz, sizeof (*translations), om_cmp);

	for (i = 0, ofw_mappings = 0; i < sz; i++) {
		CTR3(KTR_PMAP, "translation: pa=%#x va=%#x len=%#x",
		    (uint32_t)(translations[i].om_pa_lo), translations[i].om_va,
		    translations[i].om_len);

		if (translations[i].om_pa_lo % PAGE_SIZE)
			panic("OFW translation not page-aligned!");

		pa_base = translations[i].om_pa_lo;

	      #ifdef __powerpc64__
		pa_base += (vm_offset_t)translations[i].om_pa_hi << 32;
	      #else
		if (translations[i].om_pa_hi)
			panic("OFW translations above 32-bit boundary!");
	      #endif

		/* Now enter the pages for this mapping */

		DISABLE_TRANS(msr);
		for (off = 0; off < translations[i].om_len; off += PAGE_SIZE) {
			if (moea64_pvo_find_va(kernel_pmap,
			    translations[i].om_va + off) != NULL)
				continue;

			moea64_kenter(mmup, translations[i].om_va + off,
			    pa_base + off);

			ofw_mappings++;
		}
		ENABLE_TRANS(msr);
	}
}

#ifdef __powerpc64__
static void
moea64_probe_large_page(void)
{
	uint16_t pvr = mfpvr() >> 16;

	switch (pvr) {
	case IBM970:
	case IBM970FX:
	case IBM970MP:
		powerpc_sync(); isync();
		mtspr(SPR_HID4, mfspr(SPR_HID4) & ~HID4_970_DISABLE_LG_PG);
		powerpc_sync(); isync();

		/* FALLTHROUGH */
	case IBMCELLBE:
		moea64_large_page_size = 0x1000000; /* 16 MB */
		moea64_large_page_shift = 24;
		break;
	default:
		moea64_large_page_size = 0;
	}

	moea64_large_page_mask = moea64_large_page_size - 1;
}

static void
moea64_bootstrap_slb_prefault(vm_offset_t va, int large)
{
	struct slb *cache;
	struct slb entry;
	uint64_t esid, slbe;
	uint64_t i;

	cache = PCPU_GET(slb);
	esid = va >> ADDR_SR_SHFT;
	slbe = (esid << SLBE_ESID_SHIFT) | SLBE_VALID;

	for (i = 0; i < 64; i++) {
		if (cache[i].slbe == (slbe | i))
			return;
	}

	entry.slbe = slbe;
	entry.slbv = KERNEL_VSID(esid) << SLBV_VSID_SHIFT;
	if (large)
		entry.slbv |= SLBV_L;

	slb_insert_kernel(entry.slbe, entry.slbv);
}
#endif

static void
moea64_setup_direct_map(mmu_t mmup, vm_offset_t kernelstart,
    vm_offset_t kernelend)
{
	register_t msr;
	vm_paddr_t pa;
	vm_offset_t size, off;
	uint64_t pte_lo;
	int i;

	if (moea64_large_page_size == 0)
		hw_direct_map = 0;

	DISABLE_TRANS(msr);
	if (hw_direct_map) {
		PMAP_LOCK(kernel_pmap);
		for (i = 0; i < pregions_sz; i++) {
		  for (pa = pregions[i].mr_start; pa < pregions[i].mr_start +
		     pregions[i].mr_size; pa += moea64_large_page_size) {
			pte_lo = LPTE_M;

			/*
			 * Set memory access as guarded if prefetch within
			 * the page could exit the available physmem area.
			 */
			if (pa & moea64_large_page_mask) {
				pa &= moea64_large_page_mask;
				pte_lo |= LPTE_G;
			}
			if (pa + moea64_large_page_size >
			    pregions[i].mr_start + pregions[i].mr_size)
				pte_lo |= LPTE_G;

			moea64_pvo_enter(kernel_pmap, moea64_upvo_zone,
				    &moea64_pvo_kunmanaged, pa, pa,
				    pte_lo, PVO_WIRED | PVO_LARGE |
				    VM_PROT_EXECUTE);
		  }
		}
		PMAP_UNLOCK(kernel_pmap);
	} else {
		size = moea64_pteg_count * sizeof(struct lpteg);
		off = (vm_offset_t)(moea64_pteg_table);
		for (pa = off; pa < off + size; pa += PAGE_SIZE)
			moea64_kenter(mmup, pa, pa);
		size = sizeof(struct pvo_head) * moea64_pteg_count;
		off = (vm_offset_t)(moea64_pvo_table);
		for (pa = off; pa < off + size; pa += PAGE_SIZE)
			moea64_kenter(mmup, pa, pa);
		size = BPVO_POOL_SIZE*sizeof(struct pvo_entry);
		off = (vm_offset_t)(moea64_bpvo_pool);
		for (pa = off; pa < off + size; pa += PAGE_SIZE)
			moea64_kenter(mmup, pa, pa);

		/*
		 * Map certain important things, like ourselves.
		 *
		 * NOTE: We do not map the exception vector space. That code is
		 * used only in real mode, and leaving it unmapped allows us to
		 * catch NULL pointer dereferences, instead of making NULL a
		 * valid address.
		 */

		for (pa = kernelstart & ~PAGE_MASK; pa < kernelend;
		    pa += PAGE_SIZE)
			moea64_kenter(mmup, pa, pa);
	}
	ENABLE_TRANS(msr);
}

static void
moea64_bootstrap(mmu_t mmup, vm_offset_t kernelstart, vm_offset_t kernelend)
{
	ihandle_t	mmui;
	phandle_t	chosen;
	phandle_t	mmu;
	size_t		sz;
	int		i, j;
	vm_size_t	size, physsz, hwphyssz;
	vm_offset_t	pa, va;
	register_t	msr;
	void		*dpcpu;

#ifndef __powerpc64__
	/* We don't have a direct map since there is no BAT */
	hw_direct_map = 0;

	/* Make sure battable is zero, since we have no BAT */
	for (i = 0; i < 16; i++) {
		battable[i].batu = 0;
		battable[i].batl = 0;
	}
#else
	moea64_probe_large_page();

	/* Use a direct map if we have large page support */
	if (moea64_large_page_size > 0)
		hw_direct_map = 1;
	else
		hw_direct_map = 0;
#endif

	/* Get physical memory regions from firmware */
	mem_regions(&pregions, &pregions_sz, &regions, &regions_sz);
	CTR0(KTR_PMAP, "moea64_bootstrap: physical memory");

	qsort(pregions, pregions_sz, sizeof(*pregions), mr_cmp);
	if (sizeof(phys_avail)/sizeof(phys_avail[0]) < regions_sz)
		panic("moea64_bootstrap: phys_avail too small");
	qsort(regions, regions_sz, sizeof(*regions), mr_cmp);
	phys_avail_count = 0;
	physsz = 0;
	hwphyssz = 0;
	TUNABLE_ULONG_FETCH("hw.physmem", (u_long *) &hwphyssz);
	for (i = 0, j = 0; i < regions_sz; i++, j += 2) {
		CTR3(KTR_PMAP, "region: %#x - %#x (%#x)", regions[i].mr_start,
		    regions[i].mr_start + regions[i].mr_size,
		    regions[i].mr_size);
		if (hwphyssz != 0 &&
		    (physsz + regions[i].mr_size) >= hwphyssz) {
			if (physsz < hwphyssz) {
				phys_avail[j] = regions[i].mr_start;
				phys_avail[j + 1] = regions[i].mr_start +
				    hwphyssz - physsz;
				physsz = hwphyssz;
				phys_avail_count++;
			}
			break;
		}
		phys_avail[j] = regions[i].mr_start;
		phys_avail[j + 1] = regions[i].mr_start + regions[i].mr_size;
		phys_avail_count++;
		physsz += regions[i].mr_size;
	}

	/* Check for overlap with the kernel and exception vectors */
	for (j = 0; j < 2*phys_avail_count; j+=2) {
		if (phys_avail[j] < EXC_LAST)
			phys_avail[j] += EXC_LAST;

		if (kernelstart >= phys_avail[j] &&
		    kernelstart < phys_avail[j+1]) {
			if (kernelend < phys_avail[j+1]) {
				phys_avail[2*phys_avail_count] =
				    (kernelend & ~PAGE_MASK) + PAGE_SIZE;
				phys_avail[2*phys_avail_count + 1] =
				    phys_avail[j+1];
				phys_avail_count++;
			}

			phys_avail[j+1] = kernelstart & ~PAGE_MASK;
		}

		if (kernelend >= phys_avail[j] &&
		    kernelend < phys_avail[j+1]) {
			if (kernelstart > phys_avail[j]) {
				phys_avail[2*phys_avail_count] = phys_avail[j];
				phys_avail[2*phys_avail_count + 1] =
				    kernelstart & ~PAGE_MASK;
				phys_avail_count++;
			}

			phys_avail[j] = (kernelend & ~PAGE_MASK) + PAGE_SIZE;
		}
	}

	physmem = btoc(physsz);

	/*
	 * Allocate PTEG table.
	 */
#ifdef PTEGCOUNT
	moea64_pteg_count = PTEGCOUNT;
#else
	moea64_pteg_count = 0x1000;

	while (moea64_pteg_count < physmem)
		moea64_pteg_count <<= 1;

	moea64_pteg_count >>= 1;
#endif /* PTEGCOUNT */

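	/*
	 * Worked example (illustrative, assuming 4KB pages and a 16-byte
	 * struct lpte): with 2GB of RAM, physmem is 0x80000 pages, so the
	 * loop above stops at 0x80000 and the final shift leaves 0x40000
	 * PTEGs -- one eight-entry PTEG per two physical pages, or a 32MB
	 * table at 128 bytes per struct lpteg.
	 */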
	size = moea64_pteg_count * sizeof(struct lpteg);
	CTR2(KTR_PMAP, "moea64_bootstrap: %d PTEGs, %d bytes",
	    moea64_pteg_count, size);

	/*
	 * We now need to allocate memory, but any memory we touch must be
	 * mapped by the very page table we are about to allocate, and we
	 * have no BATs to fall back on. As a measure of last resort, drop
	 * to data real mode around these accesses. We do this a couple of
	 * times below.
	 */

	moea64_pteg_table = (struct lpteg *)moea64_bootstrap_alloc(size, size);
	DISABLE_TRANS(msr);
	bzero((void *)moea64_pteg_table, moea64_pteg_count * sizeof(struct lpteg));
	ENABLE_TRANS(msr);

	moea64_pteg_mask = moea64_pteg_count - 1;

	CTR1(KTR_PMAP, "moea64_bootstrap: PTEG table at %p", moea64_pteg_table);

	/*
	 * Allocate pv/overflow lists.
	 */
	size = sizeof(struct pvo_head) * moea64_pteg_count;

	moea64_pvo_table = (struct pvo_head *)moea64_bootstrap_alloc(size,
	    PAGE_SIZE);
	CTR1(KTR_PMAP, "moea64_bootstrap: PVO table at %p", moea64_pvo_table);

	DISABLE_TRANS(msr);
	for (i = 0; i < moea64_pteg_count; i++)
		LIST_INIT(&moea64_pvo_table[i]);
	ENABLE_TRANS(msr);

	/*
	 * Initialize the lock that synchronizes access to the pteg and pvo
	 * tables.
	 */
	mtx_init(&moea64_table_mutex, "pmap table", NULL, MTX_DEF |
	    MTX_RECURSE);
	mtx_init(&moea64_slb_mutex, "SLB table", NULL, MTX_DEF);

	/*
	 * Initialize the TLBIE lock. TLBIE can only be executed by one CPU.
	 */
	mtx_init(&tlbie_mutex, "tlbie mutex", NULL, MTX_SPIN);

	/*
	 * Initialise the unmanaged pvo pool.
	 */
	moea64_bpvo_pool = (struct pvo_entry *)moea64_bootstrap_alloc(
		BPVO_POOL_SIZE*sizeof(struct pvo_entry), 0);
	moea64_bpvo_pool_index = 0;

	/*
	 * Make sure kernel vsid is allocated as well as VSID 0.
	 */
	#ifndef __powerpc64__
	moea64_vsid_bitmap[(KERNEL_VSIDBITS & (NVSIDS - 1)) / VSID_NBPW]
		|= 1 << (KERNEL_VSIDBITS % VSID_NBPW);
	moea64_vsid_bitmap[0] |= 1;
	#endif

	/*
	 * Initialize the kernel pmap (which is statically allocated).
	 */
	#ifdef __powerpc64__
	for (i = 0; i < 64; i++) {
		pcpup->pc_slb[i].slbv = 0;
		pcpup->pc_slb[i].slbe = 0;
	}
	#else
	for (i = 0; i < 16; i++)
		kernel_pmap->pm_sr[i] = EMPTY_SEGMENT + i;
	#endif

	kernel_pmap->pmap_phys = kernel_pmap;
	kernel_pmap->pm_active = ~0;

	PMAP_LOCK_INIT(kernel_pmap);

	/*
	 * Now map in all the other buffers we allocated earlier
	 */

	moea64_setup_direct_map(mmup, kernelstart, kernelend);

	/*
	 * Set up the Open Firmware pmap and add its mappings if not in real
	 * mode.
	 */

	chosen = OF_finddevice("/chosen");
	if (chosen != -1 && OF_getprop(chosen, "mmu", &mmui, 4) != -1) {
	    #ifndef __powerpc64__
	    moea64_pinit(mmup, &ofw_pmap);

	    for (i = 0; i < 16; i++)
		ofw_pmap.pm_sr[i] = kernel_pmap->pm_sr[i];
	    #endif

	    if ((mmu = OF_instance_to_package(mmui)) == -1)
		panic("moea64_bootstrap: can't get mmu package");
	    if ((sz = OF_getproplen(mmu, "translations")) == -1)
		panic("moea64_bootstrap: can't get ofw translation count");
	    if (sz > 6144 /* tmpstksz - 2 KB headroom */)
		panic("moea64_bootstrap: too many ofw translations");

	    moea64_add_ofw_mappings(mmup, mmu, sz);
	}

#ifdef SMP
	TLBSYNC();
#endif

	/*
	 * Calculate the last available physical address.
	 */
	for (i = 0; phys_avail[i + 2] != 0; i += 2)
		;
	Maxmem = powerpc_btop(phys_avail[i + 1]);

	/*
	 * Initialize MMU and remap early physical mappings
	 */
	moea64_cpu_bootstrap(mmup,0);
	mtmsr(mfmsr() | PSL_DR | PSL_IR); isync();
	pmap_bootstrapped++;
	bs_remap_earlyboot();

	/*
	 * Set the start and end of kva.
	 */
	virtual_avail = VM_MIN_KERNEL_ADDRESS;
	virtual_end = VM_MAX_SAFE_KERNEL_ADDRESS;

	/*
	 * Map the entire KVA range into the SLB. We must not fault there.
	 */
	#ifdef __powerpc64__
	for (va = virtual_avail; va < virtual_end; va += SEGMENT_LENGTH)
		moea64_bootstrap_slb_prefault(va, 0);
	#endif

	/*
	 * Figure out how far we can extend virtual_end into segment 16
	 * without running into existing mappings. Segment 16 is guaranteed
	 * to contain neither RAM nor devices (at least on Apple hardware),
	 * but will generally contain some OFW mappings we should not
	 * step on.
	 */

	#ifndef __powerpc64__	/* KVA is in high memory on PPC64 */
	PMAP_LOCK(kernel_pmap);
	while (virtual_end < VM_MAX_KERNEL_ADDRESS &&
	    moea64_pvo_find_va(kernel_pmap, virtual_end+1) == NULL)
		virtual_end += PAGE_SIZE;
	PMAP_UNLOCK(kernel_pmap);
	#endif

	/*
	 * Allocate some things for page zeroing. We put this directly
	 * in the page table, marked with LPTE_LOCKED, to avoid any
	 * of the PVO book-keeping or other parts of the VM system
	 * from even knowing that this hack exists.
	 */

	if (!hw_direct_map) {
		mtx_init(&moea64_scratchpage_mtx, "pvo zero page", NULL,
		    MTX_DEF);
		for (i = 0; i < 2; i++) {
			struct lpte pt;
			uint64_t vsid;
			int pteidx, ptegidx;

			moea64_scratchpage_va[i] = (virtual_end+1) - PAGE_SIZE;
			virtual_end -= PAGE_SIZE;

			LOCK_TABLE();

			vsid = va_to_vsid(kernel_pmap,
			    moea64_scratchpage_va[i]);
			moea64_pte_create(&pt, vsid, moea64_scratchpage_va[i],
			    LPTE_NOEXEC, 0);
			pt.pte_hi |= LPTE_LOCKED;

			moea64_scratchpage_vpn[i] = (vsid << 16) |
			    ((moea64_scratchpage_va[i] & ADDR_PIDX) >>
			    ADDR_PIDX_SHFT);
			ptegidx = va_to_pteg(vsid, moea64_scratchpage_va[i], 0);
			pteidx = moea64_pte_insert(ptegidx, &pt);
			if (pt.pte_hi & LPTE_HID)
				ptegidx ^= moea64_pteg_mask;

			moea64_scratchpage_pte[i] =
			    &moea64_pteg_table[ptegidx].pt[pteidx];

			UNLOCK_TABLE();
		}
	}

	/*
	 * Allocate a kernel stack with a guard page for thread0 and map it
	 * into the kernel page map.
	 */
	pa = moea64_bootstrap_alloc(KSTACK_PAGES * PAGE_SIZE, PAGE_SIZE);
	va = virtual_avail + KSTACK_GUARD_PAGES * PAGE_SIZE;
	virtual_avail = va + KSTACK_PAGES * PAGE_SIZE;
	CTR2(KTR_PMAP, "moea64_bootstrap: kstack0 at %#x (%#x)", pa, va);
	thread0.td_kstack = va;
	thread0.td_kstack_pages = KSTACK_PAGES;
	for (i = 0; i < KSTACK_PAGES; i++) {
		moea64_kenter(mmup, va, pa);
		pa += PAGE_SIZE;
		va += PAGE_SIZE;
	}

	/*
	 * Allocate virtual address space for the message buffer.
	 */
	pa = msgbuf_phys = moea64_bootstrap_alloc(MSGBUF_SIZE, PAGE_SIZE);
	msgbufp = (struct msgbuf *)virtual_avail;
	va = virtual_avail;
	virtual_avail += round_page(MSGBUF_SIZE);
	while (va < virtual_avail) {
		moea64_kenter(mmup, va, pa);
		pa += PAGE_SIZE;
		va += PAGE_SIZE;
	}

	/*
	 * Allocate virtual address space for the dynamic percpu area.
	 */
	pa = moea64_bootstrap_alloc(DPCPU_SIZE, PAGE_SIZE);
	dpcpu = (void *)virtual_avail;
	va = virtual_avail;
	virtual_avail += DPCPU_SIZE;
	while (va < virtual_avail) {
		moea64_kenter(mmup, va, pa);
		pa += PAGE_SIZE;
		va += PAGE_SIZE;
	}
	dpcpu_init(dpcpu, 0);
}

/*
 * Activate a user pmap.  The pmap must be activated before its address
 * space can be accessed in any way.
 */
void
moea64_activate(mmu_t mmu, struct thread *td)
{
	pmap_t	pm;

	pm = &td->td_proc->p_vmspace->vm_pmap;
	pm->pm_active |= PCPU_GET(cpumask);

	#ifdef __powerpc64__
	PCPU_SET(userslb, pm->pm_slb);
	#else
	PCPU_SET(curpmap, pm->pmap_phys);
	#endif
}

void
moea64_deactivate(mmu_t mmu, struct thread *td)
{
	pmap_t	pm;

	pm = &td->td_proc->p_vmspace->vm_pmap;
	pm->pm_active &= ~(PCPU_GET(cpumask));
	#ifdef __powerpc64__
	PCPU_SET(userslb, NULL);
	#else
	PCPU_SET(curpmap, NULL);
	#endif
}

void
moea64_change_wiring(mmu_t mmu, pmap_t pm, vm_offset_t va, boolean_t wired)
{
	struct	pvo_entry *pvo;
	struct	lpte *pt;
	uint64_t vsid;
	int	i, ptegidx;

	PMAP_LOCK(pm);
	pvo = moea64_pvo_find_va(pm, va & ~ADDR_POFF);

	if (pvo != NULL) {
		LOCK_TABLE();
		pt = moea64_pvo_to_pte(pvo);

		if (wired) {
			if ((pvo->pvo_vaddr & PVO_WIRED) == 0)
				pm->pm_stats.wired_count++;
			pvo->pvo_vaddr |= PVO_WIRED;
			pvo->pvo_pte.lpte.pte_hi |= LPTE_WIRED;
		} else {
			if ((pvo->pvo_vaddr & PVO_WIRED) != 0)
				pm->pm_stats.wired_count--;
			pvo->pvo_vaddr &= ~PVO_WIRED;
			pvo->pvo_pte.lpte.pte_hi &= ~LPTE_WIRED;
		}

		if (pt != NULL) {
			/* Update wiring flag in page table. */
			moea64_pte_change(pt, &pvo->pvo_pte.lpte,
			    pvo->pvo_vpn);
		} else if (wired) {
			/*
			 * If we are wiring the page, and it wasn't in the
			 * page table before, add it.
			 */
			vsid = PVO_VSID(pvo);
			ptegidx = va_to_pteg(vsid, PVO_VADDR(pvo),
			    pvo->pvo_vaddr & PVO_LARGE);

			i = moea64_pte_insert(ptegidx, &pvo->pvo_pte.lpte);
			if (i >= 0) {
				PVO_PTEGIDX_CLR(pvo);
				PVO_PTEGIDX_SET(pvo, i);
			}
		}

		UNLOCK_TABLE();
	}
	PMAP_UNLOCK(pm);
}

/*
 * This goes through and sets the physical address of our
 * special scratch PTE to the PA we want to zero or copy. Because
 * of locking issues (this can get called in pvo_enter() by
 * the UMA allocator), we can't use most other utility functions here
 */

static __inline
void moea64_set_scratchpage_pa(int which, vm_offset_t pa) {

	KASSERT(!hw_direct_map, ("Using OEA64 scratchpage with a direct map!"));
	mtx_assert(&moea64_scratchpage_mtx, MA_OWNED);

	moea64_scratchpage_pte[which]->pte_hi &= ~LPTE_VALID;
	TLBIE(moea64_scratchpage_vpn[which]);

	moea64_scratchpage_pte[which]->pte_lo &=
	    ~(LPTE_WIMG | LPTE_RPGN);
	moea64_scratchpage_pte[which]->pte_lo |=
	    moea64_calc_wimg(pa, VM_MEMATTR_DEFAULT) | (uint64_t)pa;
	EIEIO();

	moea64_scratchpage_pte[which]->pte_hi |= LPTE_VALID;
	PTESYNC(); isync();
}
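
/*
 * Callers follow the pattern in moea64_copy_page() below: acquire
 * moea64_scratchpage_mtx, retarget scratch slot 0 (and slot 1 if needed)
 * at the physical page of interest, access it through
 * moea64_scratchpage_va[], and drop the lock.
 */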

void
moea64_copy_page(mmu_t mmu, vm_page_t msrc, vm_page_t mdst)
{
	vm_offset_t	dst;
	vm_offset_t	src;

	dst = VM_PAGE_TO_PHYS(mdst);
	src = VM_PAGE_TO_PHYS(msrc);

	if (hw_direct_map) {
		kcopy((void *)src, (void *)dst, PAGE_SIZE);
	} else {
		mtx_lock(&moea64_scratchpage_mtx);

		moea64_set_scratchpage_pa(0,src);
		moea64_set_scratchpage_pa(1,dst);

		kcopy((void *)moea64_scratchpage_va[0],
		    (void *)moea64_scratchpage_va[1], PAGE_SIZE);

		mtx_unlock(&moea64_scratchpage_mtx);
	}
}

void
moea64_zero_page_area(mmu_t mmu, vm_page_t m, int off, int size)
{
	vm_offset_t pa = VM_PAGE_TO_PHYS(m);

	if (!moea64_initialized)
		panic("moea64_zero_page_area: can't zero pa %#" PRIxPTR, pa);
	if (size + off > PAGE_SIZE)
		panic("moea64_zero_page_area: size + off > PAGE_SIZE");

	if (hw_direct_map) {
		bzero((caddr_t)pa + off, size);
	} else {
		mtx_lock(&moea64_scratchpage_mtx);
		moea64_set_scratchpage_pa(0,pa);
		bzero((caddr_t)moea64_scratchpage_va[0] + off, size);
		mtx_unlock(&moea64_scratchpage_mtx);
	}
}

/*
 * Zero a page of physical memory by temporarily mapping it
 */
void
moea64_zero_page(mmu_t mmu, vm_page_t m)
{
	vm_offset_t pa = VM_PAGE_TO_PHYS(m);
	vm_offset_t va, off;

	if (!moea64_initialized)
		panic("moea64_zero_page: can't zero pa %#zx", pa);

	if (!hw_direct_map) {
		mtx_lock(&moea64_scratchpage_mtx);

		moea64_set_scratchpage_pa(0,pa);
		va = moea64_scratchpage_va[0];
	} else {
		va = pa;
	}

	for (off = 0; off < PAGE_SIZE; off += cacheline_size)
		__asm __volatile("dcbz 0,%0" :: "r"(va + off));

	if (!hw_direct_map)
		mtx_unlock(&moea64_scratchpage_mtx);
}

void
moea64_zero_page_idle(mmu_t mmu, vm_page_t m)
{

	moea64_zero_page(mmu, m);
}

/*
 * Map the given physical page at the specified virtual address in the
 * target pmap with the protection requested.  If specified the page
 * will be wired down.
 */
void
moea64_enter(mmu_t mmu, pmap_t pmap, vm_offset_t va, vm_page_t m,
    vm_prot_t prot, boolean_t wired)
{

	vm_page_lock_queues();
	PMAP_LOCK(pmap);
	moea64_enter_locked(pmap, va, m, prot, wired);
	vm_page_unlock_queues();
	PMAP_UNLOCK(pmap);
}

/*
 * Map the given physical page at the specified virtual address in the
 * target pmap with the protection requested.  If specified the page
 * will be wired down.
 *
 * The page queues and pmap must be locked.
 */

static void
moea64_enter_locked(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot,
    boolean_t wired)
{
	struct		pvo_head *pvo_head;
	uma_zone_t	zone;
	vm_page_t	pg;
	uint64_t	pte_lo;
	u_int		pvo_flags;
	int		error;

	if (!moea64_initialized) {
		pvo_head = &moea64_pvo_kunmanaged;
		pg = NULL;
		zone = moea64_upvo_zone;
		pvo_flags = 0;
	} else {
		pvo_head = vm_page_to_pvoh(m);
		pg = m;
		zone = moea64_mpvo_zone;
		pvo_flags = PVO_MANAGED;
	}

	if (pmap_bootstrapped)
		mtx_assert(&vm_page_queue_mtx, MA_OWNED);
	PMAP_LOCK_ASSERT(pmap, MA_OWNED);
	KASSERT((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) != 0 ||
	    (m->oflags & VPO_BUSY) != 0 || VM_OBJECT_LOCKED(m->object),
	    ("moea64_enter_locked: page %p is not busy", m));

	/* XXX change the pvo head for fake pages */
	if ((m->flags & PG_FICTITIOUS) == PG_FICTITIOUS) {
		pvo_flags &= ~PVO_MANAGED;
		pvo_head = &moea64_pvo_kunmanaged;
		zone = moea64_upvo_zone;
	}

	pte_lo = moea64_calc_wimg(VM_PAGE_TO_PHYS(m), pmap_page_get_memattr(m));

	if (prot & VM_PROT_WRITE) {
		pte_lo |= LPTE_BW;
		if (pmap_bootstrapped &&
		    (m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) == 0)
			vm_page_flag_set(m, PG_WRITEABLE);
	} else
		pte_lo |= LPTE_BR;

	if (prot & VM_PROT_EXECUTE)
		pvo_flags |= VM_PROT_EXECUTE;

	if (wired)
		pvo_flags |= PVO_WIRED;

	if ((m->flags & PG_FICTITIOUS) != 0)
		pvo_flags |= PVO_FAKE;

	error = moea64_pvo_enter(pmap, zone, pvo_head, va, VM_PAGE_TO_PHYS(m),
	    pte_lo, pvo_flags);

	/*
	 * Flush the page from the instruction cache if this page is
	 * mapped executable and cacheable.
	 */
	if ((pte_lo & (LPTE_I | LPTE_G | LPTE_NOEXEC)) == 0) {
		moea64_syncicache(pmap, va, VM_PAGE_TO_PHYS(m), PAGE_SIZE);
	}
}

static void
moea64_syncicache(pmap_t pmap, vm_offset_t va, vm_offset_t pa, vm_size_t sz)
{

	/*
	 * This is much trickier than on older systems because
	 * we can't sync the icache on physical addresses directly
	 * without a direct map. Instead we check a couple of cases
	 * where the memory is already mapped in and, failing that,
	 * use the same trick we use for page zeroing to create
	 * a temporary mapping for this physical address.
	 */

	if (!pmap_bootstrapped) {
		/*
		 * If PMAP is not bootstrapped, we are likely to be
		 * in real mode.
		 */
		__syncicache((void *)pa, sz);
	} else if (pmap == kernel_pmap) {
		__syncicache((void *)va, sz);
	} else if (hw_direct_map) {
		__syncicache((void *)pa, sz);
	} else {
		/* Use the scratch page to set up a temp mapping */

		mtx_lock(&moea64_scratchpage_mtx);

		moea64_set_scratchpage_pa(1,pa & ~ADDR_POFF);
		__syncicache((void *)(moea64_scratchpage_va[1] +
		    (va & ADDR_POFF)), sz);

		mtx_unlock(&moea64_scratchpage_mtx);
	}
}

/*
 * Maps a sequence of resident pages belonging to the same object.
 * The sequence begins with the given page m_start.  This page is
 * mapped at the given virtual address start.  Each subsequent page is
 * mapped at a virtual address that is offset from start by the same
 * amount as the page is offset from m_start within the object.  The
 * last page in the sequence is the page with the largest offset from
 * m_start that can be mapped at a virtual address less than the given
 * virtual address end.  Not every virtual page between start and end
 * is mapped; only those for which a resident page exists with the
 * corresponding offset from m_start are mapped.
 */
void
moea64_enter_object(mmu_t mmu, pmap_t pm, vm_offset_t start, vm_offset_t end,
    vm_page_t m_start, vm_prot_t prot)
{
	vm_page_t m;
	vm_pindex_t diff, psize;

	psize = atop(end - start);
	m = m_start;
	vm_page_lock_queues();
	PMAP_LOCK(pm);
	while (m != NULL && (diff = m->pindex - m_start->pindex) < psize) {
		moea64_enter_locked(pm, start + ptoa(diff), m, prot &
		    (VM_PROT_READ | VM_PROT_EXECUTE), FALSE);
		m = TAILQ_NEXT(m, listq);
	}
	vm_page_unlock_queues();
	PMAP_UNLOCK(pm);
}

void
moea64_enter_quick(mmu_t mmu, pmap_t pm, vm_offset_t va, vm_page_t m,
    vm_prot_t prot)
{

	vm_page_lock_queues();
	PMAP_LOCK(pm);
	moea64_enter_locked(pm, va, m, prot & (VM_PROT_READ | VM_PROT_EXECUTE),
	    FALSE);
	vm_page_unlock_queues();
	PMAP_UNLOCK(pm);
}

vm_paddr_t
moea64_extract(mmu_t mmu, pmap_t pm, vm_offset_t va)
{
	struct	pvo_entry *pvo;
	vm_paddr_t pa;

	PMAP_LOCK(pm);
	pvo = moea64_pvo_find_va(pm, va);
	if (pvo == NULL)
		pa = 0;
	else
		pa = (pvo->pvo_pte.lpte.pte_lo & LPTE_RPGN) |
		    (va - PVO_VADDR(pvo));
	PMAP_UNLOCK(pm);
	return (pa);
}

/*
 * Atomically extract and hold the physical page with the given
 * pmap and virtual address pair if that mapping permits the given
 * protection.
 */
vm_page_t
moea64_extract_and_hold(mmu_t mmu, pmap_t pmap, vm_offset_t va, vm_prot_t prot)
{
	struct	pvo_entry *pvo;
	vm_page_t m;
	vm_paddr_t pa;

	m = NULL;
	pa = 0;
	PMAP_LOCK(pmap);
retry:
	pvo = moea64_pvo_find_va(pmap, va & ~ADDR_POFF);
	if (pvo != NULL && (pvo->pvo_pte.lpte.pte_hi & LPTE_VALID) &&
	    ((pvo->pvo_pte.lpte.pte_lo & LPTE_PP) == LPTE_RW ||
	     (prot & VM_PROT_WRITE) == 0)) {
		if (vm_page_pa_tryrelock(pmap,
			pvo->pvo_pte.lpte.pte_lo & LPTE_RPGN, &pa))
			goto retry;
		m = PHYS_TO_VM_PAGE(pvo->pvo_pte.lpte.pte_lo & LPTE_RPGN);
		vm_page_hold(m);
	}
	PA_UNLOCK_COND(pa);
	PMAP_UNLOCK(pmap);
	return (m);
}

static void *
moea64_uma_page_alloc(uma_zone_t zone, int bytes, u_int8_t *flags, int wait)
{
	/*
	 * This entire routine is a horrible hack to avoid bothering kmem
	 * for new KVA addresses. Because this can get called from inside
	 * kmem allocation routines, calling kmem for a new address here
	 * can lead to multiply locking non-recursive mutexes.
	 */
	static vm_pindex_t color;
	vm_offset_t va;

	vm_page_t m;
	int pflags, needed_lock;

	*flags = UMA_SLAB_PRIV;
	needed_lock = !PMAP_LOCKED(kernel_pmap);

	if (needed_lock)
		PMAP_LOCK(kernel_pmap);

	if ((wait & (M_NOWAIT|M_USE_RESERVE)) == M_NOWAIT)
		pflags = VM_ALLOC_INTERRUPT | VM_ALLOC_WIRED;
	else
		pflags = VM_ALLOC_SYSTEM | VM_ALLOC_WIRED;
	if (wait & M_ZERO)
		pflags |= VM_ALLOC_ZERO;

	for (;;) {
		m = vm_page_alloc(NULL, color++, pflags | VM_ALLOC_NOOBJ);
		if (m == NULL) {
			if (wait & M_NOWAIT)
				return (NULL);
			VM_WAIT;
		} else
			break;
	}

	va = VM_PAGE_TO_PHYS(m);

	moea64_pvo_enter(kernel_pmap, moea64_upvo_zone,
	    &moea64_pvo_kunmanaged, va, VM_PAGE_TO_PHYS(m), LPTE_M,
	    PVO_WIRED | PVO_BOOTSTRAP);

	if (needed_lock)
		PMAP_UNLOCK(kernel_pmap);

	if ((wait & M_ZERO) && (m->flags & PG_ZERO) == 0)
		bzero((void *)va, PAGE_SIZE);

	return (void *)va;
}

void
moea64_init(mmu_t mmu)
{

	CTR0(KTR_PMAP, "moea64_init");

	moea64_upvo_zone = uma_zcreate("UPVO entry", sizeof (struct pvo_entry),
	    NULL, NULL, NULL, NULL, UMA_ALIGN_PTR,
	    UMA_ZONE_VM | UMA_ZONE_NOFREE);
	moea64_mpvo_zone = uma_zcreate("MPVO entry", sizeof(struct pvo_entry),
	    NULL, NULL, NULL, NULL, UMA_ALIGN_PTR,
	    UMA_ZONE_VM | UMA_ZONE_NOFREE);

	if (!hw_direct_map) {
		uma_zone_set_allocf(moea64_upvo_zone,moea64_uma_page_alloc);
		uma_zone_set_allocf(moea64_mpvo_zone,moea64_uma_page_alloc);
	}

	moea64_initialized = TRUE;
}

boolean_t
moea64_is_referenced(mmu_t mmu, vm_page_t m)
{

	KASSERT((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) == 0,
	    ("moea64_is_referenced: page %p is not managed", m));
	return (moea64_query_bit(m, LPTE_REF));
1766}
1767
1768boolean_t
1769moea64_is_modified(mmu_t mmu, vm_page_t m)
1770{
1771
1772	KASSERT((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) == 0,
1773	    ("moea64_is_modified: page %p is not managed", m));
1774
1775	/*
1776	 * If the page is not VPO_BUSY, then PG_WRITEABLE cannot be
1777	 * concurrently set while the object is locked.  Thus, if PG_WRITEABLE
1778	 * is clear, no PTEs can have LPTE_CHG set.
1779	 */
1780	VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED);
1781	if ((m->oflags & VPO_BUSY) == 0 &&
1782	    (m->flags & PG_WRITEABLE) == 0)
1783		return (FALSE);
1784	return (moea64_query_bit(m, LPTE_CHG));
1785}
1786
1787boolean_t
1788moea64_is_prefaultable(mmu_t mmu, pmap_t pmap, vm_offset_t va)
1789{
1790	struct pvo_entry *pvo;
1791	boolean_t rv;
1792
1793	PMAP_LOCK(pmap);
1794	pvo = moea64_pvo_find_va(pmap, va & ~ADDR_POFF);
1795	rv = pvo == NULL || (pvo->pvo_pte.lpte.pte_hi & LPTE_VALID) == 0;
1796	PMAP_UNLOCK(pmap);
1797	return (rv);
1798}
1799
void
moea64_clear_reference(mmu_t mmu, vm_page_t m)
{

	KASSERT((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) == 0,
	    ("moea64_clear_reference: page %p is not managed", m));
	moea64_clear_bit(m, LPTE_REF);
}

void
moea64_clear_modify(mmu_t mmu, vm_page_t m)
{

	KASSERT((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) == 0,
	    ("moea64_clear_modify: page %p is not managed", m));
	VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED);
	KASSERT((m->oflags & VPO_BUSY) == 0,
	    ("moea64_clear_modify: page %p is busy", m));

	/*
	 * If the page is not PG_WRITEABLE, then no PTEs can have LPTE_CHG
	 * set.  If the object containing the page is locked and the page is
	 * not VPO_BUSY, then PG_WRITEABLE cannot be concurrently set.
	 */
	if ((m->flags & PG_WRITEABLE) == 0)
		return;
	moea64_clear_bit(m, LPTE_CHG);
}

/*
 * Clear the write and modified bits in each of the given page's mappings.
 */
void
moea64_remove_write(mmu_t mmu, vm_page_t m)
{
	struct	pvo_entry *pvo;
	struct	lpte *pt;
	pmap_t	pmap;
	uint64_t lo;

	KASSERT((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) == 0,
	    ("moea64_remove_write: page %p is not managed", m));

	/*
	 * If the page is not VPO_BUSY, then PG_WRITEABLE cannot be set by
	 * another thread while the object is locked.  Thus, if PG_WRITEABLE
	 * is clear, no page table entries need updating.
	 */
	VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED);
	if ((m->oflags & VPO_BUSY) == 0 &&
	    (m->flags & PG_WRITEABLE) == 0)
		return;
	vm_page_lock_queues();
	lo = moea64_attr_fetch(m);
	SYNC();
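	/*
	 * Downgrade each writable mapping to read-only (LPTE_BR) and
	 * accumulate the CHG bits so modifications are not lost.
	 */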
	LIST_FOREACH(pvo, vm_page_to_pvoh(m), pvo_vlink) {
		pmap = pvo->pvo_pmap;
		PMAP_LOCK(pmap);
		LOCK_TABLE();
		if ((pvo->pvo_pte.lpte.pte_lo & LPTE_PP) != LPTE_BR) {
			pt = moea64_pvo_to_pte(pvo);
			pvo->pvo_pte.lpte.pte_lo &= ~LPTE_PP;
			pvo->pvo_pte.lpte.pte_lo |= LPTE_BR;
			if (pt != NULL) {
				moea64_pte_synch(pt, &pvo->pvo_pte.lpte);
				lo |= pvo->pvo_pte.lpte.pte_lo;
				pvo->pvo_pte.lpte.pte_lo &= ~LPTE_CHG;
				moea64_pte_change(pt, &pvo->pvo_pte.lpte,
				    pvo->pvo_vpn);
				if (pvo->pvo_pmap == kernel_pmap)
					isync();
			}
		}
		UNLOCK_TABLE();
		PMAP_UNLOCK(pmap);
	}
	if ((lo & LPTE_CHG) != 0) {
		moea64_attr_clear(m, LPTE_CHG);
		vm_page_dirty(m);
	}
	vm_page_flag_clear(m, PG_WRITEABLE);
	vm_page_unlock_queues();
}

/*
 *	moea64_ts_referenced:
 *
 *	Return a count of reference bits for a page, clearing those bits.
 *	It is not necessary for every reference bit to be cleared, but it
 *	is necessary that 0 only be returned when there are truly no
 *	reference bits set.
 *
 *	XXX: The exact number of bits to check and clear is a matter that
 *	should be tested and standardized at some point in the future for
 *	optimal aging of shared pages.
 */
boolean_t
moea64_ts_referenced(mmu_t mmu, vm_page_t m)
{

	KASSERT((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) == 0,
	    ("moea64_ts_referenced: page %p is not managed", m));
	return (moea64_clear_bit(m, LPTE_REF));
}

/*
 * Modify the WIMG settings of all mappings for a page.
 */
void
moea64_page_set_memattr(mmu_t mmu, vm_page_t m, vm_memattr_t ma)
{
	struct	pvo_entry *pvo;
	struct	pvo_head *pvo_head;
	struct	lpte *pt;
	pmap_t	pmap;
	uint64_t lo;

	if (m->flags & PG_FICTITIOUS) {
		m->md.mdpg_cache_attrs = ma;
		return;
	}

	vm_page_lock_queues();
	pvo_head = vm_page_to_pvoh(m);
	lo = moea64_calc_wimg(VM_PAGE_TO_PHYS(m), ma);
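	/*
	 * Rewrite the WIMG bits of every mapping and flush any mapping
	 * that is currently in the page table.
	 */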
	LIST_FOREACH(pvo, pvo_head, pvo_vlink) {
		pmap = pvo->pvo_pmap;
		PMAP_LOCK(pmap);
		LOCK_TABLE();
		pt = moea64_pvo_to_pte(pvo);
		pvo->pvo_pte.lpte.pte_lo &= ~LPTE_WIMG;
		pvo->pvo_pte.lpte.pte_lo |= lo;
		if (pt != NULL) {
			moea64_pte_change(pt, &pvo->pvo_pte.lpte,
			    pvo->pvo_vpn);
			if (pvo->pvo_pmap == kernel_pmap)
				isync();
		}
		UNLOCK_TABLE();
		PMAP_UNLOCK(pmap);
	}
	m->md.mdpg_cache_attrs = ma;
	vm_page_unlock_queues();
}

/*
 * Map a wired page into kernel virtual address space.
 */
void
moea64_kenter_attr(mmu_t mmu, vm_offset_t va, vm_offset_t pa, vm_memattr_t ma)
{
	uint64_t	pte_lo;
	int		error;

	pte_lo = moea64_calc_wimg(pa, ma);

	PMAP_LOCK(kernel_pmap);
	error = moea64_pvo_enter(kernel_pmap, moea64_upvo_zone,
	    &moea64_pvo_kunmanaged, va, pa, pte_lo,
	    PVO_WIRED | VM_PROT_EXECUTE);

	if (error != 0 && error != ENOENT)
		panic("moea64_kenter: failed to enter va %#zx pa %#zx: %d", va,
		    pa, error);

	/*
	 * Flush the memory from the instruction cache.
	 */
	if ((pte_lo & (LPTE_I | LPTE_G)) == 0) {
		__syncicache((void *)va, PAGE_SIZE);
	}
	PMAP_UNLOCK(kernel_pmap);
}

void
moea64_kenter(mmu_t mmu, vm_offset_t va, vm_offset_t pa)
{

	moea64_kenter_attr(mmu, va, pa, VM_MEMATTR_DEFAULT);
}

/*
 * Extract the physical page address associated with the given kernel virtual
 * address.
 */
vm_offset_t
moea64_kextract(mmu_t mmu, vm_offset_t va)
{
	struct	pvo_entry *pvo;
	vm_paddr_t pa;

	/*
	 * Shortcut the direct-mapped case when applicable.  We never put
	 * anything but 1:1 mappings below VM_MIN_KERNEL_ADDRESS.
	 */
	if (va < VM_MIN_KERNEL_ADDRESS)
		return (va);

	PMAP_LOCK(kernel_pmap);
	pvo = moea64_pvo_find_va(kernel_pmap, va);
	KASSERT(pvo != NULL, ("moea64_kextract: no addr found for %#" PRIxPTR,
	    va));
	pa = (pvo->pvo_pte.lpte.pte_lo & LPTE_RPGN) + (va - PVO_VADDR(pvo));
	PMAP_UNLOCK(kernel_pmap);
	return (pa);
}

/*
 * Remove a wired page from kernel virtual address space.
 */
void
moea64_kremove(mmu_t mmu, vm_offset_t va)
{
	moea64_remove(mmu, kernel_pmap, va, va + PAGE_SIZE);
}

/*
 * Map a range of physical addresses into kernel virtual address space.
 *
 * The value passed in *virt is a suggested virtual address for the mapping.
 * Architectures which can support a direct-mapped physical to virtual region
 * can return the appropriate address within that region, leaving '*virt'
 * unchanged.  We cannot and therefore do not; *virt is updated with the
 * first usable address after the mapped region.
 */
vm_offset_t
moea64_map(mmu_t mmu, vm_offset_t *virt, vm_offset_t pa_start,
    vm_offset_t pa_end, int prot)
{
	vm_offset_t	sva, va;

	sva = *virt;
	va = sva;
	for (; pa_start < pa_end; pa_start += PAGE_SIZE, va += PAGE_SIZE)
		moea64_kenter(mmu, va, pa_start);
	*virt = va;

	return (sva);
}

/*
 * Returns true if the pmap's pv is one of the first
 * 16 pvs linked to from this page.  This count may
 * be changed upwards or downwards in the future; it
 * is only necessary that true be returned for a small
 * subset of pmaps for proper page aging.
 */
boolean_t
moea64_page_exists_quick(mmu_t mmu, pmap_t pmap, vm_page_t m)
{
	int loops;
	struct pvo_entry *pvo;
	boolean_t rv;

	KASSERT((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) == 0,
	    ("moea64_page_exists_quick: page %p is not managed", m));
	loops = 0;
	rv = FALSE;
	vm_page_lock_queues();
	LIST_FOREACH(pvo, vm_page_to_pvoh(m), pvo_vlink) {
		if (pvo->pvo_pmap == pmap) {
			rv = TRUE;
			break;
		}
		if (++loops >= 16)
			break;
	}
	vm_page_unlock_queues();
	return (rv);
}

/*
 * Return the number of managed mappings to the given physical page
 * that are wired.
 */
int
moea64_page_wired_mappings(mmu_t mmu, vm_page_t m)
{
	struct pvo_entry *pvo;
	int count;

	count = 0;
	if ((m->flags & PG_FICTITIOUS) != 0)
		return (count);
	vm_page_lock_queues();
	LIST_FOREACH(pvo, vm_page_to_pvoh(m), pvo_vlink)
		if ((pvo->pvo_vaddr & PVO_WIRED) != 0)
			count++;
	vm_page_unlock_queues();
	return (count);
}

static uintptr_t	moea64_vsidcontext;

uintptr_t
moea64_get_unique_vsid(void)
{
	u_int entropy;
	register_t hash;
	uint32_t mask;
	int i;

	entropy = 0;
	__asm __volatile("mftb %0" : "=r"(entropy));

	mtx_lock(&moea64_slb_mutex);
	for (i = 0; i < NVSIDS; i += VSID_NBPW) {
		u_int	n;

		/*
		 * Create a new value by multiplying by a prime and adding in
		 * entropy from the timebase register.  This is to make the
		 * VSID more random so that the PT hash function collides
		 * less often.  (Note that the prime causes gcc to do shifts
		 * instead of a multiply.)
		 */
		moea64_vsidcontext = (moea64_vsidcontext * 0x1105) + entropy;
		hash = moea64_vsidcontext & (NVSIDS - 1);
		if (hash == 0)		/* 0 is special, avoid it */
			continue;
		n = hash >> 5;
		mask = 1 << (hash & (VSID_NBPW - 1));
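		/*
		 * Word n of moea64_vsid_bitmap tracks VSID_NBPW contexts;
		 * mask selects this hash's bit within that word.
		 */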
		hash = (moea64_vsidcontext & VSID_HASHMASK);
		if (moea64_vsid_bitmap[n] & mask) {	/* collision? */
			/* anything free in this bucket? */
			if (moea64_vsid_bitmap[n] == 0xffffffff) {
				entropy = (moea64_vsidcontext >> 20);
				continue;
			}
			i = ffs(~moea64_vsid_bitmap[n]) - 1;
			mask = 1 << i;
			hash &= VSID_HASHMASK & ~(VSID_NBPW - 1);
			hash |= i;
		}
		KASSERT(!(moea64_vsid_bitmap[n] & mask),
		    ("Allocating in-use VSID %#zx\n", hash));
		moea64_vsid_bitmap[n] |= mask;
		mtx_unlock(&moea64_slb_mutex);
		return (hash);
	}

	mtx_unlock(&moea64_slb_mutex);
	panic("%s: out of segments", __func__);
}

#ifdef __powerpc64__
void
moea64_pinit(mmu_t mmu, pmap_t pmap)
{
	PMAP_LOCK_INIT(pmap);

	pmap->pm_slb_tree_root = slb_alloc_tree();
	pmap->pm_slb = slb_alloc_user_cache();
	pmap->pm_slb_len = 0;
}
#else
void
moea64_pinit(mmu_t mmu, pmap_t pmap)
{
	int	i;
	uint32_t hash;

	PMAP_LOCK_INIT(pmap);

	if (pmap_bootstrapped)
		pmap->pmap_phys = (pmap_t)moea64_kextract(mmu,
		    (vm_offset_t)pmap);
	else
		pmap->pmap_phys = pmap;

	/*
	 * Allocate some segment registers for this pmap.
	 */
	hash = moea64_get_unique_vsid();

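	/*
	 * Each of the 16 segment registers gets a VSID derived from the
	 * same hash, distinguished by the segment number.
	 */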
	for (i = 0; i < 16; i++)
		pmap->pm_sr[i] = VSID_MAKE(i, hash);

	KASSERT(pmap->pm_sr[0] != 0, ("moea64_pinit: pm_sr[0] = 0"));
}
#endif

/*
 * Initialize the pmap associated with process 0.
 */
void
moea64_pinit0(mmu_t mmu, pmap_t pm)
{
	moea64_pinit(mmu, pm);
	bzero(&pm->pm_stats, sizeof(pm->pm_stats));
}

/*
 * Set the physical protection on the specified range of this map as requested.
 */
void
moea64_protect(mmu_t mmu, pmap_t pm, vm_offset_t sva, vm_offset_t eva,
    vm_prot_t prot)
{
	struct	pvo_entry *pvo;
	struct	lpte *pt;

	CTR4(KTR_PMAP, "moea64_protect: pm=%p sva=%#x eva=%#x prot=%#x", pm,
	    sva, eva, prot);

	KASSERT(pm == &curproc->p_vmspace->vm_pmap || pm == kernel_pmap,
	    ("moea64_protect: non current pmap"));

	if ((prot & VM_PROT_READ) == VM_PROT_NONE) {
		moea64_remove(mmu, pm, sva, eva);
		return;
	}

	vm_page_lock_queues();
	PMAP_LOCK(pm);
	for (; sva < eva; sva += PAGE_SIZE) {
		pvo = moea64_pvo_find_va(pm, sva);
		if (pvo == NULL)
			continue;

		/*
		 * Grab the PTE pointer before we diddle with the cached PTE
		 * copy.
		 */
		LOCK_TABLE();
		pt = moea64_pvo_to_pte(pvo);

		/*
		 * Change the protection of the page.
		 */
		pvo->pvo_pte.lpte.pte_lo &= ~LPTE_PP;
		pvo->pvo_pte.lpte.pte_lo |= LPTE_BR;
		pvo->pvo_pte.lpte.pte_lo &= ~LPTE_NOEXEC;
		if ((prot & VM_PROT_EXECUTE) == 0)
			pvo->pvo_pte.lpte.pte_lo |= LPTE_NOEXEC;

		/*
		 * If the PVO is in the page table, update that pte as well.
		 */
		if (pt != NULL) {
			moea64_pte_change(pt, &pvo->pvo_pte.lpte, pvo->pvo_vpn);
			if ((pvo->pvo_pte.lpte.pte_lo &
			    (LPTE_I | LPTE_G | LPTE_NOEXEC)) == 0) {
				moea64_syncicache(pm, sva,
				    pvo->pvo_pte.lpte.pte_lo & LPTE_RPGN,
				    PAGE_SIZE);
			}
		}
		UNLOCK_TABLE();
	}
	vm_page_unlock_queues();
	PMAP_UNLOCK(pm);
}

/*
 * Map a list of wired pages into kernel virtual address space.  This is
 * intended for temporary mappings which do not need page modification or
 * references recorded.  Existing mappings in the region are overwritten.
 */
void
moea64_qenter(mmu_t mmu, vm_offset_t va, vm_page_t *m, int count)
{
	while (count-- > 0) {
		moea64_kenter(mmu, va, VM_PAGE_TO_PHYS(*m));
		va += PAGE_SIZE;
		m++;
	}
}

/*
 * Remove page mappings from kernel virtual address space.  Intended for
 * temporary mappings entered by moea64_qenter.
 */
void
moea64_qremove(mmu_t mmu, vm_offset_t va, int count)
{
	while (count-- > 0) {
		moea64_kremove(mmu, va);
		va += PAGE_SIZE;
	}
}

void
moea64_release_vsid(uint64_t vsid)
{
	int idx, mask;

	mtx_lock(&moea64_slb_mutex);
	idx = vsid & (NVSIDS - 1);
	mask = 1 << (idx % VSID_NBPW);
	idx /= VSID_NBPW;
	KASSERT(moea64_vsid_bitmap[idx] & mask,
	    ("Freeing unallocated VSID %#jx", vsid));
	moea64_vsid_bitmap[idx] &= ~mask;
	mtx_unlock(&moea64_slb_mutex);
}

void
moea64_release(mmu_t mmu, pmap_t pmap)
{

	/*
	 * Free segment registers' VSIDs
	 */
    #ifdef __powerpc64__
	slb_free_tree(pmap);
	slb_free_user_cache(pmap->pm_slb);
    #else
	KASSERT(pmap->pm_sr[0] != 0, ("moea64_release: pm_sr[0] = 0"));

	moea64_release_vsid(VSID_TO_HASH(pmap->pm_sr[0]));
    #endif

	PMAP_LOCK_DESTROY(pmap);
}

/*
 * Remove the given range of addresses from the specified map.
 */
void
moea64_remove(mmu_t mmu, pmap_t pm, vm_offset_t sva, vm_offset_t eva)
{
	struct	pvo_entry *pvo;

	vm_page_lock_queues();
	PMAP_LOCK(pm);
	for (; sva < eva; sva += PAGE_SIZE) {
		pvo = moea64_pvo_find_va(pm, sva);
		if (pvo != NULL)
			moea64_pvo_remove(pvo);
	}
	vm_page_unlock_queues();
	PMAP_UNLOCK(pm);
}

/*
 * Remove physical page from all pmaps in which it resides.
 * moea64_pvo_remove() will reflect changes in PTEs back to the vm_page.
 */
void
moea64_remove_all(mmu_t mmu, vm_page_t m)
{
	struct	pvo_head *pvo_head;
	struct	pvo_entry *pvo, *next_pvo;
	pmap_t	pmap;

	vm_page_lock_queues();
	pvo_head = vm_page_to_pvoh(m);
	for (pvo = LIST_FIRST(pvo_head); pvo != NULL; pvo = next_pvo) {
		next_pvo = LIST_NEXT(pvo, pvo_vlink);

		MOEA_PVO_CHECK(pvo);	/* sanity check */
		pmap = pvo->pvo_pmap;
		PMAP_LOCK(pmap);
		moea64_pvo_remove(pvo);
		PMAP_UNLOCK(pmap);
	}
	if ((m->flags & PG_WRITEABLE) && moea64_is_modified(mmu, m)) {
		moea64_attr_clear(m, LPTE_CHG);
		vm_page_dirty(m);
	}
	vm_page_flag_clear(m, PG_WRITEABLE);
	vm_page_unlock_queues();
}

/*
 * Allocate a physical page of memory directly from the phys_avail map.
 * Can only be called from moea64_bootstrap before avail start and end are
 * calculated.
 */
static vm_offset_t
moea64_bootstrap_alloc(vm_size_t size, u_int align)
{
	vm_offset_t	s, e;
	int		i, j;

	size = round_page(size);
	for (i = 0; phys_avail[i + 1] != 0; i += 2) {
		if (align != 0)
			s = (phys_avail[i] + align - 1) & ~(align - 1);
		else
			s = phys_avail[i];
		e = s + size;

		if (s < phys_avail[i] || e > phys_avail[i + 1])
			continue;

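		/*
		 * Carve the allocation out of this region: trim the front,
		 * trim the back, or split the region in two.
		 */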
		if (s == phys_avail[i]) {
			phys_avail[i] += size;
		} else if (e == phys_avail[i + 1]) {
			phys_avail[i + 1] -= size;
		} else {
			for (j = phys_avail_count * 2; j > i; j -= 2) {
				phys_avail[j] = phys_avail[j - 2];
				phys_avail[j + 1] = phys_avail[j - 1];
			}

			phys_avail[i + 3] = phys_avail[i + 1];
			phys_avail[i + 1] = s;
			phys_avail[i + 2] = e;
			phys_avail_count++;
		}

		return (s);
	}
	panic("moea64_bootstrap_alloc: could not allocate memory");
}

static void
tlbia(void)
{
	vm_offset_t i;
	#ifndef __powerpc64__
	register_t msr, scratch;
	#endif

	TLBSYNC();

	for (i = 0; i < 0xFF000; i += 0x00001000) {
		#ifdef __powerpc64__
		__asm __volatile("tlbiel %0" :: "r"(i));
		#else
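		/*
		 * tlbiel is a 64-bit-only instruction: temporarily set
		 * MSR[SF] around it when running a 32-bit kernel.
		 */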
		__asm __volatile("\
		    mfmsr %0; \
		    mr %1, %0; \
		    insrdi %1,%3,1,0; \
		    mtmsrd %1; \
		    isync; \
		    \
		    tlbiel %2; \
		    \
		    mtmsrd %0; \
		    isync;"
		: "=r"(msr), "=r"(scratch) : "r"(i), "r"(1));
		#endif
	}

	EIEIO();
	TLBSYNC();
}

#ifdef __powerpc64__
static void
slbia(void)
{
	register_t seg0;

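	/* slbia does not invalidate SLB entry 0; evict it by hand. */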
	__asm __volatile ("slbia");
	__asm __volatile ("slbmfee %0,%1; slbie %0;" : "=r"(seg0) : "r"(0));
}
#endif

static int
moea64_pvo_enter(pmap_t pm, uma_zone_t zone, struct pvo_head *pvo_head,
    vm_offset_t va, vm_offset_t pa, uint64_t pte_lo, int flags)
{
	struct	 pvo_entry *pvo;
	uint64_t vsid;
	int	 first;
	u_int	 ptegidx;
	int	 i;
	int	 bootstrap;

	/*
	 * One nasty thing that can happen here is that the UMA calls to
	 * allocate new PVOs need to map more memory, which calls pvo_enter(),
	 * which calls UMA...
	 *
	 * We break the loop by detecting recursion and allocating out of
	 * the bootstrap pool.
	 */

	first = 0;
	bootstrap = (flags & PVO_BOOTSTRAP);

	if (!moea64_initialized)
		bootstrap = 1;

	/*
	 * Compute the PTE Group index.
	 */
	va &= ~ADDR_POFF;
	vsid = va_to_vsid(pm, va);
	ptegidx = va_to_pteg(vsid, va, flags & PVO_LARGE);

	/*
	 * Remove any existing mapping for this page.  Reuse the pvo entry if
	 * there is a mapping.
	 */
	LOCK_TABLE();

	moea64_pvo_enter_calls++;

	LIST_FOREACH(pvo, &moea64_pvo_table[ptegidx], pvo_olink) {
		if (pvo->pvo_pmap == pm && PVO_VADDR(pvo) == va) {
			if ((pvo->pvo_pte.lpte.pte_lo & LPTE_RPGN) == pa &&
			    (pvo->pvo_pte.lpte.pte_lo & LPTE_PP) ==
			    (pte_lo & LPTE_PP)) {
				if (!(pvo->pvo_pte.lpte.pte_hi & LPTE_VALID)) {
					/* Re-insert if spilled */
					i = moea64_pte_insert(ptegidx,
					    &pvo->pvo_pte.lpte);
					if (i >= 0)
						PVO_PTEGIDX_SET(pvo, i);
					moea64_pte_overflow--;
				}
				UNLOCK_TABLE();
				return (0);
			}
			moea64_pvo_remove(pvo);
			break;
		}
	}

	/*
	 * If we aren't overwriting a mapping, try to allocate.
	 */
	if (bootstrap) {
		if (moea64_bpvo_pool_index >= BPVO_POOL_SIZE) {
			panic("moea64_enter: bpvo pool exhausted, %d, %d, %zd",
			      moea64_bpvo_pool_index, BPVO_POOL_SIZE,
			      BPVO_POOL_SIZE * sizeof(struct pvo_entry));
		}
		pvo = &moea64_bpvo_pool[moea64_bpvo_pool_index];
		moea64_bpvo_pool_index++;
		bootstrap = 1;
	} else {
		/*
		 * Note: drop the table lock around the UMA allocation in
		 * case the UMA allocator needs to manipulate the page
		 * table. The mapping we are working with is already
		 * protected by the PMAP lock.
		 */
		UNLOCK_TABLE();
		pvo = uma_zalloc(zone, M_NOWAIT);
		LOCK_TABLE();
	}

	if (pvo == NULL) {
		UNLOCK_TABLE();
		return (ENOMEM);
	}

	moea64_pvo_entries++;
	pvo->pvo_vaddr = va;
	pvo->pvo_vpn = (uint64_t)((va & ADDR_PIDX) >> ADDR_PIDX_SHFT)
	    | (vsid << 16);
	pvo->pvo_pmap = pm;
	LIST_INSERT_HEAD(&moea64_pvo_table[ptegidx], pvo, pvo_olink);
	pvo->pvo_vaddr &= ~ADDR_POFF;

	if (!(flags & VM_PROT_EXECUTE))
		pte_lo |= LPTE_NOEXEC;
	if (flags & PVO_WIRED)
		pvo->pvo_vaddr |= PVO_WIRED;
	if (pvo_head != &moea64_pvo_kunmanaged)
		pvo->pvo_vaddr |= PVO_MANAGED;
	if (bootstrap)
		pvo->pvo_vaddr |= PVO_BOOTSTRAP;
	if (flags & PVO_FAKE)
		pvo->pvo_vaddr |= PVO_FAKE;
	if (flags & PVO_LARGE)
		pvo->pvo_vaddr |= PVO_LARGE;

	moea64_pte_create(&pvo->pvo_pte.lpte, vsid, va,
	    (uint64_t)(pa) | pte_lo, flags);

	/*
	 * Remember if the list was empty and therefore will be the first
	 * item.
	 */
	if (LIST_FIRST(pvo_head) == NULL)
		first = 1;
	LIST_INSERT_HEAD(pvo_head, pvo, pvo_vlink);

	if (pvo->pvo_vaddr & PVO_WIRED) {
		pvo->pvo_pte.lpte.pte_hi |= LPTE_WIRED;
		pm->pm_stats.wired_count++;
	}
	pm->pm_stats.resident_count++;

	/*
	 * We hope this succeeds but it isn't required.
	 */
	i = moea64_pte_insert(ptegidx, &pvo->pvo_pte.lpte);
	if (i >= 0) {
		PVO_PTEGIDX_SET(pvo, i);
	} else {
		panic("moea64_pvo_enter: overflow");
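		/* NOTREACHED */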
		moea64_pte_overflow++;
	}

	if (pm == kernel_pmap)
		isync();

	UNLOCK_TABLE();

#ifdef __powerpc64__
	/*
	 * Make sure all our bootstrap mappings are in the SLB as soon
	 * as virtual memory is switched on.
	 */
	if (!pmap_bootstrapped)
		moea64_bootstrap_slb_prefault(va, flags & PVO_LARGE);
#endif

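	/*
	 * ENOENT is not an error here: it tells the caller that this was
	 * the first mapping entered for the page.
	 */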
	return (first ? ENOENT : 0);
}

static void
moea64_pvo_remove(struct pvo_entry *pvo)
{
	struct	lpte *pt;

	/*
	 * If there is an active pte entry, we need to deactivate it (and
	 * save the REF & CHG bits).
	 */
	LOCK_TABLE();
	pt = moea64_pvo_to_pte(pvo);
	if (pt != NULL) {
		moea64_pte_unset(pt, &pvo->pvo_pte.lpte, pvo->pvo_vpn);
		PVO_PTEGIDX_CLR(pvo);
	} else {
		moea64_pte_overflow--;
	}

	/*
	 * Update our statistics.
	 */
	pvo->pvo_pmap->pm_stats.resident_count--;
	if (pvo->pvo_vaddr & PVO_WIRED)
		pvo->pvo_pmap->pm_stats.wired_count--;

	/*
	 * Save the REF/CHG bits into their cache if the page is managed.
	 */
	if ((pvo->pvo_vaddr & (PVO_MANAGED|PVO_FAKE)) == PVO_MANAGED) {
		struct	vm_page *pg;

		pg = PHYS_TO_VM_PAGE(pvo->pvo_pte.lpte.pte_lo & LPTE_RPGN);
		if (pg != NULL) {
			moea64_attr_save(pg, pvo->pvo_pte.lpte.pte_lo &
			    (LPTE_REF | LPTE_CHG));
		}
	}

	/*
	 * Remove this PVO from the PV list.
	 */
	LIST_REMOVE(pvo, pvo_vlink);

	/*
	 * Remove this from the overflow list and return it to the pool
	 * if we aren't going to reuse it.
	 */
	LIST_REMOVE(pvo, pvo_olink);

	moea64_pvo_entries--;
	moea64_pvo_remove_calls++;

	UNLOCK_TABLE();

	if (!(pvo->pvo_vaddr & PVO_BOOTSTRAP))
		uma_zfree((pvo->pvo_vaddr & PVO_MANAGED) ? moea64_mpvo_zone :
		    moea64_upvo_zone, pvo);
}

static struct pvo_entry *
moea64_pvo_find_va(pmap_t pm, vm_offset_t va)
{
	struct		pvo_entry *pvo;
	int		ptegidx;
	uint64_t	vsid;
	#ifdef __powerpc64__
	uint64_t	slbv;

	if (pm == kernel_pmap) {
		slbv = kernel_va_to_slbv(va);
	} else {
		struct slb *slb;
		slb = user_va_to_slb_entry(pm, va);
		/* The page is not mapped if the segment isn't */
		if (slb == NULL)
			return (NULL);
		slbv = slb->slbv;
	}

	vsid = (slbv & SLBV_VSID_MASK) >> SLBV_VSID_SHIFT;
	if (slbv & SLBV_L)
		va &= ~moea64_large_page_mask;
	else
		va &= ~ADDR_POFF;
	ptegidx = va_to_pteg(vsid, va, slbv & SLBV_L);
	#else
	va &= ~ADDR_POFF;
	vsid = va_to_vsid(pm, va);
	ptegidx = va_to_pteg(vsid, va, 0);
	#endif

	LOCK_TABLE();
	LIST_FOREACH(pvo, &moea64_pvo_table[ptegidx], pvo_olink) {
		if (pvo->pvo_pmap == pm && PVO_VADDR(pvo) == va)
			break;
	}
	UNLOCK_TABLE();

	return (pvo);
}

2710
2711static struct lpte *
2712moea64_pvo_to_pte(const struct pvo_entry *pvo)
2713{
2714	struct lpte 	*pt;
2715	int		pteidx, ptegidx;
2716	uint64_t	vsid;
2717
2718	ASSERT_TABLE_LOCK();
2719
2720	/* If the PTEG index is not set, then there is no page table entry */
2721	if (!PVO_PTEGIDX_ISSET(pvo))
2722		return (NULL);
2723
2724	/*
2725	 * Calculate the ptegidx
2726	 */
2727	vsid = PVO_VSID(pvo);
2728	ptegidx = va_to_pteg(vsid, PVO_VADDR(pvo),
2729	    pvo->pvo_vaddr & PVO_LARGE);
2730
2731	/*
2732	 * We can find the actual pte entry without searching by grabbing
2733	 * the PTEG index from 3 unused bits in pvo_vaddr and by
2734	 * noticing the HID bit.
2735	 */
2736	if (pvo->pvo_pte.lpte.pte_hi & LPTE_HID)
2737		ptegidx ^= moea64_pteg_mask;
2738
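	/* Eight PTEs per PTEG: combine the group index with the slot. */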
	pteidx = (ptegidx << 3) | PVO_PTEGIDX_GET(pvo);

	if ((pvo->pvo_pte.lpte.pte_hi & LPTE_VALID) &&
	    !PVO_PTEGIDX_ISSET(pvo)) {
		panic("moea64_pvo_to_pte: pvo %p has valid pte in pvo but no "
		    "valid pte index", pvo);
	}

	if ((pvo->pvo_pte.lpte.pte_hi & LPTE_VALID) == 0 &&
	    PVO_PTEGIDX_ISSET(pvo)) {
		panic("moea64_pvo_to_pte: pvo %p has valid pte index in pvo "
		    "but no valid pte", pvo);
	}

	pt = &moea64_pteg_table[pteidx >> 3].pt[pteidx & 7];
	if ((pt->pte_hi ^ (pvo->pvo_pte.lpte.pte_hi & ~LPTE_VALID)) ==
	    LPTE_VALID) {
		if ((pvo->pvo_pte.lpte.pte_hi & LPTE_VALID) == 0) {
			panic("moea64_pvo_to_pte: pvo %p has valid pte in "
			    "moea64_pteg_table %p but invalid in pvo", pvo, pt);
		}

		if (((pt->pte_lo ^ pvo->pvo_pte.lpte.pte_lo) &
		    ~(LPTE_M|LPTE_CHG|LPTE_REF)) != 0) {
			panic("moea64_pvo_to_pte: pvo %p pte does not match "
			    "pte %p in moea64_pteg_table difference is %#x",
			    pvo, pt,
			    (uint32_t)(pt->pte_lo ^ pvo->pvo_pte.lpte.pte_lo));
		}

		return (pt);
	}

	if (pvo->pvo_pte.lpte.pte_hi & LPTE_VALID) {
		panic("moea64_pvo_to_pte: pvo %p has invalid pte %p in "
		    "moea64_pteg_table but valid in pvo", pvo, pt);
	}

	return (NULL);
}

static __inline int
moea64_pte_spillable_ident(u_int ptegidx)
{
	struct	lpte *pt;
	int	i, j, k;

	/* Start at a random slot */
	i = mftb() % 8;
	k = -1;
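	/*
	 * Prefer a slot whose REF bit is clear; failing that, settle for
	 * any slot that is neither locked nor wired.
	 */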
	for (j = 0; j < 8; j++) {
		pt = &moea64_pteg_table[ptegidx].pt[(i + j) % 8];
		if (pt->pte_hi & (LPTE_LOCKED | LPTE_WIRED))
			continue;

		/* This is a candidate, so remember it */
		k = (i + j) % 8;

		/* Try to get a page that has not been used lately */
		if (!(pt->pte_lo & LPTE_REF))
			return (k);
	}

	return (k);
}

static int
moea64_pte_insert(u_int ptegidx, struct lpte *pvo_pt)
{
	struct	lpte *pt;
	struct	pvo_entry *pvo;
	u_int	pteg_bktidx;
	int	i;

	ASSERT_TABLE_LOCK();

	/*
	 * First try primary hash.
	 */
	pteg_bktidx = ptegidx;
	for (pt = moea64_pteg_table[pteg_bktidx].pt, i = 0; i < 8; i++, pt++) {
		if ((pt->pte_hi & (LPTE_VALID | LPTE_LOCKED)) == 0) {
			pvo_pt->pte_hi &= ~LPTE_HID;
			moea64_pte_set(pt, pvo_pt);
			return (i);
		}
	}

	/*
	 * Now try secondary hash.
	 */
	pteg_bktidx ^= moea64_pteg_mask;
	for (pt = moea64_pteg_table[pteg_bktidx].pt, i = 0; i < 8; i++, pt++) {
		if ((pt->pte_hi & (LPTE_VALID | LPTE_LOCKED)) == 0) {
			pvo_pt->pte_hi |= LPTE_HID;
			moea64_pte_set(pt, pvo_pt);
			return (i);
		}
	}

	/*
	 * Out of luck. Find a PTE to sacrifice.
	 */
	pteg_bktidx = ptegidx;
	i = moea64_pte_spillable_ident(pteg_bktidx);
	if (i < 0) {
		pteg_bktidx ^= moea64_pteg_mask;
		i = moea64_pte_spillable_ident(pteg_bktidx);
	}

	if (i < 0) {
		/* No freeable slots in either PTEG? We're hosed. */
		panic("moea64_pte_insert: overflow");
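		/* NOTREACHED */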
		return (-1);
	}

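	/* Record whether the entry will live in its secondary PTEG. */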
	if (pteg_bktidx == ptegidx)
		pvo_pt->pte_hi &= ~LPTE_HID;
	else
		pvo_pt->pte_hi |= LPTE_HID;

	/*
	 * Synchronize the sacrifice PTE with its PVO, then mark both
	 * invalid. The PVO will be reused when/if the VM system comes
	 * here after a fault.
	 */
	pt = &moea64_pteg_table[pteg_bktidx].pt[i];

	if (pt->pte_hi & LPTE_HID)
		pteg_bktidx ^= moea64_pteg_mask; /* PTEs indexed by primary */

	LIST_FOREACH(pvo, &moea64_pvo_table[pteg_bktidx], pvo_olink) {
		if (pvo->pvo_pte.lpte.pte_hi == pt->pte_hi) {
			KASSERT(pvo->pvo_pte.lpte.pte_hi & LPTE_VALID,
			    ("Invalid PVO for valid PTE!"));
			moea64_pte_unset(pt, &pvo->pvo_pte.lpte, pvo->pvo_vpn);
			PVO_PTEGIDX_CLR(pvo);
			moea64_pte_overflow++;
			break;
		}
	}

	KASSERT(pvo->pvo_pte.lpte.pte_hi == pt->pte_hi,
	    ("Unable to find PVO for spilled PTE"));

	/*
	 * Set the new PTE.
	 */
	moea64_pte_set(pt, pvo_pt);

	return (i);
}

static boolean_t
moea64_query_bit(vm_page_t m, u_int64_t ptebit)
{
	struct	pvo_entry *pvo;
	struct	lpte *pt;

	if (moea64_attr_fetch(m) & ptebit)
		return (TRUE);

	vm_page_lock_queues();

	LIST_FOREACH(pvo, vm_page_to_pvoh(m), pvo_vlink) {
		MOEA_PVO_CHECK(pvo);	/* sanity check */

		/*
		 * See if we saved the bit off.  If so, cache it and return
		 * success.
		 */
		if (pvo->pvo_pte.lpte.pte_lo & ptebit) {
			moea64_attr_save(m, ptebit);
			MOEA_PVO_CHECK(pvo);	/* sanity check */
			vm_page_unlock_queues();
			return (TRUE);
		}
	}

	/*
	 * No luck, now go through the hard part of looking at the PTEs
	 * themselves.  Sync so that any pending REF/CHG bits are flushed to
	 * the PTEs.
	 */
	SYNC();
	LIST_FOREACH(pvo, vm_page_to_pvoh(m), pvo_vlink) {
		MOEA_PVO_CHECK(pvo);	/* sanity check */

		/*
		 * See if this pvo has a valid PTE.  If so, fetch the
		 * REF/CHG bits from the valid PTE.  If the appropriate
		 * ptebit is set, cache it and return success.
		 */
		LOCK_TABLE();
		pt = moea64_pvo_to_pte(pvo);
		if (pt != NULL) {
			moea64_pte_synch(pt, &pvo->pvo_pte.lpte);
			if (pvo->pvo_pte.lpte.pte_lo & ptebit) {
				UNLOCK_TABLE();

				moea64_attr_save(m, ptebit);
				MOEA_PVO_CHECK(pvo);	/* sanity check */
				vm_page_unlock_queues();
				return (TRUE);
			}
		}
		UNLOCK_TABLE();
	}

	vm_page_unlock_queues();
	return (FALSE);
}

static u_int
moea64_clear_bit(vm_page_t m, u_int64_t ptebit)
{
	u_int	count;
	struct	pvo_entry *pvo;
	struct	lpte *pt;

	vm_page_lock_queues();

	/*
	 * Clear the cached value.
	 */
	moea64_attr_clear(m, ptebit);

	/*
	 * Sync so that any pending REF/CHG bits are flushed to the PTEs (so
	 * we can reset the right ones).  Note that the pvo entries and list
	 * heads are never themselves placed in the page table, so we don't
	 * have to worry about further accesses setting the REF/CHG bits.
	 */
	SYNC();

	/*
	 * For each pvo entry, clear the pvo's ptebit.  If this pvo has a
	 * valid pte clear the ptebit from the valid pte.
	 */
	count = 0;
	LIST_FOREACH(pvo, vm_page_to_pvoh(m), pvo_vlink) {
		MOEA_PVO_CHECK(pvo);	/* sanity check */

		LOCK_TABLE();
		pt = moea64_pvo_to_pte(pvo);
		if (pt != NULL) {
			moea64_pte_synch(pt, &pvo->pvo_pte.lpte);
			if (pvo->pvo_pte.lpte.pte_lo & ptebit) {
				count++;
				moea64_pte_clear(pt, pvo->pvo_vpn, ptebit);
			}
		}
		pvo->pvo_pte.lpte.pte_lo &= ~ptebit;
		MOEA_PVO_CHECK(pvo);	/* sanity check */
		UNLOCK_TABLE();
	}

	vm_page_unlock_queues();
	return (count);
}

boolean_t
moea64_dev_direct_mapped(mmu_t mmu, vm_offset_t pa, vm_size_t size)
{
	struct pvo_entry *pvo;
	vm_offset_t ppa;
	int error = 0;

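	/*
	 * The range is direct-mapped only if every page in it has a 1:1
	 * kernel mapping; any hole or non-identity mapping fails the check.
	 */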
	PMAP_LOCK(kernel_pmap);
	for (ppa = pa & ~ADDR_POFF; ppa < pa + size; ppa += PAGE_SIZE) {
		pvo = moea64_pvo_find_va(kernel_pmap, ppa);
		if (pvo == NULL ||
		    (pvo->pvo_pte.lpte.pte_lo & LPTE_RPGN) != ppa) {
			error = EFAULT;
			break;
		}
	}
	PMAP_UNLOCK(kernel_pmap);

	return (error);
}

/*
 * Map a set of physical memory pages into the kernel virtual
 * address space. Return a pointer to where it is mapped. This
 * routine is intended to be used for mapping device memory,
 * NOT real memory.
 */
void *
moea64_mapdev_attr(mmu_t mmu, vm_offset_t pa, vm_size_t size, vm_memattr_t ma)
{
	vm_offset_t va, tmpva, ppa, offset;

	ppa = trunc_page(pa);
	offset = pa & PAGE_MASK;
	size = roundup(offset + size, PAGE_SIZE);

	va = kmem_alloc_nofault(kernel_map, size);

	if (!va)
		panic("moea64_mapdev: Couldn't alloc kernel virtual memory");

	for (tmpva = va; size > 0;) {
		moea64_kenter_attr(mmu, tmpva, ppa, ma);
		size -= PAGE_SIZE;
		tmpva += PAGE_SIZE;
		ppa += PAGE_SIZE;
	}

	return ((void *)(va + offset));
}

void *
moea64_mapdev(mmu_t mmu, vm_offset_t pa, vm_size_t size)
{

	return (moea64_mapdev_attr(mmu, pa, size, VM_MEMATTR_DEFAULT));
}

void
moea64_unmapdev(mmu_t mmu, vm_offset_t va, vm_size_t size)
{
	vm_offset_t base, offset;

	base = trunc_page(va);
	offset = va & PAGE_MASK;
	size = roundup(offset + size, PAGE_SIZE);

	kmem_free(kernel_map, base, size);
}

static void
moea64_sync_icache(mmu_t mmu, pmap_t pm, vm_offset_t va, vm_size_t sz)
{
	struct pvo_entry *pvo;
	vm_offset_t lim;
	vm_paddr_t pa;
	vm_size_t len;

	PMAP_LOCK(pm);
	while (sz > 0) {
		/* round_page(va + 1) so a page-aligned va still advances */
		lim = round_page(va + 1);
		len = MIN(lim - va, sz);
		pvo = moea64_pvo_find_va(pm, va & ~ADDR_POFF);
		if (pvo != NULL) {
			pa = (pvo->pvo_pte.lpte.pte_lo & LPTE_RPGN) |
			    (va & ADDR_POFF);
			moea64_syncicache(pm, va, pa, len);
		}
		va += len;
		sz -= len;
	}
	PMAP_UNLOCK(pm);
}
3094