/*-
 * Copyright (c) 2001 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Matt Thomas <matt@3am-software.com> of Allegro Networks, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *        This product includes software developed by the NetBSD
 *        Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */
/*-
 * Copyright (C) 1995, 1996 Wolfgang Solfrank.
 * Copyright (C) 1995, 1996 TooLs GmbH.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by TooLs GmbH.
 * 4. The name of TooLs GmbH may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY TOOLS GMBH ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL TOOLS GMBH BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
 * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
 * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * $NetBSD: pmap.c,v 1.28 2000/03/26 20:42:36 kleink Exp $
 */
/*-
 * Copyright (C) 2001 Benno Rice.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY Benno Rice ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL TOOLS GMBH BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
 * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
 * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

/*
 * Native 64-bit page table operations for running without a hypervisor.
 */

#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/sched.h>
#include <sys/sysctl.h>
#include <sys/systm.h>

#include <sys/kdb.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/vm_kern.h>
#include <vm/vm_page.h>
#include <vm/vm_map.h>
#include <vm/vm_object.h>
#include <vm/vm_extern.h>
#include <vm/vm_pageout.h>

#include <machine/md_var.h>
#include <machine/mmuvar.h>

#include "mmu_oea64.h"
#include "mmu_if.h"
#include "moea64_if.h"

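/*
 * Wrappers for the PowerPC synchronization instructions used below: eieio
 * orders the two halves of a PTE update with respect to each other, ptesync
 * ensures that completed page table updates are visible before execution
 * continues, and tlbsync waits for outstanding tlbie invalidations to
 * complete on all processors.
 */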
#define	PTESYNC()	__asm __volatile("ptesync");
#define	TLBSYNC()	__asm __volatile("tlbsync; ptesync");
#define	SYNC()		__asm __volatile("sync");
#define	EIEIO()		__asm __volatile("eieio");

#define	VSID_HASH_MASK	0x0000007fffffffffULL

static __inline void
TLBIE(uint64_t vpn) {
#ifndef __powerpc64__
	register_t vpn_hi, vpn_lo;
	register_t msr;
	register_t scratch, intr;
#endif

	static volatile u_int tlbie_lock = 0;

	vpn <<= ADDR_PIDX_SHFT;
	vpn &= ~(0xffffULL << 48);

	/* Hobo spinlock: we need stronger guarantees than mutexes provide */
	while (!atomic_cmpset_int(&tlbie_lock, 0, 1));
	isync(); /* Flush instruction queue once lock acquired */

#ifdef __powerpc64__
	__asm __volatile("tlbie %0" :: "r"(vpn) : "memory");
	__asm __volatile("eieio; tlbsync; ptesync" ::: "memory");
#else
	vpn_hi = (uint32_t)(vpn >> 32);
	vpn_lo = (uint32_t)vpn;

	intr = intr_disable();
	__asm __volatile("\
	    mfmsr %0; \
	    mr %1, %0; \
	    insrdi %1,%5,1,0; \
	    mtmsrd %1; isync; \
	    \
	    sld %1,%2,%4; \
	    or %1,%1,%3; \
	    tlbie %1; \
	    \
	    mtmsrd %0; isync; \
	    eieio; \
	    tlbsync; \
	    ptesync;"
	: "=r"(msr), "=r"(scratch) : "r"(vpn_hi), "r"(vpn_lo), "r"(32), "r"(1)
	    : "memory");
	intr_restore(intr);
#endif

	/* No barriers or special ops -- taken care of by ptesync above */
	tlbie_lock = 0;
}

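/*
 * DISABLE_TRANS saves the MSR and clears PSL_DR, turning off data address
 * translation so that bootstrap code can reach the PTEG table through its
 * physical address; ENABLE_TRANS restores the saved MSR.
 */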
#define DISABLE_TRANS(msr)	msr = mfmsr(); mtmsr(msr & ~PSL_DR)
#define ENABLE_TRANS(msr)	mtmsr(msr)

/*
 * PTEG data.
 */
static struct	lpteg *moea64_pteg_table;

/*
 * PTE calls.
 */
static int	moea64_pte_insert_native(mmu_t, u_int, struct lpte *);
static uintptr_t moea64_pvo_to_pte_native(mmu_t, const struct pvo_entry *);
static void	moea64_pte_synch_native(mmu_t, uintptr_t pt,
		    struct lpte *pvo_pt);
static void	moea64_pte_clear_native(mmu_t, uintptr_t pt,
		    struct lpte *pvo_pt, uint64_t vpn, uint64_t ptebit);
static void	moea64_pte_change_native(mmu_t, uintptr_t pt,
		    struct lpte *pvo_pt, uint64_t vpn);
static void	moea64_pte_unset_native(mmu_t mmu, uintptr_t pt,
		    struct lpte *pvo_pt, uint64_t vpn);

/*
 * Utility routines.
 */
static void		moea64_bootstrap_native(mmu_t mmup,
			    vm_offset_t kernelstart, vm_offset_t kernelend);
static void		moea64_cpu_bootstrap_native(mmu_t, int ap);
static void		tlbia(void);

static mmu_method_t moea64_native_methods[] = {
	/* Internal interfaces */
	MMUMETHOD(mmu_bootstrap,	moea64_bootstrap_native),
	MMUMETHOD(mmu_cpu_bootstrap,	moea64_cpu_bootstrap_native),

	MMUMETHOD(moea64_pte_synch,	moea64_pte_synch_native),
	MMUMETHOD(moea64_pte_clear,	moea64_pte_clear_native),
	MMUMETHOD(moea64_pte_unset,	moea64_pte_unset_native),
	MMUMETHOD(moea64_pte_change,	moea64_pte_change_native),
	MMUMETHOD(moea64_pte_insert,	moea64_pte_insert_native),
	MMUMETHOD(moea64_pvo_to_pte,	moea64_pvo_to_pte_native),

	{ 0, 0 }
};

MMU_DEF_INHERIT(oea64_mmu_native, MMU_TYPE_G5, moea64_native_methods,
    0, oea64_mmu);

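/*
 * Compute the primary PTEG index for a mapping: XOR the low-order VSID bits
 * with the virtual page number within the segment (shifted appropriately for
 * large pages) and mask to the size of the table. The secondary PTEG index
 * used elsewhere is this value XORed with moea64_pteg_mask.
 */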
static __inline u_int
va_to_pteg(uint64_t vsid, vm_offset_t addr, int large)
{
	uint64_t hash;
	int shift;

	shift = large ? moea64_large_page_shift : ADDR_PIDX_SHFT;
	hash = (vsid & VSID_HASH_MASK) ^ (((uint64_t)addr & ADDR_PIDX) >>
	    shift);
	return (hash & moea64_pteg_mask);
}

static void
moea64_pte_synch_native(mmu_t mmu, uintptr_t pt_cookie, struct lpte *pvo_pt)
{
	struct lpte *pt = (struct lpte *)pt_cookie;

	pvo_pt->pte_lo |= pt->pte_lo & (LPTE_REF | LPTE_CHG);
}

static void
moea64_pte_clear_native(mmu_t mmu, uintptr_t pt_cookie, struct lpte *pvo_pt,
    uint64_t vpn, uint64_t ptebit)
{
	struct lpte *pt = (struct lpte *)pt_cookie;

	/*
	 * As shown in Section 7.6.3.2.3 of the architecture manual.
	 */
	pt->pte_lo &= ~ptebit;
	critical_enter();
	TLBIE(vpn);
	critical_exit();
}

static void
moea64_pte_set_native(struct lpte *pt, struct lpte *pvo_pt)
{

	pvo_pt->pte_hi |= LPTE_VALID;

	/*
	 * Update the PTE as defined in section 7.6.3.1.
	 * Note that the REF/CHG bits are from pvo_pt and thus should have
	 * been saved so this routine can restore them (if desired).
	 */
	pt->pte_lo = pvo_pt->pte_lo;
	EIEIO();
	pt->pte_hi = pvo_pt->pte_hi;
	PTESYNC();

	/* Keep statistics for unlocked pages */
	if (!(pvo_pt->pte_hi & LPTE_LOCKED))
		moea64_pte_valid++;
}

static void
moea64_pte_unset_native(mmu_t mmu, uintptr_t pt_cookie, struct lpte *pvo_pt,
    uint64_t vpn)
{
	struct lpte *pt = (struct lpte *)pt_cookie;

	/*
	 * Invalidate the pte.
	 */
	isync();
	critical_enter();
	pvo_pt->pte_hi &= ~LPTE_VALID;
	pt->pte_hi &= ~LPTE_VALID;
	PTESYNC();
	TLBIE(vpn);
	critical_exit();

	/*
	 * Save the reg & chg bits.
	 */
	moea64_pte_synch_native(mmu, pt_cookie, pvo_pt);

	/* Keep statistics for unlocked pages */
	if (!(pvo_pt->pte_hi & LPTE_LOCKED))
		moea64_pte_valid--;
}

static void
moea64_pte_change_native(mmu_t mmu, uintptr_t pt, struct lpte *pvo_pt,
    uint64_t vpn)
{

	/*
	 * Invalidate the PTE
	 */
	moea64_pte_unset_native(mmu, pt, pvo_pt, vpn);
	moea64_pte_set_native((struct lpte *)pt, pvo_pt);
}

static void
moea64_cpu_bootstrap_native(mmu_t mmup, int ap)
{
	int i = 0;
	#ifdef __powerpc64__
	struct slb *slb = PCPU_GET(slb);
	register_t seg0;
	#endif

	/*
	 * Initialize segment registers and MMU
	 */

	mtmsr(mfmsr() & ~PSL_DR & ~PSL_IR);

	/*
	 * Install kernel SLB entries
	 */

	#ifdef __powerpc64__
		__asm __volatile ("slbia");
		__asm __volatile ("slbmfee %0,%1; slbie %0;" : "=r"(seg0) :
		    "r"(0));

		for (i = 0; i < 64; i++) {
			if (!(slb[i].slbe & SLBE_VALID))
				continue;

			__asm __volatile ("slbmte %0, %1" ::
			    "r"(slb[i].slbv), "r"(slb[i].slbe));
		}
	#else
		for (i = 0; i < 16; i++)
			mtsrin(i << ADDR_SR_SHFT, kernel_pmap->pm_sr[i]);
	#endif

	/*
	 * Install page table
	 */

	__asm __volatile ("ptesync; mtsdr1 %0; isync"
	    :: "r"((uintptr_t)moea64_pteg_table
		     | (uintptr_t)(flsl(moea64_pteg_mask >> 11))));
	tlbia();
}

static void
moea64_bootstrap_native(mmu_t mmup, vm_offset_t kernelstart,
    vm_offset_t kernelend)
{
	vm_size_t	size;
	vm_offset_t	off;
	vm_paddr_t	pa;
	register_t	msr;

	moea64_early_bootstrap(mmup, kernelstart, kernelend);

	/*
	 * Allocate PTEG table.
	 */

	size = moea64_pteg_count * sizeof(struct lpteg);
	CTR2(KTR_PMAP, "moea64_bootstrap: %d PTEGs, %d bytes",
	    moea64_pteg_count, size);

	/*
	 * We now need to allocate memory for the page table itself. Any
	 * mapping of that memory would have to live in the very table we
	 * are about to allocate, and there are no BATs on these CPUs, so
	 * drop to data real mode while zeroing it. We use the same trick
	 * in a few other places during bootstrap.
	 */

	moea64_pteg_table = (struct lpteg *)moea64_bootstrap_alloc(size, size);
	DISABLE_TRANS(msr);
	bzero((void *)moea64_pteg_table, moea64_pteg_count * sizeof(struct lpteg));
	ENABLE_TRANS(msr);

	CTR1(KTR_PMAP, "moea64_bootstrap: PTEG table at %p", moea64_pteg_table);

	moea64_mid_bootstrap(mmup, kernelstart, kernelend);

	/*
	 * Add a mapping for the page table itself if there is no direct map.
	 */
	if (!hw_direct_map) {
		size = moea64_pteg_count * sizeof(struct lpteg);
		off = (vm_offset_t)(moea64_pteg_table);
		DISABLE_TRANS(msr);
		for (pa = off; pa < off + size; pa += PAGE_SIZE)
			pmap_kenter(pa, pa);
		ENABLE_TRANS(msr);
	}

	/* Bring up virtual memory */
	moea64_late_bootstrap(mmup, kernelstart, kernelend);
}

static void
tlbia(void)
{
	vm_offset_t i;
	#ifndef __powerpc64__
	register_t msr, scratch;
	#endif

	TLBSYNC();

	for (i = 0; i < 0xFF000; i += 0x00001000) {
		#ifdef __powerpc64__
		__asm __volatile("tlbiel %0" :: "r"(i));
		#else
		__asm __volatile("\
		    mfmsr %0; \
		    mr %1, %0; \
		    insrdi %1,%3,1,0; \
		    mtmsrd %1; \
		    isync; \
		    \
		    tlbiel %2; \
		    \
		    mtmsrd %0; \
		    isync;"
		: "=r"(msr), "=r"(scratch) : "r"(i), "r"(1));
		#endif
	}

	EIEIO();
	TLBSYNC();
}

static uintptr_t
moea64_pvo_to_pte_native(mmu_t mmu, const struct pvo_entry *pvo)
{
	struct lpte 	*pt;
	int		pteidx, ptegidx;
	uint64_t	vsid;

	/* If the PTEG index is not set, then there is no page table entry */
	if (!PVO_PTEGIDX_ISSET(pvo))
		return (-1);

	/*
	 * Calculate the ptegidx
	 */
	vsid = PVO_VSID(pvo);
	ptegidx = va_to_pteg(vsid, PVO_VADDR(pvo),
	    pvo->pvo_vaddr & PVO_LARGE);

	/*
	 * We can find the actual pte entry without searching by grabbing
	 * the PTEG index from 3 unused bits in pvo_vaddr and by
	 * noticing the HID bit.
	 */
	if (pvo->pvo_pte.lpte.pte_hi & LPTE_HID)
		ptegidx ^= moea64_pteg_mask;

	pteidx = (ptegidx << 3) | PVO_PTEGIDX_GET(pvo);

	if ((pvo->pvo_pte.lpte.pte_hi & LPTE_VALID) &&
	    !PVO_PTEGIDX_ISSET(pvo)) {
		panic("moea64_pvo_to_pte: pvo %p has valid pte in pvo but no "
		    "valid pte index", pvo);
	}

	if ((pvo->pvo_pte.lpte.pte_hi & LPTE_VALID) == 0 &&
	    PVO_PTEGIDX_ISSET(pvo)) {
		panic("moea64_pvo_to_pte: pvo %p has valid pte index in pvo "
		    "but no valid pte", pvo);
	}

	pt = &moea64_pteg_table[pteidx >> 3].pt[pteidx & 7];
	if ((pt->pte_hi ^ (pvo->pvo_pte.lpte.pte_hi & ~LPTE_VALID)) ==
	    LPTE_VALID) {
		if ((pvo->pvo_pte.lpte.pte_hi & LPTE_VALID) == 0) {
			panic("moea64_pvo_to_pte: pvo %p has valid pte in "
			    "moea64_pteg_table %p but invalid in pvo", pvo, pt);
		}

		if (((pt->pte_lo ^ pvo->pvo_pte.lpte.pte_lo) &
		    ~(LPTE_M|LPTE_CHG|LPTE_REF)) != 0) {
			panic("moea64_pvo_to_pte: pvo %p pte does not match "
			    "pte %p in moea64_pteg_table difference is %#x",
			    pvo, pt,
			    (uint32_t)(pt->pte_lo ^ pvo->pvo_pte.lpte.pte_lo));
		}

		return ((uintptr_t)pt);
	}

	if (pvo->pvo_pte.lpte.pte_hi & LPTE_VALID) {
		panic("moea64_pvo_to_pte: pvo %p has invalid pte %p in "
		    "moea64_pteg_table but valid in pvo", pvo, pt);
	}

	return (-1);
}

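/*
 * Select a PTE slot in the given PTEG that may be evicted: start at a
 * pseudo-random slot derived from the timebase, skip locked and wired
 * entries, and prefer a slot whose referenced bit is clear. Returns the
 * chosen slot index, or -1 if every slot is locked or wired.
 */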
static __inline int
moea64_pte_spillable_ident(u_int ptegidx)
{
	struct	lpte *pt;
	int	i, j, k;

	/* Start at a random slot */
	i = mftb() % 8;
	k = -1;
	for (j = 0; j < 8; j++) {
		pt = &moea64_pteg_table[ptegidx].pt[(i + j) % 8];
		if (pt->pte_hi & (LPTE_LOCKED | LPTE_WIRED))
			continue;

		/* This is a candidate, so remember it */
		k = (i + j) % 8;

		/* Try to get a page that has not been used lately */
		if (!(pt->pte_lo & LPTE_REF))
			return (k);
	}

	return (k);
}

static int
moea64_pte_insert_native(mmu_t mmu, u_int ptegidx, struct lpte *pvo_pt)
{
	struct	lpte *pt;
	struct	pvo_entry *pvo;
	u_int	pteg_bktidx;
	int	i;

	/*
	 * First try primary hash.
	 */
	pteg_bktidx = ptegidx;
	for (pt = moea64_pteg_table[pteg_bktidx].pt, i = 0; i < 8; i++, pt++) {
		if ((pt->pte_hi & (LPTE_VALID | LPTE_LOCKED)) == 0) {
			pvo_pt->pte_hi &= ~LPTE_HID;
			moea64_pte_set_native(pt, pvo_pt);
			return (i);
		}
	}

	/*
	 * Now try secondary hash.
	 */
	pteg_bktidx ^= moea64_pteg_mask;
	for (pt = moea64_pteg_table[pteg_bktidx].pt, i = 0; i < 8; i++, pt++) {
		if ((pt->pte_hi & (LPTE_VALID | LPTE_LOCKED)) == 0) {
			pvo_pt->pte_hi |= LPTE_HID;
			moea64_pte_set_native(pt, pvo_pt);
			return (i);
		}
	}

	/*
	 * Out of luck. Find a PTE to sacrifice.
	 */
	pteg_bktidx = ptegidx;
	i = moea64_pte_spillable_ident(pteg_bktidx);
	if (i < 0) {
		pteg_bktidx ^= moea64_pteg_mask;
		i = moea64_pte_spillable_ident(pteg_bktidx);
	}

	if (i < 0) {
		/* No freeable slots in either PTEG? We're hosed. */
		panic("moea64_pte_insert: overflow");
		return (-1);
	}

	if (pteg_bktidx == ptegidx)
		pvo_pt->pte_hi &= ~LPTE_HID;
	else
		pvo_pt->pte_hi |= LPTE_HID;

	/*
	 * Synchronize the sacrifice PTE with its PVO, then mark both
	 * invalid. The PVO will be reused when/if the VM system comes
	 * here after a fault.
	 */
	pt = &moea64_pteg_table[pteg_bktidx].pt[i];

	if (pt->pte_hi & LPTE_HID)
		pteg_bktidx ^= moea64_pteg_mask; /* PTEs indexed by primary */

	LIST_FOREACH(pvo, &moea64_pvo_table[pteg_bktidx], pvo_olink) {
		if (pvo->pvo_pte.lpte.pte_hi == pt->pte_hi) {
			KASSERT(pvo->pvo_pte.lpte.pte_hi & LPTE_VALID,
			    ("Invalid PVO for valid PTE!"));
			moea64_pte_unset_native(mmu, (uintptr_t)pt,
			    &pvo->pvo_pte.lpte, pvo->pvo_vpn);
			PVO_PTEGIDX_CLR(pvo);
			moea64_pte_overflow++;
			break;
		}
	}

	KASSERT(pvo->pvo_pte.lpte.pte_hi == pt->pte_hi,
	   ("Unable to find PVO for spilled PTE"));

	/*
	 * Set the new PTE.
	 */
	moea64_pte_set_native(pt, pvo_pt);

	return (i);
}
