/*-
 * Copyright (c) 2001 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Matt Thomas <matt@3am-software.com> of Allegro Networks, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */
/*-
 * Copyright (C) 1995, 1996 Wolfgang Solfrank.
 * Copyright (C) 1995, 1996 TooLs GmbH.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by TooLs GmbH.
 * 4. The name of TooLs GmbH may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY TOOLS GMBH ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL TOOLS GMBH BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
 * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
 * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * $NetBSD: pmap.c,v 1.28 2000/03/26 20:42:36 kleink Exp $
 */
/*-
 * Copyright (C) 2001 Benno Rice.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY Benno Rice ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL TOOLS GMBH BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
 * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
 * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: releng/11.0/sys/powerpc/aim/moea64_native.c 290990 2015-11-17 16:09:26Z nwhitehorn $");

/*
 * Native 64-bit page table operations for running without a hypervisor.
 */

#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/sched.h>
#include <sys/sysctl.h>
#include <sys/systm.h>
#include <sys/rwlock.h>
#include <sys/endian.h>

#include <sys/kdb.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/vm_kern.h>
#include <vm/vm_page.h>
#include <vm/vm_map.h>
#include <vm/vm_object.h>
#include <vm/vm_extern.h>
#include <vm/vm_pageout.h>

#include <machine/md_var.h>
#include <machine/mmuvar.h>

#include "mmu_oea64.h"
#include "mmu_if.h"
#include "moea64_if.h"

#define	PTESYNC()	__asm __volatile("ptesync");
#define	TLBSYNC()	__asm __volatile("tlbsync; ptesync");
#define	SYNC()		__asm __volatile("sync");
#define	EIEIO()		__asm __volatile("eieio");

#define	VSID_HASH_MASK	0x0000007fffffffffULL

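/*
 * Invalidate the TLB entry for a given virtual page number on all
 * processors. The architecture permits only one tlbie/tlbsync sequence
 * in flight at a time system-wide, so callers serialize on the raw
 * spinlock below.
 */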
static __inline void
TLBIE(uint64_t vpn)
{
#ifndef __powerpc64__
	register_t vpn_hi, vpn_lo;
	register_t msr;
	register_t scratch, intr;
#endif

	static volatile u_int tlbie_lock = 0;

	vpn <<= ADDR_PIDX_SHFT;
	vpn &= ~(0xffffULL << 48);

	/* Hobo spinlock: we need stronger guarantees than mutexes provide */
	while (!atomic_cmpset_int(&tlbie_lock, 0, 1));
	isync(); /* Flush instruction queue once lock acquired */

#ifdef __powerpc64__
	__asm __volatile("tlbie %0" :: "r"(vpn) : "memory");
	__asm __volatile("eieio; tlbsync; ptesync" ::: "memory");
#else
	vpn_hi = (uint32_t)(vpn >> 32);
	vpn_lo = (uint32_t)vpn;

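	/*
	 * 32-bit kernel on a 64-bit CPU ("bridge mode"): tlbie needs the
	 * full 64-bit VPN, so briefly enter 64-bit mode (the insrdi below
	 * sets MSR[SF]) with interrupts disabled, since the upper halves
	 * of the registers would not survive an interrupt.
	 */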
	intr = intr_disable();
	__asm __volatile("\
	    mfmsr %0; \
	    mr %1, %0; \
	    insrdi %1,%5,1,0; \
	    mtmsrd %1; isync; \
	    \
	    sld %1,%2,%4; \
	    or %1,%1,%3; \
	    tlbie %1; \
	    \
	    mtmsrd %0; isync; \
	    eieio; \
	    tlbsync; \
	    ptesync;"
	: "=r"(msr), "=r"(scratch) : "r"(vpn_hi), "r"(vpn_lo), "r"(32), "r"(1)
	    : "memory");
	intr_restore(intr);
#endif

	/* No barriers or special ops -- taken care of by ptesync above */
	tlbie_lock = 0;
}

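/*
 * DISABLE_TRANS drops to real mode by clearing MSR[DR] (data relocation);
 * ENABLE_TRANS restores the previously saved MSR. These bracket accesses
 * to memory that is not yet, or cannot be, mapped.
 */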
#define DISABLE_TRANS(msr)	msr = mfmsr(); mtmsr(msr & ~PSL_DR)
#define ENABLE_TRANS(msr)	mtmsr(msr)

/*
 * PTEG data.
 */
static volatile struct lpte *moea64_pteg_table;
static struct rwlock moea64_eviction_lock;
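/*
 * Lock discipline: PTE operations on entries expected to stay resident
 * take the eviction lock for reading; an insertion that must evict a
 * live entry takes it for writing.
 */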

/*
 * PTE calls.
 */
static int	moea64_pte_insert_native(mmu_t, struct pvo_entry *);
static int64_t	moea64_pte_synch_native(mmu_t, struct pvo_entry *);
static int64_t	moea64_pte_clear_native(mmu_t, struct pvo_entry *, uint64_t);
static int64_t	moea64_pte_replace_native(mmu_t, struct pvo_entry *, int);
static int64_t	moea64_pte_unset_native(mmu_t mmu, struct pvo_entry *);

/*
 * Utility routines.
 */
static void	moea64_bootstrap_native(mmu_t mmup,
		    vm_offset_t kernelstart, vm_offset_t kernelend);
static void	moea64_cpu_bootstrap_native(mmu_t, int ap);
static void	tlbia(void);

static mmu_method_t moea64_native_methods[] = {
	/* Internal interfaces */
	MMUMETHOD(mmu_bootstrap,	moea64_bootstrap_native),
	MMUMETHOD(mmu_cpu_bootstrap,	moea64_cpu_bootstrap_native),

	MMUMETHOD(moea64_pte_synch,	moea64_pte_synch_native),
	MMUMETHOD(moea64_pte_clear,	moea64_pte_clear_native),
	MMUMETHOD(moea64_pte_unset,	moea64_pte_unset_native),
	MMUMETHOD(moea64_pte_replace,	moea64_pte_replace_native),
	MMUMETHOD(moea64_pte_insert,	moea64_pte_insert_native),

	{ 0, 0 }
};

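/*
 * Register this as the MMU_TYPE_G5 implementation; any method not listed
 * above is inherited from the generic oea64_mmu definition.
 */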
MMU_DEF_INHERIT(oea64_mmu_native, MMU_TYPE_G5, moea64_native_methods,
    0, oea64_mmu);

static int64_t
moea64_pte_synch_native(mmu_t mmu, struct pvo_entry *pvo)
{
	volatile struct lpte *pt = moea64_pteg_table + pvo->pvo_pte.slot;
	struct lpte properpt;
	uint64_t ptelo;

	PMAP_LOCK_ASSERT(pvo->pvo_pmap, MA_OWNED);

	moea64_pte_from_pvo(pvo, &properpt);

	rw_rlock(&moea64_eviction_lock);
	if ((be64toh(pt->pte_hi) & LPTE_AVPN_MASK) !=
	    (properpt.pte_hi & LPTE_AVPN_MASK)) {
		/* Evicted */
		rw_runlock(&moea64_eviction_lock);
		return (-1);
	}

	PTESYNC();
	ptelo = be64toh(pt->pte_lo);

	rw_runlock(&moea64_eviction_lock);

	return (ptelo & (LPTE_REF | LPTE_CHG));
}

static int64_t
moea64_pte_clear_native(mmu_t mmu, struct pvo_entry *pvo, uint64_t ptebit)
{
	volatile struct lpte *pt = moea64_pteg_table + pvo->pvo_pte.slot;
	struct lpte properpt;
	uint64_t ptelo;

	PMAP_LOCK_ASSERT(pvo->pvo_pmap, MA_OWNED);

	moea64_pte_from_pvo(pvo, &properpt);

	rw_rlock(&moea64_eviction_lock);
	if ((be64toh(pt->pte_hi) & LPTE_AVPN_MASK) !=
	    (properpt.pte_hi & LPTE_AVPN_MASK)) {
		/* Evicted */
		rw_runlock(&moea64_eviction_lock);
		return (-1);
	}

	if (ptebit == LPTE_REF) {
		/* See "Resetting the Reference Bit" in arch manual */
		PTESYNC();
		/*
		 * A non-atomic read-then-store is safe here: the
		 * architecture does not guarantee the R bit is precise.
		 */
		ptelo = be64toh(pt->pte_lo);

		/*
		 * One-byte store to avoid touching the C bit. The table is
		 * stored big-endian, so the byte holding LPTE_REF is at
		 * offset 6 there; in the host-order copy it is at offset 6
		 * on a big-endian host and offset 1 on a little-endian one.
		 */
		((volatile uint8_t *)(&pt->pte_lo))[6] =
#if BYTE_ORDER == BIG_ENDIAN
		    ((uint8_t *)(&properpt.pte_lo))[6];
#else
		    ((uint8_t *)(&properpt.pte_lo))[1];
#endif
		rw_runlock(&moea64_eviction_lock);

		critical_enter();
		TLBIE(pvo->pvo_vpn);
		critical_exit();
	} else {
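		/*
		 * Clearing LPTE_CHG cannot safely be done in place; follow
		 * the architecture's "Modifying a Page Table Entry" recipe
		 * by invalidating the entry and re-inserting it.
		 */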
		rw_runlock(&moea64_eviction_lock);
		ptelo = moea64_pte_unset_native(mmu, pvo);
		moea64_pte_insert_native(mmu, pvo);
	}

	return (ptelo & (LPTE_REF | LPTE_CHG));
}

static int64_t
moea64_pte_unset_native(mmu_t mmu, struct pvo_entry *pvo)
{
	volatile struct lpte *pt = moea64_pteg_table + pvo->pvo_pte.slot;
	struct lpte properpt;
	uint64_t ptelo;

	moea64_pte_from_pvo(pvo, &properpt);

	rw_rlock(&moea64_eviction_lock);
	if ((be64toh(pt->pte_hi) & LPTE_AVPN_MASK) !=
	    (properpt.pte_hi & LPTE_AVPN_MASK)) {
		/* Evicted */
		moea64_pte_overflow--;
		rw_runlock(&moea64_eviction_lock);
		return (-1);
	}

	/*
	 * Invalidate the pte, briefly locking it to collect RC bits. No
	 * atomics needed since this is protected against eviction by the lock.
	 */
	isync();
	critical_enter();
	pt->pte_hi = htobe64((be64toh(pt->pte_hi) & ~LPTE_VALID) | LPTE_LOCKED);
	PTESYNC();
	TLBIE(pvo->pvo_vpn);
	ptelo = be64toh(pt->pte_lo);
	/* Release lock: zero the low word of the big-endian pte_hi */
	*((volatile int32_t *)(&pt->pte_hi) + 1) = 0;
	critical_exit();
	rw_runlock(&moea64_eviction_lock);

	/* Keep statistics */
	moea64_pte_valid--;

	return (ptelo & (LPTE_CHG | LPTE_REF));
}

static int64_t
moea64_pte_replace_native(mmu_t mmu, struct pvo_entry *pvo, int flags)
{
	volatile struct lpte *pt = moea64_pteg_table + pvo->pvo_pte.slot;
	struct lpte properpt;
	int64_t ptelo;

	if (flags == 0) {
		/* Just some software bits changing. */
		moea64_pte_from_pvo(pvo, &properpt);

		rw_rlock(&moea64_eviction_lock);
		if ((be64toh(pt->pte_hi) & LPTE_AVPN_MASK) !=
		    (properpt.pte_hi & LPTE_AVPN_MASK)) {
			rw_runlock(&moea64_eviction_lock);
			return (-1);
		}
		pt->pte_hi = htobe64(properpt.pte_hi);
		ptelo = be64toh(pt->pte_lo);
		rw_runlock(&moea64_eviction_lock);
	} else {
		/* Otherwise, need deletion and reinsertion */
		ptelo = moea64_pte_unset_native(mmu, pvo);
		moea64_pte_insert_native(mmu, pvo);
	}

	return (ptelo);
}

static void
moea64_cpu_bootstrap_native(mmu_t mmup, int ap)
{
	int i = 0;
	#ifdef __powerpc64__
	struct slb *slb = PCPU_GET(slb);
	register_t seg0;
	#endif

	/*
	 * Initialize segment registers and MMU
	 */

	mtmsr(mfmsr() & ~PSL_DR & ~PSL_IR);

	/*
	 * Install kernel SLB entries
	 */

	#ifdef __powerpc64__
		__asm __volatile ("slbia");
		__asm __volatile ("slbmfee %0,%1; slbie %0;" : "=r"(seg0) :
		    "r"(0));

		for (i = 0; i < 64; i++) {
			if (!(slb[i].slbe & SLBE_VALID))
				continue;

			__asm __volatile ("slbmte %0, %1" ::
			    "r"(slb[i].slbv), "r"(slb[i].slbe));
		}
	#else
		for (i = 0; i < 16; i++)
			mtsrin(i << ADDR_SR_SHFT, kernel_pmap->pm_sr[i]);
	#endif

	/*
	 * Install page table
	 */

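	/*
	 * SDR1 takes the physical base of the hash table in its upper bits
	 * and, in its low bits, HTABSIZE = log2(number of PTEGs) - 11,
	 * which is what flsl(moea64_pteg_mask >> 11) computes.
	 */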
	__asm __volatile ("ptesync; mtsdr1 %0; isync"
	    :: "r"((uintptr_t)moea64_pteg_table
		     | (uintptr_t)(flsl(moea64_pteg_mask >> 11))));
	tlbia();
}

static void
moea64_bootstrap_native(mmu_t mmup, vm_offset_t kernelstart,
    vm_offset_t kernelend)
{
	vm_size_t	size;
	vm_offset_t	off;
	vm_paddr_t	pa;
	register_t	msr;

	moea64_early_bootstrap(mmup, kernelstart, kernelend);

	/*
	 * Allocate PTEG table.
	 */

	size = moea64_pteg_count * sizeof(struct lpteg);
	CTR2(KTR_PMAP, "moea64_bootstrap: %d PTEGs, %d bytes",
	    moea64_pteg_count, size);
	rw_init(&moea64_eviction_lock, "pte eviction");

	/*
	 * We now need to allocate and zero memory for the page table itself.
	 * Any mapping of that memory would have to live in the very page
	 * table we are about to create, and we have no BATs to fall back on,
	 * so briefly drop to data real mode while touching it. We resort to
	 * this a couple of times during bootstrap.
	 */

	moea64_pteg_table = (struct lpte *)moea64_bootstrap_alloc(size, size);
	DISABLE_TRANS(msr);
	bzero(__DEVOLATILE(void *, moea64_pteg_table), moea64_pteg_count *
	    sizeof(struct lpteg));
	ENABLE_TRANS(msr);

	CTR1(KTR_PMAP, "moea64_bootstrap: PTEG table at %p", moea64_pteg_table);

	moea64_mid_bootstrap(mmup, kernelstart, kernelend);

	/*
	 * Add a mapping for the page table itself if there is no direct map.
	 */
	if (!hw_direct_map) {
		size = moea64_pteg_count * sizeof(struct lpteg);
		off = (vm_offset_t)(moea64_pteg_table);
		DISABLE_TRANS(msr);
		for (pa = off; pa < off + size; pa += PAGE_SIZE)
			pmap_kenter(pa, pa);
		ENABLE_TRANS(msr);
	}

	/* Bring up virtual memory */
	moea64_late_bootstrap(mmup, kernelstart, kernelend);
}

static void
tlbia(void)
{
	vm_offset_t i;
	#ifndef __powerpc64__
	register_t msr, scratch;
	#endif

	TLBSYNC();

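	/*
	 * tlbiel invalidates the TLB set selected by the effective address,
	 * so stepping a 4 KB-aligned address through 0xFF000 covers every
	 * set on the processors this code supports.
	 */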
	for (i = 0; i < 0xFF000; i += 0x00001000) {
		#ifdef __powerpc64__
		__asm __volatile("tlbiel %0" :: "r"(i));
		#else
		__asm __volatile("\
		    mfmsr %0; \
		    mr %1, %0; \
		    insrdi %1,%3,1,0; \
		    mtmsrd %1; \
		    isync; \
		    \
		    tlbiel %2; \
		    \
		    mtmsrd %0; \
		    isync;"
		: "=r"(msr), "=r"(scratch) : "r"(i), "r"(1));
		#endif
	}

	EIEIO();
	TLBSYNC();
}

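/*
 * Try to set LPTE_LOCKED in the low word of the (big-endian) pte_hi,
 * succeeding only if none of the bits in 'bitmask' are currently set.
 * Returns nonzero on success; the pte_hi value observed is returned
 * via *oldhi either way.
 */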
static int
atomic_pte_lock(volatile struct lpte *pte, uint64_t bitmask, uint64_t *oldhi)
{
	int	ret;
	uint32_t oldhihalf;

	/*
	 * Note: in principle, if just the locked bit were set here, we
	 * could avoid needing the eviction lock. However, eviction occurs
	 * so rarely that it isn't worth bothering about in practice.
	 */

	__asm __volatile (
		"1:\tlwarx %1, 0, %3\n\t"	/* load old value */
		"and. %0,%1,%4\n\t"		/* check if any bits set */
		"bne 2f\n\t"			/* exit if any set */
		"stwcx. %5, 0, %3\n\t"      	/* attempt to store */
		"bne- 1b\n\t"			/* spin if failed */
		"li %0, 1\n\t"			/* success - retval = 1 */
		"b 3f\n\t"			/* we've succeeded */
		"2:\n\t"
		"stwcx. %1, 0, %3\n\t"       	/* clear reservation (74xx) */
		"li %0, 0\n\t"			/* failure - retval = 0 */
		"3:\n\t"
		: "=&r" (ret), "=&r"(oldhihalf), "=m" (pte->pte_hi)
		: "r" ((volatile char *)&pte->pte_hi + 4),
		  "r" ((uint32_t)bitmask), "r" ((uint32_t)LPTE_LOCKED),
		  "m" (pte->pte_hi)
		: "cr0", "cr1", "cr2", "memory");

	*oldhi = (pte->pte_hi & 0xffffffff00000000ULL) | oldhihalf;

	return (ret);
}

static uintptr_t
moea64_insert_to_pteg_native(struct lpte *pvo_pt, uintptr_t slotbase,
    uint64_t mask)
{
	volatile struct lpte *pt;
	uint64_t oldptehi, va;
	uintptr_t k;
	int i, j;

	/* Start at a random slot */
	i = mftb() % 8;
	for (j = 0; j < 8; j++) {
		k = slotbase + (i + j) % 8;
		pt = &moea64_pteg_table[k];
		/* Invalidate and seize lock only if no bits in mask set */
		if (atomic_pte_lock(pt, mask, &oldptehi)) /* Lock obtained */
			break;
	}

	if (j == 8)
		return (-1);

	if (oldptehi & LPTE_VALID) {
		KASSERT(!(oldptehi & LPTE_WIRED), ("Unmapped wired entry"));
		/*
		 * Need to invalidate old entry completely: see
		 * "Modifying a Page Table Entry". Need to reconstruct
		 * the virtual address for the outgoing entry to do that.
		 * The PTEG index (k >> 3) is the hash of the VSID and page
		 * index (complemented via pteg_mask for secondary-hash
		 * entries), so XORing it back out recovers the VA bits
		 * that the AVPN does not store.
		 */
		if (oldptehi & LPTE_BIG)
			va = oldptehi >> moea64_large_page_shift;
		else
			va = oldptehi >> ADDR_PIDX_SHFT;
		if (oldptehi & LPTE_HID)
			va = (((k >> 3) ^ moea64_pteg_mask) ^ va) &
			    VSID_HASH_MASK;
		else
			va = ((k >> 3) ^ va) & VSID_HASH_MASK;
		va |= (oldptehi & LPTE_AVPN_MASK) <<
		    (ADDR_API_SHFT64 - ADDR_PIDX_SHFT);
		PTESYNC();
		TLBIE(va);
		moea64_pte_valid--;
		moea64_pte_overflow++;
	}

	/*
	 * Update the PTE as per "Adding a Page Table Entry". Lock is released
	 * by setting the high doubleword.
	 */
	pt->pte_lo = htobe64(pvo_pt->pte_lo);
	EIEIO();
	pt->pte_hi = htobe64(pvo_pt->pte_hi);
	PTESYNC();

	/* Keep statistics */
	moea64_pte_valid++;

	return (k);
}

static int
moea64_pte_insert_native(mmu_t mmu, struct pvo_entry *pvo)
{
	struct lpte insertpt;
	uintptr_t slot;

	/* Initialize PTE */
	moea64_pte_from_pvo(pvo, &insertpt);

	/* Make sure further insertion is locked out during evictions */
	rw_rlock(&moea64_eviction_lock);

	/*
	 * First try primary hash.
	 */
	pvo->pvo_pte.slot &= ~7ULL; /* Base slot address */
	slot = moea64_insert_to_pteg_native(&insertpt, pvo->pvo_pte.slot,
	    LPTE_VALID | LPTE_WIRED | LPTE_LOCKED);
	if (slot != -1) {
		rw_runlock(&moea64_eviction_lock);
		pvo->pvo_pte.slot = slot;
		return (0);
	}

	/*
	 * Now try secondary hash.
	 */
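	/*
	 * The secondary PTEG is found by complementing the hash: flip the
	 * PVO and PTE HID flags and XOR the slot with pteg_mask << 3 (each
	 * PTEG holds 8 slots) to land in the complementary group.
	 */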
	pvo->pvo_vaddr ^= PVO_HID;
	insertpt.pte_hi ^= LPTE_HID;
	pvo->pvo_pte.slot ^= (moea64_pteg_mask << 3);
	slot = moea64_insert_to_pteg_native(&insertpt, pvo->pvo_pte.slot,
	    LPTE_VALID | LPTE_WIRED | LPTE_LOCKED);
	if (slot != -1) {
		rw_runlock(&moea64_eviction_lock);
		pvo->pvo_pte.slot = slot;
		return (0);
	}

	/*
	 * Out of luck. Find a PTE to sacrifice.
	 */

	/* Lock out all insertions for a bit */
	if (!rw_try_upgrade(&moea64_eviction_lock)) {
		rw_runlock(&moea64_eviction_lock);
		rw_wlock(&moea64_eviction_lock);
	}

	slot = moea64_insert_to_pteg_native(&insertpt, pvo->pvo_pte.slot,
	    LPTE_WIRED | LPTE_LOCKED);
	if (slot != -1) {
		rw_wunlock(&moea64_eviction_lock);
		pvo->pvo_pte.slot = slot;
		return (0);
	}

	/* Try other hash table. Now we're getting desperate... */
	pvo->pvo_vaddr ^= PVO_HID;
	insertpt.pte_hi ^= LPTE_HID;
	pvo->pvo_pte.slot ^= (moea64_pteg_mask << 3);
	slot = moea64_insert_to_pteg_native(&insertpt, pvo->pvo_pte.slot,
	    LPTE_WIRED | LPTE_LOCKED);
	if (slot != -1) {
		rw_wunlock(&moea64_eviction_lock);
		pvo->pvo_pte.slot = slot;
		return (0);
	}

	/* No freeable slots in either PTEG? We're hosed. */
	rw_wunlock(&moea64_eviction_lock);
	panic("moea64_pte_insert: overflow");
	return (-1);
}