moea64_native.c, r263289 -> r279252 (removed lines are marked '-', added lines '+')
/*-
 * Copyright (c) 2001 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Matt Thomas <matt@3am-software.com> of Allegro Networks, Inc.
 *
 * Redistribution and use in source and binary forms, with or without

--- 70 unchanged lines hidden ---

 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
 * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
 * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
-__FBSDID("$FreeBSD: head/sys/powerpc/aim/moea64_native.c 263289 2014-03-18 01:40:25Z emaste $");
+__FBSDID("$FreeBSD: head/sys/powerpc/aim/moea64_native.c 279252 2015-02-24 21:37:20Z nwhitehorn $");

/*
 * Native 64-bit page table operations for running without a hypervisor.
 */

#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/sched.h>
#include <sys/sysctl.h>
#include <sys/systm.h>
+#include <sys/rwlock.h>
+#include <sys/endian.h>

#include <sys/kdb.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/vm_kern.h>
#include <vm/vm_page.h>
#include <vm/vm_map.h>

--- 64 unchanged lines hidden ---

}

#define	DISABLE_TRANS(msr)	msr = mfmsr(); mtmsr(msr & ~PSL_DR)
#define	ENABLE_TRANS(msr)	mtmsr(msr)

/*
 * PTEG data.
 */
-static struct lpteg *moea64_pteg_table;
+static volatile struct lpte *moea64_pteg_table;
+static struct rwlock	moea64_eviction_lock;
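
Note: the PTEG table changes here from an array of eight-entry groups (struct lpteg) to a flat array of PTEs indexed by the slot number each PVO now carries. A minimal standalone sketch of the indexing equivalence (16-byte PTE layout as in the kernel's lpte; the group count is made up):

    #include <assert.h>
    #include <stdint.h>

    struct lpte { uint64_t pte_hi, pte_lo; };       /* 16-byte PTE */
    struct lpteg { struct lpte pt[8]; };            /* 128-byte group */

    int main(void) {
        static struct lpteg grouped[16];            /* assumed: 16 groups */
        struct lpte *flat = (struct lpte *)grouped; /* the new, flat view */

        /* Slot k in the flat view is entry (k & 7) of group (k >> 3). */
        for (unsigned k = 0; k < 16 * 8; k++)
            assert(&flat[k] == &grouped[k >> 3].pt[k & 7]);
        return (0);
    }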

/*
 * PTE calls.
 */
-static int	moea64_pte_insert_native(mmu_t, u_int, struct lpte *);
-static uintptr_t moea64_pvo_to_pte_native(mmu_t, const struct pvo_entry *);
-static void	moea64_pte_synch_native(mmu_t, uintptr_t pt,
-		    struct lpte *pvo_pt);
-static void	moea64_pte_clear_native(mmu_t, uintptr_t pt,
-		    struct lpte *pvo_pt, uint64_t vpn, uint64_t ptebit);
-static void	moea64_pte_change_native(mmu_t, uintptr_t pt,
-		    struct lpte *pvo_pt, uint64_t vpn);
-static void	moea64_pte_unset_native(mmu_t mmu, uintptr_t pt,
-		    struct lpte *pvo_pt, uint64_t vpn);
+static int	moea64_pte_insert_native(mmu_t, struct pvo_entry *);
+static int64_t	moea64_pte_synch_native(mmu_t, struct pvo_entry *);
+static int64_t	moea64_pte_clear_native(mmu_t, struct pvo_entry *, uint64_t);
+static int64_t	moea64_pte_replace_native(mmu_t, struct pvo_entry *, int);
+static int64_t	moea64_pte_unset_native(mmu_t mmu, struct pvo_entry *);
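
Note: the prototypes above change the native PTE interface from passing a raw page-table cookie to passing the PVO itself, and synch/clear/unset/replace now return an int64_t: -1 when the hardware entry was evicted behind the PVO's back, otherwise the referenced/changed (RC) bits observed. A toy sketch of that convention (R/C values as in the kernel's pte.h; the handler is invented for illustration):

    #include <stdint.h>
    #include <stdio.h>

    #define LPTE_REF 0x100ULL
    #define LPTE_CHG 0x080ULL

    /* Stand-in for a caller such as the machine-independent pmap layer. */
    static void note_rc_bits(int64_t rc) {
        if (rc == -1) {
            printf("entry evicted; fall back to software state\n");
            return;
        }
        printf("referenced=%d modified=%d\n",
            !!(rc & LPTE_REF), !!(rc & LPTE_CHG));
    }

    int main(void) {
        note_rc_bits(LPTE_REF | LPTE_CHG);  /* referenced=1 modified=1 */
        note_rc_bits(-1);                   /* evicted */
        return (0);
    }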

/*
 * Utility routines.
 */
static void	moea64_bootstrap_native(mmu_t mmup,
		    vm_offset_t kernelstart, vm_offset_t kernelend);
static void	moea64_cpu_bootstrap_native(mmu_t, int ap);
static void	tlbia(void);

static mmu_method_t moea64_native_methods[] = {
	/* Internal interfaces */
	MMUMETHOD(mmu_bootstrap,	moea64_bootstrap_native),
	MMUMETHOD(mmu_cpu_bootstrap,	moea64_cpu_bootstrap_native),

	MMUMETHOD(moea64_pte_synch,	moea64_pte_synch_native),
	MMUMETHOD(moea64_pte_clear,	moea64_pte_clear_native),
	MMUMETHOD(moea64_pte_unset,	moea64_pte_unset_native),
-	MMUMETHOD(moea64_pte_change,	moea64_pte_change_native),
+	MMUMETHOD(moea64_pte_replace,	moea64_pte_replace_native),
	MMUMETHOD(moea64_pte_insert,	moea64_pte_insert_native),
-	MMUMETHOD(moea64_pvo_to_pte,	moea64_pvo_to_pte_native),

	{ 0, 0 }
};

MMU_DEF_INHERIT(oea64_mmu_native, MMU_TYPE_G5, moea64_native_methods,
    0, oea64_mmu);

-static __inline u_int
-va_to_pteg(uint64_t vsid, vm_offset_t addr, int large)
+static int64_t
+moea64_pte_synch_native(mmu_t mmu, struct pvo_entry *pvo)
{
-	uint64_t hash;
-	int shift;
+	volatile struct lpte *pt = moea64_pteg_table + pvo->pvo_pte.slot;
+	struct lpte properpt;
+	uint64_t ptelo;

-	shift = large ? moea64_large_page_shift : ADDR_PIDX_SHFT;
-	hash = (vsid & VSID_HASH_MASK) ^ (((uint64_t)addr & ADDR_PIDX) >>
-	    shift);
-	return (hash & moea64_pteg_mask);
-}
+	PMAP_LOCK_ASSERT(pvo->pvo_pmap, MA_OWNED);

-static void
-moea64_pte_synch_native(mmu_t mmu, uintptr_t pt_cookie, struct lpte *pvo_pt)
-{
-	struct lpte *pt = (struct lpte *)pt_cookie;
+	moea64_pte_from_pvo(pvo, &properpt);

-	pvo_pt->pte_lo |= pt->pte_lo & (LPTE_REF | LPTE_CHG);
+	rw_rlock(&moea64_eviction_lock);
+	if ((pt->pte_hi & LPTE_AVPN_MASK) !=
+	    (properpt.pte_hi & LPTE_AVPN_MASK)) {
+		/* Evicted */
+		rw_runlock(&moea64_eviction_lock);
+		return (-1);
+	}
+
+	PTESYNC();
+	ptelo = be64toh(pt->pte_lo);
+
+	rw_runlock(&moea64_eviction_lock);
+
+	return (ptelo & (LPTE_REF | LPTE_CHG));
}

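Note: with va_to_pteg() gone, the hash computation survives only implicitly: pvo_pte.slot already encodes it, with slot >> 3 the PTEG index and slot & 7 the entry within the group, so the secondary-hash group is reached by XORing the group bits in place (as moea64_pte_insert_native() does below). A self-contained sketch with placeholder constants:

    #include <stdint.h>
    #include <stdio.h>

    #define VSID_HASH_MASK 0x0000007fffffffffULL   /* assumed width */
    #define ADDR_PIDX      0x000000000ffff000ULL   /* assumed field */
    #define ADDR_PIDX_SHFT 12

    static const uint64_t pteg_mask = 0x3ff;        /* assumed: 1024 groups */

    /* Mirrors the deleted va_to_pteg() for 4 KB pages. */
    static uint64_t va_to_pteg(uint64_t vsid, uint64_t addr) {
        uint64_t hash;

        hash = (vsid & VSID_HASH_MASK) ^ ((addr & ADDR_PIDX) >> ADDR_PIDX_SHFT);
        return (hash & pteg_mask);
    }

    int main(void) {
        /* slot >> 3 is the PTEG index, slot & 7 the entry in the group. */
        uint64_t slot = (va_to_pteg(0x123456, 0x7f0042000ULL) << 3) | 5;
        /* Flipping the group bits moves to the secondary-hash group. */
        uint64_t alt = slot ^ (pteg_mask << 3);

        printf("primary slot %llu, secondary slot %llu\n",
            (unsigned long long)slot, (unsigned long long)alt);
        return (0);
    }
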
-static void
-moea64_pte_clear_native(mmu_t mmu, uintptr_t pt_cookie, struct lpte *pvo_pt,
-    uint64_t vpn, uint64_t ptebit)
+static int64_t
+moea64_pte_clear_native(mmu_t mmu, struct pvo_entry *pvo, uint64_t ptebit)
{
-	struct lpte *pt = (struct lpte *)pt_cookie;
+	volatile struct lpte *pt = moea64_pteg_table + pvo->pvo_pte.slot;
+	struct lpte properpt;
+	uint64_t ptelo;

-	/*
-	 * As shown in Section 7.6.3.2.3
-	 */
-	pt->pte_lo &= ~ptebit;
-	critical_enter();
-	TLBIE(vpn);
-	critical_exit();
-}
+	PMAP_LOCK_ASSERT(pvo->pvo_pmap, MA_OWNED);

-static void
-moea64_pte_set_native(struct lpte *pt, struct lpte *pvo_pt)
-{
+	moea64_pte_from_pvo(pvo, &properpt);

-	pvo_pt->pte_hi |= LPTE_VALID;
+	rw_rlock(&moea64_eviction_lock);
+	if ((pt->pte_hi & LPTE_AVPN_MASK) !=
+	    (properpt.pte_hi & LPTE_AVPN_MASK)) {
+		/* Evicted */
+		rw_runlock(&moea64_eviction_lock);
+		return (-1);
+	}

-	/*
-	 * Update the PTE as defined in section 7.6.3.1.
-	 * Note that the REF/CHG bits are from pvo_pt and thus should have
-	 * been saved so this routine can restore them (if desired).
-	 */
-	pt->pte_lo = pvo_pt->pte_lo;
-	EIEIO();
-	pt->pte_hi = pvo_pt->pte_hi;
-	PTESYNC();
+	if (ptebit == LPTE_REF) {
+		/* See "Resetting the Reference Bit" in arch manual */
+		PTESYNC();
+		/* 2-step here safe: precision is not guaranteed */
+		ptelo = pt->pte_lo;

-	/* Keep statistics for unlocked pages */
-	if (!(pvo_pt->pte_hi & LPTE_LOCKED))
-		moea64_pte_valid++;
+		/* One-byte store to avoid touching the C bit */
+		((volatile uint8_t *)(&pt->pte_lo))[6] =
+		    ((uint8_t *)(&properpt.pte_lo))[6];
+		rw_runlock(&moea64_eviction_lock);
+
+		critical_enter();
+		TLBIE(pvo->pvo_vpn);
+		critical_exit();
+	} else {
+		rw_runlock(&moea64_eviction_lock);
+		ptelo = moea64_pte_unset_native(mmu, pvo);
+		moea64_pte_insert_native(mmu, pvo);
+	}
+
+	return (ptelo & (LPTE_REF | LPTE_CHG));
}

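Note: the one-byte store works because, in the big-endian PTE image, byte 6 of pte_lo spans bits 15..8, which contains the R (reference, 0x100) bit but not the C (changed, 0x80) bit, so R can be cleared without racing against a hardware C-bit update. A host-side demonstration (R/C values as in pte.h; the layout loop is illustrative):

    #include <stdint.h>
    #include <stdio.h>

    #define LPTE_REF 0x100ULL
    #define LPTE_CHG 0x080ULL

    int main(void) {
        uint8_t pte_lo[8];
        uint64_t v = LPTE_REF | LPTE_CHG, r = 0;
        int i;

        /* Build a big-endian image of pte_lo, as the hardware sees it. */
        for (i = 0; i < 8; i++)
            pte_lo[i] = (uint8_t)(v >> (8 * (7 - i)));

        pte_lo[6] = 0;          /* the kernel's one-byte store: clears R */

        for (i = 0; i < 8; i++)
            r = (r << 8) | pte_lo[i];
        printf("REF=%d CHG=%d\n",
            !!(r & LPTE_REF), !!(r & LPTE_CHG));    /* REF=0 CHG=1 */
        return (0);
    }
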
-static void
-moea64_pte_unset_native(mmu_t mmu, uintptr_t pt_cookie, struct lpte *pvo_pt,
-    uint64_t vpn)
+static int64_t
+moea64_pte_unset_native(mmu_t mmu, struct pvo_entry *pvo)
{
-	struct lpte *pt = (struct lpte *)pt_cookie;
+	volatile struct lpte *pt = moea64_pteg_table + pvo->pvo_pte.slot;
+	struct lpte properpt;
+	uint64_t ptelo;

+	moea64_pte_from_pvo(pvo, &properpt);
+
+	rw_rlock(&moea64_eviction_lock);
+	if ((pt->pte_hi & LPTE_AVPN_MASK) !=
+	    (properpt.pte_hi & LPTE_AVPN_MASK)) {
+		/* Evicted */
+		moea64_pte_overflow--;
+		rw_runlock(&moea64_eviction_lock);
+		return (-1);
+	}
+
	/*
-	 * Invalidate the pte.
+	 * Invalidate the pte, briefly locking it to collect RC bits. No
+	 * atomics needed since this is protected against eviction by the lock.
	 */
	isync();
	critical_enter();
-	pvo_pt->pte_hi &= ~LPTE_VALID;
-	pt->pte_hi &= ~LPTE_VALID;
+	pt->pte_hi = (pt->pte_hi & ~LPTE_VALID) | LPTE_LOCKED;
	PTESYNC();
-	TLBIE(vpn);
+	TLBIE(pvo->pvo_vpn);
+	ptelo = be64toh(pt->pte_lo);
+	*((volatile int32_t *)(&pt->pte_hi) + 1) = 0; /* Release lock */
	critical_exit();
+	rw_runlock(&moea64_eviction_lock);

-	/*
-	 * Save the reg & chg bits.
-	 */
-	moea64_pte_synch_native(mmu, pt_cookie, pvo_pt);
+	/* Keep statistics */
+	moea64_pte_valid--;

-	/* Keep statistics for unlocked pages */
-	if (!(pvo_pt->pte_hi & LPTE_LOCKED))
-		moea64_pte_valid--;
+	return (ptelo & (LPTE_CHG | LPTE_REF));
}

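Note: the release store above writes zero to the low 32-bit word of pte_hi; assuming LPTE_VALID (0x1) and LPTE_LOCKED (0x40) both live in those bits, as in pte.h, one aligned 32-bit store drops the lock and leaves the entry invalid while the AVPN half is untouched. A sketch (the union-based layout probe is illustrative):

    #include <stdint.h>
    #include <stdio.h>

    #define LPTE_VALID  0x0000000000000001ULL
    #define LPTE_LOCKED 0x0000000000000040ULL

    int main(void) {
        union { uint64_t dw; uint32_t w[2]; } probe = { .dw = 1 }, pte_hi;
        int lo = (probe.w[0] == 1) ? 0 : 1;     /* index of bits 31..0 */

        pte_hi.dw = (0xabcd00abULL << 32) | LPTE_VALID | LPTE_LOCKED;
        pte_hi.w[lo] = 0;                       /* the kernel's release store */

        printf("valid=%d locked=%d high half intact=%d\n",
            !!(pte_hi.dw & LPTE_VALID), !!(pte_hi.dw & LPTE_LOCKED),
            (pte_hi.dw >> 32) == 0xabcd00abULL);
        return (0);
    }
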
-static void
-moea64_pte_change_native(mmu_t mmu, uintptr_t pt, struct lpte *pvo_pt,
-    uint64_t vpn)
+static int64_t
+moea64_pte_replace_native(mmu_t mmu, struct pvo_entry *pvo, int flags)
{
+	volatile struct lpte *pt = moea64_pteg_table + pvo->pvo_pte.slot;
+	struct lpte properpt;
+	int64_t ptelo;

-	/*
-	 * Invalidate the PTE
-	 */
-	moea64_pte_unset_native(mmu, pt, pvo_pt, vpn);
-	moea64_pte_set_native((struct lpte *)pt, pvo_pt);
+	if (flags == 0) {
+		/* Just some software bits changing. */
+		moea64_pte_from_pvo(pvo, &properpt);
+
+		rw_rlock(&moea64_eviction_lock);
+		if ((pt->pte_hi & LPTE_AVPN_MASK) !=
+		    (properpt.pte_hi & LPTE_AVPN_MASK)) {
+			rw_runlock(&moea64_eviction_lock);
+			return (-1);
+		}
+		pt->pte_hi = properpt.pte_hi;
+		ptelo = pt->pte_lo;
+		rw_runlock(&moea64_eviction_lock);
+	} else {
+		/* Otherwise, need reinsertion and deletion */
+		ptelo = moea64_pte_unset_native(mmu, pvo);
+		moea64_pte_insert_native(mmu, pvo);
+	}
+
+	return (ptelo);
}

static void
moea64_cpu_bootstrap_native(mmu_t mmup, int ap)
{
	int i = 0;
	#ifdef __powerpc64__
	struct slb *slb = PCPU_GET(slb);

--- 50 unchanged lines hidden ---


	/*
	 * Allocate PTEG table.
	 */

	size = moea64_pteg_count * sizeof(struct lpteg);
	CTR2(KTR_PMAP, "moea64_bootstrap: %d PTEGs, %d bytes",
	    moea64_pteg_count, size);
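
Note: sizing arithmetic: a PTEG is eight 16-byte PTEs, so the table occupies moea64_pteg_count * 128 bytes and is allocated aligned to its own size. A standalone check with an assumed group count:

    #include <stdint.h>
    #include <stdio.h>

    struct lpte { uint64_t pte_hi, pte_lo; };   /* 16 bytes */
    struct lpteg { struct lpte pt[8]; };        /* one 128-byte group */

    int main(void) {
        uint64_t pteg_count = 1 << 12;          /* assumed: 4096 groups */
        uint64_t size = pteg_count * sizeof(struct lpteg);

        printf("PTEG table: %llu bytes\n",
            (unsigned long long)size);          /* 524288 */
        return (0);
    }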
+	rw_init(&moea64_eviction_lock, "pte eviction");

	/*
	 * We now need to allocate memory. This memory, to be allocated,
	 * has to reside in a page table. The page table we are about to
	 * allocate. We don't have BAT. So drop to data real mode for a minute
	 * as a measure of last resort. We do this a couple times.
	 */

-	moea64_pteg_table = (struct lpteg *)moea64_bootstrap_alloc(size, size);
+	moea64_pteg_table = (struct lpte *)moea64_bootstrap_alloc(size, size);
	DISABLE_TRANS(msr);
-	bzero((void *)moea64_pteg_table, moea64_pteg_count * sizeof(struct lpteg));
+	bzero(__DEVOLATILE(void *, moea64_pteg_table), moea64_pteg_count *
+	    sizeof(struct lpteg));
	ENABLE_TRANS(msr);

	CTR1(KTR_PMAP, "moea64_bootstrap: PTEG table at %p", moea64_pteg_table);

	moea64_mid_bootstrap(mmup, kernelstart, kernelend);

	/*
	 * Add a mapping for the page table itself if there is no direct map.

--- 39 unchanged lines hidden ---

	    : "=r"(msr), "=r"(scratch) : "r"(i), "r"(1));
	#endif
	}

	EIEIO();
	TLBSYNC();
}

-static uintptr_t
-moea64_pvo_to_pte_native(mmu_t mmu, const struct pvo_entry *pvo)
+static int
+atomic_pte_lock(volatile struct lpte *pte, uint64_t bitmask, uint64_t *oldhi)
{
-	struct lpte *pt;
-	int pteidx, ptegidx;
-	uint64_t vsid;
+	int	ret;
+	uint32_t oldhihalf;

-	/* If the PTEG index is not set, then there is no page table entry */
-	if (!PVO_PTEGIDX_ISSET(pvo))
-		return (-1);
-
	/*
-	 * Calculate the ptegidx
+	 * Note: in principle, if just the locked bit were set here, we
+	 * could avoid needing the eviction lock. However, eviction occurs
+	 * so rarely that it isn't worth bothering about in practice.
	 */
-	vsid = PVO_VSID(pvo);
-	ptegidx = va_to_pteg(vsid, PVO_VADDR(pvo),
-	    pvo->pvo_vaddr & PVO_LARGE);

-	/*
-	 * We can find the actual pte entry without searching by grabbing
-	 * the PTEG index from 3 unused bits in pvo_vaddr and by
-	 * noticing the HID bit.
-	 */
-	if (pvo->pvo_pte.lpte.pte_hi & LPTE_HID)
-		ptegidx ^= moea64_pteg_mask;
+	__asm __volatile (
+		"1:\tlwarx %1, 0, %3\n\t"	/* load old value */
+		"and. %0,%1,%4\n\t"		/* check if any bits set */
+		"bne 2f\n\t"			/* exit if any set */
+		"stwcx. %5, 0, %3\n\t"		/* attempt to store */
+		"bne- 1b\n\t"			/* spin if failed */
+		"li %0, 1\n\t"			/* success - retval = 1 */
+		"b 3f\n\t"			/* we've succeeded */
+		"2:\n\t"
+		"stwcx. %1, 0, %3\n\t"		/* clear reservation (74xx) */
+		"li %0, 0\n\t"			/* failure - retval = 0 */
+		"3:\n\t"
+		: "=&r" (ret), "=&r"(oldhihalf), "=m" (pte->pte_hi)
+		: "r" ((volatile char *)&pte->pte_hi + 4),
+		  "r" ((uint32_t)bitmask), "r" ((uint32_t)LPTE_LOCKED),
+		  "m" (pte->pte_hi)
+		: "cr0", "cr1", "cr2", "memory");

-	pteidx = (ptegidx << 3) | PVO_PTEGIDX_GET(pvo);
+	*oldhi = (pte->pte_hi & 0xffffffff00000000ULL) | oldhihalf;

-	if ((pvo->pvo_pte.lpte.pte_hi & LPTE_VALID) &&
-	    !PVO_PTEGIDX_ISSET(pvo)) {
-		panic("moea64_pvo_to_pte: pvo %p has valid pte in pvo but no "
-		    "valid pte index", pvo);
-	}
-
-	if ((pvo->pvo_pte.lpte.pte_hi & LPTE_VALID) == 0 &&
-	    PVO_PTEGIDX_ISSET(pvo)) {
-		panic("moea64_pvo_to_pte: pvo %p has valid pte index in pvo "
-		    "pvo but no valid pte", pvo);
-	}
-
-	pt = &moea64_pteg_table[pteidx >> 3].pt[pteidx & 7];
-	if ((pt->pte_hi ^ (pvo->pvo_pte.lpte.pte_hi & ~LPTE_VALID)) ==
-	    LPTE_VALID) {
-		if ((pvo->pvo_pte.lpte.pte_hi & LPTE_VALID) == 0) {
-			panic("moea64_pvo_to_pte: pvo %p has valid pte in "
-			    "moea64_pteg_table %p but invalid in pvo", pvo, pt);
-		}
-
-		if (((pt->pte_lo ^ pvo->pvo_pte.lpte.pte_lo) &
-		    ~(LPTE_M|LPTE_CHG|LPTE_REF)) != 0) {
-			panic("moea64_pvo_to_pte: pvo %p pte does not match "
-			    "pte %p in moea64_pteg_table difference is %#x",
-			    pvo, pt,
-			    (uint32_t)(pt->pte_lo ^ pvo->pvo_pte.lpte.pte_lo));
-		}
-
-		return ((uintptr_t)pt);
-	}
-
-	if (pvo->pvo_pte.lpte.pte_hi & LPTE_VALID) {
-		panic("moea64_pvo_to_pte: pvo %p has invalid pte %p in "
-		    "moea64_pteg_table but valid in pvo", pvo, pt);
-	}
-
-	return (-1);
+	return (ret);
}

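Note: a portable sketch (C11 atomics, not the kernel's code) of what the lwarx/stwcx. sequence implements: atomically replace the low 32-bit half of pte_hi with LPTE_LOCKED, but only if none of the caller's mask bits are currently set; nonzero means the lock was seized and the old low-word bits (including VALID) wiped in the same store.

    #include <stdatomic.h>
    #include <stdint.h>
    #include <stdio.h>

    #define LPTE_LOCKED 0x40u

    static int try_lock_pte_half(_Atomic uint32_t *hihalf, uint32_t mask) {
        uint32_t old = atomic_load(hihalf);

        do {
            if (old & mask)
                return (0);     /* VALID/WIRED/LOCKED etc. already set */
        } while (!atomic_compare_exchange_weak(hihalf, &old, LPTE_LOCKED));
        return (1);
    }

    int main(void) {
        _Atomic uint32_t half = 0;

        printf("%d\n", try_lock_pte_half(&half, 0x41u)); /* 1: lock taken */
        printf("%d\n", try_lock_pte_half(&half, 0x41u)); /* 0: LOCKED set */
        return (0);
    }
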
-static __inline int
-moea64_pte_spillable_ident(u_int ptegidx)
+static uintptr_t
+moea64_insert_to_pteg_native(struct lpte *pvo_pt, uintptr_t slotbase,
+    uint64_t mask)
{
-	struct lpte *pt;
-	int i, j, k;
+	volatile struct lpte *pt;
+	uint64_t oldptehi, va;
+	uintptr_t k;
+	int i, j;

	/* Start at a random slot */
	i = mftb() % 8;
-	k = -1;
	for (j = 0; j < 8; j++) {
-		pt = &moea64_pteg_table[ptegidx].pt[(i + j) % 8];
-		if (pt->pte_hi & (LPTE_LOCKED | LPTE_WIRED))
-			continue;
+		k = slotbase + (i + j) % 8;
+		pt = &moea64_pteg_table[k];
+		/* Invalidate and seize lock only if no bits in mask set */
+		if (atomic_pte_lock(pt, mask, &oldptehi)) /* Lock obtained */
+			break;
+	}

-		/* This is a candidate, so remember it */
-		k = (i + j) % 8;
+	if (j == 8)
+		return (-1);

-		/* Try to get a page that has not been used lately */
-		if (!(pt->pte_lo & LPTE_REF))
-			return (k);
+	if (oldptehi & LPTE_VALID) {
+		KASSERT(!(oldptehi & LPTE_WIRED), ("Unmapped wired entry"));
+		/*
+		 * Need to invalidate old entry completely: see
+		 * "Modifying a Page Table Entry". Need to reconstruct
+		 * the virtual address for the outgoing entry to do that.
+		 */
+		if (oldptehi & LPTE_BIG)
+			va = oldptehi >> moea64_large_page_shift;
+		else
+			va = oldptehi >> ADDR_PIDX_SHFT;
+		if (oldptehi & LPTE_HID)
+			va = (((k >> 3) ^ moea64_pteg_mask) ^ va) &
+			    VSID_HASH_MASK;
+		else
+			va = ((k >> 3) ^ va) & VSID_HASH_MASK;
+		va |= (oldptehi & LPTE_AVPN_MASK) <<
+		    (ADDR_API_SHFT64 - ADDR_PIDX_SHFT);
+		PTESYNC();
+		TLBIE(va);
+		moea64_pte_valid--;
+		moea64_pte_overflow++;
	}

+	/*
+	 * Update the PTE as per "Adding a Page Table Entry". Lock is released
+	 * by setting the high doubleword.
+	 */
+	pt->pte_lo = pvo_pt->pte_lo;
+	EIEIO();
+	pt->pte_hi = pvo_pt->pte_hi;
+	PTESYNC();
+
+	/* Keep statistics */
+	moea64_pte_valid++;
+
	return (k);
}

static int
-moea64_pte_insert_native(mmu_t mmu, u_int ptegidx, struct lpte *pvo_pt)
+moea64_pte_insert_native(mmu_t mmu, struct pvo_entry *pvo)
{
-	struct lpte *pt;
-	struct pvo_entry *pvo;
-	u_int pteg_bktidx;
-	int i;
+	struct lpte insertpt;
+	uintptr_t slot;

+	/* Initialize PTE */
+	moea64_pte_from_pvo(pvo, &insertpt);
+
+	/* Make sure further insertion is locked out during evictions */
+	rw_rlock(&moea64_eviction_lock);
+
	/*
	 * First try primary hash.
	 */
-	pteg_bktidx = ptegidx;
-	for (pt = moea64_pteg_table[pteg_bktidx].pt, i = 0; i < 8; i++, pt++) {
-		if ((pt->pte_hi & (LPTE_VALID | LPTE_LOCKED)) == 0) {
-			pvo_pt->pte_hi &= ~LPTE_HID;
-			moea64_pte_set_native(pt, pvo_pt);
-			return (i);
-		}
+	pvo->pvo_pte.slot &= ~7ULL; /* Base slot address */
+	slot = moea64_insert_to_pteg_native(&insertpt, pvo->pvo_pte.slot,
+	    LPTE_VALID | LPTE_WIRED | LPTE_LOCKED);
+	if (slot != -1) {
+		rw_runlock(&moea64_eviction_lock);
+		pvo->pvo_pte.slot = slot;
+		return (0);
	}

	/*
	 * Now try secondary hash.
	 */
-	pteg_bktidx ^= moea64_pteg_mask;
-	for (pt = moea64_pteg_table[pteg_bktidx].pt, i = 0; i < 8; i++, pt++) {
-		if ((pt->pte_hi & (LPTE_VALID | LPTE_LOCKED)) == 0) {
-			pvo_pt->pte_hi |= LPTE_HID;
-			moea64_pte_set_native(pt, pvo_pt);
-			return (i);
-		}
+	pvo->pvo_vaddr ^= PVO_HID;
+	insertpt.pte_hi ^= LPTE_HID;
+	pvo->pvo_pte.slot ^= (moea64_pteg_mask << 3);
+	slot = moea64_insert_to_pteg_native(&insertpt, pvo->pvo_pte.slot,
+	    LPTE_VALID | LPTE_WIRED | LPTE_LOCKED);
+	if (slot != -1) {
+		rw_runlock(&moea64_eviction_lock);
+		pvo->pvo_pte.slot = slot;
+		return (0);
	}

	/*
	 * Out of luck. Find a PTE to sacrifice.
	 */
-	pteg_bktidx = ptegidx;
-	i = moea64_pte_spillable_ident(pteg_bktidx);
-	if (i < 0) {
-		pteg_bktidx ^= moea64_pteg_mask;
-		i = moea64_pte_spillable_ident(pteg_bktidx);
+
+	/* Lock out all insertions for a bit */
+	if (!rw_try_upgrade(&moea64_eviction_lock)) {
+		rw_runlock(&moea64_eviction_lock);
+		rw_wlock(&moea64_eviction_lock);
	}

-	if (i < 0) {
-		/* No freeable slots in either PTEG? We're hosed. */
-		panic("moea64_pte_insert: overflow");
-		return (-1);
+	slot = moea64_insert_to_pteg_native(&insertpt, pvo->pvo_pte.slot,
+	    LPTE_WIRED | LPTE_LOCKED);
+	if (slot != -1) {
+		rw_wunlock(&moea64_eviction_lock);
+		pvo->pvo_pte.slot = slot;
+		return (0);
	}

-	if (pteg_bktidx == ptegidx)
-		pvo_pt->pte_hi &= ~LPTE_HID;
-	else
-		pvo_pt->pte_hi |= LPTE_HID;
-
-	/*
-	 * Synchronize the sacrifice PTE with its PVO, then mark both
-	 * invalid. The PVO will be reused when/if the VM system comes
-	 * here after a fault.
-	 */
-	pt = &moea64_pteg_table[pteg_bktidx].pt[i];
-
-	if (pt->pte_hi & LPTE_HID)
-		pteg_bktidx ^= moea64_pteg_mask; /* PTEs indexed by primary */
-
-	LIST_FOREACH(pvo, &moea64_pvo_table[pteg_bktidx], pvo_olink) {
-		if (pvo->pvo_pte.lpte.pte_hi == pt->pte_hi) {
-			KASSERT(pvo->pvo_pte.lpte.pte_hi & LPTE_VALID,
-			    ("Invalid PVO for valid PTE!"));
-			moea64_pte_unset_native(mmu, (uintptr_t)pt,
-			    &pvo->pvo_pte.lpte, pvo->pvo_vpn);
-			PVO_PTEGIDX_CLR(pvo);
-			moea64_pte_overflow++;
-			break;
-		}
+	/* Try other hash table. Now we're getting desperate... */
+	pvo->pvo_vaddr ^= PVO_HID;
+	insertpt.pte_hi ^= LPTE_HID;
+	pvo->pvo_pte.slot ^= (moea64_pteg_mask << 3);
+	slot = moea64_insert_to_pteg_native(&insertpt, pvo->pvo_pte.slot,
+	    LPTE_WIRED | LPTE_LOCKED);
+	if (slot != -1) {
+		rw_wunlock(&moea64_eviction_lock);
+		pvo->pvo_pte.slot = slot;
+		return (0);
	}

-	KASSERT(pvo->pvo_pte.lpte.pte_hi == pt->pte_hi,
-	    ("Unable to find PVO for spilled PTE"));
-
-	/*
-	 * Set the new PTE.
-	 */
-	moea64_pte_set_native(pt, pvo_pt);
-
-	return (i);
+	/* No freeable slots in either PTEG? We're hosed. */
+	rw_wunlock(&moea64_eviction_lock);
+	panic("moea64_pte_insert: overflow");
+	return (-1);
}

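Note: a minimal simulation (assumed bit values, not kernel code) of the probe policy above: the clean passes refuse slots with any of VALID, WIRED, or LOCKED set, while the eviction passes refuse only WIRED and LOCKED, so a valid but unwired victim can be displaced.

    #include <stdint.h>
    #include <stdio.h>

    #define LPTE_VALID  0x01u
    #define LPTE_WIRED  0x10u
    #define LPTE_LOCKED 0x40u
    #define NSLOTS      8

    static int probe(uint32_t group[NSLOTS], uint32_t mask) {
        int i;

        for (i = 0; i < NSLOTS; i++)
            if ((group[i] & mask) == 0)
                return (i);     /* usable slot */
        return (-1);
    }

    int main(void) {
        uint32_t group[NSLOTS];
        int i;

        for (i = 0; i < NSLOTS; i++)
            group[i] = LPTE_VALID;      /* group full of unwired entries */

        printf("clean pass: %d\n",
            probe(group, LPTE_VALID | LPTE_WIRED | LPTE_LOCKED));   /* -1 */
        printf("eviction pass: %d\n",
            probe(group, LPTE_WIRED | LPTE_LOCKED));                /* 0 */
        return (0);
    }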