/*-
 * Copyright (c) 2001 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Matt Thomas <matt@3am-software.com> of Allegro Networks, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */
/*-
 * Copyright (C) 1995, 1996 Wolfgang Solfrank.
 * Copyright (C) 1995, 1996 TooLs GmbH.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by TooLs GmbH.
 * 4. The name of TooLs GmbH may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY TOOLS GMBH ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL TOOLS GMBH BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
 * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
 * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * $NetBSD: pmap.c,v 1.28 2000/03/26 20:42:36 kleink Exp $
 */
/*-
 * Copyright (C) 2001 Benno Rice.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY Benno Rice ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL TOOLS GMBH BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
 * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
 * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: releng/11.0/sys/powerpc/aim/moea64_native.c 290990 2015-11-17 16:09:26Z nwhitehorn $");

/*
 * Native 64-bit page table operations for running without a hypervisor.
 */

#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/sched.h>
#include <sys/sysctl.h>
#include <sys/systm.h>
#include <sys/rwlock.h>
#include <sys/endian.h>

#include <sys/kdb.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/vm_kern.h>
#include <vm/vm_page.h>
#include <vm/vm_map.h>
#include <vm/vm_object.h>
#include <vm/vm_extern.h>
#include <vm/vm_pageout.h>

#include <machine/md_var.h>
#include <machine/mmuvar.h>

#include "mmu_oea64.h"
#include "mmu_if.h"
#include "moea64_if.h"

12352284Sobrien#define PTESYNC() __asm __volatile("ptesync"); 12452284Sobrien#define TLBSYNC() __asm __volatile("tlbsync; ptesync"); 12552284Sobrien#define SYNC() __asm __volatile("sync"); 12652284Sobrien#define EIEIO() __asm __volatile("eieio"); 12752284Sobrien 12852284Sobrien#define VSID_HASH_MASK 0x0000007fffffffffULL 12990075Sobrien 13090075Sobrienstatic __inline void 13152284SobrienTLBIE(uint64_t vpn) { 13252284Sobrien#ifndef __powerpc64__ 13390075Sobrien register_t vpn_hi, vpn_lo; 13452284Sobrien register_t msr; 13552284Sobrien register_t scratch, intr; 13652284Sobrien#endif 13790075Sobrien 13890075Sobrien static volatile u_int tlbie_lock = 0; 13990075Sobrien 14090075Sobrien vpn <<= ADDR_PIDX_SHFT; 14190075Sobrien vpn &= ~(0xffffULL << 48); 142132718Skan 14390075Sobrien /* Hobo spinlock: we need stronger guarantees than mutexes provide */ 14490075Sobrien while (!atomic_cmpset_int(&tlbie_lock, 0, 1)); 14552284Sobrien isync(); /* Flush instruction queue once lock acquired */ 14652284Sobrien 14752284Sobrien#ifdef __powerpc64__ 14852284Sobrien __asm __volatile("tlbie %0" :: "r"(vpn) : "memory"); 14952284Sobrien __asm __volatile("eieio; tlbsync; ptesync" ::: "memory"); 15090075Sobrien#else 151117395Skan vpn_hi = (uint32_t)(vpn >> 32); 15252284Sobrien vpn_lo = (uint32_t)vpn; 15352284Sobrien 15452284Sobrien intr = intr_disable(); 15552284Sobrien __asm __volatile("\ 15652284Sobrien mfmsr %0; \ 15752284Sobrien mr %1, %0; \ 15890075Sobrien insrdi %1,%5,1,0; \ 15952284Sobrien mtmsrd %1; isync; \ 16090075Sobrien \ 16152284Sobrien sld %1,%2,%4; \ 16290075Sobrien or %1,%1,%3; \ 16390075Sobrien tlbie %1; \ 16490075Sobrien \ 16590075Sobrien mtmsrd %0; isync; \ 166132718Skan eieio; \ 167132718Skan tlbsync; \ 16890075Sobrien ptesync;" 16990075Sobrien : "=r"(msr), "=r"(scratch) : "r"(vpn_hi), "r"(vpn_lo), "r"(32), "r"(1) 17052284Sobrien : "memory"); 17152284Sobrien intr_restore(intr); 17252284Sobrien#endif 17352284Sobrien 17452284Sobrien /* No barriers or special ops -- taken 
care of by ptesync above */ 17552284Sobrien tlbie_lock = 0; 17690075Sobrien} 17752284Sobrien 17890075Sobrien#define DISABLE_TRANS(msr) msr = mfmsr(); mtmsr(msr & ~PSL_DR) 17990075Sobrien#define ENABLE_TRANS(msr) mtmsr(msr) 18090075Sobrien 18190075Sobrien/* 18290075Sobrien * PTEG data. 18390075Sobrien */ 18490075Sobrienstatic volatile struct lpte *moea64_pteg_table; 18590075Sobrienstatic struct rwlock moea64_eviction_lock; 18690075Sobrien 18790075Sobrien/* 18852284Sobrien * PTE calls. 18952284Sobrien */ 19052284Sobrienstatic int moea64_pte_insert_native(mmu_t, struct pvo_entry *); 19152284Sobrienstatic int64_t moea64_pte_synch_native(mmu_t, struct pvo_entry *); 19252284Sobrienstatic int64_t moea64_pte_clear_native(mmu_t, struct pvo_entry *, uint64_t); 19390075Sobrienstatic int64_t moea64_pte_replace_native(mmu_t, struct pvo_entry *, int); 19490075Sobrienstatic int64_t moea64_pte_unset_native(mmu_t mmu, struct pvo_entry *); 19552284Sobrien 196117395Skan/* 197117395Skan * Utility routines. 
19852284Sobrien */ 19952284Sobrienstatic void moea64_bootstrap_native(mmu_t mmup, 20052284Sobrien vm_offset_t kernelstart, vm_offset_t kernelend); 20152284Sobrienstatic void moea64_cpu_bootstrap_native(mmu_t, int ap); 20252284Sobrienstatic void tlbia(void); 20352284Sobrien 20452284Sobrienstatic mmu_method_t moea64_native_methods[] = { 20590075Sobrien /* Internal interfaces */ 20690075Sobrien MMUMETHOD(mmu_bootstrap, moea64_bootstrap_native), 20752284Sobrien MMUMETHOD(mmu_cpu_bootstrap, moea64_cpu_bootstrap_native), 208117395Skan 20990075Sobrien MMUMETHOD(moea64_pte_synch, moea64_pte_synch_native), 210117395Skan MMUMETHOD(moea64_pte_clear, moea64_pte_clear_native), 211117395Skan MMUMETHOD(moea64_pte_unset, moea64_pte_unset_native), 21290075Sobrien MMUMETHOD(moea64_pte_replace, moea64_pte_replace_native), 21390075Sobrien MMUMETHOD(moea64_pte_insert, moea64_pte_insert_native), 21490075Sobrien 21590075Sobrien { 0, 0 } 216132718Skan}; 21790075Sobrien 21890075SobrienMMU_DEF_INHERIT(oea64_mmu_native, MMU_TYPE_G5, moea64_native_methods, 21952284Sobrien 0, oea64_mmu); 220117395Skan 221117395Skanstatic int64_t 222117395Skanmoea64_pte_synch_native(mmu_t mmu, struct pvo_entry *pvo) 223117395Skan{ 224169689Skan volatile struct lpte *pt = moea64_pteg_table + pvo->pvo_pte.slot; 225169689Skan struct lpte properpt; 22652284Sobrien uint64_t ptelo; 22752284Sobrien 22852284Sobrien PMAP_LOCK_ASSERT(pvo->pvo_pmap, MA_OWNED); 22952284Sobrien 23052284Sobrien moea64_pte_from_pvo(pvo, &properpt); 23152284Sobrien 23252284Sobrien rw_rlock(&moea64_eviction_lock); 23390075Sobrien if ((be64toh(pt->pte_hi) & LPTE_AVPN_MASK) != 23490075Sobrien (properpt.pte_hi & LPTE_AVPN_MASK)) { 23552284Sobrien /* Evicted */ 23652284Sobrien rw_runlock(&moea64_eviction_lock); 23752284Sobrien return (-1); 238132718Skan } 23990075Sobrien 24090075Sobrien PTESYNC(); 24190075Sobrien ptelo = be64toh(pt->pte_lo); 242132718Skan 24390075Sobrien rw_runlock(&moea64_eviction_lock); 24490075Sobrien 24590075Sobrien return 
(ptelo & (LPTE_REF | LPTE_CHG)); 24652284Sobrien} 24752284Sobrien 24852284Sobrienstatic int64_t 24952284Sobrienmoea64_pte_clear_native(mmu_t mmu, struct pvo_entry *pvo, uint64_t ptebit) 25052284Sobrien{ 25152284Sobrien volatile struct lpte *pt = moea64_pteg_table + pvo->pvo_pte.slot; 25252284Sobrien struct lpte properpt; 25352284Sobrien uint64_t ptelo; 25452284Sobrien 25552284Sobrien PMAP_LOCK_ASSERT(pvo->pvo_pmap, MA_OWNED); 25652284Sobrien 25752284Sobrien moea64_pte_from_pvo(pvo, &properpt); 25852284Sobrien 25952284Sobrien rw_rlock(&moea64_eviction_lock); 26052284Sobrien if ((be64toh(pt->pte_hi) & LPTE_AVPN_MASK) != 26152284Sobrien (properpt.pte_hi & LPTE_AVPN_MASK)) { 26252284Sobrien /* Evicted */ 26352284Sobrien rw_runlock(&moea64_eviction_lock); 26452284Sobrien return (-1); 26552284Sobrien } 26652284Sobrien 26752284Sobrien if (ptebit == LPTE_REF) { 26852284Sobrien /* See "Resetting the Reference Bit" in arch manual */ 26952284Sobrien PTESYNC(); 27052284Sobrien /* 2-step here safe: precision is not guaranteed */ 27152284Sobrien ptelo = be64toh(pt->pte_lo); 27290075Sobrien 273117395Skan /* One-byte store to avoid touching the C bit */ 274117395Skan ((volatile uint8_t *)(&pt->pte_lo))[6] = 275117395Skan#if BYTE_ORDER == BIG_ENDIAN 276117395Skan ((uint8_t *)(&properpt.pte_lo))[6]; 277117395Skan#else 278117395Skan ((uint8_t *)(&properpt.pte_lo))[1]; 279117395Skan#endif 28090075Sobrien rw_runlock(&moea64_eviction_lock); 28190075Sobrien 28252284Sobrien critical_enter(); 28352284Sobrien TLBIE(pvo->pvo_vpn); 28452284Sobrien critical_exit(); 28590075Sobrien } else { 28690075Sobrien rw_runlock(&moea64_eviction_lock); 28790075Sobrien ptelo = moea64_pte_unset_native(mmu, pvo); 288117395Skan moea64_pte_insert_native(mmu, pvo); 289117395Skan } 290117395Skan 291117395Skan return (ptelo & (LPTE_REF | LPTE_CHG)); 29290075Sobrien} 29390075Sobrien 29490075Sobrienstatic int64_t 29590075Sobrienmoea64_pte_unset_native(mmu_t mmu, struct pvo_entry *pvo) 29690075Sobrien{ 297117395Skan 
volatile struct lpte *pt = moea64_pteg_table + pvo->pvo_pte.slot; 298117395Skan struct lpte properpt; 29990075Sobrien uint64_t ptelo; 300117395Skan 30190075Sobrien moea64_pte_from_pvo(pvo, &properpt); 30290075Sobrien 30390075Sobrien rw_rlock(&moea64_eviction_lock); 30452284Sobrien if ((be64toh(pt->pte_hi & LPTE_AVPN_MASK)) != 30552284Sobrien (properpt.pte_hi & LPTE_AVPN_MASK)) { 30652284Sobrien /* Evicted */ 30752284Sobrien moea64_pte_overflow--; 30852284Sobrien rw_runlock(&moea64_eviction_lock); 30952284Sobrien return (-1); 31052284Sobrien } 311132718Skan 31290075Sobrien /* 31390075Sobrien * Invalidate the pte, briefly locking it to collect RC bits. No 31490075Sobrien * atomics needed since this is protected against eviction by the lock. 31590075Sobrien */ 316117395Skan isync(); 317132718Skan critical_enter(); 31890075Sobrien pt->pte_hi = be64toh((pt->pte_hi & ~LPTE_VALID) | LPTE_LOCKED); 31990075Sobrien PTESYNC(); 32090075Sobrien TLBIE(pvo->pvo_vpn); 32190075Sobrien ptelo = be64toh(pt->pte_lo); 32290075Sobrien *((volatile int32_t *)(&pt->pte_hi) + 1) = 0; /* Release lock */ 32390075Sobrien critical_exit(); 32490075Sobrien rw_runlock(&moea64_eviction_lock); 325117395Skan 326117395Skan /* Keep statistics */ 32790075Sobrien moea64_pte_valid--; 32890075Sobrien 32990075Sobrien return (ptelo & (LPTE_CHG | LPTE_REF)); 33052284Sobrien} 33152284Sobrien 33290075Sobrienstatic int64_t 33390075Sobrienmoea64_pte_replace_native(mmu_t mmu, struct pvo_entry *pvo, int flags) 33490075Sobrien{ 33590075Sobrien volatile struct lpte *pt = moea64_pteg_table + pvo->pvo_pte.slot; 33690075Sobrien struct lpte properpt; 337117395Skan int64_t ptelo; 33890075Sobrien 33990075Sobrien if (flags == 0) { 34090075Sobrien /* Just some software bits changing. 
*/ 34152284Sobrien moea64_pte_from_pvo(pvo, &properpt); 34252284Sobrien 34352284Sobrien rw_rlock(&moea64_eviction_lock); 34452284Sobrien if ((be64toh(pt->pte_hi) & LPTE_AVPN_MASK) != 34552284Sobrien (properpt.pte_hi & LPTE_AVPN_MASK)) { 34652284Sobrien rw_runlock(&moea64_eviction_lock); 34752284Sobrien return (-1); 34852284Sobrien } 34952284Sobrien pt->pte_hi = htobe64(properpt.pte_hi); 35052284Sobrien ptelo = be64toh(pt->pte_lo); 35152284Sobrien rw_runlock(&moea64_eviction_lock); 35252284Sobrien } else { 35352284Sobrien /* Otherwise, need reinsertion and deletion */ 35452284Sobrien ptelo = moea64_pte_unset_native(mmu, pvo); 35552284Sobrien moea64_pte_insert_native(mmu, pvo); 35652284Sobrien } 35752284Sobrien 35852284Sobrien return (ptelo); 35952284Sobrien} 36052284Sobrien 36152284Sobrienstatic void 36252284Sobrienmoea64_cpu_bootstrap_native(mmu_t mmup, int ap) 36352284Sobrien{ 36452284Sobrien int i = 0; 36552284Sobrien #ifdef __powerpc64__ 36652284Sobrien struct slb *slb = PCPU_GET(slb); 36752284Sobrien register_t seg0; 36852284Sobrien #endif 36952284Sobrien 37052284Sobrien /* 37152284Sobrien * Initialize segment registers and MMU 37252284Sobrien */ 37352284Sobrien 37452284Sobrien mtmsr(mfmsr() & ~PSL_DR & ~PSL_IR); 37552284Sobrien 37652284Sobrien /* 37752284Sobrien * Install kernel SLB entries 37852284Sobrien */ 37952284Sobrien 38090075Sobrien #ifdef __powerpc64__ 38152284Sobrien __asm __volatile ("slbia"); 38252284Sobrien __asm __volatile ("slbmfee %0,%1; slbie %0;" : "=r"(seg0) : 38352284Sobrien "r"(0)); 38452284Sobrien 38552284Sobrien for (i = 0; i < 64; i++) { 38652284Sobrien if (!(slb[i].slbe & SLBE_VALID)) 387117395Skan continue; 38852284Sobrien 38990075Sobrien __asm __volatile ("slbmte %0, %1" :: 39090075Sobrien "r"(slb[i].slbv), "r"(slb[i].slbe)); 39190075Sobrien } 39290075Sobrien #else 39390075Sobrien for (i = 0; i < 16; i++) 39490075Sobrien mtsrin(i << ADDR_SR_SHFT, kernel_pmap->pm_sr[i]); 395132718Skan #endif 39690075Sobrien 397132718Skan /* 
39890075Sobrien * Install page table 39990075Sobrien */ 40090075Sobrien 401132718Skan __asm __volatile ("ptesync; mtsdr1 %0; isync" 40290075Sobrien :: "r"((uintptr_t)moea64_pteg_table 40390075Sobrien | (uintptr_t)(flsl(moea64_pteg_mask >> 11)))); 40490075Sobrien tlbia(); 40590075Sobrien} 40690075Sobrien 40790075Sobrienstatic void 40890075Sobrienmoea64_bootstrap_native(mmu_t mmup, vm_offset_t kernelstart, 40990075Sobrien vm_offset_t kernelend) 41090075Sobrien{ 41190075Sobrien vm_size_t size; 41290075Sobrien vm_offset_t off; 41390075Sobrien vm_paddr_t pa; 41490075Sobrien register_t msr; 41590075Sobrien 416132718Skan moea64_early_bootstrap(mmup, kernelstart, kernelend); 41790075Sobrien 41890075Sobrien /* 41952284Sobrien * Allocate PTEG table. 42052284Sobrien */ 42152284Sobrien 42252284Sobrien size = moea64_pteg_count * sizeof(struct lpteg); 42352284Sobrien CTR2(KTR_PMAP, "moea64_bootstrap: %d PTEGs, %d bytes", 42452284Sobrien moea64_pteg_count, size); 42552284Sobrien rw_init(&moea64_eviction_lock, "pte eviction"); 42652284Sobrien 42752284Sobrien /* 42890075Sobrien * We now need to allocate memory. This memory, to be allocated, 42952284Sobrien * has to reside in a page table. The page table we are about to 43052284Sobrien * allocate. We don't have BAT. So drop to data real mode for a minute 43152284Sobrien * as a measure of last resort. We do this a couple times. 432169689Skan */ 43390075Sobrien 434169689Skan moea64_pteg_table = (struct lpte *)moea64_bootstrap_alloc(size, size); 435169689Skan DISABLE_TRANS(msr); 436169689Skan bzero(__DEVOLATILE(void *, moea64_pteg_table), moea64_pteg_count * 43790075Sobrien sizeof(struct lpteg)); 43852284Sobrien ENABLE_TRANS(msr); 43952284Sobrien 440169689Skan CTR1(KTR_PMAP, "moea64_bootstrap: PTEG table at %p", moea64_pteg_table); 441132718Skan 44252284Sobrien moea64_mid_bootstrap(mmup, kernelstart, kernelend); 44352284Sobrien 44452284Sobrien /* 44552284Sobrien * Add a mapping for the page table itself if there is no direct map. 
44652284Sobrien */ 447132718Skan if (!hw_direct_map) { 448169689Skan size = moea64_pteg_count * sizeof(struct lpteg); 449169689Skan off = (vm_offset_t)(moea64_pteg_table); 450169689Skan DISABLE_TRANS(msr); 451169689Skan for (pa = off; pa < off + size; pa += PAGE_SIZE) 452169689Skan pmap_kenter(pa, pa); 453169689Skan ENABLE_TRANS(msr); 454169689Skan } 455169689Skan 456132718Skan /* Bring up virtual memory */ 45790075Sobrien moea64_late_bootstrap(mmup, kernelstart, kernelend); 45852284Sobrien} 45952284Sobrien 46052284Sobrienstatic void 46152284Sobrientlbia(void) 46252284Sobrien{ 46352284Sobrien vm_offset_t i; 464132718Skan #ifndef __powerpc64__ 46552284Sobrien register_t msr, scratch; 46652284Sobrien #endif 46752284Sobrien 46852284Sobrien TLBSYNC(); 46952284Sobrien 47052284Sobrien for (i = 0; i < 0xFF000; i += 0x00001000) { 47152284Sobrien #ifdef __powerpc64__ 472132718Skan __asm __volatile("tlbiel %0" :: "r"(i)); 47352284Sobrien #else 47490075Sobrien __asm __volatile("\ 475132718Skan mfmsr %0; \ 47652284Sobrien mr %1, %0; \ 47752284Sobrien insrdi %1,%3,1,0; \ 47852284Sobrien mtmsrd %1; \ 47952284Sobrien isync; \ 48052284Sobrien \ 48152284Sobrien tlbiel %2; \ 48252284Sobrien \ 48352284Sobrien mtmsrd %0; \ 48452284Sobrien isync;" 48552284Sobrien : "=r"(msr), "=r"(scratch) : "r"(i), "r"(1)); 48652284Sobrien #endif 48752284Sobrien } 48852284Sobrien 48952284Sobrien EIEIO(); 49052284Sobrien TLBSYNC(); 49152284Sobrien} 49252284Sobrien 49352284Sobrienstatic int 494132718Skanatomic_pte_lock(volatile struct lpte *pte, uint64_t bitmask, uint64_t *oldhi) 49552284Sobrien{ 49652284Sobrien int ret; 49752284Sobrien uint32_t oldhihalf; 49852284Sobrien 499 /* 500 * Note: in principle, if just the locked bit were set here, we 501 * could avoid needing the eviction lock. However, eviction occurs 502 * so rarely that it isn't worth bothering about in practice. 503 */ 504 505 __asm __volatile ( 506 "1:\tlwarx %1, 0, %3\n\t" /* load old value */ 507 "and. 
%0,%1,%4\n\t" /* check if any bits set */ 508 "bne 2f\n\t" /* exit if any set */ 509 "stwcx. %5, 0, %3\n\t" /* attempt to store */ 510 "bne- 1b\n\t" /* spin if failed */ 511 "li %0, 1\n\t" /* success - retval = 1 */ 512 "b 3f\n\t" /* we've succeeded */ 513 "2:\n\t" 514 "stwcx. %1, 0, %3\n\t" /* clear reservation (74xx) */ 515 "li %0, 0\n\t" /* failure - retval = 0 */ 516 "3:\n\t" 517 : "=&r" (ret), "=&r"(oldhihalf), "=m" (pte->pte_hi) 518 : "r" ((volatile char *)&pte->pte_hi + 4), 519 "r" ((uint32_t)bitmask), "r" ((uint32_t)LPTE_LOCKED), 520 "m" (pte->pte_hi) 521 : "cr0", "cr1", "cr2", "memory"); 522 523 *oldhi = (pte->pte_hi & 0xffffffff00000000ULL) | oldhihalf; 524 525 return (ret); 526} 527 528static uintptr_t 529moea64_insert_to_pteg_native(struct lpte *pvo_pt, uintptr_t slotbase, 530 uint64_t mask) 531{ 532 volatile struct lpte *pt; 533 uint64_t oldptehi, va; 534 uintptr_t k; 535 int i, j; 536 537 /* Start at a random slot */ 538 i = mftb() % 8; 539 for (j = 0; j < 8; j++) { 540 k = slotbase + (i + j) % 8; 541 pt = &moea64_pteg_table[k]; 542 /* Invalidate and seize lock only if no bits in mask set */ 543 if (atomic_pte_lock(pt, mask, &oldptehi)) /* Lock obtained */ 544 break; 545 } 546 547 if (j == 8) 548 return (-1); 549 550 if (oldptehi & LPTE_VALID) { 551 KASSERT(!(oldptehi & LPTE_WIRED), ("Unmapped wired entry")); 552 /* 553 * Need to invalidate old entry completely: see 554 * "Modifying a Page Table Entry". Need to reconstruct 555 * the virtual address for the outgoing entry to do that. 
556 */ 557 if (oldptehi & LPTE_BIG) 558 va = oldptehi >> moea64_large_page_shift; 559 else 560 va = oldptehi >> ADDR_PIDX_SHFT; 561 if (oldptehi & LPTE_HID) 562 va = (((k >> 3) ^ moea64_pteg_mask) ^ va) & 563 VSID_HASH_MASK; 564 else 565 va = ((k >> 3) ^ va) & VSID_HASH_MASK; 566 va |= (oldptehi & LPTE_AVPN_MASK) << 567 (ADDR_API_SHFT64 - ADDR_PIDX_SHFT); 568 PTESYNC(); 569 TLBIE(va); 570 moea64_pte_valid--; 571 moea64_pte_overflow++; 572 } 573 574 /* 575 * Update the PTE as per "Adding a Page Table Entry". Lock is released 576 * by setting the high doubleworld. 577 */ 578 pt->pte_lo = htobe64(pvo_pt->pte_lo); 579 EIEIO(); 580 pt->pte_hi = htobe64(pvo_pt->pte_hi); 581 PTESYNC(); 582 583 /* Keep statistics */ 584 moea64_pte_valid++; 585 586 return (k); 587} 588 589static int 590moea64_pte_insert_native(mmu_t mmu, struct pvo_entry *pvo) 591{ 592 struct lpte insertpt; 593 uintptr_t slot; 594 595 /* Initialize PTE */ 596 moea64_pte_from_pvo(pvo, &insertpt); 597 598 /* Make sure further insertion is locked out during evictions */ 599 rw_rlock(&moea64_eviction_lock); 600 601 /* 602 * First try primary hash. 603 */ 604 pvo->pvo_pte.slot &= ~7ULL; /* Base slot address */ 605 slot = moea64_insert_to_pteg_native(&insertpt, pvo->pvo_pte.slot, 606 LPTE_VALID | LPTE_WIRED | LPTE_LOCKED); 607 if (slot != -1) { 608 rw_runlock(&moea64_eviction_lock); 609 pvo->pvo_pte.slot = slot; 610 return (0); 611 } 612 613 /* 614 * Now try secondary hash. 615 */ 616 pvo->pvo_vaddr ^= PVO_HID; 617 insertpt.pte_hi ^= LPTE_HID; 618 pvo->pvo_pte.slot ^= (moea64_pteg_mask << 3); 619 slot = moea64_insert_to_pteg_native(&insertpt, pvo->pvo_pte.slot, 620 LPTE_VALID | LPTE_WIRED | LPTE_LOCKED); 621 if (slot != -1) { 622 rw_runlock(&moea64_eviction_lock); 623 pvo->pvo_pte.slot = slot; 624 return (0); 625 } 626 627 /* 628 * Out of luck. Find a PTE to sacrifice. 
629 */ 630 631 /* Lock out all insertions for a bit */ 632 if (!rw_try_upgrade(&moea64_eviction_lock)) { 633 rw_runlock(&moea64_eviction_lock); 634 rw_wlock(&moea64_eviction_lock); 635 } 636 637 slot = moea64_insert_to_pteg_native(&insertpt, pvo->pvo_pte.slot, 638 LPTE_WIRED | LPTE_LOCKED); 639 if (slot != -1) { 640 rw_wunlock(&moea64_eviction_lock); 641 pvo->pvo_pte.slot = slot; 642 return (0); 643 } 644 645 /* Try other hash table. Now we're getting desperate... */ 646 pvo->pvo_vaddr ^= PVO_HID; 647 insertpt.pte_hi ^= LPTE_HID; 648 pvo->pvo_pte.slot ^= (moea64_pteg_mask << 3); 649 slot = moea64_insert_to_pteg_native(&insertpt, pvo->pvo_pte.slot, 650 LPTE_WIRED | LPTE_LOCKED); 651 if (slot != -1) { 652 rw_wunlock(&moea64_eviction_lock); 653 pvo->pvo_pte.slot = slot; 654 return (0); 655 } 656 657 /* No freeable slots in either PTEG? We're hosed. */ 658 rw_wunlock(&moea64_eviction_lock); 659 panic("moea64_pte_insert: overflow"); 660 return (-1); 661} 662 663