/* mmu_oea64.c, revision 207410 */
/*-
 * Copyright (c) 2001 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Matt Thomas <matt@3am-software.com> of Allegro Networks, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *        This product includes software developed by the NetBSD
 *        Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */
/*-
 * Copyright (C) 1995, 1996 Wolfgang Solfrank.
 * Copyright (C) 1995, 1996 TooLs GmbH.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *        This product includes software developed by TooLs GmbH.
 * 4. The name of TooLs GmbH may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY TOOLS GMBH ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL TOOLS GMBH BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
 * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
 * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * $NetBSD: pmap.c,v 1.28 2000/03/26 20:42:36 kleink Exp $
 */
/*-
 * Copyright (C) 2001 Benno Rice.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY Benno Rice ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL TOOLS GMBH BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
 * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
 * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: head/sys/powerpc/aim/mmu_oea64.c 207410 2010-04-30 00:46:43Z kmacy $");

/*
 * Manages physical address maps.
 *
 * In addition to hardware address maps, this module is called upon to
 * provide software-use-only maps which may or may not be stored in the
 * same form as hardware maps.  These pseudo-maps are used to store
 * intermediate results from copy operations to and from address spaces.
 *
 * Since the information managed by this module is also stored by the
 * logical address mapping module, this module may throw away valid virtual
 * to physical mappings at almost any time.  However, invalidations of
 * mappings must be done as requested.
 *
 * In order to cope with hardware architectures which make virtual to
 * physical map invalidates expensive, this module may delay invalidate
 * or reduced protection operations until such time as they are actually
 * necessary.  This module is given full information as to which processors
 * are currently using which maps, and to when physical maps must be made
 * correct.
 */

#include "opt_kstack_pages.h"

#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/lock.h>
#include <sys/msgbuf.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/sysctl.h>
#include <sys/systm.h>
#include <sys/vmmeter.h>

#include <sys/kdb.h>

#include <dev/ofw/openfirm.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/vm_kern.h>
#include <vm/vm_page.h>
#include <vm/vm_map.h>
#include <vm/vm_object.h>
#include <vm/vm_extern.h>
#include <vm/vm_pageout.h>
#include <vm/vm_pager.h>
#include <vm/uma.h>

#include <machine/cpu.h>
#include <machine/platform.h>
#include <machine/frame.h>
#include <machine/md_var.h>
#include <machine/psl.h>
#include <machine/bat.h>
#include <machine/pte.h>
#include <machine/sr.h>
#include <machine/trap.h>
#include <machine/mmuvar.h>

#include "mmu_if.h"

#define MOEA_DEBUG

#define TODO    panic("%s: not implemented", __func__);

static __inline u_int32_t
cntlzw(volatile u_int32_t a) {
    u_int32_t b;
    __asm ("cntlzw %0, %1" : "=r"(b) : "r"(a));
    return b;
}

static __inline uint64_t
va_to_vsid(pmap_t pm, vm_offset_t va)
{
    return ((pm->pm_sr[(uintptr_t)va >> ADDR_SR_SHFT]) & SR_VSID_MASK);
}
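/*
 * The top four bits of a 32-bit effective address index one of the 16
 * segment registers, and the VSID field of the selected register seeds
 * the hash-table lookups below.  Illustrative decomposition, with
 * ADDR_SR_SHFT == 28 (the address itself is invented for exposition):
 *
 *	va = 0xdeadbeef
 *	va >> ADDR_SR_SHFT  -> 0xd (segment register index)
 *	va_to_vsid(pm, va)  -> pm->pm_sr[0xd] & SR_VSID_MASK
 */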

#define PTESYNC()   __asm __volatile("ptesync");
#define TLBSYNC()   __asm __volatile("tlbsync; ptesync");
#define SYNC()      __asm __volatile("sync");
#define EIEIO()     __asm __volatile("eieio");

/*
 * The tlbie instruction must be executed in 64-bit mode
 * so we have to twiddle MSR[SF] around every invocation.
 * Just to add to the fun, exceptions must be off as well
 * so that we can't trap in 64-bit mode. What a pain.
 */
struct mtx  tlbie_mutex;

static __inline void
TLBIE(pmap_t pmap, vm_offset_t va) {
    uint64_t vpn;
    register_t vpn_hi, vpn_lo;
    register_t msr;
    register_t scratch;

    vpn = (uint64_t)(va & ADDR_PIDX);
    if (pmap != NULL)
        vpn |= (va_to_vsid(pmap,va) << 28);
    vpn &= ~(0xffffULL << 48);

    vpn_hi = (uint32_t)(vpn >> 32);
    vpn_lo = (uint32_t)vpn;

    mtx_lock_spin(&tlbie_mutex);
    __asm __volatile("\
        mfmsr %0; \
        mr %1, %0; \
        insrdi %1,%5,1,0; \
        mtmsrd %1; \
        ptesync; \
        \
        sld %1,%2,%4; \
        or %1,%1,%3; \
        tlbie %1; \
        \
        mtmsrd %0; \
        eieio; \
        tlbsync; \
        ptesync;"
    : "=r"(msr), "=r"(scratch) : "r"(vpn_hi), "r"(vpn_lo), "r"(32), "r"(1)
        : "memory");
    mtx_unlock_spin(&tlbie_mutex);
}

#define DISABLE_TRANS(msr)  msr = mfmsr(); mtmsr(msr & ~PSL_DR); isync()
#define ENABLE_TRANS(msr)   mtmsr(msr); isync()
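/*
 * DISABLE_TRANS/ENABLE_TRANS bracket short stretches that must touch
 * physical addresses directly, since there is no BAT-based direct map
 * here.  Typical usage, mirroring moea64_bridge_bootstrap() below
 * (sketch only):
 *
 *	register_t msr;
 *
 *	DISABLE_TRANS(msr);
 *	bzero((void *)moea64_pteg_table, size);	/+ real-mode access +/
 *	ENABLE_TRANS(msr);
 */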

#define VSID_MAKE(sr, hash)     ((sr) | (((hash) & 0xfffff) << 4))
#define VSID_TO_SR(vsid)        ((vsid) & 0xf)
#define VSID_TO_HASH(vsid)      (((vsid) >> 4) & 0xfffff)
#define VSID_HASH_MASK          0x0000007fffffffffULL

#define PVO_PTEGIDX_MASK    0x007UL     /* which PTEG slot */
#define PVO_PTEGIDX_VALID   0x008UL     /* slot is valid */
#define PVO_WIRED           0x010UL     /* PVO entry is wired */
#define PVO_MANAGED         0x020UL     /* PVO entry is managed */
#define PVO_BOOTSTRAP       0x080UL     /* PVO entry allocated during
                                           bootstrap */
#define PVO_FAKE            0x100UL     /* fictitious phys page */
#define PVO_VADDR(pvo)          ((pvo)->pvo_vaddr & ~ADDR_POFF)
#define PVO_ISFAKE(pvo)         ((pvo)->pvo_vaddr & PVO_FAKE)
#define PVO_PTEGIDX_GET(pvo)    ((pvo)->pvo_vaddr & PVO_PTEGIDX_MASK)
#define PVO_PTEGIDX_ISSET(pvo)  ((pvo)->pvo_vaddr & PVO_PTEGIDX_VALID)
#define PVO_PTEGIDX_CLR(pvo)    \
    ((void)((pvo)->pvo_vaddr &= ~(PVO_PTEGIDX_VALID|PVO_PTEGIDX_MASK)))
#define PVO_PTEGIDX_SET(pvo, i) \
    ((void)((pvo)->pvo_vaddr |= (i)|PVO_PTEGIDX_VALID))
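/*
 * pvo_vaddr is page-aligned, so the flag bits above are packed into its
 * low bits.  Illustrative reading (value invented for exposition):
 *
 *	pvo_vaddr = 0xdead101d
 *	PVO_VADDR(pvo)         -> 0xdead1000  (the mapped virtual address)
 *	PVO_PTEGIDX_GET(pvo)   -> 5           (PTE slot within the PTEG)
 *	PVO_PTEGIDX_ISSET(pvo) -> nonzero     (0x008 set: slot index valid)
 *	pvo_vaddr & PVO_WIRED  -> nonzero     (0x010 set: wired mapping)
 */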

#define MOEA_PVO_CHECK(pvo)

#define LOCK_TABLE() mtx_lock(&moea64_table_mutex)
#define UNLOCK_TABLE() mtx_unlock(&moea64_table_mutex);
#define ASSERT_TABLE_LOCK() mtx_assert(&moea64_table_mutex, MA_OWNED)

struct ofw_map {
    vm_offset_t om_va;
    vm_size_t   om_len;
    vm_offset_t om_pa_hi;
    vm_offset_t om_pa_lo;
    u_int       om_mode;
};

/*
 * Map of physical memory regions.
 */
static struct   mem_region *regions;
static struct   mem_region *pregions;
extern u_int    phys_avail_count;
extern int      regions_sz, pregions_sz;
extern int      ofw_real_mode;

extern struct pmap ofw_pmap;

extern void bs_remap_earlyboot(void);


/*
 * Lock for the pteg and pvo tables.
 */
struct mtx  moea64_table_mutex;

/*
 * PTEG data.
 */
static struct   lpteg *moea64_pteg_table;
u_int           moea64_pteg_count;
u_int           moea64_pteg_mask;

/*
 * PVO data.
 */
struct pvo_head *moea64_pvo_table;      /* pvo entries by pteg index */
/* lists of unmanaged pages */
struct pvo_head moea64_pvo_kunmanaged =
    LIST_HEAD_INITIALIZER(moea64_pvo_kunmanaged);
struct pvo_head moea64_pvo_unmanaged =
    LIST_HEAD_INITIALIZER(moea64_pvo_unmanaged);

uma_zone_t  moea64_upvo_zone; /* zone for pvo entries for unmanaged pages */
uma_zone_t  moea64_mpvo_zone; /* zone for pvo entries for managed pages */

#define BPVO_POOL_SIZE  327680
static struct pvo_entry *moea64_bpvo_pool;
static int  moea64_bpvo_pool_index = 0;

#define VSID_NBPW   (sizeof(u_int32_t) * 8)
static u_int moea64_vsid_bitmap[NPMAPS / VSID_NBPW];

static boolean_t moea64_initialized = FALSE;

/*
 * Statistics.
 */
u_int   moea64_pte_valid = 0;
u_int   moea64_pte_overflow = 0;
u_int   moea64_pvo_entries = 0;
u_int   moea64_pvo_enter_calls = 0;
u_int   moea64_pvo_remove_calls = 0;
SYSCTL_INT(_machdep, OID_AUTO, moea64_pte_valid, CTLFLAG_RD,
    &moea64_pte_valid, 0, "");
SYSCTL_INT(_machdep, OID_AUTO, moea64_pte_overflow, CTLFLAG_RD,
    &moea64_pte_overflow, 0, "");
SYSCTL_INT(_machdep, OID_AUTO, moea64_pvo_entries, CTLFLAG_RD,
    &moea64_pvo_entries, 0, "");
SYSCTL_INT(_machdep, OID_AUTO, moea64_pvo_enter_calls, CTLFLAG_RD,
    &moea64_pvo_enter_calls, 0, "");
SYSCTL_INT(_machdep, OID_AUTO, moea64_pvo_remove_calls, CTLFLAG_RD,
    &moea64_pvo_remove_calls, 0, "");
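/*
 * The counters above are exported read-only under the machdep sysctl
 * tree.  Example, from a shell (illustrative):
 *
 *	$ sysctl machdep.moea64_pte_valid machdep.moea64_pvo_entries
 */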

vm_offset_t moea64_scratchpage_va[2];
struct lpte *moea64_scratchpage_pte[2];
struct mtx  moea64_scratchpage_mtx;

/*
 * Allocate physical memory for use in moea64_bootstrap.
 */
static vm_offset_t  moea64_bootstrap_alloc(vm_size_t, u_int);

/*
 * PTE calls.
 */
static int  moea64_pte_insert(u_int, struct lpte *);

/*
 * PVO calls.
 */
static int  moea64_pvo_enter(pmap_t, uma_zone_t, struct pvo_head *,
        vm_offset_t, vm_offset_t, uint64_t, int);
static void moea64_pvo_remove(struct pvo_entry *, int);
static struct pvo_entry *moea64_pvo_find_va(pmap_t, vm_offset_t, int *);
static struct lpte *moea64_pvo_to_pte(const struct pvo_entry *, int);

/*
 * Utility routines.
 */
static void     moea64_bridge_bootstrap(mmu_t mmup,
                    vm_offset_t kernelstart, vm_offset_t kernelend);
static void     moea64_bridge_cpu_bootstrap(mmu_t, int ap);
static void     moea64_enter_locked(pmap_t, vm_offset_t, vm_page_t,
                    vm_prot_t, boolean_t);
static boolean_t    moea64_query_bit(vm_page_t, u_int64_t);
static u_int        moea64_clear_bit(vm_page_t, u_int64_t, u_int64_t *);
static void     moea64_kremove(mmu_t, vm_offset_t);
static void     moea64_syncicache(pmap_t pmap, vm_offset_t va,
                    vm_offset_t pa, vm_size_t sz);
static void     tlbia(void);

/*
 * Kernel MMU interface
 */
void moea64_change_wiring(mmu_t, pmap_t, vm_offset_t, boolean_t);
void moea64_clear_modify(mmu_t, vm_page_t);
void moea64_clear_reference(mmu_t, vm_page_t);
void moea64_copy_page(mmu_t, vm_page_t, vm_page_t);
void moea64_enter(mmu_t, pmap_t, vm_offset_t, vm_page_t, vm_prot_t, boolean_t);
void moea64_enter_object(mmu_t, pmap_t, vm_offset_t, vm_offset_t, vm_page_t,
    vm_prot_t);
void moea64_enter_quick(mmu_t, pmap_t, vm_offset_t, vm_page_t, vm_prot_t);
vm_paddr_t moea64_extract(mmu_t, pmap_t, vm_offset_t);
vm_page_t moea64_extract_and_hold(mmu_t, pmap_t, vm_offset_t, vm_prot_t);
void moea64_init(mmu_t);
boolean_t moea64_is_modified(mmu_t, vm_page_t);
boolean_t moea64_is_referenced(mmu_t, vm_page_t);
boolean_t moea64_ts_referenced(mmu_t, vm_page_t);
vm_offset_t moea64_map(mmu_t, vm_offset_t *, vm_offset_t, vm_offset_t, int);
boolean_t moea64_page_exists_quick(mmu_t, pmap_t, vm_page_t);
int moea64_page_wired_mappings(mmu_t, vm_page_t);
void moea64_pinit(mmu_t, pmap_t);
void moea64_pinit0(mmu_t, pmap_t);
void moea64_protect(mmu_t, pmap_t, vm_offset_t, vm_offset_t, vm_prot_t);
void moea64_qenter(mmu_t, vm_offset_t, vm_page_t *, int);
void moea64_qremove(mmu_t, vm_offset_t, int);
void moea64_release(mmu_t, pmap_t);
void moea64_remove(mmu_t, pmap_t, vm_offset_t, vm_offset_t);
void moea64_remove_all(mmu_t, vm_page_t);
void moea64_remove_write(mmu_t, vm_page_t);
void moea64_zero_page(mmu_t, vm_page_t);
void moea64_zero_page_area(mmu_t, vm_page_t, int, int);
void moea64_zero_page_idle(mmu_t, vm_page_t);
void moea64_activate(mmu_t, struct thread *);
void moea64_deactivate(mmu_t, struct thread *);
void *moea64_mapdev(mmu_t, vm_offset_t, vm_size_t);
void moea64_unmapdev(mmu_t, vm_offset_t, vm_size_t);
vm_offset_t moea64_kextract(mmu_t, vm_offset_t);
void moea64_kenter(mmu_t, vm_offset_t, vm_offset_t);
boolean_t moea64_dev_direct_mapped(mmu_t, vm_offset_t, vm_size_t);
static void moea64_sync_icache(mmu_t, pmap_t, vm_offset_t, vm_size_t);
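/*
 * The table below wires these routines into the kernel's MMU method
 * interface (mmu_if.h): MMU_DEF() registers the implementation, the
 * platform startup code selects it by name (MMU_TYPE_G5), and each
 * method is thereafter dispatched KOBJ-style with the mmu_t cookie as
 * its first argument.
 */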

static mmu_method_t moea64_bridge_methods[] = {
    MMUMETHOD(mmu_change_wiring,    moea64_change_wiring),
    MMUMETHOD(mmu_clear_modify,     moea64_clear_modify),
    MMUMETHOD(mmu_clear_reference,  moea64_clear_reference),
    MMUMETHOD(mmu_copy_page,        moea64_copy_page),
    MMUMETHOD(mmu_enter,            moea64_enter),
    MMUMETHOD(mmu_enter_object,     moea64_enter_object),
    MMUMETHOD(mmu_enter_quick,      moea64_enter_quick),
    MMUMETHOD(mmu_extract,          moea64_extract),
    MMUMETHOD(mmu_extract_and_hold, moea64_extract_and_hold),
    MMUMETHOD(mmu_init,             moea64_init),
    MMUMETHOD(mmu_is_modified,      moea64_is_modified),
    MMUMETHOD(mmu_is_referenced,    moea64_is_referenced),
    MMUMETHOD(mmu_ts_referenced,    moea64_ts_referenced),
    MMUMETHOD(mmu_map,              moea64_map),
    MMUMETHOD(mmu_page_exists_quick,moea64_page_exists_quick),
    MMUMETHOD(mmu_page_wired_mappings,moea64_page_wired_mappings),
    MMUMETHOD(mmu_pinit,            moea64_pinit),
    MMUMETHOD(mmu_pinit0,           moea64_pinit0),
    MMUMETHOD(mmu_protect,          moea64_protect),
    MMUMETHOD(mmu_qenter,           moea64_qenter),
    MMUMETHOD(mmu_qremove,          moea64_qremove),
    MMUMETHOD(mmu_release,          moea64_release),
    MMUMETHOD(mmu_remove,           moea64_remove),
    MMUMETHOD(mmu_remove_all,       moea64_remove_all),
    MMUMETHOD(mmu_remove_write,     moea64_remove_write),
    MMUMETHOD(mmu_sync_icache,      moea64_sync_icache),
    MMUMETHOD(mmu_zero_page,        moea64_zero_page),
    MMUMETHOD(mmu_zero_page_area,   moea64_zero_page_area),
    MMUMETHOD(mmu_zero_page_idle,   moea64_zero_page_idle),
    MMUMETHOD(mmu_activate,         moea64_activate),
    MMUMETHOD(mmu_deactivate,       moea64_deactivate),

    /* Internal interfaces */
    MMUMETHOD(mmu_bootstrap,        moea64_bridge_bootstrap),
    MMUMETHOD(mmu_cpu_bootstrap,    moea64_bridge_cpu_bootstrap),
    MMUMETHOD(mmu_mapdev,           moea64_mapdev),
    MMUMETHOD(mmu_unmapdev,         moea64_unmapdev),
    MMUMETHOD(mmu_kextract,         moea64_kextract),
    MMUMETHOD(mmu_kenter,           moea64_kenter),
    MMUMETHOD(mmu_dev_direct_mapped,moea64_dev_direct_mapped),

    { 0, 0 }
};

static mmu_def_t oea64_bridge_mmu = {
    MMU_TYPE_G5,
    moea64_bridge_methods,
    0
};
MMU_DEF(oea64_bridge_mmu);

static __inline u_int
va_to_pteg(uint64_t vsid, vm_offset_t addr)
{
    uint64_t hash;

    hash = (vsid & VSID_HASH_MASK) ^ (((uint64_t)addr & ADDR_PIDX) >>
        ADDR_PIDX_SHFT);
    return (hash & moea64_pteg_mask);
}
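/*
 * Worked example of the primary hash (numbers invented for exposition):
 * with vsid = 0xabcde and addr = 0x12345678, the page index is
 * (addr & ADDR_PIDX) >> ADDR_PIDX_SHFT = 0x2345, so hash =
 * 0xabcde ^ 0x2345 = 0xa9f9b; with moea64_pteg_mask = 0xfff the primary
 * group is 0xf9b.  On collision the secondary group is the complement
 * within the table, computed at the use sites as
 * ptegidx ^ moea64_pteg_mask (0x064 here).
 */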

static __inline struct pvo_head *
pa_to_pvoh(vm_offset_t pa, vm_page_t *pg_p)
{
    struct vm_page *pg;

    pg = PHYS_TO_VM_PAGE(pa);

    if (pg_p != NULL)
        *pg_p = pg;

    if (pg == NULL)
        return (&moea64_pvo_unmanaged);

    return (&pg->md.mdpg_pvoh);
}

static __inline struct pvo_head *
vm_page_to_pvoh(vm_page_t m)
{

    return (&m->md.mdpg_pvoh);
}

static __inline void
moea64_attr_clear(vm_page_t m, u_int64_t ptebit)
{

    mtx_assert(&vm_page_queue_mtx, MA_OWNED);
    m->md.mdpg_attrs &= ~ptebit;
}

static __inline u_int64_t
moea64_attr_fetch(vm_page_t m)
{

    return (m->md.mdpg_attrs);
}

static __inline void
moea64_attr_save(vm_page_t m, u_int64_t ptebit)
{

    mtx_assert(&vm_page_queue_mtx, MA_OWNED);
    m->md.mdpg_attrs |= ptebit;
}

static __inline void
moea64_pte_create(struct lpte *pt, uint64_t vsid, vm_offset_t va,
    uint64_t pte_lo)
{
    ASSERT_TABLE_LOCK();

    /*
     * Construct a PTE.  Default to IMB initially.  Valid bit only gets
     * set when the real pte is set in memory.
     *
     * Note: Don't set the valid bit for correct operation of tlb update.
     */
    pt->pte_hi = (vsid << LPTE_VSID_SHIFT) |
        (((uint64_t)(va & ADDR_PIDX) >> ADDR_API_SHFT64) & LPTE_API);

    pt->pte_lo = pte_lo;
}

static __inline void
moea64_pte_synch(struct lpte *pt, struct lpte *pvo_pt)
{

    ASSERT_TABLE_LOCK();

    pvo_pt->pte_lo |= pt->pte_lo & (LPTE_REF | LPTE_CHG);
}

static __inline void
moea64_pte_clear(struct lpte *pt, pmap_t pmap, vm_offset_t va, u_int64_t ptebit)
{
    ASSERT_TABLE_LOCK();

    /*
     * As shown in Section 7.6.3.2.3
     */
    pt->pte_lo &= ~ptebit;
    TLBIE(pmap,va);
}

static __inline void
moea64_pte_set(struct lpte *pt, struct lpte *pvo_pt)
{

    ASSERT_TABLE_LOCK();
    pvo_pt->pte_hi |= LPTE_VALID;

    /*
     * Update the PTE as defined in section 7.6.3.1.
     * Note that the REF/CHG bits are from pvo_pt and thus should have
     * been saved so this routine can restore them (if desired).
     */
    pt->pte_lo = pvo_pt->pte_lo;
    EIEIO();
    pt->pte_hi = pvo_pt->pte_hi;
    PTESYNC();
    moea64_pte_valid++;
}
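/*
 * The store ordering in moea64_pte_set() above follows the architected
 * recipe for adding a PTE: fill in the low doubleword first, order it
 * with eieio, only then write pte_hi with LPTE_VALID set, and finish
 * with ptesync so the hardware table walker can never observe a
 * half-written entry.
 */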

static __inline void
moea64_pte_unset(struct lpte *pt, struct lpte *pvo_pt, pmap_t pmap, vm_offset_t va)
{
    ASSERT_TABLE_LOCK();
    pvo_pt->pte_hi &= ~LPTE_VALID;

    /*
     * Force the reg & chg bits back into the PTEs.
     */
    SYNC();

    /*
     * Invalidate the pte.
     */
    pt->pte_hi &= ~LPTE_VALID;
    TLBIE(pmap,va);

    /*
     * Save the reg & chg bits.
     */
    moea64_pte_synch(pt, pvo_pt);
    moea64_pte_valid--;
}

static __inline void
moea64_pte_change(struct lpte *pt, struct lpte *pvo_pt, pmap_t pmap, vm_offset_t va)
{

    /*
     * Invalidate the PTE
     */
    moea64_pte_unset(pt, pvo_pt, pmap, va);
    moea64_pte_set(pt, pvo_pt);
    if (pmap == kernel_pmap)
        isync();
}

static __inline uint64_t
moea64_calc_wimg(vm_offset_t pa)
{
    uint64_t pte_lo;
    int i;

    /*
     * Assume the page is cache inhibited and access is guarded unless
     * it's in our available memory array.
     */
    pte_lo = LPTE_I | LPTE_G;
    for (i = 0; i < pregions_sz; i++) {
        if ((pa >= pregions[i].mr_start) &&
            (pa < (pregions[i].mr_start + pregions[i].mr_size))) {
            pte_lo &= ~(LPTE_I | LPTE_G);
            pte_lo |= LPTE_M;
            break;
        }
    }

    return pte_lo;
}

/*
 * Quick sort callout for comparing memory regions.
 */
static int  mr_cmp(const void *a, const void *b);
static int  om_cmp(const void *a, const void *b);

static int
mr_cmp(const void *a, const void *b)
{
    const struct mem_region *regiona;
    const struct mem_region *regionb;

    regiona = a;
    regionb = b;
    if (regiona->mr_start < regionb->mr_start)
        return (-1);
    else if (regiona->mr_start > regionb->mr_start)
        return (1);
    else
        return (0);
}

static int
om_cmp(const void *a, const void *b)
{
    const struct ofw_map *mapa;
    const struct ofw_map *mapb;

    mapa = a;
    mapb = b;
    if (mapa->om_pa_hi < mapb->om_pa_hi)
        return (-1);
    else if (mapa->om_pa_hi > mapb->om_pa_hi)
        return (1);
    else if (mapa->om_pa_lo < mapb->om_pa_lo)
        return (-1);
    else if (mapa->om_pa_lo > mapb->om_pa_lo)
        return (1);
    else
        return (0);
}

static void
moea64_bridge_cpu_bootstrap(mmu_t mmup, int ap)
{
    int i = 0;

    /*
     * Initialize segment registers and MMU
     */

    mtmsr(mfmsr() & ~PSL_DR & ~PSL_IR); isync();
    for (i = 0; i < 16; i++) {
        mtsrin(i << ADDR_SR_SHFT, kernel_pmap->pm_sr[i]);
    }
    __asm __volatile ("ptesync; mtsdr1 %0; isync"
        :: "r"((u_int)moea64_pteg_table
             | (32 - cntlzw(moea64_pteg_mask >> 11))));
    tlbia();
}
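/*
 * The mtsdr1 operand above packs the (size-aligned) table base with
 * HTABSIZE in the low bits, where the table holds 2^(11 + HTABSIZE)
 * PTEGs.  Worked example (illustrative): for moea64_pteg_count =
 * 0x1000, moea64_pteg_mask >> 11 == 1, cntlzw(1) == 31, and
 * 32 - 31 == 1, i.e. HTABSIZE = 1 for a table of 2^12 PTEGs.
 */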

static void
moea64_add_ofw_mappings(mmu_t mmup, phandle_t mmu, size_t sz)
{
    struct ofw_map  translations[sz/sizeof(struct ofw_map)];
    register_t      msr;
    vm_offset_t     off;
    vm_paddr_t      pa_base;
    int             i, ofw_mappings;

    bzero(translations, sz);
    if (OF_getprop(mmu, "translations", translations, sz) == -1)
        panic("moea64_bootstrap: can't get ofw translations");

    CTR0(KTR_PMAP, "moea64_add_ofw_mappings: translations");
    sz /= sizeof(*translations);
    qsort(translations, sz, sizeof (*translations), om_cmp);

    for (i = 0, ofw_mappings = 0; i < sz; i++) {
        CTR3(KTR_PMAP, "translation: pa=%#x va=%#x len=%#x",
            (uint32_t)(translations[i].om_pa_lo), translations[i].om_va,
            translations[i].om_len);

        if (translations[i].om_pa_lo % PAGE_SIZE)
            panic("OFW translation not page-aligned!");

        if (translations[i].om_pa_hi)
            panic("OFW translations above 32-bit boundary!");

        pa_base = translations[i].om_pa_lo;

        /* Now enter the pages for this mapping */

        DISABLE_TRANS(msr);
        for (off = 0; off < translations[i].om_len; off += PAGE_SIZE) {
            moea64_kenter(mmup, translations[i].om_va + off,
                pa_base + off);

            ofw_mappings++;
        }
        ENABLE_TRANS(msr);
    }
}

static void
moea64_bridge_bootstrap(mmu_t mmup, vm_offset_t kernelstart, vm_offset_t kernelend)
{
    ihandle_t   mmui;
    phandle_t   chosen;
    phandle_t   mmu;
    size_t      sz;
    int         i, j;
    vm_size_t   size, physsz, hwphyssz;
    vm_offset_t pa, va, off;
    register_t  msr;
    void        *dpcpu;

    /* We don't have a direct map since there is no BAT */
    hw_direct_map = 0;

    /* Make sure battable is zero, since we have no BAT */
    for (i = 0; i < 16; i++) {
        battable[i].batu = 0;
        battable[i].batl = 0;
    }

    /* Get physical memory regions from firmware */
    mem_regions(&pregions, &pregions_sz, &regions, &regions_sz);
    CTR0(KTR_PMAP, "moea64_bootstrap: physical memory");

    qsort(pregions, pregions_sz, sizeof(*pregions), mr_cmp);
    if (sizeof(phys_avail)/sizeof(phys_avail[0]) < regions_sz)
        panic("moea64_bootstrap: phys_avail too small");
    qsort(regions, regions_sz, sizeof(*regions), mr_cmp);
    phys_avail_count = 0;
    physsz = 0;
    hwphyssz = 0;
    TUNABLE_ULONG_FETCH("hw.physmem", (u_long *) &hwphyssz);
    for (i = 0, j = 0; i < regions_sz; i++, j += 2) {
        CTR3(KTR_PMAP, "region: %#x - %#x (%#x)", regions[i].mr_start,
            regions[i].mr_start + regions[i].mr_size,
            regions[i].mr_size);
        if (hwphyssz != 0 &&
            (physsz + regions[i].mr_size) >= hwphyssz) {
            if (physsz < hwphyssz) {
                phys_avail[j] = regions[i].mr_start;
                phys_avail[j + 1] = regions[i].mr_start +
                    hwphyssz - physsz;
                physsz = hwphyssz;
                phys_avail_count++;
            }
            break;
        }
        phys_avail[j] = regions[i].mr_start;
        phys_avail[j + 1] = regions[i].mr_start + regions[i].mr_size;
        phys_avail_count++;
        physsz += regions[i].mr_size;
    }
    physmem = btoc(physsz);

    /*
     * Allocate PTEG table.
     */
#ifdef PTEGCOUNT
    moea64_pteg_count = PTEGCOUNT;
#else
    moea64_pteg_count = 0x1000;

    while (moea64_pteg_count < physmem)
        moea64_pteg_count <<= 1;
#endif /* PTEGCOUNT */

    size = moea64_pteg_count * sizeof(struct lpteg);
    CTR2(KTR_PMAP, "moea64_bootstrap: %d PTEGs, %d bytes",
        moea64_pteg_count, size);

    /*
     * We now need to allocate memory. This memory, to be allocated,
     * has to reside in a page table. The page table we are about to
     * allocate. We don't have BAT. So drop to data real mode for a minute
     * as a measure of last resort. We do this a couple times.
     */

    moea64_pteg_table = (struct lpteg *)moea64_bootstrap_alloc(size, size);
    DISABLE_TRANS(msr);
    bzero((void *)moea64_pteg_table, moea64_pteg_count * sizeof(struct lpteg));
    ENABLE_TRANS(msr);

    moea64_pteg_mask = moea64_pteg_count - 1;

    CTR1(KTR_PMAP, "moea64_bootstrap: PTEG table at %p", moea64_pteg_table);
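    /*
     * Sizing note (illustrative, assuming the usual 8 16-byte PTEs per
     * struct lpteg): a machine with 2 GB of RAM has physmem = 2^19
     * pages, so the loop above settles on moea64_pteg_count = 2^19 and
     * the table consumes 2^19 * 128 bytes = 64 MB.
     */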

    /*
     * Allocate pv/overflow lists.
     */
    size = sizeof(struct pvo_head) * moea64_pteg_count;

    moea64_pvo_table = (struct pvo_head *)moea64_bootstrap_alloc(size,
        PAGE_SIZE);
    CTR1(KTR_PMAP, "moea64_bootstrap: PVO table at %p", moea64_pvo_table);

    DISABLE_TRANS(msr);
    for (i = 0; i < moea64_pteg_count; i++)
        LIST_INIT(&moea64_pvo_table[i]);
    ENABLE_TRANS(msr);

    /*
     * Initialize the lock that synchronizes access to the pteg and pvo
     * tables.
     */
    mtx_init(&moea64_table_mutex, "pmap table", NULL, MTX_DEF |
        MTX_RECURSE);

    /*
     * Initialize the TLBIE lock. TLBIE can only be executed by one CPU.
     */
    mtx_init(&tlbie_mutex, "tlbie mutex", NULL, MTX_SPIN);

    /*
     * Initialise the unmanaged pvo pool.
     */
    moea64_bpvo_pool = (struct pvo_entry *)moea64_bootstrap_alloc(
        BPVO_POOL_SIZE*sizeof(struct pvo_entry), 0);
    moea64_bpvo_pool_index = 0;

    /*
     * Make sure kernel vsid is allocated as well as VSID 0.
     */
    moea64_vsid_bitmap[(KERNEL_VSIDBITS & (NPMAPS - 1)) / VSID_NBPW]
        |= 1 << (KERNEL_VSIDBITS % VSID_NBPW);
    moea64_vsid_bitmap[0] |= 1;

    /*
     * Initialize the kernel pmap (which is statically allocated).
     */
    for (i = 0; i < 16; i++)
        kernel_pmap->pm_sr[i] = EMPTY_SEGMENT + i;

    kernel_pmap->pmap_phys = kernel_pmap;
    kernel_pmap->pm_active = ~0;

    PMAP_LOCK_INIT(kernel_pmap);

    /*
     * Now map in all the other buffers we allocated earlier
     */

    DISABLE_TRANS(msr);
    size = moea64_pteg_count * sizeof(struct lpteg);
    off = (vm_offset_t)(moea64_pteg_table);
    for (pa = off; pa < off + size; pa += PAGE_SIZE)
        moea64_kenter(mmup, pa, pa);
    size = sizeof(struct pvo_head) * moea64_pteg_count;
    off = (vm_offset_t)(moea64_pvo_table);
    for (pa = off; pa < off + size; pa += PAGE_SIZE)
        moea64_kenter(mmup, pa, pa);
    size = BPVO_POOL_SIZE*sizeof(struct pvo_entry);
    off = (vm_offset_t)(moea64_bpvo_pool);
    for (pa = off; pa < off + size; pa += PAGE_SIZE)
        moea64_kenter(mmup, pa, pa);
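    /*
     * The three loops above enter 1:1 (VA == PA) mappings, so the PTEG
     * table, the PVO table and the bootstrap PVO pool stay reachable
     * at unchanged addresses once translation is switched on.
     */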

    /*
     * Map certain important things, like ourselves.
     *
     * NOTE: We do not map the exception vector space. That code is
     * used only in real mode, and leaving it unmapped allows us to
     * catch NULL pointer dereferences, instead of making NULL a valid
     * address.
     */

    for (pa = kernelstart & ~PAGE_MASK; pa < kernelend; pa += PAGE_SIZE)
        moea64_kenter(mmup, pa, pa);
    ENABLE_TRANS(msr);

    if (!ofw_real_mode) {
        /*
         * Set up the Open Firmware pmap and add its mappings.
         */

        moea64_pinit(mmup, &ofw_pmap);
        for (i = 0; i < 16; i++)
            ofw_pmap.pm_sr[i] = kernel_pmap->pm_sr[i];

        if ((chosen = OF_finddevice("/chosen")) == -1)
            panic("moea64_bootstrap: can't find /chosen");
        OF_getprop(chosen, "mmu", &mmui, 4);
        if ((mmu = OF_instance_to_package(mmui)) == -1)
            panic("moea64_bootstrap: can't get mmu package");
        if ((sz = OF_getproplen(mmu, "translations")) == -1)
            panic("moea64_bootstrap: can't get ofw translation count");
        if (sz > 6144 /* tmpstksz - 2 KB headroom */)
            panic("moea64_bootstrap: too many ofw translations");

        moea64_add_ofw_mappings(mmup, mmu, sz);
    }

#ifdef SMP
    TLBSYNC();
#endif

    /*
     * Calculate the last available physical address.
     */
    for (i = 0; phys_avail[i + 2] != 0; i += 2)
        ;
    Maxmem = powerpc_btop(phys_avail[i + 1]);

    /*
     * Initialize MMU and remap early physical mappings
     */
    moea64_bridge_cpu_bootstrap(mmup,0);
    mtmsr(mfmsr() | PSL_DR | PSL_IR); isync();
    pmap_bootstrapped++;
    bs_remap_earlyboot();

    /*
     * Set the start and end of kva.
     */
    virtual_avail = VM_MIN_KERNEL_ADDRESS;
    virtual_end = VM_MAX_SAFE_KERNEL_ADDRESS;

    /*
     * Figure out how far we can extend virtual_end into segment 16
     * without running into existing mappings. Segment 16 is guaranteed
     * to contain neither RAM nor devices (at least on Apple hardware),
     * but will generally contain some OFW mappings we should not
     * step on.
     */

    PMAP_LOCK(kernel_pmap);
    while (moea64_pvo_find_va(kernel_pmap, virtual_end+1, NULL) == NULL)
        virtual_end += PAGE_SIZE;
    PMAP_UNLOCK(kernel_pmap);

    /*
     * Allocate some things for page zeroing. We put this directly
     * in the page table, marked with LPTE_LOCKED, to avoid any
     * of the PVO book-keeping or other parts of the VM system
     * from even knowing that this hack exists.
     */

    mtx_init(&moea64_scratchpage_mtx, "pvo zero page", NULL, MTX_DEF);
    for (i = 0; i < 2; i++) {
        struct lpte pt;
        uint64_t vsid;
        int pteidx, ptegidx;

        moea64_scratchpage_va[i] = (virtual_end+1) - PAGE_SIZE;
        virtual_end -= PAGE_SIZE;

        LOCK_TABLE();

        vsid = va_to_vsid(kernel_pmap, moea64_scratchpage_va[i]);
        moea64_pte_create(&pt, vsid, moea64_scratchpage_va[i],
            LPTE_NOEXEC);
        pt.pte_hi |= LPTE_LOCKED;

        ptegidx = va_to_pteg(vsid, moea64_scratchpage_va[i]);
        pteidx = moea64_pte_insert(ptegidx, &pt);
        if (pt.pte_hi & LPTE_HID)
            ptegidx ^= moea64_pteg_mask;

        moea64_scratchpage_pte[i] =
            &moea64_pteg_table[ptegidx].pt[pteidx];

        UNLOCK_TABLE();
    }

    /*
     * Allocate a kernel stack with a guard page for thread0 and map it
     * into the kernel page map.
     */
    pa = moea64_bootstrap_alloc(KSTACK_PAGES * PAGE_SIZE, PAGE_SIZE);
    va = virtual_avail + KSTACK_GUARD_PAGES * PAGE_SIZE;
    virtual_avail = va + KSTACK_PAGES * PAGE_SIZE;
    CTR2(KTR_PMAP, "moea_bootstrap: kstack0 at %#x (%#x)", pa, va);
    thread0.td_kstack = va;
    thread0.td_kstack_pages = KSTACK_PAGES;
    for (i = 0; i < KSTACK_PAGES; i++) {
        moea64_kenter(mmup, va, pa);
        pa += PAGE_SIZE;
        va += PAGE_SIZE;
    }

    /*
     * Allocate virtual address space for the message buffer.
     */
    pa = msgbuf_phys = moea64_bootstrap_alloc(MSGBUF_SIZE, PAGE_SIZE);
    msgbufp = (struct msgbuf *)virtual_avail;
    va = virtual_avail;
    virtual_avail += round_page(MSGBUF_SIZE);
    while (va < virtual_avail) {
        moea64_kenter(mmup, va, pa);
        pa += PAGE_SIZE;
        va += PAGE_SIZE;
    }

    /*
     * Allocate virtual address space for the dynamic percpu area.
     */
    pa = moea64_bootstrap_alloc(DPCPU_SIZE, PAGE_SIZE);
    dpcpu = (void *)virtual_avail;
    virtual_avail += DPCPU_SIZE;
    while (va < virtual_avail) {
        moea64_kenter(mmup, va, pa);
        pa += PAGE_SIZE;
        va += PAGE_SIZE;
    }
    dpcpu_init(dpcpu, 0);
}

/*
 * Activate a user pmap.  The pmap must be activated before its address
 * space can be accessed in any way.
 */
void
moea64_activate(mmu_t mmu, struct thread *td)
{
    pmap_t pm, pmr;

    /*
     * Load all the data we need up front to encourage the compiler to
     * not issue any loads while we have interrupts disabled below.
     */
    pm = &td->td_proc->p_vmspace->vm_pmap;
    pmr = pm->pmap_phys;

    pm->pm_active |= PCPU_GET(cpumask);
    PCPU_SET(curpmap, pmr);
}

void
moea64_deactivate(mmu_t mmu, struct thread *td)
{
    pmap_t pm;

    pm = &td->td_proc->p_vmspace->vm_pmap;
    pm->pm_active &= ~(PCPU_GET(cpumask));
    PCPU_SET(curpmap, NULL);
}

void
moea64_change_wiring(mmu_t mmu, pmap_t pm, vm_offset_t va, boolean_t wired)
{
    struct pvo_entry *pvo;

    PMAP_LOCK(pm);
    pvo = moea64_pvo_find_va(pm, va & ~ADDR_POFF, NULL);

    if (pvo != NULL) {
        if (wired) {
            if ((pvo->pvo_vaddr & PVO_WIRED) == 0)
                pm->pm_stats.wired_count++;
            pvo->pvo_vaddr |= PVO_WIRED;
        } else {
            if ((pvo->pvo_vaddr & PVO_WIRED) != 0)
                pm->pm_stats.wired_count--;
            pvo->pvo_vaddr &= ~PVO_WIRED;
        }
    }
    PMAP_UNLOCK(pm);
}

/*
 * This goes through and sets the physical address of our
 * special scratch PTE to the PA we want to zero or copy. Because
 * of locking issues (this can get called in pvo_enter() by
 * the UMA allocator), we can't use most other utility functions here.
 */

static __inline
void moea64_set_scratchpage_pa(int which, vm_offset_t pa) {

    mtx_assert(&moea64_scratchpage_mtx, MA_OWNED);

    moea64_scratchpage_pte[which]->pte_hi &= ~LPTE_VALID;
    TLBIE(kernel_pmap, moea64_scratchpage_va[which]);

    moea64_scratchpage_pte[which]->pte_lo &=
        ~(LPTE_WIMG | LPTE_RPGN);
    moea64_scratchpage_pte[which]->pte_lo |=
        moea64_calc_wimg(pa) | (uint64_t)pa;
    EIEIO();

    moea64_scratchpage_pte[which]->pte_hi |= LPTE_VALID;
    PTESYNC(); isync();
}
mtx_unlock(&moea64_scratchpage_mtx); 1178204269Snwhitehorn} 1179204269Snwhitehorn 1180204269Snwhitehornvoid 1181190681Snwhitehornmoea64_zero_page_idle(mmu_t mmu, vm_page_t m) 1182190681Snwhitehorn{ 1183190681Snwhitehorn 1184190681Snwhitehorn moea64_zero_page(mmu, m); 1185190681Snwhitehorn} 1186190681Snwhitehorn 1187190681Snwhitehorn/* 1188190681Snwhitehorn * Map the given physical page at the specified virtual address in the 1189190681Snwhitehorn * target pmap with the protection requested. If specified the page 1190190681Snwhitehorn * will be wired down. 1191190681Snwhitehorn */ 1192190681Snwhitehornvoid 1193190681Snwhitehornmoea64_enter(mmu_t mmu, pmap_t pmap, vm_offset_t va, vm_page_t m, 1194190681Snwhitehorn vm_prot_t prot, boolean_t wired) 1195190681Snwhitehorn{ 1196190681Snwhitehorn 1197190681Snwhitehorn vm_page_lock_queues(); 1198190681Snwhitehorn PMAP_LOCK(pmap); 1199190681Snwhitehorn moea64_enter_locked(pmap, va, m, prot, wired); 1200190681Snwhitehorn vm_page_unlock_queues(); 1201190681Snwhitehorn PMAP_UNLOCK(pmap); 1202190681Snwhitehorn} 1203190681Snwhitehorn 1204190681Snwhitehorn/* 1205190681Snwhitehorn * Map the given physical page at the specified virtual address in the 1206190681Snwhitehorn * target pmap with the protection requested. If specified the page 1207190681Snwhitehorn * will be wired down. 1208190681Snwhitehorn * 1209190681Snwhitehorn * The page queues and pmap must be locked. 1210190681Snwhitehorn */ 1211190681Snwhitehorn 1212190681Snwhitehornstatic void 1213190681Snwhitehornmoea64_enter_locked(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot, 1214190681Snwhitehorn boolean_t wired) 1215190681Snwhitehorn{ 1216190681Snwhitehorn struct pvo_head *pvo_head; 1217190681Snwhitehorn uma_zone_t zone; 1218190681Snwhitehorn vm_page_t pg; 1219190681Snwhitehorn uint64_t pte_lo; 1220190681Snwhitehorn u_int pvo_flags; 1221190681Snwhitehorn int error; 1222190681Snwhitehorn 1223190681Snwhitehorn if (!moea64_initialized) { 1224190681Snwhitehorn pvo_head = &moea64_pvo_kunmanaged; 1225190681Snwhitehorn pg = NULL; 1226190681Snwhitehorn zone = moea64_upvo_zone; 1227190681Snwhitehorn pvo_flags = 0; 1228190681Snwhitehorn } else { 1229190681Snwhitehorn pvo_head = vm_page_to_pvoh(m); 1230190681Snwhitehorn pg = m; 1231190681Snwhitehorn zone = moea64_mpvo_zone; 1232190681Snwhitehorn pvo_flags = PVO_MANAGED; 1233190681Snwhitehorn } 1234190681Snwhitehorn 1235190681Snwhitehorn if (pmap_bootstrapped) 1236190681Snwhitehorn mtx_assert(&vm_page_queue_mtx, MA_OWNED); 1237190681Snwhitehorn PMAP_LOCK_ASSERT(pmap, MA_OWNED); 1238190681Snwhitehorn 1239190681Snwhitehorn /* XXX change the pvo head for fake pages */ 1240190681Snwhitehorn if ((m->flags & PG_FICTITIOUS) == PG_FICTITIOUS) { 1241190681Snwhitehorn pvo_flags &= ~PVO_MANAGED; 1242190681Snwhitehorn pvo_head = &moea64_pvo_kunmanaged; 1243190681Snwhitehorn zone = moea64_upvo_zone; 1244190681Snwhitehorn } 1245190681Snwhitehorn 1246190681Snwhitehorn pte_lo = moea64_calc_wimg(VM_PAGE_TO_PHYS(m)); 1247190681Snwhitehorn 1248190681Snwhitehorn if (prot & VM_PROT_WRITE) { 1249190681Snwhitehorn pte_lo |= LPTE_BW; 1250190681Snwhitehorn if (pmap_bootstrapped) 1251190681Snwhitehorn vm_page_flag_set(m, PG_WRITEABLE); 1252190681Snwhitehorn } else 1253190681Snwhitehorn pte_lo |= LPTE_BR; 1254190681Snwhitehorn 1255190681Snwhitehorn if (prot & VM_PROT_EXECUTE) 1256190681Snwhitehorn pvo_flags |= VM_PROT_EXECUTE; 1257190681Snwhitehorn 1258190681Snwhitehorn if (wired) 1259190681Snwhitehorn pvo_flags |= PVO_WIRED; 1260190681Snwhitehorn 1261190681Snwhitehorn if 
((m->flags & PG_FICTITIOUS) != 0) 1262190681Snwhitehorn pvo_flags |= PVO_FAKE; 1263190681Snwhitehorn 1264190681Snwhitehorn error = moea64_pvo_enter(pmap, zone, pvo_head, va, VM_PAGE_TO_PHYS(m), 1265198378Snwhitehorn pte_lo, pvo_flags); 1266190681Snwhitehorn 1267190681Snwhitehorn /* 1268190681Snwhitehorn * Flush the page from the instruction cache if this page is 1269190681Snwhitehorn * mapped executable and cacheable. 1270190681Snwhitehorn */ 1271190681Snwhitehorn if ((pte_lo & (LPTE_I | LPTE_G | LPTE_NOEXEC)) == 0) { 1272198341Smarcel moea64_syncicache(pmap, va, VM_PAGE_TO_PHYS(m), PAGE_SIZE); 1273190681Snwhitehorn } 1274190681Snwhitehorn} 1275190681Snwhitehorn 1276190681Snwhitehornstatic void 1277198341Smarcelmoea64_syncicache(pmap_t pmap, vm_offset_t va, vm_offset_t pa, vm_size_t sz) 1278190681Snwhitehorn{ 1279204042Snwhitehorn 1280190681Snwhitehorn /* 1281190681Snwhitehorn * This is much trickier than on older systems because 1282190681Snwhitehorn * we can't sync the icache on physical addresses directly 1283190681Snwhitehorn * without a direct map. Instead we check a couple of cases 1284190681Snwhitehorn * where the memory is already mapped in and, failing that, 1285190681Snwhitehorn * use the same trick we use for page zeroing to create 1286190681Snwhitehorn * a temporary mapping for this physical address. 1287190681Snwhitehorn */ 1288190681Snwhitehorn 1289190681Snwhitehorn if (!pmap_bootstrapped) { 1290190681Snwhitehorn /* 1291190681Snwhitehorn * If PMAP is not bootstrapped, we are likely to be 1292190681Snwhitehorn * in real mode. 1293190681Snwhitehorn */ 1294198341Smarcel __syncicache((void *)pa, sz); 1295190681Snwhitehorn } else if (pmap == kernel_pmap) { 1296198341Smarcel __syncicache((void *)va, sz); 1297190681Snwhitehorn } else { 1298190681Snwhitehorn /* Use the scratch page to set up a temp mapping */ 1299190681Snwhitehorn 1300190681Snwhitehorn mtx_lock(&moea64_scratchpage_mtx); 1301190681Snwhitehorn 1302204042Snwhitehorn moea64_set_scratchpage_pa(1,pa & ~ADDR_POFF); 1303204042Snwhitehorn __syncicache((void *)(moea64_scratchpage_va[1] + 1304204042Snwhitehorn (va & ADDR_POFF)), sz); 1305190681Snwhitehorn 1306190681Snwhitehorn mtx_unlock(&moea64_scratchpage_mtx); 1307190681Snwhitehorn } 1308190681Snwhitehorn} 1309190681Snwhitehorn 1310190681Snwhitehorn/* 1311190681Snwhitehorn * Maps a sequence of resident pages belonging to the same object. 1312190681Snwhitehorn * The sequence begins with the given page m_start. This page is 1313190681Snwhitehorn * mapped at the given virtual address start. Each subsequent page is 1314190681Snwhitehorn * mapped at a virtual address that is offset from start by the same 1315190681Snwhitehorn * amount as the page is offset from m_start within the object. The 1316190681Snwhitehorn * last page in the sequence is the page with the largest offset from 1317190681Snwhitehorn * m_start that can be mapped at a virtual address less than the given 1318190681Snwhitehorn * virtual address end. Not every virtual page between start and end 1319190681Snwhitehorn * is mapped; only those for which a resident page exists with the 1320190681Snwhitehorn * corresponding offset from m_start are mapped. 
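 *
 * For illustration (hypothetical numbers, not from any caller in this
 * file): with start = 0x10000000, end = 0x10003000 (so psize = 3) and
 * resident pages at pindex offsets 0, 1, and 5 from m_start, only the
 * first two pages are entered, at 0x10000000 and 0x10001000; the page
 * at offset 5 fails the diff < psize test and ends the walk.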
1321190681Snwhitehorn */ 1322190681Snwhitehornvoid 1323190681Snwhitehornmoea64_enter_object(mmu_t mmu, pmap_t pm, vm_offset_t start, vm_offset_t end, 1324190681Snwhitehorn vm_page_t m_start, vm_prot_t prot) 1325190681Snwhitehorn{ 1326190681Snwhitehorn vm_page_t m; 1327190681Snwhitehorn vm_pindex_t diff, psize; 1328190681Snwhitehorn 1329190681Snwhitehorn psize = atop(end - start); 1330190681Snwhitehorn m = m_start; 1331190681Snwhitehorn PMAP_LOCK(pm); 1332190681Snwhitehorn while (m != NULL && (diff = m->pindex - m_start->pindex) < psize) { 1333190681Snwhitehorn moea64_enter_locked(pm, start + ptoa(diff), m, prot & 1334190681Snwhitehorn (VM_PROT_READ | VM_PROT_EXECUTE), FALSE); 1335190681Snwhitehorn m = TAILQ_NEXT(m, listq); 1336190681Snwhitehorn } 1337190681Snwhitehorn PMAP_UNLOCK(pm); 1338190681Snwhitehorn} 1339190681Snwhitehorn 1340190681Snwhitehornvoid 1341190681Snwhitehornmoea64_enter_quick(mmu_t mmu, pmap_t pm, vm_offset_t va, vm_page_t m, 1342190681Snwhitehorn vm_prot_t prot) 1343190681Snwhitehorn{ 1344190681Snwhitehorn PMAP_LOCK(pm); 1345190681Snwhitehorn moea64_enter_locked(pm, va, m, prot & (VM_PROT_READ | VM_PROT_EXECUTE), 1346190681Snwhitehorn FALSE); 1347190681Snwhitehorn PMAP_UNLOCK(pm); 1348190681Snwhitehorn 1349190681Snwhitehorn} 1350190681Snwhitehorn 1351190681Snwhitehornvm_paddr_t 1352190681Snwhitehornmoea64_extract(mmu_t mmu, pmap_t pm, vm_offset_t va) 1353190681Snwhitehorn{ 1354190681Snwhitehorn struct pvo_entry *pvo; 1355190681Snwhitehorn vm_paddr_t pa; 1356190681Snwhitehorn 1357190681Snwhitehorn PMAP_LOCK(pm); 1358190681Snwhitehorn pvo = moea64_pvo_find_va(pm, va & ~ADDR_POFF, NULL); 1359190681Snwhitehorn if (pvo == NULL) 1360190681Snwhitehorn pa = 0; 1361190681Snwhitehorn else 1362190681Snwhitehorn pa = (pvo->pvo_pte.lpte.pte_lo & LPTE_RPGN) | (va & ADDR_POFF); 1363190681Snwhitehorn PMAP_UNLOCK(pm); 1364190681Snwhitehorn return (pa); 1365190681Snwhitehorn} 1366190681Snwhitehorn 1367190681Snwhitehorn/* 1368190681Snwhitehorn * Atomically extract and hold the physical page with the given 1369190681Snwhitehorn * pmap and virtual address pair if that mapping permits the given 1370190681Snwhitehorn * protection. 
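 *
 * A sketch of the intended use (hypothetical caller; locking elided):
 *
 *	m = moea64_extract_and_hold(mmu, pmap, va, VM_PROT_WRITE);
 *	if (m != NULL) {
 *		... the page stays held until vm_page_unhold(m) ...
 *	}
 *
 * NULL is returned both when no mapping exists and when write access
 * is requested of a read-only mapping.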
1371190681Snwhitehorn */ 1372190681Snwhitehornvm_page_t 1373190681Snwhitehornmoea64_extract_and_hold(mmu_t mmu, pmap_t pmap, vm_offset_t va, vm_prot_t prot) 1374190681Snwhitehorn{ 1375190681Snwhitehorn struct pvo_entry *pvo; 1376190681Snwhitehorn vm_page_t m; 1377207410Skmacy vm_paddr_t pa; 1378190681Snwhitehorn 1379190681Snwhitehorn m = NULL; 1380207410Skmacy pa = 0; 1381190681Snwhitehorn PMAP_LOCK(pmap); 1382207410Skmacyretry: 1383190681Snwhitehorn pvo = moea64_pvo_find_va(pmap, va & ~ADDR_POFF, NULL); 1384190681Snwhitehorn if (pvo != NULL && (pvo->pvo_pte.lpte.pte_hi & LPTE_VALID) && 1385190681Snwhitehorn ((pvo->pvo_pte.lpte.pte_lo & LPTE_PP) == LPTE_RW || 1386190681Snwhitehorn (prot & VM_PROT_WRITE) == 0)) { 1387207410Skmacy if (vm_page_pa_tryrelock(pmap, 1388207410Skmacy pvo->pvo_pte.lpte.pte_lo & LPTE_RPGN, &pa)) 1389207410Skmacy goto retry; 1390190681Snwhitehorn m = PHYS_TO_VM_PAGE(pvo->pvo_pte.lpte.pte_lo & LPTE_RPGN); 1391190681Snwhitehorn vm_page_hold(m); 1392190681Snwhitehorn } 1393207410Skmacy PA_UNLOCK_COND(pa); 1394190681Snwhitehorn PMAP_UNLOCK(pmap); 1395190681Snwhitehorn return (m); 1396190681Snwhitehorn} 1397190681Snwhitehorn 1398190681Snwhitehornstatic void * 1399190681Snwhitehornmoea64_uma_page_alloc(uma_zone_t zone, int bytes, u_int8_t *flags, int wait) 1400190681Snwhitehorn{ 1401190681Snwhitehorn /* 1402190681Snwhitehorn * This entire routine is a horrible hack to avoid bothering kmem 1403190681Snwhitehorn * for new KVA addresses. Because this can get called from inside 1404190681Snwhitehorn * kmem allocation routines, calling kmem for a new address here 1405190681Snwhitehorn * can lead to multiply locking non-recursive mutexes. 1406190681Snwhitehorn */ 1407190681Snwhitehorn static vm_pindex_t color; 1408190681Snwhitehorn vm_offset_t va; 1409190681Snwhitehorn 1410190681Snwhitehorn vm_page_t m; 1411190681Snwhitehorn int pflags, needed_lock; 1412190681Snwhitehorn 1413190681Snwhitehorn *flags = UMA_SLAB_PRIV; 1414190681Snwhitehorn needed_lock = !PMAP_LOCKED(kernel_pmap); 1415190681Snwhitehorn 1416190681Snwhitehorn if (needed_lock) 1417190681Snwhitehorn PMAP_LOCK(kernel_pmap); 1418190681Snwhitehorn 1419190681Snwhitehorn if ((wait & (M_NOWAIT|M_USE_RESERVE)) == M_NOWAIT) 1420190681Snwhitehorn pflags = VM_ALLOC_INTERRUPT | VM_ALLOC_WIRED; 1421190681Snwhitehorn else 1422190681Snwhitehorn pflags = VM_ALLOC_SYSTEM | VM_ALLOC_WIRED; 1423190681Snwhitehorn if (wait & M_ZERO) 1424190681Snwhitehorn pflags |= VM_ALLOC_ZERO; 1425190681Snwhitehorn 1426190681Snwhitehorn for (;;) { 1427190681Snwhitehorn m = vm_page_alloc(NULL, color++, pflags | VM_ALLOC_NOOBJ); 1428190681Snwhitehorn if (m == NULL) { 1429190681Snwhitehorn if (wait & M_NOWAIT) 1430190681Snwhitehorn return (NULL); 1431190681Snwhitehorn VM_WAIT; 1432190681Snwhitehorn } else 1433190681Snwhitehorn break; 1434190681Snwhitehorn } 1435190681Snwhitehorn 1436204128Snwhitehorn va = VM_PAGE_TO_PHYS(m); 1437190681Snwhitehorn 1438190681Snwhitehorn moea64_pvo_enter(kernel_pmap, moea64_upvo_zone, 1439204128Snwhitehorn &moea64_pvo_kunmanaged, va, VM_PAGE_TO_PHYS(m), LPTE_M, 1440198378Snwhitehorn PVO_WIRED | PVO_BOOTSTRAP); 1441190681Snwhitehorn 1442190681Snwhitehorn if (needed_lock) 1443190681Snwhitehorn PMAP_UNLOCK(kernel_pmap); 1444198378Snwhitehorn 1445190681Snwhitehorn if ((wait & M_ZERO) && (m->flags & PG_ZERO) == 0) 1446190681Snwhitehorn bzero((void *)va, PAGE_SIZE); 1447190681Snwhitehorn 1448190681Snwhitehorn return (void *)va; 1449190681Snwhitehorn} 1450190681Snwhitehorn 1451190681Snwhitehornvoid 
1452190681Snwhitehornmoea64_init(mmu_t mmu) 1453190681Snwhitehorn{ 1454190681Snwhitehorn 1455190681Snwhitehorn CTR0(KTR_PMAP, "moea64_init"); 1456190681Snwhitehorn 1457190681Snwhitehorn moea64_upvo_zone = uma_zcreate("UPVO entry", sizeof (struct pvo_entry), 1458190681Snwhitehorn NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 1459190681Snwhitehorn UMA_ZONE_VM | UMA_ZONE_NOFREE); 1460190681Snwhitehorn moea64_mpvo_zone = uma_zcreate("MPVO entry", sizeof(struct pvo_entry), 1461190681Snwhitehorn NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 1462190681Snwhitehorn UMA_ZONE_VM | UMA_ZONE_NOFREE); 1463190681Snwhitehorn 1464190681Snwhitehorn if (!hw_direct_map) { 1465190681Snwhitehorn uma_zone_set_allocf(moea64_upvo_zone,moea64_uma_page_alloc); 1466190681Snwhitehorn uma_zone_set_allocf(moea64_mpvo_zone,moea64_uma_page_alloc); 1467190681Snwhitehorn } 1468190681Snwhitehorn 1469190681Snwhitehorn moea64_initialized = TRUE; 1470190681Snwhitehorn} 1471190681Snwhitehorn 1472190681Snwhitehornboolean_t 1473207155Salcmoea64_is_referenced(mmu_t mmu, vm_page_t m) 1474207155Salc{ 1475207155Salc 1476207155Salc if ((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) != 0) 1477207155Salc return (FALSE); 1478207155Salc return (moea64_query_bit(m, LPTE_REF)); 1479207155Salc} 1480207155Salc 1481207155Salcboolean_t 1482190681Snwhitehornmoea64_is_modified(mmu_t mmu, vm_page_t m) 1483190681Snwhitehorn{ 1484190681Snwhitehorn 1485190681Snwhitehorn if ((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) != 0) 1486190681Snwhitehorn return (FALSE); 1487190681Snwhitehorn 1488190681Snwhitehorn return (moea64_query_bit(m, LPTE_CHG)); 1489190681Snwhitehorn} 1490190681Snwhitehorn 1491190681Snwhitehornvoid 1492190681Snwhitehornmoea64_clear_reference(mmu_t mmu, vm_page_t m) 1493190681Snwhitehorn{ 1494190681Snwhitehorn 1495190681Snwhitehorn if ((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) != 0) 1496190681Snwhitehorn return; 1497190681Snwhitehorn moea64_clear_bit(m, LPTE_REF, NULL); 1498190681Snwhitehorn} 1499190681Snwhitehorn 1500190681Snwhitehornvoid 1501190681Snwhitehornmoea64_clear_modify(mmu_t mmu, vm_page_t m) 1502190681Snwhitehorn{ 1503190681Snwhitehorn 1504190681Snwhitehorn if ((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) != 0) 1505190681Snwhitehorn return; 1506190681Snwhitehorn moea64_clear_bit(m, LPTE_CHG, NULL); 1507190681Snwhitehorn} 1508190681Snwhitehorn 1509190681Snwhitehorn/* 1510190681Snwhitehorn * Clear the write and modified bits in each of the given page's mappings.
1511190681Snwhitehorn */ 1512190681Snwhitehornvoid 1513190681Snwhitehornmoea64_remove_write(mmu_t mmu, vm_page_t m) 1514190681Snwhitehorn{ 1515190681Snwhitehorn struct pvo_entry *pvo; 1516190681Snwhitehorn struct lpte *pt; 1517190681Snwhitehorn pmap_t pmap; 1518190681Snwhitehorn uint64_t lo; 1519190681Snwhitehorn 1520190681Snwhitehorn mtx_assert(&vm_page_queue_mtx, MA_OWNED); 1521190681Snwhitehorn if ((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) != 0 || 1522190681Snwhitehorn (m->flags & PG_WRITEABLE) == 0) 1523190681Snwhitehorn return; 1524190681Snwhitehorn lo = moea64_attr_fetch(m); 1525190681Snwhitehorn SYNC(); 1526190681Snwhitehorn LIST_FOREACH(pvo, vm_page_to_pvoh(m), pvo_vlink) { 1527190681Snwhitehorn pmap = pvo->pvo_pmap; 1528190681Snwhitehorn PMAP_LOCK(pmap); 1529205370Snwhitehorn LOCK_TABLE(); 1530190681Snwhitehorn if ((pvo->pvo_pte.lpte.pte_lo & LPTE_PP) != LPTE_BR) { 1531190681Snwhitehorn pt = moea64_pvo_to_pte(pvo, -1); 1532190681Snwhitehorn pvo->pvo_pte.lpte.pte_lo &= ~LPTE_PP; 1533190681Snwhitehorn pvo->pvo_pte.lpte.pte_lo |= LPTE_BR; 1534190681Snwhitehorn if (pt != NULL) { 1535190681Snwhitehorn moea64_pte_synch(pt, &pvo->pvo_pte.lpte); 1536190681Snwhitehorn lo |= pvo->pvo_pte.lpte.pte_lo; 1537190681Snwhitehorn pvo->pvo_pte.lpte.pte_lo &= ~LPTE_CHG; 1538190681Snwhitehorn moea64_pte_change(pt, &pvo->pvo_pte.lpte, 1539204042Snwhitehorn pvo->pvo_pmap, PVO_VADDR(pvo)); 1540190681Snwhitehorn } 1541190681Snwhitehorn } 1542205370Snwhitehorn UNLOCK_TABLE(); 1543190681Snwhitehorn PMAP_UNLOCK(pmap); 1544190681Snwhitehorn } 1545190681Snwhitehorn if ((lo & LPTE_CHG) != 0) { 1546190681Snwhitehorn moea64_attr_clear(m, LPTE_CHG); 1547190681Snwhitehorn vm_page_dirty(m); 1548190681Snwhitehorn } 1549190681Snwhitehorn vm_page_flag_clear(m, PG_WRITEABLE); 1550190681Snwhitehorn} 1551190681Snwhitehorn 1552190681Snwhitehorn/* 1553190681Snwhitehorn * moea64_ts_referenced: 1554190681Snwhitehorn * 1555190681Snwhitehorn * Return a count of reference bits for a page, clearing those bits. 1556190681Snwhitehorn * It is not necessary for every reference bit to be cleared, but it 1557190681Snwhitehorn * is necessary that 0 only be returned when there are truly no 1558190681Snwhitehorn * reference bits set. 1559190681Snwhitehorn * 1560190681Snwhitehorn * XXX: The exact number of bits to check and clear is a matter that 1561190681Snwhitehorn * should be tested and standardized at some point in the future for 1562190681Snwhitehorn * optimal aging of shared pages. 1563190681Snwhitehorn */ 1564190681Snwhitehornboolean_t 1565190681Snwhitehornmoea64_ts_referenced(mmu_t mmu, vm_page_t m) 1566190681Snwhitehorn{ 1567190681Snwhitehorn int count; 1568190681Snwhitehorn 1569190681Snwhitehorn if ((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) != 0) 1570190681Snwhitehorn return (0); 1571190681Snwhitehorn 1572190681Snwhitehorn count = moea64_clear_bit(m, LPTE_REF, NULL); 1573190681Snwhitehorn 1574190681Snwhitehorn return (count); 1575190681Snwhitehorn} 1576190681Snwhitehorn 1577190681Snwhitehorn/* 1578190681Snwhitehorn * Map a wired page into kernel virtual address space. 
1579190681Snwhitehorn */ 1580190681Snwhitehornvoid 1581190681Snwhitehornmoea64_kenter(mmu_t mmu, vm_offset_t va, vm_offset_t pa) 1582190681Snwhitehorn{ 1583190681Snwhitehorn uint64_t pte_lo; 1584190681Snwhitehorn int error; 1585190681Snwhitehorn 1586204128Snwhitehorn#if 0 1587190681Snwhitehorn if (!pmap_bootstrapped) { 1588204128Snwhitehorn if (va >= VM_MIN_KERNEL_ADDRESS && va < virtual_end) 1589190681Snwhitehorn panic("Trying to enter an address in KVA -- %#x!\n",pa); 1590190681Snwhitehorn } 1591204128Snwhitehorn#endif 1592190681Snwhitehorn 1593190681Snwhitehorn pte_lo = moea64_calc_wimg(pa); 1594190681Snwhitehorn 1595190681Snwhitehorn PMAP_LOCK(kernel_pmap); 1596190681Snwhitehorn error = moea64_pvo_enter(kernel_pmap, moea64_upvo_zone, 1597190681Snwhitehorn &moea64_pvo_kunmanaged, va, pa, pte_lo, 1598198378Snwhitehorn PVO_WIRED | VM_PROT_EXECUTE); 1599190681Snwhitehorn 1600190681Snwhitehorn if (error != 0 && error != ENOENT) 1601190681Snwhitehorn panic("moea64_kenter: failed to enter va %#x pa %#x: %d", va, 1602190681Snwhitehorn pa, error); 1603190681Snwhitehorn 1604190681Snwhitehorn /* 1605190681Snwhitehorn * Flush the memory from the instruction cache. 1606190681Snwhitehorn */ 1607190681Snwhitehorn if ((pte_lo & (LPTE_I | LPTE_G)) == 0) { 1608190681Snwhitehorn __syncicache((void *)va, PAGE_SIZE); 1609190681Snwhitehorn } 1610190681Snwhitehorn PMAP_UNLOCK(kernel_pmap); 1611190681Snwhitehorn} 1612190681Snwhitehorn 1613190681Snwhitehorn/* 1614190681Snwhitehorn * Extract the physical page address associated with the given kernel virtual 1615190681Snwhitehorn * address. 1616190681Snwhitehorn */ 1617190681Snwhitehornvm_offset_t 1618190681Snwhitehornmoea64_kextract(mmu_t mmu, vm_offset_t va) 1619190681Snwhitehorn{ 1620190681Snwhitehorn struct pvo_entry *pvo; 1621190681Snwhitehorn vm_paddr_t pa; 1622190681Snwhitehorn 1623205370Snwhitehorn /* 1624205370Snwhitehorn * Shortcut the direct-mapped case when applicable. We never put 1625205370Snwhitehorn * anything but 1:1 mappings below VM_MIN_KERNEL_ADDRESS. 1626205370Snwhitehorn */ 1627205370Snwhitehorn if (va < VM_MIN_KERNEL_ADDRESS) 1628205370Snwhitehorn return (va); 1629205370Snwhitehorn 1630190681Snwhitehorn PMAP_LOCK(kernel_pmap); 1631190681Snwhitehorn pvo = moea64_pvo_find_va(kernel_pmap, va & ~ADDR_POFF, NULL); 1632190681Snwhitehorn KASSERT(pvo != NULL, ("moea64_kextract: no addr found")); 1633190681Snwhitehorn pa = (pvo->pvo_pte.lpte.pte_lo & LPTE_RPGN) | (va & ADDR_POFF); 1634190681Snwhitehorn PMAP_UNLOCK(kernel_pmap); 1635190681Snwhitehorn return (pa); 1636190681Snwhitehorn} 1637190681Snwhitehorn 1638190681Snwhitehorn/* 1639190681Snwhitehorn * Remove a wired page from kernel virtual address space. 1640190681Snwhitehorn */ 1641190681Snwhitehornvoid 1642190681Snwhitehornmoea64_kremove(mmu_t mmu, vm_offset_t va) 1643190681Snwhitehorn{ 1644190681Snwhitehorn moea64_remove(mmu, kernel_pmap, va, va + PAGE_SIZE); 1645190681Snwhitehorn} 1646190681Snwhitehorn 1647190681Snwhitehorn/* 1648190681Snwhitehorn * Map a range of physical addresses into kernel virtual address space. 1649190681Snwhitehorn * 1650190681Snwhitehorn * The value passed in *virt is a suggested virtual address for the mapping. 1651190681Snwhitehorn * Architectures which can support a direct-mapped physical to virtual region 1652190681Snwhitehorn * can return the appropriate address within that region, leaving '*virt' 1653190681Snwhitehorn * unchanged. We cannot and therefore do not; *virt is updated with the 1654190681Snwhitehorn * first usable address after the mapped region. 
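 *
 * Worked example (hypothetical numbers): with *virt = 0xe0000000,
 * pa_start = 0x80000000 and pa_end = 0x80003000, the loop below enters
 * three pages at 0xe0000000 through 0xe0002000, returns 0xe0000000,
 * and leaves *virt at 0xe0003000.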
1655190681Snwhitehorn */ 1656190681Snwhitehornvm_offset_t 1657190681Snwhitehornmoea64_map(mmu_t mmu, vm_offset_t *virt, vm_offset_t pa_start, 1658190681Snwhitehorn vm_offset_t pa_end, int prot) 1659190681Snwhitehorn{ 1660190681Snwhitehorn vm_offset_t sva, va; 1661190681Snwhitehorn 1662190681Snwhitehorn sva = *virt; 1663190681Snwhitehorn va = sva; 1664190681Snwhitehorn for (; pa_start < pa_end; pa_start += PAGE_SIZE, va += PAGE_SIZE) 1665190681Snwhitehorn moea64_kenter(mmu, va, pa_start); 1666190681Snwhitehorn *virt = va; 1667190681Snwhitehorn 1668190681Snwhitehorn return (sva); 1669190681Snwhitehorn} 1670190681Snwhitehorn 1671190681Snwhitehorn/* 1672190681Snwhitehorn * Returns true if the pmap's pv is one of the first 1673190681Snwhitehorn * 16 pvs linked to from this page. This count may 1674190681Snwhitehorn * be changed upwards or downwards in the future; it 1675190681Snwhitehorn * is only necessary that true be returned for a small 1676190681Snwhitehorn * subset of pmaps for proper page aging. 1677190681Snwhitehorn */ 1678190681Snwhitehornboolean_t 1679190681Snwhitehornmoea64_page_exists_quick(mmu_t mmu, pmap_t pmap, vm_page_t m) 1680190681Snwhitehorn{ 1681190681Snwhitehorn int loops; 1682190681Snwhitehorn struct pvo_entry *pvo; 1683190681Snwhitehorn 1684190681Snwhitehorn if (!moea64_initialized || (m->flags & PG_FICTITIOUS)) 1685190681Snwhitehorn return FALSE; 1686190681Snwhitehorn 1687205370Snwhitehorn mtx_assert(&vm_page_queue_mtx, MA_OWNED); 1688205370Snwhitehorn 1689190681Snwhitehorn loops = 0; 1690190681Snwhitehorn LIST_FOREACH(pvo, vm_page_to_pvoh(m), pvo_vlink) { 1691205370Snwhitehorn if (pvo->pvo_pmap == pmap) 1692190681Snwhitehorn return (TRUE); 1693190681Snwhitehorn if (++loops >= 16) 1694190681Snwhitehorn break; 1695190681Snwhitehorn } 1696190681Snwhitehorn 1697190681Snwhitehorn return (FALSE); 1698190681Snwhitehorn} 1699190681Snwhitehorn 1700190681Snwhitehorn/* 1701190681Snwhitehorn * Return the number of managed mappings to the given physical page 1702190681Snwhitehorn * that are wired. 1703190681Snwhitehorn */ 1704190681Snwhitehornint 1705190681Snwhitehornmoea64_page_wired_mappings(mmu_t mmu, vm_page_t m) 1706190681Snwhitehorn{ 1707190681Snwhitehorn struct pvo_entry *pvo; 1708190681Snwhitehorn int count; 1709190681Snwhitehorn 1710190681Snwhitehorn count = 0; 1711190681Snwhitehorn if (!moea64_initialized || (m->flags & PG_FICTITIOUS) != 0) 1712190681Snwhitehorn return (count); 1713190681Snwhitehorn mtx_assert(&vm_page_queue_mtx, MA_OWNED); 1714190681Snwhitehorn LIST_FOREACH(pvo, vm_page_to_pvoh(m), pvo_vlink) 1715190681Snwhitehorn if ((pvo->pvo_vaddr & PVO_WIRED) != 0) 1716190681Snwhitehorn count++; 1717190681Snwhitehorn return (count); 1718190681Snwhitehorn} 1719190681Snwhitehorn 1720190681Snwhitehornstatic u_int moea64_vsidcontext; 1721190681Snwhitehorn 1722190681Snwhitehornvoid 1723190681Snwhitehornmoea64_pinit(mmu_t mmu, pmap_t pmap) 1724190681Snwhitehorn{ 1725190681Snwhitehorn int i, mask; 1726190681Snwhitehorn u_int entropy; 1727190681Snwhitehorn 1728190681Snwhitehorn PMAP_LOCK_INIT(pmap); 1729190681Snwhitehorn 1730190681Snwhitehorn entropy = 0; 1731190681Snwhitehorn __asm __volatile("mftb %0" : "=r"(entropy)); 1732190681Snwhitehorn 1733190681Snwhitehorn if (pmap_bootstrapped) 1734190681Snwhitehorn pmap->pmap_phys = (pmap_t)moea64_kextract(mmu, (vm_offset_t)pmap); 1735190681Snwhitehorn else 1736190681Snwhitehorn pmap->pmap_phys = pmap; 1737190681Snwhitehorn 1738190681Snwhitehorn /* 1739190681Snwhitehorn * Allocate some segment registers for this pmap. 
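 *
 * One bit in moea64_vsid_bitmap stands for a whole pmap: once a free
 * 20-bit hash is claimed below, each of the 16 segment registers gets
 * VSID_MAKE(i, hash), which folds the segment number into the final
 * VSID.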
1740190681Snwhitehorn */ 1741190681Snwhitehorn for (i = 0; i < NPMAPS; i += VSID_NBPW) { 1742190681Snwhitehorn u_int hash, n; 1743190681Snwhitehorn 1744190681Snwhitehorn /* 1745190681Snwhitehorn * Create a new value by multiplying by a prime and adding in 1746190681Snwhitehorn * entropy from the timebase register. This is to make the 1747190681Snwhitehorn * VSID more random so that the PT hash function collides 1748190681Snwhitehorn * less often. (Note that the prime causes gcc to do shifts 1749190681Snwhitehorn * instead of a multiply.) 1750190681Snwhitehorn */ 1751190681Snwhitehorn moea64_vsidcontext = (moea64_vsidcontext * 0x1105) + entropy; 1752190681Snwhitehorn hash = moea64_vsidcontext & (NPMAPS - 1); 1753190681Snwhitehorn if (hash == 0) /* 0 is special, avoid it */ 1754190681Snwhitehorn continue; 1755190681Snwhitehorn n = hash >> 5; 1756190681Snwhitehorn mask = 1 << (hash & (VSID_NBPW - 1)); 1757190681Snwhitehorn hash = (moea64_vsidcontext & 0xfffff); 1758190681Snwhitehorn if (moea64_vsid_bitmap[n] & mask) { /* collision? */ 1759190681Snwhitehorn /* anything free in this bucket? */ 1760190681Snwhitehorn if (moea64_vsid_bitmap[n] == 0xffffffff) { 1761190681Snwhitehorn entropy = (moea64_vsidcontext >> 20); 1762190681Snwhitehorn continue; 1763190681Snwhitehorn } 1764190681Snwhitehorn i = ffs(~moea64_vsid_bitmap[n]) - 1; 1765190681Snwhitehorn mask = 1 << i; 1766190681Snwhitehorn hash &= 0xfffff & ~(VSID_NBPW - 1); 1767190681Snwhitehorn hash |= i; 1768190681Snwhitehorn } 1769190681Snwhitehorn moea64_vsid_bitmap[n] |= mask; 1770190681Snwhitehorn for (i = 0; i < 16; i++) { 1771190681Snwhitehorn pmap->pm_sr[i] = VSID_MAKE(i, hash); 1772190681Snwhitehorn } 1773190681Snwhitehorn return; 1774190681Snwhitehorn } 1775190681Snwhitehorn 1776190681Snwhitehorn panic("moea64_pinit: out of segments"); 1777190681Snwhitehorn} 1778190681Snwhitehorn 1779190681Snwhitehorn/* 1780190681Snwhitehorn * Initialize the pmap associated with process 0. 1781190681Snwhitehorn */ 1782190681Snwhitehornvoid 1783190681Snwhitehornmoea64_pinit0(mmu_t mmu, pmap_t pm) 1784190681Snwhitehorn{ 1785190681Snwhitehorn moea64_pinit(mmu, pm); 1786190681Snwhitehorn bzero(&pm->pm_stats, sizeof(pm->pm_stats)); 1787190681Snwhitehorn} 1788190681Snwhitehorn 1789190681Snwhitehorn/* 1790190681Snwhitehorn * Set the physical protection on the specified range of this map as requested.
1791190681Snwhitehorn */ 1792190681Snwhitehornvoid 1793190681Snwhitehornmoea64_protect(mmu_t mmu, pmap_t pm, vm_offset_t sva, vm_offset_t eva, 1794190681Snwhitehorn vm_prot_t prot) 1795190681Snwhitehorn{ 1796190681Snwhitehorn struct pvo_entry *pvo; 1797190681Snwhitehorn struct lpte *pt; 1798190681Snwhitehorn int pteidx; 1799190681Snwhitehorn 1800190681Snwhitehorn CTR4(KTR_PMAP, "moea64_protect: pm=%p sva=%#x eva=%#x prot=%#x", pm, sva, 1801190681Snwhitehorn eva, prot); 1802190681Snwhitehorn 1803190681Snwhitehorn 1804190681Snwhitehorn KASSERT(pm == &curproc->p_vmspace->vm_pmap || pm == kernel_pmap, 1805190681Snwhitehorn ("moea64_protect: non current pmap")); 1806190681Snwhitehorn 1807190681Snwhitehorn if ((prot & VM_PROT_READ) == VM_PROT_NONE) { 1808190681Snwhitehorn moea64_remove(mmu, pm, sva, eva); 1809190681Snwhitehorn return; 1810190681Snwhitehorn } 1811190681Snwhitehorn 1812190681Snwhitehorn vm_page_lock_queues(); 1813190681Snwhitehorn PMAP_LOCK(pm); 1814190681Snwhitehorn for (; sva < eva; sva += PAGE_SIZE) { 1815190681Snwhitehorn pvo = moea64_pvo_find_va(pm, sva, &pteidx); 1816190681Snwhitehorn if (pvo == NULL) 1817190681Snwhitehorn continue; 1818190681Snwhitehorn 1819190681Snwhitehorn /* 1820190681Snwhitehorn * Grab the PTE pointer before we diddle with the cached PTE 1821190681Snwhitehorn * copy. 1822190681Snwhitehorn */ 1823190681Snwhitehorn LOCK_TABLE(); 1824190681Snwhitehorn pt = moea64_pvo_to_pte(pvo, pteidx); 1825190681Snwhitehorn 1826190681Snwhitehorn /* 1827190681Snwhitehorn * Change the protection of the page. 1828190681Snwhitehorn */ 1829190681Snwhitehorn pvo->pvo_pte.lpte.pte_lo &= ~LPTE_PP; 1830190681Snwhitehorn pvo->pvo_pte.lpte.pte_lo |= LPTE_BR; 1831190681Snwhitehorn pvo->pvo_pte.lpte.pte_lo &= ~LPTE_NOEXEC; 1832190681Snwhitehorn if ((prot & VM_PROT_EXECUTE) == 0) 1833190681Snwhitehorn pvo->pvo_pte.lpte.pte_lo |= LPTE_NOEXEC; 1834190681Snwhitehorn 1835190681Snwhitehorn /* 1836190681Snwhitehorn * If the PVO is in the page table, update that pte as well. 1837190681Snwhitehorn */ 1838190681Snwhitehorn if (pt != NULL) { 1839190681Snwhitehorn moea64_pte_change(pt, &pvo->pvo_pte.lpte, 1840204042Snwhitehorn pvo->pvo_pmap, PVO_VADDR(pvo)); 1841190681Snwhitehorn if ((pvo->pvo_pte.lpte.pte_lo & 1842190681Snwhitehorn (LPTE_I | LPTE_G | LPTE_NOEXEC)) == 0) { 1843198341Smarcel moea64_syncicache(pm, sva, 1844198341Smarcel pvo->pvo_pte.lpte.pte_lo & LPTE_RPGN, 1845198341Smarcel PAGE_SIZE); 1846190681Snwhitehorn } 1847190681Snwhitehorn } 1848190681Snwhitehorn UNLOCK_TABLE(); 1849190681Snwhitehorn } 1850190681Snwhitehorn vm_page_unlock_queues(); 1851190681Snwhitehorn PMAP_UNLOCK(pm); 1852190681Snwhitehorn} 1853190681Snwhitehorn 1854190681Snwhitehorn/* 1855190681Snwhitehorn * Map a list of wired pages into kernel virtual address space. This is 1856190681Snwhitehorn * intended for temporary mappings which do not need page modification or 1857190681Snwhitehorn * references recorded. Existing mappings in the region are overwritten. 1858190681Snwhitehorn */ 1859190681Snwhitehornvoid 1860190681Snwhitehornmoea64_qenter(mmu_t mmu, vm_offset_t va, vm_page_t *m, int count) 1861190681Snwhitehorn{ 1862190681Snwhitehorn while (count-- > 0) { 1863190681Snwhitehorn moea64_kenter(mmu, va, VM_PAGE_TO_PHYS(*m)); 1864190681Snwhitehorn va += PAGE_SIZE; 1865190681Snwhitehorn m++; 1866190681Snwhitehorn } 1867190681Snwhitehorn} 1868190681Snwhitehorn 1869190681Snwhitehorn/* 1870190681Snwhitehorn * Remove page mappings from kernel virtual address space. 
Intended for 1871190681Snwhitehorn * temporary mappings entered by moea64_qenter. 1872190681Snwhitehorn */ 1873190681Snwhitehornvoid 1874190681Snwhitehornmoea64_qremove(mmu_t mmu, vm_offset_t va, int count) 1875190681Snwhitehorn{ 1876190681Snwhitehorn while (count-- > 0) { 1877190681Snwhitehorn moea64_kremove(mmu, va); 1878190681Snwhitehorn va += PAGE_SIZE; 1879190681Snwhitehorn } 1880190681Snwhitehorn} 1881190681Snwhitehorn 1882190681Snwhitehornvoid 1883190681Snwhitehornmoea64_release(mmu_t mmu, pmap_t pmap) 1884190681Snwhitehorn{ 1885190681Snwhitehorn int idx, mask; 1886190681Snwhitehorn 1887190681Snwhitehorn /* 1888190681Snwhitehorn * Free segment register's VSID 1889190681Snwhitehorn */ 1890190681Snwhitehorn if (pmap->pm_sr[0] == 0) 1891190681Snwhitehorn panic("moea64_release"); 1892190681Snwhitehorn 1893190681Snwhitehorn idx = VSID_TO_HASH(pmap->pm_sr[0]) & (NPMAPS-1); 1894190681Snwhitehorn mask = 1 << (idx % VSID_NBPW); 1895190681Snwhitehorn idx /= VSID_NBPW; 1896190681Snwhitehorn moea64_vsid_bitmap[idx] &= ~mask; 1897190681Snwhitehorn PMAP_LOCK_DESTROY(pmap); 1898190681Snwhitehorn} 1899190681Snwhitehorn 1900190681Snwhitehorn/* 1901190681Snwhitehorn * Remove the given range of addresses from the specified map. 1902190681Snwhitehorn */ 1903190681Snwhitehornvoid 1904190681Snwhitehornmoea64_remove(mmu_t mmu, pmap_t pm, vm_offset_t sva, vm_offset_t eva) 1905190681Snwhitehorn{ 1906190681Snwhitehorn struct pvo_entry *pvo; 1907190681Snwhitehorn int pteidx; 1908190681Snwhitehorn 1909190681Snwhitehorn vm_page_lock_queues(); 1910190681Snwhitehorn PMAP_LOCK(pm); 1911190681Snwhitehorn for (; sva < eva; sva += PAGE_SIZE) { 1912190681Snwhitehorn pvo = moea64_pvo_find_va(pm, sva, &pteidx); 1913190681Snwhitehorn if (pvo != NULL) { 1914190681Snwhitehorn moea64_pvo_remove(pvo, pteidx); 1915190681Snwhitehorn } 1916190681Snwhitehorn } 1917190681Snwhitehorn vm_page_unlock_queues(); 1918190681Snwhitehorn PMAP_UNLOCK(pm); 1919190681Snwhitehorn} 1920190681Snwhitehorn 1921190681Snwhitehorn/* 1922190681Snwhitehorn * Remove physical page from all pmaps in which it resides. moea64_pvo_remove() 1923190681Snwhitehorn * will reflect changes in pte's back to the vm_page. 1924190681Snwhitehorn */ 1925190681Snwhitehornvoid 1926190681Snwhitehornmoea64_remove_all(mmu_t mmu, vm_page_t m) 1927190681Snwhitehorn{ 1928190681Snwhitehorn struct pvo_head *pvo_head; 1929190681Snwhitehorn struct pvo_entry *pvo, *next_pvo; 1930190681Snwhitehorn pmap_t pmap; 1931190681Snwhitehorn 1932190681Snwhitehorn mtx_assert(&vm_page_queue_mtx, MA_OWNED); 1933190681Snwhitehorn 1934190681Snwhitehorn pvo_head = vm_page_to_pvoh(m); 1935190681Snwhitehorn for (pvo = LIST_FIRST(pvo_head); pvo != NULL; pvo = next_pvo) { 1936190681Snwhitehorn next_pvo = LIST_NEXT(pvo, pvo_vlink); 1937190681Snwhitehorn 1938190681Snwhitehorn MOEA_PVO_CHECK(pvo); /* sanity check */ 1939190681Snwhitehorn pmap = pvo->pvo_pmap; 1940190681Snwhitehorn PMAP_LOCK(pmap); 1941190681Snwhitehorn moea64_pvo_remove(pvo, -1); 1942190681Snwhitehorn PMAP_UNLOCK(pmap); 1943190681Snwhitehorn } 1944204042Snwhitehorn if ((m->flags & PG_WRITEABLE) && moea64_is_modified(mmu, m)) { 1945204042Snwhitehorn moea64_attr_clear(m, LPTE_CHG); 1946204042Snwhitehorn vm_page_dirty(m); 1947204042Snwhitehorn } 1948190681Snwhitehorn vm_page_flag_clear(m, PG_WRITEABLE); 1949190681Snwhitehorn} 1950190681Snwhitehorn 1951190681Snwhitehorn/* 1952190681Snwhitehorn * Allocate a physical page of memory directly from the phys_avail map. 
1953190681Snwhitehorn * Can only be called from moea64_bootstrap before avail start and end are 1954190681Snwhitehorn * calculated. 1955190681Snwhitehorn */ 1956190681Snwhitehornstatic vm_offset_t 1957190681Snwhitehornmoea64_bootstrap_alloc(vm_size_t size, u_int align) 1958190681Snwhitehorn{ 1959190681Snwhitehorn vm_offset_t s, e; 1960190681Snwhitehorn int i, j; 1961190681Snwhitehorn 1962190681Snwhitehorn size = round_page(size); 1963190681Snwhitehorn for (i = 0; phys_avail[i + 1] != 0; i += 2) { 1964190681Snwhitehorn if (align != 0) 1965190681Snwhitehorn s = (phys_avail[i] + align - 1) & ~(align - 1); 1966190681Snwhitehorn else 1967190681Snwhitehorn s = phys_avail[i]; 1968190681Snwhitehorn e = s + size; 1969190681Snwhitehorn 1970190681Snwhitehorn if (s < phys_avail[i] || e > phys_avail[i + 1]) 1971190681Snwhitehorn continue; 1972190681Snwhitehorn 1973190681Snwhitehorn if (s == phys_avail[i]) { 1974190681Snwhitehorn phys_avail[i] += size; 1975190681Snwhitehorn } else if (e == phys_avail[i + 1]) { 1976190681Snwhitehorn phys_avail[i + 1] -= size; 1977190681Snwhitehorn } else { 1978190681Snwhitehorn for (j = phys_avail_count * 2; j > i; j -= 2) { 1979190681Snwhitehorn phys_avail[j] = phys_avail[j - 2]; 1980190681Snwhitehorn phys_avail[j + 1] = phys_avail[j - 1]; 1981190681Snwhitehorn } 1982190681Snwhitehorn 1983190681Snwhitehorn phys_avail[i + 3] = phys_avail[i + 1]; 1984190681Snwhitehorn phys_avail[i + 1] = s; 1985190681Snwhitehorn phys_avail[i + 2] = e; 1986190681Snwhitehorn phys_avail_count++; 1987190681Snwhitehorn } 1988190681Snwhitehorn 1989190681Snwhitehorn return (s); 1990190681Snwhitehorn } 1991190681Snwhitehorn panic("moea64_bootstrap_alloc: could not allocate memory"); 1992190681Snwhitehorn} 1993190681Snwhitehorn 1994190681Snwhitehornstatic void 1995190681Snwhitehorntlbia(void) 1996190681Snwhitehorn{ 1997190681Snwhitehorn vm_offset_t i; 1998198378Snwhitehorn register_t msr, scratch; 1999190681Snwhitehorn 2000198378Snwhitehorn for (i = 0; i < 0xFF000; i += 0x00001000) { 2001198378Snwhitehorn __asm __volatile("\ 2002198378Snwhitehorn mfmsr %0; \ 2003198378Snwhitehorn mr %1, %0; \ 2004198378Snwhitehorn insrdi %1,%3,1,0; \ 2005198378Snwhitehorn mtmsrd %1; \ 2006198378Snwhitehorn ptesync; \ 2007198378Snwhitehorn \ 2008198378Snwhitehorn tlbiel %2; \ 2009198378Snwhitehorn \ 2010198378Snwhitehorn mtmsrd %0; \ 2011198378Snwhitehorn eieio; \ 2012198378Snwhitehorn tlbsync; \ 2013198378Snwhitehorn ptesync;" 2014198378Snwhitehorn : "=r"(msr), "=r"(scratch) : "r"(i), "r"(1)); 2015198378Snwhitehorn } 2016190681Snwhitehorn} 2017190681Snwhitehorn 2018190681Snwhitehornstatic int 2019190681Snwhitehornmoea64_pvo_enter(pmap_t pm, uma_zone_t zone, struct pvo_head *pvo_head, 2020198378Snwhitehorn vm_offset_t va, vm_offset_t pa, uint64_t pte_lo, int flags) 2021190681Snwhitehorn{ 2022190681Snwhitehorn struct pvo_entry *pvo; 2023190681Snwhitehorn uint64_t vsid; 2024190681Snwhitehorn int first; 2025190681Snwhitehorn u_int ptegidx; 2026190681Snwhitehorn int i; 2027190681Snwhitehorn int bootstrap; 2028190681Snwhitehorn 2029190681Snwhitehorn /* 2030190681Snwhitehorn * One nasty thing that can happen here is that the UMA calls to 2031190681Snwhitehorn * allocate new PVOs need to map more memory, which calls pvo_enter(), 2032190681Snwhitehorn * which calls UMA... 2033190681Snwhitehorn * 2034190681Snwhitehorn * We break the loop by detecting recursion and allocating out of 2035190681Snwhitehorn * the bootstrap pool. 
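 *
 * Concretely (all names from this file): uma_zalloc() on a PVO zone
 * may call moea64_uma_page_alloc(), which calls back into
 * moea64_pvo_enter() for the kernel pmap; that inner call passes
 * PVO_BOOTSTRAP and is satisfied from moea64_bpvo_pool instead of
 * asking UMA again.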
2036190681Snwhitehorn */ 2037190681Snwhitehorn 2038190681Snwhitehorn moea64_pvo_enter_calls++; 2039190681Snwhitehorn first = 0; 2040190681Snwhitehorn bootstrap = (flags & PVO_BOOTSTRAP); 2041190681Snwhitehorn 2042190681Snwhitehorn if (!moea64_initialized) 2043190681Snwhitehorn bootstrap = 1; 2044190681Snwhitehorn 2045190681Snwhitehorn /* 2046190681Snwhitehorn * Compute the PTE Group index. 2047190681Snwhitehorn */ 2048190681Snwhitehorn va &= ~ADDR_POFF; 2049190681Snwhitehorn vsid = va_to_vsid(pm, va); 2050190681Snwhitehorn ptegidx = va_to_pteg(vsid, va); 2051190681Snwhitehorn 2052190681Snwhitehorn /* 2053190681Snwhitehorn * Remove any existing mapping for this page. Reuse the pvo entry if 2054190681Snwhitehorn * there is a mapping. 2055190681Snwhitehorn */ 2056198378Snwhitehorn LOCK_TABLE(); 2057190681Snwhitehorn 2058190681Snwhitehorn LIST_FOREACH(pvo, &moea64_pvo_table[ptegidx], pvo_olink) { 2059190681Snwhitehorn if (pvo->pvo_pmap == pm && PVO_VADDR(pvo) == va) { 2060190681Snwhitehorn if ((pvo->pvo_pte.lpte.pte_lo & LPTE_RPGN) == pa && 2061190681Snwhitehorn (pvo->pvo_pte.lpte.pte_lo & LPTE_PP) == 2062190681Snwhitehorn (pte_lo & LPTE_PP)) { 2063198378Snwhitehorn UNLOCK_TABLE(); 2064190681Snwhitehorn return (0); 2065190681Snwhitehorn } 2066190681Snwhitehorn moea64_pvo_remove(pvo, -1); 2067190681Snwhitehorn break; 2068190681Snwhitehorn } 2069190681Snwhitehorn } 2070190681Snwhitehorn 2071190681Snwhitehorn /* 2072190681Snwhitehorn * If we aren't overwriting a mapping, try to allocate. 2073190681Snwhitehorn */ 2074190681Snwhitehorn if (bootstrap) { 2075190681Snwhitehorn if (moea64_bpvo_pool_index >= BPVO_POOL_SIZE) { 2076190681Snwhitehorn panic("moea64_enter: bpvo pool exhausted, %d, %d, %d", 2077190681Snwhitehorn moea64_bpvo_pool_index, BPVO_POOL_SIZE, 2078190681Snwhitehorn BPVO_POOL_SIZE * sizeof(struct pvo_entry)); 2079190681Snwhitehorn } 2080190681Snwhitehorn pvo = &moea64_bpvo_pool[moea64_bpvo_pool_index]; 2081190681Snwhitehorn moea64_bpvo_pool_index++; 2082190681Snwhitehorn bootstrap = 1; 2083190681Snwhitehorn } else { 2084198378Snwhitehorn /* 2085204719Snwhitehorn * Note: drop the table lock around the UMA allocation in 2086198378Snwhitehorn * case the UMA allocator needs to manipulate the page 2087198378Snwhitehorn * table. The mapping we are working with is already 2088198378Snwhitehorn * protected by the PMAP lock. 
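 *
 * (Otherwise uma_zalloc() could re-enter moea64_pvo_enter() through
 * moea64_uma_page_alloc() and attempt to retake the table lock we
 * already hold, deadlocking against ourselves.)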
2089198378Snwhitehorn */ 2090198378Snwhitehorn UNLOCK_TABLE(); 2091190681Snwhitehorn pvo = uma_zalloc(zone, M_NOWAIT); 2092198378Snwhitehorn LOCK_TABLE(); 2093190681Snwhitehorn } 2094190681Snwhitehorn 2095190681Snwhitehorn if (pvo == NULL) { 2096198378Snwhitehorn UNLOCK_TABLE(); 2097190681Snwhitehorn return (ENOMEM); 2098190681Snwhitehorn } 2099190681Snwhitehorn 2100190681Snwhitehorn moea64_pvo_entries++; 2101190681Snwhitehorn pvo->pvo_vaddr = va; 2102190681Snwhitehorn pvo->pvo_pmap = pm; 2103190681Snwhitehorn LIST_INSERT_HEAD(&moea64_pvo_table[ptegidx], pvo, pvo_olink); 2104190681Snwhitehorn pvo->pvo_vaddr &= ~ADDR_POFF; 2105190681Snwhitehorn 2106190681Snwhitehorn if (!(flags & VM_PROT_EXECUTE)) 2107190681Snwhitehorn pte_lo |= LPTE_NOEXEC; 2108190681Snwhitehorn if (flags & PVO_WIRED) 2109190681Snwhitehorn pvo->pvo_vaddr |= PVO_WIRED; 2110190681Snwhitehorn if (pvo_head != &moea64_pvo_kunmanaged) 2111190681Snwhitehorn pvo->pvo_vaddr |= PVO_MANAGED; 2112190681Snwhitehorn if (bootstrap) 2113190681Snwhitehorn pvo->pvo_vaddr |= PVO_BOOTSTRAP; 2114190681Snwhitehorn if (flags & PVO_FAKE) 2115190681Snwhitehorn pvo->pvo_vaddr |= PVO_FAKE; 2116190681Snwhitehorn 2117190681Snwhitehorn moea64_pte_create(&pvo->pvo_pte.lpte, vsid, va, 2118190681Snwhitehorn (uint64_t)(pa) | pte_lo); 2119190681Snwhitehorn 2120190681Snwhitehorn /* 2121190681Snwhitehorn * Remember if the list was empty and therefore will be the first 2122190681Snwhitehorn * item. 2123190681Snwhitehorn */ 2124190681Snwhitehorn if (LIST_FIRST(pvo_head) == NULL) 2125190681Snwhitehorn first = 1; 2126190681Snwhitehorn LIST_INSERT_HEAD(pvo_head, pvo, pvo_vlink); 2127190681Snwhitehorn 2128204042Snwhitehorn if (pvo->pvo_vaddr & PVO_WIRED) 2129190681Snwhitehorn pm->pm_stats.wired_count++; 2130190681Snwhitehorn pm->pm_stats.resident_count++; 2131190681Snwhitehorn 2132190681Snwhitehorn /* 2133190681Snwhitehorn * We hope this succeeds but it isn't required. 2134190681Snwhitehorn */ 2135190681Snwhitehorn i = moea64_pte_insert(ptegidx, &pvo->pvo_pte.lpte); 2136190681Snwhitehorn if (i >= 0) { 2137190681Snwhitehorn PVO_PTEGIDX_SET(pvo, i); 2138190681Snwhitehorn } else { 2139190681Snwhitehorn panic("moea64_pvo_enter: overflow"); 2140190681Snwhitehorn moea64_pte_overflow++; 2141190681Snwhitehorn } 2142190681Snwhitehorn 2143204042Snwhitehorn if (pm == kernel_pmap) 2144204042Snwhitehorn isync(); 2145204042Snwhitehorn 2146198378Snwhitehorn UNLOCK_TABLE(); 2147190681Snwhitehorn 2148190681Snwhitehorn return (first ? ENOENT : 0); 2149190681Snwhitehorn} 2150190681Snwhitehorn 2151190681Snwhitehornstatic void 2152190681Snwhitehornmoea64_pvo_remove(struct pvo_entry *pvo, int pteidx) 2153190681Snwhitehorn{ 2154190681Snwhitehorn struct lpte *pt; 2155190681Snwhitehorn 2156190681Snwhitehorn /* 2157190681Snwhitehorn * If there is an active pte entry, we need to deactivate it (and 2158190681Snwhitehorn * save the ref & cfg bits). 2159190681Snwhitehorn */ 2160190681Snwhitehorn LOCK_TABLE(); 2161190681Snwhitehorn pt = moea64_pvo_to_pte(pvo, pteidx); 2162190681Snwhitehorn if (pt != NULL) { 2163190681Snwhitehorn moea64_pte_unset(pt, &pvo->pvo_pte.lpte, pvo->pvo_pmap, 2164204042Snwhitehorn PVO_VADDR(pvo)); 2165190681Snwhitehorn PVO_PTEGIDX_CLR(pvo); 2166190681Snwhitehorn } else { 2167190681Snwhitehorn moea64_pte_overflow--; 2168190681Snwhitehorn } 2169190681Snwhitehorn 2170190681Snwhitehorn /* 2171190681Snwhitehorn * Update our statistics. 
2172190681Snwhitehorn */ 2173190681Snwhitehorn pvo->pvo_pmap->pm_stats.resident_count--; 2174204042Snwhitehorn if (pvo->pvo_vaddr & PVO_WIRED) 2175190681Snwhitehorn pvo->pvo_pmap->pm_stats.wired_count--; 2176190681Snwhitehorn 2177190681Snwhitehorn /* 2178190681Snwhitehorn * Save the REF/CHG bits into their cache if the page is managed. 2179190681Snwhitehorn */ 2180190681Snwhitehorn if ((pvo->pvo_vaddr & (PVO_MANAGED|PVO_FAKE)) == PVO_MANAGED) { 2181190681Snwhitehorn struct vm_page *pg; 2182190681Snwhitehorn 2183190681Snwhitehorn pg = PHYS_TO_VM_PAGE(pvo->pvo_pte.lpte.pte_lo & LPTE_RPGN); 2184190681Snwhitehorn if (pg != NULL) { 2185190681Snwhitehorn moea64_attr_save(pg, pvo->pvo_pte.lpte.pte_lo & 2186190681Snwhitehorn (LPTE_REF | LPTE_CHG)); 2187190681Snwhitehorn } 2188190681Snwhitehorn } 2189190681Snwhitehorn 2190190681Snwhitehorn /* 2191190681Snwhitehorn * Remove this PVO from the PV list. 2192190681Snwhitehorn */ 2193190681Snwhitehorn LIST_REMOVE(pvo, pvo_vlink); 2194190681Snwhitehorn 2195190681Snwhitehorn /* 2196190681Snwhitehorn * Remove this from the overflow list and return it to the pool 2197190681Snwhitehorn * if we aren't going to reuse it. 2198190681Snwhitehorn */ 2199190681Snwhitehorn LIST_REMOVE(pvo, pvo_olink); 2200204694Snwhitehorn UNLOCK_TABLE(); 2201204694Snwhitehorn 2202190681Snwhitehorn if (!(pvo->pvo_vaddr & PVO_BOOTSTRAP)) 2203204042Snwhitehorn uma_zfree((pvo->pvo_vaddr & PVO_MANAGED) ? moea64_mpvo_zone : 2204190681Snwhitehorn moea64_upvo_zone, pvo); 2205204694Snwhitehorn 2206190681Snwhitehorn moea64_pvo_entries--; 2207190681Snwhitehorn moea64_pvo_remove_calls++; 2208190681Snwhitehorn} 2209190681Snwhitehorn 2210190681Snwhitehornstatic __inline int 2211190681Snwhitehornmoea64_pvo_pte_index(const struct pvo_entry *pvo, int ptegidx) 2212190681Snwhitehorn{ 2213190681Snwhitehorn 2214190681Snwhitehorn /* 2215190681Snwhitehorn * We can find the actual pte entry without searching by grabbing 2216204268Snwhitehorn * the PTEG index from 3 unused bits in pvo_vaddr and by 2217190681Snwhitehorn * noticing the HID bit. 
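 *
 * Worked example (hypothetical values): with ptegidx = 0x12,
 * PVO_PTEGIDX_GET(pvo) = 5 and LPTE_HID clear, the slot is
 * (0x12 << 3) | 5 = 0x95; with LPTE_HID set, ptegidx is first XORed
 * with moea64_pteg_mask to name the secondary group.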
2218190681Snwhitehorn */ 2219190681Snwhitehorn if (pvo->pvo_pte.lpte.pte_hi & LPTE_HID) 2220204268Snwhitehorn ptegidx ^= moea64_pteg_mask; 2221190681Snwhitehorn 2222204268Snwhitehorn return ((ptegidx << 3) | PVO_PTEGIDX_GET(pvo)); 2223190681Snwhitehorn} 2224190681Snwhitehorn 2225190681Snwhitehornstatic struct pvo_entry * 2226190681Snwhitehornmoea64_pvo_find_va(pmap_t pm, vm_offset_t va, int *pteidx_p) 2227190681Snwhitehorn{ 2228190681Snwhitehorn struct pvo_entry *pvo; 2229190681Snwhitehorn int ptegidx; 2230190681Snwhitehorn uint64_t vsid; 2231190681Snwhitehorn 2232190681Snwhitehorn va &= ~ADDR_POFF; 2233190681Snwhitehorn vsid = va_to_vsid(pm, va); 2234190681Snwhitehorn ptegidx = va_to_pteg(vsid, va); 2235190681Snwhitehorn 2236190681Snwhitehorn LOCK_TABLE(); 2237190681Snwhitehorn LIST_FOREACH(pvo, &moea64_pvo_table[ptegidx], pvo_olink) { 2238190681Snwhitehorn if (pvo->pvo_pmap == pm && PVO_VADDR(pvo) == va) { 2239190681Snwhitehorn if (pteidx_p) 2240190681Snwhitehorn *pteidx_p = moea64_pvo_pte_index(pvo, ptegidx); 2241190681Snwhitehorn break; 2242190681Snwhitehorn } 2243190681Snwhitehorn } 2244190681Snwhitehorn UNLOCK_TABLE(); 2245190681Snwhitehorn 2246190681Snwhitehorn return (pvo); 2247190681Snwhitehorn} 2248190681Snwhitehorn 2249190681Snwhitehornstatic struct lpte * 2250190681Snwhitehornmoea64_pvo_to_pte(const struct pvo_entry *pvo, int pteidx) 2251190681Snwhitehorn{ 2252190681Snwhitehorn struct lpte *pt; 2253190681Snwhitehorn 2254190681Snwhitehorn /* 2255190681Snwhitehorn * If we haven't been supplied the ptegidx, calculate it. 2256190681Snwhitehorn */ 2257190681Snwhitehorn if (pteidx == -1) { 2258190681Snwhitehorn int ptegidx; 2259190681Snwhitehorn uint64_t vsid; 2260190681Snwhitehorn 2261204042Snwhitehorn vsid = va_to_vsid(pvo->pvo_pmap, PVO_VADDR(pvo)); 2262204042Snwhitehorn ptegidx = va_to_pteg(vsid, PVO_VADDR(pvo)); 2263190681Snwhitehorn pteidx = moea64_pvo_pte_index(pvo, ptegidx); 2264190681Snwhitehorn } 2265190681Snwhitehorn 2266190681Snwhitehorn pt = &moea64_pteg_table[pteidx >> 3].pt[pteidx & 7]; 2267190681Snwhitehorn 2268190681Snwhitehorn if ((pvo->pvo_pte.lpte.pte_hi & LPTE_VALID) && 2269190681Snwhitehorn !PVO_PTEGIDX_ISSET(pvo)) { 2270190681Snwhitehorn panic("moea64_pvo_to_pte: pvo %p has valid pte in pvo but no " 2271190681Snwhitehorn "valid pte index", pvo); 2272190681Snwhitehorn } 2273190681Snwhitehorn 2274190681Snwhitehorn if ((pvo->pvo_pte.lpte.pte_hi & LPTE_VALID) == 0 && 2275190681Snwhitehorn PVO_PTEGIDX_ISSET(pvo)) { 2276190681Snwhitehorn panic("moea64_pvo_to_pte: pvo %p has valid pte index in pvo " 2277190681Snwhitehorn "but no valid pte", pvo); 2278190681Snwhitehorn } 2279190681Snwhitehorn 2280190681Snwhitehorn if ((pt->pte_hi ^ (pvo->pvo_pte.lpte.pte_hi & ~LPTE_VALID)) == 2281190681Snwhitehorn LPTE_VALID) { 2282190681Snwhitehorn if ((pvo->pvo_pte.lpte.pte_hi & LPTE_VALID) == 0) { 2283190681Snwhitehorn panic("moea64_pvo_to_pte: pvo %p has valid pte in " 2284190681Snwhitehorn "moea64_pteg_table %p but invalid in pvo", pvo, pt); 2285190681Snwhitehorn } 2286190681Snwhitehorn 2287190681Snwhitehorn if (((pt->pte_lo ^ pvo->pvo_pte.lpte.pte_lo) & 2288205163Snwhitehorn ~(LPTE_M|LPTE_CHG|LPTE_REF)) != 0) { 2289190681Snwhitehorn panic("moea64_pvo_to_pte: pvo %p pte does not match " 2290190681Snwhitehorn "pte %p in moea64_pteg_table difference is %#x", 2291190681Snwhitehorn pvo, pt, 2292190681Snwhitehorn (uint32_t)(pt->pte_lo ^ pvo->pvo_pte.lpte.pte_lo)); 2293190681Snwhitehorn } 2294190681Snwhitehorn 2295190681Snwhitehorn ASSERT_TABLE_LOCK(); 2296190681Snwhitehorn return
(pt); 2297190681Snwhitehorn } 2298190681Snwhitehorn 2299190681Snwhitehorn if (pvo->pvo_pte.lpte.pte_hi & LPTE_VALID) { 2300190681Snwhitehorn panic("moea64_pvo_to_pte: pvo %p has invalid pte %p in " 2301190681Snwhitehorn "moea64_pteg_table but valid in pvo", pvo, pt); 2302190681Snwhitehorn } 2303190681Snwhitehorn 2304190681Snwhitehorn return (NULL); 2305190681Snwhitehorn} 2306190681Snwhitehorn 2307190681Snwhitehornstatic int 2308190681Snwhitehornmoea64_pte_insert(u_int ptegidx, struct lpte *pvo_pt) 2309190681Snwhitehorn{ 2310190681Snwhitehorn struct lpte *pt; 2311190681Snwhitehorn int i; 2312190681Snwhitehorn 2313190681Snwhitehorn ASSERT_TABLE_LOCK(); 2314190681Snwhitehorn 2315190681Snwhitehorn /* 2316190681Snwhitehorn * First try primary hash. 2317190681Snwhitehorn */ 2318190681Snwhitehorn for (pt = moea64_pteg_table[ptegidx].pt, i = 0; i < 8; i++, pt++) { 2319204268Snwhitehorn if ((pt->pte_hi & LPTE_VALID) == 0 && 2320204268Snwhitehorn (pt->pte_hi & LPTE_LOCKED) == 0) { 2321190681Snwhitehorn pvo_pt->pte_hi &= ~LPTE_HID; 2322190681Snwhitehorn moea64_pte_set(pt, pvo_pt); 2323190681Snwhitehorn return (i); 2324190681Snwhitehorn } 2325190681Snwhitehorn } 2326190681Snwhitehorn 2327190681Snwhitehorn /* 2328190681Snwhitehorn * Now try secondary hash. 2329190681Snwhitehorn */ 2330190681Snwhitehorn ptegidx ^= moea64_pteg_mask; 2331190681Snwhitehorn 2332190681Snwhitehorn for (pt = moea64_pteg_table[ptegidx].pt, i = 0; i < 8; i++, pt++) { 2333204268Snwhitehorn if ((pt->pte_hi & LPTE_VALID) == 0 && 2334204268Snwhitehorn (pt->pte_hi & LPTE_LOCKED) == 0) { 2335190681Snwhitehorn pvo_pt->pte_hi |= LPTE_HID; 2336190681Snwhitehorn moea64_pte_set(pt, pvo_pt); 2337190681Snwhitehorn return (i); 2338190681Snwhitehorn } 2339190681Snwhitehorn } 2340190681Snwhitehorn 2341190681Snwhitehorn panic("moea64_pte_insert: overflow"); 2342190681Snwhitehorn return (-1); 2343190681Snwhitehorn} 2344190681Snwhitehorn 2345190681Snwhitehornstatic boolean_t 2346190681Snwhitehornmoea64_query_bit(vm_page_t m, u_int64_t ptebit) 2347190681Snwhitehorn{ 2348190681Snwhitehorn struct pvo_entry *pvo; 2349190681Snwhitehorn struct lpte *pt; 2350190681Snwhitehorn 2351190681Snwhitehorn if (moea64_attr_fetch(m) & ptebit) 2352190681Snwhitehorn return (TRUE); 2353190681Snwhitehorn 2354205370Snwhitehorn mtx_assert(&vm_page_queue_mtx, MA_OWNED); 2355205370Snwhitehorn 2356190681Snwhitehorn LIST_FOREACH(pvo, vm_page_to_pvoh(m), pvo_vlink) { 2357190681Snwhitehorn MOEA_PVO_CHECK(pvo); /* sanity check */ 2358190681Snwhitehorn 2359190681Snwhitehorn /* 2360190681Snwhitehorn * See if we saved the bit off. If so, cache it and return 2361190681Snwhitehorn * success. 2362190681Snwhitehorn */ 2363190681Snwhitehorn if (pvo->pvo_pte.lpte.pte_lo & ptebit) { 2364190681Snwhitehorn moea64_attr_save(m, ptebit); 2365190681Snwhitehorn MOEA_PVO_CHECK(pvo); /* sanity check */ 2366190681Snwhitehorn return (TRUE); 2367190681Snwhitehorn } 2368190681Snwhitehorn } 2369190681Snwhitehorn 2370190681Snwhitehorn /* 2371190681Snwhitehorn * No luck, now go through the hard part of looking at the PTEs 2372190681Snwhitehorn * themselves. Sync so that any pending REF/CHG bits are flushed to 2373190681Snwhitehorn * the PTEs. 2374190681Snwhitehorn */ 2375190681Snwhitehorn SYNC(); 2376190681Snwhitehorn LIST_FOREACH(pvo, vm_page_to_pvoh(m), pvo_vlink) { 2377190681Snwhitehorn MOEA_PVO_CHECK(pvo); /* sanity check */ 2378190681Snwhitehorn 2379190681Snwhitehorn /* 2380190681Snwhitehorn * See if this pvo has a valid PTE. if so, fetch the 2381190681Snwhitehorn * REF/CHG bits from the valid PTE. 
If the appropriate 2382190681Snwhitehorn * ptebit is set, cache it and return success. 2383190681Snwhitehorn */ 2384205370Snwhitehorn LOCK_TABLE(); 2385190681Snwhitehorn pt = moea64_pvo_to_pte(pvo, -1); 2386190681Snwhitehorn if (pt != NULL) { 2387190681Snwhitehorn moea64_pte_synch(pt, &pvo->pvo_pte.lpte); 2388190681Snwhitehorn if (pvo->pvo_pte.lpte.pte_lo & ptebit) { 2389190681Snwhitehorn UNLOCK_TABLE(); 2390190681Snwhitehorn 2391190681Snwhitehorn moea64_attr_save(m, ptebit); 2392190681Snwhitehorn MOEA_PVO_CHECK(pvo); /* sanity check */ 2393190681Snwhitehorn return (TRUE); 2394190681Snwhitehorn } 2395190681Snwhitehorn } 2396205370Snwhitehorn UNLOCK_TABLE(); 2397190681Snwhitehorn } 2398190681Snwhitehorn 2399190681Snwhitehorn return (FALSE); 2400190681Snwhitehorn} 2401190681Snwhitehorn 2402190681Snwhitehornstatic u_int 2403190681Snwhitehornmoea64_clear_bit(vm_page_t m, u_int64_t ptebit, u_int64_t *origbit) 2404190681Snwhitehorn{ 2405190681Snwhitehorn u_int count; 2406190681Snwhitehorn struct pvo_entry *pvo; 2407190681Snwhitehorn struct lpte *pt; 2408190681Snwhitehorn uint64_t rv; 2409190681Snwhitehorn 2410205370Snwhitehorn mtx_assert(&vm_page_queue_mtx, MA_OWNED); 2411205370Snwhitehorn 2412190681Snwhitehorn /* 2413190681Snwhitehorn * Clear the cached value. 2414190681Snwhitehorn */ 2415190681Snwhitehorn rv = moea64_attr_fetch(m); 2416190681Snwhitehorn moea64_attr_clear(m, ptebit); 2417190681Snwhitehorn 2418190681Snwhitehorn /* 2419190681Snwhitehorn * Sync so that any pending REF/CHG bits are flushed to the PTEs (so 2420190681Snwhitehorn * we can reset the right ones). Note that since the pvo entries and 2421190681Snwhitehorn * list heads are accessed via BAT0 and are never placed in the page 2422190681Snwhitehorn * table, we don't have to worry about further accesses setting the 2423190681Snwhitehorn * REF/CHG bits. 2424190681Snwhitehorn */ 2425190681Snwhitehorn SYNC(); 2426190681Snwhitehorn 2427190681Snwhitehorn /* 2428190681Snwhitehorn * For each pvo entry, clear the pvo's ptebit. If this pvo has a 2429190681Snwhitehorn * valid pte, clear the ptebit from the valid pte.
2430190681Snwhitehorn */ 2431190681Snwhitehorn count = 0; 2432190681Snwhitehorn LIST_FOREACH(pvo, vm_page_to_pvoh(m), pvo_vlink) { 2433190681Snwhitehorn MOEA_PVO_CHECK(pvo); /* sanity check */ 2434190681Snwhitehorn 2435205370Snwhitehorn LOCK_TABLE(); 2436190681Snwhitehorn pt = moea64_pvo_to_pte(pvo, -1); 2437190681Snwhitehorn if (pt != NULL) { 2438190681Snwhitehorn moea64_pte_synch(pt, &pvo->pvo_pte.lpte); 2439190681Snwhitehorn if (pvo->pvo_pte.lpte.pte_lo & ptebit) { 2440190681Snwhitehorn count++; 2441190681Snwhitehorn moea64_pte_clear(pt, pvo->pvo_pmap, PVO_VADDR(pvo), ptebit); 2442190681Snwhitehorn } 2443190681Snwhitehorn } 2444190681Snwhitehorn rv |= pvo->pvo_pte.lpte.pte_lo; 2445190681Snwhitehorn pvo->pvo_pte.lpte.pte_lo &= ~ptebit; 2446190681Snwhitehorn MOEA_PVO_CHECK(pvo); /* sanity check */ 2447205370Snwhitehorn UNLOCK_TABLE(); 2448190681Snwhitehorn } 2449190681Snwhitehorn 2450190681Snwhitehorn if (origbit != NULL) { 2451190681Snwhitehorn *origbit = rv; 2452190681Snwhitehorn } 2453190681Snwhitehorn 2454190681Snwhitehorn return (count); 2455190681Snwhitehorn} 2456190681Snwhitehorn 2457190681Snwhitehornboolean_t 2458190681Snwhitehornmoea64_dev_direct_mapped(mmu_t mmu, vm_offset_t pa, vm_size_t size) 2459190681Snwhitehorn{ 2460204296Snwhitehorn struct pvo_entry *pvo; 2461204296Snwhitehorn vm_offset_t ppa; 2462204296Snwhitehorn int error = 0; 2463204296Snwhitehorn 2464204296Snwhitehorn PMAP_LOCK(kernel_pmap); 2465204296Snwhitehorn for (ppa = pa & ~ADDR_POFF; ppa < pa + size; ppa += PAGE_SIZE) { 2466204296Snwhitehorn pvo = moea64_pvo_find_va(kernel_pmap, ppa, NULL); 2467204296Snwhitehorn if (pvo == NULL || 2468204296Snwhitehorn (pvo->pvo_pte.lpte.pte_lo & LPTE_RPGN) != ppa) { 2469204296Snwhitehorn error = EFAULT; 2470204296Snwhitehorn break; 2471204296Snwhitehorn } 2472204296Snwhitehorn } 2473204296Snwhitehorn PMAP_UNLOCK(kernel_pmap); 2474204296Snwhitehorn 2475204296Snwhitehorn return (error); 2476190681Snwhitehorn} 2477190681Snwhitehorn 2478190681Snwhitehorn/* 2479190681Snwhitehorn * Map a set of physical memory pages into the kernel virtual 2480190681Snwhitehorn * address space. Return a pointer to where it is mapped. This 2481190681Snwhitehorn * routine is intended to be used for mapping device memory, 2482190681Snwhitehorn * NOT real memory. 
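 *
 * Worked example (hypothetical numbers): mapping pa = 0x80000084 with
 * size = 0x10 truncates to ppa = 0x80000000, rounds the length up to
 * one page, and returns the freshly allocated KVA plus the 0x84 byte
 * offset.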
2483190681Snwhitehorn */ 2484190681Snwhitehornvoid * 2485190681Snwhitehornmoea64_mapdev(mmu_t mmu, vm_offset_t pa, vm_size_t size) 2486190681Snwhitehorn{ 2487190681Snwhitehorn vm_offset_t va, tmpva, ppa, offset; 2488190681Snwhitehorn 2489190681Snwhitehorn ppa = trunc_page(pa); 2490190681Snwhitehorn offset = pa & PAGE_MASK; 2491190681Snwhitehorn size = roundup(offset + size, PAGE_SIZE); 2492190681Snwhitehorn 2493190681Snwhitehorn va = kmem_alloc_nofault(kernel_map, size); 2494190681Snwhitehorn 2495190681Snwhitehorn if (!va) 2496190681Snwhitehorn panic("moea64_mapdev: Couldn't alloc kernel virtual memory"); 2497190681Snwhitehorn 2498190681Snwhitehorn for (tmpva = va; size > 0;) { 2499190681Snwhitehorn moea64_kenter(mmu, tmpva, ppa); 2500190681Snwhitehorn size -= PAGE_SIZE; 2501190681Snwhitehorn tmpva += PAGE_SIZE; 2502190681Snwhitehorn ppa += PAGE_SIZE; 2503190681Snwhitehorn } 2504190681Snwhitehorn 2505190681Snwhitehorn return ((void *)(va + offset)); 2506190681Snwhitehorn} 2507190681Snwhitehorn 2508190681Snwhitehornvoid 2509190681Snwhitehornmoea64_unmapdev(mmu_t mmu, vm_offset_t va, vm_size_t size) 2510190681Snwhitehorn{ 2511190681Snwhitehorn vm_offset_t base, offset; 2512190681Snwhitehorn 2513190681Snwhitehorn base = trunc_page(va); 2514190681Snwhitehorn offset = va & PAGE_MASK; 2515190681Snwhitehorn size = roundup(offset + size, PAGE_SIZE); 2516190681Snwhitehorn 2517190681Snwhitehorn kmem_free(kernel_map, base, size); 2518190681Snwhitehorn} 2519190681Snwhitehorn 2520198341Smarcelstatic void 2521198341Smarcelmoea64_sync_icache(mmu_t mmu, pmap_t pm, vm_offset_t va, vm_size_t sz) 2522198341Smarcel{ 2523198341Smarcel struct pvo_entry *pvo; 2524198341Smarcel vm_offset_t lim; 2525198341Smarcel vm_paddr_t pa; 2526198341Smarcel vm_size_t len; 2527198341Smarcel 2528198341Smarcel PMAP_LOCK(pm); 2529198341Smarcel while (sz > 0) { 2530198341Smarcel lim = round_page(va + 1); 2531198341Smarcel len = MIN(lim - va, sz); 2532198341Smarcel pvo = moea64_pvo_find_va(pm, va & ~ADDR_POFF, NULL); 2533198341Smarcel if (pvo != NULL) { 2534205163Snwhitehorn pa = (pvo->pvo_pte.lpte.pte_lo & LPTE_RPGN) | 2535198341Smarcel (va & ADDR_POFF); 2536198341Smarcel moea64_syncicache(pm, va, pa, len); 2537198341Smarcel } 2538198341Smarcel va += len; 2539198341Smarcel sz -= len; 2540198341Smarcel } 2541198341Smarcel PMAP_UNLOCK(pm); 2542198341Smarcel} 2543