/*-
 * Copyright (c) 2001 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Matt Thomas <matt@3am-software.com> of Allegro Networks, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *        This product includes software developed by the NetBSD
 *        Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */
/*-
 * Copyright (C) 1995, 1996 Wolfgang Solfrank.
 * Copyright (C) 1995, 1996 TooLs GmbH.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *        This product includes software developed by TooLs GmbH.
 * 4. The name of TooLs GmbH may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY TOOLS GMBH ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL TOOLS GMBH BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
 * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
 * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * $NetBSD: pmap.c,v 1.28 2000/03/26 20:42:36 kleink Exp $
 */
/*-
 * Copyright (C) 2001 Benno Rice.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY Benno Rice ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL TOOLS GMBH BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
 * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
 * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: head/sys/powerpc/aim/mmu_oea64.c 214617 2010-11-01 02:22:48Z alc $");

/*
 * Manages physical address maps.
 *
 * In addition to hardware address maps, this module is called upon to
 * provide software-use-only maps which may or may not be stored in the
 * same form as hardware maps.  These pseudo-maps are used to store
 * intermediate results from copy operations to and from address spaces.
 *
 * Since the information managed by this module is also stored by the
 * logical address mapping module, this module may throw away valid virtual
 * to physical mappings at almost any time.  However, invalidations of
 * mappings must be done as requested.
 *
 * In order to cope with hardware architectures which make virtual to
 * physical map invalidates expensive, this module may delay invalidate
 * reduced protection operations until such time as they are actually
 * necessary.  This module is given full information as to which processors
 * are currently using which maps, and to when physical maps must be made
 * correct.
 */

#include "opt_kstack_pages.h"

#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/lock.h>
#include <sys/msgbuf.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/sysctl.h>
#include <sys/systm.h>
#include <sys/vmmeter.h>

#include <sys/kdb.h>

#include <dev/ofw/openfirm.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/vm_kern.h>
#include <vm/vm_page.h>
#include <vm/vm_map.h>
#include <vm/vm_object.h>
#include <vm/vm_extern.h>
#include <vm/vm_pageout.h>
#include <vm/vm_pager.h>
#include <vm/uma.h>

#include <machine/_inttypes.h>
#include <machine/cpu.h>
#include <machine/platform.h>
#include <machine/frame.h>
#include <machine/md_var.h>
#include <machine/psl.h>
#include <machine/bat.h>
#include <machine/hid.h>
#include <machine/pte.h>
#include <machine/sr.h>
#include <machine/trap.h>
#include <machine/mmuvar.h>

#include "mmu_if.h"

#define	MOEA_DEBUG

#define TODO	panic("%s: not implemented", __func__);

void moea64_release_vsid(uint64_t vsid);
uintptr_t moea64_get_unique_vsid(void);
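/*
 * Count leading zeros in a doubleword.  Used at MMU setup time below to
 * take the log2 of the hash table size when encoding SDR1.
 */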
static __inline register_t
cntlzd(volatile register_t a) {
	register_t b;
	__asm ("cntlzd %0, %1" : "=r"(b) : "r"(a));
	return b;
}

#define	PTESYNC()	__asm __volatile("ptesync");
#define	TLBSYNC()	__asm __volatile("tlbsync; ptesync");
#define	SYNC()		__asm __volatile("sync");
#define	EIEIO()		__asm __volatile("eieio");

/*
 * The tlbie instruction must be executed in 64-bit mode
 * so we have to twiddle MSR[SF] around every invocation.
 * Just to add to the fun, exceptions must be off as well
 * so that we can't trap in 64-bit mode. What a pain.
 */
struct mtx	tlbie_mutex;

static __inline void
TLBIE(uint64_t vpn) {
#ifndef __powerpc64__
	register_t vpn_hi, vpn_lo;
	register_t msr;
	register_t scratch;
#endif

	vpn <<= ADDR_PIDX_SHFT;
	vpn &= ~(0xffffULL << 48);

	mtx_lock_spin(&tlbie_mutex);
#ifdef __powerpc64__
	__asm __volatile("\
	    ptesync; \
	    tlbie %0; \
	    eieio; \
	    tlbsync; \
	    ptesync;"
	:: "r"(vpn) : "memory");
#else
	vpn_hi = (uint32_t)(vpn >> 32);
	vpn_lo = (uint32_t)vpn;

	__asm __volatile("\
	    mfmsr %0; \
	    mr %1, %0; \
	    insrdi %1,%5,1,0; \
	    mtmsrd %1; isync; \
	    ptesync; \
	    \
	    sld %1,%2,%4; \
	    or %1,%1,%3; \
	    tlbie %1; \
	    \
	    mtmsrd %0; isync; \
	    eieio; \
	    tlbsync; \
	    ptesync;"
	: "=r"(msr), "=r"(scratch) : "r"(vpn_hi), "r"(vpn_lo), "r"(32), "r"(1)
	    : "memory");
#endif
	mtx_unlock_spin(&tlbie_mutex);
}

#define DISABLE_TRANS(msr)	msr = mfmsr(); mtmsr(msr & ~PSL_DR); isync()
#define ENABLE_TRANS(msr)	mtmsr(msr); isync()
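/*
 * A PVO's low pvo_vaddr bits double as flag storage: since mapped
 * addresses are page-aligned, the page-offset bits are free to hold the
 * PTEG slot index and assorted state flags, and PVO_VADDR() masks them
 * back off.
 */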
#define	VSID_MAKE(sr, hash)	((sr) | (((hash) & 0xfffff) << 4))
#define	VSID_TO_HASH(vsid)	(((vsid) >> 4) & 0xfffff)
#define	VSID_HASH_MASK		0x0000007fffffffffULL

#define	PVO_PTEGIDX_MASK	0x007UL		/* which PTEG slot */
#define	PVO_PTEGIDX_VALID	0x008UL		/* slot is valid */
#define	PVO_WIRED		0x010UL		/* PVO entry is wired */
#define	PVO_MANAGED		0x020UL		/* PVO entry is managed */
#define	PVO_BOOTSTRAP		0x080UL		/* PVO entry allocated during
						   bootstrap */
#define	PVO_FAKE		0x100UL		/* fictitious phys page */
#define	PVO_LARGE		0x200UL		/* large page */
#define	PVO_VADDR(pvo)		((pvo)->pvo_vaddr & ~ADDR_POFF)
#define	PVO_ISFAKE(pvo)		((pvo)->pvo_vaddr & PVO_FAKE)
#define	PVO_PTEGIDX_GET(pvo)	((pvo)->pvo_vaddr & PVO_PTEGIDX_MASK)
#define	PVO_PTEGIDX_ISSET(pvo)	((pvo)->pvo_vaddr & PVO_PTEGIDX_VALID)
#define	PVO_PTEGIDX_CLR(pvo)	\
	((void)((pvo)->pvo_vaddr &= ~(PVO_PTEGIDX_VALID|PVO_PTEGIDX_MASK)))
#define	PVO_PTEGIDX_SET(pvo, i)	\
	((void)((pvo)->pvo_vaddr |= (i)|PVO_PTEGIDX_VALID))
#define	PVO_VSID(pvo)		((pvo)->pvo_vpn >> 16)

#define	MOEA_PVO_CHECK(pvo)

#define	LOCK_TABLE()		mtx_lock(&moea64_table_mutex)
#define	UNLOCK_TABLE()		mtx_unlock(&moea64_table_mutex);
#define	ASSERT_TABLE_LOCK()	mtx_assert(&moea64_table_mutex, MA_OWNED)

struct ofw_map {
	cell_t	om_va;
	cell_t	om_len;
	cell_t	om_pa_hi;
	cell_t	om_pa_lo;
	cell_t	om_mode;
};

/*
 * Map of physical memory regions.
 */
static struct	mem_region *regions;
static struct	mem_region *pregions;
static u_int	phys_avail_count;
static int	regions_sz, pregions_sz;
extern int	ofw_real_mode;

extern struct pmap ofw_pmap;

extern void bs_remap_earlyboot(void);

/*
 * Lock for the pteg and pvo tables.
 */
struct mtx	moea64_table_mutex;
struct mtx	moea64_slb_mutex;

/*
 * PTEG data.
 */
static struct	lpteg *moea64_pteg_table;
u_int		moea64_pteg_count;
u_int		moea64_pteg_mask;

/*
 * PVO data.
 */
struct	pvo_head *moea64_pvo_table;		/* pvo entries by pteg index */
struct	pvo_head moea64_pvo_kunmanaged =	/* list of unmanaged pages */
    LIST_HEAD_INITIALIZER(moea64_pvo_kunmanaged);

uma_zone_t	moea64_upvo_zone; /* zone for pvo entries for unmanaged pages */
uma_zone_t	moea64_mpvo_zone; /* zone for pvo entries for managed pages */

#define	BPVO_POOL_SIZE	327680
static struct	pvo_entry *moea64_bpvo_pool;
static int	moea64_bpvo_pool_index = 0;
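/*
 * VSIDs are handed out of the bitmap below, VSID_NBPW bits per word,
 * with a set bit marking a VSID hash group already in use.  On 64-bit
 * CPUs the pool is sixteen times larger than the pmap count, since
 * 64-bit pmaps allocate a VSID per segment on demand rather than a
 * fixed group of sixteen up front.
 */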
#define	VSID_NBPW	(sizeof(u_int32_t) * 8)
#ifdef __powerpc64__
#define	NVSIDS		(NPMAPS * 16)
#define	VSID_HASHMASK	0xffffffffUL
#else
#define	NVSIDS		NPMAPS
#define	VSID_HASHMASK	0xfffffUL
#endif
static u_int	moea64_vsid_bitmap[NVSIDS / VSID_NBPW];

static boolean_t moea64_initialized = FALSE;

/*
 * Statistics.
 */
u_int	moea64_pte_valid = 0;
u_int	moea64_pte_overflow = 0;
u_int	moea64_pvo_entries = 0;
u_int	moea64_pvo_enter_calls = 0;
u_int	moea64_pvo_remove_calls = 0;
SYSCTL_INT(_machdep, OID_AUTO, moea64_pte_valid, CTLFLAG_RD,
    &moea64_pte_valid, 0, "");
SYSCTL_INT(_machdep, OID_AUTO, moea64_pte_overflow, CTLFLAG_RD,
    &moea64_pte_overflow, 0, "");
SYSCTL_INT(_machdep, OID_AUTO, moea64_pvo_entries, CTLFLAG_RD,
    &moea64_pvo_entries, 0, "");
SYSCTL_INT(_machdep, OID_AUTO, moea64_pvo_enter_calls, CTLFLAG_RD,
    &moea64_pvo_enter_calls, 0, "");
SYSCTL_INT(_machdep, OID_AUTO, moea64_pvo_remove_calls, CTLFLAG_RD,
    &moea64_pvo_remove_calls, 0, "");

vm_offset_t	moea64_scratchpage_va[2];
uint64_t	moea64_scratchpage_vpn[2];
struct	lpte *moea64_scratchpage_pte[2];
struct	mtx	moea64_scratchpage_mtx;

uint64_t	moea64_large_page_mask = 0;
int		moea64_large_page_size = 0;
int		moea64_large_page_shift = 0;
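/*
 * When there is no hardware direct map, page zeroing and copying borrow
 * the two scratchpage KVAs above: their locked PTEs are retargeted at
 * the physical page of interest under moea64_scratchpage_mtx, bypassing
 * the usual PVO bookkeeping entirely.
 */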
/*
 * Allocate physical memory for use in moea64_bootstrap.
 */
static vm_offset_t	moea64_bootstrap_alloc(vm_size_t, u_int);

/*
 * PTE calls.
 */
static int	moea64_pte_insert(u_int, struct lpte *);

/*
 * PVO calls.
 */
static int	moea64_pvo_enter(pmap_t, uma_zone_t, struct pvo_head *,
		    vm_offset_t, vm_offset_t, uint64_t, int);
static void	moea64_pvo_remove(struct pvo_entry *);
static struct	pvo_entry *moea64_pvo_find_va(pmap_t, vm_offset_t);
static struct	lpte *moea64_pvo_to_pte(const struct pvo_entry *);

/*
 * Utility routines.
 */
static void		moea64_bootstrap(mmu_t mmup,
			    vm_offset_t kernelstart, vm_offset_t kernelend);
static void		moea64_cpu_bootstrap(mmu_t, int ap);
static void		moea64_enter_locked(pmap_t, vm_offset_t, vm_page_t,
			    vm_prot_t, boolean_t);
static boolean_t	moea64_query_bit(vm_page_t, u_int64_t);
static u_int		moea64_clear_bit(vm_page_t, u_int64_t);
static void		moea64_kremove(mmu_t, vm_offset_t);
static void		moea64_syncicache(pmap_t pmap, vm_offset_t va,
			    vm_offset_t pa, vm_size_t sz);
static void		tlbia(void);
#ifdef __powerpc64__
static void		slbia(void);
#endif

/*
 * Kernel MMU interface
 */
void moea64_change_wiring(mmu_t, pmap_t, vm_offset_t, boolean_t);
void moea64_clear_modify(mmu_t, vm_page_t);
void moea64_clear_reference(mmu_t, vm_page_t);
void moea64_copy_page(mmu_t, vm_page_t, vm_page_t);
void moea64_enter(mmu_t, pmap_t, vm_offset_t, vm_page_t, vm_prot_t, boolean_t);
void moea64_enter_object(mmu_t, pmap_t, vm_offset_t, vm_offset_t, vm_page_t,
    vm_prot_t);
void moea64_enter_quick(mmu_t, pmap_t, vm_offset_t, vm_page_t, vm_prot_t);
vm_paddr_t moea64_extract(mmu_t, pmap_t, vm_offset_t);
vm_page_t moea64_extract_and_hold(mmu_t, pmap_t, vm_offset_t, vm_prot_t);
void moea64_init(mmu_t);
boolean_t moea64_is_modified(mmu_t, vm_page_t);
boolean_t moea64_is_prefaultable(mmu_t, pmap_t, vm_offset_t);
boolean_t moea64_is_referenced(mmu_t, vm_page_t);
boolean_t moea64_ts_referenced(mmu_t, vm_page_t);
vm_offset_t moea64_map(mmu_t, vm_offset_t *, vm_offset_t, vm_offset_t, int);
boolean_t moea64_page_exists_quick(mmu_t, pmap_t, vm_page_t);
int moea64_page_wired_mappings(mmu_t, vm_page_t);
void moea64_pinit(mmu_t, pmap_t);
void moea64_pinit0(mmu_t, pmap_t);
void moea64_protect(mmu_t, pmap_t, vm_offset_t, vm_offset_t, vm_prot_t);
void moea64_qenter(mmu_t, vm_offset_t, vm_page_t *, int);
void moea64_qremove(mmu_t, vm_offset_t, int);
void moea64_release(mmu_t, pmap_t);
void moea64_remove(mmu_t, pmap_t, vm_offset_t, vm_offset_t);
void moea64_remove_all(mmu_t, vm_page_t);
void moea64_remove_write(mmu_t, vm_page_t);
void moea64_zero_page(mmu_t, vm_page_t);
void moea64_zero_page_area(mmu_t, vm_page_t, int, int);
void moea64_zero_page_idle(mmu_t, vm_page_t);
void moea64_activate(mmu_t, struct thread *);
void moea64_deactivate(mmu_t, struct thread *);
void *moea64_mapdev(mmu_t, vm_offset_t, vm_size_t);
void *moea64_mapdev_attr(mmu_t, vm_offset_t, vm_size_t, vm_memattr_t);
void moea64_unmapdev(mmu_t, vm_offset_t, vm_size_t);
vm_offset_t moea64_kextract(mmu_t, vm_offset_t);
void moea64_page_set_memattr(mmu_t, vm_page_t m, vm_memattr_t ma);
void moea64_kenter_attr(mmu_t, vm_offset_t, vm_offset_t, vm_memattr_t ma);
void moea64_kenter(mmu_t, vm_offset_t, vm_offset_t);
boolean_t moea64_dev_direct_mapped(mmu_t, vm_offset_t, vm_size_t);
static void moea64_sync_icache(mmu_t, pmap_t, vm_offset_t, vm_size_t);
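/*
 * The pmap layer on FreeBSD/powerpc is a kobj interface: the table below
 * binds the functions above to the methods declared in mmu_if.m, and the
 * platform code selects one such method table at boot and dispatches all
 * pmap(9) calls through it.
 */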
static mmu_method_t moea64_methods[] = {
	MMUMETHOD(mmu_change_wiring,	moea64_change_wiring),
	MMUMETHOD(mmu_clear_modify,	moea64_clear_modify),
	MMUMETHOD(mmu_clear_reference,	moea64_clear_reference),
	MMUMETHOD(mmu_copy_page,	moea64_copy_page),
	MMUMETHOD(mmu_enter,		moea64_enter),
	MMUMETHOD(mmu_enter_object,	moea64_enter_object),
	MMUMETHOD(mmu_enter_quick,	moea64_enter_quick),
	MMUMETHOD(mmu_extract,		moea64_extract),
	MMUMETHOD(mmu_extract_and_hold,	moea64_extract_and_hold),
	MMUMETHOD(mmu_init,		moea64_init),
	MMUMETHOD(mmu_is_modified,	moea64_is_modified),
	MMUMETHOD(mmu_is_prefaultable,	moea64_is_prefaultable),
	MMUMETHOD(mmu_is_referenced,	moea64_is_referenced),
	MMUMETHOD(mmu_ts_referenced,	moea64_ts_referenced),
	MMUMETHOD(mmu_map,		moea64_map),
	MMUMETHOD(mmu_page_exists_quick,moea64_page_exists_quick),
	MMUMETHOD(mmu_page_wired_mappings,moea64_page_wired_mappings),
	MMUMETHOD(mmu_pinit,		moea64_pinit),
	MMUMETHOD(mmu_pinit0,		moea64_pinit0),
	MMUMETHOD(mmu_protect,		moea64_protect),
	MMUMETHOD(mmu_qenter,		moea64_qenter),
	MMUMETHOD(mmu_qremove,		moea64_qremove),
	MMUMETHOD(mmu_release,		moea64_release),
	MMUMETHOD(mmu_remove,		moea64_remove),
	MMUMETHOD(mmu_remove_all,	moea64_remove_all),
	MMUMETHOD(mmu_remove_write,	moea64_remove_write),
	MMUMETHOD(mmu_sync_icache,	moea64_sync_icache),
	MMUMETHOD(mmu_zero_page,	moea64_zero_page),
	MMUMETHOD(mmu_zero_page_area,	moea64_zero_page_area),
	MMUMETHOD(mmu_zero_page_idle,	moea64_zero_page_idle),
	MMUMETHOD(mmu_activate,		moea64_activate),
	MMUMETHOD(mmu_deactivate,	moea64_deactivate),
	MMUMETHOD(mmu_page_set_memattr,	moea64_page_set_memattr),

	/* Internal interfaces */
	MMUMETHOD(mmu_bootstrap,	moea64_bootstrap),
	MMUMETHOD(mmu_cpu_bootstrap,	moea64_cpu_bootstrap),
	MMUMETHOD(mmu_mapdev,		moea64_mapdev),
	MMUMETHOD(mmu_mapdev_attr,	moea64_mapdev_attr),
	MMUMETHOD(mmu_unmapdev,		moea64_unmapdev),
	MMUMETHOD(mmu_kextract,		moea64_kextract),
	MMUMETHOD(mmu_kenter,		moea64_kenter),
	MMUMETHOD(mmu_kenter_attr,	moea64_kenter_attr),
	MMUMETHOD(mmu_dev_direct_mapped,moea64_dev_direct_mapped),

	{ 0, 0 }
};

MMU_DEF(oea64_mmu, MMU_TYPE_G5, moea64_methods, 0);
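/*
 * Each mapping hashes to one of two PTEGs (groups of eight PTEs): the
 * primary PTEG at (VSID ^ page index) & moea64_pteg_mask, computed
 * below, and a secondary PTEG at its complement (ptegidx ^
 * moea64_pteg_mask).  Entries placed in the secondary group carry
 * LPTE_HID so the hash can be reversed later.
 */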
static __inline u_int
va_to_pteg(uint64_t vsid, vm_offset_t addr, int large)
{
	uint64_t hash;
	int shift;

	shift = large ? moea64_large_page_shift : ADDR_PIDX_SHFT;
	hash = (vsid & VSID_HASH_MASK) ^ (((uint64_t)addr & ADDR_PIDX) >>
	    shift);
	return (hash & moea64_pteg_mask);
}

static __inline struct pvo_head *
vm_page_to_pvoh(vm_page_t m)
{

	return (&m->md.mdpg_pvoh);
}
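/*
 * The moea64_attr_* helpers cache harvested LPTE_REF/LPTE_CHG bits in
 * the machine-dependent part of each vm_page, so referenced/modified
 * state survives after the mapping itself has been torn down.
 */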
static __inline void
moea64_attr_clear(vm_page_t m, u_int64_t ptebit)
{

	mtx_assert(&vm_page_queue_mtx, MA_OWNED);
	m->md.mdpg_attrs &= ~ptebit;
}

static __inline u_int64_t
moea64_attr_fetch(vm_page_t m)
{

	return (m->md.mdpg_attrs);
}

static __inline void
moea64_attr_save(vm_page_t m, u_int64_t ptebit)
{

	mtx_assert(&vm_page_queue_mtx, MA_OWNED);
	m->md.mdpg_attrs |= ptebit;
}

static __inline void
moea64_pte_create(struct lpte *pt, uint64_t vsid, vm_offset_t va,
    uint64_t pte_lo, int flags)
{

	ASSERT_TABLE_LOCK();

	/*
	 * Construct a PTE.  Default to IMB initially.  Valid bit only gets
	 * set when the real pte is set in memory.
	 *
	 * Note: Don't set the valid bit for correct operation of tlb update.
	 */
	pt->pte_hi = (vsid << LPTE_VSID_SHIFT) |
	    (((uint64_t)(va & ADDR_PIDX) >> ADDR_API_SHFT64) & LPTE_API);

	if (flags & PVO_LARGE)
		pt->pte_hi |= LPTE_BIG;

	pt->pte_lo = pte_lo;
}

static __inline void
moea64_pte_synch(struct lpte *pt, struct lpte *pvo_pt)
{

	ASSERT_TABLE_LOCK();

	pvo_pt->pte_lo |= pt->pte_lo & (LPTE_REF | LPTE_CHG);
}

static __inline void
moea64_pte_clear(struct lpte *pt, uint64_t vpn, u_int64_t ptebit)
{
	ASSERT_TABLE_LOCK();

	/*
	 * As shown in Section 7.6.3.2.3
	 */
	pt->pte_lo &= ~ptebit;
	TLBIE(vpn);
}

static __inline void
moea64_pte_set(struct lpte *pt, struct lpte *pvo_pt)
{

	ASSERT_TABLE_LOCK();
	pvo_pt->pte_hi |= LPTE_VALID;

	/*
	 * Update the PTE as defined in section 7.6.3.1.
	 * Note that the REF/CHG bits are from pvo_pt and thus should have
	 * been saved so this routine can restore them (if desired).
	 */
	pt->pte_lo = pvo_pt->pte_lo;
	EIEIO();
	pt->pte_hi = pvo_pt->pte_hi;
	PTESYNC();
	moea64_pte_valid++;
}

static __inline void
moea64_pte_unset(struct lpte *pt, struct lpte *pvo_pt, uint64_t vpn)
{
	ASSERT_TABLE_LOCK();
	pvo_pt->pte_hi &= ~LPTE_VALID;

	/*
	 * Force the ref & chg bits back into the PTEs.
	 */
	SYNC();

	/*
	 * Invalidate the pte.
	 */
	pt->pte_hi &= ~LPTE_VALID;
	TLBIE(vpn);

	/*
	 * Save the ref & chg bits.
	 */
	moea64_pte_synch(pt, pvo_pt);
	moea64_pte_valid--;
}

static __inline void
moea64_pte_change(struct lpte *pt, struct lpte *pvo_pt, uint64_t vpn)
{

	/*
	 * Invalidate the PTE
	 */
	moea64_pte_unset(pt, pvo_pt, vpn);
	moea64_pte_set(pt, pvo_pt);
}
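/*
 * The low word of a PTE carries the WIMG storage attributes: W
 * (write-through), I (cache-inhibited), M (memory coherence required)
 * and G (guarded, no speculative access).  moea64_calc_wimg() below
 * derives them from the requested vm_memattr_t, or failing that from
 * whether the address falls inside known physical memory.
 */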
static __inline uint64_t
moea64_calc_wimg(vm_offset_t pa, vm_memattr_t ma)
{
	uint64_t pte_lo;
	int i;

	if (ma != VM_MEMATTR_DEFAULT) {
		switch (ma) {
		case VM_MEMATTR_UNCACHEABLE:
			return (LPTE_I | LPTE_G);
		case VM_MEMATTR_WRITE_COMBINING:
		case VM_MEMATTR_WRITE_BACK:
		case VM_MEMATTR_PREFETCHABLE:
			return (LPTE_I);
		case VM_MEMATTR_WRITE_THROUGH:
			return (LPTE_W | LPTE_M);
		}
	}

	/*
	 * Assume the page is cache inhibited and access is guarded unless
	 * it's in our available memory array.
	 */
	pte_lo = LPTE_I | LPTE_G;
	for (i = 0; i < pregions_sz; i++) {
		if ((pa >= pregions[i].mr_start) &&
		    (pa < (pregions[i].mr_start + pregions[i].mr_size))) {
			pte_lo &= ~(LPTE_I | LPTE_G);
			pte_lo |= LPTE_M;
			break;
		}
	}

	return pte_lo;
}
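/*
 * Note that VM_MEMATTR_WRITE_COMBINING is approximated above with a
 * plain cache-inhibited (LPTE_I) mapping; the 64-bit OEA architecture
 * has no true write-combining mode, but cache-inhibited, non-guarded
 * stores may at least still be gathered by the hardware.
 */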
/*
 * Quick sort callout for comparing memory regions.
 */
static int	mr_cmp(const void *a, const void *b);
static int	om_cmp(const void *a, const void *b);

static int
mr_cmp(const void *a, const void *b)
{
	const struct	mem_region *regiona;
	const struct	mem_region *regionb;

	regiona = a;
	regionb = b;
	if (regiona->mr_start < regionb->mr_start)
		return (-1);
	else if (regiona->mr_start > regionb->mr_start)
		return (1);
	else
		return (0);
}

static int
om_cmp(const void *a, const void *b)
{
	const struct	ofw_map *mapa;
	const struct	ofw_map *mapb;

	mapa = a;
	mapb = b;
	if (mapa->om_pa_hi < mapb->om_pa_hi)
		return (-1);
	else if (mapa->om_pa_hi > mapb->om_pa_hi)
		return (1);
	else if (mapa->om_pa_lo < mapb->om_pa_lo)
		return (-1);
	else if (mapa->om_pa_lo > mapb->om_pa_lo)
		return (1);
	else
		return (0);
}
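/*
 * moea64_cpu_bootstrap() below runs on the boot CPU and again on each
 * AP: it turns translation off, (re)installs the kernel segment
 * mappings, and points SDR1 at the hashed page table.  The value
 * written to SDR1 is HTABORG (the table's physical base, which must be
 * aligned to the table size) OR'd with HTABSIZE, the log2 of the PTEG
 * count relative to the architectural 2048-PTEG minimum;
 * 64 - cntlzd(moea64_pteg_mask >> 11) computes exactly that.
 */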
static void
moea64_cpu_bootstrap(mmu_t mmup, int ap)
{
	int i = 0;
	#ifdef __powerpc64__
	struct slb *slb = PCPU_GET(slb);
	#endif

	/*
	 * Initialize segment registers and MMU
	 */

	mtmsr(mfmsr() & ~PSL_DR & ~PSL_IR); isync();

	/*
	 * Install kernel SLB entries
	 */

	#ifdef __powerpc64__
		slbia();

		for (i = 0; i < 64; i++) {
			if (!(slb[i].slbe & SLBE_VALID))
				continue;

			__asm __volatile ("slbmte %0, %1" ::
			    "r"(slb[i].slbv), "r"(slb[i].slbe));
		}
	#else
		for (i = 0; i < 16; i++)
			mtsrin(i << ADDR_SR_SHFT, kernel_pmap->pm_sr[i]);
	#endif

	/*
	 * Install page table
	 */

	__asm __volatile ("ptesync; mtsdr1 %0; isync"
	    :: "r"((uintptr_t)moea64_pteg_table
		     | (64 - cntlzd(moea64_pteg_mask >> 11))));
	tlbia();
}

static void
moea64_add_ofw_mappings(mmu_t mmup, phandle_t mmu, size_t sz)
{
	struct ofw_map	translations[sz/sizeof(struct ofw_map)];
	register_t	msr;
	vm_offset_t	off;
	vm_paddr_t	pa_base;
	int		i, ofw_mappings;

	bzero(translations, sz);
	if (OF_getprop(mmu, "translations", translations, sz) == -1)
		panic("moea64_bootstrap: can't get ofw translations");

	CTR0(KTR_PMAP, "moea64_add_ofw_mappings: translations");
	sz /= sizeof(*translations);
	qsort(translations, sz, sizeof (*translations), om_cmp);

	for (i = 0, ofw_mappings = 0; i < sz; i++) {
		CTR3(KTR_PMAP, "translation: pa=%#x va=%#x len=%#x",
		    (uint32_t)(translations[i].om_pa_lo), translations[i].om_va,
		    translations[i].om_len);

		if (translations[i].om_pa_lo % PAGE_SIZE)
			panic("OFW translation not page-aligned!");

		pa_base = translations[i].om_pa_lo;

	      #ifdef __powerpc64__
		pa_base += (vm_offset_t)translations[i].om_pa_hi << 32;
	      #else
		if (translations[i].om_pa_hi)
			panic("OFW translations above 32-bit boundary!");
	      #endif

		/* Now enter the pages for this mapping */

		DISABLE_TRANS(msr);
		for (off = 0; off < translations[i].om_len; off += PAGE_SIZE) {
			if (moea64_pvo_find_va(kernel_pmap,
			    translations[i].om_va + off) != NULL)
				continue;

			moea64_kenter(mmup, translations[i].om_va + off,
			    pa_base + off);

			ofw_mappings++;
		}
		ENABLE_TRANS(msr);
	}
}

#ifdef __powerpc64__
static void
moea64_probe_large_page(void)
{
	uint16_t pvr = mfpvr() >> 16;

	switch (pvr) {
	case IBM970:
	case IBM970FX:
	case IBM970MP:
		powerpc_sync(); isync();
		mtspr(SPR_HID4, mfspr(SPR_HID4) & ~HID4_970_DISABLE_LG_PG);
		powerpc_sync(); isync();

		/* FALLTHROUGH */
	case IBMCELLBE:
		moea64_large_page_size = 0x1000000; /* 16 MB */
		moea64_large_page_shift = 24;
		break;
	default:
		moea64_large_page_size = 0;
	}

	moea64_large_page_mask = moea64_large_page_size - 1;
}
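/*
 * During bootstrap the kernel cannot yet take SLB misses, so every
 * segment it will touch has to be faulted into the SLB by hand; the
 * helper below installs one kernel segment, skipping entries already
 * present in the per-CPU SLB cache.
 */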
static void
moea64_bootstrap_slb_prefault(vm_offset_t va, int large)
{
	struct slb *cache;
	struct slb entry;
	uint64_t esid, slbe;
	uint64_t i;

	cache = PCPU_GET(slb);
	esid = va >> ADDR_SR_SHFT;
	slbe = (esid << SLBE_ESID_SHIFT) | SLBE_VALID;

	for (i = 0; i < 64; i++) {
		if (cache[i].slbe == (slbe | i))
			return;
	}

	entry.slbe = slbe;
	entry.slbv = KERNEL_VSID(esid) << SLBV_VSID_SHIFT;
	if (large)
		entry.slbv |= SLBV_L;

	slb_insert_kernel(entry.slbe, entry.slbv);
}
#endif

static void
moea64_setup_direct_map(mmu_t mmup, vm_offset_t kernelstart,
    vm_offset_t kernelend)
{
	register_t msr;
	vm_paddr_t pa;
	vm_offset_t size, off;
	uint64_t pte_lo;
	int i;

	if (moea64_large_page_size == 0)
		hw_direct_map = 0;

	DISABLE_TRANS(msr);
	if (hw_direct_map) {
		PMAP_LOCK(kernel_pmap);
		for (i = 0; i < pregions_sz; i++) {
		  for (pa = pregions[i].mr_start; pa < pregions[i].mr_start +
		     pregions[i].mr_size; pa += moea64_large_page_size) {
			pte_lo = LPTE_M;

			/*
			 * Set memory access as guarded if prefetch within
			 * the page could exit the available physmem area.
			 */
			if (pa & moea64_large_page_mask) {
				pa &= moea64_large_page_mask;
				pte_lo |= LPTE_G;
			}
			if (pa + moea64_large_page_size >
			    pregions[i].mr_start + pregions[i].mr_size)
				pte_lo |= LPTE_G;

			moea64_pvo_enter(kernel_pmap, moea64_upvo_zone,
				    &moea64_pvo_kunmanaged, pa, pa,
				    pte_lo, PVO_WIRED | PVO_LARGE |
				    VM_PROT_EXECUTE);
		  }
		}
		PMAP_UNLOCK(kernel_pmap);
	} else {
		size = moea64_pteg_count * sizeof(struct lpteg);
		off = (vm_offset_t)(moea64_pteg_table);
		for (pa = off; pa < off + size; pa += PAGE_SIZE)
			moea64_kenter(mmup, pa, pa);
		size = sizeof(struct pvo_head) * moea64_pteg_count;
		off = (vm_offset_t)(moea64_pvo_table);
		for (pa = off; pa < off + size; pa += PAGE_SIZE)
			moea64_kenter(mmup, pa, pa);
		size = BPVO_POOL_SIZE*sizeof(struct pvo_entry);
		off = (vm_offset_t)(moea64_bpvo_pool);
		for (pa = off; pa < off + size; pa += PAGE_SIZE)
			moea64_kenter(mmup, pa, pa);

		/*
		 * Map certain important things, like ourselves.
		 *
		 * NOTE: We do not map the exception vector space. That code is
		 * used only in real mode, and leaving it unmapped allows us to
		 * catch NULL pointer dereferences, instead of making NULL a
		 * valid address.
		 */

		for (pa = kernelstart & ~PAGE_MASK; pa < kernelend;
		    pa += PAGE_SIZE)
			moea64_kenter(mmup, pa, pa);
	}
	ENABLE_TRANS(msr);
}
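/*
 * moea64_bootstrap() below brings the MMU up from scratch: it sizes
 * physical memory, carves the kernel image and exception vectors out of
 * phys_avail[], allocates the PTEG and PVO tables, initializes the
 * kernel pmap and its locks, replays the Open Firmware translations,
 * enables translation, sets up the scratch pages, and finally allocates
 * the thread0 stack, the message buffer, and the dynamic per-CPU area.
 */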
static void
moea64_bootstrap(mmu_t mmup, vm_offset_t kernelstart, vm_offset_t kernelend)
{
	ihandle_t	mmui;
	phandle_t	chosen;
	phandle_t	mmu;
	size_t		sz;
	int		i, j;
	vm_size_t	size, physsz, hwphyssz;
	vm_offset_t	pa, va;
	register_t	msr;
	void		*dpcpu;

#ifndef __powerpc64__
	/* We don't have a direct map since there is no BAT */
	hw_direct_map = 0;

	/* Make sure battable is zero, since we have no BAT */
	for (i = 0; i < 16; i++) {
		battable[i].batu = 0;
		battable[i].batl = 0;
	}
#else
	moea64_probe_large_page();

	/* Use a direct map if we have large page support */
	if (moea64_large_page_size > 0)
		hw_direct_map = 1;
	else
		hw_direct_map = 0;
#endif

	/* Get physical memory regions from firmware */
	mem_regions(&pregions, &pregions_sz, &regions, &regions_sz);
	CTR0(KTR_PMAP, "moea64_bootstrap: physical memory");

	qsort(pregions, pregions_sz, sizeof(*pregions), mr_cmp);
	if (sizeof(phys_avail)/sizeof(phys_avail[0]) < regions_sz)
		panic("moea64_bootstrap: phys_avail too small");
	qsort(regions, regions_sz, sizeof(*regions), mr_cmp);
	phys_avail_count = 0;
	physsz = 0;
	hwphyssz = 0;
	TUNABLE_ULONG_FETCH("hw.physmem", (u_long *) &hwphyssz);
	for (i = 0, j = 0; i < regions_sz; i++, j += 2) {
		CTR3(KTR_PMAP, "region: %#x - %#x (%#x)", regions[i].mr_start,
		    regions[i].mr_start + regions[i].mr_size,
		    regions[i].mr_size);
		if (hwphyssz != 0 &&
		    (physsz + regions[i].mr_size) >= hwphyssz) {
			if (physsz < hwphyssz) {
				phys_avail[j] = regions[i].mr_start;
				phys_avail[j + 1] = regions[i].mr_start +
				    hwphyssz - physsz;
				physsz = hwphyssz;
				phys_avail_count++;
			}
			break;
		}
		phys_avail[j] = regions[i].mr_start;
		phys_avail[j + 1] = regions[i].mr_start + regions[i].mr_size;
		phys_avail_count++;
		physsz += regions[i].mr_size;
	}
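	/*
	 * phys_avail[] is a zero-terminated list of (start, end) pairs.
	 * The loop below removes the exception vectors and the kernel
	 * image from the available ranges, appending a new pair when the
	 * kernel sits in the middle of a range and splits it in two.
	 */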
	/* Check for overlap with the kernel and exception vectors */
	for (j = 0; j < 2*phys_avail_count; j+=2) {
		if (phys_avail[j] < EXC_LAST)
			phys_avail[j] += EXC_LAST;

		if (kernelstart >= phys_avail[j] &&
		    kernelstart < phys_avail[j+1]) {
			if (kernelend < phys_avail[j+1]) {
				phys_avail[2*phys_avail_count] =
				    (kernelend & ~PAGE_MASK) + PAGE_SIZE;
				phys_avail[2*phys_avail_count + 1] =
				    phys_avail[j+1];
				phys_avail_count++;
			}

			phys_avail[j+1] = kernelstart & ~PAGE_MASK;
		}

		if (kernelend >= phys_avail[j] &&
		    kernelend < phys_avail[j+1]) {
			if (kernelstart > phys_avail[j]) {
				phys_avail[2*phys_avail_count] = phys_avail[j];
				phys_avail[2*phys_avail_count + 1] =
				    kernelstart & ~PAGE_MASK;
				phys_avail_count++;
			}

			phys_avail[j] = (kernelend & ~PAGE_MASK) + PAGE_SIZE;
		}
	}

	physmem = btoc(physsz);

	/*
	 * Allocate PTEG table.
	 */
#ifdef PTEGCOUNT
	moea64_pteg_count = PTEGCOUNT;
#else
	moea64_pteg_count = 0x1000;

	while (moea64_pteg_count < physmem)
		moea64_pteg_count <<= 1;

	moea64_pteg_count >>= 1;
#endif /* PTEGCOUNT */

	size = moea64_pteg_count * sizeof(struct lpteg);
	CTR2(KTR_PMAP, "moea64_bootstrap: %d PTEGs, %d bytes",
	    moea64_pteg_count, size);

	/*
	 * We now need to allocate memory.  This memory, to be allocated,
	 * has to reside in a page table, but the page table is the very
	 * thing we are about to allocate, and we have no BAT to cover the
	 * gap.  So drop to data real mode for a minute as a measure of
	 * last resort.  We do this a couple of times.
	 */

	moea64_pteg_table = (struct lpteg *)moea64_bootstrap_alloc(size, size);
	DISABLE_TRANS(msr);
	bzero((void *)moea64_pteg_table, moea64_pteg_count * sizeof(struct lpteg));
	ENABLE_TRANS(msr);

	moea64_pteg_mask = moea64_pteg_count - 1;

	CTR1(KTR_PMAP, "moea64_bootstrap: PTEG table at %p", moea64_pteg_table);
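	/*
	 * Each PTEG has a corresponding pvo_head in moea64_pvo_table,
	 * listing every mapping that hashes there -- including overflow
	 * mappings whose PTEs have been evicted from the eight hardware
	 * slots and that live only in software until they fault back in.
	 */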
	/*
	 * Allocate pv/overflow lists.
	 */
	size = sizeof(struct pvo_head) * moea64_pteg_count;

	moea64_pvo_table = (struct pvo_head *)moea64_bootstrap_alloc(size,
	    PAGE_SIZE);
	CTR1(KTR_PMAP, "moea64_bootstrap: PVO table at %p", moea64_pvo_table);

	DISABLE_TRANS(msr);
	for (i = 0; i < moea64_pteg_count; i++)
		LIST_INIT(&moea64_pvo_table[i]);
	ENABLE_TRANS(msr);

	/*
	 * Initialize the lock that synchronizes access to the pteg and pvo
	 * tables.
	 */
	mtx_init(&moea64_table_mutex, "pmap table", NULL, MTX_DEF |
	    MTX_RECURSE);
	mtx_init(&moea64_slb_mutex, "SLB table", NULL, MTX_DEF);

	/*
	 * Initialize the TLBIE lock. TLBIE can only be executed by one CPU.
	 */
	mtx_init(&tlbie_mutex, "tlbie mutex", NULL, MTX_SPIN);

	/*
	 * Initialise the unmanaged pvo pool.
	 */
	moea64_bpvo_pool = (struct pvo_entry *)moea64_bootstrap_alloc(
		BPVO_POOL_SIZE*sizeof(struct pvo_entry), 0);
	moea64_bpvo_pool_index = 0;

	/*
	 * Make sure kernel vsid is allocated as well as VSID 0.
	 */
	#ifndef __powerpc64__
	moea64_vsid_bitmap[(KERNEL_VSIDBITS & (NVSIDS - 1)) / VSID_NBPW]
		|= 1 << (KERNEL_VSIDBITS % VSID_NBPW);
	moea64_vsid_bitmap[0] |= 1;
	#endif

	/*
	 * Initialize the kernel pmap (which is statically allocated).
	 */
	#ifdef __powerpc64__
	for (i = 0; i < 64; i++) {
		pcpup->pc_slb[i].slbv = 0;
		pcpup->pc_slb[i].slbe = 0;
	}
	#else
	for (i = 0; i < 16; i++)
		kernel_pmap->pm_sr[i] = EMPTY_SEGMENT + i;
	#endif

	kernel_pmap->pmap_phys = kernel_pmap;
	kernel_pmap->pm_active = ~0;

	PMAP_LOCK_INIT(kernel_pmap);

	/*
	 * Now map in all the other buffers we allocated earlier
	 */

	moea64_setup_direct_map(mmup, kernelstart, kernelend);
	/*
	 * Set up the Open Firmware pmap and add its mappings if not in real
	 * mode.
	 */

	if (!ofw_real_mode) {
	    #ifndef __powerpc64__
	    moea64_pinit(mmup, &ofw_pmap);

	    for (i = 0; i < 16; i++)
		ofw_pmap.pm_sr[i] = kernel_pmap->pm_sr[i];
	    #endif

	    if ((chosen = OF_finddevice("/chosen")) == -1)
		panic("moea64_bootstrap: can't find /chosen");
	    OF_getprop(chosen, "mmu", &mmui, 4);

	    if ((mmu = OF_instance_to_package(mmui)) == -1)
		panic("moea64_bootstrap: can't get mmu package");
	    if ((sz = OF_getproplen(mmu, "translations")) == -1)
		panic("moea64_bootstrap: can't get ofw translation count");
	    if (sz > 6144 /* tmpstksz - 2 KB headroom */)
		panic("moea64_bootstrap: too many ofw translations");

	    moea64_add_ofw_mappings(mmup, mmu, sz);
	}

#ifdef SMP
	TLBSYNC();
#endif

	/*
	 * Calculate the last available physical address.
	 */
	for (i = 0; phys_avail[i + 2] != 0; i += 2)
		;
	Maxmem = powerpc_btop(phys_avail[i + 1]);

	/*
	 * Initialize MMU and remap early physical mappings
	 */
	moea64_cpu_bootstrap(mmup, 0);
	mtmsr(mfmsr() | PSL_DR | PSL_IR); isync();
	pmap_bootstrapped++;
	bs_remap_earlyboot();

	/*
	 * Set the start and end of kva.
	 */
	virtual_avail = VM_MIN_KERNEL_ADDRESS;
	virtual_end = VM_MAX_SAFE_KERNEL_ADDRESS;

	/*
	 * Map the entire KVA range into the SLB. We must not fault there.
	 */
	#ifdef __powerpc64__
	for (va = virtual_avail; va < virtual_end; va += SEGMENT_LENGTH)
		moea64_bootstrap_slb_prefault(va, 0);
	#endif

	/*
	 * Figure out how far we can extend virtual_end into segment 16
	 * without running into existing mappings. Segment 16 is guaranteed
	 * to contain neither RAM nor devices (at least on Apple hardware),
	 * but will generally contain some OFW mappings we should not
	 * step on.
	 */

	#ifndef __powerpc64__ /* KVA is in high memory on PPC64 */
	PMAP_LOCK(kernel_pmap);
	while (virtual_end < VM_MAX_KERNEL_ADDRESS &&
	    moea64_pvo_find_va(kernel_pmap, virtual_end+1) == NULL)
		virtual_end += PAGE_SIZE;
	PMAP_UNLOCK(kernel_pmap);
	#endif
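	/*
	 * Note on the scratch page setup below: moea64_pte_insert() may
	 * place a PTE in either the primary or the secondary PTEG.  If it
	 * chose the secondary one it sets LPTE_HID, in which case the
	 * PTEG index must be complemented to locate the slot whose
	 * address we cache.
	 */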
We put this directly
1194204694Snwhitehorn * in the page table, marked with LPTE_LOCKED, to keep the PVO
1195204694Snwhitehorn * book-keeping and the rest of the VM system from even knowing
1196204694Snwhitehorn * that this hack exists.
1197190681Snwhitehorn */
1198190681Snwhitehorn
1199209975Snwhitehorn if (!hw_direct_map) {
1200209975Snwhitehorn mtx_init(&moea64_scratchpage_mtx, "pvo zero page", NULL,
1201209975Snwhitehorn MTX_DEF);
1202209975Snwhitehorn for (i = 0; i < 2; i++) {
1203209975Snwhitehorn struct lpte pt;
1204209975Snwhitehorn uint64_t vsid;
1205209975Snwhitehorn int pteidx, ptegidx;
1206204694Snwhitehorn
1207209975Snwhitehorn moea64_scratchpage_va[i] = (virtual_end+1) - PAGE_SIZE;
1208209975Snwhitehorn virtual_end -= PAGE_SIZE;
1209190681Snwhitehorn
1210209975Snwhitehorn LOCK_TABLE();
1211209975Snwhitehorn
1212209975Snwhitehorn vsid = va_to_vsid(kernel_pmap,
1213209975Snwhitehorn moea64_scratchpage_va[i]);
1214209975Snwhitehorn moea64_pte_create(&pt, vsid, moea64_scratchpage_va[i],
1215209975Snwhitehorn LPTE_NOEXEC, 0);
1216209975Snwhitehorn pt.pte_hi |= LPTE_LOCKED;
1217190681Snwhitehorn
1218209975Snwhitehorn moea64_scratchpage_vpn[i] = (vsid << 16) |
1219209975Snwhitehorn ((moea64_scratchpage_va[i] & ADDR_PIDX) >>
1220209975Snwhitehorn ADDR_PIDX_SHFT);
1221209975Snwhitehorn ptegidx = va_to_pteg(vsid, moea64_scratchpage_va[i], 0);
1222209975Snwhitehorn pteidx = moea64_pte_insert(ptegidx, &pt);
1223209975Snwhitehorn if (pt.pte_hi & LPTE_HID)
1224209975Snwhitehorn ptegidx ^= moea64_pteg_mask;
1225204694Snwhitehorn
1226209975Snwhitehorn moea64_scratchpage_pte[i] =
1227209975Snwhitehorn &moea64_pteg_table[ptegidx].pt[pteidx];
1228204694Snwhitehorn
1229209975Snwhitehorn UNLOCK_TABLE();
1230209975Snwhitehorn }
1231190681Snwhitehorn }
1232190681Snwhitehorn
1233190681Snwhitehorn /*
1234190681Snwhitehorn * Allocate a kernel stack with a guard page for thread0 and map it
1235190681Snwhitehorn * into the kernel page map.
1236190681Snwhitehorn */
1237190681Snwhitehorn pa = moea64_bootstrap_alloc(KSTACK_PAGES * PAGE_SIZE, PAGE_SIZE);
1238190681Snwhitehorn va = virtual_avail + KSTACK_GUARD_PAGES * PAGE_SIZE;
1239190681Snwhitehorn virtual_avail = va + KSTACK_PAGES * PAGE_SIZE;
1240190681Snwhitehorn CTR2(KTR_PMAP, "moea64_bootstrap: kstack0 at %#x (%#x)", pa, va);
1241190681Snwhitehorn thread0.td_kstack = va;
1242190681Snwhitehorn thread0.td_kstack_pages = KSTACK_PAGES;
1243190681Snwhitehorn for (i = 0; i < KSTACK_PAGES; i++) {
1244201758Smbr moea64_kenter(mmup, va, pa);
1245190681Snwhitehorn pa += PAGE_SIZE;
1246190681Snwhitehorn va += PAGE_SIZE;
1247190681Snwhitehorn }
1248190681Snwhitehorn
1249190681Snwhitehorn /*
1250190681Snwhitehorn * Allocate virtual address space for the message buffer.
1251190681Snwhitehorn */
1252190681Snwhitehorn pa = msgbuf_phys = moea64_bootstrap_alloc(MSGBUF_SIZE, PAGE_SIZE);
1253204297Snwhitehorn msgbufp = (struct msgbuf *)virtual_avail;
1254204297Snwhitehorn va = virtual_avail;
1255204297Snwhitehorn virtual_avail += round_page(MSGBUF_SIZE);
1256204297Snwhitehorn while (va < virtual_avail) {
1257204297Snwhitehorn moea64_kenter(mmup, va, pa);
1258190681Snwhitehorn pa += PAGE_SIZE;
1259204297Snwhitehorn va += PAGE_SIZE;
1260190681Snwhitehorn }
1261194784Sjeff
1262194784Sjeff /*
1263194784Sjeff * Allocate virtual address space for the dynamic percpu area.
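 *
 * The same three-step idiom recurs for each early buffer: take
 * physical pages from phys_avail with moea64_bootstrap_alloc(),
 * carve KVA out of virtual_avail, and wire each page with
 * moea64_kenter(). A hypothetical helper capturing the pattern
 * (not part of this file) might look like:
 *
 *	static vm_offset_t
 *	moea64_bootstrap_map(mmu_t mmup, vm_size_t size)
 *	{
 *		vm_offset_t pa, va, sva;
 *
 *		pa = moea64_bootstrap_alloc(size, PAGE_SIZE);
 *		sva = va = virtual_avail;
 *		virtual_avail += round_page(size);
 *		for (; va < virtual_avail; va += PAGE_SIZE, pa += PAGE_SIZE)
 *			moea64_kenter(mmup, va, pa);
 *		return (sva);
 *	}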
1264194784Sjeff */ 1265194784Sjeff pa = moea64_bootstrap_alloc(DPCPU_SIZE, PAGE_SIZE); 1266204297Snwhitehorn dpcpu = (void *)virtual_avail; 1267209975Snwhitehorn va = virtual_avail; 1268204297Snwhitehorn virtual_avail += DPCPU_SIZE; 1269204297Snwhitehorn while (va < virtual_avail) { 1270204297Snwhitehorn moea64_kenter(mmup, va, pa); 1271194784Sjeff pa += PAGE_SIZE; 1272204297Snwhitehorn va += PAGE_SIZE; 1273194784Sjeff } 1274194784Sjeff dpcpu_init(dpcpu, 0); 1275190681Snwhitehorn} 1276190681Snwhitehorn 1277190681Snwhitehorn/* 1278209975Snwhitehorn * Activate a user pmap. The pmap must be activated before its address 1279190681Snwhitehorn * space can be accessed in any way. 1280190681Snwhitehorn */ 1281190681Snwhitehornvoid 1282190681Snwhitehornmoea64_activate(mmu_t mmu, struct thread *td) 1283190681Snwhitehorn{ 1284209975Snwhitehorn pmap_t pm; 1285190681Snwhitehorn 1286190681Snwhitehorn pm = &td->td_proc->p_vmspace->vm_pmap; 1287209975Snwhitehorn pm->pm_active |= PCPU_GET(cpumask); 1288190681Snwhitehorn 1289209975Snwhitehorn #ifdef __powerpc64__ 1290209975Snwhitehorn PCPU_SET(userslb, pm->pm_slb); 1291209975Snwhitehorn #else 1292209975Snwhitehorn PCPU_SET(curpmap, pm->pmap_phys); 1293209975Snwhitehorn #endif 1294190681Snwhitehorn} 1295190681Snwhitehorn 1296190681Snwhitehornvoid 1297190681Snwhitehornmoea64_deactivate(mmu_t mmu, struct thread *td) 1298190681Snwhitehorn{ 1299190681Snwhitehorn pmap_t pm; 1300190681Snwhitehorn 1301190681Snwhitehorn pm = &td->td_proc->p_vmspace->vm_pmap; 1302190681Snwhitehorn pm->pm_active &= ~(PCPU_GET(cpumask)); 1303209975Snwhitehorn #ifdef __powerpc64__ 1304209975Snwhitehorn PCPU_SET(userslb, NULL); 1305209975Snwhitehorn #else 1306190681Snwhitehorn PCPU_SET(curpmap, NULL); 1307209975Snwhitehorn #endif 1308190681Snwhitehorn} 1309190681Snwhitehorn 1310190681Snwhitehornvoid 1311190681Snwhitehornmoea64_change_wiring(mmu_t mmu, pmap_t pm, vm_offset_t va, boolean_t wired) 1312190681Snwhitehorn{ 1313190681Snwhitehorn struct pvo_entry *pvo; 1314209975Snwhitehorn struct lpte *pt; 1315209975Snwhitehorn uint64_t vsid; 1316209975Snwhitehorn int i, ptegidx; 1317190681Snwhitehorn 1318190681Snwhitehorn PMAP_LOCK(pm); 1319209975Snwhitehorn pvo = moea64_pvo_find_va(pm, va & ~ADDR_POFF); 1320190681Snwhitehorn 1321190681Snwhitehorn if (pvo != NULL) { 1322209975Snwhitehorn LOCK_TABLE(); 1323209975Snwhitehorn pt = moea64_pvo_to_pte(pvo); 1324209975Snwhitehorn 1325190681Snwhitehorn if (wired) { 1326190681Snwhitehorn if ((pvo->pvo_vaddr & PVO_WIRED) == 0) 1327190681Snwhitehorn pm->pm_stats.wired_count++; 1328190681Snwhitehorn pvo->pvo_vaddr |= PVO_WIRED; 1329209975Snwhitehorn pvo->pvo_pte.lpte.pte_hi |= LPTE_WIRED; 1330190681Snwhitehorn } else { 1331190681Snwhitehorn if ((pvo->pvo_vaddr & PVO_WIRED) != 0) 1332190681Snwhitehorn pm->pm_stats.wired_count--; 1333190681Snwhitehorn pvo->pvo_vaddr &= ~PVO_WIRED; 1334209975Snwhitehorn pvo->pvo_pte.lpte.pte_hi &= ~LPTE_WIRED; 1335190681Snwhitehorn } 1336209975Snwhitehorn 1337209975Snwhitehorn if (pt != NULL) { 1338209975Snwhitehorn /* Update wiring flag in page table. */ 1339209975Snwhitehorn moea64_pte_change(pt, &pvo->pvo_pte.lpte, 1340209975Snwhitehorn pvo->pvo_vpn); 1341209975Snwhitehorn } else if (wired) { 1342209975Snwhitehorn /* 1343209975Snwhitehorn * If we are wiring the page, and it wasn't in the 1344209975Snwhitehorn * page table before, add it. 
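 *
 * (This mirrors the tail of moea64_pvo_enter(): hash the VSID and
 * virtual address into a PTEG index, ask moea64_pte_insert() for a
 * slot, and record that slot in the PVO so moea64_pvo_to_pte() can
 * locate the hardware PTE later.)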
1345209975Snwhitehorn */ 1346209975Snwhitehorn vsid = PVO_VSID(pvo); 1347209975Snwhitehorn ptegidx = va_to_pteg(vsid, PVO_VADDR(pvo), 1348209975Snwhitehorn pvo->pvo_vaddr & PVO_LARGE); 1349209975Snwhitehorn 1350209975Snwhitehorn i = moea64_pte_insert(ptegidx, &pvo->pvo_pte.lpte); 1351209975Snwhitehorn if (i >= 0) { 1352209975Snwhitehorn PVO_PTEGIDX_CLR(pvo); 1353209975Snwhitehorn PVO_PTEGIDX_SET(pvo, i); 1354209975Snwhitehorn } 1355209975Snwhitehorn } 1356209975Snwhitehorn 1357209975Snwhitehorn UNLOCK_TABLE(); 1358190681Snwhitehorn } 1359190681Snwhitehorn PMAP_UNLOCK(pm); 1360190681Snwhitehorn} 1361190681Snwhitehorn 1362190681Snwhitehorn/* 1363190681Snwhitehorn * This goes through and sets the physical address of our 1364190681Snwhitehorn * special scratch PTE to the PA we want to zero or copy. Because 1365190681Snwhitehorn * of locking issues (this can get called in pvo_enter() by 1366190681Snwhitehorn * the UMA allocator), we can't use most other utility functions here 1367190681Snwhitehorn */ 1368190681Snwhitehorn 1369190681Snwhitehornstatic __inline 1370190681Snwhitehornvoid moea64_set_scratchpage_pa(int which, vm_offset_t pa) { 1371204694Snwhitehorn 1372209975Snwhitehorn KASSERT(!hw_direct_map, ("Using OEA64 scratchpage with a direct map!")); 1373204268Snwhitehorn mtx_assert(&moea64_scratchpage_mtx, MA_OWNED); 1374204268Snwhitehorn 1375190681Snwhitehorn moea64_scratchpage_pte[which]->pte_hi &= ~LPTE_VALID; 1376209975Snwhitehorn TLBIE(moea64_scratchpage_vpn[which]); 1377190681Snwhitehorn 1378204694Snwhitehorn moea64_scratchpage_pte[which]->pte_lo &= 1379204694Snwhitehorn ~(LPTE_WIMG | LPTE_RPGN); 1380204694Snwhitehorn moea64_scratchpage_pte[which]->pte_lo |= 1381213307Snwhitehorn moea64_calc_wimg(pa, VM_MEMATTR_DEFAULT) | (uint64_t)pa; 1382190681Snwhitehorn EIEIO(); 1383190681Snwhitehorn 1384190681Snwhitehorn moea64_scratchpage_pte[which]->pte_hi |= LPTE_VALID; 1385204042Snwhitehorn PTESYNC(); isync(); 1386190681Snwhitehorn} 1387190681Snwhitehorn 1388190681Snwhitehornvoid 1389190681Snwhitehornmoea64_copy_page(mmu_t mmu, vm_page_t msrc, vm_page_t mdst) 1390190681Snwhitehorn{ 1391190681Snwhitehorn vm_offset_t dst; 1392190681Snwhitehorn vm_offset_t src; 1393190681Snwhitehorn 1394190681Snwhitehorn dst = VM_PAGE_TO_PHYS(mdst); 1395190681Snwhitehorn src = VM_PAGE_TO_PHYS(msrc); 1396190681Snwhitehorn 1397209975Snwhitehorn if (hw_direct_map) { 1398209975Snwhitehorn kcopy((void *)src, (void *)dst, PAGE_SIZE); 1399209975Snwhitehorn } else { 1400209975Snwhitehorn mtx_lock(&moea64_scratchpage_mtx); 1401190681Snwhitehorn 1402209975Snwhitehorn moea64_set_scratchpage_pa(0,src); 1403209975Snwhitehorn moea64_set_scratchpage_pa(1,dst); 1404190681Snwhitehorn 1405209975Snwhitehorn kcopy((void *)moea64_scratchpage_va[0], 1406209975Snwhitehorn (void *)moea64_scratchpage_va[1], PAGE_SIZE); 1407190681Snwhitehorn 1408209975Snwhitehorn mtx_unlock(&moea64_scratchpage_mtx); 1409209975Snwhitehorn } 1410190681Snwhitehorn} 1411190681Snwhitehorn 1412190681Snwhitehornvoid 1413190681Snwhitehornmoea64_zero_page_area(mmu_t mmu, vm_page_t m, int off, int size) 1414190681Snwhitehorn{ 1415190681Snwhitehorn vm_offset_t pa = VM_PAGE_TO_PHYS(m); 1416190681Snwhitehorn 1417190681Snwhitehorn if (!moea64_initialized) 1418209975Snwhitehorn panic("moea64_zero_page: can't zero pa %#" PRIxPTR, pa); 1419190681Snwhitehorn if (size + off > PAGE_SIZE) 1420190681Snwhitehorn panic("moea64_zero_page: size + off > PAGE_SIZE"); 1421190681Snwhitehorn 1422209975Snwhitehorn if (hw_direct_map) { 1423209975Snwhitehorn bzero((caddr_t)pa + off, 
size); 1424209975Snwhitehorn } else { 1425209975Snwhitehorn mtx_lock(&moea64_scratchpage_mtx); 1426209975Snwhitehorn moea64_set_scratchpage_pa(0,pa); 1427209975Snwhitehorn bzero((caddr_t)moea64_scratchpage_va[0] + off, size); 1428209975Snwhitehorn mtx_unlock(&moea64_scratchpage_mtx); 1429209975Snwhitehorn } 1430190681Snwhitehorn} 1431190681Snwhitehorn 1432204269Snwhitehorn/* 1433204269Snwhitehorn * Zero a page of physical memory by temporarily mapping it 1434204269Snwhitehorn */ 1435190681Snwhitehornvoid 1436204269Snwhitehornmoea64_zero_page(mmu_t mmu, vm_page_t m) 1437204269Snwhitehorn{ 1438204269Snwhitehorn vm_offset_t pa = VM_PAGE_TO_PHYS(m); 1439209975Snwhitehorn vm_offset_t va, off; 1440204269Snwhitehorn 1441204269Snwhitehorn if (!moea64_initialized) 1442209975Snwhitehorn panic("moea64_zero_page: can't zero pa %#zx", pa); 1443204269Snwhitehorn 1444209975Snwhitehorn if (!hw_direct_map) { 1445209975Snwhitehorn mtx_lock(&moea64_scratchpage_mtx); 1446204269Snwhitehorn 1447209975Snwhitehorn moea64_set_scratchpage_pa(0,pa); 1448209975Snwhitehorn va = moea64_scratchpage_va[0]; 1449209975Snwhitehorn } else { 1450209975Snwhitehorn va = pa; 1451209975Snwhitehorn } 1452209975Snwhitehorn 1453204269Snwhitehorn for (off = 0; off < PAGE_SIZE; off += cacheline_size) 1454209975Snwhitehorn __asm __volatile("dcbz 0,%0" :: "r"(va + off)); 1455209975Snwhitehorn 1456209975Snwhitehorn if (!hw_direct_map) 1457209975Snwhitehorn mtx_unlock(&moea64_scratchpage_mtx); 1458204269Snwhitehorn} 1459204269Snwhitehorn 1460204269Snwhitehornvoid 1461190681Snwhitehornmoea64_zero_page_idle(mmu_t mmu, vm_page_t m) 1462190681Snwhitehorn{ 1463190681Snwhitehorn 1464190681Snwhitehorn moea64_zero_page(mmu, m); 1465190681Snwhitehorn} 1466190681Snwhitehorn 1467190681Snwhitehorn/* 1468190681Snwhitehorn * Map the given physical page at the specified virtual address in the 1469190681Snwhitehorn * target pmap with the protection requested. If specified the page 1470190681Snwhitehorn * will be wired down. 1471190681Snwhitehorn */ 1472190681Snwhitehornvoid 1473190681Snwhitehornmoea64_enter(mmu_t mmu, pmap_t pmap, vm_offset_t va, vm_page_t m, 1474190681Snwhitehorn vm_prot_t prot, boolean_t wired) 1475190681Snwhitehorn{ 1476190681Snwhitehorn 1477190681Snwhitehorn vm_page_lock_queues(); 1478190681Snwhitehorn PMAP_LOCK(pmap); 1479190681Snwhitehorn moea64_enter_locked(pmap, va, m, prot, wired); 1480190681Snwhitehorn vm_page_unlock_queues(); 1481190681Snwhitehorn PMAP_UNLOCK(pmap); 1482190681Snwhitehorn} 1483190681Snwhitehorn 1484190681Snwhitehorn/* 1485190681Snwhitehorn * Map the given physical page at the specified virtual address in the 1486190681Snwhitehorn * target pmap with the protection requested. If specified the page 1487190681Snwhitehorn * will be wired down. 1488190681Snwhitehorn * 1489190681Snwhitehorn * The page queues and pmap must be locked. 
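 *
 * A minimal caller therefore brackets the call exactly as
 * moea64_enter() does above:
 *
 *	vm_page_lock_queues();
 *	PMAP_LOCK(pmap);
 *	moea64_enter_locked(pmap, va, m, prot, wired);
 *	vm_page_unlock_queues();
 *	PMAP_UNLOCK(pmap);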
1490190681Snwhitehorn */ 1491190681Snwhitehorn 1492190681Snwhitehornstatic void 1493190681Snwhitehornmoea64_enter_locked(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot, 1494190681Snwhitehorn boolean_t wired) 1495190681Snwhitehorn{ 1496190681Snwhitehorn struct pvo_head *pvo_head; 1497190681Snwhitehorn uma_zone_t zone; 1498190681Snwhitehorn vm_page_t pg; 1499190681Snwhitehorn uint64_t pte_lo; 1500190681Snwhitehorn u_int pvo_flags; 1501190681Snwhitehorn int error; 1502190681Snwhitehorn 1503190681Snwhitehorn if (!moea64_initialized) { 1504190681Snwhitehorn pvo_head = &moea64_pvo_kunmanaged; 1505190681Snwhitehorn pg = NULL; 1506190681Snwhitehorn zone = moea64_upvo_zone; 1507190681Snwhitehorn pvo_flags = 0; 1508190681Snwhitehorn } else { 1509190681Snwhitehorn pvo_head = vm_page_to_pvoh(m); 1510190681Snwhitehorn pg = m; 1511190681Snwhitehorn zone = moea64_mpvo_zone; 1512190681Snwhitehorn pvo_flags = PVO_MANAGED; 1513190681Snwhitehorn } 1514190681Snwhitehorn 1515190681Snwhitehorn if (pmap_bootstrapped) 1516190681Snwhitehorn mtx_assert(&vm_page_queue_mtx, MA_OWNED); 1517190681Snwhitehorn PMAP_LOCK_ASSERT(pmap, MA_OWNED); 1518209048Salc KASSERT((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) != 0 || 1519209048Salc (m->oflags & VPO_BUSY) != 0 || VM_OBJECT_LOCKED(m->object), 1520208175Salc ("moea64_enter_locked: page %p is not busy", m)); 1521190681Snwhitehorn 1522190681Snwhitehorn /* XXX change the pvo head for fake pages */ 1523190681Snwhitehorn if ((m->flags & PG_FICTITIOUS) == PG_FICTITIOUS) { 1524190681Snwhitehorn pvo_flags &= ~PVO_MANAGED; 1525190681Snwhitehorn pvo_head = &moea64_pvo_kunmanaged; 1526190681Snwhitehorn zone = moea64_upvo_zone; 1527190681Snwhitehorn } 1528190681Snwhitehorn 1529213307Snwhitehorn pte_lo = moea64_calc_wimg(VM_PAGE_TO_PHYS(m), pmap_page_get_memattr(m)); 1530190681Snwhitehorn 1531190681Snwhitehorn if (prot & VM_PROT_WRITE) { 1532190681Snwhitehorn pte_lo |= LPTE_BW; 1533208810Salc if (pmap_bootstrapped && 1534208810Salc (m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) == 0) 1535190681Snwhitehorn vm_page_flag_set(m, PG_WRITEABLE); 1536190681Snwhitehorn } else 1537190681Snwhitehorn pte_lo |= LPTE_BR; 1538190681Snwhitehorn 1539190681Snwhitehorn if (prot & VM_PROT_EXECUTE) 1540190681Snwhitehorn pvo_flags |= VM_PROT_EXECUTE; 1541190681Snwhitehorn 1542190681Snwhitehorn if (wired) 1543190681Snwhitehorn pvo_flags |= PVO_WIRED; 1544190681Snwhitehorn 1545190681Snwhitehorn if ((m->flags & PG_FICTITIOUS) != 0) 1546190681Snwhitehorn pvo_flags |= PVO_FAKE; 1547190681Snwhitehorn 1548190681Snwhitehorn error = moea64_pvo_enter(pmap, zone, pvo_head, va, VM_PAGE_TO_PHYS(m), 1549198378Snwhitehorn pte_lo, pvo_flags); 1550190681Snwhitehorn 1551190681Snwhitehorn /* 1552190681Snwhitehorn * Flush the page from the instruction cache if this page is 1553190681Snwhitehorn * mapped executable and cacheable. 1554190681Snwhitehorn */ 1555190681Snwhitehorn if ((pte_lo & (LPTE_I | LPTE_G | LPTE_NOEXEC)) == 0) { 1556198341Smarcel moea64_syncicache(pmap, va, VM_PAGE_TO_PHYS(m), PAGE_SIZE); 1557190681Snwhitehorn } 1558190681Snwhitehorn} 1559190681Snwhitehorn 1560190681Snwhitehornstatic void 1561198341Smarcelmoea64_syncicache(pmap_t pmap, vm_offset_t va, vm_offset_t pa, vm_size_t sz) 1562190681Snwhitehorn{ 1563204042Snwhitehorn 1564190681Snwhitehorn /* 1565190681Snwhitehorn * This is much trickier than on older systems because 1566190681Snwhitehorn * we can't sync the icache on physical addresses directly 1567190681Snwhitehorn * without a direct map. 
Instead we check a couple of cases 1568190681Snwhitehorn * where the memory is already mapped in and, failing that, 1569190681Snwhitehorn * use the same trick we use for page zeroing to create 1570190681Snwhitehorn * a temporary mapping for this physical address. 1571190681Snwhitehorn */ 1572190681Snwhitehorn 1573190681Snwhitehorn if (!pmap_bootstrapped) { 1574190681Snwhitehorn /* 1575190681Snwhitehorn * If PMAP is not bootstrapped, we are likely to be 1576190681Snwhitehorn * in real mode. 1577190681Snwhitehorn */ 1578198341Smarcel __syncicache((void *)pa, sz); 1579190681Snwhitehorn } else if (pmap == kernel_pmap) { 1580198341Smarcel __syncicache((void *)va, sz); 1581209975Snwhitehorn } else if (hw_direct_map) { 1582209975Snwhitehorn __syncicache((void *)pa, sz); 1583190681Snwhitehorn } else { 1584190681Snwhitehorn /* Use the scratch page to set up a temp mapping */ 1585190681Snwhitehorn 1586190681Snwhitehorn mtx_lock(&moea64_scratchpage_mtx); 1587190681Snwhitehorn 1588204042Snwhitehorn moea64_set_scratchpage_pa(1,pa & ~ADDR_POFF); 1589204042Snwhitehorn __syncicache((void *)(moea64_scratchpage_va[1] + 1590204042Snwhitehorn (va & ADDR_POFF)), sz); 1591190681Snwhitehorn 1592190681Snwhitehorn mtx_unlock(&moea64_scratchpage_mtx); 1593190681Snwhitehorn } 1594190681Snwhitehorn} 1595190681Snwhitehorn 1596190681Snwhitehorn/* 1597190681Snwhitehorn * Maps a sequence of resident pages belonging to the same object. 1598190681Snwhitehorn * The sequence begins with the given page m_start. This page is 1599190681Snwhitehorn * mapped at the given virtual address start. Each subsequent page is 1600190681Snwhitehorn * mapped at a virtual address that is offset from start by the same 1601190681Snwhitehorn * amount as the page is offset from m_start within the object. The 1602190681Snwhitehorn * last page in the sequence is the page with the largest offset from 1603190681Snwhitehorn * m_start that can be mapped at a virtual address less than the given 1604190681Snwhitehorn * virtual address end. Not every virtual page between start and end 1605190681Snwhitehorn * is mapped; only those for which a resident page exists with the 1606190681Snwhitehorn * corresponding offset from m_start are mapped. 
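 *
 * For example, with a range of four pages where only pages 0, 1 and
 * 3 of the object are resident, mappings are entered at start,
 * start + PAGE_SIZE and start + 3 * PAGE_SIZE; the hole at page 2
 * is simply skipped.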
1607190681Snwhitehorn */ 1608190681Snwhitehornvoid 1609190681Snwhitehornmoea64_enter_object(mmu_t mmu, pmap_t pm, vm_offset_t start, vm_offset_t end, 1610190681Snwhitehorn vm_page_t m_start, vm_prot_t prot) 1611190681Snwhitehorn{ 1612190681Snwhitehorn vm_page_t m; 1613190681Snwhitehorn vm_pindex_t diff, psize; 1614190681Snwhitehorn 1615190681Snwhitehorn psize = atop(end - start); 1616190681Snwhitehorn m = m_start; 1617208574Salc vm_page_lock_queues(); 1618190681Snwhitehorn PMAP_LOCK(pm); 1619190681Snwhitehorn while (m != NULL && (diff = m->pindex - m_start->pindex) < psize) { 1620190681Snwhitehorn moea64_enter_locked(pm, start + ptoa(diff), m, prot & 1621190681Snwhitehorn (VM_PROT_READ | VM_PROT_EXECUTE), FALSE); 1622190681Snwhitehorn m = TAILQ_NEXT(m, listq); 1623190681Snwhitehorn } 1624208574Salc vm_page_unlock_queues(); 1625190681Snwhitehorn PMAP_UNLOCK(pm); 1626190681Snwhitehorn} 1627190681Snwhitehorn 1628190681Snwhitehornvoid 1629190681Snwhitehornmoea64_enter_quick(mmu_t mmu, pmap_t pm, vm_offset_t va, vm_page_t m, 1630190681Snwhitehorn vm_prot_t prot) 1631190681Snwhitehorn{ 1632207796Salc 1633207796Salc vm_page_lock_queues(); 1634190681Snwhitehorn PMAP_LOCK(pm); 1635190681Snwhitehorn moea64_enter_locked(pm, va, m, prot & (VM_PROT_READ | VM_PROT_EXECUTE), 1636190681Snwhitehorn FALSE); 1637207796Salc vm_page_unlock_queues(); 1638190681Snwhitehorn PMAP_UNLOCK(pm); 1639190681Snwhitehorn} 1640190681Snwhitehorn 1641190681Snwhitehornvm_paddr_t 1642190681Snwhitehornmoea64_extract(mmu_t mmu, pmap_t pm, vm_offset_t va) 1643190681Snwhitehorn{ 1644190681Snwhitehorn struct pvo_entry *pvo; 1645190681Snwhitehorn vm_paddr_t pa; 1646190681Snwhitehorn 1647190681Snwhitehorn PMAP_LOCK(pm); 1648209975Snwhitehorn pvo = moea64_pvo_find_va(pm, va); 1649190681Snwhitehorn if (pvo == NULL) 1650190681Snwhitehorn pa = 0; 1651190681Snwhitehorn else 1652209975Snwhitehorn pa = (pvo->pvo_pte.lpte.pte_lo & LPTE_RPGN) | 1653209975Snwhitehorn (va - PVO_VADDR(pvo)); 1654190681Snwhitehorn PMAP_UNLOCK(pm); 1655190681Snwhitehorn return (pa); 1656190681Snwhitehorn} 1657190681Snwhitehorn 1658190681Snwhitehorn/* 1659190681Snwhitehorn * Atomically extract and hold the physical page with the given 1660190681Snwhitehorn * pmap and virtual address pair if that mapping permits the given 1661190681Snwhitehorn * protection. 
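 *
 * (Note the retry loop below: vm_page_pa_tryrelock() may drop the
 * pmap lock in order to take the page lock, in which case the PVO
 * must be looked up again before the page is held.)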
1662190681Snwhitehorn */ 1663190681Snwhitehornvm_page_t 1664190681Snwhitehornmoea64_extract_and_hold(mmu_t mmu, pmap_t pmap, vm_offset_t va, vm_prot_t prot) 1665190681Snwhitehorn{ 1666190681Snwhitehorn struct pvo_entry *pvo; 1667190681Snwhitehorn vm_page_t m; 1668207410Skmacy vm_paddr_t pa; 1669190681Snwhitehorn 1670190681Snwhitehorn m = NULL; 1671207410Skmacy pa = 0; 1672190681Snwhitehorn PMAP_LOCK(pmap); 1673207410Skmacyretry: 1674209975Snwhitehorn pvo = moea64_pvo_find_va(pmap, va & ~ADDR_POFF); 1675190681Snwhitehorn if (pvo != NULL && (pvo->pvo_pte.lpte.pte_hi & LPTE_VALID) && 1676190681Snwhitehorn ((pvo->pvo_pte.lpte.pte_lo & LPTE_PP) == LPTE_RW || 1677190681Snwhitehorn (prot & VM_PROT_WRITE) == 0)) { 1678207410Skmacy if (vm_page_pa_tryrelock(pmap, 1679207410Skmacy pvo->pvo_pte.lpte.pte_lo & LPTE_RPGN, &pa)) 1680207410Skmacy goto retry; 1681190681Snwhitehorn m = PHYS_TO_VM_PAGE(pvo->pvo_pte.lpte.pte_lo & LPTE_RPGN); 1682190681Snwhitehorn vm_page_hold(m); 1683190681Snwhitehorn } 1684207410Skmacy PA_UNLOCK_COND(pa); 1685190681Snwhitehorn PMAP_UNLOCK(pmap); 1686190681Snwhitehorn return (m); 1687190681Snwhitehorn} 1688190681Snwhitehorn 1689190681Snwhitehornstatic void * 1690190681Snwhitehornmoea64_uma_page_alloc(uma_zone_t zone, int bytes, u_int8_t *flags, int wait) 1691190681Snwhitehorn{ 1692190681Snwhitehorn /* 1693190681Snwhitehorn * This entire routine is a horrible hack to avoid bothering kmem 1694190681Snwhitehorn * for new KVA addresses. Because this can get called from inside 1695190681Snwhitehorn * kmem allocation routines, calling kmem for a new address here 1696190681Snwhitehorn * can lead to multiply locking non-recursive mutexes. 1697190681Snwhitehorn */ 1698190681Snwhitehorn static vm_pindex_t color; 1699190681Snwhitehorn vm_offset_t va; 1700190681Snwhitehorn 1701190681Snwhitehorn vm_page_t m; 1702190681Snwhitehorn int pflags, needed_lock; 1703190681Snwhitehorn 1704190681Snwhitehorn *flags = UMA_SLAB_PRIV; 1705190681Snwhitehorn needed_lock = !PMAP_LOCKED(kernel_pmap); 1706190681Snwhitehorn 1707190681Snwhitehorn if (needed_lock) 1708190681Snwhitehorn PMAP_LOCK(kernel_pmap); 1709190681Snwhitehorn 1710190681Snwhitehorn if ((wait & (M_NOWAIT|M_USE_RESERVE)) == M_NOWAIT) 1711190681Snwhitehorn pflags = VM_ALLOC_INTERRUPT | VM_ALLOC_WIRED; 1712190681Snwhitehorn else 1713190681Snwhitehorn pflags = VM_ALLOC_SYSTEM | VM_ALLOC_WIRED; 1714190681Snwhitehorn if (wait & M_ZERO) 1715190681Snwhitehorn pflags |= VM_ALLOC_ZERO; 1716190681Snwhitehorn 1717190681Snwhitehorn for (;;) { 1718190681Snwhitehorn m = vm_page_alloc(NULL, color++, pflags | VM_ALLOC_NOOBJ); 1719190681Snwhitehorn if (m == NULL) { 1720190681Snwhitehorn if (wait & M_NOWAIT) 1721190681Snwhitehorn return (NULL); 1722190681Snwhitehorn VM_WAIT; 1723190681Snwhitehorn } else 1724190681Snwhitehorn break; 1725190681Snwhitehorn } 1726190681Snwhitehorn 1727204128Snwhitehorn va = VM_PAGE_TO_PHYS(m); 1728190681Snwhitehorn 1729190681Snwhitehorn moea64_pvo_enter(kernel_pmap, moea64_upvo_zone, 1730204128Snwhitehorn &moea64_pvo_kunmanaged, va, VM_PAGE_TO_PHYS(m), LPTE_M, 1731198378Snwhitehorn PVO_WIRED | PVO_BOOTSTRAP); 1732190681Snwhitehorn 1733190681Snwhitehorn if (needed_lock) 1734190681Snwhitehorn PMAP_UNLOCK(kernel_pmap); 1735198378Snwhitehorn 1736190681Snwhitehorn if ((wait & M_ZERO) && (m->flags & PG_ZERO) == 0) 1737190681Snwhitehorn bzero((void *)va, PAGE_SIZE); 1738190681Snwhitehorn 1739190681Snwhitehorn return (void *)va; 1740190681Snwhitehorn} 1741190681Snwhitehorn 1742190681Snwhitehornvoid 1743190681Snwhitehornmoea64_init(mmu_t 
mmu)
1744190681Snwhitehorn{
1745190681Snwhitehorn
1746190681Snwhitehorn CTR0(KTR_PMAP, "moea64_init");
1747190681Snwhitehorn
1748190681Snwhitehorn moea64_upvo_zone = uma_zcreate("UPVO entry", sizeof (struct pvo_entry),
1749190681Snwhitehorn NULL, NULL, NULL, NULL, UMA_ALIGN_PTR,
1750190681Snwhitehorn UMA_ZONE_VM | UMA_ZONE_NOFREE);
1751190681Snwhitehorn moea64_mpvo_zone = uma_zcreate("MPVO entry", sizeof(struct pvo_entry),
1752190681Snwhitehorn NULL, NULL, NULL, NULL, UMA_ALIGN_PTR,
1753190681Snwhitehorn UMA_ZONE_VM | UMA_ZONE_NOFREE);
1754190681Snwhitehorn
1755190681Snwhitehorn if (!hw_direct_map) {
1756190681Snwhitehorn uma_zone_set_allocf(moea64_upvo_zone,moea64_uma_page_alloc);
1757190681Snwhitehorn uma_zone_set_allocf(moea64_mpvo_zone,moea64_uma_page_alloc);
1758190681Snwhitehorn }
1759190681Snwhitehorn
1760190681Snwhitehorn moea64_initialized = TRUE;
1761190681Snwhitehorn}
1762190681Snwhitehorn
1763190681Snwhitehornboolean_t
1764207155Salcmoea64_is_referenced(mmu_t mmu, vm_page_t m)
1765207155Salc{
1766207155Salc
1767208574Salc KASSERT((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) == 0,
1768208574Salc ("moea64_is_referenced: page %p is not managed", m));
1769207155Salc return (moea64_query_bit(m, LPTE_REF));
1770207155Salc}
1771207155Salc
1772207155Salcboolean_t
1773190681Snwhitehornmoea64_is_modified(mmu_t mmu, vm_page_t m)
1774190681Snwhitehorn{
1775190681Snwhitehorn
1776208504Salc KASSERT((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) == 0,
1777208504Salc ("moea64_is_modified: page %p is not managed", m));
1778208504Salc
1779208504Salc /*
1780208504Salc * If the page is not VPO_BUSY, then PG_WRITEABLE cannot be
1781208504Salc * concurrently set while the object is locked. Thus, if PG_WRITEABLE
1782208504Salc * is clear, no PTEs can have LPTE_CHG set.
1783208504Salc */
1784208504Salc VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED);
1785208504Salc if ((m->oflags & VPO_BUSY) == 0 &&
1786208504Salc (m->flags & PG_WRITEABLE) == 0)
1787190681Snwhitehorn return (FALSE);
1788208574Salc return (moea64_query_bit(m, LPTE_CHG));
1789190681Snwhitehorn}
1790190681Snwhitehorn
1791214617Salcboolean_t
1792214617Salcmoea64_is_prefaultable(mmu_t mmu, pmap_t pmap, vm_offset_t va)
1793214617Salc{
1794214617Salc struct pvo_entry *pvo;
1795214617Salc boolean_t rv;
1796214617Salc
1797214617Salc PMAP_LOCK(pmap);
1798214617Salc pvo = moea64_pvo_find_va(pmap, va & ~ADDR_POFF);
1799214617Salc rv = pvo == NULL || (pvo->pvo_pte.lpte.pte_hi & LPTE_VALID) == 0;
1800214617Salc PMAP_UNLOCK(pmap);
1801214617Salc return (rv);
1802214617Salc}
1803214617Salc
1804190681Snwhitehornvoid
1805190681Snwhitehornmoea64_clear_reference(mmu_t mmu, vm_page_t m)
1806190681Snwhitehorn{
1807190681Snwhitehorn
1808208504Salc KASSERT((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) == 0,
1809208504Salc ("moea64_clear_reference: page %p is not managed", m));
1810208990Salc moea64_clear_bit(m, LPTE_REF);
1811190681Snwhitehorn}
1812190681Snwhitehorn
1813190681Snwhitehornvoid
1814190681Snwhitehornmoea64_clear_modify(mmu_t mmu, vm_page_t m)
1815190681Snwhitehorn{
1816190681Snwhitehorn
1817208504Salc KASSERT((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) == 0,
1818208504Salc ("moea64_clear_modify: page %p is not managed", m));
1819208504Salc VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED);
1820208504Salc KASSERT((m->oflags & VPO_BUSY) == 0,
1821208504Salc ("moea64_clear_modify: page %p is busy", m));
1822208504Salc
1823208504Salc /*
1824208504Salc * If the page is not PG_WRITEABLE, then no PTEs can have LPTE_CHG
1825208504Salc * set.
If the object containing the page is locked and the page is 1826208504Salc * not VPO_BUSY, then PG_WRITEABLE cannot be concurrently set. 1827208504Salc */ 1828208504Salc if ((m->flags & PG_WRITEABLE) == 0) 1829190681Snwhitehorn return; 1830208990Salc moea64_clear_bit(m, LPTE_CHG); 1831190681Snwhitehorn} 1832190681Snwhitehorn 1833190681Snwhitehorn/* 1834190681Snwhitehorn * Clear the write and modified bits in each of the given page's mappings. 1835190681Snwhitehorn */ 1836190681Snwhitehornvoid 1837190681Snwhitehornmoea64_remove_write(mmu_t mmu, vm_page_t m) 1838190681Snwhitehorn{ 1839190681Snwhitehorn struct pvo_entry *pvo; 1840190681Snwhitehorn struct lpte *pt; 1841190681Snwhitehorn pmap_t pmap; 1842190681Snwhitehorn uint64_t lo; 1843190681Snwhitehorn 1844208175Salc KASSERT((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) == 0, 1845208175Salc ("moea64_remove_write: page %p is not managed", m)); 1846208175Salc 1847208175Salc /* 1848208175Salc * If the page is not VPO_BUSY, then PG_WRITEABLE cannot be set by 1849208175Salc * another thread while the object is locked. Thus, if PG_WRITEABLE 1850208175Salc * is clear, no page table entries need updating. 1851208175Salc */ 1852208175Salc VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED); 1853208175Salc if ((m->oflags & VPO_BUSY) == 0 && 1854190681Snwhitehorn (m->flags & PG_WRITEABLE) == 0) 1855190681Snwhitehorn return; 1856207796Salc vm_page_lock_queues(); 1857190681Snwhitehorn lo = moea64_attr_fetch(m); 1858190681Snwhitehorn SYNC(); 1859190681Snwhitehorn LIST_FOREACH(pvo, vm_page_to_pvoh(m), pvo_vlink) { 1860190681Snwhitehorn pmap = pvo->pvo_pmap; 1861190681Snwhitehorn PMAP_LOCK(pmap); 1862205370Snwhitehorn LOCK_TABLE(); 1863190681Snwhitehorn if ((pvo->pvo_pte.lpte.pte_lo & LPTE_PP) != LPTE_BR) { 1864209975Snwhitehorn pt = moea64_pvo_to_pte(pvo); 1865190681Snwhitehorn pvo->pvo_pte.lpte.pte_lo &= ~LPTE_PP; 1866190681Snwhitehorn pvo->pvo_pte.lpte.pte_lo |= LPTE_BR; 1867190681Snwhitehorn if (pt != NULL) { 1868190681Snwhitehorn moea64_pte_synch(pt, &pvo->pvo_pte.lpte); 1869190681Snwhitehorn lo |= pvo->pvo_pte.lpte.pte_lo; 1870190681Snwhitehorn pvo->pvo_pte.lpte.pte_lo &= ~LPTE_CHG; 1871190681Snwhitehorn moea64_pte_change(pt, &pvo->pvo_pte.lpte, 1872209975Snwhitehorn pvo->pvo_vpn); 1873209975Snwhitehorn if (pvo->pvo_pmap == kernel_pmap) 1874209975Snwhitehorn isync(); 1875190681Snwhitehorn } 1876190681Snwhitehorn } 1877205370Snwhitehorn UNLOCK_TABLE(); 1878190681Snwhitehorn PMAP_UNLOCK(pmap); 1879190681Snwhitehorn } 1880190681Snwhitehorn if ((lo & LPTE_CHG) != 0) { 1881190681Snwhitehorn moea64_attr_clear(m, LPTE_CHG); 1882190681Snwhitehorn vm_page_dirty(m); 1883190681Snwhitehorn } 1884190681Snwhitehorn vm_page_flag_clear(m, PG_WRITEABLE); 1885207796Salc vm_page_unlock_queues(); 1886190681Snwhitehorn} 1887190681Snwhitehorn 1888190681Snwhitehorn/* 1889190681Snwhitehorn * moea64_ts_referenced: 1890190681Snwhitehorn * 1891190681Snwhitehorn * Return a count of reference bits for a page, clearing those bits. 1892190681Snwhitehorn * It is not necessary for every reference bit to be cleared, but it 1893190681Snwhitehorn * is necessary that 0 only be returned when there are truly no 1894190681Snwhitehorn * reference bits set. 1895190681Snwhitehorn * 1896190681Snwhitehorn * XXX: The exact number of bits to check and clear is a matter that 1897190681Snwhitehorn * should be tested and standardized at some point in the future for 1898190681Snwhitehorn * optimal aging of shared pages. 
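 *
 * Here the count comes straight from moea64_clear_bit(), which
 * visits every PVO of the page, clears LPTE_REF in each valid PTE,
 * and tallies how many had the bit set.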
1899190681Snwhitehorn */ 1900190681Snwhitehornboolean_t 1901190681Snwhitehornmoea64_ts_referenced(mmu_t mmu, vm_page_t m) 1902190681Snwhitehorn{ 1903190681Snwhitehorn 1904208990Salc KASSERT((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) == 0, 1905208990Salc ("moea64_ts_referenced: page %p is not managed", m)); 1906208990Salc return (moea64_clear_bit(m, LPTE_REF)); 1907190681Snwhitehorn} 1908190681Snwhitehorn 1909190681Snwhitehorn/* 1910213307Snwhitehorn * Modify the WIMG settings of all mappings for a page. 1911213307Snwhitehorn */ 1912213307Snwhitehornvoid 1913213307Snwhitehornmoea64_page_set_memattr(mmu_t mmu, vm_page_t m, vm_memattr_t ma) 1914213307Snwhitehorn{ 1915213307Snwhitehorn struct pvo_entry *pvo; 1916213335Snwhitehorn struct pvo_head *pvo_head; 1917213307Snwhitehorn struct lpte *pt; 1918213307Snwhitehorn pmap_t pmap; 1919213307Snwhitehorn uint64_t lo; 1920213307Snwhitehorn 1921213335Snwhitehorn if (m->flags & PG_FICTITIOUS) { 1922213335Snwhitehorn m->md.mdpg_cache_attrs = ma; 1923213335Snwhitehorn return; 1924213335Snwhitehorn } 1925213335Snwhitehorn 1926213307Snwhitehorn vm_page_lock_queues(); 1927213335Snwhitehorn pvo_head = vm_page_to_pvoh(m); 1928213307Snwhitehorn lo = moea64_calc_wimg(VM_PAGE_TO_PHYS(m), ma); 1929213335Snwhitehorn LIST_FOREACH(pvo, pvo_head, pvo_vlink) { 1930213307Snwhitehorn pmap = pvo->pvo_pmap; 1931213307Snwhitehorn PMAP_LOCK(pmap); 1932213307Snwhitehorn LOCK_TABLE(); 1933213307Snwhitehorn pt = moea64_pvo_to_pte(pvo); 1934213307Snwhitehorn pvo->pvo_pte.lpte.pte_lo &= ~LPTE_WIMG; 1935213307Snwhitehorn pvo->pvo_pte.lpte.pte_lo |= lo; 1936213307Snwhitehorn if (pt != NULL) { 1937213307Snwhitehorn moea64_pte_change(pt, &pvo->pvo_pte.lpte, 1938213307Snwhitehorn pvo->pvo_vpn); 1939213307Snwhitehorn if (pvo->pvo_pmap == kernel_pmap) 1940213307Snwhitehorn isync(); 1941213307Snwhitehorn } 1942213307Snwhitehorn UNLOCK_TABLE(); 1943213307Snwhitehorn PMAP_UNLOCK(pmap); 1944213307Snwhitehorn } 1945213307Snwhitehorn m->md.mdpg_cache_attrs = ma; 1946213307Snwhitehorn vm_page_unlock_queues(); 1947213307Snwhitehorn} 1948213307Snwhitehorn 1949213307Snwhitehorn/* 1950190681Snwhitehorn * Map a wired page into kernel virtual address space. 1951190681Snwhitehorn */ 1952190681Snwhitehornvoid 1953213307Snwhitehornmoea64_kenter_attr(mmu_t mmu, vm_offset_t va, vm_offset_t pa, vm_memattr_t ma) 1954190681Snwhitehorn{ 1955190681Snwhitehorn uint64_t pte_lo; 1956190681Snwhitehorn int error; 1957190681Snwhitehorn 1958213307Snwhitehorn pte_lo = moea64_calc_wimg(pa, ma); 1959190681Snwhitehorn 1960190681Snwhitehorn PMAP_LOCK(kernel_pmap); 1961190681Snwhitehorn error = moea64_pvo_enter(kernel_pmap, moea64_upvo_zone, 1962190681Snwhitehorn &moea64_pvo_kunmanaged, va, pa, pte_lo, 1963198378Snwhitehorn PVO_WIRED | VM_PROT_EXECUTE); 1964190681Snwhitehorn 1965190681Snwhitehorn if (error != 0 && error != ENOENT) 1966209975Snwhitehorn panic("moea64_kenter: failed to enter va %#zx pa %#zx: %d", va, 1967190681Snwhitehorn pa, error); 1968190681Snwhitehorn 1969190681Snwhitehorn /* 1970190681Snwhitehorn * Flush the memory from the instruction cache. 
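 *
 * (Only cacheable mappings need this: LPTE_I or LPTE_G marks
 * cache-inhibited or guarded, device-like memory that is not
 * expected to hold instructions.)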
1971190681Snwhitehorn */ 1972190681Snwhitehorn if ((pte_lo & (LPTE_I | LPTE_G)) == 0) { 1973190681Snwhitehorn __syncicache((void *)va, PAGE_SIZE); 1974190681Snwhitehorn } 1975190681Snwhitehorn PMAP_UNLOCK(kernel_pmap); 1976190681Snwhitehorn} 1977190681Snwhitehorn 1978213307Snwhitehornvoid 1979213307Snwhitehornmoea64_kenter(mmu_t mmu, vm_offset_t va, vm_offset_t pa) 1980213307Snwhitehorn{ 1981213307Snwhitehorn 1982213307Snwhitehorn moea64_kenter_attr(mmu, va, pa, VM_MEMATTR_DEFAULT); 1983213307Snwhitehorn} 1984213307Snwhitehorn 1985190681Snwhitehorn/* 1986190681Snwhitehorn * Extract the physical page address associated with the given kernel virtual 1987190681Snwhitehorn * address. 1988190681Snwhitehorn */ 1989190681Snwhitehornvm_offset_t 1990190681Snwhitehornmoea64_kextract(mmu_t mmu, vm_offset_t va) 1991190681Snwhitehorn{ 1992190681Snwhitehorn struct pvo_entry *pvo; 1993190681Snwhitehorn vm_paddr_t pa; 1994190681Snwhitehorn 1995205370Snwhitehorn /* 1996205370Snwhitehorn * Shortcut the direct-mapped case when applicable. We never put 1997205370Snwhitehorn * anything but 1:1 mappings below VM_MIN_KERNEL_ADDRESS. 1998205370Snwhitehorn */ 1999205370Snwhitehorn if (va < VM_MIN_KERNEL_ADDRESS) 2000205370Snwhitehorn return (va); 2001205370Snwhitehorn 2002190681Snwhitehorn PMAP_LOCK(kernel_pmap); 2003209975Snwhitehorn pvo = moea64_pvo_find_va(kernel_pmap, va); 2004209975Snwhitehorn KASSERT(pvo != NULL, ("moea64_kextract: no addr found for %#" PRIxPTR, 2005209975Snwhitehorn va)); 2006209975Snwhitehorn pa = (pvo->pvo_pte.lpte.pte_lo & LPTE_RPGN) + (va - PVO_VADDR(pvo)); 2007190681Snwhitehorn PMAP_UNLOCK(kernel_pmap); 2008190681Snwhitehorn return (pa); 2009190681Snwhitehorn} 2010190681Snwhitehorn 2011190681Snwhitehorn/* 2012190681Snwhitehorn * Remove a wired page from kernel virtual address space. 2013190681Snwhitehorn */ 2014190681Snwhitehornvoid 2015190681Snwhitehornmoea64_kremove(mmu_t mmu, vm_offset_t va) 2016190681Snwhitehorn{ 2017190681Snwhitehorn moea64_remove(mmu, kernel_pmap, va, va + PAGE_SIZE); 2018190681Snwhitehorn} 2019190681Snwhitehorn 2020190681Snwhitehorn/* 2021190681Snwhitehorn * Map a range of physical addresses into kernel virtual address space. 2022190681Snwhitehorn * 2023190681Snwhitehorn * The value passed in *virt is a suggested virtual address for the mapping. 2024190681Snwhitehorn * Architectures which can support a direct-mapped physical to virtual region 2025190681Snwhitehorn * can return the appropriate address within that region, leaving '*virt' 2026190681Snwhitehorn * unchanged. We cannot and therefore do not; *virt is updated with the 2027190681Snwhitehorn * first usable address after the mapped region. 2028190681Snwhitehorn */ 2029190681Snwhitehornvm_offset_t 2030190681Snwhitehornmoea64_map(mmu_t mmu, vm_offset_t *virt, vm_offset_t pa_start, 2031190681Snwhitehorn vm_offset_t pa_end, int prot) 2032190681Snwhitehorn{ 2033190681Snwhitehorn vm_offset_t sva, va; 2034190681Snwhitehorn 2035190681Snwhitehorn sva = *virt; 2036190681Snwhitehorn va = sva; 2037190681Snwhitehorn for (; pa_start < pa_end; pa_start += PAGE_SIZE, va += PAGE_SIZE) 2038190681Snwhitehorn moea64_kenter(mmu, va, pa_start); 2039190681Snwhitehorn *virt = va; 2040190681Snwhitehorn 2041190681Snwhitehorn return (sva); 2042190681Snwhitehorn} 2043190681Snwhitehorn 2044190681Snwhitehorn/* 2045190681Snwhitehorn * Returns true if the pmap's pv is one of the first 2046190681Snwhitehorn * 16 pvs linked to from this page. 
This count may
2047190681Snwhitehorn * be changed upwards or downwards in the future; it
2048190681Snwhitehorn * is only necessary that true be returned for a small
2049190681Snwhitehorn * subset of pmaps for proper page aging.
2050190681Snwhitehorn */
2051190681Snwhitehornboolean_t
2052190681Snwhitehornmoea64_page_exists_quick(mmu_t mmu, pmap_t pmap, vm_page_t m)
2053190681Snwhitehorn{
2054190681Snwhitehorn int loops;
2055190681Snwhitehorn struct pvo_entry *pvo;
2056208990Salc boolean_t rv;
2057190681Snwhitehorn
2058208990Salc KASSERT((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) == 0,
2059208990Salc ("moea64_page_exists_quick: page %p is not managed", m));
2060190681Snwhitehorn loops = 0;
2061208990Salc rv = FALSE;
2062208990Salc vm_page_lock_queues();
2063190681Snwhitehorn LIST_FOREACH(pvo, vm_page_to_pvoh(m), pvo_vlink) {
2064208990Salc if (pvo->pvo_pmap == pmap) {
2065208990Salc rv = TRUE;
2066208990Salc break;
2067208990Salc }
2068190681Snwhitehorn if (++loops >= 16)
2069190681Snwhitehorn break;
2070190681Snwhitehorn }
2071208990Salc vm_page_unlock_queues();
2072208990Salc return (rv);
2073190681Snwhitehorn}
2074190681Snwhitehorn
2075190681Snwhitehorn/*
2076190681Snwhitehorn * Return the number of managed mappings to the given physical page
2077190681Snwhitehorn * that are wired.
2078190681Snwhitehorn */
2079190681Snwhitehornint
2080190681Snwhitehornmoea64_page_wired_mappings(mmu_t mmu, vm_page_t m)
2081190681Snwhitehorn{
2082190681Snwhitehorn struct pvo_entry *pvo;
2083190681Snwhitehorn int count;
2084190681Snwhitehorn
2085190681Snwhitehorn count = 0;
2086208990Salc if ((m->flags & PG_FICTITIOUS) != 0)
2087190681Snwhitehorn return (count);
2088207796Salc vm_page_lock_queues();
2089190681Snwhitehorn LIST_FOREACH(pvo, vm_page_to_pvoh(m), pvo_vlink)
2090190681Snwhitehorn if ((pvo->pvo_vaddr & PVO_WIRED) != 0)
2091190681Snwhitehorn count++;
2092207796Salc vm_page_unlock_queues();
2093190681Snwhitehorn return (count);
2094190681Snwhitehorn}
2095190681Snwhitehorn
2096209975Snwhitehornstatic uintptr_t moea64_vsidcontext;
2097190681Snwhitehorn
2098209975Snwhitehornuintptr_t
2099209975Snwhitehornmoea64_get_unique_vsid(void) {
2100209975Snwhitehorn u_int entropy;
2101209975Snwhitehorn register_t hash;
2102209975Snwhitehorn uint32_t mask;
2103209975Snwhitehorn int i;
2104190681Snwhitehorn
2105190681Snwhitehorn entropy = 0;
2106190681Snwhitehorn __asm __volatile("mftb %0" : "=r"(entropy));
2107190681Snwhitehorn
2108211967Snwhitehorn mtx_lock(&moea64_slb_mutex);
2109209975Snwhitehorn for (i = 0; i < NVSIDS; i += VSID_NBPW) {
2110209975Snwhitehorn u_int n;
2111190681Snwhitehorn
2112190681Snwhitehorn /*
2113190681Snwhitehorn * Create a new value by multiplying by a prime and adding in
2114190681Snwhitehorn * entropy from the timebase register. This is to make the
2115190681Snwhitehorn * VSID more random so that the PT hash function collides
2116190681Snwhitehorn * less often. (Note that the prime causes gcc to do shifts
2117190681Snwhitehorn * instead of a multiply.)
2118190681Snwhitehorn */
2119190681Snwhitehorn moea64_vsidcontext = (moea64_vsidcontext * 0x1105) + entropy;
2120209975Snwhitehorn hash = moea64_vsidcontext & (NVSIDS - 1);
2121190681Snwhitehorn if (hash == 0) /* 0 is special, avoid it */
2122190681Snwhitehorn continue;
2123190681Snwhitehorn n = hash >> 5;
2124190681Snwhitehorn mask = 1 << (hash & (VSID_NBPW - 1));
2125209975Snwhitehorn hash = (moea64_vsidcontext & VSID_HASHMASK);
2126190681Snwhitehorn if (moea64_vsid_bitmap[n] & mask) { /* collision?
*/ 2127190681Snwhitehorn /* anything free in this bucket? */ 2128190681Snwhitehorn if (moea64_vsid_bitmap[n] == 0xffffffff) { 2129190681Snwhitehorn entropy = (moea64_vsidcontext >> 20); 2130190681Snwhitehorn continue; 2131190681Snwhitehorn } 2132212322Snwhitehorn i = ffs(~moea64_vsid_bitmap[n]) - 1; 2133190681Snwhitehorn mask = 1 << i; 2134209975Snwhitehorn hash &= VSID_HASHMASK & ~(VSID_NBPW - 1); 2135190681Snwhitehorn hash |= i; 2136190681Snwhitehorn } 2137212322Snwhitehorn KASSERT(!(moea64_vsid_bitmap[n] & mask), 2138212331Snwhitehorn ("Allocating in-use VSID %#zx\n", hash)); 2139190681Snwhitehorn moea64_vsid_bitmap[n] |= mask; 2140211967Snwhitehorn mtx_unlock(&moea64_slb_mutex); 2141209975Snwhitehorn return (hash); 2142190681Snwhitehorn } 2143190681Snwhitehorn 2144211967Snwhitehorn mtx_unlock(&moea64_slb_mutex); 2145209975Snwhitehorn panic("%s: out of segments",__func__); 2146190681Snwhitehorn} 2147190681Snwhitehorn 2148209975Snwhitehorn#ifdef __powerpc64__ 2149209975Snwhitehornvoid 2150209975Snwhitehornmoea64_pinit(mmu_t mmu, pmap_t pmap) 2151209975Snwhitehorn{ 2152209975Snwhitehorn PMAP_LOCK_INIT(pmap); 2153209975Snwhitehorn 2154212715Snwhitehorn pmap->pm_slb_tree_root = slb_alloc_tree(); 2155209975Snwhitehorn pmap->pm_slb = slb_alloc_user_cache(); 2156212722Snwhitehorn pmap->pm_slb_len = 0; 2157209975Snwhitehorn} 2158209975Snwhitehorn#else 2159209975Snwhitehornvoid 2160209975Snwhitehornmoea64_pinit(mmu_t mmu, pmap_t pmap) 2161209975Snwhitehorn{ 2162209975Snwhitehorn int i; 2163212308Snwhitehorn uint32_t hash; 2164209975Snwhitehorn 2165209975Snwhitehorn PMAP_LOCK_INIT(pmap); 2166209975Snwhitehorn 2167209975Snwhitehorn if (pmap_bootstrapped) 2168209975Snwhitehorn pmap->pmap_phys = (pmap_t)moea64_kextract(mmu, 2169209975Snwhitehorn (vm_offset_t)pmap); 2170209975Snwhitehorn else 2171209975Snwhitehorn pmap->pmap_phys = pmap; 2172209975Snwhitehorn 2173209975Snwhitehorn /* 2174209975Snwhitehorn * Allocate some segment registers for this pmap. 2175209975Snwhitehorn */ 2176209975Snwhitehorn hash = moea64_get_unique_vsid(); 2177209975Snwhitehorn 2178209975Snwhitehorn for (i = 0; i < 16; i++) 2179209975Snwhitehorn pmap->pm_sr[i] = VSID_MAKE(i, hash); 2180212308Snwhitehorn 2181212308Snwhitehorn KASSERT(pmap->pm_sr[0] != 0, ("moea64_pinit: pm_sr[0] = 0")); 2182209975Snwhitehorn} 2183209975Snwhitehorn#endif 2184209975Snwhitehorn 2185190681Snwhitehorn/* 2186190681Snwhitehorn * Initialize the pmap associated with process 0. 2187190681Snwhitehorn */ 2188190681Snwhitehornvoid 2189190681Snwhitehornmoea64_pinit0(mmu_t mmu, pmap_t pm) 2190190681Snwhitehorn{ 2191190681Snwhitehorn moea64_pinit(mmu, pm); 2192190681Snwhitehorn bzero(&pm->pm_stats, sizeof(pm->pm_stats)); 2193190681Snwhitehorn} 2194190681Snwhitehorn 2195190681Snwhitehorn/* 2196190681Snwhitehorn * Set the physical protection on the specified range of this map as requested. 
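 *
 * For example, making a single page of the current process
 * read/execute only would be:
 *
 *	moea64_protect(mmu, pm, va, va + PAGE_SIZE,
 *	    VM_PROT_READ | VM_PROT_EXECUTE);
 *
 * Dropping VM_PROT_READ entirely unmaps the range, as the first
 * check below shows.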
2197190681Snwhitehorn */ 2198190681Snwhitehornvoid 2199190681Snwhitehornmoea64_protect(mmu_t mmu, pmap_t pm, vm_offset_t sva, vm_offset_t eva, 2200190681Snwhitehorn vm_prot_t prot) 2201190681Snwhitehorn{ 2202190681Snwhitehorn struct pvo_entry *pvo; 2203190681Snwhitehorn struct lpte *pt; 2204190681Snwhitehorn 2205190681Snwhitehorn CTR4(KTR_PMAP, "moea64_protect: pm=%p sva=%#x eva=%#x prot=%#x", pm, sva, 2206190681Snwhitehorn eva, prot); 2207190681Snwhitehorn 2208190681Snwhitehorn 2209190681Snwhitehorn KASSERT(pm == &curproc->p_vmspace->vm_pmap || pm == kernel_pmap, 2210190681Snwhitehorn ("moea64_protect: non current pmap")); 2211190681Snwhitehorn 2212190681Snwhitehorn if ((prot & VM_PROT_READ) == VM_PROT_NONE) { 2213190681Snwhitehorn moea64_remove(mmu, pm, sva, eva); 2214190681Snwhitehorn return; 2215190681Snwhitehorn } 2216190681Snwhitehorn 2217190681Snwhitehorn vm_page_lock_queues(); 2218190681Snwhitehorn PMAP_LOCK(pm); 2219190681Snwhitehorn for (; sva < eva; sva += PAGE_SIZE) { 2220209975Snwhitehorn pvo = moea64_pvo_find_va(pm, sva); 2221190681Snwhitehorn if (pvo == NULL) 2222190681Snwhitehorn continue; 2223190681Snwhitehorn 2224190681Snwhitehorn /* 2225190681Snwhitehorn * Grab the PTE pointer before we diddle with the cached PTE 2226190681Snwhitehorn * copy. 2227190681Snwhitehorn */ 2228190681Snwhitehorn LOCK_TABLE(); 2229209975Snwhitehorn pt = moea64_pvo_to_pte(pvo); 2230190681Snwhitehorn 2231190681Snwhitehorn /* 2232190681Snwhitehorn * Change the protection of the page. 2233190681Snwhitehorn */ 2234190681Snwhitehorn pvo->pvo_pte.lpte.pte_lo &= ~LPTE_PP; 2235190681Snwhitehorn pvo->pvo_pte.lpte.pte_lo |= LPTE_BR; 2236190681Snwhitehorn pvo->pvo_pte.lpte.pte_lo &= ~LPTE_NOEXEC; 2237190681Snwhitehorn if ((prot & VM_PROT_EXECUTE) == 0) 2238190681Snwhitehorn pvo->pvo_pte.lpte.pte_lo |= LPTE_NOEXEC; 2239190681Snwhitehorn 2240190681Snwhitehorn /* 2241190681Snwhitehorn * If the PVO is in the page table, update that pte as well. 2242190681Snwhitehorn */ 2243190681Snwhitehorn if (pt != NULL) { 2244209975Snwhitehorn moea64_pte_change(pt, &pvo->pvo_pte.lpte, pvo->pvo_vpn); 2245190681Snwhitehorn if ((pvo->pvo_pte.lpte.pte_lo & 2246190681Snwhitehorn (LPTE_I | LPTE_G | LPTE_NOEXEC)) == 0) { 2247198341Smarcel moea64_syncicache(pm, sva, 2248198341Smarcel pvo->pvo_pte.lpte.pte_lo & LPTE_RPGN, 2249198341Smarcel PAGE_SIZE); 2250190681Snwhitehorn } 2251190681Snwhitehorn } 2252190681Snwhitehorn UNLOCK_TABLE(); 2253190681Snwhitehorn } 2254190681Snwhitehorn vm_page_unlock_queues(); 2255190681Snwhitehorn PMAP_UNLOCK(pm); 2256190681Snwhitehorn} 2257190681Snwhitehorn 2258190681Snwhitehorn/* 2259190681Snwhitehorn * Map a list of wired pages into kernel virtual address space. This is 2260190681Snwhitehorn * intended for temporary mappings which do not need page modification or 2261190681Snwhitehorn * references recorded. Existing mappings in the region are overwritten. 2262190681Snwhitehorn */ 2263190681Snwhitehornvoid 2264190681Snwhitehornmoea64_qenter(mmu_t mmu, vm_offset_t va, vm_page_t *m, int count) 2265190681Snwhitehorn{ 2266190681Snwhitehorn while (count-- > 0) { 2267190681Snwhitehorn moea64_kenter(mmu, va, VM_PAGE_TO_PHYS(*m)); 2268190681Snwhitehorn va += PAGE_SIZE; 2269190681Snwhitehorn m++; 2270190681Snwhitehorn } 2271190681Snwhitehorn} 2272190681Snwhitehorn 2273190681Snwhitehorn/* 2274190681Snwhitehorn * Remove page mappings from kernel virtual address space. Intended for 2275190681Snwhitehorn * temporary mappings entered by moea64_qenter. 
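 *
 * The usual pattern brackets a temporary batch mapping (sketch;
 * 'mp' and 'npages' are illustrative names):
 *
 *	moea64_qenter(mmu, sva, mp, npages);
 *	... access the pages through sva ...
 *	moea64_qremove(mmu, sva, npages);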
2276190681Snwhitehorn */ 2277190681Snwhitehornvoid 2278190681Snwhitehornmoea64_qremove(mmu_t mmu, vm_offset_t va, int count) 2279190681Snwhitehorn{ 2280190681Snwhitehorn while (count-- > 0) { 2281190681Snwhitehorn moea64_kremove(mmu, va); 2282190681Snwhitehorn va += PAGE_SIZE; 2283190681Snwhitehorn } 2284190681Snwhitehorn} 2285190681Snwhitehorn 2286190681Snwhitehornvoid 2287209975Snwhitehornmoea64_release_vsid(uint64_t vsid) 2288209975Snwhitehorn{ 2289212044Snwhitehorn int idx, mask; 2290209975Snwhitehorn 2291212044Snwhitehorn mtx_lock(&moea64_slb_mutex); 2292212044Snwhitehorn idx = vsid & (NVSIDS-1); 2293212044Snwhitehorn mask = 1 << (idx % VSID_NBPW); 2294212044Snwhitehorn idx /= VSID_NBPW; 2295212308Snwhitehorn KASSERT(moea64_vsid_bitmap[idx] & mask, 2296212308Snwhitehorn ("Freeing unallocated VSID %#jx", vsid)); 2297212044Snwhitehorn moea64_vsid_bitmap[idx] &= ~mask; 2298212044Snwhitehorn mtx_unlock(&moea64_slb_mutex); 2299209975Snwhitehorn} 2300209975Snwhitehorn 2301209975Snwhitehorn 2302209975Snwhitehornvoid 2303190681Snwhitehornmoea64_release(mmu_t mmu, pmap_t pmap) 2304190681Snwhitehorn{ 2305190681Snwhitehorn 2306190681Snwhitehorn /* 2307209975Snwhitehorn * Free segment registers' VSIDs 2308190681Snwhitehorn */ 2309209975Snwhitehorn #ifdef __powerpc64__ 2310212715Snwhitehorn slb_free_tree(pmap); 2311209975Snwhitehorn slb_free_user_cache(pmap->pm_slb); 2312209975Snwhitehorn #else 2313212308Snwhitehorn KASSERT(pmap->pm_sr[0] != 0, ("moea64_release: pm_sr[0] = 0")); 2314190681Snwhitehorn 2315212308Snwhitehorn moea64_release_vsid(VSID_TO_HASH(pmap->pm_sr[0])); 2316209975Snwhitehorn #endif 2317209975Snwhitehorn 2318190681Snwhitehorn PMAP_LOCK_DESTROY(pmap); 2319190681Snwhitehorn} 2320190681Snwhitehorn 2321190681Snwhitehorn/* 2322190681Snwhitehorn * Remove the given range of addresses from the specified map. 2323190681Snwhitehorn */ 2324190681Snwhitehornvoid 2325190681Snwhitehornmoea64_remove(mmu_t mmu, pmap_t pm, vm_offset_t sva, vm_offset_t eva) 2326190681Snwhitehorn{ 2327190681Snwhitehorn struct pvo_entry *pvo; 2328190681Snwhitehorn 2329190681Snwhitehorn vm_page_lock_queues(); 2330190681Snwhitehorn PMAP_LOCK(pm); 2331190681Snwhitehorn for (; sva < eva; sva += PAGE_SIZE) { 2332209975Snwhitehorn pvo = moea64_pvo_find_va(pm, sva); 2333209975Snwhitehorn if (pvo != NULL) 2334209975Snwhitehorn moea64_pvo_remove(pvo); 2335190681Snwhitehorn } 2336190681Snwhitehorn vm_page_unlock_queues(); 2337190681Snwhitehorn PMAP_UNLOCK(pm); 2338190681Snwhitehorn} 2339190681Snwhitehorn 2340190681Snwhitehorn/* 2341190681Snwhitehorn * Remove physical page from all pmaps in which it resides. moea64_pvo_remove() 2342190681Snwhitehorn * will reflect changes in pte's back to the vm_page. 
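 *
 * Once the last PVO is gone, any recorded modification is pushed
 * back to the page with vm_page_dirty() and PG_WRITEABLE is
 * cleared, since no writable mapping can remain.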
2343190681Snwhitehorn */ 2344190681Snwhitehornvoid 2345190681Snwhitehornmoea64_remove_all(mmu_t mmu, vm_page_t m) 2346190681Snwhitehorn{ 2347190681Snwhitehorn struct pvo_head *pvo_head; 2348190681Snwhitehorn struct pvo_entry *pvo, *next_pvo; 2349190681Snwhitehorn pmap_t pmap; 2350190681Snwhitehorn 2351207796Salc vm_page_lock_queues(); 2352190681Snwhitehorn pvo_head = vm_page_to_pvoh(m); 2353190681Snwhitehorn for (pvo = LIST_FIRST(pvo_head); pvo != NULL; pvo = next_pvo) { 2354190681Snwhitehorn next_pvo = LIST_NEXT(pvo, pvo_vlink); 2355190681Snwhitehorn 2356190681Snwhitehorn MOEA_PVO_CHECK(pvo); /* sanity check */ 2357190681Snwhitehorn pmap = pvo->pvo_pmap; 2358190681Snwhitehorn PMAP_LOCK(pmap); 2359209975Snwhitehorn moea64_pvo_remove(pvo); 2360190681Snwhitehorn PMAP_UNLOCK(pmap); 2361190681Snwhitehorn } 2362204042Snwhitehorn if ((m->flags & PG_WRITEABLE) && moea64_is_modified(mmu, m)) { 2363204042Snwhitehorn moea64_attr_clear(m, LPTE_CHG); 2364204042Snwhitehorn vm_page_dirty(m); 2365204042Snwhitehorn } 2366190681Snwhitehorn vm_page_flag_clear(m, PG_WRITEABLE); 2367207796Salc vm_page_unlock_queues(); 2368190681Snwhitehorn} 2369190681Snwhitehorn 2370190681Snwhitehorn/* 2371190681Snwhitehorn * Allocate a physical page of memory directly from the phys_avail map. 2372190681Snwhitehorn * Can only be called from moea64_bootstrap before avail start and end are 2373190681Snwhitehorn * calculated. 2374190681Snwhitehorn */ 2375190681Snwhitehornstatic vm_offset_t 2376190681Snwhitehornmoea64_bootstrap_alloc(vm_size_t size, u_int align) 2377190681Snwhitehorn{ 2378190681Snwhitehorn vm_offset_t s, e; 2379190681Snwhitehorn int i, j; 2380190681Snwhitehorn 2381190681Snwhitehorn size = round_page(size); 2382190681Snwhitehorn for (i = 0; phys_avail[i + 1] != 0; i += 2) { 2383190681Snwhitehorn if (align != 0) 2384190681Snwhitehorn s = (phys_avail[i] + align - 1) & ~(align - 1); 2385190681Snwhitehorn else 2386190681Snwhitehorn s = phys_avail[i]; 2387190681Snwhitehorn e = s + size; 2388190681Snwhitehorn 2389190681Snwhitehorn if (s < phys_avail[i] || e > phys_avail[i + 1]) 2390190681Snwhitehorn continue; 2391190681Snwhitehorn 2392190681Snwhitehorn if (s == phys_avail[i]) { 2393190681Snwhitehorn phys_avail[i] += size; 2394190681Snwhitehorn } else if (e == phys_avail[i + 1]) { 2395190681Snwhitehorn phys_avail[i + 1] -= size; 2396190681Snwhitehorn } else { 2397190681Snwhitehorn for (j = phys_avail_count * 2; j > i; j -= 2) { 2398190681Snwhitehorn phys_avail[j] = phys_avail[j - 2]; 2399190681Snwhitehorn phys_avail[j + 1] = phys_avail[j - 1]; 2400190681Snwhitehorn } 2401190681Snwhitehorn 2402190681Snwhitehorn phys_avail[i + 3] = phys_avail[i + 1]; 2403190681Snwhitehorn phys_avail[i + 1] = s; 2404190681Snwhitehorn phys_avail[i + 2] = e; 2405190681Snwhitehorn phys_avail_count++; 2406190681Snwhitehorn } 2407190681Snwhitehorn 2408190681Snwhitehorn return (s); 2409190681Snwhitehorn } 2410190681Snwhitehorn panic("moea64_bootstrap_alloc: could not allocate memory"); 2411190681Snwhitehorn} 2412190681Snwhitehorn 2413190681Snwhitehornstatic void 2414190681Snwhitehorntlbia(void) 2415190681Snwhitehorn{ 2416190681Snwhitehorn vm_offset_t i; 2417209975Snwhitehorn #ifndef __powerpc64__ 2418198378Snwhitehorn register_t msr, scratch; 2419209975Snwhitehorn #endif 2420190681Snwhitehorn 2421209975Snwhitehorn TLBSYNC(); 2422209975Snwhitehorn 2423198378Snwhitehorn for (i = 0; i < 0xFF000; i += 0x00001000) { 2424209975Snwhitehorn #ifdef __powerpc64__ 2425209975Snwhitehorn __asm __volatile("tlbiel %0" :: "r"(i)); 2426209975Snwhitehorn #else 
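	/*
	 * Running a 32-bit kernel on a 64-bit CPU: temporarily flip
	 * the MSR into 64-bit mode (the insrdi below sets MSR[SF]) so
	 * that tlbiel is legal, then restore the original MSR.
	 */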
2427198378Snwhitehorn __asm __volatile("\ 2428198378Snwhitehorn mfmsr %0; \ 2429198378Snwhitehorn mr %1, %0; \ 2430198378Snwhitehorn insrdi %1,%3,1,0; \ 2431198378Snwhitehorn mtmsrd %1; \ 2432209975Snwhitehorn isync; \ 2433198378Snwhitehorn \ 2434198378Snwhitehorn tlbiel %2; \ 2435198378Snwhitehorn \ 2436198378Snwhitehorn mtmsrd %0; \ 2437209975Snwhitehorn isync;" 2438198378Snwhitehorn : "=r"(msr), "=r"(scratch) : "r"(i), "r"(1)); 2439209975Snwhitehorn #endif 2440198378Snwhitehorn } 2441209975Snwhitehorn 2442209975Snwhitehorn EIEIO(); 2443209975Snwhitehorn TLBSYNC(); 2444190681Snwhitehorn} 2445190681Snwhitehorn 2446209975Snwhitehorn#ifdef __powerpc64__ 2447209975Snwhitehornstatic void 2448209975Snwhitehornslbia(void) 2449209975Snwhitehorn{ 2450209975Snwhitehorn register_t seg0; 2451209975Snwhitehorn 2452209975Snwhitehorn __asm __volatile ("slbia"); 2453209975Snwhitehorn __asm __volatile ("slbmfee %0,%1; slbie %0;" : "=r"(seg0) : "r"(0)); 2454209975Snwhitehorn} 2455209975Snwhitehorn#endif 2456209975Snwhitehorn 2457190681Snwhitehornstatic int 2458190681Snwhitehornmoea64_pvo_enter(pmap_t pm, uma_zone_t zone, struct pvo_head *pvo_head, 2459198378Snwhitehorn vm_offset_t va, vm_offset_t pa, uint64_t pte_lo, int flags) 2460190681Snwhitehorn{ 2461190681Snwhitehorn struct pvo_entry *pvo; 2462190681Snwhitehorn uint64_t vsid; 2463190681Snwhitehorn int first; 2464190681Snwhitehorn u_int ptegidx; 2465190681Snwhitehorn int i; 2466190681Snwhitehorn int bootstrap; 2467190681Snwhitehorn 2468190681Snwhitehorn /* 2469190681Snwhitehorn * One nasty thing that can happen here is that the UMA calls to 2470190681Snwhitehorn * allocate new PVOs need to map more memory, which calls pvo_enter(), 2471190681Snwhitehorn * which calls UMA... 2472190681Snwhitehorn * 2473190681Snwhitehorn * We break the loop by detecting recursion and allocating out of 2474190681Snwhitehorn * the bootstrap pool. 2475190681Snwhitehorn */ 2476190681Snwhitehorn 2477190681Snwhitehorn first = 0; 2478190681Snwhitehorn bootstrap = (flags & PVO_BOOTSTRAP); 2479190681Snwhitehorn 2480190681Snwhitehorn if (!moea64_initialized) 2481190681Snwhitehorn bootstrap = 1; 2482190681Snwhitehorn 2483190681Snwhitehorn /* 2484190681Snwhitehorn * Compute the PTE Group index. 2485190681Snwhitehorn */ 2486190681Snwhitehorn va &= ~ADDR_POFF; 2487190681Snwhitehorn vsid = va_to_vsid(pm, va); 2488209975Snwhitehorn ptegidx = va_to_pteg(vsid, va, flags & PVO_LARGE); 2489190681Snwhitehorn 2490190681Snwhitehorn /* 2491190681Snwhitehorn * Remove any existing mapping for this page. Reuse the pvo entry if 2492190681Snwhitehorn * there is a mapping. 
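 *
 * (Matching is by pmap and page-aligned VA; the PTEG index computed
 * above selects a single overflow list, so the LIST_FOREACH below
 * only scans PVOs that hash to this PTEG.)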

#ifdef __powerpc64__
static void
slbia(void)
{
	register_t seg0;

	__asm __volatile ("slbia");
	__asm __volatile ("slbmfee %0,%1; slbie %0;" : "=r"(seg0) : "r"(0));
}
#endif
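
/*
 * Note on the pair of instructions above: slbia does not invalidate
 * SLB entry 0, so the slbmfee/slbie sequence reads entry 0's contents
 * back and invalidates it explicitly, leaving the SLB entirely empty.
 */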

static int
moea64_pvo_enter(pmap_t pm, uma_zone_t zone, struct pvo_head *pvo_head,
    vm_offset_t va, vm_offset_t pa, uint64_t pte_lo, int flags)
{
	struct	pvo_entry *pvo;
	uint64_t vsid;
	int	first;
	u_int	ptegidx;
	int	i;
	int	bootstrap;

	/*
	 * One nasty thing that can happen here is that the UMA calls to
	 * allocate new PVOs need to map more memory, which calls pvo_enter(),
	 * which calls UMA...
	 *
	 * We break the loop by detecting recursion and allocating out of
	 * the bootstrap pool.
	 */

	first = 0;
	bootstrap = (flags & PVO_BOOTSTRAP);

	if (!moea64_initialized)
		bootstrap = 1;

	/*
	 * Compute the PTE Group index.
	 */
	va &= ~ADDR_POFF;
	vsid = va_to_vsid(pm, va);
	ptegidx = va_to_pteg(vsid, va, flags & PVO_LARGE);
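
	/*
	 * Sketch of the index computation (illustrative; VSID_HASH_MASK is
	 * assumed here, the other names appear elsewhere in this file):
	 * va_to_pteg() computes the classic PowerPC hashed-page-table
	 * index, roughly
	 *
	 *	hash = (vsid & VSID_HASH_MASK) ^
	 *	    (((uint64_t)va & ADDR_PIDX) >> ADDR_PIDX_SHFT);
	 *	ptegidx = hash & moea64_pteg_mask;
	 *
	 * with large mappings shifting by the large-page size instead of
	 * the 4KB page size.
	 */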

	/*
	 * Remove any existing mapping for this page. Reuse the pvo entry if
	 * there is a mapping.
	 */
	LOCK_TABLE();

	moea64_pvo_enter_calls++;

	LIST_FOREACH(pvo, &moea64_pvo_table[ptegidx], pvo_olink) {
		if (pvo->pvo_pmap == pm && PVO_VADDR(pvo) == va) {
			if ((pvo->pvo_pte.lpte.pte_lo & LPTE_RPGN) == pa &&
			    (pvo->pvo_pte.lpte.pte_lo & LPTE_PP) ==
			    (pte_lo & LPTE_PP)) {
				if (!(pvo->pvo_pte.lpte.pte_hi & LPTE_VALID)) {
					/* Re-insert if spilled */
					i = moea64_pte_insert(ptegidx,
					    &pvo->pvo_pte.lpte);
					if (i >= 0)
						PVO_PTEGIDX_SET(pvo, i);
					moea64_pte_overflow--;
				}
				UNLOCK_TABLE();
				return (0);
			}
			moea64_pvo_remove(pvo);
			break;
		}
	}

	/*
	 * If we aren't overwriting a mapping, try to allocate.
	 */
	if (bootstrap) {
		if (moea64_bpvo_pool_index >= BPVO_POOL_SIZE) {
			panic("moea64_enter: bpvo pool exhausted, %d, %d, %zd",
			    moea64_bpvo_pool_index, BPVO_POOL_SIZE,
			    BPVO_POOL_SIZE * sizeof(struct pvo_entry));
		}
		pvo = &moea64_bpvo_pool[moea64_bpvo_pool_index];
		moea64_bpvo_pool_index++;
		bootstrap = 1;
	} else {
		/*
		 * Note: drop the table lock around the UMA allocation in
		 * case the UMA allocator needs to manipulate the page
		 * table. The mapping we are working with is already
		 * protected by the PMAP lock.
		 */
		UNLOCK_TABLE();
		pvo = uma_zalloc(zone, M_NOWAIT);
		LOCK_TABLE();
	}

	if (pvo == NULL) {
		UNLOCK_TABLE();
		return (ENOMEM);
	}

	moea64_pvo_entries++;
	pvo->pvo_vaddr = va;
	pvo->pvo_vpn = (uint64_t)((va & ADDR_PIDX) >> ADDR_PIDX_SHFT)
	    | (vsid << 16);
	pvo->pvo_pmap = pm;
	LIST_INSERT_HEAD(&moea64_pvo_table[ptegidx], pvo, pvo_olink);
	pvo->pvo_vaddr &= ~ADDR_POFF;

	if (!(flags & VM_PROT_EXECUTE))
		pte_lo |= LPTE_NOEXEC;
	if (flags & PVO_WIRED)
		pvo->pvo_vaddr |= PVO_WIRED;
	if (pvo_head != &moea64_pvo_kunmanaged)
		pvo->pvo_vaddr |= PVO_MANAGED;
	if (bootstrap)
		pvo->pvo_vaddr |= PVO_BOOTSTRAP;
	if (flags & PVO_FAKE)
		pvo->pvo_vaddr |= PVO_FAKE;
	if (flags & PVO_LARGE)
		pvo->pvo_vaddr |= PVO_LARGE;

	moea64_pte_create(&pvo->pvo_pte.lpte, vsid, va,
	    (uint64_t)(pa) | pte_lo, flags);

	/*
	 * Remember if the list was empty and therefore will be the first
	 * item.
	 */
	if (LIST_FIRST(pvo_head) == NULL)
		first = 1;
	LIST_INSERT_HEAD(pvo_head, pvo, pvo_vlink);

	if (pvo->pvo_vaddr & PVO_WIRED) {
		pvo->pvo_pte.lpte.pte_hi |= LPTE_WIRED;
		pm->pm_stats.wired_count++;
	}
	pm->pm_stats.resident_count++;

	/*
	 * We hope this succeeds but it isn't required.
	 */
	i = moea64_pte_insert(ptegidx, &pvo->pvo_pte.lpte);
	if (i >= 0) {
		PVO_PTEGIDX_SET(pvo, i);
	} else {
		panic("moea64_pvo_enter: overflow");
		moea64_pte_overflow++;
	}

	if (pm == kernel_pmap)
		isync();

	UNLOCK_TABLE();

#ifdef __powerpc64__
	/*
	 * Make sure all our bootstrap mappings are in the SLB as soon
	 * as virtual memory is switched on.
	 */
	if (!pmap_bootstrapped)
		moea64_bootstrap_slb_prefault(va, flags & PVO_LARGE);
#endif

	return (first ? ENOENT : 0);
}
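
/*
 * The ENOENT return above is not an error: it reports that this was the
 * first mapping entered for the page, letting a caller distinguish a
 * brand-new page (ENOENT) from an additional alias (0).
 */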

static void
moea64_pvo_remove(struct pvo_entry *pvo)
{
	struct	lpte *pt;

	/*
	 * If there is an active pte entry, we need to deactivate it (and
	 * save the ref & chg bits).
	 */
	LOCK_TABLE();
	pt = moea64_pvo_to_pte(pvo);
	if (pt != NULL) {
		moea64_pte_unset(pt, &pvo->pvo_pte.lpte, pvo->pvo_vpn);
		PVO_PTEGIDX_CLR(pvo);
	} else {
		moea64_pte_overflow--;
	}

	/*
	 * Update our statistics.
	 */
	pvo->pvo_pmap->pm_stats.resident_count--;
	if (pvo->pvo_vaddr & PVO_WIRED)
		pvo->pvo_pmap->pm_stats.wired_count--;

	/*
	 * Save the REF/CHG bits into their cache if the page is managed.
	 */
	if ((pvo->pvo_vaddr & (PVO_MANAGED|PVO_FAKE)) == PVO_MANAGED) {
		struct	vm_page *pg;

		pg = PHYS_TO_VM_PAGE(pvo->pvo_pte.lpte.pte_lo & LPTE_RPGN);
		if (pg != NULL) {
			moea64_attr_save(pg, pvo->pvo_pte.lpte.pte_lo &
			    (LPTE_REF | LPTE_CHG));
		}
	}

	/*
	 * Remove this PVO from the PV list.
	 */
	LIST_REMOVE(pvo, pvo_vlink);

	/*
	 * Remove this from the overflow list and return it to the pool
	 * if we aren't going to reuse it.
	 */
	LIST_REMOVE(pvo, pvo_olink);

	moea64_pvo_entries--;
	moea64_pvo_remove_calls++;

	UNLOCK_TABLE();

	if (!(pvo->pvo_vaddr & PVO_BOOTSTRAP))
		uma_zfree((pvo->pvo_vaddr & PVO_MANAGED) ? moea64_mpvo_zone :
		    moea64_upvo_zone, pvo);
}
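
/*
 * The moea64_attr_save() call above caches the hardware REF/CHG bits in
 * the vm_page before the PTE goes away, so moea64_query_bit() and
 * moea64_clear_bit() below can still report them for pages whose
 * mappings have been torn down.
 */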

static struct pvo_entry *
moea64_pvo_find_va(pmap_t pm, vm_offset_t va)
{
	struct		pvo_entry *pvo;
	int		ptegidx;
	uint64_t	vsid;
#ifdef __powerpc64__
	uint64_t	slbv;

	if (pm == kernel_pmap) {
		slbv = kernel_va_to_slbv(va);
	} else {
		struct slb *slb;
		slb = user_va_to_slb_entry(pm, va);
		/* The page is not mapped if the segment isn't */
		if (slb == NULL)
			return (NULL);
		slbv = slb->slbv;
	}

	vsid = (slbv & SLBV_VSID_MASK) >> SLBV_VSID_SHIFT;
	if (slbv & SLBV_L)
		va &= ~moea64_large_page_mask;
	else
		va &= ~ADDR_POFF;
	ptegidx = va_to_pteg(vsid, va, slbv & SLBV_L);
#else
	va &= ~ADDR_POFF;
	vsid = va_to_vsid(pm, va);
	ptegidx = va_to_pteg(vsid, va, 0);
#endif

	LOCK_TABLE();
	LIST_FOREACH(pvo, &moea64_pvo_table[ptegidx], pvo_olink) {
		if (pvo->pvo_pmap == pm && PVO_VADDR(pvo) == va)
			break;
	}
	UNLOCK_TABLE();

	return (pvo);
}
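
/*
 * On powerpc64 the VSID cannot be derived from the address alone: it
 * lives in the SLB entry covering the segment, which also carries the
 * large-page bit (SLBV_L) that decides how much of the address is page
 * offset. A missing SLB entry therefore means the whole segment, and
 * with it the page, is unmapped.
 */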

static struct lpte *
moea64_pvo_to_pte(const struct pvo_entry *pvo)
{
	struct lpte 	*pt;
	int		pteidx, ptegidx;
	uint64_t	vsid;

	ASSERT_TABLE_LOCK();

	/* If the PTEG index is not set, then there is no page table entry */
	if (!PVO_PTEGIDX_ISSET(pvo))
		return (NULL);

	/*
	 * Calculate the ptegidx
	 */
	vsid = PVO_VSID(pvo);
	ptegidx = va_to_pteg(vsid, PVO_VADDR(pvo),
	    pvo->pvo_vaddr & PVO_LARGE);

	/*
	 * We can find the actual pte entry without searching by grabbing
	 * the PTEG index from 3 unused bits in pvo_vaddr and by
	 * noticing the HID bit.
	 */
	if (pvo->pvo_pte.lpte.pte_hi & LPTE_HID)
		ptegidx ^= moea64_pteg_mask;

	pteidx = (ptegidx << 3) | PVO_PTEGIDX_GET(pvo);

	if ((pvo->pvo_pte.lpte.pte_hi & LPTE_VALID) &&
	    !PVO_PTEGIDX_ISSET(pvo)) {
		panic("moea64_pvo_to_pte: pvo %p has valid pte in pvo but no "
		    "valid pte index", pvo);
	}

	if ((pvo->pvo_pte.lpte.pte_hi & LPTE_VALID) == 0 &&
	    PVO_PTEGIDX_ISSET(pvo)) {
		panic("moea64_pvo_to_pte: pvo %p has valid pte index in pvo "
		    "but no valid pte", pvo);
	}

	pt = &moea64_pteg_table[pteidx >> 3].pt[pteidx & 7];
	if ((pt->pte_hi ^ (pvo->pvo_pte.lpte.pte_hi & ~LPTE_VALID)) ==
	    LPTE_VALID) {
		if ((pvo->pvo_pte.lpte.pte_hi & LPTE_VALID) == 0) {
			panic("moea64_pvo_to_pte: pvo %p has valid pte in "
			    "moea64_pteg_table %p but invalid in pvo", pvo, pt);
		}

		if (((pt->pte_lo ^ pvo->pvo_pte.lpte.pte_lo) &
		    ~(LPTE_M|LPTE_CHG|LPTE_REF)) != 0) {
			panic("moea64_pvo_to_pte: pvo %p pte does not match "
			    "pte %p in moea64_pteg_table difference is %#x",
			    pvo, pt,
			    (uint32_t)(pt->pte_lo ^ pvo->pvo_pte.lpte.pte_lo));
		}

		return (pt);
	}

	if (pvo->pvo_pte.lpte.pte_hi & LPTE_VALID) {
		panic("moea64_pvo_to_pte: pvo %p has invalid pte %p in "
		    "moea64_pteg_table but valid in pvo", pvo, pt);
	}

	return (NULL);
}

static __inline int
moea64_pte_spillable_ident(u_int ptegidx)
{
	struct	lpte *pt;
	int	i, j, k;

	/* Start at a random slot */
	i = mftb() % 8;
	k = -1;
	for (j = 0; j < 8; j++) {
		pt = &moea64_pteg_table[ptegidx].pt[(i + j) % 8];
		if (pt->pte_hi & (LPTE_LOCKED | LPTE_WIRED))
			continue;

		/* This is a candidate, so remember it */
		k = (i + j) % 8;

		/* Try to get a page that has not been used lately */
		if (!(pt->pte_lo & LPTE_REF))
			return (k);
	}

	return (k);
}
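
/*
 * The victim search above starts at a slot derived from the timebase
 * (mftb() % 8) so that repeated spills do not always evict the same
 * entry, skips LOCKED and WIRED entries outright, and prefers a slot
 * whose LPTE_REF bit is clear, on the theory that an unreferenced
 * translation is the cheapest one to lose.
 */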

static int
moea64_pte_insert(u_int ptegidx, struct lpte *pvo_pt)
{
	struct	lpte *pt;
	struct	pvo_entry *pvo;
	u_int	pteg_bktidx;
	int	i;

	ASSERT_TABLE_LOCK();

	/*
	 * First try primary hash.
	 */
	pteg_bktidx = ptegidx;
	for (pt = moea64_pteg_table[pteg_bktidx].pt, i = 0; i < 8; i++, pt++) {
		if ((pt->pte_hi & (LPTE_VALID | LPTE_LOCKED)) == 0) {
			pvo_pt->pte_hi &= ~LPTE_HID;
			moea64_pte_set(pt, pvo_pt);
			return (i);
		}
	}

	/*
	 * Now try secondary hash.
	 */
	pteg_bktidx ^= moea64_pteg_mask;
	for (pt = moea64_pteg_table[pteg_bktidx].pt, i = 0; i < 8; i++, pt++) {
		if ((pt->pte_hi & (LPTE_VALID | LPTE_LOCKED)) == 0) {
			pvo_pt->pte_hi |= LPTE_HID;
			moea64_pte_set(pt, pvo_pt);
			return (i);
		}
	}

	/*
	 * Out of luck. Find a PTE to sacrifice.
	 */
	pteg_bktidx = ptegidx;
	i = moea64_pte_spillable_ident(pteg_bktidx);
	if (i < 0) {
		pteg_bktidx ^= moea64_pteg_mask;
		i = moea64_pte_spillable_ident(pteg_bktidx);
	}

	if (i < 0) {
		/* No freeable slots in either PTEG? We're hosed. */
		panic("moea64_pte_insert: overflow");
		return (-1);
	}

	if (pteg_bktidx == ptegidx)
		pvo_pt->pte_hi &= ~LPTE_HID;
	else
		pvo_pt->pte_hi |= LPTE_HID;

	/*
	 * Synchronize the sacrifice PTE with its PVO, then mark both
	 * invalid. The PVO will be reused when/if the VM system comes
	 * here after a fault.
	 */
	pt = &moea64_pteg_table[pteg_bktidx].pt[i];

	if (pt->pte_hi & LPTE_HID)
		pteg_bktidx ^= moea64_pteg_mask; /* PTEs indexed by primary */

	LIST_FOREACH(pvo, &moea64_pvo_table[pteg_bktidx], pvo_olink) {
		if (pvo->pvo_pte.lpte.pte_hi == pt->pte_hi) {
			KASSERT(pvo->pvo_pte.lpte.pte_hi & LPTE_VALID,
			    ("Invalid PVO for valid PTE!"));
			moea64_pte_unset(pt, &pvo->pvo_pte.lpte, pvo->pvo_vpn);
			PVO_PTEGIDX_CLR(pvo);
			moea64_pte_overflow++;
			break;
		}
	}

	KASSERT(pvo->pvo_pte.lpte.pte_hi == pt->pte_hi,
	    ("Unable to find PVO for spilled PTE"));

	/*
	 * Set the new PTE.
	 */
	moea64_pte_set(pt, pvo_pt);

	return (i);
}
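
/*
 * Sketch of the two-bucket scheme used above: a mapping may live in one
 * of two PTEGs, and LPTE_HID records which hash produced the slot:
 *
 *	primary   index = ptegidx			(LPTE_HID clear)
 *	secondary index = ptegidx ^ moea64_pteg_mask	(LPTE_HID set)
 *
 * This is why moea64_pvo_to_pte() XORs with moea64_pteg_mask when it
 * sees LPTE_HID, and why the spill path above re-derives the primary
 * index before walking the PVO overflow list.
 */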

static boolean_t
moea64_query_bit(vm_page_t m, u_int64_t ptebit)
{
	struct	pvo_entry *pvo;
	struct	lpte *pt;

	if (moea64_attr_fetch(m) & ptebit)
		return (TRUE);

	vm_page_lock_queues();

	LIST_FOREACH(pvo, vm_page_to_pvoh(m), pvo_vlink) {
		MOEA_PVO_CHECK(pvo);	/* sanity check */

		/*
		 * See if we saved the bit off. If so, cache it and return
		 * success.
		 */
		if (pvo->pvo_pte.lpte.pte_lo & ptebit) {
			moea64_attr_save(m, ptebit);
			MOEA_PVO_CHECK(pvo);	/* sanity check */
			vm_page_unlock_queues();
			return (TRUE);
		}
	}

	/*
	 * No luck, now go through the hard part of looking at the PTEs
	 * themselves. Sync so that any pending REF/CHG bits are flushed to
	 * the PTEs.
	 */
	SYNC();
	LIST_FOREACH(pvo, vm_page_to_pvoh(m), pvo_vlink) {
		MOEA_PVO_CHECK(pvo);	/* sanity check */

		/*
		 * See if this pvo has a valid PTE. If so, fetch the
		 * REF/CHG bits from the valid PTE. If the appropriate
		 * ptebit is set, cache it and return success.
		 */
		LOCK_TABLE();
		pt = moea64_pvo_to_pte(pvo);
		if (pt != NULL) {
			moea64_pte_synch(pt, &pvo->pvo_pte.lpte);
			if (pvo->pvo_pte.lpte.pte_lo & ptebit) {
				UNLOCK_TABLE();

				moea64_attr_save(m, ptebit);
				MOEA_PVO_CHECK(pvo);	/* sanity check */
				vm_page_unlock_queues();
				return (TRUE);
			}
		}
		UNLOCK_TABLE();
	}

	vm_page_unlock_queues();
	return (FALSE);
}

static u_int
moea64_clear_bit(vm_page_t m, u_int64_t ptebit)
{
	u_int	count;
	struct	pvo_entry *pvo;
	struct	lpte *pt;

	vm_page_lock_queues();

	/*
	 * Clear the cached value.
	 */
	moea64_attr_clear(m, ptebit);

	/*
	 * Sync so that any pending REF/CHG bits are flushed to the PTEs (so
	 * we can reset the right ones). Note that since the pvo entries and
	 * list heads are accessed via BAT0 and are never placed in the page
	 * table, we don't have to worry about further accesses setting the
	 * REF/CHG bits.
	 */
	SYNC();

	/*
	 * For each pvo entry, clear the pvo's ptebit. If this pvo has a
	 * valid pte clear the ptebit from the valid pte.
	 */
	count = 0;
	LIST_FOREACH(pvo, vm_page_to_pvoh(m), pvo_vlink) {
		MOEA_PVO_CHECK(pvo);	/* sanity check */

		LOCK_TABLE();
		pt = moea64_pvo_to_pte(pvo);
		if (pt != NULL) {
			moea64_pte_synch(pt, &pvo->pvo_pte.lpte);
			if (pvo->pvo_pte.lpte.pte_lo & ptebit) {
				count++;
				moea64_pte_clear(pt, pvo->pvo_vpn, ptebit);
			}
		}
		pvo->pvo_pte.lpte.pte_lo &= ~ptebit;
		MOEA_PVO_CHECK(pvo);	/* sanity check */
		UNLOCK_TABLE();
	}

	vm_page_unlock_queues();
	return (count);
}
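
/*
 * The count returned above is the number of mappings whose hardware bit
 * was actually set; a caller such as moea64_ts_referenced() can return
 * it directly as the page's reference count.
 */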

boolean_t
moea64_dev_direct_mapped(mmu_t mmu, vm_offset_t pa, vm_size_t size)
{
	struct pvo_entry *pvo;
	vm_offset_t ppa;
	int error = 0;

	PMAP_LOCK(kernel_pmap);
	for (ppa = pa & ~ADDR_POFF; ppa < pa + size; ppa += PAGE_SIZE) {
		pvo = moea64_pvo_find_va(kernel_pmap, ppa);
		if (pvo == NULL ||
		    (pvo->pvo_pte.lpte.pte_lo & LPTE_RPGN) != ppa) {
			error = EFAULT;
			break;
		}
	}
	PMAP_UNLOCK(kernel_pmap);

	return (error);
}

/*
 * Map a set of physical memory pages into the kernel virtual
 * address space. Return a pointer to where it is mapped. This
 * routine is intended to be used for mapping device memory,
 * NOT real memory.
 */
void *
moea64_mapdev_attr(mmu_t mmu, vm_offset_t pa, vm_size_t size, vm_memattr_t ma)
{
	vm_offset_t va, tmpva, ppa, offset;

	ppa = trunc_page(pa);
	offset = pa & PAGE_MASK;
	size = roundup(offset + size, PAGE_SIZE);

	va = kmem_alloc_nofault(kernel_map, size);

	if (!va)
		panic("moea64_mapdev: Couldn't alloc kernel virtual memory");

	for (tmpva = va; size > 0;) {
		moea64_kenter_attr(mmu, tmpva, ppa, ma);
		size -= PAGE_SIZE;
		tmpva += PAGE_SIZE;
		ppa += PAGE_SIZE;
	}

	return ((void *)(va + offset));
}

void *
moea64_mapdev(mmu_t mmu, vm_offset_t pa, vm_size_t size)
{

	return (moea64_mapdev_attr(mmu, pa, size, VM_MEMATTR_DEFAULT));
}

void
moea64_unmapdev(mmu_t mmu, vm_offset_t va, vm_size_t size)
{
	vm_offset_t base, offset;

	base = trunc_page(va);
	offset = va & PAGE_MASK;
	size = roundup(offset + size, PAGE_SIZE);

	kmem_free(kernel_map, base, size);
}

static void
moea64_sync_icache(mmu_t mmu, pmap_t pm, vm_offset_t va, vm_size_t sz)
{
	struct pvo_entry *pvo;
	vm_offset_t lim;
	vm_paddr_t pa;
	vm_size_t len;

	PMAP_LOCK(pm);
	while (sz > 0) {
		lim = round_page(va + 1);
		len = MIN(lim - va, sz);
		pvo = moea64_pvo_find_va(pm, va & ~ADDR_POFF);
		if (pvo != NULL) {
			pa = (pvo->pvo_pte.lpte.pte_lo & LPTE_RPGN) |
			    (va & ADDR_POFF);
			moea64_syncicache(pm, va, pa, len);
		}
		va += len;
		sz -= len;
	}
	PMAP_UNLOCK(pm);
}
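
/*
 * Usage sketch (the physical address is hypothetical): mappings made
 * with moea64_mapdev() are released with a matching moea64_unmapdev()
 * of the same length.
 *
 *	void *regs;
 *
 *	regs = moea64_mapdev(mmu, 0xf1000000, PAGE_SIZE);
 *	... access device registers through regs ...
 *	moea64_unmapdev(mmu, (vm_offset_t)regs, PAGE_SIZE);
 */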