1139825Simp/*- 290643Sbenno * Copyright (c) 2001 The NetBSD Foundation, Inc. 390643Sbenno * All rights reserved. 490643Sbenno * 590643Sbenno * This code is derived from software contributed to The NetBSD Foundation 690643Sbenno * by Matt Thomas <matt@3am-software.com> of Allegro Networks, Inc. 790643Sbenno * 890643Sbenno * Redistribution and use in source and binary forms, with or without 990643Sbenno * modification, are permitted provided that the following conditions 1090643Sbenno * are met: 1190643Sbenno * 1. Redistributions of source code must retain the above copyright 1290643Sbenno * notice, this list of conditions and the following disclaimer. 1390643Sbenno * 2. Redistributions in binary form must reproduce the above copyright 1490643Sbenno * notice, this list of conditions and the following disclaimer in the 1590643Sbenno * documentation and/or other materials provided with the distribution. 1690643Sbenno * 3. All advertising materials mentioning features or use of this software 1790643Sbenno * must display the following acknowledgement: 1890643Sbenno * This product includes software developed by the NetBSD 1990643Sbenno * Foundation, Inc. and its contributors. 2090643Sbenno * 4. Neither the name of The NetBSD Foundation nor the names of its 2190643Sbenno * contributors may be used to endorse or promote products derived 2290643Sbenno * from this software without specific prior written permission. 2390643Sbenno * 2490643Sbenno * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS 2590643Sbenno * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED 2690643Sbenno * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR 2790643Sbenno * PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS 2890643Sbenno * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 2990643Sbenno * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 3090643Sbenno * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 3190643Sbenno * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 3290643Sbenno * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 3390643Sbenno * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 3490643Sbenno * POSSIBILITY OF SUCH DAMAGE. 3590643Sbenno */ 36139825Simp/*- 3777957Sbenno * Copyright (C) 1995, 1996 Wolfgang Solfrank. 3877957Sbenno * Copyright (C) 1995, 1996 TooLs GmbH. 3977957Sbenno * All rights reserved. 4077957Sbenno * 4177957Sbenno * Redistribution and use in source and binary forms, with or without 4277957Sbenno * modification, are permitted provided that the following conditions 4377957Sbenno * are met: 4477957Sbenno * 1. Redistributions of source code must retain the above copyright 4577957Sbenno * notice, this list of conditions and the following disclaimer. 4677957Sbenno * 2. Redistributions in binary form must reproduce the above copyright 4777957Sbenno * notice, this list of conditions and the following disclaimer in the 4877957Sbenno * documentation and/or other materials provided with the distribution. 4977957Sbenno * 3. All advertising materials mentioning features or use of this software 5077957Sbenno * must display the following acknowledgement: 5177957Sbenno * This product includes software developed by TooLs GmbH. 5277957Sbenno * 4. The name of TooLs GmbH may not be used to endorse or promote products 5377957Sbenno * derived from this software without specific prior written permission. 
5477957Sbenno * 5577957Sbenno * THIS SOFTWARE IS PROVIDED BY TOOLS GMBH ``AS IS'' AND ANY EXPRESS OR 5677957Sbenno * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES 5777957Sbenno * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 5877957Sbenno * IN NO EVENT SHALL TOOLS GMBH BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 5977957Sbenno * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, 6077957Sbenno * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; 6177957Sbenno * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, 6277957Sbenno * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR 6377957Sbenno * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF 6477957Sbenno * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 6577957Sbenno * 6678880Sbenno * $NetBSD: pmap.c,v 1.28 2000/03/26 20:42:36 kleink Exp $ 6777957Sbenno */ 68139825Simp/*- 6977957Sbenno * Copyright (C) 2001 Benno Rice. 7077957Sbenno * All rights reserved. 7177957Sbenno * 7277957Sbenno * Redistribution and use in source and binary forms, with or without 7377957Sbenno * modification, are permitted provided that the following conditions 7477957Sbenno * are met: 7577957Sbenno * 1. Redistributions of source code must retain the above copyright 7677957Sbenno * notice, this list of conditions and the following disclaimer. 7777957Sbenno * 2. Redistributions in binary form must reproduce the above copyright 7877957Sbenno * notice, this list of conditions and the following disclaimer in the 7977957Sbenno * documentation and/or other materials provided with the distribution. 8077957Sbenno * 8177957Sbenno * THIS SOFTWARE IS PROVIDED BY Benno Rice ``AS IS'' AND ANY EXPRESS OR 8277957Sbenno * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES 8377957Sbenno * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
8477957Sbenno * IN NO EVENT SHALL TOOLS GMBH BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 8577957Sbenno * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, 8677957Sbenno * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; 8777957Sbenno * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, 8877957Sbenno * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR 8977957Sbenno * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF 9077957Sbenno * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 9177957Sbenno */ 9277957Sbenno 93113038Sobrien#include <sys/cdefs.h> 94113038Sobrien__FBSDID("$FreeBSD$"); 9577957Sbenno 9690643Sbenno/* 9790643Sbenno * Manages physical address maps. 9890643Sbenno * 9990643Sbenno * Since the information managed by this module is also stored by the 10090643Sbenno * logical address mapping module, this module may throw away valid virtual 10190643Sbenno * to physical mappings at almost any time. However, invalidations of 10290643Sbenno * mappings must be done as requested. 10390643Sbenno * 10490643Sbenno * In order to cope with hardware architectures which make virtual to 10590643Sbenno * physical map invalidates expensive, this module may delay invalidate 10690643Sbenno * reduced protection operations until such time as they are actually 10790643Sbenno * necessary. This module is given full information as to which processors 10890643Sbenno * are currently using which maps, and to when physical maps must be made 10990643Sbenno * correct. 
11090643Sbenno */ 11190643Sbenno 112118239Speter#include "opt_kstack_pages.h" 113118239Speter 11477957Sbenno#include <sys/param.h> 11580431Speter#include <sys/kernel.h> 116222813Sattilio#include <sys/queue.h> 117222813Sattilio#include <sys/cpuset.h> 11890643Sbenno#include <sys/ktr.h> 11990643Sbenno#include <sys/lock.h> 12090643Sbenno#include <sys/msgbuf.h> 12190643Sbenno#include <sys/mutex.h> 12277957Sbenno#include <sys/proc.h> 123238159Salc#include <sys/rwlock.h> 124222813Sattilio#include <sys/sched.h> 12590643Sbenno#include <sys/sysctl.h> 12690643Sbenno#include <sys/systm.h> 12777957Sbenno#include <sys/vmmeter.h> 12877957Sbenno 12990643Sbenno#include <dev/ofw/openfirm.h> 13090643Sbenno 131152180Sgrehan#include <vm/vm.h> 13277957Sbenno#include <vm/vm_param.h> 13377957Sbenno#include <vm/vm_kern.h> 13477957Sbenno#include <vm/vm_page.h> 13577957Sbenno#include <vm/vm_map.h> 13677957Sbenno#include <vm/vm_object.h> 13777957Sbenno#include <vm/vm_extern.h> 13877957Sbenno#include <vm/vm_pageout.h> 13992847Sjeff#include <vm/uma.h> 14077957Sbenno 141125687Sgrehan#include <machine/cpu.h> 142192067Snwhitehorn#include <machine/platform.h> 14383730Smp#include <machine/bat.h> 14490643Sbenno#include <machine/frame.h> 14590643Sbenno#include <machine/md_var.h> 14690643Sbenno#include <machine/psl.h> 14777957Sbenno#include <machine/pte.h> 148178628Smarcel#include <machine/smp.h> 14990643Sbenno#include <machine/sr.h> 150152180Sgrehan#include <machine/mmuvar.h> 151228609Snwhitehorn#include <machine/trap_aim.h> 15277957Sbenno 153152180Sgrehan#include "mmu_if.h" 15477957Sbenno 155152180Sgrehan#define MOEA_DEBUG 156152180Sgrehan 15790643Sbenno#define TODO panic("%s: not implemented", __func__); 15877957Sbenno 15990643Sbenno#define VSID_MAKE(sr, hash) ((sr) | (((hash) & 0xfffff) << 4)) 16090643Sbenno#define VSID_TO_SR(vsid) ((vsid) & 0xf) 16190643Sbenno#define VSID_TO_HASH(vsid) (((vsid) >> 4) & 0xfffff) 16290643Sbenno 16390643Sbennostruct ofw_map { 16490643Sbenno vm_offset_t om_va; 
16590643Sbenno vm_size_t om_len; 16690643Sbenno vm_offset_t om_pa; 16790643Sbenno u_int om_mode; 16890643Sbenno}; 16977957Sbenno 170249864Sjhibbitsextern unsigned char _etext[]; 171249864Sjhibbitsextern unsigned char _end[]; 172249864Sjhibbits 173249864Sjhibbitsextern int dumpsys_minidump; 174249864Sjhibbits 17590643Sbenno/* 17690643Sbenno * Map of physical memory regions. 17790643Sbenno */ 17897346Sbennostatic struct mem_region *regions; 17997346Sbennostatic struct mem_region *pregions; 180209975Snwhitehornstatic u_int phys_avail_count; 181209975Snwhitehornstatic int regions_sz, pregions_sz; 182100319Sbennostatic struct ofw_map *translations; 18377957Sbenno 18490643Sbenno/* 185134535Salc * Lock for the pteg and pvo tables. 186134535Salc */ 187152180Sgrehanstruct mtx moea_table_mutex; 188212278Snwhitehornstruct mtx moea_vsid_mutex; 189134535Salc 190183094Smarcel/* tlbie instruction synchronization */ 191183094Smarcelstatic struct mtx tlbie_mtx; 192183094Smarcel 193134535Salc/* 19490643Sbenno * PTEG data. 19590643Sbenno */ 196152180Sgrehanstatic struct pteg *moea_pteg_table; 197152180Sgrehanu_int moea_pteg_count; 198152180Sgrehanu_int moea_pteg_mask; 19977957Sbenno 20090643Sbenno/* 20190643Sbenno * PVO data. 
20290643Sbenno */ 203152180Sgrehanstruct pvo_head *moea_pvo_table; /* pvo entries by pteg index */ 204152180Sgrehanstruct pvo_head moea_pvo_kunmanaged = 205152180Sgrehan LIST_HEAD_INITIALIZER(moea_pvo_kunmanaged); /* list of unmanaged pages */ 20677957Sbenno 207242534Sattiliostatic struct rwlock_padalign pvh_global_lock; 208238159Salc 209152180Sgrehanuma_zone_t moea_upvo_zone; /* zone for pvo entries for unmanaged pages */ 210152180Sgrehanuma_zone_t moea_mpvo_zone; /* zone for pvo entries for managed pages */ 21177957Sbenno 21299037Sbenno#define BPVO_POOL_SIZE 32768 213152180Sgrehanstatic struct pvo_entry *moea_bpvo_pool; 214152180Sgrehanstatic int moea_bpvo_pool_index = 0; 21577957Sbenno 21690643Sbenno#define VSID_NBPW (sizeof(u_int32_t) * 8) 217152180Sgrehanstatic u_int moea_vsid_bitmap[NPMAPS / VSID_NBPW]; 21877957Sbenno 219152180Sgrehanstatic boolean_t moea_initialized = FALSE; 22077957Sbenno 22190643Sbenno/* 22290643Sbenno * Statistics. 22390643Sbenno */ 224152180Sgrehanu_int moea_pte_valid = 0; 225152180Sgrehanu_int moea_pte_overflow = 0; 226152180Sgrehanu_int moea_pte_replacements = 0; 227152180Sgrehanu_int moea_pvo_entries = 0; 228152180Sgrehanu_int moea_pvo_enter_calls = 0; 229152180Sgrehanu_int moea_pvo_remove_calls = 0; 230152180Sgrehanu_int moea_pte_spills = 0; 231152180SgrehanSYSCTL_INT(_machdep, OID_AUTO, moea_pte_valid, CTLFLAG_RD, &moea_pte_valid, 23290643Sbenno 0, ""); 233152180SgrehanSYSCTL_INT(_machdep, OID_AUTO, moea_pte_overflow, CTLFLAG_RD, 234152180Sgrehan &moea_pte_overflow, 0, ""); 235152180SgrehanSYSCTL_INT(_machdep, OID_AUTO, moea_pte_replacements, CTLFLAG_RD, 236152180Sgrehan &moea_pte_replacements, 0, ""); 237152180SgrehanSYSCTL_INT(_machdep, OID_AUTO, moea_pvo_entries, CTLFLAG_RD, &moea_pvo_entries, 23890643Sbenno 0, ""); 239152180SgrehanSYSCTL_INT(_machdep, OID_AUTO, moea_pvo_enter_calls, CTLFLAG_RD, 240152180Sgrehan &moea_pvo_enter_calls, 0, ""); 241152180SgrehanSYSCTL_INT(_machdep, OID_AUTO, moea_pvo_remove_calls, CTLFLAG_RD, 
242152180Sgrehan &moea_pvo_remove_calls, 0, ""); 243152180SgrehanSYSCTL_INT(_machdep, OID_AUTO, moea_pte_spills, CTLFLAG_RD, 244152180Sgrehan &moea_pte_spills, 0, ""); 24577957Sbenno 24690643Sbenno/* 247152180Sgrehan * Allocate physical memory for use in moea_bootstrap. 24890643Sbenno */ 249152180Sgrehanstatic vm_offset_t moea_bootstrap_alloc(vm_size_t, u_int); 25077957Sbenno 25190643Sbenno/* 25290643Sbenno * PTE calls. 25390643Sbenno */ 254152180Sgrehanstatic int moea_pte_insert(u_int, struct pte *); 25577957Sbenno 25677957Sbenno/* 25790643Sbenno * PVO calls. 25877957Sbenno */ 259152180Sgrehanstatic int moea_pvo_enter(pmap_t, uma_zone_t, struct pvo_head *, 26090643Sbenno vm_offset_t, vm_offset_t, u_int, int); 261152180Sgrehanstatic void moea_pvo_remove(struct pvo_entry *, int); 262152180Sgrehanstatic struct pvo_entry *moea_pvo_find_va(pmap_t, vm_offset_t, int *); 263152180Sgrehanstatic struct pte *moea_pvo_to_pte(const struct pvo_entry *, int); 26490643Sbenno 26590643Sbenno/* 26690643Sbenno * Utility routines. 
26790643Sbenno */ 268159303Salcstatic void moea_enter_locked(pmap_t, vm_offset_t, vm_page_t, 269159303Salc vm_prot_t, boolean_t); 270152180Sgrehanstatic void moea_syncicache(vm_offset_t, vm_size_t); 271152180Sgrehanstatic boolean_t moea_query_bit(vm_page_t, int); 272208990Salcstatic u_int moea_clear_bit(vm_page_t, int); 273152180Sgrehanstatic void moea_kremove(mmu_t, vm_offset_t); 274152180Sgrehanint moea_pte_spill(vm_offset_t); 27590643Sbenno 276152180Sgrehan/* 277152180Sgrehan * Kernel MMU interface 278152180Sgrehan */ 279152180Sgrehanvoid moea_change_wiring(mmu_t, pmap_t, vm_offset_t, boolean_t); 280152180Sgrehanvoid moea_clear_modify(mmu_t, vm_page_t); 281152180Sgrehanvoid moea_copy_page(mmu_t, vm_page_t, vm_page_t); 282248280Skibvoid moea_copy_pages(mmu_t mmu, vm_page_t *ma, vm_offset_t a_offset, 283248280Skib vm_page_t *mb, vm_offset_t b_offset, int xfersize); 284152180Sgrehanvoid moea_enter(mmu_t, pmap_t, vm_offset_t, vm_page_t, vm_prot_t, boolean_t); 285159303Salcvoid moea_enter_object(mmu_t, pmap_t, vm_offset_t, vm_offset_t, vm_page_t, 286159303Salc vm_prot_t); 287159627Supsvoid moea_enter_quick(mmu_t, pmap_t, vm_offset_t, vm_page_t, vm_prot_t); 288152180Sgrehanvm_paddr_t moea_extract(mmu_t, pmap_t, vm_offset_t); 289152180Sgrehanvm_page_t moea_extract_and_hold(mmu_t, pmap_t, vm_offset_t, vm_prot_t); 290152180Sgrehanvoid moea_init(mmu_t); 291152180Sgrehanboolean_t moea_is_modified(mmu_t, vm_page_t); 292214617Salcboolean_t moea_is_prefaultable(mmu_t, pmap_t, vm_offset_t); 293207155Salcboolean_t moea_is_referenced(mmu_t, vm_page_t); 294238357Salcint moea_ts_referenced(mmu_t, vm_page_t); 295235936Srajvm_offset_t moea_map(mmu_t, vm_offset_t *, vm_paddr_t, vm_paddr_t, int); 296152180Sgrehanboolean_t moea_page_exists_quick(mmu_t, pmap_t, vm_page_t); 297173708Salcint moea_page_wired_mappings(mmu_t, vm_page_t); 298152180Sgrehanvoid moea_pinit(mmu_t, pmap_t); 299152180Sgrehanvoid moea_pinit0(mmu_t, pmap_t); 300152180Sgrehanvoid moea_protect(mmu_t, pmap_t, 
vm_offset_t, vm_offset_t, vm_prot_t); 301152180Sgrehanvoid moea_qenter(mmu_t, vm_offset_t, vm_page_t *, int); 302152180Sgrehanvoid moea_qremove(mmu_t, vm_offset_t, int); 303152180Sgrehanvoid moea_release(mmu_t, pmap_t); 304152180Sgrehanvoid moea_remove(mmu_t, pmap_t, vm_offset_t, vm_offset_t); 305152180Sgrehanvoid moea_remove_all(mmu_t, vm_page_t); 306160889Salcvoid moea_remove_write(mmu_t, vm_page_t); 307152180Sgrehanvoid moea_zero_page(mmu_t, vm_page_t); 308152180Sgrehanvoid moea_zero_page_area(mmu_t, vm_page_t, int, int); 309152180Sgrehanvoid moea_zero_page_idle(mmu_t, vm_page_t); 310152180Sgrehanvoid moea_activate(mmu_t, struct thread *); 311152180Sgrehanvoid moea_deactivate(mmu_t, struct thread *); 312190681Snwhitehornvoid moea_cpu_bootstrap(mmu_t, int); 313152180Sgrehanvoid moea_bootstrap(mmu_t, vm_offset_t, vm_offset_t); 314235936Srajvoid *moea_mapdev(mmu_t, vm_paddr_t, vm_size_t); 315213307Snwhitehornvoid *moea_mapdev_attr(mmu_t, vm_offset_t, vm_size_t, vm_memattr_t); 316152180Sgrehanvoid moea_unmapdev(mmu_t, vm_offset_t, vm_size_t); 317235936Srajvm_paddr_t moea_kextract(mmu_t, vm_offset_t); 318213307Snwhitehornvoid moea_kenter_attr(mmu_t, vm_offset_t, vm_offset_t, vm_memattr_t); 319235936Srajvoid moea_kenter(mmu_t, vm_offset_t, vm_paddr_t); 320213307Snwhitehornvoid moea_page_set_memattr(mmu_t mmu, vm_page_t m, vm_memattr_t ma); 321235936Srajboolean_t moea_dev_direct_mapped(mmu_t, vm_paddr_t, vm_size_t); 322198341Smarcelstatic void moea_sync_icache(mmu_t, pmap_t, vm_offset_t, vm_size_t); 323249864Sjhibbitsvm_offset_t moea_dumpsys_map(mmu_t mmu, struct pmap_md *md, vm_size_t ofs, 324249864Sjhibbits vm_size_t *sz); 325249864Sjhibbitsstruct pmap_md * moea_scan_md(mmu_t mmu, struct pmap_md *prev); 326152180Sgrehan 327152180Sgrehanstatic mmu_method_t moea_methods[] = { 328152180Sgrehan MMUMETHOD(mmu_change_wiring, moea_change_wiring), 329152180Sgrehan MMUMETHOD(mmu_clear_modify, moea_clear_modify), 330152180Sgrehan MMUMETHOD(mmu_copy_page, moea_copy_page), 
331248280Skib MMUMETHOD(mmu_copy_pages, moea_copy_pages), 332152180Sgrehan MMUMETHOD(mmu_enter, moea_enter), 333159303Salc MMUMETHOD(mmu_enter_object, moea_enter_object), 334152180Sgrehan MMUMETHOD(mmu_enter_quick, moea_enter_quick), 335152180Sgrehan MMUMETHOD(mmu_extract, moea_extract), 336152180Sgrehan MMUMETHOD(mmu_extract_and_hold, moea_extract_and_hold), 337152180Sgrehan MMUMETHOD(mmu_init, moea_init), 338152180Sgrehan MMUMETHOD(mmu_is_modified, moea_is_modified), 339214617Salc MMUMETHOD(mmu_is_prefaultable, moea_is_prefaultable), 340207155Salc MMUMETHOD(mmu_is_referenced, moea_is_referenced), 341152180Sgrehan MMUMETHOD(mmu_ts_referenced, moea_ts_referenced), 342152180Sgrehan MMUMETHOD(mmu_map, moea_map), 343152180Sgrehan MMUMETHOD(mmu_page_exists_quick,moea_page_exists_quick), 344173708Salc MMUMETHOD(mmu_page_wired_mappings,moea_page_wired_mappings), 345152180Sgrehan MMUMETHOD(mmu_pinit, moea_pinit), 346152180Sgrehan MMUMETHOD(mmu_pinit0, moea_pinit0), 347152180Sgrehan MMUMETHOD(mmu_protect, moea_protect), 348152180Sgrehan MMUMETHOD(mmu_qenter, moea_qenter), 349152180Sgrehan MMUMETHOD(mmu_qremove, moea_qremove), 350152180Sgrehan MMUMETHOD(mmu_release, moea_release), 351152180Sgrehan MMUMETHOD(mmu_remove, moea_remove), 352152180Sgrehan MMUMETHOD(mmu_remove_all, moea_remove_all), 353160889Salc MMUMETHOD(mmu_remove_write, moea_remove_write), 354198341Smarcel MMUMETHOD(mmu_sync_icache, moea_sync_icache), 355152180Sgrehan MMUMETHOD(mmu_zero_page, moea_zero_page), 356152180Sgrehan MMUMETHOD(mmu_zero_page_area, moea_zero_page_area), 357152180Sgrehan MMUMETHOD(mmu_zero_page_idle, moea_zero_page_idle), 358152180Sgrehan MMUMETHOD(mmu_activate, moea_activate), 359152180Sgrehan MMUMETHOD(mmu_deactivate, moea_deactivate), 360213307Snwhitehorn MMUMETHOD(mmu_page_set_memattr, moea_page_set_memattr), 361152180Sgrehan 362152180Sgrehan /* Internal interfaces */ 363152180Sgrehan MMUMETHOD(mmu_bootstrap, moea_bootstrap), 364190681Snwhitehorn MMUMETHOD(mmu_cpu_bootstrap, 
moea_cpu_bootstrap), 365213307Snwhitehorn MMUMETHOD(mmu_mapdev_attr, moea_mapdev_attr), 366152180Sgrehan MMUMETHOD(mmu_mapdev, moea_mapdev), 367152180Sgrehan MMUMETHOD(mmu_unmapdev, moea_unmapdev), 368152180Sgrehan MMUMETHOD(mmu_kextract, moea_kextract), 369152180Sgrehan MMUMETHOD(mmu_kenter, moea_kenter), 370213307Snwhitehorn MMUMETHOD(mmu_kenter_attr, moea_kenter_attr), 371152180Sgrehan MMUMETHOD(mmu_dev_direct_mapped,moea_dev_direct_mapped), 372249864Sjhibbits MMUMETHOD(mmu_scan_md, moea_scan_md), 373249864Sjhibbits MMUMETHOD(mmu_dumpsys_map, moea_dumpsys_map), 374152180Sgrehan 375152180Sgrehan { 0, 0 } 376152180Sgrehan}; 377152180Sgrehan 378212627SgrehanMMU_DEF(oea_mmu, MMU_TYPE_OEA, moea_methods, 0); 379152180Sgrehan 380213307Snwhitehornstatic __inline uint32_t 381213307Snwhitehornmoea_calc_wimg(vm_offset_t pa, vm_memattr_t ma) 382213307Snwhitehorn{ 383213307Snwhitehorn uint32_t pte_lo; 384213307Snwhitehorn int i; 385212627Sgrehan 386213307Snwhitehorn if (ma != VM_MEMATTR_DEFAULT) { 387213307Snwhitehorn switch (ma) { 388213307Snwhitehorn case VM_MEMATTR_UNCACHEABLE: 389213307Snwhitehorn return (PTE_I | PTE_G); 390213307Snwhitehorn case VM_MEMATTR_WRITE_COMBINING: 391213307Snwhitehorn case VM_MEMATTR_WRITE_BACK: 392213307Snwhitehorn case VM_MEMATTR_PREFETCHABLE: 393213307Snwhitehorn return (PTE_I); 394213307Snwhitehorn case VM_MEMATTR_WRITE_THROUGH: 395213307Snwhitehorn return (PTE_W | PTE_M); 396213307Snwhitehorn } 397213307Snwhitehorn } 398213307Snwhitehorn 399213307Snwhitehorn /* 400213307Snwhitehorn * Assume the page is cache inhibited and access is guarded unless 401213307Snwhitehorn * it's in our available memory array. 
402213307Snwhitehorn */ 403213307Snwhitehorn pte_lo = PTE_I | PTE_G; 404213307Snwhitehorn for (i = 0; i < pregions_sz; i++) { 405213307Snwhitehorn if ((pa >= pregions[i].mr_start) && 406213307Snwhitehorn (pa < (pregions[i].mr_start + pregions[i].mr_size))) { 407213307Snwhitehorn pte_lo = PTE_M; 408213307Snwhitehorn break; 409213307Snwhitehorn } 410213307Snwhitehorn } 411213307Snwhitehorn 412213307Snwhitehorn return pte_lo; 413213307Snwhitehorn} 414213307Snwhitehorn 415183094Smarcelstatic void 416183094Smarceltlbie(vm_offset_t va) 417183094Smarcel{ 418152180Sgrehan 419183094Smarcel mtx_lock_spin(&tlbie_mtx); 420213407Snwhitehorn __asm __volatile("ptesync"); 421183094Smarcel __asm __volatile("tlbie %0" :: "r"(va)); 422213407Snwhitehorn __asm __volatile("eieio; tlbsync; ptesync"); 423183094Smarcel mtx_unlock_spin(&tlbie_mtx); 424183094Smarcel} 425183094Smarcel 426183094Smarcelstatic void 427183094Smarceltlbia(void) 428183094Smarcel{ 429183094Smarcel vm_offset_t va; 430183094Smarcel 431183094Smarcel for (va = 0; va < 0x00040000; va += 0x00001000) { 432183094Smarcel __asm __volatile("tlbie %0" :: "r"(va)); 433183094Smarcel powerpc_sync(); 434183094Smarcel } 435183094Smarcel __asm __volatile("tlbsync"); 436183094Smarcel powerpc_sync(); 437183094Smarcel} 438183094Smarcel 43990643Sbennostatic __inline int 44090643Sbennova_to_sr(u_int *sr, vm_offset_t va) 44177957Sbenno{ 44290643Sbenno return (sr[(uintptr_t)va >> ADDR_SR_SHFT]); 44390643Sbenno} 44477957Sbenno 44590643Sbennostatic __inline u_int 44690643Sbennova_to_pteg(u_int sr, vm_offset_t addr) 44790643Sbenno{ 44890643Sbenno u_int hash; 44990643Sbenno 45090643Sbenno hash = (sr & SR_VSID_MASK) ^ (((u_int)addr & ADDR_PIDX) >> 45190643Sbenno ADDR_PIDX_SHFT); 452152180Sgrehan return (hash & moea_pteg_mask); 45377957Sbenno} 45477957Sbenno 45590643Sbennostatic __inline struct pvo_head * 45690643Sbennovm_page_to_pvoh(vm_page_t m) 45790643Sbenno{ 45890643Sbenno 45990643Sbenno return (&m->md.mdpg_pvoh); 46090643Sbenno} 
46190643Sbenno 46277957Sbennostatic __inline void 463152180Sgrehanmoea_attr_clear(vm_page_t m, int ptebit) 46477957Sbenno{ 46590643Sbenno 466238159Salc rw_assert(&pvh_global_lock, RA_WLOCKED); 46790643Sbenno m->md.mdpg_attrs &= ~ptebit; 46877957Sbenno} 46977957Sbenno 47077957Sbennostatic __inline int 471152180Sgrehanmoea_attr_fetch(vm_page_t m) 47277957Sbenno{ 47377957Sbenno 47490643Sbenno return (m->md.mdpg_attrs); 47577957Sbenno} 47677957Sbenno 47790643Sbennostatic __inline void 478152180Sgrehanmoea_attr_save(vm_page_t m, int ptebit) 47990643Sbenno{ 48090643Sbenno 481238159Salc rw_assert(&pvh_global_lock, RA_WLOCKED); 48290643Sbenno m->md.mdpg_attrs |= ptebit; 48390643Sbenno} 48490643Sbenno 48577957Sbennostatic __inline int 486152180Sgrehanmoea_pte_compare(const struct pte *pt, const struct pte *pvo_pt) 48777957Sbenno{ 48890643Sbenno if (pt->pte_hi == pvo_pt->pte_hi) 48990643Sbenno return (1); 49090643Sbenno 49190643Sbenno return (0); 49277957Sbenno} 49377957Sbenno 49477957Sbennostatic __inline int 495152180Sgrehanmoea_pte_match(struct pte *pt, u_int sr, vm_offset_t va, int which) 49677957Sbenno{ 49790643Sbenno return (pt->pte_hi & ~PTE_VALID) == 49890643Sbenno (((sr & SR_VSID_MASK) << PTE_VSID_SHFT) | 49990643Sbenno ((va >> ADDR_API_SHFT) & PTE_API) | which); 50090643Sbenno} 50177957Sbenno 50290643Sbennostatic __inline void 503152180Sgrehanmoea_pte_create(struct pte *pt, u_int sr, vm_offset_t va, u_int pte_lo) 50490643Sbenno{ 505159928Salc 506159928Salc mtx_assert(&moea_table_mutex, MA_OWNED); 507159928Salc 50890643Sbenno /* 50990643Sbenno * Construct a PTE. Default to IMB initially. Valid bit only gets 51090643Sbenno * set when the real pte is set in memory. 51190643Sbenno * 51290643Sbenno * Note: Don't set the valid bit for correct operation of tlb update. 
51390643Sbenno */ 51490643Sbenno pt->pte_hi = ((sr & SR_VSID_MASK) << PTE_VSID_SHFT) | 51590643Sbenno (((va & ADDR_PIDX) >> ADDR_API_SHFT) & PTE_API); 51690643Sbenno pt->pte_lo = pte_lo; 51777957Sbenno} 51877957Sbenno 51990643Sbennostatic __inline void 520152180Sgrehanmoea_pte_synch(struct pte *pt, struct pte *pvo_pt) 52177957Sbenno{ 52277957Sbenno 523159928Salc mtx_assert(&moea_table_mutex, MA_OWNED); 52490643Sbenno pvo_pt->pte_lo |= pt->pte_lo & (PTE_REF | PTE_CHG); 52577957Sbenno} 52677957Sbenno 52790643Sbennostatic __inline void 528152180Sgrehanmoea_pte_clear(struct pte *pt, vm_offset_t va, int ptebit) 52977957Sbenno{ 53077957Sbenno 531159928Salc mtx_assert(&moea_table_mutex, MA_OWNED); 532159928Salc 53390643Sbenno /* 53490643Sbenno * As shown in Section 7.6.3.2.3 53590643Sbenno */ 53690643Sbenno pt->pte_lo &= ~ptebit; 537183094Smarcel tlbie(va); 53877957Sbenno} 53977957Sbenno 54090643Sbennostatic __inline void 541152180Sgrehanmoea_pte_set(struct pte *pt, struct pte *pvo_pt) 54277957Sbenno{ 54377957Sbenno 544159928Salc mtx_assert(&moea_table_mutex, MA_OWNED); 54590643Sbenno pvo_pt->pte_hi |= PTE_VALID; 54690643Sbenno 54777957Sbenno /* 54890643Sbenno * Update the PTE as defined in section 7.6.3.1. 549253976Sjhibbits * Note that the REF/CHG bits are from pvo_pt and thus should have 55090643Sbenno * been saved so this routine can restore them (if desired). 55177957Sbenno */ 55290643Sbenno pt->pte_lo = pvo_pt->pte_lo; 553183094Smarcel powerpc_sync(); 55490643Sbenno pt->pte_hi = pvo_pt->pte_hi; 555183094Smarcel powerpc_sync(); 556152180Sgrehan moea_pte_valid++; 55790643Sbenno} 55877957Sbenno 55990643Sbennostatic __inline void 560152180Sgrehanmoea_pte_unset(struct pte *pt, struct pte *pvo_pt, vm_offset_t va) 56190643Sbenno{ 56290643Sbenno 563159928Salc mtx_assert(&moea_table_mutex, MA_OWNED); 56490643Sbenno pvo_pt->pte_hi &= ~PTE_VALID; 56590643Sbenno 56677957Sbenno /* 56790643Sbenno * Force the reg & chg bits back into the PTEs. 
56877957Sbenno */ 569183094Smarcel powerpc_sync(); 57077957Sbenno 57190643Sbenno /* 57290643Sbenno * Invalidate the pte. 57390643Sbenno */ 57490643Sbenno pt->pte_hi &= ~PTE_VALID; 57577957Sbenno 576183094Smarcel tlbie(va); 57777957Sbenno 57890643Sbenno /* 57990643Sbenno * Save the reg & chg bits. 58090643Sbenno */ 581152180Sgrehan moea_pte_synch(pt, pvo_pt); 582152180Sgrehan moea_pte_valid--; 58377957Sbenno} 58477957Sbenno 58590643Sbennostatic __inline void 586152180Sgrehanmoea_pte_change(struct pte *pt, struct pte *pvo_pt, vm_offset_t va) 58790643Sbenno{ 58890643Sbenno 58990643Sbenno /* 59090643Sbenno * Invalidate the PTE 59190643Sbenno */ 592152180Sgrehan moea_pte_unset(pt, pvo_pt, va); 593152180Sgrehan moea_pte_set(pt, pvo_pt); 59490643Sbenno} 59590643Sbenno 59677957Sbenno/* 59790643Sbenno * Quick sort callout for comparing memory regions. 59877957Sbenno */ 59990643Sbennostatic int om_cmp(const void *a, const void *b); 60090643Sbenno 60190643Sbennostatic int 60290643Sbennoom_cmp(const void *a, const void *b) 60390643Sbenno{ 60490643Sbenno const struct ofw_map *mapa; 60590643Sbenno const struct ofw_map *mapb; 60690643Sbenno 60790643Sbenno mapa = a; 60890643Sbenno mapb = b; 60990643Sbenno if (mapa->om_pa < mapb->om_pa) 61090643Sbenno return (-1); 61190643Sbenno else if (mapa->om_pa > mapb->om_pa) 61290643Sbenno return (1); 61390643Sbenno else 61490643Sbenno return (0); 61577957Sbenno} 61677957Sbenno 61777957Sbennovoid 618190681Snwhitehornmoea_cpu_bootstrap(mmu_t mmup, int ap) 619178628Smarcel{ 620178628Smarcel u_int sdr; 621178628Smarcel int i; 622178628Smarcel 623178628Smarcel if (ap) { 624183094Smarcel powerpc_sync(); 625178628Smarcel __asm __volatile("mtdbatu 0,%0" :: "r"(battable[0].batu)); 626178628Smarcel __asm __volatile("mtdbatl 0,%0" :: "r"(battable[0].batl)); 627178628Smarcel isync(); 628178628Smarcel __asm __volatile("mtibatu 0,%0" :: "r"(battable[0].batu)); 629178628Smarcel __asm __volatile("mtibatl 0,%0" :: "r"(battable[0].batl)); 630178628Smarcel 
isync(); 631178628Smarcel } 632178628Smarcel 633243370Sadrian#ifdef WII 634243370Sadrian /* 635243370Sadrian * Special case for the Wii: don't install the PCI BAT. 636243370Sadrian */ 637243370Sadrian if (strcmp(installed_platform(), "wii") != 0) { 638243370Sadrian#endif 639243370Sadrian __asm __volatile("mtdbatu 1,%0" :: "r"(battable[8].batu)); 640243370Sadrian __asm __volatile("mtdbatl 1,%0" :: "r"(battable[8].batl)); 641243370Sadrian#ifdef WII 642243370Sadrian } 643243370Sadrian#endif 644178629Smarcel isync(); 645178628Smarcel 646178629Smarcel __asm __volatile("mtibatu 1,%0" :: "r"(0)); 647178629Smarcel __asm __volatile("mtdbatu 2,%0" :: "r"(0)); 648178629Smarcel __asm __volatile("mtibatu 2,%0" :: "r"(0)); 649178629Smarcel __asm __volatile("mtdbatu 3,%0" :: "r"(0)); 650178629Smarcel __asm __volatile("mtibatu 3,%0" :: "r"(0)); 651178628Smarcel isync(); 652178628Smarcel 653178628Smarcel for (i = 0; i < 16; i++) 654215163Snwhitehorn mtsrin(i << ADDR_SR_SHFT, kernel_pmap->pm_sr[i]); 655183094Smarcel powerpc_sync(); 656178628Smarcel 657178628Smarcel sdr = (u_int)moea_pteg_table | (moea_pteg_mask >> 10); 658178628Smarcel __asm __volatile("mtsdr1 %0" :: "r"(sdr)); 659178628Smarcel isync(); 660178628Smarcel 661179254Smarcel tlbia(); 662178628Smarcel} 663178628Smarcel 664178628Smarcelvoid 665152180Sgrehanmoea_bootstrap(mmu_t mmup, vm_offset_t kernelstart, vm_offset_t kernelend) 66677957Sbenno{ 66797346Sbenno ihandle_t mmui; 66890643Sbenno phandle_t chosen, mmu; 66990643Sbenno int sz; 67090643Sbenno int i, j; 671143200Sgrehan vm_size_t size, physsz, hwphyssz; 67290643Sbenno vm_offset_t pa, va, off; 673194784Sjeff void *dpcpu; 674209369Snwhitehorn register_t msr; 67577957Sbenno 67699037Sbenno /* 677103604Sgrehan * Set up BAT0 to map the lowest 256 MB area 67899037Sbenno */ 67999037Sbenno battable[0x0].batl = BATL(0x00000000, BAT_M, BAT_PP_RW); 68099037Sbenno battable[0x0].batu = BATU(0x00000000, BAT_BL_256M, BAT_Vs); 68199037Sbenno 682243370Sadrian /* 683243370Sadrian * 
Map PCI memory space. 684243370Sadrian */ 685243370Sadrian battable[0x8].batl = BATL(0x80000000, BAT_I|BAT_G, BAT_PP_RW); 686243370Sadrian battable[0x8].batu = BATU(0x80000000, BAT_BL_256M, BAT_Vs); 68799037Sbenno 688243370Sadrian battable[0x9].batl = BATL(0x90000000, BAT_I|BAT_G, BAT_PP_RW); 689243370Sadrian battable[0x9].batu = BATU(0x90000000, BAT_BL_256M, BAT_Vs); 69099037Sbenno 691243370Sadrian battable[0xa].batl = BATL(0xa0000000, BAT_I|BAT_G, BAT_PP_RW); 692243370Sadrian battable[0xa].batu = BATU(0xa0000000, BAT_BL_256M, BAT_Vs); 69399037Sbenno 694243370Sadrian battable[0xb].batl = BATL(0xb0000000, BAT_I|BAT_G, BAT_PP_RW); 695243370Sadrian battable[0xb].batu = BATU(0xb0000000, BAT_BL_256M, BAT_Vs); 69699037Sbenno 697243370Sadrian /* 698243370Sadrian * Map obio devices. 699243370Sadrian */ 700243370Sadrian battable[0xf].batl = BATL(0xf0000000, BAT_I|BAT_G, BAT_PP_RW); 701243370Sadrian battable[0xf].batu = BATU(0xf0000000, BAT_BL_256M, BAT_Vs); 70299037Sbenno 70377957Sbenno /* 70490643Sbenno * Use an IBAT and a DBAT to map the bottom segment of memory 705209369Snwhitehorn * where we are. Turn off instruction relocation temporarily 706209369Snwhitehorn * to prevent faults while reprogramming the IBAT. 
70777957Sbenno */ 708209369Snwhitehorn msr = mfmsr(); 709209369Snwhitehorn mtmsr(msr & ~PSL_IR); 710152180Sgrehan __asm (".balign 32; \n" 711149958Sgrehan "mtibatu 0,%0; mtibatl 0,%1; isync; \n" 712131808Sgrehan "mtdbatu 0,%0; mtdbatl 0,%1; isync" 713178628Smarcel :: "r"(battable[0].batu), "r"(battable[0].batl)); 714209369Snwhitehorn mtmsr(msr); 71599037Sbenno 716243370Sadrian#ifdef WII 717243370Sadrian if (strcmp(installed_platform(), "wii") != 0) { 718243370Sadrian#endif 719243370Sadrian /* map pci space */ 720243370Sadrian __asm __volatile("mtdbatu 1,%0" :: "r"(battable[8].batu)); 721243370Sadrian __asm __volatile("mtdbatl 1,%0" :: "r"(battable[8].batl)); 722243370Sadrian#ifdef WII 723243370Sadrian } 724243370Sadrian#endif 725178628Smarcel isync(); 72677957Sbenno 727190681Snwhitehorn /* set global direct map flag */ 728190681Snwhitehorn hw_direct_map = 1; 729190681Snwhitehorn 73097346Sbenno mem_regions(&pregions, &pregions_sz, ®ions, ®ions_sz); 731152180Sgrehan CTR0(KTR_PMAP, "moea_bootstrap: physical memory"); 73297346Sbenno 73397346Sbenno for (i = 0; i < pregions_sz; i++) { 734103604Sgrehan vm_offset_t pa; 735103604Sgrehan vm_offset_t end; 736103604Sgrehan 73797346Sbenno CTR3(KTR_PMAP, "physregion: %#x - %#x (%#x)", 73897346Sbenno pregions[i].mr_start, 73997346Sbenno pregions[i].mr_start + pregions[i].mr_size, 74097346Sbenno pregions[i].mr_size); 741103604Sgrehan /* 742103604Sgrehan * Install entries into the BAT table to allow all 743103604Sgrehan * of physmem to be convered by on-demand BAT entries. 744103604Sgrehan * The loop will sometimes set the same battable element 745103604Sgrehan * twice, but that's fine since they won't be used for 746103604Sgrehan * a while yet. 
747103604Sgrehan */ 748103604Sgrehan pa = pregions[i].mr_start & 0xf0000000; 749103604Sgrehan end = pregions[i].mr_start + pregions[i].mr_size; 750103604Sgrehan do { 751103604Sgrehan u_int n = pa >> ADDR_SR_SHFT; 752152180Sgrehan 753103604Sgrehan battable[n].batl = BATL(pa, BAT_M, BAT_PP_RW); 754103604Sgrehan battable[n].batu = BATU(pa, BAT_BL_256M, BAT_Vs); 755103604Sgrehan pa += SEGMENT_LENGTH; 756103604Sgrehan } while (pa < end); 75797346Sbenno } 75897346Sbenno 75997346Sbenno if (sizeof(phys_avail)/sizeof(phys_avail[0]) < regions_sz) 760152180Sgrehan panic("moea_bootstrap: phys_avail too small"); 761222614Snwhitehorn 76290643Sbenno phys_avail_count = 0; 76391793Sbenno physsz = 0; 764143234Sgrehan hwphyssz = 0; 765143234Sgrehan TUNABLE_ULONG_FETCH("hw.physmem", (u_long *) &hwphyssz); 76697346Sbenno for (i = 0, j = 0; i < regions_sz; i++, j += 2) { 76790643Sbenno CTR3(KTR_PMAP, "region: %#x - %#x (%#x)", regions[i].mr_start, 76890643Sbenno regions[i].mr_start + regions[i].mr_size, 76990643Sbenno regions[i].mr_size); 770143200Sgrehan if (hwphyssz != 0 && 771143200Sgrehan (physsz + regions[i].mr_size) >= hwphyssz) { 772143200Sgrehan if (physsz < hwphyssz) { 773143200Sgrehan phys_avail[j] = regions[i].mr_start; 774143200Sgrehan phys_avail[j + 1] = regions[i].mr_start + 775143200Sgrehan hwphyssz - physsz; 776143200Sgrehan physsz = hwphyssz; 777143200Sgrehan phys_avail_count++; 778143200Sgrehan } 779143200Sgrehan break; 780143200Sgrehan } 78190643Sbenno phys_avail[j] = regions[i].mr_start; 78290643Sbenno phys_avail[j + 1] = regions[i].mr_start + regions[i].mr_size; 78390643Sbenno phys_avail_count++; 78491793Sbenno physsz += regions[i].mr_size; 78577957Sbenno } 786228609Snwhitehorn 787228609Snwhitehorn /* Check for overlap with the kernel and exception vectors */ 788228609Snwhitehorn for (j = 0; j < 2*phys_avail_count; j+=2) { 789228609Snwhitehorn if (phys_avail[j] < EXC_LAST) 790228609Snwhitehorn phys_avail[j] += EXC_LAST; 791228609Snwhitehorn 792228609Snwhitehorn if 
(kernelstart >= phys_avail[j] && 793228609Snwhitehorn kernelstart < phys_avail[j+1]) { 794228609Snwhitehorn if (kernelend < phys_avail[j+1]) { 795228609Snwhitehorn phys_avail[2*phys_avail_count] = 796228609Snwhitehorn (kernelend & ~PAGE_MASK) + PAGE_SIZE; 797228609Snwhitehorn phys_avail[2*phys_avail_count + 1] = 798228609Snwhitehorn phys_avail[j+1]; 799228609Snwhitehorn phys_avail_count++; 800228609Snwhitehorn } 801228609Snwhitehorn 802228609Snwhitehorn phys_avail[j+1] = kernelstart & ~PAGE_MASK; 803228609Snwhitehorn } 804228609Snwhitehorn 805228609Snwhitehorn if (kernelend >= phys_avail[j] && 806228609Snwhitehorn kernelend < phys_avail[j+1]) { 807228609Snwhitehorn if (kernelstart > phys_avail[j]) { 808228609Snwhitehorn phys_avail[2*phys_avail_count] = phys_avail[j]; 809228609Snwhitehorn phys_avail[2*phys_avail_count + 1] = 810228609Snwhitehorn kernelstart & ~PAGE_MASK; 811228609Snwhitehorn phys_avail_count++; 812228609Snwhitehorn } 813228609Snwhitehorn 814228609Snwhitehorn phys_avail[j] = (kernelend & ~PAGE_MASK) + PAGE_SIZE; 815228609Snwhitehorn } 816228609Snwhitehorn } 817228609Snwhitehorn 81891793Sbenno physmem = btoc(physsz); 81977957Sbenno 82077957Sbenno /* 82190643Sbenno * Allocate PTEG table. 
82277957Sbenno */ 82390643Sbenno#ifdef PTEGCOUNT 824152180Sgrehan moea_pteg_count = PTEGCOUNT; 82590643Sbenno#else 826152180Sgrehan moea_pteg_count = 0x1000; 82777957Sbenno 828152180Sgrehan while (moea_pteg_count < physmem) 829152180Sgrehan moea_pteg_count <<= 1; 83077957Sbenno 831152180Sgrehan moea_pteg_count >>= 1; 83290643Sbenno#endif /* PTEGCOUNT */ 83377957Sbenno 834152180Sgrehan size = moea_pteg_count * sizeof(struct pteg); 835152180Sgrehan CTR2(KTR_PMAP, "moea_bootstrap: %d PTEGs, %d bytes", moea_pteg_count, 83690643Sbenno size); 837152180Sgrehan moea_pteg_table = (struct pteg *)moea_bootstrap_alloc(size, size); 838152180Sgrehan CTR1(KTR_PMAP, "moea_bootstrap: PTEG table at %p", moea_pteg_table); 839152180Sgrehan bzero((void *)moea_pteg_table, moea_pteg_count * sizeof(struct pteg)); 840152180Sgrehan moea_pteg_mask = moea_pteg_count - 1; 84177957Sbenno 84290643Sbenno /* 84394839Sbenno * Allocate pv/overflow lists. 84490643Sbenno */ 845152180Sgrehan size = sizeof(struct pvo_head) * moea_pteg_count; 846152180Sgrehan moea_pvo_table = (struct pvo_head *)moea_bootstrap_alloc(size, 84790643Sbenno PAGE_SIZE); 848152180Sgrehan CTR1(KTR_PMAP, "moea_bootstrap: PVO table at %p", moea_pvo_table); 849152180Sgrehan for (i = 0; i < moea_pteg_count; i++) 850152180Sgrehan LIST_INIT(&moea_pvo_table[i]); 85177957Sbenno 85290643Sbenno /* 853134535Salc * Initialize the lock that synchronizes access to the pteg and pvo 854134535Salc * tables. 855134535Salc */ 856159928Salc mtx_init(&moea_table_mutex, "pmap table", NULL, MTX_DEF | 857159928Salc MTX_RECURSE); 858212278Snwhitehorn mtx_init(&moea_vsid_mutex, "VSID table", NULL, MTX_DEF); 859134535Salc 860183094Smarcel mtx_init(&tlbie_mtx, "tlbie", NULL, MTX_SPIN); 861183094Smarcel 862134535Salc /* 86390643Sbenno * Initialise the unmanaged pvo pool. 
86490643Sbenno */ 865152180Sgrehan moea_bpvo_pool = (struct pvo_entry *)moea_bootstrap_alloc( 86699037Sbenno BPVO_POOL_SIZE*sizeof(struct pvo_entry), 0); 867152180Sgrehan moea_bpvo_pool_index = 0; 86877957Sbenno 86977957Sbenno /* 87090643Sbenno * Make sure kernel vsid is allocated as well as VSID 0. 87177957Sbenno */ 872152180Sgrehan moea_vsid_bitmap[(KERNEL_VSIDBITS & (NPMAPS - 1)) / VSID_NBPW] 87390643Sbenno |= 1 << (KERNEL_VSIDBITS % VSID_NBPW); 874152180Sgrehan moea_vsid_bitmap[0] |= 1; 87577957Sbenno 87690643Sbenno /* 877215163Snwhitehorn * Initialize the kernel pmap (which is statically allocated). 87890643Sbenno */ 879215163Snwhitehorn PMAP_LOCK_INIT(kernel_pmap); 880215163Snwhitehorn for (i = 0; i < 16; i++) 881215163Snwhitehorn kernel_pmap->pm_sr[i] = EMPTY_SEGMENT + i; 882222813Sattilio CPU_FILL(&kernel_pmap->pm_active); 883235689Snwhitehorn RB_INIT(&kernel_pmap->pmap_pvo); 884215163Snwhitehorn 885238159Salc /* 886238159Salc * Initialize the global pv list lock. 887238159Salc */ 888238159Salc rw_init(&pvh_global_lock, "pmap pv global"); 889238159Salc 890215163Snwhitehorn /* 891215163Snwhitehorn * Set up the Open Firmware mappings 892215163Snwhitehorn */ 893228609Snwhitehorn chosen = OF_finddevice("/chosen"); 894228609Snwhitehorn if (chosen != -1 && OF_getprop(chosen, "mmu", &mmui, 4) != -1 && 895228609Snwhitehorn (mmu = OF_instance_to_package(mmui)) != -1 && 896228609Snwhitehorn (sz = OF_getproplen(mmu, "translations")) != -1) { 897228609Snwhitehorn translations = NULL; 898228609Snwhitehorn for (i = 0; phys_avail[i] != 0; i += 2) { 899228609Snwhitehorn if (phys_avail[i + 1] >= sz) { 900228609Snwhitehorn translations = (struct ofw_map *)phys_avail[i]; 901228609Snwhitehorn break; 902228609Snwhitehorn } 903131401Sgrehan } 904228609Snwhitehorn if (translations == NULL) 905228609Snwhitehorn panic("moea_bootstrap: no space to copy translations"); 906228609Snwhitehorn bzero(translations, sz); 907228609Snwhitehorn if (OF_getprop(mmu, "translations", translations, 
sz) == -1) 908228609Snwhitehorn panic("moea_bootstrap: can't get ofw translations"); 909228609Snwhitehorn CTR0(KTR_PMAP, "moea_bootstrap: translations"); 910228609Snwhitehorn sz /= sizeof(*translations); 911228609Snwhitehorn qsort(translations, sz, sizeof (*translations), om_cmp); 912228609Snwhitehorn for (i = 0; i < sz; i++) { 913228609Snwhitehorn CTR3(KTR_PMAP, "translation: pa=%#x va=%#x len=%#x", 914228609Snwhitehorn translations[i].om_pa, translations[i].om_va, 915228609Snwhitehorn translations[i].om_len); 91677957Sbenno 917228609Snwhitehorn /* 918228609Snwhitehorn * If the mapping is 1:1, let the RAM and device 919228609Snwhitehorn * on-demand BAT tables take care of the translation. 920228609Snwhitehorn */ 921228609Snwhitehorn if (translations[i].om_va == translations[i].om_pa) 922228609Snwhitehorn continue; 92377957Sbenno 924228609Snwhitehorn /* Enter the pages */ 925228609Snwhitehorn for (off = 0; off < translations[i].om_len; 926228609Snwhitehorn off += PAGE_SIZE) 927228609Snwhitehorn moea_kenter(mmup, translations[i].om_va + off, 928228609Snwhitehorn translations[i].om_pa + off); 929228609Snwhitehorn } 93077957Sbenno } 93177957Sbenno 93290643Sbenno /* 933178261Smarcel * Calculate the last available physical address. 934178261Smarcel */ 935178261Smarcel for (i = 0; phys_avail[i + 2] != 0; i += 2) 936178261Smarcel ; 937178261Smarcel Maxmem = powerpc_btop(phys_avail[i + 1]); 938178261Smarcel 939190681Snwhitehorn moea_cpu_bootstrap(mmup,0); 94077957Sbenno 94190643Sbenno pmap_bootstrapped++; 942178261Smarcel 943178261Smarcel /* 944178261Smarcel * Set the start and end of kva. 945178261Smarcel */ 946178261Smarcel virtual_avail = VM_MIN_KERNEL_ADDRESS; 947204128Snwhitehorn virtual_end = VM_MAX_SAFE_KERNEL_ADDRESS; 948178261Smarcel 949178261Smarcel /* 950178261Smarcel * Allocate a kernel stack with a guard page for thread0 and map it 951178261Smarcel * into the kernel page map. 
952178261Smarcel */ 953178261Smarcel pa = moea_bootstrap_alloc(KSTACK_PAGES * PAGE_SIZE, PAGE_SIZE); 954178261Smarcel va = virtual_avail + KSTACK_GUARD_PAGES * PAGE_SIZE; 955178261Smarcel virtual_avail = va + KSTACK_PAGES * PAGE_SIZE; 956178261Smarcel CTR2(KTR_PMAP, "moea_bootstrap: kstack0 at %#x (%#x)", pa, va); 957178261Smarcel thread0.td_kstack = va; 958178261Smarcel thread0.td_kstack_pages = KSTACK_PAGES; 959178261Smarcel for (i = 0; i < KSTACK_PAGES; i++) { 960201758Smbr moea_kenter(mmup, va, pa); 961178261Smarcel pa += PAGE_SIZE; 962178261Smarcel va += PAGE_SIZE; 963178261Smarcel } 964178261Smarcel 965178261Smarcel /* 966178261Smarcel * Allocate virtual address space for the message buffer. 967178261Smarcel */ 968217688Spluknet pa = msgbuf_phys = moea_bootstrap_alloc(msgbufsize, PAGE_SIZE); 969178261Smarcel msgbufp = (struct msgbuf *)virtual_avail; 970178261Smarcel va = virtual_avail; 971217688Spluknet virtual_avail += round_page(msgbufsize); 972178261Smarcel while (va < virtual_avail) { 973201758Smbr moea_kenter(mmup, va, pa); 974178261Smarcel pa += PAGE_SIZE; 975178261Smarcel va += PAGE_SIZE; 976178261Smarcel } 977194784Sjeff 978194784Sjeff /* 979194784Sjeff * Allocate virtual address space for the dynamic percpu area. 980194784Sjeff */ 981194784Sjeff pa = moea_bootstrap_alloc(DPCPU_SIZE, PAGE_SIZE); 982194784Sjeff dpcpu = (void *)virtual_avail; 983194784Sjeff va = virtual_avail; 984194784Sjeff virtual_avail += DPCPU_SIZE; 985194784Sjeff while (va < virtual_avail) { 986201758Smbr moea_kenter(mmup, va, pa); 987194784Sjeff pa += PAGE_SIZE; 988194784Sjeff va += PAGE_SIZE; 989194784Sjeff } 990194784Sjeff dpcpu_init(dpcpu, 0); 99177957Sbenno} 99277957Sbenno 99377957Sbenno/* 99490643Sbenno * Activate a user pmap. The pmap must be activated before it's address 99590643Sbenno * space can be accessed in any way. 
99677957Sbenno */ 99777957Sbennovoid 998152180Sgrehanmoea_activate(mmu_t mmu, struct thread *td) 99977957Sbenno{ 100096250Sbenno pmap_t pm, pmr; 100177957Sbenno 100277957Sbenno /* 1003103604Sgrehan * Load all the data we need up front to encourage the compiler to 100490643Sbenno * not issue any loads while we have interrupts disabled below. 100577957Sbenno */ 100690643Sbenno pm = &td->td_proc->p_vmspace->vm_pmap; 1007183290Snwhitehorn pmr = pm->pmap_phys; 100877957Sbenno 1009223758Sattilio CPU_SET(PCPU_GET(cpuid), &pm->pm_active); 101096250Sbenno PCPU_SET(curpmap, pmr); 101177957Sbenno} 101277957Sbenno 101391483Sbennovoid 1014152180Sgrehanmoea_deactivate(mmu_t mmu, struct thread *td) 101591483Sbenno{ 101691483Sbenno pmap_t pm; 101791483Sbenno 101891483Sbenno pm = &td->td_proc->p_vmspace->vm_pmap; 1019223758Sattilio CPU_CLR(PCPU_GET(cpuid), &pm->pm_active); 102096250Sbenno PCPU_SET(curpmap, NULL); 102191483Sbenno} 102291483Sbenno 102377957Sbennovoid 1024152180Sgrehanmoea_change_wiring(mmu_t mmu, pmap_t pm, vm_offset_t va, boolean_t wired) 102577957Sbenno{ 102696353Sbenno struct pvo_entry *pvo; 102796353Sbenno 1028134329Salc PMAP_LOCK(pm); 1029152180Sgrehan pvo = moea_pvo_find_va(pm, va & ~ADDR_POFF, NULL); 103096353Sbenno 103196353Sbenno if (pvo != NULL) { 103296353Sbenno if (wired) { 103396353Sbenno if ((pvo->pvo_vaddr & PVO_WIRED) == 0) 103496353Sbenno pm->pm_stats.wired_count++; 103596353Sbenno pvo->pvo_vaddr |= PVO_WIRED; 103696353Sbenno } else { 103796353Sbenno if ((pvo->pvo_vaddr & PVO_WIRED) != 0) 103896353Sbenno pm->pm_stats.wired_count--; 103996353Sbenno pvo->pvo_vaddr &= ~PVO_WIRED; 104096353Sbenno } 104196353Sbenno } 1042134329Salc PMAP_UNLOCK(pm); 104377957Sbenno} 104477957Sbenno 104577957Sbennovoid 1046152180Sgrehanmoea_copy_page(mmu_t mmu, vm_page_t msrc, vm_page_t mdst) 104777957Sbenno{ 104897385Sbenno vm_offset_t dst; 104997385Sbenno vm_offset_t src; 105097385Sbenno 105197385Sbenno dst = VM_PAGE_TO_PHYS(mdst); 105297385Sbenno src = 
VM_PAGE_TO_PHYS(msrc); 105397385Sbenno 1054234156Snwhitehorn bcopy((void *)src, (void *)dst, PAGE_SIZE); 105577957Sbenno} 105677957Sbenno 1057248280Skibvoid 1058248280Skibmoea_copy_pages(mmu_t mmu, vm_page_t *ma, vm_offset_t a_offset, 1059248280Skib vm_page_t *mb, vm_offset_t b_offset, int xfersize) 1060248280Skib{ 1061248280Skib void *a_cp, *b_cp; 1062248280Skib vm_offset_t a_pg_offset, b_pg_offset; 1063248280Skib int cnt; 1064248280Skib 1065248280Skib while (xfersize > 0) { 1066248280Skib a_pg_offset = a_offset & PAGE_MASK; 1067248280Skib cnt = min(xfersize, PAGE_SIZE - a_pg_offset); 1068248280Skib a_cp = (char *)VM_PAGE_TO_PHYS(ma[a_offset >> PAGE_SHIFT]) + 1069248280Skib a_pg_offset; 1070248280Skib b_pg_offset = b_offset & PAGE_MASK; 1071248280Skib cnt = min(cnt, PAGE_SIZE - b_pg_offset); 1072248280Skib b_cp = (char *)VM_PAGE_TO_PHYS(mb[b_offset >> PAGE_SHIFT]) + 1073248280Skib b_pg_offset; 1074248280Skib bcopy(a_cp, b_cp, cnt); 1075248280Skib a_offset += cnt; 1076248280Skib b_offset += cnt; 1077248280Skib xfersize -= cnt; 1078248280Skib } 1079248280Skib} 1080248280Skib 108177957Sbenno/* 108290643Sbenno * Zero a page of physical memory by temporarily mapping it into the tlb. 
108377957Sbenno */ 108477957Sbennovoid 1085152180Sgrehanmoea_zero_page(mmu_t mmu, vm_page_t m) 108677957Sbenno{ 108794777Speter vm_offset_t pa = VM_PAGE_TO_PHYS(m); 1088178265Smarcel void *va = (void *)pa; 108977957Sbenno 109090643Sbenno bzero(va, PAGE_SIZE); 109177957Sbenno} 109277957Sbenno 109377957Sbennovoid 1094152180Sgrehanmoea_zero_page_area(mmu_t mmu, vm_page_t m, int off, int size) 109577957Sbenno{ 109699666Sbenno vm_offset_t pa = VM_PAGE_TO_PHYS(m); 1097178265Smarcel void *va = (void *)(pa + off); 109899666Sbenno 1099178265Smarcel bzero(va, size); 110077957Sbenno} 110177957Sbenno 110299571Spetervoid 1103152180Sgrehanmoea_zero_page_idle(mmu_t mmu, vm_page_t m) 110499571Speter{ 1105178265Smarcel vm_offset_t pa = VM_PAGE_TO_PHYS(m); 1106178265Smarcel void *va = (void *)pa; 110799571Speter 1108178265Smarcel bzero(va, PAGE_SIZE); 110999571Speter} 111099571Speter 111177957Sbenno/* 111290643Sbenno * Map the given physical page at the specified virtual address in the 111390643Sbenno * target pmap with the protection requested. If specified the page 111490643Sbenno * will be wired down. 111577957Sbenno */ 111677957Sbennovoid 1117152180Sgrehanmoea_enter(mmu_t mmu, pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot, 111890643Sbenno boolean_t wired) 111977957Sbenno{ 1120159303Salc 1121238159Salc rw_wlock(&pvh_global_lock); 1122159303Salc PMAP_LOCK(pmap); 1123159324Salc moea_enter_locked(pmap, va, m, prot, wired); 1124238159Salc rw_wunlock(&pvh_global_lock); 1125159303Salc PMAP_UNLOCK(pmap); 1126159303Salc} 1127159303Salc 1128159303Salc/* 1129159303Salc * Map the given physical page at the specified virtual address in the 1130159303Salc * target pmap with the protection requested. If specified the page 1131159303Salc * will be wired down. 1132159303Salc * 1133159303Salc * The page queues and pmap must be locked. 
1134159303Salc */ 1135159303Salcstatic void 1136159303Salcmoea_enter_locked(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot, 1137159303Salc boolean_t wired) 1138159303Salc{ 113990643Sbenno struct pvo_head *pvo_head; 114092847Sjeff uma_zone_t zone; 114196250Sbenno vm_page_t pg; 1142233949Snwhitehorn u_int pte_lo, pvo_flags; 114390643Sbenno int error; 114477957Sbenno 1145152180Sgrehan if (!moea_initialized) { 1146152180Sgrehan pvo_head = &moea_pvo_kunmanaged; 1147152180Sgrehan zone = moea_upvo_zone; 114890643Sbenno pvo_flags = 0; 114996250Sbenno pg = NULL; 115090643Sbenno } else { 1151110172Sgrehan pvo_head = vm_page_to_pvoh(m); 1152110172Sgrehan pg = m; 1153152180Sgrehan zone = moea_mpvo_zone; 115490643Sbenno pvo_flags = PVO_MANAGED; 115590643Sbenno } 1156134535Salc if (pmap_bootstrapped) 1157238159Salc rw_assert(&pvh_global_lock, RA_WLOCKED); 1158159303Salc PMAP_LOCK_ASSERT(pmap, MA_OWNED); 1159254138Sattilio if ((m->oflags & VPO_UNMANAGED) == 0 && !vm_page_xbusied(m)) 1160250747Salc VM_OBJECT_ASSERT_LOCKED(m->object); 116177957Sbenno 1162142416Sgrehan /* XXX change the pvo head for fake pages */ 1163224746Skib if ((m->oflags & VPO_UNMANAGED) != 0) { 1164189675Snwhitehorn pvo_flags &= ~PVO_MANAGED; 1165152180Sgrehan pvo_head = &moea_pvo_kunmanaged; 1166189675Snwhitehorn zone = moea_upvo_zone; 1167189675Snwhitehorn } 1168142416Sgrehan 1169213335Snwhitehorn pte_lo = moea_calc_wimg(VM_PAGE_TO_PHYS(m), pmap_page_get_memattr(m)); 117077957Sbenno 1171164229Salc if (prot & VM_PROT_WRITE) { 117290643Sbenno pte_lo |= PTE_BW; 1173208810Salc if (pmap_bootstrapped && 1174224746Skib (m->oflags & VPO_UNMANAGED) == 0) 1175225418Skib vm_page_aflag_set(m, PGA_WRITEABLE); 1176164229Salc } else 117790643Sbenno pte_lo |= PTE_BR; 117877957Sbenno 1179142416Sgrehan if (prot & VM_PROT_EXECUTE) 1180142416Sgrehan pvo_flags |= PVO_EXECUTABLE; 118177957Sbenno 118290643Sbenno if (wired) 118390643Sbenno pvo_flags |= PVO_WIRED; 118477957Sbenno 1185152180Sgrehan error = 
moea_pvo_enter(pmap, zone, pvo_head, va, VM_PAGE_TO_PHYS(m), 118696250Sbenno pte_lo, pvo_flags); 118790643Sbenno 118896250Sbenno /* 1189233949Snwhitehorn * Flush the real page from the instruction cache. This has be done 1190233949Snwhitehorn * for all user mappings to prevent information leakage via the 1191234149Snwhitehorn * instruction cache. moea_pvo_enter() returns ENOENT for the first 1192234149Snwhitehorn * mapping for a page. 119396250Sbenno */ 1194234149Snwhitehorn if (pmap != kernel_pmap && error == ENOENT && 1195234149Snwhitehorn (pte_lo & (PTE_I | PTE_G)) == 0) 1196152180Sgrehan moea_syncicache(VM_PAGE_TO_PHYS(m), PAGE_SIZE); 119777957Sbenno} 119877957Sbenno 1199159303Salc/* 1200159303Salc * Maps a sequence of resident pages belonging to the same object. 1201159303Salc * The sequence begins with the given page m_start. This page is 1202159303Salc * mapped at the given virtual address start. Each subsequent page is 1203159303Salc * mapped at a virtual address that is offset from start by the same 1204159303Salc * amount as the page is offset from m_start within the object. The 1205159303Salc * last page in the sequence is the page with the largest offset from 1206159303Salc * m_start that can be mapped at a virtual address less than the given 1207159303Salc * virtual address end. Not every virtual page between start and end 1208159303Salc * is mapped; only those for which a resident page exists with the 1209159303Salc * corresponding offset from m_start are mapped. 
1210159303Salc */ 1211159303Salcvoid 1212159303Salcmoea_enter_object(mmu_t mmu, pmap_t pm, vm_offset_t start, vm_offset_t end, 1213159303Salc vm_page_t m_start, vm_prot_t prot) 1214159303Salc{ 1215159303Salc vm_page_t m; 1216159303Salc vm_pindex_t diff, psize; 1217159303Salc 1218250884Sattilio VM_OBJECT_ASSERT_LOCKED(m_start->object); 1219250884Sattilio 1220159303Salc psize = atop(end - start); 1221159303Salc m = m_start; 1222238159Salc rw_wlock(&pvh_global_lock); 1223159303Salc PMAP_LOCK(pm); 1224159303Salc while (m != NULL && (diff = m->pindex - m_start->pindex) < psize) { 1225159303Salc moea_enter_locked(pm, start + ptoa(diff), m, prot & 1226159303Salc (VM_PROT_READ | VM_PROT_EXECUTE), FALSE); 1227159303Salc m = TAILQ_NEXT(m, listq); 1228159303Salc } 1229238159Salc rw_wunlock(&pvh_global_lock); 1230159303Salc PMAP_UNLOCK(pm); 1231159303Salc} 1232159303Salc 1233159627Supsvoid 1234152180Sgrehanmoea_enter_quick(mmu_t mmu, pmap_t pm, vm_offset_t va, vm_page_t m, 1235159627Sups vm_prot_t prot) 1236117045Salc{ 1237117045Salc 1238238159Salc rw_wlock(&pvh_global_lock); 1239159303Salc PMAP_LOCK(pm); 1240159303Salc moea_enter_locked(pm, va, m, prot & (VM_PROT_READ | VM_PROT_EXECUTE), 1241152180Sgrehan FALSE); 1242238159Salc rw_wunlock(&pvh_global_lock); 1243159303Salc PMAP_UNLOCK(pm); 1244117045Salc} 1245117045Salc 1246131658Salcvm_paddr_t 1247152180Sgrehanmoea_extract(mmu_t mmu, pmap_t pm, vm_offset_t va) 124877957Sbenno{ 124996353Sbenno struct pvo_entry *pvo; 1250134329Salc vm_paddr_t pa; 125196353Sbenno 1252134329Salc PMAP_LOCK(pm); 1253152180Sgrehan pvo = moea_pvo_find_va(pm, va & ~ADDR_POFF, NULL); 1254134329Salc if (pvo == NULL) 1255134329Salc pa = 0; 1256134329Salc else 1257183290Snwhitehorn pa = (pvo->pvo_pte.pte.pte_lo & PTE_RPGN) | (va & ADDR_POFF); 1258134329Salc PMAP_UNLOCK(pm); 1259134329Salc return (pa); 126077957Sbenno} 126177957Sbenno 126277957Sbenno/* 1263120336Sgrehan * Atomically extract and hold the physical page with the given 1264120336Sgrehan * pmap 
and virtual address pair if that mapping permits the given 1265120336Sgrehan * protection. 1266120336Sgrehan */ 1267120336Sgrehanvm_page_t 1268152180Sgrehanmoea_extract_and_hold(mmu_t mmu, pmap_t pmap, vm_offset_t va, vm_prot_t prot) 1269120336Sgrehan{ 1270132666Salc struct pvo_entry *pvo; 1271120336Sgrehan vm_page_t m; 1272207410Skmacy vm_paddr_t pa; 1273207410Skmacy 1274120336Sgrehan m = NULL; 1275207410Skmacy pa = 0; 1276134329Salc PMAP_LOCK(pmap); 1277207410Skmacyretry: 1278152180Sgrehan pvo = moea_pvo_find_va(pmap, va & ~ADDR_POFF, NULL); 1279183290Snwhitehorn if (pvo != NULL && (pvo->pvo_pte.pte.pte_hi & PTE_VALID) && 1280183290Snwhitehorn ((pvo->pvo_pte.pte.pte_lo & PTE_PP) == PTE_RW || 1281132666Salc (prot & VM_PROT_WRITE) == 0)) { 1282207410Skmacy if (vm_page_pa_tryrelock(pmap, pvo->pvo_pte.pte.pte_lo & PTE_RPGN, &pa)) 1283207410Skmacy goto retry; 1284183290Snwhitehorn m = PHYS_TO_VM_PAGE(pvo->pvo_pte.pte.pte_lo & PTE_RPGN); 1285120336Sgrehan vm_page_hold(m); 1286120336Sgrehan } 1287207410Skmacy PA_UNLOCK_COND(pa); 1288134329Salc PMAP_UNLOCK(pmap); 1289120336Sgrehan return (m); 1290120336Sgrehan} 1291120336Sgrehan 129290643Sbennovoid 1293152180Sgrehanmoea_init(mmu_t mmu) 129477957Sbenno{ 129577957Sbenno 1296152180Sgrehan moea_upvo_zone = uma_zcreate("UPVO entry", sizeof (struct pvo_entry), 1297125442Sgrehan NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 1298125442Sgrehan UMA_ZONE_VM | UMA_ZONE_NOFREE); 1299152180Sgrehan moea_mpvo_zone = uma_zcreate("MPVO entry", sizeof(struct pvo_entry), 1300125442Sgrehan NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 1301125442Sgrehan UMA_ZONE_VM | UMA_ZONE_NOFREE); 1302152180Sgrehan moea_initialized = TRUE; 130377957Sbenno} 130477957Sbenno 130590643Sbennoboolean_t 1306207155Salcmoea_is_referenced(mmu_t mmu, vm_page_t m) 1307207155Salc{ 1308238357Salc boolean_t rv; 1309207155Salc 1310224746Skib KASSERT((m->oflags & VPO_UNMANAGED) == 0, 1311208574Salc ("moea_is_referenced: page %p is not managed", m)); 1312238357Salc 
rw_wlock(&pvh_global_lock); 1313238357Salc rv = moea_query_bit(m, PTE_REF); 1314238357Salc rw_wunlock(&pvh_global_lock); 1315238357Salc return (rv); 1316207155Salc} 1317207155Salc 1318207155Salcboolean_t 1319152180Sgrehanmoea_is_modified(mmu_t mmu, vm_page_t m) 132090643Sbenno{ 1321238357Salc boolean_t rv; 132296353Sbenno 1323224746Skib KASSERT((m->oflags & VPO_UNMANAGED) == 0, 1324208504Salc ("moea_is_modified: page %p is not managed", m)); 1325208504Salc 1326208504Salc /* 1327254138Sattilio * If the page is not exclusive busied, then PGA_WRITEABLE cannot be 1328225418Skib * concurrently set while the object is locked. Thus, if PGA_WRITEABLE 1329208504Salc * is clear, no PTEs can have PTE_CHG set. 1330208504Salc */ 1331248084Sattilio VM_OBJECT_ASSERT_WLOCKED(m->object); 1332254138Sattilio if (!vm_page_xbusied(m) && (m->aflags & PGA_WRITEABLE) == 0) 133396353Sbenno return (FALSE); 1334238357Salc rw_wlock(&pvh_global_lock); 1335238357Salc rv = moea_query_bit(m, PTE_CHG); 1336238357Salc rw_wunlock(&pvh_global_lock); 1337238357Salc return (rv); 133890643Sbenno} 133990643Sbenno 1340214617Salcboolean_t 1341214617Salcmoea_is_prefaultable(mmu_t mmu, pmap_t pmap, vm_offset_t va) 1342214617Salc{ 1343214617Salc struct pvo_entry *pvo; 1344214617Salc boolean_t rv; 1345214617Salc 1346214617Salc PMAP_LOCK(pmap); 1347214617Salc pvo = moea_pvo_find_va(pmap, va & ~ADDR_POFF, NULL); 1348214617Salc rv = pvo == NULL || (pvo->pvo_pte.pte.pte_hi & PTE_VALID) == 0; 1349214617Salc PMAP_UNLOCK(pmap); 1350214617Salc return (rv); 1351214617Salc} 1352214617Salc 135390643Sbennovoid 1354152180Sgrehanmoea_clear_modify(mmu_t mmu, vm_page_t m) 1355110172Sgrehan{ 1356110172Sgrehan 1357224746Skib KASSERT((m->oflags & VPO_UNMANAGED) == 0, 1358208504Salc ("moea_clear_modify: page %p is not managed", m)); 1359248084Sattilio VM_OBJECT_ASSERT_WLOCKED(m->object); 1360254138Sattilio KASSERT(!vm_page_xbusied(m), 1361254138Sattilio ("moea_clear_modify: page %p is exclusive busy", m)); 1362208504Salc 
1363208504Salc /* 1364225418Skib * If the page is not PGA_WRITEABLE, then no PTEs can have PTE_CHG 1365208504Salc * set. If the object containing the page is locked and the page is 1366254138Sattilio * not exclusive busied, then PGA_WRITEABLE cannot be concurrently set. 1367208504Salc */ 1368225418Skib if ((m->aflags & PGA_WRITEABLE) == 0) 1369110172Sgrehan return; 1370238357Salc rw_wlock(&pvh_global_lock); 1371208990Salc moea_clear_bit(m, PTE_CHG); 1372238357Salc rw_wunlock(&pvh_global_lock); 1373110172Sgrehan} 1374110172Sgrehan 137591403Ssilby/* 1376160889Salc * Clear the write and modified bits in each of the given page's mappings. 1377160889Salc */ 1378160889Salcvoid 1379160889Salcmoea_remove_write(mmu_t mmu, vm_page_t m) 1380160889Salc{ 1381160889Salc struct pvo_entry *pvo; 1382160889Salc struct pte *pt; 1383160889Salc pmap_t pmap; 1384160889Salc u_int lo; 1385160889Salc 1386224746Skib KASSERT((m->oflags & VPO_UNMANAGED) == 0, 1387208175Salc ("moea_remove_write: page %p is not managed", m)); 1388208175Salc 1389208175Salc /* 1390254138Sattilio * If the page is not exclusive busied, then PGA_WRITEABLE cannot be 1391254138Sattilio * set by another thread while the object is locked. Thus, 1392254138Sattilio * if PGA_WRITEABLE is clear, no page table entries need updating. 
1393208175Salc */ 1394248084Sattilio VM_OBJECT_ASSERT_WLOCKED(m->object); 1395254138Sattilio if (!vm_page_xbusied(m) && (m->aflags & PGA_WRITEABLE) == 0) 1396160889Salc return; 1397238159Salc rw_wlock(&pvh_global_lock); 1398160889Salc lo = moea_attr_fetch(m); 1399183094Smarcel powerpc_sync(); 1400160889Salc LIST_FOREACH(pvo, vm_page_to_pvoh(m), pvo_vlink) { 1401160889Salc pmap = pvo->pvo_pmap; 1402160889Salc PMAP_LOCK(pmap); 1403183290Snwhitehorn if ((pvo->pvo_pte.pte.pte_lo & PTE_PP) != PTE_BR) { 1404160889Salc pt = moea_pvo_to_pte(pvo, -1); 1405183290Snwhitehorn pvo->pvo_pte.pte.pte_lo &= ~PTE_PP; 1406183290Snwhitehorn pvo->pvo_pte.pte.pte_lo |= PTE_BR; 1407160889Salc if (pt != NULL) { 1408183290Snwhitehorn moea_pte_synch(pt, &pvo->pvo_pte.pte); 1409183290Snwhitehorn lo |= pvo->pvo_pte.pte.pte_lo; 1410183290Snwhitehorn pvo->pvo_pte.pte.pte_lo &= ~PTE_CHG; 1411183290Snwhitehorn moea_pte_change(pt, &pvo->pvo_pte.pte, 1412160889Salc pvo->pvo_vaddr); 1413160889Salc mtx_unlock(&moea_table_mutex); 1414160889Salc } 1415160889Salc } 1416160889Salc PMAP_UNLOCK(pmap); 1417160889Salc } 1418160889Salc if ((lo & PTE_CHG) != 0) { 1419160889Salc moea_attr_clear(m, PTE_CHG); 1420160889Salc vm_page_dirty(m); 1421160889Salc } 1422225418Skib vm_page_aflag_clear(m, PGA_WRITEABLE); 1423238159Salc rw_wunlock(&pvh_global_lock); 1424160889Salc} 1425160889Salc 1426160889Salc/* 1427152180Sgrehan * moea_ts_referenced: 142891403Ssilby * 142991403Ssilby * Return a count of reference bits for a page, clearing those bits. 143091403Ssilby * It is not necessary for every reference bit to be cleared, but it 143191403Ssilby * is necessary that 0 only be returned when there are truly no 143291403Ssilby * reference bits set. 143391403Ssilby * 143491403Ssilby * XXX: The exact number of bits to check and clear is a matter that 143591403Ssilby * should be tested and standardized at some point in the future for 143691403Ssilby * optimal aging of shared pages. 
143791403Ssilby */ 1438238357Salcint 1439152180Sgrehanmoea_ts_referenced(mmu_t mmu, vm_page_t m) 144090643Sbenno{ 1441238357Salc int count; 1442110172Sgrehan 1443224746Skib KASSERT((m->oflags & VPO_UNMANAGED) == 0, 1444208990Salc ("moea_ts_referenced: page %p is not managed", m)); 1445238357Salc rw_wlock(&pvh_global_lock); 1446238357Salc count = moea_clear_bit(m, PTE_REF); 1447238357Salc rw_wunlock(&pvh_global_lock); 1448238357Salc return (count); 144990643Sbenno} 145090643Sbenno 145177957Sbenno/* 1452213307Snwhitehorn * Modify the WIMG settings of all mappings for a page. 1453213307Snwhitehorn */ 1454213307Snwhitehornvoid 1455213307Snwhitehornmoea_page_set_memattr(mmu_t mmu, vm_page_t m, vm_memattr_t ma) 1456213307Snwhitehorn{ 1457213307Snwhitehorn struct pvo_entry *pvo; 1458213335Snwhitehorn struct pvo_head *pvo_head; 1459213307Snwhitehorn struct pte *pt; 1460213307Snwhitehorn pmap_t pmap; 1461213307Snwhitehorn u_int lo; 1462213307Snwhitehorn 1463224746Skib if ((m->oflags & VPO_UNMANAGED) != 0) { 1464213335Snwhitehorn m->md.mdpg_cache_attrs = ma; 1465213335Snwhitehorn return; 1466213335Snwhitehorn } 1467213335Snwhitehorn 1468238159Salc rw_wlock(&pvh_global_lock); 1469213335Snwhitehorn pvo_head = vm_page_to_pvoh(m); 1470213307Snwhitehorn lo = moea_calc_wimg(VM_PAGE_TO_PHYS(m), ma); 1471213335Snwhitehorn 1472213335Snwhitehorn LIST_FOREACH(pvo, pvo_head, pvo_vlink) { 1473213307Snwhitehorn pmap = pvo->pvo_pmap; 1474213307Snwhitehorn PMAP_LOCK(pmap); 1475213307Snwhitehorn pt = moea_pvo_to_pte(pvo, -1); 1476213307Snwhitehorn pvo->pvo_pte.pte.pte_lo &= ~PTE_WIMG; 1477213307Snwhitehorn pvo->pvo_pte.pte.pte_lo |= lo; 1478213307Snwhitehorn if (pt != NULL) { 1479213307Snwhitehorn moea_pte_change(pt, &pvo->pvo_pte.pte, 1480213307Snwhitehorn pvo->pvo_vaddr); 1481213307Snwhitehorn if (pvo->pvo_pmap == kernel_pmap) 1482213307Snwhitehorn isync(); 1483213307Snwhitehorn } 1484213307Snwhitehorn mtx_unlock(&moea_table_mutex); 1485213307Snwhitehorn PMAP_UNLOCK(pmap); 
1486213307Snwhitehorn } 1487213307Snwhitehorn m->md.mdpg_cache_attrs = ma; 1488238159Salc rw_wunlock(&pvh_global_lock); 1489213307Snwhitehorn} 1490213307Snwhitehorn 1491213307Snwhitehorn/* 149290643Sbenno * Map a wired page into kernel virtual address space. 149377957Sbenno */ 149477957Sbennovoid 1495235936Srajmoea_kenter(mmu_t mmu, vm_offset_t va, vm_paddr_t pa) 149677957Sbenno{ 1497213307Snwhitehorn 1498213307Snwhitehorn moea_kenter_attr(mmu, va, pa, VM_MEMATTR_DEFAULT); 1499213307Snwhitehorn} 1500213307Snwhitehorn 1501213307Snwhitehornvoid 1502213307Snwhitehornmoea_kenter_attr(mmu_t mmu, vm_offset_t va, vm_offset_t pa, vm_memattr_t ma) 1503213307Snwhitehorn{ 150490643Sbenno u_int pte_lo; 150590643Sbenno int error; 150677957Sbenno 150790643Sbenno#if 0 150890643Sbenno if (va < VM_MIN_KERNEL_ADDRESS) 1509152180Sgrehan panic("moea_kenter: attempt to enter non-kernel address %#x", 151090643Sbenno va); 151190643Sbenno#endif 151277957Sbenno 1513213307Snwhitehorn pte_lo = moea_calc_wimg(pa, ma); 151477957Sbenno 1515135172Salc PMAP_LOCK(kernel_pmap); 1516152180Sgrehan error = moea_pvo_enter(kernel_pmap, moea_upvo_zone, 1517152180Sgrehan &moea_pvo_kunmanaged, va, pa, pte_lo, PVO_WIRED); 151890643Sbenno 151990643Sbenno if (error != 0 && error != ENOENT) 1520152180Sgrehan panic("moea_kenter: failed to enter va %#x pa %#x: %d", va, 152190643Sbenno pa, error); 152290643Sbenno 1523135172Salc PMAP_UNLOCK(kernel_pmap); 152477957Sbenno} 152577957Sbenno 152694838Sbenno/* 152794838Sbenno * Extract the physical page address associated with the given kernel virtual 152894838Sbenno * address. 
 */
vm_paddr_t
moea_kextract(mmu_t mmu, vm_offset_t va)
{
	struct pvo_entry *pvo;
	vm_paddr_t pa;

	/*
	 * Allow direct mappings on 32-bit OEA
	 */
	if (va < VM_MIN_KERNEL_ADDRESS) {
		return (va);
	}

	PMAP_LOCK(kernel_pmap);
	pvo = moea_pvo_find_va(kernel_pmap, va & ~ADDR_POFF, NULL);
	KASSERT(pvo != NULL, ("moea_kextract: no addr found"));
	/* Physical page from the PTE plus the offset within the page. */
	pa = (pvo->pvo_pte.pte.pte_lo & PTE_RPGN) | (va & ADDR_POFF);
	PMAP_UNLOCK(kernel_pmap);
	return (pa);
}

/*
 * Remove a wired page from kernel virtual address space.
 */
void
moea_kremove(mmu_t mmu, vm_offset_t va)
{

	moea_remove(mmu, kernel_pmap, va, va + PAGE_SIZE);
}

/*
 * Map a range of physical addresses into kernel virtual address space.
 *
 * The value passed in *virt is a suggested virtual address for the mapping.
 * Architectures which can support a direct-mapped physical to virtual region
 * can return the appropriate address within that region, leaving '*virt'
 * unchanged.  We cannot and therefore do not; *virt is updated with the
 * first usable address after the mapped region.
 */
vm_offset_t
moea_map(mmu_t mmu, vm_offset_t *virt, vm_paddr_t pa_start,
    vm_paddr_t pa_end, int prot)
{
	vm_offset_t sva, va;

	sva = *virt;
	va = sva;
	for (; pa_start < pa_end; pa_start += PAGE_SIZE, va += PAGE_SIZE)
		moea_kenter(mmu, va, pa_start);
	*virt = va;
	return (sva);
}

/*
 * Returns true if the pmap's pv is one of the first
 * 16 pvs linked to from this page.  This count may
 * be changed upwards or downwards in the future; it
 * is only necessary that true be returned for a small
 * subset of pmaps for proper page aging.
 */
boolean_t
moea_page_exists_quick(mmu_t mmu, pmap_t pmap, vm_page_t m)
{
	int loops;
	struct pvo_entry *pvo;
	boolean_t rv;

	KASSERT((m->oflags & VPO_UNMANAGED) == 0,
	    ("moea_page_exists_quick: page %p is not managed", m));
	loops = 0;
	rv = FALSE;
	rw_wlock(&pvh_global_lock);
	LIST_FOREACH(pvo, vm_page_to_pvoh(m), pvo_vlink) {
		if (pvo->pvo_pmap == pmap) {
			rv = TRUE;
			break;
		}
		/* Only inspect the first 16 mappings; see comment above. */
		if (++loops >= 16)
			break;
	}
	rw_wunlock(&pvh_global_lock);
	return (rv);
}

/*
 * Return the number of managed mappings to the given physical page
 * that are wired.
 */
int
moea_page_wired_mappings(mmu_t mmu, vm_page_t m)
{
	struct pvo_entry *pvo;
	int count;

	count = 0;
	/* Unmanaged pages have no pvo list to walk. */
	if ((m->oflags & VPO_UNMANAGED) != 0)
		return (count);
	rw_wlock(&pvh_global_lock);
	LIST_FOREACH(pvo, vm_page_to_pvoh(m), pvo_vlink)
		if ((pvo->pvo_vaddr & PVO_WIRED) != 0)
			count++;
	rw_wunlock(&pvh_global_lock);
	return (count);
}

/* Rolling state used to generate fresh VSIDs; protected by moea_vsid_mutex. */
static u_int moea_vsidcontext;

/*
 * Initialize a pmap: set up its pvo tree and allocate a group of 16
 * segment-register VSIDs for it from the global VSID bitmap.
 */
void
moea_pinit(mmu_t mmu, pmap_t pmap)
{
	int i, mask;
	u_int entropy;

	KASSERT((int)pmap < VM_MIN_KERNEL_ADDRESS, ("moea_pinit: virt pmap"));
	RB_INIT(&pmap->pmap_pvo);

	entropy = 0;
	__asm __volatile("mftb %0" : "=r"(entropy));

	/*
	 * Record the physically-addressable alias of this pmap; fall back
	 * to the pmap itself when no mapping is found.
	 */
	if ((pmap->pmap_phys = (pmap_t)moea_kextract(mmu, (vm_offset_t)pmap))
	    == NULL) {
		pmap->pmap_phys = pmap;
	}


	mtx_lock(&moea_vsid_mutex);
	/*
	 * Allocate some segment registers for this pmap.
	 */
	for (i = 0; i < NPMAPS; i += VSID_NBPW) {
		u_int hash, n;

		/*
		 * Create a new value by multiplying by a prime and adding in
		 * entropy from the timebase register.  This is to make the
		 * VSID more random so that the PT hash function collides
		 * less often.  (Note that the prime causes gcc to do shifts
		 * instead of a multiply.)
		 */
		moea_vsidcontext = (moea_vsidcontext * 0x1105) + entropy;
		hash = moea_vsidcontext & (NPMAPS - 1);
		if (hash == 0)		/* 0 is special, avoid it */
			continue;
		n = hash >> 5;
		mask = 1 << (hash & (VSID_NBPW - 1));
		hash = (moea_vsidcontext & 0xfffff);
		if (moea_vsid_bitmap[n] & mask) {	/* collision? */
			/* anything free in this bucket? */
			if (moea_vsid_bitmap[n] == 0xffffffff) {
				entropy = (moea_vsidcontext >> 20);
				continue;
			}
			i = ffs(~moea_vsid_bitmap[n]) - 1;
			mask = 1 << i;
			hash &= 0xfffff & ~(VSID_NBPW - 1);
			hash |= i;
		}
		KASSERT(!(moea_vsid_bitmap[n] & mask),
		    ("Allocating in-use VSID group %#x\n", hash));
		moea_vsid_bitmap[n] |= mask;
		for (i = 0; i < 16; i++)
			pmap->pm_sr[i] = VSID_MAKE(i, hash);
		mtx_unlock(&moea_vsid_mutex);
		return;
	}

	mtx_unlock(&moea_vsid_mutex);
	panic("moea_pinit: out of segments");
}

/*
 * Initialize the pmap associated with process 0.
 */
void
moea_pinit0(mmu_t mmu, pmap_t pm)
{

	PMAP_LOCK_INIT(pm);
	moea_pinit(mmu, pm);
	bzero(&pm->pm_stats, sizeof(pm->pm_stats));
}

/*
 * Set the physical protection on the specified range of this map as requested.
 */
void
moea_protect(mmu_t mmu, pmap_t pm, vm_offset_t sva, vm_offset_t eva,
    vm_prot_t prot)
{
	struct pvo_entry *pvo, *tpvo, key;
	struct pte *pt;

	KASSERT(pm == &curproc->p_vmspace->vm_pmap || pm == kernel_pmap,
	    ("moea_protect: non current pmap"));

	/* Removing all access permissions is just a removal. */
	if ((prot & VM_PROT_READ) == VM_PROT_NONE) {
		moea_remove(mmu, pm, sva, eva);
		return;
	}

	rw_wlock(&pvh_global_lock);
	PMAP_LOCK(pm);
	key.pvo_vaddr = sva;
	for (pvo = RB_NFIND(pvo_tree, &pm->pmap_pvo, &key);
	    pvo != NULL && PVO_VADDR(pvo) < eva; pvo = tpvo) {
		tpvo = RB_NEXT(pvo_tree, &pm->pmap_pvo, pvo);
		if ((prot & VM_PROT_EXECUTE) == 0)
			pvo->pvo_vaddr &= ~PVO_EXECUTABLE;

		/*
		 * Grab the PTE pointer before we diddle with the cached PTE
		 * copy.
		 */
		pt = moea_pvo_to_pte(pvo, -1);
		/*
		 * Change the protection of the page.
		 */
		pvo->pvo_pte.pte.pte_lo &= ~PTE_PP;
		pvo->pvo_pte.pte.pte_lo |= PTE_BR;

		/*
		 * If the PVO is in the page table, update that pte as well.
		 * moea_pvo_to_pte() returned with the table mutex held.
		 */
		if (pt != NULL) {
			moea_pte_change(pt, &pvo->pvo_pte.pte, pvo->pvo_vaddr);
			mtx_unlock(&moea_table_mutex);
		}
	}
	rw_wunlock(&pvh_global_lock);
	PMAP_UNLOCK(pm);
}

/*
 * Map a list of wired pages into kernel virtual address space.  This is
 * intended for temporary mappings which do not need page modification or
 * references recorded.  Existing mappings in the region are overwritten.
 */
void
moea_qenter(mmu_t mmu, vm_offset_t sva, vm_page_t *m, int count)
{
	vm_offset_t va;

	va = sva;
	while (count-- > 0) {
		moea_kenter(mmu, va, VM_PAGE_TO_PHYS(*m));
		va += PAGE_SIZE;
		m++;
	}
}

/*
 * Remove page mappings from kernel virtual address space.  Intended for
 * temporary mappings entered by moea_qenter.
 */
void
moea_qremove(mmu_t mmu, vm_offset_t sva, int count)
{
	vm_offset_t va;

	va = sva;
	while (count-- > 0) {
		moea_kremove(mmu, va);
		va += PAGE_SIZE;
	}
}

/*
 * Release the resources held by a pmap: return its VSID group to the
 * global bitmap.
 */
void
moea_release(mmu_t mmu, pmap_t pmap)
{
	int idx, mask;

	/*
	 * Free segment register's VSID
	 */
	if (pmap->pm_sr[0] == 0)
		panic("moea_release");

	mtx_lock(&moea_vsid_mutex);
	idx = VSID_TO_HASH(pmap->pm_sr[0]) & (NPMAPS-1);
	mask = 1 << (idx % VSID_NBPW);
	idx /= VSID_NBPW;
	moea_vsid_bitmap[idx] &= ~mask;
	mtx_unlock(&moea_vsid_mutex);
}

/*
 * Remove the given range of addresses from the specified map.
 */
void
moea_remove(mmu_t mmu, pmap_t pm, vm_offset_t sva, vm_offset_t eva)
{
	struct pvo_entry *pvo, *tpvo, key;

	rw_wlock(&pvh_global_lock);
	PMAP_LOCK(pm);
	key.pvo_vaddr = sva;
	for (pvo = RB_NFIND(pvo_tree, &pm->pmap_pvo, &key);
	    pvo != NULL && PVO_VADDR(pvo) < eva; pvo = tpvo) {
		/* Fetch the successor first; moea_pvo_remove() frees pvo. */
		tpvo = RB_NEXT(pvo_tree, &pm->pmap_pvo, pvo);
		moea_pvo_remove(pvo, -1);
	}
	PMAP_UNLOCK(pm);
	rw_wunlock(&pvh_global_lock);
}

/*
 * Remove physical page from all pmaps in which it resides.  moea_pvo_remove()
 * will reflect changes in pte's back to the vm_page.
 */
void
moea_remove_all(mmu_t mmu, vm_page_t m)
{
	struct pvo_head *pvo_head;
	struct pvo_entry *pvo, *next_pvo;
	pmap_t pmap;

	rw_wlock(&pvh_global_lock);
	pvo_head = vm_page_to_pvoh(m);
	for (pvo = LIST_FIRST(pvo_head); pvo != NULL; pvo = next_pvo) {
		next_pvo = LIST_NEXT(pvo, pvo_vlink);

		pmap = pvo->pvo_pmap;
		PMAP_LOCK(pmap);
		moea_pvo_remove(pvo, -1);
		PMAP_UNLOCK(pmap);
	}
	/* Propagate any pending change bit to the vm_page before clearing. */
	if ((m->aflags & PGA_WRITEABLE) && moea_query_bit(m, PTE_CHG)) {
		moea_attr_clear(m, PTE_CHG);
		vm_page_dirty(m);
	}
	vm_page_aflag_clear(m, PGA_WRITEABLE);
	rw_wunlock(&pvh_global_lock);
}

/*
 * Allocate a physical page of memory directly
from the phys_avail map. 1867152180Sgrehan * Can only be called from moea_bootstrap before avail start and end are 186890643Sbenno * calculated. 186983682Smp */ 187090643Sbennostatic vm_offset_t 1871152180Sgrehanmoea_bootstrap_alloc(vm_size_t size, u_int align) 187283682Smp{ 187390643Sbenno vm_offset_t s, e; 187490643Sbenno int i, j; 187583682Smp 187690643Sbenno size = round_page(size); 187790643Sbenno for (i = 0; phys_avail[i + 1] != 0; i += 2) { 187890643Sbenno if (align != 0) 187990643Sbenno s = (phys_avail[i] + align - 1) & ~(align - 1); 188090643Sbenno else 188190643Sbenno s = phys_avail[i]; 188290643Sbenno e = s + size; 188390643Sbenno 188490643Sbenno if (s < phys_avail[i] || e > phys_avail[i + 1]) 188590643Sbenno continue; 188690643Sbenno 188790643Sbenno if (s == phys_avail[i]) { 188890643Sbenno phys_avail[i] += size; 188990643Sbenno } else if (e == phys_avail[i + 1]) { 189090643Sbenno phys_avail[i + 1] -= size; 189190643Sbenno } else { 189290643Sbenno for (j = phys_avail_count * 2; j > i; j -= 2) { 189390643Sbenno phys_avail[j] = phys_avail[j - 2]; 189490643Sbenno phys_avail[j + 1] = phys_avail[j - 1]; 189590643Sbenno } 189690643Sbenno 189790643Sbenno phys_avail[i + 3] = phys_avail[i + 1]; 189890643Sbenno phys_avail[i + 1] = s; 189990643Sbenno phys_avail[i + 2] = e; 190090643Sbenno phys_avail_count++; 190190643Sbenno } 190290643Sbenno 190390643Sbenno return (s); 190483682Smp } 1905152180Sgrehan panic("moea_bootstrap_alloc: could not allocate memory"); 190683682Smp} 190783682Smp 190890643Sbennostatic void 1909152180Sgrehanmoea_syncicache(vm_offset_t pa, vm_size_t len) 191077957Sbenno{ 191190643Sbenno __syncicache((void *)pa, len); 191290643Sbenno} 191377957Sbenno 191490643Sbennostatic int 1915152180Sgrehanmoea_pvo_enter(pmap_t pm, uma_zone_t zone, struct pvo_head *pvo_head, 191690643Sbenno vm_offset_t va, vm_offset_t pa, u_int pte_lo, int flags) 191777957Sbenno{ 191890643Sbenno struct pvo_entry *pvo; 191990643Sbenno u_int sr; 192090643Sbenno int first; 
192190643Sbenno u_int ptegidx; 192290643Sbenno int i; 1923103604Sgrehan int bootstrap; 192477957Sbenno 1925152180Sgrehan moea_pvo_enter_calls++; 192696250Sbenno first = 0; 1927103604Sgrehan bootstrap = 0; 192890643Sbenno 192990643Sbenno /* 193090643Sbenno * Compute the PTE Group index. 193190643Sbenno */ 193290643Sbenno va &= ~ADDR_POFF; 193390643Sbenno sr = va_to_sr(pm->pm_sr, va); 193490643Sbenno ptegidx = va_to_pteg(sr, va); 193590643Sbenno 193690643Sbenno /* 193790643Sbenno * Remove any existing mapping for this page. Reuse the pvo entry if 193890643Sbenno * there is a mapping. 193990643Sbenno */ 1940152180Sgrehan mtx_lock(&moea_table_mutex); 1941152180Sgrehan LIST_FOREACH(pvo, &moea_pvo_table[ptegidx], pvo_olink) { 194290643Sbenno if (pvo->pvo_pmap == pm && PVO_VADDR(pvo) == va) { 1943183290Snwhitehorn if ((pvo->pvo_pte.pte.pte_lo & PTE_RPGN) == pa && 1944183290Snwhitehorn (pvo->pvo_pte.pte.pte_lo & PTE_PP) == 194596334Sbenno (pte_lo & PTE_PP)) { 1946152180Sgrehan mtx_unlock(&moea_table_mutex); 194792521Sbenno return (0); 194896334Sbenno } 1949152180Sgrehan moea_pvo_remove(pvo, -1); 195090643Sbenno break; 195190643Sbenno } 195290643Sbenno } 195390643Sbenno 195490643Sbenno /* 195590643Sbenno * If we aren't overwriting a mapping, try to allocate. 
195690643Sbenno */ 1957152180Sgrehan if (moea_initialized) { 195892847Sjeff pvo = uma_zalloc(zone, M_NOWAIT); 195992521Sbenno } else { 1960152180Sgrehan if (moea_bpvo_pool_index >= BPVO_POOL_SIZE) { 1961152180Sgrehan panic("moea_enter: bpvo pool exhausted, %d, %d, %d", 1962152180Sgrehan moea_bpvo_pool_index, BPVO_POOL_SIZE, 196399037Sbenno BPVO_POOL_SIZE * sizeof(struct pvo_entry)); 196492521Sbenno } 1965152180Sgrehan pvo = &moea_bpvo_pool[moea_bpvo_pool_index]; 1966152180Sgrehan moea_bpvo_pool_index++; 1967103604Sgrehan bootstrap = 1; 196892521Sbenno } 196990643Sbenno 197090643Sbenno if (pvo == NULL) { 1971152180Sgrehan mtx_unlock(&moea_table_mutex); 197290643Sbenno return (ENOMEM); 197390643Sbenno } 197490643Sbenno 1975152180Sgrehan moea_pvo_entries++; 197690643Sbenno pvo->pvo_vaddr = va; 197790643Sbenno pvo->pvo_pmap = pm; 1978152180Sgrehan LIST_INSERT_HEAD(&moea_pvo_table[ptegidx], pvo, pvo_olink); 197990643Sbenno pvo->pvo_vaddr &= ~ADDR_POFF; 198090643Sbenno if (flags & VM_PROT_EXECUTE) 198190643Sbenno pvo->pvo_vaddr |= PVO_EXECUTABLE; 198290643Sbenno if (flags & PVO_WIRED) 198390643Sbenno pvo->pvo_vaddr |= PVO_WIRED; 1984152180Sgrehan if (pvo_head != &moea_pvo_kunmanaged) 198590643Sbenno pvo->pvo_vaddr |= PVO_MANAGED; 1986103604Sgrehan if (bootstrap) 1987103604Sgrehan pvo->pvo_vaddr |= PVO_BOOTSTRAP; 1988142416Sgrehan 1989183290Snwhitehorn moea_pte_create(&pvo->pvo_pte.pte, sr, va, pa | pte_lo); 199090643Sbenno 199190643Sbenno /* 1992228412Snwhitehorn * Add to pmap list 1993228412Snwhitehorn */ 1994235689Snwhitehorn RB_INSERT(pvo_tree, &pm->pmap_pvo, pvo); 1995228412Snwhitehorn 1996228412Snwhitehorn /* 199790643Sbenno * Remember if the list was empty and therefore will be the first 199890643Sbenno * item. 
199990643Sbenno */ 200096250Sbenno if (LIST_FIRST(pvo_head) == NULL) 200196250Sbenno first = 1; 2002142416Sgrehan LIST_INSERT_HEAD(pvo_head, pvo, pvo_vlink); 200390643Sbenno 2004183290Snwhitehorn if (pvo->pvo_pte.pte.pte_lo & PVO_WIRED) 2005134453Salc pm->pm_stats.wired_count++; 2006134453Salc pm->pm_stats.resident_count++; 200790643Sbenno 2008183290Snwhitehorn i = moea_pte_insert(ptegidx, &pvo->pvo_pte.pte); 2009253976Sjhibbits KASSERT(i < 8, ("Invalid PTE index")); 201090643Sbenno if (i >= 0) { 201190643Sbenno PVO_PTEGIDX_SET(pvo, i); 201290643Sbenno } else { 2013152180Sgrehan panic("moea_pvo_enter: overflow"); 2014152180Sgrehan moea_pte_overflow++; 201590643Sbenno } 2016152180Sgrehan mtx_unlock(&moea_table_mutex); 201790643Sbenno 201890643Sbenno return (first ? ENOENT : 0); 201977957Sbenno} 202077957Sbenno 202190643Sbennostatic void 2022152180Sgrehanmoea_pvo_remove(struct pvo_entry *pvo, int pteidx) 202377957Sbenno{ 202490643Sbenno struct pte *pt; 202577957Sbenno 202690643Sbenno /* 202790643Sbenno * If there is an active pte entry, we need to deactivate it (and 202890643Sbenno * save the ref & cfg bits). 202990643Sbenno */ 2030152180Sgrehan pt = moea_pvo_to_pte(pvo, pteidx); 203190643Sbenno if (pt != NULL) { 2032183290Snwhitehorn moea_pte_unset(pt, &pvo->pvo_pte.pte, pvo->pvo_vaddr); 2033159928Salc mtx_unlock(&moea_table_mutex); 203490643Sbenno PVO_PTEGIDX_CLR(pvo); 203590643Sbenno } else { 2036152180Sgrehan moea_pte_overflow--; 2037142416Sgrehan } 203890643Sbenno 203990643Sbenno /* 204090643Sbenno * Update our statistics. 204190643Sbenno */ 204290643Sbenno pvo->pvo_pmap->pm_stats.resident_count--; 2043183290Snwhitehorn if (pvo->pvo_pte.pte.pte_lo & PVO_WIRED) 204490643Sbenno pvo->pvo_pmap->pm_stats.wired_count--; 204590643Sbenno 204690643Sbenno /* 204790643Sbenno * Save the REF/CHG bits into their cache if the page is managed. 
204890643Sbenno */ 2049224746Skib if ((pvo->pvo_vaddr & PVO_MANAGED) == PVO_MANAGED) { 205090643Sbenno struct vm_page *pg; 205190643Sbenno 2052183290Snwhitehorn pg = PHYS_TO_VM_PAGE(pvo->pvo_pte.pte.pte_lo & PTE_RPGN); 205390643Sbenno if (pg != NULL) { 2054183290Snwhitehorn moea_attr_save(pg, pvo->pvo_pte.pte.pte_lo & 205590643Sbenno (PTE_REF | PTE_CHG)); 205690643Sbenno } 205790643Sbenno } 205890643Sbenno 205990643Sbenno /* 2060228412Snwhitehorn * Remove this PVO from the PV and pmap lists. 206190643Sbenno */ 206290643Sbenno LIST_REMOVE(pvo, pvo_vlink); 2063235689Snwhitehorn RB_REMOVE(pvo_tree, &pvo->pvo_pmap->pmap_pvo, pvo); 206490643Sbenno 206590643Sbenno /* 206690643Sbenno * Remove this from the overflow list and return it to the pool 206790643Sbenno * if we aren't going to reuse it. 206890643Sbenno */ 206990643Sbenno LIST_REMOVE(pvo, pvo_olink); 207092521Sbenno if (!(pvo->pvo_vaddr & PVO_BOOTSTRAP)) 2071152180Sgrehan uma_zfree(pvo->pvo_vaddr & PVO_MANAGED ? moea_mpvo_zone : 2072152180Sgrehan moea_upvo_zone, pvo); 2073152180Sgrehan moea_pvo_entries--; 2074152180Sgrehan moea_pvo_remove_calls++; 207577957Sbenno} 207677957Sbenno 207790643Sbennostatic __inline int 2078152180Sgrehanmoea_pvo_pte_index(const struct pvo_entry *pvo, int ptegidx) 207977957Sbenno{ 208090643Sbenno int pteidx; 208177957Sbenno 208290643Sbenno /* 208390643Sbenno * We can find the actual pte entry without searching by grabbing 208490643Sbenno * the PTEG index from 3 unused bits in pte_lo[11:9] and by 208590643Sbenno * noticing the HID bit. 
	 */
	pteidx = ptegidx * 8 + PVO_PTEGIDX_GET(pvo);
	if (pvo->pvo_pte.pte.pte_hi & PTE_HID)
		pteidx ^= moea_pteg_mask * 8;

	return (pteidx);
}

/*
 * Look up the pvo for (pm, va) in the overflow table; optionally report
 * its PTE index through pteidx_p.  Returns NULL if no mapping exists.
 */
static struct pvo_entry *
moea_pvo_find_va(pmap_t pm, vm_offset_t va, int *pteidx_p)
{
	struct pvo_entry *pvo;
	int ptegidx;
	u_int sr;

	va &= ~ADDR_POFF;
	sr = va_to_sr(pm->pm_sr, va);
	ptegidx = va_to_pteg(sr, va);

	mtx_lock(&moea_table_mutex);
	LIST_FOREACH(pvo, &moea_pvo_table[ptegidx], pvo_olink) {
		if (pvo->pvo_pmap == pm && PVO_VADDR(pvo) == va) {
			if (pteidx_p)
				*pteidx_p = moea_pvo_pte_index(pvo, ptegidx);
			break;
		}
	}
	mtx_unlock(&moea_table_mutex);

	return (pvo);
}

/*
 * Return the page-table PTE backing a pvo, sanity-checking the cached copy
 * against the hardware table.
 *
 * Lock contract: returns with moea_table_mutex HELD when a PTE is found;
 * the mutex has been dropped when NULL is returned.  Callers must unlock
 * only in the non-NULL case.
 */
static struct pte *
moea_pvo_to_pte(const struct pvo_entry *pvo, int pteidx)
{
	struct pte *pt;

	/*
	 * If we haven't been supplied the ptegidx, calculate it.
	 */
	if (pteidx == -1) {
		int ptegidx;
		u_int sr;

		sr = va_to_sr(pvo->pvo_pmap->pm_sr, pvo->pvo_vaddr);
		ptegidx = va_to_pteg(sr, pvo->pvo_vaddr);
		pteidx = moea_pvo_pte_index(pvo, ptegidx);
	}

	pt = &moea_pteg_table[pteidx >> 3].pt[pteidx & 7];
	mtx_lock(&moea_table_mutex);

	if ((pvo->pvo_pte.pte.pte_hi & PTE_VALID) && !PVO_PTEGIDX_ISSET(pvo)) {
		panic("moea_pvo_to_pte: pvo %p has valid pte in pvo but no "
		    "valid pte index", pvo);
	}

	if ((pvo->pvo_pte.pte.pte_hi & PTE_VALID) == 0 && PVO_PTEGIDX_ISSET(pvo)) {
		panic("moea_pvo_to_pte: pvo %p has valid pte index in pvo "
		    "pvo but no valid pte", pvo);
	}

	/* Cached copy and hardware entry may differ only in PTE_VALID. */
	if ((pt->pte_hi ^ (pvo->pvo_pte.pte.pte_hi & ~PTE_VALID)) == PTE_VALID) {
		if ((pvo->pvo_pte.pte.pte_hi & PTE_VALID) == 0) {
			panic("moea_pvo_to_pte: pvo %p has valid pte in "
			    "moea_pteg_table %p but invalid in pvo", pvo, pt);
		}

		if (((pt->pte_lo ^ pvo->pvo_pte.pte.pte_lo) & ~(PTE_CHG|PTE_REF))
		    != 0) {
			panic("moea_pvo_to_pte: pvo %p pte does not match "
			    "pte %p in moea_pteg_table", pvo, pt);
		}

		mtx_assert(&moea_table_mutex, MA_OWNED);
		return (pt);
	}

	if (pvo->pvo_pte.pte.pte_hi & PTE_VALID) {
		panic("moea_pvo_to_pte: pvo %p has invalid pte %p in "
		    "moea_pteg_table but valid in pvo: %8x, %8x", pvo, pt, pvo->pvo_pte.pte.pte_hi, pt->pte_hi);
	}

	mtx_unlock(&moea_table_mutex);
	return (NULL);
}

/*
 * XXX: THIS STUFF SHOULD BE IN pte.c?
 */
int
moea_pte_spill(vm_offset_t addr)
{
	struct pvo_entry *source_pvo, *victim_pvo;
	struct pvo_entry *pvo;
	int ptegidx, i, j;
	u_int sr;
	struct pteg *pteg;
	struct pte *pt;

	moea_pte_spills++;

	sr = mfsrin(addr);
	ptegidx = va_to_pteg(sr, addr);

	/*
	 * Have to substitute some entry.  Use the primary hash for this.
	 * Use low bits of timebase as random generator.
	 */
	pteg = &moea_pteg_table[ptegidx];
	mtx_lock(&moea_table_mutex);
	__asm __volatile("mftb %0" : "=r"(i));
	i &= 7;
	pt = &pteg->pt[i];

	source_pvo = NULL;
	victim_pvo = NULL;
	LIST_FOREACH(pvo, &moea_pvo_table[ptegidx], pvo_olink) {
		/*
		 * We need to find a pvo entry for this address.
		 */
		if (source_pvo == NULL &&
		    moea_pte_match(&pvo->pvo_pte.pte, sr, addr,
		    pvo->pvo_pte.pte.pte_hi & PTE_HID)) {
			/*
			 * Now found an entry to be spilled into the pteg.
			 * The PTE is now valid, so we know it's active.
			 */
			j = moea_pte_insert(ptegidx, &pvo->pvo_pte.pte);

			if (j >= 0) {
				PVO_PTEGIDX_SET(pvo, j);
				moea_pte_overflow--;
				mtx_unlock(&moea_table_mutex);
				return (1);
			}

			source_pvo = pvo;

			if (victim_pvo != NULL)
				break;
		}

		/*
		 * We also need the pvo entry of the victim we are replacing
		 * so save the R & C bits of the PTE.
		 */
		if ((pt->pte_hi & PTE_HID) == 0 && victim_pvo == NULL &&
		    moea_pte_compare(pt, &pvo->pvo_pte.pte)) {
			victim_pvo = pvo;
			if (source_pvo != NULL)
				break;
		}
	}

	if (source_pvo == NULL) {
		mtx_unlock(&moea_table_mutex);
		return (0);
	}

	if (victim_pvo == NULL) {
		if ((pt->pte_hi & PTE_HID) == 0)
			panic("moea_pte_spill: victim p-pte (%p) has no pvo"
			    "entry", pt);

		/*
		 * If this is a secondary PTE, we need to search it's primary
		 * pvo bucket for the matching PVO.
		 */
		LIST_FOREACH(pvo, &moea_pvo_table[ptegidx ^ moea_pteg_mask],
		    pvo_olink) {
			/*
			 * We also need the pvo entry of the victim we are
			 * replacing so save the R & C bits of the PTE.
			 */
			if (moea_pte_compare(pt, &pvo->pvo_pte.pte)) {
				victim_pvo = pvo;
				break;
			}
		}

		if (victim_pvo == NULL)
			panic("moea_pte_spill: victim s-pte (%p) has no pvo"
			    "entry", pt);
	}

	/*
	 * We are invalidating the TLB entry for the EA we are replacing even
	 * though it's valid.  If we don't, we lose any ref/chg bit changes
	 * contained in the TLB entry.
	 */
	source_pvo->pvo_pte.pte.pte_hi &= ~PTE_HID;

	moea_pte_unset(pt, &victim_pvo->pvo_pte.pte, victim_pvo->pvo_vaddr);
	moea_pte_set(pt, &source_pvo->pvo_pte.pte);

	PVO_PTEGIDX_CLR(victim_pvo);
	PVO_PTEGIDX_SET(source_pvo, i);
	moea_pte_replacements++;

	mtx_unlock(&moea_table_mutex);
	return (1);
}

/*
 * Pick a spill victim in the given PTEG: an unwired pvo with a valid PTE,
 * preferring one whose PTE_REF bit is clear.  Returns NULL if none exists.
 *
 * NOTE(review): this calls moea_pvo_to_pte(), which re-locks
 * moea_table_mutex while the caller (moea_pte_insert) already asserts it
 * held — this assumes the mutex is created with MTX_RECURSE; confirm at
 * mtx_init().
 */
static __inline struct pvo_entry *
moea_pte_spillable_ident(u_int ptegidx)
{
	struct pte *pt;
	struct pvo_entry *pvo_walk, *pvo = NULL;

	LIST_FOREACH(pvo_walk, &moea_pvo_table[ptegidx], pvo_olink) {
		if (pvo_walk->pvo_vaddr & PVO_WIRED)
			continue;

		if (!(pvo_walk->pvo_pte.pte.pte_hi & PTE_VALID))
			continue;

		pt = moea_pvo_to_pte(pvo_walk, -1);

		if (pt == NULL)
			continue;

		pvo = pvo_walk;

		/* Drop the recursive acquisition taken by moea_pvo_to_pte(). */
		mtx_unlock(&moea_table_mutex);
		if (!(pt->pte_lo & PTE_REF))
			return (pvo_walk);
	}

	return (pvo);
}

/*
 * Insert a PTE into the hardware page table, trying the primary PTEG, then
 * the secondary, then evicting a spillable entry.  Returns the slot index
 * (0-7) used; panics if no slot can be freed.  Called with moea_table_mutex
 * held.
 */
static int
moea_pte_insert(u_int ptegidx, struct pte *pvo_pt)
{
	struct pte *pt;
	struct pvo_entry *victim_pvo;
	int i;
	int victim_idx;
	u_int pteg_bkpidx = ptegidx;

	mtx_assert(&moea_table_mutex, MA_OWNED);

	/*
	 * First try primary hash.
	 */
	for (pt = moea_pteg_table[ptegidx].pt, i = 0; i < 8; i++, pt++) {
		if ((pt->pte_hi & PTE_VALID) == 0) {
			pvo_pt->pte_hi &= ~PTE_HID;
			moea_pte_set(pt, pvo_pt);
			return (i);
		}
	}

	/*
	 * Now try secondary hash.
	 */
	ptegidx ^= moea_pteg_mask;

	for (pt = moea_pteg_table[ptegidx].pt, i = 0; i < 8; i++, pt++) {
		if ((pt->pte_hi & PTE_VALID) == 0) {
			pvo_pt->pte_hi |= PTE_HID;
			moea_pte_set(pt, pvo_pt);
			return (i);
		}
	}

	/* Try again, but this time try to force a PTE out. */
	ptegidx = pteg_bkpidx;

	victim_pvo = moea_pte_spillable_ident(ptegidx);
	if (victim_pvo == NULL) {
		ptegidx ^= moea_pteg_mask;
		victim_pvo = moea_pte_spillable_ident(ptegidx);
	}

	if (victim_pvo == NULL) {
		panic("moea_pte_insert: overflow");
		return (-1);
	}

	victim_idx = moea_pvo_pte_index(victim_pvo, ptegidx);

	if (pteg_bkpidx == ptegidx)
		pvo_pt->pte_hi &= ~PTE_HID;
	else
		pvo_pt->pte_hi |= PTE_HID;

	/*
	 * Synchronize the sacrifice PTE with its PVO, then mark both
	 * invalid. The PVO will be reused when/if the VM system comes
	 * here after a fault.
	 */
	pt = &moea_pteg_table[victim_idx >> 3].pt[victim_idx & 7];

	if (pt->pte_hi != victim_pvo->pvo_pte.pte.pte_hi)
		panic("Victim PVO doesn't match PTE! PVO: %8x, PTE: %8x", victim_pvo->pvo_pte.pte.pte_hi, pt->pte_hi);

	/*
	 * Set the new PTE.
	 */
	moea_pte_unset(pt, &victim_pvo->pvo_pte.pte, victim_pvo->pvo_vaddr);
	PVO_PTEGIDX_CLR(victim_pvo);
	moea_pte_overflow++;
	moea_pte_set(pt, pvo_pt);

	return (victim_idx & 7);
}

/*
 * Test whether any mapping of the page has the given REF/CHG bit set,
 * caching the result in the page's attribute word.  Requires the pv
 * global lock.
 */
static boolean_t
moea_query_bit(vm_page_t m, int ptebit)
{
	struct pvo_entry *pvo;
	struct pte *pt;

	rw_assert(&pvh_global_lock, RA_WLOCKED);
	if (moea_attr_fetch(m) & ptebit)
		return (TRUE);

	LIST_FOREACH(pvo, vm_page_to_pvoh(m), pvo_vlink) {

		/*
		 * See if we saved the bit off.  If so, cache it and return
		 * success.
		 */
		if (pvo->pvo_pte.pte.pte_lo & ptebit) {
			moea_attr_save(m, ptebit);
			return (TRUE);
		}
	}

	/*
	 * No luck, now go through the hard part of looking at the PTEs
	 * themselves.  Sync so that any pending REF/CHG bits are flushed to
	 * the PTEs.
	 */
	powerpc_sync();
	LIST_FOREACH(pvo, vm_page_to_pvoh(m), pvo_vlink) {

		/*
		 * See if this pvo has a valid PTE.  if so, fetch the
		 * REF/CHG bits from the valid PTE.  If the appropriate
		 * ptebit is set, cache it and return success.
242990643Sbenno */ 2430152180Sgrehan pt = moea_pvo_to_pte(pvo, -1); 243190643Sbenno if (pt != NULL) { 2432183290Snwhitehorn moea_pte_synch(pt, &pvo->pvo_pte.pte); 2433159928Salc mtx_unlock(&moea_table_mutex); 2434183290Snwhitehorn if (pvo->pvo_pte.pte.pte_lo & ptebit) { 2435152180Sgrehan moea_attr_save(m, ptebit); 243690643Sbenno return (TRUE); 243790643Sbenno } 243890643Sbenno } 243984921Sbenno } 244084921Sbenno 2441123354Sgallatin return (FALSE); 244284921Sbenno} 244390643Sbenno 2444110172Sgrehanstatic u_int 2445208990Salcmoea_clear_bit(vm_page_t m, int ptebit) 244690643Sbenno{ 2447110172Sgrehan u_int count; 244890643Sbenno struct pvo_entry *pvo; 244990643Sbenno struct pte *pt; 245090643Sbenno 2451238357Salc rw_assert(&pvh_global_lock, RA_WLOCKED); 2452208990Salc 245390643Sbenno /* 245490643Sbenno * Clear the cached value. 245590643Sbenno */ 2456152180Sgrehan moea_attr_clear(m, ptebit); 245790643Sbenno 245890643Sbenno /* 245990643Sbenno * Sync so that any pending REF/CHG bits are flushed to the PTEs (so 246090643Sbenno * we can reset the right ones). note that since the pvo entries and 246190643Sbenno * list heads are accessed via BAT0 and are never placed in the page 246290643Sbenno * table, we don't have to worry about further accesses setting the 246390643Sbenno * REF/CHG bits. 246490643Sbenno */ 2465183094Smarcel powerpc_sync(); 246690643Sbenno 246790643Sbenno /* 246890643Sbenno * For each pvo entry, clear the pvo's ptebit. If this pvo has a 246990643Sbenno * valid pte clear the ptebit from the valid pte. 
247090643Sbenno */ 2471110172Sgrehan count = 0; 247290643Sbenno LIST_FOREACH(pvo, vm_page_to_pvoh(m), pvo_vlink) { 2473152180Sgrehan pt = moea_pvo_to_pte(pvo, -1); 247490643Sbenno if (pt != NULL) { 2475183290Snwhitehorn moea_pte_synch(pt, &pvo->pvo_pte.pte); 2476183290Snwhitehorn if (pvo->pvo_pte.pte.pte_lo & ptebit) { 2477110172Sgrehan count++; 2478152180Sgrehan moea_pte_clear(pt, PVO_VADDR(pvo), ptebit); 2479110172Sgrehan } 2480159928Salc mtx_unlock(&moea_table_mutex); 248190643Sbenno } 2482183290Snwhitehorn pvo->pvo_pte.pte.pte_lo &= ~ptebit; 248390643Sbenno } 248490643Sbenno 2485110172Sgrehan return (count); 248690643Sbenno} 248799038Sbenno 248899038Sbenno/* 2489103604Sgrehan * Return true if the physical range is encompassed by the battable[idx] 2490103604Sgrehan */ 2491103604Sgrehanstatic int 2492152180Sgrehanmoea_bat_mapped(int idx, vm_offset_t pa, vm_size_t size) 2493103604Sgrehan{ 2494103604Sgrehan u_int prot; 2495103604Sgrehan u_int32_t start; 2496103604Sgrehan u_int32_t end; 2497103604Sgrehan u_int32_t bat_ble; 2498103604Sgrehan 2499103604Sgrehan /* 2500103604Sgrehan * Return immediately if not a valid mapping 2501103604Sgrehan */ 2502214601Snwhitehorn if (!(battable[idx].batu & BAT_Vs)) 2503103604Sgrehan return (EINVAL); 2504103604Sgrehan 2505103604Sgrehan /* 2506103604Sgrehan * The BAT entry must be cache-inhibited, guarded, and r/w 2507103604Sgrehan * so it can function as an i/o page 2508103604Sgrehan */ 2509103604Sgrehan prot = battable[idx].batl & (BAT_I|BAT_G|BAT_PP_RW); 2510103604Sgrehan if (prot != (BAT_I|BAT_G|BAT_PP_RW)) 2511103604Sgrehan return (EPERM); 2512103604Sgrehan 2513103604Sgrehan /* 2514103604Sgrehan * The address should be within the BAT range. 
Assume that the 2515103604Sgrehan * start address in the BAT has the correct alignment (thus 2516103604Sgrehan * not requiring masking) 2517103604Sgrehan */ 2518103604Sgrehan start = battable[idx].batl & BAT_PBS; 2519103604Sgrehan bat_ble = (battable[idx].batu & ~(BAT_EBS)) | 0x03; 2520103604Sgrehan end = start | (bat_ble << 15) | 0x7fff; 2521103604Sgrehan 2522103604Sgrehan if ((pa < start) || ((pa + size) > end)) 2523103604Sgrehan return (ERANGE); 2524103604Sgrehan 2525103604Sgrehan return (0); 2526103604Sgrehan} 2527103604Sgrehan 2528152180Sgrehanboolean_t 2529235936Srajmoea_dev_direct_mapped(mmu_t mmu, vm_paddr_t pa, vm_size_t size) 2530133855Sssouhlal{ 2531133855Sssouhlal int i; 2532103604Sgrehan 2533133855Sssouhlal /* 2534133855Sssouhlal * This currently does not work for entries that 2535133855Sssouhlal * overlap 256M BAT segments. 2536133855Sssouhlal */ 2537133855Sssouhlal 2538133855Sssouhlal for(i = 0; i < 16; i++) 2539152180Sgrehan if (moea_bat_mapped(i, pa, size) == 0) 2540133855Sssouhlal return (0); 2541133855Sssouhlal 2542133855Sssouhlal return (EFAULT); 2543133855Sssouhlal} 2544133855Sssouhlal 2545103604Sgrehan/* 254699038Sbenno * Map a set of physical memory pages into the kernel virtual 254799038Sbenno * address space. Return a pointer to where it is mapped. This 254899038Sbenno * routine is intended to be used for mapping device memory, 254999038Sbenno * NOT real memory. 
255099038Sbenno */ 255199038Sbennovoid * 2552235936Srajmoea_mapdev(mmu_t mmu, vm_paddr_t pa, vm_size_t size) 255399038Sbenno{ 2554213307Snwhitehorn 2555213307Snwhitehorn return (moea_mapdev_attr(mmu, pa, size, VM_MEMATTR_DEFAULT)); 2556213307Snwhitehorn} 2557213307Snwhitehorn 2558213307Snwhitehornvoid * 2559213307Snwhitehornmoea_mapdev_attr(mmu_t mmu, vm_offset_t pa, vm_size_t size, vm_memattr_t ma) 2560213307Snwhitehorn{ 2561103604Sgrehan vm_offset_t va, tmpva, ppa, offset; 2562103604Sgrehan int i; 2563103604Sgrehan 2564103604Sgrehan ppa = trunc_page(pa); 256599038Sbenno offset = pa & PAGE_MASK; 256699038Sbenno size = roundup(offset + size, PAGE_SIZE); 256799038Sbenno 2568103604Sgrehan /* 2569103604Sgrehan * If the physical address lies within a valid BAT table entry, 2570103604Sgrehan * return the 1:1 mapping. This currently doesn't work 2571103604Sgrehan * for regions that overlap 256M BAT segments. 2572103604Sgrehan */ 2573103604Sgrehan for (i = 0; i < 16; i++) { 2574152180Sgrehan if (moea_bat_mapped(i, pa, size) == 0) 2575103604Sgrehan return ((void *) pa); 2576103604Sgrehan } 2577103604Sgrehan 2578254025Sjeff va = kva_alloc(size); 257999038Sbenno if (!va) 2580152180Sgrehan panic("moea_mapdev: Couldn't alloc kernel virtual memory"); 258199038Sbenno 258299038Sbenno for (tmpva = va; size > 0;) { 2583213307Snwhitehorn moea_kenter_attr(mmu, tmpva, ppa, ma); 2584183094Smarcel tlbie(tmpva); 258599038Sbenno size -= PAGE_SIZE; 258699038Sbenno tmpva += PAGE_SIZE; 2587103604Sgrehan ppa += PAGE_SIZE; 258899038Sbenno } 258999038Sbenno 259099038Sbenno return ((void *)(va + offset)); 259199038Sbenno} 259299038Sbenno 259399038Sbennovoid 2594152180Sgrehanmoea_unmapdev(mmu_t mmu, vm_offset_t va, vm_size_t size) 259599038Sbenno{ 259699038Sbenno vm_offset_t base, offset; 259799038Sbenno 2598103604Sgrehan /* 2599103604Sgrehan * If this is outside kernel virtual space, then it's a 2600103604Sgrehan * battable entry and doesn't require unmapping 2601103604Sgrehan */ 
2602204128Snwhitehorn if ((va >= VM_MIN_KERNEL_ADDRESS) && (va <= virtual_end)) { 2603103604Sgrehan base = trunc_page(va); 2604103604Sgrehan offset = va & PAGE_MASK; 2605103604Sgrehan size = roundup(offset + size, PAGE_SIZE); 2606254025Sjeff kva_free(base, size); 2607103604Sgrehan } 260899038Sbenno} 2609198341Smarcel 2610198341Smarcelstatic void 2611198341Smarcelmoea_sync_icache(mmu_t mmu, pmap_t pm, vm_offset_t va, vm_size_t sz) 2612198341Smarcel{ 2613198341Smarcel struct pvo_entry *pvo; 2614198341Smarcel vm_offset_t lim; 2615198341Smarcel vm_paddr_t pa; 2616198341Smarcel vm_size_t len; 2617198341Smarcel 2618198341Smarcel PMAP_LOCK(pm); 2619198341Smarcel while (sz > 0) { 2620198341Smarcel lim = round_page(va); 2621198341Smarcel len = MIN(lim - va, sz); 2622198341Smarcel pvo = moea_pvo_find_va(pm, va & ~ADDR_POFF, NULL); 2623198341Smarcel if (pvo != NULL) { 2624198341Smarcel pa = (pvo->pvo_pte.pte.pte_lo & PTE_RPGN) | 2625198341Smarcel (va & ADDR_POFF); 2626198341Smarcel moea_syncicache(pa, len); 2627198341Smarcel } 2628198341Smarcel va += len; 2629198341Smarcel sz -= len; 2630198341Smarcel } 2631198341Smarcel PMAP_UNLOCK(pm); 2632198341Smarcel} 2633249864Sjhibbits 2634249864Sjhibbitsvm_offset_t 2635249864Sjhibbitsmoea_dumpsys_map(mmu_t mmu, struct pmap_md *md, vm_size_t ofs, 2636249864Sjhibbits vm_size_t *sz) 2637249864Sjhibbits{ 2638249864Sjhibbits if (md->md_vaddr == ~0UL) 2639249864Sjhibbits return (md->md_paddr + ofs); 2640249864Sjhibbits else 2641249864Sjhibbits return (md->md_vaddr + ofs); 2642249864Sjhibbits} 2643249864Sjhibbits 2644249864Sjhibbitsstruct pmap_md * 2645249864Sjhibbitsmoea_scan_md(mmu_t mmu, struct pmap_md *prev) 2646249864Sjhibbits{ 2647249864Sjhibbits static struct pmap_md md; 2648249864Sjhibbits struct pvo_entry *pvo; 2649249864Sjhibbits vm_offset_t va; 2650249864Sjhibbits 2651249864Sjhibbits if (dumpsys_minidump) { 2652249864Sjhibbits md.md_paddr = ~0UL; /* Minidumps use virtual addresses. 
*/ 2653249864Sjhibbits if (prev == NULL) { 2654249864Sjhibbits /* 1st: kernel .data and .bss. */ 2655249864Sjhibbits md.md_index = 1; 2656249864Sjhibbits md.md_vaddr = trunc_page((uintptr_t)_etext); 2657249864Sjhibbits md.md_size = round_page((uintptr_t)_end) - md.md_vaddr; 2658249864Sjhibbits return (&md); 2659249864Sjhibbits } 2660249864Sjhibbits switch (prev->md_index) { 2661249864Sjhibbits case 1: 2662249864Sjhibbits /* 2nd: msgbuf and tables (see pmap_bootstrap()). */ 2663249864Sjhibbits md.md_index = 2; 2664249864Sjhibbits md.md_vaddr = (vm_offset_t)msgbufp->msg_ptr; 2665249864Sjhibbits md.md_size = round_page(msgbufp->msg_size); 2666249864Sjhibbits break; 2667249864Sjhibbits case 2: 2668249864Sjhibbits /* 3rd: kernel VM. */ 2669249864Sjhibbits va = prev->md_vaddr + prev->md_size; 2670249864Sjhibbits /* Find start of next chunk (from va). */ 2671249864Sjhibbits while (va < virtual_end) { 2672249864Sjhibbits /* Don't dump the buffer cache. */ 2673249864Sjhibbits if (va >= kmi.buffer_sva && 2674249864Sjhibbits va < kmi.buffer_eva) { 2675249864Sjhibbits va = kmi.buffer_eva; 2676249864Sjhibbits continue; 2677249864Sjhibbits } 2678249864Sjhibbits pvo = moea_pvo_find_va(kernel_pmap, 2679249864Sjhibbits va & ~ADDR_POFF, NULL); 2680249864Sjhibbits if (pvo != NULL && 2681249864Sjhibbits (pvo->pvo_pte.pte.pte_hi & PTE_VALID)) 2682249864Sjhibbits break; 2683249864Sjhibbits va += PAGE_SIZE; 2684249864Sjhibbits } 2685249864Sjhibbits if (va < virtual_end) { 2686249864Sjhibbits md.md_vaddr = va; 2687249864Sjhibbits va += PAGE_SIZE; 2688249864Sjhibbits /* Find last page in chunk. */ 2689249864Sjhibbits while (va < virtual_end) { 2690249864Sjhibbits /* Don't run into the buffer cache. 
*/ 2691249864Sjhibbits if (va == kmi.buffer_sva) 2692249864Sjhibbits break; 2693249864Sjhibbits pvo = moea_pvo_find_va(kernel_pmap, 2694249864Sjhibbits va & ~ADDR_POFF, NULL); 2695249864Sjhibbits if (pvo == NULL || 2696249864Sjhibbits !(pvo->pvo_pte.pte.pte_hi & PTE_VALID)) 2697249864Sjhibbits break; 2698249864Sjhibbits va += PAGE_SIZE; 2699249864Sjhibbits } 2700249864Sjhibbits md.md_size = va - md.md_vaddr; 2701249864Sjhibbits break; 2702249864Sjhibbits } 2703249864Sjhibbits md.md_index = 3; 2704249864Sjhibbits /* FALLTHROUGH */ 2705249864Sjhibbits default: 2706249864Sjhibbits return (NULL); 2707249864Sjhibbits } 2708249864Sjhibbits } else { /* minidumps */ 2709249864Sjhibbits mem_regions(&pregions, &pregions_sz, 2710249864Sjhibbits ®ions, ®ions_sz); 2711249864Sjhibbits 2712249864Sjhibbits if (prev == NULL) { 2713249864Sjhibbits /* first physical chunk. */ 2714249864Sjhibbits md.md_paddr = pregions[0].mr_start; 2715249864Sjhibbits md.md_size = pregions[0].mr_size; 2716249864Sjhibbits md.md_vaddr = ~0UL; 2717249864Sjhibbits md.md_index = 1; 2718249864Sjhibbits } else if (md.md_index < pregions_sz) { 2719249864Sjhibbits md.md_paddr = pregions[md.md_index].mr_start; 2720249864Sjhibbits md.md_size = pregions[md.md_index].mr_size; 2721249864Sjhibbits md.md_vaddr = ~0UL; 2722249864Sjhibbits md.md_index++; 2723249864Sjhibbits } else { 2724249864Sjhibbits /* There's no next physical chunk. */ 2725249864Sjhibbits return (NULL); 2726249864Sjhibbits } 2727249864Sjhibbits } 2728249864Sjhibbits 2729249864Sjhibbits return (&md); 2730249864Sjhibbits} 2731