/*-
 * Copyright (c) 2001 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Matt Thomas <matt@3am-software.com> of Allegro Networks, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *        This product includes software developed by the NetBSD
 *        Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */
/*-
 * Copyright (C) 1995, 1996 Wolfgang Solfrank.
 * Copyright (C) 1995, 1996 TooLs GmbH.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by TooLs GmbH.
 * 4. The name of TooLs GmbH may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY TOOLS GMBH ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL TOOLS GMBH BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
 * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
 * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * $NetBSD: pmap.c,v 1.28 2000/03/26 20:42:36 kleink Exp $
 */
/*-
 * Copyright (C) 2001 Benno Rice.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY Benno Rice ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL TOOLS GMBH BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
 * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
 * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: head/sys/powerpc/aim/mmu_oea.c 235689 2012-05-20 14:33:28Z nwhitehorn $");

/*
 * Manages physical address maps.
 *
 * In addition to hardware address maps, this module is called upon to
 * provide software-use-only maps which may or may not be stored in the
 * same form as hardware maps.  These pseudo-maps are used to store
 * intermediate results from copy operations to and from address spaces.
 *
 * Since the information managed by this module is also stored by the
 * logical address mapping module, this module may throw away valid virtual
 * to physical mappings at almost any time.  However, invalidations of
 * mappings must be done as requested.
 *
 * In order to cope with hardware architectures which make virtual to
 * physical map invalidates expensive, this module may delay invalidate
 * reduced protection operations until such time as they are actually
 * necessary.  This module is given full information as to which processors
 * are currently using which maps, and to when physical maps must be made
 * correct.
 */

#include "opt_kstack_pages.h"

#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/queue.h>
#include <sys/cpuset.h>
#include <sys/ktr.h>
#include <sys/lock.h>
#include <sys/msgbuf.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/sched.h>
#include <sys/sysctl.h>
#include <sys/systm.h>
#include <sys/vmmeter.h>

#include <dev/ofw/openfirm.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/vm_kern.h>
#include <vm/vm_page.h>
#include <vm/vm_map.h>
#include <vm/vm_object.h>
#include <vm/vm_extern.h>
#include <vm/vm_pageout.h>
#include <vm/vm_pager.h>
#include <vm/uma.h>

#include <machine/cpu.h>
#include <machine/platform.h>
#include <machine/bat.h>
#include <machine/frame.h>
#include <machine/md_var.h>
#include <machine/psl.h>
#include <machine/pte.h>
#include <machine/smp.h>
#include <machine/sr.h>
#include <machine/mmuvar.h>
#include <machine/trap_aim.h>

#include "mmu_if.h"

#define	MOEA_DEBUG

#define	TODO	panic("%s: not implemented", __func__);

#define	VSID_MAKE(sr, hash)	((sr) | (((hash) & 0xfffff) << 4))
#define	VSID_TO_SR(vsid)	((vsid) & 0xf)
#define	VSID_TO_HASH(vsid)	(((vsid) >> 4) & 0xfffff)

struct ofw_map {
	vm_offset_t	om_va;
	vm_size_t	om_len;
	vm_offset_t	om_pa;
	u_int		om_mode;
};

/*
 * Map of physical memory regions.
 */
static struct	mem_region *regions;
static struct	mem_region *pregions;
static u_int	phys_avail_count;
static int	regions_sz, pregions_sz;
static struct	ofw_map *translations;
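/*
 * Illustrative sketch of the VSID macros above; not part of the driver
 * (hence #if 0) and the numbers are made up.  A VSID packs a 4-bit
 * segment register number into its low nibble and a 20-bit hash above it.
 */
#if 0
static void
vsid_macro_example(void)
{
	u_int sr = 0x3;				/* segment register 3 */
	u_int hash = 0x12345;			/* 20-bit hash value */
	u_int vsid = VSID_MAKE(sr, hash);	/* == 0x123453 */

	KASSERT(VSID_TO_SR(vsid) == sr, ("sr did not round-trip"));
	KASSERT(VSID_TO_HASH(vsid) == hash, ("hash did not round-trip"));
}
#endif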
/*
 * Lock for the pteg and pvo tables.
 */
struct mtx	moea_table_mutex;
struct mtx	moea_vsid_mutex;

/* tlbie instruction synchronization */
static struct mtx tlbie_mtx;

/*
 * PTEG data.
 */
static struct	pteg *moea_pteg_table;
u_int		moea_pteg_count;
u_int		moea_pteg_mask;

/*
 * PVO data.
 */
struct	pvo_head *moea_pvo_table;		/* pvo entries by pteg index */
struct	pvo_head moea_pvo_kunmanaged =
    LIST_HEAD_INITIALIZER(moea_pvo_kunmanaged);	/* list of unmanaged pages */

uma_zone_t	moea_upvo_zone;	/* zone for pvo entries for unmanaged pages */
uma_zone_t	moea_mpvo_zone;	/* zone for pvo entries for managed pages */

#define	BPVO_POOL_SIZE	32768
static struct	pvo_entry *moea_bpvo_pool;
static int	moea_bpvo_pool_index = 0;

#define	VSID_NBPW	(sizeof(u_int32_t) * 8)
static u_int	moea_vsid_bitmap[NPMAPS / VSID_NBPW];

static boolean_t moea_initialized = FALSE;

/*
 * Statistics.
 */
u_int	moea_pte_valid = 0;
u_int	moea_pte_overflow = 0;
u_int	moea_pte_replacements = 0;
u_int	moea_pvo_entries = 0;
u_int	moea_pvo_enter_calls = 0;
u_int	moea_pvo_remove_calls = 0;
u_int	moea_pte_spills = 0;
SYSCTL_INT(_machdep, OID_AUTO, moea_pte_valid, CTLFLAG_RD, &moea_pte_valid,
    0, "");
SYSCTL_INT(_machdep, OID_AUTO, moea_pte_overflow, CTLFLAG_RD,
    &moea_pte_overflow, 0, "");
SYSCTL_INT(_machdep, OID_AUTO, moea_pte_replacements, CTLFLAG_RD,
    &moea_pte_replacements, 0, "");
SYSCTL_INT(_machdep, OID_AUTO, moea_pvo_entries, CTLFLAG_RD, &moea_pvo_entries,
    0, "");
SYSCTL_INT(_machdep, OID_AUTO, moea_pvo_enter_calls, CTLFLAG_RD,
    &moea_pvo_enter_calls, 0, "");
SYSCTL_INT(_machdep, OID_AUTO, moea_pvo_remove_calls, CTLFLAG_RD,
    &moea_pvo_remove_calls, 0, "");
SYSCTL_INT(_machdep, OID_AUTO, moea_pte_spills, CTLFLAG_RD,
    &moea_pte_spills, 0, "");

/*
 * Allocate physical memory for use in moea_bootstrap.
 */
static vm_offset_t	moea_bootstrap_alloc(vm_size_t, u_int);

/*
 * PTE calls.
 */
static int	moea_pte_insert(u_int, struct pte *);

/*
 * PVO calls.
 */
static int	moea_pvo_enter(pmap_t, uma_zone_t, struct pvo_head *,
		    vm_offset_t, vm_offset_t, u_int, int);
static void	moea_pvo_remove(struct pvo_entry *, int);
static struct	pvo_entry *moea_pvo_find_va(pmap_t, vm_offset_t, int *);
static struct	pte *moea_pvo_to_pte(const struct pvo_entry *, int);
/*
 * Utility routines.
 */
static void		moea_enter_locked(pmap_t, vm_offset_t, vm_page_t,
			    vm_prot_t, boolean_t);
static void		moea_syncicache(vm_offset_t, vm_size_t);
static boolean_t	moea_query_bit(vm_page_t, int);
static u_int		moea_clear_bit(vm_page_t, int);
static void		moea_kremove(mmu_t, vm_offset_t);
int		moea_pte_spill(vm_offset_t);

/*
 * Kernel MMU interface
 */
void moea_change_wiring(mmu_t, pmap_t, vm_offset_t, boolean_t);
void moea_clear_modify(mmu_t, vm_page_t);
void moea_clear_reference(mmu_t, vm_page_t);
void moea_copy_page(mmu_t, vm_page_t, vm_page_t);
void moea_enter(mmu_t, pmap_t, vm_offset_t, vm_page_t, vm_prot_t, boolean_t);
void moea_enter_object(mmu_t, pmap_t, vm_offset_t, vm_offset_t, vm_page_t,
    vm_prot_t);
void moea_enter_quick(mmu_t, pmap_t, vm_offset_t, vm_page_t, vm_prot_t);
vm_paddr_t moea_extract(mmu_t, pmap_t, vm_offset_t);
vm_page_t moea_extract_and_hold(mmu_t, pmap_t, vm_offset_t, vm_prot_t);
void moea_init(mmu_t);
boolean_t moea_is_modified(mmu_t, vm_page_t);
boolean_t moea_is_prefaultable(mmu_t, pmap_t, vm_offset_t);
boolean_t moea_is_referenced(mmu_t, vm_page_t);
boolean_t moea_ts_referenced(mmu_t, vm_page_t);
vm_offset_t moea_map(mmu_t, vm_offset_t *, vm_offset_t, vm_offset_t, int);
boolean_t moea_page_exists_quick(mmu_t, pmap_t, vm_page_t);
int moea_page_wired_mappings(mmu_t, vm_page_t);
void moea_pinit(mmu_t, pmap_t);
void moea_pinit0(mmu_t, pmap_t);
void moea_protect(mmu_t, pmap_t, vm_offset_t, vm_offset_t, vm_prot_t);
void moea_qenter(mmu_t, vm_offset_t, vm_page_t *, int);
void moea_qremove(mmu_t, vm_offset_t, int);
void moea_release(mmu_t, pmap_t);
void moea_remove(mmu_t, pmap_t, vm_offset_t, vm_offset_t);
void moea_remove_all(mmu_t, vm_page_t);
void moea_remove_write(mmu_t, vm_page_t);
void moea_zero_page(mmu_t, vm_page_t);
void moea_zero_page_area(mmu_t, vm_page_t, int, int);
void moea_zero_page_idle(mmu_t, vm_page_t);
void moea_activate(mmu_t, struct thread *);
void moea_deactivate(mmu_t, struct thread *);
void moea_cpu_bootstrap(mmu_t, int);
void moea_bootstrap(mmu_t, vm_offset_t, vm_offset_t);
void *moea_mapdev(mmu_t, vm_offset_t, vm_size_t);
void *moea_mapdev_attr(mmu_t, vm_offset_t, vm_size_t, vm_memattr_t);
void moea_unmapdev(mmu_t, vm_offset_t, vm_size_t);
vm_offset_t moea_kextract(mmu_t, vm_offset_t);
void moea_kenter_attr(mmu_t, vm_offset_t, vm_offset_t, vm_memattr_t);
void moea_kenter(mmu_t, vm_offset_t, vm_offset_t);
void moea_page_set_memattr(mmu_t mmu, vm_page_t m, vm_memattr_t ma);
boolean_t moea_dev_direct_mapped(mmu_t, vm_offset_t, vm_size_t);
static void moea_sync_icache(mmu_t, pmap_t, vm_offset_t, vm_size_t);
static mmu_method_t moea_methods[] = {
	MMUMETHOD(mmu_change_wiring,	moea_change_wiring),
	MMUMETHOD(mmu_clear_modify,	moea_clear_modify),
	MMUMETHOD(mmu_clear_reference,	moea_clear_reference),
	MMUMETHOD(mmu_copy_page,	moea_copy_page),
	MMUMETHOD(mmu_enter,		moea_enter),
	MMUMETHOD(mmu_enter_object,	moea_enter_object),
	MMUMETHOD(mmu_enter_quick,	moea_enter_quick),
	MMUMETHOD(mmu_extract,		moea_extract),
	MMUMETHOD(mmu_extract_and_hold,	moea_extract_and_hold),
	MMUMETHOD(mmu_init,		moea_init),
	MMUMETHOD(mmu_is_modified,	moea_is_modified),
	MMUMETHOD(mmu_is_prefaultable,	moea_is_prefaultable),
	MMUMETHOD(mmu_is_referenced,	moea_is_referenced),
	MMUMETHOD(mmu_ts_referenced,	moea_ts_referenced),
	MMUMETHOD(mmu_map,		moea_map),
	MMUMETHOD(mmu_page_exists_quick,moea_page_exists_quick),
	MMUMETHOD(mmu_page_wired_mappings,moea_page_wired_mappings),
	MMUMETHOD(mmu_pinit,		moea_pinit),
	MMUMETHOD(mmu_pinit0,		moea_pinit0),
	MMUMETHOD(mmu_protect,		moea_protect),
	MMUMETHOD(mmu_qenter,		moea_qenter),
	MMUMETHOD(mmu_qremove,		moea_qremove),
	MMUMETHOD(mmu_release,		moea_release),
	MMUMETHOD(mmu_remove,		moea_remove),
	MMUMETHOD(mmu_remove_all,	moea_remove_all),
	MMUMETHOD(mmu_remove_write,	moea_remove_write),
	MMUMETHOD(mmu_sync_icache,	moea_sync_icache),
	MMUMETHOD(mmu_zero_page,	moea_zero_page),
	MMUMETHOD(mmu_zero_page_area,	moea_zero_page_area),
	MMUMETHOD(mmu_zero_page_idle,	moea_zero_page_idle),
	MMUMETHOD(mmu_activate,		moea_activate),
	MMUMETHOD(mmu_deactivate,	moea_deactivate),
	MMUMETHOD(mmu_page_set_memattr,	moea_page_set_memattr),

	/* Internal interfaces */
	MMUMETHOD(mmu_bootstrap,	moea_bootstrap),
	MMUMETHOD(mmu_cpu_bootstrap,	moea_cpu_bootstrap),
	MMUMETHOD(mmu_mapdev_attr,	moea_mapdev_attr),
	MMUMETHOD(mmu_mapdev,		moea_mapdev),
	MMUMETHOD(mmu_unmapdev,		moea_unmapdev),
	MMUMETHOD(mmu_kextract,		moea_kextract),
	MMUMETHOD(mmu_kenter,		moea_kenter),
	MMUMETHOD(mmu_kenter_attr,	moea_kenter_attr),
	MMUMETHOD(mmu_dev_direct_mapped,moea_dev_direct_mapped),

	{ 0, 0 }
};

MMU_DEF(oea_mmu, MMU_TYPE_OEA, moea_methods, 0);
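/*
 * For orientation (a simplified sketch, not code from this file): the
 * machine-independent pmap API on powerpc is dispatched through the
 * method table registered by MMU_DEF() above, using MMU_*() wrappers
 * that the build generates from mmu_if.m.  Roughly, in
 * powerpc/powerpc/pmap_dispatch.c ("mmu_obj" being the installed kobj
 * instance; the real wrapper may carry additional arguments):
 */
#if 0
void
pmap_enter(pmap_t pmap, vm_offset_t va, vm_page_t p, vm_prot_t prot,
    boolean_t wired)
{

	/* Resolves to moea_enter() once oea_mmu is installed. */
	MMU_ENTER(mmu_obj, pmap, va, p, prot, wired);
}
#endif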
static __inline uint32_t
moea_calc_wimg(vm_offset_t pa, vm_memattr_t ma)
{
	uint32_t pte_lo;
	int i;

	if (ma != VM_MEMATTR_DEFAULT) {
		switch (ma) {
		case VM_MEMATTR_UNCACHEABLE:
			return (PTE_I | PTE_G);
		case VM_MEMATTR_WRITE_COMBINING:
		case VM_MEMATTR_WRITE_BACK:
		case VM_MEMATTR_PREFETCHABLE:
			return (PTE_I);
		case VM_MEMATTR_WRITE_THROUGH:
			return (PTE_W | PTE_M);
		}
	}

	/*
	 * Assume the page is cache inhibited and access is guarded unless
	 * it's in our available memory array.
	 */
	pte_lo = PTE_I | PTE_G;
	for (i = 0; i < pregions_sz; i++) {
		if ((pa >= pregions[i].mr_start) &&
		    (pa < (pregions[i].mr_start + pregions[i].mr_size))) {
			pte_lo = PTE_M;
			break;
		}
	}

	return (pte_lo);
}

static void
tlbie(vm_offset_t va)
{

	mtx_lock_spin(&tlbie_mtx);
	__asm __volatile("ptesync");
	__asm __volatile("tlbie %0" :: "r"(va));
	__asm __volatile("eieio; tlbsync; ptesync");
	mtx_unlock_spin(&tlbie_mtx);
}

static void
tlbia(void)
{
	vm_offset_t va;

	for (va = 0; va < 0x00040000; va += 0x00001000) {
		__asm __volatile("tlbie %0" :: "r"(va));
		powerpc_sync();
	}
	__asm __volatile("tlbsync");
	powerpc_sync();
}

static __inline int
va_to_sr(u_int *sr, vm_offset_t va)
{
	return (sr[(uintptr_t)va >> ADDR_SR_SHFT]);
}

static __inline u_int
va_to_pteg(u_int sr, vm_offset_t addr)
{
	u_int hash;

	hash = (sr & SR_VSID_MASK) ^ (((u_int)addr & ADDR_PIDX) >>
	    ADDR_PIDX_SHFT);
	return (hash & moea_pteg_mask);
}

static __inline struct pvo_head *
vm_page_to_pvoh(vm_page_t m)
{

	return (&m->md.mdpg_pvoh);
}

static __inline void
moea_attr_clear(vm_page_t m, int ptebit)
{

	mtx_assert(&vm_page_queue_mtx, MA_OWNED);
	m->md.mdpg_attrs &= ~ptebit;
}

static __inline int
moea_attr_fetch(vm_page_t m)
{

	return (m->md.mdpg_attrs);
}

static __inline void
moea_attr_save(vm_page_t m, int ptebit)
{

	mtx_assert(&vm_page_queue_mtx, MA_OWNED);
	m->md.mdpg_attrs |= ptebit;
}

static __inline int
moea_pte_compare(const struct pte *pt, const struct pte *pvo_pt)
{
	if (pt->pte_hi == pvo_pt->pte_hi)
		return (1);

	return (0);
}

static __inline int
moea_pte_match(struct pte *pt, u_int sr, vm_offset_t va, int which)
{
	return (pt->pte_hi & ~PTE_VALID) ==
	    (((sr & SR_VSID_MASK) << PTE_VSID_SHFT) |
	    ((va >> ADDR_API_SHFT) & PTE_API) | which);
}
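/*
 * Worked example for va_to_pteg() above, with made-up values and the
 * usual OEA constants assumed (ADDR_PIDX == 0x0ffff000, ADDR_PIDX_SHFT
 * == 12): for sr & SR_VSID_MASK == 0x00abcde and va == 0x12345000, the
 * page index is (0x12345000 & 0x0ffff000) >> 12 == 0x2345, the hash is
 * 0x00abcde ^ 0x2345 == 0x00a9f9b, and with a 4096-entry table
 * (moea_pteg_mask == 0xfff) the resulting PTEG index is 0xf9b.
 */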
static __inline void
moea_pte_create(struct pte *pt, u_int sr, vm_offset_t va, u_int pte_lo)
{

	mtx_assert(&moea_table_mutex, MA_OWNED);

	/*
	 * Construct a PTE.  Default to IMB initially.  Valid bit only gets
	 * set when the real pte is set in memory.
	 *
	 * Note: Don't set the valid bit for correct operation of tlb update.
	 */
	pt->pte_hi = ((sr & SR_VSID_MASK) << PTE_VSID_SHFT) |
	    (((va & ADDR_PIDX) >> ADDR_API_SHFT) & PTE_API);
	pt->pte_lo = pte_lo;
}

static __inline void
moea_pte_synch(struct pte *pt, struct pte *pvo_pt)
{

	mtx_assert(&moea_table_mutex, MA_OWNED);
	pvo_pt->pte_lo |= pt->pte_lo & (PTE_REF | PTE_CHG);
}

static __inline void
moea_pte_clear(struct pte *pt, vm_offset_t va, int ptebit)
{

	mtx_assert(&moea_table_mutex, MA_OWNED);

	/*
	 * As shown in Section 7.6.3.2.3
	 */
	pt->pte_lo &= ~ptebit;
	tlbie(va);
}

static __inline void
moea_pte_set(struct pte *pt, struct pte *pvo_pt)
{

	mtx_assert(&moea_table_mutex, MA_OWNED);
	pvo_pt->pte_hi |= PTE_VALID;

	/*
	 * Update the PTE as defined in section 7.6.3.1.
	 * Note that the REF/CHG bits are from pvo_pt and thus should have
	 * been saved so this routine can restore them (if desired).
	 */
	pt->pte_lo = pvo_pt->pte_lo;
	powerpc_sync();
	pt->pte_hi = pvo_pt->pte_hi;
	powerpc_sync();
	moea_pte_valid++;
}

static __inline void
moea_pte_unset(struct pte *pt, struct pte *pvo_pt, vm_offset_t va)
{

	mtx_assert(&moea_table_mutex, MA_OWNED);
	pvo_pt->pte_hi &= ~PTE_VALID;

	/*
	 * Force the reg & chg bits back into the PTEs.
	 */
	powerpc_sync();

	/*
	 * Invalidate the pte.
	 */
	pt->pte_hi &= ~PTE_VALID;

	tlbie(va);

	/*
	 * Save the reg & chg bits.
	 */
	moea_pte_synch(pt, pvo_pt);
	moea_pte_valid--;
}

static __inline void
moea_pte_change(struct pte *pt, struct pte *pvo_pt, vm_offset_t va)
{

	/*
	 * Invalidate the PTE
	 */
	moea_pte_unset(pt, pvo_pt, va);
	moea_pte_set(pt, pvo_pt);
}
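/*
 * The ordering above mirrors the architected page-table update sequence
 * (the "Section 7.6.3.1" cited above presumably refers to the PowerPC
 * Programming Environments manual): moea_pte_set() makes pte_lo globally
 * visible (powerpc_sync()) before pte_hi turns on PTE_VALID, so the
 * hardware table walker can never observe a valid entry with a
 * half-written body, while moea_pte_unset() clears PTE_VALID and issues
 * tlbie() before the REF/CHG bits are harvested.  moea_pte_change()
 * therefore never modifies a live PTE in place; it tears the entry down
 * and rebuilds it.
 */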
/*
 * Quick sort callout for comparing memory regions.
 */
static int	om_cmp(const void *a, const void *b);

static int
om_cmp(const void *a, const void *b)
{
	const struct	ofw_map *mapa;
	const struct	ofw_map *mapb;

	mapa = a;
	mapb = b;
	if (mapa->om_pa < mapb->om_pa)
		return (-1);
	else if (mapa->om_pa > mapb->om_pa)
		return (1);
	else
		return (0);
}

void
moea_cpu_bootstrap(mmu_t mmup, int ap)
{
	u_int sdr;
	int i;

	if (ap) {
		powerpc_sync();
		__asm __volatile("mtdbatu 0,%0" :: "r"(battable[0].batu));
		__asm __volatile("mtdbatl 0,%0" :: "r"(battable[0].batl));
		isync();
		__asm __volatile("mtibatu 0,%0" :: "r"(battable[0].batu));
		__asm __volatile("mtibatl 0,%0" :: "r"(battable[0].batl));
		isync();
	}

	__asm __volatile("mtdbatu 1,%0" :: "r"(battable[8].batu));
	__asm __volatile("mtdbatl 1,%0" :: "r"(battable[8].batl));
	isync();

	__asm __volatile("mtibatu 1,%0" :: "r"(0));
	__asm __volatile("mtdbatu 2,%0" :: "r"(0));
	__asm __volatile("mtibatu 2,%0" :: "r"(0));
	__asm __volatile("mtdbatu 3,%0" :: "r"(0));
	__asm __volatile("mtibatu 3,%0" :: "r"(0));
	isync();

	for (i = 0; i < 16; i++)
		mtsrin(i << ADDR_SR_SHFT, kernel_pmap->pm_sr[i]);
	powerpc_sync();

	sdr = (u_int)moea_pteg_table | (moea_pteg_mask >> 10);
	__asm __volatile("mtsdr1 %0" :: "r"(sdr));
	isync();

	tlbia();
}

void
moea_bootstrap(mmu_t mmup, vm_offset_t kernelstart, vm_offset_t kernelend)
{
	ihandle_t	mmui;
	phandle_t	chosen, mmu;
	int		sz;
	int		i, j;
	vm_size_t	size, physsz, hwphyssz;
	vm_offset_t	pa, va, off;
	void		*dpcpu;
	register_t	msr;

	/*
	 * Set up BAT0 to map the lowest 256 MB area
	 */
	battable[0x0].batl = BATL(0x00000000, BAT_M, BAT_PP_RW);
	battable[0x0].batu = BATU(0x00000000, BAT_BL_256M, BAT_Vs);
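	/*
	 * Decoding the pair above (a reading of the BATL/BATU macros from
	 * <machine/bat.h>, not additional setup): it describes one block
	 * address translation mapping effective 0x00000000-0x0fffffff 1:1
	 * onto physical memory, 256 MB long (BAT_BL_256M), memory-coherent
	 * (BAT_M), read/write (BAT_PP_RW), and valid in supervisor mode
	 * only (BAT_Vs).  The entry takes effect when loaded into the BAT
	 * registers below and in moea_cpu_bootstrap().
	 */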
	/*
	 * Map PCI memory space.
	 */
	battable[0x8].batl = BATL(0x80000000, BAT_I|BAT_G, BAT_PP_RW);
	battable[0x8].batu = BATU(0x80000000, BAT_BL_256M, BAT_Vs);

	battable[0x9].batl = BATL(0x90000000, BAT_I|BAT_G, BAT_PP_RW);
	battable[0x9].batu = BATU(0x90000000, BAT_BL_256M, BAT_Vs);

	battable[0xa].batl = BATL(0xa0000000, BAT_I|BAT_G, BAT_PP_RW);
	battable[0xa].batu = BATU(0xa0000000, BAT_BL_256M, BAT_Vs);

	battable[0xb].batl = BATL(0xb0000000, BAT_I|BAT_G, BAT_PP_RW);
	battable[0xb].batu = BATU(0xb0000000, BAT_BL_256M, BAT_Vs);

	/*
	 * Map obio devices.
	 */
	battable[0xf].batl = BATL(0xf0000000, BAT_I|BAT_G, BAT_PP_RW);
	battable[0xf].batu = BATU(0xf0000000, BAT_BL_256M, BAT_Vs);

	/*
	 * Use an IBAT and a DBAT to map the bottom segment of memory
	 * where we are.  Turn off instruction relocation temporarily
	 * to prevent faults while reprogramming the IBAT.
	 */
	msr = mfmsr();
	mtmsr(msr & ~PSL_IR);
	__asm (".balign 32; \n"
	       "mtibatu 0,%0; mtibatl 0,%1; isync; \n"
	       "mtdbatu 0,%0; mtdbatl 0,%1; isync"
	      :: "r"(battable[0].batu), "r"(battable[0].batl));
	mtmsr(msr);

	/* map pci space */
	__asm __volatile("mtdbatu 1,%0" :: "r"(battable[8].batu));
	__asm __volatile("mtdbatl 1,%0" :: "r"(battable[8].batl));
	isync();

	/* set global direct map flag */
	hw_direct_map = 1;

	mem_regions(&pregions, &pregions_sz, &regions, &regions_sz);
	CTR0(KTR_PMAP, "moea_bootstrap: physical memory");

	for (i = 0; i < pregions_sz; i++) {
		vm_offset_t pa;
		vm_offset_t end;

		CTR3(KTR_PMAP, "physregion: %#x - %#x (%#x)",
			pregions[i].mr_start,
			pregions[i].mr_start + pregions[i].mr_size,
			pregions[i].mr_size);
		/*
		 * Install entries into the BAT table to allow all
		 * of physmem to be covered by on-demand BAT entries.
		 * The loop will sometimes set the same battable element
		 * twice, but that's fine since they won't be used for
		 * a while yet.
		 */
		pa = pregions[i].mr_start & 0xf0000000;
		end = pregions[i].mr_start + pregions[i].mr_size;
		do {
			u_int n = pa >> ADDR_SR_SHFT;

			battable[n].batl = BATL(pa, BAT_M, BAT_PP_RW);
			battable[n].batu = BATU(pa, BAT_BL_256M, BAT_Vs);
			pa += SEGMENT_LENGTH;
		} while (pa < end);
	}

	if (sizeof(phys_avail)/sizeof(phys_avail[0]) < regions_sz)
		panic("moea_bootstrap: phys_avail too small");

	phys_avail_count = 0;
	physsz = 0;
	hwphyssz = 0;
	TUNABLE_ULONG_FETCH("hw.physmem", (u_long *) &hwphyssz);
	for (i = 0, j = 0; i < regions_sz; i++, j += 2) {
		CTR3(KTR_PMAP, "region: %#x - %#x (%#x)", regions[i].mr_start,
		    regions[i].mr_start + regions[i].mr_size,
		    regions[i].mr_size);
		if (hwphyssz != 0 &&
		    (physsz + regions[i].mr_size) >= hwphyssz) {
			if (physsz < hwphyssz) {
				phys_avail[j] = regions[i].mr_start;
				phys_avail[j + 1] = regions[i].mr_start +
				    hwphyssz - physsz;
				physsz = hwphyssz;
				phys_avail_count++;
			}
			break;
		}
		phys_avail[j] = regions[i].mr_start;
		phys_avail[j + 1] = regions[i].mr_start + regions[i].mr_size;
		phys_avail_count++;
		physsz += regions[i].mr_size;
	}

	/* Check for overlap with the kernel and exception vectors */
	for (j = 0; j < 2*phys_avail_count; j+=2) {
		if (phys_avail[j] < EXC_LAST)
			phys_avail[j] += EXC_LAST;

		if (kernelstart >= phys_avail[j] &&
		    kernelstart < phys_avail[j+1]) {
			if (kernelend < phys_avail[j+1]) {
				phys_avail[2*phys_avail_count] =
				    (kernelend & ~PAGE_MASK) + PAGE_SIZE;
				phys_avail[2*phys_avail_count + 1] =
				    phys_avail[j+1];
				phys_avail_count++;
			}

			phys_avail[j+1] = kernelstart & ~PAGE_MASK;
		}

		if (kernelend >= phys_avail[j] &&
		    kernelend < phys_avail[j+1]) {
			if (kernelstart > phys_avail[j]) {
				phys_avail[2*phys_avail_count] = phys_avail[j];
				phys_avail[2*phys_avail_count + 1] =
				    kernelstart & ~PAGE_MASK;
				phys_avail_count++;
			}

			phys_avail[j] = (kernelend & ~PAGE_MASK) + PAGE_SIZE;
		}
	}

	physmem = btoc(physsz);
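	/*
	 * At this point phys_avail[] holds (start, end) pairs of free
	 * physical memory, terminated by a zero entry.  A hypothetical
	 * layout after the fixups above, for a 256 MB machine with the
	 * kernel loaded at 0x00100000:
	 *
	 *	phys_avail[0] = EXC_LAST	phys_avail[1] = 0x00100000
	 *	phys_avail[2] = end of kernel	phys_avail[3] = 0x10000000
	 *	phys_avail[4] = 0		(terminator)
	 */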
	/*
	 * Allocate PTEG table.
	 */
#ifdef PTEGCOUNT
	moea_pteg_count = PTEGCOUNT;
#else
	moea_pteg_count = 0x1000;

	while (moea_pteg_count < physmem)
		moea_pteg_count <<= 1;

	moea_pteg_count >>= 1;
#endif /* PTEGCOUNT */

	size = moea_pteg_count * sizeof(struct pteg);
	CTR2(KTR_PMAP, "moea_bootstrap: %d PTEGs, %d bytes", moea_pteg_count,
	    size);
	moea_pteg_table = (struct pteg *)moea_bootstrap_alloc(size, size);
	CTR1(KTR_PMAP, "moea_bootstrap: PTEG table at %p", moea_pteg_table);
	bzero((void *)moea_pteg_table, moea_pteg_count * sizeof(struct pteg));
	moea_pteg_mask = moea_pteg_count - 1;
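	/*
	 * Worked example of the sizing above, with hypothetical numbers:
	 * on a 128 MB machine physmem is 0x8000 pages, so the loop grows
	 * moea_pteg_count from 0x1000 to 0x8000 and the final shift leaves
	 * 0x4000 PTEGs (one 64-byte PTEG, i.e. eight PTEs, per two pages
	 * of RAM), giving a 1 MB table and moea_pteg_mask == 0x3fff.
	 */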
	/*
	 * Allocate pv/overflow lists.
	 */
	size = sizeof(struct pvo_head) * moea_pteg_count;
	moea_pvo_table = (struct pvo_head *)moea_bootstrap_alloc(size,
	    PAGE_SIZE);
	CTR1(KTR_PMAP, "moea_bootstrap: PVO table at %p", moea_pvo_table);
	for (i = 0; i < moea_pteg_count; i++)
		LIST_INIT(&moea_pvo_table[i]);

	/*
	 * Initialize the lock that synchronizes access to the pteg and pvo
	 * tables.
	 */
	mtx_init(&moea_table_mutex, "pmap table", NULL, MTX_DEF |
	    MTX_RECURSE);
	mtx_init(&moea_vsid_mutex, "VSID table", NULL, MTX_DEF);

	mtx_init(&tlbie_mtx, "tlbie", NULL, MTX_SPIN);

	/*
	 * Initialise the unmanaged pvo pool.
	 */
	moea_bpvo_pool = (struct pvo_entry *)moea_bootstrap_alloc(
		BPVO_POOL_SIZE*sizeof(struct pvo_entry), 0);
	moea_bpvo_pool_index = 0;

	/*
	 * Make sure kernel vsid is allocated as well as VSID 0.
	 */
	moea_vsid_bitmap[(KERNEL_VSIDBITS & (NPMAPS - 1)) / VSID_NBPW]
		|= 1 << (KERNEL_VSIDBITS % VSID_NBPW);
	moea_vsid_bitmap[0] |= 1;

	/*
	 * Initialize the kernel pmap (which is statically allocated).
	 */
	PMAP_LOCK_INIT(kernel_pmap);
	for (i = 0; i < 16; i++)
		kernel_pmap->pm_sr[i] = EMPTY_SEGMENT + i;
	CPU_FILL(&kernel_pmap->pm_active);
	RB_INIT(&kernel_pmap->pmap_pvo);

	/*
	 * Set up the Open Firmware mappings
	 */
	chosen = OF_finddevice("/chosen");
	if (chosen != -1 && OF_getprop(chosen, "mmu", &mmui, 4) != -1 &&
	    (mmu = OF_instance_to_package(mmui)) != -1 &&
	    (sz = OF_getproplen(mmu, "translations")) != -1) {
		translations = NULL;
		for (i = 0; phys_avail[i] != 0; i += 2) {
			if (phys_avail[i + 1] >= sz) {
				translations = (struct ofw_map *)phys_avail[i];
				break;
			}
		}
		if (translations == NULL)
			panic("moea_bootstrap: no space to copy translations");
		bzero(translations, sz);
		if (OF_getprop(mmu, "translations", translations, sz) == -1)
			panic("moea_bootstrap: can't get ofw translations");
		CTR0(KTR_PMAP, "moea_bootstrap: translations");
		sz /= sizeof(*translations);
		qsort(translations, sz, sizeof (*translations), om_cmp);
		for (i = 0; i < sz; i++) {
			CTR3(KTR_PMAP, "translation: pa=%#x va=%#x len=%#x",
			    translations[i].om_pa, translations[i].om_va,
			    translations[i].om_len);

			/*
			 * If the mapping is 1:1, let the RAM and device
			 * on-demand BAT tables take care of the translation.
			 */
			if (translations[i].om_va == translations[i].om_pa)
				continue;

			/* Enter the pages */
			for (off = 0; off < translations[i].om_len;
			    off += PAGE_SIZE)
				moea_kenter(mmup, translations[i].om_va + off,
				    translations[i].om_pa + off);
		}
	}

	/*
	 * Calculate the last available physical address.
	 */
	for (i = 0; phys_avail[i + 2] != 0; i += 2)
		;
	Maxmem = powerpc_btop(phys_avail[i + 1]);

	moea_cpu_bootstrap(mmup, 0);

	pmap_bootstrapped++;

	/*
	 * Set the start and end of kva.
	 */
	virtual_avail = VM_MIN_KERNEL_ADDRESS;
	virtual_end = VM_MAX_SAFE_KERNEL_ADDRESS;

	/*
	 * Allocate a kernel stack with a guard page for thread0 and map it
	 * into the kernel page map.
	 */
	pa = moea_bootstrap_alloc(KSTACK_PAGES * PAGE_SIZE, PAGE_SIZE);
	va = virtual_avail + KSTACK_GUARD_PAGES * PAGE_SIZE;
	virtual_avail = va + KSTACK_PAGES * PAGE_SIZE;
	CTR2(KTR_PMAP, "moea_bootstrap: kstack0 at %#x (%#x)", pa, va);
	thread0.td_kstack = va;
	thread0.td_kstack_pages = KSTACK_PAGES;
	for (i = 0; i < KSTACK_PAGES; i++) {
		moea_kenter(mmup, va, pa);
		pa += PAGE_SIZE;
		va += PAGE_SIZE;
	}

	/*
	 * Allocate virtual address space for the message buffer.
	 */
	pa = msgbuf_phys = moea_bootstrap_alloc(msgbufsize, PAGE_SIZE);
	msgbufp = (struct msgbuf *)virtual_avail;
	va = virtual_avail;
	virtual_avail += round_page(msgbufsize);
	while (va < virtual_avail) {
		moea_kenter(mmup, va, pa);
		pa += PAGE_SIZE;
		va += PAGE_SIZE;
	}

	/*
	 * Allocate virtual address space for the dynamic percpu area.
	 */
	pa = moea_bootstrap_alloc(DPCPU_SIZE, PAGE_SIZE);
	dpcpu = (void *)virtual_avail;
	va = virtual_avail;
	virtual_avail += DPCPU_SIZE;
	while (va < virtual_avail) {
		moea_kenter(mmup, va, pa);
		pa += PAGE_SIZE;
		va += PAGE_SIZE;
	}
	dpcpu_init(dpcpu, 0);
}

/*
 * Activate a user pmap.  The pmap must be activated before its address
 * space can be accessed in any way.
 */
void
moea_activate(mmu_t mmu, struct thread *td)
{
	pmap_t	pm, pmr;

	/*
	 * Load all the data we need up front to encourage the compiler to
	 * not issue any loads while we have interrupts disabled below.
	 */
	pm = &td->td_proc->p_vmspace->vm_pmap;
	pmr = pm->pmap_phys;

	CPU_SET(PCPU_GET(cpuid), &pm->pm_active);
	PCPU_SET(curpmap, pmr);
}

void
moea_deactivate(mmu_t mmu, struct thread *td)
{
	pmap_t	pm;

	pm = &td->td_proc->p_vmspace->vm_pmap;
	CPU_CLR(PCPU_GET(cpuid), &pm->pm_active);
	PCPU_SET(curpmap, NULL);
}

void
moea_change_wiring(mmu_t mmu, pmap_t pm, vm_offset_t va, boolean_t wired)
{
	struct	pvo_entry *pvo;

	PMAP_LOCK(pm);
	pvo = moea_pvo_find_va(pm, va & ~ADDR_POFF, NULL);

	if (pvo != NULL) {
		if (wired) {
			if ((pvo->pvo_vaddr & PVO_WIRED) == 0)
				pm->pm_stats.wired_count++;
			pvo->pvo_vaddr |= PVO_WIRED;
		} else {
			if ((pvo->pvo_vaddr & PVO_WIRED) != 0)
				pm->pm_stats.wired_count--;
			pvo->pvo_vaddr &= ~PVO_WIRED;
		}
	}
	PMAP_UNLOCK(pm);
}

void
moea_copy_page(mmu_t mmu, vm_page_t msrc, vm_page_t mdst)
{
	vm_offset_t	dst;
	vm_offset_t	src;

	dst = VM_PAGE_TO_PHYS(mdst);
	src = VM_PAGE_TO_PHYS(msrc);

	bcopy((void *)src, (void *)dst, PAGE_SIZE);
}

/*
 * Zero a page of physical memory, accessed through the direct map.
 */
void
moea_zero_page(mmu_t mmu, vm_page_t m)
{
	vm_offset_t pa = VM_PAGE_TO_PHYS(m);
	void *va = (void *)pa;

	bzero(va, PAGE_SIZE);
}

void
moea_zero_page_area(mmu_t mmu, vm_page_t m, int off, int size)
{
	vm_offset_t pa = VM_PAGE_TO_PHYS(m);
	void *va = (void *)(pa + off);

	bzero(va, size);
}

void
moea_zero_page_idle(mmu_t mmu, vm_page_t m)
{
	vm_offset_t pa = VM_PAGE_TO_PHYS(m);
	void *va = (void *)pa;

	bzero(va, PAGE_SIZE);
}

/*
 * Map the given physical page at the specified virtual address in the
 * target pmap with the protection requested.  If specified the page
 * will be wired down.
 */
void
moea_enter(mmu_t mmu, pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot,
    boolean_t wired)
{

	vm_page_lock_queues();
	PMAP_LOCK(pmap);
	moea_enter_locked(pmap, va, m, prot, wired);
	vm_page_unlock_queues();
	PMAP_UNLOCK(pmap);
}

/*
 * Map the given physical page at the specified virtual address in the
 * target pmap with the protection requested.  If specified the page
 * will be wired down.
 *
 * The page queues and pmap must be locked.
 */
static void
moea_enter_locked(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot,
    boolean_t wired)
{
	struct		pvo_head *pvo_head;
	uma_zone_t	zone;
	vm_page_t	pg;
	u_int		pte_lo, pvo_flags;
	int		error;

	if (!moea_initialized) {
		pvo_head = &moea_pvo_kunmanaged;
		zone = moea_upvo_zone;
		pvo_flags = 0;
		pg = NULL;
	} else {
		pvo_head = vm_page_to_pvoh(m);
		pg = m;
		zone = moea_mpvo_zone;
		pvo_flags = PVO_MANAGED;
	}
	if (pmap_bootstrapped)
		mtx_assert(&vm_page_queue_mtx, MA_OWNED);
	PMAP_LOCK_ASSERT(pmap, MA_OWNED);
	KASSERT((m->oflags & (VPO_UNMANAGED | VPO_BUSY)) != 0 ||
	    VM_OBJECT_LOCKED(m->object),
	    ("moea_enter_locked: page %p is not busy", m));

	/* XXX change the pvo head for fake pages */
	if ((m->oflags & VPO_UNMANAGED) != 0) {
		pvo_flags &= ~PVO_MANAGED;
		pvo_head = &moea_pvo_kunmanaged;
		zone = moea_upvo_zone;
	}

	pte_lo = moea_calc_wimg(VM_PAGE_TO_PHYS(m), pmap_page_get_memattr(m));

	if (prot & VM_PROT_WRITE) {
		pte_lo |= PTE_BW;
		if (pmap_bootstrapped &&
		    (m->oflags & VPO_UNMANAGED) == 0)
			vm_page_aflag_set(m, PGA_WRITEABLE);
	} else
		pte_lo |= PTE_BR;

	if (prot & VM_PROT_EXECUTE)
		pvo_flags |= PVO_EXECUTABLE;

	if (wired)
		pvo_flags |= PVO_WIRED;

	error = moea_pvo_enter(pmap, zone, pvo_head, va, VM_PAGE_TO_PHYS(m),
	    pte_lo, pvo_flags);

	/*
	 * Flush the real page from the instruction cache.  This has to be
	 * done for all user mappings to prevent information leakage via the
	 * instruction cache.  moea_pvo_enter() returns ENOENT for the first
	 * mapping for a page.
	 */
	if (pmap != kernel_pmap && error == ENOENT &&
	    (pte_lo & (PTE_I | PTE_G)) == 0)
		moea_syncicache(VM_PAGE_TO_PHYS(m), PAGE_SIZE);
}
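/*
 * Illustrative sketch (#if 0, made-up caller): what an ordinary fault on
 * a managed, cacheable, writable page boils down to at this layer.
 */
#if 0
static void
enter_example(mmu_t mmu, pmap_t pmap, vm_offset_t va, vm_page_t m)
{

	/*
	 * moea_enter() takes the page queues and pmap locks and calls
	 * moea_enter_locked(), which for this case computes
	 * pte_lo = PTE_M | PTE_BW and pvo_flags = PVO_MANAGED, then links
	 * a new PVO into both the PTEG's list and the page's pvo list via
	 * moea_pvo_enter().
	 */
	moea_enter(mmu, pmap, va, m, VM_PROT_READ | VM_PROT_WRITE, FALSE);
}
#endif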
/*
 * Maps a sequence of resident pages belonging to the same object.
 * The sequence begins with the given page m_start.  This page is
 * mapped at the given virtual address start.  Each subsequent page is
 * mapped at a virtual address that is offset from start by the same
 * amount as the page is offset from m_start within the object.  The
 * last page in the sequence is the page with the largest offset from
 * m_start that can be mapped at a virtual address less than the given
 * virtual address end.  Not every virtual page between start and end
 * is mapped; only those for which a resident page exists with the
 * corresponding offset from m_start are mapped.
 */
void
moea_enter_object(mmu_t mmu, pmap_t pm, vm_offset_t start, vm_offset_t end,
    vm_page_t m_start, vm_prot_t prot)
{
	vm_page_t m;
	vm_pindex_t diff, psize;

	psize = atop(end - start);
	m = m_start;
	vm_page_lock_queues();
	PMAP_LOCK(pm);
	while (m != NULL && (diff = m->pindex - m_start->pindex) < psize) {
		moea_enter_locked(pm, start + ptoa(diff), m, prot &
		    (VM_PROT_READ | VM_PROT_EXECUTE), FALSE);
		m = TAILQ_NEXT(m, listq);
	}
	vm_page_unlock_queues();
	PMAP_UNLOCK(pm);
}

void
moea_enter_quick(mmu_t mmu, pmap_t pm, vm_offset_t va, vm_page_t m,
    vm_prot_t prot)
{

	vm_page_lock_queues();
	PMAP_LOCK(pm);
	moea_enter_locked(pm, va, m, prot & (VM_PROT_READ | VM_PROT_EXECUTE),
	    FALSE);
	vm_page_unlock_queues();
	PMAP_UNLOCK(pm);
}

vm_paddr_t
moea_extract(mmu_t mmu, pmap_t pm, vm_offset_t va)
{
	struct	pvo_entry *pvo;
	vm_paddr_t pa;

	PMAP_LOCK(pm);
	pvo = moea_pvo_find_va(pm, va & ~ADDR_POFF, NULL);
	if (pvo == NULL)
		pa = 0;
	else
		pa = (pvo->pvo_pte.pte.pte_lo & PTE_RPGN) | (va & ADDR_POFF);
	PMAP_UNLOCK(pm);
	return (pa);
}
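/*
 * A note on the two lookup flavors: moea_extract() above only translates
 * va to pa under the pmap lock, so the result can go stale as soon as the
 * lock drops.  moea_extract_and_hold() below additionally takes a hold
 * reference on the page, using vm_page_pa_tryrelock() to acquire the page
 * lock without deadlocking against the pmap lock, so the caller receives
 * a page that cannot be freed underneath it.
 */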
/*
 * Atomically extract and hold the physical page with the given
 * pmap and virtual address pair if that mapping permits the given
 * protection.
 */
vm_page_t
moea_extract_and_hold(mmu_t mmu, pmap_t pmap, vm_offset_t va, vm_prot_t prot)
{
	struct	pvo_entry *pvo;
	vm_page_t m;
	vm_paddr_t pa;

	m = NULL;
	pa = 0;
	PMAP_LOCK(pmap);
retry:
	pvo = moea_pvo_find_va(pmap, va & ~ADDR_POFF, NULL);
	if (pvo != NULL && (pvo->pvo_pte.pte.pte_hi & PTE_VALID) &&
	    ((pvo->pvo_pte.pte.pte_lo & PTE_PP) == PTE_RW ||
	     (prot & VM_PROT_WRITE) == 0)) {
		if (vm_page_pa_tryrelock(pmap, pvo->pvo_pte.pte.pte_lo & PTE_RPGN, &pa))
			goto retry;
		m = PHYS_TO_VM_PAGE(pvo->pvo_pte.pte.pte_lo & PTE_RPGN);
		vm_page_hold(m);
	}
	PA_UNLOCK_COND(pa);
	PMAP_UNLOCK(pmap);
	return (m);
}

void
moea_init(mmu_t mmu)
{

	moea_upvo_zone = uma_zcreate("UPVO entry", sizeof (struct pvo_entry),
	    NULL, NULL, NULL, NULL, UMA_ALIGN_PTR,
	    UMA_ZONE_VM | UMA_ZONE_NOFREE);
	moea_mpvo_zone = uma_zcreate("MPVO entry", sizeof(struct pvo_entry),
	    NULL, NULL, NULL, NULL, UMA_ALIGN_PTR,
	    UMA_ZONE_VM | UMA_ZONE_NOFREE);
	moea_initialized = TRUE;
}

boolean_t
moea_is_referenced(mmu_t mmu, vm_page_t m)
{

	KASSERT((m->oflags & VPO_UNMANAGED) == 0,
	    ("moea_is_referenced: page %p is not managed", m));
	return (moea_query_bit(m, PTE_REF));
}

boolean_t
moea_is_modified(mmu_t mmu, vm_page_t m)
{

	KASSERT((m->oflags & VPO_UNMANAGED) == 0,
	    ("moea_is_modified: page %p is not managed", m));

	/*
	 * If the page is not VPO_BUSY, then PGA_WRITEABLE cannot be
	 * concurrently set while the object is locked.  Thus, if PGA_WRITEABLE
	 * is clear, no PTEs can have PTE_CHG set.
	 */
	VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED);
	if ((m->oflags & VPO_BUSY) == 0 &&
	    (m->aflags & PGA_WRITEABLE) == 0)
		return (FALSE);
	return (moea_query_bit(m, PTE_CHG));
}

boolean_t
moea_is_prefaultable(mmu_t mmu, pmap_t pmap, vm_offset_t va)
{
	struct pvo_entry *pvo;
	boolean_t rv;

	PMAP_LOCK(pmap);
	pvo = moea_pvo_find_va(pmap, va & ~ADDR_POFF, NULL);
	rv = pvo == NULL || (pvo->pvo_pte.pte.pte_hi & PTE_VALID) == 0;
	PMAP_UNLOCK(pmap);
	return (rv);
}

void
moea_clear_reference(mmu_t mmu, vm_page_t m)
{

	KASSERT((m->oflags & VPO_UNMANAGED) == 0,
	    ("moea_clear_reference: page %p is not managed", m));
	moea_clear_bit(m, PTE_REF);
}

void
moea_clear_modify(mmu_t mmu, vm_page_t m)
{

	KASSERT((m->oflags & VPO_UNMANAGED) == 0,
	    ("moea_clear_modify: page %p is not managed", m));
	VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED);
	KASSERT((m->oflags & VPO_BUSY) == 0,
	    ("moea_clear_modify: page %p is busy", m));

	/*
	 * If the page is not PGA_WRITEABLE, then no PTEs can have PTE_CHG
	 * set.  If the object containing the page is locked and the page is
	 * not VPO_BUSY, then PGA_WRITEABLE cannot be concurrently set.
	 */
	if ((m->aflags & PGA_WRITEABLE) == 0)
		return;
	moea_clear_bit(m, PTE_CHG);
}

/*
 * Clear the write and modified bits in each of the given page's mappings.
 */
void
moea_remove_write(mmu_t mmu, vm_page_t m)
{
	struct	pvo_entry *pvo;
	struct	pte *pt;
	pmap_t	pmap;
	u_int	lo;

	KASSERT((m->oflags & VPO_UNMANAGED) == 0,
	    ("moea_remove_write: page %p is not managed", m));

	/*
	 * If the page is not VPO_BUSY, then PGA_WRITEABLE cannot be set by
	 * another thread while the object is locked.  Thus, if PGA_WRITEABLE
	 * is clear, no page table entries need updating.
/*
 * Clear the write and modified bits in each of the given page's mappings.
 */
void
moea_remove_write(mmu_t mmu, vm_page_t m)
{
	struct pvo_entry *pvo;
	struct pte *pt;
	pmap_t pmap;
	u_int lo;

	KASSERT((m->oflags & VPO_UNMANAGED) == 0,
	    ("moea_remove_write: page %p is not managed", m));

	/*
	 * If the page is not VPO_BUSY, then PGA_WRITEABLE cannot be set by
	 * another thread while the object is locked.  Thus, if PGA_WRITEABLE
	 * is clear, no page table entries need updating.
	 */
	VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED);
	if ((m->oflags & VPO_BUSY) == 0 &&
	    (m->aflags & PGA_WRITEABLE) == 0)
		return;
	vm_page_lock_queues();
	lo = moea_attr_fetch(m);
	powerpc_sync();
	LIST_FOREACH(pvo, vm_page_to_pvoh(m), pvo_vlink) {
		pmap = pvo->pvo_pmap;
		PMAP_LOCK(pmap);
		if ((pvo->pvo_pte.pte.pte_lo & PTE_PP) != PTE_BR) {
			pt = moea_pvo_to_pte(pvo, -1);
			pvo->pvo_pte.pte.pte_lo &= ~PTE_PP;
			pvo->pvo_pte.pte.pte_lo |= PTE_BR;
			if (pt != NULL) {
				moea_pte_synch(pt, &pvo->pvo_pte.pte);
				lo |= pvo->pvo_pte.pte.pte_lo;
				pvo->pvo_pte.pte.pte_lo &= ~PTE_CHG;
				moea_pte_change(pt, &pvo->pvo_pte.pte,
				    pvo->pvo_vaddr);
				mtx_unlock(&moea_table_mutex);
			}
		}
		PMAP_UNLOCK(pmap);
	}
	if ((lo & PTE_CHG) != 0) {
		moea_attr_clear(m, PTE_CHG);
		vm_page_dirty(m);
	}
	vm_page_aflag_clear(m, PGA_WRITEABLE);
	vm_page_unlock_queues();
}

/*
 * moea_ts_referenced:
 *
 *	Return a count of reference bits for a page, clearing those bits.
 *	It is not necessary for every reference bit to be cleared, but it
 *	is necessary that 0 only be returned when there are truly no
 *	reference bits set.
 *
 *	XXX: The exact number of bits to check and clear is a matter that
 *	should be tested and standardized at some point in the future for
 *	optimal aging of shared pages.
 */
int
moea_ts_referenced(mmu_t mmu, vm_page_t m)
{

	KASSERT((m->oflags & VPO_UNMANAGED) == 0,
	    ("moea_ts_referenced: page %p is not managed", m));
	return (moea_clear_bit(m, PTE_REF));
}
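/*
 * Illustrative sketch (hypothetical, not part of this pmap): how a page
 * scanner might use the count from moea_ts_referenced() for aging.  A
 * nonzero count means at least one mapping referenced the page since the
 * last scan; act_delta and the ACT_ADVANCE idiom are borrowed from the
 * MI pageout code and shown here only as an example.
 */
#if 0
	act_delta = moea_ts_referenced(mmu, m);
	if (act_delta > 0)
		m->act_count += ACT_ADVANCE;	/* page is in active use */
#endif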
/*
 * Modify the WIMG settings of all mappings for a page.
 */
void
moea_page_set_memattr(mmu_t mmu, vm_page_t m, vm_memattr_t ma)
{
	struct pvo_entry *pvo;
	struct pvo_head *pvo_head;
	struct pte *pt;
	pmap_t pmap;
	u_int lo;

	if ((m->oflags & VPO_UNMANAGED) != 0) {
		m->md.mdpg_cache_attrs = ma;
		return;
	}

	vm_page_lock_queues();
	pvo_head = vm_page_to_pvoh(m);
	lo = moea_calc_wimg(VM_PAGE_TO_PHYS(m), ma);

	LIST_FOREACH(pvo, pvo_head, pvo_vlink) {
		pmap = pvo->pvo_pmap;
		PMAP_LOCK(pmap);
		pt = moea_pvo_to_pte(pvo, -1);
		pvo->pvo_pte.pte.pte_lo &= ~PTE_WIMG;
		pvo->pvo_pte.pte.pte_lo |= lo;
		if (pt != NULL) {
			moea_pte_change(pt, &pvo->pvo_pte.pte,
			    pvo->pvo_vaddr);
			if (pvo->pvo_pmap == kernel_pmap)
				isync();
			mtx_unlock(&moea_table_mutex);
		}
		PMAP_UNLOCK(pmap);
	}
	m->md.mdpg_cache_attrs = ma;
	vm_page_unlock_queues();
}

/*
 * Map a wired page into kernel virtual address space.
 */
void
moea_kenter(mmu_t mmu, vm_offset_t va, vm_offset_t pa)
{

	moea_kenter_attr(mmu, va, pa, VM_MEMATTR_DEFAULT);
}

void
moea_kenter_attr(mmu_t mmu, vm_offset_t va, vm_offset_t pa, vm_memattr_t ma)
{
	u_int pte_lo;
	int error;

#if 0
	if (va < VM_MIN_KERNEL_ADDRESS)
		panic("moea_kenter: attempt to enter non-kernel address %#x",
		    va);
#endif

	pte_lo = moea_calc_wimg(pa, ma);

	PMAP_LOCK(kernel_pmap);
	error = moea_pvo_enter(kernel_pmap, moea_upvo_zone,
	    &moea_pvo_kunmanaged, va, pa, pte_lo, PVO_WIRED);

	if (error != 0 && error != ENOENT)
		panic("moea_kenter: failed to enter va %#x pa %#x: %d", va,
		    pa, error);

	PMAP_UNLOCK(kernel_pmap);
}
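/*
 * Illustrative sketch (hypothetical values): entering a single wired,
 * cache-inhibited kernel mapping for a device register page.  The
 * VM_MEMATTR_UNCACHEABLE attribute is assumed to be defined for this
 * platform; moea_calc_wimg() translates it into the appropriate
 * cache-inhibit/guard WIMG bits in pte_lo.
 */
#if 0
	moea_kenter_attr(mmu, regs_va, regs_pa, VM_MEMATTR_UNCACHEABLE);
#endif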
/*
 * Extract the physical page address associated with the given kernel virtual
 * address.
 */
vm_offset_t
moea_kextract(mmu_t mmu, vm_offset_t va)
{
	struct pvo_entry *pvo;
	vm_paddr_t pa;

	/*
	 * Allow direct mappings on 32-bit OEA
	 */
	if (va < VM_MIN_KERNEL_ADDRESS) {
		return (va);
	}

	PMAP_LOCK(kernel_pmap);
	pvo = moea_pvo_find_va(kernel_pmap, va & ~ADDR_POFF, NULL);
	KASSERT(pvo != NULL, ("moea_kextract: no addr found"));
	pa = (pvo->pvo_pte.pte.pte_lo & PTE_RPGN) | (va & ADDR_POFF);
	PMAP_UNLOCK(kernel_pmap);
	return (pa);
}

/*
 * Remove a wired page from kernel virtual address space.
 */
void
moea_kremove(mmu_t mmu, vm_offset_t va)
{

	moea_remove(mmu, kernel_pmap, va, va + PAGE_SIZE);
}

/*
 * Map a range of physical addresses into kernel virtual address space.
 *
 * The value passed in *virt is a suggested virtual address for the mapping.
 * Architectures which can support a direct-mapped physical to virtual region
 * can return the appropriate address within that region, leaving '*virt'
 * unchanged.  We cannot and therefore do not; *virt is updated with the
 * first usable address after the mapped region.
 */
vm_offset_t
moea_map(mmu_t mmu, vm_offset_t *virt, vm_offset_t pa_start,
    vm_offset_t pa_end, int prot)
{
	vm_offset_t sva, va;

	sva = *virt;
	va = sva;
	for (; pa_start < pa_end; pa_start += PAGE_SIZE, va += PAGE_SIZE)
		moea_kenter(mmu, va, pa_start);
	*virt = va;
	return (sva);
}

/*
 * Returns true if the pmap's pv is one of the first
 * 16 pvs linked to from this page.  This count may
 * be changed upwards or downwards in the future; it
 * is only necessary that true be returned for a small
 * subset of pmaps for proper page aging.
 */
boolean_t
moea_page_exists_quick(mmu_t mmu, pmap_t pmap, vm_page_t m)
{
	int loops;
	struct pvo_entry *pvo;
	boolean_t rv;

	KASSERT((m->oflags & VPO_UNMANAGED) == 0,
	    ("moea_page_exists_quick: page %p is not managed", m));
	loops = 0;
	rv = FALSE;
	vm_page_lock_queues();
	LIST_FOREACH(pvo, vm_page_to_pvoh(m), pvo_vlink) {
		if (pvo->pvo_pmap == pmap) {
			rv = TRUE;
			break;
		}
		if (++loops >= 16)
			break;
	}
	vm_page_unlock_queues();
	return (rv);
}
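/*
 * Illustrative sketch (hypothetical values): how a caller drives
 * moea_map() when wiring down a physical range at boot.  The return
 * value is the start of the new mapping; the cursor passed by reference
 * is advanced past it.
 */
#if 0
	vm_offset_t va, sva;

	va = suggested_kva;		/* hypothetical starting cursor */
	sva = moea_map(mmu, &va, pa_start, pa_end,
	    VM_PROT_READ | VM_PROT_WRITE);
	/* sva now maps pa_start; va is the first free address after it. */
#endif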
/*
 * Return the number of managed mappings to the given physical page
 * that are wired.
 */
int
moea_page_wired_mappings(mmu_t mmu, vm_page_t m)
{
	struct pvo_entry *pvo;
	int count;

	count = 0;
	if ((m->oflags & VPO_UNMANAGED) != 0)
		return (count);
	vm_page_lock_queues();
	LIST_FOREACH(pvo, vm_page_to_pvoh(m), pvo_vlink)
		if ((pvo->pvo_vaddr & PVO_WIRED) != 0)
			count++;
	vm_page_unlock_queues();
	return (count);
}

static u_int moea_vsidcontext;

void
moea_pinit(mmu_t mmu, pmap_t pmap)
{
	int i, mask;
	u_int entropy;

	KASSERT((int)pmap < VM_MIN_KERNEL_ADDRESS, ("moea_pinit: virt pmap"));
	PMAP_LOCK_INIT(pmap);
	RB_INIT(&pmap->pmap_pvo);

	entropy = 0;
	__asm __volatile("mftb %0" : "=r"(entropy));

	if ((pmap->pmap_phys = (pmap_t)moea_kextract(mmu, (vm_offset_t)pmap))
	    == NULL) {
		pmap->pmap_phys = pmap;
	}

	mtx_lock(&moea_vsid_mutex);
	/*
	 * Allocate some segment registers for this pmap.
	 */
	for (i = 0; i < NPMAPS; i += VSID_NBPW) {
		u_int hash, n;

		/*
		 * Create a new value by multiplying by a prime and adding in
		 * entropy from the timebase register.  This is to make the
		 * VSID more random so that the PT hash function collides
		 * less often.  (Note that the prime causes gcc to do shifts
		 * instead of a multiply.)
		 */
		moea_vsidcontext = (moea_vsidcontext * 0x1105) + entropy;
		hash = moea_vsidcontext & (NPMAPS - 1);
		if (hash == 0)		/* 0 is special, avoid it */
			continue;
		n = hash >> 5;
		mask = 1 << (hash & (VSID_NBPW - 1));
		hash = (moea_vsidcontext & 0xfffff);
		if (moea_vsid_bitmap[n] & mask) {	/* collision? */
			/* anything free in this bucket? */
			if (moea_vsid_bitmap[n] == 0xffffffff) {
				entropy = (moea_vsidcontext >> 20);
				continue;
			}
			i = ffs(~moea_vsid_bitmap[n]) - 1;
			mask = 1 << i;
			hash &= 0xfffff & ~(VSID_NBPW - 1);
			hash |= i;
		}
		KASSERT(!(moea_vsid_bitmap[n] & mask),
		    ("Allocating in-use VSID group %#x\n", hash));
		moea_vsid_bitmap[n] |= mask;
		for (i = 0; i < 16; i++)
			pmap->pm_sr[i] = VSID_MAKE(i, hash);
		mtx_unlock(&moea_vsid_mutex);
		return;
	}

	mtx_unlock(&moea_vsid_mutex);
	panic("moea_pinit: out of segments");
}
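/*
 * Worked example of the allocator above (assuming VSID_NBPW == 32): a
 * draw of moea_vsidcontext whose low bits hash to 0x2345 selects bitmap
 * word n = 0x2345 >> 5 = 0x11a and bit 0x2345 & 31 = 5.  If that bit is
 * already taken, ffs() picks the lowest free bit in the same word and the
 * low bits of the hash are rewritten to match it, so the 20-bit VSID
 * group always agrees with the bitmap position that reserves it.  The
 * sixteen segment registers then receive VSIDs that differ only in their
 * top four bits, via VSID_MAKE(i, hash).
 */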
/*
 * Initialize the pmap associated with process 0.
 */
void
moea_pinit0(mmu_t mmu, pmap_t pm)
{

	moea_pinit(mmu, pm);
	bzero(&pm->pm_stats, sizeof(pm->pm_stats));
}

/*
 * Set the physical protection on the specified range of this map as requested.
 */
void
moea_protect(mmu_t mmu, pmap_t pm, vm_offset_t sva, vm_offset_t eva,
    vm_prot_t prot)
{
	struct pvo_entry *pvo, *tpvo, key;
	struct pte *pt;

	KASSERT(pm == &curproc->p_vmspace->vm_pmap || pm == kernel_pmap,
	    ("moea_protect: non current pmap"));

	if ((prot & VM_PROT_READ) == VM_PROT_NONE) {
		moea_remove(mmu, pm, sva, eva);
		return;
	}

	vm_page_lock_queues();
	PMAP_LOCK(pm);
	key.pvo_vaddr = sva;
	for (pvo = RB_NFIND(pvo_tree, &pm->pmap_pvo, &key);
	    pvo != NULL && PVO_VADDR(pvo) < eva; pvo = tpvo) {
		tpvo = RB_NEXT(pvo_tree, &pm->pmap_pvo, pvo);
		if ((prot & VM_PROT_EXECUTE) == 0)
			pvo->pvo_vaddr &= ~PVO_EXECUTABLE;

		/*
		 * Grab the PTE pointer before we diddle with the cached PTE
		 * copy.
		 */
		pt = moea_pvo_to_pte(pvo, -1);

		/*
		 * Change the protection of the page.
		 */
		pvo->pvo_pte.pte.pte_lo &= ~PTE_PP;
		pvo->pvo_pte.pte.pte_lo |= PTE_BR;

		/*
		 * If the PVO is in the page table, update that pte as well.
		 */
		if (pt != NULL) {
			moea_pte_change(pt, &pvo->pvo_pte.pte, pvo->pvo_vaddr);
			mtx_unlock(&moea_table_mutex);
		}
	}
	vm_page_unlock_queues();
	PMAP_UNLOCK(pm);
}

/*
 * Map a list of wired pages into kernel virtual address space.  This is
 * intended for temporary mappings which do not need page modification or
 * references recorded.  Existing mappings in the region are overwritten.
 */
void
moea_qenter(mmu_t mmu, vm_offset_t sva, vm_page_t *m, int count)
{
	vm_offset_t va;

	va = sva;
	while (count-- > 0) {
		moea_kenter(mmu, va, VM_PAGE_TO_PHYS(*m));
		va += PAGE_SIZE;
		m++;
	}
}
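/*
 * Illustrative sketch (hypothetical caller): moea_qenter() and the
 * moea_qremove() below are always used as a bracket around a temporary
 * kernel window onto a set of pages, e.g. for buffer I/O.
 */
#if 0
	moea_qenter(mmu, sva, pages, npages);
	/* ... access the pages via [sva, sva + npages * PAGE_SIZE) ... */
	moea_qremove(mmu, sva, npages);
#endif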
/*
 * Remove page mappings from kernel virtual address space.  Intended for
 * temporary mappings entered by moea_qenter.
 */
void
moea_qremove(mmu_t mmu, vm_offset_t sva, int count)
{
	vm_offset_t va;

	va = sva;
	while (count-- > 0) {
		moea_kremove(mmu, va);
		va += PAGE_SIZE;
	}
}

void
moea_release(mmu_t mmu, pmap_t pmap)
{
	int idx, mask;

	/*
	 * Free segment register's VSID
	 */
	if (pmap->pm_sr[0] == 0)
		panic("moea_release");

	mtx_lock(&moea_vsid_mutex);
	idx = VSID_TO_HASH(pmap->pm_sr[0]) & (NPMAPS-1);
	mask = 1 << (idx % VSID_NBPW);
	idx /= VSID_NBPW;
	moea_vsid_bitmap[idx] &= ~mask;
	mtx_unlock(&moea_vsid_mutex);
	PMAP_LOCK_DESTROY(pmap);
}

/*
 * Remove the given range of addresses from the specified map.
 */
void
moea_remove(mmu_t mmu, pmap_t pm, vm_offset_t sva, vm_offset_t eva)
{
	struct pvo_entry *pvo, *tpvo, key;

	vm_page_lock_queues();
	PMAP_LOCK(pm);
	key.pvo_vaddr = sva;
	for (pvo = RB_NFIND(pvo_tree, &pm->pmap_pvo, &key);
	    pvo != NULL && PVO_VADDR(pvo) < eva; pvo = tpvo) {
		tpvo = RB_NEXT(pvo_tree, &pm->pmap_pvo, pvo);
		moea_pvo_remove(pvo, -1);
	}
	PMAP_UNLOCK(pm);
	vm_page_unlock_queues();
}

/*
 * Remove physical page from all pmaps in which it resides.
 * moea_pvo_remove() will reflect changes in pte's back to the vm_page.
 */
void
moea_remove_all(mmu_t mmu, vm_page_t m)
{
	struct pvo_head *pvo_head;
	struct pvo_entry *pvo, *next_pvo;
	pmap_t pmap;

	vm_page_lock_queues();
	pvo_head = vm_page_to_pvoh(m);
	for (pvo = LIST_FIRST(pvo_head); pvo != NULL; pvo = next_pvo) {
		next_pvo = LIST_NEXT(pvo, pvo_vlink);

		pmap = pvo->pvo_pmap;
		PMAP_LOCK(pmap);
		moea_pvo_remove(pvo, -1);
		PMAP_UNLOCK(pmap);
	}
	if ((m->aflags & PGA_WRITEABLE) && moea_is_modified(mmu, m)) {
		moea_attr_clear(m, PTE_CHG);
		vm_page_dirty(m);
	}
	vm_page_aflag_clear(m, PGA_WRITEABLE);
	vm_page_unlock_queues();
}
/*
 * Allocate a physical page of memory directly from the phys_avail map.
 * Can only be called from moea_bootstrap before avail start and end are
 * calculated.
 */
static vm_offset_t
moea_bootstrap_alloc(vm_size_t size, u_int align)
{
	vm_offset_t s, e;
	int i, j;

	size = round_page(size);
	for (i = 0; phys_avail[i + 1] != 0; i += 2) {
		if (align != 0)
			s = (phys_avail[i] + align - 1) & ~(align - 1);
		else
			s = phys_avail[i];
		e = s + size;

		if (s < phys_avail[i] || e > phys_avail[i + 1])
			continue;

		if (s == phys_avail[i]) {
			phys_avail[i] += size;
		} else if (e == phys_avail[i + 1]) {
			phys_avail[i + 1] -= size;
		} else {
			for (j = phys_avail_count * 2; j > i; j -= 2) {
				phys_avail[j] = phys_avail[j - 2];
				phys_avail[j + 1] = phys_avail[j - 1];
			}

			phys_avail[i + 3] = phys_avail[i + 1];
			phys_avail[i + 1] = s;
			phys_avail[i + 2] = e;
			phys_avail_count++;
		}

		return (s);
	}
	panic("moea_bootstrap_alloc: could not allocate memory");
}

static void
moea_syncicache(vm_offset_t pa, vm_size_t len)
{

	__syncicache((void *)pa, len);
}
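/*
 * Worked example of the allocator above: with a single free range
 * phys_avail[] = { 0x100000, 0x800000 } and an aligned request that lands
 * strictly inside it (say s = 0x400000, e = 0x401000), neither boundary
 * case applies, so the range is split in place into { 0x100000, 0x400000 }
 * and { 0x401000, 0x800000 } by shifting the tail of the array up one
 * pair and bumping phys_avail_count.
 */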
static int
moea_pvo_enter(pmap_t pm, uma_zone_t zone, struct pvo_head *pvo_head,
    vm_offset_t va, vm_offset_t pa, u_int pte_lo, int flags)
{
	struct pvo_entry *pvo;
	u_int sr;
	int first;
	u_int ptegidx;
	int i;
	int bootstrap;

	moea_pvo_enter_calls++;
	first = 0;
	bootstrap = 0;

	/*
	 * Compute the PTE Group index.
	 */
	va &= ~ADDR_POFF;
	sr = va_to_sr(pm->pm_sr, va);
	ptegidx = va_to_pteg(sr, va);

	/*
	 * Remove any existing mapping for this page.  Reuse the pvo entry if
	 * there is a mapping.
	 */
	mtx_lock(&moea_table_mutex);
	LIST_FOREACH(pvo, &moea_pvo_table[ptegidx], pvo_olink) {
		if (pvo->pvo_pmap == pm && PVO_VADDR(pvo) == va) {
			if ((pvo->pvo_pte.pte.pte_lo & PTE_RPGN) == pa &&
			    (pvo->pvo_pte.pte.pte_lo & PTE_PP) ==
			    (pte_lo & PTE_PP)) {
				mtx_unlock(&moea_table_mutex);
				return (0);
			}
			moea_pvo_remove(pvo, -1);
			break;
		}
	}

	/*
	 * If we aren't overwriting a mapping, try to allocate.
	 */
	if (moea_initialized) {
		pvo = uma_zalloc(zone, M_NOWAIT);
	} else {
		if (moea_bpvo_pool_index >= BPVO_POOL_SIZE) {
			panic("moea_enter: bpvo pool exhausted, %d, %d, %d",
			    moea_bpvo_pool_index, BPVO_POOL_SIZE,
			    BPVO_POOL_SIZE * sizeof(struct pvo_entry));
		}
		pvo = &moea_bpvo_pool[moea_bpvo_pool_index];
		moea_bpvo_pool_index++;
		bootstrap = 1;
	}

	if (pvo == NULL) {
		mtx_unlock(&moea_table_mutex);
		return (ENOMEM);
	}

	moea_pvo_entries++;
	pvo->pvo_vaddr = va;
	pvo->pvo_pmap = pm;
	LIST_INSERT_HEAD(&moea_pvo_table[ptegidx], pvo, pvo_olink);
	pvo->pvo_vaddr &= ~ADDR_POFF;
	if (flags & VM_PROT_EXECUTE)
		pvo->pvo_vaddr |= PVO_EXECUTABLE;
	if (flags & PVO_WIRED)
		pvo->pvo_vaddr |= PVO_WIRED;
	if (pvo_head != &moea_pvo_kunmanaged)
		pvo->pvo_vaddr |= PVO_MANAGED;
	if (bootstrap)
		pvo->pvo_vaddr |= PVO_BOOTSTRAP;

	moea_pte_create(&pvo->pvo_pte.pte, sr, va, pa | pte_lo);

	/*
	 * Add to pmap list
	 */
	RB_INSERT(pvo_tree, &pm->pmap_pvo, pvo);

	/*
	 * Remember if the list was empty and therefore will be the first
	 * item.
	 */
	if (LIST_FIRST(pvo_head) == NULL)
		first = 1;
	LIST_INSERT_HEAD(pvo_head, pvo, pvo_vlink);

	if (pvo->pvo_vaddr & PVO_WIRED)
		pm->pm_stats.wired_count++;
	pm->pm_stats.resident_count++;

	/*
	 * We hope this succeeds but it isn't required.
	 */
	i = moea_pte_insert(ptegidx, &pvo->pvo_pte.pte);
	if (i >= 0) {
		PVO_PTEGIDX_SET(pvo, i);
	} else {
		moea_pte_overflow++;
		panic("moea_pvo_enter: overflow");
	}
	mtx_unlock(&moea_table_mutex);

	return (first ? ENOENT : 0);
}
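/*
 * Note on the return convention above (descriptive only): callers such as
 * moea_kenter() treat ENOENT as success.  It simply reports that this was
 * the first mapping entered for the page, which lets managed-page callers
 * decide, for instance, whether instruction-cache synchronization is
 * still needed for the page.
 */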
static void
moea_pvo_remove(struct pvo_entry *pvo, int pteidx)
{
	struct pte *pt;

	/*
	 * If there is an active pte entry, we need to deactivate it (and
	 * save the ref & chg bits).
	 */
	pt = moea_pvo_to_pte(pvo, pteidx);
	if (pt != NULL) {
		moea_pte_unset(pt, &pvo->pvo_pte.pte, pvo->pvo_vaddr);
		mtx_unlock(&moea_table_mutex);
		PVO_PTEGIDX_CLR(pvo);
	} else {
		moea_pte_overflow--;
	}

	/*
	 * Update our statistics.
	 */
	pvo->pvo_pmap->pm_stats.resident_count--;
	if (pvo->pvo_vaddr & PVO_WIRED)
		pvo->pvo_pmap->pm_stats.wired_count--;

	/*
	 * Save the REF/CHG bits into their cache if the page is managed.
	 */
	if ((pvo->pvo_vaddr & PVO_MANAGED) == PVO_MANAGED) {
		struct vm_page *pg;

		pg = PHYS_TO_VM_PAGE(pvo->pvo_pte.pte.pte_lo & PTE_RPGN);
		if (pg != NULL) {
			moea_attr_save(pg, pvo->pvo_pte.pte.pte_lo &
			    (PTE_REF | PTE_CHG));
		}
	}

	/*
	 * Remove this PVO from the PV and pmap lists.
	 */
	LIST_REMOVE(pvo, pvo_vlink);
	RB_REMOVE(pvo_tree, &pvo->pvo_pmap->pmap_pvo, pvo);

	/*
	 * Remove this from the overflow list and return it to the pool
	 * if we aren't going to reuse it.
	 */
	LIST_REMOVE(pvo, pvo_olink);
	if (!(pvo->pvo_vaddr & PVO_BOOTSTRAP))
		uma_zfree(pvo->pvo_vaddr & PVO_MANAGED ? moea_mpvo_zone :
		    moea_upvo_zone, pvo);
	moea_pvo_entries--;
	moea_pvo_remove_calls++;
}

static __inline int
moea_pvo_pte_index(const struct pvo_entry *pvo, int ptegidx)
{
	int pteidx;

	/*
	 * We can find the actual pte entry without searching by grabbing
	 * the PTEG index from 3 unused bits in pte_lo[11:9] and by
	 * noticing the HID bit.
	 */
	pteidx = ptegidx * 8 + PVO_PTEGIDX_GET(pvo);
	if (pvo->pvo_pte.pte.pte_hi & PTE_HID)
		pteidx ^= moea_pteg_mask * 8;

	return (pteidx);
}

static struct pvo_entry *
moea_pvo_find_va(pmap_t pm, vm_offset_t va, int *pteidx_p)
{
	struct pvo_entry *pvo;
	int ptegidx;
	u_int sr;

	va &= ~ADDR_POFF;
	sr = va_to_sr(pm->pm_sr, va);
	ptegidx = va_to_pteg(sr, va);

	mtx_lock(&moea_table_mutex);
	LIST_FOREACH(pvo, &moea_pvo_table[ptegidx], pvo_olink) {
		if (pvo->pvo_pmap == pm && PVO_VADDR(pvo) == va) {
			if (pteidx_p)
				*pteidx_p = moea_pvo_pte_index(pvo, ptegidx);
			break;
		}
	}
	mtx_unlock(&moea_table_mutex);

	return (pvo);
}
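/*
 * Worked example of moea_pvo_pte_index() (hypothetical numbers): with
 * eight PTEs per group, a PVO in primary group 0x40 occupying slot 3
 * yields pteidx = 0x40 * 8 + 3 = 0x203.  If PTE_HID is set, the entry
 * actually lives in the secondary group, whose index is the primary index
 * XORed with moea_pteg_mask, so the slot index is rebased with the same
 * XOR scaled by 8.
 */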
static struct pte *
moea_pvo_to_pte(const struct pvo_entry *pvo, int pteidx)
{
	struct pte *pt;

	/*
	 * If we haven't been supplied the pteidx, calculate it.
	 */
	if (pteidx == -1) {
		int ptegidx;
		u_int sr;

		sr = va_to_sr(pvo->pvo_pmap->pm_sr, pvo->pvo_vaddr);
		ptegidx = va_to_pteg(sr, pvo->pvo_vaddr);
		pteidx = moea_pvo_pte_index(pvo, ptegidx);
	}

	pt = &moea_pteg_table[pteidx >> 3].pt[pteidx & 7];
	mtx_lock(&moea_table_mutex);

	if ((pvo->pvo_pte.pte.pte_hi & PTE_VALID) && !PVO_PTEGIDX_ISSET(pvo)) {
		panic("moea_pvo_to_pte: pvo %p has valid pte in pvo but no "
		    "valid pte index", pvo);
	}

	if ((pvo->pvo_pte.pte.pte_hi & PTE_VALID) == 0 &&
	    PVO_PTEGIDX_ISSET(pvo)) {
		panic("moea_pvo_to_pte: pvo %p has valid pte index in pvo "
		    "but no valid pte", pvo);
	}

	if ((pt->pte_hi ^ (pvo->pvo_pte.pte.pte_hi & ~PTE_VALID)) ==
	    PTE_VALID) {
		if ((pvo->pvo_pte.pte.pte_hi & PTE_VALID) == 0) {
			panic("moea_pvo_to_pte: pvo %p has valid pte in "
			    "moea_pteg_table %p but invalid in pvo", pvo, pt);
		}

		if (((pt->pte_lo ^ pvo->pvo_pte.pte.pte_lo) &
		    ~(PTE_CHG|PTE_REF)) != 0) {
			panic("moea_pvo_to_pte: pvo %p pte does not match "
			    "pte %p in moea_pteg_table", pvo, pt);
		}

		mtx_assert(&moea_table_mutex, MA_OWNED);
		return (pt);
	}

	if (pvo->pvo_pte.pte.pte_hi & PTE_VALID) {
		panic("moea_pvo_to_pte: pvo %p has invalid pte %p in "
		    "moea_pteg_table but valid in pvo", pvo, pt);
	}

	mtx_unlock(&moea_table_mutex);
	return (NULL);
}

/*
 * XXX: THIS STUFF SHOULD BE IN pte.c?
 */
int
moea_pte_spill(vm_offset_t addr)
{
	struct pvo_entry *source_pvo, *victim_pvo;
	struct pvo_entry *pvo;
	int ptegidx, i, j;
	u_int sr;
	struct pteg *pteg;
	struct pte *pt;

	moea_pte_spills++;

	sr = mfsrin(addr);
	ptegidx = va_to_pteg(sr, addr);

	/*
	 * Have to substitute some entry.  Use the primary hash for this.
	 * Use low bits of timebase as random generator.
	 */
	pteg = &moea_pteg_table[ptegidx];
	mtx_lock(&moea_table_mutex);
	__asm __volatile("mftb %0" : "=r"(i));
	i &= 7;
	pt = &pteg->pt[i];

	source_pvo = NULL;
	victim_pvo = NULL;
	LIST_FOREACH(pvo, &moea_pvo_table[ptegidx], pvo_olink) {
		/*
		 * We need to find a pvo entry for this address.
		 */
		if (source_pvo == NULL &&
		    moea_pte_match(&pvo->pvo_pte.pte, sr, addr,
		    pvo->pvo_pte.pte.pte_hi & PTE_HID)) {
			/*
			 * Now found an entry to be spilled into the pteg.
			 * The PTE is now valid, so we know it's active.
			 */
			j = moea_pte_insert(ptegidx, &pvo->pvo_pte.pte);

			if (j >= 0) {
				PVO_PTEGIDX_SET(pvo, j);
				moea_pte_overflow--;
				mtx_unlock(&moea_table_mutex);
				return (1);
			}

			source_pvo = pvo;

			if (victim_pvo != NULL)
				break;
		}

		/*
		 * We also need the pvo entry of the victim we are replacing
		 * so save the R & C bits of the PTE.
		 */
		if ((pt->pte_hi & PTE_HID) == 0 && victim_pvo == NULL &&
		    moea_pte_compare(pt, &pvo->pvo_pte.pte)) {
			victim_pvo = pvo;
			if (source_pvo != NULL)
				break;
		}
	}

	if (source_pvo == NULL) {
		mtx_unlock(&moea_table_mutex);
		return (0);
	}

	if (victim_pvo == NULL) {
		if ((pt->pte_hi & PTE_HID) == 0)
			panic("moea_pte_spill: victim p-pte (%p) has no pvo "
			    "entry", pt);

		/*
		 * If this is a secondary PTE, we need to search its primary
		 * pvo bucket for the matching PVO.
		 */
		LIST_FOREACH(pvo, &moea_pvo_table[ptegidx ^ moea_pteg_mask],
		    pvo_olink) {
			/*
			 * We also need the pvo entry of the victim we are
			 * replacing so save the R & C bits of the PTE.
			 */
			if (moea_pte_compare(pt, &pvo->pvo_pte.pte)) {
				victim_pvo = pvo;
				break;
			}
		}

		if (victim_pvo == NULL)
			panic("moea_pte_spill: victim s-pte (%p) has no pvo "
			    "entry", pt);
	}

	/*
	 * We are invalidating the TLB entry for the EA we are replacing even
	 * though it's valid.  If we don't, we lose any ref/chg bit changes
	 * contained in the TLB entry.
	 */
	source_pvo->pvo_pte.pte.pte_hi &= ~PTE_HID;

	moea_pte_unset(pt, &victim_pvo->pvo_pte.pte, victim_pvo->pvo_vaddr);
	moea_pte_set(pt, &source_pvo->pvo_pte.pte);

	PVO_PTEGIDX_CLR(victim_pvo);
	PVO_PTEGIDX_SET(source_pvo, i);
	moea_pte_replacements++;

	mtx_unlock(&moea_table_mutex);
	return (1);
}
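/*
 * Worked sketch of the spill path above (descriptive only): the hardware
 * raised a miss on an EA that hashes to primary group ptegidx.  When all
 * eight slots are full, a pseudo-random slot i (timebase & 7) is chosen
 * for eviction.  The evicted PTE's REF/CHG bits are saved back through
 * its victim PVO, the waiting source PVO's entry is installed in slot i
 * with PTE_HID clear (it is entered via the primary hash), and the
 * counters record one more replacement.
 */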
static int
moea_pte_insert(u_int ptegidx, struct pte *pvo_pt)
{
	struct pte *pt;
	int i;

	mtx_assert(&moea_table_mutex, MA_OWNED);

	/*
	 * First try primary hash.
	 */
	for (pt = moea_pteg_table[ptegidx].pt, i = 0; i < 8; i++, pt++) {
		if ((pt->pte_hi & PTE_VALID) == 0) {
			pvo_pt->pte_hi &= ~PTE_HID;
			moea_pte_set(pt, pvo_pt);
			return (i);
		}
	}

	/*
	 * Now try secondary hash.
	 */
	ptegidx ^= moea_pteg_mask;

	for (pt = moea_pteg_table[ptegidx].pt, i = 0; i < 8; i++, pt++) {
		if ((pt->pte_hi & PTE_VALID) == 0) {
			pvo_pt->pte_hi |= PTE_HID;
			moea_pte_set(pt, pvo_pt);
			return (i);
		}
	}

	panic("moea_pte_insert: overflow");
	return (-1);
}

static boolean_t
moea_query_bit(vm_page_t m, int ptebit)
{
	struct pvo_entry *pvo;
	struct pte *pt;

	if (moea_attr_fetch(m) & ptebit)
		return (TRUE);

	vm_page_lock_queues();
	LIST_FOREACH(pvo, vm_page_to_pvoh(m), pvo_vlink) {

		/*
		 * See if we saved the bit off.  If so, cache it and return
		 * success.
		 */
		if (pvo->pvo_pte.pte.pte_lo & ptebit) {
			moea_attr_save(m, ptebit);
			vm_page_unlock_queues();
			return (TRUE);
		}
	}

	/*
	 * No luck, now go through the hard part of looking at the PTEs
	 * themselves.  Sync so that any pending REF/CHG bits are flushed to
	 * the PTEs.
	 */
	powerpc_sync();
	LIST_FOREACH(pvo, vm_page_to_pvoh(m), pvo_vlink) {

		/*
		 * See if this pvo has a valid PTE.  If so, fetch the
		 * REF/CHG bits from the valid PTE.  If the appropriate
		 * ptebit is set, cache it and return success.
		 */
		pt = moea_pvo_to_pte(pvo, -1);
		if (pt != NULL) {
			moea_pte_synch(pt, &pvo->pvo_pte.pte);
			mtx_unlock(&moea_table_mutex);
			if (pvo->pvo_pte.pte.pte_lo & ptebit) {
				moea_attr_save(m, ptebit);
				vm_page_unlock_queues();
				return (TRUE);
			}
		}
	}

	vm_page_unlock_queues();
	return (FALSE);
}
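/*
 * Note on the REF/CHG attribute cache used above (descriptive only):
 * because a PTE can be evicted from the page table at any time by
 * moea_pte_spill(), its REF/CHG bits would otherwise be lost.  They are
 * therefore accumulated into the vm_page's machine-dependent attribute
 * word via moea_attr_save() whenever a PTE is torn down, and both
 * moea_query_bit() above and moea_clear_bit() below consult that cache
 * before touching the hardware page table.
 */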
static u_int
moea_clear_bit(vm_page_t m, int ptebit)
{
	u_int count;
	struct pvo_entry *pvo;
	struct pte *pt;

	vm_page_lock_queues();

	/*
	 * Clear the cached value.
	 */
	moea_attr_clear(m, ptebit);

	/*
	 * Sync so that any pending REF/CHG bits are flushed to the PTEs (so
	 * we can reset the right ones).  Note that since the pvo entries and
	 * list heads are accessed via BAT0 and are never placed in the page
	 * table, we don't have to worry about further accesses setting the
	 * REF/CHG bits.
	 */
	powerpc_sync();

	/*
	 * For each pvo entry, clear the pvo's ptebit.  If this pvo has a
	 * valid pte clear the ptebit from the valid pte.
	 */
	count = 0;
	LIST_FOREACH(pvo, vm_page_to_pvoh(m), pvo_vlink) {
		pt = moea_pvo_to_pte(pvo, -1);
		if (pt != NULL) {
			moea_pte_synch(pt, &pvo->pvo_pte.pte);
			if (pvo->pvo_pte.pte.pte_lo & ptebit) {
				count++;
				moea_pte_clear(pt, PVO_VADDR(pvo), ptebit);
			}
			mtx_unlock(&moea_table_mutex);
		}
		pvo->pvo_pte.pte.pte_lo &= ~ptebit;
	}

	vm_page_unlock_queues();
	return (count);
}

/*
 * Return 0 if the physical range is encompassed by battable[idx];
 * otherwise return an errno describing why it is not.
 */
static int
moea_bat_mapped(int idx, vm_offset_t pa, vm_size_t size)
{
	u_int prot;
	u_int32_t start;
	u_int32_t end;
	u_int32_t bat_ble;

	/*
	 * Return immediately if not a valid mapping
	 */
	if (!(battable[idx].batu & BAT_Vs))
		return (EINVAL);

	/*
	 * The BAT entry must be cache-inhibited, guarded, and r/w
	 * so it can function as an i/o page
	 */
	prot = battable[idx].batl & (BAT_I|BAT_G|BAT_PP_RW);
	if (prot != (BAT_I|BAT_G|BAT_PP_RW))
		return (EPERM);

	/*
	 * The address should be within the BAT range.  Assume that the
	 * start address in the BAT has the correct alignment (thus
	 * not requiring masking)
	 */
	start = battable[idx].batl & BAT_PBS;
	bat_ble = (battable[idx].batu & ~(BAT_EBS)) | 0x03;
	end = start | (bat_ble << 15) | 0x7fff;

	if ((pa < start) || ((pa + size) > end))
		return (ERANGE);

	return (0);
}

boolean_t
moea_dev_direct_mapped(mmu_t mmu, vm_offset_t pa, vm_size_t size)
{
	int i;

	/*
	 * This currently does not work for entries that
	 * overlap 256M BAT segments.
	 */

	for (i = 0; i < 16; i++)
		if (moea_bat_mapped(i, pa, size) == 0)
			return (0);

	return (EFAULT);
}
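/*
 * Worked example of the range check above (hypothetical BAT contents):
 * for a 256MB BAT, the block-length bits extracted into bat_ble come to
 * 0x1fff (after OR'ing in 0x03 for the low bits stripped by the BAT_EBS
 * mask), so end = start | (0x1fff << 15) | 0x7fff = start + 0x0fffffff,
 * i.e. the last byte of the 256MB window.  A pa/size pair is accepted
 * only if it lies entirely within [start, end].
 */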
/*
 * Map a set of physical memory pages into the kernel virtual
 * address space.  Return a pointer to where it is mapped.  This
 * routine is intended to be used for mapping device memory,
 * NOT real memory.
 */
void *
moea_mapdev(mmu_t mmu, vm_offset_t pa, vm_size_t size)
{

	return (moea_mapdev_attr(mmu, pa, size, VM_MEMATTR_DEFAULT));
}

void *
moea_mapdev_attr(mmu_t mmu, vm_offset_t pa, vm_size_t size, vm_memattr_t ma)
{
	vm_offset_t va, tmpva, ppa, offset;
	int i;

	ppa = trunc_page(pa);
	offset = pa & PAGE_MASK;
	size = roundup(offset + size, PAGE_SIZE);

	/*
	 * If the physical address lies within a valid BAT table entry,
	 * return the 1:1 mapping.  This currently doesn't work
	 * for regions that overlap 256M BAT segments.
	 */
	for (i = 0; i < 16; i++) {
		if (moea_bat_mapped(i, pa, size) == 0)
			return ((void *) pa);
	}

	va = kmem_alloc_nofault(kernel_map, size);
	if (!va)
		panic("moea_mapdev: Couldn't alloc kernel virtual memory");

	for (tmpva = va; size > 0;) {
		moea_kenter_attr(mmu, tmpva, ppa, ma);
		tlbie(tmpva);
		size -= PAGE_SIZE;
		tmpva += PAGE_SIZE;
		ppa += PAGE_SIZE;
	}

	return ((void *)(va + offset));
}

void
moea_unmapdev(mmu_t mmu, vm_offset_t va, vm_size_t size)
{
	vm_offset_t base, offset;

	/*
	 * If this is outside kernel virtual space, then it's a
	 * battable entry and doesn't require unmapping
	 */
	if ((va >= VM_MIN_KERNEL_ADDRESS) && (va <= virtual_end)) {
		base = trunc_page(va);
		offset = va & PAGE_MASK;
		size = roundup(offset + size, PAGE_SIZE);
		kmem_free(kernel_map, base, size);
	}
}

static void
moea_sync_icache(mmu_t mmu, pmap_t pm, vm_offset_t va, vm_size_t sz)
{
	struct pvo_entry *pvo;
	vm_offset_t lim;
	vm_paddr_t pa;
	vm_size_t len;

	PMAP_LOCK(pm);
	while (sz > 0) {
		lim = round_page(va + 1);
		len = MIN(lim - va, sz);
		pvo = moea_pvo_find_va(pm, va & ~ADDR_POFF, NULL);
		if (pvo != NULL) {
			pa = (pvo->pvo_pte.pte.pte_lo & PTE_RPGN) |
			    (va & ADDR_POFF);
			moea_syncicache(pa, len);
		}
		va += len;
		sz -= len;
	}
	PMAP_UNLOCK(pm);
}
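/*
 * Illustrative sketch (hypothetical driver code): the mapdev/unmapdev
 * pair above is the sort of primitive device register mappings rest on
 * for this pmap.  A region covered by a BAT comes back as the 1:1
 * physical address and is left alone by moea_unmapdev(); anything else
 * gets fresh kernel virtual memory that must be released.
 */
#if 0
	void *regs;

	regs = moea_mapdev(mmu, dev_pa, dev_size);
	/* ... access device registers through 'regs' ... */
	moea_unmapdev(mmu, (vm_offset_t)regs, dev_size);
#endif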