mmu_oea.c revision 228412
1139825Simp/*- 290643Sbenno * Copyright (c) 2001 The NetBSD Foundation, Inc. 390643Sbenno * All rights reserved. 490643Sbenno * 590643Sbenno * This code is derived from software contributed to The NetBSD Foundation 690643Sbenno * by Matt Thomas <matt@3am-software.com> of Allegro Networks, Inc. 790643Sbenno * 890643Sbenno * Redistribution and use in source and binary forms, with or without 990643Sbenno * modification, are permitted provided that the following conditions 1090643Sbenno * are met: 1190643Sbenno * 1. Redistributions of source code must retain the above copyright 1290643Sbenno * notice, this list of conditions and the following disclaimer. 1390643Sbenno * 2. Redistributions in binary form must reproduce the above copyright 1490643Sbenno * notice, this list of conditions and the following disclaimer in the 1590643Sbenno * documentation and/or other materials provided with the distribution. 1690643Sbenno * 3. All advertising materials mentioning features or use of this software 1790643Sbenno * must display the following acknowledgement: 1890643Sbenno * This product includes software developed by the NetBSD 1990643Sbenno * Foundation, Inc. and its contributors. 2090643Sbenno * 4. Neither the name of The NetBSD Foundation nor the names of its 2190643Sbenno * contributors may be used to endorse or promote products derived 2290643Sbenno * from this software without specific prior written permission. 2390643Sbenno * 2490643Sbenno * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS 2590643Sbenno * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED 2690643Sbenno * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR 2790643Sbenno * PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS 2890643Sbenno * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 2990643Sbenno * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 3090643Sbenno * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 3190643Sbenno * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 3290643Sbenno * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 3390643Sbenno * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 3490643Sbenno * POSSIBILITY OF SUCH DAMAGE. 3590643Sbenno */ 36139825Simp/*- 3777957Sbenno * Copyright (C) 1995, 1996 Wolfgang Solfrank. 3877957Sbenno * Copyright (C) 1995, 1996 TooLs GmbH. 3977957Sbenno * All rights reserved. 4077957Sbenno * 4177957Sbenno * Redistribution and use in source and binary forms, with or without 4277957Sbenno * modification, are permitted provided that the following conditions 4377957Sbenno * are met: 4477957Sbenno * 1. Redistributions of source code must retain the above copyright 4577957Sbenno * notice, this list of conditions and the following disclaimer. 4677957Sbenno * 2. Redistributions in binary form must reproduce the above copyright 4777957Sbenno * notice, this list of conditions and the following disclaimer in the 4877957Sbenno * documentation and/or other materials provided with the distribution. 4977957Sbenno * 3. All advertising materials mentioning features or use of this software 5077957Sbenno * must display the following acknowledgement: 5177957Sbenno * This product includes software developed by TooLs GmbH. 5277957Sbenno * 4. The name of TooLs GmbH may not be used to endorse or promote products 5377957Sbenno * derived from this software without specific prior written permission. 
5477957Sbenno * 5577957Sbenno * THIS SOFTWARE IS PROVIDED BY TOOLS GMBH ``AS IS'' AND ANY EXPRESS OR 5677957Sbenno * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES 5777957Sbenno * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 5877957Sbenno * IN NO EVENT SHALL TOOLS GMBH BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 5977957Sbenno * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, 6077957Sbenno * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; 6177957Sbenno * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, 6277957Sbenno * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR 6377957Sbenno * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF 6477957Sbenno * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 6577957Sbenno * 6678880Sbenno * $NetBSD: pmap.c,v 1.28 2000/03/26 20:42:36 kleink Exp $ 6777957Sbenno */ 68139825Simp/*- 6977957Sbenno * Copyright (C) 2001 Benno Rice. 7077957Sbenno * All rights reserved. 7177957Sbenno * 7277957Sbenno * Redistribution and use in source and binary forms, with or without 7377957Sbenno * modification, are permitted provided that the following conditions 7477957Sbenno * are met: 7577957Sbenno * 1. Redistributions of source code must retain the above copyright 7677957Sbenno * notice, this list of conditions and the following disclaimer. 7777957Sbenno * 2. Redistributions in binary form must reproduce the above copyright 7877957Sbenno * notice, this list of conditions and the following disclaimer in the 7977957Sbenno * documentation and/or other materials provided with the distribution. 8077957Sbenno * 8177957Sbenno * THIS SOFTWARE IS PROVIDED BY Benno Rice ``AS IS'' AND ANY EXPRESS OR 8277957Sbenno * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES 8377957Sbenno * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
8477957Sbenno * IN NO EVENT SHALL TOOLS GMBH BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 8577957Sbenno * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, 8677957Sbenno * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; 8777957Sbenno * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, 8877957Sbenno * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR 8977957Sbenno * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF 9077957Sbenno * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 9177957Sbenno */ 9277957Sbenno 93113038Sobrien#include <sys/cdefs.h> 94113038Sobrien__FBSDID("$FreeBSD: head/sys/powerpc/aim/mmu_oea.c 228412 2011-12-11 17:19:48Z nwhitehorn $"); 9577957Sbenno 9690643Sbenno/* 9790643Sbenno * Manages physical address maps. 9890643Sbenno * 9990643Sbenno * In addition to hardware address maps, this module is called upon to 10090643Sbenno * provide software-use-only maps which may or may not be stored in the 10190643Sbenno * same form as hardware maps. These pseudo-maps are used to store 10290643Sbenno * intermediate results from copy operations to and from address spaces. 10390643Sbenno * 10490643Sbenno * Since the information managed by this module is also stored by the 10590643Sbenno * logical address mapping module, this module may throw away valid virtual 10690643Sbenno * to physical mappings at almost any time. However, invalidations of 10790643Sbenno * mappings must be done as requested. 10890643Sbenno * 10990643Sbenno * In order to cope with hardware architectures which make virtual to 11090643Sbenno * physical map invalidates expensive, this module may delay invalidate 11190643Sbenno * reduced protection operations until such time as they are actually 11290643Sbenno * necessary. 
This module is given full information as to which processors 11390643Sbenno * are currently using which maps, and to when physical maps must be made 11490643Sbenno * correct. 11590643Sbenno */ 11690643Sbenno 117118239Speter#include "opt_kstack_pages.h" 118118239Speter 11977957Sbenno#include <sys/param.h> 12080431Speter#include <sys/kernel.h> 121222813Sattilio#include <sys/queue.h> 122222813Sattilio#include <sys/cpuset.h> 12390643Sbenno#include <sys/ktr.h> 12490643Sbenno#include <sys/lock.h> 12590643Sbenno#include <sys/msgbuf.h> 12690643Sbenno#include <sys/mutex.h> 12777957Sbenno#include <sys/proc.h> 128222813Sattilio#include <sys/sched.h> 12990643Sbenno#include <sys/sysctl.h> 13090643Sbenno#include <sys/systm.h> 13177957Sbenno#include <sys/vmmeter.h> 13277957Sbenno 13390643Sbenno#include <dev/ofw/openfirm.h> 13490643Sbenno 135152180Sgrehan#include <vm/vm.h> 13677957Sbenno#include <vm/vm_param.h> 13777957Sbenno#include <vm/vm_kern.h> 13877957Sbenno#include <vm/vm_page.h> 13977957Sbenno#include <vm/vm_map.h> 14077957Sbenno#include <vm/vm_object.h> 14177957Sbenno#include <vm/vm_extern.h> 14277957Sbenno#include <vm/vm_pageout.h> 14377957Sbenno#include <vm/vm_pager.h> 14492847Sjeff#include <vm/uma.h> 14577957Sbenno 146125687Sgrehan#include <machine/cpu.h> 147192067Snwhitehorn#include <machine/platform.h> 14883730Smp#include <machine/bat.h> 14990643Sbenno#include <machine/frame.h> 15090643Sbenno#include <machine/md_var.h> 15190643Sbenno#include <machine/psl.h> 15277957Sbenno#include <machine/pte.h> 153178628Smarcel#include <machine/smp.h> 15490643Sbenno#include <machine/sr.h> 155152180Sgrehan#include <machine/mmuvar.h> 15677957Sbenno 157152180Sgrehan#include "mmu_if.h" 15877957Sbenno 159152180Sgrehan#define MOEA_DEBUG 160152180Sgrehan 16190643Sbenno#define TODO panic("%s: not implemented", __func__); 16277957Sbenno 16390643Sbenno#define VSID_MAKE(sr, hash) ((sr) | (((hash) & 0xfffff) << 4)) 16490643Sbenno#define VSID_TO_SR(vsid) ((vsid) & 0xf) 16590643Sbenno#define 
VSID_TO_HASH(vsid) (((vsid) >> 4) & 0xfffff) 16690643Sbenno 16790643Sbennostruct ofw_map { 16890643Sbenno vm_offset_t om_va; 16990643Sbenno vm_size_t om_len; 17090643Sbenno vm_offset_t om_pa; 17190643Sbenno u_int om_mode; 17290643Sbenno}; 17377957Sbenno 17490643Sbenno/* 17590643Sbenno * Map of physical memory regions. 17690643Sbenno */ 17797346Sbennostatic struct mem_region *regions; 17897346Sbennostatic struct mem_region *pregions; 179209975Snwhitehornstatic u_int phys_avail_count; 180209975Snwhitehornstatic int regions_sz, pregions_sz; 181100319Sbennostatic struct ofw_map *translations; 18277957Sbenno 18390643Sbenno/* 184134535Salc * Lock for the pteg and pvo tables. 185134535Salc */ 186152180Sgrehanstruct mtx moea_table_mutex; 187212278Snwhitehornstruct mtx moea_vsid_mutex; 188134535Salc 189183094Smarcel/* tlbie instruction synchronization */ 190183094Smarcelstatic struct mtx tlbie_mtx; 191183094Smarcel 192134535Salc/* 19390643Sbenno * PTEG data. 19490643Sbenno */ 195152180Sgrehanstatic struct pteg *moea_pteg_table; 196152180Sgrehanu_int moea_pteg_count; 197152180Sgrehanu_int moea_pteg_mask; 19877957Sbenno 19990643Sbenno/* 20090643Sbenno * PVO data. 
20190643Sbenno */ 202152180Sgrehanstruct pvo_head *moea_pvo_table; /* pvo entries by pteg index */ 203152180Sgrehanstruct pvo_head moea_pvo_kunmanaged = 204152180Sgrehan LIST_HEAD_INITIALIZER(moea_pvo_kunmanaged); /* list of unmanaged pages */ 20577957Sbenno 206152180Sgrehanuma_zone_t moea_upvo_zone; /* zone for pvo entries for unmanaged pages */ 207152180Sgrehanuma_zone_t moea_mpvo_zone; /* zone for pvo entries for managed pages */ 20877957Sbenno 20999037Sbenno#define BPVO_POOL_SIZE 32768 210152180Sgrehanstatic struct pvo_entry *moea_bpvo_pool; 211152180Sgrehanstatic int moea_bpvo_pool_index = 0; 21277957Sbenno 21390643Sbenno#define VSID_NBPW (sizeof(u_int32_t) * 8) 214152180Sgrehanstatic u_int moea_vsid_bitmap[NPMAPS / VSID_NBPW]; 21577957Sbenno 216152180Sgrehanstatic boolean_t moea_initialized = FALSE; 21777957Sbenno 21890643Sbenno/* 21990643Sbenno * Statistics. 22090643Sbenno */ 221152180Sgrehanu_int moea_pte_valid = 0; 222152180Sgrehanu_int moea_pte_overflow = 0; 223152180Sgrehanu_int moea_pte_replacements = 0; 224152180Sgrehanu_int moea_pvo_entries = 0; 225152180Sgrehanu_int moea_pvo_enter_calls = 0; 226152180Sgrehanu_int moea_pvo_remove_calls = 0; 227152180Sgrehanu_int moea_pte_spills = 0; 228152180SgrehanSYSCTL_INT(_machdep, OID_AUTO, moea_pte_valid, CTLFLAG_RD, &moea_pte_valid, 22990643Sbenno 0, ""); 230152180SgrehanSYSCTL_INT(_machdep, OID_AUTO, moea_pte_overflow, CTLFLAG_RD, 231152180Sgrehan &moea_pte_overflow, 0, ""); 232152180SgrehanSYSCTL_INT(_machdep, OID_AUTO, moea_pte_replacements, CTLFLAG_RD, 233152180Sgrehan &moea_pte_replacements, 0, ""); 234152180SgrehanSYSCTL_INT(_machdep, OID_AUTO, moea_pvo_entries, CTLFLAG_RD, &moea_pvo_entries, 23590643Sbenno 0, ""); 236152180SgrehanSYSCTL_INT(_machdep, OID_AUTO, moea_pvo_enter_calls, CTLFLAG_RD, 237152180Sgrehan &moea_pvo_enter_calls, 0, ""); 238152180SgrehanSYSCTL_INT(_machdep, OID_AUTO, moea_pvo_remove_calls, CTLFLAG_RD, 239152180Sgrehan &moea_pvo_remove_calls, 0, ""); 
SYSCTL_INT(_machdep, OID_AUTO, moea_pte_spills, CTLFLAG_RD,
    &moea_pte_spills, 0, "");

/*
 * Allocate physical memory for use in moea_bootstrap.
 */
static vm_offset_t	moea_bootstrap_alloc(vm_size_t, u_int);

/*
 * PTE calls.
 */
static int		moea_pte_insert(u_int, struct pte *);

/*
 * PVO calls.
 */
static int	moea_pvo_enter(pmap_t, uma_zone_t, struct pvo_head *,
		    vm_offset_t, vm_offset_t, u_int, int);
static void	moea_pvo_remove(struct pvo_entry *, int);
static struct	pvo_entry *moea_pvo_find_va(pmap_t, vm_offset_t, int *);
static struct	pte *moea_pvo_to_pte(const struct pvo_entry *, int);

/*
 * Utility routines.
 */
static void		moea_enter_locked(pmap_t, vm_offset_t, vm_page_t,
			    vm_prot_t, boolean_t);
static void		moea_syncicache(vm_offset_t, vm_size_t);
static boolean_t	moea_query_bit(vm_page_t, int);
static u_int		moea_clear_bit(vm_page_t, int);
static void		moea_kremove(mmu_t, vm_offset_t);
int		moea_pte_spill(vm_offset_t);

/*
 * Kernel MMU interface
 */
void moea_change_wiring(mmu_t, pmap_t, vm_offset_t, boolean_t);
void moea_clear_modify(mmu_t, vm_page_t);
void moea_clear_reference(mmu_t, vm_page_t);
void moea_copy_page(mmu_t, vm_page_t, vm_page_t);
void moea_enter(mmu_t, pmap_t, vm_offset_t, vm_page_t, vm_prot_t, boolean_t);
void moea_enter_object(mmu_t, pmap_t, vm_offset_t, vm_offset_t, vm_page_t,
    vm_prot_t);
void moea_enter_quick(mmu_t, pmap_t, vm_offset_t, vm_page_t, vm_prot_t);
vm_paddr_t moea_extract(mmu_t, pmap_t, vm_offset_t);
vm_page_t moea_extract_and_hold(mmu_t, pmap_t, vm_offset_t, vm_prot_t);
void moea_init(mmu_t);
boolean_t moea_is_modified(mmu_t, vm_page_t);
boolean_t moea_is_prefaultable(mmu_t, pmap_t, vm_offset_t);
boolean_t moea_is_referenced(mmu_t, vm_page_t);
boolean_t moea_ts_referenced(mmu_t, vm_page_t);
vm_offset_t moea_map(mmu_t, vm_offset_t *, vm_offset_t, vm_offset_t, int);
boolean_t moea_page_exists_quick(mmu_t, pmap_t, vm_page_t);
int moea_page_wired_mappings(mmu_t, vm_page_t);
void moea_pinit(mmu_t, pmap_t);
void moea_pinit0(mmu_t, pmap_t);
void moea_protect(mmu_t, pmap_t, vm_offset_t, vm_offset_t, vm_prot_t);
void moea_qenter(mmu_t, vm_offset_t, vm_page_t *, int);
void moea_qremove(mmu_t, vm_offset_t, int);
void moea_release(mmu_t, pmap_t);
void moea_remove(mmu_t, pmap_t, vm_offset_t, vm_offset_t);
void moea_remove_all(mmu_t, vm_page_t);
void moea_remove_write(mmu_t, vm_page_t);
void moea_zero_page(mmu_t, vm_page_t);
void moea_zero_page_area(mmu_t, vm_page_t, int, int);
void moea_zero_page_idle(mmu_t, vm_page_t);
void moea_activate(mmu_t, struct thread *);
void moea_deactivate(mmu_t, struct thread *);
void moea_cpu_bootstrap(mmu_t, int);
void moea_bootstrap(mmu_t, vm_offset_t, vm_offset_t);
void *moea_mapdev(mmu_t, vm_offset_t, vm_size_t);
void *moea_mapdev_attr(mmu_t, vm_offset_t, vm_size_t, vm_memattr_t);
void moea_unmapdev(mmu_t, vm_offset_t, vm_size_t);
vm_offset_t moea_kextract(mmu_t, vm_offset_t);
void moea_kenter_attr(mmu_t, vm_offset_t, vm_offset_t, vm_memattr_t);
void moea_kenter(mmu_t, vm_offset_t, vm_offset_t);
void moea_page_set_memattr(mmu_t mmu, vm_page_t m, vm_memattr_t ma);
boolean_t moea_dev_direct_mapped(mmu_t, vm_offset_t, vm_size_t);
static void moea_sync_icache(mmu_t, pmap_t, vm_offset_t, vm_size_t);

/*
 * Dispatch table binding the generic mmu_if.h kernel MMU interface to the
 * moea (32-bit OEA) implementations above.
 */
static mmu_method_t moea_methods[] = {
	MMUMETHOD(mmu_change_wiring,	moea_change_wiring),
	MMUMETHOD(mmu_clear_modify,	moea_clear_modify),
	MMUMETHOD(mmu_clear_reference,	moea_clear_reference),
	MMUMETHOD(mmu_copy_page,	moea_copy_page),
	MMUMETHOD(mmu_enter,		moea_enter),
	MMUMETHOD(mmu_enter_object,	moea_enter_object),
	MMUMETHOD(mmu_enter_quick,	moea_enter_quick),
	MMUMETHOD(mmu_extract,		moea_extract),
	MMUMETHOD(mmu_extract_and_hold,	moea_extract_and_hold),
	MMUMETHOD(mmu_init,		moea_init),
	MMUMETHOD(mmu_is_modified,	moea_is_modified),
	MMUMETHOD(mmu_is_prefaultable,	moea_is_prefaultable),
	MMUMETHOD(mmu_is_referenced,	moea_is_referenced),
	MMUMETHOD(mmu_ts_referenced,	moea_ts_referenced),
	MMUMETHOD(mmu_map,		moea_map),
	MMUMETHOD(mmu_page_exists_quick,moea_page_exists_quick),
	MMUMETHOD(mmu_page_wired_mappings,moea_page_wired_mappings),
	MMUMETHOD(mmu_pinit,		moea_pinit),
	MMUMETHOD(mmu_pinit0,		moea_pinit0),
	MMUMETHOD(mmu_protect,		moea_protect),
	MMUMETHOD(mmu_qenter,		moea_qenter),
	MMUMETHOD(mmu_qremove,		moea_qremove),
	MMUMETHOD(mmu_release,		moea_release),
	MMUMETHOD(mmu_remove,		moea_remove),
	MMUMETHOD(mmu_remove_all,	moea_remove_all),
	MMUMETHOD(mmu_remove_write,	moea_remove_write),
	MMUMETHOD(mmu_sync_icache,	moea_sync_icache),
	MMUMETHOD(mmu_zero_page,	moea_zero_page),
	MMUMETHOD(mmu_zero_page_area,	moea_zero_page_area),
	MMUMETHOD(mmu_zero_page_idle,	moea_zero_page_idle),
	MMUMETHOD(mmu_activate,		moea_activate),
	MMUMETHOD(mmu_deactivate,	moea_deactivate),
	MMUMETHOD(mmu_page_set_memattr,	moea_page_set_memattr),

	/* Internal interfaces */
	MMUMETHOD(mmu_bootstrap,	moea_bootstrap),
	MMUMETHOD(mmu_cpu_bootstrap,	moea_cpu_bootstrap),
	MMUMETHOD(mmu_mapdev_attr,	moea_mapdev_attr),
	MMUMETHOD(mmu_mapdev,		moea_mapdev),
	MMUMETHOD(mmu_unmapdev,		moea_unmapdev),
	MMUMETHOD(mmu_kextract,		moea_kextract),
	MMUMETHOD(mmu_kenter,		moea_kenter),
	MMUMETHOD(mmu_kenter_attr,	moea_kenter_attr),
	MMUMETHOD(mmu_dev_direct_mapped,moea_dev_direct_mapped),

	{ 0, 0 }
};

MMU_DEF(oea_mmu, MMU_TYPE_OEA, moea_methods, 0);

/*
 * Compute the WIMG (Write-through/cache-Inhibit/Memory-coherence/Guarded)
 * PTE attribute bits for a physical address with the given memory attribute.
 * An explicit (non-default) attribute maps directly to PTE bits; otherwise
 * addresses inside a known physical memory region get PTE_M (coherent,
 * cacheable) and everything else is treated as device space (PTE_I | PTE_G).
 */
static __inline uint32_t
moea_calc_wimg(vm_offset_t pa, vm_memattr_t ma)
{
	uint32_t pte_lo;
	int i;

	if (ma != VM_MEMATTR_DEFAULT) {
		switch (ma) {
		case VM_MEMATTR_UNCACHEABLE:
			return (PTE_I | PTE_G);
		case VM_MEMATTR_WRITE_COMBINING:
		case VM_MEMATTR_WRITE_BACK:
		case VM_MEMATTR_PREFETCHABLE:
			return (PTE_I);
		case VM_MEMATTR_WRITE_THROUGH:
			return (PTE_W | PTE_M);
		}
	}

	/*
	 * Assume the page is cache inhibited and access is guarded unless
	 * it's in our available memory array.
	 */
	pte_lo = PTE_I | PTE_G;
	for (i = 0; i < pregions_sz; i++) {
		if ((pa >= pregions[i].mr_start) &&
		    (pa < (pregions[i].mr_start + pregions[i].mr_size))) {
			pte_lo = PTE_M;
			break;
		}
	}

	return pte_lo;
}

/*
 * Invalidate the TLB entry for a single virtual address.  The
 * ptesync/tlbie/eieio/tlbsync/ptesync sequence is serialized by tlbie_mtx
 * because only one tlbie may be outstanding system-wide at a time.
 */
static void
tlbie(vm_offset_t va)
{

	mtx_lock_spin(&tlbie_mtx);
	__asm __volatile("ptesync");
	__asm __volatile("tlbie %0" :: "r"(va));
	__asm __volatile("eieio; tlbsync; ptesync");
	mtx_unlock_spin(&tlbie_mtx);
}

/*
 * Invalidate the entire TLB by issuing tlbie for one page in each TLB
 * congruence class (page-sized steps over a 256KB window — assumes the TLB
 * is indexed by the low-order EA bits covered by that range).
 */
static void
tlbia(void)
{
	vm_offset_t va;

	for (va = 0; va < 0x00040000; va += 0x00001000) {
		__asm __volatile("tlbie %0" :: "r"(va));
		powerpc_sync();
	}
	__asm __volatile("tlbsync");
	powerpc_sync();
}

/*
 * Return the segment register value covering va from the per-pmap
 * segment register array sr[].
 */
static __inline int
va_to_sr(u_int *sr, vm_offset_t va)
{
	return (sr[(uintptr_t)va >> ADDR_SR_SHFT]);
}

/*
 * Compute the primary PTEG index for (segment register, address):
 * hash the VSID against the page index and mask to the table size.
 */
static __inline u_int
va_to_pteg(u_int sr, vm_offset_t addr)
{
	u_int hash;

	hash = (sr & SR_VSID_MASK) ^ (((u_int)addr & ADDR_PIDX) >>
	    ADDR_PIDX_SHFT);
	return (hash & moea_pteg_mask);
}

/* Return the head of the PVO list anchored in a page's machine-dependent data. */
static __inline struct pvo_head *
vm_page_to_pvoh(vm_page_t m)
{

	return (&m->md.mdpg_pvoh);
}

/* Clear attribute bits (REF/CHG tracking) cached on the page. */
static __inline void
moea_attr_clear(vm_page_t m, int ptebit)
{

	mtx_assert(&vm_page_queue_mtx, MA_OWNED);
	m->md.mdpg_attrs &= ~ptebit;
}

/* Fetch the attribute bits cached on the page. */
static __inline int
moea_attr_fetch(vm_page_t m)
{

	return (m->md.mdpg_attrs);
}

/* Save (OR in) attribute bits on the page. */
static __inline void
moea_attr_save(vm_page_t m, int ptebit)
{

	mtx_assert(&vm_page_queue_mtx, MA_OWNED);
	m->md.mdpg_attrs |= ptebit;
}

/* Return 1 iff the two PTEs have identical high (match) words. */
static __inline int
moea_pte_compare(const struct pte *pt, const struct pte *pvo_pt)
{
	if (pt->pte_hi == pvo_pt->pte_hi)
		return (1);

	return (0);
}

/*
 * Return nonzero iff the hardware PTE (ignoring its valid bit) matches the
 * PTE-high word that (sr, va, which) would generate.
 */
static __inline int
moea_pte_match(struct pte *pt, u_int sr, vm_offset_t va, int which)
{
	return (pt->pte_hi & ~PTE_VALID) ==
	    (((sr & SR_VSID_MASK) << PTE_VSID_SHFT) |
	    ((va >> ADDR_API_SHFT) & PTE_API) | which);
}

static __inline void
moea_pte_create(struct pte *pt, u_int sr, vm_offset_t va, u_int pte_lo)
{

	mtx_assert(&moea_table_mutex, MA_OWNED);

	/*
	 * Construct a PTE.  Default to IMB initially.  Valid bit only gets
	 * set when the real pte is set in memory.
	 *
	 * Note: Don't set the valid bit for correct operation of tlb update.
	 */
	pt->pte_hi = ((sr & SR_VSID_MASK) << PTE_VSID_SHFT) |
	    (((va & ADDR_PIDX) >> ADDR_API_SHFT) & PTE_API);
	pt->pte_lo = pte_lo;
}

/* Propagate the hardware-set REF/CHG bits from the real PTE into the PVO's copy. */
static __inline void
moea_pte_synch(struct pte *pt, struct pte *pvo_pt)
{

	mtx_assert(&moea_table_mutex, MA_OWNED);
	pvo_pt->pte_lo |= pt->pte_lo & (PTE_REF | PTE_CHG);
}

/* Clear the given low-word bit(s) in the real PTE and invalidate its TLB entry. */
static __inline void
moea_pte_clear(struct pte *pt, vm_offset_t va, int ptebit)
{

	mtx_assert(&moea_table_mutex, MA_OWNED);

	/*
	 * As shown in Section 7.6.3.2.3
	 */
	pt->pte_lo &= ~ptebit;
	tlbie(va);
}

/*
 * Install pvo_pt into the hardware PTE slot pt.  The low word must be
 * written and made visible (powerpc_sync) before the high word sets
 * PTE_VALID, so the MMU never sees a valid entry with a stale low word.
 */
static __inline void
moea_pte_set(struct pte *pt, struct pte *pvo_pt)
{

	mtx_assert(&moea_table_mutex, MA_OWNED);
	pvo_pt->pte_hi |= PTE_VALID;

	/*
	 * Update the PTE as defined in section 7.6.3.1.
	 * Note that the REF/CHG bits are from pvo_pt and thus should have
	 * been saved so this routine can restore them (if desired).
	 */
	pt->pte_lo = pvo_pt->pte_lo;
	powerpc_sync();
	pt->pte_hi = pvo_pt->pte_hi;
	powerpc_sync();
	moea_pte_valid++;
}

/*
 * Remove the hardware PTE pt: clear PTE_VALID, flush the TLB entry for va,
 * then harvest the REF/CHG bits back into the PVO's copy.
 */
static __inline void
moea_pte_unset(struct pte *pt, struct pte *pvo_pt, vm_offset_t va)
{

	mtx_assert(&moea_table_mutex, MA_OWNED);
	pvo_pt->pte_hi &= ~PTE_VALID;

	/*
	 * Force the reg & chg bits back into the PTEs.
	 */
	powerpc_sync();

	/*
	 * Invalidate the pte.
	 */
	pt->pte_hi &= ~PTE_VALID;

	tlbie(va);

	/*
	 * Save the reg & chg bits.
	 */
	moea_pte_synch(pt, pvo_pt);
	moea_pte_valid--;
}

/* Replace the contents of a hardware PTE slot: unset the old, set the new. */
static __inline void
moea_pte_change(struct pte *pt, struct pte *pvo_pt, vm_offset_t va)
{

	/*
	 * Invalidate the PTE
	 */
	moea_pte_unset(pt, pvo_pt, va);
	moea_pte_set(pt, pvo_pt);
}

/*
 * Quick sort callout for comparing memory regions.
58977957Sbenno */ 59090643Sbennostatic int om_cmp(const void *a, const void *b); 59190643Sbenno 59290643Sbennostatic int 59390643Sbennoom_cmp(const void *a, const void *b) 59490643Sbenno{ 59590643Sbenno const struct ofw_map *mapa; 59690643Sbenno const struct ofw_map *mapb; 59790643Sbenno 59890643Sbenno mapa = a; 59990643Sbenno mapb = b; 60090643Sbenno if (mapa->om_pa < mapb->om_pa) 60190643Sbenno return (-1); 60290643Sbenno else if (mapa->om_pa > mapb->om_pa) 60390643Sbenno return (1); 60490643Sbenno else 60590643Sbenno return (0); 60677957Sbenno} 60777957Sbenno 60877957Sbennovoid 609190681Snwhitehornmoea_cpu_bootstrap(mmu_t mmup, int ap) 610178628Smarcel{ 611178628Smarcel u_int sdr; 612178628Smarcel int i; 613178628Smarcel 614178628Smarcel if (ap) { 615183094Smarcel powerpc_sync(); 616178628Smarcel __asm __volatile("mtdbatu 0,%0" :: "r"(battable[0].batu)); 617178628Smarcel __asm __volatile("mtdbatl 0,%0" :: "r"(battable[0].batl)); 618178628Smarcel isync(); 619178628Smarcel __asm __volatile("mtibatu 0,%0" :: "r"(battable[0].batu)); 620178628Smarcel __asm __volatile("mtibatl 0,%0" :: "r"(battable[0].batl)); 621178628Smarcel isync(); 622178628Smarcel } 623178628Smarcel 624178629Smarcel __asm __volatile("mtdbatu 1,%0" :: "r"(battable[8].batu)); 625178629Smarcel __asm __volatile("mtdbatl 1,%0" :: "r"(battable[8].batl)); 626178629Smarcel isync(); 627178628Smarcel 628178629Smarcel __asm __volatile("mtibatu 1,%0" :: "r"(0)); 629178629Smarcel __asm __volatile("mtdbatu 2,%0" :: "r"(0)); 630178629Smarcel __asm __volatile("mtibatu 2,%0" :: "r"(0)); 631178629Smarcel __asm __volatile("mtdbatu 3,%0" :: "r"(0)); 632178629Smarcel __asm __volatile("mtibatu 3,%0" :: "r"(0)); 633178628Smarcel isync(); 634178628Smarcel 635178628Smarcel for (i = 0; i < 16; i++) 636215163Snwhitehorn mtsrin(i << ADDR_SR_SHFT, kernel_pmap->pm_sr[i]); 637183094Smarcel powerpc_sync(); 638178628Smarcel 639178628Smarcel sdr = (u_int)moea_pteg_table | (moea_pteg_mask >> 10); 640178628Smarcel __asm 
__volatile("mtsdr1 %0" :: "r"(sdr));
	isync();

	tlbia();
}

/*
 * Bootstrap the 32-bit OEA MMU layer: program BAT registers for low RAM,
 * PCI and obio device windows, size physical memory (honoring the
 * hw.physmem tunable), allocate the PTEG and PVO tables, initialize the
 * kernel pmap, replay the Open Firmware translations, and carve out
 * early KVA for thread0's kernel stack, the message buffer, and the
 * dynamic per-CPU area.  Runs before the VM system is up.
 */
void
moea_bootstrap(mmu_t mmup, vm_offset_t kernelstart, vm_offset_t kernelend)
{
	ihandle_t mmui;
	phandle_t chosen, mmu;
	int	sz;
	int	i, j;
	vm_size_t	size, physsz, hwphyssz;
	vm_offset_t	pa, va, off;
	void		*dpcpu;
	register_t	msr;

	/*
	 * Set up BAT0 to map the lowest 256 MB area
	 */
	battable[0x0].batl = BATL(0x00000000, BAT_M, BAT_PP_RW);
	battable[0x0].batu = BATU(0x00000000, BAT_BL_256M, BAT_Vs);

	/*
	 * Map PCI memory space (cache-inhibited, guarded).
	 */
	battable[0x8].batl = BATL(0x80000000, BAT_I|BAT_G, BAT_PP_RW);
	battable[0x8].batu = BATU(0x80000000, BAT_BL_256M, BAT_Vs);

	battable[0x9].batl = BATL(0x90000000, BAT_I|BAT_G, BAT_PP_RW);
	battable[0x9].batu = BATU(0x90000000, BAT_BL_256M, BAT_Vs);

	battable[0xa].batl = BATL(0xa0000000, BAT_I|BAT_G, BAT_PP_RW);
	battable[0xa].batu = BATU(0xa0000000, BAT_BL_256M, BAT_Vs);

	battable[0xb].batl = BATL(0xb0000000, BAT_I|BAT_G, BAT_PP_RW);
	battable[0xb].batu = BATU(0xb0000000, BAT_BL_256M, BAT_Vs);

	/*
	 * Map obio devices.
	 */
	battable[0xf].batl = BATL(0xf0000000, BAT_I|BAT_G, BAT_PP_RW);
	battable[0xf].batu = BATU(0xf0000000, BAT_BL_256M, BAT_Vs);

	/*
	 * Use an IBAT and a DBAT to map the bottom segment of memory
	 * where we are.  Turn off instruction relocation temporarily
	 * to prevent faults while reprogramming the IBAT.
	 */
	msr = mfmsr();
	mtmsr(msr & ~PSL_IR);
	__asm (".balign 32; \n"
	       "mtibatu 0,%0; mtibatl 0,%1; isync; \n"
	       "mtdbatu 0,%0; mtdbatl 0,%1; isync"
	    :: "r"(battable[0].batu), "r"(battable[0].batl));
	mtmsr(msr);

	/* map pci space */
	__asm __volatile("mtdbatu 1,%0" :: "r"(battable[8].batu));
	__asm __volatile("mtdbatl 1,%0" :: "r"(battable[8].batl));
	isync();

	/* set global direct map flag */
	hw_direct_map = 1;

	mem_regions(&pregions, &pregions_sz, &regions, &regions_sz);
	CTR0(KTR_PMAP, "moea_bootstrap: physical memory");

	for (i = 0; i < pregions_sz; i++) {
		vm_offset_t pa;
		vm_offset_t end;

		CTR3(KTR_PMAP, "physregion: %#x - %#x (%#x)",
			pregions[i].mr_start,
			pregions[i].mr_start + pregions[i].mr_size,
			pregions[i].mr_size);
		/*
		 * Install entries into the BAT table to allow all
		 * of physmem to be covered by on-demand BAT entries.
		 * The loop will sometimes set the same battable element
		 * twice, but that's fine since they won't be used for
		 * a while yet.
		 */
		pa = pregions[i].mr_start & 0xf0000000;
		end = pregions[i].mr_start + pregions[i].mr_size;
		do {
			u_int n = pa >> ADDR_SR_SHFT;

			battable[n].batl = BATL(pa, BAT_M, BAT_PP_RW);
			battable[n].batu = BATU(pa, BAT_BL_256M, BAT_Vs);
			pa += SEGMENT_LENGTH;
		} while (pa < end);
	}

	if (sizeof(phys_avail)/sizeof(phys_avail[0]) < regions_sz)
		panic("moea_bootstrap: phys_avail too small");

	/*
	 * Build phys_avail[] as (start, end) pairs, clamping total memory
	 * to the hw.physmem tunable if it is set.
	 */
	phys_avail_count = 0;
	physsz = 0;
	hwphyssz = 0;
	TUNABLE_ULONG_FETCH("hw.physmem", (u_long *) &hwphyssz);
	for (i = 0, j = 0; i < regions_sz; i++, j += 2) {
		CTR3(KTR_PMAP, "region: %#x - %#x (%#x)", regions[i].mr_start,
		    regions[i].mr_start + regions[i].mr_size,
		    regions[i].mr_size);
		if (hwphyssz != 0 &&
		    (physsz + regions[i].mr_size) >= hwphyssz) {
			if (physsz < hwphyssz) {
				phys_avail[j] = regions[i].mr_start;
				phys_avail[j + 1] = regions[i].mr_start +
				    hwphyssz - physsz;
				physsz = hwphyssz;
				phys_avail_count++;
			}
			break;
		}
		phys_avail[j] = regions[i].mr_start;
		phys_avail[j + 1] = regions[i].mr_start + regions[i].mr_size;
		phys_avail_count++;
		physsz += regions[i].mr_size;
	}
	physmem = btoc(physsz);

	/*
	 * Allocate PTEG table.
	 */
#ifdef PTEGCOUNT
	moea_pteg_count = PTEGCOUNT;
#else
	moea_pteg_count = 0x1000;

	/* Scale with physical memory: roughly physmem/2, power of two. */
	while (moea_pteg_count < physmem)
		moea_pteg_count <<= 1;

	moea_pteg_count >>= 1;
#endif /* PTEGCOUNT */

	size = moea_pteg_count * sizeof(struct pteg);
	CTR2(KTR_PMAP, "moea_bootstrap: %d PTEGs, %d bytes", moea_pteg_count,
	    size);
	/* Aligned to its own size, as the hash function requires. */
	moea_pteg_table = (struct pteg *)moea_bootstrap_alloc(size, size);
	CTR1(KTR_PMAP, "moea_bootstrap: PTEG table at %p", moea_pteg_table);
	bzero((void *)moea_pteg_table, moea_pteg_count * sizeof(struct pteg));
	moea_pteg_mask = moea_pteg_count - 1;

	/*
	 * Allocate pv/overflow lists (one PVO list head per PTEG).
	 */
	size = sizeof(struct pvo_head) * moea_pteg_count;
	moea_pvo_table = (struct pvo_head *)moea_bootstrap_alloc(size,
	    PAGE_SIZE);
	CTR1(KTR_PMAP, "moea_bootstrap: PVO table at %p", moea_pvo_table);
	for (i = 0; i < moea_pteg_count; i++)
		LIST_INIT(&moea_pvo_table[i]);

	/*
	 * Initialize the lock that synchronizes access to the pteg and pvo
	 * tables.
	 */
	mtx_init(&moea_table_mutex, "pmap table", NULL, MTX_DEF |
	    MTX_RECURSE);
	mtx_init(&moea_vsid_mutex, "VSID table", NULL, MTX_DEF);

	mtx_init(&tlbie_mtx, "tlbie", NULL, MTX_SPIN);

	/*
	 * Initialise the unmanaged pvo pool.
	 */
	moea_bpvo_pool = (struct pvo_entry *)moea_bootstrap_alloc(
		BPVO_POOL_SIZE*sizeof(struct pvo_entry), 0);
	moea_bpvo_pool_index = 0;

	/*
	 * Make sure kernel vsid is allocated as well as VSID 0.
	 */
	moea_vsid_bitmap[(KERNEL_VSIDBITS & (NPMAPS - 1)) / VSID_NBPW]
		|= 1 << (KERNEL_VSIDBITS % VSID_NBPW);
	moea_vsid_bitmap[0] |= 1;

	/*
	 * Initialize the kernel pmap (which is statically allocated).
	 */
	PMAP_LOCK_INIT(kernel_pmap);
	for (i = 0; i < 16; i++)
		kernel_pmap->pm_sr[i] = EMPTY_SEGMENT + i;
	CPU_FILL(&kernel_pmap->pm_active);
	LIST_INIT(&kernel_pmap->pmap_pvo);

	/*
	 * Set up the Open Firmware mappings
	 */
	if ((chosen = OF_finddevice("/chosen")) == -1)
		panic("moea_bootstrap: can't find /chosen");
	OF_getprop(chosen, "mmu", &mmui, 4);
	if ((mmu = OF_instance_to_package(mmui)) == -1)
		panic("moea_bootstrap: can't get mmu package");
	if ((sz = OF_getproplen(mmu, "translations")) == -1)
		panic("moea_bootstrap: can't get ofw translation count");
	/*
	 * Borrow scratch space from the first phys_avail range large
	 * enough to hold the translations array temporarily.
	 */
	translations = NULL;
	for (i = 0; phys_avail[i] != 0; i += 2) {
		if (phys_avail[i + 1] >= sz) {
			translations = (struct ofw_map *)phys_avail[i];
			break;
		}
	}
	if (translations == NULL)
		panic("moea_bootstrap: no space to copy translations");
	bzero(translations, sz);
	if (OF_getprop(mmu, "translations", translations, sz) == -1)
		panic("moea_bootstrap: can't get ofw translations");
	CTR0(KTR_PMAP, "moea_bootstrap: translations");
	sz /= sizeof(*translations);
	qsort(translations, sz, sizeof (*translations), om_cmp);
	for (i = 0; i < sz; i++) {
		CTR3(KTR_PMAP, "translation: pa=%#x va=%#x len=%#x",
		    translations[i].om_pa, translations[i].om_va,
		    translations[i].om_len);

		/*
		 * If the mapping is 1:1, let the RAM and device on-demand
		 * BAT tables take care of the translation.
		 */
		if (translations[i].om_va == translations[i].om_pa)
			continue;

		/* Enter the pages */
		for (off = 0; off < translations[i].om_len; off += PAGE_SIZE)
			moea_kenter(mmup, translations[i].om_va + off,
			    translations[i].om_pa + off);
	}

	/*
	 * Calculate the last available physical address.
	 */
	for (i = 0; phys_avail[i + 2] != 0; i += 2)
		;
	Maxmem = powerpc_btop(phys_avail[i + 1]);

	moea_cpu_bootstrap(mmup,0);

	pmap_bootstrapped++;

	/*
	 * Set the start and end of kva.
	 */
	virtual_avail = VM_MIN_KERNEL_ADDRESS;
	virtual_end = VM_MAX_SAFE_KERNEL_ADDRESS;

	/*
	 * Allocate a kernel stack with a guard page for thread0 and map it
	 * into the kernel page map.
	 */
	pa = moea_bootstrap_alloc(KSTACK_PAGES * PAGE_SIZE, PAGE_SIZE);
	va = virtual_avail + KSTACK_GUARD_PAGES * PAGE_SIZE;
	virtual_avail = va + KSTACK_PAGES * PAGE_SIZE;
	CTR2(KTR_PMAP, "moea_bootstrap: kstack0 at %#x (%#x)", pa, va);
	thread0.td_kstack = va;
	thread0.td_kstack_pages = KSTACK_PAGES;
	for (i = 0; i < KSTACK_PAGES; i++) {
		moea_kenter(mmup, va, pa);
		pa += PAGE_SIZE;
		va += PAGE_SIZE;
	}

	/*
	 * Allocate virtual address space for the message buffer.
	 */
	pa = msgbuf_phys = moea_bootstrap_alloc(msgbufsize, PAGE_SIZE);
	msgbufp = (struct msgbuf *)virtual_avail;
	va = virtual_avail;
	virtual_avail += round_page(msgbufsize);
	while (va < virtual_avail) {
		moea_kenter(mmup, va, pa);
		pa += PAGE_SIZE;
		va += PAGE_SIZE;
	}

	/*
	 * Allocate virtual address space for the dynamic percpu area.
	 */
	pa = moea_bootstrap_alloc(DPCPU_SIZE, PAGE_SIZE);
	dpcpu = (void *)virtual_avail;
	va = virtual_avail;
	virtual_avail += DPCPU_SIZE;
	while (va < virtual_avail) {
		moea_kenter(mmup, va, pa);
		pa += PAGE_SIZE;
		va += PAGE_SIZE;
	}
	dpcpu_init(dpcpu, 0);
}
 */
void
moea_activate(mmu_t mmu, struct thread *td)
{
	pmap_t	pm, pmr;

	/*
	 * Load all the data we need up front to encourage the compiler to
	 * not issue any loads while we have interrupts disabled below.
	 */
	pm = &td->td_proc->p_vmspace->vm_pmap;
	pmr = pm->pmap_phys;

	/* Mark this CPU active in the pmap and publish it as current. */
	CPU_SET(PCPU_GET(cpuid), &pm->pm_active);
	PCPU_SET(curpmap, pmr);
}

/*
 * Deactivate the given thread's user pmap on this CPU: clear the CPU
 * from the pmap's active set and drop the per-CPU curpmap pointer.
 */
void
moea_deactivate(mmu_t mmu, struct thread *td)
{
	pmap_t	pm;

	pm = &td->td_proc->p_vmspace->vm_pmap;
	CPU_CLR(PCPU_GET(cpuid), &pm->pm_active);
	PCPU_SET(curpmap, NULL);
}

/*
 * Set or clear the wired attribute on the mapping of va in pm, keeping
 * the pmap's wired_count statistic in sync.  If va has no mapping this
 * is a no-op.
 */
void
moea_change_wiring(mmu_t mmu, pmap_t pm, vm_offset_t va, boolean_t wired)
{
	struct	pvo_entry *pvo;

	PMAP_LOCK(pm);
	pvo = moea_pvo_find_va(pm, va & ~ADDR_POFF, NULL);

	if (pvo != NULL) {
		if (wired) {
			if ((pvo->pvo_vaddr & PVO_WIRED) == 0)
				pm->pm_stats.wired_count++;
			pvo->pvo_vaddr |= PVO_WIRED;
		} else {
			if ((pvo->pvo_vaddr & PVO_WIRED) != 0)
				pm->pm_stats.wired_count--;
			pvo->pvo_vaddr &= ~PVO_WIRED;
		}
	}
	PMAP_UNLOCK(pm);
}

/*
 * Copy the contents of page msrc into page mdst, addressing both by
 * physical address (the direct map set up in moea_bootstrap makes the
 * physical addresses directly dereferenceable).
 */
void
moea_copy_page(mmu_t mmu, vm_page_t msrc, vm_page_t mdst)
{
	vm_offset_t	dst;
	vm_offset_t	src;

	dst = VM_PAGE_TO_PHYS(mdst);
	src = VM_PAGE_TO_PHYS(msrc);

	kcopy((void *)src, (void *)dst, PAGE_SIZE);
}

/*
 * Zero a page of physical memory by temporarily mapping it into the tlb.
 */
void
moea_zero_page(mmu_t mmu, vm_page_t m)
{
	vm_offset_t pa = VM_PAGE_TO_PHYS(m);
	void *va = (void *)pa;

	bzero(va, PAGE_SIZE);
}

/* Zero the byte range [off, off + size) within page m. */
void
moea_zero_page_area(mmu_t mmu, vm_page_t m, int off, int size)
{
	vm_offset_t pa = VM_PAGE_TO_PHYS(m);
	void *va = (void *)(pa + off);

	bzero(va, size);
}

/*
 * NOTE(review): identical body to moea_zero_page(); kept as a separate
 * entry point of the mmu interface (presumably for idle-time zeroing).
 */
void
moea_zero_page_idle(mmu_t mmu, vm_page_t m)
{
	vm_offset_t pa = VM_PAGE_TO_PHYS(m);
	void *va = (void *)pa;

	bzero(va, PAGE_SIZE);
}

/*
 * Map the given physical page at the specified virtual address in the
 * target pmap with the protection requested.  If specified the page
 * will be wired down.
 */
void
moea_enter(mmu_t mmu, pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot,
    boolean_t wired)
{

	vm_page_lock_queues();
	PMAP_LOCK(pmap);
	moea_enter_locked(pmap, va, m, prot, wired);
	vm_page_unlock_queues();
	PMAP_UNLOCK(pmap);
}

/*
 * Map the given physical page at the specified virtual address in the
 * target pmap with the protection requested.  If specified the page
 * will be wired down.
 *
 * The page queues and pmap must be locked.
 */
static void
moea_enter_locked(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot,
    boolean_t wired)
{
	struct		pvo_head *pvo_head;
	uma_zone_t	zone;
	vm_page_t	pg;
	u_int		pte_lo, pvo_flags, was_exec;
	int		error;

	if (!moea_initialized) {
		/*
		 * Before moea_init() runs, treat every page as unmanaged
		 * and pretend it was already executable so the icache
		 * flush below is skipped (was_exec != 0).
		 */
		pvo_head = &moea_pvo_kunmanaged;
		zone = moea_upvo_zone;
		pvo_flags = 0;
		pg = NULL;
		was_exec = PTE_EXEC;
	} else {
		pvo_head = vm_page_to_pvoh(m);
		pg = m;
		zone = moea_mpvo_zone;
		pvo_flags = PVO_MANAGED;
		was_exec = 0;
	}
	if (pmap_bootstrapped)
		mtx_assert(&vm_page_queue_mtx, MA_OWNED);
	PMAP_LOCK_ASSERT(pmap, MA_OWNED);
	KASSERT((m->oflags & (VPO_UNMANAGED | VPO_BUSY)) != 0 ||
	    VM_OBJECT_LOCKED(m->object),
	    ("moea_enter_locked: page %p is not busy", m));

	/* XXX change the pvo head for fake pages */
	if ((m->oflags & VPO_UNMANAGED) != 0) {
		pvo_flags &= ~PVO_MANAGED;
		pvo_head = &moea_pvo_kunmanaged;
		zone = moea_upvo_zone;
	}

	/*
	 * If this is a managed page, and it's the first reference to the page,
	 * clear the execness of the page.  Otherwise fetch the execness.
	 */
	if ((pg != NULL) && ((m->oflags & VPO_UNMANAGED) == 0)) {
		if (LIST_EMPTY(pvo_head)) {
			moea_attr_clear(pg, PTE_EXEC);
		} else {
			was_exec = moea_attr_fetch(pg) & PTE_EXEC;
		}
	}

	/* WIMG bits come from the page's cache attributes. */
	pte_lo = moea_calc_wimg(VM_PAGE_TO_PHYS(m), pmap_page_get_memattr(m));

	if (prot & VM_PROT_WRITE) {
		pte_lo |= PTE_BW;
		if (pmap_bootstrapped &&
		    (m->oflags & VPO_UNMANAGED) == 0)
			vm_page_aflag_set(m, PGA_WRITEABLE);
	} else
		pte_lo |= PTE_BR;

	if (prot & VM_PROT_EXECUTE)
		pvo_flags |= PVO_EXECUTABLE;

	if (wired)
		pvo_flags |= PVO_WIRED;

	error = moea_pvo_enter(pmap, zone, pvo_head, va, VM_PAGE_TO_PHYS(m),
	    pte_lo, pvo_flags);

	/*
	 * Flush the real page from the instruction cache if this page is
	 * mapped executable and cacheable and was not previously mapped (or
	 * was not mapped executable).
	 */
	if (error == 0 && (pvo_flags & PVO_EXECUTABLE) &&
	    (pte_lo & PTE_I) == 0 && was_exec == 0) {
		/*
		 * Flush the real memory from the cache.
		 */
		moea_syncicache(VM_PAGE_TO_PHYS(m), PAGE_SIZE);
		if (pg != NULL)
			moea_attr_save(pg, PTE_EXEC);
	}

	/* XXX syncicache always until problems are sorted */
	moea_syncicache(VM_PAGE_TO_PHYS(m), PAGE_SIZE);
}

/*
 * Maps a sequence of resident pages belonging to the same object.
1140159303Salc * The sequence begins with the given page m_start. This page is 1141159303Salc * mapped at the given virtual address start. Each subsequent page is 1142159303Salc * mapped at a virtual address that is offset from start by the same 1143159303Salc * amount as the page is offset from m_start within the object. The 1144159303Salc * last page in the sequence is the page with the largest offset from 1145159303Salc * m_start that can be mapped at a virtual address less than the given 1146159303Salc * virtual address end. Not every virtual page between start and end 1147159303Salc * is mapped; only those for which a resident page exists with the 1148159303Salc * corresponding offset from m_start are mapped. 1149159303Salc */ 1150159303Salcvoid 1151159303Salcmoea_enter_object(mmu_t mmu, pmap_t pm, vm_offset_t start, vm_offset_t end, 1152159303Salc vm_page_t m_start, vm_prot_t prot) 1153159303Salc{ 1154159303Salc vm_page_t m; 1155159303Salc vm_pindex_t diff, psize; 1156159303Salc 1157159303Salc psize = atop(end - start); 1158159303Salc m = m_start; 1159208574Salc vm_page_lock_queues(); 1160159303Salc PMAP_LOCK(pm); 1161159303Salc while (m != NULL && (diff = m->pindex - m_start->pindex) < psize) { 1162159303Salc moea_enter_locked(pm, start + ptoa(diff), m, prot & 1163159303Salc (VM_PROT_READ | VM_PROT_EXECUTE), FALSE); 1164159303Salc m = TAILQ_NEXT(m, listq); 1165159303Salc } 1166208574Salc vm_page_unlock_queues(); 1167159303Salc PMAP_UNLOCK(pm); 1168159303Salc} 1169159303Salc 1170159627Supsvoid 1171152180Sgrehanmoea_enter_quick(mmu_t mmu, pmap_t pm, vm_offset_t va, vm_page_t m, 1172159627Sups vm_prot_t prot) 1173117045Salc{ 1174117045Salc 1175207796Salc vm_page_lock_queues(); 1176159303Salc PMAP_LOCK(pm); 1177159303Salc moea_enter_locked(pm, va, m, prot & (VM_PROT_READ | VM_PROT_EXECUTE), 1178152180Sgrehan FALSE); 1179207796Salc vm_page_unlock_queues(); 1180159303Salc PMAP_UNLOCK(pm); 1181117045Salc} 1182117045Salc 1183131658Salcvm_paddr_t 
moea_extract(mmu_t mmu, pmap_t pm, vm_offset_t va)
{
	struct	pvo_entry *pvo;
	vm_paddr_t pa;

	/* Return the physical address backing va, or 0 if unmapped. */
	PMAP_LOCK(pm);
	pvo = moea_pvo_find_va(pm, va & ~ADDR_POFF, NULL);
	if (pvo == NULL)
		pa = 0;
	else
		pa = (pvo->pvo_pte.pte.pte_lo & PTE_RPGN) | (va & ADDR_POFF);
	PMAP_UNLOCK(pm);
	return (pa);
}

/*
 * Atomically extract and hold the physical page with the given
 * pmap and virtual address pair if that mapping permits the given
 * protection.
 */
vm_page_t
moea_extract_and_hold(mmu_t mmu, pmap_t pmap, vm_offset_t va, vm_prot_t prot)
{
	struct	pvo_entry *pvo;
	vm_page_t m;
	vm_paddr_t pa;

	m = NULL;
	pa = 0;
	PMAP_LOCK(pmap);
retry:
	pvo = moea_pvo_find_va(pmap, va & ~ADDR_POFF, NULL);
	/* Writability is required only when VM_PROT_WRITE is requested. */
	if (pvo != NULL && (pvo->pvo_pte.pte.pte_hi & PTE_VALID) &&
	    ((pvo->pvo_pte.pte.pte_lo & PTE_PP) == PTE_RW ||
	     (prot & VM_PROT_WRITE) == 0)) {
		if (vm_page_pa_tryrelock(pmap, pvo->pvo_pte.pte.pte_lo & PTE_RPGN, &pa))
			goto retry;
		m = PHYS_TO_VM_PAGE(pvo->pvo_pte.pte.pte_lo & PTE_RPGN);
		vm_page_hold(m);
	}
	PA_UNLOCK_COND(pa);
	PMAP_UNLOCK(pmap);
	return (m);
}

/*
 * Create the UMA zones for managed (MPVO) and unmanaged (UPVO) pvo
 * entries and mark the module initialized; moea_enter_locked() keys
 * its managed-page handling off moea_initialized.
 */
void
moea_init(mmu_t mmu)
{

	moea_upvo_zone = uma_zcreate("UPVO entry", sizeof (struct pvo_entry),
	    NULL, NULL, NULL, NULL, UMA_ALIGN_PTR,
	    UMA_ZONE_VM | UMA_ZONE_NOFREE);
	moea_mpvo_zone = uma_zcreate("MPVO entry", sizeof(struct pvo_entry),
	    NULL, NULL, NULL, NULL, UMA_ALIGN_PTR,
	    UMA_ZONE_VM | UMA_ZONE_NOFREE);
	moea_initialized = TRUE;
}

/* Report whether any mapping of m has the referenced (PTE_REF) bit set. */
boolean_t
moea_is_referenced(mmu_t mmu, vm_page_t m)
{

	KASSERT((m->oflags & VPO_UNMANAGED) == 0,
	    ("moea_is_referenced: page %p is not managed", m));
	return (moea_query_bit(m, PTE_REF));
}

boolean_t
moea_is_modified(mmu_t mmu, vm_page_t m)
{

	KASSERT((m->oflags & VPO_UNMANAGED) == 0,
	    ("moea_is_modified: page %p is not managed", m));

	/*
	 * If the page is not VPO_BUSY, then PGA_WRITEABLE cannot be
	 * concurrently set while the object is locked.  Thus, if PGA_WRITEABLE
	 * is clear, no PTEs can have PTE_CHG set.
 */
	VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED);
	if ((m->oflags & VPO_BUSY) == 0 &&
	    (m->aflags & PGA_WRITEABLE) == 0)
		return (FALSE);
	return (moea_query_bit(m, PTE_CHG));
}

/*
 * Return TRUE if va has no valid mapping in pmap (i.e. a fault on it
 * could be satisfied), FALSE if a valid PTE already exists.
 */
boolean_t
moea_is_prefaultable(mmu_t mmu, pmap_t pmap, vm_offset_t va)
{
	struct pvo_entry *pvo;
	boolean_t rv;

	PMAP_LOCK(pmap);
	pvo = moea_pvo_find_va(pmap, va & ~ADDR_POFF, NULL);
	rv = pvo == NULL || (pvo->pvo_pte.pte.pte_hi & PTE_VALID) == 0;
	PMAP_UNLOCK(pmap);
	return (rv);
}

/* Clear the referenced (PTE_REF) bit in all mappings of m. */
void
moea_clear_reference(mmu_t mmu, vm_page_t m)
{

	KASSERT((m->oflags & VPO_UNMANAGED) == 0,
	    ("moea_clear_reference: page %p is not managed", m));
	moea_clear_bit(m, PTE_REF);
}

void
moea_clear_modify(mmu_t mmu, vm_page_t m)
{

	KASSERT((m->oflags & VPO_UNMANAGED) == 0,
	    ("moea_clear_modify: page %p is not managed", m));
	VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED);
	KASSERT((m->oflags & VPO_BUSY) == 0,
	    ("moea_clear_modify: page %p is busy", m));

	/*
	 * If the page is not PGA_WRITEABLE, then no PTEs can have PTE_CHG
	 * set.  If the object containing the page is locked and the page is
	 * not VPO_BUSY, then PGA_WRITEABLE cannot be concurrently set.
	 */
	if ((m->aflags & PGA_WRITEABLE) == 0)
		return;
	moea_clear_bit(m, PTE_CHG);
}

/*
 * Clear the write and modified bits in each of the given page's mappings.
 */
void
moea_remove_write(mmu_t mmu, vm_page_t m)
{
	struct	pvo_entry *pvo;
	struct	pte *pt;
	pmap_t	pmap;
	u_int	lo;

	KASSERT((m->oflags & VPO_UNMANAGED) == 0,
	    ("moea_remove_write: page %p is not managed", m));

	/*
	 * If the page is not VPO_BUSY, then PGA_WRITEABLE cannot be set by
	 * another thread while the object is locked.  Thus, if PGA_WRITEABLE
	 * is clear, no page table entries need updating.
	 */
	VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED);
	if ((m->oflags & VPO_BUSY) == 0 &&
	    (m->aflags & PGA_WRITEABLE) == 0)
		return;
	vm_page_lock_queues();
	lo = moea_attr_fetch(m);
	powerpc_sync();
	LIST_FOREACH(pvo, vm_page_to_pvoh(m), pvo_vlink) {
		pmap = pvo->pvo_pmap;
		PMAP_LOCK(pmap);
		/* Downgrade writable (non-PTE_BR) mappings to read-only. */
		if ((pvo->pvo_pte.pte.pte_lo & PTE_PP) != PTE_BR) {
			pt = moea_pvo_to_pte(pvo, -1);
			pvo->pvo_pte.pte.pte_lo &= ~PTE_PP;
			pvo->pvo_pte.pte.pte_lo |= PTE_BR;
			if (pt != NULL) {
				/* Accumulate PTE_CHG before clearing it. */
				moea_pte_synch(pt, &pvo->pvo_pte.pte);
				lo |= pvo->pvo_pte.pte.pte_lo;
				pvo->pvo_pte.pte.pte_lo &= ~PTE_CHG;
				moea_pte_change(pt, &pvo->pvo_pte.pte,
				    pvo->pvo_vaddr);
				mtx_unlock(&moea_table_mutex);
			}
		}
		PMAP_UNLOCK(pmap);
	}
	/* If any mapping had been dirtied, mark the page dirty. */
	if ((lo & PTE_CHG) != 0) {
		moea_attr_clear(m, PTE_CHG);
		vm_page_dirty(m);
	}
	vm_page_aflag_clear(m, PGA_WRITEABLE);
	vm_page_unlock_queues();
}

/*
 * moea_ts_referenced:
 *
 *	Return a count of reference bits for a page, clearing those bits.
 *	It is not necessary for every reference bit to be cleared, but it
 *	is necessary that 0 only be returned when there are truly no
 *	reference bits set.
 *
 *	XXX: The exact number of bits to check and clear is a matter that
 *	should be tested and standardized at some point in the future for
 *	optimal aging of shared pages.
 */
boolean_t
moea_ts_referenced(mmu_t mmu, vm_page_t m)
{

	KASSERT((m->oflags & VPO_UNMANAGED) == 0,
	    ("moea_ts_referenced: page %p is not managed", m));
	return (moea_clear_bit(m, PTE_REF));
}

/*
 * Modify the WIMG settings of all mappings for a page.
 */
void
moea_page_set_memattr(mmu_t mmu, vm_page_t m, vm_memattr_t ma)
{
	struct	pvo_entry *pvo;
	struct	pvo_head *pvo_head;
	struct	pte *pt;
	pmap_t	pmap;
	u_int	lo;

	/* Unmanaged pages have no mappings to rewrite; just record ma. */
	if ((m->oflags & VPO_UNMANAGED) != 0) {
		m->md.mdpg_cache_attrs = ma;
		return;
	}

	vm_page_lock_queues();
	pvo_head = vm_page_to_pvoh(m);
	lo = moea_calc_wimg(VM_PAGE_TO_PHYS(m), ma);

	/* Rewrite the WIMG bits of every mapping of this page. */
	LIST_FOREACH(pvo, pvo_head, pvo_vlink) {
		pmap = pvo->pvo_pmap;
		PMAP_LOCK(pmap);
		pt = moea_pvo_to_pte(pvo, -1);
		pvo->pvo_pte.pte.pte_lo &= ~PTE_WIMG;
		pvo->pvo_pte.pte.pte_lo |= lo;
		if (pt != NULL) {
			moea_pte_change(pt, &pvo->pvo_pte.pte,
			    pvo->pvo_vaddr);
			if (pvo->pvo_pmap == kernel_pmap)
				isync();
		}
		mtx_unlock(&moea_table_mutex);
		PMAP_UNLOCK(pmap);
	}
	m->md.mdpg_cache_attrs = ma;
	vm_page_unlock_queues();
}

/*
 * Map a wired page into kernel virtual address space.
 */
void
moea_kenter(mmu_t mmu, vm_offset_t va, vm_offset_t pa)
{

	moea_kenter_attr(mmu, va, pa, VM_MEMATTR_DEFAULT);
}

/*
 * As moea_kenter(), but with an explicit cache attribute used to
 * compute the mapping's WIMG bits.
 */
void
moea_kenter_attr(mmu_t mmu, vm_offset_t va, vm_offset_t pa, vm_memattr_t ma)
{
	u_int		pte_lo;
	int		error;

#if 0
	if (va < VM_MIN_KERNEL_ADDRESS)
		panic("moea_kenter: attempt to enter non-kernel address %#x",
		    va);
#endif

	pte_lo = moea_calc_wimg(pa, ma);

	PMAP_LOCK(kernel_pmap);
	error = moea_pvo_enter(kernel_pmap, moea_upvo_zone,
	    &moea_pvo_kunmanaged, va, pa, pte_lo, PVO_WIRED);

	/* ENOENT is tolerated here; any other failure is fatal. */
	if (error != 0 && error != ENOENT)
		panic("moea_kenter: failed to enter va %#x pa %#x: %d", va,
		    pa, error);

	/*
	 * Flush the real memory from the instruction cache.
	 */
	if ((pte_lo & (PTE_I | PTE_G)) == 0) {
		moea_syncicache(pa, PAGE_SIZE);
	}
	PMAP_UNLOCK(kernel_pmap);
}

/*
 * Extract the physical page address associated with the given kernel virtual
 * address.
146994838Sbenno */ 147090643Sbennovm_offset_t 1471152180Sgrehanmoea_kextract(mmu_t mmu, vm_offset_t va) 147277957Sbenno{ 147394838Sbenno struct pvo_entry *pvo; 1474134329Salc vm_paddr_t pa; 147594838Sbenno 1476125185Sgrehan /* 1477183290Snwhitehorn * Allow direct mappings on 32-bit OEA 1478125185Sgrehan */ 1479125185Sgrehan if (va < VM_MIN_KERNEL_ADDRESS) { 1480125185Sgrehan return (va); 1481125185Sgrehan } 1482125185Sgrehan 1483134329Salc PMAP_LOCK(kernel_pmap); 1484152180Sgrehan pvo = moea_pvo_find_va(kernel_pmap, va & ~ADDR_POFF, NULL); 1485152180Sgrehan KASSERT(pvo != NULL, ("moea_kextract: no addr found")); 1486183290Snwhitehorn pa = (pvo->pvo_pte.pte.pte_lo & PTE_RPGN) | (va & ADDR_POFF); 1487134329Salc PMAP_UNLOCK(kernel_pmap); 1488134329Salc return (pa); 148977957Sbenno} 149077957Sbenno 149191456Sbenno/* 149291456Sbenno * Remove a wired page from kernel virtual address space. 149391456Sbenno */ 149477957Sbennovoid 1495152180Sgrehanmoea_kremove(mmu_t mmu, vm_offset_t va) 149677957Sbenno{ 149791456Sbenno 1498152180Sgrehan moea_remove(mmu, kernel_pmap, va, va + PAGE_SIZE); 149977957Sbenno} 150077957Sbenno 150177957Sbenno/* 150290643Sbenno * Map a range of physical addresses into kernel virtual address space. 150390643Sbenno * 150490643Sbenno * The value passed in *virt is a suggested virtual address for the mapping. 150590643Sbenno * Architectures which can support a direct-mapped physical to virtual region 150690643Sbenno * can return the appropriate address within that region, leaving '*virt' 150790643Sbenno * unchanged. We cannot and therefore do not; *virt is updated with the 150890643Sbenno * first usable address after the mapped region. 
150977957Sbenno */ 151090643Sbennovm_offset_t 1511152180Sgrehanmoea_map(mmu_t mmu, vm_offset_t *virt, vm_offset_t pa_start, 1512152180Sgrehan vm_offset_t pa_end, int prot) 151377957Sbenno{ 151490643Sbenno vm_offset_t sva, va; 151577957Sbenno 151690643Sbenno sva = *virt; 151790643Sbenno va = sva; 151890643Sbenno for (; pa_start < pa_end; pa_start += PAGE_SIZE, va += PAGE_SIZE) 1519152180Sgrehan moea_kenter(mmu, va, pa_start); 152090643Sbenno *virt = va; 152190643Sbenno return (sva); 152277957Sbenno} 152377957Sbenno 152477957Sbenno/* 152591403Ssilby * Returns true if the pmap's pv is one of the first 152691403Ssilby * 16 pvs linked to from this page. This count may 152791403Ssilby * be changed upwards or downwards in the future; it 152891403Ssilby * is only necessary that true be returned for a small 152991403Ssilby * subset of pmaps for proper page aging. 153091403Ssilby */ 153190643Sbennoboolean_t 1532152180Sgrehanmoea_page_exists_quick(mmu_t mmu, pmap_t pmap, vm_page_t m) 153390643Sbenno{ 1534110172Sgrehan int loops; 1535110172Sgrehan struct pvo_entry *pvo; 1536208990Salc boolean_t rv; 1537110172Sgrehan 1538224746Skib KASSERT((m->oflags & VPO_UNMANAGED) == 0, 1539208990Salc ("moea_page_exists_quick: page %p is not managed", m)); 1540110172Sgrehan loops = 0; 1541208990Salc rv = FALSE; 1542208990Salc vm_page_lock_queues(); 1543110172Sgrehan LIST_FOREACH(pvo, vm_page_to_pvoh(m), pvo_vlink) { 1544208990Salc if (pvo->pvo_pmap == pmap) { 1545208990Salc rv = TRUE; 1546208990Salc break; 1547208990Salc } 1548110172Sgrehan if (++loops >= 16) 1549110172Sgrehan break; 1550110172Sgrehan } 1551208990Salc vm_page_unlock_queues(); 1552208990Salc return (rv); 155390643Sbenno} 155477957Sbenno 1555173708Salc/* 1556173708Salc * Return the number of managed mappings to the given physical page 1557173708Salc * that are wired. 
1558173708Salc */ 1559173708Salcint 1560173708Salcmoea_page_wired_mappings(mmu_t mmu, vm_page_t m) 1561173708Salc{ 1562173708Salc struct pvo_entry *pvo; 1563173708Salc int count; 1564173708Salc 1565173708Salc count = 0; 1566224746Skib if ((m->oflags & VPO_UNMANAGED) != 0) 1567173708Salc return (count); 1568207796Salc vm_page_lock_queues(); 1569173708Salc LIST_FOREACH(pvo, vm_page_to_pvoh(m), pvo_vlink) 1570173708Salc if ((pvo->pvo_vaddr & PVO_WIRED) != 0) 1571173708Salc count++; 1572207796Salc vm_page_unlock_queues(); 1573173708Salc return (count); 1574173708Salc} 1575173708Salc 1576152180Sgrehanstatic u_int moea_vsidcontext; 157777957Sbenno 157890643Sbennovoid 1579152180Sgrehanmoea_pinit(mmu_t mmu, pmap_t pmap) 158090643Sbenno{ 158190643Sbenno int i, mask; 158290643Sbenno u_int entropy; 158377957Sbenno 1584152180Sgrehan KASSERT((int)pmap < VM_MIN_KERNEL_ADDRESS, ("moea_pinit: virt pmap")); 1585134329Salc PMAP_LOCK_INIT(pmap); 1586228412Snwhitehorn LIST_INIT(&pmap->pmap_pvo); 1587126478Sgrehan 158890643Sbenno entropy = 0; 158990643Sbenno __asm __volatile("mftb %0" : "=r"(entropy)); 159077957Sbenno 1591183290Snwhitehorn if ((pmap->pmap_phys = (pmap_t)moea_kextract(mmu, (vm_offset_t)pmap)) 1592183290Snwhitehorn == NULL) { 1593183290Snwhitehorn pmap->pmap_phys = pmap; 1594183290Snwhitehorn } 1595183290Snwhitehorn 1596183290Snwhitehorn 1597212278Snwhitehorn mtx_lock(&moea_vsid_mutex); 159890643Sbenno /* 159990643Sbenno * Allocate some segment registers for this pmap. 160090643Sbenno */ 160190643Sbenno for (i = 0; i < NPMAPS; i += VSID_NBPW) { 160290643Sbenno u_int hash, n; 160377957Sbenno 160477957Sbenno /* 160590643Sbenno * Create a new value by mutiplying by a prime and adding in 160690643Sbenno * entropy from the timebase register. This is to make the 160790643Sbenno * VSID more random so that the PT hash function collides 160890643Sbenno * less often. (Note that the prime casues gcc to do shifts 160990643Sbenno * instead of a multiply.) 
161077957Sbenno */ 1611152180Sgrehan moea_vsidcontext = (moea_vsidcontext * 0x1105) + entropy; 1612152180Sgrehan hash = moea_vsidcontext & (NPMAPS - 1); 161390643Sbenno if (hash == 0) /* 0 is special, avoid it */ 161490643Sbenno continue; 161590643Sbenno n = hash >> 5; 161690643Sbenno mask = 1 << (hash & (VSID_NBPW - 1)); 1617152180Sgrehan hash = (moea_vsidcontext & 0xfffff); 1618152180Sgrehan if (moea_vsid_bitmap[n] & mask) { /* collision? */ 161990643Sbenno /* anything free in this bucket? */ 1620152180Sgrehan if (moea_vsid_bitmap[n] == 0xffffffff) { 1621152180Sgrehan entropy = (moea_vsidcontext >> 20); 162290643Sbenno continue; 162390643Sbenno } 1624212322Snwhitehorn i = ffs(~moea_vsid_bitmap[n]) - 1; 162590643Sbenno mask = 1 << i; 162690643Sbenno hash &= 0xfffff & ~(VSID_NBPW - 1); 162790643Sbenno hash |= i; 162877957Sbenno } 1629227627Snwhitehorn KASSERT(!(moea_vsid_bitmap[n] & mask), 1630227627Snwhitehorn ("Allocating in-use VSID group %#x\n", hash)); 1631152180Sgrehan moea_vsid_bitmap[n] |= mask; 163290643Sbenno for (i = 0; i < 16; i++) 163390643Sbenno pmap->pm_sr[i] = VSID_MAKE(i, hash); 1634212278Snwhitehorn mtx_unlock(&moea_vsid_mutex); 163590643Sbenno return; 163690643Sbenno } 163777957Sbenno 1638212278Snwhitehorn mtx_unlock(&moea_vsid_mutex); 1639152180Sgrehan panic("moea_pinit: out of segments"); 164077957Sbenno} 164177957Sbenno 164277957Sbenno/* 164390643Sbenno * Initialize the pmap associated with process 0. 164477957Sbenno */ 164577957Sbennovoid 1646152180Sgrehanmoea_pinit0(mmu_t mmu, pmap_t pm) 164777957Sbenno{ 164877957Sbenno 1649152180Sgrehan moea_pinit(mmu, pm); 165090643Sbenno bzero(&pm->pm_stats, sizeof(pm->pm_stats)); 165177957Sbenno} 165277957Sbenno 165394838Sbenno/* 165494838Sbenno * Set the physical protection on the specified range of this map as requested. 
165594838Sbenno */ 165690643Sbennovoid 1657152180Sgrehanmoea_protect(mmu_t mmu, pmap_t pm, vm_offset_t sva, vm_offset_t eva, 1658152180Sgrehan vm_prot_t prot) 165990643Sbenno{ 166094838Sbenno struct pvo_entry *pvo; 166194838Sbenno struct pte *pt; 166294838Sbenno int pteidx; 166394838Sbenno 166494838Sbenno KASSERT(pm == &curproc->p_vmspace->vm_pmap || pm == kernel_pmap, 1665152180Sgrehan ("moea_protect: non current pmap")); 166694838Sbenno 166794838Sbenno if ((prot & VM_PROT_READ) == VM_PROT_NONE) { 1668152180Sgrehan moea_remove(mmu, pm, sva, eva); 166994838Sbenno return; 167094838Sbenno } 167194838Sbenno 1672132220Salc vm_page_lock_queues(); 1673134329Salc PMAP_LOCK(pm); 167494838Sbenno for (; sva < eva; sva += PAGE_SIZE) { 1675152180Sgrehan pvo = moea_pvo_find_va(pm, sva, &pteidx); 167694838Sbenno if (pvo == NULL) 167794838Sbenno continue; 167894838Sbenno 167994838Sbenno if ((prot & VM_PROT_EXECUTE) == 0) 168094838Sbenno pvo->pvo_vaddr &= ~PVO_EXECUTABLE; 168194838Sbenno 168294838Sbenno /* 168394838Sbenno * Grab the PTE pointer before we diddle with the cached PTE 168494838Sbenno * copy. 168594838Sbenno */ 1686152180Sgrehan pt = moea_pvo_to_pte(pvo, pteidx); 168794838Sbenno /* 168894838Sbenno * Change the protection of the page. 168994838Sbenno */ 1690183290Snwhitehorn pvo->pvo_pte.pte.pte_lo &= ~PTE_PP; 1691183290Snwhitehorn pvo->pvo_pte.pte.pte_lo |= PTE_BR; 169294838Sbenno 169394838Sbenno /* 169494838Sbenno * If the PVO is in the page table, update that pte as well. 169594838Sbenno */ 1696159928Salc if (pt != NULL) { 1697183290Snwhitehorn moea_pte_change(pt, &pvo->pvo_pte.pte, pvo->pvo_vaddr); 1698159928Salc mtx_unlock(&moea_table_mutex); 1699159928Salc } 170094838Sbenno } 1701132220Salc vm_page_unlock_queues(); 1702134329Salc PMAP_UNLOCK(pm); 170377957Sbenno} 170477957Sbenno 170591456Sbenno/* 170691456Sbenno * Map a list of wired pages into kernel virtual address space. 
This is 170791456Sbenno * intended for temporary mappings which do not need page modification or 170891456Sbenno * references recorded. Existing mappings in the region are overwritten. 170991456Sbenno */ 171090643Sbennovoid 1711152180Sgrehanmoea_qenter(mmu_t mmu, vm_offset_t sva, vm_page_t *m, int count) 171277957Sbenno{ 1713110172Sgrehan vm_offset_t va; 171477957Sbenno 1715110172Sgrehan va = sva; 1716110172Sgrehan while (count-- > 0) { 1717152180Sgrehan moea_kenter(mmu, va, VM_PAGE_TO_PHYS(*m)); 1718110172Sgrehan va += PAGE_SIZE; 1719110172Sgrehan m++; 1720110172Sgrehan } 172190643Sbenno} 172277957Sbenno 172391456Sbenno/* 172491456Sbenno * Remove page mappings from kernel virtual address space. Intended for 1725152180Sgrehan * temporary mappings entered by moea_qenter. 172691456Sbenno */ 172790643Sbennovoid 1728152180Sgrehanmoea_qremove(mmu_t mmu, vm_offset_t sva, int count) 172990643Sbenno{ 1730110172Sgrehan vm_offset_t va; 173191456Sbenno 1732110172Sgrehan va = sva; 1733110172Sgrehan while (count-- > 0) { 1734152180Sgrehan moea_kremove(mmu, va); 1735110172Sgrehan va += PAGE_SIZE; 1736110172Sgrehan } 173777957Sbenno} 173877957Sbenno 173990643Sbennovoid 1740152180Sgrehanmoea_release(mmu_t mmu, pmap_t pmap) 174190643Sbenno{ 1742103604Sgrehan int idx, mask; 1743103604Sgrehan 1744103604Sgrehan /* 1745103604Sgrehan * Free segment register's VSID 1746103604Sgrehan */ 1747103604Sgrehan if (pmap->pm_sr[0] == 0) 1748152180Sgrehan panic("moea_release"); 1749103604Sgrehan 1750212278Snwhitehorn mtx_lock(&moea_vsid_mutex); 1751103604Sgrehan idx = VSID_TO_HASH(pmap->pm_sr[0]) & (NPMAPS-1); 1752103604Sgrehan mask = 1 << (idx % VSID_NBPW); 1753103604Sgrehan idx /= VSID_NBPW; 1754152180Sgrehan moea_vsid_bitmap[idx] &= ~mask; 1755212278Snwhitehorn mtx_unlock(&moea_vsid_mutex); 1756134329Salc PMAP_LOCK_DESTROY(pmap); 175777957Sbenno} 175877957Sbenno 175991456Sbenno/* 176091456Sbenno * Remove the given range of addresses from the specified map. 
176191456Sbenno */ 176290643Sbennovoid 1763152180Sgrehanmoea_remove(mmu_t mmu, pmap_t pm, vm_offset_t sva, vm_offset_t eva) 176477957Sbenno{ 176591456Sbenno struct pvo_entry *pvo; 176691456Sbenno int pteidx; 176791456Sbenno 1768132220Salc vm_page_lock_queues(); 1769134329Salc PMAP_LOCK(pm); 1770228412Snwhitehorn if ((eva - sva)/PAGE_SIZE < 10) { 1771228412Snwhitehorn for (; sva < eva; sva += PAGE_SIZE) { 1772228412Snwhitehorn pvo = moea_pvo_find_va(pm, sva, &pteidx); 1773228412Snwhitehorn if (pvo != NULL) 1774228412Snwhitehorn moea_pvo_remove(pvo, pteidx); 177591456Sbenno } 1776228412Snwhitehorn } else { 1777228412Snwhitehorn LIST_FOREACH(pvo, &pm->pmap_pvo, pvo_plink) { 1778228412Snwhitehorn if (PVO_VADDR(pvo) < sva || PVO_VADDR(pvo) >= eva) 1779228412Snwhitehorn continue; 1780228412Snwhitehorn moea_pvo_remove(pvo, -1); 1781228412Snwhitehorn } 178291456Sbenno } 1783140538Sgrehan PMAP_UNLOCK(pm); 1784132220Salc vm_page_unlock_queues(); 178577957Sbenno} 178677957Sbenno 178794838Sbenno/* 1788152180Sgrehan * Remove physical page from all pmaps in which it resides. moea_pvo_remove() 1789110172Sgrehan * will reflect changes in pte's back to the vm_page. 
1790110172Sgrehan */ 1791110172Sgrehanvoid 1792152180Sgrehanmoea_remove_all(mmu_t mmu, vm_page_t m) 1793110172Sgrehan{ 1794110172Sgrehan struct pvo_head *pvo_head; 1795110172Sgrehan struct pvo_entry *pvo, *next_pvo; 1796134329Salc pmap_t pmap; 1797110172Sgrehan 1798207796Salc vm_page_lock_queues(); 1799110172Sgrehan pvo_head = vm_page_to_pvoh(m); 1800110172Sgrehan for (pvo = LIST_FIRST(pvo_head); pvo != NULL; pvo = next_pvo) { 1801110172Sgrehan next_pvo = LIST_NEXT(pvo, pvo_vlink); 1802133166Sgrehan 1803134329Salc pmap = pvo->pvo_pmap; 1804134329Salc PMAP_LOCK(pmap); 1805152180Sgrehan moea_pvo_remove(pvo, -1); 1806134329Salc PMAP_UNLOCK(pmap); 1807110172Sgrehan } 1808225418Skib if ((m->aflags & PGA_WRITEABLE) && moea_is_modified(mmu, m)) { 1809208847Snwhitehorn moea_attr_clear(m, PTE_CHG); 1810204042Snwhitehorn vm_page_dirty(m); 1811204042Snwhitehorn } 1812225418Skib vm_page_aflag_clear(m, PGA_WRITEABLE); 1813207796Salc vm_page_unlock_queues(); 1814110172Sgrehan} 1815110172Sgrehan 1816110172Sgrehan/* 181790643Sbenno * Allocate a physical page of memory directly from the phys_avail map. 1818152180Sgrehan * Can only be called from moea_bootstrap before avail start and end are 181990643Sbenno * calculated. 
182083682Smp */ 182190643Sbennostatic vm_offset_t 1822152180Sgrehanmoea_bootstrap_alloc(vm_size_t size, u_int align) 182383682Smp{ 182490643Sbenno vm_offset_t s, e; 182590643Sbenno int i, j; 182683682Smp 182790643Sbenno size = round_page(size); 182890643Sbenno for (i = 0; phys_avail[i + 1] != 0; i += 2) { 182990643Sbenno if (align != 0) 183090643Sbenno s = (phys_avail[i] + align - 1) & ~(align - 1); 183190643Sbenno else 183290643Sbenno s = phys_avail[i]; 183390643Sbenno e = s + size; 183490643Sbenno 183590643Sbenno if (s < phys_avail[i] || e > phys_avail[i + 1]) 183690643Sbenno continue; 183790643Sbenno 183890643Sbenno if (s == phys_avail[i]) { 183990643Sbenno phys_avail[i] += size; 184090643Sbenno } else if (e == phys_avail[i + 1]) { 184190643Sbenno phys_avail[i + 1] -= size; 184290643Sbenno } else { 184390643Sbenno for (j = phys_avail_count * 2; j > i; j -= 2) { 184490643Sbenno phys_avail[j] = phys_avail[j - 2]; 184590643Sbenno phys_avail[j + 1] = phys_avail[j - 1]; 184690643Sbenno } 184790643Sbenno 184890643Sbenno phys_avail[i + 3] = phys_avail[i + 1]; 184990643Sbenno phys_avail[i + 1] = s; 185090643Sbenno phys_avail[i + 2] = e; 185190643Sbenno phys_avail_count++; 185290643Sbenno } 185390643Sbenno 185490643Sbenno return (s); 185583682Smp } 1856152180Sgrehan panic("moea_bootstrap_alloc: could not allocate memory"); 185783682Smp} 185883682Smp 185990643Sbennostatic void 1860152180Sgrehanmoea_syncicache(vm_offset_t pa, vm_size_t len) 186177957Sbenno{ 186290643Sbenno __syncicache((void *)pa, len); 186390643Sbenno} 186477957Sbenno 186590643Sbennostatic int 1866152180Sgrehanmoea_pvo_enter(pmap_t pm, uma_zone_t zone, struct pvo_head *pvo_head, 186790643Sbenno vm_offset_t va, vm_offset_t pa, u_int pte_lo, int flags) 186877957Sbenno{ 186990643Sbenno struct pvo_entry *pvo; 187090643Sbenno u_int sr; 187190643Sbenno int first; 187290643Sbenno u_int ptegidx; 187390643Sbenno int i; 1874103604Sgrehan int bootstrap; 187577957Sbenno 1876152180Sgrehan moea_pvo_enter_calls++; 
187796250Sbenno first = 0; 1878103604Sgrehan bootstrap = 0; 187990643Sbenno 188090643Sbenno /* 188190643Sbenno * Compute the PTE Group index. 188290643Sbenno */ 188390643Sbenno va &= ~ADDR_POFF; 188490643Sbenno sr = va_to_sr(pm->pm_sr, va); 188590643Sbenno ptegidx = va_to_pteg(sr, va); 188690643Sbenno 188790643Sbenno /* 188890643Sbenno * Remove any existing mapping for this page. Reuse the pvo entry if 188990643Sbenno * there is a mapping. 189090643Sbenno */ 1891152180Sgrehan mtx_lock(&moea_table_mutex); 1892152180Sgrehan LIST_FOREACH(pvo, &moea_pvo_table[ptegidx], pvo_olink) { 189390643Sbenno if (pvo->pvo_pmap == pm && PVO_VADDR(pvo) == va) { 1894183290Snwhitehorn if ((pvo->pvo_pte.pte.pte_lo & PTE_RPGN) == pa && 1895183290Snwhitehorn (pvo->pvo_pte.pte.pte_lo & PTE_PP) == 189696334Sbenno (pte_lo & PTE_PP)) { 1897152180Sgrehan mtx_unlock(&moea_table_mutex); 189892521Sbenno return (0); 189996334Sbenno } 1900152180Sgrehan moea_pvo_remove(pvo, -1); 190190643Sbenno break; 190290643Sbenno } 190390643Sbenno } 190490643Sbenno 190590643Sbenno /* 190690643Sbenno * If we aren't overwriting a mapping, try to allocate. 
190790643Sbenno */ 1908152180Sgrehan if (moea_initialized) { 190992847Sjeff pvo = uma_zalloc(zone, M_NOWAIT); 191092521Sbenno } else { 1911152180Sgrehan if (moea_bpvo_pool_index >= BPVO_POOL_SIZE) { 1912152180Sgrehan panic("moea_enter: bpvo pool exhausted, %d, %d, %d", 1913152180Sgrehan moea_bpvo_pool_index, BPVO_POOL_SIZE, 191499037Sbenno BPVO_POOL_SIZE * sizeof(struct pvo_entry)); 191592521Sbenno } 1916152180Sgrehan pvo = &moea_bpvo_pool[moea_bpvo_pool_index]; 1917152180Sgrehan moea_bpvo_pool_index++; 1918103604Sgrehan bootstrap = 1; 191992521Sbenno } 192090643Sbenno 192190643Sbenno if (pvo == NULL) { 1922152180Sgrehan mtx_unlock(&moea_table_mutex); 192390643Sbenno return (ENOMEM); 192490643Sbenno } 192590643Sbenno 1926152180Sgrehan moea_pvo_entries++; 192790643Sbenno pvo->pvo_vaddr = va; 192890643Sbenno pvo->pvo_pmap = pm; 1929152180Sgrehan LIST_INSERT_HEAD(&moea_pvo_table[ptegidx], pvo, pvo_olink); 193090643Sbenno pvo->pvo_vaddr &= ~ADDR_POFF; 193190643Sbenno if (flags & VM_PROT_EXECUTE) 193290643Sbenno pvo->pvo_vaddr |= PVO_EXECUTABLE; 193390643Sbenno if (flags & PVO_WIRED) 193490643Sbenno pvo->pvo_vaddr |= PVO_WIRED; 1935152180Sgrehan if (pvo_head != &moea_pvo_kunmanaged) 193690643Sbenno pvo->pvo_vaddr |= PVO_MANAGED; 1937103604Sgrehan if (bootstrap) 1938103604Sgrehan pvo->pvo_vaddr |= PVO_BOOTSTRAP; 1939142416Sgrehan 1940183290Snwhitehorn moea_pte_create(&pvo->pvo_pte.pte, sr, va, pa | pte_lo); 194190643Sbenno 194290643Sbenno /* 1943228412Snwhitehorn * Add to pmap list 1944228412Snwhitehorn */ 1945228412Snwhitehorn LIST_INSERT_HEAD(&pm->pmap_pvo, pvo, pvo_plink); 1946228412Snwhitehorn 1947228412Snwhitehorn /* 194890643Sbenno * Remember if the list was empty and therefore will be the first 194990643Sbenno * item. 
195090643Sbenno */ 195196250Sbenno if (LIST_FIRST(pvo_head) == NULL) 195296250Sbenno first = 1; 1953142416Sgrehan LIST_INSERT_HEAD(pvo_head, pvo, pvo_vlink); 195490643Sbenno 1955183290Snwhitehorn if (pvo->pvo_pte.pte.pte_lo & PVO_WIRED) 1956134453Salc pm->pm_stats.wired_count++; 1957134453Salc pm->pm_stats.resident_count++; 195890643Sbenno 195990643Sbenno /* 196090643Sbenno * We hope this succeeds but it isn't required. 196190643Sbenno */ 1962183290Snwhitehorn i = moea_pte_insert(ptegidx, &pvo->pvo_pte.pte); 196390643Sbenno if (i >= 0) { 196490643Sbenno PVO_PTEGIDX_SET(pvo, i); 196590643Sbenno } else { 1966152180Sgrehan panic("moea_pvo_enter: overflow"); 1967152180Sgrehan moea_pte_overflow++; 196890643Sbenno } 1969152180Sgrehan mtx_unlock(&moea_table_mutex); 197090643Sbenno 197190643Sbenno return (first ? ENOENT : 0); 197277957Sbenno} 197377957Sbenno 197490643Sbennostatic void 1975152180Sgrehanmoea_pvo_remove(struct pvo_entry *pvo, int pteidx) 197677957Sbenno{ 197790643Sbenno struct pte *pt; 197877957Sbenno 197990643Sbenno /* 198090643Sbenno * If there is an active pte entry, we need to deactivate it (and 198190643Sbenno * save the ref & cfg bits). 198290643Sbenno */ 1983152180Sgrehan pt = moea_pvo_to_pte(pvo, pteidx); 198490643Sbenno if (pt != NULL) { 1985183290Snwhitehorn moea_pte_unset(pt, &pvo->pvo_pte.pte, pvo->pvo_vaddr); 1986159928Salc mtx_unlock(&moea_table_mutex); 198790643Sbenno PVO_PTEGIDX_CLR(pvo); 198890643Sbenno } else { 1989152180Sgrehan moea_pte_overflow--; 1990142416Sgrehan } 199190643Sbenno 199290643Sbenno /* 199390643Sbenno * Update our statistics. 199490643Sbenno */ 199590643Sbenno pvo->pvo_pmap->pm_stats.resident_count--; 1996183290Snwhitehorn if (pvo->pvo_pte.pte.pte_lo & PVO_WIRED) 199790643Sbenno pvo->pvo_pmap->pm_stats.wired_count--; 199890643Sbenno 199990643Sbenno /* 200090643Sbenno * Save the REF/CHG bits into their cache if the page is managed. 
200190643Sbenno */ 2002224746Skib if ((pvo->pvo_vaddr & PVO_MANAGED) == PVO_MANAGED) { 200390643Sbenno struct vm_page *pg; 200490643Sbenno 2005183290Snwhitehorn pg = PHYS_TO_VM_PAGE(pvo->pvo_pte.pte.pte_lo & PTE_RPGN); 200690643Sbenno if (pg != NULL) { 2007183290Snwhitehorn moea_attr_save(pg, pvo->pvo_pte.pte.pte_lo & 200890643Sbenno (PTE_REF | PTE_CHG)); 200990643Sbenno } 201090643Sbenno } 201190643Sbenno 201290643Sbenno /* 2013228412Snwhitehorn * Remove this PVO from the PV and pmap lists. 201490643Sbenno */ 201590643Sbenno LIST_REMOVE(pvo, pvo_vlink); 2016228412Snwhitehorn LIST_REMOVE(pvo, pvo_plink); 201790643Sbenno 201890643Sbenno /* 201990643Sbenno * Remove this from the overflow list and return it to the pool 202090643Sbenno * if we aren't going to reuse it. 202190643Sbenno */ 202290643Sbenno LIST_REMOVE(pvo, pvo_olink); 202392521Sbenno if (!(pvo->pvo_vaddr & PVO_BOOTSTRAP)) 2024152180Sgrehan uma_zfree(pvo->pvo_vaddr & PVO_MANAGED ? moea_mpvo_zone : 2025152180Sgrehan moea_upvo_zone, pvo); 2026152180Sgrehan moea_pvo_entries--; 2027152180Sgrehan moea_pvo_remove_calls++; 202877957Sbenno} 202977957Sbenno 203090643Sbennostatic __inline int 2031152180Sgrehanmoea_pvo_pte_index(const struct pvo_entry *pvo, int ptegidx) 203277957Sbenno{ 203390643Sbenno int pteidx; 203477957Sbenno 203590643Sbenno /* 203690643Sbenno * We can find the actual pte entry without searching by grabbing 203790643Sbenno * the PTEG index from 3 unused bits in pte_lo[11:9] and by 203890643Sbenno * noticing the HID bit. 
203990643Sbenno */ 204090643Sbenno pteidx = ptegidx * 8 + PVO_PTEGIDX_GET(pvo); 2041183290Snwhitehorn if (pvo->pvo_pte.pte.pte_hi & PTE_HID) 2042152180Sgrehan pteidx ^= moea_pteg_mask * 8; 204390643Sbenno 204490643Sbenno return (pteidx); 204577957Sbenno} 204677957Sbenno 204790643Sbennostatic struct pvo_entry * 2048152180Sgrehanmoea_pvo_find_va(pmap_t pm, vm_offset_t va, int *pteidx_p) 204977957Sbenno{ 205090643Sbenno struct pvo_entry *pvo; 205190643Sbenno int ptegidx; 205290643Sbenno u_int sr; 205377957Sbenno 205490643Sbenno va &= ~ADDR_POFF; 205590643Sbenno sr = va_to_sr(pm->pm_sr, va); 205690643Sbenno ptegidx = va_to_pteg(sr, va); 205790643Sbenno 2058152180Sgrehan mtx_lock(&moea_table_mutex); 2059152180Sgrehan LIST_FOREACH(pvo, &moea_pvo_table[ptegidx], pvo_olink) { 206090643Sbenno if (pvo->pvo_pmap == pm && PVO_VADDR(pvo) == va) { 206190643Sbenno if (pteidx_p) 2062152180Sgrehan *pteidx_p = moea_pvo_pte_index(pvo, ptegidx); 2063134535Salc break; 206490643Sbenno } 206590643Sbenno } 2066152180Sgrehan mtx_unlock(&moea_table_mutex); 206790643Sbenno 2068134535Salc return (pvo); 206977957Sbenno} 207077957Sbenno 207190643Sbennostatic struct pte * 2072152180Sgrehanmoea_pvo_to_pte(const struct pvo_entry *pvo, int pteidx) 207377957Sbenno{ 207490643Sbenno struct pte *pt; 207577957Sbenno 207690643Sbenno /* 207790643Sbenno * If we haven't been supplied the ptegidx, calculate it. 
207890643Sbenno */ 207990643Sbenno if (pteidx == -1) { 208090643Sbenno int ptegidx; 208190643Sbenno u_int sr; 208277957Sbenno 208390643Sbenno sr = va_to_sr(pvo->pvo_pmap->pm_sr, pvo->pvo_vaddr); 208490643Sbenno ptegidx = va_to_pteg(sr, pvo->pvo_vaddr); 2085152180Sgrehan pteidx = moea_pvo_pte_index(pvo, ptegidx); 208690643Sbenno } 208790643Sbenno 2088152180Sgrehan pt = &moea_pteg_table[pteidx >> 3].pt[pteidx & 7]; 2089159928Salc mtx_lock(&moea_table_mutex); 209090643Sbenno 2091183290Snwhitehorn if ((pvo->pvo_pte.pte.pte_hi & PTE_VALID) && !PVO_PTEGIDX_ISSET(pvo)) { 2092152180Sgrehan panic("moea_pvo_to_pte: pvo %p has valid pte in pvo but no " 209390643Sbenno "valid pte index", pvo); 209490643Sbenno } 209590643Sbenno 2096183290Snwhitehorn if ((pvo->pvo_pte.pte.pte_hi & PTE_VALID) == 0 && PVO_PTEGIDX_ISSET(pvo)) { 2097152180Sgrehan panic("moea_pvo_to_pte: pvo %p has valid pte index in pvo " 209890643Sbenno "pvo but no valid pte", pvo); 209990643Sbenno } 210090643Sbenno 2101183290Snwhitehorn if ((pt->pte_hi ^ (pvo->pvo_pte.pte.pte_hi & ~PTE_VALID)) == PTE_VALID) { 2102183290Snwhitehorn if ((pvo->pvo_pte.pte.pte_hi & PTE_VALID) == 0) { 2103152180Sgrehan panic("moea_pvo_to_pte: pvo %p has valid pte in " 2104152180Sgrehan "moea_pteg_table %p but invalid in pvo", pvo, pt); 210577957Sbenno } 210690643Sbenno 2107183290Snwhitehorn if (((pt->pte_lo ^ pvo->pvo_pte.pte.pte_lo) & ~(PTE_CHG|PTE_REF)) 210890643Sbenno != 0) { 2109152180Sgrehan panic("moea_pvo_to_pte: pvo %p pte does not match " 2110152180Sgrehan "pte %p in moea_pteg_table", pvo, pt); 211190643Sbenno } 211290643Sbenno 2113159928Salc mtx_assert(&moea_table_mutex, MA_OWNED); 211490643Sbenno return (pt); 211577957Sbenno } 211677957Sbenno 2117183290Snwhitehorn if (pvo->pvo_pte.pte.pte_hi & PTE_VALID) { 2118152180Sgrehan panic("moea_pvo_to_pte: pvo %p has invalid pte %p in " 2119152180Sgrehan "moea_pteg_table but valid in pvo", pvo, pt); 212090643Sbenno } 212177957Sbenno 2122159928Salc mtx_unlock(&moea_table_mutex); 
212390643Sbenno return (NULL); 212477957Sbenno} 212578880Sbenno 212678880Sbenno/* 212790643Sbenno * XXX: THIS STUFF SHOULD BE IN pte.c? 212878880Sbenno */ 212990643Sbennoint 2130152180Sgrehanmoea_pte_spill(vm_offset_t addr) 213178880Sbenno{ 213290643Sbenno struct pvo_entry *source_pvo, *victim_pvo; 213390643Sbenno struct pvo_entry *pvo; 213490643Sbenno int ptegidx, i, j; 213590643Sbenno u_int sr; 213690643Sbenno struct pteg *pteg; 213790643Sbenno struct pte *pt; 213878880Sbenno 2139152180Sgrehan moea_pte_spills++; 214090643Sbenno 214194836Sbenno sr = mfsrin(addr); 214290643Sbenno ptegidx = va_to_pteg(sr, addr); 214390643Sbenno 214478880Sbenno /* 214590643Sbenno * Have to substitute some entry. Use the primary hash for this. 214690643Sbenno * Use low bits of timebase as random generator. 214778880Sbenno */ 2148152180Sgrehan pteg = &moea_pteg_table[ptegidx]; 2149152180Sgrehan mtx_lock(&moea_table_mutex); 215090643Sbenno __asm __volatile("mftb %0" : "=r"(i)); 215190643Sbenno i &= 7; 215290643Sbenno pt = &pteg->pt[i]; 215378880Sbenno 215490643Sbenno source_pvo = NULL; 215590643Sbenno victim_pvo = NULL; 2156152180Sgrehan LIST_FOREACH(pvo, &moea_pvo_table[ptegidx], pvo_olink) { 215778880Sbenno /* 215890643Sbenno * We need to find a pvo entry for this address. 215978880Sbenno */ 216090643Sbenno if (source_pvo == NULL && 2161183290Snwhitehorn moea_pte_match(&pvo->pvo_pte.pte, sr, addr, 2162183290Snwhitehorn pvo->pvo_pte.pte.pte_hi & PTE_HID)) { 216390643Sbenno /* 216490643Sbenno * Now found an entry to be spilled into the pteg. 216590643Sbenno * The PTE is now valid, so we know it's active. 
216690643Sbenno */ 2167183290Snwhitehorn j = moea_pte_insert(ptegidx, &pvo->pvo_pte.pte); 216878880Sbenno 216990643Sbenno if (j >= 0) { 217090643Sbenno PVO_PTEGIDX_SET(pvo, j); 2171152180Sgrehan moea_pte_overflow--; 2172152180Sgrehan mtx_unlock(&moea_table_mutex); 217390643Sbenno return (1); 217490643Sbenno } 217590643Sbenno 217690643Sbenno source_pvo = pvo; 217790643Sbenno 217890643Sbenno if (victim_pvo != NULL) 217990643Sbenno break; 218090643Sbenno } 218190643Sbenno 218278880Sbenno /* 218390643Sbenno * We also need the pvo entry of the victim we are replacing 218490643Sbenno * so save the R & C bits of the PTE. 218578880Sbenno */ 218690643Sbenno if ((pt->pte_hi & PTE_HID) == 0 && victim_pvo == NULL && 2187183290Snwhitehorn moea_pte_compare(pt, &pvo->pvo_pte.pte)) { 218890643Sbenno victim_pvo = pvo; 218990643Sbenno if (source_pvo != NULL) 219090643Sbenno break; 219190643Sbenno } 219290643Sbenno } 219378880Sbenno 2194134535Salc if (source_pvo == NULL) { 2195152180Sgrehan mtx_unlock(&moea_table_mutex); 219690643Sbenno return (0); 2197134535Salc } 219890643Sbenno 219990643Sbenno if (victim_pvo == NULL) { 220090643Sbenno if ((pt->pte_hi & PTE_HID) == 0) 2201152180Sgrehan panic("moea_pte_spill: victim p-pte (%p) has no pvo" 220290643Sbenno "entry", pt); 220390643Sbenno 220478880Sbenno /* 220590643Sbenno * If this is a secondary PTE, we need to search it's primary 220690643Sbenno * pvo bucket for the matching PVO. 220778880Sbenno */ 2208152180Sgrehan LIST_FOREACH(pvo, &moea_pvo_table[ptegidx ^ moea_pteg_mask], 220990643Sbenno pvo_olink) { 221090643Sbenno /* 221190643Sbenno * We also need the pvo entry of the victim we are 221290643Sbenno * replacing so save the R & C bits of the PTE. 
221390643Sbenno */ 2214183290Snwhitehorn if (moea_pte_compare(pt, &pvo->pvo_pte.pte)) { 221590643Sbenno victim_pvo = pvo; 221690643Sbenno break; 221790643Sbenno } 221890643Sbenno } 221978880Sbenno 222090643Sbenno if (victim_pvo == NULL) 2221152180Sgrehan panic("moea_pte_spill: victim s-pte (%p) has no pvo" 222290643Sbenno "entry", pt); 222390643Sbenno } 222478880Sbenno 222590643Sbenno /* 222690643Sbenno * We are invalidating the TLB entry for the EA we are replacing even 222790643Sbenno * though it's valid. If we don't, we lose any ref/chg bit changes 222890643Sbenno * contained in the TLB entry. 222990643Sbenno */ 2230183290Snwhitehorn source_pvo->pvo_pte.pte.pte_hi &= ~PTE_HID; 223178880Sbenno 2232183290Snwhitehorn moea_pte_unset(pt, &victim_pvo->pvo_pte.pte, victim_pvo->pvo_vaddr); 2233183290Snwhitehorn moea_pte_set(pt, &source_pvo->pvo_pte.pte); 223490643Sbenno 223590643Sbenno PVO_PTEGIDX_CLR(victim_pvo); 223690643Sbenno PVO_PTEGIDX_SET(source_pvo, i); 2237152180Sgrehan moea_pte_replacements++; 223890643Sbenno 2239152180Sgrehan mtx_unlock(&moea_table_mutex); 224090643Sbenno return (1); 224190643Sbenno} 224290643Sbenno 224390643Sbennostatic int 2244152180Sgrehanmoea_pte_insert(u_int ptegidx, struct pte *pvo_pt) 224590643Sbenno{ 224690643Sbenno struct pte *pt; 224790643Sbenno int i; 224890643Sbenno 2249159928Salc mtx_assert(&moea_table_mutex, MA_OWNED); 2250159928Salc 225190643Sbenno /* 225290643Sbenno * First try primary hash. 225390643Sbenno */ 2254152180Sgrehan for (pt = moea_pteg_table[ptegidx].pt, i = 0; i < 8; i++, pt++) { 225590643Sbenno if ((pt->pte_hi & PTE_VALID) == 0) { 225690643Sbenno pvo_pt->pte_hi &= ~PTE_HID; 2257152180Sgrehan moea_pte_set(pt, pvo_pt); 225890643Sbenno return (i); 225978880Sbenno } 226090643Sbenno } 226178880Sbenno 226290643Sbenno /* 226390643Sbenno * Now try secondary hash. 
 */
	/* Primary PTEG was full: probe the secondary hash (PTE_HID set). */
	ptegidx ^= moea_pteg_mask;

	for (pt = moea_pteg_table[ptegidx].pt, i = 0; i < 8; i++, pt++) {
		if ((pt->pte_hi & PTE_VALID) == 0) {
			pvo_pt->pte_hi |= PTE_HID;
			moea_pte_set(pt, pvo_pt);
			return (i);
		}
	}

	panic("moea_pte_insert: overflow");
	return (-1);
}

/*
 * Query whether the REF/CHG attribute bit 'ptebit' is set for page 'm'.
 *
 * Checks, in order of increasing cost: the software attribute cache
 * (moea_attr_fetch), the REF/CHG bits cached in each pvo entry, and
 * finally the hardware PTEs themselves (after a powerpc_sync() to flush
 * any pending REF/CHG updates).  A hit found in the slower paths is
 * written back to the attribute cache via moea_attr_save().
 *
 * Returns TRUE if the bit is set anywhere, FALSE otherwise.
 */
static boolean_t
moea_query_bit(vm_page_t m, int ptebit)
{
	struct pvo_entry *pvo;
	struct pte *pt;

	/* Fast path: the bit was already cached in the page attributes. */
	if (moea_attr_fetch(m) & ptebit)
		return (TRUE);

	vm_page_lock_queues();
	LIST_FOREACH(pvo, vm_page_to_pvoh(m), pvo_vlink) {

		/*
		 * See if we saved the bit off.  If so, cache it and return
		 * success.
		 */
		if (pvo->pvo_pte.pte.pte_lo & ptebit) {
			moea_attr_save(m, ptebit);
			vm_page_unlock_queues();
			return (TRUE);
		}
	}

	/*
	 * No luck, now go through the hard part of looking at the PTEs
	 * themselves.  Sync so that any pending REF/CHG bits are flushed to
	 * the PTEs.
	 */
	powerpc_sync();
	LIST_FOREACH(pvo, vm_page_to_pvoh(m), pvo_vlink) {

		/*
		 * See if this pvo has a valid PTE.  if so, fetch the
		 * REF/CHG bits from the valid PTE.  If the appropriate
		 * ptebit is set, cache it and return success.
		 */
		pt = moea_pvo_to_pte(pvo, -1);
		if (pt != NULL) {
			moea_pte_synch(pt, &pvo->pvo_pte.pte);
			/*
			 * NOTE(review): moea_pvo_to_pte appears to return
			 * with moea_table_mutex held on success — drop it
			 * once the PTE has been synched.
			 */
			mtx_unlock(&moea_table_mutex);
			if (pvo->pvo_pte.pte.pte_lo & ptebit) {
				moea_attr_save(m, ptebit);
				vm_page_unlock_queues();
				return (TRUE);
			}
		}
	}

	vm_page_unlock_queues();
	return (FALSE);
}

/*
 * Clear the REF/CHG attribute bit 'ptebit' for page 'm' everywhere it is
 * recorded: the software attribute cache, each pvo entry, and any valid
 * hardware PTE backing a pvo.
 *
 * Returns the number of mappings whose PTE actually had the bit set.
 */
static u_int
moea_clear_bit(vm_page_t m, int ptebit)
{
	u_int count;
	struct pvo_entry *pvo;
	struct pte *pt;

	vm_page_lock_queues();

	/*
	 * Clear the cached value.
	 */
	moea_attr_clear(m, ptebit);

	/*
	 * Sync so that any pending REF/CHG bits are flushed to the PTEs (so
	 * we can reset the right ones).  note that since the pvo entries and
	 * list heads are accessed via BAT0 and are never placed in the page
	 * table, we don't have to worry about further accesses setting the
	 * REF/CHG bits.
	 */
	powerpc_sync();

	/*
	 * For each pvo entry, clear the pvo's ptebit.  If this pvo has a
	 * valid pte clear the ptebit from the valid pte.
	 */
	count = 0;
	LIST_FOREACH(pvo, vm_page_to_pvoh(m), pvo_vlink) {
		pt = moea_pvo_to_pte(pvo, -1);
		if (pt != NULL) {
			moea_pte_synch(pt, &pvo->pvo_pte.pte);
			if (pvo->pvo_pte.pte.pte_lo & ptebit) {
				count++;
				moea_pte_clear(pt, PVO_VADDR(pvo), ptebit);
			}
			/* Released only on the pt != NULL path; see NOTE above. */
			mtx_unlock(&moea_table_mutex);
		}
		/* Clear the software-cached copy whether or not a PTE exists. */
		pvo->pvo_pte.pte.pte_lo &= ~ptebit;
	}

	vm_page_unlock_queues();
	return (count);
}

/*
 * Return true if the physical range is encompassed by the battable[idx]
 *
 * Returns 0 when [pa, pa+size) is fully covered by a valid, cache-inhibited,
 * guarded, read/write BAT entry (i.e. usable as an I/O window); otherwise
 * EINVAL (entry not valid), EPERM (wrong protection/cacheability), or
 * ERANGE (range not contained).
 */
static int
moea_bat_mapped(int idx, vm_offset_t pa, vm_size_t size)
{
	u_int prot;
	u_int32_t start;
	u_int32_t end;
	u_int32_t bat_ble;

	/*
	 * Return immediately if not a valid mapping
	 */
	if (!(battable[idx].batu & BAT_Vs))
		return (EINVAL);

	/*
	 * The BAT entry must be cache-inhibited, guarded, and r/w
	 * so it can function as an i/o page
	 */
	prot = battable[idx].batl & (BAT_I|BAT_G|BAT_PP_RW);
	if (prot != (BAT_I|BAT_G|BAT_PP_RW))
		return (EPERM);

	/*
	 * The address should be within the BAT range.  Assume that the
	 * start address in the BAT has the correct alignment (thus
	 * not requiring masking)
	 */
	start = battable[idx].batl & BAT_PBS;
	/* Block-length bits scaled to bytes: end = start + (BL+1)*128KB - 1. */
	bat_ble = (battable[idx].batu & ~(BAT_EBS)) | 0x03;
	end = start | (bat_ble << 15) | 0x7fff;

	if ((pa < start) || ((pa + size) > end))
		return (ERANGE);

	return (0);
}

/*
 * Return 0 if [pa, pa+size) is covered by any of the 16 BAT entries
 * (and is therefore directly addressable), EFAULT otherwise.
 */
boolean_t
moea_dev_direct_mapped(mmu_t mmu, vm_offset_t pa, vm_size_t size)
{
	int i;

	/*
	 * This currently does not work for entries that
	 * overlap 256M BAT segments.
	 */

	for(i = 0; i < 16; i++)
		if (moea_bat_mapped(i, pa, size) == 0)
			return (0);

	return (EFAULT);
}

/*
 * Map a set of physical memory pages into the kernel virtual
 * address space.  Return a pointer to where it is mapped.  This
 * routine is intended to be used for mapping device memory,
 * NOT real memory.
 */
void *
moea_mapdev(mmu_t mmu, vm_offset_t pa, vm_size_t size)
{

	/* Default-attribute wrapper around moea_mapdev_attr(). */
	return (moea_mapdev_attr(mmu, pa, size, VM_MEMATTR_DEFAULT));
}

void *
moea_mapdev_attr(mmu_t mmu, vm_offset_t pa, vm_size_t size, vm_memattr_t ma)
{
	vm_offset_t va, tmpva, ppa, offset;
	int i;

	ppa = trunc_page(pa);
	offset = pa & PAGE_MASK;
	size = roundup(offset + size, PAGE_SIZE);

	/*
	 * If the physical address lies within a valid BAT table entry,
	 * return the 1:1 mapping.  This currently doesn't work
	 * for regions that overlap 256M BAT segments.
	 */
	for (i = 0; i < 16; i++) {
		if (moea_bat_mapped(i, pa, size) == 0)
			return ((void *) pa);
	}

	va = kmem_alloc_nofault(kernel_map, size);
	if (!va)
		panic("moea_mapdev: Couldn't alloc kernel virtual memory");

	/* Enter each page and invalidate any stale TLB entry for its VA. */
	for (tmpva = va; size > 0;) {
		moea_kenter_attr(mmu, tmpva, ppa, ma);
		tlbie(tmpva);
		size -= PAGE_SIZE;
		tmpva += PAGE_SIZE;
		ppa += PAGE_SIZE;
	}

	/* Re-apply the sub-page offset of the original physical address. */
	return ((void *)(va + offset));
}

void
moea_unmapdev(mmu_t mmu, vm_offset_t va, vm_size_t size)
{
	vm_offset_t base, offset;

	/*
	 * If this is outside kernel virtual space, then it's a
	 * battable entry and doesn't require unmapping
	 */
	if ((va >= VM_MIN_KERNEL_ADDRESS) && (va <= virtual_end)) {
		base = trunc_page(va);
		offset = va & PAGE_MASK;
		size = roundup(offset + size, PAGE_SIZE);
		kmem_free(kernel_map, base, size);
	}
}

/*
 * Synchronize the instruction cache for the range [va, va+sz) in pmap 'pm',
 * one page at a time, for each virtual page that has a backing mapping.
 * Pages with no pvo entry are silently skipped.
 */
static void
moea_sync_icache(mmu_t mmu, pmap_t pm, vm_offset_t va, vm_size_t sz)
{
	struct pvo_entry *pvo;
	vm_offset_t lim;
	vm_paddr_t pa;
	vm_size_t len;

	PMAP_LOCK(pm);
	while (sz > 0) {
		/* Process at most up to the next page boundary per pass. */
		lim = round_page(va);
		len = MIN(lim - va, sz);
		pvo = moea_pvo_find_va(pm, va & ~ADDR_POFF, NULL);
		if (pvo != NULL) {
			/* Physical page from the pvo plus the in-page offset. */
			pa = (pvo->pvo_pte.pte.pte_lo & PTE_RPGN) |
			    (va & ADDR_POFF);
			moea_syncicache(pa, len);
		}
		va += len;
		sz -= len;
	}
	PMAP_UNLOCK(pm);
}