mmu_oea.c revision 159303
/*-
 * Copyright (c) 2001 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Matt Thomas <matt@3am-software.com> of Allegro Networks, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the NetBSD
 *	Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */
/*-
 * Copyright (C) 1995, 1996 Wolfgang Solfrank.
 * Copyright (C) 1995, 1996 TooLs GmbH.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by TooLs GmbH.
 * 4. The name of TooLs GmbH may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY TOOLS GMBH ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL TOOLS GMBH BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
 * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
 * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 *	$NetBSD: pmap.c,v 1.28 2000/03/26 20:42:36 kleink Exp $
 */
/*-
 * Copyright (C) 2001 Benno Rice.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY Benno Rice ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL TOOLS GMBH BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
 * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
 * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: head/sys/powerpc/aim/mmu_oea.c 159303 2006-06-05 20:35:27Z alc $");

/*
 * Manages physical address maps.
 *
 * In addition to hardware address maps, this module is called upon to
 * provide software-use-only maps which may or may not be stored in the
 * same form as hardware maps.  These pseudo-maps are used to store
 * intermediate results from copy operations to and from address spaces.
 *
 * Since the information managed by this module is also stored by the
 * logical address mapping module, this module may throw away valid virtual
 * to physical mappings at almost any time.  However, invalidations of
 * mappings must be done as requested.
 *
 * In order to cope with hardware architectures which make virtual to
 * physical map invalidates expensive, this module may delay invalidate
 * reduced protection operations until such time as they are actually
 * necessary.  This module is given full information as to which processors
 * are currently using which maps, and to when physical maps must be made
 * correct.
 */
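
/*
 * A quick sketch of the 32-bit OEA translation model this file targets
 * (background only, not a complete description): an effective address is
 * split into a 4-bit segment register index, a 16-bit virtual page index
 * and a 12-bit byte offset.  The selected segment register supplies a
 * 24-bit VSID; VSID and page index form the virtual page number, which
 * is hashed (see va_to_pteg()) to choose one of the 8-slot PTE groups
 * the MMU searches on a TLB miss.  The BAT registers sit in front of
 * this mechanism and can map large blocks, e.g. 256 MB, without
 * consuming PTEG slots.
 */
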
#include "opt_kstack_pages.h"

#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/lock.h>
#include <sys/msgbuf.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/sysctl.h>
#include <sys/systm.h>
#include <sys/vmmeter.h>

#include <dev/ofw/openfirm.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/vm_kern.h>
#include <vm/vm_page.h>
#include <vm/vm_map.h>
#include <vm/vm_object.h>
#include <vm/vm_extern.h>
#include <vm/vm_pageout.h>
#include <vm/vm_pager.h>
#include <vm/uma.h>

#include <machine/cpu.h>
#include <machine/powerpc.h>
#include <machine/bat.h>
#include <machine/frame.h>
#include <machine/md_var.h>
#include <machine/psl.h>
#include <machine/pte.h>
#include <machine/sr.h>
#include <machine/mmuvar.h>

#include "mmu_if.h"

#define	MOEA_DEBUG

#define	TODO	panic("%s: not implemented", __func__);

#define	TLBIE(va)	__asm __volatile("tlbie %0" :: "r"(va))
#define	TLBSYNC()	__asm __volatile("tlbsync");
#define	SYNC()		__asm __volatile("sync");
#define	EIEIO()		__asm __volatile("eieio");

#define	VSID_MAKE(sr, hash)	((sr) | (((hash) & 0xfffff) << 4))
#define	VSID_TO_SR(vsid)	((vsid) & 0xf)
#define	VSID_TO_HASH(vsid)	(((vsid) >> 4) & 0xfffff)

#define	PVO_PTEGIDX_MASK	0x007		/* which PTEG slot */
#define	PVO_PTEGIDX_VALID	0x008		/* slot is valid */
#define	PVO_WIRED		0x010		/* PVO entry is wired */
#define	PVO_MANAGED		0x020		/* PVO entry is managed */
#define	PVO_EXECUTABLE		0x040		/* PVO entry is executable */
#define	PVO_BOOTSTRAP		0x080		/* PVO entry allocated during
						   bootstrap */
#define	PVO_FAKE		0x100		/* fictitious phys page */
#define	PVO_VADDR(pvo)		((pvo)->pvo_vaddr & ~ADDR_POFF)
#define	PVO_ISEXECUTABLE(pvo)	((pvo)->pvo_vaddr & PVO_EXECUTABLE)
#define	PVO_ISFAKE(pvo)		((pvo)->pvo_vaddr & PVO_FAKE)
#define	PVO_PTEGIDX_GET(pvo)	((pvo)->pvo_vaddr & PVO_PTEGIDX_MASK)
#define	PVO_PTEGIDX_ISSET(pvo)	((pvo)->pvo_vaddr & PVO_PTEGIDX_VALID)
#define	PVO_PTEGIDX_CLR(pvo)	\
	((void)((pvo)->pvo_vaddr &= ~(PVO_PTEGIDX_VALID|PVO_PTEGIDX_MASK)))
#define	PVO_PTEGIDX_SET(pvo, i)	\
	((void)((pvo)->pvo_vaddr |= (i)|PVO_PTEGIDX_VALID))
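
/*
 * A note on the encoding above (illustrative, derived from the macros):
 * pvo_vaddr is always page aligned, so its low twelve bits are free to
 * carry state.  Bits 0-2 cache which of the eight slots in the PTEG the
 * mapping currently occupies, bit 3 says whether that slot index is
 * valid, and the remaining flag bits describe the mapping itself.  For
 * example, a wired kernel mapping sitting in PTEG slot 5 would have
 * (pvo_vaddr & ADDR_POFF) == (PVO_WIRED | PVO_PTEGIDX_VALID | 5).
 */
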
#define	MOEA_PVO_CHECK(pvo)

struct ofw_map {
	vm_offset_t	om_va;
	vm_size_t	om_len;
	vm_offset_t	om_pa;
	u_int		om_mode;
};

/*
 * Map of physical memory regions.
 */
static struct	mem_region *regions;
static struct	mem_region *pregions;
u_int		phys_avail_count;
int		regions_sz, pregions_sz;
static struct	ofw_map *translations;

extern struct pmap ofw_pmap;

/*
 * Lock for the pteg and pvo tables.
 */
struct mtx	moea_table_mutex;

/*
 * PTEG data.
 */
static struct	pteg *moea_pteg_table;
u_int		moea_pteg_count;
u_int		moea_pteg_mask;

/*
 * PVO data.
 */
struct	pvo_head *moea_pvo_table;		/* pvo entries by pteg index */
struct	pvo_head moea_pvo_kunmanaged =
    LIST_HEAD_INITIALIZER(moea_pvo_kunmanaged);	/* list of unmanaged pages */
struct	pvo_head moea_pvo_unmanaged =
    LIST_HEAD_INITIALIZER(moea_pvo_unmanaged);	/* list of unmanaged pages */

uma_zone_t	moea_upvo_zone;	/* zone for pvo entries for unmanaged pages */
uma_zone_t	moea_mpvo_zone;	/* zone for pvo entries for managed pages */

#define	BPVO_POOL_SIZE	32768
static struct	pvo_entry *moea_bpvo_pool;
static int	moea_bpvo_pool_index = 0;
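
/*
 * A brief note on the pool above: moea_bpvo_pool is a fixed array of
 * pvo_entry structures carved out by moea_bootstrap_alloc() before the
 * UMA zones exist.  Entries handed out from it are tagged PVO_BOOTSTRAP
 * (see the flag above) so the PVO teardown path can tell they must not
 * be returned to UMA; the pool index only ever grows.
 */
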
#define	VSID_NBPW	(sizeof(u_int32_t) * 8)
static u_int	moea_vsid_bitmap[NPMAPS / VSID_NBPW];

static boolean_t moea_initialized = FALSE;

/*
 * Statistics.
 */
u_int	moea_pte_valid = 0;
u_int	moea_pte_overflow = 0;
u_int	moea_pte_replacements = 0;
u_int	moea_pvo_entries = 0;
u_int	moea_pvo_enter_calls = 0;
u_int	moea_pvo_remove_calls = 0;
u_int	moea_pte_spills = 0;
SYSCTL_INT(_machdep, OID_AUTO, moea_pte_valid, CTLFLAG_RD, &moea_pte_valid,
    0, "");
SYSCTL_INT(_machdep, OID_AUTO, moea_pte_overflow, CTLFLAG_RD,
    &moea_pte_overflow, 0, "");
SYSCTL_INT(_machdep, OID_AUTO, moea_pte_replacements, CTLFLAG_RD,
    &moea_pte_replacements, 0, "");
SYSCTL_INT(_machdep, OID_AUTO, moea_pvo_entries, CTLFLAG_RD, &moea_pvo_entries,
    0, "");
SYSCTL_INT(_machdep, OID_AUTO, moea_pvo_enter_calls, CTLFLAG_RD,
    &moea_pvo_enter_calls, 0, "");
SYSCTL_INT(_machdep, OID_AUTO, moea_pvo_remove_calls, CTLFLAG_RD,
    &moea_pvo_remove_calls, 0, "");
SYSCTL_INT(_machdep, OID_AUTO, moea_pte_spills, CTLFLAG_RD,
    &moea_pte_spills, 0, "");

struct	pvo_entry *moea_pvo_zeropage;

vm_offset_t	moea_rkva_start = VM_MIN_KERNEL_ADDRESS;
u_int		moea_rkva_count = 4;

/*
 * Allocate physical memory for use in moea_bootstrap.
 */
static vm_offset_t	moea_bootstrap_alloc(vm_size_t, u_int);

/*
 * PTE calls.
 */
static int	moea_pte_insert(u_int, struct pte *);

/*
 * PVO calls.
 */
static int	moea_pvo_enter(pmap_t, uma_zone_t, struct pvo_head *,
		    vm_offset_t, vm_offset_t, u_int, int);
static void	moea_pvo_remove(struct pvo_entry *, int);
static struct	pvo_entry *moea_pvo_find_va(pmap_t, vm_offset_t, int *);
static struct	pte *moea_pvo_to_pte(const struct pvo_entry *, int);
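
/*
 * A sketch of how the PVO routines above fit together: every mapping is
 * represented by a pvo_entry that is linked into two lists at once, the
 * hash bucket in moea_pvo_table[] that mirrors the hardware PTEG the
 * mapping hashes to, and the per-page list used by operations such as
 * moea_page_protect().  moea_pvo_to_pte() recovers the in-table PTE (if
 * the mapping currently holds a slot) from the PTEG slot index cached
 * in the entry's low vaddr bits.
 */
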
/*
 * Utility routines.
 */
static void		moea_enter_locked(pmap_t, vm_offset_t, vm_page_t,
			    vm_prot_t, boolean_t);
static struct		pvo_entry *moea_rkva_alloc(mmu_t);
static void		moea_pa_map(struct pvo_entry *, vm_offset_t,
			    struct pte *, int *);
static void		moea_pa_unmap(struct pvo_entry *, struct pte *, int *);
static void		moea_syncicache(vm_offset_t, vm_size_t);
static boolean_t	moea_query_bit(vm_page_t, int);
static u_int		moea_clear_bit(vm_page_t, int, int *);
static void		moea_kremove(mmu_t, vm_offset_t);
static void		tlbia(void);
int		moea_pte_spill(vm_offset_t);

/*
 * Kernel MMU interface
 */
void moea_change_wiring(mmu_t, pmap_t, vm_offset_t, boolean_t);
void moea_clear_modify(mmu_t, vm_page_t);
void moea_clear_reference(mmu_t, vm_page_t);
void moea_copy_page(mmu_t, vm_page_t, vm_page_t);
void moea_enter(mmu_t, pmap_t, vm_offset_t, vm_page_t, vm_prot_t, boolean_t);
void moea_enter_object(mmu_t, pmap_t, vm_offset_t, vm_offset_t, vm_page_t,
    vm_prot_t);
vm_page_t moea_enter_quick(mmu_t, pmap_t, vm_offset_t, vm_page_t, vm_prot_t,
    vm_page_t);
vm_paddr_t moea_extract(mmu_t, pmap_t, vm_offset_t);
vm_page_t moea_extract_and_hold(mmu_t, pmap_t, vm_offset_t, vm_prot_t);
void moea_init(mmu_t);
boolean_t moea_is_modified(mmu_t, vm_page_t);
boolean_t moea_ts_referenced(mmu_t, vm_page_t);
vm_offset_t moea_map(mmu_t, vm_offset_t *, vm_offset_t, vm_offset_t, int);
boolean_t moea_page_exists_quick(mmu_t, pmap_t, vm_page_t);
void moea_page_protect(mmu_t, vm_page_t, vm_prot_t);
void moea_pinit(mmu_t, pmap_t);
void moea_pinit0(mmu_t, pmap_t);
void moea_protect(mmu_t, pmap_t, vm_offset_t, vm_offset_t, vm_prot_t);
void moea_qenter(mmu_t, vm_offset_t, vm_page_t *, int);
void moea_qremove(mmu_t, vm_offset_t, int);
void moea_release(mmu_t, pmap_t);
void moea_remove(mmu_t, pmap_t, vm_offset_t, vm_offset_t);
void moea_remove_all(mmu_t, vm_page_t);
void moea_zero_page(mmu_t, vm_page_t);
void moea_zero_page_area(mmu_t, vm_page_t, int, int);
void moea_zero_page_idle(mmu_t, vm_page_t);
void moea_activate(mmu_t, struct thread *);
void moea_deactivate(mmu_t, struct thread *);
void moea_bootstrap(mmu_t, vm_offset_t, vm_offset_t);
void *moea_mapdev(mmu_t, vm_offset_t, vm_size_t);
void moea_unmapdev(mmu_t, vm_offset_t, vm_size_t);
vm_offset_t moea_kextract(mmu_t, vm_offset_t);
void moea_kenter(mmu_t, vm_offset_t, vm_offset_t);
boolean_t moea_dev_direct_mapped(mmu_t, vm_offset_t, vm_size_t);
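
/*
 * The table below glues this pmap into the kernel's kobj-style MMU
 * dispatch (see mmu_if.h and machine/mmuvar.h): each MMUMETHOD entry
 * binds one mmu_* interface method to its moea_* implementation, and
 * MMU_DEF() registers the whole set under MMU_TYPE_OEA so the platform
 * code can select this pmap at boot.
 */
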
static mmu_method_t moea_methods[] = {
	MMUMETHOD(mmu_change_wiring,	moea_change_wiring),
	MMUMETHOD(mmu_clear_modify,	moea_clear_modify),
	MMUMETHOD(mmu_clear_reference,	moea_clear_reference),
	MMUMETHOD(mmu_copy_page,	moea_copy_page),
	MMUMETHOD(mmu_enter,		moea_enter),
	MMUMETHOD(mmu_enter_object,	moea_enter_object),
	MMUMETHOD(mmu_enter_quick,	moea_enter_quick),
	MMUMETHOD(mmu_extract,		moea_extract),
	MMUMETHOD(mmu_extract_and_hold,	moea_extract_and_hold),
	MMUMETHOD(mmu_init,		moea_init),
	MMUMETHOD(mmu_is_modified,	moea_is_modified),
	MMUMETHOD(mmu_ts_referenced,	moea_ts_referenced),
	MMUMETHOD(mmu_map,		moea_map),
	MMUMETHOD(mmu_page_exists_quick,moea_page_exists_quick),
	MMUMETHOD(mmu_page_protect,	moea_page_protect),
	MMUMETHOD(mmu_pinit,		moea_pinit),
	MMUMETHOD(mmu_pinit0,		moea_pinit0),
	MMUMETHOD(mmu_protect,		moea_protect),
	MMUMETHOD(mmu_qenter,		moea_qenter),
	MMUMETHOD(mmu_qremove,		moea_qremove),
	MMUMETHOD(mmu_release,		moea_release),
	MMUMETHOD(mmu_remove,		moea_remove),
	MMUMETHOD(mmu_remove_all,	moea_remove_all),
	MMUMETHOD(mmu_zero_page,	moea_zero_page),
	MMUMETHOD(mmu_zero_page_area,	moea_zero_page_area),
	MMUMETHOD(mmu_zero_page_idle,	moea_zero_page_idle),
	MMUMETHOD(mmu_activate,		moea_activate),
	MMUMETHOD(mmu_deactivate,	moea_deactivate),

	/* Internal interfaces */
	MMUMETHOD(mmu_bootstrap,	moea_bootstrap),
	MMUMETHOD(mmu_mapdev,		moea_mapdev),
	MMUMETHOD(mmu_unmapdev,		moea_unmapdev),
	MMUMETHOD(mmu_kextract,		moea_kextract),
	MMUMETHOD(mmu_kenter,		moea_kenter),
	MMUMETHOD(mmu_dev_direct_mapped,moea_dev_direct_mapped),

	{ 0, 0 }
};

static mmu_def_t oea_mmu = {
	MMU_TYPE_OEA,
	moea_methods,
	0
};
MMU_DEF(oea_mmu);


static __inline int
va_to_sr(u_int *sr, vm_offset_t va)
{
	return (sr[(uintptr_t)va >> ADDR_SR_SHFT]);
}

static __inline u_int
va_to_pteg(u_int sr, vm_offset_t addr)
{
	u_int hash;

	hash = (sr & SR_VSID_MASK) ^ (((u_int)addr & ADDR_PIDX) >>
	    ADDR_PIDX_SHFT);
	return (hash & moea_pteg_mask);
}
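
/*
 * An illustrative walk through the hash above, assuming the bootstrap
 * default of moea_pteg_count = 0x1000 (so moea_pteg_mask = 0xfff): for
 * a VSID of 0x00abcd and va 0x30005000, the page index is 0x5, the XOR
 * yields 0xabc8, and masking leaves PTEG 0xbc8.  The architecture's
 * secondary hash, used on overflow, is the ones' complement of the
 * primary value, again masked to the table size.
 */
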
static __inline struct pvo_head *
pa_to_pvoh(vm_offset_t pa, vm_page_t *pg_p)
{
	struct	vm_page *pg;

	pg = PHYS_TO_VM_PAGE(pa);

	if (pg_p != NULL)
		*pg_p = pg;

	if (pg == NULL)
		return (&moea_pvo_unmanaged);

	return (&pg->md.mdpg_pvoh);
}

static __inline struct pvo_head *
vm_page_to_pvoh(vm_page_t m)
{

	return (&m->md.mdpg_pvoh);
}

static __inline void
moea_attr_clear(vm_page_t m, int ptebit)
{

	m->md.mdpg_attrs &= ~ptebit;
}

static __inline int
moea_attr_fetch(vm_page_t m)
{

	return (m->md.mdpg_attrs);
}

static __inline void
moea_attr_save(vm_page_t m, int ptebit)
{

	m->md.mdpg_attrs |= ptebit;
}

static __inline int
moea_pte_compare(const struct pte *pt, const struct pte *pvo_pt)
{
	if (pt->pte_hi == pvo_pt->pte_hi)
		return (1);

	return (0);
}

static __inline int
moea_pte_match(struct pte *pt, u_int sr, vm_offset_t va, int which)
{
	return (pt->pte_hi & ~PTE_VALID) ==
	    (((sr & SR_VSID_MASK) << PTE_VSID_SHFT) |
	    ((va >> ADDR_API_SHFT) & PTE_API) | which);
}

static __inline void
moea_pte_create(struct pte *pt, u_int sr, vm_offset_t va, u_int pte_lo)
{
	/*
	 * Construct a PTE.  Default to IMB initially.  Valid bit only gets
	 * set when the real pte is set in memory.
	 *
	 * Note: Don't set the valid bit for correct operation of tlb update.
	 */
	pt->pte_hi = ((sr & SR_VSID_MASK) << PTE_VSID_SHFT) |
	    (((va & ADDR_PIDX) >> ADDR_API_SHFT) & PTE_API);
	pt->pte_lo = pte_lo;
}

static __inline void
moea_pte_synch(struct pte *pt, struct pte *pvo_pt)
{

	pvo_pt->pte_lo |= pt->pte_lo & (PTE_REF | PTE_CHG);
}

static __inline void
moea_pte_clear(struct pte *pt, vm_offset_t va, int ptebit)
{

	/*
	 * As shown in Section 7.6.3.2.3
	 */
	pt->pte_lo &= ~ptebit;
	TLBIE(va);
	EIEIO();
	TLBSYNC();
	SYNC();
}

static __inline void
moea_pte_set(struct pte *pt, struct pte *pvo_pt)
{

	pvo_pt->pte_hi |= PTE_VALID;

	/*
	 * Update the PTE as defined in section 7.6.3.1.
	 * Note that the REF/CHG bits are from pvo_pt and thus should have
	 * been saved so this routine can restore them (if desired).
	 */
	pt->pte_lo = pvo_pt->pte_lo;
	EIEIO();
	pt->pte_hi = pvo_pt->pte_hi;
	SYNC();
	moea_pte_valid++;
}
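
/*
 * A note on the EIEIO/SYNC ordering in moea_pte_set() above and
 * moea_pte_unset() below: it follows the architecture's recipes for
 * adding and deleting PTEs.  The MMU walks the table concurrently, so
 * a PTE must never be observable in a half-written state.  On insert,
 * pte_lo is made visible before the word carrying PTE_VALID; on
 * delete, PTE_VALID is cleared first and the stale translation is then
 * flushed with tlbie/tlbsync before the REF/CHG bits are harvested.
 */
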
static __inline void
moea_pte_unset(struct pte *pt, struct pte *pvo_pt, vm_offset_t va)
{

	pvo_pt->pte_hi &= ~PTE_VALID;

	/*
	 * Force the reg & chg bits back into the PTEs.
	 */
	SYNC();

	/*
	 * Invalidate the pte.
	 */
	pt->pte_hi &= ~PTE_VALID;

	SYNC();
	TLBIE(va);
	EIEIO();
	TLBSYNC();
	SYNC();

	/*
	 * Save the reg & chg bits.
	 */
	moea_pte_synch(pt, pvo_pt);
	moea_pte_valid--;
}

static __inline void
moea_pte_change(struct pte *pt, struct pte *pvo_pt, vm_offset_t va)
{

	/*
	 * Invalidate the PTE
	 */
	moea_pte_unset(pt, pvo_pt, va);
	moea_pte_set(pt, pvo_pt);
}

/*
 * Quick sort callout for comparing memory regions.
 */
static int	mr_cmp(const void *a, const void *b);
static int	om_cmp(const void *a, const void *b);

static int
mr_cmp(const void *a, const void *b)
{
	const struct	mem_region *regiona;
	const struct	mem_region *regionb;

	regiona = a;
	regionb = b;
	if (regiona->mr_start < regionb->mr_start)
		return (-1);
	else if (regiona->mr_start > regionb->mr_start)
		return (1);
	else
		return (0);
}

static int
om_cmp(const void *a, const void *b)
{
	const struct	ofw_map *mapa;
	const struct	ofw_map *mapb;

	mapa = a;
	mapb = b;
	if (mapa->om_pa < mapb->om_pa)
		return (-1);
	else if (mapa->om_pa > mapb->om_pa)
		return (1);
	else
		return (0);
}

void
moea_bootstrap(mmu_t mmup, vm_offset_t kernelstart, vm_offset_t kernelend)
{
	ihandle_t	mmui;
	phandle_t	chosen, mmu;
	int		sz;
	int		i, j;
	int		ofw_mappings;
	vm_size_t	size, physsz, hwphyssz;
	vm_offset_t	pa, va, off;
	u_int		batl, batu;

	/*
	 * Set up BAT0 to map the lowest 256 MB area
	 */
	battable[0x0].batl = BATL(0x00000000, BAT_M, BAT_PP_RW);
	battable[0x0].batu = BATU(0x00000000, BAT_BL_256M, BAT_Vs);

	/*
	 * Map PCI memory space.
	 */
	battable[0x8].batl = BATL(0x80000000, BAT_I|BAT_G, BAT_PP_RW);
	battable[0x8].batu = BATU(0x80000000, BAT_BL_256M, BAT_Vs);

	battable[0x9].batl = BATL(0x90000000, BAT_I|BAT_G, BAT_PP_RW);
	battable[0x9].batu = BATU(0x90000000, BAT_BL_256M, BAT_Vs);

	battable[0xa].batl = BATL(0xa0000000, BAT_I|BAT_G, BAT_PP_RW);
	battable[0xa].batu = BATU(0xa0000000, BAT_BL_256M, BAT_Vs);

	battable[0xb].batl = BATL(0xb0000000, BAT_I|BAT_G, BAT_PP_RW);
	battable[0xb].batu = BATU(0xb0000000, BAT_BL_256M, BAT_Vs);

	/*
	 * Map obio devices.
	 */
	battable[0xf].batl = BATL(0xf0000000, BAT_I|BAT_G, BAT_PP_RW);
	battable[0xf].batu = BATU(0xf0000000, BAT_BL_256M, BAT_Vs);
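
	/*
	 * An aside on the battable[] entries above (a sketch of the BAT
	 * model, not a full description): each pair describes one 256 MB
	 * block translation.  BATU carries the effective block address,
	 * block length and validity bits; BATL carries the physical base
	 * and WIMG/protection bits, which is why device ranges are
	 * entered cache-inhibited and guarded (BAT_I|BAT_G) while RAM is
	 * mapped coherent (BAT_M).  Only entries loaded into the hardware
	 * BAT register pairs take effect; battable[] is the software copy
	 * spilled in on demand.
	 */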

	/*
	 * Use an IBAT and a DBAT to map the bottom segment of memory
	 * where we are.
	 */
	batu = BATU(0x00000000, BAT_BL_256M, BAT_Vs);
	batl = BATL(0x00000000, BAT_M, BAT_PP_RW);
	__asm (".balign 32; \n"
	       "mtibatu 0,%0; mtibatl 0,%1; isync; \n"
	       "mtdbatu 0,%0; mtdbatl 0,%1; isync"
	    :: "r"(batu), "r"(batl));

#if 0
	/* map frame buffer */
	batu = BATU(0x90000000, BAT_BL_256M, BAT_Vs);
	batl = BATL(0x90000000, BAT_I|BAT_G, BAT_PP_RW);
	__asm ("mtdbatu 1,%0; mtdbatl 1,%1; isync"
	    :: "r"(batu), "r"(batl));
#endif

#if 1
	/* map pci space */
	batu = BATU(0x80000000, BAT_BL_256M, BAT_Vs);
	batl = BATL(0x80000000, BAT_I|BAT_G, BAT_PP_RW);
	__asm ("mtdbatu 1,%0; mtdbatl 1,%1; isync"
	    :: "r"(batu), "r"(batl));
#endif

	/*
	 * Set the start and end of kva.
	 */
	virtual_avail = VM_MIN_KERNEL_ADDRESS;
	virtual_end = VM_MAX_KERNEL_ADDRESS;

	mem_regions(&pregions, &pregions_sz, &regions, &regions_sz);
	CTR0(KTR_PMAP, "moea_bootstrap: physical memory");

	qsort(pregions, pregions_sz, sizeof(*pregions), mr_cmp);
	for (i = 0; i < pregions_sz; i++) {
		vm_offset_t pa;
		vm_offset_t end;

		CTR3(KTR_PMAP, "physregion: %#x - %#x (%#x)",
			pregions[i].mr_start,
			pregions[i].mr_start + pregions[i].mr_size,
			pregions[i].mr_size);
		/*
		 * Install entries into the BAT table to allow all
		 * of physmem to be covered by on-demand BAT entries.
		 * The loop will sometimes set the same battable element
		 * twice, but that's fine since they won't be used for
		 * a while yet.
		 */
		pa = pregions[i].mr_start & 0xf0000000;
		end = pregions[i].mr_start + pregions[i].mr_size;
		do {
			u_int n = pa >> ADDR_SR_SHFT;

			battable[n].batl = BATL(pa, BAT_M, BAT_PP_RW);
			battable[n].batu = BATU(pa, BAT_BL_256M, BAT_Vs);
			pa += SEGMENT_LENGTH;
		} while (pa < end);
	}

	if (sizeof(phys_avail)/sizeof(phys_avail[0]) < regions_sz)
		panic("moea_bootstrap: phys_avail too small");
	qsort(regions, regions_sz, sizeof(*regions), mr_cmp);
	phys_avail_count = 0;
	physsz = 0;
	hwphyssz = 0;
	TUNABLE_ULONG_FETCH("hw.physmem", (u_long *) &hwphyssz);
	for (i = 0, j = 0; i < regions_sz; i++, j += 2) {
		CTR3(KTR_PMAP, "region: %#x - %#x (%#x)", regions[i].mr_start,
		    regions[i].mr_start + regions[i].mr_size,
		    regions[i].mr_size);
		if (hwphyssz != 0 &&
		    (physsz + regions[i].mr_size) >= hwphyssz) {
			if (physsz < hwphyssz) {
				phys_avail[j] = regions[i].mr_start;
				phys_avail[j + 1] = regions[i].mr_start +
				    hwphyssz - physsz;
				physsz = hwphyssz;
				phys_avail_count++;
			}
			break;
		}
		phys_avail[j] = regions[i].mr_start;
		phys_avail[j + 1] = regions[i].mr_start + regions[i].mr_size;
		phys_avail_count++;
		physsz += regions[i].mr_size;
	}
	physmem = btoc(physsz);

	/*
	 * Allocate PTEG table.
	 */
#ifdef PTEGCOUNT
	moea_pteg_count = PTEGCOUNT;
#else
	moea_pteg_count = 0x1000;

	while (moea_pteg_count < physmem)
		moea_pteg_count <<= 1;

	moea_pteg_count >>= 1;
#endif /* PTEGCOUNT */

	size = moea_pteg_count * sizeof(struct pteg);
	CTR2(KTR_PMAP, "moea_bootstrap: %d PTEGs, %d bytes", moea_pteg_count,
	    size);
	moea_pteg_table = (struct pteg *)moea_bootstrap_alloc(size, size);
	CTR1(KTR_PMAP, "moea_bootstrap: PTEG table at %p", moea_pteg_table);
	bzero((void *)moea_pteg_table, moea_pteg_count * sizeof(struct pteg));
	moea_pteg_mask = moea_pteg_count - 1;
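
	/*
	 * A worked example of the sizing above (illustrative numbers):
	 * with 128 MB of RAM, physmem is 32768 pages, so the loop grows
	 * moea_pteg_count from 0x1000 to 0x8000 and the final shift
	 * settles on 0x4000 PTEGs, i.e. one group per two physical
	 * pages.  At 64 bytes per 8-slot group that is a 1 MB table,
	 * power-of-two sized and aligned, which is what the SDR1 format
	 * programmed at the end of this function requires.
	 */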

	/*
	 * Allocate pv/overflow lists.
	 */
	size = sizeof(struct pvo_head) * moea_pteg_count;
	moea_pvo_table = (struct pvo_head *)moea_bootstrap_alloc(size,
	    PAGE_SIZE);
	CTR1(KTR_PMAP, "moea_bootstrap: PVO table at %p", moea_pvo_table);
	for (i = 0; i < moea_pteg_count; i++)
		LIST_INIT(&moea_pvo_table[i]);

	/*
	 * Initialize the lock that synchronizes access to the pteg and pvo
	 * tables.
	 */
	mtx_init(&moea_table_mutex, "pmap table", NULL, MTX_DEF);

	/*
	 * Allocate the message buffer.
	 */
	msgbuf_phys = moea_bootstrap_alloc(MSGBUF_SIZE, 0);

	/*
	 * Initialise the unmanaged pvo pool.
	 */
	moea_bpvo_pool = (struct pvo_entry *)moea_bootstrap_alloc(
		BPVO_POOL_SIZE*sizeof(struct pvo_entry), 0);
	moea_bpvo_pool_index = 0;

	/*
	 * Make sure kernel vsid is allocated as well as VSID 0.
	 */
	moea_vsid_bitmap[(KERNEL_VSIDBITS & (NPMAPS - 1)) / VSID_NBPW]
		|= 1 << (KERNEL_VSIDBITS % VSID_NBPW);
	moea_vsid_bitmap[0] |= 1;

	/*
	 * Set up the Open Firmware pmap and add its mappings.
	 */
	moea_pinit(mmup, &ofw_pmap);
	ofw_pmap.pm_sr[KERNEL_SR] = KERNEL_SEGMENT;
	ofw_pmap.pm_sr[KERNEL2_SR] = KERNEL2_SEGMENT;
	if ((chosen = OF_finddevice("/chosen")) == -1)
		panic("moea_bootstrap: can't find /chosen");
	OF_getprop(chosen, "mmu", &mmui, 4);
	if ((mmu = OF_instance_to_package(mmui)) == -1)
		panic("moea_bootstrap: can't get mmu package");
	if ((sz = OF_getproplen(mmu, "translations")) == -1)
		panic("moea_bootstrap: can't get ofw translation count");
	translations = NULL;
	for (i = 0; phys_avail[i] != 0; i += 2) {
		if (phys_avail[i + 1] >= sz) {
			translations = (struct ofw_map *)phys_avail[i];
			break;
		}
	}
	if (translations == NULL)
		panic("moea_bootstrap: no space to copy translations");
	bzero(translations, sz);
	if (OF_getprop(mmu, "translations", translations, sz) == -1)
		panic("moea_bootstrap: can't get ofw translations");
	CTR0(KTR_PMAP, "moea_bootstrap: translations");
	sz /= sizeof(*translations);
	qsort(translations, sz, sizeof (*translations), om_cmp);
	for (i = 0, ofw_mappings = 0; i < sz; i++) {
		CTR3(KTR_PMAP, "translation: pa=%#x va=%#x len=%#x",
		    translations[i].om_pa, translations[i].om_va,
		    translations[i].om_len);

		/*
		 * If the mapping is 1:1, let the RAM and device on-demand
		 * BAT tables take care of the translation.
		 */
		if (translations[i].om_va == translations[i].om_pa)
			continue;

		/* Enter the pages */
		for (off = 0; off < translations[i].om_len; off += PAGE_SIZE) {
			struct	vm_page m;

			m.phys_addr = translations[i].om_pa + off;
			moea_enter_locked(&ofw_pmap,
				   translations[i].om_va + off, &m,
				   VM_PROT_ALL, 1);
			ofw_mappings++;
		}
	}
#ifdef SMP
	TLBSYNC();
#endif
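
	/*
	 * A note on the "translations" property consumed above: Open
	 * Firmware reports each mapping it created as a {va, len, pa,
	 * mode} record matching struct ofw_map.  Replaying the
	 * non-identity entries into ofw_pmap keeps the firmware's client
	 * interface callable after the kernel takes over translation;
	 * identity mappings are skipped because the BAT entries installed
	 * earlier already cover them.
	 */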

	/*
	 * Initialize the kernel pmap (which is statically allocated).
	 */
	PMAP_LOCK_INIT(kernel_pmap);
	for (i = 0; i < 16; i++) {
		kernel_pmap->pm_sr[i] = EMPTY_SEGMENT;
	}
	kernel_pmap->pm_sr[KERNEL_SR] = KERNEL_SEGMENT;
	kernel_pmap->pm_sr[KERNEL2_SR] = KERNEL2_SEGMENT;
	kernel_pmap->pm_active = ~0;

	/*
	 * Allocate a kernel stack with a guard page for thread0 and map it
	 * into the kernel page map.
	 */
	pa = moea_bootstrap_alloc(KSTACK_PAGES * PAGE_SIZE, 0);
	kstack0_phys = pa;
	kstack0 = virtual_avail + (KSTACK_GUARD_PAGES * PAGE_SIZE);
	CTR2(KTR_PMAP, "moea_bootstrap: kstack0 at %#x (%#x)", kstack0_phys,
	    kstack0);
	virtual_avail += (KSTACK_PAGES + KSTACK_GUARD_PAGES) * PAGE_SIZE;
	for (i = 0; i < KSTACK_PAGES; i++) {
		pa = kstack0_phys + i * PAGE_SIZE;
		va = kstack0 + i * PAGE_SIZE;
		moea_kenter(mmup, va, pa);
		TLBIE(va);
	}

	/*
	 * Calculate the last available physical address.
	 */
	for (i = 0; phys_avail[i + 2] != 0; i += 2)
		;
	Maxmem = powerpc_btop(phys_avail[i + 1]);

	/*
	 * Allocate virtual address space for the message buffer.
	 */
	msgbufp = (struct msgbuf *)virtual_avail;
	virtual_avail += round_page(MSGBUF_SIZE);

	/*
	 * Initialize hardware.
	 */
	for (i = 0; i < 16; i++) {
		mtsrin(i << ADDR_SR_SHFT, EMPTY_SEGMENT);
	}
	__asm __volatile ("mtsr %0,%1"
	    :: "n"(KERNEL_SR), "r"(KERNEL_SEGMENT));
	__asm __volatile ("mtsr %0,%1"
	    :: "n"(KERNEL2_SR), "r"(KERNEL2_SEGMENT));
	__asm __volatile ("sync; mtsdr1 %0; isync"
	    :: "r"((u_int)moea_pteg_table | (moea_pteg_mask >> 10)));
	tlbia();

	pmap_bootstrapped++;
}
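
/*
 * A note on the SDR1 value programmed at the end of moea_bootstrap():
 * SDR1 packs the physical base of the PTEG table into its upper bits
 * and a 9-bit hash-table mask into its low bits.  The table is
 * allocated with alignment equal to its size, so the base has zeroes
 * where the mask lives, and (moea_pteg_mask >> 10) converts the
 * PTEG-index mask into the 64 KB-granule mask format the register
 * expects (e.g. 0x3fff >> 10 == 0xf for a 1 MB table).
 */
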
/*
 * Activate a user pmap.  The pmap must be activated before its address
 * space can be accessed in any way.
 */
void
moea_activate(mmu_t mmu, struct thread *td)
{
	pmap_t	pm, pmr;

	/*
	 * Load all the data we need up front to encourage the compiler to
	 * not issue any loads while we have interrupts disabled below.
	 */
	pm = &td->td_proc->p_vmspace->vm_pmap;

	if ((pmr = (pmap_t)moea_kextract(mmu, (vm_offset_t)pm)) == NULL)
		pmr = pm;

	pm->pm_active |= PCPU_GET(cpumask);
	PCPU_SET(curpmap, pmr);
}

void
moea_deactivate(mmu_t mmu, struct thread *td)
{
	pmap_t	pm;

	pm = &td->td_proc->p_vmspace->vm_pmap;
	pm->pm_active &= ~(PCPU_GET(cpumask));
	PCPU_SET(curpmap, NULL);
}

void
moea_change_wiring(mmu_t mmu, pmap_t pm, vm_offset_t va, boolean_t wired)
{
	struct	pvo_entry *pvo;

	PMAP_LOCK(pm);
	pvo = moea_pvo_find_va(pm, va & ~ADDR_POFF, NULL);

	if (pvo != NULL) {
		if (wired) {
			if ((pvo->pvo_vaddr & PVO_WIRED) == 0)
				pm->pm_stats.wired_count++;
			pvo->pvo_vaddr |= PVO_WIRED;
		} else {
			if ((pvo->pvo_vaddr & PVO_WIRED) != 0)
				pm->pm_stats.wired_count--;
			pvo->pvo_vaddr &= ~PVO_WIRED;
		}
	}
	PMAP_UNLOCK(pm);
}

void
moea_copy_page(mmu_t mmu, vm_page_t msrc, vm_page_t mdst)
{
	vm_offset_t	dst;
	vm_offset_t	src;

	dst = VM_PAGE_TO_PHYS(mdst);
	src = VM_PAGE_TO_PHYS(msrc);

	kcopy((void *)src, (void *)dst, PAGE_SIZE);
}
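
/*
 * A note on the zeroing strategy used by the next two functions: pages
 * below SEGMENT_LENGTH (256 MB) are reachable through the always-valid
 * BAT0 identity mapping set up in moea_bootstrap(), so they can be
 * touched at their physical address directly.  Anything higher has to
 * be slipped temporarily into the reserved kernel VA window via
 * moea_pa_map()/moea_pa_unmap() on the moea_pvo_zeropage entry.
 */
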
/*
 * Zero a page of physical memory by temporarily mapping it into the tlb.
 */
void
moea_zero_page(mmu_t mmu, vm_page_t m)
{
	vm_offset_t pa = VM_PAGE_TO_PHYS(m);
	caddr_t va;

	if (pa < SEGMENT_LENGTH) {
		va = (caddr_t) pa;
	} else if (moea_initialized) {
		if (moea_pvo_zeropage == NULL)
			moea_pvo_zeropage = moea_rkva_alloc(mmu);
		moea_pa_map(moea_pvo_zeropage, pa, NULL, NULL);
		va = (caddr_t)PVO_VADDR(moea_pvo_zeropage);
	} else {
		panic("moea_zero_page: can't zero pa %#x", pa);
	}

	bzero(va, PAGE_SIZE);

	if (pa >= SEGMENT_LENGTH)
		moea_pa_unmap(moea_pvo_zeropage, NULL, NULL);
}

void
moea_zero_page_area(mmu_t mmu, vm_page_t m, int off, int size)
{
	vm_offset_t pa = VM_PAGE_TO_PHYS(m);
	caddr_t va;

	if (pa < SEGMENT_LENGTH) {
		va = (caddr_t) pa;
	} else if (moea_initialized) {
		if (moea_pvo_zeropage == NULL)
			moea_pvo_zeropage = moea_rkva_alloc(mmu);
		moea_pa_map(moea_pvo_zeropage, pa, NULL, NULL);
		va = (caddr_t)PVO_VADDR(moea_pvo_zeropage);
	} else {
		panic("moea_zero_page_area: can't zero pa %#x", pa);
	}

	bzero(va + off, size);

	if (pa >= SEGMENT_LENGTH)
		moea_pa_unmap(moea_pvo_zeropage, NULL, NULL);
}

void
moea_zero_page_idle(mmu_t mmu, vm_page_t m)
{

	/* XXX this is called outside of Giant, is moea_zero_page safe? */
	/* XXX maybe have a dedicated mapping for this to avoid the problem? */
	mtx_lock(&Giant);
	moea_zero_page(mmu, m);
	mtx_unlock(&Giant);
}

/*
 * Map the given physical page at the specified virtual address in the
 * target pmap with the protection requested.  If specified the page
 * will be wired down.
 */
void
moea_enter(mmu_t mmu, pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot,
    boolean_t wired)
{

	vm_page_lock_queues();
	PMAP_LOCK(pmap);
	moea_enter_locked(pmap, va, m, prot, wired);
	vm_page_unlock_queues();
	PMAP_UNLOCK(pmap);
}

/*
 * Map the given physical page at the specified virtual address in the
 * target pmap with the protection requested.  If specified the page
 * will be wired down.
 *
 * The page queues and pmap must be locked.
 */
static void
moea_enter_locked(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot,
    boolean_t wired)
{
	struct		pvo_head *pvo_head;
	uma_zone_t	zone;
	vm_page_t	pg;
	u_int		pte_lo, pvo_flags, was_exec, i;
	int		error;

	if (!moea_initialized) {
		pvo_head = &moea_pvo_kunmanaged;
		zone = moea_upvo_zone;
		pvo_flags = 0;
		pg = NULL;
		was_exec = PTE_EXEC;
	} else {
		pvo_head = vm_page_to_pvoh(m);
		pg = m;
		zone = moea_mpvo_zone;
		pvo_flags = PVO_MANAGED;
		was_exec = 0;
	}
	if (pmap_bootstrapped)
		mtx_assert(&vm_page_queue_mtx, MA_OWNED);
	PMAP_LOCK_ASSERT(pmap, MA_OWNED);

	/* XXX change the pvo head for fake pages */
	if ((m->flags & PG_FICTITIOUS) == PG_FICTITIOUS)
		pvo_head = &moea_pvo_kunmanaged;

	/*
	 * If this is a managed page, and it's the first reference to the page,
	 * clear the execness of the page.  Otherwise fetch the execness.
	 */
	if ((pg != NULL) && ((m->flags & PG_FICTITIOUS) == 0)) {
		if (LIST_EMPTY(pvo_head)) {
			moea_attr_clear(pg, PTE_EXEC);
		} else {
			was_exec = moea_attr_fetch(pg) & PTE_EXEC;
		}
	}

	/*
	 * Assume the page is cache inhibited and access is guarded unless
	 * it's in our available memory array.
	 */
	pte_lo = PTE_I | PTE_G;
	for (i = 0; i < pregions_sz; i++) {
		if ((VM_PAGE_TO_PHYS(m) >= pregions[i].mr_start) &&
		    (VM_PAGE_TO_PHYS(m) <
			(pregions[i].mr_start + pregions[i].mr_size))) {
			pte_lo &= ~(PTE_I | PTE_G);
			break;
		}
	}

	if (prot & VM_PROT_WRITE)
		pte_lo |= PTE_BW;
	else
		pte_lo |= PTE_BR;

	if (prot & VM_PROT_EXECUTE)
		pvo_flags |= PVO_EXECUTABLE;

	if (wired)
		pvo_flags |= PVO_WIRED;

	if ((m->flags & PG_FICTITIOUS) != 0)
		pvo_flags |= PVO_FAKE;

	error = moea_pvo_enter(pmap, zone, pvo_head, va, VM_PAGE_TO_PHYS(m),
	    pte_lo, pvo_flags);

	/*
	 * Flush the real page from the instruction cache if this page is
	 * mapped executable and cacheable and was not previously mapped (or
	 * was not mapped executable).
	 */
	if (error == 0 && (pvo_flags & PVO_EXECUTABLE) &&
	    (pte_lo & PTE_I) == 0 && was_exec == 0) {
		/*
		 * Flush the real memory from the cache.
		 */
		moea_syncicache(VM_PAGE_TO_PHYS(m), PAGE_SIZE);
		if (pg != NULL)
			moea_attr_save(pg, PTE_EXEC);
	}

	/* XXX syncicache always until problems are sorted */
	moea_syncicache(VM_PAGE_TO_PHYS(m), PAGE_SIZE);
}
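
/*
 * A note on the syncicache calls above: PowerPC keeps no hardware
 * coherence between the data and instruction caches, so a page that
 * may be executed has to be pushed out of the d-cache and invalidated
 * in the i-cache (the dcbst/icbi sequence done by moea_syncicache())
 * before the first executable mapping is used.  The PTE_EXEC attribute
 * cached on the page records that this has already been done.
 */
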
/*
 * Maps a sequence of resident pages belonging to the same object.
 * The sequence begins with the given page m_start.  This page is
 * mapped at the given virtual address start.  Each subsequent page is
 * mapped at a virtual address that is offset from start by the same
 * amount as the page is offset from m_start within the object.  The
 * last page in the sequence is the page with the largest offset from
 * m_start that can be mapped at a virtual address less than the given
 * virtual address end.  Not every virtual page between start and end
 * is mapped; only those for which a resident page exists with the
 * corresponding offset from m_start are mapped.
 */
void
moea_enter_object(mmu_t mmu, pmap_t pm, vm_offset_t start, vm_offset_t end,
    vm_page_t m_start, vm_prot_t prot)
{
	vm_page_t m;
	vm_pindex_t diff, psize;

	psize = atop(end - start);
	m = m_start;
	PMAP_LOCK(pm);
	while (m != NULL && (diff = m->pindex - m_start->pindex) < psize) {
		moea_enter_locked(pm, start + ptoa(diff), m, prot &
		    (VM_PROT_READ | VM_PROT_EXECUTE), FALSE);
		m = TAILQ_NEXT(m, listq);
	}
	PMAP_UNLOCK(pm);
}

vm_page_t
moea_enter_quick(mmu_t mmu, pmap_t pm, vm_offset_t va, vm_page_t m,
    vm_prot_t prot, vm_page_t mpte)
{

	PMAP_LOCK(pm);
	moea_enter_locked(pm, va, m, prot & (VM_PROT_READ | VM_PROT_EXECUTE),
	    FALSE);
	PMAP_UNLOCK(pm);
	return (NULL);
}

vm_paddr_t
moea_extract(mmu_t mmu, pmap_t pm, vm_offset_t va)
{
	struct	pvo_entry *pvo;
	vm_paddr_t pa;

	PMAP_LOCK(pm);
	pvo = moea_pvo_find_va(pm, va & ~ADDR_POFF, NULL);
	if (pvo == NULL)
		pa = 0;
	else
		pa = (pvo->pvo_pte.pte_lo & PTE_RPGN) | (va & ADDR_POFF);
	PMAP_UNLOCK(pm);
	return (pa);
}
/*
 * Atomically extract and hold the physical page with the given
 * pmap and virtual address pair if that mapping permits the given
 * protection.
 */
vm_page_t
moea_extract_and_hold(mmu_t mmu, pmap_t pmap, vm_offset_t va, vm_prot_t prot)
{
	struct	pvo_entry *pvo;
	vm_page_t m;

	m = NULL;
	mtx_lock(&Giant);
	vm_page_lock_queues();
	PMAP_LOCK(pmap);
	pvo = moea_pvo_find_va(pmap, va & ~ADDR_POFF, NULL);
	if (pvo != NULL && (pvo->pvo_pte.pte_hi & PTE_VALID) &&
	    ((pvo->pvo_pte.pte_lo & PTE_PP) == PTE_RW ||
	     (prot & VM_PROT_WRITE) == 0)) {
		m = PHYS_TO_VM_PAGE(pvo->pvo_pte.pte_lo & PTE_RPGN);
		vm_page_hold(m);
	}
	vm_page_unlock_queues();
	PMAP_UNLOCK(pmap);
	mtx_unlock(&Giant);
	return (m);
}

void
moea_init(mmu_t mmu)
{

	CTR0(KTR_PMAP, "moea_init");

	moea_upvo_zone = uma_zcreate("UPVO entry", sizeof (struct pvo_entry),
	    NULL, NULL, NULL, NULL, UMA_ALIGN_PTR,
	    UMA_ZONE_VM | UMA_ZONE_NOFREE);
	moea_mpvo_zone = uma_zcreate("MPVO entry", sizeof(struct pvo_entry),
	    NULL, NULL, NULL, NULL, UMA_ALIGN_PTR,
	    UMA_ZONE_VM | UMA_ZONE_NOFREE);
	moea_initialized = TRUE;
}

boolean_t
moea_is_modified(mmu_t mmu, vm_page_t m)
{

	if ((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) != 0)
		return (FALSE);

	return (moea_query_bit(m, PTE_CHG));
}

void
moea_clear_reference(mmu_t mmu, vm_page_t m)
{

	if ((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) != 0)
		return;
	moea_clear_bit(m, PTE_REF, NULL);
}

void
moea_clear_modify(mmu_t mmu, vm_page_t m)
{

	if ((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) != 0)
		return;
	moea_clear_bit(m, PTE_CHG, NULL);
}
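
/*
 * A sketch of how the REF/CHG queries above work: the hardware sets
 * the reference and change bits only in the PTE slots themselves, so
 * moea_query_bit() and moea_clear_bit() walk every pvo_entry attached
 * to the page, sync the bits out of any live PTE, and accumulate them
 * in the page's md.mdpg_attrs before testing or clearing.  Their cost
 * therefore scales with the number of mappings to the page.
 */
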
/*
 * moea_ts_referenced:
 *
 *	Return a count of reference bits for a page, clearing those bits.
 *	It is not necessary for every reference bit to be cleared, but it
 *	is necessary that 0 only be returned when there are truly no
 *	reference bits set.
 *
 *	XXX: The exact number of bits to check and clear is a matter that
 *	should be tested and standardized at some point in the future for
 *	optimal aging of shared pages.
 */
boolean_t
moea_ts_referenced(mmu_t mmu, vm_page_t m)
{
	int count;

	if ((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) != 0)
		return (0);

	count = moea_clear_bit(m, PTE_REF, NULL);

	return (count);
}

/*
 * Map a wired page into kernel virtual address space.
 */
void
moea_kenter(mmu_t mmu, vm_offset_t va, vm_offset_t pa)
{
	u_int		pte_lo;
	int		error;
	int		i;

#if 0
	if (va < VM_MIN_KERNEL_ADDRESS)
		panic("moea_kenter: attempt to enter non-kernel address %#x",
		    va);
#endif

	pte_lo = PTE_I | PTE_G;
	for (i = 0; i < pregions_sz; i++) {
		if ((pa >= pregions[i].mr_start) &&
		    (pa < (pregions[i].mr_start + pregions[i].mr_size))) {
			pte_lo &= ~(PTE_I | PTE_G);
			break;
		}
	}

	PMAP_LOCK(kernel_pmap);
	error = moea_pvo_enter(kernel_pmap, moea_upvo_zone,
	    &moea_pvo_kunmanaged, va, pa, pte_lo, PVO_WIRED);

	if (error != 0 && error != ENOENT)
		panic("moea_kenter: failed to enter va %#x pa %#x: %d", va,
		    pa, error);

	/*
	 * Flush the real memory from the instruction cache.
	 */
	if ((pte_lo & (PTE_I | PTE_G)) == 0) {
		moea_syncicache(pa, PAGE_SIZE);
	}
	PMAP_UNLOCK(kernel_pmap);
}

/*
 * Extract the physical page address associated with the given kernel virtual
 * address.
 */
vm_offset_t
moea_kextract(mmu_t mmu, vm_offset_t va)
{
	struct	pvo_entry *pvo;
	vm_paddr_t pa;

#ifdef UMA_MD_SMALL_ALLOC
	/*
	 * Allow direct mappings
	 */
	if (va < VM_MIN_KERNEL_ADDRESS) {
		return (va);
	}
#endif

	PMAP_LOCK(kernel_pmap);
	pvo = moea_pvo_find_va(kernel_pmap, va & ~ADDR_POFF, NULL);
	KASSERT(pvo != NULL, ("moea_kextract: no addr found"));
	pa = (pvo->pvo_pte.pte_lo & PTE_RPGN) | (va & ADDR_POFF);
	PMAP_UNLOCK(kernel_pmap);
	return (pa);
}
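
/*
 * A brief note on the UMA_MD_SMALL_ALLOC escape above (an inference
 * from the surrounding bootstrap code): when the platform hands UMA
 * page-sized allocations straight out of physical memory, those
 * addresses sit below VM_MIN_KERNEL_ADDRESS and rely on the on-demand
 * BAT mappings rather than PVO entries, so kextract can return the
 * address unchanged instead of consulting the kernel pmap.
 */
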
138490643Sbenno * Architectures which can support a direct-mapped physical to virtual region 138590643Sbenno * can return the appropriate address within that region, leaving '*virt' 138690643Sbenno * unchanged. We cannot and therefore do not; *virt is updated with the 138790643Sbenno * first usable address after the mapped region. 138877957Sbenno */ 138990643Sbennovm_offset_t 1390152180Sgrehanmoea_map(mmu_t mmu, vm_offset_t *virt, vm_offset_t pa_start, 1391152180Sgrehan vm_offset_t pa_end, int prot) 139277957Sbenno{ 139390643Sbenno vm_offset_t sva, va; 139477957Sbenno 139590643Sbenno sva = *virt; 139690643Sbenno va = sva; 139790643Sbenno for (; pa_start < pa_end; pa_start += PAGE_SIZE, va += PAGE_SIZE) 1398152180Sgrehan moea_kenter(mmu, va, pa_start); 139990643Sbenno *virt = va; 140090643Sbenno return (sva); 140177957Sbenno} 140277957Sbenno 140377957Sbenno/* 140490643Sbenno * Lower the permission for all mappings to a given page. 140577957Sbenno */ 140677957Sbennovoid 1407152180Sgrehanmoea_page_protect(mmu_t mmu, vm_page_t m, vm_prot_t prot) 140877957Sbenno{ 140990643Sbenno struct pvo_head *pvo_head; 141090643Sbenno struct pvo_entry *pvo, *next_pvo; 141190643Sbenno struct pte *pt; 1412134329Salc pmap_t pmap; 141377957Sbenno 141490643Sbenno /* 141590643Sbenno * Since the routine only downgrades protection, if the 141690643Sbenno * maximal protection is desired, there isn't any change 141790643Sbenno * to be made. 141890643Sbenno */ 141990643Sbenno if ((prot & (VM_PROT_READ|VM_PROT_WRITE)) == 142090643Sbenno (VM_PROT_READ|VM_PROT_WRITE)) 142177957Sbenno return; 142277957Sbenno 142390643Sbenno pvo_head = vm_page_to_pvoh(m); 142490643Sbenno for (pvo = LIST_FIRST(pvo_head); pvo != NULL; pvo = next_pvo) { 142590643Sbenno next_pvo = LIST_NEXT(pvo, pvo_vlink); 1426152180Sgrehan MOEA_PVO_CHECK(pvo); /* sanity check */ 1427134329Salc pmap = pvo->pvo_pmap; 1428134329Salc PMAP_LOCK(pmap); 142990643Sbenno 143090643Sbenno /* 143190643Sbenno * Downgrading to no mapping at all, we just remove the entry. 143290643Sbenno */ 143390643Sbenno if ((prot & VM_PROT_READ) == 0) { 1434152180Sgrehan moea_pvo_remove(pvo, -1); 1435134329Salc PMAP_UNLOCK(pmap); 143690643Sbenno continue; 143777957Sbenno } 143890643Sbenno 143990643Sbenno /* 144090643Sbenno * If EXEC permission is being revoked, just clear the flag 144190643Sbenno * in the PVO. 144290643Sbenno */ 144390643Sbenno if ((prot & VM_PROT_EXECUTE) == 0) 144490643Sbenno pvo->pvo_vaddr &= ~PVO_EXECUTABLE; 144590643Sbenno 144690643Sbenno /* 144790643Sbenno * If this entry is already RO, don't diddle with the page 144890643Sbenno * table. 144990643Sbenno */ 145090643Sbenno if ((pvo->pvo_pte.pte_lo & PTE_PP) == PTE_BR) { 1451134329Salc PMAP_UNLOCK(pmap); 1452152180Sgrehan MOEA_PVO_CHECK(pvo); 145390643Sbenno continue; 145477957Sbenno } 145590643Sbenno 145690643Sbenno /* 145790643Sbenno * Grab the PTE before we diddle the bits so pvo_to_pte can 145890643Sbenno * verify the pte contents are as expected. 
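 * (Editor's note: moea_pvo_to_pte() panics if the cached copy in the
 * pvo and the hardware PTE disagree, so the lookup must happen before
 * pvo_pte is rewritten below.)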
145990643Sbenno */ 1460152180Sgrehan pt = moea_pvo_to_pte(pvo, -1); 146190643Sbenno pvo->pvo_pte.pte_lo &= ~PTE_PP; 146290643Sbenno pvo->pvo_pte.pte_lo |= PTE_BR; 146390643Sbenno if (pt != NULL) 1464152180Sgrehan moea_pte_change(pt, &pvo->pvo_pte, pvo->pvo_vaddr); 1465134329Salc PMAP_UNLOCK(pmap); 1466152180Sgrehan MOEA_PVO_CHECK(pvo); /* sanity check */ 146777957Sbenno } 1468133166Sgrehan 1469133166Sgrehan /* 1470133166Sgrehan * Downgrading from writeable: clear the VM page flag 1471133166Sgrehan */ 1472133166Sgrehan if ((prot & VM_PROT_WRITE) != VM_PROT_WRITE) 1473133166Sgrehan vm_page_flag_clear(m, PG_WRITEABLE); 147477957Sbenno} 147577957Sbenno 147677957Sbenno/* 147791403Ssilby * Returns true if the pmap's pv is one of the first 147891403Ssilby * 16 pvs linked to from this page. This count may 147991403Ssilby * be changed upwards or downwards in the future; it 148091403Ssilby * is only necessary that true be returned for a small 148191403Ssilby * subset of pmaps for proper page aging. 148291403Ssilby */ 148390643Sbennoboolean_t 1484152180Sgrehanmoea_page_exists_quick(mmu_t mmu, pmap_t pmap, vm_page_t m) 148590643Sbenno{ 1486110172Sgrehan int loops; 1487110172Sgrehan struct pvo_entry *pvo; 1488110172Sgrehan 1489152180Sgrehan if (!moea_initialized || (m->flags & PG_FICTITIOUS)) 1490110172Sgrehan return (FALSE); 1491110172Sgrehan 1492110172Sgrehan loops = 0; 1493110172Sgrehan LIST_FOREACH(pvo, vm_page_to_pvoh(m), pvo_vlink) { 1494110172Sgrehan if (pvo->pvo_pmap == pmap) 1495110172Sgrehan return (TRUE); 1496110172Sgrehan if (++loops >= 16) 1497110172Sgrehan break; 1498110172Sgrehan } 1499110172Sgrehan 1500110172Sgrehan return (FALSE); 150190643Sbenno} 150277957Sbenno 1503152180Sgrehanstatic u_int moea_vsidcontext; 150477957Sbenno 150590643Sbennovoid 1506152180Sgrehanmoea_pinit(mmu_t mmu, pmap_t pmap) 150790643Sbenno{ 150890643Sbenno int i, mask; 150990643Sbenno u_int entropy; 151077957Sbenno 1511152180Sgrehan KASSERT((int)pmap < VM_MIN_KERNEL_ADDRESS, ("moea_pinit: virt pmap")); 1512134329Salc PMAP_LOCK_INIT(pmap); 1513126478Sgrehan 151490643Sbenno entropy = 0; 151590643Sbenno __asm __volatile("mftb %0" : "=r"(entropy)); 151677957Sbenno 151790643Sbenno /* 151890643Sbenno * Allocate some segment registers for this pmap. 151990643Sbenno */ 152090643Sbenno for (i = 0; i < NPMAPS; i += VSID_NBPW) { 152190643Sbenno u_int hash, n; 152277957Sbenno 152377957Sbenno /* 152490643Sbenno * Create a new value by multiplying by a prime and adding in 152590643Sbenno * entropy from the timebase register. This is to make the 152690643Sbenno * VSID more random so that the PT hash function collides 152790643Sbenno * less often. (Note that the prime causes gcc to do shifts 152890643Sbenno * instead of a multiply.) 152977957Sbenno */ 1530152180Sgrehan moea_vsidcontext = (moea_vsidcontext * 0x1105) + entropy; 1531152180Sgrehan hash = moea_vsidcontext & (NPMAPS - 1); 153290643Sbenno if (hash == 0) /* 0 is special, avoid it */ 153390643Sbenno continue; 153490643Sbenno n = hash >> 5; 153590643Sbenno mask = 1 << (hash & (VSID_NBPW - 1)); 1536152180Sgrehan hash = (moea_vsidcontext & 0xfffff); 1537152180Sgrehan if (moea_vsid_bitmap[n] & mask) { /* collision? */ 153890643Sbenno /* anything free in this bucket?
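 * (editor's note: each bucket is one 32-bit word of moea_vsid_bitmap,
 * per the hash >> 5 above, so ffs(~word) below picks the first free slot)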
*/ 1539152180Sgrehan if (moea_vsid_bitmap[n] == 0xffffffff) { 1540152180Sgrehan entropy = (moea_vsidcontext >> 20); 154190643Sbenno continue; 154290643Sbenno } 1543152180Sgrehan i = ffs(~moea_vsid_bitmap[n]) - 1; 154490643Sbenno mask = 1 << i; 154590643Sbenno hash &= 0xfffff & ~(VSID_NBPW - 1); 154690643Sbenno hash |= i; 154777957Sbenno } 1548152180Sgrehan moea_vsid_bitmap[n] |= mask; 154990643Sbenno for (i = 0; i < 16; i++) 155090643Sbenno pmap->pm_sr[i] = VSID_MAKE(i, hash); 155190643Sbenno return; 155290643Sbenno } 155377957Sbenno 1554152180Sgrehan panic("moea_pinit: out of segments"); 155577957Sbenno} 155677957Sbenno 155777957Sbenno/* 155890643Sbenno * Initialize the pmap associated with process 0. 155977957Sbenno */ 156077957Sbennovoid 1561152180Sgrehanmoea_pinit0(mmu_t mmu, pmap_t pm) 156277957Sbenno{ 156377957Sbenno 1564152180Sgrehan moea_pinit(mmu, pm); 156590643Sbenno bzero(&pm->pm_stats, sizeof(pm->pm_stats)); 156677957Sbenno} 156777957Sbenno 156894838Sbenno/* 156994838Sbenno * Set the physical protection on the specified range of this map as requested. 157094838Sbenno */ 157190643Sbennovoid 1572152180Sgrehanmoea_protect(mmu_t mmu, pmap_t pm, vm_offset_t sva, vm_offset_t eva, 1573152180Sgrehan vm_prot_t prot) 157490643Sbenno{ 157594838Sbenno struct pvo_entry *pvo; 157694838Sbenno struct pte *pt; 157794838Sbenno int pteidx; 157894838Sbenno 1579152180Sgrehan CTR4(KTR_PMAP, "moea_protect: pm=%p sva=%#x eva=%#x prot=%#x", pm, sva, 158094838Sbenno eva, prot); 158194838Sbenno 158294838Sbenno 158394838Sbenno KASSERT(pm == &curproc->p_vmspace->vm_pmap || pm == kernel_pmap, 1584152180Sgrehan ("moea_protect: non current pmap")); 158594838Sbenno 158694838Sbenno if ((prot & VM_PROT_READ) == VM_PROT_NONE) { 1587132899Salc mtx_lock(&Giant); 1588152180Sgrehan moea_remove(mmu, pm, sva, eva); 1589132899Salc mtx_unlock(&Giant); 159094838Sbenno return; 159194838Sbenno } 159294838Sbenno 1593132899Salc mtx_lock(&Giant); 1594132220Salc vm_page_lock_queues(); 1595134329Salc PMAP_LOCK(pm); 159694838Sbenno for (; sva < eva; sva += PAGE_SIZE) { 1597152180Sgrehan pvo = moea_pvo_find_va(pm, sva, &pteidx); 159894838Sbenno if (pvo == NULL) 159994838Sbenno continue; 160094838Sbenno 160194838Sbenno if ((prot & VM_PROT_EXECUTE) == 0) 160294838Sbenno pvo->pvo_vaddr &= ~PVO_EXECUTABLE; 160394838Sbenno 160494838Sbenno /* 160594838Sbenno * Grab the PTE pointer before we diddle with the cached PTE 160694838Sbenno * copy. 160794838Sbenno */ 1608152180Sgrehan pt = moea_pvo_to_pte(pvo, pteidx); 160994838Sbenno /* 161094838Sbenno * Change the protection of the page. 161194838Sbenno */ 161294838Sbenno pvo->pvo_pte.pte_lo &= ~PTE_PP; 161394838Sbenno pvo->pvo_pte.pte_lo |= PTE_BR; 161494838Sbenno 161594838Sbenno /* 161694838Sbenno * If the PVO is in the page table, update that pte as well. 161794838Sbenno */ 161894838Sbenno if (pt != NULL) 1619152180Sgrehan moea_pte_change(pt, &pvo->pvo_pte, pvo->pvo_vaddr); 162094838Sbenno } 1621132220Salc vm_page_unlock_queues(); 1622134329Salc PMAP_UNLOCK(pm); 1623132899Salc mtx_unlock(&Giant); 162477957Sbenno} 162577957Sbenno 162691456Sbenno/* 162791456Sbenno * Map a list of wired pages into kernel virtual address space. This is 162891456Sbenno * intended for temporary mappings which do not need page modification or 162991456Sbenno * references recorded. Existing mappings in the region are overwritten.
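 */
#if 0
/*
 * Editor's illustrative sketch, not part of the original file: how the
 * qenter/qremove pair below is typically driven (in practice through
 * pmap_qenter()/pmap_qremove()).  'sva' is assumed to be kernel VA
 * reserved by the caller.
 */
static void
example_map_window(mmu_t mmu, vm_offset_t sva, vm_page_t *pages, int npages)
{

	moea_qenter(mmu, sva, pages, npages);	/* map npages at sva */
	/* ... access the pages through the window at sva ... */
	moea_qremove(mmu, sva, npages);		/* tear the window down */
}
#endif
/*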
163091456Sbenno */ 163190643Sbennovoid 1632152180Sgrehanmoea_qenter(mmu_t mmu, vm_offset_t sva, vm_page_t *m, int count) 163377957Sbenno{ 1634110172Sgrehan vm_offset_t va; 163577957Sbenno 1636110172Sgrehan va = sva; 1637110172Sgrehan while (count-- > 0) { 1638152180Sgrehan moea_kenter(mmu, va, VM_PAGE_TO_PHYS(*m)); 1639110172Sgrehan va += PAGE_SIZE; 1640110172Sgrehan m++; 1641110172Sgrehan } 164290643Sbenno} 164377957Sbenno 164491456Sbenno/* 164591456Sbenno * Remove page mappings from kernel virtual address space. Intended for 1646152180Sgrehan * temporary mappings entered by moea_qenter. 164791456Sbenno */ 164890643Sbennovoid 1649152180Sgrehanmoea_qremove(mmu_t mmu, vm_offset_t sva, int count) 165090643Sbenno{ 1651110172Sgrehan vm_offset_t va; 165291456Sbenno 1653110172Sgrehan va = sva; 1654110172Sgrehan while (count-- > 0) { 1655152180Sgrehan moea_kremove(mmu, va); 1656110172Sgrehan va += PAGE_SIZE; 1657110172Sgrehan } 165877957Sbenno} 165977957Sbenno 166090643Sbennovoid 1661152180Sgrehanmoea_release(mmu_t mmu, pmap_t pmap) 166290643Sbenno{ 1663103604Sgrehan int idx, mask; 1664103604Sgrehan 1665103604Sgrehan /* 1666103604Sgrehan * Free segment register's VSID 1667103604Sgrehan */ 1668103604Sgrehan if (pmap->pm_sr[0] == 0) 1669152180Sgrehan panic("moea_release"); 1670103604Sgrehan 1671103604Sgrehan idx = VSID_TO_HASH(pmap->pm_sr[0]) & (NPMAPS-1); 1672103604Sgrehan mask = 1 << (idx % VSID_NBPW); 1673103604Sgrehan idx /= VSID_NBPW; 1674152180Sgrehan moea_vsid_bitmap[idx] &= ~mask; 1675134329Salc PMAP_LOCK_DESTROY(pmap); 167677957Sbenno} 167777957Sbenno 167891456Sbenno/* 167991456Sbenno * Remove the given range of addresses from the specified map. 168091456Sbenno */ 168190643Sbennovoid 1682152180Sgrehanmoea_remove(mmu_t mmu, pmap_t pm, vm_offset_t sva, vm_offset_t eva) 168377957Sbenno{ 168491456Sbenno struct pvo_entry *pvo; 168591456Sbenno int pteidx; 168691456Sbenno 1687132220Salc vm_page_lock_queues(); 1688134329Salc PMAP_LOCK(pm); 168991456Sbenno for (; sva < eva; sva += PAGE_SIZE) { 1690152180Sgrehan pvo = moea_pvo_find_va(pm, sva, &pteidx); 169191456Sbenno if (pvo != NULL) { 1692152180Sgrehan moea_pvo_remove(pvo, pteidx); 169391456Sbenno } 169491456Sbenno } 1695140538Sgrehan PMAP_UNLOCK(pm); 1696132220Salc vm_page_unlock_queues(); 169777957Sbenno} 169877957Sbenno 169994838Sbenno/* 1700152180Sgrehan * Remove physical page from all pmaps in which it resides. moea_pvo_remove() 1701110172Sgrehan * will reflect changes in pte's back to the vm_page. 1702110172Sgrehan */ 1703110172Sgrehanvoid 1704152180Sgrehanmoea_remove_all(mmu_t mmu, vm_page_t m) 1705110172Sgrehan{ 1706110172Sgrehan struct pvo_head *pvo_head; 1707110172Sgrehan struct pvo_entry *pvo, *next_pvo; 1708134329Salc pmap_t pmap; 1709110172Sgrehan 1710120336Sgrehan mtx_assert(&vm_page_queue_mtx, MA_OWNED); 1711120336Sgrehan 1712110172Sgrehan pvo_head = vm_page_to_pvoh(m); 1713110172Sgrehan for (pvo = LIST_FIRST(pvo_head); pvo != NULL; pvo = next_pvo) { 1714110172Sgrehan next_pvo = LIST_NEXT(pvo, pvo_vlink); 1715133166Sgrehan 1716152180Sgrehan MOEA_PVO_CHECK(pvo); /* sanity check */ 1717134329Salc pmap = pvo->pvo_pmap; 1718134329Salc PMAP_LOCK(pmap); 1719152180Sgrehan moea_pvo_remove(pvo, -1); 1720134329Salc PMAP_UNLOCK(pmap); 1721110172Sgrehan } 1722110172Sgrehan vm_page_flag_clear(m, PG_WRITEABLE); 1723110172Sgrehan} 1724110172Sgrehan 1725110172Sgrehan/* 172690643Sbenno * Allocate a physical page of memory directly from the phys_avail map. 
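 * (Editor's worked example, hypothetical numbers: with phys_avail[] =
 * { 0x3000, 0x20000, ... }, a one-page request returns 0x3000 and the
 * region shrinks to { 0x4000, 0x20000 }; an allocation taken from the
 * middle of a region instead splits it into two entries.)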
1727152180Sgrehan * Can only be called from moea_bootstrap before avail start and end are 172890643Sbenno * calculated. 172983682Smp */ 173090643Sbennostatic vm_offset_t 1731152180Sgrehanmoea_bootstrap_alloc(vm_size_t size, u_int align) 173283682Smp{ 173390643Sbenno vm_offset_t s, e; 173490643Sbenno int i, j; 173583682Smp 173690643Sbenno size = round_page(size); 173790643Sbenno for (i = 0; phys_avail[i + 1] != 0; i += 2) { 173890643Sbenno if (align != 0) 173990643Sbenno s = (phys_avail[i] + align - 1) & ~(align - 1); 174090643Sbenno else 174190643Sbenno s = phys_avail[i]; 174290643Sbenno e = s + size; 174390643Sbenno 174490643Sbenno if (s < phys_avail[i] || e > phys_avail[i + 1]) 174590643Sbenno continue; 174690643Sbenno 174790643Sbenno if (s == phys_avail[i]) { 174890643Sbenno phys_avail[i] += size; 174990643Sbenno } else if (e == phys_avail[i + 1]) { 175090643Sbenno phys_avail[i + 1] -= size; 175190643Sbenno } else { 175290643Sbenno for (j = phys_avail_count * 2; j > i; j -= 2) { 175390643Sbenno phys_avail[j] = phys_avail[j - 2]; 175490643Sbenno phys_avail[j + 1] = phys_avail[j - 1]; 175590643Sbenno } 175690643Sbenno 175790643Sbenno phys_avail[i + 3] = phys_avail[i + 1]; 175890643Sbenno phys_avail[i + 1] = s; 175990643Sbenno phys_avail[i + 2] = e; 176090643Sbenno phys_avail_count++; 176190643Sbenno } 176290643Sbenno 176390643Sbenno return (s); 176483682Smp } 1765152180Sgrehan panic("moea_bootstrap_alloc: could not allocate memory"); 176683682Smp} 176783682Smp 176883682Smp/* 176990643Sbenno * Return an unmapped pvo for a kernel virtual address. 177090643Sbenno * Used by pmap functions that operate on physical pages. 177183682Smp */ 177290643Sbennostatic struct pvo_entry * 1773152180Sgrehanmoea_rkva_alloc(mmu_t mmu) 177483682Smp{ 177590643Sbenno struct pvo_entry *pvo; 177690643Sbenno struct pte *pt; 177790643Sbenno vm_offset_t kva; 177890643Sbenno int pteidx; 177983682Smp 1780152180Sgrehan if (moea_rkva_count == 0) 1781152180Sgrehan panic("moea_rkva_alloc: no more reserved KVAs"); 178290643Sbenno 1783152180Sgrehan kva = moea_rkva_start + (PAGE_SIZE * --moea_rkva_count); 1784152180Sgrehan moea_kenter(mmu, kva, 0); 178590643Sbenno 1786152180Sgrehan pvo = moea_pvo_find_va(kernel_pmap, kva, &pteidx); 178790643Sbenno 178890643Sbenno if (pvo == NULL) 1789152180Sgrehan panic("moea_rkva_alloc: moea_pvo_find_va failed"); 179090643Sbenno 1791152180Sgrehan pt = moea_pvo_to_pte(pvo, pteidx); 179290643Sbenno 179390643Sbenno if (pt == NULL) 1794152180Sgrehan panic("moea_rkva_alloc: moea_pvo_to_pte failed"); 179590643Sbenno 1796152180Sgrehan moea_pte_unset(pt, &pvo->pvo_pte, pvo->pvo_vaddr); 179790643Sbenno PVO_PTEGIDX_CLR(pvo); 179890643Sbenno 1799152180Sgrehan moea_pte_overflow++; 180090643Sbenno 180190643Sbenno return (pvo); 180290643Sbenno} 180390643Sbenno 180490643Sbennostatic void 1805152180Sgrehanmoea_pa_map(struct pvo_entry *pvo, vm_offset_t pa, struct pte *saved_pt, 180690643Sbenno int *depth_p) 180790643Sbenno{ 180890643Sbenno struct pte *pt; 180990643Sbenno 181090643Sbenno /* 181190643Sbenno * If this pvo already has a valid pte, we need to save it so it can 181290643Sbenno * be restored later. We then just reload the new PTE over the old 181390643Sbenno * slot.
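 * (Editor's note: the caller's saved_pt/depth_p pair makes this
 * save-and-reload nestable; moea_pa_unmap() below restores the saved
 * PTE and checks the depth on the way back out.)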
181490643Sbenno */ 181590643Sbenno if (saved_pt != NULL) { 1816152180Sgrehan pt = moea_pvo_to_pte(pvo, -1); 181790643Sbenno 181890643Sbenno if (pt != NULL) { 1819152180Sgrehan moea_pte_unset(pt, &pvo->pvo_pte, pvo->pvo_vaddr); 182090643Sbenno PVO_PTEGIDX_CLR(pvo); 1821152180Sgrehan moea_pte_overflow++; 182283682Smp } 182390643Sbenno 182490643Sbenno *saved_pt = pvo->pvo_pte; 182590643Sbenno 182690643Sbenno pvo->pvo_pte.pte_lo &= ~PTE_RPGN; 182783682Smp } 182890643Sbenno 182990643Sbenno pvo->pvo_pte.pte_lo |= pa; 183090643Sbenno 1831152180Sgrehan if (!moea_pte_spill(pvo->pvo_vaddr)) 1832152180Sgrehan panic("moea_pa_map: could not spill pvo %p", pvo); 183390643Sbenno 183490643Sbenno if (depth_p != NULL) 183590643Sbenno (*depth_p)++; 183683682Smp} 183783682Smp 183890643Sbennostatic void 1839152180Sgrehanmoea_pa_unmap(struct pvo_entry *pvo, struct pte *saved_pt, int *depth_p) 184077957Sbenno{ 184190643Sbenno struct pte *pt; 184277957Sbenno 1843152180Sgrehan pt = moea_pvo_to_pte(pvo, -1); 184490643Sbenno 184590643Sbenno if (pt != NULL) { 1846152180Sgrehan moea_pte_unset(pt, &pvo->pvo_pte, pvo->pvo_vaddr); 184790643Sbenno PVO_PTEGIDX_CLR(pvo); 1848152180Sgrehan moea_pte_overflow++; 184990643Sbenno } 185090643Sbenno 185190643Sbenno pvo->pvo_pte.pte_lo &= ~PTE_RPGN; 185290643Sbenno 185390643Sbenno /* 185490643Sbenno * If there is a saved PTE and it's valid, restore it and return. 185590643Sbenno */ 185690643Sbenno if (saved_pt != NULL && (saved_pt->pte_lo & PTE_RPGN) != 0) { 185790643Sbenno if (depth_p != NULL && --(*depth_p) == 0) 1858152180Sgrehan panic("moea_pa_unmap: restoring but depth == 0"); 185990643Sbenno 186090643Sbenno pvo->pvo_pte = *saved_pt; 186190643Sbenno 1862152180Sgrehan if (!moea_pte_spill(pvo->pvo_vaddr)) 1863152180Sgrehan panic("moea_pa_unmap: could not spill pvo %p", pvo); 186490643Sbenno } 186577957Sbenno} 186677957Sbenno 186790643Sbennostatic void 1868152180Sgrehanmoea_syncicache(vm_offset_t pa, vm_size_t len) 186977957Sbenno{ 187090643Sbenno __syncicache((void *)pa, len); 187190643Sbenno} 187277957Sbenno 187390643Sbennostatic void 187490643Sbennotlbia(void) 187590643Sbenno{ 187690643Sbenno caddr_t i; 187790643Sbenno 187890643Sbenno SYNC(); 187990643Sbenno for (i = 0; i < (caddr_t)0x00040000; i += 0x00001000) { 188090643Sbenno TLBIE(i); 188190643Sbenno EIEIO(); 188290643Sbenno } 188390643Sbenno TLBSYNC(); 188490643Sbenno SYNC(); 188577957Sbenno} 188677957Sbenno 188790643Sbennostatic int 1888152180Sgrehanmoea_pvo_enter(pmap_t pm, uma_zone_t zone, struct pvo_head *pvo_head, 188990643Sbenno vm_offset_t va, vm_offset_t pa, u_int pte_lo, int flags) 189077957Sbenno{ 189190643Sbenno struct pvo_entry *pvo; 189290643Sbenno u_int sr; 189390643Sbenno int first; 189490643Sbenno u_int ptegidx; 189590643Sbenno int i; 1896103604Sgrehan int bootstrap; 189777957Sbenno 1898152180Sgrehan moea_pvo_enter_calls++; 189996250Sbenno first = 0; 1900103604Sgrehan bootstrap = 0; 190190643Sbenno 190290643Sbenno /* 190390643Sbenno * Compute the PTE Group index. 190490643Sbenno */ 190590643Sbenno va &= ~ADDR_POFF; 190690643Sbenno sr = va_to_sr(pm->pm_sr, va); 190790643Sbenno ptegidx = va_to_pteg(sr, va); 190890643Sbenno 190990643Sbenno /* 191090643Sbenno * Remove any existing mapping for this page. Reuse the pvo entry if 191190643Sbenno * there is a mapping. 
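 * (Editor's note: "reuse" is loose here; the loop below actually
 * removes the old pvo and a fresh entry is allocated for the new
 * mapping further down.)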
191290643Sbenno */ 1913152180Sgrehan mtx_lock(&moea_table_mutex); 1914152180Sgrehan LIST_FOREACH(pvo, &moea_pvo_table[ptegidx], pvo_olink) { 191590643Sbenno if (pvo->pvo_pmap == pm && PVO_VADDR(pvo) == va) { 191696334Sbenno if ((pvo->pvo_pte.pte_lo & PTE_RPGN) == pa && 191796334Sbenno (pvo->pvo_pte.pte_lo & PTE_PP) == 191896334Sbenno (pte_lo & PTE_PP)) { 1919152180Sgrehan mtx_unlock(&moea_table_mutex); 192092521Sbenno return (0); 192196334Sbenno } 1922152180Sgrehan moea_pvo_remove(pvo, -1); 192390643Sbenno break; 192490643Sbenno } 192590643Sbenno } 192690643Sbenno 192790643Sbenno /* 192890643Sbenno * If we aren't overwriting a mapping, try to allocate. 192990643Sbenno */ 1930152180Sgrehan if (moea_initialized) { 193192847Sjeff pvo = uma_zalloc(zone, M_NOWAIT); 193292521Sbenno } else { 1933152180Sgrehan if (moea_bpvo_pool_index >= BPVO_POOL_SIZE) { 1934152180Sgrehan panic("moea_pvo_enter: bpvo pool exhausted, %d, %d, %d", 1935152180Sgrehan moea_bpvo_pool_index, BPVO_POOL_SIZE, 193699037Sbenno BPVO_POOL_SIZE * sizeof(struct pvo_entry)); 193792521Sbenno } 1938152180Sgrehan pvo = &moea_bpvo_pool[moea_bpvo_pool_index]; 1939152180Sgrehan moea_bpvo_pool_index++; 1940103604Sgrehan bootstrap = 1; 194192521Sbenno } 194290643Sbenno 194390643Sbenno if (pvo == NULL) { 1944152180Sgrehan mtx_unlock(&moea_table_mutex); 194590643Sbenno return (ENOMEM); 194690643Sbenno } 194790643Sbenno 1948152180Sgrehan moea_pvo_entries++; 194990643Sbenno pvo->pvo_vaddr = va; 195090643Sbenno pvo->pvo_pmap = pm; 1951152180Sgrehan LIST_INSERT_HEAD(&moea_pvo_table[ptegidx], pvo, pvo_olink); 195290643Sbenno pvo->pvo_vaddr &= ~ADDR_POFF; 195390643Sbenno if (flags & VM_PROT_EXECUTE) 195490643Sbenno pvo->pvo_vaddr |= PVO_EXECUTABLE; 195590643Sbenno if (flags & PVO_WIRED) 195690643Sbenno pvo->pvo_vaddr |= PVO_WIRED; 1957152180Sgrehan if (pvo_head != &moea_pvo_kunmanaged) 195890643Sbenno pvo->pvo_vaddr |= PVO_MANAGED; 1959103604Sgrehan if (bootstrap) 1960103604Sgrehan pvo->pvo_vaddr |= PVO_BOOTSTRAP; 1961142416Sgrehan if (flags & PVO_FAKE) 1962142416Sgrehan pvo->pvo_vaddr |= PVO_FAKE; 1963142416Sgrehan 1964152180Sgrehan moea_pte_create(&pvo->pvo_pte, sr, va, pa | pte_lo); 196590643Sbenno 196690643Sbenno /* 196790643Sbenno * Remember if the list was empty and therefore will be the first 196890643Sbenno * item. 196990643Sbenno */ 197096250Sbenno if (LIST_FIRST(pvo_head) == NULL) 197196250Sbenno first = 1; 1972142416Sgrehan LIST_INSERT_HEAD(pvo_head, pvo, pvo_vlink); 197390643Sbenno 197490643Sbenno if (pvo->pvo_vaddr & PVO_WIRED) 1975134453Salc pm->pm_stats.wired_count++; 1976134453Salc pm->pm_stats.resident_count++; 197790643Sbenno 197890643Sbenno /* 197990643Sbenno * We hope this succeeds but it isn't required. 198090643Sbenno */ 1981152180Sgrehan i = moea_pte_insert(ptegidx, &pvo->pvo_pte); 198290643Sbenno if (i >= 0) { 198390643Sbenno PVO_PTEGIDX_SET(pvo, i); 198490643Sbenno } else { 1985152180Sgrehan panic("moea_pvo_enter: overflow"); 198790643Sbenno } 1988152180Sgrehan mtx_unlock(&moea_table_mutex); 198990643Sbenno 199090643Sbenno return (first ? ENOENT : 0); 199177957Sbenno} 199277957Sbenno 199390643Sbennostatic void 1994152180Sgrehanmoea_pvo_remove(struct pvo_entry *pvo, int pteidx) 199577957Sbenno{ 199690643Sbenno struct pte *pt; 199777957Sbenno 199890643Sbenno /* 199990643Sbenno * If there is an active pte entry, we need to deactivate it (and 200090643Sbenno * save the ref & chg bits).
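 * (Editor's note: the hardware updates REF/CHG only in the live PTE,
 * so moea_pte_unset() must pull them back into the cached pvo copy
 * before the slot is invalidated.)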
200190643Sbenno */ 2002152180Sgrehan pt = moea_pvo_to_pte(pvo, pteidx); 200390643Sbenno if (pt != NULL) { 2004152180Sgrehan moea_pte_unset(pt, &pvo->pvo_pte, pvo->pvo_vaddr); 200590643Sbenno PVO_PTEGIDX_CLR(pvo); 200690643Sbenno } else { 2007152180Sgrehan moea_pte_overflow--; 2008142416Sgrehan } 200990643Sbenno 201090643Sbenno /* 201190643Sbenno * Update our statistics. 201290643Sbenno */ 201390643Sbenno pvo->pvo_pmap->pm_stats.resident_count--; 201490643Sbenno if (pvo->pvo_vaddr & PVO_WIRED) 201590643Sbenno pvo->pvo_pmap->pm_stats.wired_count--; 201690643Sbenno 201790643Sbenno /* 201890643Sbenno * Save the REF/CHG bits into their cache if the page is managed. 201990643Sbenno */ 2020142416Sgrehan if ((pvo->pvo_vaddr & (PVO_MANAGED|PVO_FAKE)) == PVO_MANAGED) { 202190643Sbenno struct vm_page *pg; 202290643Sbenno 202392067Sbenno pg = PHYS_TO_VM_PAGE(pvo->pvo_pte.pte_lo & PTE_RPGN); 202490643Sbenno if (pg != NULL) { 2025152180Sgrehan moea_attr_save(pg, pvo->pvo_pte.pte_lo & 202690643Sbenno (PTE_REF | PTE_CHG)); 202790643Sbenno } 202890643Sbenno } 202990643Sbenno 203090643Sbenno /* 203190643Sbenno * Remove this PVO from the PV list. 203290643Sbenno */ 203390643Sbenno LIST_REMOVE(pvo, pvo_vlink); 203490643Sbenno 203590643Sbenno /* 203690643Sbenno * Remove this from the overflow list and return it to the pool 203790643Sbenno * if we aren't going to reuse it. 203890643Sbenno */ 203990643Sbenno LIST_REMOVE(pvo, pvo_olink); 204092521Sbenno if (!(pvo->pvo_vaddr & PVO_BOOTSTRAP)) 2041152180Sgrehan uma_zfree(pvo->pvo_vaddr & PVO_MANAGED ? moea_mpvo_zone : 2042152180Sgrehan moea_upvo_zone, pvo); 2043152180Sgrehan moea_pvo_entries--; 2044152180Sgrehan moea_pvo_remove_calls++; 204577957Sbenno} 204677957Sbenno 204790643Sbennostatic __inline int 2048152180Sgrehanmoea_pvo_pte_index(const struct pvo_entry *pvo, int ptegidx) 204977957Sbenno{ 205090643Sbenno int pteidx; 205177957Sbenno 205290643Sbenno /* 205390643Sbenno * We can find the actual pte entry without searching by grabbing 205490643Sbenno * the PTEG index from 3 unused bits in pte_lo[11:9] and by 205590643Sbenno * noticing the HID bit. 205690643Sbenno */ 205790643Sbenno pteidx = ptegidx * 8 + PVO_PTEGIDX_GET(pvo); 205890643Sbenno if (pvo->pvo_pte.pte_hi & PTE_HID) 2059152180Sgrehan pteidx ^= moea_pteg_mask * 8; 206090643Sbenno 206190643Sbenno return (pteidx); 206277957Sbenno} 206377957Sbenno 206490643Sbennostatic struct pvo_entry * 2065152180Sgrehanmoea_pvo_find_va(pmap_t pm, vm_offset_t va, int *pteidx_p) 206677957Sbenno{ 206790643Sbenno struct pvo_entry *pvo; 206890643Sbenno int ptegidx; 206990643Sbenno u_int sr; 207077957Sbenno 207190643Sbenno va &= ~ADDR_POFF; 207290643Sbenno sr = va_to_sr(pm->pm_sr, va); 207390643Sbenno ptegidx = va_to_pteg(sr, va); 207490643Sbenno 2075152180Sgrehan mtx_lock(&moea_table_mutex); 2076152180Sgrehan LIST_FOREACH(pvo, &moea_pvo_table[ptegidx], pvo_olink) { 207790643Sbenno if (pvo->pvo_pmap == pm && PVO_VADDR(pvo) == va) { 207890643Sbenno if (pteidx_p) 2079152180Sgrehan *pteidx_p = moea_pvo_pte_index(pvo, ptegidx); 2080134535Salc break; 208190643Sbenno } 208290643Sbenno } 2083152180Sgrehan mtx_unlock(&moea_table_mutex); 208490643Sbenno 2085134535Salc return (pvo); 208677957Sbenno} 208777957Sbenno 208890643Sbennostatic struct pte * 2089152180Sgrehanmoea_pvo_to_pte(const struct pvo_entry *pvo, int pteidx) 209077957Sbenno{ 209190643Sbenno struct pte *pt; 209277957Sbenno 209390643Sbenno /* 209490643Sbenno * If we haven't been supplied the ptegidx, calculate it.
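 * (Editor's worked example, hypothetical sizes: with a 1024-group
 * table, moea_pteg_mask is 0x3ff and a pvo in group 0x123, slot 5,
 * yields pteidx = 0x123 * 8 + 5; if PTE_HID is set, the same math in
 * moea_pvo_pte_index() flips to the secondary group via
 * pteidx ^= 0x3ff * 8.)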
209590643Sbenno */ 209690643Sbenno if (pteidx == -1) { 209790643Sbenno int ptegidx; 209890643Sbenno u_int sr; 209977957Sbenno 210090643Sbenno sr = va_to_sr(pvo->pvo_pmap->pm_sr, pvo->pvo_vaddr); 210190643Sbenno ptegidx = va_to_pteg(sr, pvo->pvo_vaddr); 2102152180Sgrehan pteidx = moea_pvo_pte_index(pvo, ptegidx); 210390643Sbenno } 210490643Sbenno 2105152180Sgrehan pt = &moea_pteg_table[pteidx >> 3].pt[pteidx & 7]; 210690643Sbenno 210790643Sbenno if ((pvo->pvo_pte.pte_hi & PTE_VALID) && !PVO_PTEGIDX_ISSET(pvo)) { 2108152180Sgrehan panic("moea_pvo_to_pte: pvo %p has valid pte in pvo but no " 210990643Sbenno "valid pte index", pvo); 211090643Sbenno } 211190643Sbenno 211290643Sbenno if ((pvo->pvo_pte.pte_hi & PTE_VALID) == 0 && PVO_PTEGIDX_ISSET(pvo)) { 2113152180Sgrehan panic("moea_pvo_to_pte: pvo %p has valid pte index in pvo " 211490643Sbenno "but no valid pte", pvo); 211590643Sbenno } 211690643Sbenno 211790643Sbenno if ((pt->pte_hi ^ (pvo->pvo_pte.pte_hi & ~PTE_VALID)) == PTE_VALID) { 211890643Sbenno if ((pvo->pvo_pte.pte_hi & PTE_VALID) == 0) { 2119152180Sgrehan panic("moea_pvo_to_pte: pvo %p has valid pte in " 2120152180Sgrehan "moea_pteg_table %p but invalid in pvo", pvo, pt); 212177957Sbenno } 212290643Sbenno 212390643Sbenno if (((pt->pte_lo ^ pvo->pvo_pte.pte_lo) & ~(PTE_CHG|PTE_REF)) 212490643Sbenno != 0) { 2125152180Sgrehan panic("moea_pvo_to_pte: pvo %p pte does not match " 2126152180Sgrehan "pte %p in moea_pteg_table", pvo, pt); 212790643Sbenno } 212890643Sbenno 212990643Sbenno return (pt); 213077957Sbenno } 213177957Sbenno 213290643Sbenno if (pvo->pvo_pte.pte_hi & PTE_VALID) { 2133152180Sgrehan panic("moea_pvo_to_pte: pvo %p has invalid pte %p in " 2134152180Sgrehan "moea_pteg_table but valid in pvo", pvo, pt); 213590643Sbenno } 213677957Sbenno 213790643Sbenno return (NULL); 213877957Sbenno} 213978880Sbenno 214078880Sbenno/* 214190643Sbenno * XXX: THIS STUFF SHOULD BE IN pte.c? 214278880Sbenno */ 214390643Sbennoint 2144152180Sgrehanmoea_pte_spill(vm_offset_t addr) 214578880Sbenno{ 214690643Sbenno struct pvo_entry *source_pvo, *victim_pvo; 214790643Sbenno struct pvo_entry *pvo; 214890643Sbenno int ptegidx, i, j; 214990643Sbenno u_int sr; 215090643Sbenno struct pteg *pteg; 215190643Sbenno struct pte *pt; 215278880Sbenno 2153152180Sgrehan moea_pte_spills++; 215490643Sbenno 215594836Sbenno sr = mfsrin(addr); 215690643Sbenno ptegidx = va_to_pteg(sr, addr); 215790643Sbenno 215878880Sbenno /* 215990643Sbenno * Have to substitute some entry. Use the primary hash for this. 216090643Sbenno * Use low bits of timebase as random generator. 216178880Sbenno */ 2162152180Sgrehan pteg = &moea_pteg_table[ptegidx]; 2163152180Sgrehan mtx_lock(&moea_table_mutex); 216490643Sbenno __asm __volatile("mftb %0" : "=r"(i)); 216590643Sbenno i &= 7; 216690643Sbenno pt = &pteg->pt[i]; 216778880Sbenno 216890643Sbenno source_pvo = NULL; 216990643Sbenno victim_pvo = NULL; 2170152180Sgrehan LIST_FOREACH(pvo, &moea_pvo_table[ptegidx], pvo_olink) { 217178880Sbenno /* 217290643Sbenno * We need to find a pvo entry for this address. 217378880Sbenno */ 2174152180Sgrehan MOEA_PVO_CHECK(pvo); 217590643Sbenno if (source_pvo == NULL && 2176152180Sgrehan moea_pte_match(&pvo->pvo_pte, sr, addr, 217790643Sbenno pvo->pvo_pte.pte_hi & PTE_HID)) { 217890643Sbenno /* 217990643Sbenno * Now found an entry to be spilled into the pteg. 218090643Sbenno * The PTE is now valid, so we know it's active.
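 * (Editor's note: this is the overflow path at work; a pvo that was
 * evicted earlier for lack of a PTE slot is re-inserted on demand
 * when the fault arrives, which is why moea_pte_overflow is
 * decremented on success below.)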
218190643Sbenno */ 2182152180Sgrehan j = moea_pte_insert(ptegidx, &pvo->pvo_pte); 218378880Sbenno 218490643Sbenno if (j >= 0) { 218590643Sbenno PVO_PTEGIDX_SET(pvo, j); 2186152180Sgrehan moea_pte_overflow--; 2187152180Sgrehan MOEA_PVO_CHECK(pvo); 2188152180Sgrehan mtx_unlock(&moea_table_mutex); 218990643Sbenno return (1); 219090643Sbenno } 219190643Sbenno 219290643Sbenno source_pvo = pvo; 219390643Sbenno 219490643Sbenno if (victim_pvo != NULL) 219590643Sbenno break; 219690643Sbenno } 219790643Sbenno 219878880Sbenno /* 219990643Sbenno * We also need the pvo entry of the victim we are replacing 220090643Sbenno * so save the R & C bits of the PTE. 220178880Sbenno */ 220290643Sbenno if ((pt->pte_hi & PTE_HID) == 0 && victim_pvo == NULL && 2203152180Sgrehan moea_pte_compare(pt, &pvo->pvo_pte)) { 220490643Sbenno victim_pvo = pvo; 220590643Sbenno if (source_pvo != NULL) 220690643Sbenno break; 220790643Sbenno } 220890643Sbenno } 220978880Sbenno 2210134535Salc if (source_pvo == NULL) { 2211152180Sgrehan mtx_unlock(&moea_table_mutex); 221290643Sbenno return (0); 2213134535Salc } 221490643Sbenno 221590643Sbenno if (victim_pvo == NULL) { 221690643Sbenno if ((pt->pte_hi & PTE_HID) == 0) 2217152180Sgrehan panic("moea_pte_spill: victim p-pte (%p) has no pvo " 221890643Sbenno "entry", pt); 221990643Sbenno 222078880Sbenno /* 222190643Sbenno * If this is a secondary PTE, we need to search its primary 222290643Sbenno * pvo bucket for the matching PVO. 222378880Sbenno */ 2224152180Sgrehan LIST_FOREACH(pvo, &moea_pvo_table[ptegidx ^ moea_pteg_mask], 222590643Sbenno pvo_olink) { 2226152180Sgrehan MOEA_PVO_CHECK(pvo); 222790643Sbenno /* 222890643Sbenno * We also need the pvo entry of the victim we are 222990643Sbenno * replacing so save the R & C bits of the PTE. 223090643Sbenno */ 2231152180Sgrehan if (moea_pte_compare(pt, &pvo->pvo_pte)) { 223290643Sbenno victim_pvo = pvo; 223390643Sbenno break; 223490643Sbenno } 223590643Sbenno } 223678880Sbenno 223790643Sbenno if (victim_pvo == NULL) 2238152180Sgrehan panic("moea_pte_spill: victim s-pte (%p) has no pvo " 223990643Sbenno "entry", pt); 224090643Sbenno } 224178880Sbenno 224290643Sbenno /* 224390643Sbenno * We are invalidating the TLB entry for the EA we are replacing even 224490643Sbenno * though it's valid. If we don't, we lose any ref/chg bit changes 224590643Sbenno * contained in the TLB entry. 224690643Sbenno */ 224790643Sbenno source_pvo->pvo_pte.pte_hi &= ~PTE_HID; 224878880Sbenno 2249152180Sgrehan moea_pte_unset(pt, &victim_pvo->pvo_pte, victim_pvo->pvo_vaddr); 2250152180Sgrehan moea_pte_set(pt, &source_pvo->pvo_pte); 225190643Sbenno 225290643Sbenno PVO_PTEGIDX_CLR(victim_pvo); 225390643Sbenno PVO_PTEGIDX_SET(source_pvo, i); 2254152180Sgrehan moea_pte_replacements++; 225590643Sbenno 2256152180Sgrehan MOEA_PVO_CHECK(victim_pvo); 2257152180Sgrehan MOEA_PVO_CHECK(source_pvo); 225890643Sbenno 2259152180Sgrehan mtx_unlock(&moea_table_mutex); 226090643Sbenno return (1); 226190643Sbenno} 226290643Sbenno 226390643Sbennostatic int 2264152180Sgrehanmoea_pte_insert(u_int ptegidx, struct pte *pvo_pt) 226590643Sbenno{ 226690643Sbenno struct pte *pt; 226790643Sbenno int i; 226890643Sbenno 226990643Sbenno /* 227090643Sbenno * First try primary hash.
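 * (Editor's note: if all eight primary slots are valid, the code
 * falls through to the secondary group, which is simply
 * ptegidx ^ moea_pteg_mask with PTE_HID set so that lookups know
 * which hash function was used.)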
227190643Sbenno */ 2272152180Sgrehan for (pt = moea_pteg_table[ptegidx].pt, i = 0; i < 8; i++, pt++) { 227390643Sbenno if ((pt->pte_hi & PTE_VALID) == 0) { 227490643Sbenno pvo_pt->pte_hi &= ~PTE_HID; 2275152180Sgrehan moea_pte_set(pt, pvo_pt); 227690643Sbenno return (i); 227778880Sbenno } 227890643Sbenno } 227978880Sbenno 228090643Sbenno /* 228190643Sbenno * Now try secondary hash. 228290643Sbenno */ 2283152180Sgrehan ptegidx ^= moea_pteg_mask; 2285152180Sgrehan for (pt = moea_pteg_table[ptegidx].pt, i = 0; i < 8; i++, pt++) { 228690643Sbenno if ((pt->pte_hi & PTE_VALID) == 0) { 228790643Sbenno pvo_pt->pte_hi |= PTE_HID; 2288152180Sgrehan moea_pte_set(pt, pvo_pt); 228990643Sbenno return (i); 229090643Sbenno } 229190643Sbenno } 229278880Sbenno 2293152180Sgrehan panic("moea_pte_insert: overflow"); 229490643Sbenno return (-1); 229578880Sbenno} 229684921Sbenno 229790643Sbennostatic boolean_t 2298152180Sgrehanmoea_query_bit(vm_page_t m, int ptebit) 229984921Sbenno{ 230090643Sbenno struct pvo_entry *pvo; 230190643Sbenno struct pte *pt; 230284921Sbenno 2303123560Sgrehan#if 0 2304152180Sgrehan if (moea_attr_fetch(m) & ptebit) 230590643Sbenno return (TRUE); 2306123560Sgrehan#endif 230784921Sbenno 230890643Sbenno LIST_FOREACH(pvo, vm_page_to_pvoh(m), pvo_vlink) { 2309152180Sgrehan MOEA_PVO_CHECK(pvo); /* sanity check */ 231084921Sbenno 231190643Sbenno /* 231290643Sbenno * See if we saved the bit off. If so, cache it and return 231390643Sbenno * success. 231490643Sbenno */ 231590643Sbenno if (pvo->pvo_pte.pte_lo & ptebit) { 2316152180Sgrehan moea_attr_save(m, ptebit); 2317152180Sgrehan MOEA_PVO_CHECK(pvo); /* sanity check */ 231890643Sbenno return (TRUE); 231990643Sbenno } 232090643Sbenno } 232184921Sbenno 232290643Sbenno /* 232390643Sbenno * No luck, now go through the hard part of looking at the PTEs 232490643Sbenno * themselves. Sync so that any pending REF/CHG bits are flushed to 232590643Sbenno * the PTEs. 232690643Sbenno */ 232790643Sbenno SYNC(); 232890643Sbenno LIST_FOREACH(pvo, vm_page_to_pvoh(m), pvo_vlink) { 2329152180Sgrehan MOEA_PVO_CHECK(pvo); /* sanity check */ 233090643Sbenno 233190643Sbenno /* 233290643Sbenno * See if this pvo has a valid PTE. If so, fetch the 233390643Sbenno * REF/CHG bits from the valid PTE. If the appropriate 233490643Sbenno * ptebit is set, cache it and return success. 233590643Sbenno */ 2336152180Sgrehan pt = moea_pvo_to_pte(pvo, -1); 233790643Sbenno if (pt != NULL) { 2338152180Sgrehan moea_pte_synch(pt, &pvo->pvo_pte); 233990643Sbenno if (pvo->pvo_pte.pte_lo & ptebit) { 2340152180Sgrehan moea_attr_save(m, ptebit); 2341152180Sgrehan MOEA_PVO_CHECK(pvo); /* sanity check */ 234290643Sbenno return (TRUE); 234390643Sbenno } 234490643Sbenno } 234584921Sbenno } 234684921Sbenno 2347123354Sgallatin return (FALSE); 234884921Sbenno} 234990643Sbenno 2350110172Sgrehanstatic u_int 2351152180Sgrehanmoea_clear_bit(vm_page_t m, int ptebit, int *origbit) 235290643Sbenno{ 2353110172Sgrehan u_int count; 235490643Sbenno struct pvo_entry *pvo; 235590643Sbenno struct pte *pt; 235690643Sbenno int rv; 235790643Sbenno 235890643Sbenno /* 235990643Sbenno * Clear the cached value. 236090643Sbenno */ 2361152180Sgrehan rv = moea_attr_fetch(m); 2362152180Sgrehan moea_attr_clear(m, ptebit); 236390643Sbenno 236490643Sbenno /* 236590643Sbenno * Sync so that any pending REF/CHG bits are flushed to the PTEs (so 236690643Sbenno * we can reset the right ones).
Note that since the pvo entries and 236790643Sbenno * list heads are accessed via BAT0 and are never placed in the page 236890643Sbenno * table, we don't have to worry about further accesses setting the 236990643Sbenno * REF/CHG bits. 237090643Sbenno */ 237190643Sbenno SYNC(); 237290643Sbenno 237390643Sbenno /* 237490643Sbenno * For each pvo entry, clear the pvo's ptebit. If this pvo has a 237590643Sbenno * valid pte clear the ptebit from the valid pte. 237690643Sbenno */ 2377110172Sgrehan count = 0; 237890643Sbenno LIST_FOREACH(pvo, vm_page_to_pvoh(m), pvo_vlink) { 2379152180Sgrehan MOEA_PVO_CHECK(pvo); /* sanity check */ 2380152180Sgrehan pt = moea_pvo_to_pte(pvo, -1); 238190643Sbenno if (pt != NULL) { 2382152180Sgrehan moea_pte_synch(pt, &pvo->pvo_pte); 2383110172Sgrehan if (pvo->pvo_pte.pte_lo & ptebit) { 2384110172Sgrehan count++; 2385152180Sgrehan moea_pte_clear(pt, PVO_VADDR(pvo), ptebit); 2386110172Sgrehan } 238790643Sbenno } 238890643Sbenno rv |= pvo->pvo_pte.pte_lo; 238990643Sbenno pvo->pvo_pte.pte_lo &= ~ptebit; 2390152180Sgrehan MOEA_PVO_CHECK(pvo); /* sanity check */ 239190643Sbenno } 239290643Sbenno 2393110172Sgrehan if (origbit != NULL) { 2394110172Sgrehan *origbit = rv; 2395110172Sgrehan } 2396110172Sgrehan 2397110172Sgrehan return (count); 239890643Sbenno} 239999038Sbenno 240099038Sbenno/* 2401103604Sgrehan * Return true if the physical range is encompassed by the battable[idx] 2402103604Sgrehan */ 2403103604Sgrehanstatic int 2404152180Sgrehanmoea_bat_mapped(int idx, vm_offset_t pa, vm_size_t size) 2405103604Sgrehan{ 2406103604Sgrehan u_int prot; 2407103604Sgrehan u_int32_t start; 2408103604Sgrehan u_int32_t end; 2409103604Sgrehan u_int32_t bat_ble; 2410103604Sgrehan 2411103604Sgrehan /* 2412103604Sgrehan * Return immediately if not a valid mapping 2413103604Sgrehan */ 2414103604Sgrehan if (!(battable[idx].batu & BAT_Vs)) 2415103604Sgrehan return (EINVAL); 2416103604Sgrehan 2417103604Sgrehan /* 2418103604Sgrehan * The BAT entry must be cache-inhibited, guarded, and r/w 2419103604Sgrehan * so it can function as an i/o page 2420103604Sgrehan */ 2421103604Sgrehan prot = battable[idx].batl & (BAT_I|BAT_G|BAT_PP_RW); 2422103604Sgrehan if (prot != (BAT_I|BAT_G|BAT_PP_RW)) 2423103604Sgrehan return (EPERM); 2424103604Sgrehan 2425103604Sgrehan /* 2426103604Sgrehan * The address should be within the BAT range. Assume that the 2427103604Sgrehan * start address in the BAT has the correct alignment (thus 2428103604Sgrehan * not requiring masking) 2429103604Sgrehan */ 2430103604Sgrehan start = battable[idx].batl & BAT_PBS; 2431103604Sgrehan bat_ble = (battable[idx].batu & ~(BAT_EBS)) | 0x03; 2432103604Sgrehan end = start | (bat_ble << 15) | 0x7fff; 2433103604Sgrehan 2434103604Sgrehan if ((pa < start) || ((pa + size) > end)) 2435103604Sgrehan return (ERANGE); 2436103604Sgrehan 2437103604Sgrehan return (0); 2438103604Sgrehan} 2439103604Sgrehan 2440152180Sgrehanboolean_t 2441152180Sgrehanmoea_dev_direct_mapped(mmu_t mmu, vm_offset_t pa, vm_size_t size) 2442133855Sssouhlal{ 2443133855Sssouhlal int i; 2444103604Sgrehan 2445133855Sssouhlal /* 2446133855Sssouhlal * This currently does not work for entries that 2447133855Sssouhlal * overlap 256M BAT segments.
2448133855Sssouhlal */ 2449133855Sssouhlal 2450133855Sssouhlal for (i = 0; i < 16; i++) 2451152180Sgrehan if (moea_bat_mapped(i, pa, size) == 0) 2452133855Sssouhlal return (0); 2453133855Sssouhlal 2454133855Sssouhlal return (EFAULT); 2455133855Sssouhlal} 2456133855Sssouhlal 2457103604Sgrehan/* 245899038Sbenno * Map a set of physical memory pages into the kernel virtual 245999038Sbenno * address space. Return a pointer to where it is mapped. This 246099038Sbenno * routine is intended to be used for mapping device memory, 246199038Sbenno * NOT real memory. 246299038Sbenno */ 246399038Sbennovoid * 2464152180Sgrehanmoea_mapdev(mmu_t mmu, vm_offset_t pa, vm_size_t size) 246599038Sbenno{ 2466103604Sgrehan vm_offset_t va, tmpva, ppa, offset; 2467103604Sgrehan int i; 2468103604Sgrehan 2469103604Sgrehan ppa = trunc_page(pa); 247099038Sbenno offset = pa & PAGE_MASK; 247199038Sbenno size = roundup(offset + size, PAGE_SIZE); 247299038Sbenno 247399038Sbenno GIANT_REQUIRED; 247499038Sbenno 2475103604Sgrehan /* 2476103604Sgrehan * If the physical address lies within a valid BAT table entry, 2477103604Sgrehan * return the 1:1 mapping. This currently doesn't work 2478103604Sgrehan * for regions that overlap 256M BAT segments. 2479103604Sgrehan */ 2480103604Sgrehan for (i = 0; i < 16; i++) { 2481152180Sgrehan if (moea_bat_mapped(i, pa, size) == 0) 2482103604Sgrehan return ((void *)pa); 2483103604Sgrehan } 2484103604Sgrehan 2485118365Salc va = kmem_alloc_nofault(kernel_map, size); 248699038Sbenno if (!va) 2487152180Sgrehan panic("moea_mapdev: Couldn't alloc kernel virtual memory"); 248899038Sbenno 248999038Sbenno for (tmpva = va; size > 0;) { 2490152180Sgrehan moea_kenter(mmu, tmpva, ppa); 249199038Sbenno TLBIE(tmpva); /* XXX or should it be invalidate-all ? */ 249299038Sbenno size -= PAGE_SIZE; 249399038Sbenno tmpva += PAGE_SIZE; 2494103604Sgrehan ppa += PAGE_SIZE; 249599038Sbenno } 249699038Sbenno 249799038Sbenno return ((void *)(va + offset)); 249899038Sbenno} 249999038Sbenno 250099038Sbennovoid 2501152180Sgrehanmoea_unmapdev(mmu_t mmu, vm_offset_t va, vm_size_t size) 250299038Sbenno{ 250399038Sbenno vm_offset_t base, offset; 250499038Sbenno 2505103604Sgrehan /* 2506103604Sgrehan * If this is outside kernel virtual space, then it's a 2507103604Sgrehan * battable entry and doesn't require unmapping 2508103604Sgrehan */ 2509103604Sgrehan if ((va >= VM_MIN_KERNEL_ADDRESS) && (va <= VM_MAX_KERNEL_ADDRESS)) { 2510103604Sgrehan base = trunc_page(va); 2511103604Sgrehan offset = va & PAGE_MASK; 2512103604Sgrehan size = roundup(offset + size, PAGE_SIZE); 2513103604Sgrehan kmem_free(kernel_map, base, size); 2514103604Sgrehan } 251599038Sbenno} 2516
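#if 0
/*
 * Editor's illustrative sketch, not part of the original file: driving
 * the mapdev/unmapdev pair above.  EXAMPLE_IO_BASE and EXAMPLE_IO_SIZE
 * are hypothetical device constants; real consumers go through
 * pmap_mapdev()/pmap_unmapdev().
 */
static void
example_map_device(mmu_t mmu)
{
	void *regs;

	/* Uncached mapping; may be a 1:1 BAT address, costing no KVA. */
	regs = moea_mapdev(mmu, EXAMPLE_IO_BASE, EXAMPLE_IO_SIZE);

	/* ... poke device registers through 'regs' ... */

	/* unmapdev only releases KVA for non-BAT mappings. */
	moea_unmapdev(mmu, (vm_offset_t)regs, EXAMPLE_IO_SIZE);
}
#endif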