/*-
 * Copyright (c) 2001 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Matt Thomas <matt@3am-software.com> of Allegro Networks, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *        This product includes software developed by the NetBSD
 *        Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */
/*-
 * Copyright (C) 1995, 1996 Wolfgang Solfrank.
 * Copyright (C) 1995, 1996 TooLs GmbH.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *        This product includes software developed by TooLs GmbH.
 * 4. The name of TooLs GmbH may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY TOOLS GMBH ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL TOOLS GMBH BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
 * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
 * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * $NetBSD: pmap.c,v 1.28 2000/03/26 20:42:36 kleink Exp $
 */
/*-
 * Copyright (C) 2001 Benno Rice.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY Benno Rice ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL TOOLS GMBH BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
 * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
 * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: head/sys/powerpc/aim/mmu_oea64.c 248084 2013-03-09 02:32:23Z attilio $");

/*
 * Manages physical address maps.
 *
 * Since the information managed by this module is also stored by the
 * logical address mapping module, this module may throw away valid virtual
 * to physical mappings at almost any time.  However, invalidations of
 * mappings must be done as requested.
 *
 * In order to cope with hardware architectures which make virtual to
 * physical map invalidates expensive, this module may delay invalidate
 * reduced protection operations until such time as they are actually
 * necessary.  This module is given full information as to which processors
 * are currently using which maps, and to when physical maps must be made
 * correct.
 */

#include "opt_compat.h"
#include "opt_kstack_pages.h"

#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/queue.h>
#include <sys/cpuset.h>
#include <sys/ktr.h>
#include <sys/lock.h>
#include <sys/msgbuf.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/rwlock.h>
#include <sys/sched.h>
#include <sys/sysctl.h>
#include <sys/systm.h>
#include <sys/vmmeter.h>

#include <sys/kdb.h>

#include <dev/ofw/openfirm.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/vm_kern.h>
#include <vm/vm_page.h>
#include <vm/vm_map.h>
#include <vm/vm_object.h>
#include <vm/vm_extern.h>
#include <vm/vm_pageout.h>
#include <vm/uma.h>

#include <machine/_inttypes.h>
#include <machine/cpu.h>
#include <machine/platform.h>
#include <machine/frame.h>
#include <machine/md_var.h>
#include <machine/psl.h>
#include <machine/bat.h>
#include <machine/hid.h>
#include <machine/pte.h>
#include <machine/sr.h>
#include <machine/trap.h>
#include <machine/mmuvar.h>

#include "mmu_oea64.h"
#include "mmu_if.h"
#include "moea64_if.h"

void moea64_release_vsid(uint64_t vsid);
uintptr_t moea64_get_unique_vsid(void);

#define DISABLE_TRANS(msr)      msr = mfmsr(); mtmsr(msr & ~PSL_DR)
#define ENABLE_TRANS(msr)       mtmsr(msr)

#define VSID_MAKE(sr, hash)     ((sr) | (((hash) & 0xfffff) << 4))
#define VSID_TO_HASH(vsid)      (((vsid) >> 4) & 0xfffff)
#define VSID_HASH_MASK          0x0000007fffffffffULL

/*
 * Locking semantics:
 * -- Read lock: if no modifications are being made to either the PVO lists
 *    or page table or if any modifications being made result in internal
 *    changes (e.g. wiring, protection) such that the existence of the PVOs
 *    is unchanged and they remain associated with the same pmap (in which
 *    case the changes should be protected by the pmap lock)
 * -- Write lock: required if PTEs/PVOs are being inserted or removed.
 */

#define LOCK_TABLE_RD()         rw_rlock(&moea64_table_lock)
#define UNLOCK_TABLE_RD()       rw_runlock(&moea64_table_lock)
#define LOCK_TABLE_WR()         rw_wlock(&moea64_table_lock)
#define UNLOCK_TABLE_WR()       rw_wunlock(&moea64_table_lock)

struct ofw_map {
        cell_t  om_va;
        cell_t  om_len;
        cell_t  om_pa_hi;
        cell_t  om_pa_lo;
        cell_t  om_mode;
};

/*
 * Map of physical memory regions.
 */
static struct   mem_region *regions;
static struct   mem_region *pregions;
static u_int    phys_avail_count;
static int      regions_sz, pregions_sz;

extern void bs_remap_earlyboot(void);

/*
 * Lock for the pteg and pvo tables.
 */
struct rwlock   moea64_table_lock;
struct mtx      moea64_slb_mutex;

/*
 * PTEG data.
 */
u_int           moea64_pteg_count;
u_int           moea64_pteg_mask;

/*
 * PVO data.
 */
struct  pvo_head *moea64_pvo_table;     /* pvo entries by pteg index */

uma_zone_t      moea64_upvo_zone; /* zone for pvo entries for unmanaged pages */
uma_zone_t      moea64_mpvo_zone; /* zone for pvo entries for managed pages */

#define BPVO_POOL_SIZE  327680
static struct   pvo_entry *moea64_bpvo_pool;
static int      moea64_bpvo_pool_index = 0;

#define VSID_NBPW       (sizeof(u_int32_t) * 8)
#ifdef __powerpc64__
#define NVSIDS          (NPMAPS * 16)
#define VSID_HASHMASK   0xffffffffUL
#else
#define NVSIDS          NPMAPS
#define VSID_HASHMASK   0xfffffUL
#endif
static u_int    moea64_vsid_bitmap[NVSIDS / VSID_NBPW];

static boolean_t moea64_initialized = FALSE;

/*
 * Statistics.
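 *
 * These counters track PTE and PVO activity; they are exported read-only
 * through the machdep sysctl tree (see the SYSCTL_INT() declarations
 * below) so the hash-table behaviour can be inspected at runtime.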
 */
u_int   moea64_pte_valid = 0;
u_int   moea64_pte_overflow = 0;
u_int   moea64_pvo_entries = 0;
u_int   moea64_pvo_enter_calls = 0;
u_int   moea64_pvo_remove_calls = 0;
SYSCTL_INT(_machdep, OID_AUTO, moea64_pte_valid, CTLFLAG_RD,
    &moea64_pte_valid, 0, "");
SYSCTL_INT(_machdep, OID_AUTO, moea64_pte_overflow, CTLFLAG_RD,
    &moea64_pte_overflow, 0, "");
SYSCTL_INT(_machdep, OID_AUTO, moea64_pvo_entries, CTLFLAG_RD,
    &moea64_pvo_entries, 0, "");
SYSCTL_INT(_machdep, OID_AUTO, moea64_pvo_enter_calls, CTLFLAG_RD,
    &moea64_pvo_enter_calls, 0, "");
SYSCTL_INT(_machdep, OID_AUTO, moea64_pvo_remove_calls, CTLFLAG_RD,
    &moea64_pvo_remove_calls, 0, "");

vm_offset_t     moea64_scratchpage_va[2];
struct pvo_entry *moea64_scratchpage_pvo[2];
uintptr_t       moea64_scratchpage_pte[2];
struct  mtx     moea64_scratchpage_mtx;

uint64_t        moea64_large_page_mask = 0;
int             moea64_large_page_size = 0;
int             moea64_large_page_shift = 0;

/*
 * PVO calls.
 */
static int      moea64_pvo_enter(mmu_t, pmap_t, uma_zone_t, struct pvo_head *,
                    vm_offset_t, vm_offset_t, uint64_t, int);
static void     moea64_pvo_remove(mmu_t, struct pvo_entry *);
static struct   pvo_entry *moea64_pvo_find_va(pmap_t, vm_offset_t);

/*
 * Utility routines.
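 *
 * (Query and clear of the referenced/changed bits across a page's
 * mappings, removal of kernel mappings, and instruction-cache
 * synchronization.)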
 */
static boolean_t moea64_query_bit(mmu_t, vm_page_t, u_int64_t);
static u_int    moea64_clear_bit(mmu_t, vm_page_t, u_int64_t);
static void     moea64_kremove(mmu_t, vm_offset_t);
static void     moea64_syncicache(mmu_t, pmap_t pmap, vm_offset_t va,
                    vm_offset_t pa, vm_size_t sz);

/*
 * Kernel MMU interface
 */
void moea64_change_wiring(mmu_t, pmap_t, vm_offset_t, boolean_t);
void moea64_clear_modify(mmu_t, vm_page_t);
void moea64_clear_reference(mmu_t, vm_page_t);
void moea64_copy_page(mmu_t, vm_page_t, vm_page_t);
void moea64_enter(mmu_t, pmap_t, vm_offset_t, vm_page_t, vm_prot_t, boolean_t);
void moea64_enter_object(mmu_t, pmap_t, vm_offset_t, vm_offset_t, vm_page_t,
    vm_prot_t);
void moea64_enter_quick(mmu_t, pmap_t, vm_offset_t, vm_page_t, vm_prot_t);
vm_paddr_t moea64_extract(mmu_t, pmap_t, vm_offset_t);
vm_page_t moea64_extract_and_hold(mmu_t, pmap_t, vm_offset_t, vm_prot_t);
void moea64_init(mmu_t);
boolean_t moea64_is_modified(mmu_t, vm_page_t);
boolean_t moea64_is_prefaultable(mmu_t, pmap_t, vm_offset_t);
boolean_t moea64_is_referenced(mmu_t, vm_page_t);
int moea64_ts_referenced(mmu_t, vm_page_t);
vm_offset_t moea64_map(mmu_t, vm_offset_t *, vm_paddr_t, vm_paddr_t, int);
boolean_t moea64_page_exists_quick(mmu_t, pmap_t, vm_page_t);
int moea64_page_wired_mappings(mmu_t, vm_page_t);
void moea64_pinit(mmu_t, pmap_t);
void moea64_pinit0(mmu_t, pmap_t);
void moea64_protect(mmu_t, pmap_t, vm_offset_t, vm_offset_t, vm_prot_t);
void moea64_qenter(mmu_t, vm_offset_t, vm_page_t *, int);
void moea64_qremove(mmu_t, vm_offset_t, int);
void moea64_release(mmu_t, pmap_t);
void moea64_remove(mmu_t, pmap_t, vm_offset_t, vm_offset_t);
void moea64_remove_pages(mmu_t, pmap_t);
void moea64_remove_all(mmu_t, vm_page_t);
void moea64_remove_write(mmu_t, vm_page_t);
void moea64_zero_page(mmu_t, vm_page_t);
void moea64_zero_page_area(mmu_t, vm_page_t, int, int);
void moea64_zero_page_idle(mmu_t, vm_page_t);
void moea64_activate(mmu_t, struct thread *);
void moea64_deactivate(mmu_t, struct thread *);
void *moea64_mapdev(mmu_t, vm_paddr_t, vm_size_t);
void *moea64_mapdev_attr(mmu_t, vm_offset_t, vm_size_t, vm_memattr_t);
void moea64_unmapdev(mmu_t, vm_offset_t, vm_size_t);
vm_paddr_t moea64_kextract(mmu_t, vm_offset_t);
void moea64_page_set_memattr(mmu_t, vm_page_t m, vm_memattr_t ma);
void moea64_kenter_attr(mmu_t, vm_offset_t, vm_offset_t, vm_memattr_t ma);
void moea64_kenter(mmu_t, vm_offset_t, vm_paddr_t);
boolean_t moea64_dev_direct_mapped(mmu_t, vm_paddr_t, vm_size_t);
static void moea64_sync_icache(mmu_t, pmap_t, vm_offset_t, vm_size_t);
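
/*
 * The method table below binds the kernel MMU interface (mmu_if.m) to the
 * implementations declared above; MMU_DEF() then registers the result as
 * the "mmu_oea64_base" class that the CPU-specific 64-bit MMU modules
 * build on.
 */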
static mmu_method_t moea64_methods[] = {
        MMUMETHOD(mmu_change_wiring,    moea64_change_wiring),
        MMUMETHOD(mmu_clear_modify,     moea64_clear_modify),
        MMUMETHOD(mmu_clear_reference,  moea64_clear_reference),
        MMUMETHOD(mmu_copy_page,        moea64_copy_page),
        MMUMETHOD(mmu_enter,            moea64_enter),
        MMUMETHOD(mmu_enter_object,     moea64_enter_object),
        MMUMETHOD(mmu_enter_quick,      moea64_enter_quick),
        MMUMETHOD(mmu_extract,          moea64_extract),
        MMUMETHOD(mmu_extract_and_hold, moea64_extract_and_hold),
        MMUMETHOD(mmu_init,             moea64_init),
        MMUMETHOD(mmu_is_modified,      moea64_is_modified),
        MMUMETHOD(mmu_is_prefaultable,  moea64_is_prefaultable),
        MMUMETHOD(mmu_is_referenced,    moea64_is_referenced),
        MMUMETHOD(mmu_ts_referenced,    moea64_ts_referenced),
        MMUMETHOD(mmu_map,              moea64_map),
        MMUMETHOD(mmu_page_exists_quick,moea64_page_exists_quick),
        MMUMETHOD(mmu_page_wired_mappings,moea64_page_wired_mappings),
        MMUMETHOD(mmu_pinit,            moea64_pinit),
        MMUMETHOD(mmu_pinit0,           moea64_pinit0),
        MMUMETHOD(mmu_protect,          moea64_protect),
        MMUMETHOD(mmu_qenter,           moea64_qenter),
        MMUMETHOD(mmu_qremove,          moea64_qremove),
        MMUMETHOD(mmu_release,          moea64_release),
        MMUMETHOD(mmu_remove,           moea64_remove),
        MMUMETHOD(mmu_remove_pages,     moea64_remove_pages),
        MMUMETHOD(mmu_remove_all,       moea64_remove_all),
        MMUMETHOD(mmu_remove_write,     moea64_remove_write),
        MMUMETHOD(mmu_sync_icache,      moea64_sync_icache),
        MMUMETHOD(mmu_zero_page,        moea64_zero_page),
        MMUMETHOD(mmu_zero_page_area,   moea64_zero_page_area),
        MMUMETHOD(mmu_zero_page_idle,   moea64_zero_page_idle),
        MMUMETHOD(mmu_activate,         moea64_activate),
        MMUMETHOD(mmu_deactivate,       moea64_deactivate),
        MMUMETHOD(mmu_page_set_memattr, moea64_page_set_memattr),

        /* Internal interfaces */
        MMUMETHOD(mmu_mapdev,           moea64_mapdev),
        MMUMETHOD(mmu_mapdev_attr,      moea64_mapdev_attr),
        MMUMETHOD(mmu_unmapdev,         moea64_unmapdev),
        MMUMETHOD(mmu_kextract,         moea64_kextract),
        MMUMETHOD(mmu_kenter,           moea64_kenter),
        MMUMETHOD(mmu_kenter_attr,      moea64_kenter_attr),
        MMUMETHOD(mmu_dev_direct_mapped,moea64_dev_direct_mapped),

        { 0, 0 }
};

MMU_DEF(oea64_mmu, "mmu_oea64_base", moea64_methods, 0);

static __inline u_int
va_to_pteg(uint64_t vsid, vm_offset_t addr, int large)
{
        uint64_t hash;
        int shift;

        shift = large ? moea64_large_page_shift : ADDR_PIDX_SHFT;
        hash = (vsid & VSID_HASH_MASK) ^ (((uint64_t)addr & ADDR_PIDX) >>
            shift);
        return (hash & moea64_pteg_mask);
}

static __inline struct pvo_head *
vm_page_to_pvoh(vm_page_t m)
{

        return (&m->md.mdpg_pvoh);
}

static __inline void
moea64_pte_create(struct lpte *pt, uint64_t vsid, vm_offset_t va,
    uint64_t pte_lo, int flags)
{

        /*
         * Construct a PTE.  Default to IMB initially.  Valid bit only gets
         * set when the real pte is set in memory.
         *
         * Note: Don't set the valid bit for correct operation of tlb update.
         */
        pt->pte_hi = (vsid << LPTE_VSID_SHIFT) |
            (((uint64_t)(va & ADDR_PIDX) >> ADDR_API_SHFT64) & LPTE_API);

        if (flags & PVO_LARGE)
                pt->pte_hi |= LPTE_BIG;

        pt->pte_lo = pte_lo;
}

static __inline uint64_t
moea64_calc_wimg(vm_offset_t pa, vm_memattr_t ma)
{
        uint64_t pte_lo;
        int i;

        if (ma != VM_MEMATTR_DEFAULT) {
                switch (ma) {
                case VM_MEMATTR_UNCACHEABLE:
                        return (LPTE_I | LPTE_G);
                case VM_MEMATTR_WRITE_COMBINING:
                case VM_MEMATTR_WRITE_BACK:
                case VM_MEMATTR_PREFETCHABLE:
                        return (LPTE_I);
                case VM_MEMATTR_WRITE_THROUGH:
                        return (LPTE_W | LPTE_M);
                }
        }

        /*
         * Assume the page is cache inhibited and access is guarded unless
         * it's in our available memory array.
         */
        pte_lo = LPTE_I | LPTE_G;
        for (i = 0; i < pregions_sz; i++) {
                if ((pa >= pregions[i].mr_start) &&
                    (pa < (pregions[i].mr_start + pregions[i].mr_size))) {
                        pte_lo &= ~(LPTE_I | LPTE_G);
                        pte_lo |= LPTE_M;
                        break;
                }
        }

        return pte_lo;
}

/*
 * Quick sort callout for comparing memory regions.
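 * Sorts struct ofw_map entries by 64-bit physical address, comparing the
 * high word before the low word.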
 */
static int      om_cmp(const void *a, const void *b);

static int
om_cmp(const void *a, const void *b)
{
        const struct    ofw_map *mapa;
        const struct    ofw_map *mapb;

        mapa = a;
        mapb = b;
        if (mapa->om_pa_hi < mapb->om_pa_hi)
                return (-1);
        else if (mapa->om_pa_hi > mapb->om_pa_hi)
                return (1);
        else if (mapa->om_pa_lo < mapb->om_pa_lo)
                return (-1);
        else if (mapa->om_pa_lo > mapb->om_pa_lo)
                return (1);
        else
                return (0);
}

static void
moea64_add_ofw_mappings(mmu_t mmup, phandle_t mmu, size_t sz)
{
        struct ofw_map  translations[sz/sizeof(struct ofw_map)];
        register_t      msr;
        vm_offset_t     off;
        vm_paddr_t      pa_base;
        int             i;

        bzero(translations, sz);
        if (OF_getprop(mmu, "translations", translations, sz) == -1)
                panic("moea64_bootstrap: can't get ofw translations");

        CTR0(KTR_PMAP, "moea64_add_ofw_mappings: translations");
        sz /= sizeof(*translations);
        qsort(translations, sz, sizeof (*translations), om_cmp);

        for (i = 0; i < sz; i++) {
                CTR3(KTR_PMAP, "translation: pa=%#x va=%#x len=%#x",
                    (uint32_t)(translations[i].om_pa_lo), translations[i].om_va,
                    translations[i].om_len);

                if (translations[i].om_pa_lo % PAGE_SIZE)
                        panic("OFW translation not page-aligned!");

                pa_base = translations[i].om_pa_lo;

        #ifdef __powerpc64__
                pa_base += (vm_offset_t)translations[i].om_pa_hi << 32;
        #else
                if (translations[i].om_pa_hi)
                        panic("OFW translations above 32-bit boundary!");
        #endif

                /* Now enter the pages for this mapping */

                DISABLE_TRANS(msr);
                for (off = 0; off < translations[i].om_len; off += PAGE_SIZE) {
                        if (moea64_pvo_find_va(kernel_pmap,
                            translations[i].om_va + off) != NULL)
                                continue;

                        moea64_kenter(mmup, translations[i].om_va + off,
                            pa_base + off);
                }
                ENABLE_TRANS(msr);
        }
}

#ifdef __powerpc64__
static void
moea64_probe_large_page(void)
{
        uint16_t pvr = mfpvr() >> 16;

        switch (pvr) {
        case IBM970:
        case IBM970FX:
        case IBM970MP:
                powerpc_sync(); isync();
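                /*
                 * Large pages may be disabled through HID4 on the 970;
                 * clear the disable bit, bracketing the SPR write with
                 * the sync/isync sequence the 970 requires for HID
                 * updates.
                 */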
                mtspr(SPR_HID4, mfspr(SPR_HID4) & ~HID4_970_DISABLE_LG_PG);
                powerpc_sync(); isync();

                /* FALLTHROUGH */
        case IBMCELLBE:
                moea64_large_page_size = 0x1000000; /* 16 MB */
                moea64_large_page_shift = 24;
                break;
        default:
                moea64_large_page_size = 0;
        }

        moea64_large_page_mask = moea64_large_page_size - 1;
}

static void
moea64_bootstrap_slb_prefault(vm_offset_t va, int large)
{
        struct slb *cache;
        struct slb entry;
        uint64_t esid, slbe;
        uint64_t i;

        cache = PCPU_GET(slb);
        esid = va >> ADDR_SR_SHFT;
        slbe = (esid << SLBE_ESID_SHIFT) | SLBE_VALID;

        for (i = 0; i < 64; i++) {
                if (cache[i].slbe == (slbe | i))
                        return;
        }

        entry.slbe = slbe;
        entry.slbv = KERNEL_VSID(esid) << SLBV_VSID_SHIFT;
        if (large)
                entry.slbv |= SLBV_L;

        slb_insert_kernel(entry.slbe, entry.slbv);
}
#endif

static void
moea64_setup_direct_map(mmu_t mmup, vm_offset_t kernelstart,
    vm_offset_t kernelend)
{
        register_t msr;
        vm_paddr_t pa;
        vm_offset_t size, off;
        uint64_t pte_lo;
        int i;

        if (moea64_large_page_size == 0)
                hw_direct_map = 0;

        DISABLE_TRANS(msr);
        if (hw_direct_map) {
                LOCK_TABLE_WR();
                PMAP_LOCK(kernel_pmap);
                for (i = 0; i < pregions_sz; i++) {
                  for (pa = pregions[i].mr_start; pa < pregions[i].mr_start +
                     pregions[i].mr_size; pa += moea64_large_page_size) {
                        pte_lo = LPTE_M;

                        /*
                         * Set memory access as guarded if prefetch within
                         * the page could exit the available physmem area.
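                         * The G (guarded) bit inhibits speculative loads,
                         * so a large page overlapping the edge of physical
                         * memory cannot cause stray accesses beyond it.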
                         */
                        if (pa & moea64_large_page_mask) {
                                pa &= moea64_large_page_mask;
                                pte_lo |= LPTE_G;
                        }
                        if (pa + moea64_large_page_size >
                            pregions[i].mr_start + pregions[i].mr_size)
                                pte_lo |= LPTE_G;

                        moea64_pvo_enter(mmup, kernel_pmap, moea64_upvo_zone,
                            NULL, pa, pa, pte_lo,
                            PVO_WIRED | PVO_LARGE);
                  }
                }
                PMAP_UNLOCK(kernel_pmap);
                UNLOCK_TABLE_WR();
        } else {
                size = sizeof(struct pvo_head) * moea64_pteg_count;
                off = (vm_offset_t)(moea64_pvo_table);
                for (pa = off; pa < off + size; pa += PAGE_SIZE)
                        moea64_kenter(mmup, pa, pa);
                size = BPVO_POOL_SIZE*sizeof(struct pvo_entry);
                off = (vm_offset_t)(moea64_bpvo_pool);
                for (pa = off; pa < off + size; pa += PAGE_SIZE)
                        moea64_kenter(mmup, pa, pa);

                /*
                 * Map certain important things, like ourselves.
                 *
                 * NOTE: We do not map the exception vector space. That code is
                 * used only in real mode, and leaving it unmapped allows us to
                 * catch NULL pointer dereferences, instead of making NULL a
                 * valid address.
                 */

                for (pa = kernelstart & ~PAGE_MASK; pa < kernelend;
                    pa += PAGE_SIZE)
                        moea64_kenter(mmup, pa, pa);
        }
        ENABLE_TRANS(msr);
}

void
moea64_early_bootstrap(mmu_t mmup, vm_offset_t kernelstart, vm_offset_t kernelend)
{
        int             i, j;
        vm_size_t       physsz, hwphyssz;

#ifndef __powerpc64__
        /* We don't have a direct map since there is no BAT */
        hw_direct_map = 0;

        /* Make sure battable is zero, since we have no BAT */
        for (i = 0; i < 16; i++) {
                battable[i].batu = 0;
                battable[i].batl = 0;
        }
#else
        moea64_probe_large_page();

        /* Use a direct map if we have large page support */
        if (moea64_large_page_size > 0)
                hw_direct_map = 1;
        else
                hw_direct_map = 0;
#endif

        /* Get physical memory regions from firmware */
        mem_regions(&pregions, &pregions_sz, &regions, &regions_sz);
        CTR0(KTR_PMAP, "moea64_bootstrap: physical memory");

        if (sizeof(phys_avail)/sizeof(phys_avail[0]) < regions_sz)
                panic("moea64_bootstrap: phys_avail too small");

        phys_avail_count = 0;
        physsz = 0;
        hwphyssz = 0;
        TUNABLE_ULONG_FETCH("hw.physmem", (u_long *) &hwphyssz);
        for (i = 0, j = 0; i < regions_sz; i++, j += 2) {
                CTR3(KTR_PMAP, "region: %#x - %#x (%#x)", regions[i].mr_start,
                    regions[i].mr_start + regions[i].mr_size,
                    regions[i].mr_size);
                if (hwphyssz != 0 &&
                    (physsz + regions[i].mr_size) >= hwphyssz) {
                        if (physsz < hwphyssz) {
                                phys_avail[j] = regions[i].mr_start;
                                phys_avail[j + 1] = regions[i].mr_start +
                                    hwphyssz - physsz;
                                physsz = hwphyssz;
                                phys_avail_count++;
                        }
                        break;
                }
                phys_avail[j] = regions[i].mr_start;
                phys_avail[j + 1] = regions[i].mr_start + regions[i].mr_size;
                phys_avail_count++;
                physsz += regions[i].mr_size;
        }

        /* Check for overlap with the kernel and exception vectors */
        for (j = 0; j < 2*phys_avail_count; j+=2) {
                if (phys_avail[j] < EXC_LAST)
                        phys_avail[j] += EXC_LAST;

                if (kernelstart >= phys_avail[j] &&
                    kernelstart < phys_avail[j+1]) {
                        if (kernelend < phys_avail[j+1]) {
                                phys_avail[2*phys_avail_count] =
                                    (kernelend & ~PAGE_MASK) + PAGE_SIZE;
                                phys_avail[2*phys_avail_count + 1] =
                                    phys_avail[j+1];
                                phys_avail_count++;
                        }

                        phys_avail[j+1] = kernelstart & ~PAGE_MASK;
                }

                if (kernelend >= phys_avail[j] &&
                    kernelend < phys_avail[j+1]) {
                        if (kernelstart > phys_avail[j]) {
                                phys_avail[2*phys_avail_count] = phys_avail[j];
                                phys_avail[2*phys_avail_count + 1] =
                                    kernelstart & ~PAGE_MASK;
                                phys_avail_count++;
                        }

                        phys_avail[j] = (kernelend & ~PAGE_MASK) + PAGE_SIZE;
                }
        }

        physmem = btoc(physsz);

#ifdef PTEGCOUNT
        moea64_pteg_count = PTEGCOUNT;
#else
        moea64_pteg_count = 0x1000;

        while (moea64_pteg_count < physmem)
                moea64_pteg_count <<= 1;

        moea64_pteg_count >>= 1;
#endif /* PTEGCOUNT */
}

void
moea64_mid_bootstrap(mmu_t mmup, vm_offset_t kernelstart, vm_offset_t kernelend)
{
        vm_size_t       size;
        register_t      msr;
        int             i;

        /*
         * Set PTEG mask
         */
        moea64_pteg_mask = moea64_pteg_count - 1;

        /*
         * Allocate pv/overflow lists.
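         *
         * One list head is allocated per PTEG; mappings whose PTEs are
         * evicted from the hardware hash table remain discoverable through
         * these per-PTEG PVO lists.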
         */
        size = sizeof(struct pvo_head) * moea64_pteg_count;

        moea64_pvo_table = (struct pvo_head *)moea64_bootstrap_alloc(size,
            PAGE_SIZE);
        CTR1(KTR_PMAP, "moea64_bootstrap: PVO table at %p", moea64_pvo_table);

        DISABLE_TRANS(msr);
        for (i = 0; i < moea64_pteg_count; i++)
                LIST_INIT(&moea64_pvo_table[i]);
        ENABLE_TRANS(msr);

        /*
         * Initialize the lock that synchronizes access to the pteg and pvo
         * tables.
         */
        rw_init_flags(&moea64_table_lock, "pmap tables", RW_RECURSE);
        mtx_init(&moea64_slb_mutex, "SLB table", NULL, MTX_DEF);

        /*
         * Initialise the unmanaged pvo pool.
         */
        moea64_bpvo_pool = (struct pvo_entry *)moea64_bootstrap_alloc(
                BPVO_POOL_SIZE*sizeof(struct pvo_entry), 0);
        moea64_bpvo_pool_index = 0;

        /*
         * Make sure kernel vsid is allocated as well as VSID 0.
         */
        #ifndef __powerpc64__
        moea64_vsid_bitmap[(KERNEL_VSIDBITS & (NVSIDS - 1)) / VSID_NBPW]
                |= 1 << (KERNEL_VSIDBITS % VSID_NBPW);
        moea64_vsid_bitmap[0] |= 1;
        #endif

        /*
         * Initialize the kernel pmap (which is statically allocated).
         */
        #ifdef __powerpc64__
        for (i = 0; i < 64; i++) {
                pcpup->pc_slb[i].slbv = 0;
                pcpup->pc_slb[i].slbe = 0;
        }
        #else
        for (i = 0; i < 16; i++)
                kernel_pmap->pm_sr[i] = EMPTY_SEGMENT + i;
        #endif

        kernel_pmap->pmap_phys = kernel_pmap;
        CPU_FILL(&kernel_pmap->pm_active);
        RB_INIT(&kernel_pmap->pmap_pvo);

        PMAP_LOCK_INIT(kernel_pmap);

        /*
         * Now map in all the other buffers we allocated earlier
         */

        moea64_setup_direct_map(mmup, kernelstart, kernelend);
}

void
moea64_late_bootstrap(mmu_t mmup, vm_offset_t kernelstart, vm_offset_t kernelend)
{
        ihandle_t       mmui;
        phandle_t       chosen;
        phandle_t       mmu;
        size_t          sz;
        int             i;
        vm_offset_t     pa, va;
        void            *dpcpu;

        /*
         * Set up the Open Firmware pmap and add its mappings if not in real
         * mode.
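         * The firmware's "translations" property enumerates the virtual
         * ranges Open Firmware expects to stay mapped; they are re-entered
         * into the kernel pmap below.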
         */

        chosen = OF_finddevice("/chosen");
        if (chosen != -1 && OF_getprop(chosen, "mmu", &mmui, 4) != -1) {
                mmu = OF_instance_to_package(mmui);
                if (mmu == -1 || (sz = OF_getproplen(mmu, "translations")) == -1)
                        sz = 0;
                if (sz > 6144 /* tmpstksz - 2 KB headroom */)
                        panic("moea64_bootstrap: too many ofw translations");

                if (sz > 0)
                        moea64_add_ofw_mappings(mmup, mmu, sz);
        }

        /*
         * Calculate the last available physical address.
         */
        for (i = 0; phys_avail[i + 2] != 0; i += 2)
                ;
        Maxmem = powerpc_btop(phys_avail[i + 1]);

        /*
         * Initialize MMU and remap early physical mappings
         */
        MMU_CPU_BOOTSTRAP(mmup,0);
        mtmsr(mfmsr() | PSL_DR | PSL_IR);
        pmap_bootstrapped++;
        bs_remap_earlyboot();

        /*
         * Set the start and end of kva.
         */
        virtual_avail = VM_MIN_KERNEL_ADDRESS;
        virtual_end = VM_MAX_SAFE_KERNEL_ADDRESS;

        /*
         * Map the entire KVA range into the SLB. We must not fault there.
         */
        #ifdef __powerpc64__
        for (va = virtual_avail; va < virtual_end; va += SEGMENT_LENGTH)
                moea64_bootstrap_slb_prefault(va, 0);
        #endif

        /*
         * Figure out how far we can extend virtual_end into segment 16
         * without running into existing mappings. Segment 16 is guaranteed
         * to contain neither RAM nor devices (at least on Apple hardware),
         * but will generally contain some OFW mappings we should not
         * step on.
         */

        #ifndef __powerpc64__ /* KVA is in high memory on PPC64 */
        PMAP_LOCK(kernel_pmap);
        while (virtual_end < VM_MAX_KERNEL_ADDRESS &&
            moea64_pvo_find_va(kernel_pmap, virtual_end+1) == NULL)
                virtual_end += PAGE_SIZE;
        PMAP_UNLOCK(kernel_pmap);
        #endif

        /*
         * Allocate a kernel stack with a guard page for thread0 and map it
         * into the kernel page map.
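         * The guard pages below the stack stay unmapped, so overflowing
         * the stack faults instead of silently corrupting whatever would
         * otherwise be mapped there.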
         */
        pa = moea64_bootstrap_alloc(KSTACK_PAGES * PAGE_SIZE, PAGE_SIZE);
        va = virtual_avail + KSTACK_GUARD_PAGES * PAGE_SIZE;
        virtual_avail = va + KSTACK_PAGES * PAGE_SIZE;
        CTR2(KTR_PMAP, "moea64_bootstrap: kstack0 at %#x (%#x)", pa, va);
        thread0.td_kstack = va;
        thread0.td_kstack_pages = KSTACK_PAGES;
        for (i = 0; i < KSTACK_PAGES; i++) {
                moea64_kenter(mmup, va, pa);
                pa += PAGE_SIZE;
                va += PAGE_SIZE;
        }

        /*
         * Allocate virtual address space for the message buffer.
         */
        pa = msgbuf_phys = moea64_bootstrap_alloc(msgbufsize, PAGE_SIZE);
        msgbufp = (struct msgbuf *)virtual_avail;
        va = virtual_avail;
        virtual_avail += round_page(msgbufsize);
        while (va < virtual_avail) {
                moea64_kenter(mmup, va, pa);
                pa += PAGE_SIZE;
                va += PAGE_SIZE;
        }

        /*
         * Allocate virtual address space for the dynamic percpu area.
         */
        pa = moea64_bootstrap_alloc(DPCPU_SIZE, PAGE_SIZE);
        dpcpu = (void *)virtual_avail;
        va = virtual_avail;
        virtual_avail += DPCPU_SIZE;
        while (va < virtual_avail) {
                moea64_kenter(mmup, va, pa);
                pa += PAGE_SIZE;
                va += PAGE_SIZE;
        }
        dpcpu_init(dpcpu, 0);

        /*
         * Allocate some things for page zeroing. We put this directly
         * in the page table, marked with LPTE_LOCKED, to keep the PVO
         * book-keeping and other parts of the VM system from even
         * knowing that this hack exists.
         */

        if (!hw_direct_map) {
                mtx_init(&moea64_scratchpage_mtx, "pvo zero page", NULL,
                    MTX_DEF);
                for (i = 0; i < 2; i++) {
                        moea64_scratchpage_va[i] = (virtual_end+1) - PAGE_SIZE;
                        virtual_end -= PAGE_SIZE;

                        moea64_kenter(mmup, moea64_scratchpage_va[i], 0);

                        moea64_scratchpage_pvo[i] = moea64_pvo_find_va(
                            kernel_pmap, (vm_offset_t)moea64_scratchpage_va[i]);
                        LOCK_TABLE_RD();
                        moea64_scratchpage_pte[i] = MOEA64_PVO_TO_PTE(
                            mmup, moea64_scratchpage_pvo[i]);
                        moea64_scratchpage_pvo[i]->pvo_pte.lpte.pte_hi
                            |= LPTE_LOCKED;
                        MOEA64_PTE_CHANGE(mmup, moea64_scratchpage_pte[i],
                            &moea64_scratchpage_pvo[i]->pvo_pte.lpte,
                            moea64_scratchpage_pvo[i]->pvo_vpn);
                        UNLOCK_TABLE_RD();
                }
        }
}

/*
 * Activate a user pmap. The pmap must be activated before its address
 * space can be accessed in any way.
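 *
 * On 64-bit CPUs this publishes the pmap's SLB cache for use by the SLB
 * fault handlers; on 32-bit CPUs it sets curpmap, from which the segment
 * registers are reloaded.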
 */
void
moea64_activate(mmu_t mmu, struct thread *td)
{
        pmap_t  pm;

        pm = &td->td_proc->p_vmspace->vm_pmap;
        CPU_SET(PCPU_GET(cpuid), &pm->pm_active);

        #ifdef __powerpc64__
        PCPU_SET(userslb, pm->pm_slb);
        #else
        PCPU_SET(curpmap, pm->pmap_phys);
        #endif
}

void
moea64_deactivate(mmu_t mmu, struct thread *td)
{
        pmap_t  pm;

        pm = &td->td_proc->p_vmspace->vm_pmap;
        CPU_CLR(PCPU_GET(cpuid), &pm->pm_active);
        #ifdef __powerpc64__
        PCPU_SET(userslb, NULL);
        #else
        PCPU_SET(curpmap, NULL);
        #endif
}

void
moea64_change_wiring(mmu_t mmu, pmap_t pm, vm_offset_t va, boolean_t wired)
{
        struct  pvo_entry *pvo;
        uintptr_t pt;
        uint64_t vsid;
        int     i, ptegidx;

        LOCK_TABLE_WR();
        PMAP_LOCK(pm);
        pvo = moea64_pvo_find_va(pm, va & ~ADDR_POFF);

        if (pvo != NULL) {
                pt = MOEA64_PVO_TO_PTE(mmu, pvo);

                if (wired) {
                        if ((pvo->pvo_vaddr & PVO_WIRED) == 0)
                                pm->pm_stats.wired_count++;
                        pvo->pvo_vaddr |= PVO_WIRED;
                        pvo->pvo_pte.lpte.pte_hi |= LPTE_WIRED;
                } else {
                        if ((pvo->pvo_vaddr & PVO_WIRED) != 0)
                                pm->pm_stats.wired_count--;
                        pvo->pvo_vaddr &= ~PVO_WIRED;
                        pvo->pvo_pte.lpte.pte_hi &= ~LPTE_WIRED;
                }

                if (pt != -1) {
                        /* Update wiring flag in page table. */
                        MOEA64_PTE_CHANGE(mmu, pt, &pvo->pvo_pte.lpte,
                            pvo->pvo_vpn);
                } else if (wired) {
                        /*
                         * If we are wiring the page, and it wasn't in the
                         * page table before, add it.
                         */
                        vsid = PVO_VSID(pvo);
                        ptegidx = va_to_pteg(vsid, PVO_VADDR(pvo),
                            pvo->pvo_vaddr & PVO_LARGE);

                        i = MOEA64_PTE_INSERT(mmu, ptegidx, &pvo->pvo_pte.lpte);

                        if (i >= 0) {
                                PVO_PTEGIDX_CLR(pvo);
                                PVO_PTEGIDX_SET(pvo, i);
                        }
                }

        }
        UNLOCK_TABLE_WR();
        PMAP_UNLOCK(pm);
}

/*
 * This goes through and sets the physical address of our
 * special scratch PTE to the PA we want to zero or copy. Because
 * of locking issues (this can get called in pvo_enter() by
 * the UMA allocator), we can't use most other utility functions here
 */

static __inline
void moea64_set_scratchpage_pa(mmu_t mmup, int which, vm_offset_t pa) {

        KASSERT(!hw_direct_map, ("Using OEA64 scratchpage with a direct map!"));
        mtx_assert(&moea64_scratchpage_mtx, MA_OWNED);

        moea64_scratchpage_pvo[which]->pvo_pte.lpte.pte_lo &=
            ~(LPTE_WIMG | LPTE_RPGN);
        moea64_scratchpage_pvo[which]->pvo_pte.lpte.pte_lo |=
            moea64_calc_wimg(pa, VM_MEMATTR_DEFAULT) | (uint64_t)pa;
        MOEA64_PTE_CHANGE(mmup, moea64_scratchpage_pte[which],
            &moea64_scratchpage_pvo[which]->pvo_pte.lpte,
            moea64_scratchpage_pvo[which]->pvo_vpn);
        isync();
}

void
moea64_copy_page(mmu_t mmu, vm_page_t msrc, vm_page_t mdst)
{
        vm_offset_t     dst;
        vm_offset_t     src;

        dst = VM_PAGE_TO_PHYS(mdst);
        src = VM_PAGE_TO_PHYS(msrc);

        if (hw_direct_map) {
                bcopy((void *)src, (void *)dst, PAGE_SIZE);
        } else {
                mtx_lock(&moea64_scratchpage_mtx);

                moea64_set_scratchpage_pa(mmu, 0, src);
                moea64_set_scratchpage_pa(mmu, 1, dst);

                bcopy((void *)moea64_scratchpage_va[0],
                    (void *)moea64_scratchpage_va[1], PAGE_SIZE);

                mtx_unlock(&moea64_scratchpage_mtx);
        }
}

void
moea64_zero_page_area(mmu_t mmu, vm_page_t m, int off, int size)
{
        vm_offset_t pa = VM_PAGE_TO_PHYS(m);

        if (size + off > PAGE_SIZE)
                panic("moea64_zero_page: size + off > PAGE_SIZE");

        if (hw_direct_map) {
                bzero((caddr_t)pa + off, size);
        } else {
                mtx_lock(&moea64_scratchpage_mtx);
                moea64_set_scratchpage_pa(mmu, 0, pa);
                bzero((caddr_t)moea64_scratchpage_va[0] + off, size);
                mtx_unlock(&moea64_scratchpage_mtx);
        }
}

/*
 * Zero a page of physical memory by temporarily mapping it
 */
void
moea64_zero_page(mmu_t mmu, vm_page_t m)
{
        vm_offset_t pa = VM_PAGE_TO_PHYS(m);
        vm_offset_t va, off;

        if (!hw_direct_map) {
                mtx_lock(&moea64_scratchpage_mtx);

                moea64_set_scratchpage_pa(mmu, 0, pa);
                va = moea64_scratchpage_va[0];
        } else {
                va = pa;
        }

        for (off = 0; off < PAGE_SIZE; off += cacheline_size)
                __asm __volatile("dcbz 0,%0" :: "r"(va + off));

        if (!hw_direct_map)
                mtx_unlock(&moea64_scratchpage_mtx);
}

void
moea64_zero_page_idle(mmu_t mmu, vm_page_t m)
{

        moea64_zero_page(mmu, m);
}

/*
 * Map the given physical page at the specified virtual address in the
 * target pmap with the protection requested.  If specified the page
 * will be wired down.
 */

void
moea64_enter(mmu_t mmu, pmap_t pmap, vm_offset_t va, vm_page_t m,
    vm_prot_t prot, boolean_t wired)
{
        struct          pvo_head *pvo_head;
        uma_zone_t      zone;
        vm_page_t       pg;
        uint64_t        pte_lo;
        u_int           pvo_flags;
        int             error;

        if (!moea64_initialized) {
                pvo_head = NULL;
                pg = NULL;
                zone = moea64_upvo_zone;
                pvo_flags = 0;
        } else {
                pvo_head = vm_page_to_pvoh(m);
                pg = m;
                zone = moea64_mpvo_zone;
                pvo_flags = PVO_MANAGED;
        }

        if ((m->oflags & (VPO_UNMANAGED | VPO_BUSY)) == 0)
                VM_OBJECT_ASSERT_WLOCKED(m->object);

        /* XXX change the pvo head for fake pages */
        if ((m->oflags & VPO_UNMANAGED) != 0) {
                pvo_flags &= ~PVO_MANAGED;
                pvo_head = NULL;
                zone = moea64_upvo_zone;
        }

        pte_lo = moea64_calc_wimg(VM_PAGE_TO_PHYS(m), pmap_page_get_memattr(m));

        if (prot & VM_PROT_WRITE) {
                pte_lo |= LPTE_BW;
                if (pmap_bootstrapped &&
                    (m->oflags & VPO_UNMANAGED) == 0)
                        vm_page_aflag_set(m, PGA_WRITEABLE);
        } else
                pte_lo |= LPTE_BR;

        if ((prot & VM_PROT_EXECUTE) == 0)
                pte_lo |= LPTE_NOEXEC;

        if (wired)
                pvo_flags |= PVO_WIRED;

        LOCK_TABLE_WR();
        PMAP_LOCK(pmap);
        error = moea64_pvo_enter(mmu, pmap, zone, pvo_head, va,
            VM_PAGE_TO_PHYS(m), pte_lo, pvo_flags);
        PMAP_UNLOCK(pmap);
        UNLOCK_TABLE_WR();

        /*
         * Flush the page from the instruction cache if this page is
         * mapped executable and cacheable.
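         * PGA_EXECUTABLE records that this flush has happened; later
         * executable mappings of the same page can then skip the
         * syncicache call.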
void
moea64_zero_page_idle(mmu_t mmu, vm_page_t m)
{

	moea64_zero_page(mmu, m);
}

/*
 * Map the given physical page at the specified virtual address in the
 * target pmap with the protection requested.  If specified the page
 * will be wired down.
 */

void
moea64_enter(mmu_t mmu, pmap_t pmap, vm_offset_t va, vm_page_t m,
    vm_prot_t prot, boolean_t wired)
{
	struct		pvo_head *pvo_head;
	uma_zone_t	zone;
	vm_page_t	pg;
	uint64_t	pte_lo;
	u_int		pvo_flags;
	int		error;

	if (!moea64_initialized) {
		pvo_head = NULL;
		pg = NULL;
		zone = moea64_upvo_zone;
		pvo_flags = 0;
	} else {
		pvo_head = vm_page_to_pvoh(m);
		pg = m;
		zone = moea64_mpvo_zone;
		pvo_flags = PVO_MANAGED;
	}

	if ((m->oflags & (VPO_UNMANAGED | VPO_BUSY)) == 0)
		VM_OBJECT_ASSERT_WLOCKED(m->object);

	/* XXX change the pvo head for fake pages */
	if ((m->oflags & VPO_UNMANAGED) != 0) {
		pvo_flags &= ~PVO_MANAGED;
		pvo_head = NULL;
		zone = moea64_upvo_zone;
	}

	pte_lo = moea64_calc_wimg(VM_PAGE_TO_PHYS(m), pmap_page_get_memattr(m));

	if (prot & VM_PROT_WRITE) {
		pte_lo |= LPTE_BW;
		if (pmap_bootstrapped &&
		    (m->oflags & VPO_UNMANAGED) == 0)
			vm_page_aflag_set(m, PGA_WRITEABLE);
	} else
		pte_lo |= LPTE_BR;

	if ((prot & VM_PROT_EXECUTE) == 0)
		pte_lo |= LPTE_NOEXEC;

	if (wired)
		pvo_flags |= PVO_WIRED;

	LOCK_TABLE_WR();
	PMAP_LOCK(pmap);
	error = moea64_pvo_enter(mmu, pmap, zone, pvo_head, va,
	    VM_PAGE_TO_PHYS(m), pte_lo, pvo_flags);
	PMAP_UNLOCK(pmap);
	UNLOCK_TABLE_WR();

	/*
	 * Flush the page from the instruction cache if this page is
	 * mapped executable and cacheable.
	 */
	if (pmap != kernel_pmap && !(m->aflags & PGA_EXECUTABLE) &&
	    (pte_lo & (LPTE_I | LPTE_G | LPTE_NOEXEC)) == 0) {
		vm_page_aflag_set(m, PGA_EXECUTABLE);
		moea64_syncicache(mmu, pmap, va, VM_PAGE_TO_PHYS(m), PAGE_SIZE);
	}
}
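/*
 * PGA_EXECUTABLE records that some pmap has mapped the page executable
 * and cacheable, letting moea64_enter() skip redundant icache flushes on
 * later mappings; moea64_remove_all() clears the flag when the last
 * mapping goes away.
 */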
static void
moea64_syncicache(mmu_t mmu, pmap_t pmap, vm_offset_t va, vm_offset_t pa,
    vm_size_t sz)
{

	/*
	 * This is much trickier than on older systems because
	 * we can't sync the icache on physical addresses directly
	 * without a direct map.  Instead we check a couple of cases
	 * where the memory is already mapped in and, failing that,
	 * use the same trick we use for page zeroing to create
	 * a temporary mapping for this physical address.
	 */

	if (!pmap_bootstrapped) {
		/*
		 * If PMAP is not bootstrapped, we are likely to be
		 * in real mode.
		 */
		__syncicache((void *)pa, sz);
	} else if (pmap == kernel_pmap) {
		__syncicache((void *)va, sz);
	} else if (hw_direct_map) {
		__syncicache((void *)pa, sz);
	} else {
		/* Use the scratch page to set up a temp mapping */

		mtx_lock(&moea64_scratchpage_mtx);

		moea64_set_scratchpage_pa(mmu, 1, pa & ~ADDR_POFF);
		__syncicache((void *)(moea64_scratchpage_va[1] +
		    (va & ADDR_POFF)), sz);

		mtx_unlock(&moea64_scratchpage_mtx);
	}
}

/*
 * Maps a sequence of resident pages belonging to the same object.
 * The sequence begins with the given page m_start.  This page is
 * mapped at the given virtual address start.  Each subsequent page is
 * mapped at a virtual address that is offset from start by the same
 * amount as the page is offset from m_start within the object.  The
 * last page in the sequence is the page with the largest offset from
 * m_start that can be mapped at a virtual address less than the given
 * virtual address end.  Not every virtual page between start and end
 * is mapped; only those for which a resident page exists with the
 * corresponding offset from m_start are mapped.
 */
void
moea64_enter_object(mmu_t mmu, pmap_t pm, vm_offset_t start, vm_offset_t end,
    vm_page_t m_start, vm_prot_t prot)
{
	vm_page_t m;
	vm_pindex_t diff, psize;

	psize = atop(end - start);
	m = m_start;
	while (m != NULL && (diff = m->pindex - m_start->pindex) < psize) {
		moea64_enter(mmu, pm, start + ptoa(diff), m, prot &
		    (VM_PROT_READ | VM_PROT_EXECUTE), FALSE);
		m = TAILQ_NEXT(m, listq);
	}
}

void
moea64_enter_quick(mmu_t mmu, pmap_t pm, vm_offset_t va, vm_page_t m,
    vm_prot_t prot)
{

	moea64_enter(mmu, pm, va, m,
	    prot & (VM_PROT_READ | VM_PROT_EXECUTE), FALSE);
}

vm_paddr_t
moea64_extract(mmu_t mmu, pmap_t pm, vm_offset_t va)
{
	struct	pvo_entry *pvo;
	vm_paddr_t pa;

	PMAP_LOCK(pm);
	pvo = moea64_pvo_find_va(pm, va);
	if (pvo == NULL)
		pa = 0;
	else
		pa = (pvo->pvo_pte.lpte.pte_lo & LPTE_RPGN) |
		    (va - PVO_VADDR(pvo));
	PMAP_UNLOCK(pm);
	return (pa);
}
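/*
 * Note that moea64_extract() returns 0 both when va is unmapped and when
 * va maps physical page 0; callers that must tell these apart can use
 * moea64_extract_and_hold() below, which returns NULL for the former.
 */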
/*
 * Atomically extract and hold the physical page with the given
 * pmap and virtual address pair if that mapping permits the given
 * protection.
 */
vm_page_t
moea64_extract_and_hold(mmu_t mmu, pmap_t pmap, vm_offset_t va, vm_prot_t prot)
{
	struct	pvo_entry *pvo;
	vm_page_t m;
	vm_paddr_t pa;

	m = NULL;
	pa = 0;
	PMAP_LOCK(pmap);
retry:
	pvo = moea64_pvo_find_va(pmap, va & ~ADDR_POFF);
	if (pvo != NULL && (pvo->pvo_pte.lpte.pte_hi & LPTE_VALID) &&
	    ((pvo->pvo_pte.lpte.pte_lo & LPTE_PP) == LPTE_RW ||
	     (prot & VM_PROT_WRITE) == 0)) {
		if (vm_page_pa_tryrelock(pmap,
		    pvo->pvo_pte.lpte.pte_lo & LPTE_RPGN, &pa))
			goto retry;
		m = PHYS_TO_VM_PAGE(pvo->pvo_pte.lpte.pte_lo & LPTE_RPGN);
		vm_page_hold(m);
	}
	PA_UNLOCK_COND(pa);
	PMAP_UNLOCK(pmap);
	return (m);
}

static mmu_t installed_mmu;

static void *
moea64_uma_page_alloc(uma_zone_t zone, int bytes, u_int8_t *flags, int wait)
{
	/*
	 * This entire routine is a horrible hack to avoid bothering kmem
	 * for new KVA addresses.  Because this can get called from inside
	 * kmem allocation routines, calling kmem for a new address here
	 * can lead to multiply locking non-recursive mutexes.
	 */
	vm_offset_t va;
	vm_page_t m;
	int pflags, needed_lock;

	*flags = UMA_SLAB_PRIV;
	needed_lock = !PMAP_LOCKED(kernel_pmap);
	pflags = malloc2vm_flags(wait) | VM_ALLOC_WIRED;

	for (;;) {
		m = vm_page_alloc(NULL, 0, pflags | VM_ALLOC_NOOBJ);
		if (m == NULL) {
			if (wait & M_NOWAIT)
				return (NULL);
			VM_WAIT;
		} else
			break;
	}

	va = VM_PAGE_TO_PHYS(m);

	LOCK_TABLE_WR();
	if (needed_lock)
		PMAP_LOCK(kernel_pmap);

	moea64_pvo_enter(installed_mmu, kernel_pmap, moea64_upvo_zone,
	    NULL, va, VM_PAGE_TO_PHYS(m), LPTE_M, PVO_WIRED | PVO_BOOTSTRAP);

	if (needed_lock)
		PMAP_UNLOCK(kernel_pmap);
	UNLOCK_TABLE_WR();

	if ((wait & M_ZERO) && (m->flags & PG_ZERO) == 0)
		bzero((void *)va, PAGE_SIZE);

	return (void *)va;
}

extern int elf32_nxstack;

void
moea64_init(mmu_t mmu)
{

	CTR0(KTR_PMAP, "moea64_init");
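	/*
	 * Create the two PVO zones: one for managed mappings (which carry
	 * a pv list entry) and one for unmanaged/kernel mappings.  Without
	 * a direct map they must use moea64_uma_page_alloc() above, which
	 * maps new slabs 1:1 (va == pa) to avoid recursing into kmem.
	 */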
	moea64_upvo_zone = uma_zcreate("UPVO entry", sizeof (struct pvo_entry),
	    NULL, NULL, NULL, NULL, UMA_ALIGN_PTR,
	    UMA_ZONE_VM | UMA_ZONE_NOFREE);
	moea64_mpvo_zone = uma_zcreate("MPVO entry", sizeof(struct pvo_entry),
	    NULL, NULL, NULL, NULL, UMA_ALIGN_PTR,
	    UMA_ZONE_VM | UMA_ZONE_NOFREE);

	if (!hw_direct_map) {
		installed_mmu = mmu;
		uma_zone_set_allocf(moea64_upvo_zone, moea64_uma_page_alloc);
		uma_zone_set_allocf(moea64_mpvo_zone, moea64_uma_page_alloc);
	}

#ifdef COMPAT_FREEBSD32
	elf32_nxstack = 1;
#endif

	moea64_initialized = TRUE;
}
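/*
 * REF/CHG bits live in two places: the hardware PTE, which the MMU
 * updates, and the cached copy in each PVO (pvo_pte.lpte).  The queries
 * below try the cheap cached copy first and fall back to syncing from
 * the hardware PTE; see moea64_query_bit() and moea64_clear_bit().
 */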
boolean_t
moea64_is_referenced(mmu_t mmu, vm_page_t m)
{

	KASSERT((m->oflags & VPO_UNMANAGED) == 0,
	    ("moea64_is_referenced: page %p is not managed", m));
	return (moea64_query_bit(mmu, m, LPTE_REF));
}

boolean_t
moea64_is_modified(mmu_t mmu, vm_page_t m)
{

	KASSERT((m->oflags & VPO_UNMANAGED) == 0,
	    ("moea64_is_modified: page %p is not managed", m));

	/*
	 * If the page is not VPO_BUSY, then PGA_WRITEABLE cannot be
	 * concurrently set while the object is locked.  Thus, if PGA_WRITEABLE
	 * is clear, no PTEs can have LPTE_CHG set.
	 */
	VM_OBJECT_ASSERT_WLOCKED(m->object);
	if ((m->oflags & VPO_BUSY) == 0 &&
	    (m->aflags & PGA_WRITEABLE) == 0)
		return (FALSE);
	return (moea64_query_bit(mmu, m, LPTE_CHG));
}

boolean_t
moea64_is_prefaultable(mmu_t mmu, pmap_t pmap, vm_offset_t va)
{
	struct pvo_entry *pvo;
	boolean_t rv;

	PMAP_LOCK(pmap);
	pvo = moea64_pvo_find_va(pmap, va & ~ADDR_POFF);
	rv = pvo == NULL || (pvo->pvo_pte.lpte.pte_hi & LPTE_VALID) == 0;
	PMAP_UNLOCK(pmap);
	return (rv);
}

void
moea64_clear_reference(mmu_t mmu, vm_page_t m)
{

	KASSERT((m->oflags & VPO_UNMANAGED) == 0,
	    ("moea64_clear_reference: page %p is not managed", m));
	moea64_clear_bit(mmu, m, LPTE_REF);
}

void
moea64_clear_modify(mmu_t mmu, vm_page_t m)
{

	KASSERT((m->oflags & VPO_UNMANAGED) == 0,
	    ("moea64_clear_modify: page %p is not managed", m));
	VM_OBJECT_ASSERT_WLOCKED(m->object);
	KASSERT((m->oflags & VPO_BUSY) == 0,
	    ("moea64_clear_modify: page %p is busy", m));

	/*
	 * If the page is not PGA_WRITEABLE, then no PTEs can have LPTE_CHG
	 * set.  If the object containing the page is locked and the page is
	 * not VPO_BUSY, then PGA_WRITEABLE cannot be concurrently set.
	 */
	if ((m->aflags & PGA_WRITEABLE) == 0)
		return;
	moea64_clear_bit(mmu, m, LPTE_CHG);
}

/*
 * Clear the write and modified bits in each of the given page's mappings.
 */
void
moea64_remove_write(mmu_t mmu, vm_page_t m)
{
	struct	pvo_entry *pvo;
	uintptr_t pt;
	pmap_t	pmap;
	uint64_t lo = 0;

	KASSERT((m->oflags & VPO_UNMANAGED) == 0,
	    ("moea64_remove_write: page %p is not managed", m));

	/*
	 * If the page is not VPO_BUSY, then PGA_WRITEABLE cannot be set by
	 * another thread while the object is locked.  Thus, if PGA_WRITEABLE
	 * is clear, no page table entries need updating.
	 */
	VM_OBJECT_ASSERT_WLOCKED(m->object);
	if ((m->oflags & VPO_BUSY) == 0 &&
	    (m->aflags & PGA_WRITEABLE) == 0)
		return;
	powerpc_sync();
	LOCK_TABLE_RD();
	LIST_FOREACH(pvo, vm_page_to_pvoh(m), pvo_vlink) {
		pmap = pvo->pvo_pmap;
		PMAP_LOCK(pmap);
		if ((pvo->pvo_pte.lpte.pte_lo & LPTE_PP) != LPTE_BR) {
			pt = MOEA64_PVO_TO_PTE(mmu, pvo);
			pvo->pvo_pte.lpte.pte_lo &= ~LPTE_PP;
			pvo->pvo_pte.lpte.pte_lo |= LPTE_BR;
			if (pt != -1) {
				MOEA64_PTE_SYNCH(mmu, pt, &pvo->pvo_pte.lpte);
				lo |= pvo->pvo_pte.lpte.pte_lo;
				pvo->pvo_pte.lpte.pte_lo &= ~LPTE_CHG;
				MOEA64_PTE_CHANGE(mmu, pt,
				    &pvo->pvo_pte.lpte, pvo->pvo_vpn);
				if (pvo->pvo_pmap == kernel_pmap)
					isync();
			}
		}
		if ((lo & LPTE_CHG) != 0)
			vm_page_dirty(m);
		PMAP_UNLOCK(pmap);
	}
	UNLOCK_TABLE_RD();
	vm_page_aflag_clear(m, PGA_WRITEABLE);
}

/*
 * moea64_ts_referenced:
 *
 *	Return a count of reference bits for a page, clearing those bits.
 *	It is not necessary for every reference bit to be cleared, but it
 *	is necessary that 0 only be returned when there are truly no
 *	reference bits set.
 *
 * XXX: The exact number of bits to check and clear is a matter that
 * should be tested and standardized at some point in the future for
 * optimal aging of shared pages.
 */
int
moea64_ts_referenced(mmu_t mmu, vm_page_t m)
{

	KASSERT((m->oflags & VPO_UNMANAGED) == 0,
	    ("moea64_ts_referenced: page %p is not managed", m));
	return (moea64_clear_bit(mmu, m, LPTE_REF));
}

/*
 * Modify the WIMG settings of all mappings for a page.
 */
void
moea64_page_set_memattr(mmu_t mmu, vm_page_t m, vm_memattr_t ma)
{
	struct	pvo_entry *pvo;
	struct	pvo_head *pvo_head;
	uintptr_t pt;
	pmap_t	pmap;
	uint64_t lo;

	if ((m->oflags & VPO_UNMANAGED) != 0) {
		m->md.mdpg_cache_attrs = ma;
		return;
	}

	pvo_head = vm_page_to_pvoh(m);
	lo = moea64_calc_wimg(VM_PAGE_TO_PHYS(m), ma);
	LOCK_TABLE_RD();
	LIST_FOREACH(pvo, pvo_head, pvo_vlink) {
		pmap = pvo->pvo_pmap;
		PMAP_LOCK(pmap);
		pt = MOEA64_PVO_TO_PTE(mmu, pvo);
		pvo->pvo_pte.lpte.pte_lo &= ~LPTE_WIMG;
		pvo->pvo_pte.lpte.pte_lo |= lo;
		if (pt != -1) {
			MOEA64_PTE_CHANGE(mmu, pt, &pvo->pvo_pte.lpte,
			    pvo->pvo_vpn);
			if (pvo->pvo_pmap == kernel_pmap)
				isync();
		}
		PMAP_UNLOCK(pmap);
	}
	UNLOCK_TABLE_RD();
	m->md.mdpg_cache_attrs = ma;
}

/*
 * Map a wired page into kernel virtual address space.
 */
void
moea64_kenter_attr(mmu_t mmu, vm_offset_t va, vm_offset_t pa, vm_memattr_t ma)
{
	uint64_t	pte_lo;
	int		error;

	pte_lo = moea64_calc_wimg(pa, ma);

	LOCK_TABLE_WR();
	PMAP_LOCK(kernel_pmap);
	error = moea64_pvo_enter(mmu, kernel_pmap, moea64_upvo_zone,
	    NULL, va, pa, pte_lo, PVO_WIRED);
	PMAP_UNLOCK(kernel_pmap);
	UNLOCK_TABLE_WR();

	if (error != 0 && error != ENOENT)
		panic("moea64_kenter_attr: failed to enter va %#zx pa %#zx: %d",
		    va, pa, error);
}

void
moea64_kenter(mmu_t mmu, vm_offset_t va, vm_paddr_t pa)
{

	moea64_kenter_attr(mmu, va, pa, VM_MEMATTR_DEFAULT);
}
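/*
 * Illustrative use only (assuming the standard powerpc vm_memattr_t
 * values): a device register page at physical address pa could be mapped
 * caching-inhibited with
 *
 *	moea64_kenter_attr(mmu, va, pa, VM_MEMATTR_UNCACHEABLE);
 *
 * moea64_kenter() above is the shorthand for the cacheable default.
 */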
/*
 * Extract the physical page address associated with the given kernel virtual
 * address.
 */
vm_paddr_t
moea64_kextract(mmu_t mmu, vm_offset_t va)
{
	struct	pvo_entry *pvo;
	vm_paddr_t pa;

	/*
	 * Shortcut the direct-mapped case when applicable.  We never put
	 * anything but 1:1 mappings below VM_MIN_KERNEL_ADDRESS.
	 */
	if (va < VM_MIN_KERNEL_ADDRESS)
		return (va);

	PMAP_LOCK(kernel_pmap);
	pvo = moea64_pvo_find_va(kernel_pmap, va);
	KASSERT(pvo != NULL, ("moea64_kextract: no addr found for %#" PRIxPTR,
	    va));
	pa = (pvo->pvo_pte.lpte.pte_lo & LPTE_RPGN) | (va - PVO_VADDR(pvo));
	PMAP_UNLOCK(kernel_pmap);
	return (pa);
}

/*
 * Remove a wired page from kernel virtual address space.
 */
void
moea64_kremove(mmu_t mmu, vm_offset_t va)
{
	moea64_remove(mmu, kernel_pmap, va, va + PAGE_SIZE);
}

/*
 * Map a range of physical addresses into kernel virtual address space.
 *
 * The value passed in *virt is a suggested virtual address for the mapping.
 * Architectures which can support a direct-mapped physical to virtual region
 * can return the appropriate address within that region, leaving '*virt'
 * unchanged.  We cannot and therefore do not; *virt is updated with the
 * first usable address after the mapped region.
 */
vm_offset_t
moea64_map(mmu_t mmu, vm_offset_t *virt, vm_paddr_t pa_start,
    vm_paddr_t pa_end, int prot)
{
	vm_offset_t	sva, va;

	sva = *virt;
	va = sva;
	for (; pa_start < pa_end; pa_start += PAGE_SIZE, va += PAGE_SIZE)
		moea64_kenter(mmu, va, pa_start);
	*virt = va;

	return (sva);
}
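/*
 * For example, with *virt == V and a range of N pages, moea64_map() wires
 * the pages at V .. V + N * PAGE_SIZE, advances *virt past the mapping,
 * and returns V as the start of the mapped region.
 */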
/*
 * Returns true if the pmap's pv is one of the first
 * 16 pvs linked to from this page.  This count may
 * be changed upwards or downwards in the future; it
 * is only necessary that true be returned for a small
 * subset of pmaps for proper page aging.
 */
boolean_t
moea64_page_exists_quick(mmu_t mmu, pmap_t pmap, vm_page_t m)
{
	int loops;
	struct pvo_entry *pvo;
	boolean_t rv;

	KASSERT((m->oflags & VPO_UNMANAGED) == 0,
	    ("moea64_page_exists_quick: page %p is not managed", m));
	loops = 0;
	rv = FALSE;
	LOCK_TABLE_RD();
	LIST_FOREACH(pvo, vm_page_to_pvoh(m), pvo_vlink) {
		if (pvo->pvo_pmap == pmap) {
			rv = TRUE;
			break;
		}
		if (++loops >= 16)
			break;
	}
	UNLOCK_TABLE_RD();
	return (rv);
}

/*
 * Return the number of managed mappings to the given physical page
 * that are wired.
 */
int
moea64_page_wired_mappings(mmu_t mmu, vm_page_t m)
{
	struct pvo_entry *pvo;
	int count;

	count = 0;
	if ((m->oflags & VPO_UNMANAGED) != 0)
		return (count);
	LOCK_TABLE_RD();
	LIST_FOREACH(pvo, vm_page_to_pvoh(m), pvo_vlink)
		if ((pvo->pvo_vaddr & PVO_WIRED) != 0)
			count++;
	UNLOCK_TABLE_RD();
	return (count);
}
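/*
 * VSID allocation keeps a bitmap with one bit per hash bucket, packed
 * into 32-bit words.  For a candidate hash the word is hash >> 5 and the
 * bit is hash & (VSID_NBPW - 1); e.g. hash 0x47 lands in word 2, bit 7.
 * A sketch of the collision test used below:
 *
 *	n = hash >> 5;
 *	mask = 1 << (hash & (VSID_NBPW - 1));
 *	if (moea64_vsid_bitmap[n] & mask)
 *		... collision, try another bit in this bucket ...
 */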
static uintptr_t	moea64_vsidcontext;

uintptr_t
moea64_get_unique_vsid(void) {
	u_int entropy;
	register_t hash;
	uint32_t mask;
	int i;

	entropy = 0;
	__asm __volatile("mftb %0" : "=r"(entropy));

	mtx_lock(&moea64_slb_mutex);
	for (i = 0; i < NVSIDS; i += VSID_NBPW) {
		u_int	n;

		/*
		 * Create a new value by multiplying by a prime and adding in
		 * entropy from the timebase register.  This is to make the
		 * VSID more random so that the PT hash function collides
		 * less often.  (Note that the prime causes gcc to do shifts
		 * instead of a multiply.)
		 */
		moea64_vsidcontext = (moea64_vsidcontext * 0x1105) + entropy;
		hash = moea64_vsidcontext & (NVSIDS - 1);
		if (hash == 0)		/* 0 is special, avoid it */
			continue;
		n = hash >> 5;
		mask = 1 << (hash & (VSID_NBPW - 1));
		hash = (moea64_vsidcontext & VSID_HASHMASK);
		if (moea64_vsid_bitmap[n] & mask) {	/* collision? */
			/* anything free in this bucket? */
			if (moea64_vsid_bitmap[n] == 0xffffffff) {
				entropy = (moea64_vsidcontext >> 20);
				continue;
			}
			i = ffs(~moea64_vsid_bitmap[n]) - 1;
			mask = 1 << i;
			hash &= VSID_HASHMASK & ~(VSID_NBPW - 1);
			hash |= i;
		}
		KASSERT(!(moea64_vsid_bitmap[n] & mask),
		    ("Allocating in-use VSID %#zx\n", hash));
		moea64_vsid_bitmap[n] |= mask;
		mtx_unlock(&moea64_slb_mutex);
		return (hash);
	}

	mtx_unlock(&moea64_slb_mutex);
	panic("%s: out of segments", __func__);
}

#ifdef __powerpc64__
void
moea64_pinit(mmu_t mmu, pmap_t pmap)
{
	PMAP_LOCK_INIT(pmap);
	RB_INIT(&pmap->pmap_pvo);

	pmap->pm_slb_tree_root = slb_alloc_tree();
	pmap->pm_slb = slb_alloc_user_cache();
	pmap->pm_slb_len = 0;
}
#else
void
moea64_pinit(mmu_t mmu, pmap_t pmap)
{
	int	i;
	uint32_t hash;

	PMAP_LOCK_INIT(pmap);
	RB_INIT(&pmap->pmap_pvo);

	if (pmap_bootstrapped)
		pmap->pmap_phys = (pmap_t)moea64_kextract(mmu,
		    (vm_offset_t)pmap);
	else
		pmap->pmap_phys = pmap;

	/*
	 * Allocate some segment registers for this pmap.
	 */
	hash = moea64_get_unique_vsid();

	for (i = 0; i < 16; i++)
		pmap->pm_sr[i] = VSID_MAKE(i, hash);

	KASSERT(pmap->pm_sr[0] != 0, ("moea64_pinit: pm_sr[0] = 0"));
}
#endif

/*
 * Initialize the pmap associated with process 0.
 */
void
moea64_pinit0(mmu_t mmu, pmap_t pm)
{
	moea64_pinit(mmu, pm);
	bzero(&pm->pm_stats, sizeof(pm->pm_stats));
}

/*
 * Set the physical protection on the specified range of this map as requested.
 */
static void
moea64_pvo_protect(mmu_t mmu, pmap_t pm, struct pvo_entry *pvo, vm_prot_t prot)
{
	uintptr_t pt;
	struct	vm_page *pg;
	uint64_t oldlo;

	PMAP_LOCK_ASSERT(pm, MA_OWNED);

	/*
	 * Grab the PTE pointer before we diddle with the cached PTE
	 * copy.
	 */
	pt = MOEA64_PVO_TO_PTE(mmu, pvo);

	/*
	 * Change the protection of the page.
	 */
	oldlo = pvo->pvo_pte.lpte.pte_lo;
	pvo->pvo_pte.lpte.pte_lo &= ~LPTE_PP;
	pvo->pvo_pte.lpte.pte_lo &= ~LPTE_NOEXEC;
	if ((prot & VM_PROT_EXECUTE) == 0)
		pvo->pvo_pte.lpte.pte_lo |= LPTE_NOEXEC;
	if (prot & VM_PROT_WRITE)
		pvo->pvo_pte.lpte.pte_lo |= LPTE_BW;
	else
		pvo->pvo_pte.lpte.pte_lo |= LPTE_BR;

	pg = PHYS_TO_VM_PAGE(pvo->pvo_pte.lpte.pte_lo & LPTE_RPGN);

	/*
	 * If the PVO is in the page table, update that pte as well.
	 */
	if (pt != -1)
		MOEA64_PTE_CHANGE(mmu, pt, &pvo->pvo_pte.lpte,
		    pvo->pvo_vpn);
	if (pm != kernel_pmap && pg != NULL && !(pg->aflags & PGA_EXECUTABLE) &&
	    (pvo->pvo_pte.lpte.pte_lo & (LPTE_I | LPTE_G | LPTE_NOEXEC)) == 0) {
		if ((pg->oflags & VPO_UNMANAGED) == 0)
			vm_page_aflag_set(pg, PGA_EXECUTABLE);
		moea64_syncicache(mmu, pm, PVO_VADDR(pvo),
		    pvo->pvo_pte.lpte.pte_lo & LPTE_RPGN, PAGE_SIZE);
	}

	/*
	 * Update vm about the REF/CHG bits if the page is managed and we have
	 * removed write access.
	 */
	if ((pvo->pvo_vaddr & PVO_MANAGED) == PVO_MANAGED &&
	    (oldlo & LPTE_PP) != LPTE_BR && (prot & VM_PROT_WRITE) == 0) {
		if (pg != NULL) {
			if (pvo->pvo_pte.lpte.pte_lo & LPTE_CHG)
				vm_page_dirty(pg);
			if (pvo->pvo_pte.lpte.pte_lo & LPTE_REF)
				vm_page_aflag_set(pg, PGA_REFERENCED);
		}
	}
}
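/*
 * The range operations below share one pattern: seed a key PVO with the
 * start address and walk the per-pmap red-black tree in address order,
 * e.g.
 *
 *	key.pvo_vaddr = sva;
 *	for (pvo = RB_NFIND(pvo_tree, &pm->pmap_pvo, &key);
 *	    pvo != NULL && PVO_VADDR(pvo) < eva; pvo = tpvo) {
 *		tpvo = RB_NEXT(pvo_tree, &pm->pmap_pvo, pvo);
 *		...
 *	}
 */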
void
moea64_protect(mmu_t mmu, pmap_t pm, vm_offset_t sva, vm_offset_t eva,
    vm_prot_t prot)
{
	struct	pvo_entry *pvo, *tpvo, key;

	CTR4(KTR_PMAP, "moea64_protect: pm=%p sva=%#x eva=%#x prot=%#x", pm,
	    sva, eva, prot);

	KASSERT(pm == &curproc->p_vmspace->vm_pmap || pm == kernel_pmap,
	    ("moea64_protect: non current pmap"));

	if ((prot & VM_PROT_READ) == VM_PROT_NONE) {
		moea64_remove(mmu, pm, sva, eva);
		return;
	}

	LOCK_TABLE_RD();
	PMAP_LOCK(pm);
	key.pvo_vaddr = sva;
	for (pvo = RB_NFIND(pvo_tree, &pm->pmap_pvo, &key);
	    pvo != NULL && PVO_VADDR(pvo) < eva; pvo = tpvo) {
		tpvo = RB_NEXT(pvo_tree, &pm->pmap_pvo, pvo);
		moea64_pvo_protect(mmu, pm, pvo, prot);
	}
	UNLOCK_TABLE_RD();
	PMAP_UNLOCK(pm);
}

/*
 * Map a list of wired pages into kernel virtual address space.  This is
 * intended for temporary mappings which do not need page modification or
 * references recorded.  Existing mappings in the region are overwritten.
 */
void
moea64_qenter(mmu_t mmu, vm_offset_t va, vm_page_t *m, int count)
{
	while (count-- > 0) {
		moea64_kenter(mmu, va, VM_PAGE_TO_PHYS(*m));
		va += PAGE_SIZE;
		m++;
	}
}

/*
 * Remove page mappings from kernel virtual address space.  Intended for
 * temporary mappings entered by moea64_qenter.
 */
void
moea64_qremove(mmu_t mmu, vm_offset_t va, int count)
{
	while (count-- > 0) {
		moea64_kremove(mmu, va);
		va += PAGE_SIZE;
	}
}

void
moea64_release_vsid(uint64_t vsid)
{
	int idx, mask;

	mtx_lock(&moea64_slb_mutex);
	idx = vsid & (NVSIDS - 1);
	mask = 1 << (idx % VSID_NBPW);
	idx /= VSID_NBPW;
	KASSERT(moea64_vsid_bitmap[idx] & mask,
	    ("Freeing unallocated VSID %#jx", vsid));
	moea64_vsid_bitmap[idx] &= ~mask;
	mtx_unlock(&moea64_slb_mutex);
}

void
moea64_release(mmu_t mmu, pmap_t pmap)
{

	/*
	 * Free segment registers' VSIDs
	 */
#ifdef __powerpc64__
	slb_free_tree(pmap);
	slb_free_user_cache(pmap->pm_slb);
#else
	KASSERT(pmap->pm_sr[0] != 0, ("moea64_release: pm_sr[0] = 0"));

	moea64_release_vsid(VSID_TO_HASH(pmap->pm_sr[0]));
#endif

	PMAP_LOCK_DESTROY(pmap);
}

/*
 * Remove all pages mapped by the specified pmap
 */
void
moea64_remove_pages(mmu_t mmu, pmap_t pm)
{
	struct	pvo_entry *pvo, *tpvo;

	LOCK_TABLE_WR();
	PMAP_LOCK(pm);
	RB_FOREACH_SAFE(pvo, pvo_tree, &pm->pmap_pvo, tpvo) {
		if (!(pvo->pvo_vaddr & PVO_WIRED))
			moea64_pvo_remove(mmu, pvo);
	}
	UNLOCK_TABLE_WR();
	PMAP_UNLOCK(pm);
}

/*
 * Remove the given range of addresses from the specified map.
 */
void
moea64_remove(mmu_t mmu, pmap_t pm, vm_offset_t sva, vm_offset_t eva)
{
	struct	pvo_entry *pvo, *tpvo, key;

	/*
	 * Perform an unsynchronized read.  This is, however, safe.
	 */
	if (pm->pm_stats.resident_count == 0)
		return;

	LOCK_TABLE_WR();
	PMAP_LOCK(pm);
	key.pvo_vaddr = sva;
	for (pvo = RB_NFIND(pvo_tree, &pm->pmap_pvo, &key);
	    pvo != NULL && PVO_VADDR(pvo) < eva; pvo = tpvo) {
		tpvo = RB_NEXT(pvo_tree, &pm->pmap_pvo, pvo);
		moea64_pvo_remove(mmu, pvo);
	}
	UNLOCK_TABLE_WR();
	PMAP_UNLOCK(pm);
}

/*
 * Remove physical page from all pmaps in which it resides.
 * moea64_pvo_remove() will reflect changes in pte's back to the vm_page.
 */
void
moea64_remove_all(mmu_t mmu, vm_page_t m)
{
	struct	pvo_entry *pvo, *next_pvo;
	pmap_t	pmap;

	LOCK_TABLE_WR();
	LIST_FOREACH_SAFE(pvo, vm_page_to_pvoh(m), pvo_vlink, next_pvo) {
		pmap = pvo->pvo_pmap;
		PMAP_LOCK(pmap);
		moea64_pvo_remove(mmu, pvo);
		PMAP_UNLOCK(pmap);
	}
	UNLOCK_TABLE_WR();
	if ((m->aflags & PGA_WRITEABLE) && moea64_is_modified(mmu, m))
		vm_page_dirty(m);
	vm_page_aflag_clear(m, PGA_WRITEABLE);
	vm_page_aflag_clear(m, PGA_EXECUTABLE);
}
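/*
 * Note that, unlike moea64_remove_pages() above, moea64_remove_all()
 * also removes wired mappings: it is typically used when the page itself
 * is being reclaimed, so nothing may keep referencing it.
 */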
/*
 * Allocate a physical page of memory directly from the phys_avail map.
 * Can only be called from moea64_bootstrap before avail start and end are
 * calculated.
 */
vm_offset_t
moea64_bootstrap_alloc(vm_size_t size, u_int align)
{
	vm_offset_t	s, e;
	int		i, j;

	size = round_page(size);
	for (i = 0; phys_avail[i + 1] != 0; i += 2) {
		if (align != 0)
			s = (phys_avail[i] + align - 1) & ~(align - 1);
		else
			s = phys_avail[i];
		e = s + size;

		if (s < phys_avail[i] || e > phys_avail[i + 1])
			continue;

		if (s + size > platform_real_maxaddr())
			continue;

		if (s == phys_avail[i]) {
			phys_avail[i] += size;
		} else if (e == phys_avail[i + 1]) {
			phys_avail[i + 1] -= size;
		} else {
			for (j = phys_avail_count * 2; j > i; j -= 2) {
				phys_avail[j] = phys_avail[j - 2];
				phys_avail[j + 1] = phys_avail[j - 1];
			}

			phys_avail[i + 3] = phys_avail[i + 1];
			phys_avail[i + 1] = s;
			phys_avail[i + 2] = e;
			phys_avail_count++;
		}

		return (s);
	}
	panic("moea64_bootstrap_alloc: could not allocate memory");
}
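/*
 * A worked example of the carving above: allocating from the middle of a
 * phys_avail pair [s0, e0) leaves the two pairs [s0, s) and [e, e0), with
 * later pairs shifted up to make room and phys_avail_count incremented;
 * an allocation at either end simply shrinks the existing pair in place.
 */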
static int
moea64_pvo_enter(mmu_t mmu, pmap_t pm, uma_zone_t zone,
    struct pvo_head *pvo_head, vm_offset_t va, vm_offset_t pa,
    uint64_t pte_lo, int flags)
{
	struct	 pvo_entry *pvo;
	uint64_t vsid;
	int	 first;
	u_int	 ptegidx;
	int	 i;
	int	 bootstrap;

	/*
	 * One nasty thing that can happen here is that the UMA calls to
	 * allocate new PVOs need to map more memory, which calls pvo_enter(),
	 * which calls UMA...
	 *
	 * We break the loop by detecting recursion and allocating out of
	 * the bootstrap pool.
	 */

	first = 0;
	bootstrap = (flags & PVO_BOOTSTRAP);

	if (!moea64_initialized)
		bootstrap = 1;

	PMAP_LOCK_ASSERT(pm, MA_OWNED);
	rw_assert(&moea64_table_lock, RA_WLOCKED);

	/*
	 * Compute the PTE Group index.
	 */
	va &= ~ADDR_POFF;
	vsid = va_to_vsid(pm, va);
	ptegidx = va_to_pteg(vsid, va, flags & PVO_LARGE);

	/*
	 * Remove any existing mapping for this page.  Reuse the pvo entry if
	 * there is a mapping.
	 */
	moea64_pvo_enter_calls++;

	LIST_FOREACH(pvo, &moea64_pvo_table[ptegidx], pvo_olink) {
		if (pvo->pvo_pmap == pm && PVO_VADDR(pvo) == va) {
			if ((pvo->pvo_pte.lpte.pte_lo & LPTE_RPGN) == pa &&
			    (pvo->pvo_pte.lpte.pte_lo & (LPTE_NOEXEC | LPTE_PP))
			    == (pte_lo & (LPTE_NOEXEC | LPTE_PP))) {
				if (!(pvo->pvo_pte.lpte.pte_hi & LPTE_VALID)) {
					/* Re-insert if spilled */
					i = MOEA64_PTE_INSERT(mmu, ptegidx,
					    &pvo->pvo_pte.lpte);
					if (i >= 0)
						PVO_PTEGIDX_SET(pvo, i);
					moea64_pte_overflow--;
				}
				return (0);
			}
			moea64_pvo_remove(mmu, pvo);
			break;
		}
	}

	/*
	 * If we aren't overwriting a mapping, try to allocate.
	 */
	if (bootstrap) {
		if (moea64_bpvo_pool_index >= BPVO_POOL_SIZE) {
			panic("moea64_pvo_enter: bpvo pool exhausted, %d, %d, %zd",
			    moea64_bpvo_pool_index, BPVO_POOL_SIZE,
			    BPVO_POOL_SIZE * sizeof(struct pvo_entry));
		}
		pvo = &moea64_bpvo_pool[moea64_bpvo_pool_index];
		moea64_bpvo_pool_index++;
		bootstrap = 1;
	} else {
		pvo = uma_zalloc(zone, M_NOWAIT);
	}

	if (pvo == NULL)
		return (ENOMEM);

	moea64_pvo_entries++;
	pvo->pvo_vaddr = va;
	pvo->pvo_vpn = (uint64_t)((va & ADDR_PIDX) >> ADDR_PIDX_SHFT)
	    | (vsid << 16);
	pvo->pvo_pmap = pm;
	LIST_INSERT_HEAD(&moea64_pvo_table[ptegidx], pvo, pvo_olink);
	pvo->pvo_vaddr &= ~ADDR_POFF;

	if (flags & PVO_WIRED)
		pvo->pvo_vaddr |= PVO_WIRED;
	if (pvo_head != NULL)
		pvo->pvo_vaddr |= PVO_MANAGED;
	if (bootstrap)
		pvo->pvo_vaddr |= PVO_BOOTSTRAP;
	if (flags & PVO_LARGE)
		pvo->pvo_vaddr |= PVO_LARGE;

	moea64_pte_create(&pvo->pvo_pte.lpte, vsid, va,
	    (uint64_t)(pa) | pte_lo, flags);

	/*
	 * Add to pmap list
	 */
	RB_INSERT(pvo_tree, &pm->pmap_pvo, pvo);

	/*
	 * Remember if the list was empty and therefore will be the first
	 * item.
	 */
	if (pvo_head != NULL) {
		if (LIST_FIRST(pvo_head) == NULL)
			first = 1;
		LIST_INSERT_HEAD(pvo_head, pvo, pvo_vlink);
	}

	if (pvo->pvo_vaddr & PVO_WIRED) {
		pvo->pvo_pte.lpte.pte_hi |= LPTE_WIRED;
		pm->pm_stats.wired_count++;
	}
	pm->pm_stats.resident_count++;

	/*
	 * We hope this succeeds but it isn't required: if insertion fails,
	 * the mapping stays out of the page table until the next fault
	 * re-enters it via the re-insert case above.
	 */
	i = MOEA64_PTE_INSERT(mmu, ptegidx, &pvo->pvo_pte.lpte);
	if (i >= 0) {
		PVO_PTEGIDX_SET(pvo, i);
	} else {
		moea64_pte_overflow++;
	}

	if (pm == kernel_pmap)
		isync();

#ifdef __powerpc64__
	/*
	 * Make sure all our bootstrap mappings are in the SLB as soon
	 * as virtual memory is switched on.
	 */
	if (!pmap_bootstrapped)
		moea64_bootstrap_slb_prefault(va, flags & PVO_LARGE);
#endif

	return (first ? ENOENT : 0);
}
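/*
 * moea64_pvo_enter() returns ENOENT when this was the first mapping of
 * the page; callers that only care about hard failure, such as
 * moea64_kenter_attr() above, treat both 0 and ENOENT as success.
 */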
static void
moea64_pvo_remove(mmu_t mmu, struct pvo_entry *pvo)
{
	struct	vm_page *pg;
	uintptr_t pt;

	PMAP_LOCK_ASSERT(pvo->pvo_pmap, MA_OWNED);
	rw_assert(&moea64_table_lock, RA_WLOCKED);

	/*
	 * If there is an active pte entry, we need to deactivate it (and
	 * save the REF & CHG bits).
	 */
	pt = MOEA64_PVO_TO_PTE(mmu, pvo);
	if (pt != -1) {
		MOEA64_PTE_UNSET(mmu, pt, &pvo->pvo_pte.lpte, pvo->pvo_vpn);
		PVO_PTEGIDX_CLR(pvo);
	} else {
		moea64_pte_overflow--;
	}

	/*
	 * Update our statistics.
	 */
	pvo->pvo_pmap->pm_stats.resident_count--;
	if (pvo->pvo_vaddr & PVO_WIRED)
		pvo->pvo_pmap->pm_stats.wired_count--;

	/*
	 * Remove this PVO from the pmap list.
	 */
	RB_REMOVE(pvo_tree, &pvo->pvo_pmap->pmap_pvo, pvo);

	/*
	 * Remove this from the overflow list and return it to the pool
	 * if we aren't going to reuse it.
	 */
	LIST_REMOVE(pvo, pvo_olink);

	/*
	 * Update vm about the REF/CHG bits if the page is managed.
	 */
	pg = PHYS_TO_VM_PAGE(pvo->pvo_pte.lpte.pte_lo & LPTE_RPGN);

	if ((pvo->pvo_vaddr & PVO_MANAGED) == PVO_MANAGED && pg != NULL) {
		LIST_REMOVE(pvo, pvo_vlink);
		if ((pvo->pvo_pte.lpte.pte_lo & LPTE_PP) != LPTE_BR) {
			if (pvo->pvo_pte.lpte.pte_lo & LPTE_CHG)
				vm_page_dirty(pg);
			if (pvo->pvo_pte.lpte.pte_lo & LPTE_REF)
				vm_page_aflag_set(pg, PGA_REFERENCED);
			if (LIST_EMPTY(vm_page_to_pvoh(pg)))
				vm_page_aflag_clear(pg, PGA_WRITEABLE);
		}
		if (LIST_EMPTY(vm_page_to_pvoh(pg)))
			vm_page_aflag_clear(pg, PGA_EXECUTABLE);
	}

	moea64_pvo_entries--;
	moea64_pvo_remove_calls++;

	if (!(pvo->pvo_vaddr & PVO_BOOTSTRAP))
		uma_zfree((pvo->pvo_vaddr & PVO_MANAGED) ? moea64_mpvo_zone :
		    moea64_upvo_zone, pvo);
}

static struct pvo_entry *
moea64_pvo_find_va(pmap_t pm, vm_offset_t va)
{
	struct pvo_entry key;

	key.pvo_vaddr = va & ~ADDR_POFF;
	return (RB_FIND(pvo_tree, &pm->pmap_pvo, &key));
}

static boolean_t
moea64_query_bit(mmu_t mmu, vm_page_t m, u_int64_t ptebit)
{
	struct	pvo_entry *pvo;
	uintptr_t pt;

	LOCK_TABLE_RD();
	LIST_FOREACH(pvo, vm_page_to_pvoh(m), pvo_vlink) {
		/*
		 * See if we saved the bit off.  If so, return success.
		 */
		if (pvo->pvo_pte.lpte.pte_lo & ptebit) {
			UNLOCK_TABLE_RD();
			return (TRUE);
		}
	}

	/*
	 * No luck, now go through the hard part of looking at the PTEs
	 * themselves.  Sync so that any pending REF/CHG bits are flushed to
	 * the PTEs.
	 */
	powerpc_sync();
	LIST_FOREACH(pvo, vm_page_to_pvoh(m), pvo_vlink) {

		/*
		 * See if this pvo has a valid PTE.  If so, fetch the
		 * REF/CHG bits from the valid PTE.  If the appropriate
		 * ptebit is set, return success.
		 */
2371190681Snwhitehorn */ 2372233529Snwhitehorn PMAP_LOCK(pvo->pvo_pmap); 2373216174Snwhitehorn pt = MOEA64_PVO_TO_PTE(mmu, pvo); 2374216174Snwhitehorn if (pt != -1) { 2375216174Snwhitehorn MOEA64_PTE_SYNCH(mmu, pt, &pvo->pvo_pte.lpte); 2376190681Snwhitehorn if (pvo->pvo_pte.lpte.pte_lo & ptebit) { 2377233529Snwhitehorn PMAP_UNLOCK(pvo->pvo_pmap); 2378233529Snwhitehorn UNLOCK_TABLE_RD(); 2379190681Snwhitehorn return (TRUE); 2380190681Snwhitehorn } 2381190681Snwhitehorn } 2382233529Snwhitehorn PMAP_UNLOCK(pvo->pvo_pmap); 2383190681Snwhitehorn } 2384190681Snwhitehorn 2385233529Snwhitehorn UNLOCK_TABLE_RD(); 2386190681Snwhitehorn return (FALSE); 2387190681Snwhitehorn} 2388190681Snwhitehorn 2389190681Snwhitehornstatic u_int 2390216174Snwhitehornmoea64_clear_bit(mmu_t mmu, vm_page_t m, u_int64_t ptebit) 2391190681Snwhitehorn{ 2392190681Snwhitehorn u_int count; 2393190681Snwhitehorn struct pvo_entry *pvo; 2394216174Snwhitehorn uintptr_t pt; 2395190681Snwhitehorn 2396190681Snwhitehorn /* 2397190681Snwhitehorn * Sync so that any pending REF/CHG bits are flushed to the PTEs (so 2398190681Snwhitehorn * we can reset the right ones). note that since the pvo entries and 2399190681Snwhitehorn * list heads are accessed via BAT0 and are never placed in the page 2400190681Snwhitehorn * table, we don't have to worry about further accesses setting the 2401190681Snwhitehorn * REF/CHG bits. 2402190681Snwhitehorn */ 2403216174Snwhitehorn powerpc_sync(); 2404190681Snwhitehorn 2405190681Snwhitehorn /* 2406190681Snwhitehorn * For each pvo entry, clear the pvo's ptebit. If this pvo has a 2407190681Snwhitehorn * valid pte clear the ptebit from the valid pte. 2408190681Snwhitehorn */ 2409190681Snwhitehorn count = 0; 2410233529Snwhitehorn LOCK_TABLE_RD(); 2411190681Snwhitehorn LIST_FOREACH(pvo, vm_page_to_pvoh(m), pvo_vlink) { 2412233529Snwhitehorn PMAP_LOCK(pvo->pvo_pmap); 2413216174Snwhitehorn pt = MOEA64_PVO_TO_PTE(mmu, pvo); 2414216174Snwhitehorn if (pt != -1) { 2415216174Snwhitehorn MOEA64_PTE_SYNCH(mmu, pt, &pvo->pvo_pte.lpte); 2416190681Snwhitehorn if (pvo->pvo_pte.lpte.pte_lo & ptebit) { 2417190681Snwhitehorn count++; 2418216174Snwhitehorn MOEA64_PTE_CLEAR(mmu, pt, &pvo->pvo_pte.lpte, 2419216174Snwhitehorn pvo->pvo_vpn, ptebit); 2420190681Snwhitehorn } 2421190681Snwhitehorn } 2422190681Snwhitehorn pvo->pvo_pte.lpte.pte_lo &= ~ptebit; 2423233529Snwhitehorn PMAP_UNLOCK(pvo->pvo_pmap); 2424190681Snwhitehorn } 2425190681Snwhitehorn 2426233529Snwhitehorn UNLOCK_TABLE_RD(); 2427190681Snwhitehorn return (count); 2428190681Snwhitehorn} 2429190681Snwhitehorn 2430190681Snwhitehornboolean_t 2431236019Srajmoea64_dev_direct_mapped(mmu_t mmu, vm_paddr_t pa, vm_size_t size) 2432190681Snwhitehorn{ 2433235689Snwhitehorn struct pvo_entry *pvo, key; 2434204296Snwhitehorn vm_offset_t ppa; 2435204296Snwhitehorn int error = 0; 2436204296Snwhitehorn 2437204296Snwhitehorn PMAP_LOCK(kernel_pmap); 2438235689Snwhitehorn key.pvo_vaddr = ppa = pa & ~ADDR_POFF; 2439235689Snwhitehorn for (pvo = RB_FIND(pvo_tree, &kernel_pmap->pmap_pvo, &key); 2440235689Snwhitehorn ppa < pa + size; ppa += PAGE_SIZE, 2441235689Snwhitehorn pvo = RB_NEXT(pvo_tree, &kernel_pmap->pmap_pvo, pvo)) { 2442204296Snwhitehorn if (pvo == NULL || 2443204296Snwhitehorn (pvo->pvo_pte.lpte.pte_lo & LPTE_RPGN) != ppa) { 2444204296Snwhitehorn error = EFAULT; 2445204296Snwhitehorn break; 2446204296Snwhitehorn } 2447204296Snwhitehorn } 2448204296Snwhitehorn PMAP_UNLOCK(kernel_pmap); 2449204296Snwhitehorn 2450204296Snwhitehorn return (error); 2451190681Snwhitehorn} 
/*
 * Map a set of physical memory pages into the kernel virtual
 * address space.  Return a pointer to where it is mapped.  This
 * routine is intended to be used for mapping device memory,
 * NOT real memory.
 */
void *
moea64_mapdev_attr(mmu_t mmu, vm_offset_t pa, vm_size_t size, vm_memattr_t ma)
{
	vm_offset_t va, tmpva, ppa, offset;

	ppa = trunc_page(pa);
	offset = pa & PAGE_MASK;
	size = roundup2(offset + size, PAGE_SIZE);

	va = kmem_alloc_nofault(kernel_map, size);

	if (!va)
		panic("moea64_mapdev: Couldn't alloc kernel virtual memory");

	for (tmpva = va; size > 0;) {
		moea64_kenter_attr(mmu, tmpva, ppa, ma);
		size -= PAGE_SIZE;
		tmpva += PAGE_SIZE;
		ppa += PAGE_SIZE;
	}

	return ((void *)(va + offset));
}

void *
moea64_mapdev(mmu_t mmu, vm_paddr_t pa, vm_size_t size)
{

	return moea64_mapdev_attr(mmu, pa, size, VM_MEMATTR_DEFAULT);
}

void
moea64_unmapdev(mmu_t mmu, vm_offset_t va, vm_size_t size)
{
	vm_offset_t base, offset;

	base = trunc_page(va);
	offset = va & PAGE_MASK;
	size = roundup2(offset + size, PAGE_SIZE);

	kmem_free(kernel_map, base, size);
}

void
moea64_sync_icache(mmu_t mmu, pmap_t pm, vm_offset_t va, vm_size_t sz)
{
	struct pvo_entry *pvo;
	vm_offset_t lim;
	vm_paddr_t pa;
	vm_size_t len;

	PMAP_LOCK(pm);
	while (sz > 0) {
		/* Use va + 1 so a page-aligned va still advances a page. */
		lim = round_page(va + 1);
		len = MIN(lim - va, sz);
		pvo = moea64_pvo_find_va(pm, va & ~ADDR_POFF);
		if (pvo != NULL && !(pvo->pvo_pte.lpte.pte_lo & LPTE_I)) {
			pa = (pvo->pvo_pte.lpte.pte_lo & LPTE_RPGN) |
			    (va & ADDR_POFF);
			moea64_syncicache(mmu, pm, va, pa, len);
		}
		va += len;
		sz -= len;
	}
	PMAP_UNLOCK(pm);
}