/*-
 * Copyright (c) 2008-2015 Nathan Whitehorn
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: head/sys/powerpc/aim/mmu_oea64.c 279594 2015-03-04 16:45:31Z nwhitehorn $");

/*
 * Manages physical address maps.
 *
 * Since the information managed by this module is also stored by the
 * logical address mapping module, this module may throw away valid virtual
 * to physical mappings at almost any time.  However, invalidations of
 * mappings must be done as requested.
 *
 * In order to cope with hardware architectures which make virtual to
 * physical map invalidates expensive, this module may delay invalidate
 * reduced protection operations until such time as they are actually
 * necessary.  This module is given full information as to which processors
 * are currently using which maps, and to when physical maps must be made
 * correct.
 */

#include "opt_compat.h"
#include "opt_kstack_pages.h"

#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/conf.h>
#include <sys/queue.h>
#include <sys/cpuset.h>
#include <sys/kerneldump.h>
#include <sys/ktr.h>
#include <sys/lock.h>
#include <sys/msgbuf.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/rwlock.h>
#include <sys/sched.h>
#include <sys/sysctl.h>
#include <sys/systm.h>
#include <sys/vmmeter.h>

#include <sys/kdb.h>

#include <dev/ofw/openfirm.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/vm_kern.h>
#include <vm/vm_page.h>
#include <vm/vm_map.h>
#include <vm/vm_object.h>
#include <vm/vm_extern.h>
#include <vm/vm_pageout.h>
#include <vm/uma.h>

#include <machine/_inttypes.h>
#include <machine/cpu.h>
#include <machine/platform.h>
#include <machine/frame.h>
#include <machine/md_var.h>
#include <machine/psl.h>
#include <machine/bat.h>
#include <machine/hid.h>
#include <machine/pte.h>
#include <machine/sr.h>
#include <machine/trap.h>
#include <machine/mmuvar.h>

#include "mmu_oea64.h"
#include "mmu_if.h"
#include "moea64_if.h"

void moea64_release_vsid(uint64_t vsid);
uintptr_t moea64_get_unique_vsid(void);

#define DISABLE_TRANS(msr)	msr = mfmsr(); mtmsr(msr & ~PSL_DR)
#define ENABLE_TRANS(msr)	mtmsr(msr)

#define	VSID_MAKE(sr, hash)	((sr) | (((hash) & 0xfffff) << 4))
#define	VSID_TO_HASH(vsid)	(((vsid) >> 4) & 0xfffff)
#define	VSID_HASH_MASK		0x0000007fffffffffULL

/*
 * Locking semantics:
 *
 * There are two locks of interest: the page locks and the pmap locks, which
 * protect their individual PVO lists and are locked in that order. The
 * contents of all PVO entries are protected by the locks of their respective
 * pmaps. The pmap of any PVO is guaranteed not to change so long as the PVO
 * is linked into any list.
 */
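
/*
 * Illustrative lock-order sketch (drawn from the rules above, using the
 * PV_PAGE_LOCK()/PMAP_LOCK() macros defined below, and mirroring what
 * moea64_enter() does later in this file): take the page lock first,
 * then the pmap lock, and release in the reverse order:
 *
 *	PV_PAGE_LOCK(m);
 *	PMAP_LOCK(pmap);
 *	... manipulate the PVO lists and PVO contents ...
 *	PMAP_UNLOCK(pmap);
 *	PV_PAGE_UNLOCK(m);
 */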

#define PV_LOCK_COUNT	PA_LOCK_COUNT*3
static struct mtx_padalign pv_lock[PV_LOCK_COUNT];

#define PV_LOCKPTR(pa)	((struct mtx *)(&pv_lock[pa_index(pa) % PV_LOCK_COUNT]))
#define PV_LOCK(pa)		mtx_lock(PV_LOCKPTR(pa))
#define PV_UNLOCK(pa)		mtx_unlock(PV_LOCKPTR(pa))
#define PV_LOCKASSERT(pa)	mtx_assert(PV_LOCKPTR(pa), MA_OWNED)
#define PV_PAGE_LOCK(m)		PV_LOCK(VM_PAGE_TO_PHYS(m))
#define PV_PAGE_UNLOCK(m)	PV_UNLOCK(VM_PAGE_TO_PHYS(m))
#define PV_PAGE_LOCKASSERT(m)	PV_LOCKASSERT(VM_PAGE_TO_PHYS(m))

struct ofw_map {
	cell_t	om_va;
	cell_t	om_len;
	uint64_t om_pa;
	cell_t	om_mode;
};

extern unsigned char _etext[];
extern unsigned char _end[];

extern int ofw_real_mode;

/*
 * Map of physical memory regions.
 */
static struct	mem_region *regions;
static struct	mem_region *pregions;
static u_int	phys_avail_count;
static int	regions_sz, pregions_sz;

extern void bs_remap_earlyboot(void);

/*
 * Lock for the SLB tables.
 */
struct mtx	moea64_slb_mutex;

/*
 * PTEG data.
 */
u_int		moea64_pteg_count;
u_int		moea64_pteg_mask;

/*
 * PVO data.
 */

uma_zone_t	moea64_pvo_zone; /* zone for pvo entries */

static struct	pvo_entry *moea64_bpvo_pool;
static int	moea64_bpvo_pool_index = 0;
static int	moea64_bpvo_pool_size = 327680;
TUNABLE_INT("machdep.moea64_bpvo_pool_size", &moea64_bpvo_pool_size);
SYSCTL_INT(_machdep, OID_AUTO, moea64_allocated_bpvo_entries, CTLFLAG_RD,
    &moea64_bpvo_pool_index, 0, "");

#define	VSID_NBPW	(sizeof(u_int32_t) * 8)
#ifdef __powerpc64__
#define	NVSIDS		(NPMAPS * 16)
#define	VSID_HASHMASK	0xffffffffUL
#else
#define	NVSIDS		NPMAPS
#define	VSID_HASHMASK	0xfffffUL
#endif
static u_int	moea64_vsid_bitmap[NVSIDS / VSID_NBPW];

static boolean_t moea64_initialized = FALSE;

/*
 * Statistics.
 */
u_int	moea64_pte_valid = 0;
u_int	moea64_pte_overflow = 0;
u_int	moea64_pvo_entries = 0;
u_int	moea64_pvo_enter_calls = 0;
u_int	moea64_pvo_remove_calls = 0;
SYSCTL_INT(_machdep, OID_AUTO, moea64_pte_valid, CTLFLAG_RD,
    &moea64_pte_valid, 0, "");
SYSCTL_INT(_machdep, OID_AUTO, moea64_pte_overflow, CTLFLAG_RD,
    &moea64_pte_overflow, 0, "");
SYSCTL_INT(_machdep, OID_AUTO, moea64_pvo_entries, CTLFLAG_RD,
    &moea64_pvo_entries, 0, "");
SYSCTL_INT(_machdep, OID_AUTO, moea64_pvo_enter_calls, CTLFLAG_RD,
    &moea64_pvo_enter_calls, 0, "");
SYSCTL_INT(_machdep, OID_AUTO, moea64_pvo_remove_calls, CTLFLAG_RD,
    &moea64_pvo_remove_calls, 0, "");
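
/*
 * The counters above are exported read-only and can be inspected from
 * userland, e.g. (illustrative; the names match the SYSCTL_INT
 * declarations above):
 *
 *	# sysctl machdep.moea64_pvo_enter_calls machdep.moea64_pte_overflow
 */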

vm_offset_t	moea64_scratchpage_va[2];
struct pvo_entry *moea64_scratchpage_pvo[2];
struct	mtx	moea64_scratchpage_mtx;

uint64_t 	moea64_large_page_mask = 0;
uint64_t	moea64_large_page_size = 0;
int		moea64_large_page_shift = 0;

/*
 * PVO calls.
 */
static int	moea64_pvo_enter(mmu_t mmu, struct pvo_entry *pvo,
		    struct pvo_head *pvo_head);
static void	moea64_pvo_remove_from_pmap(mmu_t mmu, struct pvo_entry *pvo);
static void	moea64_pvo_remove_from_page(mmu_t mmu, struct pvo_entry *pvo);
static struct	pvo_entry *moea64_pvo_find_va(pmap_t, vm_offset_t);

/*
 * Utility routines.
 */
static boolean_t	moea64_query_bit(mmu_t, vm_page_t, uint64_t);
static u_int		moea64_clear_bit(mmu_t, vm_page_t, uint64_t);
static void		moea64_kremove(mmu_t, vm_offset_t);
static void		moea64_syncicache(mmu_t, pmap_t pmap, vm_offset_t va,
			    vm_offset_t pa, vm_size_t sz);

/*
 * Kernel MMU interface
 */
void moea64_clear_modify(mmu_t, vm_page_t);
void moea64_copy_page(mmu_t, vm_page_t, vm_page_t);
void moea64_copy_pages(mmu_t mmu, vm_page_t *ma, vm_offset_t a_offset,
    vm_page_t *mb, vm_offset_t b_offset, int xfersize);
int moea64_enter(mmu_t, pmap_t, vm_offset_t, vm_page_t, vm_prot_t,
    u_int flags, int8_t psind);
void moea64_enter_object(mmu_t, pmap_t, vm_offset_t, vm_offset_t, vm_page_t,
    vm_prot_t);
void moea64_enter_quick(mmu_t, pmap_t, vm_offset_t, vm_page_t, vm_prot_t);
vm_paddr_t moea64_extract(mmu_t, pmap_t, vm_offset_t);
vm_page_t moea64_extract_and_hold(mmu_t, pmap_t, vm_offset_t, vm_prot_t);
void moea64_init(mmu_t);
boolean_t moea64_is_modified(mmu_t, vm_page_t);
boolean_t moea64_is_prefaultable(mmu_t, pmap_t, vm_offset_t);
boolean_t moea64_is_referenced(mmu_t, vm_page_t);
int moea64_ts_referenced(mmu_t, vm_page_t);
vm_offset_t moea64_map(mmu_t, vm_offset_t *, vm_paddr_t, vm_paddr_t, int);
boolean_t moea64_page_exists_quick(mmu_t, pmap_t, vm_page_t);
int moea64_page_wired_mappings(mmu_t, vm_page_t);
void moea64_pinit(mmu_t, pmap_t);
void moea64_pinit0(mmu_t, pmap_t);
void moea64_protect(mmu_t, pmap_t, vm_offset_t, vm_offset_t, vm_prot_t);
void moea64_qenter(mmu_t, vm_offset_t, vm_page_t *, int);
void moea64_qremove(mmu_t, vm_offset_t, int);
void moea64_release(mmu_t, pmap_t);
void moea64_remove(mmu_t, pmap_t, vm_offset_t, vm_offset_t);
void moea64_remove_pages(mmu_t, pmap_t);
void moea64_remove_all(mmu_t, vm_page_t);
void moea64_remove_write(mmu_t, vm_page_t);
void moea64_unwire(mmu_t, pmap_t, vm_offset_t, vm_offset_t);
void moea64_zero_page(mmu_t, vm_page_t);
void moea64_zero_page_area(mmu_t, vm_page_t, int, int);
void moea64_zero_page_idle(mmu_t, vm_page_t);
void moea64_activate(mmu_t, struct thread *);
void moea64_deactivate(mmu_t, struct thread *);
void *moea64_mapdev(mmu_t, vm_paddr_t, vm_size_t);
void *moea64_mapdev_attr(mmu_t, vm_offset_t, vm_size_t, vm_memattr_t);
void moea64_unmapdev(mmu_t, vm_offset_t, vm_size_t);
vm_paddr_t moea64_kextract(mmu_t, vm_offset_t);
void moea64_page_set_memattr(mmu_t, vm_page_t m, vm_memattr_t ma);
void moea64_kenter_attr(mmu_t, vm_offset_t, vm_offset_t, vm_memattr_t ma);
void moea64_kenter(mmu_t, vm_offset_t, vm_paddr_t);
boolean_t moea64_dev_direct_mapped(mmu_t, vm_paddr_t, vm_size_t);
static void moea64_sync_icache(mmu_t, pmap_t, vm_offset_t, vm_size_t);
void moea64_dumpsys_map(mmu_t mmu, vm_paddr_t pa, size_t sz,
    void **va);
void moea64_scan_init(mmu_t mmu);

static mmu_method_t moea64_methods[] = {
	MMUMETHOD(mmu_clear_modify,	moea64_clear_modify),
	MMUMETHOD(mmu_copy_page,	moea64_copy_page),
	MMUMETHOD(mmu_copy_pages,	moea64_copy_pages),
	MMUMETHOD(mmu_enter,		moea64_enter),
	MMUMETHOD(mmu_enter_object,	moea64_enter_object),
	MMUMETHOD(mmu_enter_quick,	moea64_enter_quick),
	MMUMETHOD(mmu_extract,		moea64_extract),
	MMUMETHOD(mmu_extract_and_hold,	moea64_extract_and_hold),
	MMUMETHOD(mmu_init,		moea64_init),
	MMUMETHOD(mmu_is_modified,	moea64_is_modified),
	MMUMETHOD(mmu_is_prefaultable,	moea64_is_prefaultable),
	MMUMETHOD(mmu_is_referenced,	moea64_is_referenced),
	MMUMETHOD(mmu_ts_referenced,	moea64_ts_referenced),
	MMUMETHOD(mmu_map,		moea64_map),
	MMUMETHOD(mmu_page_exists_quick,moea64_page_exists_quick),
	MMUMETHOD(mmu_page_wired_mappings,moea64_page_wired_mappings),
	MMUMETHOD(mmu_pinit,		moea64_pinit),
	MMUMETHOD(mmu_pinit0,		moea64_pinit0),
	MMUMETHOD(mmu_protect,		moea64_protect),
	MMUMETHOD(mmu_qenter,		moea64_qenter),
	MMUMETHOD(mmu_qremove,		moea64_qremove),
	MMUMETHOD(mmu_release,		moea64_release),
	MMUMETHOD(mmu_remove,		moea64_remove),
	MMUMETHOD(mmu_remove_pages,	moea64_remove_pages),
	MMUMETHOD(mmu_remove_all,	moea64_remove_all),
	MMUMETHOD(mmu_remove_write,	moea64_remove_write),
	MMUMETHOD(mmu_sync_icache,	moea64_sync_icache),
	MMUMETHOD(mmu_unwire,		moea64_unwire),
	MMUMETHOD(mmu_zero_page,	moea64_zero_page),
	MMUMETHOD(mmu_zero_page_area,	moea64_zero_page_area),
	MMUMETHOD(mmu_zero_page_idle,	moea64_zero_page_idle),
	MMUMETHOD(mmu_activate,		moea64_activate),
	MMUMETHOD(mmu_deactivate,	moea64_deactivate),
	MMUMETHOD(mmu_page_set_memattr,	moea64_page_set_memattr),

	/* Internal interfaces */
	MMUMETHOD(mmu_mapdev,		moea64_mapdev),
	MMUMETHOD(mmu_mapdev_attr,	moea64_mapdev_attr),
	MMUMETHOD(mmu_unmapdev,		moea64_unmapdev),
	MMUMETHOD(mmu_kextract,		moea64_kextract),
	MMUMETHOD(mmu_kenter,		moea64_kenter),
	MMUMETHOD(mmu_kenter_attr,	moea64_kenter_attr),
	MMUMETHOD(mmu_dev_direct_mapped,moea64_dev_direct_mapped),
	MMUMETHOD(mmu_scan_init,	moea64_scan_init),
	MMUMETHOD(mmu_dumpsys_map,	moea64_dumpsys_map),

	{ 0, 0 }
};

MMU_DEF(oea64_mmu, "mmu_oea64_base", moea64_methods, 0);
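
/*
 * The MMUMETHOD table above is the kobj-style dispatch glue described by
 * mmu_if.m: a machine-independent call such as pmap_enter() (see
 * pmap_dispatch.c) resolves through MMU_ENTER() to moea64_enter() here,
 * rather than by direct linkage.
 */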

static struct pvo_head *
vm_page_to_pvoh(vm_page_t m)
{

	mtx_assert(PV_LOCKPTR(VM_PAGE_TO_PHYS(m)), MA_OWNED);
	return (&m->md.mdpg_pvoh);
}

static struct pvo_entry *
alloc_pvo_entry(int bootstrap)
{
	struct pvo_entry *pvo;

	if (!moea64_initialized || bootstrap) {
		if (moea64_bpvo_pool_index >= moea64_bpvo_pool_size) {
			panic("moea64_enter: bpvo pool exhausted, %d, %d, %zd",
			    moea64_bpvo_pool_index, moea64_bpvo_pool_size,
			    moea64_bpvo_pool_size * sizeof(struct pvo_entry));
		}
		pvo = &moea64_bpvo_pool[
		    atomic_fetchadd_int(&moea64_bpvo_pool_index, 1)];
		bzero(pvo, sizeof(*pvo));
		pvo->pvo_vaddr = PVO_BOOTSTRAP;
	} else {
		/*
		 * M_ZERO keeps us from calling bzero() on a NULL return;
		 * the allocation can still fail under memory pressure, so
		 * callers must be prepared for a NULL result.
		 */
		pvo = uma_zalloc(moea64_pvo_zone, M_NOWAIT | M_ZERO);
	}

	return (pvo);
}

static void
init_pvo_entry(struct pvo_entry *pvo, pmap_t pmap, vm_offset_t va)
{
	uint64_t vsid;
	uint64_t hash;
	int shift;

	PMAP_LOCK_ASSERT(pmap, MA_OWNED);

	pvo->pvo_pmap = pmap;
	va &= ~ADDR_POFF;
	pvo->pvo_vaddr |= va;
	vsid = va_to_vsid(pmap, va);
	pvo->pvo_vpn = (uint64_t)((va & ADDR_PIDX) >> ADDR_PIDX_SHFT)
	    | (vsid << 16);

	shift = (pvo->pvo_vaddr & PVO_LARGE) ? moea64_large_page_shift :
	    ADDR_PIDX_SHFT;
	hash = (vsid & VSID_HASH_MASK) ^ (((uint64_t)va & ADDR_PIDX) >> shift);
	pvo->pvo_pte.slot = (hash & moea64_pteg_mask) << 3;
}
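
/*
 * Worked example of the slot computation above (a sketch): for a normal
 * 4 KB page, shift == ADDR_PIDX_SHFT, so
 *
 *	hash = (vsid & VSID_HASH_MASK) ^ ((va & ADDR_PIDX) >> ADDR_PIDX_SHFT);
 *	slot = (hash & moea64_pteg_mask) << 3;
 *
 * selects a PTEG; the shift left by 3 scales the group index by the
 * 8 PTEs per group, yielding the first slot of that group.
 */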

static void
free_pvo_entry(struct pvo_entry *pvo)
{

	if (!(pvo->pvo_vaddr & PVO_BOOTSTRAP))
		uma_zfree(moea64_pvo_zone, pvo);
}

void
moea64_pte_from_pvo(const struct pvo_entry *pvo, struct lpte *lpte)
{

	lpte->pte_hi = (pvo->pvo_vpn >> (ADDR_API_SHFT64 - ADDR_PIDX_SHFT)) &
	    LPTE_AVPN_MASK;
	lpte->pte_hi |= LPTE_VALID;

	if (pvo->pvo_vaddr & PVO_LARGE)
		lpte->pte_hi |= LPTE_BIG;
	if (pvo->pvo_vaddr & PVO_WIRED)
		lpte->pte_hi |= LPTE_WIRED;
	if (pvo->pvo_vaddr & PVO_HID)
		lpte->pte_hi |= LPTE_HID;

	lpte->pte_lo = pvo->pvo_pte.pa; /* Includes WIMG bits */
	if (pvo->pvo_pte.prot & VM_PROT_WRITE)
		lpte->pte_lo |= LPTE_BW;
	else
		lpte->pte_lo |= LPTE_BR;

	if (!(pvo->pvo_pte.prot & VM_PROT_EXECUTE))
		lpte->pte_lo |= LPTE_NOEXEC;
}

static __inline uint64_t
moea64_calc_wimg(vm_offset_t pa, vm_memattr_t ma)
{
	uint64_t pte_lo;
	int i;

	if (ma != VM_MEMATTR_DEFAULT) {
		switch (ma) {
		case VM_MEMATTR_UNCACHEABLE:
			return (LPTE_I | LPTE_G);
		case VM_MEMATTR_WRITE_COMBINING:
		case VM_MEMATTR_WRITE_BACK:
		case VM_MEMATTR_PREFETCHABLE:
			return (LPTE_I);
		case VM_MEMATTR_WRITE_THROUGH:
			return (LPTE_W | LPTE_M);
		}
	}

	/*
	 * Assume the page is cache inhibited and access is guarded unless
	 * it's in our available memory array.
	 */
	pte_lo = LPTE_I | LPTE_G;
	for (i = 0; i < pregions_sz; i++) {
		if ((pa >= pregions[i].mr_start) &&
		    (pa < (pregions[i].mr_start + pregions[i].mr_size))) {
			pte_lo &= ~(LPTE_I | LPTE_G);
			pte_lo |= LPTE_M;
			break;
		}
	}

	return pte_lo;
}
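
/*
 * Example of the effect (a sketch): device memory mapped with
 * VM_MEMATTR_UNCACHEABLE gets LPTE_I | LPTE_G (cache-inhibited and
 * guarded), while a physical address found in pregions[] gets LPTE_M,
 * so ordinary RAM stays cacheable and coherent across CPUs.
 */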

/*
 * Quick sort callout for comparing memory regions.
 */
static int	om_cmp(const void *a, const void *b);

static int
om_cmp(const void *a, const void *b)
{
	const struct	ofw_map *mapa;
	const struct	ofw_map *mapb;

	mapa = a;
	mapb = b;
	if (mapa->om_pa < mapb->om_pa)
		return (-1);
	else if (mapa->om_pa > mapb->om_pa)
		return (1);
	else
		return (0);
}

static void
moea64_add_ofw_mappings(mmu_t mmup, phandle_t mmu, size_t sz)
{
	struct ofw_map	translations[sz/(4*sizeof(cell_t))]; /*>= 4 cells per */
	pcell_t		acells, trans_cells[sz/sizeof(cell_t)];
	struct pvo_entry *pvo;
	register_t	msr;
	vm_offset_t	off;
	vm_paddr_t	pa_base;
	int		i, j;

	bzero(translations, sz);
	OF_getprop(OF_finddevice("/"), "#address-cells", &acells,
	    sizeof(acells));
	if (OF_getprop(mmu, "translations", trans_cells, sz) == -1)
		panic("moea64_bootstrap: can't get ofw translations");

	CTR0(KTR_PMAP, "moea64_add_ofw_mappings: translations");
	sz /= sizeof(cell_t);
	for (i = 0, j = 0; i < sz; j++) {
		translations[j].om_va = trans_cells[i++];
		translations[j].om_len = trans_cells[i++];
		translations[j].om_pa = trans_cells[i++];
		if (acells == 2) {
			translations[j].om_pa <<= 32;
			translations[j].om_pa |= trans_cells[i++];
		}
		translations[j].om_mode = trans_cells[i++];
	}
	KASSERT(i == sz, ("Translations map has incorrect cell count (%d/%zd)",
	    i, sz));

	sz = j;
	qsort(translations, sz, sizeof (*translations), om_cmp);

	for (i = 0; i < sz; i++) {
		pa_base = translations[i].om_pa;
	      #ifndef __powerpc64__
		if ((translations[i].om_pa >> 32) != 0)
			panic("OFW translations above 32-bit boundary!");
	      #endif

		if (pa_base % PAGE_SIZE)
			panic("OFW translation not page-aligned (phys)!");
		if (translations[i].om_va % PAGE_SIZE)
			panic("OFW translation not page-aligned (virt)!");

		CTR3(KTR_PMAP, "translation: pa=%#zx va=%#x len=%#x",
		    pa_base, translations[i].om_va, translations[i].om_len);

		/* Now enter the pages for this mapping */

		DISABLE_TRANS(msr);
		for (off = 0; off < translations[i].om_len; off += PAGE_SIZE) {
			/* If this address is direct-mapped, skip remapping */
			if (hw_direct_map &&
			    translations[i].om_va == pa_base &&
			    moea64_calc_wimg(pa_base + off, VM_MEMATTR_DEFAULT)
			    == LPTE_M)
				continue;

			PMAP_LOCK(kernel_pmap);
			pvo = moea64_pvo_find_va(kernel_pmap,
			    translations[i].om_va + off);
			PMAP_UNLOCK(kernel_pmap);
			if (pvo != NULL)
				continue;

			moea64_kenter(mmup, translations[i].om_va + off,
			    pa_base + off);
		}
		ENABLE_TRANS(msr);
	}
}

#ifdef __powerpc64__
static void
moea64_probe_large_page(void)
{
	uint16_t pvr = mfpvr() >> 16;

	switch (pvr) {
	case IBM970:
	case IBM970FX:
	case IBM970MP:
		powerpc_sync(); isync();
		mtspr(SPR_HID4, mfspr(SPR_HID4) & ~HID4_970_DISABLE_LG_PG);
		powerpc_sync(); isync();

		/* FALLTHROUGH */
	default:
		moea64_large_page_size = 0x1000000; /* 16 MB */
		moea64_large_page_shift = 24;
	}

	moea64_large_page_mask = moea64_large_page_size - 1;
}

static void
moea64_bootstrap_slb_prefault(vm_offset_t va, int large)
{
	struct slb *cache;
	struct slb entry;
	uint64_t esid, slbe;
	uint64_t i;

	cache = PCPU_GET(slb);
	esid = va >> ADDR_SR_SHFT;
	slbe = (esid << SLBE_ESID_SHIFT) | SLBE_VALID;

	for (i = 0; i < 64; i++) {
		if (cache[i].slbe == (slbe | i))
			return;
	}

	entry.slbe = slbe;
	entry.slbv = KERNEL_VSID(esid) << SLBV_VSID_SHIFT;
	if (large)
		entry.slbv |= SLBV_L;

	slb_insert_kernel(entry.slbe, entry.slbv);
}
#endif
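
/*
 * Sketch of the SLB encoding used above: the ESID (va >> ADDR_SR_SHFT)
 * names the segment, so the prefault builds
 *
 *	slbe = (esid << SLBE_ESID_SHIFT) | SLBE_VALID;
 *	slbv = KERNEL_VSID(esid) << SLBV_VSID_SHIFT;  (| SLBV_L for 16 MB pages)
 *
 * and slb_insert_kernel() chooses which of the cached entries to evict
 * if all slots are busy.
 */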

static void
moea64_setup_direct_map(mmu_t mmup, vm_offset_t kernelstart,
    vm_offset_t kernelend)
{
	struct pvo_entry *pvo;
	register_t msr;
	vm_paddr_t pa;
	vm_offset_t size, off;
	uint64_t pte_lo;
	int i;

	if (moea64_large_page_size == 0)
		hw_direct_map = 0;

	DISABLE_TRANS(msr);
	if (hw_direct_map) {
		PMAP_LOCK(kernel_pmap);
		for (i = 0; i < pregions_sz; i++) {
		  for (pa = pregions[i].mr_start; pa < pregions[i].mr_start +
		     pregions[i].mr_size; pa += moea64_large_page_size) {
			pte_lo = LPTE_M;

			pvo = alloc_pvo_entry(1 /* bootstrap */);
			pvo->pvo_vaddr |= PVO_WIRED | PVO_LARGE;
			init_pvo_entry(pvo, kernel_pmap, pa);

			/*
			 * Set memory access as guarded if prefetch within
			 * the page could exit the available physmem area.
			 */
			if (pa & moea64_large_page_mask) {
				pa &= moea64_large_page_mask;
				pte_lo |= LPTE_G;
			}
			if (pa + moea64_large_page_size >
			    pregions[i].mr_start + pregions[i].mr_size)
				pte_lo |= LPTE_G;

			pvo->pvo_pte.prot = VM_PROT_READ | VM_PROT_WRITE |
			    VM_PROT_EXECUTE;
			pvo->pvo_pte.pa = pa | pte_lo;
			moea64_pvo_enter(mmup, pvo, NULL);
		  }
		}
		PMAP_UNLOCK(kernel_pmap);
	} else {
		size = moea64_bpvo_pool_size*sizeof(struct pvo_entry);
		off = (vm_offset_t)(moea64_bpvo_pool);
		for (pa = off; pa < off + size; pa += PAGE_SIZE)
			moea64_kenter(mmup, pa, pa);

		/*
		 * Map certain important things, like ourselves.
		 *
		 * NOTE: We do not map the exception vector space. That code is
		 * used only in real mode, and leaving it unmapped allows us to
		 * catch NULL pointer dereferences, instead of making NULL a
		 * valid address.
		 */

		for (pa = kernelstart & ~PAGE_MASK; pa < kernelend;
		    pa += PAGE_SIZE)
			moea64_kenter(mmup, pa, pa);
	}
	ENABLE_TRANS(msr);

	/*
	 * Allow user to override unmapped_buf_allowed for testing.
	 * XXXKIB Only direct map implementation was tested.
	 */
	if (!TUNABLE_INT_FETCH("vfs.unmapped_buf_allowed",
	    &unmapped_buf_allowed))
		unmapped_buf_allowed = hw_direct_map;
}
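
/*
 * Resulting invariant (a sketch of how later code relies on this): with
 * hw_direct_map != 0, a physical address doubles as a kernel virtual
 * address, so e.g.
 *
 *	bcopy((void *)src_pa, (void *)dst_pa, PAGE_SIZE);
 *
 * is legal, which is exactly what the direct-map branches of
 * moea64_copy_page() and moea64_zero_page() below depend on.
 */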

void
moea64_early_bootstrap(mmu_t mmup, vm_offset_t kernelstart, vm_offset_t kernelend)
{
	int		i, j;
	vm_size_t	physsz, hwphyssz;

#ifndef __powerpc64__
	/* We don't have a direct map since there is no BAT */
	hw_direct_map = 0;

	/* Make sure battable is zero, since we have no BAT */
	for (i = 0; i < 16; i++) {
		battable[i].batu = 0;
		battable[i].batl = 0;
	}
#else
	moea64_probe_large_page();

	/* Use a direct map if we have large page support */
	if (moea64_large_page_size > 0)
		hw_direct_map = 1;
	else
		hw_direct_map = 0;
#endif

	/* Get physical memory regions from firmware */
	mem_regions(&pregions, &pregions_sz, &regions, &regions_sz);
	CTR0(KTR_PMAP, "moea64_bootstrap: physical memory");

	if (sizeof(phys_avail)/sizeof(phys_avail[0]) < regions_sz)
		panic("moea64_bootstrap: phys_avail too small");

	phys_avail_count = 0;
	physsz = 0;
	hwphyssz = 0;
	TUNABLE_ULONG_FETCH("hw.physmem", (u_long *) &hwphyssz);
	for (i = 0, j = 0; i < regions_sz; i++, j += 2) {
		CTR3(KTR_PMAP, "region: %#zx - %#zx (%#zx)",
		    regions[i].mr_start, regions[i].mr_start +
		    regions[i].mr_size, regions[i].mr_size);
		if (hwphyssz != 0 &&
		    (physsz + regions[i].mr_size) >= hwphyssz) {
			if (physsz < hwphyssz) {
				phys_avail[j] = regions[i].mr_start;
				phys_avail[j + 1] = regions[i].mr_start +
				    hwphyssz - physsz;
				physsz = hwphyssz;
				phys_avail_count++;
			}
			break;
		}
		phys_avail[j] = regions[i].mr_start;
		phys_avail[j + 1] = regions[i].mr_start + regions[i].mr_size;
		phys_avail_count++;
		physsz += regions[i].mr_size;
	}

	/* Check for overlap with the kernel and exception vectors */
	for (j = 0; j < 2*phys_avail_count; j+=2) {
		if (phys_avail[j] < EXC_LAST)
			phys_avail[j] += EXC_LAST;

		if (kernelstart >= phys_avail[j] &&
		    kernelstart < phys_avail[j+1]) {
			if (kernelend < phys_avail[j+1]) {
				phys_avail[2*phys_avail_count] =
				    (kernelend & ~PAGE_MASK) + PAGE_SIZE;
				phys_avail[2*phys_avail_count + 1] =
				    phys_avail[j+1];
				phys_avail_count++;
			}

			phys_avail[j+1] = kernelstart & ~PAGE_MASK;
		}

		if (kernelend >= phys_avail[j] &&
		    kernelend < phys_avail[j+1]) {
			if (kernelstart > phys_avail[j]) {
				phys_avail[2*phys_avail_count] = phys_avail[j];
				phys_avail[2*phys_avail_count + 1] =
				    kernelstart & ~PAGE_MASK;
				phys_avail_count++;
			}

			phys_avail[j] = (kernelend & ~PAGE_MASK) + PAGE_SIZE;
		}
	}

	physmem = btoc(physsz);

#ifdef PTEGCOUNT
	moea64_pteg_count = PTEGCOUNT;
#else
	moea64_pteg_count = 0x1000;

	while (moea64_pteg_count < physmem)
		moea64_pteg_count <<= 1;

	moea64_pteg_count >>= 1;
#endif /* PTEGCOUNT */
}

void
moea64_mid_bootstrap(mmu_t mmup, vm_offset_t kernelstart, vm_offset_t kernelend)
{
	int		i;

	/*
	 * Set PTEG mask
	 */
	moea64_pteg_mask = moea64_pteg_count - 1;

	/*
	 * Initialize SLB table lock and page locks
	 */
	mtx_init(&moea64_slb_mutex, "SLB table", NULL, MTX_DEF);
	for (i = 0; i < PV_LOCK_COUNT; i++)
		mtx_init(&pv_lock[i], "page pv", NULL, MTX_DEF);

	/*
	 * Initialize the bootstrap pvo pool.
	 */
	moea64_bpvo_pool = (struct pvo_entry *)moea64_bootstrap_alloc(
		moea64_bpvo_pool_size*sizeof(struct pvo_entry), 0);
	moea64_bpvo_pool_index = 0;

	/*
	 * Make sure kernel vsid is allocated as well as VSID 0.
	 */
	#ifndef __powerpc64__
	moea64_vsid_bitmap[(KERNEL_VSIDBITS & (NVSIDS - 1)) / VSID_NBPW]
		|= 1 << (KERNEL_VSIDBITS % VSID_NBPW);
	moea64_vsid_bitmap[0] |= 1;
	#endif

	/*
	 * Initialize the kernel pmap (which is statically allocated).
	 */
	#ifdef __powerpc64__
	for (i = 0; i < 64; i++) {
		pcpup->pc_slb[i].slbv = 0;
		pcpup->pc_slb[i].slbe = 0;
	}
	#else
	for (i = 0; i < 16; i++)
		kernel_pmap->pm_sr[i] = EMPTY_SEGMENT + i;
	#endif

	kernel_pmap->pmap_phys = kernel_pmap;
	CPU_FILL(&kernel_pmap->pm_active);
	RB_INIT(&kernel_pmap->pmap_pvo);

	PMAP_LOCK_INIT(kernel_pmap);

	/*
	 * Now map in all the other buffers we allocated earlier
	 */

	moea64_setup_direct_map(mmup, kernelstart, kernelend);
}

void
moea64_late_bootstrap(mmu_t mmup, vm_offset_t kernelstart, vm_offset_t kernelend)
{
	ihandle_t	mmui;
	phandle_t	chosen;
	phandle_t	mmu;
	ssize_t		sz;
	int		i;
	vm_offset_t	pa, va;
	void		*dpcpu;

	/*
	 * Set up the Open Firmware pmap and add its mappings if not in real
	 * mode.
	 */

	chosen = OF_finddevice("/chosen");
	if (!ofw_real_mode && chosen != -1 &&
	    OF_getprop(chosen, "mmu", &mmui, 4) != -1) {
		mmu = OF_instance_to_package(mmui);
		if (mmu == -1 ||
		    (sz = OF_getproplen(mmu, "translations")) == -1)
			sz = 0;
		if (sz > 6144 /* tmpstksz - 2 KB headroom */)
			panic("moea64_bootstrap: too many ofw translations");

		if (sz > 0)
			moea64_add_ofw_mappings(mmup, mmu, sz);
	}

	/*
	 * Calculate the last available physical address.
	 */
	for (i = 0; phys_avail[i + 2] != 0; i += 2)
		;
	Maxmem = powerpc_btop(phys_avail[i + 1]);

	/*
	 * Initialize MMU and remap early physical mappings
	 */
	MMU_CPU_BOOTSTRAP(mmup,0);
	mtmsr(mfmsr() | PSL_DR | PSL_IR);
	pmap_bootstrapped++;
	bs_remap_earlyboot();

	/*
	 * Set the start and end of kva.
	 */
	virtual_avail = VM_MIN_KERNEL_ADDRESS;
	virtual_end = VM_MAX_SAFE_KERNEL_ADDRESS;

	/*
	 * Map the entire KVA range into the SLB. We must not fault there.
	 */
	#ifdef __powerpc64__
	for (va = virtual_avail; va < virtual_end; va += SEGMENT_LENGTH)
		moea64_bootstrap_slb_prefault(va, 0);
	#endif

	/*
	 * Figure out how far we can extend virtual_end into segment 16
	 * without running into existing mappings. Segment 16 is guaranteed
	 * to contain neither RAM nor devices (at least on Apple hardware),
	 * but will generally contain some OFW mappings we should not
	 * step on.
	 */

	#ifndef __powerpc64__	/* KVA is in high memory on PPC64 */
	PMAP_LOCK(kernel_pmap);
	while (virtual_end < VM_MAX_KERNEL_ADDRESS &&
	    moea64_pvo_find_va(kernel_pmap, virtual_end+1) == NULL)
		virtual_end += PAGE_SIZE;
	PMAP_UNLOCK(kernel_pmap);
	#endif

	/*
	 * Allocate a kernel stack with a guard page for thread0 and map it
	 * into the kernel page map.
	 */
	pa = moea64_bootstrap_alloc(KSTACK_PAGES * PAGE_SIZE, PAGE_SIZE);
	va = virtual_avail + KSTACK_GUARD_PAGES * PAGE_SIZE;
	virtual_avail = va + KSTACK_PAGES * PAGE_SIZE;
	CTR2(KTR_PMAP, "moea64_bootstrap: kstack0 at %#x (%#x)", pa, va);
	thread0.td_kstack = va;
	thread0.td_kstack_pages = KSTACK_PAGES;
	for (i = 0; i < KSTACK_PAGES; i++) {
		moea64_kenter(mmup, va, pa);
		pa += PAGE_SIZE;
		va += PAGE_SIZE;
	}

	/*
	 * Allocate virtual address space for the message buffer.
	 */
	pa = msgbuf_phys = moea64_bootstrap_alloc(msgbufsize, PAGE_SIZE);
	msgbufp = (struct msgbuf *)virtual_avail;
	va = virtual_avail;
	virtual_avail += round_page(msgbufsize);
	while (va < virtual_avail) {
		moea64_kenter(mmup, va, pa);
		pa += PAGE_SIZE;
		va += PAGE_SIZE;
	}

	/*
	 * Allocate virtual address space for the dynamic percpu area.
	 */
	pa = moea64_bootstrap_alloc(DPCPU_SIZE, PAGE_SIZE);
	dpcpu = (void *)virtual_avail;
	va = virtual_avail;
	virtual_avail += DPCPU_SIZE;
	while (va < virtual_avail) {
		moea64_kenter(mmup, va, pa);
		pa += PAGE_SIZE;
		va += PAGE_SIZE;
	}
	dpcpu_init(dpcpu, 0);
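
	/*
	 * Each of the three carve-outs above follows the same bootstrap
	 * pattern (a sketch, not literal code):
	 *
	 *	pa = moea64_bootstrap_alloc(size, PAGE_SIZE);	// steal phys pages
	 *	va = virtual_avail; virtual_avail += size;	// reserve KVA
	 *	while (va < virtual_avail) {			// wire each page
	 *		moea64_kenter(mmup, va, pa);
	 *		va += PAGE_SIZE; pa += PAGE_SIZE;
	 *	}
	 */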

	/*
	 * Allocate some things for page zeroing. We put this directly
	 * in the page table and use MOEA64_PTE_REPLACE to avoid any
	 * of the PVO book-keeping or other parts of the VM system
	 * from even knowing that this hack exists.
	 */

	if (!hw_direct_map) {
		mtx_init(&moea64_scratchpage_mtx, "pvo zero page", NULL,
		    MTX_DEF);
		for (i = 0; i < 2; i++) {
			moea64_scratchpage_va[i] = (virtual_end+1) - PAGE_SIZE;
			virtual_end -= PAGE_SIZE;

			moea64_kenter(mmup, moea64_scratchpage_va[i], 0);

			PMAP_LOCK(kernel_pmap);
			moea64_scratchpage_pvo[i] = moea64_pvo_find_va(
			    kernel_pmap, (vm_offset_t)moea64_scratchpage_va[i]);
			PMAP_UNLOCK(kernel_pmap);
		}
	}
}

/*
 * Activate a user pmap. This mostly involves setting some non-CPU
 * state.
 */
void
moea64_activate(mmu_t mmu, struct thread *td)
{
	pmap_t	pm;

	pm = &td->td_proc->p_vmspace->vm_pmap;
	CPU_SET(PCPU_GET(cpuid), &pm->pm_active);

	#ifdef __powerpc64__
	PCPU_SET(userslb, pm->pm_slb);
	__asm __volatile("slbmte %0, %1; isync" ::
	    "r"(td->td_pcb->pcb_cpu.aim.usr_vsid), "r"(USER_SLB_SLBE));
	#else
	PCPU_SET(curpmap, pm->pmap_phys);
	mtsrin(USER_SR << ADDR_SR_SHFT, td->td_pcb->pcb_cpu.aim.usr_vsid);
	#endif
}

void
moea64_deactivate(mmu_t mmu, struct thread *td)
{
	pmap_t	pm;

	__asm __volatile("isync; slbie %0" :: "r"(USER_ADDR));

	pm = &td->td_proc->p_vmspace->vm_pmap;
	CPU_CLR(PCPU_GET(cpuid), &pm->pm_active);
	#ifdef __powerpc64__
	PCPU_SET(userslb, NULL);
	#else
	PCPU_SET(curpmap, NULL);
	#endif
}
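
/*
 * Context-switch sketch (64-bit case): the slbmte in moea64_activate()
 * loads the SLB entry that translates the USER_ADDR segment with the
 * incoming process's VSID, and the slbie in moea64_deactivate() drops
 * that translation again, so the user-access window never leaks between
 * address spaces.
 */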

void
moea64_unwire(mmu_t mmu, pmap_t pm, vm_offset_t sva, vm_offset_t eva)
{
	struct	pvo_entry key, *pvo;
	vm_page_t m;
	int64_t	refchg;

	key.pvo_vaddr = sva;
	PMAP_LOCK(pm);
	for (pvo = RB_NFIND(pvo_tree, &pm->pmap_pvo, &key);
	    pvo != NULL && PVO_VADDR(pvo) < eva;
	    pvo = RB_NEXT(pvo_tree, &pm->pmap_pvo, pvo)) {
		if ((pvo->pvo_vaddr & PVO_WIRED) == 0)
			panic("moea64_unwire: pvo %p is missing PVO_WIRED",
			    pvo);
		pvo->pvo_vaddr &= ~PVO_WIRED;
		refchg = MOEA64_PTE_REPLACE(mmu, pvo, 0 /* No invalidation */);
		if ((pvo->pvo_vaddr & PVO_MANAGED) &&
		    (pvo->pvo_pte.prot & VM_PROT_WRITE)) {
			if (refchg < 0)
				refchg = LPTE_CHG;
			m = PHYS_TO_VM_PAGE(pvo->pvo_pte.pa & LPTE_RPGN);

			refchg |= atomic_readandclear_32(&m->md.mdpg_attrs);
			if (refchg & LPTE_CHG)
				vm_page_dirty(m);
			if (refchg & LPTE_REF)
				vm_page_aflag_set(m, PGA_REFERENCED);
		}
		pm->pm_stats.wired_count--;
	}
	PMAP_UNLOCK(pm);
}

/*
 * This goes through and sets the physical address of our
 * special scratch PTE to the PA we want to zero or copy. Because
 * of locking issues (this can get called in pvo_enter() by
 * the UMA allocator), we can't use most other utility functions here.
 */

static __inline
void moea64_set_scratchpage_pa(mmu_t mmup, int which, vm_offset_t pa) {

	KASSERT(!hw_direct_map, ("Using OEA64 scratchpage with a direct map!"));
	mtx_assert(&moea64_scratchpage_mtx, MA_OWNED);

	moea64_scratchpage_pvo[which]->pvo_pte.pa =
	    moea64_calc_wimg(pa, VM_MEMATTR_DEFAULT) | (uint64_t)pa;
	MOEA64_PTE_REPLACE(mmup, moea64_scratchpage_pvo[which],
	    MOEA64_PTE_INVALIDATE);
	isync();
}

void
moea64_copy_page(mmu_t mmu, vm_page_t msrc, vm_page_t mdst)
{
	vm_offset_t	dst;
	vm_offset_t	src;

	dst = VM_PAGE_TO_PHYS(mdst);
	src = VM_PAGE_TO_PHYS(msrc);

	if (hw_direct_map) {
		bcopy((void *)src, (void *)dst, PAGE_SIZE);
	} else {
		mtx_lock(&moea64_scratchpage_mtx);

		moea64_set_scratchpage_pa(mmu, 0, src);
		moea64_set_scratchpage_pa(mmu, 1, dst);

		bcopy((void *)moea64_scratchpage_va[0],
		    (void *)moea64_scratchpage_va[1], PAGE_SIZE);

		mtx_unlock(&moea64_scratchpage_mtx);
	}
}
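
/*
 * Scratch-slot convention in this file (as used above and below): slot 0
 * serves as the source / zeroing window and slot 1 as the destination /
 * icache window, so a page copy can hold both mappings at once under the
 * single moea64_scratchpage_mtx.
 */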

static inline void
moea64_copy_pages_dmap(mmu_t mmu, vm_page_t *ma, vm_offset_t a_offset,
    vm_page_t *mb, vm_offset_t b_offset, int xfersize)
{
	void *a_cp, *b_cp;
	vm_offset_t a_pg_offset, b_pg_offset;
	int cnt;

	while (xfersize > 0) {
		a_pg_offset = a_offset & PAGE_MASK;
		cnt = min(xfersize, PAGE_SIZE - a_pg_offset);
		a_cp = (char *)VM_PAGE_TO_PHYS(ma[a_offset >> PAGE_SHIFT]) +
		    a_pg_offset;
		b_pg_offset = b_offset & PAGE_MASK;
		cnt = min(cnt, PAGE_SIZE - b_pg_offset);
		b_cp = (char *)VM_PAGE_TO_PHYS(mb[b_offset >> PAGE_SHIFT]) +
		    b_pg_offset;
		bcopy(a_cp, b_cp, cnt);
		a_offset += cnt;
		b_offset += cnt;
		xfersize -= cnt;
	}
}

static inline void
moea64_copy_pages_nodmap(mmu_t mmu, vm_page_t *ma, vm_offset_t a_offset,
    vm_page_t *mb, vm_offset_t b_offset, int xfersize)
{
	void *a_cp, *b_cp;
	vm_offset_t a_pg_offset, b_pg_offset;
	int cnt;

	mtx_lock(&moea64_scratchpage_mtx);
	while (xfersize > 0) {
		a_pg_offset = a_offset & PAGE_MASK;
		cnt = min(xfersize, PAGE_SIZE - a_pg_offset);
		moea64_set_scratchpage_pa(mmu, 0,
		    VM_PAGE_TO_PHYS(ma[a_offset >> PAGE_SHIFT]));
		a_cp = (char *)moea64_scratchpage_va[0] + a_pg_offset;
		b_pg_offset = b_offset & PAGE_MASK;
		cnt = min(cnt, PAGE_SIZE - b_pg_offset);
		moea64_set_scratchpage_pa(mmu, 1,
		    VM_PAGE_TO_PHYS(mb[b_offset >> PAGE_SHIFT]));
		b_cp = (char *)moea64_scratchpage_va[1] + b_pg_offset;
		bcopy(a_cp, b_cp, cnt);
		a_offset += cnt;
		b_offset += cnt;
		xfersize -= cnt;
	}
	mtx_unlock(&moea64_scratchpage_mtx);
}

void
moea64_copy_pages(mmu_t mmu, vm_page_t *ma, vm_offset_t a_offset,
    vm_page_t *mb, vm_offset_t b_offset, int xfersize)
{

	if (hw_direct_map) {
		moea64_copy_pages_dmap(mmu, ma, a_offset, mb, b_offset,
		    xfersize);
	} else {
		moea64_copy_pages_nodmap(mmu, ma, a_offset, mb, b_offset,
		    xfersize);
	}
}

void
moea64_zero_page_area(mmu_t mmu, vm_page_t m, int off, int size)
{
	vm_offset_t pa = VM_PAGE_TO_PHYS(m);

	if (size + off > PAGE_SIZE)
		panic("moea64_zero_page: size + off > PAGE_SIZE");

	if (hw_direct_map) {
		bzero((caddr_t)pa + off, size);
	} else {
		mtx_lock(&moea64_scratchpage_mtx);
		moea64_set_scratchpage_pa(mmu, 0, pa);
		bzero((caddr_t)moea64_scratchpage_va[0] + off, size);
		mtx_unlock(&moea64_scratchpage_mtx);
	}
}

/*
 * Zero a page of physical memory by temporarily mapping it
 */
void
moea64_zero_page(mmu_t mmu, vm_page_t m)
{
	vm_offset_t pa = VM_PAGE_TO_PHYS(m);
	vm_offset_t va, off;

	if (!hw_direct_map) {
		mtx_lock(&moea64_scratchpage_mtx);

		moea64_set_scratchpage_pa(mmu, 0, pa);
		va = moea64_scratchpage_va[0];
	} else {
		va = pa;
	}

	for (off = 0; off < PAGE_SIZE; off += cacheline_size)
		__asm __volatile("dcbz 0,%0" :: "r"(va + off));

	if (!hw_direct_map)
		mtx_unlock(&moea64_scratchpage_mtx);
}
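
/*
 * dcbz zeroes one full data cache block per iteration, so the loop in
 * moea64_zero_page() clears the page a cache line at a time without
 * first fetching it from memory; e.g. with 128-byte lines that is
 * PAGE_SIZE / 128 = 32 iterations for a 4 KB page.
 */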

void
moea64_zero_page_idle(mmu_t mmu, vm_page_t m)
{

	moea64_zero_page(mmu, m);
}

/*
 * Map the given physical page at the specified virtual address in the
 * target pmap with the protection requested.  If specified the page
 * will be wired down.
 */

int
moea64_enter(mmu_t mmu, pmap_t pmap, vm_offset_t va, vm_page_t m,
    vm_prot_t prot, u_int flags, int8_t psind)
{
	struct		pvo_entry *pvo, *oldpvo;
	struct		pvo_head *pvo_head;
	uint64_t	pte_lo;
	int		error;

	if ((m->oflags & VPO_UNMANAGED) == 0 && !vm_page_xbusied(m))
		VM_OBJECT_ASSERT_LOCKED(m->object);

	pvo = alloc_pvo_entry(0);
	pvo->pvo_pmap = NULL; /* to be filled in later */
	pvo->pvo_pte.prot = prot;

	pte_lo = moea64_calc_wimg(VM_PAGE_TO_PHYS(m), pmap_page_get_memattr(m));
	pvo->pvo_pte.pa = VM_PAGE_TO_PHYS(m) | pte_lo;

	if ((flags & PMAP_ENTER_WIRED) != 0)
		pvo->pvo_vaddr |= PVO_WIRED;

	if ((m->oflags & VPO_UNMANAGED) != 0 || !moea64_initialized) {
		pvo_head = NULL;
	} else {
		pvo_head = &m->md.mdpg_pvoh;
		pvo->pvo_vaddr |= PVO_MANAGED;
	}

	for (;;) {
		PV_PAGE_LOCK(m);
		PMAP_LOCK(pmap);
		if (pvo->pvo_pmap == NULL)
			init_pvo_entry(pvo, pmap, va);
		if (prot & VM_PROT_WRITE)
			if (pmap_bootstrapped &&
			    (m->oflags & VPO_UNMANAGED) == 0)
				vm_page_aflag_set(m, PGA_WRITEABLE);

		oldpvo = moea64_pvo_find_va(pmap, va);
		if (oldpvo != NULL) {
			if (oldpvo->pvo_vaddr == pvo->pvo_vaddr &&
			    oldpvo->pvo_pte.pa == pvo->pvo_pte.pa &&
			    oldpvo->pvo_pte.prot == prot) {
				/* Identical mapping already exists */
				error = 0;

				/* If not in page table, reinsert it */
				if (MOEA64_PTE_SYNCH(mmu, oldpvo) < 0) {
					moea64_pte_overflow--;
					MOEA64_PTE_INSERT(mmu, oldpvo);
				}

				/* Then just clean up and go home */
				PV_PAGE_UNLOCK(m);
				PMAP_UNLOCK(pmap);
				free_pvo_entry(pvo);
				break;
			}

			/* Otherwise, need to kill it first */
			KASSERT(oldpvo->pvo_pmap == pmap, ("pmap of old "
			    "mapping does not match new mapping"));
			moea64_pvo_remove_from_pmap(mmu, oldpvo);
		}
		error = moea64_pvo_enter(mmu, pvo, pvo_head);
		PV_PAGE_UNLOCK(m);
		PMAP_UNLOCK(pmap);

1287279252Snwhitehorn /* Free any dead pages */ 1288279252Snwhitehorn if (oldpvo != NULL) { 1289279252Snwhitehorn PV_LOCK(oldpvo->pvo_pte.pa & LPTE_RPGN); 1290279252Snwhitehorn moea64_pvo_remove_from_page(mmu, oldpvo); 1291279252Snwhitehorn PV_UNLOCK(oldpvo->pvo_pte.pa & LPTE_RPGN); 1292279252Snwhitehorn free_pvo_entry(oldpvo); 1293279252Snwhitehorn } 1294279252Snwhitehorn 1295269728Skib if (error != ENOMEM) 1296269728Skib break; 1297269728Skib if ((flags & PMAP_ENTER_NOSLEEP) != 0) 1298269728Skib return (KERN_RESOURCE_SHORTAGE); 1299269728Skib VM_OBJECT_ASSERT_UNLOCKED(m->object); 1300269728Skib VM_WAIT; 1301269728Skib } 1302190681Snwhitehorn 1303190681Snwhitehorn /* 1304190681Snwhitehorn * Flush the page from the instruction cache if this page is 1305190681Snwhitehorn * mapped executable and cacheable. 1306190681Snwhitehorn */ 1307233949Snwhitehorn if (pmap != kernel_pmap && !(m->aflags & PGA_EXECUTABLE) && 1308233949Snwhitehorn (pte_lo & (LPTE_I | LPTE_G | LPTE_NOEXEC)) == 0) { 1309233949Snwhitehorn vm_page_aflag_set(m, PGA_EXECUTABLE); 1310216174Snwhitehorn moea64_syncicache(mmu, pmap, va, VM_PAGE_TO_PHYS(m), PAGE_SIZE); 1311233949Snwhitehorn } 1312269728Skib return (KERN_SUCCESS); 1313190681Snwhitehorn} 1314190681Snwhitehorn 1315190681Snwhitehornstatic void 1316216174Snwhitehornmoea64_syncicache(mmu_t mmu, pmap_t pmap, vm_offset_t va, vm_offset_t pa, 1317216174Snwhitehorn vm_size_t sz) 1318190681Snwhitehorn{ 1319204042Snwhitehorn 1320190681Snwhitehorn /* 1321190681Snwhitehorn * This is much trickier than on older systems because 1322190681Snwhitehorn * we can't sync the icache on physical addresses directly 1323190681Snwhitehorn * without a direct map. Instead we check a couple of cases 1324190681Snwhitehorn * where the memory is already mapped in and, failing that, 1325190681Snwhitehorn * use the same trick we use for page zeroing to create 1326190681Snwhitehorn * a temporary mapping for this physical address. 1327190681Snwhitehorn */ 1328190681Snwhitehorn 1329190681Snwhitehorn if (!pmap_bootstrapped) { 1330190681Snwhitehorn /* 1331190681Snwhitehorn * If PMAP is not bootstrapped, we are likely to be 1332190681Snwhitehorn * in real mode. 1333190681Snwhitehorn */ 1334198341Smarcel __syncicache((void *)pa, sz); 1335190681Snwhitehorn } else if (pmap == kernel_pmap) { 1336198341Smarcel __syncicache((void *)va, sz); 1337209975Snwhitehorn } else if (hw_direct_map) { 1338209975Snwhitehorn __syncicache((void *)pa, sz); 1339190681Snwhitehorn } else { 1340190681Snwhitehorn /* Use the scratch page to set up a temp mapping */ 1341190681Snwhitehorn 1342190681Snwhitehorn mtx_lock(&moea64_scratchpage_mtx); 1343190681Snwhitehorn 1344216174Snwhitehorn moea64_set_scratchpage_pa(mmu, 1, pa & ~ADDR_POFF); 1345204042Snwhitehorn __syncicache((void *)(moea64_scratchpage_va[1] + 1346204042Snwhitehorn (va & ADDR_POFF)), sz); 1347190681Snwhitehorn 1348190681Snwhitehorn mtx_unlock(&moea64_scratchpage_mtx); 1349190681Snwhitehorn } 1350190681Snwhitehorn} 1351190681Snwhitehorn 1352190681Snwhitehorn/* 1353190681Snwhitehorn * Maps a sequence of resident pages belonging to the same object. 1354190681Snwhitehorn * The sequence begins with the given page m_start. This page is 1355190681Snwhitehorn * mapped at the given virtual address start. Each subsequent page is 1356190681Snwhitehorn * mapped at a virtual address that is offset from start by the same 1357190681Snwhitehorn * amount as the page is offset from m_start within the object. 
The 1358190681Snwhitehorn * last page in the sequence is the page with the largest offset from 1359190681Snwhitehorn * m_start that can be mapped at a virtual address less than the given 1360190681Snwhitehorn * virtual address end. Not every virtual page between start and end 1361190681Snwhitehorn * is mapped; only those for which a resident page exists with the 1362190681Snwhitehorn * corresponding offset from m_start are mapped. 1363190681Snwhitehorn */ 1364190681Snwhitehornvoid 1365190681Snwhitehornmoea64_enter_object(mmu_t mmu, pmap_t pm, vm_offset_t start, vm_offset_t end, 1366190681Snwhitehorn vm_page_t m_start, vm_prot_t prot) 1367190681Snwhitehorn{ 1368190681Snwhitehorn vm_page_t m; 1369190681Snwhitehorn vm_pindex_t diff, psize; 1370190681Snwhitehorn 1371250884Sattilio VM_OBJECT_ASSERT_LOCKED(m_start->object); 1372250884Sattilio 1373190681Snwhitehorn psize = atop(end - start); 1374190681Snwhitehorn m = m_start; 1375190681Snwhitehorn while (m != NULL && (diff = m->pindex - m_start->pindex) < psize) { 1376233957Snwhitehorn moea64_enter(mmu, pm, start + ptoa(diff), m, prot & 1377269728Skib (VM_PROT_READ | VM_PROT_EXECUTE), PMAP_ENTER_NOSLEEP, 0); 1378190681Snwhitehorn m = TAILQ_NEXT(m, listq); 1379190681Snwhitehorn } 1380190681Snwhitehorn} 1381190681Snwhitehorn 1382190681Snwhitehornvoid 1383190681Snwhitehornmoea64_enter_quick(mmu_t mmu, pmap_t pm, vm_offset_t va, vm_page_t m, 1384190681Snwhitehorn vm_prot_t prot) 1385190681Snwhitehorn{ 1386207796Salc 1387269728Skib moea64_enter(mmu, pm, va, m, prot & (VM_PROT_READ | VM_PROT_EXECUTE), 1388269728Skib PMAP_ENTER_NOSLEEP, 0); 1389190681Snwhitehorn} 1390190681Snwhitehorn 1391190681Snwhitehornvm_paddr_t 1392190681Snwhitehornmoea64_extract(mmu_t mmu, pmap_t pm, vm_offset_t va) 1393190681Snwhitehorn{ 1394190681Snwhitehorn struct pvo_entry *pvo; 1395190681Snwhitehorn vm_paddr_t pa; 1396190681Snwhitehorn 1397190681Snwhitehorn PMAP_LOCK(pm); 1398209975Snwhitehorn pvo = moea64_pvo_find_va(pm, va); 1399190681Snwhitehorn if (pvo == NULL) 1400190681Snwhitehorn pa = 0; 1401190681Snwhitehorn else 1402279252Snwhitehorn pa = (pvo->pvo_pte.pa & LPTE_RPGN) | (va - PVO_VADDR(pvo)); 1403190681Snwhitehorn PMAP_UNLOCK(pm); 1404279252Snwhitehorn 1405190681Snwhitehorn return (pa); 1406190681Snwhitehorn} 1407190681Snwhitehorn 1408190681Snwhitehorn/* 1409190681Snwhitehorn * Atomically extract and hold the physical page with the given 1410190681Snwhitehorn * pmap and virtual address pair if that mapping permits the given 1411190681Snwhitehorn * protection. 
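 *
 * Note that vm_page_pa_tryrelock() may have to drop the pmap lock in order
 * to respect the page/pmap lock order; when it does, the PVO lookup is
 * simply retried, since the mapping may have changed in the meantime.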
1412190681Snwhitehorn */ 1413190681Snwhitehornvm_page_t 1414190681Snwhitehornmoea64_extract_and_hold(mmu_t mmu, pmap_t pmap, vm_offset_t va, vm_prot_t prot) 1415190681Snwhitehorn{ 1416190681Snwhitehorn struct pvo_entry *pvo; 1417190681Snwhitehorn vm_page_t m; 1418207410Skmacy vm_paddr_t pa; 1419190681Snwhitehorn 1420190681Snwhitehorn m = NULL; 1421207410Skmacy pa = 0; 1422190681Snwhitehorn PMAP_LOCK(pmap); 1423207410Skmacyretry: 1424209975Snwhitehorn pvo = moea64_pvo_find_va(pmap, va & ~ADDR_POFF); 1425279252Snwhitehorn if (pvo != NULL && (pvo->pvo_pte.prot & prot) == prot) { 1426235689Snwhitehorn if (vm_page_pa_tryrelock(pmap, 1427279252Snwhitehorn pvo->pvo_pte.pa & LPTE_RPGN, &pa)) 1428207410Skmacy goto retry; 1429279252Snwhitehorn m = PHYS_TO_VM_PAGE(pvo->pvo_pte.pa & LPTE_RPGN); 1430190681Snwhitehorn vm_page_hold(m); 1431190681Snwhitehorn } 1432207410Skmacy PA_UNLOCK_COND(pa); 1433190681Snwhitehorn PMAP_UNLOCK(pmap); 1434190681Snwhitehorn return (m); 1435190681Snwhitehorn} 1436190681Snwhitehorn 1437216174Snwhitehornstatic mmu_t installed_mmu; 1438216174Snwhitehorn 1439190681Snwhitehornstatic void * 1440190681Snwhitehornmoea64_uma_page_alloc(uma_zone_t zone, int bytes, u_int8_t *flags, int wait) 1441190681Snwhitehorn{ 1442279252Snwhitehorn struct pvo_entry *pvo; 1443279252Snwhitehorn vm_offset_t va; 1444279252Snwhitehorn vm_page_t m; 1445279252Snwhitehorn int pflags, needed_lock; 1446279252Snwhitehorn 1447190681Snwhitehorn /* 1448190681Snwhitehorn * This entire routine is a horrible hack to avoid bothering kmem 1449190681Snwhitehorn * for new KVA addresses. Because this can get called from inside 1450190681Snwhitehorn * kmem allocation routines, calling kmem for a new address here 1451190681Snwhitehorn * can lead to multiply locking non-recursive mutexes. 
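	 * (Illustrative failure mode, not an exact call chain: a PVO-zone
	 * slab allocated through kmem may itself require new kernel
	 * mappings, which allocate PVOs, recursing into this zone while
	 * its non-recursive locks are already held.)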
1452190681Snwhitehorn */ 1453190681Snwhitehorn 1454190681Snwhitehorn *flags = UMA_SLAB_PRIV; 1455190681Snwhitehorn needed_lock = !PMAP_LOCKED(kernel_pmap); 1456243040Skib pflags = malloc2vm_flags(wait) | VM_ALLOC_WIRED; 1457190681Snwhitehorn 1458190681Snwhitehorn for (;;) { 1459228522Salc m = vm_page_alloc(NULL, 0, pflags | VM_ALLOC_NOOBJ); 1460190681Snwhitehorn if (m == NULL) { 1461190681Snwhitehorn if (wait & M_NOWAIT) 1462190681Snwhitehorn return (NULL); 1463190681Snwhitehorn VM_WAIT; 1464190681Snwhitehorn } else 1465190681Snwhitehorn break; 1466190681Snwhitehorn } 1467190681Snwhitehorn 1468204128Snwhitehorn va = VM_PAGE_TO_PHYS(m); 1469190681Snwhitehorn 1470279252Snwhitehorn pvo = alloc_pvo_entry(1 /* bootstrap */); 1471279252Snwhitehorn 1472279252Snwhitehorn pvo->pvo_pte.prot = VM_PROT_READ | VM_PROT_WRITE; 1473279252Snwhitehorn pvo->pvo_pte.pa = VM_PAGE_TO_PHYS(m) | LPTE_M; 1474279252Snwhitehorn 1475233529Snwhitehorn if (needed_lock) 1476233529Snwhitehorn PMAP_LOCK(kernel_pmap); 1477233529Snwhitehorn 1478279252Snwhitehorn init_pvo_entry(pvo, kernel_pmap, va); 1479279252Snwhitehorn pvo->pvo_vaddr |= PVO_WIRED; 1480190681Snwhitehorn 1481279252Snwhitehorn moea64_pvo_enter(installed_mmu, pvo, NULL); 1482279252Snwhitehorn 1483190681Snwhitehorn if (needed_lock) 1484190681Snwhitehorn PMAP_UNLOCK(kernel_pmap); 1485198378Snwhitehorn 1486190681Snwhitehorn if ((wait & M_ZERO) && (m->flags & PG_ZERO) == 0) 1487190681Snwhitehorn bzero((void *)va, PAGE_SIZE); 1488190681Snwhitehorn 1489190681Snwhitehorn return (void *)va; 1490190681Snwhitehorn} 1491190681Snwhitehorn 1492230767Skibextern int elf32_nxstack; 1493230767Skib 1494190681Snwhitehornvoid 1495190681Snwhitehornmoea64_init(mmu_t mmu) 1496190681Snwhitehorn{ 1497190681Snwhitehorn 1498190681Snwhitehorn CTR0(KTR_PMAP, "moea64_init"); 1499190681Snwhitehorn 1500279252Snwhitehorn moea64_pvo_zone = uma_zcreate("UPVO entry", sizeof (struct pvo_entry), 1501190681Snwhitehorn NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 1502190681Snwhitehorn UMA_ZONE_VM | UMA_ZONE_NOFREE); 1503190681Snwhitehorn 1504190681Snwhitehorn if (!hw_direct_map) { 1505216174Snwhitehorn installed_mmu = mmu; 1506279252Snwhitehorn uma_zone_set_allocf(moea64_pvo_zone,moea64_uma_page_alloc); 1507190681Snwhitehorn } 1508190681Snwhitehorn 1509230779Skib#ifdef COMPAT_FREEBSD32 1510230767Skib elf32_nxstack = 1; 1511230779Skib#endif 1512230767Skib 1513190681Snwhitehorn moea64_initialized = TRUE; 1514190681Snwhitehorn} 1515190681Snwhitehorn 1516190681Snwhitehornboolean_t 1517207155Salcmoea64_is_referenced(mmu_t mmu, vm_page_t m) 1518207155Salc{ 1519207155Salc 1520224746Skib KASSERT((m->oflags & VPO_UNMANAGED) == 0, 1521208574Salc ("moea64_is_referenced: page %p is not managed", m)); 1522279252Snwhitehorn 1523279252Snwhitehorn return (moea64_query_bit(mmu, m, LPTE_REF)); 1524207155Salc} 1525207155Salc 1526207155Salcboolean_t 1527190681Snwhitehornmoea64_is_modified(mmu_t mmu, vm_page_t m) 1528190681Snwhitehorn{ 1529190681Snwhitehorn 1530224746Skib KASSERT((m->oflags & VPO_UNMANAGED) == 0, 1531208504Salc ("moea64_is_modified: page %p is not managed", m)); 1532208504Salc 1533208504Salc /* 1534254138Sattilio * If the page is not exclusive busied, then PGA_WRITEABLE cannot be 1535225418Skib * concurrently set while the object is locked. Thus, if PGA_WRITEABLE 1536208504Salc * is clear, no PTEs can have LPTE_CHG set. 
1537208504Salc */ 1538255503Snwhitehorn VM_OBJECT_ASSERT_LOCKED(m->object); 1539254138Sattilio if (!vm_page_xbusied(m) && (m->aflags & PGA_WRITEABLE) == 0) 1540190681Snwhitehorn return (FALSE); 1541216174Snwhitehorn return (moea64_query_bit(mmu, m, LPTE_CHG)); 1542190681Snwhitehorn} 1543190681Snwhitehorn 1544214617Salcboolean_t 1545214617Salcmoea64_is_prefaultable(mmu_t mmu, pmap_t pmap, vm_offset_t va) 1546214617Salc{ 1547214617Salc struct pvo_entry *pvo; 1548279252Snwhitehorn boolean_t rv = TRUE; 1549214617Salc 1550214617Salc PMAP_LOCK(pmap); 1551214617Salc pvo = moea64_pvo_find_va(pmap, va & ~ADDR_POFF); 1552279252Snwhitehorn if (pvo != NULL) 1553279252Snwhitehorn rv = FALSE; 1554214617Salc PMAP_UNLOCK(pmap); 1555214617Salc return (rv); 1556214617Salc} 1557214617Salc 1558190681Snwhitehornvoid 1559190681Snwhitehornmoea64_clear_modify(mmu_t mmu, vm_page_t m) 1560190681Snwhitehorn{ 1561190681Snwhitehorn 1562224746Skib KASSERT((m->oflags & VPO_UNMANAGED) == 0, 1563208504Salc ("moea64_clear_modify: page %p is not managed", m)); 1564248084Sattilio VM_OBJECT_ASSERT_WLOCKED(m->object); 1565254138Sattilio KASSERT(!vm_page_xbusied(m), 1566254138Sattilio ("moea64_clear_modify: page %p is exclusive busied", m)); 1567208504Salc 1568208504Salc /* 1569225418Skib * If the page is not PGA_WRITEABLE, then no PTEs can have LPTE_CHG 1570208504Salc * set. If the object containing the page is locked and the page is 1571254138Sattilio * not exclusive busied, then PGA_WRITEABLE cannot be concurrently set. 1572208504Salc */ 1573225418Skib if ((m->aflags & PGA_WRITEABLE) == 0) 1574190681Snwhitehorn return; 1575216174Snwhitehorn moea64_clear_bit(mmu, m, LPTE_CHG); 1576190681Snwhitehorn} 1577190681Snwhitehorn 1578190681Snwhitehorn/* 1579190681Snwhitehorn * Clear the write and modified bits in each of the given page's mappings. 1580190681Snwhitehorn */ 1581190681Snwhitehornvoid 1582190681Snwhitehornmoea64_remove_write(mmu_t mmu, vm_page_t m) 1583190681Snwhitehorn{ 1584190681Snwhitehorn struct pvo_entry *pvo; 1585279252Snwhitehorn int64_t refchg, ret; 1586190681Snwhitehorn pmap_t pmap; 1587190681Snwhitehorn 1588224746Skib KASSERT((m->oflags & VPO_UNMANAGED) == 0, 1589208175Salc ("moea64_remove_write: page %p is not managed", m)); 1590208175Salc 1591208175Salc /* 1592254138Sattilio * If the page is not exclusive busied, then PGA_WRITEABLE cannot be 1593254138Sattilio * set by another thread while the object is locked. Thus, 1594254138Sattilio * if PGA_WRITEABLE is clear, no page table entries need updating. 
1595208175Salc */ 1596248084Sattilio VM_OBJECT_ASSERT_WLOCKED(m->object); 1597254138Sattilio if (!vm_page_xbusied(m) && (m->aflags & PGA_WRITEABLE) == 0) 1598190681Snwhitehorn return; 1599216174Snwhitehorn powerpc_sync(); 1600279252Snwhitehorn PV_PAGE_LOCK(m); 1601279252Snwhitehorn refchg = 0; 1602190681Snwhitehorn LIST_FOREACH(pvo, vm_page_to_pvoh(m), pvo_vlink) { 1603190681Snwhitehorn pmap = pvo->pvo_pmap; 1604190681Snwhitehorn PMAP_LOCK(pmap); 1605279252Snwhitehorn if (!(pvo->pvo_vaddr & PVO_DEAD) && 1606279252Snwhitehorn (pvo->pvo_pte.prot & VM_PROT_WRITE)) { 1607279252Snwhitehorn pvo->pvo_pte.prot &= ~VM_PROT_WRITE; 1608279252Snwhitehorn ret = MOEA64_PTE_REPLACE(mmu, pvo, 1609279252Snwhitehorn MOEA64_PTE_PROT_UPDATE); 1610279252Snwhitehorn if (ret < 0) 1611279252Snwhitehorn ret = LPTE_CHG; 1612279252Snwhitehorn refchg |= ret; 1613279252Snwhitehorn if (pvo->pvo_pmap == kernel_pmap) 1614279252Snwhitehorn isync(); 1615190681Snwhitehorn } 1616190681Snwhitehorn PMAP_UNLOCK(pmap); 1617190681Snwhitehorn } 1618279252Snwhitehorn if ((refchg | atomic_readandclear_32(&m->md.mdpg_attrs)) & LPTE_CHG) 1619279252Snwhitehorn vm_page_dirty(m); 1620225418Skib vm_page_aflag_clear(m, PGA_WRITEABLE); 1621279252Snwhitehorn PV_PAGE_UNLOCK(m); 1622190681Snwhitehorn} 1623190681Snwhitehorn 1624190681Snwhitehorn/* 1625190681Snwhitehorn * moea64_ts_referenced: 1626190681Snwhitehorn * 1627190681Snwhitehorn * Return a count of reference bits for a page, clearing those bits. 1628190681Snwhitehorn * It is not necessary for every reference bit to be cleared, but it 1629190681Snwhitehorn * is necessary that 0 only be returned when there are truly no 1630190681Snwhitehorn * reference bits set. 1631190681Snwhitehorn * 1632190681Snwhitehorn * XXX: The exact number of bits to check and clear is a matter that 1633190681Snwhitehorn * should be tested and standardized at some point in the future for 1634190681Snwhitehorn * optimal aging of shared pages. 1635190681Snwhitehorn */ 1636238357Salcint 1637190681Snwhitehornmoea64_ts_referenced(mmu_t mmu, vm_page_t m) 1638190681Snwhitehorn{ 1639190681Snwhitehorn 1640224746Skib KASSERT((m->oflags & VPO_UNMANAGED) == 0, 1641208990Salc ("moea64_ts_referenced: page %p is not managed", m)); 1642216174Snwhitehorn return (moea64_clear_bit(mmu, m, LPTE_REF)); 1643190681Snwhitehorn} 1644190681Snwhitehorn 1645190681Snwhitehorn/* 1646213307Snwhitehorn * Modify the WIMG settings of all mappings for a page. 
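 * (The WIMG bits select the PowerPC storage-control attributes:
 * W = write-through, I = caching-inhibited, M = memory coherence
 * required, G = guarded.)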
1647213307Snwhitehorn */ 1648213307Snwhitehornvoid 1649213307Snwhitehornmoea64_page_set_memattr(mmu_t mmu, vm_page_t m, vm_memattr_t ma) 1650213307Snwhitehorn{ 1651213307Snwhitehorn struct pvo_entry *pvo; 1652279252Snwhitehorn int64_t refchg; 1653213307Snwhitehorn pmap_t pmap; 1654213307Snwhitehorn uint64_t lo; 1655213307Snwhitehorn 1656224746Skib if ((m->oflags & VPO_UNMANAGED) != 0) { 1657213335Snwhitehorn m->md.mdpg_cache_attrs = ma; 1658213335Snwhitehorn return; 1659213335Snwhitehorn } 1660213335Snwhitehorn 1661213307Snwhitehorn lo = moea64_calc_wimg(VM_PAGE_TO_PHYS(m), ma); 1662279252Snwhitehorn 1663279252Snwhitehorn PV_PAGE_LOCK(m); 1664279252Snwhitehorn LIST_FOREACH(pvo, vm_page_to_pvoh(m), pvo_vlink) { 1665213307Snwhitehorn pmap = pvo->pvo_pmap; 1666213307Snwhitehorn PMAP_LOCK(pmap); 1667279252Snwhitehorn if (!(pvo->pvo_vaddr & PVO_DEAD)) { 1668279252Snwhitehorn pvo->pvo_pte.pa &= ~LPTE_WIMG; 1669279252Snwhitehorn pvo->pvo_pte.pa |= lo; 1670279252Snwhitehorn refchg = MOEA64_PTE_REPLACE(mmu, pvo, 1671279252Snwhitehorn MOEA64_PTE_INVALIDATE); 1672279252Snwhitehorn if (refchg < 0) 1673279252Snwhitehorn refchg = (pvo->pvo_pte.prot & VM_PROT_WRITE) ? 1674279252Snwhitehorn LPTE_CHG : 0; 1675279252Snwhitehorn if ((pvo->pvo_vaddr & PVO_MANAGED) && 1676279252Snwhitehorn (pvo->pvo_pte.prot & VM_PROT_WRITE)) { 1677279252Snwhitehorn refchg |= 1678279252Snwhitehorn atomic_readandclear_32(&m->md.mdpg_attrs); 1679279252Snwhitehorn if (refchg & LPTE_CHG) 1680279252Snwhitehorn vm_page_dirty(m); 1681279252Snwhitehorn if (refchg & LPTE_REF) 1682279252Snwhitehorn vm_page_aflag_set(m, PGA_REFERENCED); 1683279252Snwhitehorn } 1684213307Snwhitehorn if (pvo->pvo_pmap == kernel_pmap) 1685213307Snwhitehorn isync(); 1686213307Snwhitehorn } 1687213307Snwhitehorn PMAP_UNLOCK(pmap); 1688213307Snwhitehorn } 1689213307Snwhitehorn m->md.mdpg_cache_attrs = ma; 1690279252Snwhitehorn PV_PAGE_UNLOCK(m); 1691213307Snwhitehorn} 1692213307Snwhitehorn 1693213307Snwhitehorn/* 1694190681Snwhitehorn * Map a wired page into kernel virtual address space. 
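 * Unlike moea64_enter(), this interface cannot fail: any existing mapping
 * at 'va' is silently replaced, and an unexpected error from
 * moea64_pvo_enter() panics the system.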
1695190681Snwhitehorn */
1696190681Snwhitehornvoid
1697213307Snwhitehornmoea64_kenter_attr(mmu_t mmu, vm_offset_t va, vm_offset_t pa, vm_memattr_t ma)
1698190681Snwhitehorn{
1699190681Snwhitehorn	int error;
1700279252Snwhitehorn	struct pvo_entry *pvo, *oldpvo;
1701190681Snwhitehorn
1702279252Snwhitehorn	pvo = alloc_pvo_entry(0);
1703279252Snwhitehorn	pvo->pvo_pte.prot = VM_PROT_READ | VM_PROT_WRITE | VM_PROT_EXECUTE;
1704279252Snwhitehorn	pvo->pvo_pte.pa = (pa & ~ADDR_POFF) | moea64_calc_wimg(pa, ma);
1705279252Snwhitehorn	pvo->pvo_vaddr |= PVO_WIRED;
1706190681Snwhitehorn
1707190681Snwhitehorn	PMAP_LOCK(kernel_pmap);
1708279252Snwhitehorn	oldpvo = moea64_pvo_find_va(kernel_pmap, va);
1709279252Snwhitehorn	if (oldpvo != NULL)
1710279252Snwhitehorn		moea64_pvo_remove_from_pmap(mmu, oldpvo);
1711279252Snwhitehorn	init_pvo_entry(pvo, kernel_pmap, va);
1712279252Snwhitehorn	error = moea64_pvo_enter(mmu, pvo, NULL);
1713233529Snwhitehorn	PMAP_UNLOCK(kernel_pmap);
1714190681Snwhitehorn
1715279252Snwhitehorn	/* Free any dead pages */
1716279252Snwhitehorn	if (oldpvo != NULL) {
1717279252Snwhitehorn		PV_LOCK(oldpvo->pvo_pte.pa & LPTE_RPGN);
1718279252Snwhitehorn		moea64_pvo_remove_from_page(mmu, oldpvo);
1719279252Snwhitehorn		PV_UNLOCK(oldpvo->pvo_pte.pa & LPTE_RPGN);
1720279252Snwhitehorn		free_pvo_entry(oldpvo);
1721279252Snwhitehorn	}
1722279252Snwhitehorn
1723190681Snwhitehorn	if (error != 0 && error != ENOENT)
1724209975Snwhitehorn		panic("moea64_kenter_attr: failed to enter va %#zx pa %#zx: %d",
1725190681Snwhitehorn		    va, pa, error);
1726190681Snwhitehorn}
1727190681Snwhitehorn
1728213307Snwhitehornvoid
1729236019Srajmoea64_kenter(mmu_t mmu, vm_offset_t va, vm_paddr_t pa)
1730213307Snwhitehorn{
1731213307Snwhitehorn
1732213307Snwhitehorn	moea64_kenter_attr(mmu, va, pa, VM_MEMATTR_DEFAULT);
1733213307Snwhitehorn}
1734213307Snwhitehorn
1735190681Snwhitehorn/*
1736190681Snwhitehorn * Extract the physical page address associated with the given kernel virtual
1737190681Snwhitehorn * address.
1738190681Snwhitehorn */
1739236019Srajvm_paddr_t
1740190681Snwhitehornmoea64_kextract(mmu_t mmu, vm_offset_t va)
1741190681Snwhitehorn{
1742190681Snwhitehorn	struct pvo_entry *pvo;
1743190681Snwhitehorn	vm_paddr_t pa;
1744190681Snwhitehorn
1745205370Snwhitehorn	/*
1746205370Snwhitehorn	 * Shortcut the direct-mapped case when applicable.  We never put
1747205370Snwhitehorn	 * anything but 1:1 mappings below VM_MIN_KERNEL_ADDRESS.
1748205370Snwhitehorn	 */
1749205370Snwhitehorn	if (va < VM_MIN_KERNEL_ADDRESS)
1750205370Snwhitehorn		return (va);
1751205370Snwhitehorn
1752190681Snwhitehorn	PMAP_LOCK(kernel_pmap);
1753209975Snwhitehorn	pvo = moea64_pvo_find_va(kernel_pmap, va);
1754209975Snwhitehorn	KASSERT(pvo != NULL, ("moea64_kextract: no addr found for %#" PRIxPTR,
1755209975Snwhitehorn	    va));
1756279252Snwhitehorn	pa = (pvo->pvo_pte.pa & LPTE_RPGN) | (va - PVO_VADDR(pvo));
1757190681Snwhitehorn	PMAP_UNLOCK(kernel_pmap);
1758190681Snwhitehorn	return (pa);
1759190681Snwhitehorn}
1760190681Snwhitehorn
1761190681Snwhitehorn/*
1762190681Snwhitehorn * Remove a wired page from kernel virtual address space.
1763190681Snwhitehorn */
1764190681Snwhitehornvoid
1765190681Snwhitehornmoea64_kremove(mmu_t mmu, vm_offset_t va)
1766190681Snwhitehorn{
1767190681Snwhitehorn	moea64_remove(mmu, kernel_pmap, va, va + PAGE_SIZE);
1768190681Snwhitehorn}
1769190681Snwhitehorn
1770190681Snwhitehorn/*
1771190681Snwhitehorn * Map a range of physical addresses into kernel virtual address space.
1772190681Snwhitehorn * 1773190681Snwhitehorn * The value passed in *virt is a suggested virtual address for the mapping. 1774190681Snwhitehorn * Architectures which can support a direct-mapped physical to virtual region 1775190681Snwhitehorn * can return the appropriate address within that region, leaving '*virt' 1776279252Snwhitehorn * unchanged. Other architectures should map the pages starting at '*virt' and 1777279252Snwhitehorn * update '*virt' with the first usable address after the mapped region. 1778190681Snwhitehorn */ 1779190681Snwhitehornvm_offset_t 1780236019Srajmoea64_map(mmu_t mmu, vm_offset_t *virt, vm_paddr_t pa_start, 1781236019Sraj vm_paddr_t pa_end, int prot) 1782190681Snwhitehorn{ 1783190681Snwhitehorn vm_offset_t sva, va; 1784190681Snwhitehorn 1785279252Snwhitehorn if (hw_direct_map) { 1786279252Snwhitehorn /* 1787279252Snwhitehorn * Check if every page in the region is covered by the direct 1788279252Snwhitehorn * map. The direct map covers all of physical memory. Use 1789279252Snwhitehorn * moea64_calc_wimg() as a shortcut to see if the page is in 1790279252Snwhitehorn * physical memory as a way to see if the direct map covers it. 1791279252Snwhitehorn */ 1792279252Snwhitehorn for (va = pa_start; va < pa_end; va += PAGE_SIZE) 1793279252Snwhitehorn if (moea64_calc_wimg(va, VM_MEMATTR_DEFAULT) != LPTE_M) 1794279252Snwhitehorn break; 1795279252Snwhitehorn if (va == pa_end) 1796279252Snwhitehorn return (pa_start); 1797279252Snwhitehorn } 1798190681Snwhitehorn sva = *virt; 1799190681Snwhitehorn va = sva; 1800279252Snwhitehorn /* XXX respect prot argument */ 1801190681Snwhitehorn for (; pa_start < pa_end; pa_start += PAGE_SIZE, va += PAGE_SIZE) 1802190681Snwhitehorn moea64_kenter(mmu, va, pa_start); 1803190681Snwhitehorn *virt = va; 1804190681Snwhitehorn 1805190681Snwhitehorn return (sva); 1806190681Snwhitehorn} 1807190681Snwhitehorn 1808190681Snwhitehorn/* 1809190681Snwhitehorn * Returns true if the pmap's pv is one of the first 1810190681Snwhitehorn * 16 pvs linked to from this page. This count may 1811190681Snwhitehorn * be changed upwards or downwards in the future; it 1812190681Snwhitehorn * is only necessary that true be returned for a small 1813190681Snwhitehorn * subset of pmaps for proper page aging. 1814190681Snwhitehorn */ 1815190681Snwhitehornboolean_t 1816190681Snwhitehornmoea64_page_exists_quick(mmu_t mmu, pmap_t pmap, vm_page_t m) 1817190681Snwhitehorn{ 1818190681Snwhitehorn int loops; 1819190681Snwhitehorn struct pvo_entry *pvo; 1820208990Salc boolean_t rv; 1821190681Snwhitehorn 1822224746Skib KASSERT((m->oflags & VPO_UNMANAGED) == 0, 1823208990Salc ("moea64_page_exists_quick: page %p is not managed", m)); 1824190681Snwhitehorn loops = 0; 1825208990Salc rv = FALSE; 1826279252Snwhitehorn PV_PAGE_LOCK(m); 1827190681Snwhitehorn LIST_FOREACH(pvo, vm_page_to_pvoh(m), pvo_vlink) { 1828279252Snwhitehorn if (!(pvo->pvo_vaddr & PVO_DEAD) && pvo->pvo_pmap == pmap) { 1829208990Salc rv = TRUE; 1830208990Salc break; 1831208990Salc } 1832190681Snwhitehorn if (++loops >= 16) 1833190681Snwhitehorn break; 1834190681Snwhitehorn } 1835279252Snwhitehorn PV_PAGE_UNLOCK(m); 1836208990Salc return (rv); 1837190681Snwhitehorn} 1838190681Snwhitehorn 1839190681Snwhitehorn/* 1840190681Snwhitehorn * Return the number of managed mappings to the given physical page 1841190681Snwhitehorn * that are wired. 
1842190681Snwhitehorn */
1843190681Snwhitehornint
1844190681Snwhitehornmoea64_page_wired_mappings(mmu_t mmu, vm_page_t m)
1845190681Snwhitehorn{
1846190681Snwhitehorn	struct pvo_entry *pvo;
1847190681Snwhitehorn	int count;
1848190681Snwhitehorn
1849190681Snwhitehorn	count = 0;
1850224746Skib	if ((m->oflags & VPO_UNMANAGED) != 0)
1851190681Snwhitehorn		return (count);
1852279252Snwhitehorn	PV_PAGE_LOCK(m);
1853190681Snwhitehorn	LIST_FOREACH(pvo, vm_page_to_pvoh(m), pvo_vlink)
1854279252Snwhitehorn		if ((pvo->pvo_vaddr & (PVO_DEAD | PVO_WIRED)) == PVO_WIRED)
1855190681Snwhitehorn			count++;
1856279252Snwhitehorn	PV_PAGE_UNLOCK(m);
1857190681Snwhitehorn	return (count);
1858190681Snwhitehorn}
1859190681Snwhitehorn
1860209975Snwhitehornstatic uintptr_t	moea64_vsidcontext;
1861190681Snwhitehorn
1862209975Snwhitehornuintptr_t
1863209975Snwhitehornmoea64_get_unique_vsid(void) {
1864209975Snwhitehorn	u_int entropy;
1865209975Snwhitehorn	register_t hash;
1866209975Snwhitehorn	uint32_t mask;
1867209975Snwhitehorn	int i;
1868190681Snwhitehorn
1869190681Snwhitehorn	entropy = 0;
1870190681Snwhitehorn	__asm __volatile("mftb %0" : "=r"(entropy));
1871190681Snwhitehorn
1872211967Snwhitehorn	mtx_lock(&moea64_slb_mutex);
1873209975Snwhitehorn	for (i = 0; i < NVSIDS; i += VSID_NBPW) {
1874209975Snwhitehorn		u_int	n;
1875190681Snwhitehorn
1876190681Snwhitehorn		/*
1877190681Snwhitehorn		 * Create a new value by multiplying by a prime and adding in
1878190681Snwhitehorn		 * entropy from the timebase register.  This is to make the
1879190681Snwhitehorn		 * VSID more random so that the PT hash function collides
1880190681Snwhitehorn		 * less often.  (Note that the prime causes gcc to do shifts
1881190681Snwhitehorn		 * instead of a multiply.)
1882190681Snwhitehorn		 */
1883190681Snwhitehorn		moea64_vsidcontext = (moea64_vsidcontext * 0x1105) + entropy;
1884209975Snwhitehorn		hash = moea64_vsidcontext & (NVSIDS - 1);
1885190681Snwhitehorn		if (hash == 0)		/* 0 is special, avoid it */
1886190681Snwhitehorn			continue;
1887190681Snwhitehorn		n = hash >> 5;
1888190681Snwhitehorn		mask = 1 << (hash & (VSID_NBPW - 1));
1889209975Snwhitehorn		hash = (moea64_vsidcontext & VSID_HASHMASK);
1890190681Snwhitehorn		if (moea64_vsid_bitmap[n] & mask) {	/* collision? */
1891190681Snwhitehorn			/* anything free in this bucket?
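			 * (A full 32-bit word, 0xffffffff, means all
			 * VSID_NBPW VSIDs covered by this word are already
			 * allocated, so we loop again with fresh entropy.)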
*/ 1892190681Snwhitehorn if (moea64_vsid_bitmap[n] == 0xffffffff) { 1893190681Snwhitehorn entropy = (moea64_vsidcontext >> 20); 1894190681Snwhitehorn continue; 1895190681Snwhitehorn } 1896212322Snwhitehorn i = ffs(~moea64_vsid_bitmap[n]) - 1; 1897190681Snwhitehorn mask = 1 << i; 1898209975Snwhitehorn hash &= VSID_HASHMASK & ~(VSID_NBPW - 1); 1899190681Snwhitehorn hash |= i; 1900190681Snwhitehorn } 1901212322Snwhitehorn KASSERT(!(moea64_vsid_bitmap[n] & mask), 1902212331Snwhitehorn ("Allocating in-use VSID %#zx\n", hash)); 1903190681Snwhitehorn moea64_vsid_bitmap[n] |= mask; 1904211967Snwhitehorn mtx_unlock(&moea64_slb_mutex); 1905209975Snwhitehorn return (hash); 1906190681Snwhitehorn } 1907190681Snwhitehorn 1908211967Snwhitehorn mtx_unlock(&moea64_slb_mutex); 1909209975Snwhitehorn panic("%s: out of segments",__func__); 1910190681Snwhitehorn} 1911190681Snwhitehorn 1912209975Snwhitehorn#ifdef __powerpc64__ 1913209975Snwhitehornvoid 1914209975Snwhitehornmoea64_pinit(mmu_t mmu, pmap_t pmap) 1915209975Snwhitehorn{ 1916254667Skib 1917235689Snwhitehorn RB_INIT(&pmap->pmap_pvo); 1918209975Snwhitehorn 1919212715Snwhitehorn pmap->pm_slb_tree_root = slb_alloc_tree(); 1920209975Snwhitehorn pmap->pm_slb = slb_alloc_user_cache(); 1921212722Snwhitehorn pmap->pm_slb_len = 0; 1922209975Snwhitehorn} 1923209975Snwhitehorn#else 1924209975Snwhitehornvoid 1925209975Snwhitehornmoea64_pinit(mmu_t mmu, pmap_t pmap) 1926209975Snwhitehorn{ 1927209975Snwhitehorn int i; 1928212308Snwhitehorn uint32_t hash; 1929209975Snwhitehorn 1930235689Snwhitehorn RB_INIT(&pmap->pmap_pvo); 1931209975Snwhitehorn 1932209975Snwhitehorn if (pmap_bootstrapped) 1933209975Snwhitehorn pmap->pmap_phys = (pmap_t)moea64_kextract(mmu, 1934209975Snwhitehorn (vm_offset_t)pmap); 1935209975Snwhitehorn else 1936209975Snwhitehorn pmap->pmap_phys = pmap; 1937209975Snwhitehorn 1938209975Snwhitehorn /* 1939209975Snwhitehorn * Allocate some segment registers for this pmap. 1940209975Snwhitehorn */ 1941209975Snwhitehorn hash = moea64_get_unique_vsid(); 1942209975Snwhitehorn 1943209975Snwhitehorn for (i = 0; i < 16; i++) 1944209975Snwhitehorn pmap->pm_sr[i] = VSID_MAKE(i, hash); 1945212308Snwhitehorn 1946212308Snwhitehorn KASSERT(pmap->pm_sr[0] != 0, ("moea64_pinit: pm_sr[0] = 0")); 1947209975Snwhitehorn} 1948209975Snwhitehorn#endif 1949209975Snwhitehorn 1950190681Snwhitehorn/* 1951190681Snwhitehorn * Initialize the pmap associated with process 0. 1952190681Snwhitehorn */ 1953190681Snwhitehornvoid 1954190681Snwhitehornmoea64_pinit0(mmu_t mmu, pmap_t pm) 1955190681Snwhitehorn{ 1956254667Skib 1957254667Skib PMAP_LOCK_INIT(pm); 1958190681Snwhitehorn moea64_pinit(mmu, pm); 1959190681Snwhitehorn bzero(&pm->pm_stats, sizeof(pm->pm_stats)); 1960190681Snwhitehorn} 1961190681Snwhitehorn 1962190681Snwhitehorn/* 1963190681Snwhitehorn * Set the physical protection on the specified range of this map as requested. 1964190681Snwhitehorn */ 1965233011Snwhitehornstatic void 1966233011Snwhitehornmoea64_pvo_protect(mmu_t mmu, pmap_t pm, struct pvo_entry *pvo, vm_prot_t prot) 1967233011Snwhitehorn{ 1968279252Snwhitehorn struct vm_page *pg; 1969279252Snwhitehorn vm_prot_t oldprot; 1970279252Snwhitehorn int32_t refchg; 1971233011Snwhitehorn 1972233529Snwhitehorn PMAP_LOCK_ASSERT(pm, MA_OWNED); 1973233529Snwhitehorn 1974233011Snwhitehorn /* 1975279252Snwhitehorn * Change the protection of the page. 
1976233011Snwhitehorn */ 1977279252Snwhitehorn oldprot = pvo->pvo_pte.prot; 1978279252Snwhitehorn pvo->pvo_pte.prot = prot; 1979279252Snwhitehorn pg = PHYS_TO_VM_PAGE(pvo->pvo_pte.pa & LPTE_RPGN); 1980233011Snwhitehorn 1981233011Snwhitehorn /* 1982279252Snwhitehorn * If the PVO is in the page table, update mapping 1983233011Snwhitehorn */ 1984279252Snwhitehorn refchg = MOEA64_PTE_REPLACE(mmu, pvo, MOEA64_PTE_PROT_UPDATE); 1985279252Snwhitehorn if (refchg < 0) 1986279252Snwhitehorn refchg = (oldprot & VM_PROT_WRITE) ? LPTE_CHG : 0; 1987233011Snwhitehorn 1988234155Snwhitehorn if (pm != kernel_pmap && pg != NULL && !(pg->aflags & PGA_EXECUTABLE) && 1989279252Snwhitehorn (pvo->pvo_pte.pa & (LPTE_I | LPTE_G | LPTE_NOEXEC)) == 0) { 1990234155Snwhitehorn if ((pg->oflags & VPO_UNMANAGED) == 0) 1991233949Snwhitehorn vm_page_aflag_set(pg, PGA_EXECUTABLE); 1992234155Snwhitehorn moea64_syncicache(mmu, pm, PVO_VADDR(pvo), 1993279252Snwhitehorn pvo->pvo_pte.pa & LPTE_RPGN, PAGE_SIZE); 1994233011Snwhitehorn } 1995233434Snwhitehorn 1996233434Snwhitehorn /* 1997233436Snwhitehorn * Update vm about the REF/CHG bits if the page is managed and we have 1998233436Snwhitehorn * removed write access. 1999233434Snwhitehorn */ 2000279252Snwhitehorn if (pg != NULL && (pvo->pvo_vaddr & PVO_MANAGED) && 2001279252Snwhitehorn (oldprot & VM_PROT_WRITE)) { 2002279252Snwhitehorn refchg |= atomic_readandclear_32(&pg->md.mdpg_attrs); 2003279252Snwhitehorn if (refchg & LPTE_CHG) 2004279252Snwhitehorn vm_page_dirty(pg); 2005279252Snwhitehorn if (refchg & LPTE_REF) 2006279252Snwhitehorn vm_page_aflag_set(pg, PGA_REFERENCED); 2007233434Snwhitehorn } 2008233011Snwhitehorn} 2009233011Snwhitehorn 2010190681Snwhitehornvoid 2011190681Snwhitehornmoea64_protect(mmu_t mmu, pmap_t pm, vm_offset_t sva, vm_offset_t eva, 2012190681Snwhitehorn vm_prot_t prot) 2013190681Snwhitehorn{ 2014235689Snwhitehorn struct pvo_entry *pvo, *tpvo, key; 2015190681Snwhitehorn 2016233011Snwhitehorn CTR4(KTR_PMAP, "moea64_protect: pm=%p sva=%#x eva=%#x prot=%#x", pm, 2017233011Snwhitehorn sva, eva, prot); 2018190681Snwhitehorn 2019190681Snwhitehorn KASSERT(pm == &curproc->p_vmspace->vm_pmap || pm == kernel_pmap, 2020190681Snwhitehorn ("moea64_protect: non current pmap")); 2021190681Snwhitehorn 2022190681Snwhitehorn if ((prot & VM_PROT_READ) == VM_PROT_NONE) { 2023190681Snwhitehorn moea64_remove(mmu, pm, sva, eva); 2024190681Snwhitehorn return; 2025190681Snwhitehorn } 2026190681Snwhitehorn 2027190681Snwhitehorn PMAP_LOCK(pm); 2028235689Snwhitehorn key.pvo_vaddr = sva; 2029235689Snwhitehorn for (pvo = RB_NFIND(pvo_tree, &pm->pmap_pvo, &key); 2030235689Snwhitehorn pvo != NULL && PVO_VADDR(pvo) < eva; pvo = tpvo) { 2031235689Snwhitehorn tpvo = RB_NEXT(pvo_tree, &pm->pmap_pvo, pvo); 2032235689Snwhitehorn moea64_pvo_protect(mmu, pm, pvo, prot); 2033190681Snwhitehorn } 2034190681Snwhitehorn PMAP_UNLOCK(pm); 2035190681Snwhitehorn} 2036190681Snwhitehorn 2037190681Snwhitehorn/* 2038190681Snwhitehorn * Map a list of wired pages into kernel virtual address space. This is 2039190681Snwhitehorn * intended for temporary mappings which do not need page modification or 2040190681Snwhitehorn * references recorded. Existing mappings in the region are overwritten. 
2041190681Snwhitehorn */ 2042190681Snwhitehornvoid 2043190681Snwhitehornmoea64_qenter(mmu_t mmu, vm_offset_t va, vm_page_t *m, int count) 2044190681Snwhitehorn{ 2045190681Snwhitehorn while (count-- > 0) { 2046190681Snwhitehorn moea64_kenter(mmu, va, VM_PAGE_TO_PHYS(*m)); 2047190681Snwhitehorn va += PAGE_SIZE; 2048190681Snwhitehorn m++; 2049190681Snwhitehorn } 2050190681Snwhitehorn} 2051190681Snwhitehorn 2052190681Snwhitehorn/* 2053190681Snwhitehorn * Remove page mappings from kernel virtual address space. Intended for 2054190681Snwhitehorn * temporary mappings entered by moea64_qenter. 2055190681Snwhitehorn */ 2056190681Snwhitehornvoid 2057190681Snwhitehornmoea64_qremove(mmu_t mmu, vm_offset_t va, int count) 2058190681Snwhitehorn{ 2059190681Snwhitehorn while (count-- > 0) { 2060190681Snwhitehorn moea64_kremove(mmu, va); 2061190681Snwhitehorn va += PAGE_SIZE; 2062190681Snwhitehorn } 2063190681Snwhitehorn} 2064190681Snwhitehorn 2065190681Snwhitehornvoid 2066209975Snwhitehornmoea64_release_vsid(uint64_t vsid) 2067209975Snwhitehorn{ 2068212044Snwhitehorn int idx, mask; 2069209975Snwhitehorn 2070212044Snwhitehorn mtx_lock(&moea64_slb_mutex); 2071212044Snwhitehorn idx = vsid & (NVSIDS-1); 2072212044Snwhitehorn mask = 1 << (idx % VSID_NBPW); 2073212044Snwhitehorn idx /= VSID_NBPW; 2074212308Snwhitehorn KASSERT(moea64_vsid_bitmap[idx] & mask, 2075212308Snwhitehorn ("Freeing unallocated VSID %#jx", vsid)); 2076212044Snwhitehorn moea64_vsid_bitmap[idx] &= ~mask; 2077212044Snwhitehorn mtx_unlock(&moea64_slb_mutex); 2078209975Snwhitehorn} 2079209975Snwhitehorn 2080209975Snwhitehorn 2081209975Snwhitehornvoid 2082190681Snwhitehornmoea64_release(mmu_t mmu, pmap_t pmap) 2083190681Snwhitehorn{ 2084190681Snwhitehorn 2085190681Snwhitehorn /* 2086209975Snwhitehorn * Free segment registers' VSIDs 2087190681Snwhitehorn */ 2088209975Snwhitehorn #ifdef __powerpc64__ 2089212715Snwhitehorn slb_free_tree(pmap); 2090209975Snwhitehorn slb_free_user_cache(pmap->pm_slb); 2091209975Snwhitehorn #else 2092212308Snwhitehorn KASSERT(pmap->pm_sr[0] != 0, ("moea64_release: pm_sr[0] = 0")); 2093190681Snwhitehorn 2094212308Snwhitehorn moea64_release_vsid(VSID_TO_HASH(pmap->pm_sr[0])); 2095209975Snwhitehorn #endif 2096190681Snwhitehorn} 2097190681Snwhitehorn 2098190681Snwhitehorn/* 2099233017Snwhitehorn * Remove all pages mapped by the specified pmap 2100233017Snwhitehorn */ 2101233017Snwhitehornvoid 2102233017Snwhitehornmoea64_remove_pages(mmu_t mmu, pmap_t pm) 2103233017Snwhitehorn{ 2104279252Snwhitehorn struct pvo_entry *pvo, *tpvo; 2105279252Snwhitehorn struct pvo_tree tofree; 2106233017Snwhitehorn 2107279252Snwhitehorn RB_INIT(&tofree); 2108279252Snwhitehorn 2109233017Snwhitehorn PMAP_LOCK(pm); 2110235689Snwhitehorn RB_FOREACH_SAFE(pvo, pvo_tree, &pm->pmap_pvo, tpvo) { 2111279252Snwhitehorn if (pvo->pvo_vaddr & PVO_WIRED) 2112279252Snwhitehorn continue; 2113279252Snwhitehorn 2114279252Snwhitehorn /* 2115279252Snwhitehorn * For locking reasons, remove this from the page table and 2116279252Snwhitehorn * pmap, but save delinking from the vm_page for a second 2117279252Snwhitehorn * pass 2118279252Snwhitehorn */ 2119279252Snwhitehorn moea64_pvo_remove_from_pmap(mmu, pvo); 2120279252Snwhitehorn RB_INSERT(pvo_tree, &tofree, pvo); 2121233434Snwhitehorn } 2122233017Snwhitehorn PMAP_UNLOCK(pm); 2123279252Snwhitehorn 2124279252Snwhitehorn RB_FOREACH_SAFE(pvo, pvo_tree, &tofree, tpvo) { 2125279252Snwhitehorn PV_LOCK(pvo->pvo_pte.pa & LPTE_RPGN); 2126279252Snwhitehorn moea64_pvo_remove_from_page(mmu, pvo); 2127279252Snwhitehorn 
		PV_UNLOCK(pvo->pvo_pte.pa & LPTE_RPGN);
2128279252Snwhitehorn		RB_REMOVE(pvo_tree, &tofree, pvo);
2129279252Snwhitehorn		free_pvo_entry(pvo);
2130279252Snwhitehorn	}
2131233017Snwhitehorn}
2132233017Snwhitehorn
2133233017Snwhitehorn/*
2134190681Snwhitehorn * Remove the given range of addresses from the specified map.
2135190681Snwhitehorn */
2136190681Snwhitehornvoid
2137190681Snwhitehornmoea64_remove(mmu_t mmu, pmap_t pm, vm_offset_t sva, vm_offset_t eva)
2138190681Snwhitehorn{
2139279252Snwhitehorn	struct pvo_entry *pvo, *tpvo, key;
2140279252Snwhitehorn	struct pvo_tree tofree;
2141190681Snwhitehorn
2142233011Snwhitehorn	/*
2143233011Snwhitehorn	 * Perform an unsynchronized read.  This is, however, safe.
2144233011Snwhitehorn	 */
2145233011Snwhitehorn	if (pm->pm_stats.resident_count == 0)
2146233011Snwhitehorn		return;
2147233011Snwhitehorn
2148279252Snwhitehorn	key.pvo_vaddr = sva;
2149279252Snwhitehorn
2150279252Snwhitehorn	RB_INIT(&tofree);
2151279252Snwhitehorn
2152190681Snwhitehorn	PMAP_LOCK(pm);
2153235689Snwhitehorn	for (pvo = RB_NFIND(pvo_tree, &pm->pmap_pvo, &key);
2154235689Snwhitehorn	    pvo != NULL && PVO_VADDR(pvo) < eva; pvo = tpvo) {
2155235689Snwhitehorn		tpvo = RB_NEXT(pvo_tree, &pm->pmap_pvo, pvo);
2156279252Snwhitehorn
2157279252Snwhitehorn		/*
2158279252Snwhitehorn		 * For locking reasons, remove this from the page table and
2159279252Snwhitehorn		 * pmap, but save delinking from the vm_page for a second
2160279252Snwhitehorn		 * pass
2161279252Snwhitehorn		 */
2162279252Snwhitehorn		moea64_pvo_remove_from_pmap(mmu, pvo);
2163279252Snwhitehorn		RB_INSERT(pvo_tree, &tofree, pvo);
2164190681Snwhitehorn	}
2165190681Snwhitehorn	PMAP_UNLOCK(pm);
2166279252Snwhitehorn
2167279252Snwhitehorn	RB_FOREACH_SAFE(pvo, pvo_tree, &tofree, tpvo) {
2168279252Snwhitehorn		PV_LOCK(pvo->pvo_pte.pa & LPTE_RPGN);
2169279252Snwhitehorn		moea64_pvo_remove_from_page(mmu, pvo);
2170279252Snwhitehorn		PV_UNLOCK(pvo->pvo_pte.pa & LPTE_RPGN);
2171279252Snwhitehorn		RB_REMOVE(pvo_tree, &tofree, pvo);
2172279252Snwhitehorn		free_pvo_entry(pvo);
2173279252Snwhitehorn	}
2174190681Snwhitehorn}
2175190681Snwhitehorn
2176190681Snwhitehorn/*
2177190681Snwhitehorn * Remove physical page from all pmaps in which it resides.
2178190681Snwhitehorn * moea64_pvo_remove_from_pmap() will reflect changes in the PTEs back to the vm_page.
2179190681Snwhitehorn */ 2180190681Snwhitehornvoid 2181190681Snwhitehornmoea64_remove_all(mmu_t mmu, vm_page_t m) 2182190681Snwhitehorn{ 2183190681Snwhitehorn struct pvo_entry *pvo, *next_pvo; 2184279252Snwhitehorn struct pvo_head freequeue; 2185279252Snwhitehorn int wasdead; 2186190681Snwhitehorn pmap_t pmap; 2187190681Snwhitehorn 2188279252Snwhitehorn LIST_INIT(&freequeue); 2189279252Snwhitehorn 2190279252Snwhitehorn PV_PAGE_LOCK(m); 2191233949Snwhitehorn LIST_FOREACH_SAFE(pvo, vm_page_to_pvoh(m), pvo_vlink, next_pvo) { 2192190681Snwhitehorn pmap = pvo->pvo_pmap; 2193190681Snwhitehorn PMAP_LOCK(pmap); 2194279252Snwhitehorn wasdead = (pvo->pvo_vaddr & PVO_DEAD); 2195279252Snwhitehorn if (!wasdead) 2196279252Snwhitehorn moea64_pvo_remove_from_pmap(mmu, pvo); 2197279252Snwhitehorn moea64_pvo_remove_from_page(mmu, pvo); 2198279252Snwhitehorn if (!wasdead) 2199279252Snwhitehorn LIST_INSERT_HEAD(&freequeue, pvo, pvo_vlink); 2200190681Snwhitehorn PMAP_UNLOCK(pmap); 2201279252Snwhitehorn 2202190681Snwhitehorn } 2203279252Snwhitehorn KASSERT(!pmap_page_is_mapped(m), ("Page still has mappings")); 2204279252Snwhitehorn KASSERT(!(m->aflags & PGA_WRITEABLE), ("Page still writable")); 2205279252Snwhitehorn PV_PAGE_UNLOCK(m); 2206279252Snwhitehorn 2207279252Snwhitehorn /* Clean up UMA allocations */ 2208279252Snwhitehorn LIST_FOREACH_SAFE(pvo, &freequeue, pvo_vlink, next_pvo) 2209279252Snwhitehorn free_pvo_entry(pvo); 2210190681Snwhitehorn} 2211190681Snwhitehorn 2212190681Snwhitehorn/* 2213190681Snwhitehorn * Allocate a physical page of memory directly from the phys_avail map. 2214190681Snwhitehorn * Can only be called from moea64_bootstrap before avail start and end are 2215190681Snwhitehorn * calculated. 2216190681Snwhitehorn */ 2217216174Snwhitehornvm_offset_t 2218190681Snwhitehornmoea64_bootstrap_alloc(vm_size_t size, u_int align) 2219190681Snwhitehorn{ 2220190681Snwhitehorn vm_offset_t s, e; 2221190681Snwhitehorn int i, j; 2222190681Snwhitehorn 2223190681Snwhitehorn size = round_page(size); 2224190681Snwhitehorn for (i = 0; phys_avail[i + 1] != 0; i += 2) { 2225190681Snwhitehorn if (align != 0) 2226190681Snwhitehorn s = (phys_avail[i] + align - 1) & ~(align - 1); 2227190681Snwhitehorn else 2228190681Snwhitehorn s = phys_avail[i]; 2229190681Snwhitehorn e = s + size; 2230190681Snwhitehorn 2231190681Snwhitehorn if (s < phys_avail[i] || e > phys_avail[i + 1]) 2232190681Snwhitehorn continue; 2233190681Snwhitehorn 2234215159Snwhitehorn if (s + size > platform_real_maxaddr()) 2235215159Snwhitehorn continue; 2236215159Snwhitehorn 2237190681Snwhitehorn if (s == phys_avail[i]) { 2238190681Snwhitehorn phys_avail[i] += size; 2239190681Snwhitehorn } else if (e == phys_avail[i + 1]) { 2240190681Snwhitehorn phys_avail[i + 1] -= size; 2241190681Snwhitehorn } else { 2242190681Snwhitehorn for (j = phys_avail_count * 2; j > i; j -= 2) { 2243190681Snwhitehorn phys_avail[j] = phys_avail[j - 2]; 2244190681Snwhitehorn phys_avail[j + 1] = phys_avail[j - 1]; 2245190681Snwhitehorn } 2246190681Snwhitehorn 2247190681Snwhitehorn phys_avail[i + 3] = phys_avail[i + 1]; 2248190681Snwhitehorn phys_avail[i + 1] = s; 2249190681Snwhitehorn phys_avail[i + 2] = e; 2250190681Snwhitehorn phys_avail_count++; 2251190681Snwhitehorn } 2252190681Snwhitehorn 2253190681Snwhitehorn return (s); 2254190681Snwhitehorn } 2255190681Snwhitehorn panic("moea64_bootstrap_alloc: could not allocate memory"); 2256190681Snwhitehorn} 2257190681Snwhitehorn 2258190681Snwhitehornstatic int 2259279252Snwhitehornmoea64_pvo_enter(mmu_t mmu, struct pvo_entry *pvo, 
    struct pvo_head *pvo_head)
2260190681Snwhitehorn{
2261279252Snwhitehorn	int first = 0, err;
2262190681Snwhitehorn
2263279252Snwhitehorn	PMAP_LOCK_ASSERT(pvo->pvo_pmap, MA_OWNED);
2264279252Snwhitehorn	KASSERT(moea64_pvo_find_va(pvo->pvo_pmap, PVO_VADDR(pvo)) == NULL,
2265279252Snwhitehorn	    ("Existing mapping for VA %#jx", (uintmax_t)PVO_VADDR(pvo)));
2266190681Snwhitehorn
2267212363Snwhitehorn	moea64_pvo_enter_calls++;
2268212363Snwhitehorn
2269190681Snwhitehorn	/*
2270228412Snwhitehorn	 * Add to pmap list
2271228412Snwhitehorn	 */
2272279252Snwhitehorn	RB_INSERT(pvo_tree, &pvo->pvo_pmap->pmap_pvo, pvo);
2273228412Snwhitehorn
2274228412Snwhitehorn	/*
2275190681Snwhitehorn	 * Remember if the list was empty, in which case this PVO becomes
2276190681Snwhitehorn	 * the first item.
2277190681Snwhitehorn	 */
2278235689Snwhitehorn	if (pvo_head != NULL) {
2279235689Snwhitehorn		if (LIST_FIRST(pvo_head) == NULL)
2280235689Snwhitehorn			first = 1;
2281235689Snwhitehorn		LIST_INSERT_HEAD(pvo_head, pvo, pvo_vlink);
2282235689Snwhitehorn	}
2283190681Snwhitehorn
2284279252Snwhitehorn	if (pvo->pvo_vaddr & PVO_WIRED)
2285279252Snwhitehorn		pvo->pvo_pmap->pm_stats.wired_count++;
2286279252Snwhitehorn	pvo->pvo_pmap->pm_stats.resident_count++;
2287190681Snwhitehorn
2288190681Snwhitehorn	/*
2289279252Snwhitehorn	 * Insert it into the hardware page table
2290190681Snwhitehorn	 */
2291279252Snwhitehorn	err = MOEA64_PTE_INSERT(mmu, pvo);
2292279252Snwhitehorn	if (err != 0) {
2293190681Snwhitehorn		panic("moea64_pvo_enter: overflow");
2294190681Snwhitehorn	}
2295190681Snwhitehorn
2296279252Snwhitehorn	moea64_pvo_entries++;
2297279252Snwhitehorn
2298279252Snwhitehorn	if (pvo->pvo_pmap == kernel_pmap)
2299204042Snwhitehorn		isync();
2300204042Snwhitehorn
2301209975Snwhitehorn#ifdef __powerpc64__
2302209975Snwhitehorn	/*
2303209975Snwhitehorn	 * Make sure all our bootstrap mappings are in the SLB as soon
2304209975Snwhitehorn	 * as virtual memory is switched on.
2305209975Snwhitehorn	 */
2306209975Snwhitehorn	if (!pmap_bootstrapped)
2307279252Snwhitehorn		moea64_bootstrap_slb_prefault(PVO_VADDR(pvo),
2308279252Snwhitehorn		    pvo->pvo_vaddr & PVO_LARGE);
2309209975Snwhitehorn#endif
2310209975Snwhitehorn
2311190681Snwhitehorn	return (first ? ENOENT : 0);
2312190681Snwhitehorn}
2313190681Snwhitehorn
2314190681Snwhitehornstatic void
2315279252Snwhitehornmoea64_pvo_remove_from_pmap(mmu_t mmu, struct pvo_entry *pvo)
2316190681Snwhitehorn{
2317233949Snwhitehorn	struct vm_page *pg;
2318279252Snwhitehorn	int32_t refchg;
2319190681Snwhitehorn
2320279252Snwhitehorn	KASSERT(pvo->pvo_pmap != NULL, ("Trying to remove PVO with no pmap"));
2321233529Snwhitehorn	PMAP_LOCK_ASSERT(pvo->pvo_pmap, MA_OWNED);
2322279252Snwhitehorn	KASSERT(!(pvo->pvo_vaddr & PVO_DEAD), ("Trying to remove dead PVO"));
2323233529Snwhitehorn
2324190681Snwhitehorn	/*
2325279252Snwhitehorn	 * If there is an active PTE, we need to deactivate it.
2326190681Snwhitehorn	 */
2327279252Snwhitehorn	refchg = MOEA64_PTE_UNSET(mmu, pvo);
2328279252Snwhitehorn	if (refchg < 0) {
2329279252Snwhitehorn		/*
2330279252Snwhitehorn		 * If it was evicted from the page table, be pessimistic and
2331279252Snwhitehorn		 * dirty the page.
2332279252Snwhitehorn		 */
2333279252Snwhitehorn		if (pvo->pvo_pte.prot & VM_PROT_WRITE)
2334279252Snwhitehorn			refchg = LPTE_CHG;
2335279252Snwhitehorn		else
2336279252Snwhitehorn			refchg = 0;
2337190681Snwhitehorn	}
2338190681Snwhitehorn
2339190681Snwhitehorn	/*
2340190681Snwhitehorn	 * Update our statistics.
2341190681Snwhitehorn */ 2342190681Snwhitehorn pvo->pvo_pmap->pm_stats.resident_count--; 2343204042Snwhitehorn if (pvo->pvo_vaddr & PVO_WIRED) 2344190681Snwhitehorn pvo->pvo_pmap->pm_stats.wired_count--; 2345190681Snwhitehorn 2346190681Snwhitehorn /* 2347235689Snwhitehorn * Remove this PVO from the pmap list. 2348233529Snwhitehorn */ 2349235689Snwhitehorn RB_REMOVE(pvo_tree, &pvo->pvo_pmap->pmap_pvo, pvo); 2350233529Snwhitehorn 2351233529Snwhitehorn /* 2352279252Snwhitehorn * Mark this for the next sweep 2353233529Snwhitehorn */ 2354279252Snwhitehorn pvo->pvo_vaddr |= PVO_DEAD; 2355233529Snwhitehorn 2356279252Snwhitehorn /* Send RC bits to VM */ 2357279252Snwhitehorn if ((pvo->pvo_vaddr & PVO_MANAGED) && 2358279252Snwhitehorn (pvo->pvo_pte.prot & VM_PROT_WRITE)) { 2359279252Snwhitehorn pg = PHYS_TO_VM_PAGE(pvo->pvo_pte.pa & LPTE_RPGN); 2360279252Snwhitehorn if (pg != NULL) { 2361279252Snwhitehorn refchg |= atomic_readandclear_32(&pg->md.mdpg_attrs); 2362279252Snwhitehorn if (refchg & LPTE_CHG) 2363279252Snwhitehorn vm_page_dirty(pg); 2364279252Snwhitehorn if (refchg & LPTE_REF) 2365279252Snwhitehorn vm_page_aflag_set(pg, PGA_REFERENCED); 2366279252Snwhitehorn } 2367279252Snwhitehorn } 2368279252Snwhitehorn} 2369279252Snwhitehorn 2370279252Snwhitehornstatic void 2371279252Snwhitehornmoea64_pvo_remove_from_page(mmu_t mmu, struct pvo_entry *pvo) 2372279252Snwhitehorn{ 2373279252Snwhitehorn struct vm_page *pg; 2374279252Snwhitehorn 2375279252Snwhitehorn KASSERT(pvo->pvo_vaddr & PVO_DEAD, ("Trying to delink live page")); 2376279252Snwhitehorn 2377279252Snwhitehorn /* Use NULL pmaps as a sentinel for races in page deletion */ 2378279252Snwhitehorn if (pvo->pvo_pmap == NULL) 2379279252Snwhitehorn return; 2380279252Snwhitehorn pvo->pvo_pmap = NULL; 2381279252Snwhitehorn 2382233529Snwhitehorn /* 2383279252Snwhitehorn * Update vm about page writeability/executability if managed 2384190681Snwhitehorn */ 2385279252Snwhitehorn PV_LOCKASSERT(pvo->pvo_pte.pa & LPTE_RPGN); 2386279252Snwhitehorn pg = PHYS_TO_VM_PAGE(pvo->pvo_pte.pa & LPTE_RPGN); 2387233949Snwhitehorn 2388279252Snwhitehorn if ((pvo->pvo_vaddr & PVO_MANAGED) && pg != NULL) { 2389235689Snwhitehorn LIST_REMOVE(pvo, pvo_vlink); 2390234155Snwhitehorn if (LIST_EMPTY(vm_page_to_pvoh(pg))) 2391279252Snwhitehorn vm_page_aflag_clear(pg, PGA_WRITEABLE | PGA_EXECUTABLE); 2392190681Snwhitehorn } 2393190681Snwhitehorn 2394212363Snwhitehorn moea64_pvo_entries--; 2395212363Snwhitehorn moea64_pvo_remove_calls++; 2396190681Snwhitehorn} 2397190681Snwhitehorn 2398190681Snwhitehornstatic struct pvo_entry * 2399209975Snwhitehornmoea64_pvo_find_va(pmap_t pm, vm_offset_t va) 2400190681Snwhitehorn{ 2401235689Snwhitehorn struct pvo_entry key; 2402190681Snwhitehorn 2403279252Snwhitehorn PMAP_LOCK_ASSERT(pm, MA_OWNED); 2404279252Snwhitehorn 2405235689Snwhitehorn key.pvo_vaddr = va & ~ADDR_POFF; 2406235689Snwhitehorn return (RB_FIND(pvo_tree, &pm->pmap_pvo, &key)); 2407190681Snwhitehorn} 2408190681Snwhitehorn 2409190681Snwhitehornstatic boolean_t 2410279252Snwhitehornmoea64_query_bit(mmu_t mmu, vm_page_t m, uint64_t ptebit) 2411190681Snwhitehorn{ 2412190681Snwhitehorn struct pvo_entry *pvo; 2413279252Snwhitehorn int64_t ret; 2414279252Snwhitehorn boolean_t rv; 2415190681Snwhitehorn 2416279252Snwhitehorn /* 2417279252Snwhitehorn * See if this bit is stored in the page already. 
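	 * The page's md.mdpg_attrs field caches REF/CHG bits harvested from
	 * synced or evicted PTEs (see, e.g., the atomic_set_32() below), so
	 * a hit here avoids walking the PVO list entirely.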
2418279252Snwhitehorn	 */
2419279252Snwhitehorn	if (m->md.mdpg_attrs & ptebit)
2420279252Snwhitehorn		return (TRUE);
2421190681Snwhitehorn
2422190681Snwhitehorn	/*
2423279252Snwhitehorn	 * Examine each PTE.  Sync so that any pending REF/CHG bits are
2424279252Snwhitehorn	 * flushed to the PTEs.
2425190681Snwhitehorn	 */
2426279252Snwhitehorn	rv = FALSE;
2427216174Snwhitehorn	powerpc_sync();
2428279252Snwhitehorn	PV_PAGE_LOCK(m);
2429190681Snwhitehorn	LIST_FOREACH(pvo, vm_page_to_pvoh(m), pvo_vlink) {
2430279252Snwhitehorn		ret = 0;
2431190681Snwhitehorn
2432190681Snwhitehorn		/*
2433190681Snwhitehorn		 * See if this PVO has a valid PTE.  If so, fetch the
2434190681Snwhitehorn		 * REF/CHG bits from the valid PTE.  If the appropriate
2435233434Snwhitehorn		 * ptebit is set, return success.
2436190681Snwhitehorn		 */
2437233529Snwhitehorn		PMAP_LOCK(pvo->pvo_pmap);
2438279252Snwhitehorn		if (!(pvo->pvo_vaddr & PVO_DEAD))
2439279252Snwhitehorn			ret = MOEA64_PTE_SYNCH(mmu, pvo);
2440279252Snwhitehorn		PMAP_UNLOCK(pvo->pvo_pmap);
2441279252Snwhitehorn
2442279252Snwhitehorn		if (ret > 0) {
2443279252Snwhitehorn			atomic_set_32(&m->md.mdpg_attrs,
2444279252Snwhitehorn			    ret & (LPTE_CHG | LPTE_REF));
2445279252Snwhitehorn			if (ret & ptebit) {
2446279252Snwhitehorn				rv = TRUE;
2447279252Snwhitehorn				break;
2448190681Snwhitehorn			}
2449190681Snwhitehorn		}
2450190681Snwhitehorn	}
2451279252Snwhitehorn	PV_PAGE_UNLOCK(m);
2452190681Snwhitehorn
2453279252Snwhitehorn	return (rv);
2454190681Snwhitehorn}
2455190681Snwhitehorn
2456190681Snwhitehornstatic u_int
2457216174Snwhitehornmoea64_clear_bit(mmu_t mmu, vm_page_t m, u_int64_t ptebit)
2458190681Snwhitehorn{
2459190681Snwhitehorn	u_int count;
2460190681Snwhitehorn	struct pvo_entry *pvo;
2461279252Snwhitehorn	int64_t ret;
2462190681Snwhitehorn
2463190681Snwhitehorn	/*
2464190681Snwhitehorn	 * Sync so that any pending REF/CHG bits are flushed to the PTEs (so
2465279252Snwhitehorn	 * we can reset the right ones).
2466190681Snwhitehorn	 */
2467216174Snwhitehorn	powerpc_sync();
2468190681Snwhitehorn
2469190681Snwhitehorn	/*
2470279252Snwhitehorn	 * For each PVO entry, clear the PTE's ptebit.
2471190681Snwhitehorn */ 2472190681Snwhitehorn count = 0; 2473279252Snwhitehorn PV_PAGE_LOCK(m); 2474190681Snwhitehorn LIST_FOREACH(pvo, vm_page_to_pvoh(m), pvo_vlink) { 2475279252Snwhitehorn ret = 0; 2476279252Snwhitehorn 2477233529Snwhitehorn PMAP_LOCK(pvo->pvo_pmap); 2478279252Snwhitehorn if (!(pvo->pvo_vaddr & PVO_DEAD)) 2479279252Snwhitehorn ret = MOEA64_PTE_CLEAR(mmu, pvo, ptebit); 2480233529Snwhitehorn PMAP_UNLOCK(pvo->pvo_pmap); 2481279252Snwhitehorn 2482279252Snwhitehorn if (ret > 0 && (ret & ptebit)) 2483279252Snwhitehorn count++; 2484190681Snwhitehorn } 2485279252Snwhitehorn atomic_clear_32(&m->md.mdpg_attrs, ptebit); 2486279252Snwhitehorn PV_PAGE_UNLOCK(m); 2487190681Snwhitehorn 2488190681Snwhitehorn return (count); 2489190681Snwhitehorn} 2490190681Snwhitehorn 2491190681Snwhitehornboolean_t 2492236019Srajmoea64_dev_direct_mapped(mmu_t mmu, vm_paddr_t pa, vm_size_t size) 2493190681Snwhitehorn{ 2494235689Snwhitehorn struct pvo_entry *pvo, key; 2495204296Snwhitehorn vm_offset_t ppa; 2496204296Snwhitehorn int error = 0; 2497204296Snwhitehorn 2498204296Snwhitehorn PMAP_LOCK(kernel_pmap); 2499235689Snwhitehorn key.pvo_vaddr = ppa = pa & ~ADDR_POFF; 2500235689Snwhitehorn for (pvo = RB_FIND(pvo_tree, &kernel_pmap->pmap_pvo, &key); 2501235689Snwhitehorn ppa < pa + size; ppa += PAGE_SIZE, 2502235689Snwhitehorn pvo = RB_NEXT(pvo_tree, &kernel_pmap->pmap_pvo, pvo)) { 2503279252Snwhitehorn if (pvo == NULL || (pvo->pvo_pte.pa & LPTE_RPGN) != ppa) { 2504204296Snwhitehorn error = EFAULT; 2505204296Snwhitehorn break; 2506204296Snwhitehorn } 2507204296Snwhitehorn } 2508204296Snwhitehorn PMAP_UNLOCK(kernel_pmap); 2509204296Snwhitehorn 2510204296Snwhitehorn return (error); 2511190681Snwhitehorn} 2512190681Snwhitehorn 2513190681Snwhitehorn/* 2514190681Snwhitehorn * Map a set of physical memory pages into the kernel virtual 2515190681Snwhitehorn * address space. Return a pointer to where it is mapped. This 2516190681Snwhitehorn * routine is intended to be used for mapping device memory, 2517190681Snwhitehorn * NOT real memory. 
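 *
 * Purely hypothetical sketch of a use (real consumers reach this MMU
 * method through the pmap_mapdev()/bus_space front ends, and the device
 * address below is invented):
 *
 *	regs = moea64_mapdev_attr(mmu, 0xf1000000UL, PAGE_SIZE,
 *	    VM_MEMATTR_UNCACHEABLE);
 *	... access device registers through 'regs' ...
 *	moea64_unmapdev(mmu, (vm_offset_t)regs, PAGE_SIZE);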
2518190681Snwhitehorn */ 2519190681Snwhitehornvoid * 2520213307Snwhitehornmoea64_mapdev_attr(mmu_t mmu, vm_offset_t pa, vm_size_t size, vm_memattr_t ma) 2521190681Snwhitehorn{ 2522190681Snwhitehorn vm_offset_t va, tmpva, ppa, offset; 2523190681Snwhitehorn 2524190681Snwhitehorn ppa = trunc_page(pa); 2525190681Snwhitehorn offset = pa & PAGE_MASK; 2526233618Snwhitehorn size = roundup2(offset + size, PAGE_SIZE); 2527190681Snwhitehorn 2528254025Sjeff va = kva_alloc(size); 2529190681Snwhitehorn 2530190681Snwhitehorn if (!va) 2531190681Snwhitehorn panic("moea64_mapdev: Couldn't alloc kernel virtual memory"); 2532190681Snwhitehorn 2533190681Snwhitehorn for (tmpva = va; size > 0;) { 2534213307Snwhitehorn moea64_kenter_attr(mmu, tmpva, ppa, ma); 2535190681Snwhitehorn size -= PAGE_SIZE; 2536190681Snwhitehorn tmpva += PAGE_SIZE; 2537190681Snwhitehorn ppa += PAGE_SIZE; 2538190681Snwhitehorn } 2539190681Snwhitehorn 2540190681Snwhitehorn return ((void *)(va + offset)); 2541190681Snwhitehorn} 2542190681Snwhitehorn 2543213307Snwhitehornvoid * 2544236019Srajmoea64_mapdev(mmu_t mmu, vm_paddr_t pa, vm_size_t size) 2545213307Snwhitehorn{ 2546213307Snwhitehorn 2547213307Snwhitehorn return moea64_mapdev_attr(mmu, pa, size, VM_MEMATTR_DEFAULT); 2548213307Snwhitehorn} 2549213307Snwhitehorn 2550190681Snwhitehornvoid 2551190681Snwhitehornmoea64_unmapdev(mmu_t mmu, vm_offset_t va, vm_size_t size) 2552190681Snwhitehorn{ 2553190681Snwhitehorn vm_offset_t base, offset; 2554190681Snwhitehorn 2555190681Snwhitehorn base = trunc_page(va); 2556190681Snwhitehorn offset = va & PAGE_MASK; 2557233618Snwhitehorn size = roundup2(offset + size, PAGE_SIZE); 2558190681Snwhitehorn 2559254025Sjeff kva_free(base, size); 2560190681Snwhitehorn} 2561190681Snwhitehorn 2562216174Snwhitehornvoid 2563198341Smarcelmoea64_sync_icache(mmu_t mmu, pmap_t pm, vm_offset_t va, vm_size_t sz) 2564198341Smarcel{ 2565198341Smarcel struct pvo_entry *pvo; 2566198341Smarcel vm_offset_t lim; 2567198341Smarcel vm_paddr_t pa; 2568198341Smarcel vm_size_t len; 2569198341Smarcel 2570198341Smarcel PMAP_LOCK(pm); 2571198341Smarcel while (sz > 0) { 2572198341Smarcel lim = round_page(va); 2573198341Smarcel len = MIN(lim - va, sz); 2574209975Snwhitehorn pvo = moea64_pvo_find_va(pm, va & ~ADDR_POFF); 2575279252Snwhitehorn if (pvo != NULL && !(pvo->pvo_pte.pa & LPTE_I)) { 2576279252Snwhitehorn pa = (pvo->pvo_pte.pa & LPTE_RPGN) | (va & ADDR_POFF); 2577216174Snwhitehorn moea64_syncicache(mmu, pm, va, pa, len); 2578198341Smarcel } 2579198341Smarcel va += len; 2580198341Smarcel sz -= len; 2581198341Smarcel } 2582198341Smarcel PMAP_UNLOCK(pm); 2583198341Smarcel} 2584257941Sjhibbits 2585276772Smarkjvoid 2586276772Smarkjmoea64_dumpsys_map(mmu_t mmu, vm_paddr_t pa, size_t sz, void **va) 2587257941Sjhibbits{ 2588276772Smarkj 2589276772Smarkj *va = (void *)pa; 2590257941Sjhibbits} 2591257941Sjhibbits 2592276772Smarkjextern struct dump_pa dump_map[PHYS_AVAIL_SZ + 1]; 2593276772Smarkj 2594276772Smarkjvoid 2595276772Smarkjmoea64_scan_init(mmu_t mmu) 2596257941Sjhibbits{ 2597257941Sjhibbits struct pvo_entry *pvo; 2598257941Sjhibbits vm_offset_t va; 2599276772Smarkj int i; 2600276772Smarkj 2601276772Smarkj if (!do_minidump) { 2602276772Smarkj /* Initialize phys. segments for dumpsys(). 
 */
2603276772Smarkj		memset(&dump_map, 0, sizeof(dump_map));
2604276772Smarkj		mem_regions(&pregions, &pregions_sz, &regions, &regions_sz);
2605276772Smarkj		for (i = 0; i < pregions_sz; i++) {
2606276772Smarkj			dump_map[i].pa_start = pregions[i].mr_start;
2607276772Smarkj			dump_map[i].pa_size = pregions[i].mr_size;
2608257941Sjhibbits		}
2609276772Smarkj		return;
2610276772Smarkj	}
2611276772Smarkj
2612276772Smarkj	/* Virtual segments for minidumps: */
2613276772Smarkj	memset(&dump_map, 0, sizeof(dump_map));
2614276772Smarkj
2615276772Smarkj	/* 1st: kernel .data and .bss. */
2616276772Smarkj	dump_map[0].pa_start = trunc_page((uintptr_t)_etext);
2617279252Snwhitehorn	dump_map[0].pa_size = round_page((uintptr_t)_end) -
2618279252Snwhitehorn	    dump_map[0].pa_start;
2619276772Smarkj
2620276772Smarkj	/* 2nd: msgbuf and tables (see pmap_bootstrap()). */
2621276772Smarkj	dump_map[1].pa_start = (vm_paddr_t)msgbufp->msg_ptr;
2622276772Smarkj	dump_map[1].pa_size = round_page(msgbufp->msg_size);
2623276772Smarkj
2624276772Smarkj	/* 3rd: kernel VM. */
2625276772Smarkj	va = dump_map[1].pa_start + dump_map[1].pa_size;
2626276772Smarkj	/* Find start of next chunk (from va). */
2627276772Smarkj	while (va < virtual_end) {
2628276772Smarkj		/* Don't dump the buffer cache. */
2629276772Smarkj		if (va >= kmi.buffer_sva && va < kmi.buffer_eva) {
2630276772Smarkj			va = kmi.buffer_eva;
2631276772Smarkj			continue;
2632276772Smarkj		}
2633276772Smarkj		pvo = moea64_pvo_find_va(kernel_pmap, va & ~ADDR_POFF);
2634279252Snwhitehorn		if (pvo != NULL && !(pvo->pvo_vaddr & PVO_DEAD))
2635257941Sjhibbits			break;
2636276772Smarkj		va += PAGE_SIZE;
2637276772Smarkj	}
2638276772Smarkj	if (va < virtual_end) {
2639276772Smarkj		dump_map[2].pa_start = va;
2640276772Smarkj		va += PAGE_SIZE;
2641276772Smarkj		/* Find last page in chunk. */
2642276772Smarkj		while (va < virtual_end) {
2643276772Smarkj			/* Don't run into the buffer cache. */
2644276772Smarkj			if (va == kmi.buffer_sva)
2645257941Sjhibbits				break;
2646276772Smarkj			pvo = moea64_pvo_find_va(kernel_pmap, va & ~ADDR_POFF);
2647279252Snwhitehorn			if (pvo != NULL && !(pvo->pvo_vaddr & PVO_DEAD))
2648276772Smarkj				break;
2649276772Smarkj			va += PAGE_SIZE;
2650257941Sjhibbits		}
2651276772Smarkj		dump_map[2].pa_size = va - dump_map[2].pa_start;
2652257941Sjhibbits	}
2653257941Sjhibbits}
2654279252Snwhitehorn
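/*
 * Illustrative, self-contained model of the chunk walk performed by
 * moea64_scan_init() above.  Everything here (scan_chunk(), is_mapped(),
 * the fixed 4 KB SCAN_PAGE_SIZE) is an editorial sketch for exposition,
 * not part of this file's interface, and is therefore kept under #if 0.
 */
#if 0
#include <stddef.h>
#include <stdint.h>

#define SCAN_PAGE_SIZE	4096

/*
 * Find the next run of mapped pages in [va, end), skipping the excluded
 * window [skip_sva, skip_eva) just as the buffer cache is skipped above.
 * Returns the size of the run and stores its start in *chunk_start;
 * returns 0 if no mapped page remains.
 */
static size_t
scan_chunk(uintptr_t va, uintptr_t end, uintptr_t skip_sva,
    uintptr_t skip_eva, int (*is_mapped)(uintptr_t), uintptr_t *chunk_start)
{
	/* First loop: find the start of the next chunk. */
	while (va < end) {
		if (va >= skip_sva && va < skip_eva) {
			va = skip_eva;	/* don't scan the excluded window */
			continue;
		}
		if (is_mapped(va))
			break;
		va += SCAN_PAGE_SIZE;
	}
	if (va >= end)
		return (0);
	*chunk_start = va;
	va += SCAN_PAGE_SIZE;

	/* Second loop: extend until an unmapped page or the window start. */
	while (va < end && va != skip_sva && is_mapped(va))
		va += SCAN_PAGE_SIZE;
	return (va - *chunk_start);
}
#endif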