pmap.c revision 187151
/*-
 * Copyright (C) 2007-2008 Semihalf, Rafal Jaworowski <raj@semihalf.com>
 * Copyright (C) 2006 Semihalf, Marian Balakowicz <m8@semihalf.com>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN
 * NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
 * TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
 * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
 * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Some hw specific parts of this pmap were derived or influenced
 * by NetBSD's ibm4xx pmap module. More generic code is shared with
 * a few other pmap modules from the FreeBSD tree.
 */

/*
 * VM layout notes:
 *
 * Kernel and user threads run within one common virtual address space
 * defined by AS=0.
 *
 * Virtual address space layout:
 * -----------------------------
 * 0x0000_0000 - 0xafff_ffff   : user process
 * 0xb000_0000 - 0xbfff_ffff   : pmap_mapdev()-ed area (PCI/PCIE etc.)
 * 0xc000_0000 - 0xc0ff_ffff   : kernel reserved
 *   0xc000_0000 - kernelend   : kernel code+data, env, metadata etc.
 * 0xc100_0000 - 0xfeef_ffff   : KVA
 *   0xc100_0000 - 0xc100_3fff : reserved for page zero/copy
 *   0xc100_4000 - 0xc200_3fff : reserved for ptbl bufs
 *   0xc200_4000 - 0xc200_8fff : guard page + kstack0
 *   0xc200_9000 - 0xfeef_ffff : actual free KVA space
 * 0xfef0_0000 - 0xffff_ffff   : I/O devices region
 */
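/*
 * Translation in this pmap is a two-level walk: PDIR_IDX(va) picks a page
 * table (ptbl) pointer out of pm_pdir[], PTBL_IDX(va) picks the pte within
 * that ptbl, and the low-order bits are the page offset.  A minimal sketch
 * of the lookup (pte_find() below is the canonical version):
 *
 *	pte_t *
 *	va_to_pte(pmap_t pmap, vm_offset_t va)
 *	{
 *		pte_t *ptbl = pmap->pm_pdir[PDIR_IDX(va)];
 *
 *		return ((ptbl != NULL) ? &ptbl[PTBL_IDX(va)] : NULL);
 *	}
 */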
#include <sys/cdefs.h>
__FBSDID("$FreeBSD: head/sys/powerpc/booke/pmap.c 187151 2009-01-13 16:15:49Z raj $");

#include <sys/types.h>
#include <sys/param.h>
#include <sys/malloc.h>
#include <sys/ktr.h>
#include <sys/proc.h>
#include <sys/user.h>
#include <sys/queue.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/msgbuf.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/vmmeter.h>

#include <vm/vm.h>
#include <vm/vm_page.h>
#include <vm/vm_kern.h>
#include <vm/vm_pageout.h>
#include <vm/vm_extern.h>
#include <vm/vm_object.h>
#include <vm/vm_param.h>
#include <vm/vm_map.h>
#include <vm/vm_pager.h>
#include <vm/uma.h>

#include <machine/cpu.h>
#include <machine/pcb.h>
#include <machine/powerpc.h>

#include <machine/tlb.h>
#include <machine/spr.h>
#include <machine/vmparam.h>
#include <machine/md_var.h>
#include <machine/mmuvar.h>
#include <machine/pmap.h>
#include <machine/pte.h>

#include "mmu_if.h"

#define DEBUG
#undef DEBUG

#ifdef DEBUG
#define debugf(fmt, args...) printf(fmt, ##args)
#else
#define debugf(fmt, args...)
#endif

#define TODO            panic("%s: not implemented", __func__);
#define memmove(d, s, l)    bcopy(s, d, l)

#include "opt_sched.h"
#ifndef SCHED_4BSD
#error "e500 only works with SCHED_4BSD which uses a global scheduler lock."
#endif
extern struct mtx sched_lock;

/* Kernel physical load address. */
extern uint32_t kernload;

struct mem_region availmem_regions[MEM_REGIONS];
int availmem_regions_sz;

/* Reserved KVA space and mutex for mmu_booke_zero_page. */
static vm_offset_t zero_page_va;
static struct mtx zero_page_mutex;

static struct mtx tlbivax_mutex;

/*
 * Reserved KVA space for mmu_booke_zero_page_idle. This is used
 * by the idle thread only, no lock required.
 */
static vm_offset_t zero_page_idle_va;
/* Reserved KVA space and mutex for mmu_booke_copy_page. */
static vm_offset_t copy_page_src_va;
static vm_offset_t copy_page_dst_va;
static struct mtx copy_page_mutex;

/**************************************************************************/
/* PMAP */
/**************************************************************************/

static void mmu_booke_enter_locked(mmu_t, pmap_t, vm_offset_t, vm_page_t,
    vm_prot_t, boolean_t);

unsigned int kptbl_min;      /* Index of the first kernel ptbl. */
unsigned int kernel_ptbls;   /* Number of KVA ptbls. */

static int pagedaemon_waken;

/*
 * If user pmap is processed with mmu_booke_remove and the resident count
 * drops to 0, there are no more pages to remove, so we need not continue.
 */
#define PMAP_REMOVE_DONE(pmap) \
    ((pmap) != kernel_pmap && (pmap)->pm_stats.resident_count == 0)

extern void tlb_lock(uint32_t *);
extern void tlb_unlock(uint32_t *);
extern void tid_flush(tlbtid_t);

/**************************************************************************/
/* TLB and TID handling */
/**************************************************************************/

/* Translation ID busy table */
static volatile pmap_t tidbusy[MAXCPU][TID_MAX + 1];
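/*
 * A TID (translation ID) tags TLB entries with their owning address space.
 * tidbusy[cpu][tid] tracks which pmap currently owns each TID on a given
 * CPU (the tables are per-CPU since every core has its own MMU); entry 0
 * is permanently owned by kernel_pmap, see mmu_booke_bootstrap() below.
 */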
/*
 * TLB0 capabilities (entry, way numbers etc.). These can vary between e500
 * core revisions and should be read from h/w registers during early config.
 */
uint32_t tlb0_entries;
uint32_t tlb0_ways;
uint32_t tlb0_entries_per_way;

#define TLB0_ENTRIES            (tlb0_entries)
#define TLB0_WAYS               (tlb0_ways)
#define TLB0_ENTRIES_PER_WAY    (tlb0_entries_per_way)

#define TLB1_ENTRIES 16

/* In-ram copy of the TLB1 */
static tlb_entry_t tlb1[TLB1_ENTRIES];

/* Next free entry in the TLB1 */
static unsigned int tlb1_idx;

static tlbtid_t tid_alloc(struct pmap *);

static void tlb_print_entry(int, uint32_t, uint32_t, uint32_t, uint32_t);

static int tlb1_set_entry(vm_offset_t, vm_offset_t, vm_size_t, uint32_t);
static void tlb1_write_entry(unsigned int);
static int tlb1_iomapped(int, vm_paddr_t, vm_size_t, vm_offset_t *);
static vm_size_t tlb1_mapin_region(vm_offset_t, vm_offset_t, vm_size_t);

static vm_size_t tsize2size(unsigned int);
static unsigned int size2tsize(vm_size_t);
static unsigned int ilog2(unsigned int);

static void set_mas4_defaults(void);

static inline void tlb0_flush_entry(vm_offset_t);
static inline unsigned int tlb0_tableidx(vm_offset_t, unsigned int);

/**************************************************************************/
/* Page table management */
/**************************************************************************/

/* Data for the pv entry allocation mechanism */
static uma_zone_t pvzone;
static struct vm_object pvzone_obj;
static int pv_entry_count = 0, pv_entry_max = 0, pv_entry_high_water = 0;

#define PV_ENTRY_ZONE_MIN       2048    /* min pv entries in uma zone */

#ifndef PMAP_SHPGPERPROC
#define PMAP_SHPGPERPROC        200
#endif

static void ptbl_init(void);
static struct ptbl_buf *ptbl_buf_alloc(void);
static void ptbl_buf_free(struct ptbl_buf *);
static void ptbl_free_pmap_ptbl(pmap_t, pte_t *);

static pte_t *ptbl_alloc(mmu_t, pmap_t, unsigned int);
static void ptbl_free(mmu_t, pmap_t, unsigned int);
static void ptbl_hold(mmu_t, pmap_t, unsigned int);
static int ptbl_unhold(mmu_t, pmap_t, unsigned int);

static vm_paddr_t pte_vatopa(mmu_t, pmap_t, vm_offset_t);
static pte_t *pte_find(mmu_t, pmap_t, vm_offset_t);
static void pte_enter(mmu_t, pmap_t, vm_page_t, vm_offset_t, uint32_t);
static int pte_remove(mmu_t, pmap_t, vm_offset_t, uint8_t);

static pv_entry_t pv_alloc(void);
static void pv_free(pv_entry_t);
static void pv_insert(pmap_t, vm_offset_t, vm_page_t);
static void pv_remove(pmap_t, vm_offset_t, vm_page_t);

/* Number of kva ptbl buffers, each covering one ptbl (PTBL_PAGES). */
#define PTBL_BUFS               (128 * 16)
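/*
 * Sizing note: 128 * 16 = 2048 bufs; with 4K pages and PTBL_PAGES == 2
 * (which matches the VM layout above) the pool spans 2048 * 2 * 4K =
 * 16 MB of KVA -- exactly the 0xc100_4000 - 0xc200_3fff window reserved
 * for ptbl bufs in the layout notes.
 */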
struct ptbl_buf {
    TAILQ_ENTRY(ptbl_buf) link; /* list link */
    vm_offset_t kva;            /* va of mapping */
};

/* ptbl free list and a lock used for access synchronization. */
static TAILQ_HEAD(, ptbl_buf) ptbl_buf_freelist;
static struct mtx ptbl_buf_freelist_lock;

/* Base address of kva space allocated for ptbl bufs. */
static vm_offset_t ptbl_buf_pool_vabase;

/* Pointer to ptbl_buf structures. */
static struct ptbl_buf *ptbl_bufs;

/*
 * Kernel MMU interface
 */
static void mmu_booke_change_wiring(mmu_t, pmap_t, vm_offset_t, boolean_t);
static void mmu_booke_clear_modify(mmu_t, vm_page_t);
static void mmu_booke_clear_reference(mmu_t, vm_page_t);
static void mmu_booke_copy(pmap_t, pmap_t, vm_offset_t, vm_size_t,
    vm_offset_t);
static void mmu_booke_copy_page(mmu_t, vm_page_t, vm_page_t);
static void mmu_booke_enter(mmu_t, pmap_t, vm_offset_t, vm_page_t,
    vm_prot_t, boolean_t);
static void mmu_booke_enter_object(mmu_t, pmap_t, vm_offset_t, vm_offset_t,
    vm_page_t, vm_prot_t);
static void mmu_booke_enter_quick(mmu_t, pmap_t, vm_offset_t, vm_page_t,
    vm_prot_t);
static vm_paddr_t mmu_booke_extract(mmu_t, pmap_t, vm_offset_t);
static vm_page_t mmu_booke_extract_and_hold(mmu_t, pmap_t, vm_offset_t,
    vm_prot_t);
static void mmu_booke_init(mmu_t);
static boolean_t mmu_booke_is_modified(mmu_t, vm_page_t);
static boolean_t mmu_booke_is_prefaultable(mmu_t, pmap_t, vm_offset_t);
static boolean_t mmu_booke_ts_referenced(mmu_t, vm_page_t);
static vm_offset_t mmu_booke_map(mmu_t, vm_offset_t *, vm_offset_t, vm_offset_t,
    int);
static int mmu_booke_mincore(mmu_t, pmap_t, vm_offset_t);
static void mmu_booke_object_init_pt(mmu_t, pmap_t, vm_offset_t,
    vm_object_t, vm_pindex_t, vm_size_t);
static boolean_t mmu_booke_page_exists_quick(mmu_t, pmap_t, vm_page_t);
static void mmu_booke_page_init(mmu_t, vm_page_t);
static int mmu_booke_page_wired_mappings(mmu_t, vm_page_t);
static void mmu_booke_pinit(mmu_t, pmap_t);
static void mmu_booke_pinit0(mmu_t, pmap_t);
static void mmu_booke_protect(mmu_t, pmap_t, vm_offset_t, vm_offset_t,
    vm_prot_t);
static void mmu_booke_qenter(mmu_t, vm_offset_t, vm_page_t *, int);
static void mmu_booke_qremove(mmu_t, vm_offset_t, int);
static void mmu_booke_release(mmu_t, pmap_t);
static void mmu_booke_remove(mmu_t, pmap_t, vm_offset_t, vm_offset_t);
static void mmu_booke_remove_all(mmu_t, vm_page_t);
static void mmu_booke_remove_write(mmu_t, vm_page_t);
static void mmu_booke_zero_page(mmu_t, vm_page_t);
static void mmu_booke_zero_page_area(mmu_t, vm_page_t, int, int);
static void mmu_booke_zero_page_idle(mmu_t, vm_page_t);
static void mmu_booke_activate(mmu_t, struct thread *);
static void mmu_booke_deactivate(mmu_t, struct thread *);
static void mmu_booke_bootstrap(mmu_t, vm_offset_t, vm_offset_t);
static void *mmu_booke_mapdev(mmu_t, vm_offset_t, vm_size_t);
static void mmu_booke_unmapdev(mmu_t, vm_offset_t, vm_size_t);
static vm_offset_t mmu_booke_kextract(mmu_t, vm_offset_t);
static void mmu_booke_kenter(mmu_t, vm_offset_t, vm_offset_t);
static void mmu_booke_kremove(mmu_t, vm_offset_t);
static boolean_t mmu_booke_dev_direct_mapped(mmu_t, vm_offset_t, vm_size_t);
static boolean_t mmu_booke_page_executable(mmu_t, vm_page_t);

static mmu_method_t mmu_booke_methods[] = {
    /* pmap dispatcher interface */
    MMUMETHOD(mmu_change_wiring,    mmu_booke_change_wiring),
    MMUMETHOD(mmu_clear_modify,     mmu_booke_clear_modify),
    MMUMETHOD(mmu_clear_reference,  mmu_booke_clear_reference),
    MMUMETHOD(mmu_copy,             mmu_booke_copy),
    MMUMETHOD(mmu_copy_page,        mmu_booke_copy_page),
    MMUMETHOD(mmu_enter,            mmu_booke_enter),
    MMUMETHOD(mmu_enter_object,     mmu_booke_enter_object),
    MMUMETHOD(mmu_enter_quick,      mmu_booke_enter_quick),
    MMUMETHOD(mmu_extract,          mmu_booke_extract),
    MMUMETHOD(mmu_extract_and_hold, mmu_booke_extract_and_hold),
    MMUMETHOD(mmu_init,             mmu_booke_init),
    MMUMETHOD(mmu_is_modified,      mmu_booke_is_modified),
    MMUMETHOD(mmu_is_prefaultable,  mmu_booke_is_prefaultable),
    MMUMETHOD(mmu_ts_referenced,    mmu_booke_ts_referenced),
    MMUMETHOD(mmu_map,              mmu_booke_map),
    MMUMETHOD(mmu_mincore,          mmu_booke_mincore),
    MMUMETHOD(mmu_object_init_pt,   mmu_booke_object_init_pt),
    MMUMETHOD(mmu_page_exists_quick,mmu_booke_page_exists_quick),
    MMUMETHOD(mmu_page_init,        mmu_booke_page_init),
    MMUMETHOD(mmu_page_wired_mappings, mmu_booke_page_wired_mappings),
    MMUMETHOD(mmu_pinit,            mmu_booke_pinit),
    MMUMETHOD(mmu_pinit0,           mmu_booke_pinit0),
    MMUMETHOD(mmu_protect,          mmu_booke_protect),
    MMUMETHOD(mmu_qenter,           mmu_booke_qenter),
    MMUMETHOD(mmu_qremove,          mmu_booke_qremove),
    MMUMETHOD(mmu_release,          mmu_booke_release),
    MMUMETHOD(mmu_remove,           mmu_booke_remove),
    MMUMETHOD(mmu_remove_all,       mmu_booke_remove_all),
    MMUMETHOD(mmu_remove_write,     mmu_booke_remove_write),
    MMUMETHOD(mmu_zero_page,        mmu_booke_zero_page),
    MMUMETHOD(mmu_zero_page_area,   mmu_booke_zero_page_area),
    MMUMETHOD(mmu_zero_page_idle,   mmu_booke_zero_page_idle),
    MMUMETHOD(mmu_activate,         mmu_booke_activate),
    MMUMETHOD(mmu_deactivate,       mmu_booke_deactivate),

    /* Internal interfaces */
    MMUMETHOD(mmu_bootstrap,        mmu_booke_bootstrap),
    MMUMETHOD(mmu_dev_direct_mapped,mmu_booke_dev_direct_mapped),
    MMUMETHOD(mmu_mapdev,           mmu_booke_mapdev),
    MMUMETHOD(mmu_kenter,           mmu_booke_kenter),
    MMUMETHOD(mmu_kextract,         mmu_booke_kextract),
/*  MMUMETHOD(mmu_kremove,          mmu_booke_kremove), */
    MMUMETHOD(mmu_page_executable,  mmu_booke_page_executable),
    MMUMETHOD(mmu_unmapdev,         mmu_booke_unmapdev),

    { 0, 0 }
};

static mmu_def_t booke_mmu = {
    MMU_TYPE_BOOKE,
    mmu_booke_methods,
    0
};
MMU_DEF(booke_mmu);

/* Read TLB0 geometry (entry and way counts) into the globals above. */
static __inline void
tlb0_get_tlbconf(void)
{
    uint32_t tlb0_cfg;

    tlb0_cfg = mfspr(SPR_TLB0CFG);
    tlb0_entries = tlb0_cfg & TLBCFG_NENTRY_MASK;
    tlb0_ways = (tlb0_cfg & TLBCFG_ASSOC_MASK) >> TLBCFG_ASSOC_SHIFT;
    tlb0_entries_per_way = tlb0_entries / tlb0_ways;
}
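/*
 * Worked example of the arithmetic above (figures are illustrative; the
 * real values come from the TLB0CFG register): a 512-entry, 4-way set
 * associative TLB0 yields tlb0_entries_per_way == 128, i.e. 128 sets of
 * 4 entries each, which the tlb0 indexing helpers are built on.
 */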
/* Initialize pool of kva ptbl buffers. */
static void
ptbl_init(void)
{
    int i;

    CTR3(KTR_PMAP, "%s: s (ptbl_bufs = 0x%08x size 0x%08x)", __func__,
        (uint32_t)ptbl_bufs, sizeof(struct ptbl_buf) * PTBL_BUFS);
    CTR3(KTR_PMAP, "%s: s (ptbl_buf_pool_vabase = 0x%08x size = 0x%08x)",
        __func__, ptbl_buf_pool_vabase, PTBL_BUFS * PTBL_PAGES * PAGE_SIZE);

    mtx_init(&ptbl_buf_freelist_lock, "ptbl bufs lock", NULL, MTX_DEF);
    TAILQ_INIT(&ptbl_buf_freelist);

    for (i = 0; i < PTBL_BUFS; i++) {
        ptbl_bufs[i].kva = ptbl_buf_pool_vabase + i * PTBL_PAGES * PAGE_SIZE;
        TAILQ_INSERT_TAIL(&ptbl_buf_freelist, &ptbl_bufs[i], link);
    }
}

/* Get a ptbl_buf from the freelist. */
static struct ptbl_buf *
ptbl_buf_alloc(void)
{
    struct ptbl_buf *buf;

    mtx_lock(&ptbl_buf_freelist_lock);
    buf = TAILQ_FIRST(&ptbl_buf_freelist);
    if (buf != NULL)
        TAILQ_REMOVE(&ptbl_buf_freelist, buf, link);
    mtx_unlock(&ptbl_buf_freelist_lock);

    CTR2(KTR_PMAP, "%s: buf = %p", __func__, buf);

    return (buf);
}

/* Return ptbl buf to the free pool. */
static void
ptbl_buf_free(struct ptbl_buf *buf)
{

    CTR2(KTR_PMAP, "%s: buf = %p", __func__, buf);

    mtx_lock(&ptbl_buf_freelist_lock);
    TAILQ_INSERT_TAIL(&ptbl_buf_freelist, buf, link);
    mtx_unlock(&ptbl_buf_freelist_lock);
}

/*
 * Search the pmap's list of allocated ptbl bufs for the given ptbl and
 * release the matching buf back to the free pool.
 */
static void
ptbl_free_pmap_ptbl(pmap_t pmap, pte_t *ptbl)
{
    struct ptbl_buf *pbuf;

    CTR2(KTR_PMAP, "%s: ptbl = %p", __func__, ptbl);

    PMAP_LOCK_ASSERT(pmap, MA_OWNED);

    TAILQ_FOREACH(pbuf, &pmap->pm_ptbl_list, link)
        if (pbuf->kva == (vm_offset_t)ptbl) {
            /* Remove from pmap ptbl buf list. */
            TAILQ_REMOVE(&pmap->pm_ptbl_list, pbuf, link);

            /* Free corresponding ptbl buf. */
            ptbl_buf_free(pbuf);
            break;
        }
}
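/*
 * Page table allocation and reference counting.  Each page backing a ptbl
 * keeps the table's hold count in its wire_count: ptbl_hold() and
 * ptbl_unhold() adjust it as pte entries come and go, and once the count
 * drops to zero ptbl_unhold() calls ptbl_free() to tear the table down
 * (see the routines below).
 */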
/* Allocate page table. */
static pte_t *
ptbl_alloc(mmu_t mmu, pmap_t pmap, unsigned int pdir_idx)
{
    vm_page_t mtbl[PTBL_PAGES];
    vm_page_t m;
    struct ptbl_buf *pbuf;
    unsigned int pidx;
    pte_t *ptbl;
    int i;

    CTR4(KTR_PMAP, "%s: pmap = %p su = %d pdir_idx = %d", __func__, pmap,
        (pmap == kernel_pmap), pdir_idx);

    KASSERT((pdir_idx <= (VM_MAXUSER_ADDRESS / PDIR_SIZE)),
        ("ptbl_alloc: invalid pdir_idx"));
    KASSERT((pmap->pm_pdir[pdir_idx] == NULL),
        ("pte_alloc: valid ptbl entry exists!"));

    pbuf = ptbl_buf_alloc();
    if (pbuf == NULL)
        panic("pte_alloc: couldn't alloc kernel virtual memory");

    ptbl = (pte_t *)pbuf->kva;

    CTR2(KTR_PMAP, "%s: ptbl kva = %p", __func__, ptbl);

    /* Allocate ptbl pages, this will sleep! */
    for (i = 0; i < PTBL_PAGES; i++) {
        pidx = (PTBL_PAGES * pdir_idx) + i;
        while ((m = vm_page_alloc(NULL, pidx,
            VM_ALLOC_NOOBJ | VM_ALLOC_WIRED)) == NULL) {

            PMAP_UNLOCK(pmap);
            vm_page_unlock_queues();
            VM_WAIT;
            vm_page_lock_queues();
            PMAP_LOCK(pmap);
        }
        mtbl[i] = m;
    }

    /* Map allocated pages into kernel_pmap. */
    mmu_booke_qenter(mmu, (vm_offset_t)ptbl, mtbl, PTBL_PAGES);

    /* Zero whole ptbl. */
    bzero((caddr_t)ptbl, PTBL_PAGES * PAGE_SIZE);

    /* Add pbuf to the pmap ptbl bufs list. */
    TAILQ_INSERT_TAIL(&pmap->pm_ptbl_list, pbuf, link);

    return (ptbl);
}
/* Free ptbl pages and invalidate pdir entry. */
static void
ptbl_free(mmu_t mmu, pmap_t pmap, unsigned int pdir_idx)
{
    pte_t *ptbl;
    vm_paddr_t pa;
    vm_offset_t va;
    vm_page_t m;
    int i;

    CTR4(KTR_PMAP, "%s: pmap = %p su = %d pdir_idx = %d", __func__, pmap,
        (pmap == kernel_pmap), pdir_idx);

    KASSERT((pdir_idx <= (VM_MAXUSER_ADDRESS / PDIR_SIZE)),
        ("ptbl_free: invalid pdir_idx"));

    ptbl = pmap->pm_pdir[pdir_idx];

    CTR2(KTR_PMAP, "%s: ptbl = %p", __func__, ptbl);

    KASSERT((ptbl != NULL), ("ptbl_free: null ptbl"));

    /*
     * Invalidate the pdir entry as soon as possible, so that other CPUs
     * don't attempt to look up the page tables we are releasing.
     */
    mtx_lock_spin(&tlbivax_mutex);

    pmap->pm_pdir[pdir_idx] = NULL;

    mtx_unlock_spin(&tlbivax_mutex);

    for (i = 0; i < PTBL_PAGES; i++) {
        va = ((vm_offset_t)ptbl + (i * PAGE_SIZE));
        pa = pte_vatopa(mmu, kernel_pmap, va);
        m = PHYS_TO_VM_PAGE(pa);
        vm_page_free_zero(m);
        atomic_subtract_int(&cnt.v_wire_count, 1);
        mmu_booke_kremove(mmu, va);
    }

    ptbl_free_pmap_ptbl(pmap, ptbl);
}

/*
 * Decrement ptbl pages hold count and attempt to free ptbl pages.
 * Called when removing pte entry from ptbl.
 *
 * Return 1 if ptbl pages were freed.
 */
static int
ptbl_unhold(mmu_t mmu, pmap_t pmap, unsigned int pdir_idx)
{
    pte_t *ptbl;
    vm_paddr_t pa;
    vm_page_t m;
    int i;

    CTR4(KTR_PMAP, "%s: pmap = %p su = %d pdir_idx = %d", __func__, pmap,
        (pmap == kernel_pmap), pdir_idx);

    KASSERT((pdir_idx <= (VM_MAXUSER_ADDRESS / PDIR_SIZE)),
        ("ptbl_unhold: invalid pdir_idx"));
    KASSERT((pmap != kernel_pmap),
        ("ptbl_unhold: unholding kernel ptbl!"));

    ptbl = pmap->pm_pdir[pdir_idx];

    //debugf("ptbl_unhold: ptbl = 0x%08x\n", (u_int32_t)ptbl);
    KASSERT(((vm_offset_t)ptbl >= VM_MIN_KERNEL_ADDRESS),
        ("ptbl_unhold: non kva ptbl"));

    /* decrement hold count */
    for (i = 0; i < PTBL_PAGES; i++) {
        pa = pte_vatopa(mmu, kernel_pmap,
            (vm_offset_t)ptbl + (i * PAGE_SIZE));
        m = PHYS_TO_VM_PAGE(pa);
        m->wire_count--;
    }

    /*
     * Free ptbl pages if there are no pte entries in this ptbl.
     * wire_count has the same value for all ptbl pages, so check the
     * last page.
     */
    if (m->wire_count == 0) {
        ptbl_free(mmu, pmap, pdir_idx);

        //debugf("ptbl_unhold: e (freed ptbl)\n");
        return (1);
    }

    return (0);
}
/*
 * Increment hold count for ptbl pages. This routine is used when a new pte
 * entry is being inserted into the ptbl.
 */
static void
ptbl_hold(mmu_t mmu, pmap_t pmap, unsigned int pdir_idx)
{
    vm_paddr_t pa;
    pte_t *ptbl;
    vm_page_t m;
    int i;

    CTR3(KTR_PMAP, "%s: pmap = %p pdir_idx = %d", __func__, pmap,
        pdir_idx);

    KASSERT((pdir_idx <= (VM_MAXUSER_ADDRESS / PDIR_SIZE)),
        ("ptbl_hold: invalid pdir_idx"));
    KASSERT((pmap != kernel_pmap),
        ("ptbl_hold: holding kernel ptbl!"));

    ptbl = pmap->pm_pdir[pdir_idx];

    KASSERT((ptbl != NULL), ("ptbl_hold: null ptbl"));

    for (i = 0; i < PTBL_PAGES; i++) {
        pa = pte_vatopa(mmu, kernel_pmap,
            (vm_offset_t)ptbl + (i * PAGE_SIZE));
        m = PHYS_TO_VM_PAGE(pa);
        m->wire_count++;
    }
}

/* Allocate pv_entry structure. */
pv_entry_t
pv_alloc(void)
{
    pv_entry_t pv;

    pv_entry_count++;
    if ((pv_entry_count > pv_entry_high_water) &&
        (pagedaemon_waken == 0)) {
        pagedaemon_waken = 1;
        wakeup(&vm_pages_needed);
    }
    pv = uma_zalloc(pvzone, M_NOWAIT);

    return (pv);
}

/* Free pv_entry structure. */
static __inline void
pv_free(pv_entry_t pve)
{

    pv_entry_count--;
    uma_zfree(pvzone, pve);
}

/* Allocate and initialize pv_entry structure. */
static void
pv_insert(pmap_t pmap, vm_offset_t va, vm_page_t m)
{
    pv_entry_t pve;

    //int su = (pmap == kernel_pmap);
    //debugf("pv_insert: s (su = %d pmap = 0x%08x va = 0x%08x m = 0x%08x)\n", su,
    //    (u_int32_t)pmap, va, (u_int32_t)m);

    pve = pv_alloc();
    if (pve == NULL)
        panic("pv_insert: no pv entries!");

    pve->pv_pmap = pmap;
    pve->pv_va = va;

    /* add to pv_list */
    PMAP_LOCK_ASSERT(pmap, MA_OWNED);
    mtx_assert(&vm_page_queue_mtx, MA_OWNED);

    TAILQ_INSERT_TAIL(&m->md.pv_list, pve, pv_link);

    //debugf("pv_insert: e\n");
}
/* Destroy pv entry. */
static void
pv_remove(pmap_t pmap, vm_offset_t va, vm_page_t m)
{
    pv_entry_t pve;

    //int su = (pmap == kernel_pmap);
    //debugf("pv_remove: s (su = %d pmap = 0x%08x va = 0x%08x)\n", su, (u_int32_t)pmap, va);

    PMAP_LOCK_ASSERT(pmap, MA_OWNED);
    mtx_assert(&vm_page_queue_mtx, MA_OWNED);

    /* find pv entry */
    TAILQ_FOREACH(pve, &m->md.pv_list, pv_link) {
        if ((pmap == pve->pv_pmap) && (va == pve->pv_va)) {
            /* remove from pv_list */
            TAILQ_REMOVE(&m->md.pv_list, pve, pv_link);
            if (TAILQ_EMPTY(&m->md.pv_list))
                vm_page_flag_clear(m, PG_WRITEABLE);

            /* free pv entry struct */
            pv_free(pve);
            break;
        }
    }

    //debugf("pv_remove: e\n");
}

/*
 * Clean pte entry, try to free page table page if requested.
 *
 * Return 1 if ptbl pages were freed, otherwise return 0.
 */
static int
pte_remove(mmu_t mmu, pmap_t pmap, vm_offset_t va, uint8_t flags)
{
    unsigned int pdir_idx = PDIR_IDX(va);
    unsigned int ptbl_idx = PTBL_IDX(va);
    vm_page_t m;
    pte_t *ptbl;
    pte_t *pte;

    //int su = (pmap == kernel_pmap);
    //debugf("pte_remove: s (su = %d pmap = 0x%08x va = 0x%08x flags = %d)\n",
    //    su, (u_int32_t)pmap, va, flags);

    ptbl = pmap->pm_pdir[pdir_idx];
    KASSERT(ptbl, ("pte_remove: null ptbl"));

    pte = &ptbl[ptbl_idx];

    if (pte == NULL || !PTE_ISVALID(pte))
        return (0);

    /* Get vm_page_t for mapped pte. */
    m = PHYS_TO_VM_PAGE(PTE_PA(pte));

    if (PTE_ISWIRED(pte))
        pmap->pm_stats.wired_count--;

    if (!PTE_ISFAKE(pte)) {
        /* Handle managed entry. */
        if (PTE_ISMANAGED(pte)) {

            /* Handle modified pages. */
            if (PTE_ISMODIFIED(pte))
                vm_page_dirty(m);

            /* Referenced pages. */
            if (PTE_ISREFERENCED(pte))
                vm_page_flag_set(m, PG_REFERENCED);

            /* Remove pv_entry from pv_list. */
            pv_remove(pmap, va, m);
        }
    }

    mtx_lock_spin(&tlbivax_mutex);

    tlb0_flush_entry(va);
    pte->flags = 0;
    pte->rpn = 0;

    mtx_unlock_spin(&tlbivax_mutex);

    pmap->pm_stats.resident_count--;

    if (flags & PTBL_UNHOLD) {
        //debugf("pte_remove: e (unhold)\n");
        return (ptbl_unhold(mmu, pmap, pdir_idx));
    }

    //debugf("pte_remove: e\n");
    return (0);
}
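/*
 * Note on the invalidation pattern used above and below: a PTE is only
 * modified inside a tlbivax_mutex critical section, with the stale TLB0
 * entry flushed (tlb0_flush_entry()) under the same spin lock, so the
 * flush and the PTE update appear atomic to the other CPUs.
 */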
/*
 * Insert PTE for a given page and virtual address.
 */
static void
pte_enter(mmu_t mmu, pmap_t pmap, vm_page_t m, vm_offset_t va, uint32_t flags)
{
    unsigned int pdir_idx = PDIR_IDX(va);
    unsigned int ptbl_idx = PTBL_IDX(va);
    pte_t *ptbl, *pte;

    CTR4(KTR_PMAP, "%s: su = %d pmap = %p va = %p", __func__,
        pmap == kernel_pmap, pmap, va);

    /* Get the page table pointer. */
    ptbl = pmap->pm_pdir[pdir_idx];

    if (ptbl == NULL) {
        /* Allocate page table pages. */
        ptbl = ptbl_alloc(mmu, pmap, pdir_idx);
    } else {
        /*
         * Check if there is valid mapping for requested
         * va, if there is, remove it.
         */
        pte = &pmap->pm_pdir[pdir_idx][ptbl_idx];
        if (PTE_ISVALID(pte)) {
            pte_remove(mmu, pmap, va, PTBL_HOLD);
        } else {
            /*
             * pte is not used, increment hold count
             * for ptbl pages.
             */
            if (pmap != kernel_pmap)
                ptbl_hold(mmu, pmap, pdir_idx);
        }
    }

    /*
     * Insert pv_entry into pv_list for mapped page if part of managed
     * memory.
     */
    if ((m->flags & PG_FICTITIOUS) == 0) {
        if ((m->flags & PG_UNMANAGED) == 0) {
            flags |= PTE_MANAGED;

            /* Create and insert pv entry. */
            pv_insert(pmap, va, m);
        }
    } else {
        flags |= PTE_FAKE;
    }

    pmap->pm_stats.resident_count++;

    mtx_lock_spin(&tlbivax_mutex);

    tlb0_flush_entry(va);
    if (pmap->pm_pdir[pdir_idx] == NULL) {
        /*
         * If we just allocated a new page table, hook it in
         * the pdir.
         */
        pmap->pm_pdir[pdir_idx] = ptbl;
    }
    pte = &(pmap->pm_pdir[pdir_idx][ptbl_idx]);
    pte->rpn = VM_PAGE_TO_PHYS(m) & ~PTE_PA_MASK;
    pte->flags |= (PTE_VALID | flags);

    mtx_unlock_spin(&tlbivax_mutex);
}

/* Return the pa for the given pmap/va. */
static vm_paddr_t
pte_vatopa(mmu_t mmu, pmap_t pmap, vm_offset_t va)
{
    vm_paddr_t pa = 0;
    pte_t *pte;

    pte = pte_find(mmu, pmap, va);
    if ((pte != NULL) && PTE_ISVALID(pte))
        pa = (PTE_PA(pte) | (va & PTE_PA_MASK));
    return (pa);
}
/* Get a pointer to a PTE in a page table. */
static pte_t *
pte_find(mmu_t mmu, pmap_t pmap, vm_offset_t va)
{
    unsigned int pdir_idx = PDIR_IDX(va);
    unsigned int ptbl_idx = PTBL_IDX(va);

    KASSERT((pmap != NULL), ("pte_find: invalid pmap"));

    if (pmap->pm_pdir[pdir_idx])
        return (&(pmap->pm_pdir[pdir_idx][ptbl_idx]));

    return (NULL);
}

/**************************************************************************/
/* PMAP related */
/**************************************************************************/

/*
 * This is called during e500_init, before the system is really initialized.
 */
static void
mmu_booke_bootstrap(mmu_t mmu, vm_offset_t kernelstart, vm_offset_t kernelend)
{
    vm_offset_t phys_kernelend;
    struct mem_region *mp, *mp1;
    int cnt, i, j;
    u_int s, e, sz;
    u_int phys_avail_count;
    vm_size_t physsz, hwphyssz, kstack0_sz;
    vm_offset_t kernel_pdir, kstack0;
    vm_paddr_t kstack0_phys;

    debugf("mmu_booke_bootstrap: entered\n");

    /* Initialize invalidation mutex */
    mtx_init(&tlbivax_mutex, "tlbivax", NULL, MTX_SPIN);

    /* Read TLB0 size and associativity. */
    tlb0_get_tlbconf();

    /* Align kernel start and end address (kernel image). */
    kernelstart = trunc_page(kernelstart);
    kernelend = round_page(kernelend);

    /* Allocate space for the message buffer. */
    msgbufp = (struct msgbuf *)kernelend;
    kernelend += MSGBUF_SIZE;
    debugf(" msgbufp at 0x%08x end = 0x%08x\n", (uint32_t)msgbufp,
        kernelend);

    kernelend = round_page(kernelend);

    /* Allocate space for ptbl_bufs. */
    ptbl_bufs = (struct ptbl_buf *)kernelend;
    kernelend += sizeof(struct ptbl_buf) * PTBL_BUFS;
    debugf(" ptbl_bufs at 0x%08x end = 0x%08x\n", (uint32_t)ptbl_bufs,
        kernelend);

    kernelend = round_page(kernelend);
    /* Allocate PTE tables for kernel KVA. */
    kernel_pdir = kernelend;
    kernel_ptbls = (VM_MAX_KERNEL_ADDRESS - VM_MIN_KERNEL_ADDRESS +
        PDIR_SIZE - 1) / PDIR_SIZE;
    kernelend += kernel_ptbls * PTBL_PAGES * PAGE_SIZE;
    debugf(" kernel ptbls: %d\n", kernel_ptbls);
    debugf(" kernel pdir at 0x%08x end = 0x%08x\n", kernel_pdir, kernelend);

    debugf(" kernelend: 0x%08x\n", kernelend);
    if (kernelend - kernelstart > 0x1000000) {
        kernelend = (kernelend + 0x3fffff) & ~0x3fffff;
        tlb1_mapin_region(kernelstart + 0x1000000,
            kernload + 0x1000000, kernelend - kernelstart - 0x1000000);
    } else
        kernelend = (kernelend + 0xffffff) & ~0xffffff;

    debugf(" updated kernelend: 0x%08x\n", kernelend);

    /*
     * Clear the structures - note we can only do it safely after the
     * possible additional TLB1 translations are in place (above) so that
     * all range up to the currently calculated 'kernelend' is covered.
     */
    memset((void *)ptbl_bufs, 0, sizeof(struct ptbl_buf) * PTBL_BUFS);
    memset((void *)kernel_pdir, 0, kernel_ptbls * PTBL_PAGES * PAGE_SIZE);

    /*******************************************************/
    /* Set the start and end of kva. */
    /*******************************************************/
    virtual_avail = kernelend;
    virtual_end = VM_MAX_KERNEL_ADDRESS;

    /* Allocate KVA space for page zero/copy operations. */
    zero_page_va = virtual_avail;
    virtual_avail += PAGE_SIZE;
    zero_page_idle_va = virtual_avail;
    virtual_avail += PAGE_SIZE;
    copy_page_src_va = virtual_avail;
    virtual_avail += PAGE_SIZE;
    copy_page_dst_va = virtual_avail;
    virtual_avail += PAGE_SIZE;
    debugf("zero_page_va = 0x%08x\n", zero_page_va);
    debugf("zero_page_idle_va = 0x%08x\n", zero_page_idle_va);
    debugf("copy_page_src_va = 0x%08x\n", copy_page_src_va);
    debugf("copy_page_dst_va = 0x%08x\n", copy_page_dst_va);

    /* Initialize page zero/copy mutexes. */
    mtx_init(&zero_page_mutex, "mmu_booke_zero_page", NULL, MTX_DEF);
    mtx_init(&copy_page_mutex, "mmu_booke_copy_page", NULL, MTX_DEF);

    /* Allocate KVA space for ptbl bufs. */
    ptbl_buf_pool_vabase = virtual_avail;
    virtual_avail += PTBL_BUFS * PTBL_PAGES * PAGE_SIZE;
    debugf("ptbl_buf_pool_vabase = 0x%08x end = 0x%08x\n",
        ptbl_buf_pool_vabase, virtual_avail);
    /* Calculate corresponding physical addresses for the kernel region. */
    phys_kernelend = kernload + (kernelend - kernelstart);
    debugf("kernel image and allocated data:\n");
    debugf(" kernload    = 0x%08x\n", kernload);
    debugf(" kernelstart = 0x%08x\n", kernelstart);
    debugf(" kernelend   = 0x%08x\n", kernelend);
    debugf(" kernel size = 0x%08x\n", kernelend - kernelstart);

    if (sizeof(phys_avail) / sizeof(phys_avail[0]) < availmem_regions_sz)
        panic("mmu_booke_bootstrap: phys_avail too small");

    /*
     * Remove kernel physical address range from avail regions list. Page
     * align all regions. Non-page aligned memory isn't very interesting
     * to us. Also, sort the entries for ascending addresses.
     */
    sz = 0;
    cnt = availmem_regions_sz;
    debugf("processing avail regions:\n");
    for (mp = availmem_regions; mp->mr_size; mp++) {
        s = mp->mr_start;
        e = mp->mr_start + mp->mr_size;
        debugf(" %08x-%08x -> ", s, e);
        /* Check whether this region holds all of the kernel. */
        if (s < kernload && e > phys_kernelend) {
            availmem_regions[cnt].mr_start = phys_kernelend;
            availmem_regions[cnt++].mr_size = e - phys_kernelend;
            e = kernload;
        }
        /* Look whether this region starts within the kernel. */
        if (s >= kernload && s < phys_kernelend) {
            if (e <= phys_kernelend)
                goto empty;
            s = phys_kernelend;
        }
        /* Now look whether this region ends within the kernel. */
        if (e > kernload && e <= phys_kernelend) {
            if (s >= kernload)
                goto empty;
            e = kernload;
        }
        /* Now page align the start and size of the region. */
        s = round_page(s);
        e = trunc_page(e);
        if (e < s)
            e = s;
        sz = e - s;
        debugf("%08x-%08x = %x\n", s, e, sz);

        /* Check whether some memory is left here. */
        if (sz == 0) {
        empty:
            memmove(mp, mp + 1,
                (cnt - (mp - availmem_regions)) * sizeof(*mp));
            cnt--;
            mp--;
            continue;
        }

        /* Do an insertion sort. */
        for (mp1 = availmem_regions; mp1 < mp; mp1++)
            if (s < mp1->mr_start)
                break;
        if (mp1 < mp) {
            memmove(mp1 + 1, mp1, (char *)mp - (char *)mp1);
            mp1->mr_start = s;
            mp1->mr_size = sz;
        } else {
            mp->mr_start = s;
            mp->mr_size = sz;
        }
    }
    availmem_regions_sz = cnt;
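    /*
     * Example of the splitting above (hypothetical addresses): a region
     * 0x0000_0000 - 0x2000_0000 that fully contains a kernel occupying
     * 0x0100_0000 - 0x0180_0000 is split into 0x0000_0000 - 0x0100_0000
     * (trimmed in place) and 0x0180_0000 - 0x2000_0000 (appended at the
     * end of the list, then moved into place by the insertion sort).
     */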
    /*******************************************************/
    /* Steal physical memory for kernel stack from the end */
    /* of the first avail region                           */
    /*******************************************************/
    kstack0_sz = KSTACK_PAGES * PAGE_SIZE;
    kstack0_phys = availmem_regions[0].mr_start +
        availmem_regions[0].mr_size;
    kstack0_phys -= kstack0_sz;
    availmem_regions[0].mr_size -= kstack0_sz;

    /*******************************************************/
    /* Fill in phys_avail table, based on availmem_regions */
    /*******************************************************/
    phys_avail_count = 0;
    physsz = 0;
    hwphyssz = 0;
    TUNABLE_ULONG_FETCH("hw.physmem", (u_long *) &hwphyssz);

    debugf("fill in phys_avail:\n");
    for (i = 0, j = 0; i < availmem_regions_sz; i++, j += 2) {

        debugf(" region: 0x%08x - 0x%08x (0x%08x)\n",
            availmem_regions[i].mr_start,
            availmem_regions[i].mr_start +
                availmem_regions[i].mr_size,
            availmem_regions[i].mr_size);

        if (hwphyssz != 0 &&
            (physsz + availmem_regions[i].mr_size) >= hwphyssz) {
            debugf(" hw.physmem adjust\n");
            if (physsz < hwphyssz) {
                phys_avail[j] = availmem_regions[i].mr_start;
                phys_avail[j + 1] =
                    availmem_regions[i].mr_start +
                    hwphyssz - physsz;
                physsz = hwphyssz;
                phys_avail_count++;
            }
            break;
        }

        phys_avail[j] = availmem_regions[i].mr_start;
        phys_avail[j + 1] = availmem_regions[i].mr_start +
            availmem_regions[i].mr_size;
        phys_avail_count++;
        physsz += availmem_regions[i].mr_size;
    }
    physmem = btoc(physsz);
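    /*
     * phys_avail[] follows the usual FreeBSD convention of (start, end)
     * address pairs terminated by a pair of zero entries; the scan below
     * relies on that to find the last available physical address.
     */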
    /* Calculate the last available physical address. */
    for (i = 0; phys_avail[i + 2] != 0; i += 2)
        ;
    Maxmem = powerpc_btop(phys_avail[i + 1]);

    debugf("Maxmem = 0x%08lx\n", Maxmem);
    debugf("phys_avail_count = %d\n", phys_avail_count);
    debugf("physsz = 0x%08x physmem = %ld (0x%08lx)\n", physsz, physmem,
        physmem);

    /*******************************************************/
    /* Initialize (statically allocated) kernel pmap. */
    /*******************************************************/
    PMAP_LOCK_INIT(kernel_pmap);
    kptbl_min = VM_MIN_KERNEL_ADDRESS / PDIR_SIZE;

    debugf("kernel_pmap = 0x%08x\n", (uint32_t)kernel_pmap);
    debugf("kptbl_min = %d, kernel_ptbls = %d\n", kptbl_min, kernel_ptbls);
    debugf("kernel pdir range: 0x%08x - 0x%08x\n",
        kptbl_min * PDIR_SIZE, (kptbl_min + kernel_ptbls) * PDIR_SIZE - 1);

    /* Initialize kernel pdir */
    for (i = 0; i < kernel_ptbls; i++)
        kernel_pmap->pm_pdir[kptbl_min + i] =
            (pte_t *)(kernel_pdir + (i * PAGE_SIZE * PTBL_PAGES));

    for (i = 0; i < MAXCPU; i++) {
        kernel_pmap->pm_tid[i] = TID_KERNEL;

        /* Initialize each CPU's tidbusy entry 0 with kernel_pmap */
        tidbusy[i][0] = kernel_pmap;
    }
    /* Mark kernel_pmap active on all CPUs */
    kernel_pmap->pm_active = ~0;

    /*******************************************************/
    /* Final setup */
    /*******************************************************/

    /* Enter kstack0 into kernel map, provide guard page */
    kstack0 = virtual_avail + KSTACK_GUARD_PAGES * PAGE_SIZE;
    thread0.td_kstack = kstack0;
    thread0.td_kstack_pages = KSTACK_PAGES;

    debugf("kstack_sz = 0x%08x\n", kstack0_sz);
    debugf("kstack0_phys at 0x%08x - 0x%08x\n",
        kstack0_phys, kstack0_phys + kstack0_sz);
    debugf("kstack0 at 0x%08x - 0x%08x\n", kstack0, kstack0 + kstack0_sz);

    virtual_avail += KSTACK_GUARD_PAGES * PAGE_SIZE + kstack0_sz;
    for (i = 0; i < KSTACK_PAGES; i++) {
        mmu_booke_kenter(mmu, kstack0, kstack0_phys);
        kstack0 += PAGE_SIZE;
        kstack0_phys += PAGE_SIZE;
    }

    debugf("virtual_avail = %08x\n", virtual_avail);
    debugf("virtual_end   = %08x\n", virtual_end);

    debugf("mmu_booke_bootstrap: exit\n");
}

/*
 * Get the physical page address for the given pmap/virtual address.
 */
static vm_paddr_t
mmu_booke_extract(mmu_t mmu, pmap_t pmap, vm_offset_t va)
{
    vm_paddr_t pa;

    PMAP_LOCK(pmap);
    pa = pte_vatopa(mmu, pmap, va);
    PMAP_UNLOCK(pmap);

    return (pa);
}

/*
 * Extract the physical page address associated with the given
 * kernel virtual address.
 */
static vm_paddr_t
mmu_booke_kextract(mmu_t mmu, vm_offset_t va)
{

    return (pte_vatopa(mmu, kernel_pmap, va));
}
/*
 * Initialize the pmap module.
 * Called by vm_init, to initialize any structures that the pmap
 * system needs to map virtual memory.
 */
static void
mmu_booke_init(mmu_t mmu)
{
    int shpgperproc = PMAP_SHPGPERPROC;

    /*
     * Initialize the address space (zone) for the pv entries. Set a
     * high water mark so that the system can recover from excessive
     * numbers of pv entries.
     */
    pvzone = uma_zcreate("PV ENTRY", sizeof(struct pv_entry), NULL, NULL,
        NULL, NULL, UMA_ALIGN_PTR, UMA_ZONE_VM | UMA_ZONE_NOFREE);

    TUNABLE_INT_FETCH("vm.pmap.shpgperproc", &shpgperproc);
    pv_entry_max = shpgperproc * maxproc + cnt.v_page_count;

    TUNABLE_INT_FETCH("vm.pmap.pv_entries", &pv_entry_max);
    pv_entry_high_water = 9 * (pv_entry_max / 10);

    uma_zone_set_obj(pvzone, &pvzone_obj, pv_entry_max);

    /* Pre-fill pvzone with initial number of pv entries. */
    uma_prealloc(pvzone, PV_ENTRY_ZONE_MIN);

    /* Initialize ptbl allocation. */
    ptbl_init();
}

/*
 * Map a list of wired pages into kernel virtual address space. This is
 * intended for temporary mappings which do not need page modification or
 * references recorded. Existing mappings in the region are overwritten.
 */
static void
mmu_booke_qenter(mmu_t mmu, vm_offset_t sva, vm_page_t *m, int count)
{
    vm_offset_t va;

    va = sva;
    while (count-- > 0) {
        mmu_booke_kenter(mmu, va, VM_PAGE_TO_PHYS(*m));
        va += PAGE_SIZE;
        m++;
    }
}

/*
 * Remove page mappings from kernel virtual address space. Intended for
 * temporary mappings entered by mmu_booke_qenter.
 */
static void
mmu_booke_qremove(mmu_t mmu, vm_offset_t sva, int count)
{
    vm_offset_t va;

    va = sva;
    while (count-- > 0) {
        mmu_booke_kremove(mmu, va);
        va += PAGE_SIZE;
    }
}
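/*
 * A minimal usage sketch for the pair above: map a batch of wired pages,
 * operate on them through the KVA window, then unmap:
 *
 *	mmu_booke_qenter(mmu, sva, pages, npages);
 *	... access [sva, sva + npages * PAGE_SIZE) ...
 *	mmu_booke_qremove(mmu, sva, npages);
 */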
/*
 * Map a wired page into kernel virtual address space.
 */
static void
mmu_booke_kenter(mmu_t mmu, vm_offset_t va, vm_offset_t pa)
{
    unsigned int pdir_idx = PDIR_IDX(va);
    unsigned int ptbl_idx = PTBL_IDX(va);
    uint32_t flags;
    pte_t *pte;

    KASSERT(((va >= VM_MIN_KERNEL_ADDRESS) &&
        (va <= VM_MAX_KERNEL_ADDRESS)), ("mmu_booke_kenter: invalid va"));

#if 0
    /* assume IO mapping, set I, G bits */
    flags = (PTE_G | PTE_I | PTE_FAKE);

    /* if mapping is within system memory, do not set I, G bits */
    for (i = 0; i < totalmem_regions_sz; i++) {
        if ((pa >= totalmem_regions[i].mr_start) &&
            (pa < (totalmem_regions[i].mr_start +
            totalmem_regions[i].mr_size))) {
            flags &= ~(PTE_I | PTE_G | PTE_FAKE);
            break;
        }
    }
#else
    flags = 0;
#endif

    flags |= (PTE_SR | PTE_SW | PTE_SX | PTE_WIRED | PTE_VALID);
    flags |= PTE_M;

    pte = &(kernel_pmap->pm_pdir[pdir_idx][ptbl_idx]);

    mtx_lock_spin(&tlbivax_mutex);

    if (PTE_ISVALID(pte)) {

        CTR1(KTR_PMAP, "%s: replacing entry!", __func__);

        /* Flush entry from TLB0 */
        tlb0_flush_entry(va);
    }

    pte->rpn = pa & ~PTE_PA_MASK;
    pte->flags = flags;

    //debugf("mmu_booke_kenter: pdir_idx = %d ptbl_idx = %d va=0x%08x "
    //    "pa=0x%08x rpn=0x%08x flags=0x%08x\n",
    //    pdir_idx, ptbl_idx, va, pa, pte->rpn, pte->flags);

    /* Flush the real memory from the instruction cache. */
    if ((flags & (PTE_I | PTE_G)) == 0) {
        __syncicache((void *)va, PAGE_SIZE);
    }

    mtx_unlock_spin(&tlbivax_mutex);
}

/*
 * Remove a page from kernel page table.
 */
static void
mmu_booke_kremove(mmu_t mmu, vm_offset_t va)
{
    unsigned int pdir_idx = PDIR_IDX(va);
    unsigned int ptbl_idx = PTBL_IDX(va);
    pte_t *pte;

//  CTR2(KTR_PMAP,("%s: s (va = 0x%08x)\n", __func__, va));

    KASSERT(((va >= VM_MIN_KERNEL_ADDRESS) &&
        (va <= VM_MAX_KERNEL_ADDRESS)),
        ("mmu_booke_kremove: invalid va"));

    pte = &(kernel_pmap->pm_pdir[pdir_idx][ptbl_idx]);

    if (!PTE_ISVALID(pte)) {

        CTR1(KTR_PMAP, "%s: invalid pte", __func__);

        return;
    }

    mtx_lock_spin(&tlbivax_mutex);

    /* Invalidate entry in TLB0, update PTE. */
    tlb0_flush_entry(va);
    pte->flags = 0;
    pte->rpn = 0;

    mtx_unlock_spin(&tlbivax_mutex);
}
/*
 * Initialize pmap associated with process 0.
 */
static void
mmu_booke_pinit0(mmu_t mmu, pmap_t pmap)
{

    mmu_booke_pinit(mmu, pmap);
    PCPU_SET(curpmap, pmap);
}

/*
 * Initialize a preallocated and zeroed pmap structure,
 * such as one in a vmspace structure.
 */
static void
mmu_booke_pinit(mmu_t mmu, pmap_t pmap)
{
    int i;

    CTR4(KTR_PMAP, "%s: pmap = %p, proc %d '%s'", __func__, pmap,
        curthread->td_proc->p_pid, curthread->td_proc->p_comm);

    KASSERT((pmap != kernel_pmap), ("pmap_pinit: initializing kernel_pmap"));

    PMAP_LOCK_INIT(pmap);
    for (i = 0; i < MAXCPU; i++)
        pmap->pm_tid[i] = TID_NONE;
    pmap->pm_active = 0;
    bzero(&pmap->pm_stats, sizeof(pmap->pm_stats));
    bzero(&pmap->pm_pdir, sizeof(pte_t *) * PDIR_NENTRIES);
    TAILQ_INIT(&pmap->pm_ptbl_list);
}

/*
 * Release any resources held by the given physical map.
 * Called when a pmap initialized by mmu_booke_pinit is being released.
 * Should only be called if the map contains no valid mappings.
 */
static void
mmu_booke_release(mmu_t mmu, pmap_t pmap)
{

    printf("mmu_booke_release: s\n");

    KASSERT(pmap->pm_stats.resident_count == 0,
        ("pmap_release: pmap resident count %ld != 0",
        pmap->pm_stats.resident_count));

    PMAP_LOCK_DESTROY(pmap);
}

#if 0
/* Not needed, kernel page tables are statically allocated. */
void
mmu_booke_growkernel(vm_offset_t maxkvaddr)
{
}
#endif

/*
 * Insert the given physical page at the specified virtual address in the
 * target physical map with the protection requested. If specified the page
 * will be wired down.
 */
1421176771Sraj */
1422176771Srajstatic void
1423176771Srajmmu_booke_enter(mmu_t mmu, pmap_t pmap, vm_offset_t va, vm_page_t m,
1424176771Sraj vm_prot_t prot, boolean_t wired)
1425176771Sraj{
1426187151Sraj
1427176771Sraj vm_page_lock_queues();
1428176771Sraj PMAP_LOCK(pmap);
1429176771Sraj mmu_booke_enter_locked(mmu, pmap, va, m, prot, wired);
1430176771Sraj vm_page_unlock_queues();
1431176771Sraj PMAP_UNLOCK(pmap);
1432176771Sraj}
1433176771Sraj
1434176771Srajstatic void
1435176771Srajmmu_booke_enter_locked(mmu_t mmu, pmap_t pmap, vm_offset_t va, vm_page_t m,
1436176771Sraj vm_prot_t prot, boolean_t wired)
1437176771Sraj{
1438176771Sraj pte_t *pte;
1439176771Sraj vm_paddr_t pa;
1440187151Sraj uint32_t flags;
1441176771Sraj int su, sync;
1442176771Sraj
1443176771Sraj pa = VM_PAGE_TO_PHYS(m);
1444176771Sraj su = (pmap == kernel_pmap);
1445176771Sraj sync = 0;
1446176771Sraj
1447176771Sraj //debugf("mmu_booke_enter_locked: s (pmap=0x%08x su=%d tid=%d m=0x%08x va=0x%08x "
1448176771Sraj // "pa=0x%08x prot=0x%08x wired=%d)\n",
1449176771Sraj // (u_int32_t)pmap, su, pmap->pm_tid,
1450176771Sraj // (u_int32_t)m, va, pa, prot, wired);
1451176771Sraj
1452176771Sraj if (su) {
1453187151Sraj KASSERT(((va >= virtual_avail) &&
1454187151Sraj (va <= VM_MAX_KERNEL_ADDRESS)),
1455187151Sraj ("mmu_booke_enter_locked: kernel pmap, non kernel va"));
1456176771Sraj } else {
1457176771Sraj KASSERT((va <= VM_MAXUSER_ADDRESS),
1458187151Sraj ("mmu_booke_enter_locked: user pmap, non user va"));
1459176771Sraj }
1460176771Sraj
1461176771Sraj PMAP_LOCK_ASSERT(pmap, MA_OWNED);
1462176771Sraj
1463176771Sraj /*
1464176771Sraj * If there is an existing mapping, and the physical address has not
1465176771Sraj * changed, it must be a protection or wiring change.
1466176771Sraj */
1467176771Sraj if (((pte = pte_find(mmu, pmap, va)) != NULL) &&
1468176771Sraj (PTE_ISVALID(pte)) && (PTE_PA(pte) == pa)) {
1469187149Sraj
1470187149Sraj /*
1471187149Sraj * Before actually updating pte->flags we calculate and
1472187149Sraj * prepare its new value in a helper var.
1473187149Sraj */
1474187149Sraj flags = pte->flags;
1475187149Sraj flags &= ~(PTE_UW | PTE_UX | PTE_SW | PTE_SX | PTE_MODIFIED);
1476176771Sraj
1477176771Sraj /* Wiring change, just update stats. */
1478176771Sraj if (wired) {
1479176771Sraj if (!PTE_ISWIRED(pte)) {
1480187149Sraj flags |= PTE_WIRED;
1481176771Sraj pmap->pm_stats.wired_count++;
1482176771Sraj }
1483176771Sraj } else {
1484176771Sraj if (PTE_ISWIRED(pte)) {
1485187149Sraj flags &= ~PTE_WIRED;
1486176771Sraj pmap->pm_stats.wired_count--;
1487176771Sraj }
1488176771Sraj }
1489176771Sraj
1490176771Sraj if (prot & VM_PROT_WRITE) {
1491176771Sraj /* Add write permissions. */
1492187149Sraj flags |= PTE_SW;
1493176771Sraj if (!su)
1494187149Sraj flags |= PTE_UW;
1495176771Sraj } else {
1496176771Sraj /* Handle modified pages: sense the modify status. */
1497187149Sraj
1498187149Sraj /*
1499187149Sraj * The PTE_MODIFIED flag could have been set by the
1500187149Sraj * underlying TLB misses since we last read it (above),
1501187149Sraj * and other CPUs could have updated it too, so we check
1502187149Sraj * the PTE directly rather than rely on that saved local
1503187149Sraj * flags copy.
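 *
 * (Editor's illustration, not in the original: between our earlier
 * read of pte->flags and this point, another core's TLB miss handler
 * may have done the equivalent of "pte->flags |= PTE_MODIFIED" for a
 * store through this mapping, which the stale local copy in 'flags'
 * would not reflect.)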
1504187149Sraj */
1505178626Smarcel if (PTE_ISMODIFIED(pte))
1506178626Smarcel vm_page_dirty(m);
1507176771Sraj }
1508176771Sraj
1509176771Sraj if (prot & VM_PROT_EXECUTE) {
1510187149Sraj flags |= PTE_SX;
1511176771Sraj if (!su)
1512187149Sraj flags |= PTE_UX;
1513176771Sraj
1514187149Sraj /*
1515187149Sraj * Check existing flags for execute permissions: if we
1516187149Sraj * are turning execute permissions on, icache should
1517187149Sraj * be flushed.
1518187149Sraj */
1519176771Sraj if ((flags & (PTE_UX | PTE_SX)) == 0)
1520176771Sraj sync++;
1521176771Sraj }
1522176771Sraj
1523187149Sraj flags &= ~PTE_REFERENCED;
1524187149Sraj
1525187149Sraj /*
1526187149Sraj * The new flags value is all calculated -- only now actually
1527187149Sraj * update the PTE.
1528187149Sraj */
1529187149Sraj mtx_lock_spin(&tlbivax_mutex);
1530187149Sraj
1531187149Sraj tlb0_flush_entry(va);
1532187149Sraj pte->flags = flags;
1533187149Sraj
1534187149Sraj mtx_unlock_spin(&tlbivax_mutex);
1535187149Sraj
1536176771Sraj } else {
1537176771Sraj /*
1538187149Sraj * If there is an existing mapping, but it's for a different
1539176771Sraj * physical address, pte_enter() will delete the old mapping.
1540176771Sraj */
1541176771Sraj //if ((pte != NULL) && PTE_ISVALID(pte))
1542176771Sraj // debugf("mmu_booke_enter_locked: replace\n");
1543176771Sraj //else
1544176771Sraj // debugf("mmu_booke_enter_locked: new\n");
1545176771Sraj
1546176771Sraj /* Now set up the flags and install the new mapping. */
1547176771Sraj flags = (PTE_SR | PTE_VALID);
1548187149Sraj flags |= PTE_M;
1549176771Sraj
1550176771Sraj if (!su)
1551176771Sraj flags |= PTE_UR;
1552176771Sraj
1553176771Sraj if (prot & VM_PROT_WRITE) {
1554176771Sraj flags |= PTE_SW;
1555176771Sraj if (!su)
1556176771Sraj flags |= PTE_UW;
1557176771Sraj }
1558176771Sraj
1559176771Sraj if (prot & VM_PROT_EXECUTE) {
1560176771Sraj flags |= PTE_SX;
1561176771Sraj if (!su)
1562176771Sraj flags |= PTE_UX;
1563176771Sraj }
1564176771Sraj
1565176771Sraj /* If it's wired, update stats. */
1566176771Sraj if (wired) {
1567176771Sraj pmap->pm_stats.wired_count++;
1568176771Sraj flags |= PTE_WIRED;
1569176771Sraj }
1570176771Sraj
1571176771Sraj pte_enter(mmu, pmap, m, va, flags);
1572176771Sraj
1573176771Sraj /* Flush the real memory from the instruction cache. */
1574176771Sraj if (prot & VM_PROT_EXECUTE)
1575176771Sraj sync++;
1576176771Sraj }
1577176771Sraj
1578176771Sraj if (sync && (su || pmap == PCPU_GET(curpmap))) {
1579176771Sraj __syncicache((void *)va, PAGE_SIZE);
1580176771Sraj sync = 0;
1581176771Sraj }
1582176771Sraj
1583176771Sraj if (sync) {
1584176771Sraj /* Create a temporary mapping. */
1585176771Sraj pmap = PCPU_GET(curpmap);
1586176771Sraj
1587176771Sraj va = 0;
1588176771Sraj pte = pte_find(mmu, pmap, va);
1589176771Sraj KASSERT(pte == NULL, ("%s:%d", __func__, __LINE__));
1590176771Sraj
1591187149Sraj flags = PTE_SR | PTE_VALID | PTE_UR | PTE_M;
1592187149Sraj
1593176771Sraj pte_enter(mmu, pmap, m, va, flags);
1594176771Sraj __syncicache((void *)va, PAGE_SIZE);
1595176771Sraj pte_remove(mmu, pmap, va, PTBL_UNHOLD);
1596176771Sraj }
1597176771Sraj}
1598176771Sraj
1599176771Sraj/*
1600176771Sraj * Maps a sequence of resident pages belonging to the same object.
1601176771Sraj * The sequence begins with the given page m_start. This page is
1602176771Sraj * mapped at the given virtual address start.
Each subsequent page is 1603176771Sraj * mapped at a virtual address that is offset from start by the same 1604176771Sraj * amount as the page is offset from m_start within the object. The 1605176771Sraj * last page in the sequence is the page with the largest offset from 1606176771Sraj * m_start that can be mapped at a virtual address less than the given 1607176771Sraj * virtual address end. Not every virtual page between start and end 1608176771Sraj * is mapped; only those for which a resident page exists with the 1609176771Sraj * corresponding offset from m_start are mapped. 1610176771Sraj */ 1611176771Srajstatic void 1612176771Srajmmu_booke_enter_object(mmu_t mmu, pmap_t pmap, vm_offset_t start, 1613176771Sraj vm_offset_t end, vm_page_t m_start, vm_prot_t prot) 1614176771Sraj{ 1615176771Sraj vm_page_t m; 1616176771Sraj vm_pindex_t diff, psize; 1617176771Sraj 1618176771Sraj psize = atop(end - start); 1619176771Sraj m = m_start; 1620176771Sraj PMAP_LOCK(pmap); 1621176771Sraj while (m != NULL && (diff = m->pindex - m_start->pindex) < psize) { 1622187151Sraj mmu_booke_enter_locked(mmu, pmap, start + ptoa(diff), m, 1623187151Sraj prot & (VM_PROT_READ | VM_PROT_EXECUTE), FALSE); 1624176771Sraj m = TAILQ_NEXT(m, listq); 1625176771Sraj } 1626176771Sraj PMAP_UNLOCK(pmap); 1627176771Sraj} 1628176771Sraj 1629176771Srajstatic void 1630176771Srajmmu_booke_enter_quick(mmu_t mmu, pmap_t pmap, vm_offset_t va, vm_page_t m, 1631176771Sraj vm_prot_t prot) 1632176771Sraj{ 1633176771Sraj 1634176771Sraj PMAP_LOCK(pmap); 1635176771Sraj mmu_booke_enter_locked(mmu, pmap, va, m, 1636176771Sraj prot & (VM_PROT_READ | VM_PROT_EXECUTE), FALSE); 1637176771Sraj PMAP_UNLOCK(pmap); 1638176771Sraj} 1639176771Sraj 1640176771Sraj/* 1641176771Sraj * Remove the given range of addresses from the specified map. 1642176771Sraj * 1643176771Sraj * It is assumed that the start and end are properly rounded to the page size. 
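 *
 * (Editor's usage sketch, assuming the page-aligned arguments stated
 * above; illustrative only, not in the original:
 *
 *	mmu_booke_remove(mmu, pmap, trunc_page(addr),
 *	    round_page(addr + len));
 *
 * Every page in [va, endva) that has a valid PTE is then torn down
 * via pte_remove() under the pmap and page queues locks.)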
1644176771Sraj */
1645176771Srajstatic void
1646176771Srajmmu_booke_remove(mmu_t mmu, pmap_t pmap, vm_offset_t va, vm_offset_t endva)
1647176771Sraj{
1648176771Sraj pte_t *pte;
1649187151Sraj uint8_t hold_flag;
1650176771Sraj
1651176771Sraj int su = (pmap == kernel_pmap);
1652176771Sraj
1653176771Sraj //debugf("mmu_booke_remove: s (su = %d pmap=0x%08x tid=%d va=0x%08x endva=0x%08x)\n",
1654176771Sraj // su, (u_int32_t)pmap, pmap->pm_tid, va, endva);
1655176771Sraj
1656176771Sraj if (su) {
1657187151Sraj KASSERT(((va >= virtual_avail) &&
1658187151Sraj (va <= VM_MAX_KERNEL_ADDRESS)),
1659187151Sraj ("mmu_booke_remove: kernel pmap, non kernel va"));
1660176771Sraj } else {
1661176771Sraj KASSERT((va <= VM_MAXUSER_ADDRESS),
1662187151Sraj ("mmu_booke_remove: user pmap, non user va"));
1663176771Sraj }
1664176771Sraj
1665176771Sraj if (PMAP_REMOVE_DONE(pmap)) {
1666176771Sraj //debugf("mmu_booke_remove: e (empty)\n");
1667176771Sraj return;
1668176771Sraj }
1669176771Sraj
1670176771Sraj hold_flag = PTBL_HOLD_FLAG(pmap);
1671176771Sraj //debugf("mmu_booke_remove: hold_flag = %d\n", hold_flag);
1672176771Sraj
1673176771Sraj vm_page_lock_queues();
1674176771Sraj PMAP_LOCK(pmap);
1675176771Sraj for (; va < endva; va += PAGE_SIZE) {
1676176771Sraj pte = pte_find(mmu, pmap, va);
1677187149Sraj if ((pte != NULL) && PTE_ISVALID(pte))
1678176771Sraj pte_remove(mmu, pmap, va, hold_flag);
1679176771Sraj }
1680176771Sraj PMAP_UNLOCK(pmap);
1681176771Sraj vm_page_unlock_queues();
1682176771Sraj
1683176771Sraj //debugf("mmu_booke_remove: e\n");
1684176771Sraj}
1685176771Sraj
1686176771Sraj/*
1687176771Sraj * Remove physical page from all pmaps in which it resides.
1688176771Sraj */
1689176771Srajstatic void
1690176771Srajmmu_booke_remove_all(mmu_t mmu, vm_page_t m)
1691176771Sraj{
1692176771Sraj pv_entry_t pv, pvn;
1693187151Sraj uint8_t hold_flag;
1694176771Sraj
1695176771Sraj mtx_assert(&vm_page_queue_mtx, MA_OWNED);
1696176771Sraj
1697176771Sraj for (pv = TAILQ_FIRST(&m->md.pv_list); pv != NULL; pv = pvn) {
1698176771Sraj pvn = TAILQ_NEXT(pv, pv_link);
1699176771Sraj
1700176771Sraj PMAP_LOCK(pv->pv_pmap);
1701176771Sraj hold_flag = PTBL_HOLD_FLAG(pv->pv_pmap);
1702176771Sraj pte_remove(mmu, pv->pv_pmap, pv->pv_va, hold_flag);
1703176771Sraj PMAP_UNLOCK(pv->pv_pmap);
1704176771Sraj }
1705176771Sraj vm_page_flag_clear(m, PG_WRITEABLE);
1706176771Sraj}
1707176771Sraj
1708176771Sraj/*
1709176771Sraj * Map a range of physical addresses into kernel virtual address space.
1710176771Sraj */
1711176771Srajstatic vm_offset_t
1712176771Srajmmu_booke_map(mmu_t mmu, vm_offset_t *virt, vm_offset_t pa_start,
1713176771Sraj vm_offset_t pa_end, int prot)
1714176771Sraj{
1715176771Sraj vm_offset_t sva = *virt;
1716176771Sraj vm_offset_t va = sva;
1717176771Sraj
1718176771Sraj //debugf("mmu_booke_map: s (sva = 0x%08x pa_start = 0x%08x pa_end = 0x%08x)\n",
1719176771Sraj // sva, pa_start, pa_end);
1720176771Sraj
1721176771Sraj while (pa_start < pa_end) {
1722176771Sraj mmu_booke_kenter(mmu, va, pa_start);
1723176771Sraj va += PAGE_SIZE;
1724176771Sraj pa_start += PAGE_SIZE;
1725176771Sraj }
1726176771Sraj *virt = va;
1727176771Sraj
1728176771Sraj //debugf("mmu_booke_map: e (va = 0x%08x)\n", va);
1729176771Sraj return (sva);
1730176771Sraj}
1731176771Sraj
1732176771Sraj/*
1733176771Sraj * The pmap must be activated before its address space can be accessed in any
1734176771Sraj * way.
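 *
 * (Editor's note: activation amounts to publishing the pmap on this CPU
 * and loading its TID into the PID0 SPR, roughly
 *
 *	PCPU_SET(curpmap, pmap);
 *	if (pmap->pm_tid[PCPU_GET(cpuid)] == TID_NONE)
 *		tid_alloc(pmap);
 *	mtspr(SPR_PID0, pmap->pm_tid[PCPU_GET(cpuid)]);
 *
 * which mmu_booke_activate() below performs under sched_lock.)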
1735176771Sraj */ 1736176771Srajstatic void 1737176771Srajmmu_booke_activate(mmu_t mmu, struct thread *td) 1738176771Sraj{ 1739176771Sraj pmap_t pmap; 1740176771Sraj 1741176771Sraj pmap = &td->td_proc->p_vmspace->vm_pmap; 1742176771Sraj 1743187149Sraj CTR5(KTR_PMAP, "%s: s (td = %p, proc = '%s', id = %d, pmap = 0x%08x)", 1744187149Sraj __func__, td, td->td_proc->p_comm, td->td_proc->p_pid, pmap); 1745176771Sraj 1746176771Sraj KASSERT((pmap != kernel_pmap), ("mmu_booke_activate: kernel_pmap!")); 1747176771Sraj 1748176771Sraj mtx_lock_spin(&sched_lock); 1749176771Sraj 1750187149Sraj atomic_set_int(&pmap->pm_active, PCPU_GET(cpumask)); 1751176771Sraj PCPU_SET(curpmap, pmap); 1752187149Sraj 1753187149Sraj if (pmap->pm_tid[PCPU_GET(cpuid)] == TID_NONE) 1754176771Sraj tid_alloc(pmap); 1755176771Sraj 1756176771Sraj /* Load PID0 register with pmap tid value. */ 1757187149Sraj mtspr(SPR_PID0, pmap->pm_tid[PCPU_GET(cpuid)]); 1758187149Sraj __asm __volatile("isync"); 1759176771Sraj 1760176771Sraj mtx_unlock_spin(&sched_lock); 1761176771Sraj 1762187149Sraj CTR3(KTR_PMAP, "%s: e (tid = %d for '%s')", __func__, 1763187149Sraj pmap->pm_tid[PCPU_GET(cpuid)], td->td_proc->p_comm); 1764176771Sraj} 1765176771Sraj 1766176771Sraj/* 1767176771Sraj * Deactivate the specified process's address space. 1768176771Sraj */ 1769176771Srajstatic void 1770176771Srajmmu_booke_deactivate(mmu_t mmu, struct thread *td) 1771176771Sraj{ 1772176771Sraj pmap_t pmap; 1773176771Sraj 1774176771Sraj pmap = &td->td_proc->p_vmspace->vm_pmap; 1775187149Sraj 1776187149Sraj CTR5(KTR_PMAP, "%s: td=%p, proc = '%s', id = %d, pmap = 0x%08x", 1777187149Sraj __func__, td, td->td_proc->p_comm, td->td_proc->p_pid, pmap); 1778187149Sraj 1779187149Sraj atomic_clear_int(&pmap->pm_active, PCPU_GET(cpumask)); 1780176771Sraj PCPU_SET(curpmap, NULL); 1781176771Sraj} 1782176771Sraj 1783176771Sraj/* 1784176771Sraj * Copy the range specified by src_addr/len 1785176771Sraj * from the source map to the range dst_addr/len 1786176771Sraj * in the destination map. 1787176771Sraj * 1788176771Sraj * This routine is only advisory and need not do anything. 1789176771Sraj */ 1790176771Srajstatic void 1791176771Srajmmu_booke_copy(pmap_t dst_pmap, pmap_t src_pmap, vm_offset_t dst_addr, 1792176771Sraj vm_size_t len, vm_offset_t src_addr) 1793176771Sraj{ 1794176771Sraj 1795176771Sraj} 1796176771Sraj 1797176771Sraj/* 1798176771Sraj * Set the physical protection on the specified range of this map as requested. 1799176771Sraj */ 1800176771Srajstatic void 1801176771Srajmmu_booke_protect(mmu_t mmu, pmap_t pmap, vm_offset_t sva, vm_offset_t eva, 1802176771Sraj vm_prot_t prot) 1803176771Sraj{ 1804176771Sraj vm_offset_t va; 1805176771Sraj vm_page_t m; 1806176771Sraj pte_t *pte; 1807176771Sraj 1808176771Sraj if ((prot & VM_PROT_READ) == VM_PROT_NONE) { 1809176771Sraj mmu_booke_remove(mmu, pmap, sva, eva); 1810176771Sraj return; 1811176771Sraj } 1812176771Sraj 1813176771Sraj if (prot & VM_PROT_WRITE) 1814176771Sraj return; 1815176771Sraj 1816176771Sraj vm_page_lock_queues(); 1817176771Sraj PMAP_LOCK(pmap); 1818176771Sraj for (va = sva; va < eva; va += PAGE_SIZE) { 1819176771Sraj if ((pte = pte_find(mmu, pmap, va)) != NULL) { 1820176771Sraj if (PTE_ISVALID(pte)) { 1821176771Sraj m = PHYS_TO_VM_PAGE(PTE_PA(pte)); 1822176771Sraj 1823187149Sraj mtx_lock_spin(&tlbivax_mutex); 1824187149Sraj 1825176771Sraj /* Handle modified pages. */ 1826178626Smarcel if (PTE_ISMODIFIED(pte)) 1827178626Smarcel vm_page_dirty(m); 1828176771Sraj 1829176771Sraj /* Referenced pages. 
*/ 1830176771Sraj if (PTE_ISREFERENCED(pte)) 1831176771Sraj vm_page_flag_set(m, PG_REFERENCED); 1832176771Sraj 1833187149Sraj tlb0_flush_entry(va); 1834176771Sraj pte->flags &= ~(PTE_UW | PTE_SW | PTE_MODIFIED | 1835176771Sraj PTE_REFERENCED); 1836187149Sraj 1837187149Sraj mtx_unlock_spin(&tlbivax_mutex); 1838176771Sraj } 1839176771Sraj } 1840176771Sraj } 1841176771Sraj PMAP_UNLOCK(pmap); 1842176771Sraj vm_page_unlock_queues(); 1843176771Sraj} 1844176771Sraj 1845176771Sraj/* 1846176771Sraj * Clear the write and modified bits in each of the given page's mappings. 1847176771Sraj */ 1848176771Srajstatic void 1849176771Srajmmu_booke_remove_write(mmu_t mmu, vm_page_t m) 1850176771Sraj{ 1851176771Sraj pv_entry_t pv; 1852176771Sraj pte_t *pte; 1853176771Sraj 1854176771Sraj mtx_assert(&vm_page_queue_mtx, MA_OWNED); 1855176771Sraj if ((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) != 0 || 1856176771Sraj (m->flags & PG_WRITEABLE) == 0) 1857176771Sraj return; 1858176771Sraj 1859176771Sraj TAILQ_FOREACH(pv, &m->md.pv_list, pv_link) { 1860176771Sraj PMAP_LOCK(pv->pv_pmap); 1861176771Sraj if ((pte = pte_find(mmu, pv->pv_pmap, pv->pv_va)) != NULL) { 1862176771Sraj if (PTE_ISVALID(pte)) { 1863176771Sraj m = PHYS_TO_VM_PAGE(PTE_PA(pte)); 1864176771Sraj 1865187149Sraj mtx_lock_spin(&tlbivax_mutex); 1866187149Sraj 1867176771Sraj /* Handle modified pages. */ 1868178626Smarcel if (PTE_ISMODIFIED(pte)) 1869178626Smarcel vm_page_dirty(m); 1870176771Sraj 1871176771Sraj /* Referenced pages. */ 1872176771Sraj if (PTE_ISREFERENCED(pte)) 1873176771Sraj vm_page_flag_set(m, PG_REFERENCED); 1874176771Sraj 1875176771Sraj /* Flush mapping from TLB0. */ 1876176771Sraj pte->flags &= ~(PTE_UW | PTE_SW | PTE_MODIFIED | 1877176771Sraj PTE_REFERENCED); 1878187149Sraj 1879187149Sraj mtx_unlock_spin(&tlbivax_mutex); 1880176771Sraj } 1881176771Sraj } 1882176771Sraj PMAP_UNLOCK(pv->pv_pmap); 1883176771Sraj } 1884176771Sraj vm_page_flag_clear(m, PG_WRITEABLE); 1885176771Sraj} 1886176771Sraj 1887176771Srajstatic boolean_t 1888176771Srajmmu_booke_page_executable(mmu_t mmu, vm_page_t m) 1889176771Sraj{ 1890176771Sraj pv_entry_t pv; 1891176771Sraj pte_t *pte; 1892176771Sraj boolean_t executable; 1893176771Sraj 1894176771Sraj executable = FALSE; 1895176771Sraj TAILQ_FOREACH(pv, &m->md.pv_list, pv_link) { 1896176771Sraj PMAP_LOCK(pv->pv_pmap); 1897176771Sraj pte = pte_find(mmu, pv->pv_pmap, pv->pv_va); 1898176771Sraj if (pte != NULL && PTE_ISVALID(pte) && (pte->flags & PTE_UX)) 1899176771Sraj executable = TRUE; 1900176771Sraj PMAP_UNLOCK(pv->pv_pmap); 1901176771Sraj if (executable) 1902176771Sraj break; 1903176771Sraj } 1904176771Sraj 1905176771Sraj return (executable); 1906176771Sraj} 1907176771Sraj 1908176771Sraj/* 1909176771Sraj * Atomically extract and hold the physical page with the given 1910176771Sraj * pmap and virtual address pair if that mapping permits the given 1911176771Sraj * protection. 
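 *
 * (Editor's usage sketch, not in the original: a caller that needs the
 * backing page of a user buffer held for writing might do
 *
 *	m = mmu_booke_extract_and_hold(mmu, pmap, va, VM_PROT_WRITE);
 *	if (m == NULL)
 *		... no valid mapping, or it lacks the write bit;
 *		    fault the page in and retry ...
 *
 * and later drop the hold with vm_page_unhold(m).)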
1912176771Sraj */
1913176771Srajstatic vm_page_t
1914176771Srajmmu_booke_extract_and_hold(mmu_t mmu, pmap_t pmap, vm_offset_t va,
1915176771Sraj vm_prot_t prot)
1916176771Sraj{
1917176771Sraj pte_t *pte;
1918176771Sraj vm_page_t m;
1919187151Sraj uint32_t pte_wbit;
1920176771Sraj
1921176771Sraj m = NULL;
1922176771Sraj vm_page_lock_queues();
1923176771Sraj PMAP_LOCK(pmap);
1924187151Sraj
1925176771Sraj pte = pte_find(mmu, pmap, va);
1926176771Sraj if ((pte != NULL) && PTE_ISVALID(pte)) {
1927176771Sraj if (pmap == kernel_pmap)
1928176771Sraj pte_wbit = PTE_SW;
1929176771Sraj else
1930176771Sraj pte_wbit = PTE_UW;
1931176771Sraj
1932176771Sraj if ((pte->flags & pte_wbit) || ((prot & VM_PROT_WRITE) == 0)) {
1933176771Sraj m = PHYS_TO_VM_PAGE(PTE_PA(pte));
1934176771Sraj vm_page_hold(m);
1935176771Sraj }
1936176771Sraj }
1937176771Sraj
1938176771Sraj vm_page_unlock_queues();
1939176771Sraj PMAP_UNLOCK(pmap);
1940176771Sraj return (m);
1941176771Sraj}
1942176771Sraj
1943176771Sraj/*
1944176771Sraj * Initialize a vm_page's machine-dependent fields.
1945176771Sraj */
1946176771Srajstatic void
1947176771Srajmmu_booke_page_init(mmu_t mmu, vm_page_t m)
1948176771Sraj{
1949176771Sraj
1950176771Sraj TAILQ_INIT(&m->md.pv_list);
1951176771Sraj}
1952176771Sraj
1953176771Sraj/*
1954176771Sraj * mmu_booke_zero_page_area zeros the specified hardware page by
1955176771Sraj * mapping it into virtual memory and using bzero to clear
1956176771Sraj * its contents.
1957176771Sraj *
1958176771Sraj * off and size must reside within a single page.
1959176771Sraj */
1960176771Srajstatic void
1961176771Srajmmu_booke_zero_page_area(mmu_t mmu, vm_page_t m, int off, int size)
1962176771Sraj{
1963176771Sraj vm_offset_t va;
1964176771Sraj
1965187151Sraj /* XXX KASSERT off and size are within a single page? */
1966176771Sraj
1967176771Sraj mtx_lock(&zero_page_mutex);
1968176771Sraj va = zero_page_va;
1969176771Sraj
1970176771Sraj mmu_booke_kenter(mmu, va, VM_PAGE_TO_PHYS(m));
1971176771Sraj bzero((caddr_t)va + off, size);
1972176771Sraj mmu_booke_kremove(mmu, va);
1973176771Sraj
1974176771Sraj mtx_unlock(&zero_page_mutex);
1975176771Sraj}
1976176771Sraj
1977176771Sraj/*
1978176771Sraj * mmu_booke_zero_page zeros the specified hardware page.
1979176771Sraj */
1980176771Srajstatic void
1981176771Srajmmu_booke_zero_page(mmu_t mmu, vm_page_t m)
1982176771Sraj{
1983176771Sraj
1984176771Sraj mmu_booke_zero_page_area(mmu, m, 0, PAGE_SIZE);
1985176771Sraj}
1986176771Sraj
1987176771Sraj/*
1988176771Sraj * mmu_booke_copy_page copies the specified (machine independent) page by
1989176771Sraj * mapping the page into virtual memory and using memcpy to copy the page,
1990176771Sraj * one machine dependent page at a time.
1991176771Sraj */
1992176771Srajstatic void
1993176771Srajmmu_booke_copy_page(mmu_t mmu, vm_page_t sm, vm_page_t dm)
1994176771Sraj{
1995176771Sraj vm_offset_t sva, dva;
1996176771Sraj
1997176771Sraj sva = copy_page_src_va;
1998176771Sraj dva = copy_page_dst_va;
1999176771Sraj
2000187149Sraj mtx_lock(&copy_page_mutex);
2001176771Sraj mmu_booke_kenter(mmu, sva, VM_PAGE_TO_PHYS(sm));
2002176771Sraj mmu_booke_kenter(mmu, dva, VM_PAGE_TO_PHYS(dm));
2003176771Sraj memcpy((caddr_t)dva, (caddr_t)sva, PAGE_SIZE);
2004176771Sraj mmu_booke_kremove(mmu, dva);
2005176771Sraj mmu_booke_kremove(mmu, sva);
2006176771Sraj mtx_unlock(&copy_page_mutex);
2007176771Sraj}
2008176771Sraj
2009176771Sraj#if 0
2010176771Sraj/*
2011176771Sraj * Remove all pages from the specified address space; this aids process exit
2012176771Sraj * speeds.
This is much faster than mmu_booke_remove in the case of running 2013176771Sraj * down an entire address space. Only works for the current pmap. 2014176771Sraj */ 2015176771Srajvoid 2016176771Srajmmu_booke_remove_pages(pmap_t pmap) 2017176771Sraj{ 2018176771Sraj} 2019176771Sraj#endif 2020176771Sraj 2021176771Sraj/* 2022176771Sraj * mmu_booke_zero_page_idle zeros the specified hardware page by mapping it 2023176771Sraj * into virtual memory and using bzero to clear its contents. This is intended 2024176771Sraj * to be called from the vm_pagezero process only and outside of Giant. No 2025176771Sraj * lock is required. 2026176771Sraj */ 2027176771Srajstatic void 2028176771Srajmmu_booke_zero_page_idle(mmu_t mmu, vm_page_t m) 2029176771Sraj{ 2030176771Sraj vm_offset_t va; 2031176771Sraj 2032176771Sraj va = zero_page_idle_va; 2033176771Sraj mmu_booke_kenter(mmu, va, VM_PAGE_TO_PHYS(m)); 2034176771Sraj bzero((caddr_t)va, PAGE_SIZE); 2035176771Sraj mmu_booke_kremove(mmu, va); 2036176771Sraj} 2037176771Sraj 2038176771Sraj/* 2039176771Sraj * Return whether or not the specified physical page was modified 2040176771Sraj * in any of physical maps. 2041176771Sraj */ 2042176771Srajstatic boolean_t 2043176771Srajmmu_booke_is_modified(mmu_t mmu, vm_page_t m) 2044176771Sraj{ 2045176771Sraj pte_t *pte; 2046176771Sraj pv_entry_t pv; 2047176771Sraj 2048176771Sraj mtx_assert(&vm_page_queue_mtx, MA_OWNED); 2049176771Sraj if ((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) != 0) 2050176771Sraj return (FALSE); 2051176771Sraj 2052176771Sraj TAILQ_FOREACH(pv, &m->md.pv_list, pv_link) { 2053176771Sraj PMAP_LOCK(pv->pv_pmap); 2054176771Sraj if ((pte = pte_find(mmu, pv->pv_pmap, pv->pv_va)) != NULL) { 2055176771Sraj if (!PTE_ISVALID(pte)) 2056176771Sraj goto make_sure_to_unlock; 2057176771Sraj 2058176771Sraj if (PTE_ISMODIFIED(pte)) { 2059176771Sraj PMAP_UNLOCK(pv->pv_pmap); 2060176771Sraj return (TRUE); 2061176771Sraj } 2062176771Sraj } 2063176771Srajmake_sure_to_unlock: 2064176771Sraj PMAP_UNLOCK(pv->pv_pmap); 2065176771Sraj } 2066176771Sraj return (FALSE); 2067176771Sraj} 2068176771Sraj 2069176771Sraj/* 2070187151Sraj * Return whether or not the specified virtual address is eligible 2071176771Sraj * for prefault. 2072176771Sraj */ 2073176771Srajstatic boolean_t 2074176771Srajmmu_booke_is_prefaultable(mmu_t mmu, pmap_t pmap, vm_offset_t addr) 2075176771Sraj{ 2076176771Sraj 2077176771Sraj return (FALSE); 2078176771Sraj} 2079176771Sraj 2080176771Sraj/* 2081176771Sraj * Clear the modify bits on the specified physical page. 
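 *
 * (Editor's note: Book-E has no hardware changed bit, so "modified" is
 * tracked in software. Clearing it below strips PTE_SW/PTE_UW together
 * with PTE_MODIFIED and invalidates the TLB0 entry; the next store then
 * takes a TLB miss, and the miss path can set PTE_MODIFIED again,
 * re-arming the tracking.)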
2082176771Sraj */ 2083176771Srajstatic void 2084176771Srajmmu_booke_clear_modify(mmu_t mmu, vm_page_t m) 2085176771Sraj{ 2086176771Sraj pte_t *pte; 2087176771Sraj pv_entry_t pv; 2088176771Sraj 2089176771Sraj mtx_assert(&vm_page_queue_mtx, MA_OWNED); 2090176771Sraj if ((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) != 0) 2091176771Sraj return; 2092176771Sraj 2093176771Sraj TAILQ_FOREACH(pv, &m->md.pv_list, pv_link) { 2094176771Sraj PMAP_LOCK(pv->pv_pmap); 2095176771Sraj if ((pte = pte_find(mmu, pv->pv_pmap, pv->pv_va)) != NULL) { 2096176771Sraj if (!PTE_ISVALID(pte)) 2097176771Sraj goto make_sure_to_unlock; 2098176771Sraj 2099187149Sraj mtx_lock_spin(&tlbivax_mutex); 2100187149Sraj 2101176771Sraj if (pte->flags & (PTE_SW | PTE_UW | PTE_MODIFIED)) { 2102187149Sraj tlb0_flush_entry(pv->pv_va); 2103176771Sraj pte->flags &= ~(PTE_SW | PTE_UW | PTE_MODIFIED | 2104176771Sraj PTE_REFERENCED); 2105176771Sraj } 2106187149Sraj 2107187149Sraj mtx_unlock_spin(&tlbivax_mutex); 2108176771Sraj } 2109176771Srajmake_sure_to_unlock: 2110176771Sraj PMAP_UNLOCK(pv->pv_pmap); 2111176771Sraj } 2112176771Sraj} 2113176771Sraj 2114176771Sraj/* 2115176771Sraj * Return a count of reference bits for a page, clearing those bits. 2116176771Sraj * It is not necessary for every reference bit to be cleared, but it 2117176771Sraj * is necessary that 0 only be returned when there are truly no 2118176771Sraj * reference bits set. 2119176771Sraj * 2120176771Sraj * XXX: The exact number of bits to check and clear is a matter that 2121176771Sraj * should be tested and standardized at some point in the future for 2122176771Sraj * optimal aging of shared pages. 2123176771Sraj */ 2124176771Srajstatic int 2125176771Srajmmu_booke_ts_referenced(mmu_t mmu, vm_page_t m) 2126176771Sraj{ 2127176771Sraj pte_t *pte; 2128176771Sraj pv_entry_t pv; 2129176771Sraj int count; 2130176771Sraj 2131176771Sraj mtx_assert(&vm_page_queue_mtx, MA_OWNED); 2132176771Sraj if ((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) != 0) 2133176771Sraj return (0); 2134176771Sraj 2135176771Sraj count = 0; 2136176771Sraj TAILQ_FOREACH(pv, &m->md.pv_list, pv_link) { 2137176771Sraj PMAP_LOCK(pv->pv_pmap); 2138176771Sraj if ((pte = pte_find(mmu, pv->pv_pmap, pv->pv_va)) != NULL) { 2139176771Sraj if (!PTE_ISVALID(pte)) 2140176771Sraj goto make_sure_to_unlock; 2141176771Sraj 2142176771Sraj if (PTE_ISREFERENCED(pte)) { 2143187149Sraj mtx_lock_spin(&tlbivax_mutex); 2144187149Sraj 2145187149Sraj tlb0_flush_entry(pv->pv_va); 2146176771Sraj pte->flags &= ~PTE_REFERENCED; 2147176771Sraj 2148187149Sraj mtx_unlock_spin(&tlbivax_mutex); 2149187149Sraj 2150176771Sraj if (++count > 4) { 2151176771Sraj PMAP_UNLOCK(pv->pv_pmap); 2152176771Sraj break; 2153176771Sraj } 2154176771Sraj } 2155176771Sraj } 2156176771Srajmake_sure_to_unlock: 2157176771Sraj PMAP_UNLOCK(pv->pv_pmap); 2158176771Sraj } 2159176771Sraj return (count); 2160176771Sraj} 2161176771Sraj 2162176771Sraj/* 2163176771Sraj * Clear the reference bit on the specified physical page. 
2164176771Sraj */
2165176771Srajstatic void
2166176771Srajmmu_booke_clear_reference(mmu_t mmu, vm_page_t m)
2167176771Sraj{
2168176771Sraj pte_t *pte;
2169176771Sraj pv_entry_t pv;
2170176771Sraj
2171176771Sraj mtx_assert(&vm_page_queue_mtx, MA_OWNED);
2172176771Sraj if ((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) != 0)
2173176771Sraj return;
2174176771Sraj
2175176771Sraj TAILQ_FOREACH(pv, &m->md.pv_list, pv_link) {
2176176771Sraj PMAP_LOCK(pv->pv_pmap);
2177176771Sraj if ((pte = pte_find(mmu, pv->pv_pmap, pv->pv_va)) != NULL) {
2178176771Sraj if (!PTE_ISVALID(pte))
2179176771Sraj goto make_sure_to_unlock;
2180176771Sraj
2181176771Sraj if (PTE_ISREFERENCED(pte)) {
2182187149Sraj mtx_lock_spin(&tlbivax_mutex);
2183187149Sraj
2184187149Sraj tlb0_flush_entry(pv->pv_va);
2185176771Sraj pte->flags &= ~PTE_REFERENCED;
2186187149Sraj
2187187149Sraj mtx_unlock_spin(&tlbivax_mutex);
2188176771Sraj }
2189176771Sraj }
2190176771Srajmake_sure_to_unlock:
2191176771Sraj PMAP_UNLOCK(pv->pv_pmap);
2192176771Sraj }
2193176771Sraj}
2194176771Sraj
2195176771Sraj/*
2196176771Sraj * Change wiring attribute for a map/virtual-address pair.
2197176771Sraj */
2198176771Srajstatic void
2199176771Srajmmu_booke_change_wiring(mmu_t mmu, pmap_t pmap, vm_offset_t va, boolean_t wired)
2200176771Sraj{
2201176771Sraj pte_t *pte;
2202176771Sraj
2203176771Sraj PMAP_LOCK(pmap);
2204176771Sraj if ((pte = pte_find(mmu, pmap, va)) != NULL) {
2205176771Sraj if (wired) {
2206176771Sraj if (!PTE_ISWIRED(pte)) {
2207176771Sraj pte->flags |= PTE_WIRED;
2208176771Sraj pmap->pm_stats.wired_count++;
2209176771Sraj }
2210176771Sraj } else {
2211176771Sraj if (PTE_ISWIRED(pte)) {
2212176771Sraj pte->flags &= ~PTE_WIRED;
2213176771Sraj pmap->pm_stats.wired_count--;
2214176771Sraj }
2215176771Sraj }
2216176771Sraj }
2217176771Sraj PMAP_UNLOCK(pmap);
2218176771Sraj}
2219176771Sraj
2220176771Sraj/*
2221176771Sraj * Return true if the pmap's pv is one of the first 16 pvs linked to from this
2222176771Sraj * page. This count may be changed upwards or downwards in the future; it is
2223176771Sraj * only necessary that true be returned for a small subset of pmaps for proper
2224176771Sraj * page aging.
2225176771Sraj */
2226176771Srajstatic boolean_t
2227176771Srajmmu_booke_page_exists_quick(mmu_t mmu, pmap_t pmap, vm_page_t m)
2228176771Sraj{
2229176771Sraj pv_entry_t pv;
2230176771Sraj int loops;
2231176771Sraj
2232176771Sraj mtx_assert(&vm_page_queue_mtx, MA_OWNED);
2233176771Sraj if ((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) != 0)
2234176771Sraj return (FALSE);
2235176771Sraj
2236176771Sraj loops = 0;
2237176771Sraj TAILQ_FOREACH(pv, &m->md.pv_list, pv_link) {
2238176771Sraj if (pv->pv_pmap == pmap)
2239176771Sraj return (TRUE);
2240176771Sraj
2241176771Sraj if (++loops >= 16)
2242176771Sraj break;
2243176771Sraj }
2244176771Sraj return (FALSE);
2245176771Sraj}
2246176771Sraj
2247176771Sraj/*
2248176771Sraj * Return the number of managed mappings to the given physical page that are
2249176771Sraj * wired.
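 *
 * (Editor's note: the count below comes from walking the page's pv list
 * and testing PTE_ISWIRED() on each owning pmap's PTE for pv->pv_va;
 * e.g. a page mapped and wired by two address spaces yields 2.)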
2250176771Sraj */ 2251176771Srajstatic int 2252176771Srajmmu_booke_page_wired_mappings(mmu_t mmu, vm_page_t m) 2253176771Sraj{ 2254176771Sraj pv_entry_t pv; 2255176771Sraj pte_t *pte; 2256176771Sraj int count = 0; 2257176771Sraj 2258176771Sraj if ((m->flags & PG_FICTITIOUS) != 0) 2259176771Sraj return (count); 2260176771Sraj mtx_assert(&vm_page_queue_mtx, MA_OWNED); 2261176771Sraj 2262176771Sraj TAILQ_FOREACH(pv, &m->md.pv_list, pv_link) { 2263176771Sraj PMAP_LOCK(pv->pv_pmap); 2264176771Sraj if ((pte = pte_find(mmu, pv->pv_pmap, pv->pv_va)) != NULL) 2265176771Sraj if (PTE_ISVALID(pte) && PTE_ISWIRED(pte)) 2266176771Sraj count++; 2267176771Sraj PMAP_UNLOCK(pv->pv_pmap); 2268176771Sraj } 2269176771Sraj 2270176771Sraj return (count); 2271176771Sraj} 2272176771Sraj 2273176771Srajstatic int 2274176771Srajmmu_booke_dev_direct_mapped(mmu_t mmu, vm_offset_t pa, vm_size_t size) 2275176771Sraj{ 2276176771Sraj int i; 2277176771Sraj vm_offset_t va; 2278176771Sraj 2279176771Sraj /* 2280176771Sraj * This currently does not work for entries that 2281176771Sraj * overlap TLB1 entries. 2282176771Sraj */ 2283176771Sraj for (i = 0; i < tlb1_idx; i ++) { 2284176771Sraj if (tlb1_iomapped(i, pa, size, &va) == 0) 2285176771Sraj return (0); 2286176771Sraj } 2287176771Sraj 2288176771Sraj return (EFAULT); 2289176771Sraj} 2290176771Sraj 2291176771Sraj/* 2292176771Sraj * Map a set of physical memory pages into the kernel virtual address space. 2293176771Sraj * Return a pointer to where it is mapped. This routine is intended to be used 2294176771Sraj * for mapping device memory, NOT real memory. 2295176771Sraj */ 2296176771Srajstatic void * 2297176771Srajmmu_booke_mapdev(mmu_t mmu, vm_offset_t pa, vm_size_t size) 2298176771Sraj{ 2299184244Smarcel void *res; 2300176771Sraj uintptr_t va; 2301184244Smarcel vm_size_t sz; 2302176771Sraj 2303176771Sraj va = (pa >= 0x80000000) ? pa : (0xe2000000 + pa); 2304184244Smarcel res = (void *)va; 2305184244Smarcel 2306184244Smarcel do { 2307184244Smarcel sz = 1 << (ilog2(size) & ~1); 2308184244Smarcel if (bootverbose) 2309184244Smarcel printf("Wiring VA=%x to PA=%x (size=%x), " 2310184244Smarcel "using TLB1[%d]\n", va, pa, sz, tlb1_idx); 2311184244Smarcel tlb1_set_entry(va, pa, sz, _TLB_ENTRY_IO); 2312184244Smarcel size -= sz; 2313184244Smarcel pa += sz; 2314184244Smarcel va += sz; 2315184244Smarcel } while (size > 0); 2316184244Smarcel 2317184244Smarcel return (res); 2318176771Sraj} 2319176771Sraj 2320176771Sraj/* 2321176771Sraj * 'Unmap' a range mapped by mmu_booke_mapdev(). 2322176771Sraj */ 2323176771Srajstatic void 2324176771Srajmmu_booke_unmapdev(mmu_t mmu, vm_offset_t va, vm_size_t size) 2325176771Sraj{ 2326176771Sraj vm_offset_t base, offset; 2327176771Sraj 2328176771Sraj /* 2329176771Sraj * Unmap only if this is inside kernel virtual space. 2330176771Sraj */ 2331176771Sraj if ((va >= VM_MIN_KERNEL_ADDRESS) && (va <= VM_MAX_KERNEL_ADDRESS)) { 2332176771Sraj base = trunc_page(va); 2333176771Sraj offset = va & PAGE_MASK; 2334176771Sraj size = roundup(offset + size, PAGE_SIZE); 2335176771Sraj kmem_free(kernel_map, base, size); 2336176771Sraj } 2337176771Sraj} 2338176771Sraj 2339176771Sraj/* 2340187151Sraj * mmu_booke_object_init_pt preloads the ptes for a given object into the 2341187151Sraj * specified pmap. This eliminates the blast of soft faults on process startup 2342187151Sraj * and immediately after an mmap. 
2343176771Sraj */ 2344176771Srajstatic void 2345176771Srajmmu_booke_object_init_pt(mmu_t mmu, pmap_t pmap, vm_offset_t addr, 2346176771Sraj vm_object_t object, vm_pindex_t pindex, vm_size_t size) 2347176771Sraj{ 2348187151Sraj 2349176771Sraj VM_OBJECT_LOCK_ASSERT(object, MA_OWNED); 2350176771Sraj KASSERT(object->type == OBJT_DEVICE, 2351176771Sraj ("mmu_booke_object_init_pt: non-device object")); 2352176771Sraj} 2353176771Sraj 2354176771Sraj/* 2355176771Sraj * Perform the pmap work for mincore. 2356176771Sraj */ 2357176771Srajstatic int 2358176771Srajmmu_booke_mincore(mmu_t mmu, pmap_t pmap, vm_offset_t addr) 2359176771Sraj{ 2360176771Sraj 2361176771Sraj TODO; 2362176771Sraj return (0); 2363176771Sraj} 2364176771Sraj 2365176771Sraj/**************************************************************************/ 2366176771Sraj/* TID handling */ 2367176771Sraj/**************************************************************************/ 2368176771Sraj 2369176771Sraj/* 2370176771Sraj * Allocate a TID. If necessary, steal one from someone else. 2371176771Sraj * The new TID is flushed from the TLB before returning. 2372176771Sraj */ 2373176771Srajstatic tlbtid_t 2374176771Srajtid_alloc(pmap_t pmap) 2375176771Sraj{ 2376176771Sraj tlbtid_t tid; 2377187149Sraj int thiscpu; 2378176771Sraj 2379187149Sraj KASSERT((pmap != kernel_pmap), ("tid_alloc: kernel pmap")); 2380176771Sraj 2381187149Sraj CTR2(KTR_PMAP, "%s: s (pmap = %p)", __func__, pmap); 2382176771Sraj 2383187149Sraj thiscpu = PCPU_GET(cpuid); 2384176771Sraj 2385187149Sraj tid = PCPU_GET(tid_next); 2386187149Sraj if (tid > TID_MAX) 2387187149Sraj tid = TID_MIN; 2388187149Sraj PCPU_SET(tid_next, tid + 1); 2389176771Sraj 2390187149Sraj /* If we are stealing TID then clear the relevant pmap's field */ 2391187149Sraj if (tidbusy[thiscpu][tid] != NULL) { 2392176771Sraj 2393187149Sraj CTR2(KTR_PMAP, "%s: warning: stealing tid %d", __func__, tid); 2394187149Sraj 2395187149Sraj tidbusy[thiscpu][tid]->pm_tid[thiscpu] = TID_NONE; 2396176771Sraj 2397187149Sraj /* Flush all entries from TLB0 matching this TID. */ 2398187149Sraj tid_flush(tid); 2399176771Sraj } 2400176771Sraj 2401187149Sraj tidbusy[thiscpu][tid] = pmap; 2402187149Sraj pmap->pm_tid[thiscpu] = tid; 2403187149Sraj __asm __volatile("msync; isync"); 2404176771Sraj 2405187149Sraj CTR3(KTR_PMAP, "%s: e (%02d next = %02d)", __func__, tid, 2406187149Sraj PCPU_GET(tid_next)); 2407176771Sraj 2408176771Sraj return (tid); 2409176771Sraj} 2410176771Sraj 2411176771Sraj/**************************************************************************/ 2412176771Sraj/* TLB0 handling */ 2413176771Sraj/**************************************************************************/ 2414176771Sraj 2415176771Srajstatic void 2416187149Srajtlb_print_entry(int i, uint32_t mas1, uint32_t mas2, uint32_t mas3, 2417187149Sraj uint32_t mas7) 2418176771Sraj{ 2419176771Sraj int as; 2420176771Sraj char desc[3]; 2421176771Sraj tlbtid_t tid; 2422176771Sraj vm_size_t size; 2423176771Sraj unsigned int tsize; 2424176771Sraj 2425176771Sraj desc[2] = '\0'; 2426176771Sraj if (mas1 & MAS1_VALID) 2427176771Sraj desc[0] = 'V'; 2428176771Sraj else 2429176771Sraj desc[0] = ' '; 2430176771Sraj 2431176771Sraj if (mas1 & MAS1_IPROT) 2432176771Sraj desc[1] = 'P'; 2433176771Sraj else 2434176771Sraj desc[1] = ' '; 2435176771Sraj 2436187149Sraj as = (mas1 & MAS1_TS_MASK) ? 
1 : 0; 2437176771Sraj tid = MAS1_GETTID(mas1); 2438176771Sraj 2439176771Sraj tsize = (mas1 & MAS1_TSIZE_MASK) >> MAS1_TSIZE_SHIFT; 2440176771Sraj size = 0; 2441176771Sraj if (tsize) 2442176771Sraj size = tsize2size(tsize); 2443176771Sraj 2444176771Sraj debugf("%3d: (%s) [AS=%d] " 2445176771Sraj "sz = 0x%08x tsz = %d tid = %d mas1 = 0x%08x " 2446176771Sraj "mas2(va) = 0x%08x mas3(pa) = 0x%08x mas7 = 0x%08x\n", 2447176771Sraj i, desc, as, size, tsize, tid, mas1, mas2, mas3, mas7); 2448176771Sraj} 2449176771Sraj 2450176771Sraj/* Convert TLB0 va and way number to tlb0[] table index. */ 2451176771Srajstatic inline unsigned int 2452176771Srajtlb0_tableidx(vm_offset_t va, unsigned int way) 2453176771Sraj{ 2454176771Sraj unsigned int idx; 2455176771Sraj 2456176771Sraj idx = (way * TLB0_ENTRIES_PER_WAY); 2457176771Sraj idx += (va & MAS2_TLB0_ENTRY_IDX_MASK) >> MAS2_TLB0_ENTRY_IDX_SHIFT; 2458176771Sraj return (idx); 2459176771Sraj} 2460176771Sraj 2461176771Sraj/* 2462187149Sraj * Invalidate TLB0 entry. 2463176771Sraj */ 2464187149Srajstatic inline void 2465187149Srajtlb0_flush_entry(vm_offset_t va) 2466176771Sraj{ 2467176771Sraj 2468187149Sraj CTR2(KTR_PMAP, "%s: s va=0x%08x", __func__, va); 2469176771Sraj 2470187149Sraj mtx_assert(&tlbivax_mutex, MA_OWNED); 2471176771Sraj 2472187149Sraj __asm __volatile("tlbivax 0, %0" :: "r"(va & MAS2_EPN_MASK)); 2473187149Sraj __asm __volatile("isync; msync"); 2474187149Sraj __asm __volatile("tlbsync; msync"); 2475176771Sraj 2476187149Sraj CTR1(KTR_PMAP, "%s: e", __func__); 2477176771Sraj} 2478176771Sraj 2479176771Sraj/* Print out contents of the MAS registers for each TLB0 entry */ 2480187149Srajvoid 2481176771Srajtlb0_print_tlbentries(void) 2482176771Sraj{ 2483187149Sraj uint32_t mas0, mas1, mas2, mas3, mas7; 2484176771Sraj int entryidx, way, idx; 2485176771Sraj 2486176771Sraj debugf("TLB0 entries:\n"); 2487187149Sraj for (way = 0; way < TLB0_WAYS; way ++) 2488176771Sraj for (entryidx = 0; entryidx < TLB0_ENTRIES_PER_WAY; entryidx++) { 2489176771Sraj 2490176771Sraj mas0 = MAS0_TLBSEL(0) | MAS0_ESEL(way); 2491176771Sraj mtspr(SPR_MAS0, mas0); 2492187149Sraj __asm __volatile("isync"); 2493176771Sraj 2494176771Sraj mas2 = entryidx << MAS2_TLB0_ENTRY_IDX_SHIFT; 2495176771Sraj mtspr(SPR_MAS2, mas2); 2496176771Sraj 2497187149Sraj __asm __volatile("isync; tlbre"); 2498176771Sraj 2499176771Sraj mas1 = mfspr(SPR_MAS1); 2500176771Sraj mas2 = mfspr(SPR_MAS2); 2501176771Sraj mas3 = mfspr(SPR_MAS3); 2502176771Sraj mas7 = mfspr(SPR_MAS7); 2503176771Sraj 2504176771Sraj idx = tlb0_tableidx(mas2, way); 2505176771Sraj tlb_print_entry(idx, mas1, mas2, mas3, mas7); 2506176771Sraj } 2507176771Sraj} 2508176771Sraj 2509176771Sraj/**************************************************************************/ 2510176771Sraj/* TLB1 handling */ 2511176771Sraj/**************************************************************************/ 2512187149Sraj 2513176771Sraj/* 2514187149Sraj * TLB1 mapping notes: 2515187149Sraj * 2516187149Sraj * TLB1[0] CCSRBAR 2517187149Sraj * TLB1[1] Kernel text and data. 2518187149Sraj * TLB1[2-15] Additional kernel text and data mappings (if required), PCI 2519187149Sraj * windows, other devices mappings. 2520187149Sraj */ 2521187149Sraj 2522187149Sraj/* 2523176771Sraj * Write given entry to TLB1 hardware. 2524176771Sraj * Use 32 bit pa, clear 4 high-order bits of RPN (mas7). 
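 *
 * (Editor's outline of the MAS update protocol used below, shown for
 * orientation only: MAS0 selects TLB1 and the entry slot, MAS1/MAS2/MAS3
 * carry the valid bit + TID + TSIZE, EPN + WIMGE, and RPN + permissions
 * respectively, each mtspr is followed by isync, and the final
 *
 *	__asm __volatile("isync; tlbwe; isync; msync");
 *
 * commits the staged entry to the TLB.)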
2525176771Sraj */
2526176771Srajstatic void
2527176771Srajtlb1_write_entry(unsigned int idx)
2528176771Sraj{
2529187151Sraj uint32_t mas0, mas7;
2530176771Sraj
2531176771Sraj //debugf("tlb1_write_entry: s\n");
2532176771Sraj
2533176771Sraj /* Clear high order RPN bits */
2534176771Sraj mas7 = 0;
2535176771Sraj
2536176771Sraj /* Select entry */
2537176771Sraj mas0 = MAS0_TLBSEL(1) | MAS0_ESEL(idx);
2538176771Sraj //debugf("tlb1_write_entry: mas0 = 0x%08x\n", mas0);
2539176771Sraj
2540176771Sraj mtspr(SPR_MAS0, mas0);
2541187151Sraj __asm __volatile("isync");
2542176771Sraj mtspr(SPR_MAS1, tlb1[idx].mas1);
2543187151Sraj __asm __volatile("isync");
2544176771Sraj mtspr(SPR_MAS2, tlb1[idx].mas2);
2545187151Sraj __asm __volatile("isync");
2546176771Sraj mtspr(SPR_MAS3, tlb1[idx].mas3);
2547187151Sraj __asm __volatile("isync");
2548176771Sraj mtspr(SPR_MAS7, mas7);
2549187151Sraj __asm __volatile("isync; tlbwe; isync; msync");
2550176771Sraj
2551176771Sraj //debugf("tlb1_write_entry: e\n");
2552176771Sraj}
2553176771Sraj
2554176771Sraj/*
2555176771Sraj * Return the largest uint value log such that 2^log <= num.
2556176771Sraj */
2557176771Srajstatic unsigned int
2558176771Srajilog2(unsigned int num)
2559176771Sraj{
2560176771Sraj int lz;
2561176771Sraj
2562176771Sraj __asm ("cntlzw %0, %1" : "=r" (lz) : "r" (num));
2563176771Sraj return (31 - lz);
2564176771Sraj}
2565176771Sraj
2566176771Sraj/*
2567176771Sraj * Convert TLB TSIZE value to mapped region size.
2568176771Sraj */
2569176771Srajstatic vm_size_t
2570176771Srajtsize2size(unsigned int tsize)
2571176771Sraj{
2572176771Sraj
2573176771Sraj /*
2574176771Sraj * size = 4^tsize KB
2575176771Sraj * size = 4^tsize * 2^10 = 2^(2 * tsize + 10)
2576176771Sraj */
2577176771Sraj
2578176771Sraj return ((1 << (2 * tsize)) * 1024);
2579176771Sraj}
2580176771Sraj
2581176771Sraj/*
2582176771Sraj * Convert region size (must be power of 4) to TLB TSIZE value.
2583176771Sraj */
2584176771Srajstatic unsigned int
2585176771Srajsize2tsize(vm_size_t size)
2586176771Sraj{
2587176771Sraj
2588176771Sraj return (ilog2(size) / 2 - 5);
2589176771Sraj}
2590176771Sraj
2591176771Sraj/*
2592187149Sraj * Register a permanent kernel mapping in TLB1.
2593176771Sraj *
2594187149Sraj * Entries are created starting from index 0 (current free entry is
2595187149Sraj * kept in tlb1_idx) and are not supposed to be invalidated.
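 *
 * (Editor's worked example for the helpers above, using the
 * size = 4^tsize KB relation: for a 16 MB mapping,
 *
 *	size2tsize(16 MB) = ilog2(2^24) / 2 - 5 = 12 - 5 = 7
 *	tsize2size(7) = 4^7 KB = 2^(2*7 + 10) = 16 MB
 *
 * so the two functions are inverses for power-of-4 sizes.)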
2596176771Sraj */ 2597187149Srajstatic int 2598187149Srajtlb1_set_entry(vm_offset_t va, vm_offset_t pa, vm_size_t size, 2599187149Sraj uint32_t flags) 2600176771Sraj{ 2601187149Sraj uint32_t ts, tid; 2602176771Sraj int tsize; 2603187149Sraj 2604187149Sraj if (tlb1_idx >= TLB1_ENTRIES) { 2605187149Sraj printf("tlb1_set_entry: TLB1 full!\n"); 2606187149Sraj return (-1); 2607187149Sraj } 2608176771Sraj 2609176771Sraj /* Convert size to TSIZE */ 2610176771Sraj tsize = size2tsize(size); 2611176771Sraj 2612187149Sraj tid = (TID_KERNEL << MAS1_TID_SHIFT) & MAS1_TID_MASK; 2613187149Sraj /* XXX TS is hard coded to 0 for now as we only use single address space */ 2614187149Sraj ts = (0 << MAS1_TS_SHIFT) & MAS1_TS_MASK; 2615176771Sraj 2616187149Sraj /* XXX LOCK tlb1[] */ 2617176771Sraj 2618187149Sraj tlb1[tlb1_idx].mas1 = MAS1_VALID | MAS1_IPROT | ts | tid; 2619187149Sraj tlb1[tlb1_idx].mas1 |= ((tsize << MAS1_TSIZE_SHIFT) & MAS1_TSIZE_MASK); 2620187149Sraj tlb1[tlb1_idx].mas2 = (va & MAS2_EPN_MASK) | flags; 2621176771Sraj 2622187149Sraj /* Set supervisor RWX permission bits */ 2623187149Sraj tlb1[tlb1_idx].mas3 = (pa & MAS3_RPN) | MAS3_SR | MAS3_SW | MAS3_SX; 2624176771Sraj 2625187149Sraj tlb1_write_entry(tlb1_idx++); 2626176771Sraj 2627187149Sraj /* XXX UNLOCK tlb1[] */ 2628176771Sraj 2629187149Sraj /* 2630187149Sraj * XXX in general TLB1 updates should be propagated between CPUs, 2631187149Sraj * since current design assumes to have the same TLB1 set-up on all 2632187149Sraj * cores. 2633187149Sraj */ 2634176771Sraj return (0); 2635176771Sraj} 2636176771Sraj 2637176771Srajstatic int 2638176771Srajtlb1_entry_size_cmp(const void *a, const void *b) 2639176771Sraj{ 2640176771Sraj const vm_size_t *sza; 2641176771Sraj const vm_size_t *szb; 2642176771Sraj 2643176771Sraj sza = a; 2644176771Sraj szb = b; 2645176771Sraj if (*sza > *szb) 2646176771Sraj return (-1); 2647176771Sraj else if (*sza < *szb) 2648176771Sraj return (1); 2649176771Sraj else 2650176771Sraj return (0); 2651176771Sraj} 2652176771Sraj 2653176771Sraj/* 2654187151Sraj * Map in contiguous RAM region into the TLB1 using maximum of 2655176771Sraj * KERNEL_REGION_MAX_TLB_ENTRIES entries. 2656176771Sraj * 2657187151Sraj * If necessary round up last entry size and return total size 2658176771Sraj * used by all allocated entries. 2659176771Sraj */ 2660176771Srajvm_size_t 2661176771Srajtlb1_mapin_region(vm_offset_t va, vm_offset_t pa, vm_size_t size) 2662176771Sraj{ 2663176771Sraj vm_size_t entry_size[KERNEL_REGION_MAX_TLB_ENTRIES]; 2664176771Sraj vm_size_t mapped_size, sz, esz; 2665176771Sraj unsigned int log; 2666176771Sraj int i; 2667176771Sraj 2668187151Sraj CTR4(KTR_PMAP, "%s: region size = 0x%08x va = 0x%08x pa = 0x%08x", 2669187151Sraj __func__, size, va, pa); 2670176771Sraj 2671176771Sraj mapped_size = 0; 2672176771Sraj sz = size; 2673176771Sraj memset(entry_size, 0, sizeof(entry_size)); 2674176771Sraj 2675176771Sraj /* Calculate entry sizes. */ 2676176771Sraj for (i = 0; i < KERNEL_REGION_MAX_TLB_ENTRIES && sz > 0; i++) { 2677176771Sraj 2678176771Sraj /* Largest region that is power of 4 and fits within size */ 2679187149Sraj log = ilog2(sz) / 2; 2680176771Sraj esz = 1 << (2 * log); 2681176771Sraj 2682176771Sraj /* If this is last entry cover remaining size. 
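 * (Editor's example of this decomposition, not in the original: for a
 * 48 MB region, ilog2(48 MB) / 2 = 25 / 2 = 12, so esz = 2^24 = 16 MB
 * on every pass and the region maps exactly as three 16 MB entries;
 * only an oddly-sized remainder would be rounded up by the code below.)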
*/ 2683176771Sraj if (i == KERNEL_REGION_MAX_TLB_ENTRIES - 1) { 2684176771Sraj while (esz < sz) 2685176771Sraj esz = esz << 2; 2686176771Sraj } 2687176771Sraj 2688176771Sraj entry_size[i] = esz; 2689176771Sraj mapped_size += esz; 2690176771Sraj if (esz < sz) 2691176771Sraj sz -= esz; 2692176771Sraj else 2693176771Sraj sz = 0; 2694176771Sraj } 2695176771Sraj 2696176771Sraj /* Sort entry sizes, required to get proper entry address alignment. */ 2697176771Sraj qsort(entry_size, KERNEL_REGION_MAX_TLB_ENTRIES, 2698176771Sraj sizeof(vm_size_t), tlb1_entry_size_cmp); 2699176771Sraj 2700176771Sraj /* Load TLB1 entries. */ 2701176771Sraj for (i = 0; i < KERNEL_REGION_MAX_TLB_ENTRIES; i++) { 2702176771Sraj esz = entry_size[i]; 2703176771Sraj if (!esz) 2704176771Sraj break; 2705187151Sraj 2706187151Sraj CTR5(KTR_PMAP, "%s: entry %d: sz = 0x%08x (va = 0x%08x " 2707187151Sraj "pa = 0x%08x)", __func__, tlb1_idx, esz, va, pa); 2708187151Sraj 2709176771Sraj tlb1_set_entry(va, pa, esz, _TLB_ENTRY_MEM); 2710176771Sraj 2711176771Sraj va += esz; 2712176771Sraj pa += esz; 2713176771Sraj } 2714176771Sraj 2715187151Sraj CTR3(KTR_PMAP, "%s: mapped size 0x%08x (wasted space 0x%08x)", 2716187151Sraj __func__, mapped_size, mapped_size - size); 2717176771Sraj 2718176771Sraj return (mapped_size); 2719176771Sraj} 2720176771Sraj 2721176771Sraj/* 2722176771Sraj * TLB1 initialization routine, to be called after the very first 2723176771Sraj * assembler level setup done in locore.S. 2724176771Sraj */ 2725176771Srajvoid 2726176771Srajtlb1_init(vm_offset_t ccsrbar) 2727176771Sraj{ 2728176771Sraj uint32_t mas0; 2729176771Sraj 2730187151Sraj /* TLB1[1] is used to map the kernel. Save that entry. */ 2731176771Sraj mas0 = MAS0_TLBSEL(1) | MAS0_ESEL(1); 2732176771Sraj mtspr(SPR_MAS0, mas0); 2733176771Sraj __asm __volatile("isync; tlbre"); 2734176771Sraj 2735176771Sraj tlb1[1].mas1 = mfspr(SPR_MAS1); 2736176771Sraj tlb1[1].mas2 = mfspr(SPR_MAS2); 2737176771Sraj tlb1[1].mas3 = mfspr(SPR_MAS3); 2738176771Sraj 2739187149Sraj /* Map in CCSRBAR in TLB1[0] */ 2740187149Sraj tlb1_idx = 0; 2741187149Sraj tlb1_set_entry(CCSRBAR_VA, ccsrbar, CCSRBAR_SIZE, _TLB_ENTRY_IO); 2742187149Sraj /* 2743187149Sraj * Set the next available TLB1 entry index. Note TLB[1] is reserved 2744187149Sraj * for initial mapping of kernel text+data, which was set early in 2745187149Sraj * locore, we need to skip this [busy] entry. 2746187149Sraj */ 2747187149Sraj tlb1_idx = 2; 2748176771Sraj 2749176771Sraj /* Setup TLB miss defaults */ 2750176771Sraj set_mas4_defaults(); 2751176771Sraj} 2752176771Sraj 2753176771Sraj/* 2754176771Sraj * Setup MAS4 defaults. 2755176771Sraj * These values are loaded to MAS0-2 on a TLB miss. 
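 *
 * (Editor's note: with these defaults a TLB miss seeds the MAS registers
 * for a 4 KB TLB0 entry, so the miss handler mostly just fills in the
 * translation and permission bits before issuing tlbwe.)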
2756176771Sraj */
2757176771Srajstatic void
2758176771Srajset_mas4_defaults(void)
2759176771Sraj{
2760187151Sraj uint32_t mas4;
2761176771Sraj
2762176771Sraj /* Defaults: TLB0, PID0, TSIZED=4K */
2763176771Sraj mas4 = MAS4_TLBSELD0;
2764176771Sraj mas4 |= (TLB_SIZE_4K << MAS4_TSIZED_SHIFT) & MAS4_TSIZED_MASK;
2765176771Sraj
2766176771Sraj mtspr(SPR_MAS4, mas4);
2767187151Sraj __asm __volatile("isync");
2768176771Sraj}
2769176771Sraj
2770176771Sraj/*
2771176771Sraj * Print out contents of the MAS registers for each TLB1 entry
2772176771Sraj */
2773176771Srajvoid
2774176771Srajtlb1_print_tlbentries(void)
2775176771Sraj{
2776187149Sraj uint32_t mas0, mas1, mas2, mas3, mas7;
2777176771Sraj int i;
2778176771Sraj
2779176771Sraj debugf("TLB1 entries:\n");
2780187149Sraj for (i = 0; i < TLB1_ENTRIES; i++) {
2781176771Sraj
2782176771Sraj mas0 = MAS0_TLBSEL(1) | MAS0_ESEL(i);
2783176771Sraj mtspr(SPR_MAS0, mas0);
2784176771Sraj
2785187149Sraj __asm __volatile("isync; tlbre");
2786176771Sraj
2787176771Sraj mas1 = mfspr(SPR_MAS1);
2788176771Sraj mas2 = mfspr(SPR_MAS2);
2789176771Sraj mas3 = mfspr(SPR_MAS3);
2790176771Sraj mas7 = mfspr(SPR_MAS7);
2791176771Sraj
2792176771Sraj tlb_print_entry(i, mas1, mas2, mas3, mas7);
2793176771Sraj }
2794176771Sraj}
2795176771Sraj
2796176771Sraj/*
2797176771Sraj * Print out contents of the in-ram tlb1 table.
2798176771Sraj */
2799176771Srajvoid
2800176771Srajtlb1_print_entries(void)
2801176771Sraj{
2802176771Sraj int i;
2803176771Sraj
2804176771Sraj debugf("tlb1[] table entries:\n");
2805187149Sraj for (i = 0; i < TLB1_ENTRIES; i++)
2806176771Sraj tlb_print_entry(i, tlb1[i].mas1, tlb1[i].mas2, tlb1[i].mas3, 0);
2807176771Sraj}
2808176771Sraj
2809176771Sraj/*
2810176771Sraj * Return 0 if the physical IO range is encompassed by one of the
2811176771Sraj * TLB1 entries, otherwise return a related error code.
2812176771Sraj */
2813176771Srajstatic int
2814176771Srajtlb1_iomapped(int i, vm_paddr_t pa, vm_size_t size, vm_offset_t *va)
2815176771Sraj{
2816187151Sraj uint32_t prot;
2817176771Sraj vm_paddr_t pa_start;
2818176771Sraj vm_paddr_t pa_end;
2819176771Sraj unsigned int entry_tsize;
2820176771Sraj vm_size_t entry_size;
2821176771Sraj
2822176771Sraj *va = (vm_offset_t)NULL;
2823176771Sraj
2824176771Sraj /* Skip invalid entries */
2825176771Sraj if (!(tlb1[i].mas1 & MAS1_VALID))
2826176771Sraj return (EINVAL);
2827176771Sraj
2828176771Sraj /*
2829176771Sraj * The entry must be cache-inhibited, guarded, and r/w
2830176771Sraj * so it can function as an i/o page
2831176771Sraj */
2832176771Sraj prot = tlb1[i].mas2 & (MAS2_I | MAS2_G);
2833176771Sraj if (prot != (MAS2_I | MAS2_G))
2834176771Sraj return (EPERM);
2835176771Sraj
2836176771Sraj prot = tlb1[i].mas3 & (MAS3_SR | MAS3_SW);
2837176771Sraj if (prot != (MAS3_SR | MAS3_SW))
2838176771Sraj return (EPERM);
2839176771Sraj
2840176771Sraj /* The address should be within the entry range. */
2841176771Sraj entry_tsize = (tlb1[i].mas1 & MAS1_TSIZE_MASK) >> MAS1_TSIZE_SHIFT;
2842176771Sraj KASSERT((entry_tsize), ("tlb1_iomapped: invalid entry tsize"));
2843176771Sraj
2844176771Sraj entry_size = tsize2size(entry_tsize);
2845176771Sraj pa_start = tlb1[i].mas3 & MAS3_RPN;
2846176771Sraj pa_end = pa_start + entry_size - 1;
2847176771Sraj
2848176771Sraj if ((pa < pa_start) || ((pa + size) > pa_end))
2849176771Sraj return (ERANGE);
2850176771Sraj
2851176771Sraj /* Return virtual address of this mapping. */
2852187149Sraj *va = (tlb1[i].mas2 & MAS2_EPN_MASK) + (pa - pa_start);
2853176771Sraj return (0);
2854176771Sraj}
2855