/*-
 * Copyright (C) 2007-2009 Semihalf, Rafal Jaworowski <raj@semihalf.com>
 * Copyright (C) 2006 Semihalf, Marian Balakowicz <m8@semihalf.com>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN
 * NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
 * TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
 * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
 * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Some hw specific parts of this pmap were derived or influenced
 * by NetBSD's ibm4xx pmap module. More generic code is shared with
 * a few other pmap modules from the FreeBSD tree.
 */

/*
 * VM layout notes:
 *
 * Kernel and user threads run within one common virtual address space
 * defined by AS=0.
 *
 * Virtual address space layout:
 * -----------------------------
 * 0x0000_0000 - 0xafff_ffff	: user process
 * 0xb000_0000 - 0xbfff_ffff	: pmap_mapdev()-ed area (PCI/PCIE etc.)
 * 0xc000_0000 - 0xc0ff_ffff	: kernel reserved
 *   0xc000_0000 - data_end	: kernel code+data, env, metadata etc.
 * 0xc100_0000 - 0xfeef_ffff	: KVA
 *   0xc100_0000 - 0xc100_3fff	: reserved for page zero/copy
 *   0xc100_4000 - 0xc200_3fff	: reserved for ptbl bufs
 *   0xc200_4000 - 0xc200_8fff	: guard page + kstack0
 *   0xc200_9000 - 0xfeef_ffff	: actual free KVA space
 * 0xfef0_0000 - 0xffff_ffff	: I/O devices region
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: head/sys/powerpc/booke/pmap.c 194784 2009-06-23 22:42:39Z jeff $");

#include <sys/types.h>
#include <sys/param.h>
#include <sys/malloc.h>
#include <sys/ktr.h>
#include <sys/proc.h>
#include <sys/user.h>
#include <sys/queue.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/msgbuf.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/smp.h>
#include <sys/vmmeter.h>

#include <vm/vm.h>
#include <vm/vm_page.h>
#include <vm/vm_kern.h>
#include <vm/vm_pageout.h>
#include <vm/vm_extern.h>
#include <vm/vm_object.h>
#include <vm/vm_param.h>
#include <vm/vm_map.h>
#include <vm/vm_pager.h>
#include <vm/uma.h>

#include <machine/bootinfo.h>
#include <machine/cpu.h>
#include <machine/pcb.h>
#include <machine/platform.h>

#include <machine/tlb.h>
#include <machine/spr.h>
#include <machine/vmparam.h>
#include <machine/md_var.h>
#include <machine/mmuvar.h>
#include <machine/pmap.h>
#include <machine/pte.h>

#include "mmu_if.h"

#define DEBUG
#undef DEBUG

#ifdef DEBUG
#define debugf(fmt, args...) printf(fmt, ##args)
#else
#define debugf(fmt, args...)
#endif

#define TODO			panic("%s: not implemented", __func__);

#include "opt_sched.h"
#ifndef SCHED_4BSD
#error "e500 only works with SCHED_4BSD which uses a global scheduler lock."
#endif
extern struct mtx sched_lock;

extern int dumpsys_minidump;

extern unsigned char _etext[];
extern unsigned char _end[];

/* Kernel physical load address. */
extern uint32_t kernload;
vm_offset_t kernstart;
vm_size_t kernsize;

/* Message buffer and tables. */
static vm_offset_t data_start;
static vm_size_t data_end;

/* Phys/avail memory regions. */
static struct mem_region *availmem_regions;
static int availmem_regions_sz;
static struct mem_region *physmem_regions;
static int physmem_regions_sz;
/* Reserved KVA space and mutex for mmu_booke_zero_page. */
static vm_offset_t zero_page_va;
static struct mtx zero_page_mutex;

static struct mtx tlbivax_mutex;

/*
 * Reserved KVA space for mmu_booke_zero_page_idle. This is used
 * by the idle thread only; no lock is required.
 */
static vm_offset_t zero_page_idle_va;

/* Reserved KVA space and mutex for mmu_booke_copy_page. */
static vm_offset_t copy_page_src_va;
static vm_offset_t copy_page_dst_va;
static struct mtx copy_page_mutex;

/**************************************************************************/
/* PMAP */
/**************************************************************************/

static void mmu_booke_enter_locked(mmu_t, pmap_t, vm_offset_t, vm_page_t,
    vm_prot_t, boolean_t);

unsigned int kptbl_min;		/* Index of the first kernel ptbl. */
unsigned int kernel_ptbls;	/* Number of KVA ptbls. */

/*
 * If a user pmap is processed with mmu_booke_remove and the resident count
 * drops to 0, there are no more pages to remove, so we need not continue.
 */
#define PMAP_REMOVE_DONE(pmap) \
	((pmap) != kernel_pmap && (pmap)->pm_stats.resident_count == 0)

extern void tlb_lock(uint32_t *);
extern void tlb_unlock(uint32_t *);
extern void tid_flush(tlbtid_t);

/**************************************************************************/
/* TLB and TID handling */
/**************************************************************************/

/* Translation ID busy table */
static volatile pmap_t tidbusy[MAXCPU][TID_MAX + 1];

/*
 * TLB0 capabilities (entry, way numbers etc.). These can vary between e500
 * core revisions and should be read from h/w registers during early config.
 */
uint32_t tlb0_entries;
uint32_t tlb0_ways;
uint32_t tlb0_entries_per_way;

#define TLB0_ENTRIES		(tlb0_entries)
#define TLB0_WAYS		(tlb0_ways)
#define TLB0_ENTRIES_PER_WAY	(tlb0_entries_per_way)

#define TLB1_ENTRIES 16

/* In-ram copy of the TLB1 */
static tlb_entry_t tlb1[TLB1_ENTRIES];

/* Next free entry in the TLB1 */
static unsigned int tlb1_idx;

static tlbtid_t tid_alloc(struct pmap *);

static void tlb_print_entry(int, uint32_t, uint32_t, uint32_t, uint32_t);

static int tlb1_set_entry(vm_offset_t, vm_offset_t, vm_size_t, uint32_t);
static void tlb1_write_entry(unsigned int);
static int tlb1_iomapped(int, vm_paddr_t, vm_size_t, vm_offset_t *);
static vm_size_t tlb1_mapin_region(vm_offset_t, vm_offset_t, vm_size_t);

static vm_size_t tsize2size(unsigned int);
static unsigned int size2tsize(vm_size_t);
static unsigned int ilog2(unsigned int);

static void set_mas4_defaults(void);

static inline void tlb0_flush_entry(vm_offset_t);
static inline unsigned int tlb0_tableidx(vm_offset_t, unsigned int);

/**************************************************************************/
/* Page table management */
/**************************************************************************/

/* Data for the pv entry allocation mechanism */
static uma_zone_t pvzone;
static struct vm_object pvzone_obj;
static int pv_entry_count = 0, pv_entry_max = 0, pv_entry_high_water = 0;

#define PV_ENTRY_ZONE_MIN	2048	/* min pv entries in uma zone */

#ifndef PMAP_SHPGPERPROC
#define PMAP_SHPGPERPROC	200
#endif

static void ptbl_init(void);
static struct ptbl_buf *ptbl_buf_alloc(void);
static void ptbl_buf_free(struct ptbl_buf *);
static void ptbl_free_pmap_ptbl(pmap_t, pte_t *);

static pte_t *ptbl_alloc(mmu_t, pmap_t, unsigned int);
static void ptbl_free(mmu_t, pmap_t, unsigned int);
static void ptbl_hold(mmu_t, pmap_t, unsigned int);
static int ptbl_unhold(mmu_t, pmap_t, unsigned int);

static vm_paddr_t pte_vatopa(mmu_t, pmap_t, vm_offset_t);
static pte_t *pte_find(mmu_t, pmap_t, vm_offset_t);
static void pte_enter(mmu_t, pmap_t, vm_page_t, vm_offset_t, uint32_t);
static int pte_remove(mmu_t, pmap_t, vm_offset_t, uint8_t);

static pv_entry_t pv_alloc(void);
static void pv_free(pv_entry_t);
static void pv_insert(pmap_t, vm_offset_t, vm_page_t);
static void pv_remove(pmap_t, vm_offset_t, vm_page_t);

/* Number of kva ptbl buffers, each covering one ptbl (PTBL_PAGES). */
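/*
 * With these defaults the buffer pool consumes PTBL_BUFS * PTBL_PAGES *
 * PAGE_SIZE bytes of KVA, which must match the amount reserved off
 * virtual_avail in mmu_booke_bootstrap() below.
 */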
#define PTBL_BUFS		(128 * 16)

struct ptbl_buf {
	TAILQ_ENTRY(ptbl_buf) link;	/* list link */
	vm_offset_t kva;		/* va of mapping */
};

/* ptbl free list and a lock used for access synchronization. */
static TAILQ_HEAD(, ptbl_buf) ptbl_buf_freelist;
static struct mtx ptbl_buf_freelist_lock;

/* Base address of kva space allocated for ptbl bufs. */
static vm_offset_t ptbl_buf_pool_vabase;

/* Pointer to ptbl_buf structures. */
static struct ptbl_buf *ptbl_bufs;

void pmap_bootstrap_ap(volatile uint32_t *);

/*
 * Kernel MMU interface
 */
static void mmu_booke_change_wiring(mmu_t, pmap_t, vm_offset_t, boolean_t);
static void mmu_booke_clear_modify(mmu_t, vm_page_t);
static void mmu_booke_clear_reference(mmu_t, vm_page_t);
static void mmu_booke_copy(mmu_t, pmap_t, pmap_t, vm_offset_t,
    vm_size_t, vm_offset_t);
static void mmu_booke_copy_page(mmu_t, vm_page_t, vm_page_t);
static void mmu_booke_enter(mmu_t, pmap_t, vm_offset_t, vm_page_t,
    vm_prot_t, boolean_t);
static void mmu_booke_enter_object(mmu_t, pmap_t, vm_offset_t, vm_offset_t,
    vm_page_t, vm_prot_t);
static void mmu_booke_enter_quick(mmu_t, pmap_t, vm_offset_t, vm_page_t,
    vm_prot_t);
static vm_paddr_t mmu_booke_extract(mmu_t, pmap_t, vm_offset_t);
static vm_page_t mmu_booke_extract_and_hold(mmu_t, pmap_t, vm_offset_t,
    vm_prot_t);
static void mmu_booke_init(mmu_t);
static boolean_t mmu_booke_is_modified(mmu_t, vm_page_t);
static boolean_t mmu_booke_is_prefaultable(mmu_t, pmap_t, vm_offset_t);
static boolean_t mmu_booke_ts_referenced(mmu_t, vm_page_t);
static vm_offset_t mmu_booke_map(mmu_t, vm_offset_t *, vm_offset_t, vm_offset_t,
    int);
static int mmu_booke_mincore(mmu_t, pmap_t, vm_offset_t);
static void mmu_booke_object_init_pt(mmu_t, pmap_t, vm_offset_t,
    vm_object_t, vm_pindex_t, vm_size_t);
static boolean_t mmu_booke_page_exists_quick(mmu_t, pmap_t, vm_page_t);
static void mmu_booke_page_init(mmu_t, vm_page_t);
static int mmu_booke_page_wired_mappings(mmu_t, vm_page_t);
static void mmu_booke_pinit(mmu_t, pmap_t);
static void mmu_booke_pinit0(mmu_t, pmap_t);
static void mmu_booke_protect(mmu_t, pmap_t, vm_offset_t, vm_offset_t,
    vm_prot_t);
static void mmu_booke_qenter(mmu_t, vm_offset_t, vm_page_t *, int);
static void mmu_booke_qremove(mmu_t, vm_offset_t, int);
static void mmu_booke_release(mmu_t, pmap_t);
static void mmu_booke_remove(mmu_t, pmap_t, vm_offset_t, vm_offset_t);
static void mmu_booke_remove_all(mmu_t, vm_page_t);
static void mmu_booke_remove_write(mmu_t, vm_page_t);
static void mmu_booke_zero_page(mmu_t, vm_page_t);
static void mmu_booke_zero_page_area(mmu_t, vm_page_t, int, int);
static void mmu_booke_zero_page_idle(mmu_t, vm_page_t);
static void mmu_booke_activate(mmu_t, struct thread *);
static void mmu_booke_deactivate(mmu_t, struct thread *);
static void mmu_booke_bootstrap(mmu_t, vm_offset_t, vm_offset_t);
static void *mmu_booke_mapdev(mmu_t, vm_offset_t, vm_size_t);
static void mmu_booke_unmapdev(mmu_t, vm_offset_t, vm_size_t);
static vm_offset_t mmu_booke_kextract(mmu_t, vm_offset_t);
static void mmu_booke_kenter(mmu_t, vm_offset_t, vm_offset_t);
static void mmu_booke_kremove(mmu_t, vm_offset_t);
static boolean_t mmu_booke_dev_direct_mapped(mmu_t, vm_offset_t, vm_size_t);
static boolean_t mmu_booke_page_executable(mmu_t, vm_page_t);
static vm_offset_t mmu_booke_dumpsys_map(mmu_t, struct pmap_md *,
    vm_size_t, vm_size_t *);
static void mmu_booke_dumpsys_unmap(mmu_t, struct pmap_md *,
    vm_size_t, vm_offset_t);
static struct pmap_md *mmu_booke_scan_md(mmu_t, struct pmap_md *);

static mmu_method_t mmu_booke_methods[] = {
	/* pmap dispatcher interface */
	MMUMETHOD(mmu_change_wiring,	mmu_booke_change_wiring),
	MMUMETHOD(mmu_clear_modify,	mmu_booke_clear_modify),
	MMUMETHOD(mmu_clear_reference,	mmu_booke_clear_reference),
	MMUMETHOD(mmu_copy,		mmu_booke_copy),
	MMUMETHOD(mmu_copy_page,	mmu_booke_copy_page),
	MMUMETHOD(mmu_enter,		mmu_booke_enter),
	MMUMETHOD(mmu_enter_object,	mmu_booke_enter_object),
	MMUMETHOD(mmu_enter_quick,	mmu_booke_enter_quick),
	MMUMETHOD(mmu_extract,		mmu_booke_extract),
	MMUMETHOD(mmu_extract_and_hold,	mmu_booke_extract_and_hold),
	MMUMETHOD(mmu_init,		mmu_booke_init),
	MMUMETHOD(mmu_is_modified,	mmu_booke_is_modified),
	MMUMETHOD(mmu_is_prefaultable,	mmu_booke_is_prefaultable),
	MMUMETHOD(mmu_ts_referenced,	mmu_booke_ts_referenced),
	MMUMETHOD(mmu_map,		mmu_booke_map),
	MMUMETHOD(mmu_mincore,		mmu_booke_mincore),
	MMUMETHOD(mmu_object_init_pt,	mmu_booke_object_init_pt),
	MMUMETHOD(mmu_page_exists_quick,mmu_booke_page_exists_quick),
	MMUMETHOD(mmu_page_init,	mmu_booke_page_init),
	MMUMETHOD(mmu_page_wired_mappings, mmu_booke_page_wired_mappings),
	MMUMETHOD(mmu_pinit,		mmu_booke_pinit),
	MMUMETHOD(mmu_pinit0,		mmu_booke_pinit0),
	MMUMETHOD(mmu_protect,		mmu_booke_protect),
	MMUMETHOD(mmu_qenter,		mmu_booke_qenter),
	MMUMETHOD(mmu_qremove,		mmu_booke_qremove),
	MMUMETHOD(mmu_release,		mmu_booke_release),
	MMUMETHOD(mmu_remove,		mmu_booke_remove),
	MMUMETHOD(mmu_remove_all,	mmu_booke_remove_all),
	MMUMETHOD(mmu_remove_write,	mmu_booke_remove_write),
	MMUMETHOD(mmu_zero_page,	mmu_booke_zero_page),
	MMUMETHOD(mmu_zero_page_area,	mmu_booke_zero_page_area),
	MMUMETHOD(mmu_zero_page_idle,	mmu_booke_zero_page_idle),
	MMUMETHOD(mmu_activate,		mmu_booke_activate),
	MMUMETHOD(mmu_deactivate,	mmu_booke_deactivate),

	/* Internal interfaces */
	MMUMETHOD(mmu_bootstrap,	mmu_booke_bootstrap),
	MMUMETHOD(mmu_dev_direct_mapped,mmu_booke_dev_direct_mapped),
	MMUMETHOD(mmu_mapdev,		mmu_booke_mapdev),
	MMUMETHOD(mmu_kenter,		mmu_booke_kenter),
	MMUMETHOD(mmu_kextract,		mmu_booke_kextract),
/*	MMUMETHOD(mmu_kremove,		mmu_booke_kremove),	*/
	MMUMETHOD(mmu_page_executable,	mmu_booke_page_executable),
	MMUMETHOD(mmu_unmapdev,		mmu_booke_unmapdev),

	/* dumpsys() support */
	MMUMETHOD(mmu_dumpsys_map,	mmu_booke_dumpsys_map),
	MMUMETHOD(mmu_dumpsys_unmap,	mmu_booke_dumpsys_unmap),
	MMUMETHOD(mmu_scan_md,		mmu_booke_scan_md),

	{ 0, 0 }
};

static mmu_def_t booke_mmu = {
	MMU_TYPE_BOOKE,
	mmu_booke_methods,
	0
};
MMU_DEF(booke_mmu);

static inline void
tlb_miss_lock(void)
{
#ifdef SMP
	struct pcpu *pc;

	if (!smp_started)
		return;

	SLIST_FOREACH(pc, &cpuhead, pc_allcpu) {
		if (pc != pcpup) {

			CTR3(KTR_PMAP, "%s: tlb miss LOCK of CPU=%d, "
			    "tlb_lock=%p", __func__, pc->pc_cpuid,
			    pc->pc_booke_tlb_lock);

			KASSERT((pc->pc_cpuid != PCPU_GET(cpuid)),
			    ("tlb_miss_lock: tried to lock self"));

			tlb_lock(pc->pc_booke_tlb_lock);

			CTR1(KTR_PMAP, "%s: locked", __func__);
		}
	}
#endif
}

static inline void
tlb_miss_unlock(void)
{
#ifdef SMP
	struct pcpu *pc;

	if (!smp_started)
		return;

	SLIST_FOREACH(pc, &cpuhead, pc_allcpu) {
		if (pc != pcpup) {
			CTR2(KTR_PMAP, "%s: tlb miss UNLOCK of CPU=%d",
			    __func__, pc->pc_cpuid);

			tlb_unlock(pc->pc_booke_tlb_lock);

			CTR1(KTR_PMAP, "%s: unlocked", __func__);
		}
	}
#endif
}

/* Read TLB0 size and associativity from h/w and initialize the globals. */
static __inline void
tlb0_get_tlbconf(void)
{
	uint32_t tlb0_cfg;

	tlb0_cfg = mfspr(SPR_TLB0CFG);
	tlb0_entries = tlb0_cfg & TLBCFG_NENTRY_MASK;
	tlb0_ways = (tlb0_cfg & TLBCFG_ASSOC_MASK) >> TLBCFG_ASSOC_SHIFT;
	tlb0_entries_per_way = tlb0_entries / tlb0_ways;
}
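/*
 * For example (assuming a core whose TLB0CFG reports a 512-entry, 4-way
 * set associative TLB0), the decode above yields tlb0_entries = 512,
 * tlb0_ways = 4 and tlb0_entries_per_way = 128.
 */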
/* Initialize pool of kva ptbl buffers. */
static void
ptbl_init(void)
{
	int i;

	CTR3(KTR_PMAP, "%s: s (ptbl_bufs = 0x%08x size 0x%08x)", __func__,
	    (uint32_t)ptbl_bufs, sizeof(struct ptbl_buf) * PTBL_BUFS);
	CTR3(KTR_PMAP, "%s: s (ptbl_buf_pool_vabase = 0x%08x size = 0x%08x)",
	    __func__, ptbl_buf_pool_vabase, PTBL_BUFS * PTBL_PAGES * PAGE_SIZE);

	mtx_init(&ptbl_buf_freelist_lock, "ptbl bufs lock", NULL, MTX_DEF);
	TAILQ_INIT(&ptbl_buf_freelist);

	for (i = 0; i < PTBL_BUFS; i++) {
		ptbl_bufs[i].kva = ptbl_buf_pool_vabase + i * PTBL_PAGES * PAGE_SIZE;
		TAILQ_INSERT_TAIL(&ptbl_buf_freelist, &ptbl_bufs[i], link);
	}
}

/* Get a ptbl_buf from the freelist. */
static struct ptbl_buf *
ptbl_buf_alloc(void)
{
	struct ptbl_buf *buf;

	mtx_lock(&ptbl_buf_freelist_lock);
	buf = TAILQ_FIRST(&ptbl_buf_freelist);
	if (buf != NULL)
		TAILQ_REMOVE(&ptbl_buf_freelist, buf, link);
	mtx_unlock(&ptbl_buf_freelist_lock);

	CTR2(KTR_PMAP, "%s: buf = %p", __func__, buf);

	return (buf);
}

/* Return ptbl buf to the free pool. */
static void
ptbl_buf_free(struct ptbl_buf *buf)
{

	CTR2(KTR_PMAP, "%s: buf = %p", __func__, buf);

	mtx_lock(&ptbl_buf_freelist_lock);
	TAILQ_INSERT_TAIL(&ptbl_buf_freelist, buf, link);
	mtx_unlock(&ptbl_buf_freelist_lock);
}

/*
 * Search the pmap's list of allocated ptbl bufs for the buf backing the
 * given ptbl, remove it from that list and return it to the free pool.
 */
static void
ptbl_free_pmap_ptbl(pmap_t pmap, pte_t *ptbl)
{
	struct ptbl_buf *pbuf;

	CTR2(KTR_PMAP, "%s: ptbl = %p", __func__, ptbl);

	PMAP_LOCK_ASSERT(pmap, MA_OWNED);

	TAILQ_FOREACH(pbuf, &pmap->pm_ptbl_list, link)
		if (pbuf->kva == (vm_offset_t)ptbl) {
			/* Remove from pmap ptbl buf list. */
			TAILQ_REMOVE(&pmap->pm_ptbl_list, pbuf, link);

			/* Free corresponding ptbl buf. */
			ptbl_buf_free(pbuf);
			break;
		}
}
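/*
 * Page tables for user pmaps are allocated and freed in whole PTBL_PAGES
 * chunks; the wire_count of each constituent page doubles as the ptbl
 * hold count (see ptbl_hold() and ptbl_unhold() below).
 */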
/* Allocate page table. */
static pte_t *
ptbl_alloc(mmu_t mmu, pmap_t pmap, unsigned int pdir_idx)
{
	vm_page_t mtbl[PTBL_PAGES];
	vm_page_t m;
	struct ptbl_buf *pbuf;
	unsigned int pidx;
	pte_t *ptbl;
	int i;

	CTR4(KTR_PMAP, "%s: pmap = %p su = %d pdir_idx = %d", __func__, pmap,
	    (pmap == kernel_pmap), pdir_idx);

	KASSERT((pdir_idx <= (VM_MAXUSER_ADDRESS / PDIR_SIZE)),
	    ("ptbl_alloc: invalid pdir_idx"));
	KASSERT((pmap->pm_pdir[pdir_idx] == NULL),
	    ("pte_alloc: valid ptbl entry exists!"));

	pbuf = ptbl_buf_alloc();
	if (pbuf == NULL)
		panic("pte_alloc: couldn't alloc kernel virtual memory");

	ptbl = (pte_t *)pbuf->kva;

	CTR2(KTR_PMAP, "%s: ptbl kva = %p", __func__, ptbl);

	/* Allocate ptbl pages, this will sleep! */
	for (i = 0; i < PTBL_PAGES; i++) {
		pidx = (PTBL_PAGES * pdir_idx) + i;
		while ((m = vm_page_alloc(NULL, pidx,
		    VM_ALLOC_NOOBJ | VM_ALLOC_WIRED)) == NULL) {

			PMAP_UNLOCK(pmap);
			vm_page_unlock_queues();
			VM_WAIT;
			vm_page_lock_queues();
			PMAP_LOCK(pmap);
		}
		mtbl[i] = m;
	}

	/* Map allocated pages into kernel_pmap. */
	mmu_booke_qenter(mmu, (vm_offset_t)ptbl, mtbl, PTBL_PAGES);

	/* Zero whole ptbl. */
	bzero((caddr_t)ptbl, PTBL_PAGES * PAGE_SIZE);

	/* Add pbuf to the pmap ptbl bufs list. */
	TAILQ_INSERT_TAIL(&pmap->pm_ptbl_list, pbuf, link);

	return (ptbl);
}

/* Free ptbl pages and invalidate pdir entry. */
static void
ptbl_free(mmu_t mmu, pmap_t pmap, unsigned int pdir_idx)
{
	pte_t *ptbl;
	vm_paddr_t pa;
	vm_offset_t va;
	vm_page_t m;
	int i;

	CTR4(KTR_PMAP, "%s: pmap = %p su = %d pdir_idx = %d", __func__, pmap,
	    (pmap == kernel_pmap), pdir_idx);

	KASSERT((pdir_idx <= (VM_MAXUSER_ADDRESS / PDIR_SIZE)),
	    ("ptbl_free: invalid pdir_idx"));

	ptbl = pmap->pm_pdir[pdir_idx];

	CTR2(KTR_PMAP, "%s: ptbl = %p", __func__, ptbl);

	KASSERT((ptbl != NULL), ("ptbl_free: null ptbl"));

	/*
	 * Invalidate the pdir entry as soon as possible, so that other CPUs
	 * don't attempt to look up the page tables we are releasing.
	 */
602187149Sraj */ 603187149Sraj mtx_lock_spin(&tlbivax_mutex); 604192532Sraj tlb_miss_lock(); 605187149Sraj 606187149Sraj pmap->pm_pdir[pdir_idx] = NULL; 607187149Sraj 608192532Sraj tlb_miss_unlock(); 609187149Sraj mtx_unlock_spin(&tlbivax_mutex); 610187149Sraj 611176771Sraj for (i = 0; i < PTBL_PAGES; i++) { 612176771Sraj va = ((vm_offset_t)ptbl + (i * PAGE_SIZE)); 613176771Sraj pa = pte_vatopa(mmu, kernel_pmap, va); 614176771Sraj m = PHYS_TO_VM_PAGE(pa); 615176771Sraj vm_page_free_zero(m); 616176771Sraj atomic_subtract_int(&cnt.v_wire_count, 1); 617176771Sraj mmu_booke_kremove(mmu, va); 618176771Sraj } 619176771Sraj 620176771Sraj ptbl_free_pmap_ptbl(pmap, ptbl); 621176771Sraj} 622176771Sraj 623176771Sraj/* 624176771Sraj * Decrement ptbl pages hold count and attempt to free ptbl pages. 625176771Sraj * Called when removing pte entry from ptbl. 626176771Sraj * 627176771Sraj * Return 1 if ptbl pages were freed. 628176771Sraj */ 629176771Srajstatic int 630176771Srajptbl_unhold(mmu_t mmu, pmap_t pmap, unsigned int pdir_idx) 631176771Sraj{ 632176771Sraj pte_t *ptbl; 633176771Sraj vm_paddr_t pa; 634176771Sraj vm_page_t m; 635176771Sraj int i; 636176771Sraj 637187151Sraj CTR4(KTR_PMAP, "%s: pmap = %p su = %d pdir_idx = %d", __func__, pmap, 638187151Sraj (pmap == kernel_pmap), pdir_idx); 639176771Sraj 640176771Sraj KASSERT((pdir_idx <= (VM_MAXUSER_ADDRESS / PDIR_SIZE)), 641176771Sraj ("ptbl_unhold: invalid pdir_idx")); 642176771Sraj KASSERT((pmap != kernel_pmap), 643176771Sraj ("ptbl_unhold: unholding kernel ptbl!")); 644176771Sraj 645176771Sraj ptbl = pmap->pm_pdir[pdir_idx]; 646176771Sraj 647176771Sraj //debugf("ptbl_unhold: ptbl = 0x%08x\n", (u_int32_t)ptbl); 648176771Sraj KASSERT(((vm_offset_t)ptbl >= VM_MIN_KERNEL_ADDRESS), 649176771Sraj ("ptbl_unhold: non kva ptbl")); 650176771Sraj 651176771Sraj /* decrement hold count */ 652176771Sraj for (i = 0; i < PTBL_PAGES; i++) { 653187151Sraj pa = pte_vatopa(mmu, kernel_pmap, 654187151Sraj (vm_offset_t)ptbl + (i * PAGE_SIZE)); 655176771Sraj m = PHYS_TO_VM_PAGE(pa); 656176771Sraj m->wire_count--; 657176771Sraj } 658176771Sraj 659176771Sraj /* 660176771Sraj * Free ptbl pages if there are no pte etries in this ptbl. 661187151Sraj * wire_count has the same value for all ptbl pages, so check the last 662187151Sraj * page. 663176771Sraj */ 664176771Sraj if (m->wire_count == 0) { 665176771Sraj ptbl_free(mmu, pmap, pdir_idx); 666176771Sraj 667176771Sraj //debugf("ptbl_unhold: e (freed ptbl)\n"); 668176771Sraj return (1); 669176771Sraj } 670176771Sraj 671176771Sraj return (0); 672176771Sraj} 673176771Sraj 674176771Sraj/* 675187151Sraj * Increment hold count for ptbl pages. This routine is used when a new pte 676187151Sraj * entry is being inserted into the ptbl. 
677176771Sraj */ 678176771Srajstatic void 679176771Srajptbl_hold(mmu_t mmu, pmap_t pmap, unsigned int pdir_idx) 680176771Sraj{ 681176771Sraj vm_paddr_t pa; 682176771Sraj pte_t *ptbl; 683176771Sraj vm_page_t m; 684176771Sraj int i; 685176771Sraj 686187151Sraj CTR3(KTR_PMAP, "%s: pmap = %p pdir_idx = %d", __func__, pmap, 687187151Sraj pdir_idx); 688176771Sraj 689176771Sraj KASSERT((pdir_idx <= (VM_MAXUSER_ADDRESS / PDIR_SIZE)), 690176771Sraj ("ptbl_hold: invalid pdir_idx")); 691176771Sraj KASSERT((pmap != kernel_pmap), 692176771Sraj ("ptbl_hold: holding kernel ptbl!")); 693176771Sraj 694176771Sraj ptbl = pmap->pm_pdir[pdir_idx]; 695176771Sraj 696176771Sraj KASSERT((ptbl != NULL), ("ptbl_hold: null ptbl")); 697176771Sraj 698176771Sraj for (i = 0; i < PTBL_PAGES; i++) { 699187151Sraj pa = pte_vatopa(mmu, kernel_pmap, 700187151Sraj (vm_offset_t)ptbl + (i * PAGE_SIZE)); 701176771Sraj m = PHYS_TO_VM_PAGE(pa); 702176771Sraj m->wire_count++; 703176771Sraj } 704176771Sraj} 705176771Sraj 706176771Sraj/* Allocate pv_entry structure. */ 707176771Srajpv_entry_t 708176771Srajpv_alloc(void) 709176771Sraj{ 710176771Sraj pv_entry_t pv; 711176771Sraj 712176771Sraj pv_entry_count++; 713194123Salc if (pv_entry_count > pv_entry_high_water) 714194123Salc pagedaemon_wakeup(); 715176771Sraj pv = uma_zalloc(pvzone, M_NOWAIT); 716176771Sraj 717176771Sraj return (pv); 718176771Sraj} 719176771Sraj 720176771Sraj/* Free pv_entry structure. */ 721176771Srajstatic __inline void 722176771Srajpv_free(pv_entry_t pve) 723176771Sraj{ 724176771Sraj 725176771Sraj pv_entry_count--; 726176771Sraj uma_zfree(pvzone, pve); 727176771Sraj} 728176771Sraj 729176771Sraj 730176771Sraj/* Allocate and initialize pv_entry structure. */ 731176771Srajstatic void 732176771Srajpv_insert(pmap_t pmap, vm_offset_t va, vm_page_t m) 733176771Sraj{ 734176771Sraj pv_entry_t pve; 735176771Sraj 736176771Sraj //int su = (pmap == kernel_pmap); 737176771Sraj //debugf("pv_insert: s (su = %d pmap = 0x%08x va = 0x%08x m = 0x%08x)\n", su, 738176771Sraj // (u_int32_t)pmap, va, (u_int32_t)m); 739176771Sraj 740176771Sraj pve = pv_alloc(); 741176771Sraj if (pve == NULL) 742176771Sraj panic("pv_insert: no pv entries!"); 743176771Sraj 744176771Sraj pve->pv_pmap = pmap; 745176771Sraj pve->pv_va = va; 746176771Sraj 747176771Sraj /* add to pv_list */ 748176771Sraj PMAP_LOCK_ASSERT(pmap, MA_OWNED); 749176771Sraj mtx_assert(&vm_page_queue_mtx, MA_OWNED); 750176771Sraj 751176771Sraj TAILQ_INSERT_TAIL(&m->md.pv_list, pve, pv_link); 752176771Sraj 753176771Sraj //debugf("pv_insert: e\n"); 754176771Sraj} 755176771Sraj 756176771Sraj/* Destroy pv entry. 
/* Destroy pv entry. */
static void
pv_remove(pmap_t pmap, vm_offset_t va, vm_page_t m)
{
	pv_entry_t pve;

	//int su = (pmap == kernel_pmap);
	//debugf("pv_remove: s (su = %d pmap = 0x%08x va = 0x%08x)\n", su, (u_int32_t)pmap, va);

	PMAP_LOCK_ASSERT(pmap, MA_OWNED);
	mtx_assert(&vm_page_queue_mtx, MA_OWNED);

	/* find pv entry */
	TAILQ_FOREACH(pve, &m->md.pv_list, pv_link) {
		if ((pmap == pve->pv_pmap) && (va == pve->pv_va)) {
			/* remove from pv_list */
			TAILQ_REMOVE(&m->md.pv_list, pve, pv_link);
			if (TAILQ_EMPTY(&m->md.pv_list))
				vm_page_flag_clear(m, PG_WRITEABLE);

			/* free pv entry struct */
			pv_free(pve);
			break;
		}
	}

	//debugf("pv_remove: e\n");
}

/*
 * Clean pte entry, try to free page table page if requested.
 *
 * Return 1 if ptbl pages were freed, otherwise return 0.
 */
static int
pte_remove(mmu_t mmu, pmap_t pmap, vm_offset_t va, uint8_t flags)
{
	unsigned int pdir_idx = PDIR_IDX(va);
	unsigned int ptbl_idx = PTBL_IDX(va);
	vm_page_t m;
	pte_t *ptbl;
	pte_t *pte;

	//int su = (pmap == kernel_pmap);
	//debugf("pte_remove: s (su = %d pmap = 0x%08x va = 0x%08x flags = %d)\n",
	//		su, (u_int32_t)pmap, va, flags);

	ptbl = pmap->pm_pdir[pdir_idx];
	KASSERT(ptbl, ("pte_remove: null ptbl"));

	pte = &ptbl[ptbl_idx];

	if (pte == NULL || !PTE_ISVALID(pte))
		return (0);

	if (PTE_ISWIRED(pte))
		pmap->pm_stats.wired_count--;

	/* Handle managed entry. */
	if (PTE_ISMANAGED(pte)) {
		/* Get vm_page_t for mapped pte. */
		m = PHYS_TO_VM_PAGE(PTE_PA(pte));

		if (PTE_ISMODIFIED(pte))
			vm_page_dirty(m);

		if (PTE_ISREFERENCED(pte))
			vm_page_flag_set(m, PG_REFERENCED);

		pv_remove(pmap, va, m);
	}

	mtx_lock_spin(&tlbivax_mutex);
	tlb_miss_lock();

	tlb0_flush_entry(va);
	pte->flags = 0;
	pte->rpn = 0;

	tlb_miss_unlock();
	mtx_unlock_spin(&tlbivax_mutex);

	pmap->pm_stats.resident_count--;

	if (flags & PTBL_UNHOLD) {
		//debugf("pte_remove: e (unhold)\n");
		return (ptbl_unhold(mmu, pmap, pdir_idx));
	}

	//debugf("pte_remove: e\n");
	return (0);
}
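/*
 * Note the ordering above: the TLB0 entry is flushed while tlbivax_mutex
 * and the other CPUs' tlb miss locks are held, so no CPU can refill its
 * TLB from a half-cleared PTE.
 */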
/*
 * Insert PTE for a given page and virtual address.
 */
static void
pte_enter(mmu_t mmu, pmap_t pmap, vm_page_t m, vm_offset_t va, uint32_t flags)
{
	unsigned int pdir_idx = PDIR_IDX(va);
	unsigned int ptbl_idx = PTBL_IDX(va);
	pte_t *ptbl, *pte;

	CTR4(KTR_PMAP, "%s: su = %d pmap = %p va = %p", __func__,
	    pmap == kernel_pmap, pmap, va);

	/* Get the page table pointer. */
	ptbl = pmap->pm_pdir[pdir_idx];

	if (ptbl == NULL) {
		/* Allocate page table pages. */
		ptbl = ptbl_alloc(mmu, pmap, pdir_idx);
	} else {
		/*
		 * Check if there is valid mapping for requested
		 * va, if there is, remove it.
		 */
		pte = &pmap->pm_pdir[pdir_idx][ptbl_idx];
		if (PTE_ISVALID(pte)) {
			pte_remove(mmu, pmap, va, PTBL_HOLD);
		} else {
			/*
			 * pte is not used, increment hold count
			 * for ptbl pages.
			 */
			if (pmap != kernel_pmap)
				ptbl_hold(mmu, pmap, pdir_idx);
		}
	}

	/*
	 * Insert pv_entry into pv_list for mapped page if part of managed
	 * memory.
	 */
	if ((m->flags & PG_FICTITIOUS) == 0) {
		if ((m->flags & PG_UNMANAGED) == 0) {
			flags |= PTE_MANAGED;

			/* Create and insert pv entry. */
			pv_insert(pmap, va, m);
		}
	}

	pmap->pm_stats.resident_count++;

	mtx_lock_spin(&tlbivax_mutex);
	tlb_miss_lock();

	tlb0_flush_entry(va);
	if (pmap->pm_pdir[pdir_idx] == NULL) {
		/*
		 * If we just allocated a new page table, hook it in
		 * the pdir.
		 */
		pmap->pm_pdir[pdir_idx] = ptbl;
	}
	pte = &(pmap->pm_pdir[pdir_idx][ptbl_idx]);
	pte->rpn = VM_PAGE_TO_PHYS(m) & ~PTE_PA_MASK;
	pte->flags |= (PTE_VALID | flags);

	tlb_miss_unlock();
	mtx_unlock_spin(&tlbivax_mutex);
}

/* Return the pa for the given pmap/va. */
static vm_paddr_t
pte_vatopa(mmu_t mmu, pmap_t pmap, vm_offset_t va)
{
	vm_paddr_t pa = 0;
	pte_t *pte;

	pte = pte_find(mmu, pmap, va);
	if ((pte != NULL) && PTE_ISVALID(pte))
		pa = (PTE_PA(pte) | (va & PTE_PA_MASK));
	return (pa);
}
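/*
 * Virtual addresses are resolved in two steps throughout this file:
 * PDIR_IDX(va) selects the page table within a pmap's pdir and
 * PTBL_IDX(va) selects the pte within that table, as pte_find()
 * below illustrates.
 */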
/* Get a pointer to a PTE in a page table. */
static pte_t *
pte_find(mmu_t mmu, pmap_t pmap, vm_offset_t va)
{
	unsigned int pdir_idx = PDIR_IDX(va);
	unsigned int ptbl_idx = PTBL_IDX(va);

	KASSERT((pmap != NULL), ("pte_find: invalid pmap"));

	if (pmap->pm_pdir[pdir_idx])
		return (&(pmap->pm_pdir[pdir_idx][ptbl_idx]));

	return (NULL);
}

/**************************************************************************/
/* PMAP related */
/**************************************************************************/

/*
 * This is called during e500_init, before the system is really initialized.
 */
static void
mmu_booke_bootstrap(mmu_t mmu, vm_offset_t start, vm_offset_t kernelend)
{
	vm_offset_t phys_kernelend;
	struct mem_region *mp, *mp1;
	int cnt, i, j;
	u_int s, e, sz;
	u_int phys_avail_count;
	vm_size_t physsz, hwphyssz, kstack0_sz;
	vm_offset_t kernel_pdir, kstack0, va;
	vm_paddr_t kstack0_phys;
	void *dpcpu;
	pte_t *pte;

	debugf("mmu_booke_bootstrap: entered\n");

	/* Initialize invalidation mutex */
	mtx_init(&tlbivax_mutex, "tlbivax", NULL, MTX_SPIN);

	/* Read TLB0 size and associativity. */
	tlb0_get_tlbconf();

	/* Align kernel start and end address (kernel image). */
	kernstart = trunc_page(start);
	data_start = round_page(kernelend);
	kernsize = data_start - kernstart;

	data_end = data_start;

	/* Allocate space for the message buffer. */
	msgbufp = (struct msgbuf *)data_end;
	data_end += MSGBUF_SIZE;
	debugf(" msgbufp at 0x%08x end = 0x%08x\n", (uint32_t)msgbufp,
	    data_end);

	data_end = round_page(data_end);

	/* Allocate the dynamic per-cpu area. */
	dpcpu = (void *)data_end;
	data_end += DPCPU_SIZE;
	dpcpu_init(dpcpu, 0);

	/* Allocate space for ptbl_bufs. */
	ptbl_bufs = (struct ptbl_buf *)data_end;
	data_end += sizeof(struct ptbl_buf) * PTBL_BUFS;
	debugf(" ptbl_bufs at 0x%08x end = 0x%08x\n", (uint32_t)ptbl_bufs,
	    data_end);

	data_end = round_page(data_end);
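	/*
	 * At this point the msgbuf, the dynamic per-cpu area and the
	 * ptbl_bufs array have all been carved out of the memory directly
	 * following the kernel image, each allocation bumping data_end.
	 */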
	/* Allocate PTE tables for kernel KVA. */
	kernel_pdir = data_end;
	kernel_ptbls = (VM_MAX_KERNEL_ADDRESS - VM_MIN_KERNEL_ADDRESS +
	    PDIR_SIZE - 1) / PDIR_SIZE;
	data_end += kernel_ptbls * PTBL_PAGES * PAGE_SIZE;
	debugf(" kernel ptbls: %d\n", kernel_ptbls);
	debugf(" kernel pdir at 0x%08x end = 0x%08x\n", kernel_pdir, data_end);

	debugf(" data_end: 0x%08x\n", data_end);
	if (data_end - kernstart > 0x1000000) {
		data_end = (data_end + 0x3fffff) & ~0x3fffff;
		tlb1_mapin_region(kernstart + 0x1000000,
		    kernload + 0x1000000, data_end - kernstart - 0x1000000);
	} else
		data_end = (data_end + 0xffffff) & ~0xffffff;

	debugf(" updated data_end: 0x%08x\n", data_end);

	kernsize += data_end - data_start;

	/*
	 * Clear the structures - note we can only do it safely after the
	 * possible additional TLB1 translations are in place (above) so that
	 * the whole range up to the currently calculated 'data_end' is
	 * covered.
	 */
	memset((void *)ptbl_bufs, 0, sizeof(struct ptbl_buf) * PTBL_BUFS);
	memset((void *)kernel_pdir, 0, kernel_ptbls * PTBL_PAGES * PAGE_SIZE);

	/*******************************************************/
	/* Set the start and end of kva. */
	/*******************************************************/
	virtual_avail = round_page(data_end);
	virtual_end = VM_MAX_KERNEL_ADDRESS;

	/* Allocate KVA space for page zero/copy operations. */
	zero_page_va = virtual_avail;
	virtual_avail += PAGE_SIZE;
	zero_page_idle_va = virtual_avail;
	virtual_avail += PAGE_SIZE;
	copy_page_src_va = virtual_avail;
	virtual_avail += PAGE_SIZE;
	copy_page_dst_va = virtual_avail;
	virtual_avail += PAGE_SIZE;
	debugf("zero_page_va = 0x%08x\n", zero_page_va);
	debugf("zero_page_idle_va = 0x%08x\n", zero_page_idle_va);
	debugf("copy_page_src_va = 0x%08x\n", copy_page_src_va);
	debugf("copy_page_dst_va = 0x%08x\n", copy_page_dst_va);

	/* Initialize page zero/copy mutexes. */
	mtx_init(&zero_page_mutex, "mmu_booke_zero_page", NULL, MTX_DEF);
	mtx_init(&copy_page_mutex, "mmu_booke_copy_page", NULL, MTX_DEF);

	/* Allocate KVA space for ptbl bufs. */
	ptbl_buf_pool_vabase = virtual_avail;
	virtual_avail += PTBL_BUFS * PTBL_PAGES * PAGE_SIZE;
	debugf("ptbl_buf_pool_vabase = 0x%08x end = 0x%08x\n",
	    ptbl_buf_pool_vabase, virtual_avail);
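	/*
	 * virtual_avail now points just past the fixed KVA reservations
	 * (page zero/copy windows and the ptbl buf pool), matching the
	 * "VM layout notes" diagram at the top of this file.
	 */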
	/* Calculate corresponding physical addresses for the kernel region. */
	phys_kernelend = kernload + kernsize;
	debugf("kernel image and allocated data:\n");
	debugf(" kernload = 0x%08x\n", kernload);
	debugf(" kernstart = 0x%08x\n", kernstart);
	debugf(" kernsize = 0x%08x\n", kernsize);

	if (sizeof(phys_avail) / sizeof(phys_avail[0]) < availmem_regions_sz)
		panic("mmu_booke_bootstrap: phys_avail too small");

	/*
	 * Remove kernel physical address range from avail regions list. Page
	 * align all regions. Non-page aligned memory isn't very interesting
	 * to us. Also, sort the entries for ascending addresses.
	 */

	/* Retrieve phys/avail mem regions */
	mem_regions(&physmem_regions, &physmem_regions_sz,
	    &availmem_regions, &availmem_regions_sz);
	sz = 0;
	cnt = availmem_regions_sz;
	debugf("processing avail regions:\n");
	for (mp = availmem_regions; mp->mr_size; mp++) {
		s = mp->mr_start;
		e = mp->mr_start + mp->mr_size;
		debugf(" %08x-%08x -> ", s, e);
		/* Check whether this region holds all of the kernel. */
		if (s < kernload && e > phys_kernelend) {
			availmem_regions[cnt].mr_start = phys_kernelend;
			availmem_regions[cnt++].mr_size = e - phys_kernelend;
			e = kernload;
		}
		/* Look whether this region starts within the kernel. */
		if (s >= kernload && s < phys_kernelend) {
			if (e <= phys_kernelend)
				goto empty;
			s = phys_kernelend;
		}
		/* Now look whether this region ends within the kernel. */
		if (e > kernload && e <= phys_kernelend) {
			if (s >= kernload)
				goto empty;
			e = kernload;
		}
		/* Now page align the start and size of the region. */
		s = round_page(s);
		e = trunc_page(e);
		if (e < s)
			e = s;
		sz = e - s;
		debugf("%08x-%08x = %x\n", s, e, sz);

		/* Check whether some memory is left here. */
		if (sz == 0) {
		empty:
			memmove(mp, mp + 1,
			    (cnt - (mp - availmem_regions)) * sizeof(*mp));
			cnt--;
			mp--;
			continue;
		}
		/* Do an insertion sort. */
		for (mp1 = availmem_regions; mp1 < mp; mp1++)
			if (s < mp1->mr_start)
				break;
		if (mp1 < mp) {
			memmove(mp1 + 1, mp1, (char *)mp - (char *)mp1);
			mp1->mr_start = s;
			mp1->mr_size = sz;
		} else {
			mp->mr_start = s;
			mp->mr_size = sz;
		}
	}
	availmem_regions_sz = cnt;

	/*******************************************************/
	/* Steal physical memory for kernel stack from the end */
	/* of the first avail region                           */
	/*******************************************************/
	kstack0_sz = KSTACK_PAGES * PAGE_SIZE;
	kstack0_phys = availmem_regions[0].mr_start +
	    availmem_regions[0].mr_size;
	kstack0_phys -= kstack0_sz;
	availmem_regions[0].mr_size -= kstack0_sz;

	/*******************************************************/
	/* Fill in phys_avail table, based on availmem_regions */
	/*******************************************************/
	phys_avail_count = 0;
	physsz = 0;
	hwphyssz = 0;
	TUNABLE_ULONG_FETCH("hw.physmem", (u_long *) &hwphyssz);

	debugf("fill in phys_avail:\n");
	for (i = 0, j = 0; i < availmem_regions_sz; i++, j += 2) {

		debugf(" region: 0x%08x - 0x%08x (0x%08x)\n",
		    availmem_regions[i].mr_start,
		    availmem_regions[i].mr_start +
		    availmem_regions[i].mr_size,
		    availmem_regions[i].mr_size);

		if (hwphyssz != 0 &&
		    (physsz + availmem_regions[i].mr_size) >= hwphyssz) {
			debugf(" hw.physmem adjust\n");
			if (physsz < hwphyssz) {
				phys_avail[j] = availmem_regions[i].mr_start;
				phys_avail[j + 1] =
				    availmem_regions[i].mr_start +
				    hwphyssz - physsz;
				physsz = hwphyssz;
				phys_avail_count++;
			}
			break;
		}

		phys_avail[j] = availmem_regions[i].mr_start;
		phys_avail[j + 1] = availmem_regions[i].mr_start +
		    availmem_regions[i].mr_size;
		phys_avail_count++;
		physsz += availmem_regions[i].mr_size;
	}
	physmem = btoc(physsz);

	/* Calculate the last available physical address. */
	for (i = 0; phys_avail[i + 2] != 0; i += 2)
		;
	Maxmem = powerpc_btop(phys_avail[i + 1]);

	debugf("Maxmem = 0x%08lx\n", Maxmem);
	debugf("phys_avail_count = %d\n", phys_avail_count);
	debugf("physsz = 0x%08x physmem = %ld (0x%08lx)\n", physsz, physmem,
	    physmem);
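	/*
	 * phys_avail[] is laid out as {start0, end0, start1, end1, ...}
	 * pairs, which is why the index j advances by two per region in
	 * the loop above.
	 */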
	/*******************************************************/
	/* Initialize (statically allocated) kernel pmap. */
	/*******************************************************/
	PMAP_LOCK_INIT(kernel_pmap);
	kptbl_min = VM_MIN_KERNEL_ADDRESS / PDIR_SIZE;

	debugf("kernel_pmap = 0x%08x\n", (uint32_t)kernel_pmap);
	debugf("kptbl_min = %d, kernel_ptbls = %d\n", kptbl_min, kernel_ptbls);
	debugf("kernel pdir range: 0x%08x - 0x%08x\n",
	    kptbl_min * PDIR_SIZE, (kptbl_min + kernel_ptbls) * PDIR_SIZE - 1);

	/* Initialize kernel pdir */
	for (i = 0; i < kernel_ptbls; i++)
		kernel_pmap->pm_pdir[kptbl_min + i] =
		    (pte_t *)(kernel_pdir + (i * PAGE_SIZE * PTBL_PAGES));

	for (i = 0; i < MAXCPU; i++) {
		kernel_pmap->pm_tid[i] = TID_KERNEL;

		/* Initialize each CPU's tidbusy entry 0 with kernel_pmap */
		tidbusy[i][0] = kernel_pmap;
	}

	/*
	 * Fill in PTEs covering kernel code and data. They are not required
	 * for address translation, as this area is covered by static TLB1
	 * entries, but for pte_vatopa() to work correctly with kernel area
	 * addresses.
	 */
	for (va = KERNBASE; va < data_end; va += PAGE_SIZE) {
		pte = &(kernel_pmap->pm_pdir[PDIR_IDX(va)][PTBL_IDX(va)]);
		pte->rpn = kernload + (va - KERNBASE);
		pte->flags = PTE_M | PTE_SR | PTE_SW | PTE_SX | PTE_WIRED |
		    PTE_VALID;
	}
	/* Mark kernel_pmap active on all CPUs */
	kernel_pmap->pm_active = ~0;

	/*******************************************************/
	/* Final setup */
	/*******************************************************/

	/* Enter kstack0 into kernel map, provide guard page */
	kstack0 = virtual_avail + KSTACK_GUARD_PAGES * PAGE_SIZE;
	thread0.td_kstack = kstack0;
	thread0.td_kstack_pages = KSTACK_PAGES;

	debugf("kstack_sz = 0x%08x\n", kstack0_sz);
	debugf("kstack0_phys at 0x%08x - 0x%08x\n",
	    kstack0_phys, kstack0_phys + kstack0_sz);
	debugf("kstack0 at 0x%08x - 0x%08x\n", kstack0, kstack0 + kstack0_sz);

	virtual_avail += KSTACK_GUARD_PAGES * PAGE_SIZE + kstack0_sz;
	for (i = 0; i < KSTACK_PAGES; i++) {
		mmu_booke_kenter(mmu, kstack0, kstack0_phys);
		kstack0 += PAGE_SIZE;
		kstack0_phys += PAGE_SIZE;
	}

	debugf("virtual_avail = %08x\n", virtual_avail);
	debugf("virtual_end = %08x\n", virtual_end);

	debugf("mmu_booke_bootstrap: exit\n");
}

void
pmap_bootstrap_ap(volatile uint32_t *trcp __unused)
{
	int i;

	/*
	 * Finish TLB1 configuration: the BSP already set up its TLB1 and we
	 * have the snapshot of its contents in the s/w tlb1[] table, so use
	 * these values directly to (re)program AP's TLB1 hardware.
	 */
	for (i = 0; i < tlb1_idx; i++) {
		/* Skip invalid entries */
		if (!(tlb1[i].mas1 & MAS1_VALID))
			continue;

		tlb1_write_entry(i);
	}

	set_mas4_defaults();
}

/*
 * Get the physical page address for the given pmap/virtual address.
 */
static vm_paddr_t
mmu_booke_extract(mmu_t mmu, pmap_t pmap, vm_offset_t va)
{
	vm_paddr_t pa;

	PMAP_LOCK(pmap);
	pa = pte_vatopa(mmu, pmap, va);
	PMAP_UNLOCK(pmap);

	return (pa);
}

/*
 * Extract the physical page address associated with the given
 * kernel virtual address.
 */
static vm_paddr_t
mmu_booke_kextract(mmu_t mmu, vm_offset_t va)
{

	return (pte_vatopa(mmu, kernel_pmap, va));
}

/*
 * Initialize the pmap module.
 * Called by vm_init, to initialize any structures that the pmap
 * system needs to map virtual memory.
 */
static void
mmu_booke_init(mmu_t mmu)
{
	int shpgperproc = PMAP_SHPGPERPROC;

	/*
	 * Initialize the address space (zone) for the pv entries. Set a
	 * high water mark so that the system can recover from excessive
	 * numbers of pv entries.
	 */
	pvzone = uma_zcreate("PV ENTRY", sizeof(struct pv_entry), NULL, NULL,
	    NULL, NULL, UMA_ALIGN_PTR, UMA_ZONE_VM | UMA_ZONE_NOFREE);

	TUNABLE_INT_FETCH("vm.pmap.shpgperproc", &shpgperproc);
	pv_entry_max = shpgperproc * maxproc + cnt.v_page_count;

	TUNABLE_INT_FETCH("vm.pmap.pv_entries", &pv_entry_max);
	pv_entry_high_water = 9 * (pv_entry_max / 10);

	uma_zone_set_obj(pvzone, &pvzone_obj, pv_entry_max);

	/* Pre-fill pvzone with initial number of pv entries. */
	uma_prealloc(pvzone, PV_ENTRY_ZONE_MIN);

	/* Initialize ptbl allocation. */
	ptbl_init();
}

/*
 * Map a list of wired pages into kernel virtual address space. This is
 * intended for temporary mappings which do not need page modification or
 * references recorded. Existing mappings in the region are overwritten.
 */
static void
mmu_booke_qenter(mmu_t mmu, vm_offset_t sva, vm_page_t *m, int count)
{
	vm_offset_t va;

	va = sva;
	while (count-- > 0) {
		mmu_booke_kenter(mmu, va, VM_PAGE_TO_PHYS(*m));
		va += PAGE_SIZE;
		m++;
	}
}

/*
 * Remove page mappings from kernel virtual address space. Intended for
 * temporary mappings entered by mmu_booke_qenter.
 */
 */
static void
mmu_booke_qremove(mmu_t mmu, vm_offset_t sva, int count)
{
	vm_offset_t va;

	va = sva;
	while (count-- > 0) {
		mmu_booke_kremove(mmu, va);
		va += PAGE_SIZE;
	}
}

/*
 * Map a wired page into kernel virtual address space.
 */
static void
mmu_booke_kenter(mmu_t mmu, vm_offset_t va, vm_offset_t pa)
{
	unsigned int pdir_idx = PDIR_IDX(va);
	unsigned int ptbl_idx = PTBL_IDX(va);
	uint32_t flags;
	pte_t *pte;

	KASSERT(((va >= VM_MIN_KERNEL_ADDRESS) &&
	    (va <= VM_MAX_KERNEL_ADDRESS)), ("mmu_booke_kenter: invalid va"));

	flags = 0;
	flags |= (PTE_SR | PTE_SW | PTE_SX | PTE_WIRED | PTE_VALID);
	flags |= PTE_M;

	pte = &(kernel_pmap->pm_pdir[pdir_idx][ptbl_idx]);

	mtx_lock_spin(&tlbivax_mutex);
	tlb_miss_lock();

	if (PTE_ISVALID(pte)) {

		CTR1(KTR_PMAP, "%s: replacing entry!", __func__);

		/* Flush entry from TLB0 */
		tlb0_flush_entry(va);
	}

	pte->rpn = pa & ~PTE_PA_MASK;
	pte->flags = flags;

	//debugf("mmu_booke_kenter: pdir_idx = %d ptbl_idx = %d va=0x%08x "
	//    "pa=0x%08x rpn=0x%08x flags=0x%08x\n",
	//    pdir_idx, ptbl_idx, va, pa, pte->rpn, pte->flags);

	/* Flush the real memory from the instruction cache. */
	if ((flags & (PTE_I | PTE_G)) == 0) {
		__syncicache((void *)va, PAGE_SIZE);
	}

	tlb_miss_unlock();
	mtx_unlock_spin(&tlbivax_mutex);
}

/*
 * Remove a page from kernel page table.
 */
static void
mmu_booke_kremove(mmu_t mmu, vm_offset_t va)
{
	unsigned int pdir_idx = PDIR_IDX(va);
	unsigned int ptbl_idx = PTBL_IDX(va);
	pte_t *pte;

//	CTR2(KTR_PMAP, "%s: s (va = 0x%08x)\n", __func__, va);

	KASSERT(((va >= VM_MIN_KERNEL_ADDRESS) &&
	    (va <= VM_MAX_KERNEL_ADDRESS)),
	    ("mmu_booke_kremove: invalid va"));

	pte = &(kernel_pmap->pm_pdir[pdir_idx][ptbl_idx]);

	if (!PTE_ISVALID(pte)) {

		CTR1(KTR_PMAP, "%s: invalid pte", __func__);

		return;
	}

	mtx_lock_spin(&tlbivax_mutex);
	tlb_miss_lock();

	/* Invalidate entry in TLB0, update PTE. */
	tlb0_flush_entry(va);
	pte->flags = 0;
	pte->rpn = 0;

	tlb_miss_unlock();
	mtx_unlock_spin(&tlbivax_mutex);
}

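/*
 * Illustrative usage sketch (editor's note, not part of the original code):
 * the kenter/kremove pair above is the primitive behind the short-lived
 * kernel mappings used later in this file, e.g. by mmu_booke_zero_page_area():
 *
 *	mmu_booke_kenter(mmu, va, VM_PAGE_TO_PHYS(m));	// wire page at va
 *	bzero((caddr_t)va, PAGE_SIZE);			// access through va
 *	mmu_booke_kremove(mmu, va);			// tear the mapping down
 *
 * No pv entries are maintained for such mappings, so they must never be
 * handed to code that expects managed pages.
 */
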
/*
 * Initialize pmap associated with process 0.
 */
static void
mmu_booke_pinit0(mmu_t mmu, pmap_t pmap)
{

	mmu_booke_pinit(mmu, pmap);
	PCPU_SET(curpmap, pmap);
}

/*
 * Initialize a preallocated and zeroed pmap structure,
 * such as one in a vmspace structure.
 */
static void
mmu_booke_pinit(mmu_t mmu, pmap_t pmap)
{
	int i;

	CTR4(KTR_PMAP, "%s: pmap = %p, proc %d '%s'", __func__, pmap,
	    curthread->td_proc->p_pid, curthread->td_proc->p_comm);

	KASSERT((pmap != kernel_pmap), ("pmap_pinit: initializing kernel_pmap"));

	PMAP_LOCK_INIT(pmap);
	for (i = 0; i < MAXCPU; i++)
		pmap->pm_tid[i] = TID_NONE;
	pmap->pm_active = 0;
	bzero(&pmap->pm_stats, sizeof(pmap->pm_stats));
	bzero(&pmap->pm_pdir, sizeof(pte_t *) * PDIR_NENTRIES);
	TAILQ_INIT(&pmap->pm_ptbl_list);
}

/*
 * Release any resources held by the given physical map.
 * Called when a pmap initialized by mmu_booke_pinit is being released.
 * Should only be called if the map contains no valid mappings.
 */
static void
mmu_booke_release(mmu_t mmu, pmap_t pmap)
{

	printf("mmu_booke_release: s\n");

	KASSERT(pmap->pm_stats.resident_count == 0,
	    ("pmap_release: pmap resident count %ld != 0",
	    pmap->pm_stats.resident_count));

	PMAP_LOCK_DESTROY(pmap);
}

/*
 * Insert the given physical page at the specified virtual address in the
 * target physical map with the protection requested. If specified the page
 * will be wired down.
 */
static void
mmu_booke_enter(mmu_t mmu, pmap_t pmap, vm_offset_t va, vm_page_t m,
    vm_prot_t prot, boolean_t wired)
{

	vm_page_lock_queues();
	PMAP_LOCK(pmap);
	mmu_booke_enter_locked(mmu, pmap, va, m, prot, wired);
	vm_page_unlock_queues();
	PMAP_UNLOCK(pmap);
}

static void
mmu_booke_enter_locked(mmu_t mmu, pmap_t pmap, vm_offset_t va, vm_page_t m,
    vm_prot_t prot, boolean_t wired)
{
	pte_t *pte;
	vm_paddr_t pa;
	uint32_t flags;
	int su, sync;

	pa = VM_PAGE_TO_PHYS(m);
	su = (pmap == kernel_pmap);
	sync = 0;

	//debugf("mmu_booke_enter_locked: s (pmap=0x%08x su=%d tid=%d m=0x%08x va=0x%08x "
	//    "pa=0x%08x prot=0x%08x wired=%d)\n",
	//    (u_int32_t)pmap, su, pmap->pm_tid,
	//    (u_int32_t)m, va, pa, prot, wired);

	if (su) {
		KASSERT(((va >= virtual_avail) &&
		    (va <= VM_MAX_KERNEL_ADDRESS)),
		    ("mmu_booke_enter_locked: kernel pmap, non kernel va"));
	} else {
		KASSERT((va <= VM_MAXUSER_ADDRESS),
		    ("mmu_booke_enter_locked: user pmap, non user va"));
	}

	PMAP_LOCK_ASSERT(pmap, MA_OWNED);

	/*
	 * If there is an existing mapping, and the physical address has not
	 * changed, it must be a protection or wiring change.
	 */
	if (((pte = pte_find(mmu, pmap, va)) != NULL) &&
	    (PTE_ISVALID(pte)) && (PTE_PA(pte) == pa)) {

		/*
		 * Before actually updating pte->flags we calculate and
		 * prepare its new value in a helper var.
		 */
		flags = pte->flags;
		flags &= ~(PTE_UW | PTE_UX | PTE_SW | PTE_SX | PTE_MODIFIED);

		/* Wiring change, just update stats. */
		if (wired) {
			if (!PTE_ISWIRED(pte)) {
				flags |= PTE_WIRED;
				pmap->pm_stats.wired_count++;
			}
		} else {
			if (PTE_ISWIRED(pte)) {
				flags &= ~PTE_WIRED;
				pmap->pm_stats.wired_count--;
			}
		}

		if (prot & VM_PROT_WRITE) {
			/* Add write permissions. */
			flags |= PTE_SW;
			if (!su)
				flags |= PTE_UW;

			vm_page_flag_set(m, PG_WRITEABLE);
		} else {
			/* Handle modified pages, sense modify status. */

			/*
			 * The PTE_MODIFIED flag could be set by underlying
			 * TLB misses since we last read it (above), possibly
			 * other CPUs could update it so we check in the PTE
			 * directly rather than rely on that saved local flags
			 * copy.
			 */
			if (PTE_ISMODIFIED(pte))
				vm_page_dirty(m);
		}

		if (prot & VM_PROT_EXECUTE) {
			flags |= PTE_SX;
			if (!su)
				flags |= PTE_UX;

			/*
			 * Check existing flags for execute permissions: if we
			 * are turning execute permissions on, icache should
			 * be flushed.
			 */
			if ((flags & (PTE_UX | PTE_SX)) == 0)
				sync++;
		}

		flags &= ~PTE_REFERENCED;

		/*
		 * The new flags value is all calculated -- only now actually
		 * update the PTE.
		 */
		mtx_lock_spin(&tlbivax_mutex);
		tlb_miss_lock();

		tlb0_flush_entry(va);
		pte->flags = flags;

		tlb_miss_unlock();
		mtx_unlock_spin(&tlbivax_mutex);

	} else {
		/*
		 * If there is an existing mapping, but it's for a different
		 * physical address, pte_enter() will delete the old mapping.
		 */
		//if ((pte != NULL) && PTE_ISVALID(pte))
		//	debugf("mmu_booke_enter_locked: replace\n");
		//else
		//	debugf("mmu_booke_enter_locked: new\n");

		/* Now set up the flags and install the new mapping. */
		flags = (PTE_SR | PTE_VALID);
		flags |= PTE_M;

		if (!su)
			flags |= PTE_UR;

		if (prot & VM_PROT_WRITE) {
			flags |= PTE_SW;
			if (!su)
				flags |= PTE_UW;

			vm_page_flag_set(m, PG_WRITEABLE);
		}

		if (prot & VM_PROT_EXECUTE) {
			flags |= PTE_SX;
			if (!su)
				flags |= PTE_UX;
		}

		/* If it's wired, update stats. */
		if (wired) {
			pmap->pm_stats.wired_count++;
			flags |= PTE_WIRED;
		}

		pte_enter(mmu, pmap, m, va, flags);

		/* Flush the real memory from the instruction cache. */
		if (prot & VM_PROT_EXECUTE)
			sync++;
	}

	if (sync && (su || pmap == PCPU_GET(curpmap))) {
		__syncicache((void *)va, PAGE_SIZE);
		sync = 0;
	}

	if (sync) {
		/* Create a temporary mapping. */
		pmap = PCPU_GET(curpmap);

		va = 0;
		pte = pte_find(mmu, pmap, va);
		KASSERT(pte == NULL, ("%s:%d", __func__, __LINE__));

		flags = PTE_SR | PTE_VALID | PTE_UR | PTE_M;

		pte_enter(mmu, pmap, m, va, flags);
		__syncicache((void *)va, PAGE_SIZE);
		pte_remove(mmu, pmap, va, PTBL_UNHOLD);
	}
}

/*
 * Maps a sequence of resident pages belonging to the same object.
 * The sequence begins with the given page m_start. This page is
 * mapped at the given virtual address start.
 * Each subsequent page is
 * mapped at a virtual address that is offset from start by the same
 * amount as the page is offset from m_start within the object. The
 * last page in the sequence is the page with the largest offset from
 * m_start that can be mapped at a virtual address less than the given
 * virtual address end. Not every virtual page between start and end
 * is mapped; only those for which a resident page exists with the
 * corresponding offset from m_start are mapped.
 */
static void
mmu_booke_enter_object(mmu_t mmu, pmap_t pmap, vm_offset_t start,
    vm_offset_t end, vm_page_t m_start, vm_prot_t prot)
{
	vm_page_t m;
	vm_pindex_t diff, psize;

	psize = atop(end - start);
	m = m_start;
	PMAP_LOCK(pmap);
	while (m != NULL && (diff = m->pindex - m_start->pindex) < psize) {
		mmu_booke_enter_locked(mmu, pmap, start + ptoa(diff), m,
		    prot & (VM_PROT_READ | VM_PROT_EXECUTE), FALSE);
		m = TAILQ_NEXT(m, listq);
	}
	PMAP_UNLOCK(pmap);
}

static void
mmu_booke_enter_quick(mmu_t mmu, pmap_t pmap, vm_offset_t va, vm_page_t m,
    vm_prot_t prot)
{

	PMAP_LOCK(pmap);
	mmu_booke_enter_locked(mmu, pmap, va, m,
	    prot & (VM_PROT_READ | VM_PROT_EXECUTE), FALSE);
	PMAP_UNLOCK(pmap);
}

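/*
 * Worked example for mmu_booke_enter_object() above (illustrative numbers):
 * with start = 0xa000_0000 and m_start->pindex = 4, a resident page with
 * pindex 7 is entered at
 *
 *	start + ptoa(7 - 4) = 0xa000_0000 + 3 * PAGE_SIZE
 *
 * i.e. 0xa000_3000 with 4K pages; pages 5 and 6 are mapped only if they
 * are resident as well.
 */
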
/*
 * Remove the given range of addresses from the specified map.
 *
 * It is assumed that the start and end are properly rounded to the page size.
 */
static void
mmu_booke_remove(mmu_t mmu, pmap_t pmap, vm_offset_t va, vm_offset_t endva)
{
	pte_t *pte;
	uint8_t hold_flag;

	int su = (pmap == kernel_pmap);

	//debugf("mmu_booke_remove: s (su = %d pmap=0x%08x tid=%d va=0x%08x endva=0x%08x)\n",
	//    su, (u_int32_t)pmap, pmap->pm_tid, va, endva);

	if (su) {
		KASSERT(((va >= virtual_avail) &&
		    (va <= VM_MAX_KERNEL_ADDRESS)),
		    ("mmu_booke_remove: kernel pmap, non kernel va"));
	} else {
		KASSERT((va <= VM_MAXUSER_ADDRESS),
		    ("mmu_booke_remove: user pmap, non user va"));
	}

	if (PMAP_REMOVE_DONE(pmap)) {
		//debugf("mmu_booke_remove: e (empty)\n");
		return;
	}

	hold_flag = PTBL_HOLD_FLAG(pmap);
	//debugf("mmu_booke_remove: hold_flag = %d\n", hold_flag);

	vm_page_lock_queues();
	PMAP_LOCK(pmap);
	for (; va < endva; va += PAGE_SIZE) {
		pte = pte_find(mmu, pmap, va);
		if ((pte != NULL) && PTE_ISVALID(pte))
			pte_remove(mmu, pmap, va, hold_flag);
	}
	PMAP_UNLOCK(pmap);
	vm_page_unlock_queues();

	//debugf("mmu_booke_remove: e\n");
}

/*
 * Remove physical page from all pmaps in which it resides.
 */
static void
mmu_booke_remove_all(mmu_t mmu, vm_page_t m)
{
	pv_entry_t pv, pvn;
	uint8_t hold_flag;

	mtx_assert(&vm_page_queue_mtx, MA_OWNED);

	for (pv = TAILQ_FIRST(&m->md.pv_list); pv != NULL; pv = pvn) {
		pvn = TAILQ_NEXT(pv, pv_link);

		PMAP_LOCK(pv->pv_pmap);
		hold_flag = PTBL_HOLD_FLAG(pv->pv_pmap);
		pte_remove(mmu, pv->pv_pmap, pv->pv_va, hold_flag);
		PMAP_UNLOCK(pv->pv_pmap);
	}
	vm_page_flag_clear(m, PG_WRITEABLE);
}

/*
 * Map a range of physical addresses into kernel virtual address space.
 */
static vm_offset_t
mmu_booke_map(mmu_t mmu, vm_offset_t *virt, vm_offset_t pa_start,
    vm_offset_t pa_end, int prot)
{
	vm_offset_t sva = *virt;
	vm_offset_t va = sva;

	//debugf("mmu_booke_map: s (sva = 0x%08x pa_start = 0x%08x pa_end = 0x%08x)\n",
	//    sva, pa_start, pa_end);

	while (pa_start < pa_end) {
		mmu_booke_kenter(mmu, va, pa_start);
		va += PAGE_SIZE;
		pa_start += PAGE_SIZE;
	}
	*virt = va;

	//debugf("mmu_booke_map: e (va = 0x%08x)\n", va);
	return (sva);
}

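/*
 * Usage sketch for mmu_booke_map() above (addresses are made up for
 * illustration): the routine returns the start of the new mapping and
 * advances the caller's KVA cursor:
 *
 *	vm_offset_t va = virtual_avail;
 *	vm_offset_t sva;
 *
 *	sva = mmu_booke_map(mmu, &va, 0x8000_0000, 0x8001_0000, prot);
 *	// sva == old va; va was advanced by 0x1_0000 (16 pages mapped)
 */
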
/*
 * The pmap must be activated before its address space can be accessed in any
 * way.
 */
static void
mmu_booke_activate(mmu_t mmu, struct thread *td)
{
	pmap_t pmap;

	pmap = &td->td_proc->p_vmspace->vm_pmap;

	CTR5(KTR_PMAP, "%s: s (td = %p, proc = '%s', id = %d, pmap = 0x%08x)",
	    __func__, td, td->td_proc->p_comm, td->td_proc->p_pid, pmap);

	KASSERT((pmap != kernel_pmap), ("mmu_booke_activate: kernel_pmap!"));

	mtx_lock_spin(&sched_lock);

	atomic_set_int(&pmap->pm_active, PCPU_GET(cpumask));
	PCPU_SET(curpmap, pmap);

	if (pmap->pm_tid[PCPU_GET(cpuid)] == TID_NONE)
		tid_alloc(pmap);

	/* Load PID0 register with pmap tid value. */
	mtspr(SPR_PID0, pmap->pm_tid[PCPU_GET(cpuid)]);
	__asm __volatile("isync");

	mtx_unlock_spin(&sched_lock);

	CTR3(KTR_PMAP, "%s: e (tid = %d for '%s')", __func__,
	    pmap->pm_tid[PCPU_GET(cpuid)], td->td_proc->p_comm);
}

/*
 * Deactivate the specified process's address space.
 */
static void
mmu_booke_deactivate(mmu_t mmu, struct thread *td)
{
	pmap_t pmap;

	pmap = &td->td_proc->p_vmspace->vm_pmap;

	CTR5(KTR_PMAP, "%s: td=%p, proc = '%s', id = %d, pmap = 0x%08x",
	    __func__, td, td->td_proc->p_comm, td->td_proc->p_pid, pmap);

	atomic_clear_int(&pmap->pm_active, PCPU_GET(cpumask));
	PCPU_SET(curpmap, NULL);
}

/*
 * Copy the range specified by src_addr/len
 * from the source map to the range dst_addr/len
 * in the destination map.
 *
 * This routine is only advisory and need not do anything.
 */
static void
mmu_booke_copy(mmu_t mmu, pmap_t dst_pmap, pmap_t src_pmap,
    vm_offset_t dst_addr, vm_size_t len, vm_offset_t src_addr)
{

}

/*
 * Set the physical protection on the specified range of this map as requested.
 */
static void
mmu_booke_protect(mmu_t mmu, pmap_t pmap, vm_offset_t sva, vm_offset_t eva,
    vm_prot_t prot)
{
	vm_offset_t va;
	vm_page_t m;
	pte_t *pte;

	if ((prot & VM_PROT_READ) == VM_PROT_NONE) {
		mmu_booke_remove(mmu, pmap, sva, eva);
		return;
	}

	if (prot & VM_PROT_WRITE)
		return;

	vm_page_lock_queues();
	PMAP_LOCK(pmap);
	for (va = sva; va < eva; va += PAGE_SIZE) {
		if ((pte = pte_find(mmu, pmap, va)) != NULL) {
			if (PTE_ISVALID(pte)) {
				m = PHYS_TO_VM_PAGE(PTE_PA(pte));

				mtx_lock_spin(&tlbivax_mutex);
				tlb_miss_lock();

				/* Handle modified pages. */
				if (PTE_ISMODIFIED(pte))
					vm_page_dirty(m);

				/* Referenced pages. */
				if (PTE_ISREFERENCED(pte))
					vm_page_flag_set(m, PG_REFERENCED);

				tlb0_flush_entry(va);
				pte->flags &= ~(PTE_UW | PTE_SW | PTE_MODIFIED |
				    PTE_REFERENCED);

				tlb_miss_unlock();
				mtx_unlock_spin(&tlbivax_mutex);
			}
		}
	}
	PMAP_UNLOCK(pmap);
	vm_page_unlock_queues();
}

/*
 * Clear the write and modified bits in each of the given page's mappings.
 */
static void
mmu_booke_remove_write(mmu_t mmu, vm_page_t m)
{
	pv_entry_t pv;
	pte_t *pte;

	mtx_assert(&vm_page_queue_mtx, MA_OWNED);
	if ((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) != 0 ||
	    (m->flags & PG_WRITEABLE) == 0)
		return;

	TAILQ_FOREACH(pv, &m->md.pv_list, pv_link) {
		PMAP_LOCK(pv->pv_pmap);
		if ((pte = pte_find(mmu, pv->pv_pmap, pv->pv_va)) != NULL) {
			if (PTE_ISVALID(pte)) {
				m = PHYS_TO_VM_PAGE(PTE_PA(pte));

				mtx_lock_spin(&tlbivax_mutex);
				tlb_miss_lock();

				/* Handle modified pages. */
				if (PTE_ISMODIFIED(pte))
					vm_page_dirty(m);

				/* Referenced pages. */
				if (PTE_ISREFERENCED(pte))
					vm_page_flag_set(m, PG_REFERENCED);

				/* Flush mapping from TLB0. */
				pte->flags &= ~(PTE_UW | PTE_SW | PTE_MODIFIED |
				    PTE_REFERENCED);

				tlb_miss_unlock();
				mtx_unlock_spin(&tlbivax_mutex);
			}
		}
		PMAP_UNLOCK(pv->pv_pmap);
	}
	vm_page_flag_clear(m, PG_WRITEABLE);
}

static boolean_t
mmu_booke_page_executable(mmu_t mmu, vm_page_t m)
{
	pv_entry_t pv;
	pte_t *pte;
	boolean_t executable;

	executable = FALSE;
	TAILQ_FOREACH(pv, &m->md.pv_list, pv_link) {
		PMAP_LOCK(pv->pv_pmap);
		pte = pte_find(mmu, pv->pv_pmap, pv->pv_va);
		if (pte != NULL && PTE_ISVALID(pte) && (pte->flags & PTE_UX))
			executable = TRUE;
		PMAP_UNLOCK(pv->pv_pmap);
		if (executable)
			break;
	}

	return (executable);
}

/*
 * Atomically extract and hold the physical page with the given
 * pmap and virtual address pair if that mapping permits the given
 * protection.
 */
static vm_page_t
mmu_booke_extract_and_hold(mmu_t mmu, pmap_t pmap, vm_offset_t va,
    vm_prot_t prot)
{
	pte_t *pte;
	vm_page_t m;
	uint32_t pte_wbit;

	m = NULL;
	vm_page_lock_queues();
	PMAP_LOCK(pmap);

	pte = pte_find(mmu, pmap, va);
	if ((pte != NULL) && PTE_ISVALID(pte)) {
		if (pmap == kernel_pmap)
			pte_wbit = PTE_SW;
		else
			pte_wbit = PTE_UW;

		if ((pte->flags & pte_wbit) || ((prot & VM_PROT_WRITE) == 0)) {
			m = PHYS_TO_VM_PAGE(PTE_PA(pte));
			vm_page_hold(m);
		}
	}

	vm_page_unlock_queues();
	PMAP_UNLOCK(pmap);
	return (m);
}

/*
 * Initialize a vm_page's machine-dependent fields.
 */
static void
mmu_booke_page_init(mmu_t mmu, vm_page_t m)
{

	TAILQ_INIT(&m->md.pv_list);
}

/*
 * mmu_booke_zero_page_area zeros the specified hardware page by
 * mapping it into virtual memory and using bzero to clear
 * its contents.
 *
 * off and size must reside within a single page.
 */
static void
mmu_booke_zero_page_area(mmu_t mmu, vm_page_t m, int off, int size)
{
	vm_offset_t va;

	/* XXX KASSERT off and size are within a single page? */

	mtx_lock(&zero_page_mutex);
	va = zero_page_va;

	mmu_booke_kenter(mmu, va, VM_PAGE_TO_PHYS(m));
	bzero((caddr_t)va + off, size);
	mmu_booke_kremove(mmu, va);

	mtx_unlock(&zero_page_mutex);
}

/*
 * mmu_booke_zero_page zeros the specified hardware page.
 */
static void
mmu_booke_zero_page(mmu_t mmu, vm_page_t m)
{

	mmu_booke_zero_page_area(mmu, m, 0, PAGE_SIZE);
}

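/*
 * A minimal sketch of the bounds check the XXX in mmu_booke_zero_page_area()
 * above asks about (editor's suggestion only, not part of the original code):
 *
 *	KASSERT(off >= 0 && size >= 0 && off + size <= PAGE_SIZE,
 *	    ("mmu_booke_zero_page_area: off/size not within a single page"));
 */
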
/*
 * mmu_booke_copy_page copies the specified (machine independent) page by
 * mapping the page into virtual memory and using memcpy to copy the page,
 * one machine dependent page at a time.
 */
static void
mmu_booke_copy_page(mmu_t mmu, vm_page_t sm, vm_page_t dm)
{
	vm_offset_t sva, dva;

	sva = copy_page_src_va;
	dva = copy_page_dst_va;

	mtx_lock(&copy_page_mutex);
	mmu_booke_kenter(mmu, sva, VM_PAGE_TO_PHYS(sm));
	mmu_booke_kenter(mmu, dva, VM_PAGE_TO_PHYS(dm));
	memcpy((caddr_t)dva, (caddr_t)sva, PAGE_SIZE);
	mmu_booke_kremove(mmu, dva);
	mmu_booke_kremove(mmu, sva);
	mtx_unlock(&copy_page_mutex);
}

/*
 * mmu_booke_zero_page_idle zeros the specified hardware page by mapping it
 * into virtual memory and using bzero to clear its contents. This is intended
 * to be called from the vm_pagezero process only and outside of Giant. No
 * lock is required.
 */
static void
mmu_booke_zero_page_idle(mmu_t mmu, vm_page_t m)
{
	vm_offset_t va;

	va = zero_page_idle_va;
	mmu_booke_kenter(mmu, va, VM_PAGE_TO_PHYS(m));
	bzero((caddr_t)va, PAGE_SIZE);
	mmu_booke_kremove(mmu, va);
}

/*
 * Return whether or not the specified physical page was modified
 * in any of the physical maps.
 */
static boolean_t
mmu_booke_is_modified(mmu_t mmu, vm_page_t m)
{
	pte_t *pte;
	pv_entry_t pv;

	mtx_assert(&vm_page_queue_mtx, MA_OWNED);
	if ((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) != 0)
		return (FALSE);

	TAILQ_FOREACH(pv, &m->md.pv_list, pv_link) {
		PMAP_LOCK(pv->pv_pmap);
		if ((pte = pte_find(mmu, pv->pv_pmap, pv->pv_va)) != NULL) {
			if (!PTE_ISVALID(pte))
				goto make_sure_to_unlock;

			if (PTE_ISMODIFIED(pte)) {
				PMAP_UNLOCK(pv->pv_pmap);
				return (TRUE);
			}
		}
make_sure_to_unlock:
		PMAP_UNLOCK(pv->pv_pmap);
	}
	return (FALSE);
}

/*
 * Return whether or not the specified virtual address is eligible
 * for prefault.
 */
static boolean_t
mmu_booke_is_prefaultable(mmu_t mmu, pmap_t pmap, vm_offset_t addr)
{

	return (FALSE);
}

/*
 * Clear the modify bits on the specified physical page.
 */
static void
mmu_booke_clear_modify(mmu_t mmu, vm_page_t m)
{
	pte_t *pte;
	pv_entry_t pv;

	mtx_assert(&vm_page_queue_mtx, MA_OWNED);
	if ((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) != 0)
		return;

	TAILQ_FOREACH(pv, &m->md.pv_list, pv_link) {
		PMAP_LOCK(pv->pv_pmap);
		if ((pte = pte_find(mmu, pv->pv_pmap, pv->pv_va)) != NULL) {
			if (!PTE_ISVALID(pte))
				goto make_sure_to_unlock;

			mtx_lock_spin(&tlbivax_mutex);
			tlb_miss_lock();

			if (pte->flags & (PTE_SW | PTE_UW | PTE_MODIFIED)) {
				tlb0_flush_entry(pv->pv_va);
				pte->flags &= ~(PTE_SW | PTE_UW | PTE_MODIFIED |
				    PTE_REFERENCED);
			}

			tlb_miss_unlock();
			mtx_unlock_spin(&tlbivax_mutex);
		}
make_sure_to_unlock:
		PMAP_UNLOCK(pv->pv_pmap);
	}
}

/*
 * Return a count of reference bits for a page, clearing those bits.
 * It is not necessary for every reference bit to be cleared, but it
 * is necessary that 0 only be returned when there are truly no
 * reference bits set.
 *
 * XXX: The exact number of bits to check and clear is a matter that
 * should be tested and standardized at some point in the future for
 * optimal aging of shared pages.
 */
static int
mmu_booke_ts_referenced(mmu_t mmu, vm_page_t m)
{
	pte_t *pte;
	pv_entry_t pv;
	int count;

	mtx_assert(&vm_page_queue_mtx, MA_OWNED);
	if ((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) != 0)
		return (0);

	count = 0;
	TAILQ_FOREACH(pv, &m->md.pv_list, pv_link) {
		PMAP_LOCK(pv->pv_pmap);
		if ((pte = pte_find(mmu, pv->pv_pmap, pv->pv_va)) != NULL) {
			if (!PTE_ISVALID(pte))
				goto make_sure_to_unlock;

			if (PTE_ISREFERENCED(pte)) {
				mtx_lock_spin(&tlbivax_mutex);
				tlb_miss_lock();

				tlb0_flush_entry(pv->pv_va);
				pte->flags &= ~PTE_REFERENCED;

				tlb_miss_unlock();
				mtx_unlock_spin(&tlbivax_mutex);

				if (++count > 4) {
					PMAP_UNLOCK(pv->pv_pmap);
					break;
				}
			}
		}
make_sure_to_unlock:
		PMAP_UNLOCK(pv->pv_pmap);
	}
	return (count);
}

/*
 * Clear the reference bit on the specified physical page.
 */
static void
mmu_booke_clear_reference(mmu_t mmu, vm_page_t m)
{
	pte_t *pte;
	pv_entry_t pv;

	mtx_assert(&vm_page_queue_mtx, MA_OWNED);
	if ((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) != 0)
		return;

	TAILQ_FOREACH(pv, &m->md.pv_list, pv_link) {
		PMAP_LOCK(pv->pv_pmap);
		if ((pte = pte_find(mmu, pv->pv_pmap, pv->pv_va)) != NULL) {
			if (!PTE_ISVALID(pte))
				goto make_sure_to_unlock;

			if (PTE_ISREFERENCED(pte)) {
				mtx_lock_spin(&tlbivax_mutex);
				tlb_miss_lock();

				tlb0_flush_entry(pv->pv_va);
				pte->flags &= ~PTE_REFERENCED;

				tlb_miss_unlock();
				mtx_unlock_spin(&tlbivax_mutex);
			}
		}
make_sure_to_unlock:
		PMAP_UNLOCK(pv->pv_pmap);
	}
}

/*
 * Change wiring attribute for a map/virtual-address pair.
 */
static void
mmu_booke_change_wiring(mmu_t mmu, pmap_t pmap, vm_offset_t va, boolean_t wired)
{
	pte_t *pte;

	PMAP_LOCK(pmap);
	if ((pte = pte_find(mmu, pmap, va)) != NULL) {
		if (wired) {
			if (!PTE_ISWIRED(pte)) {
				pte->flags |= PTE_WIRED;
				pmap->pm_stats.wired_count++;
			}
		} else {
			if (PTE_ISWIRED(pte)) {
				pte->flags &= ~PTE_WIRED;
				pmap->pm_stats.wired_count--;
			}
		}
	}
	PMAP_UNLOCK(pmap);
}

/*
 * Return true if the pmap's pv is one of the first 16 pvs linked to from this
 * page. This count may be changed upwards or downwards in the future; it is
 * only necessary that true be returned for a small subset of pmaps for proper
 * page aging.
 */
static boolean_t
mmu_booke_page_exists_quick(mmu_t mmu, pmap_t pmap, vm_page_t m)
{
	pv_entry_t pv;
	int loops;

	mtx_assert(&vm_page_queue_mtx, MA_OWNED);
	if ((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) != 0)
		return (FALSE);

	loops = 0;
	TAILQ_FOREACH(pv, &m->md.pv_list, pv_link) {
		if (pv->pv_pmap == pmap)
			return (TRUE);

		if (++loops >= 16)
			break;
	}
	return (FALSE);
}

/*
 * Return the number of managed mappings to the given physical page that are
 * wired.
 */
static int
mmu_booke_page_wired_mappings(mmu_t mmu, vm_page_t m)
{
	pv_entry_t pv;
	pte_t *pte;
	int count = 0;

	if ((m->flags & PG_FICTITIOUS) != 0)
		return (count);
	mtx_assert(&vm_page_queue_mtx, MA_OWNED);

	TAILQ_FOREACH(pv, &m->md.pv_list, pv_link) {
		PMAP_LOCK(pv->pv_pmap);
		if ((pte = pte_find(mmu, pv->pv_pmap, pv->pv_va)) != NULL)
			if (PTE_ISVALID(pte) && PTE_ISWIRED(pte))
				count++;
		PMAP_UNLOCK(pv->pv_pmap);
	}

	return (count);
}

static int
mmu_booke_dev_direct_mapped(mmu_t mmu, vm_offset_t pa, vm_size_t size)
{
	int i;
	vm_offset_t va;

	/*
	 * This currently does not work for entries that
	 * overlap TLB1 entries.
	 */
	for (i = 0; i < tlb1_idx; i++) {
		if (tlb1_iomapped(i, pa, size, &va) == 0)
			return (0);
	}

	return (EFAULT);
}

vm_offset_t
mmu_booke_dumpsys_map(mmu_t mmu, struct pmap_md *md, vm_size_t ofs,
    vm_size_t *sz)
{
	vm_paddr_t pa, ppa;
	vm_offset_t va;
	vm_size_t gran;

	/* Raw physical memory dumps don't have a virtual address. */
	if (md->md_vaddr == ~0UL) {
		/* We always map a 256MB page at 256M. */
		gran = 256 * 1024 * 1024;
		pa = md->md_paddr + ofs;
		ppa = pa & ~(gran - 1);
		ofs = pa - ppa;
		va = gran;
		tlb1_set_entry(va, ppa, gran, _TLB_ENTRY_IO);
		if (*sz > (gran - ofs))
			*sz = gran - ofs;
		return (va + ofs);
	}

	/* Minidumps are based on virtual memory addresses. */
	va = md->md_vaddr + ofs;
	if (va >= kernstart + kernsize) {
		gran = PAGE_SIZE - (va & PAGE_MASK);
		if (*sz > gran)
			*sz = gran;
	}
	return (va);
}

void
mmu_booke_dumpsys_unmap(mmu_t mmu, struct pmap_md *md, vm_size_t ofs,
    vm_offset_t va)
{

	/* Raw physical memory dumps don't have a virtual address. */
	if (md->md_vaddr == ~0UL) {
		tlb1_idx--;
		tlb1[tlb1_idx].mas1 = 0;
		tlb1[tlb1_idx].mas2 = 0;
		tlb1[tlb1_idx].mas3 = 0;
		tlb1_write_entry(tlb1_idx);
		return;
	}

	/* Minidumps are based on virtual memory addresses. */
	/* Nothing to do... */
}

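/*
 * Worked example for the raw-dump window in mmu_booke_dumpsys_map() above
 * (numbers are illustrative): with md->md_paddr + ofs = 0x1234_5678, the
 * 256MB granule gives
 *
 *	ppa = 0x1234_5678 & ~(0x1000_0000 - 1) = 0x1000_0000
 *	ofs = 0x1234_5678 - 0x1000_0000        = 0x0234_5678
 *
 * so the TLB1 entry maps VA 0x1000_0000 (== gran) onto PA 0x1000_0000 and
 * the routine returns 0x1234_5678, with *sz clamped so the caller never
 * reads past the end of the window.
 */
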
struct pmap_md *
mmu_booke_scan_md(mmu_t mmu, struct pmap_md *prev)
{
	static struct pmap_md md;
	struct bi_mem_region *mr;
	pte_t *pte;
	vm_offset_t va;

	if (dumpsys_minidump) {
		md.md_paddr = ~0UL;	/* Minidumps use virtual addresses. */
		if (prev == NULL) {
			/* 1st: kernel .data and .bss. */
			md.md_index = 1;
			md.md_vaddr = trunc_page((uintptr_t)_etext);
			md.md_size = round_page((uintptr_t)_end) - md.md_vaddr;
			return (&md);
		}
		switch (prev->md_index) {
		case 1:
			/* 2nd: msgbuf and tables (see pmap_bootstrap()). */
			md.md_index = 2;
			md.md_vaddr = data_start;
			md.md_size = data_end - data_start;
			break;
		case 2:
			/* 3rd: kernel VM. */
			va = prev->md_vaddr + prev->md_size;
			/* Find start of next chunk (from va). */
			while (va < virtual_end) {
				/* Don't dump the buffer cache. */
				if (va >= kmi.buffer_sva &&
				    va < kmi.buffer_eva) {
					va = kmi.buffer_eva;
					continue;
				}
				pte = pte_find(mmu, kernel_pmap, va);
				if (pte != NULL && PTE_ISVALID(pte))
					break;
				va += PAGE_SIZE;
			}
			if (va < virtual_end) {
				md.md_vaddr = va;
				va += PAGE_SIZE;
				/* Find last page in chunk. */
				while (va < virtual_end) {
					/* Don't run into the buffer cache. */
					if (va == kmi.buffer_sva)
						break;
					pte = pte_find(mmu, kernel_pmap, va);
					if (pte == NULL || !PTE_ISVALID(pte))
						break;
					va += PAGE_SIZE;
				}
				md.md_size = va - md.md_vaddr;
				break;
			}
			md.md_index = 3;
			/* FALLTHROUGH */
		default:
			return (NULL);
		}
	} else { /* raw physical memory dumps */
		mr = bootinfo_mr();
		if (prev == NULL) {
			/* First physical chunk. */
			md.md_paddr = mr->mem_base;
			md.md_size = mr->mem_size;
			md.md_vaddr = ~0UL;
			md.md_index = 1;
		} else if (md.md_index < bootinfo->bi_mem_reg_no) {
			md.md_paddr = mr[md.md_index].mem_base;
			md.md_size = mr[md.md_index].mem_size;
			md.md_vaddr = ~0UL;
			md.md_index++;
		} else {
			/* There's no next physical chunk. */
			return (NULL);
		}
	}

	return (&md);
}

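/*
 * Illustrative note for mmu_booke_mapdev() below: the do/while loop covers
 * the region with progressively smaller power-of-4 TLB1 entries. For
 * example (made-up size), a 10MB region is wired as
 *
 *	sz = 1 << (ilog2(10MB) & ~1) = 4MB	(10MB left -> 6MB)
 *	sz = 4MB				(6MB left  -> 2MB)
 *	sz = 1MB				(2MB left  -> 1MB)
 *	sz = 1MB				(done)
 *
 * i.e. four TLB1 entries of 4MB, 4MB, 1MB and 1MB.
 */
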
/*
 * Map a set of physical memory pages into the kernel virtual address space.
 * Return a pointer to where it is mapped. This routine is intended to be
 * used for mapping device memory, NOT real memory.
 */
static void *
mmu_booke_mapdev(mmu_t mmu, vm_offset_t pa, vm_size_t size)
{
	void *res;
	uintptr_t va;
	vm_size_t sz;

	va = (pa >= 0x80000000) ? pa : (0xe2000000 + pa);
	res = (void *)va;

	do {
		sz = 1 << (ilog2(size) & ~1);
		if (bootverbose)
			printf("Wiring VA=%x to PA=%x (size=%x), "
			    "using TLB1[%d]\n", va, pa, sz, tlb1_idx);
		tlb1_set_entry(va, pa, sz, _TLB_ENTRY_IO);
		size -= sz;
		pa += sz;
		va += sz;
	} while (size > 0);

	return (res);
}

/*
 * 'Unmap' a range mapped by mmu_booke_mapdev().
 */
static void
mmu_booke_unmapdev(mmu_t mmu, vm_offset_t va, vm_size_t size)
{
	vm_offset_t base, offset;

	/*
	 * Unmap only if this is inside kernel virtual space.
	 */
	if ((va >= VM_MIN_KERNEL_ADDRESS) && (va <= VM_MAX_KERNEL_ADDRESS)) {
		base = trunc_page(va);
		offset = va & PAGE_MASK;
		size = roundup(offset + size, PAGE_SIZE);
		kmem_free(kernel_map, base, size);
	}
}

/*
 * mmu_booke_object_init_pt preloads the ptes for a given object into the
 * specified pmap. This eliminates the blast of soft faults on process startup
 * and immediately after an mmap.
 */
static void
mmu_booke_object_init_pt(mmu_t mmu, pmap_t pmap, vm_offset_t addr,
    vm_object_t object, vm_pindex_t pindex, vm_size_t size)
{

	VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
	KASSERT(object->type == OBJT_DEVICE,
	    ("mmu_booke_object_init_pt: non-device object"));
}

/*
 * Perform the pmap work for mincore.
 */
static int
mmu_booke_mincore(mmu_t mmu, pmap_t pmap, vm_offset_t addr)
{

	TODO;
	return (0);
}

/**************************************************************************/
/* TID handling */
/**************************************************************************/

/*
 * Allocate a TID. If necessary, steal one from someone else.
 * The new TID is flushed from the TLB before returning.
 */
static tlbtid_t
tid_alloc(pmap_t pmap)
{
	tlbtid_t tid;
	int thiscpu;

	KASSERT((pmap != kernel_pmap), ("tid_alloc: kernel pmap"));

	CTR2(KTR_PMAP, "%s: s (pmap = %p)", __func__, pmap);

	thiscpu = PCPU_GET(cpuid);

	tid = PCPU_GET(tid_next);
	if (tid > TID_MAX)
		tid = TID_MIN;
	PCPU_SET(tid_next, tid + 1);

	/* If we are stealing TID then clear the relevant pmap's field */
	if (tidbusy[thiscpu][tid] != NULL) {

		CTR2(KTR_PMAP, "%s: warning: stealing tid %d", __func__, tid);

		tidbusy[thiscpu][tid]->pm_tid[thiscpu] = TID_NONE;

		/* Flush all entries from TLB0 matching this TID. */
		tid_flush(tid);
	}

	tidbusy[thiscpu][tid] = pmap;
	pmap->pm_tid[thiscpu] = tid;
	__asm __volatile("msync; isync");

	CTR3(KTR_PMAP, "%s: e (%02d next = %02d)", __func__, tid,
	    PCPU_GET(tid_next));

	return (tid);
}

/**************************************************************************/
/* TLB0 handling */
/**************************************************************************/

static void
tlb_print_entry(int i, uint32_t mas1, uint32_t mas2, uint32_t mas3,
    uint32_t mas7)
{
	int as;
	char desc[3];
	tlbtid_t tid;
	vm_size_t size;
	unsigned int tsize;

	desc[2] = '\0';
	if (mas1 & MAS1_VALID)
		desc[0] = 'V';
	else
		desc[0] = ' ';

	if (mas1 & MAS1_IPROT)
		desc[1] = 'P';
	else
		desc[1] = ' ';

	as = (mas1 & MAS1_TS_MASK) ? 1 : 0;
	tid = MAS1_GETTID(mas1);

	tsize = (mas1 & MAS1_TSIZE_MASK) >> MAS1_TSIZE_SHIFT;
	size = 0;
	if (tsize)
		size = tsize2size(tsize);

	debugf("%3d: (%s) [AS=%d] "
	    "sz = 0x%08x tsz = %d tid = %d mas1 = 0x%08x "
	    "mas2(va) = 0x%08x mas3(pa) = 0x%08x mas7 = 0x%08x\n",
	    i, desc, as, size, tsize, tid, mas1, mas2, mas3, mas7);
}

/* Convert TLB0 va and way number to tlb0[] table index. */
static inline unsigned int
tlb0_tableidx(vm_offset_t va, unsigned int way)
{
	unsigned int idx;

	idx = (way * TLB0_ENTRIES_PER_WAY);
	idx += (va & MAS2_TLB0_ENTRY_IDX_MASK) >> MAS2_TLB0_ENTRY_IDX_SHIFT;
	return (idx);
}

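/*
 * Worked example for tlb0_tableidx() above (the constants are assumed for
 * illustration only): if TLB0_ENTRIES_PER_WAY were 64, then way 1 and a VA
 * whose entry-index field decodes to 5 would yield
 *
 *	idx = 1 * 64 + 5 = 69
 *
 * i.e. tlb0[] is laid out way-major, with all of way 0 first.
 */
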
/*
 * Invalidate TLB0 entry.
 */
static inline void
tlb0_flush_entry(vm_offset_t va)
{

	CTR2(KTR_PMAP, "%s: s va=0x%08x", __func__, va);

	mtx_assert(&tlbivax_mutex, MA_OWNED);

	__asm __volatile("tlbivax 0, %0" :: "r"(va & MAS2_EPN_MASK));
	__asm __volatile("isync; msync");
	__asm __volatile("tlbsync; msync");

	CTR1(KTR_PMAP, "%s: e", __func__);
}

/* Print out contents of the MAS registers for each TLB0 entry */
void
tlb0_print_tlbentries(void)
{
	uint32_t mas0, mas1, mas2, mas3, mas7;
	int entryidx, way, idx;

	debugf("TLB0 entries:\n");
	for (way = 0; way < TLB0_WAYS; way++)
		for (entryidx = 0; entryidx < TLB0_ENTRIES_PER_WAY; entryidx++) {

			mas0 = MAS0_TLBSEL(0) | MAS0_ESEL(way);
			mtspr(SPR_MAS0, mas0);
			__asm __volatile("isync");

			mas2 = entryidx << MAS2_TLB0_ENTRY_IDX_SHIFT;
			mtspr(SPR_MAS2, mas2);

			__asm __volatile("isync; tlbre");

			mas1 = mfspr(SPR_MAS1);
			mas2 = mfspr(SPR_MAS2);
			mas3 = mfspr(SPR_MAS3);
			mas7 = mfspr(SPR_MAS7);

			idx = tlb0_tableidx(mas2, way);
			tlb_print_entry(idx, mas1, mas2, mas3, mas7);
		}
}

/**************************************************************************/
/* TLB1 handling */
/**************************************************************************/

/*
 * TLB1 mapping notes:
 *
 * TLB1[0]	CCSRBAR
 * TLB1[1]	Kernel text and data.
 * TLB1[2-15]	Additional kernel text and data mappings (if required), PCI
 *		windows, other device mappings.
 */

/*
 * Write given entry to TLB1 hardware.
 * Use 32 bit pa, clear 4 high-order bits of RPN (mas7).
 */
static void
tlb1_write_entry(unsigned int idx)
{
	uint32_t mas0, mas7;

	//debugf("tlb1_write_entry: s\n");

	/* Clear high order RPN bits */
	mas7 = 0;

	/* Select entry */
	mas0 = MAS0_TLBSEL(1) | MAS0_ESEL(idx);
	//debugf("tlb1_write_entry: mas0 = 0x%08x\n", mas0);

	mtspr(SPR_MAS0, mas0);
	__asm __volatile("isync");
	mtspr(SPR_MAS1, tlb1[idx].mas1);
	__asm __volatile("isync");
	mtspr(SPR_MAS2, tlb1[idx].mas2);
	__asm __volatile("isync");
	mtspr(SPR_MAS3, tlb1[idx].mas3);
	__asm __volatile("isync");
	mtspr(SPR_MAS7, mas7);
	__asm __volatile("isync; tlbwe; isync; msync");

	//debugf("tlb1_write_entry: e\n");
}

/*
 * Return the largest uint value log such that 2^log <= num.
 */
static unsigned int
ilog2(unsigned int num)
{
	int lz;

	__asm ("cntlzw %0, %1" : "=r" (lz) : "r" (num));
	return (31 - lz);
}

/*
 * Convert TLB TSIZE value to mapped region size.
 */
static vm_size_t
tsize2size(unsigned int tsize)
{

	/*
	 * size = 4^tsize KB
	 * size = 4^tsize * 2^10 = 2^(2 * tsize + 10)
	 */

	return ((1 << (2 * tsize)) * 1024);
}

/*
 * Convert region size (must be power of 4) to TLB TSIZE value.
 */
static unsigned int
size2tsize(vm_size_t size)
{

	return (ilog2(size) / 2 - 5);
}

/*
 * Register permanent kernel mapping in TLB1.
 *
 * Entries are created starting from index 0 (current free entry is
 * kept in tlb1_idx) and are not supposed to be invalidated.
 */
static int
tlb1_set_entry(vm_offset_t va, vm_offset_t pa, vm_size_t size,
    uint32_t flags)
{
	uint32_t ts, tid;
	int tsize;

	if (tlb1_idx >= TLB1_ENTRIES) {
		printf("tlb1_set_entry: TLB1 full!\n");
		return (-1);
	}

	/* Convert size to TSIZE */
	tsize = size2tsize(size);

	tid = (TID_KERNEL << MAS1_TID_SHIFT) & MAS1_TID_MASK;
	/* XXX TS is hard coded to 0 for now as we only use single address space */
	ts = (0 << MAS1_TS_SHIFT) & MAS1_TS_MASK;

	/* XXX LOCK tlb1[] */

	tlb1[tlb1_idx].mas1 = MAS1_VALID | MAS1_IPROT | ts | tid;
	tlb1[tlb1_idx].mas1 |= ((tsize << MAS1_TSIZE_SHIFT) & MAS1_TSIZE_MASK);
	tlb1[tlb1_idx].mas2 = (va & MAS2_EPN_MASK) | flags;

	/* Set supervisor RWX permission bits */
	tlb1[tlb1_idx].mas3 = (pa & MAS3_RPN) | MAS3_SR | MAS3_SW | MAS3_SX;

	tlb1_write_entry(tlb1_idx++);

	/* XXX UNLOCK tlb1[] */

	/*
	 * XXX in general TLB1 updates should be propagated between CPUs,
	 * since current design assumes to have the same TLB1 set-up on all
	 * cores.
	 */
	return (0);
}

static int
tlb1_entry_size_cmp(const void *a, const void *b)
{
	const vm_size_t *sza;
	const vm_size_t *szb;

	sza = a;
	szb = b;
	if (*sza > *szb)
		return (-1);
	else if (*sza < *szb)
		return (1);
	else
		return (0);
}

/*
 * Map in contiguous RAM region into the TLB1 using maximum of
 * KERNEL_REGION_MAX_TLB_ENTRIES entries.
 *
 * If necessary round up last entry size and return total size
 * used by all allocated entries.
 */
vm_size_t
tlb1_mapin_region(vm_offset_t va, vm_offset_t pa, vm_size_t size)
{
	vm_size_t entry_size[KERNEL_REGION_MAX_TLB_ENTRIES];
	vm_size_t mapped_size, sz, esz;
	unsigned int log;
	int i;

	CTR4(KTR_PMAP, "%s: region size = 0x%08x va = 0x%08x pa = 0x%08x",
	    __func__, size, va, pa);

	mapped_size = 0;
	sz = size;
	memset(entry_size, 0, sizeof(entry_size));

	/* Calculate entry sizes. */
	for (i = 0; i < KERNEL_REGION_MAX_TLB_ENTRIES && sz > 0; i++) {

		/* Largest region that is power of 4 and fits within size */
		log = ilog2(sz) / 2;
		esz = 1 << (2 * log);

		/* If this is last entry cover remaining size. */
		if (i == KERNEL_REGION_MAX_TLB_ENTRIES - 1) {
			while (esz < sz)
				esz = esz << 2;
		}

		entry_size[i] = esz;
		mapped_size += esz;
		if (esz < sz)
			sz -= esz;
		else
			sz = 0;
	}

	/* Sort entry sizes, required to get proper entry address alignment. */
	qsort(entry_size, KERNEL_REGION_MAX_TLB_ENTRIES,
	    sizeof(vm_size_t), tlb1_entry_size_cmp);

	/* Load TLB1 entries. */
	for (i = 0; i < KERNEL_REGION_MAX_TLB_ENTRIES; i++) {
		esz = entry_size[i];
		if (!esz)
			break;

		CTR5(KTR_PMAP, "%s: entry %d: sz = 0x%08x (va = 0x%08x "
		    "pa = 0x%08x)", __func__, tlb1_idx, esz, va, pa);

		tlb1_set_entry(va, pa, esz, _TLB_ENTRY_MEM);

		va += esz;
		pa += esz;
	}

	CTR3(KTR_PMAP, "%s: mapped size 0x%08x (wasted space 0x%08x)",
	    __func__, mapped_size, mapped_size - size);

	return (mapped_size);
}

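/*
 * Worked examples for the conversions and the region mapping above
 * (sizes are illustrative):
 *
 * - tsize2size(7) = 4^7 KB = 2^24 = 16MB, and the inverse
 *   size2tsize(16MB) = ilog2(2^24) / 2 - 5 = 24 / 2 - 5 = 7.
 *
 * - tlb1_mapin_region() covers a 40MB region with power-of-4 entries as
 *   16MB + 16MB + 4MB + 4MB = 40MB, sorted largest-first so each entry's
 *   VA/PA stay naturally aligned to the entry size.  Only when the region
 *   does not decompose within KERNEL_REGION_MAX_TLB_ENTRIES entries is the
 *   last one rounded up, making the returned mapped_size exceed the request.
 */
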
/*
 * TLB1 initialization routine, to be called after the very first
 * assembler-level setup done in locore.S.
 */
void
tlb1_init(vm_offset_t ccsrbar)
{
	uint32_t mas0;

	/* TLB1[1] is used to map the kernel.  Save that entry. */
	mas0 = MAS0_TLBSEL(1) | MAS0_ESEL(1);
	mtspr(SPR_MAS0, mas0);
	__asm __volatile("isync; tlbre");

	tlb1[1].mas1 = mfspr(SPR_MAS1);
	tlb1[1].mas2 = mfspr(SPR_MAS2);
	tlb1[1].mas3 = mfspr(SPR_MAS3);

	/* Map in CCSRBAR in TLB1[0]. */
	tlb1_idx = 0;
	tlb1_set_entry(CCSRBAR_VA, ccsrbar, CCSRBAR_SIZE, _TLB_ENTRY_IO);

	/*
	 * Set the next available TLB1 entry index.  Note TLB1[1] is
	 * reserved for the initial mapping of kernel text+data, which was
	 * set up early in locore; we need to skip this [busy] entry.
	 */
	tlb1_idx = 2;

	/* Setup TLB miss defaults. */
	set_mas4_defaults();
}

/*
 * Setup MAS4 defaults.
 * These values are loaded to MAS0-2 on a TLB miss.
 */
static void
set_mas4_defaults(void)
{
	uint32_t mas4;

	/* Defaults: TLB0, PID0, TSIZED=4K. */
	mas4 = MAS4_TLBSELD0;
	mas4 |= (TLB_SIZE_4K << MAS4_TSIZED_SHIFT) & MAS4_TSIZED_MASK;
#ifdef SMP
	mas4 |= MAS4_MD;
#endif
	mtspr(SPR_MAS4, mas4);
	__asm __volatile("isync");
}

/*
 * Print out the contents of the MAS registers for each TLB1 entry.
 */
void
tlb1_print_tlbentries(void)
{
	uint32_t mas0, mas1, mas2, mas3, mas7;
	int i;

	debugf("TLB1 entries:\n");
	for (i = 0; i < TLB1_ENTRIES; i++) {

		mas0 = MAS0_TLBSEL(1) | MAS0_ESEL(i);
		mtspr(SPR_MAS0, mas0);

		__asm __volatile("isync; tlbre");

		mas1 = mfspr(SPR_MAS1);
		mas2 = mfspr(SPR_MAS2);
		mas3 = mfspr(SPR_MAS3);
		mas7 = mfspr(SPR_MAS7);

		tlb_print_entry(i, mas1, mas2, mas3, mas7);
	}
}

/*
 * Print out the contents of the in-ram tlb1[] table.
 */
void
tlb1_print_entries(void)
{
	int i;

	debugf("tlb1[] table entries:\n");
	for (i = 0; i < TLB1_ENTRIES; i++)
		tlb_print_entry(i, tlb1[i].mas1, tlb1[i].mas2, tlb1[i].mas3, 0);
}
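/*
 * Illustrative sketch, not part of the original file: a hypothetical
 * helper that decodes the mapped size of an in-ram tlb1[] entry from
 * its MAS1 TSIZE field, reusing tsize2size() above.  tlb1_iomapped()
 * below performs the same decode inline.
 */
#ifdef DEBUG
static vm_size_t
tlb1_entry_size(int i)
{
	unsigned int tsize;

	/* Invalid entries map nothing. */
	if (!(tlb1[i].mas1 & MAS1_VALID))
		return (0);
	tsize = (tlb1[i].mas1 & MAS1_TSIZE_MASK) >> MAS1_TSIZE_SHIFT;
	return (tsize2size(tsize));
}
#endif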
/*
 * Return 0 if the physical IO range is encompassed by one of the
 * TLB1 entries, otherwise return the related error code.
 */
static int
tlb1_iomapped(int i, vm_paddr_t pa, vm_size_t size, vm_offset_t *va)
{
	uint32_t prot;
	vm_paddr_t pa_start;
	vm_paddr_t pa_end;
	unsigned int entry_tsize;
	vm_size_t entry_size;

	*va = (vm_offset_t)NULL;

	/* Skip invalid entries. */
	if (!(tlb1[i].mas1 & MAS1_VALID))
		return (EINVAL);

	/*
	 * The entry must be cache-inhibited, guarded, and r/w
	 * so it can function as an i/o page.
	 */
	prot = tlb1[i].mas2 & (MAS2_I | MAS2_G);
	if (prot != (MAS2_I | MAS2_G))
		return (EPERM);

	prot = tlb1[i].mas3 & (MAS3_SR | MAS3_SW);
	if (prot != (MAS3_SR | MAS3_SW))
		return (EPERM);

	/* The address should be within the entry range. */
	entry_tsize = (tlb1[i].mas1 & MAS1_TSIZE_MASK) >> MAS1_TSIZE_SHIFT;
	KASSERT((entry_tsize), ("tlb1_iomapped: invalid entry tsize"));

	entry_size = tsize2size(entry_tsize);
	pa_start = tlb1[i].mas3 & MAS3_RPN;
	pa_end = pa_start + entry_size - 1;

	if ((pa < pa_start) || ((pa + size) > pa_end))
		return (ERANGE);

	/* Return virtual address of this mapping. */
	*va = (tlb1[i].mas2 & MAS2_EPN_MASK) + (pa - pa_start);
	return (0);
}
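/*
 * Illustrative usage sketch, not part of the original file: a
 * hypothetical wrapper showing how a pmap_mapdev()-style caller could
 * scan TLB1 with tlb1_iomapped() for an existing I/O mapping before
 * allocating a new entry.  Returns the mapped virtual address, or
 * NULL if no single TLB1 entry covers the requested range.
 */
#ifdef notyet
static void *
tlb1_find_iomapping(vm_paddr_t pa, vm_size_t size)
{
	vm_offset_t va;
	int i;

	for (i = 0; i < TLB1_ENTRIES; i++)
		if (tlb1_iomapped(i, pa, size, &va) == 0)
			return ((void *)va);	/* Reuse existing mapping. */
	return (NULL);
}
#endif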