/*-
 * Copyright (C) 2007-2009 Semihalf, Rafal Jaworowski <raj@semihalf.com>
 * Copyright (C) 2006 Semihalf, Marian Balakowicz <m8@semihalf.com>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN
 * NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
 * TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
 * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
 * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Some hw specific parts of this pmap were derived or influenced
 * by NetBSD's ibm4xx pmap module. More generic code is shared with
 * a few other pmap modules from the FreeBSD tree.
 */

/*
 * VM layout notes:
 *
 * Kernel and user threads run within one common virtual address space
 * defined by AS=0.
 *
 * Virtual address space layout:
 * -----------------------------
 * 0x0000_0000 - 0xafff_ffff   : user process
 * 0xb000_0000 - 0xbfff_ffff   : pmap_mapdev()-ed area (PCI/PCIE etc.)
 * 0xc000_0000 - 0xc0ff_ffff   : kernel reserved
 *   0xc000_0000 - data_end    : kernel code+data, env, metadata etc.
 * 0xc100_0000 - 0xfeef_ffff   : KVA
 *   0xc100_0000 - 0xc100_3fff : reserved for page zero/copy
 *   0xc100_4000 - 0xc200_3fff : reserved for ptbl bufs
 *   0xc200_4000 - 0xc200_8fff : guard page + kstack0
 *   0xc200_9000 - 0xfeef_ffff : actual free KVA space
 * 0xfef0_0000 - 0xffff_ffff   : I/O devices region
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/malloc.h>
#include <sys/ktr.h>
#include <sys/proc.h>
#include <sys/user.h>
#include <sys/queue.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/linker.h>
#include <sys/msgbuf.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/rwlock.h>
#include <sys/sched.h>
#include <sys/smp.h>
#include <sys/vmmeter.h>

#include <vm/vm.h>
#include <vm/vm_page.h>
#include <vm/vm_kern.h>
#include <vm/vm_pageout.h>
#include <vm/vm_extern.h>
#include <vm/vm_object.h>
#include <vm/vm_param.h>
#include <vm/vm_map.h>
#include <vm/vm_pager.h>
#include <vm/uma.h>

#include <machine/cpu.h>
#include <machine/pcb.h>
#include <machine/platform.h>

#include <machine/tlb.h>
#include <machine/spr.h>
#include <machine/md_var.h>
#include <machine/mmuvar.h>
#include <machine/pmap.h>
#include <machine/pte.h>

#include "mmu_if.h"

#ifdef DEBUG
#define debugf(fmt, args...) printf(fmt, ##args)
#else
#define debugf(fmt, args...)
#endif

#define TODO panic("%s: not implemented", __func__);

extern int dumpsys_minidump;

extern unsigned char _etext[];
extern unsigned char _end[];

extern uint32_t *bootinfo;

#ifdef SMP
extern uint32_t bp_ntlb1s;
#endif

vm_paddr_t kernload;
vm_offset_t kernstart;
vm_size_t kernsize;

/* Message buffer and tables. */
static vm_offset_t data_start;
static vm_size_t data_end;

/* Phys/avail memory regions. */
static struct mem_region *availmem_regions;
static int availmem_regions_sz;
static struct mem_region *physmem_regions;
static int physmem_regions_sz;

/* Reserved KVA space and mutex for mmu_booke_zero_page. */
static vm_offset_t zero_page_va;
static struct mtx zero_page_mutex;

static struct mtx tlbivax_mutex;

/*
 * Reserved KVA space for mmu_booke_zero_page_idle. This is used
 * by the idle thread only, no lock required.
 */
static vm_offset_t zero_page_idle_va;

/* Reserved KVA space and mutex for mmu_booke_copy_page. */
static vm_offset_t copy_page_src_va;
static vm_offset_t copy_page_dst_va;
static struct mtx copy_page_mutex;

/**************************************************************************/
/* PMAP */
/**************************************************************************/

static int mmu_booke_enter_locked(mmu_t, pmap_t, vm_offset_t, vm_page_t,
    vm_prot_t, u_int flags, int8_t psind);

unsigned int kptbl_min;		/* Index of the first kernel ptbl. */
unsigned int kernel_ptbls;	/* Number of KVA ptbls. */

/*
 * If user pmap is processed with mmu_booke_remove and the resident count
 * drops to 0, there are no more pages to remove, so we need not continue.
 */
#define PMAP_REMOVE_DONE(pmap) \
    ((pmap) != kernel_pmap && (pmap)->pm_stats.resident_count == 0)

extern void tid_flush(tlbtid_t);

/**************************************************************************/
/* TLB and TID handling */
/**************************************************************************/

/* Translation ID busy table */
static volatile pmap_t tidbusy[MAXCPU][TID_MAX + 1];

/*
 * TLB0 capabilities (entry, way numbers etc.). These can vary between e500
 * core revisions and should be read from h/w registers during early config.
 */
uint32_t tlb0_entries;
uint32_t tlb0_ways;
uint32_t tlb0_entries_per_way;

#define TLB0_ENTRIES		(tlb0_entries)
#define TLB0_WAYS		(tlb0_ways)
#define TLB0_ENTRIES_PER_WAY	(tlb0_entries_per_way)

#define TLB1_ENTRIES 16

/* In-ram copy of the TLB1 */
static tlb_entry_t tlb1[TLB1_ENTRIES];

/* Next free entry in the TLB1 */
static unsigned int tlb1_idx;
static vm_offset_t tlb1_map_base = VM_MAX_KERNEL_ADDRESS;

static tlbtid_t tid_alloc(struct pmap *);

static void tlb_print_entry(int, uint32_t, uint32_t, uint32_t, uint32_t);

static int tlb1_set_entry(vm_offset_t, vm_offset_t, vm_size_t, uint32_t);
static void tlb1_write_entry(unsigned int);
static int tlb1_iomapped(int, vm_paddr_t, vm_size_t, vm_offset_t *);
static vm_size_t tlb1_mapin_region(vm_offset_t, vm_paddr_t, vm_size_t);

static vm_size_t tsize2size(unsigned int);
static unsigned int size2tsize(vm_size_t);
static unsigned int ilog2(unsigned int);

static void set_mas4_defaults(void);

static inline void tlb0_flush_entry(vm_offset_t);
static inline unsigned int tlb0_tableidx(vm_offset_t, unsigned int);

/**************************************************************************/
/* Page table management */
/**************************************************************************/

static struct rwlock_padalign pvh_global_lock;

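/*
 * Locking notes: pmap modifications are done with the per-pmap lock held
 * and, for managed pages, with pvh_global_lock write-locked (see the
 * PMAP_LOCK_ASSERT/rw_assert calls in pv_insert() and pv_remove() below).
 * PTE and pdir updates that must be observed consistently by the TLB miss
 * handlers are additionally bracketed by the tlbivax_mutex spin mutex and
 * tlb_miss_lock()/tlb_miss_unlock().
 */
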
/* Data for the pv entry allocation mechanism */
static uma_zone_t pvzone;
static int pv_entry_count = 0, pv_entry_max = 0, pv_entry_high_water = 0;

#define PV_ENTRY_ZONE_MIN	2048	/* min pv entries in uma zone */

#ifndef PMAP_SHPGPERPROC
#define PMAP_SHPGPERPROC	200
#endif

static void ptbl_init(void);
static struct ptbl_buf *ptbl_buf_alloc(void);
static void ptbl_buf_free(struct ptbl_buf *);
static void ptbl_free_pmap_ptbl(pmap_t, pte_t *);

static pte_t *ptbl_alloc(mmu_t, pmap_t, unsigned int, boolean_t);
static void ptbl_free(mmu_t, pmap_t, unsigned int);
static void ptbl_hold(mmu_t, pmap_t, unsigned int);
static int ptbl_unhold(mmu_t, pmap_t, unsigned int);

static vm_paddr_t pte_vatopa(mmu_t, pmap_t, vm_offset_t);
static pte_t *pte_find(mmu_t, pmap_t, vm_offset_t);
static int pte_enter(mmu_t, pmap_t, vm_page_t, vm_offset_t, uint32_t, boolean_t);
static int pte_remove(mmu_t, pmap_t, vm_offset_t, uint8_t);

static pv_entry_t pv_alloc(void);
static void pv_free(pv_entry_t);
static void pv_insert(pmap_t, vm_offset_t, vm_page_t);
static void pv_remove(pmap_t, vm_offset_t, vm_page_t);

/* Number of kva ptbl buffers, each covering one ptbl (PTBL_PAGES). */
#define PTBL_BUFS		(128 * 16)

struct ptbl_buf {
    TAILQ_ENTRY(ptbl_buf) link;	/* list link */
    vm_offset_t kva;		/* va of mapping */
};

/* ptbl free list and a lock used for access synchronization. */
static TAILQ_HEAD(, ptbl_buf) ptbl_buf_freelist;
static struct mtx ptbl_buf_freelist_lock;

/* Base address of kva space allocated for ptbl bufs. */
static vm_offset_t ptbl_buf_pool_vabase;

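/*
 * The pool provides PTBL_BUFS buffers; each buf describes PTBL_PAGES pages
 * worth of KVA, so the pool spans PTBL_BUFS * PTBL_PAGES * PAGE_SIZE bytes
 * of KVA starting at ptbl_buf_pool_vabase (carved out of virtual_avail in
 * mmu_booke_bootstrap() and put on the freelist in ptbl_init()).
 */
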
/* Pointer to ptbl_buf structures. */
static struct ptbl_buf *ptbl_bufs;

void pmap_bootstrap_ap(volatile uint32_t *);

/*
 * Kernel MMU interface
 */
static void mmu_booke_clear_modify(mmu_t, vm_page_t);
static void mmu_booke_copy(mmu_t, pmap_t, pmap_t, vm_offset_t,
    vm_size_t, vm_offset_t);
static void mmu_booke_copy_page(mmu_t, vm_page_t, vm_page_t);
static void mmu_booke_copy_pages(mmu_t, vm_page_t *,
    vm_offset_t, vm_page_t *, vm_offset_t, int);
static int mmu_booke_enter(mmu_t, pmap_t, vm_offset_t, vm_page_t,
    vm_prot_t, u_int flags, int8_t psind);
static void mmu_booke_enter_object(mmu_t, pmap_t, vm_offset_t, vm_offset_t,
    vm_page_t, vm_prot_t);
static void mmu_booke_enter_quick(mmu_t, pmap_t, vm_offset_t, vm_page_t,
    vm_prot_t);
static vm_paddr_t mmu_booke_extract(mmu_t, pmap_t, vm_offset_t);
static vm_page_t mmu_booke_extract_and_hold(mmu_t, pmap_t, vm_offset_t,
    vm_prot_t);
static void mmu_booke_init(mmu_t);
static boolean_t mmu_booke_is_modified(mmu_t, vm_page_t);
static boolean_t mmu_booke_is_prefaultable(mmu_t, pmap_t, vm_offset_t);
static boolean_t mmu_booke_is_referenced(mmu_t, vm_page_t);
static int mmu_booke_ts_referenced(mmu_t, vm_page_t);
static vm_offset_t mmu_booke_map(mmu_t, vm_offset_t *, vm_paddr_t, vm_paddr_t,
    int);
static int mmu_booke_mincore(mmu_t, pmap_t, vm_offset_t,
    vm_paddr_t *);
static void mmu_booke_object_init_pt(mmu_t, pmap_t, vm_offset_t,
    vm_object_t, vm_pindex_t, vm_size_t);
static boolean_t mmu_booke_page_exists_quick(mmu_t, pmap_t, vm_page_t);
static void mmu_booke_page_init(mmu_t, vm_page_t);
static int mmu_booke_page_wired_mappings(mmu_t, vm_page_t);
static void mmu_booke_pinit(mmu_t, pmap_t);
static void mmu_booke_pinit0(mmu_t, pmap_t);
static void mmu_booke_protect(mmu_t, pmap_t, vm_offset_t, vm_offset_t,
    vm_prot_t);
static void mmu_booke_qenter(mmu_t, vm_offset_t, vm_page_t *, int);
static void mmu_booke_qremove(mmu_t, vm_offset_t, int);
static void mmu_booke_release(mmu_t, pmap_t);
static void mmu_booke_remove(mmu_t, pmap_t, vm_offset_t, vm_offset_t);
static void mmu_booke_remove_all(mmu_t, vm_page_t);
static void mmu_booke_remove_write(mmu_t, vm_page_t);
static void mmu_booke_unwire(mmu_t, pmap_t, vm_offset_t, vm_offset_t);
static void mmu_booke_zero_page(mmu_t, vm_page_t);
static void mmu_booke_zero_page_area(mmu_t, vm_page_t, int, int);
static void mmu_booke_zero_page_idle(mmu_t, vm_page_t);
static void mmu_booke_activate(mmu_t, struct thread *);
static void mmu_booke_deactivate(mmu_t, struct thread *);
static void mmu_booke_bootstrap(mmu_t, vm_offset_t, vm_offset_t);
static void *mmu_booke_mapdev(mmu_t, vm_paddr_t, vm_size_t);
static void *mmu_booke_mapdev_attr(mmu_t, vm_paddr_t, vm_size_t, vm_memattr_t);
static void mmu_booke_unmapdev(mmu_t, vm_offset_t, vm_size_t);
static vm_paddr_t mmu_booke_kextract(mmu_t, vm_offset_t);
static void mmu_booke_kenter(mmu_t, vm_offset_t, vm_paddr_t);
static void mmu_booke_kenter_attr(mmu_t, vm_offset_t, vm_paddr_t, vm_memattr_t);
static void mmu_booke_kremove(mmu_t, vm_offset_t);
static boolean_t mmu_booke_dev_direct_mapped(mmu_t, vm_paddr_t, vm_size_t);
static void mmu_booke_sync_icache(mmu_t, pmap_t, vm_offset_t,
    vm_size_t);
static vm_offset_t mmu_booke_dumpsys_map(mmu_t, struct pmap_md *,
    vm_size_t, vm_size_t *);
static void mmu_booke_dumpsys_unmap(mmu_t, struct pmap_md *,
    vm_size_t, vm_offset_t);
static struct pmap_md *mmu_booke_scan_md(mmu_t, struct pmap_md *);

static mmu_method_t mmu_booke_methods[] = {
    /* pmap dispatcher interface */
    MMUMETHOD(mmu_clear_modify, mmu_booke_clear_modify),
    MMUMETHOD(mmu_copy, mmu_booke_copy),
    MMUMETHOD(mmu_copy_page, mmu_booke_copy_page),
    MMUMETHOD(mmu_copy_pages, mmu_booke_copy_pages),
    MMUMETHOD(mmu_enter, mmu_booke_enter),
    MMUMETHOD(mmu_enter_object, mmu_booke_enter_object),
    MMUMETHOD(mmu_enter_quick, mmu_booke_enter_quick),
    MMUMETHOD(mmu_extract, mmu_booke_extract),
    MMUMETHOD(mmu_extract_and_hold, mmu_booke_extract_and_hold),
    MMUMETHOD(mmu_init, mmu_booke_init),
    MMUMETHOD(mmu_is_modified, mmu_booke_is_modified),
    MMUMETHOD(mmu_is_prefaultable, mmu_booke_is_prefaultable),
    MMUMETHOD(mmu_is_referenced, mmu_booke_is_referenced),
    MMUMETHOD(mmu_ts_referenced, mmu_booke_ts_referenced),
    MMUMETHOD(mmu_map, mmu_booke_map),
    MMUMETHOD(mmu_mincore, mmu_booke_mincore),
    MMUMETHOD(mmu_object_init_pt, mmu_booke_object_init_pt),
    MMUMETHOD(mmu_page_exists_quick, mmu_booke_page_exists_quick),
    MMUMETHOD(mmu_page_init, mmu_booke_page_init),
    MMUMETHOD(mmu_page_wired_mappings, mmu_booke_page_wired_mappings),
    MMUMETHOD(mmu_pinit, mmu_booke_pinit),
    MMUMETHOD(mmu_pinit0, mmu_booke_pinit0),
    MMUMETHOD(mmu_protect, mmu_booke_protect),
    MMUMETHOD(mmu_qenter, mmu_booke_qenter),
    MMUMETHOD(mmu_qremove, mmu_booke_qremove),
    MMUMETHOD(mmu_release, mmu_booke_release),
    MMUMETHOD(mmu_remove, mmu_booke_remove),
    MMUMETHOD(mmu_remove_all, mmu_booke_remove_all),
    MMUMETHOD(mmu_remove_write, mmu_booke_remove_write),
    MMUMETHOD(mmu_sync_icache, mmu_booke_sync_icache),
    MMUMETHOD(mmu_unwire, mmu_booke_unwire),
    MMUMETHOD(mmu_zero_page, mmu_booke_zero_page),
    MMUMETHOD(mmu_zero_page_area, mmu_booke_zero_page_area),
    MMUMETHOD(mmu_zero_page_idle, mmu_booke_zero_page_idle),
    MMUMETHOD(mmu_activate, mmu_booke_activate),
    MMUMETHOD(mmu_deactivate, mmu_booke_deactivate),

    /* Internal interfaces */
    MMUMETHOD(mmu_bootstrap, mmu_booke_bootstrap),
    MMUMETHOD(mmu_dev_direct_mapped, mmu_booke_dev_direct_mapped),
    MMUMETHOD(mmu_mapdev, mmu_booke_mapdev),
    MMUMETHOD(mmu_mapdev_attr, mmu_booke_mapdev_attr),
    MMUMETHOD(mmu_kenter, mmu_booke_kenter),
    MMUMETHOD(mmu_kenter_attr, mmu_booke_kenter_attr),
    MMUMETHOD(mmu_kextract, mmu_booke_kextract),
/*  MMUMETHOD(mmu_kremove, mmu_booke_kremove), */
    MMUMETHOD(mmu_unmapdev, mmu_booke_unmapdev),

    /* dumpsys() support */
    MMUMETHOD(mmu_dumpsys_map, mmu_booke_dumpsys_map),
    MMUMETHOD(mmu_dumpsys_unmap, mmu_booke_dumpsys_unmap),
    MMUMETHOD(mmu_scan_md, mmu_booke_scan_md),

    { 0, 0 }
};

MMU_DEF(booke_mmu, MMU_TYPE_BOOKE, mmu_booke_methods, 0);

static __inline uint32_t
tlb_calc_wimg(vm_offset_t pa, vm_memattr_t ma)
{
    uint32_t attrib;
    int i;

    if (ma != VM_MEMATTR_DEFAULT) {
        switch (ma) {
        case VM_MEMATTR_UNCACHEABLE:
            return (PTE_I | PTE_G);
        case VM_MEMATTR_WRITE_COMBINING:
        case VM_MEMATTR_WRITE_BACK:
        case VM_MEMATTR_PREFETCHABLE:
            return (PTE_I);
        case VM_MEMATTR_WRITE_THROUGH:
            return (PTE_W | PTE_M);
        }
    }

    /*
     * Assume the page is cache inhibited and access is guarded unless
     * it's in our available memory array.
     */
    attrib = _TLB_ENTRY_IO;
    for (i = 0; i < physmem_regions_sz; i++) {
        if ((pa >= physmem_regions[i].mr_start) &&
            (pa < (physmem_regions[i].mr_start +
             physmem_regions[i].mr_size))) {
            attrib = _TLB_ENTRY_MEM;
            break;
        }
    }

    return (attrib);
}

static inline void
tlb_miss_lock(void)
{
#ifdef SMP
    struct pcpu *pc;

    if (!smp_started)
        return;

    STAILQ_FOREACH(pc, &cpuhead, pc_allcpu) {
        if (pc != pcpup) {

            CTR3(KTR_PMAP, "%s: tlb miss LOCK of CPU=%d, "
                "tlb_lock=%p", __func__, pc->pc_cpuid, pc->pc_booke_tlb_lock);

            KASSERT((pc->pc_cpuid != PCPU_GET(cpuid)),
                ("tlb_miss_lock: tried to lock self"));

            tlb_lock(pc->pc_booke_tlb_lock);

            CTR1(KTR_PMAP, "%s: locked", __func__);
        }
    }
#endif
}

static inline void
tlb_miss_unlock(void)
{
#ifdef SMP
    struct pcpu *pc;

    if (!smp_started)
        return;

    STAILQ_FOREACH(pc, &cpuhead, pc_allcpu) {
        if (pc != pcpup) {
            CTR2(KTR_PMAP, "%s: tlb miss UNLOCK of CPU=%d",
                __func__, pc->pc_cpuid);

            tlb_unlock(pc->pc_booke_tlb_lock);

            CTR1(KTR_PMAP, "%s: unlocked", __func__);
        }
    }
#endif
}

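/*
 * tlb_miss_lock()/tlb_miss_unlock() take every other CPU's
 * pc_booke_tlb_lock so that remote TLB miss handling is held off while a
 * PTE or pdir entry is being changed and the corresponding TLB0 entry
 * flushed; otherwise another CPU could refill its TLB from a half-updated
 * page table (compare the comment in ptbl_free() below).
 */
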
/* Return number of entries in TLB0. */
static __inline void
tlb0_get_tlbconf(void)
{
    uint32_t tlb0_cfg;

    tlb0_cfg = mfspr(SPR_TLB0CFG);
    tlb0_entries = tlb0_cfg & TLBCFG_NENTRY_MASK;
    tlb0_ways = (tlb0_cfg & TLBCFG_ASSOC_MASK) >> TLBCFG_ASSOC_SHIFT;
    tlb0_entries_per_way = tlb0_entries / tlb0_ways;
}

/* Initialize pool of kva ptbl buffers. */
static void
ptbl_init(void)
{
    int i;

    CTR3(KTR_PMAP, "%s: s (ptbl_bufs = 0x%08x size 0x%08x)", __func__,
        (uint32_t)ptbl_bufs, sizeof(struct ptbl_buf) * PTBL_BUFS);
    CTR3(KTR_PMAP, "%s: s (ptbl_buf_pool_vabase = 0x%08x size = 0x%08x)",
        __func__, ptbl_buf_pool_vabase, PTBL_BUFS * PTBL_PAGES * PAGE_SIZE);

    mtx_init(&ptbl_buf_freelist_lock, "ptbl bufs lock", NULL, MTX_DEF);
    TAILQ_INIT(&ptbl_buf_freelist);

    for (i = 0; i < PTBL_BUFS; i++) {
        ptbl_bufs[i].kva = ptbl_buf_pool_vabase + i * PTBL_PAGES * PAGE_SIZE;
        TAILQ_INSERT_TAIL(&ptbl_buf_freelist, &ptbl_bufs[i], link);
    }
}

/* Get a ptbl_buf from the freelist. */
static struct ptbl_buf *
ptbl_buf_alloc(void)
{
    struct ptbl_buf *buf;

    mtx_lock(&ptbl_buf_freelist_lock);
    buf = TAILQ_FIRST(&ptbl_buf_freelist);
    if (buf != NULL)
        TAILQ_REMOVE(&ptbl_buf_freelist, buf, link);
    mtx_unlock(&ptbl_buf_freelist_lock);

    CTR2(KTR_PMAP, "%s: buf = %p", __func__, buf);

    return (buf);
}

/* Return ptbl buf to free pool. */
static void
ptbl_buf_free(struct ptbl_buf *buf)
{

    CTR2(KTR_PMAP, "%s: buf = %p", __func__, buf);

    mtx_lock(&ptbl_buf_freelist_lock);
    TAILQ_INSERT_TAIL(&ptbl_buf_freelist, buf, link);
    mtx_unlock(&ptbl_buf_freelist_lock);
}

/*
 * Search the pmap's list of allocated ptbl bufs for the one describing the
 * given ptbl, and free it.
 */
static void
ptbl_free_pmap_ptbl(pmap_t pmap, pte_t *ptbl)
{
    struct ptbl_buf *pbuf;

    CTR2(KTR_PMAP, "%s: ptbl = %p", __func__, ptbl);

    PMAP_LOCK_ASSERT(pmap, MA_OWNED);

    TAILQ_FOREACH(pbuf, &pmap->pm_ptbl_list, link)
        if (pbuf->kva == (vm_offset_t)ptbl) {
            /* Remove from pmap ptbl buf list. */
            TAILQ_REMOVE(&pmap->pm_ptbl_list, pbuf, link);

            /* Free corresponding ptbl buf. */
            ptbl_buf_free(pbuf);
            break;
        }
}

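/*
 * ptbl_alloc() below pairs a ptbl_buf (a KVA window from the pool above)
 * with PTBL_PAGES freshly allocated, wired physical pages: the pages are
 * mapped at pbuf->kva with mmu_booke_qenter() and the buf is linked onto
 * the owning pmap's pm_ptbl_list. ptbl_free() undoes this, and
 * ptbl_free_pmap_ptbl() returns the buf to the freelist.
 */
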
/* Allocate page table. */
static pte_t *
ptbl_alloc(mmu_t mmu, pmap_t pmap, unsigned int pdir_idx, boolean_t nosleep)
{
    vm_page_t mtbl[PTBL_PAGES];
    vm_page_t m;
    struct ptbl_buf *pbuf;
    unsigned int pidx;
    pte_t *ptbl;
    int i, j;

    CTR4(KTR_PMAP, "%s: pmap = %p su = %d pdir_idx = %d", __func__, pmap,
        (pmap == kernel_pmap), pdir_idx);

    KASSERT((pdir_idx <= (VM_MAXUSER_ADDRESS / PDIR_SIZE)),
        ("ptbl_alloc: invalid pdir_idx"));
    KASSERT((pmap->pm_pdir[pdir_idx] == NULL),
        ("pte_alloc: valid ptbl entry exists!"));

    pbuf = ptbl_buf_alloc();
    if (pbuf == NULL)
        panic("pte_alloc: couldn't alloc kernel virtual memory");

    ptbl = (pte_t *)pbuf->kva;

    CTR2(KTR_PMAP, "%s: ptbl kva = %p", __func__, ptbl);

    /* Allocate ptbl pages, this will sleep! */
    for (i = 0; i < PTBL_PAGES; i++) {
        pidx = (PTBL_PAGES * pdir_idx) + i;
        while ((m = vm_page_alloc(NULL, pidx,
            VM_ALLOC_NOOBJ | VM_ALLOC_WIRED)) == NULL) {
            PMAP_UNLOCK(pmap);
            rw_wunlock(&pvh_global_lock);
            if (nosleep) {
                ptbl_free_pmap_ptbl(pmap, ptbl);
                for (j = 0; j < i; j++)
                    vm_page_free(mtbl[j]);
                atomic_subtract_int(&cnt.v_wire_count, i);
                return (NULL);
            }
            VM_WAIT;
            rw_wlock(&pvh_global_lock);
            PMAP_LOCK(pmap);
        }
        mtbl[i] = m;
    }

    /* Map allocated pages into kernel_pmap. */
    mmu_booke_qenter(mmu, (vm_offset_t)ptbl, mtbl, PTBL_PAGES);

    /* Zero whole ptbl. */
    bzero((caddr_t)ptbl, PTBL_PAGES * PAGE_SIZE);

    /* Add pbuf to the pmap ptbl bufs list. */
    TAILQ_INSERT_TAIL(&pmap->pm_ptbl_list, pbuf, link);

    return (ptbl);
}

/* Free ptbl pages and invalidate pdir entry. */
static void
ptbl_free(mmu_t mmu, pmap_t pmap, unsigned int pdir_idx)
{
    pte_t *ptbl;
    vm_paddr_t pa;
    vm_offset_t va;
    vm_page_t m;
    int i;

    CTR4(KTR_PMAP, "%s: pmap = %p su = %d pdir_idx = %d", __func__, pmap,
        (pmap == kernel_pmap), pdir_idx);

    KASSERT((pdir_idx <= (VM_MAXUSER_ADDRESS / PDIR_SIZE)),
        ("ptbl_free: invalid pdir_idx"));

    ptbl = pmap->pm_pdir[pdir_idx];

    CTR2(KTR_PMAP, "%s: ptbl = %p", __func__, ptbl);

    KASSERT((ptbl != NULL), ("ptbl_free: null ptbl"));

    /*
     * Invalidate the pdir entry as soon as possible, so that other CPUs
     * don't attempt to look up the page tables we are releasing.
     */
    mtx_lock_spin(&tlbivax_mutex);
    tlb_miss_lock();

    pmap->pm_pdir[pdir_idx] = NULL;

    tlb_miss_unlock();
    mtx_unlock_spin(&tlbivax_mutex);

    for (i = 0; i < PTBL_PAGES; i++) {
        va = ((vm_offset_t)ptbl + (i * PAGE_SIZE));
        pa = pte_vatopa(mmu, kernel_pmap, va);
        m = PHYS_TO_VM_PAGE(pa);
        vm_page_free_zero(m);
        atomic_subtract_int(&cnt.v_wire_count, 1);
        mmu_booke_kremove(mmu, va);
    }

    ptbl_free_pmap_ptbl(pmap, ptbl);
}

/*
 * Decrement ptbl pages hold count and attempt to free ptbl pages.
 * Called when removing pte entry from ptbl.
 *
 * Return 1 if ptbl pages were freed.
 */
static int
ptbl_unhold(mmu_t mmu, pmap_t pmap, unsigned int pdir_idx)
{
    pte_t *ptbl;
    vm_paddr_t pa;
    vm_page_t m;
    int i;

    CTR4(KTR_PMAP, "%s: pmap = %p su = %d pdir_idx = %d", __func__, pmap,
        (pmap == kernel_pmap), pdir_idx);

    KASSERT((pdir_idx <= (VM_MAXUSER_ADDRESS / PDIR_SIZE)),
        ("ptbl_unhold: invalid pdir_idx"));
    KASSERT((pmap != kernel_pmap),
        ("ptbl_unhold: unholding kernel ptbl!"));

    ptbl = pmap->pm_pdir[pdir_idx];

    //debugf("ptbl_unhold: ptbl = 0x%08x\n", (u_int32_t)ptbl);
    KASSERT(((vm_offset_t)ptbl >= VM_MIN_KERNEL_ADDRESS),
        ("ptbl_unhold: non kva ptbl"));

    /* decrement hold count */
    for (i = 0; i < PTBL_PAGES; i++) {
        pa = pte_vatopa(mmu, kernel_pmap,
            (vm_offset_t)ptbl + (i * PAGE_SIZE));
        m = PHYS_TO_VM_PAGE(pa);
        m->wire_count--;
    }

    /*
     * Free ptbl pages if there are no pte entries in this ptbl.
     * wire_count has the same value for all ptbl pages, so check the last
     * page.
     */
    if (m->wire_count == 0) {
        ptbl_free(mmu, pmap, pdir_idx);

        //debugf("ptbl_unhold: e (freed ptbl)\n");
        return (1);
    }

    return (0);
}

/*
 * Increment hold count for ptbl pages. This routine is used when a new pte
 * entry is being inserted into the ptbl.
 */
static void
ptbl_hold(mmu_t mmu, pmap_t pmap, unsigned int pdir_idx)
{
    vm_paddr_t pa;
    pte_t *ptbl;
    vm_page_t m;
    int i;

    CTR3(KTR_PMAP, "%s: pmap = %p pdir_idx = %d", __func__, pmap,
        pdir_idx);

    KASSERT((pdir_idx <= (VM_MAXUSER_ADDRESS / PDIR_SIZE)),
        ("ptbl_hold: invalid pdir_idx"));
    KASSERT((pmap != kernel_pmap),
        ("ptbl_hold: holding kernel ptbl!"));

    ptbl = pmap->pm_pdir[pdir_idx];

    KASSERT((ptbl != NULL), ("ptbl_hold: null ptbl"));

    for (i = 0; i < PTBL_PAGES; i++) {
        pa = pte_vatopa(mmu, kernel_pmap,
            (vm_offset_t)ptbl + (i * PAGE_SIZE));
        m = PHYS_TO_VM_PAGE(pa);
        m->wire_count++;
    }
}

/* Allocate pv_entry structure. */
pv_entry_t
pv_alloc(void)
{
    pv_entry_t pv;

    pv_entry_count++;
    if (pv_entry_count > pv_entry_high_water)
        pagedaemon_wakeup();
    pv = uma_zalloc(pvzone, M_NOWAIT);

    return (pv);
}

/* Free pv_entry structure. */
static __inline void
pv_free(pv_entry_t pve)
{

    pv_entry_count--;
    uma_zfree(pvzone, pve);
}

/* Allocate and initialize pv_entry structure. */
static void
pv_insert(pmap_t pmap, vm_offset_t va, vm_page_t m)
{
    pv_entry_t pve;

    //int su = (pmap == kernel_pmap);
    //debugf("pv_insert: s (su = %d pmap = 0x%08x va = 0x%08x m = 0x%08x)\n", su,
    //    (u_int32_t)pmap, va, (u_int32_t)m);

    pve = pv_alloc();
    if (pve == NULL)
        panic("pv_insert: no pv entries!");

    pve->pv_pmap = pmap;
    pve->pv_va = va;

    /* add to pv_list */
    PMAP_LOCK_ASSERT(pmap, MA_OWNED);
    rw_assert(&pvh_global_lock, RA_WLOCKED);

    TAILQ_INSERT_TAIL(&m->md.pv_list, pve, pv_link);

    //debugf("pv_insert: e\n");
}

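/*
 * Together, pv_insert() and pv_remove() keep m->md.pv_list as the list of
 * all (pmap, va) mappings of a managed page; per-page operations such as
 * mmu_booke_remove_all() and mmu_booke_remove_write() rely on it to find
 * every mapping of the page, and an emptied list lets pv_remove() clear
 * PGA_WRITEABLE.
 */
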
/* Destroy pv entry. */
static void
pv_remove(pmap_t pmap, vm_offset_t va, vm_page_t m)
{
    pv_entry_t pve;

    //int su = (pmap == kernel_pmap);
    //debugf("pv_remove: s (su = %d pmap = 0x%08x va = 0x%08x)\n", su, (u_int32_t)pmap, va);

    PMAP_LOCK_ASSERT(pmap, MA_OWNED);
    rw_assert(&pvh_global_lock, RA_WLOCKED);

    /* find pv entry */
    TAILQ_FOREACH(pve, &m->md.pv_list, pv_link) {
        if ((pmap == pve->pv_pmap) && (va == pve->pv_va)) {
            /* remove from pv_list */
            TAILQ_REMOVE(&m->md.pv_list, pve, pv_link);
            if (TAILQ_EMPTY(&m->md.pv_list))
                vm_page_aflag_clear(m, PGA_WRITEABLE);

            /* free pv entry struct */
            pv_free(pve);
            break;
        }
    }

    //debugf("pv_remove: e\n");
}

/*
 * Clean pte entry, try to free page table page if requested.
 *
 * Return 1 if ptbl pages were freed, otherwise return 0.
 */
static int
pte_remove(mmu_t mmu, pmap_t pmap, vm_offset_t va, uint8_t flags)
{
    unsigned int pdir_idx = PDIR_IDX(va);
    unsigned int ptbl_idx = PTBL_IDX(va);
    vm_page_t m;
    pte_t *ptbl;
    pte_t *pte;

    //int su = (pmap == kernel_pmap);
    //debugf("pte_remove: s (su = %d pmap = 0x%08x va = 0x%08x flags = %d)\n",
    //    su, (u_int32_t)pmap, va, flags);

    ptbl = pmap->pm_pdir[pdir_idx];
    KASSERT(ptbl, ("pte_remove: null ptbl"));

    pte = &ptbl[ptbl_idx];

    if (pte == NULL || !PTE_ISVALID(pte))
        return (0);

    if (PTE_ISWIRED(pte))
        pmap->pm_stats.wired_count--;

    /* Handle managed entry. */
    if (PTE_ISMANAGED(pte)) {
        /* Get vm_page_t for mapped pte. */
        m = PHYS_TO_VM_PAGE(PTE_PA(pte));

        if (PTE_ISMODIFIED(pte))
            vm_page_dirty(m);

        if (PTE_ISREFERENCED(pte))
            vm_page_aflag_set(m, PGA_REFERENCED);

        pv_remove(pmap, va, m);
    }

    mtx_lock_spin(&tlbivax_mutex);
    tlb_miss_lock();

    tlb0_flush_entry(va);
    pte->flags = 0;
    pte->rpn = 0;

    tlb_miss_unlock();
    mtx_unlock_spin(&tlbivax_mutex);

    pmap->pm_stats.resident_count--;

    if (flags & PTBL_UNHOLD) {
        //debugf("pte_remove: e (unhold)\n");
        return (ptbl_unhold(mmu, pmap, pdir_idx));
    }

    //debugf("pte_remove: e\n");
    return (0);
}

/*
 * Insert PTE for a given page and virtual address.
 */
static int
pte_enter(mmu_t mmu, pmap_t pmap, vm_page_t m, vm_offset_t va, uint32_t flags,
    boolean_t nosleep)
{
    unsigned int pdir_idx = PDIR_IDX(va);
    unsigned int ptbl_idx = PTBL_IDX(va);
    pte_t *ptbl, *pte;

    CTR4(KTR_PMAP, "%s: su = %d pmap = %p va = %p", __func__,
        pmap == kernel_pmap, pmap, va);

    /* Get the page table pointer. */
    ptbl = pmap->pm_pdir[pdir_idx];

    if (ptbl == NULL) {
        /* Allocate page table pages. */
        ptbl = ptbl_alloc(mmu, pmap, pdir_idx, nosleep);
        if (ptbl == NULL) {
            KASSERT(nosleep, ("nosleep and NULL ptbl"));
            return (ENOMEM);
        }
    } else {
        /*
         * Check if there is valid mapping for requested
         * va, if there is, remove it.
         */
        pte = &pmap->pm_pdir[pdir_idx][ptbl_idx];
        if (PTE_ISVALID(pte)) {
            pte_remove(mmu, pmap, va, PTBL_HOLD);
        } else {
            /*
             * pte is not used, increment hold count
             * for ptbl pages.
             */
            if (pmap != kernel_pmap)
                ptbl_hold(mmu, pmap, pdir_idx);
        }
    }

    /*
     * Insert pv_entry into pv_list for mapped page if part of managed
     * memory.
     */
    if ((m->oflags & VPO_UNMANAGED) == 0) {
        flags |= PTE_MANAGED;

        /* Create and insert pv entry. */
        pv_insert(pmap, va, m);
    }

    pmap->pm_stats.resident_count++;

    mtx_lock_spin(&tlbivax_mutex);
    tlb_miss_lock();

    tlb0_flush_entry(va);
    if (pmap->pm_pdir[pdir_idx] == NULL) {
        /*
         * If we just allocated a new page table, hook it in
         * the pdir.
         */
        pmap->pm_pdir[pdir_idx] = ptbl;
    }
    pte = &(pmap->pm_pdir[pdir_idx][ptbl_idx]);
    pte->rpn = VM_PAGE_TO_PHYS(m) & ~PTE_PA_MASK;
    pte->flags |= (PTE_VALID | flags);

    tlb_miss_unlock();
    mtx_unlock_spin(&tlbivax_mutex);
    return (0);
}

/* Return the pa for the given pmap/va. */
static vm_paddr_t
pte_vatopa(mmu_t mmu, pmap_t pmap, vm_offset_t va)
{
    vm_paddr_t pa = 0;
    pte_t *pte;

    pte = pte_find(mmu, pmap, va);
    if ((pte != NULL) && PTE_ISVALID(pte))
        pa = (PTE_PA(pte) | (va & PTE_PA_MASK));
    return (pa);
}

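/*
 * A virtual address is resolved in two steps: PDIR_IDX(va) selects the
 * page table (ptbl) pointer in pmap->pm_pdir[] and PTBL_IDX(va) selects
 * the pte within that ptbl, i.e. roughly:
 *
 *	pte = &pmap->pm_pdir[PDIR_IDX(va)][PTBL_IDX(va)];
 *
 * pte_find() below is the canonical lookup; pte_vatopa() above combines
 * PTE_PA(pte) with the page offset (va & PTE_PA_MASK) to form the
 * physical address.
 */
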
/* Get a pointer to a PTE in a page table. */
static pte_t *
pte_find(mmu_t mmu, pmap_t pmap, vm_offset_t va)
{
    unsigned int pdir_idx = PDIR_IDX(va);
    unsigned int ptbl_idx = PTBL_IDX(va);

    KASSERT((pmap != NULL), ("pte_find: invalid pmap"));

    if (pmap->pm_pdir[pdir_idx])
        return (&(pmap->pm_pdir[pdir_idx][ptbl_idx]));

    return (NULL);
}

/**************************************************************************/
/* PMAP related */
/**************************************************************************/

/*
 * This is called during booke_init, before the system is really initialized.
 */
static void
mmu_booke_bootstrap(mmu_t mmu, vm_offset_t start, vm_offset_t kernelend)
{
    vm_offset_t phys_kernelend;
    struct mem_region *mp, *mp1;
    int cnt, i, j;
    u_int s, e, sz;
    u_int phys_avail_count;
    vm_size_t physsz, hwphyssz, kstack0_sz;
    vm_offset_t kernel_pdir, kstack0, va;
    vm_paddr_t kstack0_phys;
    void *dpcpu;
    pte_t *pte;

    debugf("mmu_booke_bootstrap: entered\n");

    /* Initialize invalidation mutex */
    mtx_init(&tlbivax_mutex, "tlbivax", NULL, MTX_SPIN);

    /* Read TLB0 size and associativity. */
    tlb0_get_tlbconf();

    /*
     * Align kernel start and end address (kernel image).
     * Note that kernel end does not necessarily relate to kernsize.
     * kernsize is the size of the kernel that is actually mapped.
     * Also note that "start - 1" is deliberate. With SMP, the
     * entry point is exactly a page from the actual load address.
     * As such, trunc_page() has no effect and we're off by a page.
     * Since we always have the ELF header between the load address
     * and the entry point, we can safely subtract 1 to compensate.
     */
    kernstart = trunc_page(start - 1);
    data_start = round_page(kernelend);
    data_end = data_start;

    /*
     * Addresses of preloaded modules (like file systems) use
     * physical addresses. Make sure we relocate those into
     * virtual addresses.
     */
    preload_addr_relocate = kernstart - kernload;

    /* Allocate the dynamic per-cpu area. */
    dpcpu = (void *)data_end;
    data_end += DPCPU_SIZE;

    /* Allocate space for the message buffer. */
    msgbufp = (struct msgbuf *)data_end;
    data_end += msgbufsize;
    debugf(" msgbufp at 0x%08x end = 0x%08x\n", (uint32_t)msgbufp,
        data_end);

    data_end = round_page(data_end);

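    /*
     * Boot-time structures (the dpcpu area and message buffer above, the
     * ptbl_bufs array and kernel pdir below) are carved out of physical
     * memory directly behind the kernel image simply by advancing
     * data_end; the extra range is mapped via TLB1 further down if the
     * existing kernel mapping does not already cover it.
     */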
    /* Allocate space for ptbl_bufs. */
    ptbl_bufs = (struct ptbl_buf *)data_end;
    data_end += sizeof(struct ptbl_buf) * PTBL_BUFS;
    debugf(" ptbl_bufs at 0x%08x end = 0x%08x\n", (uint32_t)ptbl_bufs,
        data_end);

    data_end = round_page(data_end);

    /* Allocate PTE tables for kernel KVA. */
    kernel_pdir = data_end;
    kernel_ptbls = (VM_MAX_KERNEL_ADDRESS - VM_MIN_KERNEL_ADDRESS +
        PDIR_SIZE - 1) / PDIR_SIZE;
    data_end += kernel_ptbls * PTBL_PAGES * PAGE_SIZE;
    debugf(" kernel ptbls: %d\n", kernel_ptbls);
    debugf(" kernel pdir at 0x%08x end = 0x%08x\n", kernel_pdir, data_end);

    debugf(" data_end: 0x%08x\n", data_end);
    if (data_end - kernstart > kernsize) {
        kernsize += tlb1_mapin_region(kernstart + kernsize,
            kernload + kernsize, (data_end - kernstart) - kernsize);
    }
    data_end = kernstart + kernsize;
    debugf(" updated data_end: 0x%08x\n", data_end);

    /*
     * Clear the structures - note we can only do it safely after the
     * possible additional TLB1 translations are in place (above) so that
     * all range up to the currently calculated 'data_end' is covered.
     */
    dpcpu_init(dpcpu, 0);
    memset((void *)ptbl_bufs, 0, sizeof(struct ptbl_buf) * PTBL_SIZE);
    memset((void *)kernel_pdir, 0, kernel_ptbls * PTBL_PAGES * PAGE_SIZE);

    /*******************************************************/
    /* Set the start and end of kva. */
    /*******************************************************/
    virtual_avail = round_page(data_end);
    virtual_end = VM_MAX_KERNEL_ADDRESS;

    /* Allocate KVA space for page zero/copy operations. */
    zero_page_va = virtual_avail;
    virtual_avail += PAGE_SIZE;
    zero_page_idle_va = virtual_avail;
    virtual_avail += PAGE_SIZE;
    copy_page_src_va = virtual_avail;
    virtual_avail += PAGE_SIZE;
    copy_page_dst_va = virtual_avail;
    virtual_avail += PAGE_SIZE;
    debugf("zero_page_va = 0x%08x\n", zero_page_va);
    debugf("zero_page_idle_va = 0x%08x\n", zero_page_idle_va);
    debugf("copy_page_src_va = 0x%08x\n", copy_page_src_va);
    debugf("copy_page_dst_va = 0x%08x\n", copy_page_dst_va);

    /* Initialize page zero/copy mutexes. */
    mtx_init(&zero_page_mutex, "mmu_booke_zero_page", NULL, MTX_DEF);
    mtx_init(&copy_page_mutex, "mmu_booke_copy_page", NULL, MTX_DEF);

    /* Allocate KVA space for ptbl bufs. */
    ptbl_buf_pool_vabase = virtual_avail;
    virtual_avail += PTBL_BUFS * PTBL_PAGES * PAGE_SIZE;
    debugf("ptbl_buf_pool_vabase = 0x%08x end = 0x%08x\n",
        ptbl_buf_pool_vabase, virtual_avail);

    /* Calculate corresponding physical addresses for the kernel region. */
    phys_kernelend = kernload + kernsize;
    debugf("kernel image and allocated data:\n");
    debugf(" kernload  = 0x%08x\n", kernload);
    debugf(" kernstart = 0x%08x\n", kernstart);
    debugf(" kernsize  = 0x%08x\n", kernsize);

    if (sizeof(phys_avail) / sizeof(phys_avail[0]) < availmem_regions_sz)
        panic("mmu_booke_bootstrap: phys_avail too small");

    /*
     * Remove kernel physical address range from avail regions list. Page
     * align all regions. Non-page aligned memory isn't very interesting
     * to us. Also, sort the entries for ascending addresses.
     */

    /* Retrieve phys/avail mem regions */
    mem_regions(&physmem_regions, &physmem_regions_sz,
        &availmem_regions, &availmem_regions_sz);
    sz = 0;
    cnt = availmem_regions_sz;
    debugf("processing avail regions:\n");
    for (mp = availmem_regions; mp->mr_size; mp++) {
        s = mp->mr_start;
        e = mp->mr_start + mp->mr_size;
        debugf(" %08x-%08x -> ", s, e);
        /* Check whether this region holds all of the kernel. */
        if (s < kernload && e > phys_kernelend) {
            availmem_regions[cnt].mr_start = phys_kernelend;
            availmem_regions[cnt++].mr_size = e - phys_kernelend;
            e = kernload;
        }
        /* Look whether this region starts within the kernel. */
        if (s >= kernload && s < phys_kernelend) {
            if (e <= phys_kernelend)
                goto empty;
            s = phys_kernelend;
        }
        /* Now look whether this region ends within the kernel. */
        if (e > kernload && e <= phys_kernelend) {
            if (s >= kernload)
                goto empty;
            e = kernload;
        }
        /* Now page align the start and size of the region. */
        s = round_page(s);
        e = trunc_page(e);
        if (e < s)
            e = s;
        sz = e - s;
        debugf("%08x-%08x = %x\n", s, e, sz);

        /* Check whether some memory is left here. */
        if (sz == 0) {
        empty:
            memmove(mp, mp + 1,
                (cnt - (mp - availmem_regions)) * sizeof(*mp));
            cnt--;
            mp--;
            continue;
        }

        /* Do an insertion sort. */
        for (mp1 = availmem_regions; mp1 < mp; mp1++)
            if (s < mp1->mr_start)
                break;
        if (mp1 < mp) {
            memmove(mp1 + 1, mp1, (char *)mp - (char *)mp1);
            mp1->mr_start = s;
            mp1->mr_size = sz;
        } else {
            mp->mr_start = s;
            mp->mr_size = sz;
        }
    }
    availmem_regions_sz = cnt;

    /*******************************************************/
    /* Steal physical memory for kernel stack from the end */
    /* of the first avail region                           */
    /*******************************************************/
    kstack0_sz = KSTACK_PAGES * PAGE_SIZE;
    kstack0_phys = availmem_regions[0].mr_start +
        availmem_regions[0].mr_size;
    kstack0_phys -= kstack0_sz;
    availmem_regions[0].mr_size -= kstack0_sz;

    /*******************************************************/
    /* Fill in phys_avail table, based on availmem_regions */
    /*******************************************************/
    phys_avail_count = 0;
    physsz = 0;
    hwphyssz = 0;
    TUNABLE_ULONG_FETCH("hw.physmem", (u_long *) &hwphyssz);

    debugf("fill in phys_avail:\n");
    for (i = 0, j = 0; i < availmem_regions_sz; i++, j += 2) {

        debugf(" region: 0x%08x - 0x%08x (0x%08x)\n",
            availmem_regions[i].mr_start,
            availmem_regions[i].mr_start +
                availmem_regions[i].mr_size,
            availmem_regions[i].mr_size);

        if (hwphyssz != 0 &&
            (physsz + availmem_regions[i].mr_size) >= hwphyssz) {
            debugf(" hw.physmem adjust\n");
            if (physsz < hwphyssz) {
                phys_avail[j] = availmem_regions[i].mr_start;
                phys_avail[j + 1] =
                    availmem_regions[i].mr_start +
                    hwphyssz - physsz;
                physsz = hwphyssz;
                phys_avail_count++;
            }
            break;
        }

        phys_avail[j] = availmem_regions[i].mr_start;
        phys_avail[j + 1] = availmem_regions[i].mr_start +
            availmem_regions[i].mr_size;
        phys_avail_count++;
        physsz += availmem_regions[i].mr_size;
    }
    physmem = btoc(physsz);

    /* Calculate the last available physical address. */
    for (i = 0; phys_avail[i + 2] != 0; i += 2)
        ;
    Maxmem = powerpc_btop(phys_avail[i + 1]);

    debugf("Maxmem = 0x%08lx\n", Maxmem);
    debugf("phys_avail_count = %d\n", phys_avail_count);
    debugf("physsz = 0x%08x physmem = %ld (0x%08lx)\n", physsz, physmem,
        physmem);

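    /*
     * phys_avail[] now holds (start, end) pairs of usable physical memory,
     * terminated by zero entries; the loop above also honors the
     * hw.physmem limit. Maxmem is derived from the end of the last pair.
     */
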
    /*******************************************************/
    /* Initialize (statically allocated) kernel pmap. */
    /*******************************************************/
    PMAP_LOCK_INIT(kernel_pmap);
    kptbl_min = VM_MIN_KERNEL_ADDRESS / PDIR_SIZE;

    debugf("kernel_pmap = 0x%08x\n", (uint32_t)kernel_pmap);
    debugf("kptbl_min = %d, kernel_ptbls = %d\n", kptbl_min, kernel_ptbls);
    debugf("kernel pdir range: 0x%08x - 0x%08x\n",
        kptbl_min * PDIR_SIZE, (kptbl_min + kernel_ptbls) * PDIR_SIZE - 1);

    /* Initialize kernel pdir */
    for (i = 0; i < kernel_ptbls; i++)
        kernel_pmap->pm_pdir[kptbl_min + i] =
            (pte_t *)(kernel_pdir + (i * PAGE_SIZE * PTBL_PAGES));

    for (i = 0; i < MAXCPU; i++) {
        kernel_pmap->pm_tid[i] = TID_KERNEL;

        /* Initialize each CPU's tidbusy entry 0 with kernel_pmap */
        tidbusy[i][0] = kernel_pmap;
    }

    /*
     * Fill in PTEs covering kernel code and data. They are not required
     * for address translation, as this area is covered by static TLB1
     * entries, but for pte_vatopa() to work correctly with kernel area
     * addresses.
     */
    for (va = kernstart; va < data_end; va += PAGE_SIZE) {
        pte = &(kernel_pmap->pm_pdir[PDIR_IDX(va)][PTBL_IDX(va)]);
        pte->rpn = kernload + (va - kernstart);
        pte->flags = PTE_M | PTE_SR | PTE_SW | PTE_SX | PTE_WIRED |
            PTE_VALID;
    }
    /* Mark kernel_pmap active on all CPUs */
    CPU_FILL(&kernel_pmap->pm_active);

    /*
     * Initialize the global pv list lock.
     */
    rw_init(&pvh_global_lock, "pmap pv global");

    /*******************************************************/
    /* Final setup */
    /*******************************************************/

    /* Enter kstack0 into kernel map, provide guard page */
    kstack0 = virtual_avail + KSTACK_GUARD_PAGES * PAGE_SIZE;
    thread0.td_kstack = kstack0;
    thread0.td_kstack_pages = KSTACK_PAGES;

    debugf("kstack_sz = 0x%08x\n", kstack0_sz);
    debugf("kstack0_phys at 0x%08x - 0x%08x\n",
        kstack0_phys, kstack0_phys + kstack0_sz);
    debugf("kstack0 at 0x%08x - 0x%08x\n", kstack0, kstack0 + kstack0_sz);

    virtual_avail += KSTACK_GUARD_PAGES * PAGE_SIZE + kstack0_sz;
    for (i = 0; i < KSTACK_PAGES; i++) {
        mmu_booke_kenter(mmu, kstack0, kstack0_phys);
        kstack0 += PAGE_SIZE;
        kstack0_phys += PAGE_SIZE;
    }

    debugf("virtual_avail = %08x\n", virtual_avail);
    debugf("virtual_end   = %08x\n", virtual_end);

    debugf("mmu_booke_bootstrap: exit\n");
}

void
pmap_bootstrap_ap(volatile uint32_t *trcp __unused)
{
    int i;

    /*
     * Finish TLB1 configuration: the BSP already set up its TLB1 and we
     * have the snapshot of its contents in the s/w tlb1[] table, so use
     * these values directly to (re)program AP's TLB1 hardware.
     */
    for (i = bp_ntlb1s; i < tlb1_idx; i++) {
        /* Skip invalid entries */
        if (!(tlb1[i].mas1 & MAS1_VALID))
            continue;

        tlb1_write_entry(i);
    }

    set_mas4_defaults();
}

/*
 * Get the physical page address for the given pmap/virtual address.
 */
static vm_paddr_t
mmu_booke_extract(mmu_t mmu, pmap_t pmap, vm_offset_t va)
{
    vm_paddr_t pa;

    PMAP_LOCK(pmap);
    pa = pte_vatopa(mmu, pmap, va);
    PMAP_UNLOCK(pmap);

    return (pa);
}

/*
 * Extract the physical page address associated with the given
 * kernel virtual address.
 */
static vm_paddr_t
mmu_booke_kextract(mmu_t mmu, vm_offset_t va)
{
    int i;

    /* Check TLB1 mappings */
    for (i = 0; i < tlb1_idx; i++) {
        if (!(tlb1[i].mas1 & MAS1_VALID))
            continue;
        if (va >= tlb1[i].virt && va < tlb1[i].virt + tlb1[i].size)
            return (tlb1[i].phys + (va - tlb1[i].virt));
    }

    return (pte_vatopa(mmu, kernel_pmap, va));
}

/*
 * Initialize the pmap module.
 * Called by vm_init, to initialize any structures that the pmap
 * system needs to map virtual memory.
1385148456Spjd */ 1386213059Spjdstatic void 1387226716Spjdmmu_booke_init(mmu_t mmu) 1388182452Spjd{ 1389182452Spjd int shpgperproc = PMAP_SHPGPERPROC; 1390148456Spjd 1391148456Spjd /* 1392148456Spjd * Initialize the address space (zone) for the pv entries. Set a 1393148456Spjd * high water mark so that the system can recover from excessive 1394148456Spjd * numbers of pv entries. 1395182452Spjd */ 1396148456Spjd pvzone = uma_zcreate("PV ENTRY", sizeof(struct pv_entry), NULL, NULL, 1397148456Spjd NULL, NULL, UMA_ALIGN_PTR, UMA_ZONE_VM | UMA_ZONE_NOFREE); 1398148456Spjd 1399182452Spjd TUNABLE_INT_FETCH("vm.pmap.shpgperproc", &shpgperproc); 1400182452Spjd pv_entry_max = shpgperproc * maxproc + cnt.v_page_count; 1401182452Spjd 1402182452Spjd TUNABLE_INT_FETCH("vm.pmap.pv_entries", &pv_entry_max); 1403182452Spjd pv_entry_high_water = 9 * (pv_entry_max / 10); 1404182452Spjd 1405182452Spjd uma_zone_reserve_kva(pvzone, pv_entry_max); 1406182452Spjd 1407182452Spjd /* Pre-fill pvzone with initial number of pv entries. */ 1408182452Spjd uma_prealloc(pvzone, PV_ENTRY_ZONE_MIN); 1409182452Spjd 1410182452Spjd /* Initialize ptbl allocation. */ 1411182452Spjd ptbl_init(); 1412182452Spjd} 1413182452Spjd 1414182452Spjd/* 1415182452Spjd * Map a list of wired pages into kernel virtual address space. This is 1416148456Spjd * intended for temporary mappings which do not need page modification or 1417148456Spjd * references recorded. Existing mappings in the region are overwritten. 1418148456Spjd */ 1419148456Spjdstatic void 1420148456Spjdmmu_booke_qenter(mmu_t mmu, vm_offset_t sva, vm_page_t *m, int count) 1421226716Spjd{ 1422148456Spjd vm_offset_t va; 1423153190Spjd 1424153190Spjd va = sva; 1425148456Spjd while (count-- > 0) { 1426148456Spjd mmu_booke_kenter(mmu, va, VM_PAGE_TO_PHYS(*m)); 1427148456Spjd va += PAGE_SIZE; 1428153190Spjd m++; 1429153190Spjd } 1430148456Spjd} 1431226716Spjd 1432226716Spjd/* 1433226716Spjd * Remove page mappings from kernel virtual address space. Intended for 1434226716Spjd * temporary mappings entered by mmu_booke_qenter. 1435226716Spjd */ 1436226716Spjdstatic void 1437148456Spjdmmu_booke_qremove(mmu_t mmu, vm_offset_t sva, int count) 1438148456Spjd{ 1439226716Spjd vm_offset_t va; 1440148456Spjd 1441212934Sbrian va = sva; 1442212934Sbrian while (count-- > 0) { 1443212934Sbrian mmu_booke_kremove(mmu, va); 1444212934Sbrian va += PAGE_SIZE; 1445212934Sbrian } 1446212934Sbrian} 1447212934Sbrian 1448226716Spjd/* 1449212934Sbrian * Map a wired page into kernel virtual address space. 
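 *
 * Typical usage sketch (illustrative only; 'va' is assumed to be a KVA
 * the caller reserved): wire a page, use it, then tear the mapping down
 * again, just as the zero- and copy-page helpers later in this file do:
 *
 *	mmu_booke_kenter(mmu, va, VM_PAGE_TO_PHYS(m));
 *	... access the page through va ...
 *	mmu_booke_kremove(mmu, va);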
1450212934Sbrian */ 1451226716Spjdstatic void 1452226716Spjdmmu_booke_kenter(mmu_t mmu, vm_offset_t va, vm_paddr_t pa) 1453148456Spjd{ 1454148456Spjd 1455148456Spjd mmu_booke_kenter_attr(mmu, va, pa, VM_MEMATTR_DEFAULT); 1456212934Sbrian} 1457212934Sbrian 1458212934Sbrianstatic void 1459212934Sbrianmmu_booke_kenter_attr(mmu_t mmu, vm_offset_t va, vm_paddr_t pa, vm_memattr_t ma) 1460212934Sbrian{ 1461213056Spjd unsigned int pdir_idx = PDIR_IDX(va); 1462212934Sbrian unsigned int ptbl_idx = PTBL_IDX(va); 1463226722Spjd uint32_t flags; 1464212934Sbrian pte_t *pte; 1465212934Sbrian 1466212934Sbrian KASSERT(((va >= VM_MIN_KERNEL_ADDRESS) && 1467212934Sbrian (va <= VM_MAX_KERNEL_ADDRESS)), ("mmu_booke_kenter: invalid va")); 1468212934Sbrian 1469212934Sbrian flags = PTE_SR | PTE_SW | PTE_SX | PTE_WIRED | PTE_VALID; 1470212934Sbrian flags |= tlb_calc_wimg(pa, ma); 1471212934Sbrian 1472212934Sbrian pte = &(kernel_pmap->pm_pdir[pdir_idx][ptbl_idx]); 1473212934Sbrian 1474212934Sbrian mtx_lock_spin(&tlbivax_mutex); 1475212934Sbrian tlb_miss_lock(); 1476213056Spjd 1477212934Sbrian if (PTE_ISVALID(pte)) { 1478212934Sbrian 1479212934Sbrian CTR1(KTR_PMAP, "%s: replacing entry!", __func__); 1480212934Sbrian 1481212934Sbrian /* Flush entry from TLB0 */ 1482213056Spjd tlb0_flush_entry(va); 1483213056Spjd } 1484213056Spjd 1485212934Sbrian pte->rpn = pa & ~PTE_PA_MASK; 1486212934Sbrian pte->flags = flags; 1487212934Sbrian 1488212934Sbrian //debugf("mmu_booke_kenter: pdir_idx = %d ptbl_idx = %d va=0x%08x " 1489212934Sbrian // "pa=0x%08x rpn=0x%08x flags=0x%08x\n", 1490212934Sbrian // pdir_idx, ptbl_idx, va, pa, pte->rpn, pte->flags); 1491212934Sbrian 1492212934Sbrian /* Flush the real memory from the instruction cache. */ 1493212934Sbrian if ((flags & (PTE_I | PTE_G)) == 0) { 1494212934Sbrian __syncicache((void *)va, PAGE_SIZE); 1495212934Sbrian } 1496212934Sbrian 1497212934Sbrian tlb_miss_unlock(); 1498212934Sbrian mtx_unlock_spin(&tlbivax_mutex); 1499212934Sbrian} 1500212934Sbrian 1501213058Spjd/* 1502213058Spjd * Remove a page from kernel page table. 1503213058Spjd */ 1504213058Spjdstatic void 1505212934Sbrianmmu_booke_kremove(mmu_t mmu, vm_offset_t va) 1506212934Sbrian{ 1507213056Spjd unsigned int pdir_idx = PDIR_IDX(va); 1508212934Sbrian unsigned int ptbl_idx = PTBL_IDX(va); 1509212934Sbrian pte_t *pte; 1510212934Sbrian 1511212934Sbrian// CTR2(KTR_PMAP,("%s: s (va = 0x%08x)\n", __func__, va)); 1512212934Sbrian 1513212934Sbrian KASSERT(((va >= VM_MIN_KERNEL_ADDRESS) && 1514226722Spjd (va <= VM_MAX_KERNEL_ADDRESS)), 1515226722Spjd ("mmu_booke_kremove: invalid va")); 1516226722Spjd 1517226722Spjd pte = &(kernel_pmap->pm_pdir[pdir_idx][ptbl_idx]); 1518226722Spjd 1519226722Spjd if (!PTE_ISVALID(pte)) { 1520226722Spjd 1521226722Spjd CTR1(KTR_PMAP, "%s: invalid pte", __func__); 1522226722Spjd 1523212934Sbrian return; 1524226722Spjd } 1525226722Spjd 1526226722Spjd mtx_lock_spin(&tlbivax_mutex); 1527226722Spjd tlb_miss_lock(); 1528226722Spjd 1529226722Spjd /* Invalidate entry in TLB0, update PTE. */ 1530226722Spjd tlb0_flush_entry(va); 1531226722Spjd pte->flags = 0; 1532212934Sbrian pte->rpn = 0; 1533212934Sbrian 1534212934Sbrian tlb_miss_unlock(); 1535212934Sbrian mtx_unlock_spin(&tlbivax_mutex); 1536212934Sbrian} 1537212934Sbrian 1538212934Sbrian/* 1539212934Sbrian * Initialize pmap associated with process 0. 
 */
static void
mmu_booke_pinit0(mmu_t mmu, pmap_t pmap)
{

	PMAP_LOCK_INIT(pmap);
	mmu_booke_pinit(mmu, pmap);
	PCPU_SET(curpmap, pmap);
}

/*
 * Initialize a preallocated and zeroed pmap structure,
 * such as one in a vmspace structure.
 */
static void
mmu_booke_pinit(mmu_t mmu, pmap_t pmap)
{
	int i;

	CTR4(KTR_PMAP, "%s: pmap = %p, proc %d '%s'", __func__, pmap,
	    curthread->td_proc->p_pid, curthread->td_proc->p_comm);

	KASSERT((pmap != kernel_pmap), ("pmap_pinit: initializing kernel_pmap"));

	for (i = 0; i < MAXCPU; i++)
		pmap->pm_tid[i] = TID_NONE;
	CPU_ZERO(&pmap->pm_active);
	bzero(&pmap->pm_stats, sizeof(pmap->pm_stats));
	bzero(&pmap->pm_pdir, sizeof(pte_t *) * PDIR_NENTRIES);
	TAILQ_INIT(&pmap->pm_ptbl_list);
}

/*
 * Release any resources held by the given physical map.
 * Called when a pmap initialized by mmu_booke_pinit is being released.
 * Should only be called if the map contains no valid mappings.
 */
static void
mmu_booke_release(mmu_t mmu, pmap_t pmap)
{

	KASSERT(pmap->pm_stats.resident_count == 0,
	    ("pmap_release: pmap resident count %ld != 0",
	    pmap->pm_stats.resident_count));
}

/*
 * Insert the given physical page at the specified virtual address in the
 * target physical map with the protection requested. If specified the page
 * will be wired down.
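 *
 * Illustrative call (a sketch, not copied from an existing caller):
 * enter a managed page read/write and wired, allowing the page table
 * allocation to sleep:
 *
 *	error = mmu_booke_enter(mmu, pmap, va, m,
 *	    VM_PROT_READ | VM_PROT_WRITE, PMAP_ENTER_WIRED, 0);
 *	if (error != KERN_SUCCESS)
 *		... handle KERN_RESOURCE_SHORTAGE ...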
1590226723Spjd */ 1591226723Spjdstatic int 1592226723Spjdmmu_booke_enter(mmu_t mmu, pmap_t pmap, vm_offset_t va, vm_page_t m, 1593226723Spjd vm_prot_t prot, u_int flags, int8_t psind) 1594226723Spjd{ 1595226723Spjd int error; 1596226723Spjd 1597226723Spjd rw_wlock(&pvh_global_lock); 1598226723Spjd PMAP_LOCK(pmap); 1599226723Spjd error = mmu_booke_enter_locked(mmu, pmap, va, m, prot, flags, psind); 1600226723Spjd rw_wunlock(&pvh_global_lock); 1601226723Spjd PMAP_UNLOCK(pmap); 1602148456Spjd return (error); 1603148456Spjd} 1604148456Spjd 1605153190Spjdstatic int 1606148456Spjdmmu_booke_enter_locked(mmu_t mmu, pmap_t pmap, vm_offset_t va, vm_page_t m, 1607153190Spjd vm_prot_t prot, u_int pmap_flags, int8_t psind __unused) 1608153190Spjd{ 1609148456Spjd pte_t *pte; 1610148456Spjd vm_paddr_t pa; 1611148456Spjd uint32_t flags; 1612148456Spjd int error, su, sync; 1613153190Spjd 1614153190Spjd pa = VM_PAGE_TO_PHYS(m); 1615148456Spjd su = (pmap == kernel_pmap); 1616148456Spjd sync = 0; 1617148456Spjd 1618148456Spjd //debugf("mmu_booke_enter_locked: s (pmap=0x%08x su=%d tid=%d m=0x%08x va=0x%08x " 1619148456Spjd // "pa=0x%08x prot=0x%08x flags=%#x)\n", 1620148456Spjd // (u_int32_t)pmap, su, pmap->pm_tid, 1621148456Spjd // (u_int32_t)m, va, pa, prot, flags); 1622148456Spjd 1623155175Spjd if (su) { 1624148456Spjd KASSERT(((va >= virtual_avail) && 1625148456Spjd (va <= VM_MAX_KERNEL_ADDRESS)), 1626148456Spjd ("mmu_booke_enter_locked: kernel pmap, non kernel va")); 1627148456Spjd } else { 1628148456Spjd KASSERT((va <= VM_MAXUSER_ADDRESS), 1629148456Spjd ("mmu_booke_enter_locked: user pmap, non user va")); 1630226719Spjd } 1631148456Spjd if ((m->oflags & VPO_UNMANAGED) == 0 && !vm_page_xbusied(m)) 1632226719Spjd VM_OBJECT_ASSERT_LOCKED(m->object); 1633148456Spjd 1634153190Spjd PMAP_LOCK_ASSERT(pmap, MA_OWNED); 1635153190Spjd 1636148456Spjd /* 1637148456Spjd * If there is an existing mapping, and the physical address has not 1638148456Spjd * changed, must be protection or wiring change. 1639148456Spjd */ 1640153190Spjd if (((pte = pte_find(mmu, pmap, va)) != NULL) && 1641153190Spjd (PTE_ISVALID(pte)) && (PTE_PA(pte) == pa)) { 1642226719Spjd 1643148456Spjd /* 1644148456Spjd * Before actually updating pte->flags we calculate and 1645148456Spjd * prepare its new value in a helper var. 1646148456Spjd */ 1647148456Spjd flags = pte->flags; 1648148456Spjd flags &= ~(PTE_UW | PTE_UX | PTE_SW | PTE_SX | PTE_MODIFIED); 1649148456Spjd 1650148456Spjd /* Wiring change, just update stats. */ 1651 if ((pmap_flags & PMAP_ENTER_WIRED) != 0) { 1652 if (!PTE_ISWIRED(pte)) { 1653 flags |= PTE_WIRED; 1654 pmap->pm_stats.wired_count++; 1655 } 1656 } else { 1657 if (PTE_ISWIRED(pte)) { 1658 flags &= ~PTE_WIRED; 1659 pmap->pm_stats.wired_count--; 1660 } 1661 } 1662 1663 if (prot & VM_PROT_WRITE) { 1664 /* Add write permissions. */ 1665 flags |= PTE_SW; 1666 if (!su) 1667 flags |= PTE_UW; 1668 1669 if ((flags & PTE_MANAGED) != 0) 1670 vm_page_aflag_set(m, PGA_WRITEABLE); 1671 } else { 1672 /* Handle modified pages, sense modify status. */ 1673 1674 /* 1675 * The PTE_MODIFIED flag could be set by underlying 1676 * TLB misses since we last read it (above), possibly 1677 * other CPUs could update it so we check in the PTE 1678 * directly rather than rely on that saved local flags 1679 * copy. 
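			 *
			 * Illustrative interleaving: CPU1 copies pte->flags
			 * into the local 'flags' variable above; CPU2 then
			 * dirties the page and the TLB miss path sets
			 * PTE_MODIFIED in the PTE; the fresh
			 * PTE_ISMODIFIED() test below hands that late
			 * modification to vm_page_dirty() before the
			 * recomputed flags overwrite the PTE.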
1680 */ 1681 if (PTE_ISMODIFIED(pte)) 1682 vm_page_dirty(m); 1683 } 1684 1685 if (prot & VM_PROT_EXECUTE) { 1686 flags |= PTE_SX; 1687 if (!su) 1688 flags |= PTE_UX; 1689 1690 /* 1691 * Check existing flags for execute permissions: if we 1692 * are turning execute permissions on, icache should 1693 * be flushed. 1694 */ 1695 if ((pte->flags & (PTE_UX | PTE_SX)) == 0) 1696 sync++; 1697 } 1698 1699 flags &= ~PTE_REFERENCED; 1700 1701 /* 1702 * The new flags value is all calculated -- only now actually 1703 * update the PTE. 1704 */ 1705 mtx_lock_spin(&tlbivax_mutex); 1706 tlb_miss_lock(); 1707 1708 tlb0_flush_entry(va); 1709 pte->flags = flags; 1710 1711 tlb_miss_unlock(); 1712 mtx_unlock_spin(&tlbivax_mutex); 1713 1714 } else { 1715 /* 1716 * If there is an existing mapping, but it's for a different 1717 * physical address, pte_enter() will delete the old mapping. 1718 */ 1719 //if ((pte != NULL) && PTE_ISVALID(pte)) 1720 // debugf("mmu_booke_enter_locked: replace\n"); 1721 //else 1722 // debugf("mmu_booke_enter_locked: new\n"); 1723 1724 /* Now set up the flags and install the new mapping. */ 1725 flags = (PTE_SR | PTE_VALID); 1726 flags |= PTE_M; 1727 1728 if (!su) 1729 flags |= PTE_UR; 1730 1731 if (prot & VM_PROT_WRITE) { 1732 flags |= PTE_SW; 1733 if (!su) 1734 flags |= PTE_UW; 1735 1736 if ((m->oflags & VPO_UNMANAGED) == 0) 1737 vm_page_aflag_set(m, PGA_WRITEABLE); 1738 } 1739 1740 if (prot & VM_PROT_EXECUTE) { 1741 flags |= PTE_SX; 1742 if (!su) 1743 flags |= PTE_UX; 1744 } 1745 1746 /* If its wired update stats. */ 1747 if ((pmap_flags & PMAP_ENTER_WIRED) != 0) 1748 flags |= PTE_WIRED; 1749 1750 error = pte_enter(mmu, pmap, m, va, flags, 1751 (pmap_flags & PMAP_ENTER_NOSLEEP) != 0); 1752 if (error != 0) 1753 return (KERN_RESOURCE_SHORTAGE); 1754 1755 if ((flags & PMAP_ENTER_WIRED) != 0) 1756 pmap->pm_stats.wired_count++; 1757 1758 /* Flush the real memory from the instruction cache. */ 1759 if (prot & VM_PROT_EXECUTE) 1760 sync++; 1761 } 1762 1763 if (sync && (su || pmap == PCPU_GET(curpmap))) { 1764 __syncicache((void *)va, PAGE_SIZE); 1765 sync = 0; 1766 } 1767 1768 return (KERN_SUCCESS); 1769} 1770 1771/* 1772 * Maps a sequence of resident pages belonging to the same object. 1773 * The sequence begins with the given page m_start. This page is 1774 * mapped at the given virtual address start. Each subsequent page is 1775 * mapped at a virtual address that is offset from start by the same 1776 * amount as the page is offset from m_start within the object. The 1777 * last page in the sequence is the page with the largest offset from 1778 * m_start that can be mapped at a virtual address less than the given 1779 * virtual address end. Not every virtual page between start and end 1780 * is mapped; only those for which a resident page exists with the 1781 * corresponding offset from m_start are mapped. 
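 *
 * Small worked example (illustrative values): with start = 0x20000000,
 * end = 0x20003000, 4 KB pages and m_start->pindex = 8, resident pages
 * with pindex 8, 9 and 10 are entered at 0x20000000, 0x20001000 and
 * 0x20002000; a resident page at pindex 11 is skipped because its
 * address would be >= end.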
1782 */ 1783static void 1784mmu_booke_enter_object(mmu_t mmu, pmap_t pmap, vm_offset_t start, 1785 vm_offset_t end, vm_page_t m_start, vm_prot_t prot) 1786{ 1787 vm_page_t m; 1788 vm_pindex_t diff, psize; 1789 1790 VM_OBJECT_ASSERT_LOCKED(m_start->object); 1791 1792 psize = atop(end - start); 1793 m = m_start; 1794 rw_wlock(&pvh_global_lock); 1795 PMAP_LOCK(pmap); 1796 while (m != NULL && (diff = m->pindex - m_start->pindex) < psize) { 1797 mmu_booke_enter_locked(mmu, pmap, start + ptoa(diff), m, 1798 prot & (VM_PROT_READ | VM_PROT_EXECUTE), 1799 PMAP_ENTER_NOSLEEP, 0); 1800 m = TAILQ_NEXT(m, listq); 1801 } 1802 rw_wunlock(&pvh_global_lock); 1803 PMAP_UNLOCK(pmap); 1804} 1805 1806static void 1807mmu_booke_enter_quick(mmu_t mmu, pmap_t pmap, vm_offset_t va, vm_page_t m, 1808 vm_prot_t prot) 1809{ 1810 1811 rw_wlock(&pvh_global_lock); 1812 PMAP_LOCK(pmap); 1813 mmu_booke_enter_locked(mmu, pmap, va, m, 1814 prot & (VM_PROT_READ | VM_PROT_EXECUTE), PMAP_ENTER_NOSLEEP, 1815 0); 1816 rw_wunlock(&pvh_global_lock); 1817 PMAP_UNLOCK(pmap); 1818} 1819 1820/* 1821 * Remove the given range of addresses from the specified map. 1822 * 1823 * It is assumed that the start and end are properly rounded to the page size. 1824 */ 1825static void 1826mmu_booke_remove(mmu_t mmu, pmap_t pmap, vm_offset_t va, vm_offset_t endva) 1827{ 1828 pte_t *pte; 1829 uint8_t hold_flag; 1830 1831 int su = (pmap == kernel_pmap); 1832 1833 //debugf("mmu_booke_remove: s (su = %d pmap=0x%08x tid=%d va=0x%08x endva=0x%08x)\n", 1834 // su, (u_int32_t)pmap, pmap->pm_tid, va, endva); 1835 1836 if (su) { 1837 KASSERT(((va >= virtual_avail) && 1838 (va <= VM_MAX_KERNEL_ADDRESS)), 1839 ("mmu_booke_remove: kernel pmap, non kernel va")); 1840 } else { 1841 KASSERT((va <= VM_MAXUSER_ADDRESS), 1842 ("mmu_booke_remove: user pmap, non user va")); 1843 } 1844 1845 if (PMAP_REMOVE_DONE(pmap)) { 1846 //debugf("mmu_booke_remove: e (empty)\n"); 1847 return; 1848 } 1849 1850 hold_flag = PTBL_HOLD_FLAG(pmap); 1851 //debugf("mmu_booke_remove: hold_flag = %d\n", hold_flag); 1852 1853 rw_wlock(&pvh_global_lock); 1854 PMAP_LOCK(pmap); 1855 for (; va < endva; va += PAGE_SIZE) { 1856 pte = pte_find(mmu, pmap, va); 1857 if ((pte != NULL) && PTE_ISVALID(pte)) 1858 pte_remove(mmu, pmap, va, hold_flag); 1859 } 1860 PMAP_UNLOCK(pmap); 1861 rw_wunlock(&pvh_global_lock); 1862 1863 //debugf("mmu_booke_remove: e\n"); 1864} 1865 1866/* 1867 * Remove physical page from all pmaps in which it resides. 1868 */ 1869static void 1870mmu_booke_remove_all(mmu_t mmu, vm_page_t m) 1871{ 1872 pv_entry_t pv, pvn; 1873 uint8_t hold_flag; 1874 1875 rw_wlock(&pvh_global_lock); 1876 for (pv = TAILQ_FIRST(&m->md.pv_list); pv != NULL; pv = pvn) { 1877 pvn = TAILQ_NEXT(pv, pv_link); 1878 1879 PMAP_LOCK(pv->pv_pmap); 1880 hold_flag = PTBL_HOLD_FLAG(pv->pv_pmap); 1881 pte_remove(mmu, pv->pv_pmap, pv->pv_va, hold_flag); 1882 PMAP_UNLOCK(pv->pv_pmap); 1883 } 1884 vm_page_aflag_clear(m, PGA_WRITEABLE); 1885 rw_wunlock(&pvh_global_lock); 1886} 1887 1888/* 1889 * Map a range of physical addresses into kernel virtual address space. 
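 *
 * Usage sketch (illustrative, with hypothetical variables): map four
 * pages of physical memory starting at 'pa' into the next free kernel
 * VA and remember where the mapping begins:
 *
 *	vm_offset_t va = virtual_avail;
 *	vm_offset_t sva;
 *
 *	sva = mmu_booke_map(mmu, &va, pa, pa + 4 * PAGE_SIZE, 0);
 *
 * On return sva is the base of the new wired mapping and va has been
 * advanced past its end.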
1890 */ 1891static vm_offset_t 1892mmu_booke_map(mmu_t mmu, vm_offset_t *virt, vm_paddr_t pa_start, 1893 vm_paddr_t pa_end, int prot) 1894{ 1895 vm_offset_t sva = *virt; 1896 vm_offset_t va = sva; 1897 1898 //debugf("mmu_booke_map: s (sva = 0x%08x pa_start = 0x%08x pa_end = 0x%08x)\n", 1899 // sva, pa_start, pa_end); 1900 1901 while (pa_start < pa_end) { 1902 mmu_booke_kenter(mmu, va, pa_start); 1903 va += PAGE_SIZE; 1904 pa_start += PAGE_SIZE; 1905 } 1906 *virt = va; 1907 1908 //debugf("mmu_booke_map: e (va = 0x%08x)\n", va); 1909 return (sva); 1910} 1911 1912/* 1913 * The pmap must be activated before it's address space can be accessed in any 1914 * way. 1915 */ 1916static void 1917mmu_booke_activate(mmu_t mmu, struct thread *td) 1918{ 1919 pmap_t pmap; 1920 u_int cpuid; 1921 1922 pmap = &td->td_proc->p_vmspace->vm_pmap; 1923 1924 CTR5(KTR_PMAP, "%s: s (td = %p, proc = '%s', id = %d, pmap = 0x%08x)", 1925 __func__, td, td->td_proc->p_comm, td->td_proc->p_pid, pmap); 1926 1927 KASSERT((pmap != kernel_pmap), ("mmu_booke_activate: kernel_pmap!")); 1928 1929 sched_pin(); 1930 1931 cpuid = PCPU_GET(cpuid); 1932 CPU_SET_ATOMIC(cpuid, &pmap->pm_active); 1933 PCPU_SET(curpmap, pmap); 1934 1935 if (pmap->pm_tid[cpuid] == TID_NONE) 1936 tid_alloc(pmap); 1937 1938 /* Load PID0 register with pmap tid value. */ 1939 mtspr(SPR_PID0, pmap->pm_tid[cpuid]); 1940 __asm __volatile("isync"); 1941 1942 sched_unpin(); 1943 1944 CTR3(KTR_PMAP, "%s: e (tid = %d for '%s')", __func__, 1945 pmap->pm_tid[PCPU_GET(cpuid)], td->td_proc->p_comm); 1946} 1947 1948/* 1949 * Deactivate the specified process's address space. 1950 */ 1951static void 1952mmu_booke_deactivate(mmu_t mmu, struct thread *td) 1953{ 1954 pmap_t pmap; 1955 1956 pmap = &td->td_proc->p_vmspace->vm_pmap; 1957 1958 CTR5(KTR_PMAP, "%s: td=%p, proc = '%s', id = %d, pmap = 0x%08x", 1959 __func__, td, td->td_proc->p_comm, td->td_proc->p_pid, pmap); 1960 1961 CPU_CLR_ATOMIC(PCPU_GET(cpuid), &pmap->pm_active); 1962 PCPU_SET(curpmap, NULL); 1963} 1964 1965/* 1966 * Copy the range specified by src_addr/len 1967 * from the source map to the range dst_addr/len 1968 * in the destination map. 1969 * 1970 * This routine is only advisory and need not do anything. 1971 */ 1972static void 1973mmu_booke_copy(mmu_t mmu, pmap_t dst_pmap, pmap_t src_pmap, 1974 vm_offset_t dst_addr, vm_size_t len, vm_offset_t src_addr) 1975{ 1976 1977} 1978 1979/* 1980 * Set the physical protection on the specified range of this map as requested. 1981 */ 1982static void 1983mmu_booke_protect(mmu_t mmu, pmap_t pmap, vm_offset_t sva, vm_offset_t eva, 1984 vm_prot_t prot) 1985{ 1986 vm_offset_t va; 1987 vm_page_t m; 1988 pte_t *pte; 1989 1990 if ((prot & VM_PROT_READ) == VM_PROT_NONE) { 1991 mmu_booke_remove(mmu, pmap, sva, eva); 1992 return; 1993 } 1994 1995 if (prot & VM_PROT_WRITE) 1996 return; 1997 1998 PMAP_LOCK(pmap); 1999 for (va = sva; va < eva; va += PAGE_SIZE) { 2000 if ((pte = pte_find(mmu, pmap, va)) != NULL) { 2001 if (PTE_ISVALID(pte)) { 2002 m = PHYS_TO_VM_PAGE(PTE_PA(pte)); 2003 2004 mtx_lock_spin(&tlbivax_mutex); 2005 tlb_miss_lock(); 2006 2007 /* Handle modified pages. */ 2008 if (PTE_ISMODIFIED(pte) && PTE_ISMANAGED(pte)) 2009 vm_page_dirty(m); 2010 2011 tlb0_flush_entry(va); 2012 pte->flags &= ~(PTE_UW | PTE_SW | PTE_MODIFIED); 2013 2014 tlb_miss_unlock(); 2015 mtx_unlock_spin(&tlbivax_mutex); 2016 } 2017 } 2018 } 2019 PMAP_UNLOCK(pmap); 2020} 2021 2022/* 2023 * Clear the write and modified bits in each of the given page's mappings. 
2024 */ 2025static void 2026mmu_booke_remove_write(mmu_t mmu, vm_page_t m) 2027{ 2028 pv_entry_t pv; 2029 pte_t *pte; 2030 2031 KASSERT((m->oflags & VPO_UNMANAGED) == 0, 2032 ("mmu_booke_remove_write: page %p is not managed", m)); 2033 2034 /* 2035 * If the page is not exclusive busied, then PGA_WRITEABLE cannot be 2036 * set by another thread while the object is locked. Thus, 2037 * if PGA_WRITEABLE is clear, no page table entries need updating. 2038 */ 2039 VM_OBJECT_ASSERT_WLOCKED(m->object); 2040 if (!vm_page_xbusied(m) && (m->aflags & PGA_WRITEABLE) == 0) 2041 return; 2042 rw_wlock(&pvh_global_lock); 2043 TAILQ_FOREACH(pv, &m->md.pv_list, pv_link) { 2044 PMAP_LOCK(pv->pv_pmap); 2045 if ((pte = pte_find(mmu, pv->pv_pmap, pv->pv_va)) != NULL) { 2046 if (PTE_ISVALID(pte)) { 2047 m = PHYS_TO_VM_PAGE(PTE_PA(pte)); 2048 2049 mtx_lock_spin(&tlbivax_mutex); 2050 tlb_miss_lock(); 2051 2052 /* Handle modified pages. */ 2053 if (PTE_ISMODIFIED(pte)) 2054 vm_page_dirty(m); 2055 2056 /* Flush mapping from TLB0. */ 2057 pte->flags &= ~(PTE_UW | PTE_SW | PTE_MODIFIED); 2058 2059 tlb_miss_unlock(); 2060 mtx_unlock_spin(&tlbivax_mutex); 2061 } 2062 } 2063 PMAP_UNLOCK(pv->pv_pmap); 2064 } 2065 vm_page_aflag_clear(m, PGA_WRITEABLE); 2066 rw_wunlock(&pvh_global_lock); 2067} 2068 2069static void 2070mmu_booke_sync_icache(mmu_t mmu, pmap_t pm, vm_offset_t va, vm_size_t sz) 2071{ 2072 pte_t *pte; 2073 pmap_t pmap; 2074 vm_page_t m; 2075 vm_offset_t addr; 2076 vm_paddr_t pa; 2077 int active, valid; 2078 2079 va = trunc_page(va); 2080 sz = round_page(sz); 2081 2082 rw_wlock(&pvh_global_lock); 2083 pmap = PCPU_GET(curpmap); 2084 active = (pm == kernel_pmap || pm == pmap) ? 1 : 0; 2085 while (sz > 0) { 2086 PMAP_LOCK(pm); 2087 pte = pte_find(mmu, pm, va); 2088 valid = (pte != NULL && PTE_ISVALID(pte)) ? 1 : 0; 2089 if (valid) 2090 pa = PTE_PA(pte); 2091 PMAP_UNLOCK(pm); 2092 if (valid) { 2093 if (!active) { 2094 /* Create a mapping in the active pmap. */ 2095 addr = 0; 2096 m = PHYS_TO_VM_PAGE(pa); 2097 PMAP_LOCK(pmap); 2098 pte_enter(mmu, pmap, m, addr, 2099 PTE_SR | PTE_VALID | PTE_UR, FALSE); 2100 __syncicache((void *)addr, PAGE_SIZE); 2101 pte_remove(mmu, pmap, addr, PTBL_UNHOLD); 2102 PMAP_UNLOCK(pmap); 2103 } else 2104 __syncicache((void *)va, PAGE_SIZE); 2105 } 2106 va += PAGE_SIZE; 2107 sz -= PAGE_SIZE; 2108 } 2109 rw_wunlock(&pvh_global_lock); 2110} 2111 2112/* 2113 * Atomically extract and hold the physical page with the given 2114 * pmap and virtual address pair if that mapping permits the given 2115 * protection. 2116 */ 2117static vm_page_t 2118mmu_booke_extract_and_hold(mmu_t mmu, pmap_t pmap, vm_offset_t va, 2119 vm_prot_t prot) 2120{ 2121 pte_t *pte; 2122 vm_page_t m; 2123 uint32_t pte_wbit; 2124 vm_paddr_t pa; 2125 2126 m = NULL; 2127 pa = 0; 2128 PMAP_LOCK(pmap); 2129retry: 2130 pte = pte_find(mmu, pmap, va); 2131 if ((pte != NULL) && PTE_ISVALID(pte)) { 2132 if (pmap == kernel_pmap) 2133 pte_wbit = PTE_SW; 2134 else 2135 pte_wbit = PTE_UW; 2136 2137 if ((pte->flags & pte_wbit) || ((prot & VM_PROT_WRITE) == 0)) { 2138 if (vm_page_pa_tryrelock(pmap, PTE_PA(pte), &pa)) 2139 goto retry; 2140 m = PHYS_TO_VM_PAGE(PTE_PA(pte)); 2141 vm_page_hold(m); 2142 } 2143 } 2144 2145 PA_UNLOCK_COND(pa); 2146 PMAP_UNLOCK(pmap); 2147 return (m); 2148} 2149 2150/* 2151 * Initialize a vm_page's machine-dependent fields. 
2152 */ 2153static void 2154mmu_booke_page_init(mmu_t mmu, vm_page_t m) 2155{ 2156 2157 TAILQ_INIT(&m->md.pv_list); 2158} 2159 2160/* 2161 * mmu_booke_zero_page_area zeros the specified hardware page by 2162 * mapping it into virtual memory and using bzero to clear 2163 * its contents. 2164 * 2165 * off and size must reside within a single page. 2166 */ 2167static void 2168mmu_booke_zero_page_area(mmu_t mmu, vm_page_t m, int off, int size) 2169{ 2170 vm_offset_t va; 2171 2172 /* XXX KASSERT off and size are within a single page? */ 2173 2174 mtx_lock(&zero_page_mutex); 2175 va = zero_page_va; 2176 2177 mmu_booke_kenter(mmu, va, VM_PAGE_TO_PHYS(m)); 2178 bzero((caddr_t)va + off, size); 2179 mmu_booke_kremove(mmu, va); 2180 2181 mtx_unlock(&zero_page_mutex); 2182} 2183 2184/* 2185 * mmu_booke_zero_page zeros the specified hardware page. 2186 */ 2187static void 2188mmu_booke_zero_page(mmu_t mmu, vm_page_t m) 2189{ 2190 2191 mmu_booke_zero_page_area(mmu, m, 0, PAGE_SIZE); 2192} 2193 2194/* 2195 * mmu_booke_copy_page copies the specified (machine independent) page by 2196 * mapping the page into virtual memory and using memcopy to copy the page, 2197 * one machine dependent page at a time. 2198 */ 2199static void 2200mmu_booke_copy_page(mmu_t mmu, vm_page_t sm, vm_page_t dm) 2201{ 2202 vm_offset_t sva, dva; 2203 2204 sva = copy_page_src_va; 2205 dva = copy_page_dst_va; 2206 2207 mtx_lock(©_page_mutex); 2208 mmu_booke_kenter(mmu, sva, VM_PAGE_TO_PHYS(sm)); 2209 mmu_booke_kenter(mmu, dva, VM_PAGE_TO_PHYS(dm)); 2210 memcpy((caddr_t)dva, (caddr_t)sva, PAGE_SIZE); 2211 mmu_booke_kremove(mmu, dva); 2212 mmu_booke_kremove(mmu, sva); 2213 mtx_unlock(©_page_mutex); 2214} 2215 2216static inline void 2217mmu_booke_copy_pages(mmu_t mmu, vm_page_t *ma, vm_offset_t a_offset, 2218 vm_page_t *mb, vm_offset_t b_offset, int xfersize) 2219{ 2220 void *a_cp, *b_cp; 2221 vm_offset_t a_pg_offset, b_pg_offset; 2222 int cnt; 2223 2224 mtx_lock(©_page_mutex); 2225 while (xfersize > 0) { 2226 a_pg_offset = a_offset & PAGE_MASK; 2227 cnt = min(xfersize, PAGE_SIZE - a_pg_offset); 2228 mmu_booke_kenter(mmu, copy_page_src_va, 2229 VM_PAGE_TO_PHYS(ma[a_offset >> PAGE_SHIFT])); 2230 a_cp = (char *)copy_page_src_va + a_pg_offset; 2231 b_pg_offset = b_offset & PAGE_MASK; 2232 cnt = min(cnt, PAGE_SIZE - b_pg_offset); 2233 mmu_booke_kenter(mmu, copy_page_dst_va, 2234 VM_PAGE_TO_PHYS(mb[b_offset >> PAGE_SHIFT])); 2235 b_cp = (char *)copy_page_dst_va + b_pg_offset; 2236 bcopy(a_cp, b_cp, cnt); 2237 mmu_booke_kremove(mmu, copy_page_dst_va); 2238 mmu_booke_kremove(mmu, copy_page_src_va); 2239 a_offset += cnt; 2240 b_offset += cnt; 2241 xfersize -= cnt; 2242 } 2243 mtx_unlock(©_page_mutex); 2244} 2245 2246/* 2247 * mmu_booke_zero_page_idle zeros the specified hardware page by mapping it 2248 * into virtual memory and using bzero to clear its contents. This is intended 2249 * to be called from the vm_pagezero process only and outside of Giant. No 2250 * lock is required. 2251 */ 2252static void 2253mmu_booke_zero_page_idle(mmu_t mmu, vm_page_t m) 2254{ 2255 vm_offset_t va; 2256 2257 va = zero_page_idle_va; 2258 mmu_booke_kenter(mmu, va, VM_PAGE_TO_PHYS(m)); 2259 bzero((caddr_t)va, PAGE_SIZE); 2260 mmu_booke_kremove(mmu, va); 2261} 2262 2263/* 2264 * Return whether or not the specified physical page was modified 2265 * in any of physical maps. 
2266 */ 2267static boolean_t 2268mmu_booke_is_modified(mmu_t mmu, vm_page_t m) 2269{ 2270 pte_t *pte; 2271 pv_entry_t pv; 2272 boolean_t rv; 2273 2274 KASSERT((m->oflags & VPO_UNMANAGED) == 0, 2275 ("mmu_booke_is_modified: page %p is not managed", m)); 2276 rv = FALSE; 2277 2278 /* 2279 * If the page is not exclusive busied, then PGA_WRITEABLE cannot be 2280 * concurrently set while the object is locked. Thus, if PGA_WRITEABLE 2281 * is clear, no PTEs can be modified. 2282 */ 2283 VM_OBJECT_ASSERT_WLOCKED(m->object); 2284 if (!vm_page_xbusied(m) && (m->aflags & PGA_WRITEABLE) == 0) 2285 return (rv); 2286 rw_wlock(&pvh_global_lock); 2287 TAILQ_FOREACH(pv, &m->md.pv_list, pv_link) { 2288 PMAP_LOCK(pv->pv_pmap); 2289 if ((pte = pte_find(mmu, pv->pv_pmap, pv->pv_va)) != NULL && 2290 PTE_ISVALID(pte)) { 2291 if (PTE_ISMODIFIED(pte)) 2292 rv = TRUE; 2293 } 2294 PMAP_UNLOCK(pv->pv_pmap); 2295 if (rv) 2296 break; 2297 } 2298 rw_wunlock(&pvh_global_lock); 2299 return (rv); 2300} 2301 2302/* 2303 * Return whether or not the specified virtual address is eligible 2304 * for prefault. 2305 */ 2306static boolean_t 2307mmu_booke_is_prefaultable(mmu_t mmu, pmap_t pmap, vm_offset_t addr) 2308{ 2309 2310 return (FALSE); 2311} 2312 2313/* 2314 * Return whether or not the specified physical page was referenced 2315 * in any physical maps. 2316 */ 2317static boolean_t 2318mmu_booke_is_referenced(mmu_t mmu, vm_page_t m) 2319{ 2320 pte_t *pte; 2321 pv_entry_t pv; 2322 boolean_t rv; 2323 2324 KASSERT((m->oflags & VPO_UNMANAGED) == 0, 2325 ("mmu_booke_is_referenced: page %p is not managed", m)); 2326 rv = FALSE; 2327 rw_wlock(&pvh_global_lock); 2328 TAILQ_FOREACH(pv, &m->md.pv_list, pv_link) { 2329 PMAP_LOCK(pv->pv_pmap); 2330 if ((pte = pte_find(mmu, pv->pv_pmap, pv->pv_va)) != NULL && 2331 PTE_ISVALID(pte)) { 2332 if (PTE_ISREFERENCED(pte)) 2333 rv = TRUE; 2334 } 2335 PMAP_UNLOCK(pv->pv_pmap); 2336 if (rv) 2337 break; 2338 } 2339 rw_wunlock(&pvh_global_lock); 2340 return (rv); 2341} 2342 2343/* 2344 * Clear the modify bits on the specified physical page. 2345 */ 2346static void 2347mmu_booke_clear_modify(mmu_t mmu, vm_page_t m) 2348{ 2349 pte_t *pte; 2350 pv_entry_t pv; 2351 2352 KASSERT((m->oflags & VPO_UNMANAGED) == 0, 2353 ("mmu_booke_clear_modify: page %p is not managed", m)); 2354 VM_OBJECT_ASSERT_WLOCKED(m->object); 2355 KASSERT(!vm_page_xbusied(m), 2356 ("mmu_booke_clear_modify: page %p is exclusive busied", m)); 2357 2358 /* 2359 * If the page is not PG_AWRITEABLE, then no PTEs can be modified. 2360 * If the object containing the page is locked and the page is not 2361 * exclusive busied, then PG_AWRITEABLE cannot be concurrently set. 2362 */ 2363 if ((m->aflags & PGA_WRITEABLE) == 0) 2364 return; 2365 rw_wlock(&pvh_global_lock); 2366 TAILQ_FOREACH(pv, &m->md.pv_list, pv_link) { 2367 PMAP_LOCK(pv->pv_pmap); 2368 if ((pte = pte_find(mmu, pv->pv_pmap, pv->pv_va)) != NULL && 2369 PTE_ISVALID(pte)) { 2370 mtx_lock_spin(&tlbivax_mutex); 2371 tlb_miss_lock(); 2372 2373 if (pte->flags & (PTE_SW | PTE_UW | PTE_MODIFIED)) { 2374 tlb0_flush_entry(pv->pv_va); 2375 pte->flags &= ~(PTE_SW | PTE_UW | PTE_MODIFIED | 2376 PTE_REFERENCED); 2377 } 2378 2379 tlb_miss_unlock(); 2380 mtx_unlock_spin(&tlbivax_mutex); 2381 } 2382 PMAP_UNLOCK(pv->pv_pmap); 2383 } 2384 rw_wunlock(&pvh_global_lock); 2385} 2386 2387/* 2388 * Return a count of reference bits for a page, clearing those bits. 
2389 * It is not necessary for every reference bit to be cleared, but it 2390 * is necessary that 0 only be returned when there are truly no 2391 * reference bits set. 2392 * 2393 * XXX: The exact number of bits to check and clear is a matter that 2394 * should be tested and standardized at some point in the future for 2395 * optimal aging of shared pages. 2396 */ 2397static int 2398mmu_booke_ts_referenced(mmu_t mmu, vm_page_t m) 2399{ 2400 pte_t *pte; 2401 pv_entry_t pv; 2402 int count; 2403 2404 KASSERT((m->oflags & VPO_UNMANAGED) == 0, 2405 ("mmu_booke_ts_referenced: page %p is not managed", m)); 2406 count = 0; 2407 rw_wlock(&pvh_global_lock); 2408 TAILQ_FOREACH(pv, &m->md.pv_list, pv_link) { 2409 PMAP_LOCK(pv->pv_pmap); 2410 if ((pte = pte_find(mmu, pv->pv_pmap, pv->pv_va)) != NULL && 2411 PTE_ISVALID(pte)) { 2412 if (PTE_ISREFERENCED(pte)) { 2413 mtx_lock_spin(&tlbivax_mutex); 2414 tlb_miss_lock(); 2415 2416 tlb0_flush_entry(pv->pv_va); 2417 pte->flags &= ~PTE_REFERENCED; 2418 2419 tlb_miss_unlock(); 2420 mtx_unlock_spin(&tlbivax_mutex); 2421 2422 if (++count > 4) { 2423 PMAP_UNLOCK(pv->pv_pmap); 2424 break; 2425 } 2426 } 2427 } 2428 PMAP_UNLOCK(pv->pv_pmap); 2429 } 2430 rw_wunlock(&pvh_global_lock); 2431 return (count); 2432} 2433 2434/* 2435 * Clear the wired attribute from the mappings for the specified range of 2436 * addresses in the given pmap. Every valid mapping within that range must 2437 * have the wired attribute set. In contrast, invalid mappings cannot have 2438 * the wired attribute set, so they are ignored. 2439 * 2440 * The wired attribute of the page table entry is not a hardware feature, so 2441 * there is no need to invalidate any TLB entries. 2442 */ 2443static void 2444mmu_booke_unwire(mmu_t mmu, pmap_t pmap, vm_offset_t sva, vm_offset_t eva) 2445{ 2446 vm_offset_t va; 2447 pte_t *pte; 2448 2449 PMAP_LOCK(pmap); 2450 for (va = sva; va < eva; va += PAGE_SIZE) { 2451 if ((pte = pte_find(mmu, pmap, va)) != NULL && 2452 PTE_ISVALID(pte)) { 2453 if (!PTE_ISWIRED(pte)) 2454 panic("mmu_booke_unwire: pte %p isn't wired", 2455 pte); 2456 pte->flags &= ~PTE_WIRED; 2457 pmap->pm_stats.wired_count--; 2458 } 2459 } 2460 PMAP_UNLOCK(pmap); 2461 2462} 2463 2464/* 2465 * Return true if the pmap's pv is one of the first 16 pvs linked to from this 2466 * page. This count may be changed upwards or downwards in the future; it is 2467 * only necessary that true be returned for a small subset of pmaps for proper 2468 * page aging. 2469 */ 2470static boolean_t 2471mmu_booke_page_exists_quick(mmu_t mmu, pmap_t pmap, vm_page_t m) 2472{ 2473 pv_entry_t pv; 2474 int loops; 2475 boolean_t rv; 2476 2477 KASSERT((m->oflags & VPO_UNMANAGED) == 0, 2478 ("mmu_booke_page_exists_quick: page %p is not managed", m)); 2479 loops = 0; 2480 rv = FALSE; 2481 rw_wlock(&pvh_global_lock); 2482 TAILQ_FOREACH(pv, &m->md.pv_list, pv_link) { 2483 if (pv->pv_pmap == pmap) { 2484 rv = TRUE; 2485 break; 2486 } 2487 if (++loops >= 16) 2488 break; 2489 } 2490 rw_wunlock(&pvh_global_lock); 2491 return (rv); 2492} 2493 2494/* 2495 * Return the number of managed mappings to the given physical page that are 2496 * wired. 
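 *
 * (Illustrative example: a managed page mapped into three pmaps, only
 * one of which has wired its mapping -- e.g. on behalf of mlock(2) --
 * makes this function return 1.)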
2497 */ 2498static int 2499mmu_booke_page_wired_mappings(mmu_t mmu, vm_page_t m) 2500{ 2501 pv_entry_t pv; 2502 pte_t *pte; 2503 int count = 0; 2504 2505 if ((m->oflags & VPO_UNMANAGED) != 0) 2506 return (count); 2507 rw_wlock(&pvh_global_lock); 2508 TAILQ_FOREACH(pv, &m->md.pv_list, pv_link) { 2509 PMAP_LOCK(pv->pv_pmap); 2510 if ((pte = pte_find(mmu, pv->pv_pmap, pv->pv_va)) != NULL) 2511 if (PTE_ISVALID(pte) && PTE_ISWIRED(pte)) 2512 count++; 2513 PMAP_UNLOCK(pv->pv_pmap); 2514 } 2515 rw_wunlock(&pvh_global_lock); 2516 return (count); 2517} 2518 2519static int 2520mmu_booke_dev_direct_mapped(mmu_t mmu, vm_paddr_t pa, vm_size_t size) 2521{ 2522 int i; 2523 vm_offset_t va; 2524 2525 /* 2526 * This currently does not work for entries that 2527 * overlap TLB1 entries. 2528 */ 2529 for (i = 0; i < tlb1_idx; i ++) { 2530 if (tlb1_iomapped(i, pa, size, &va) == 0) 2531 return (0); 2532 } 2533 2534 return (EFAULT); 2535} 2536 2537vm_offset_t 2538mmu_booke_dumpsys_map(mmu_t mmu, struct pmap_md *md, vm_size_t ofs, 2539 vm_size_t *sz) 2540{ 2541 vm_paddr_t pa, ppa; 2542 vm_offset_t va; 2543 vm_size_t gran; 2544 2545 /* Raw physical memory dumps don't have a virtual address. */ 2546 if (md->md_vaddr == ~0UL) { 2547 /* We always map a 256MB page at 256M. */ 2548 gran = 256 * 1024 * 1024; 2549 pa = md->md_paddr + ofs; 2550 ppa = pa & ~(gran - 1); 2551 ofs = pa - ppa; 2552 va = gran; 2553 tlb1_set_entry(va, ppa, gran, _TLB_ENTRY_IO); 2554 if (*sz > (gran - ofs)) 2555 *sz = gran - ofs; 2556 return (va + ofs); 2557 } 2558 2559 /* Minidumps are based on virtual memory addresses. */ 2560 va = md->md_vaddr + ofs; 2561 if (va >= kernstart + kernsize) { 2562 gran = PAGE_SIZE - (va & PAGE_MASK); 2563 if (*sz > gran) 2564 *sz = gran; 2565 } 2566 return (va); 2567} 2568 2569void 2570mmu_booke_dumpsys_unmap(mmu_t mmu, struct pmap_md *md, vm_size_t ofs, 2571 vm_offset_t va) 2572{ 2573 2574 /* Raw physical memory dumps don't have a virtual address. */ 2575 if (md->md_vaddr == ~0UL) { 2576 tlb1_idx--; 2577 tlb1[tlb1_idx].mas1 = 0; 2578 tlb1[tlb1_idx].mas2 = 0; 2579 tlb1[tlb1_idx].mas3 = 0; 2580 tlb1_write_entry(tlb1_idx); 2581 return; 2582 } 2583 2584 /* Minidumps are based on virtual memory addresses. */ 2585 /* Nothing to do... */ 2586} 2587 2588struct pmap_md * 2589mmu_booke_scan_md(mmu_t mmu, struct pmap_md *prev) 2590{ 2591 static struct pmap_md md; 2592 pte_t *pte; 2593 vm_offset_t va; 2594 2595 if (dumpsys_minidump) { 2596 md.md_paddr = ~0UL; /* Minidumps use virtual addresses. */ 2597 if (prev == NULL) { 2598 /* 1st: kernel .data and .bss. */ 2599 md.md_index = 1; 2600 md.md_vaddr = trunc_page((uintptr_t)_etext); 2601 md.md_size = round_page((uintptr_t)_end) - md.md_vaddr; 2602 return (&md); 2603 } 2604 switch (prev->md_index) { 2605 case 1: 2606 /* 2nd: msgbuf and tables (see pmap_bootstrap()). */ 2607 md.md_index = 2; 2608 md.md_vaddr = data_start; 2609 md.md_size = data_end - data_start; 2610 break; 2611 case 2: 2612 /* 3rd: kernel VM. */ 2613 va = prev->md_vaddr + prev->md_size; 2614 /* Find start of next chunk (from va). */ 2615 while (va < virtual_end) { 2616 /* Don't dump the buffer cache. */ 2617 if (va >= kmi.buffer_sva && 2618 va < kmi.buffer_eva) { 2619 va = kmi.buffer_eva; 2620 continue; 2621 } 2622 pte = pte_find(mmu, kernel_pmap, va); 2623 if (pte != NULL && PTE_ISVALID(pte)) 2624 break; 2625 va += PAGE_SIZE; 2626 } 2627 if (va < virtual_end) { 2628 md.md_vaddr = va; 2629 va += PAGE_SIZE; 2630 /* Find last page in chunk. 
*/ 2631 while (va < virtual_end) { 2632 /* Don't run into the buffer cache. */ 2633 if (va == kmi.buffer_sva) 2634 break; 2635 pte = pte_find(mmu, kernel_pmap, va); 2636 if (pte == NULL || !PTE_ISVALID(pte)) 2637 break; 2638 va += PAGE_SIZE; 2639 } 2640 md.md_size = va - md.md_vaddr; 2641 break; 2642 } 2643 md.md_index = 3; 2644 /* FALLTHROUGH */ 2645 default: 2646 return (NULL); 2647 } 2648 } else { /* minidumps */ 2649 mem_regions(&physmem_regions, &physmem_regions_sz, 2650 &availmem_regions, &availmem_regions_sz); 2651 2652 if (prev == NULL) { 2653 /* first physical chunk. */ 2654 md.md_paddr = physmem_regions[0].mr_start; 2655 md.md_size = physmem_regions[0].mr_size; 2656 md.md_vaddr = ~0UL; 2657 md.md_index = 1; 2658 } else if (md.md_index < physmem_regions_sz) { 2659 md.md_paddr = physmem_regions[md.md_index].mr_start; 2660 md.md_size = physmem_regions[md.md_index].mr_size; 2661 md.md_vaddr = ~0UL; 2662 md.md_index++; 2663 } else { 2664 /* There's no next physical chunk. */ 2665 return (NULL); 2666 } 2667 } 2668 2669 return (&md); 2670} 2671 2672/* 2673 * Map a set of physical memory pages into the kernel virtual address space. 2674 * Return a pointer to where it is mapped. This routine is intended to be used 2675 * for mapping device memory, NOT real memory. 2676 */ 2677static void * 2678mmu_booke_mapdev(mmu_t mmu, vm_paddr_t pa, vm_size_t size) 2679{ 2680 2681 return (mmu_booke_mapdev_attr(mmu, pa, size, VM_MEMATTR_DEFAULT)); 2682} 2683 2684static void * 2685mmu_booke_mapdev_attr(mmu_t mmu, vm_paddr_t pa, vm_size_t size, vm_memattr_t ma) 2686{ 2687 void *res; 2688 uintptr_t va; 2689 vm_size_t sz; 2690 int i; 2691 2692 /* 2693 * Check if this is premapped in TLB1. Note: this should probably also 2694 * check whether a sequence of TLB1 entries exist that match the 2695 * requirement, but now only checks the easy case. 2696 */ 2697 if (ma == VM_MEMATTR_DEFAULT) { 2698 for (i = 0; i < tlb1_idx; i++) { 2699 if (!(tlb1[i].mas1 & MAS1_VALID)) 2700 continue; 2701 if (pa >= tlb1[i].phys && 2702 (pa + size) <= (tlb1[i].phys + tlb1[i].size)) 2703 return (void *)(tlb1[i].virt + 2704 (pa - tlb1[i].phys)); 2705 } 2706 } 2707 2708 size = roundup(size, PAGE_SIZE); 2709 2710 /* 2711 * We leave a hole for device direct mapping between the maximum user 2712 * address (0x8000000) and the minimum KVA address (0xc0000000). If 2713 * devices are in there, just map them 1:1. If not, map them to the 2714 * device mapping area about VM_MAX_KERNEL_ADDRESS. These mapped 2715 * addresses should be pulled from an allocator, but since we do not 2716 * ever free TLB1 entries, it is safe just to increment a counter. 2717 * Note that there isn't a lot of address space here (128 MB) and it 2718 * is not at all difficult to imagine running out, since that is a 4:1 2719 * compression from the 0xc0000000 - 0xf0000000 address space that gets 2720 * mapped there. 2721 */ 2722 if (pa >= (VM_MAXUSER_ADDRESS + PAGE_SIZE) && 2723 (pa + size - 1) < VM_MIN_KERNEL_ADDRESS) 2724 va = pa; 2725 else 2726 va = atomic_fetchadd_int(&tlb1_map_base, size); 2727 res = (void *)va; 2728 2729 do { 2730 sz = 1 << (ilog2(size) & ~1); 2731 if (bootverbose) 2732 printf("Wiring VA=%x to PA=%x (size=%x), " 2733 "using TLB1[%d]\n", va, pa, sz, tlb1_idx); 2734 tlb1_set_entry(va, pa, sz, tlb_calc_wimg(pa, ma)); 2735 size -= sz; 2736 pa += sz; 2737 va += sz; 2738 } while (size > 0); 2739 2740 return (res); 2741} 2742 2743/* 2744 * 'Unmap' a range mapped by mmu_booke_mapdev(). 
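 *
 * (For reference, an illustrative example of what mmu_booke_mapdev_attr()
 * above leaves behind: an 80 MB request that is not already covered by an
 * existing entry gets wired as one 64 MB TLB1 entry followed by one 16 MB
 * entry, each entry size being a power of 4.  Those entries are never
 * recycled, which is why the routine below is effectively a no-op unless
 * SUPPORTS_SHRINKING_TLB1 is defined.)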
2745 */ 2746static void 2747mmu_booke_unmapdev(mmu_t mmu, vm_offset_t va, vm_size_t size) 2748{ 2749#ifdef SUPPORTS_SHRINKING_TLB1 2750 vm_offset_t base, offset; 2751 2752 /* 2753 * Unmap only if this is inside kernel virtual space. 2754 */ 2755 if ((va >= VM_MIN_KERNEL_ADDRESS) && (va <= VM_MAX_KERNEL_ADDRESS)) { 2756 base = trunc_page(va); 2757 offset = va & PAGE_MASK; 2758 size = roundup(offset + size, PAGE_SIZE); 2759 kva_free(base, size); 2760 } 2761#endif 2762} 2763 2764/* 2765 * mmu_booke_object_init_pt preloads the ptes for a given object into the 2766 * specified pmap. This eliminates the blast of soft faults on process startup 2767 * and immediately after an mmap. 2768 */ 2769static void 2770mmu_booke_object_init_pt(mmu_t mmu, pmap_t pmap, vm_offset_t addr, 2771 vm_object_t object, vm_pindex_t pindex, vm_size_t size) 2772{ 2773 2774 VM_OBJECT_ASSERT_WLOCKED(object); 2775 KASSERT(object->type == OBJT_DEVICE || object->type == OBJT_SG, 2776 ("mmu_booke_object_init_pt: non-device object")); 2777} 2778 2779/* 2780 * Perform the pmap work for mincore. 2781 */ 2782static int 2783mmu_booke_mincore(mmu_t mmu, pmap_t pmap, vm_offset_t addr, 2784 vm_paddr_t *locked_pa) 2785{ 2786 2787 /* XXX: this should be implemented at some point */ 2788 return (0); 2789} 2790 2791/**************************************************************************/ 2792/* TID handling */ 2793/**************************************************************************/ 2794 2795/* 2796 * Allocate a TID. If necessary, steal one from someone else. 2797 * The new TID is flushed from the TLB before returning. 2798 */ 2799static tlbtid_t 2800tid_alloc(pmap_t pmap) 2801{ 2802 tlbtid_t tid; 2803 int thiscpu; 2804 2805 KASSERT((pmap != kernel_pmap), ("tid_alloc: kernel pmap")); 2806 2807 CTR2(KTR_PMAP, "%s: s (pmap = %p)", __func__, pmap); 2808 2809 thiscpu = PCPU_GET(cpuid); 2810 2811 tid = PCPU_GET(tid_next); 2812 if (tid > TID_MAX) 2813 tid = TID_MIN; 2814 PCPU_SET(tid_next, tid + 1); 2815 2816 /* If we are stealing TID then clear the relevant pmap's field */ 2817 if (tidbusy[thiscpu][tid] != NULL) { 2818 2819 CTR2(KTR_PMAP, "%s: warning: stealing tid %d", __func__, tid); 2820 2821 tidbusy[thiscpu][tid]->pm_tid[thiscpu] = TID_NONE; 2822 2823 /* Flush all entries from TLB0 matching this TID. */ 2824 tid_flush(tid); 2825 } 2826 2827 tidbusy[thiscpu][tid] = pmap; 2828 pmap->pm_tid[thiscpu] = tid; 2829 __asm __volatile("msync; isync"); 2830 2831 CTR3(KTR_PMAP, "%s: e (%02d next = %02d)", __func__, tid, 2832 PCPU_GET(tid_next)); 2833 2834 return (tid); 2835} 2836 2837/**************************************************************************/ 2838/* TLB0 handling */ 2839/**************************************************************************/ 2840 2841static void 2842tlb_print_entry(int i, uint32_t mas1, uint32_t mas2, uint32_t mas3, 2843 uint32_t mas7) 2844{ 2845 int as; 2846 char desc[3]; 2847 tlbtid_t tid; 2848 vm_size_t size; 2849 unsigned int tsize; 2850 2851 desc[2] = '\0'; 2852 if (mas1 & MAS1_VALID) 2853 desc[0] = 'V'; 2854 else 2855 desc[0] = ' '; 2856 2857 if (mas1 & MAS1_IPROT) 2858 desc[1] = 'P'; 2859 else 2860 desc[1] = ' '; 2861 2862 as = (mas1 & MAS1_TS_MASK) ? 
1 : 0; 2863 tid = MAS1_GETTID(mas1); 2864 2865 tsize = (mas1 & MAS1_TSIZE_MASK) >> MAS1_TSIZE_SHIFT; 2866 size = 0; 2867 if (tsize) 2868 size = tsize2size(tsize); 2869 2870 debugf("%3d: (%s) [AS=%d] " 2871 "sz = 0x%08x tsz = %d tid = %d mas1 = 0x%08x " 2872 "mas2(va) = 0x%08x mas3(pa) = 0x%08x mas7 = 0x%08x\n", 2873 i, desc, as, size, tsize, tid, mas1, mas2, mas3, mas7); 2874} 2875 2876/* Convert TLB0 va and way number to tlb0[] table index. */ 2877static inline unsigned int 2878tlb0_tableidx(vm_offset_t va, unsigned int way) 2879{ 2880 unsigned int idx; 2881 2882 idx = (way * TLB0_ENTRIES_PER_WAY); 2883 idx += (va & MAS2_TLB0_ENTRY_IDX_MASK) >> MAS2_TLB0_ENTRY_IDX_SHIFT; 2884 return (idx); 2885} 2886 2887/* 2888 * Invalidate TLB0 entry. 2889 */ 2890static inline void 2891tlb0_flush_entry(vm_offset_t va) 2892{ 2893 2894 CTR2(KTR_PMAP, "%s: s va=0x%08x", __func__, va); 2895 2896 mtx_assert(&tlbivax_mutex, MA_OWNED); 2897 2898 __asm __volatile("tlbivax 0, %0" :: "r"(va & MAS2_EPN_MASK)); 2899 __asm __volatile("isync; msync"); 2900 __asm __volatile("tlbsync; msync"); 2901 2902 CTR1(KTR_PMAP, "%s: e", __func__); 2903} 2904 2905/* Print out contents of the MAS registers for each TLB0 entry */ 2906void 2907tlb0_print_tlbentries(void) 2908{ 2909 uint32_t mas0, mas1, mas2, mas3, mas7; 2910 int entryidx, way, idx; 2911 2912 debugf("TLB0 entries:\n"); 2913 for (way = 0; way < TLB0_WAYS; way ++) 2914 for (entryidx = 0; entryidx < TLB0_ENTRIES_PER_WAY; entryidx++) { 2915 2916 mas0 = MAS0_TLBSEL(0) | MAS0_ESEL(way); 2917 mtspr(SPR_MAS0, mas0); 2918 __asm __volatile("isync"); 2919 2920 mas2 = entryidx << MAS2_TLB0_ENTRY_IDX_SHIFT; 2921 mtspr(SPR_MAS2, mas2); 2922 2923 __asm __volatile("isync; tlbre"); 2924 2925 mas1 = mfspr(SPR_MAS1); 2926 mas2 = mfspr(SPR_MAS2); 2927 mas3 = mfspr(SPR_MAS3); 2928 mas7 = mfspr(SPR_MAS7); 2929 2930 idx = tlb0_tableidx(mas2, way); 2931 tlb_print_entry(idx, mas1, mas2, mas3, mas7); 2932 } 2933} 2934 2935/**************************************************************************/ 2936/* TLB1 handling */ 2937/**************************************************************************/ 2938 2939/* 2940 * TLB1 mapping notes: 2941 * 2942 * TLB1[0] Kernel text and data. 2943 * TLB1[1-15] Additional kernel text and data mappings (if required), PCI 2944 * windows, other devices mappings. 2945 */ 2946 2947/* 2948 * Write given entry to TLB1 hardware. 2949 * Use 32 bit pa, clear 4 high-order bits of RPN (mas7). 2950 */ 2951static void 2952tlb1_write_entry(unsigned int idx) 2953{ 2954 uint32_t mas0, mas7; 2955 2956 //debugf("tlb1_write_entry: s\n"); 2957 2958 /* Clear high order RPN bits */ 2959 mas7 = 0; 2960 2961 /* Select entry */ 2962 mas0 = MAS0_TLBSEL(1) | MAS0_ESEL(idx); 2963 //debugf("tlb1_write_entry: mas0 = 0x%08x\n", mas0); 2964 2965 mtspr(SPR_MAS0, mas0); 2966 __asm __volatile("isync"); 2967 mtspr(SPR_MAS1, tlb1[idx].mas1); 2968 __asm __volatile("isync"); 2969 mtspr(SPR_MAS2, tlb1[idx].mas2); 2970 __asm __volatile("isync"); 2971 mtspr(SPR_MAS3, tlb1[idx].mas3); 2972 __asm __volatile("isync"); 2973 mtspr(SPR_MAS7, mas7); 2974 __asm __volatile("isync; tlbwe; isync; msync"); 2975 2976 //debugf("tlb1_write_entry: e\n"); 2977} 2978 2979/* 2980 * Return the largest uint value log such that 2^log <= num. 2981 */ 2982static unsigned int 2983ilog2(unsigned int num) 2984{ 2985 int lz; 2986 2987 __asm ("cntlzw %0, %1" : "=r" (lz) : "r" (num)); 2988 return (31 - lz); 2989} 2990 2991/* 2992 * Convert TLB TSIZE value to mapped region size. 
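 *
 * A few illustrative values of this mapping and of its inverse,
 * size2tsize() below (size = 4^tsize KB):
 *
 *	tsize 1 ->   4 KB	tsize 5 ->   1 MB
 *	tsize 7 ->  16 MB	tsize 9 -> 256 MB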
2993 */ 2994static vm_size_t 2995tsize2size(unsigned int tsize) 2996{ 2997 2998 /* 2999 * size = 4^tsize KB 3000 * size = 4^tsize * 2^10 = 2^(2 * tsize - 10) 3001 */ 3002 3003 return ((1 << (2 * tsize)) * 1024); 3004} 3005 3006/* 3007 * Convert region size (must be power of 4) to TLB TSIZE value. 3008 */ 3009static unsigned int 3010size2tsize(vm_size_t size) 3011{ 3012 3013 return (ilog2(size) / 2 - 5); 3014} 3015 3016/* 3017 * Register permanent kernel mapping in TLB1. 3018 * 3019 * Entries are created starting from index 0 (current free entry is 3020 * kept in tlb1_idx) and are not supposed to be invalidated. 3021 */ 3022static int 3023tlb1_set_entry(vm_offset_t va, vm_offset_t pa, vm_size_t size, 3024 uint32_t flags) 3025{ 3026 uint32_t ts, tid; 3027 int tsize, index; 3028 3029 index = atomic_fetchadd_int(&tlb1_idx, 1); 3030 if (index >= TLB1_ENTRIES) { 3031 printf("tlb1_set_entry: TLB1 full!\n"); 3032 return (-1); 3033 } 3034 3035 /* Convert size to TSIZE */ 3036 tsize = size2tsize(size); 3037 3038 tid = (TID_KERNEL << MAS1_TID_SHIFT) & MAS1_TID_MASK; 3039 /* XXX TS is hard coded to 0 for now as we only use single address space */ 3040 ts = (0 << MAS1_TS_SHIFT) & MAS1_TS_MASK; 3041 3042 /* 3043 * Atomicity is preserved by the atomic increment above since nothing 3044 * is ever removed from tlb1. 3045 */ 3046 3047 tlb1[index].phys = pa; 3048 tlb1[index].virt = va; 3049 tlb1[index].size = size; 3050 tlb1[index].mas1 = MAS1_VALID | MAS1_IPROT | ts | tid; 3051 tlb1[index].mas1 |= ((tsize << MAS1_TSIZE_SHIFT) & MAS1_TSIZE_MASK); 3052 tlb1[index].mas2 = (va & MAS2_EPN_MASK) | flags; 3053 3054 /* Set supervisor RWX permission bits */ 3055 tlb1[index].mas3 = (pa & MAS3_RPN) | MAS3_SR | MAS3_SW | MAS3_SX; 3056 3057 tlb1_write_entry(index); 3058 3059 /* 3060 * XXX in general TLB1 updates should be propagated between CPUs, 3061 * since current design assumes to have the same TLB1 set-up on all 3062 * cores. 3063 */ 3064 return (0); 3065} 3066 3067/* 3068 * Map in contiguous RAM region into the TLB1 using maximum of 3069 * KERNEL_REGION_MAX_TLB_ENTRIES entries. 3070 * 3071 * If necessary round up last entry size and return total size 3072 * used by all allocated entries. 3073 */ 3074vm_size_t 3075tlb1_mapin_region(vm_offset_t va, vm_paddr_t pa, vm_size_t size) 3076{ 3077 vm_size_t pgs[KERNEL_REGION_MAX_TLB_ENTRIES]; 3078 vm_size_t mapped, pgsz, base, mask; 3079 int idx, nents; 3080 3081 /* Round up to the next 1M */ 3082 size = (size + (1 << 20) - 1) & ~((1 << 20) - 1); 3083 3084 mapped = 0; 3085 idx = 0; 3086 base = va; 3087 pgsz = 64*1024*1024; 3088 while (mapped < size) { 3089 while (mapped < size && idx < KERNEL_REGION_MAX_TLB_ENTRIES) { 3090 while (pgsz > (size - mapped)) 3091 pgsz >>= 2; 3092 pgs[idx++] = pgsz; 3093 mapped += pgsz; 3094 } 3095 3096 /* We under-map. Correct for this. */ 3097 if (mapped < size) { 3098 while (pgs[idx - 1] == pgsz) { 3099 idx--; 3100 mapped -= pgsz; 3101 } 3102 /* XXX We may increase beyond out starting point. 
*/ 3103 pgsz <<= 2; 3104 pgs[idx++] = pgsz; 3105 mapped += pgsz; 3106 } 3107 } 3108 3109 nents = idx; 3110 mask = pgs[0] - 1; 3111 /* Align address to the boundary */ 3112 if (va & mask) { 3113 va = (va + mask) & ~mask; 3114 pa = (pa + mask) & ~mask; 3115 } 3116 3117 for (idx = 0; idx < nents; idx++) { 3118 pgsz = pgs[idx]; 3119 debugf("%u: %x -> %x, size=%x\n", idx, pa, va, pgsz); 3120 tlb1_set_entry(va, pa, pgsz, _TLB_ENTRY_MEM); 3121 pa += pgsz; 3122 va += pgsz; 3123 } 3124 3125 mapped = (va - base); 3126 printf("mapped size 0x%08x (wasted space 0x%08x)\n", 3127 mapped, mapped - size); 3128 return (mapped); 3129} 3130 3131/* 3132 * TLB1 initialization routine, to be called after the very first 3133 * assembler level setup done in locore.S. 3134 */ 3135void 3136tlb1_init() 3137{ 3138 uint32_t mas0, mas1, mas2, mas3; 3139 uint32_t tsz; 3140 u_int i; 3141 3142 if (bootinfo != NULL && bootinfo[0] != 1) { 3143 tlb1_idx = *((uint16_t *)(bootinfo + 8)); 3144 } else 3145 tlb1_idx = 1; 3146 3147 /* The first entry/entries are used to map the kernel. */ 3148 for (i = 0; i < tlb1_idx; i++) { 3149 mas0 = MAS0_TLBSEL(1) | MAS0_ESEL(i); 3150 mtspr(SPR_MAS0, mas0); 3151 __asm __volatile("isync; tlbre"); 3152 3153 mas1 = mfspr(SPR_MAS1); 3154 if ((mas1 & MAS1_VALID) == 0) 3155 continue; 3156 3157 mas2 = mfspr(SPR_MAS2); 3158 mas3 = mfspr(SPR_MAS3); 3159 3160 tlb1[i].mas1 = mas1; 3161 tlb1[i].mas2 = mfspr(SPR_MAS2); 3162 tlb1[i].mas3 = mas3; 3163 tlb1[i].virt = mas2 & MAS2_EPN_MASK; 3164 tlb1[i].phys = mas3 & MAS3_RPN; 3165 3166 if (i == 0) 3167 kernload = mas3 & MAS3_RPN; 3168 3169 tsz = (mas1 & MAS1_TSIZE_MASK) >> MAS1_TSIZE_SHIFT; 3170 tlb1[i].size = (tsz > 0) ? tsize2size(tsz) : 0; 3171 kernsize += tlb1[i].size; 3172 } 3173 3174#ifdef SMP 3175 bp_ntlb1s = tlb1_idx; 3176#endif 3177 3178 /* Purge the remaining entries */ 3179 for (i = tlb1_idx; i < TLB1_ENTRIES; i++) 3180 tlb1_write_entry(i); 3181 3182 /* Setup TLB miss defaults */ 3183 set_mas4_defaults(); 3184} 3185 3186vm_offset_t 3187pmap_early_io_map(vm_paddr_t pa, vm_size_t size) 3188{ 3189 vm_paddr_t pa_base; 3190 vm_offset_t va, sz; 3191 int i; 3192 3193 KASSERT(!pmap_bootstrapped, ("Do not use after PMAP is up!")); 3194 3195 for (i = 0; i < tlb1_idx; i++) { 3196 if (!(tlb1[i].mas1 & MAS1_VALID)) 3197 continue; 3198 if (pa >= tlb1[i].phys && (pa + size) <= 3199 (tlb1[i].phys + tlb1[i].size)) 3200 return (tlb1[i].virt + (pa - tlb1[i].phys)); 3201 } 3202 3203 pa_base = trunc_page(pa); 3204 size = roundup(size + (pa - pa_base), PAGE_SIZE); 3205 tlb1_map_base = roundup2(tlb1_map_base, 1 << (ilog2(size) & ~1)); 3206 va = tlb1_map_base + (pa - pa_base); 3207 3208 do { 3209 sz = 1 << (ilog2(size) & ~1); 3210 tlb1_set_entry(tlb1_map_base, pa_base, sz, _TLB_ENTRY_IO); 3211 size -= sz; 3212 pa_base += sz; 3213 tlb1_map_base += sz; 3214 } while (size > 0); 3215 3216#ifdef SMP 3217 bp_ntlb1s = tlb1_idx; 3218#endif 3219 3220 return (va); 3221} 3222 3223/* 3224 * Setup MAS4 defaults. 3225 * These values are loaded to MAS0-2 on a TLB miss. 
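 *
 * (In short: the defaults below make a miss target TLB0 with a 4 KB page
 * size, and on SMP the MAS4_MD bit is set as well; the remaining MAS
 * fields are presumably supplied by the TLB miss handler itself.)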
3226 */ 3227static void 3228set_mas4_defaults(void) 3229{ 3230 uint32_t mas4; 3231 3232 /* Defaults: TLB0, PID0, TSIZED=4K */ 3233 mas4 = MAS4_TLBSELD0; 3234 mas4 |= (TLB_SIZE_4K << MAS4_TSIZED_SHIFT) & MAS4_TSIZED_MASK; 3235#ifdef SMP 3236 mas4 |= MAS4_MD; 3237#endif 3238 mtspr(SPR_MAS4, mas4); 3239 __asm __volatile("isync"); 3240} 3241 3242/* 3243 * Print out contents of the MAS registers for each TLB1 entry 3244 */ 3245void 3246tlb1_print_tlbentries(void) 3247{ 3248 uint32_t mas0, mas1, mas2, mas3, mas7; 3249 int i; 3250 3251 debugf("TLB1 entries:\n"); 3252 for (i = 0; i < TLB1_ENTRIES; i++) { 3253 3254 mas0 = MAS0_TLBSEL(1) | MAS0_ESEL(i); 3255 mtspr(SPR_MAS0, mas0); 3256 3257 __asm __volatile("isync; tlbre"); 3258 3259 mas1 = mfspr(SPR_MAS1); 3260 mas2 = mfspr(SPR_MAS2); 3261 mas3 = mfspr(SPR_MAS3); 3262 mas7 = mfspr(SPR_MAS7); 3263 3264 tlb_print_entry(i, mas1, mas2, mas3, mas7); 3265 } 3266} 3267 3268/* 3269 * Print out contents of the in-ram tlb1 table. 3270 */ 3271void 3272tlb1_print_entries(void) 3273{ 3274 int i; 3275 3276 debugf("tlb1[] table entries:\n"); 3277 for (i = 0; i < TLB1_ENTRIES; i++) 3278 tlb_print_entry(i, tlb1[i].mas1, tlb1[i].mas2, tlb1[i].mas3, 0); 3279} 3280 3281/* 3282 * Return 0 if the physical IO range is encompassed by one of the 3283 * the TLB1 entries, otherwise return related error code. 3284 */ 3285static int 3286tlb1_iomapped(int i, vm_paddr_t pa, vm_size_t size, vm_offset_t *va) 3287{ 3288 uint32_t prot; 3289 vm_paddr_t pa_start; 3290 vm_paddr_t pa_end; 3291 unsigned int entry_tsize; 3292 vm_size_t entry_size; 3293 3294 *va = (vm_offset_t)NULL; 3295 3296 /* Skip invalid entries */ 3297 if (!(tlb1[i].mas1 & MAS1_VALID)) 3298 return (EINVAL); 3299 3300 /* 3301 * The entry must be cache-inhibited, guarded, and r/w 3302 * so it can function as an i/o page 3303 */ 3304 prot = tlb1[i].mas2 & (MAS2_I | MAS2_G); 3305 if (prot != (MAS2_I | MAS2_G)) 3306 return (EPERM); 3307 3308 prot = tlb1[i].mas3 & (MAS3_SR | MAS3_SW); 3309 if (prot != (MAS3_SR | MAS3_SW)) 3310 return (EPERM); 3311 3312 /* The address should be within the entry range. */ 3313 entry_tsize = (tlb1[i].mas1 & MAS1_TSIZE_MASK) >> MAS1_TSIZE_SHIFT; 3314 KASSERT((entry_tsize), ("tlb1_iomapped: invalid entry tsize")); 3315 3316 entry_size = tsize2size(entry_tsize); 3317 pa_start = tlb1[i].mas3 & MAS3_RPN; 3318 pa_end = pa_start + entry_size - 1; 3319 3320 if ((pa < pa_start) || ((pa + size) > pa_end)) 3321 return (ERANGE); 3322 3323 /* Return virtual address of this mapping. */ 3324 *va = (tlb1[i].mas2 & MAS2_EPN_MASK) + (pa - pa_start); 3325 return (0); 3326} 3327
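
/*
 * Worked example for tlb1_mapin_region() above (illustrative only, and
 * assuming KERNEL_REGION_MAX_TLB_ENTRIES is large enough): a 96 MB kernel
 * region is first rounded up to a 1 MB multiple (no change here), then
 * split into power-of-4 pieces starting from the 64 MB maximum:
 *
 *	pgs[] = { 64 MB, 16 MB, 16 MB }
 *
 * i.e. three TLB1 entries are consumed and no space is wasted.  If the
 * requested size cannot be covered within the entry limit, the correction
 * step drops the smallest pieces and retries with the next larger
 * power-of-4 page size, which may map beyond the requested region.
 */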