/*-
 * Copyright (C) 2007-2009 Semihalf, Rafal Jaworowski <raj@semihalf.com>
 * Copyright (C) 2006 Semihalf, Marian Balakowicz <m8@semihalf.com>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN
 * NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
 * TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
 * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
 * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Some hw specific parts of this pmap were derived or influenced
 * by NetBSD's ibm4xx pmap module. More generic code is shared with
 * a few other pmap modules from the FreeBSD tree.
 */

/*
 * VM layout notes:
 *
 * Kernel and user threads run within one common virtual address space
 * defined by AS=0.
 *
 * Virtual address space layout:
 * -----------------------------
 * 0x0000_0000 - 0xafff_ffff	: user process
 * 0xb000_0000 - 0xbfff_ffff	: pmap_mapdev()-ed area (PCI/PCIE etc.)
 * 0xc000_0000 - 0xc0ff_ffff	: kernel reserved
 *   0xc000_0000 - data_end	: kernel code+data, env, metadata etc.
 * 0xc100_0000 - 0xfeef_ffff	: KVA
 *   0xc100_0000 - 0xc100_3fff	: reserved for page zero/copy
 *   0xc100_4000 - 0xc200_3fff	: reserved for ptbl bufs
 *   0xc200_4000 - 0xc200_8fff	: guard page + kstack0
 *   0xc200_9000 - 0xfeef_ffff	: actual free KVA space
 * 0xfef0_0000 - 0xffff_ffff	: I/O devices region
 */
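/*
 * For orientation, the sizes implied by the map above (a back-of-the-envelope
 * check, nothing the code relies on): the user area spans 0xb000_0000 bytes
 * (~2.75 GB), the pmap_mapdev() window 0x1000_0000 bytes (256 MB), and the
 * free KVA range 0xc200_9000 - 0xfeef_ffff comes to 0x3cef_7000 bytes, i.e.
 * just under 1 GB (~974 MB).
 */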

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/malloc.h>
#include <sys/ktr.h>
#include <sys/proc.h>
#include <sys/user.h>
#include <sys/queue.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/linker.h>
#include <sys/msgbuf.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/rwlock.h>
#include <sys/sched.h>
#include <sys/smp.h>
#include <sys/vmmeter.h>

#include <vm/vm.h>
#include <vm/vm_page.h>
#include <vm/vm_kern.h>
#include <vm/vm_pageout.h>
#include <vm/vm_extern.h>
#include <vm/vm_object.h>
#include <vm/vm_param.h>
#include <vm/vm_map.h>
#include <vm/vm_pager.h>
#include <vm/uma.h>

#include <machine/cpu.h>
#include <machine/pcb.h>
#include <machine/platform.h>

#include <machine/tlb.h>
#include <machine/spr.h>
#include <machine/md_var.h>
#include <machine/mmuvar.h>
#include <machine/pmap.h>
#include <machine/pte.h>

#include "mmu_if.h"

#ifdef DEBUG
#define debugf(fmt, args...) printf(fmt, ##args)
#else
#define debugf(fmt, args...)
#endif

#define TODO	panic("%s: not implemented", __func__);

extern struct mtx sched_lock;

extern int dumpsys_minidump;

extern unsigned char _etext[];
extern unsigned char _end[];

extern uint32_t *bootinfo;

#ifdef SMP
extern uint32_t bp_ntlb1s;
#endif

vm_paddr_t ccsrbar_pa;
vm_paddr_t kernload;
vm_offset_t kernstart;
vm_size_t kernsize;

/* Message buffer and tables. */
static vm_offset_t data_start;
static vm_size_t data_end;

/* Phys/avail memory regions. */
static struct mem_region *availmem_regions;
static int availmem_regions_sz;
static struct mem_region *physmem_regions;
static int physmem_regions_sz;

/* Reserved KVA space and mutex for mmu_booke_zero_page. */
static vm_offset_t zero_page_va;
static struct mtx zero_page_mutex;

static struct mtx tlbivax_mutex;

/*
 * Reserved KVA space for mmu_booke_zero_page_idle. This is used
 * by the idle thread only, so no lock is required.
 */
static vm_offset_t zero_page_idle_va;

/* Reserved KVA space and mutex for mmu_booke_copy_page. */
static vm_offset_t copy_page_src_va;
static vm_offset_t copy_page_dst_va;
static struct mtx copy_page_mutex;

/**************************************************************************/
/* PMAP */
/**************************************************************************/

static void mmu_booke_enter_locked(mmu_t, pmap_t, vm_offset_t, vm_page_t,
    vm_prot_t, boolean_t);

unsigned int kptbl_min;		/* Index of the first kernel ptbl. */
unsigned int kernel_ptbls;	/* Number of KVA ptbls. */

/*
 * If user pmap is processed with mmu_booke_remove and the resident count
 * drops to 0, there are no more pages to remove, so we need not continue.
 */
#define PMAP_REMOVE_DONE(pmap) \
	((pmap) != kernel_pmap && (pmap)->pm_stats.resident_count == 0)

extern void tid_flush(tlbtid_t);

/**************************************************************************/
/* TLB and TID handling */
/**************************************************************************/

/* Translation ID busy table */
static volatile pmap_t tidbusy[MAXCPU][TID_MAX + 1];

/*
 * TLB0 capabilities (entry, way numbers etc.). These can vary between e500
 * core revisions and should be read from h/w registers during early config.
 */
uint32_t tlb0_entries;
uint32_t tlb0_ways;
uint32_t tlb0_entries_per_way;

#define TLB0_ENTRIES		(tlb0_entries)
#define TLB0_WAYS		(tlb0_ways)
#define TLB0_ENTRIES_PER_WAY	(tlb0_entries_per_way)
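/*
 * Worked example (illustrative only; the real values are read from the
 * TLB0CFG SPR in tlb0_get_tlbconf() below): an e500v2-class core reports a
 * 512-entry, 4-way set-associative TLB0, which yields
 * tlb0_entries_per_way = 512 / 4 = 128.
 */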
#define TLB1_ENTRIES 16

/* In-ram copy of the TLB1 */
static tlb_entry_t tlb1[TLB1_ENTRIES];

/* Next free entry in the TLB1 */
static unsigned int tlb1_idx;

static tlbtid_t tid_alloc(struct pmap *);

static void tlb_print_entry(int, uint32_t, uint32_t, uint32_t, uint32_t);

static int tlb1_set_entry(vm_offset_t, vm_offset_t, vm_size_t, uint32_t);
static void tlb1_write_entry(unsigned int);
static int tlb1_iomapped(int, vm_paddr_t, vm_size_t, vm_offset_t *);
static vm_size_t tlb1_mapin_region(vm_offset_t, vm_paddr_t, vm_size_t);

static vm_size_t tsize2size(unsigned int);
static unsigned int size2tsize(vm_size_t);
static unsigned int ilog2(unsigned int);

static void set_mas4_defaults(void);

static inline void tlb0_flush_entry(vm_offset_t);
static inline unsigned int tlb0_tableidx(vm_offset_t, unsigned int);

/**************************************************************************/
/* Page table management */
/**************************************************************************/

static struct rwlock_padalign pvh_global_lock;

/* Data for the pv entry allocation mechanism */
static uma_zone_t pvzone;
static int pv_entry_count = 0, pv_entry_max = 0, pv_entry_high_water = 0;

#define PV_ENTRY_ZONE_MIN	2048	/* min pv entries in uma zone */

#ifndef PMAP_SHPGPERPROC
#define PMAP_SHPGPERPROC	200
#endif

static void ptbl_init(void);
static struct ptbl_buf *ptbl_buf_alloc(void);
static void ptbl_buf_free(struct ptbl_buf *);
static void ptbl_free_pmap_ptbl(pmap_t, pte_t *);

static pte_t *ptbl_alloc(mmu_t, pmap_t, unsigned int);
static void ptbl_free(mmu_t, pmap_t, unsigned int);
static void ptbl_hold(mmu_t, pmap_t, unsigned int);
static int ptbl_unhold(mmu_t, pmap_t, unsigned int);

static vm_paddr_t pte_vatopa(mmu_t, pmap_t, vm_offset_t);
static pte_t *pte_find(mmu_t, pmap_t, vm_offset_t);
static void pte_enter(mmu_t, pmap_t, vm_page_t, vm_offset_t, uint32_t);
static int pte_remove(mmu_t, pmap_t, vm_offset_t, uint8_t);

static pv_entry_t pv_alloc(void);
static void pv_free(pv_entry_t);
static void pv_insert(pmap_t, vm_offset_t, vm_page_t);
static void pv_remove(pmap_t, vm_offset_t, vm_page_t);

/* Number of kva ptbl buffers, each covering one ptbl (PTBL_PAGES). */
#define PTBL_BUFS		(128 * 16)

struct ptbl_buf {
	TAILQ_ENTRY(ptbl_buf) link;	/* list link */
	vm_offset_t kva;		/* va of mapping */
};

/* ptbl free list and a lock used for access synchronization. */
static TAILQ_HEAD(, ptbl_buf) ptbl_buf_freelist;
static struct mtx ptbl_buf_freelist_lock;

/* Base address of kva space allocated for ptbl bufs. */
static vm_offset_t ptbl_buf_pool_vabase;

/* Pointer to ptbl_buf structures. */
static struct ptbl_buf *ptbl_bufs;

void pmap_bootstrap_ap(volatile uint32_t *);

/*
 * Kernel MMU interface
 */
static void mmu_booke_change_wiring(mmu_t, pmap_t, vm_offset_t, boolean_t);
static void mmu_booke_clear_modify(mmu_t, vm_page_t);
static void mmu_booke_copy(mmu_t, pmap_t, pmap_t, vm_offset_t,
    vm_size_t, vm_offset_t);
static void mmu_booke_copy_page(mmu_t, vm_page_t, vm_page_t);
static void mmu_booke_copy_pages(mmu_t, vm_page_t *,
    vm_offset_t, vm_page_t *, vm_offset_t, int);
static void mmu_booke_enter(mmu_t, pmap_t, vm_offset_t, vm_page_t,
    vm_prot_t, boolean_t);
static void mmu_booke_enter_object(mmu_t, pmap_t, vm_offset_t, vm_offset_t,
    vm_page_t, vm_prot_t);
static void mmu_booke_enter_quick(mmu_t, pmap_t, vm_offset_t, vm_page_t,
    vm_prot_t);
static vm_paddr_t mmu_booke_extract(mmu_t, pmap_t, vm_offset_t);
static vm_page_t mmu_booke_extract_and_hold(mmu_t, pmap_t, vm_offset_t,
    vm_prot_t);
static void mmu_booke_init(mmu_t);
static boolean_t mmu_booke_is_modified(mmu_t, vm_page_t);
static boolean_t mmu_booke_is_prefaultable(mmu_t, pmap_t, vm_offset_t);
static boolean_t mmu_booke_is_referenced(mmu_t, vm_page_t);
static int mmu_booke_ts_referenced(mmu_t, vm_page_t);
static vm_offset_t mmu_booke_map(mmu_t, vm_offset_t *, vm_paddr_t, vm_paddr_t,
    int);
static int mmu_booke_mincore(mmu_t, pmap_t, vm_offset_t,
    vm_paddr_t *);
static void mmu_booke_object_init_pt(mmu_t, pmap_t, vm_offset_t,
    vm_object_t, vm_pindex_t, vm_size_t);
static boolean_t mmu_booke_page_exists_quick(mmu_t, pmap_t, vm_page_t);
static void mmu_booke_page_init(mmu_t, vm_page_t);
static int mmu_booke_page_wired_mappings(mmu_t, vm_page_t);
static void mmu_booke_pinit(mmu_t, pmap_t);
static void mmu_booke_pinit0(mmu_t, pmap_t);
static void mmu_booke_protect(mmu_t, pmap_t, vm_offset_t, vm_offset_t,
    vm_prot_t);
static void mmu_booke_qenter(mmu_t, vm_offset_t, vm_page_t *, int);
static void mmu_booke_qremove(mmu_t, vm_offset_t, int);
static void mmu_booke_release(mmu_t, pmap_t);
static void mmu_booke_remove(mmu_t, pmap_t, vm_offset_t, vm_offset_t);
static void mmu_booke_remove_all(mmu_t, vm_page_t);
static void mmu_booke_remove_write(mmu_t, vm_page_t);
static void mmu_booke_zero_page(mmu_t, vm_page_t);
static void mmu_booke_zero_page_area(mmu_t, vm_page_t, int, int);
static void mmu_booke_zero_page_idle(mmu_t, vm_page_t);
static void mmu_booke_activate(mmu_t, struct thread *);
static void mmu_booke_deactivate(mmu_t, struct thread *);
static void mmu_booke_bootstrap(mmu_t, vm_offset_t, vm_offset_t);
static void *mmu_booke_mapdev(mmu_t, vm_paddr_t, vm_size_t);
static void mmu_booke_unmapdev(mmu_t, vm_offset_t, vm_size_t);
static vm_paddr_t mmu_booke_kextract(mmu_t, vm_offset_t);
static void mmu_booke_kenter(mmu_t, vm_offset_t, vm_paddr_t);
static void mmu_booke_kremove(mmu_t, vm_offset_t);
static boolean_t mmu_booke_dev_direct_mapped(mmu_t, vm_paddr_t, vm_size_t);
static void mmu_booke_sync_icache(mmu_t, pmap_t, vm_offset_t,
    vm_size_t);
static vm_offset_t mmu_booke_dumpsys_map(mmu_t, struct pmap_md *,
    vm_size_t, vm_size_t *);
static void mmu_booke_dumpsys_unmap(mmu_t, struct pmap_md *,
    vm_size_t, vm_offset_t);
static struct pmap_md *mmu_booke_scan_md(mmu_t, struct pmap_md *);

static mmu_method_t mmu_booke_methods[] = {
	/* pmap dispatcher interface */
	MMUMETHOD(mmu_change_wiring,	mmu_booke_change_wiring),
	MMUMETHOD(mmu_clear_modify,	mmu_booke_clear_modify),
	MMUMETHOD(mmu_copy,		mmu_booke_copy),
	MMUMETHOD(mmu_copy_page,	mmu_booke_copy_page),
	MMUMETHOD(mmu_copy_pages,	mmu_booke_copy_pages),
	MMUMETHOD(mmu_enter,		mmu_booke_enter),
	MMUMETHOD(mmu_enter_object,	mmu_booke_enter_object),
	MMUMETHOD(mmu_enter_quick,	mmu_booke_enter_quick),
	MMUMETHOD(mmu_extract,		mmu_booke_extract),
	MMUMETHOD(mmu_extract_and_hold,	mmu_booke_extract_and_hold),
	MMUMETHOD(mmu_init,		mmu_booke_init),
	MMUMETHOD(mmu_is_modified,	mmu_booke_is_modified),
	MMUMETHOD(mmu_is_prefaultable,	mmu_booke_is_prefaultable),
	MMUMETHOD(mmu_is_referenced,	mmu_booke_is_referenced),
	MMUMETHOD(mmu_ts_referenced,	mmu_booke_ts_referenced),
	MMUMETHOD(mmu_map,		mmu_booke_map),
	MMUMETHOD(mmu_mincore,		mmu_booke_mincore),
	MMUMETHOD(mmu_object_init_pt,	mmu_booke_object_init_pt),
	MMUMETHOD(mmu_page_exists_quick,mmu_booke_page_exists_quick),
	MMUMETHOD(mmu_page_init,	mmu_booke_page_init),
	MMUMETHOD(mmu_page_wired_mappings, mmu_booke_page_wired_mappings),
	MMUMETHOD(mmu_pinit,		mmu_booke_pinit),
	MMUMETHOD(mmu_pinit0,		mmu_booke_pinit0),
	MMUMETHOD(mmu_protect,		mmu_booke_protect),
	MMUMETHOD(mmu_qenter,		mmu_booke_qenter),
	MMUMETHOD(mmu_qremove,		mmu_booke_qremove),
	MMUMETHOD(mmu_release,		mmu_booke_release),
	MMUMETHOD(mmu_remove,		mmu_booke_remove),
	MMUMETHOD(mmu_remove_all,	mmu_booke_remove_all),
	MMUMETHOD(mmu_remove_write,	mmu_booke_remove_write),
	MMUMETHOD(mmu_sync_icache,	mmu_booke_sync_icache),
	MMUMETHOD(mmu_zero_page,	mmu_booke_zero_page),
	MMUMETHOD(mmu_zero_page_area,	mmu_booke_zero_page_area),
	MMUMETHOD(mmu_zero_page_idle,	mmu_booke_zero_page_idle),
	MMUMETHOD(mmu_activate,		mmu_booke_activate),
	MMUMETHOD(mmu_deactivate,	mmu_booke_deactivate),

	/* Internal interfaces */
	MMUMETHOD(mmu_bootstrap,	mmu_booke_bootstrap),
	MMUMETHOD(mmu_dev_direct_mapped,mmu_booke_dev_direct_mapped),
	MMUMETHOD(mmu_mapdev,		mmu_booke_mapdev),
	MMUMETHOD(mmu_kenter,		mmu_booke_kenter),
	MMUMETHOD(mmu_kextract,		mmu_booke_kextract),
/*	MMUMETHOD(mmu_kremove,		mmu_booke_kremove),	*/
	MMUMETHOD(mmu_unmapdev,		mmu_booke_unmapdev),

	/* dumpsys() support */
	MMUMETHOD(mmu_dumpsys_map,	mmu_booke_dumpsys_map),
	MMUMETHOD(mmu_dumpsys_unmap,	mmu_booke_dumpsys_unmap),
	MMUMETHOD(mmu_scan_md,		mmu_booke_scan_md),

	{ 0, 0 }
};

MMU_DEF(booke_mmu, MMU_TYPE_BOOKE, mmu_booke_methods, 0);
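/*
 * A note on dispatch (a sketch of the mechanism; mmu_if.m holds the
 * authoritative interface definition): the table above binds the generic
 * mmu_if methods to this implementation. The machine-independent pmap_*()
 * entry points are thin wrappers that forward to the registered mmu_t via
 * kobj method dispatch, so e.g. a pmap_kenter(va, pa) call ends up in
 * mmu_booke_kenter(mmu, va, pa) here.
 */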
static inline void
tlb_miss_lock(void)
{
#ifdef SMP
	struct pcpu *pc;

	if (!smp_started)
		return;

	STAILQ_FOREACH(pc, &cpuhead, pc_allcpu) {
		if (pc != pcpup) {

			CTR3(KTR_PMAP, "%s: tlb miss LOCK of CPU=%d, "
			    "tlb_lock=%p", __func__, pc->pc_cpuid,
			    pc->pc_booke_tlb_lock);

			KASSERT((pc->pc_cpuid != PCPU_GET(cpuid)),
			    ("tlb_miss_lock: tried to lock self"));

			tlb_lock(pc->pc_booke_tlb_lock);

			CTR1(KTR_PMAP, "%s: locked", __func__);
		}
	}
#endif
}

static inline void
tlb_miss_unlock(void)
{
#ifdef SMP
	struct pcpu *pc;

	if (!smp_started)
		return;

	STAILQ_FOREACH(pc, &cpuhead, pc_allcpu) {
		if (pc != pcpup) {
			CTR2(KTR_PMAP, "%s: tlb miss UNLOCK of CPU=%d",
			    __func__, pc->pc_cpuid);

			tlb_unlock(pc->pc_booke_tlb_lock);

			CTR1(KTR_PMAP, "%s: unlocked", __func__);
		}
	}
#endif
}

/* Read TLB0 geometry (number of entries, associativity) from h/w. */
static __inline void
tlb0_get_tlbconf(void)
{
	uint32_t tlb0_cfg;

	tlb0_cfg = mfspr(SPR_TLB0CFG);
	tlb0_entries = tlb0_cfg & TLBCFG_NENTRY_MASK;
	tlb0_ways = (tlb0_cfg & TLBCFG_ASSOC_MASK) >> TLBCFG_ASSOC_SHIFT;
	tlb0_entries_per_way = tlb0_entries / tlb0_ways;
}
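/*
 * Decoding example (hypothetical register value, assuming the usual e500
 * TLBnCFG field layout with NENTRY in the low bits and ASSOC in the top
 * byte): a TLB0CFG reading of 0x04000200 gives tlb0_entries = 0x200 = 512
 * and tlb0_ways = 0x04, hence tlb0_entries_per_way = 128.
 */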
/* Initialize pool of kva ptbl buffers. */
static void
ptbl_init(void)
{
	int i;

	CTR3(KTR_PMAP, "%s: s (ptbl_bufs = 0x%08x size 0x%08x)", __func__,
	    (uint32_t)ptbl_bufs, sizeof(struct ptbl_buf) * PTBL_BUFS);
	CTR3(KTR_PMAP, "%s: s (ptbl_buf_pool_vabase = 0x%08x size = 0x%08x)",
	    __func__, ptbl_buf_pool_vabase, PTBL_BUFS * PTBL_PAGES * PAGE_SIZE);

	mtx_init(&ptbl_buf_freelist_lock, "ptbl bufs lock", NULL, MTX_DEF);
	TAILQ_INIT(&ptbl_buf_freelist);

	for (i = 0; i < PTBL_BUFS; i++) {
		ptbl_bufs[i].kva = ptbl_buf_pool_vabase +
		    i * PTBL_PAGES * PAGE_SIZE;
		TAILQ_INSERT_TAIL(&ptbl_buf_freelist, &ptbl_bufs[i], link);
	}
}

/* Get a ptbl_buf from the freelist. */
static struct ptbl_buf *
ptbl_buf_alloc(void)
{
	struct ptbl_buf *buf;

	mtx_lock(&ptbl_buf_freelist_lock);
	buf = TAILQ_FIRST(&ptbl_buf_freelist);
	if (buf != NULL)
		TAILQ_REMOVE(&ptbl_buf_freelist, buf, link);
	mtx_unlock(&ptbl_buf_freelist_lock);

	CTR2(KTR_PMAP, "%s: buf = %p", __func__, buf);

	return (buf);
}

/* Return a ptbl buf to the free pool. */
static void
ptbl_buf_free(struct ptbl_buf *buf)
{

	CTR2(KTR_PMAP, "%s: buf = %p", __func__, buf);

	mtx_lock(&ptbl_buf_freelist_lock);
	TAILQ_INSERT_TAIL(&ptbl_buf_freelist, buf, link);
	mtx_unlock(&ptbl_buf_freelist_lock);
}

/*
 * Search the pmap's list of allocated ptbl bufs for the buf backing the
 * given ptbl and return it to the free pool.
 */
static void
ptbl_free_pmap_ptbl(pmap_t pmap, pte_t *ptbl)
{
	struct ptbl_buf *pbuf;

	CTR2(KTR_PMAP, "%s: ptbl = %p", __func__, ptbl);

	PMAP_LOCK_ASSERT(pmap, MA_OWNED);

	TAILQ_FOREACH(pbuf, &pmap->pm_ptbl_list, link)
		if (pbuf->kva == (vm_offset_t)ptbl) {
			/* Remove from pmap ptbl buf list. */
			TAILQ_REMOVE(&pmap->pm_ptbl_list, pbuf, link);

			/* Free corresponding ptbl buf. */
			ptbl_buf_free(pbuf);
			break;
		}
}
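/*
 * Sizing cross-check (assuming PTBL_PAGES = 2 and 4 KB pages): the pool
 * above provides PTBL_BUFS = 128 * 16 = 2048 bufs, so the dedicated KVA
 * window is 2048 * 2 * 4096 bytes = 16 MB, which matches the
 * 0xc100_4000 - 0xc200_3fff "reserved for ptbl bufs" range in the VM
 * layout notes at the top of this file.
 */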
/* Allocate page table. */
static pte_t *
ptbl_alloc(mmu_t mmu, pmap_t pmap, unsigned int pdir_idx)
{
	vm_page_t mtbl[PTBL_PAGES];
	vm_page_t m;
	struct ptbl_buf *pbuf;
	unsigned int pidx;
	pte_t *ptbl;
	int i;

	CTR4(KTR_PMAP, "%s: pmap = %p su = %d pdir_idx = %d", __func__, pmap,
	    (pmap == kernel_pmap), pdir_idx);

	KASSERT((pdir_idx <= (VM_MAXUSER_ADDRESS / PDIR_SIZE)),
	    ("ptbl_alloc: invalid pdir_idx"));
	KASSERT((pmap->pm_pdir[pdir_idx] == NULL),
	    ("ptbl_alloc: valid ptbl entry exists!"));

	pbuf = ptbl_buf_alloc();
	if (pbuf == NULL)
		panic("ptbl_alloc: couldn't alloc kernel virtual memory");

	ptbl = (pte_t *)pbuf->kva;

	CTR2(KTR_PMAP, "%s: ptbl kva = %p", __func__, ptbl);

	/* Allocate ptbl pages, this will sleep! */
	for (i = 0; i < PTBL_PAGES; i++) {
		pidx = (PTBL_PAGES * pdir_idx) + i;
		while ((m = vm_page_alloc(NULL, pidx,
		    VM_ALLOC_NOOBJ | VM_ALLOC_WIRED)) == NULL) {

			PMAP_UNLOCK(pmap);
			rw_wunlock(&pvh_global_lock);
			VM_WAIT;
			rw_wlock(&pvh_global_lock);
			PMAP_LOCK(pmap);
		}
		mtbl[i] = m;
	}

	/* Map allocated pages into kernel_pmap. */
	mmu_booke_qenter(mmu, (vm_offset_t)ptbl, mtbl, PTBL_PAGES);

	/* Zero whole ptbl. */
	bzero((caddr_t)ptbl, PTBL_PAGES * PAGE_SIZE);

	/* Add pbuf to the pmap ptbl bufs list. */
	TAILQ_INSERT_TAIL(&pmap->pm_ptbl_list, pbuf, link);

	return (ptbl);
}
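/*
 * A note on the retry loop above: vm_page_alloc() may fail under memory
 * pressure, in which case both the pmap lock and the global pv list lock
 * are dropped around VM_WAIT so the pagedaemon can make progress, and the
 * allocation is simply retried. The pages are not requested zeroed; the
 * whole ptbl is bzero()ed in one go once mapped into kernel_pmap.
 */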
/* Free ptbl pages and invalidate pdir entry. */
static void
ptbl_free(mmu_t mmu, pmap_t pmap, unsigned int pdir_idx)
{
	pte_t *ptbl;
	vm_paddr_t pa;
	vm_offset_t va;
	vm_page_t m;
	int i;

	CTR4(KTR_PMAP, "%s: pmap = %p su = %d pdir_idx = %d", __func__, pmap,
	    (pmap == kernel_pmap), pdir_idx);

	KASSERT((pdir_idx <= (VM_MAXUSER_ADDRESS / PDIR_SIZE)),
	    ("ptbl_free: invalid pdir_idx"));

	ptbl = pmap->pm_pdir[pdir_idx];

	CTR2(KTR_PMAP, "%s: ptbl = %p", __func__, ptbl);

	KASSERT((ptbl != NULL), ("ptbl_free: null ptbl"));

	/*
	 * Invalidate the pdir entry as soon as possible, so that other CPUs
	 * don't attempt to look up the page tables we are releasing.
	 */
	mtx_lock_spin(&tlbivax_mutex);
	tlb_miss_lock();

	pmap->pm_pdir[pdir_idx] = NULL;

	tlb_miss_unlock();
	mtx_unlock_spin(&tlbivax_mutex);

	for (i = 0; i < PTBL_PAGES; i++) {
		va = ((vm_offset_t)ptbl + (i * PAGE_SIZE));
		pa = pte_vatopa(mmu, kernel_pmap, va);
		m = PHYS_TO_VM_PAGE(pa);
		vm_page_free_zero(m);
		atomic_subtract_int(&cnt.v_wire_count, 1);
		mmu_booke_kremove(mmu, va);
	}

	ptbl_free_pmap_ptbl(pmap, ptbl);
}

/*
 * Decrement ptbl pages hold count and attempt to free ptbl pages.
 * Called when removing pte entry from ptbl.
 *
 * Return 1 if ptbl pages were freed.
 */
static int
ptbl_unhold(mmu_t mmu, pmap_t pmap, unsigned int pdir_idx)
{
	pte_t *ptbl;
	vm_paddr_t pa;
	vm_page_t m;
	int i;

	CTR4(KTR_PMAP, "%s: pmap = %p su = %d pdir_idx = %d", __func__, pmap,
	    (pmap == kernel_pmap), pdir_idx);

	KASSERT((pdir_idx <= (VM_MAXUSER_ADDRESS / PDIR_SIZE)),
	    ("ptbl_unhold: invalid pdir_idx"));
	KASSERT((pmap != kernel_pmap),
	    ("ptbl_unhold: unholding kernel ptbl!"));

	ptbl = pmap->pm_pdir[pdir_idx];

	//debugf("ptbl_unhold: ptbl = 0x%08x\n", (u_int32_t)ptbl);
	KASSERT(((vm_offset_t)ptbl >= VM_MIN_KERNEL_ADDRESS),
	    ("ptbl_unhold: non kva ptbl"));

	/* decrement hold count */
	for (i = 0; i < PTBL_PAGES; i++) {
		pa = pte_vatopa(mmu, kernel_pmap,
		    (vm_offset_t)ptbl + (i * PAGE_SIZE));
		m = PHYS_TO_VM_PAGE(pa);
		m->wire_count--;
	}

	/*
	 * Free ptbl pages if there are no pte entries in this ptbl.
	 * wire_count has the same value for all ptbl pages, so check the
	 * last page.
	 */
	if (m->wire_count == 0) {
		ptbl_free(mmu, pmap, pdir_idx);

		//debugf("ptbl_unhold: e (freed ptbl)\n");
		return (1);
	}

	return (0);
}

/*
 * Increment hold count for ptbl pages. This routine is used when a new pte
 * entry is being inserted into the ptbl.
 */
static void
ptbl_hold(mmu_t mmu, pmap_t pmap, unsigned int pdir_idx)
{
	vm_paddr_t pa;
	pte_t *ptbl;
	vm_page_t m;
	int i;

	CTR3(KTR_PMAP, "%s: pmap = %p pdir_idx = %d", __func__, pmap,
	    pdir_idx);

	KASSERT((pdir_idx <= (VM_MAXUSER_ADDRESS / PDIR_SIZE)),
	    ("ptbl_hold: invalid pdir_idx"));
	KASSERT((pmap != kernel_pmap),
	    ("ptbl_hold: holding kernel ptbl!"));

	ptbl = pmap->pm_pdir[pdir_idx];

	KASSERT((ptbl != NULL), ("ptbl_hold: null ptbl"));

	for (i = 0; i < PTBL_PAGES; i++) {
		pa = pte_vatopa(mmu, kernel_pmap,
		    (vm_offset_t)ptbl + (i * PAGE_SIZE));
		m = PHYS_TO_VM_PAGE(pa);
		m->wire_count++;
	}
}

/* Allocate pv_entry structure. */
pv_entry_t
pv_alloc(void)
{
	pv_entry_t pv;

	pv_entry_count++;
	if (pv_entry_count > pv_entry_high_water)
		pagedaemon_wakeup();
	pv = uma_zalloc(pvzone, M_NOWAIT);

	return (pv);
}

/* Free pv_entry structure. */
static __inline void
pv_free(pv_entry_t pve)
{

	pv_entry_count--;
	uma_zfree(pvzone, pve);
}


/* Allocate and initialize pv_entry structure. */
static void
pv_insert(pmap_t pmap, vm_offset_t va, vm_page_t m)
{
	pv_entry_t pve;

	//int su = (pmap == kernel_pmap);
	//debugf("pv_insert: s (su = %d pmap = 0x%08x va = 0x%08x m = 0x%08x)\n", su,
	//	(u_int32_t)pmap, va, (u_int32_t)m);

	pve = pv_alloc();
	if (pve == NULL)
		panic("pv_insert: no pv entries!");

	pve->pv_pmap = pmap;
	pve->pv_va = va;

	/* add to pv_list */
	PMAP_LOCK_ASSERT(pmap, MA_OWNED);
	rw_assert(&pvh_global_lock, RA_WLOCKED);

	TAILQ_INSERT_TAIL(&m->md.pv_list, pve, pv_link);

	//debugf("pv_insert: e\n");
}
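/*
 * Usage sketch: a pv entry records one (pmap, va) mapping of a physical
 * page, so consumers walk a page's mappings with
 *
 *	TAILQ_FOREACH(pve, &m->md.pv_list, pv_link)
 *		... pve->pv_pmap, pve->pv_va ...
 *
 * (see pv_remove() below for an instance). Both pv_insert() and pv_remove()
 * run with the owning pmap lock held and the global pv list lock
 * write-locked, as the assertions above document.
 */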
/* Destroy pv entry. */
static void
pv_remove(pmap_t pmap, vm_offset_t va, vm_page_t m)
{
	pv_entry_t pve;

	//int su = (pmap == kernel_pmap);
	//debugf("pv_remove: s (su = %d pmap = 0x%08x va = 0x%08x)\n", su, (u_int32_t)pmap, va);

	PMAP_LOCK_ASSERT(pmap, MA_OWNED);
	rw_assert(&pvh_global_lock, RA_WLOCKED);

	/* find pv entry */
	TAILQ_FOREACH(pve, &m->md.pv_list, pv_link) {
		if ((pmap == pve->pv_pmap) && (va == pve->pv_va)) {
			/* remove from pv_list */
			TAILQ_REMOVE(&m->md.pv_list, pve, pv_link);
			if (TAILQ_EMPTY(&m->md.pv_list))
				vm_page_aflag_clear(m, PGA_WRITEABLE);

			/* free pv entry struct */
			pv_free(pve);
			break;
		}
	}

	//debugf("pv_remove: e\n");
}

/*
 * Clean pte entry, try to free page table page if requested.
 *
 * Return 1 if ptbl pages were freed, otherwise return 0.
 */
static int
pte_remove(mmu_t mmu, pmap_t pmap, vm_offset_t va, uint8_t flags)
{
	unsigned int pdir_idx = PDIR_IDX(va);
	unsigned int ptbl_idx = PTBL_IDX(va);
	vm_page_t m;
	pte_t *ptbl;
	pte_t *pte;

	//int su = (pmap == kernel_pmap);
	//debugf("pte_remove: s (su = %d pmap = 0x%08x va = 0x%08x flags = %d)\n",
	//	su, (u_int32_t)pmap, va, flags);

	ptbl = pmap->pm_pdir[pdir_idx];
	KASSERT(ptbl, ("pte_remove: null ptbl"));

	pte = &ptbl[ptbl_idx];

	if (pte == NULL || !PTE_ISVALID(pte))
		return (0);

	if (PTE_ISWIRED(pte))
		pmap->pm_stats.wired_count--;

	/* Handle managed entry. */
	if (PTE_ISMANAGED(pte)) {
		/* Get vm_page_t for mapped pte. */
		m = PHYS_TO_VM_PAGE(PTE_PA(pte));

		if (PTE_ISMODIFIED(pte))
			vm_page_dirty(m);

		if (PTE_ISREFERENCED(pte))
			vm_page_aflag_set(m, PGA_REFERENCED);

		pv_remove(pmap, va, m);
	}

	mtx_lock_spin(&tlbivax_mutex);
	tlb_miss_lock();

	tlb0_flush_entry(va);
	pte->flags = 0;
	pte->rpn = 0;

	tlb_miss_unlock();
	mtx_unlock_spin(&tlbivax_mutex);

	pmap->pm_stats.resident_count--;

	if (flags & PTBL_UNHOLD) {
		//debugf("pte_remove: e (unhold)\n");
		return (ptbl_unhold(mmu, pmap, pdir_idx));
	}

	//debugf("pte_remove: e\n");
	return (0);
}
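/*
 * Ordering note for pte_remove() above and pte_enter() below: the TLB0
 * entry is flushed and the PTE words rewritten while both tlbivax_mutex
 * and the per-CPU tlb miss locks are held, so a TLB miss handler running
 * on another CPU cannot load a half-updated PTE or re-fetch a stale
 * translation between the flush and the PTE update.
 */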
/*
 * Insert PTE for a given page and virtual address.
 */
static void
pte_enter(mmu_t mmu, pmap_t pmap, vm_page_t m, vm_offset_t va, uint32_t flags)
{
	unsigned int pdir_idx = PDIR_IDX(va);
	unsigned int ptbl_idx = PTBL_IDX(va);
	pte_t *ptbl, *pte;

	CTR4(KTR_PMAP, "%s: su = %d pmap = %p va = %p", __func__,
	    pmap == kernel_pmap, pmap, va);

	/* Get the page table pointer. */
	ptbl = pmap->pm_pdir[pdir_idx];

	if (ptbl == NULL) {
		/* Allocate page table pages. */
		ptbl = ptbl_alloc(mmu, pmap, pdir_idx);
	} else {
		/*
		 * Check if there is valid mapping for requested
		 * va, if there is, remove it.
		 */
		pte = &pmap->pm_pdir[pdir_idx][ptbl_idx];
		if (PTE_ISVALID(pte)) {
			pte_remove(mmu, pmap, va, PTBL_HOLD);
		} else {
			/*
			 * pte is not used, increment hold count
			 * for ptbl pages.
			 */
			if (pmap != kernel_pmap)
				ptbl_hold(mmu, pmap, pdir_idx);
		}
	}

	/*
	 * Insert pv_entry into pv_list for mapped page if part of managed
	 * memory.
	 */
	if ((m->oflags & VPO_UNMANAGED) == 0) {
		flags |= PTE_MANAGED;

		/* Create and insert pv entry. */
		pv_insert(pmap, va, m);
	}

	pmap->pm_stats.resident_count++;

	mtx_lock_spin(&tlbivax_mutex);
	tlb_miss_lock();

	tlb0_flush_entry(va);
	if (pmap->pm_pdir[pdir_idx] == NULL) {
		/*
		 * If we just allocated a new page table, hook it in
		 * the pdir.
		 */
		pmap->pm_pdir[pdir_idx] = ptbl;
	}
	pte = &(pmap->pm_pdir[pdir_idx][ptbl_idx]);
	pte->rpn = VM_PAGE_TO_PHYS(m) & ~PTE_PA_MASK;
	pte->flags |= (PTE_VALID | flags);

	tlb_miss_unlock();
	mtx_unlock_spin(&tlbivax_mutex);
}

/* Return the pa for the given pmap/va. */
static vm_paddr_t
pte_vatopa(mmu_t mmu, pmap_t pmap, vm_offset_t va)
{
	vm_paddr_t pa = 0;
	pte_t *pte;

	pte = pte_find(mmu, pmap, va);
	if ((pte != NULL) && PTE_ISVALID(pte))
		pa = (PTE_PA(pte) | (va & PTE_PA_MASK));
	return (pa);
}
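/*
 * Worked example for the two-level lookup below (the constants assumed
 * here, a 4 KB page size and a 1024-entry pdir indexing 1024-entry ptbls,
 * are the usual ones from pte.h): for va = 0xc100_3000,
 * PDIR_IDX(va) = va >> 22 = 0x304 and PTBL_IDX(va) = (va >> 12) & 0x3ff
 * = 0x3, so the pte lives at pm_pdir[0x304][0x3].
 */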
/* Get a pointer to a PTE in a page table. */
static pte_t *
pte_find(mmu_t mmu, pmap_t pmap, vm_offset_t va)
{
	unsigned int pdir_idx = PDIR_IDX(va);
	unsigned int ptbl_idx = PTBL_IDX(va);

	KASSERT((pmap != NULL), ("pte_find: invalid pmap"));

	if (pmap->pm_pdir[pdir_idx])
		return (&(pmap->pm_pdir[pdir_idx][ptbl_idx]));

	return (NULL);
}

/**************************************************************************/
/* PMAP related */
/**************************************************************************/

/*
 * This is called during booke_init, before the system is really initialized.
 */
static void
mmu_booke_bootstrap(mmu_t mmu, vm_offset_t start, vm_offset_t kernelend)
{
	vm_offset_t phys_kernelend;
	struct mem_region *mp, *mp1;
	int cnt, i, j;
	u_int s, e, sz;
	u_int phys_avail_count;
	vm_size_t physsz, hwphyssz, kstack0_sz;
	vm_offset_t kernel_pdir, kstack0, va;
	vm_paddr_t kstack0_phys;
	void *dpcpu;
	pte_t *pte;

	debugf("mmu_booke_bootstrap: entered\n");

	/* Initialize invalidation mutex */
	mtx_init(&tlbivax_mutex, "tlbivax", NULL, MTX_SPIN);

	/* Read TLB0 size and associativity. */
	tlb0_get_tlbconf();

	/*
	 * Align kernel start and end address (kernel image).
	 * Note that kernel end does not necessarily relate to kernsize.
	 * kernsize is the size of the kernel that is actually mapped.
	 * Also note that "start - 1" is deliberate. With SMP, the
	 * entry point is exactly a page from the actual load address.
	 * As such, trunc_page() has no effect and we're off by a page.
	 * Since we always have the ELF header between the load address
	 * and the entry point, we can safely subtract 1 to compensate.
	 */
	kernstart = trunc_page(start - 1);
	data_start = round_page(kernelend);
	data_end = data_start;

	/*
	 * Addresses of preloaded modules (like file systems) use
	 * physical addresses. Make sure we relocate those into
	 * virtual addresses.
	 */
	preload_addr_relocate = kernstart - kernload;

	/* Allocate the dynamic per-cpu area. */
	dpcpu = (void *)data_end;
	data_end += DPCPU_SIZE;

	/* Allocate space for the message buffer. */
	msgbufp = (struct msgbuf *)data_end;
	data_end += msgbufsize;
	debugf(" msgbufp at 0x%08x end = 0x%08x\n", (uint32_t)msgbufp,
	    data_end);

	data_end = round_page(data_end);

	/* Allocate space for ptbl_bufs. */
	ptbl_bufs = (struct ptbl_buf *)data_end;
	data_end += sizeof(struct ptbl_buf) * PTBL_BUFS;
	debugf(" ptbl_bufs at 0x%08x end = 0x%08x\n", (uint32_t)ptbl_bufs,
	    data_end);

	data_end = round_page(data_end);

	/* Allocate PTE tables for kernel KVA. */
	kernel_pdir = data_end;
	kernel_ptbls = (VM_MAX_KERNEL_ADDRESS - VM_MIN_KERNEL_ADDRESS +
	    PDIR_SIZE - 1) / PDIR_SIZE;
	data_end += kernel_ptbls * PTBL_PAGES * PAGE_SIZE;
	debugf(" kernel ptbls: %d\n", kernel_ptbls);
	debugf(" kernel pdir at 0x%08x end = 0x%08x\n", kernel_pdir, data_end);

	debugf(" data_end: 0x%08x\n", data_end);
	if (data_end - kernstart > kernsize) {
		kernsize += tlb1_mapin_region(kernstart + kernsize,
		    kernload + kernsize, (data_end - kernstart) - kernsize);
	}
	data_end = kernstart + kernsize;
	debugf(" updated data_end: 0x%08x\n", data_end);

	/*
	 * Clear the structures - note we can only do it safely after the
	 * possible additional TLB1 translations are in place (above) so that
	 * the whole range up to the currently calculated 'data_end' is
	 * covered.
	 */
	dpcpu_init(dpcpu, 0);
	memset((void *)ptbl_bufs, 0, sizeof(struct ptbl_buf) * PTBL_BUFS);
	memset((void *)kernel_pdir, 0, kernel_ptbls * PTBL_PAGES * PAGE_SIZE);

	/*******************************************************/
	/* Set the start and end of kva. */
	/*******************************************************/
	virtual_avail = round_page(data_end);
	virtual_end = VM_MAX_KERNEL_ADDRESS;

	/* Allocate KVA space for page zero/copy operations. */
	zero_page_va = virtual_avail;
	virtual_avail += PAGE_SIZE;
	zero_page_idle_va = virtual_avail;
	virtual_avail += PAGE_SIZE;
	copy_page_src_va = virtual_avail;
	virtual_avail += PAGE_SIZE;
	copy_page_dst_va = virtual_avail;
	virtual_avail += PAGE_SIZE;
	debugf("zero_page_va = 0x%08x\n", zero_page_va);
	debugf("zero_page_idle_va = 0x%08x\n", zero_page_idle_va);
	debugf("copy_page_src_va = 0x%08x\n", copy_page_src_va);
	debugf("copy_page_dst_va = 0x%08x\n", copy_page_dst_va);

	/* Initialize page zero/copy mutexes. */
	mtx_init(&zero_page_mutex, "mmu_booke_zero_page", NULL, MTX_DEF);
	mtx_init(&copy_page_mutex, "mmu_booke_copy_page", NULL, MTX_DEF);

	/* Allocate KVA space for ptbl bufs. */
	ptbl_buf_pool_vabase = virtual_avail;
	virtual_avail += PTBL_BUFS * PTBL_PAGES * PAGE_SIZE;
	debugf("ptbl_buf_pool_vabase = 0x%08x end = 0x%08x\n",
	    ptbl_buf_pool_vabase, virtual_avail);

	/* Calculate corresponding physical addresses for the kernel region. */
	phys_kernelend = kernload + kernsize;
	debugf("kernel image and allocated data:\n");
	debugf(" kernload    = 0x%08x\n", kernload);
	debugf(" kernstart   = 0x%08x\n", kernstart);
	debugf(" kernsize    = 0x%08x\n", kernsize);

	if (sizeof(phys_avail) / sizeof(phys_avail[0]) < availmem_regions_sz)
		panic("mmu_booke_bootstrap: phys_avail too small");

	/*
	 * Remove kernel physical address range from avail regions list. Page
	 * align all regions. Non-page aligned memory isn't very interesting
	 * to us. Also, sort the entries for ascending addresses.
	 */

	/* Retrieve phys/avail mem regions */
	mem_regions(&physmem_regions, &physmem_regions_sz,
	    &availmem_regions, &availmem_regions_sz);
	sz = 0;
	cnt = availmem_regions_sz;
	debugf("processing avail regions:\n");
	for (mp = availmem_regions; mp->mr_size; mp++) {
		s = mp->mr_start;
		e = mp->mr_start + mp->mr_size;
		debugf(" %08x-%08x -> ", s, e);
		/* Check whether this region holds all of the kernel. */
		if (s < kernload && e > phys_kernelend) {
			availmem_regions[cnt].mr_start = phys_kernelend;
			availmem_regions[cnt++].mr_size = e - phys_kernelend;
			e = kernload;
		}
		/* Look whether this region starts within the kernel. */
		if (s >= kernload && s < phys_kernelend) {
			if (e <= phys_kernelend)
				goto empty;
			s = phys_kernelend;
		}
		/* Now look whether this region ends within the kernel. */
		if (e > kernload && e <= phys_kernelend) {
			if (s >= kernload)
				goto empty;
			e = kernload;
		}
		/* Now page align the start and size of the region. */
		s = round_page(s);
		e = trunc_page(e);
		if (e < s)
			e = s;
		sz = e - s;
		debugf("%08x-%08x = %x\n", s, e, sz);

		/* Check whether some memory is left here. */
		if (sz == 0) {
		empty:
			memmove(mp, mp + 1,
			    (cnt - (mp - availmem_regions)) * sizeof(*mp));
			cnt--;
			mp--;
			continue;
		}

		/* Do an insertion sort. */
		for (mp1 = availmem_regions; mp1 < mp; mp1++)
			if (s < mp1->mr_start)
				break;
		if (mp1 < mp) {
			memmove(mp1 + 1, mp1, (char *)mp - (char *)mp1);
			mp1->mr_start = s;
			mp1->mr_size = sz;
		} else {
			mp->mr_start = s;
			mp->mr_size = sz;
		}
	}
	availmem_regions_sz = cnt;
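
	/*
	 * Carveout example (hypothetical numbers): given a single avail
	 * region 0x0000_0000 - 0x7fff_ffff, kernload = 0x0100_0000 and
	 * phys_kernelend = 0x0140_0000, the "holds all of the kernel" case
	 * above splits it into 0x0000_0000 - 0x00ff_ffff and
	 * 0x0140_0000 - 0x7fff_ffff (the latter appended at the tail and
	 * handled on a later iteration), and the insertion sort keeps the
	 * final list in ascending order.
	 */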

	/*******************************************************/
	/* Steal physical memory for kernel stack from the end */
	/* of the first avail region                           */
	/*******************************************************/
	kstack0_sz = KSTACK_PAGES * PAGE_SIZE;
	kstack0_phys = availmem_regions[0].mr_start +
	    availmem_regions[0].mr_size;
	kstack0_phys -= kstack0_sz;
	availmem_regions[0].mr_size -= kstack0_sz;

	/*******************************************************/
	/* Fill in phys_avail table, based on availmem_regions */
	/*******************************************************/
	phys_avail_count = 0;
	physsz = 0;
	hwphyssz = 0;
	TUNABLE_ULONG_FETCH("hw.physmem", (u_long *) &hwphyssz);

	debugf("fill in phys_avail:\n");
	for (i = 0, j = 0; i < availmem_regions_sz; i++, j += 2) {

		debugf(" region: 0x%08x - 0x%08x (0x%08x)\n",
		    availmem_regions[i].mr_start,
		    availmem_regions[i].mr_start +
		        availmem_regions[i].mr_size,
		    availmem_regions[i].mr_size);

		if (hwphyssz != 0 &&
		    (physsz + availmem_regions[i].mr_size) >= hwphyssz) {
			debugf(" hw.physmem adjust\n");
			if (physsz < hwphyssz) {
				phys_avail[j] = availmem_regions[i].mr_start;
				phys_avail[j + 1] =
				    availmem_regions[i].mr_start +
				    hwphyssz - physsz;
				physsz = hwphyssz;
				phys_avail_count++;
			}
			break;
		}

		phys_avail[j] = availmem_regions[i].mr_start;
		phys_avail[j + 1] = availmem_regions[i].mr_start +
		    availmem_regions[i].mr_size;
		phys_avail_count++;
		physsz += availmem_regions[i].mr_size;
	}
	physmem = btoc(physsz);

	/* Calculate the last available physical address. */
	for (i = 0; phys_avail[i + 2] != 0; i += 2)
		;
	Maxmem = powerpc_btop(phys_avail[i + 1]);

	debugf("Maxmem = 0x%08lx\n", Maxmem);
	debugf("phys_avail_count = %d\n", phys_avail_count);
	debugf("physsz = 0x%08x physmem = %ld (0x%08lx)\n", physsz, physmem,
	    physmem);
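
	/*
	 * Layout reminder (with hypothetical numbers): phys_avail[] holds
	 * start/end pairs, phys_avail[2i] and phys_avail[2i + 1], terminated
	 * by a zero pair. E.g. a single 256 MB region at 0, minus the 16 KB
	 * stolen above for kstack0 (assuming KSTACK_PAGES = 4 and 4 KB
	 * pages), would yield { 0x0000_0000, 0x0fffc000, 0, 0 }.
	 */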

	/*******************************************************/
	/* Initialize (statically allocated) kernel pmap. */
	/*******************************************************/
	PMAP_LOCK_INIT(kernel_pmap);
	kptbl_min = VM_MIN_KERNEL_ADDRESS / PDIR_SIZE;

	debugf("kernel_pmap = 0x%08x\n", (uint32_t)kernel_pmap);
	debugf("kptbl_min = %d, kernel_ptbls = %d\n", kptbl_min, kernel_ptbls);
	debugf("kernel pdir range: 0x%08x - 0x%08x\n",
	    kptbl_min * PDIR_SIZE, (kptbl_min + kernel_ptbls) * PDIR_SIZE - 1);

	/* Initialize kernel pdir */
	for (i = 0; i < kernel_ptbls; i++)
		kernel_pmap->pm_pdir[kptbl_min + i] =
		    (pte_t *)(kernel_pdir + (i * PAGE_SIZE * PTBL_PAGES));

	for (i = 0; i < MAXCPU; i++) {
		kernel_pmap->pm_tid[i] = TID_KERNEL;

		/* Initialize each CPU's tidbusy entry 0 with kernel_pmap */
		tidbusy[i][0] = kernel_pmap;
	}

	/*
	 * Fill in PTEs covering kernel code and data. They are not required
	 * for address translation, as this area is covered by static TLB1
	 * entries, but for pte_vatopa() to work correctly with kernel area
	 * addresses.
	 */
	for (va = kernstart; va < data_end; va += PAGE_SIZE) {
		pte = &(kernel_pmap->pm_pdir[PDIR_IDX(va)][PTBL_IDX(va)]);
		pte->rpn = kernload + (va - kernstart);
		pte->flags = PTE_M | PTE_SR | PTE_SW | PTE_SX | PTE_WIRED |
		    PTE_VALID;
	}
	/* Mark kernel_pmap active on all CPUs */
	CPU_FILL(&kernel_pmap->pm_active);

	/*
	 * Initialize the global pv list lock.
	 */
	rw_init(&pvh_global_lock, "pmap pv global");

	/*******************************************************/
	/* Final setup */
	/*******************************************************/

	/* Enter kstack0 into kernel map, provide guard page */
	kstack0 = virtual_avail + KSTACK_GUARD_PAGES * PAGE_SIZE;
	thread0.td_kstack = kstack0;
	thread0.td_kstack_pages = KSTACK_PAGES;

	debugf("kstack_sz = 0x%08x\n", kstack0_sz);
	debugf("kstack0_phys at 0x%08x - 0x%08x\n",
	    kstack0_phys, kstack0_phys + kstack0_sz);
	debugf("kstack0 at 0x%08x - 0x%08x\n", kstack0, kstack0 + kstack0_sz);

	virtual_avail += KSTACK_GUARD_PAGES * PAGE_SIZE + kstack0_sz;
	for (i = 0; i < KSTACK_PAGES; i++) {
		mmu_booke_kenter(mmu, kstack0, kstack0_phys);
		kstack0 += PAGE_SIZE;
		kstack0_phys += PAGE_SIZE;
	}

	debugf("virtual_avail = %08x\n", virtual_avail);
	debugf("virtual_end   = %08x\n", virtual_end);

	debugf("mmu_booke_bootstrap: exit\n");
}

void
pmap_bootstrap_ap(volatile uint32_t *trcp __unused)
{
	int i;

	/*
	 * Finish TLB1 configuration: the BSP already set up its TLB1 and we
	 * have the snapshot of its contents in the s/w tlb1[] table, so use
	 * these values directly to (re)program AP's TLB1 hardware.
	 */
	for (i = bp_ntlb1s; i < tlb1_idx; i++) {
		/* Skip invalid entries */
		if (!(tlb1[i].mas1 & MAS1_VALID))
			continue;

		tlb1_write_entry(i);
	}

	set_mas4_defaults();
}

/*
 * Get the physical page address for the given pmap/virtual address.
 */
static vm_paddr_t
mmu_booke_extract(mmu_t mmu, pmap_t pmap, vm_offset_t va)
{
	vm_paddr_t pa;

	PMAP_LOCK(pmap);
	pa = pte_vatopa(mmu, pmap, va);
	PMAP_UNLOCK(pmap);

	return (pa);
}

/*
 * Extract the physical page address associated with the given
 * kernel virtual address.
 */
static vm_paddr_t
mmu_booke_kextract(mmu_t mmu, vm_offset_t va)
{

	return (pte_vatopa(mmu, kernel_pmap, va));
}

/*
 * Initialize the pmap module.
 * Called by vm_init, to initialize any structures that the pmap
 * system needs to map virtual memory.
 */
static void
mmu_booke_init(mmu_t mmu)
{
	int shpgperproc = PMAP_SHPGPERPROC;

	/*
	 * Initialize the address space (zone) for the pv entries. Set a
	 * high water mark so that the system can recover from excessive
	 * numbers of pv entries.
	 */
	pvzone = uma_zcreate("PV ENTRY", sizeof(struct pv_entry), NULL, NULL,
	    NULL, NULL, UMA_ALIGN_PTR, UMA_ZONE_VM | UMA_ZONE_NOFREE);

	TUNABLE_INT_FETCH("vm.pmap.shpgperproc", &shpgperproc);
	pv_entry_max = shpgperproc * maxproc + cnt.v_page_count;

	TUNABLE_INT_FETCH("vm.pmap.pv_entries", &pv_entry_max);
	pv_entry_high_water = 9 * (pv_entry_max / 10);

	uma_zone_reserve_kva(pvzone, pv_entry_max);

	/* Pre-fill pvzone with initial number of pv entries. */
	uma_prealloc(pvzone, PV_ENTRY_ZONE_MIN);

	/* Initialize ptbl allocation. */
	ptbl_init();
}
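/*
 * Sizing example for the defaults above (hypothetical machine): with
 * PMAP_SHPGPERPROC = 200, maxproc = 1000 and cnt.v_page_count = 262144
 * (1 GB of 4 KB pages), pv_entry_max = 200 * 1000 + 262144 = 462144 and
 * pv_entry_high_water = 9 * (462144 / 10) = 415926 (integer division),
 * the point past which pv_alloc() starts waking the pagedaemon.
 */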
/*
 * Map a list of wired pages into kernel virtual address space. This is
 * intended for temporary mappings which do not need page modification or
 * references recorded. Existing mappings in the region are overwritten.
 */
static void
mmu_booke_qenter(mmu_t mmu, vm_offset_t sva, vm_page_t *m, int count)
{
	vm_offset_t va;

	va = sva;
	while (count-- > 0) {
		mmu_booke_kenter(mmu, va, VM_PAGE_TO_PHYS(*m));
		va += PAGE_SIZE;
		m++;
	}
}

/*
 * Remove page mappings from kernel virtual address space. Intended for
 * temporary mappings entered by mmu_booke_qenter.
 */
static void
mmu_booke_qremove(mmu_t mmu, vm_offset_t sva, int count)
{
	vm_offset_t va;

	va = sva;
	while (count-- > 0) {
		mmu_booke_kremove(mmu, va);
		va += PAGE_SIZE;
	}
}

/*
 * Map a wired page into kernel virtual address space.
 */
static void
mmu_booke_kenter(mmu_t mmu, vm_offset_t va, vm_paddr_t pa)
{
	unsigned int pdir_idx = PDIR_IDX(va);
	unsigned int ptbl_idx = PTBL_IDX(va);
	uint32_t flags;
	pte_t *pte;

	KASSERT(((va >= VM_MIN_KERNEL_ADDRESS) &&
	    (va <= VM_MAX_KERNEL_ADDRESS)), ("mmu_booke_kenter: invalid va"));

	flags = PTE_M | PTE_SR | PTE_SW | PTE_SX | PTE_WIRED | PTE_VALID;

	pte = &(kernel_pmap->pm_pdir[pdir_idx][ptbl_idx]);

	mtx_lock_spin(&tlbivax_mutex);
	tlb_miss_lock();

	if (PTE_ISVALID(pte)) {

		CTR1(KTR_PMAP, "%s: replacing entry!", __func__);

		/* Flush entry from TLB0 */
		tlb0_flush_entry(va);
	}

	pte->rpn = pa & ~PTE_PA_MASK;
	pte->flags = flags;

	//debugf("mmu_booke_kenter: pdir_idx = %d ptbl_idx = %d va=0x%08x "
	//		"pa=0x%08x rpn=0x%08x flags=0x%08x\n",
	//		pdir_idx, ptbl_idx, va, pa, pte->rpn, pte->flags);

	/* Flush the real memory from the instruction cache. */
	if ((flags & (PTE_I | PTE_G)) == 0) {
		__syncicache((void *)va, PAGE_SIZE);
	}

	tlb_miss_unlock();
	mtx_unlock_spin(&tlbivax_mutex);
}
	 */
1426176771Sraj	if ((flags & (PTE_I | PTE_G)) == 0) {
1427176771Sraj		__syncicache((void *)va, PAGE_SIZE);
1428176771Sraj	}
1429176771Sraj
1430192532Sraj	tlb_miss_unlock();
1431187149Sraj	mtx_unlock_spin(&tlbivax_mutex);
1432176771Sraj}
1433176771Sraj
1434176771Sraj/*
1435176771Sraj * Remove a page from the kernel page table.
1436176771Sraj */
1437176771Srajstatic void
1438176771Srajmmu_booke_kremove(mmu_t mmu, vm_offset_t va)
1439176771Sraj{
1440176771Sraj	unsigned int pdir_idx = PDIR_IDX(va);
1441176771Sraj	unsigned int ptbl_idx = PTBL_IDX(va);
1442176771Sraj	pte_t *pte;
1443176771Sraj
1444187149Sraj//	CTR2(KTR_PMAP,("%s: s (va = 0x%08x)\n", __func__, va));
1445176771Sraj
1446187149Sraj	KASSERT(((va >= VM_MIN_KERNEL_ADDRESS) &&
1447187149Sraj	    (va <= VM_MAX_KERNEL_ADDRESS)),
1448176771Sraj	    ("mmu_booke_kremove: invalid va"));
1449176771Sraj
1450176771Sraj	pte = &(kernel_pmap->pm_pdir[pdir_idx][ptbl_idx]);
1451176771Sraj
1452176771Sraj	if (!PTE_ISVALID(pte)) {
1453187149Sraj
1454187149Sraj		CTR1(KTR_PMAP, "%s: invalid pte", __func__);
1455187149Sraj
1456176771Sraj		return;
1457176771Sraj	}
1458176771Sraj
1459187149Sraj	mtx_lock_spin(&tlbivax_mutex);
1460192532Sraj	tlb_miss_lock();
1461176771Sraj
1462187149Sraj	/* Invalidate entry in TLB0, update PTE. */
1463187149Sraj	tlb0_flush_entry(va);
1464176771Sraj	pte->flags = 0;
1465176771Sraj	pte->rpn = 0;
1466176771Sraj
1467192532Sraj	tlb_miss_unlock();
1468187149Sraj	mtx_unlock_spin(&tlbivax_mutex);
1469176771Sraj}
1470176771Sraj
1471176771Sraj/*
1472176771Sraj * Initialize pmap associated with process 0.
1473176771Sraj */
1474176771Srajstatic void
1475176771Srajmmu_booke_pinit0(mmu_t mmu, pmap_t pmap)
1476176771Sraj{
1477187151Sraj
1478254667Skib	PMAP_LOCK_INIT(pmap);
1479176771Sraj	mmu_booke_pinit(mmu, pmap);
1480176771Sraj	PCPU_SET(curpmap, pmap);
1481176771Sraj}
1482176771Sraj
1483176771Sraj/*
1484176771Sraj * Initialize a preallocated and zeroed pmap structure,
1485176771Sraj * such as one in a vmspace structure.
1486176771Sraj */
1487176771Srajstatic void
1488176771Srajmmu_booke_pinit(mmu_t mmu, pmap_t pmap)
1489176771Sraj{
1490187149Sraj	int i;
1491176771Sraj
1492187149Sraj	CTR4(KTR_PMAP, "%s: pmap = %p, proc %d '%s'", __func__, pmap,
1493187149Sraj	    curthread->td_proc->p_pid, curthread->td_proc->p_comm);
1494176771Sraj
1495187149Sraj	KASSERT((pmap != kernel_pmap), ("pmap_pinit: initializing kernel_pmap"));
1496176771Sraj
1497187149Sraj	for (i = 0; i < MAXCPU; i++)
1498187149Sraj		pmap->pm_tid[i] = TID_NONE;
1499222813Sattilio	CPU_ZERO(&pmap->pm_active);
1500176771Sraj	bzero(&pmap->pm_stats, sizeof(pmap->pm_stats));
1501176771Sraj	bzero(&pmap->pm_pdir, sizeof(pte_t *) * PDIR_NENTRIES);
1502187149Sraj	TAILQ_INIT(&pmap->pm_ptbl_list);
1503176771Sraj}
1504176771Sraj
1505176771Sraj/*
1506176771Sraj * Release any resources held by the given physical map.
1507176771Sraj * Called when a pmap initialized by mmu_booke_pinit is being released.
1508176771Sraj * Should only be called if the map contains no valid mappings.
1509176771Sraj */
1510176771Srajstatic void
1511176771Srajmmu_booke_release(mmu_t mmu, pmap_t pmap)
1512176771Sraj{
1513176771Sraj
1514187151Sraj	KASSERT(pmap->pm_stats.resident_count == 0,
1515187151Sraj	    ("pmap_release: pmap resident count %ld != 0",
1516187151Sraj	    pmap->pm_stats.resident_count));
1517176771Sraj}
1518176771Sraj
1519176771Sraj/*
1520176771Sraj * Insert the given physical page at the specified virtual address in the
1521176771Sraj * target physical map with the protection requested.
If specified, the page
1522176771Sraj * will be wired down.
1523176771Sraj */
1524176771Srajstatic void
1525176771Srajmmu_booke_enter(mmu_t mmu, pmap_t pmap, vm_offset_t va, vm_page_t m,
1526176771Sraj    vm_prot_t prot, boolean_t wired)
1527176771Sraj{
1528187151Sraj
1529242535Salc	rw_wlock(&pvh_global_lock);
1530176771Sraj	PMAP_LOCK(pmap);
1531176771Sraj	mmu_booke_enter_locked(mmu, pmap, va, m, prot, wired);
1532242535Salc	rw_wunlock(&pvh_global_lock);
1533176771Sraj	PMAP_UNLOCK(pmap);
1534176771Sraj}
1535176771Sraj
1536176771Srajstatic void
1537176771Srajmmu_booke_enter_locked(mmu_t mmu, pmap_t pmap, vm_offset_t va, vm_page_t m,
1538176771Sraj    vm_prot_t prot, boolean_t wired)
1539176771Sraj{
1540176771Sraj	pte_t *pte;
1541176771Sraj	vm_paddr_t pa;
1542187151Sraj	uint32_t flags;
1543176771Sraj	int su, sync;
1544176771Sraj
1545176771Sraj	pa = VM_PAGE_TO_PHYS(m);
1546176771Sraj	su = (pmap == kernel_pmap);
1547176771Sraj	sync = 0;
1548176771Sraj
1549176771Sraj	//debugf("mmu_booke_enter_locked: s (pmap=0x%08x su=%d tid=%d m=0x%08x va=0x%08x "
1550176771Sraj	//    "pa=0x%08x prot=0x%08x wired=%d)\n",
1551176771Sraj	//    (u_int32_t)pmap, su, pmap->pm_tid,
1552176771Sraj	//    (u_int32_t)m, va, pa, prot, wired);
1553176771Sraj
1554176771Sraj	if (su) {
1555187151Sraj		KASSERT(((va >= virtual_avail) &&
1556187151Sraj		    (va <= VM_MAX_KERNEL_ADDRESS)),
1557187151Sraj		    ("mmu_booke_enter_locked: kernel pmap, non kernel va"));
1558176771Sraj	} else {
1559176771Sraj		KASSERT((va <= VM_MAXUSER_ADDRESS),
1560187151Sraj		    ("mmu_booke_enter_locked: user pmap, non user va"));
1561176771Sraj	}
1562254138Sattilio	if ((m->oflags & VPO_UNMANAGED) == 0 && !vm_page_xbusied(m))
1563250747Salc		VM_OBJECT_ASSERT_LOCKED(m->object);
1564176771Sraj
1565176771Sraj	PMAP_LOCK_ASSERT(pmap, MA_OWNED);
1566176771Sraj
1567176771Sraj	/*
1568176771Sraj	 * If there is an existing mapping, and the physical address has not
1569176771Sraj	 * changed, it must be a protection or wiring change.
1570176771Sraj	 */
1571176771Sraj	if (((pte = pte_find(mmu, pmap, va)) != NULL) &&
1572176771Sraj	    (PTE_ISVALID(pte)) && (PTE_PA(pte) == pa)) {
1573187149Sraj
1574187149Sraj		/*
1575187149Sraj		 * Before actually updating pte->flags we calculate and
1576187149Sraj		 * prepare its new value in a helper var.
1577187149Sraj		 */
1578187149Sraj		flags = pte->flags;
1579187149Sraj		flags &= ~(PTE_UW | PTE_UX | PTE_SW | PTE_SX | PTE_MODIFIED);
1580176771Sraj
1581176771Sraj		/* Wiring change, just update stats. */
1582176771Sraj		if (wired) {
1583176771Sraj			if (!PTE_ISWIRED(pte)) {
1584187149Sraj				flags |= PTE_WIRED;
1585176771Sraj				pmap->pm_stats.wired_count++;
1586176771Sraj			}
1587176771Sraj		} else {
1588176771Sraj			if (PTE_ISWIRED(pte)) {
1589187149Sraj				flags &= ~PTE_WIRED;
1590176771Sraj				pmap->pm_stats.wired_count--;
1591176771Sraj			}
1592176771Sraj		}
1593176771Sraj
1594176771Sraj		if (prot & VM_PROT_WRITE) {
1595176771Sraj			/* Add write permissions. */
1596187149Sraj			flags |= PTE_SW;
1597176771Sraj			if (!su)
1598187149Sraj				flags |= PTE_UW;
1599192795Sraj
1600208846Salc			if ((flags & PTE_MANAGED) != 0)
1601225418Skib				vm_page_aflag_set(m, PGA_WRITEABLE);
1602176771Sraj		} else {
1603176771Sraj			/* Handle modified pages; sense the modify status. */
1604187149Sraj
1605187149Sraj			/*
1606187149Sraj			 * The PTE_MODIFIED flag could have been set by the
1607187149Sraj			 * underlying TLB miss handler since we last read it
1608187149Sraj			 * (above), and other CPUs could update it as well, so
1609187149Sraj			 * check the PTE directly rather than relying on the
1610187149Sraj			 * saved local copy of the flags.
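			 *
			 * A hypothetical interleaving: after we read the
			 * flags, another CPU takes a TLB miss on this page
			 * and sets PTE_MODIFIED; writing back our stale
			 * local copy would then lose the dirty bit.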
1611187149Sraj			 */
1612178626Smarcel			if (PTE_ISMODIFIED(pte))
1613178626Smarcel				vm_page_dirty(m);
1614176771Sraj		}
1615176771Sraj
1616176771Sraj		if (prot & VM_PROT_EXECUTE) {
1617187149Sraj			flags |= PTE_SX;
1618176771Sraj			if (!su)
1619187149Sraj				flags |= PTE_UX;
1620176771Sraj
1621187149Sraj			/*
1622187149Sraj			 * Check existing flags for execute permissions: if we
1623187149Sraj			 * are turning execute permissions on, icache should
1624187149Sraj			 * be flushed.
1625187149Sraj			 */
1626208720Salc			if ((pte->flags & (PTE_UX | PTE_SX)) == 0)
1627176771Sraj				sync++;
1628176771Sraj		}
1629176771Sraj
1630187149Sraj		flags &= ~PTE_REFERENCED;
1631187149Sraj
1632187149Sraj		/*
1633187149Sraj		 * The new flags value is all calculated -- only now actually
1634187149Sraj		 * update the PTE.
1635187149Sraj		 */
1636187149Sraj		mtx_lock_spin(&tlbivax_mutex);
1637192532Sraj		tlb_miss_lock();
1638187149Sraj
1639187149Sraj		tlb0_flush_entry(va);
1640187149Sraj		pte->flags = flags;
1641187149Sraj
1642192532Sraj		tlb_miss_unlock();
1643187149Sraj		mtx_unlock_spin(&tlbivax_mutex);
1644187149Sraj
1645176771Sraj	} else {
1646176771Sraj		/*
1647187149Sraj		 * If there is an existing mapping, but it's for a different
1648176771Sraj		 * physical address, pte_enter() will delete the old mapping.
1649176771Sraj		 */
1650176771Sraj		//if ((pte != NULL) && PTE_ISVALID(pte))
1651176771Sraj		//	debugf("mmu_booke_enter_locked: replace\n");
1652176771Sraj		//else
1653176771Sraj		//	debugf("mmu_booke_enter_locked: new\n");
1654176771Sraj
1655176771Sraj		/* Now set up the flags and install the new mapping. */
1656176771Sraj		flags = (PTE_SR | PTE_VALID);
1657187149Sraj		flags |= PTE_M;
1658176771Sraj
1659176771Sraj		if (!su)
1660176771Sraj			flags |= PTE_UR;
1661176771Sraj
1662176771Sraj		if (prot & VM_PROT_WRITE) {
1663176771Sraj			flags |= PTE_SW;
1664176771Sraj			if (!su)
1665176771Sraj				flags |= PTE_UW;
1666192795Sraj
1667224746Skib			if ((m->oflags & VPO_UNMANAGED) == 0)
1668225418Skib				vm_page_aflag_set(m, PGA_WRITEABLE);
1669176771Sraj		}
1670176771Sraj
1671176771Sraj		if (prot & VM_PROT_EXECUTE) {
1672176771Sraj			flags |= PTE_SX;
1673176771Sraj			if (!su)
1674176771Sraj				flags |= PTE_UX;
1675176771Sraj		}
1676176771Sraj
1677176771Sraj		/* If it's wired, update stats. */
1678176771Sraj		if (wired) {
1679176771Sraj			pmap->pm_stats.wired_count++;
1680176771Sraj			flags |= PTE_WIRED;
1681176771Sraj		}
1682176771Sraj
1683176771Sraj		pte_enter(mmu, pmap, m, va, flags);
1684176771Sraj
1685176771Sraj		/* Flush the real memory from the instruction cache. */
1686176771Sraj		if (prot & VM_PROT_EXECUTE)
1687176771Sraj			sync++;
1688176771Sraj	}
1689176771Sraj
1690176771Sraj	if (sync && (su || pmap == PCPU_GET(curpmap))) {
1691176771Sraj		__syncicache((void *)va, PAGE_SIZE);
1692176771Sraj		sync = 0;
1693176771Sraj	}
1694176771Sraj}
1695176771Sraj
1696176771Sraj/*
1697176771Sraj * Maps a sequence of resident pages belonging to the same object.
1698176771Sraj * The sequence begins with the given page m_start.  This page is
1699176771Sraj * mapped at the given virtual address start.  Each subsequent page is
1700176771Sraj * mapped at a virtual address that is offset from start by the same
1701176771Sraj * amount as the page is offset from m_start within the object.  The
1702176771Sraj * last page in the sequence is the page with the largest offset from
1703176771Sraj * m_start that can be mapped at a virtual address less than the given
1704176771Sraj * virtual address end.
Not every virtual page between start and end 1705176771Sraj * is mapped; only those for which a resident page exists with the 1706176771Sraj * corresponding offset from m_start are mapped. 1707176771Sraj */ 1708176771Srajstatic void 1709176771Srajmmu_booke_enter_object(mmu_t mmu, pmap_t pmap, vm_offset_t start, 1710176771Sraj vm_offset_t end, vm_page_t m_start, vm_prot_t prot) 1711176771Sraj{ 1712176771Sraj vm_page_t m; 1713176771Sraj vm_pindex_t diff, psize; 1714176771Sraj 1715250884Sattilio VM_OBJECT_ASSERT_LOCKED(m_start->object); 1716250884Sattilio 1717176771Sraj psize = atop(end - start); 1718176771Sraj m = m_start; 1719242535Salc rw_wlock(&pvh_global_lock); 1720176771Sraj PMAP_LOCK(pmap); 1721176771Sraj while (m != NULL && (diff = m->pindex - m_start->pindex) < psize) { 1722187151Sraj mmu_booke_enter_locked(mmu, pmap, start + ptoa(diff), m, 1723187151Sraj prot & (VM_PROT_READ | VM_PROT_EXECUTE), FALSE); 1724176771Sraj m = TAILQ_NEXT(m, listq); 1725176771Sraj } 1726242535Salc rw_wunlock(&pvh_global_lock); 1727176771Sraj PMAP_UNLOCK(pmap); 1728176771Sraj} 1729176771Sraj 1730176771Srajstatic void 1731176771Srajmmu_booke_enter_quick(mmu_t mmu, pmap_t pmap, vm_offset_t va, vm_page_t m, 1732176771Sraj vm_prot_t prot) 1733176771Sraj{ 1734176771Sraj 1735242535Salc rw_wlock(&pvh_global_lock); 1736176771Sraj PMAP_LOCK(pmap); 1737176771Sraj mmu_booke_enter_locked(mmu, pmap, va, m, 1738176771Sraj prot & (VM_PROT_READ | VM_PROT_EXECUTE), FALSE); 1739242535Salc rw_wunlock(&pvh_global_lock); 1740176771Sraj PMAP_UNLOCK(pmap); 1741176771Sraj} 1742176771Sraj 1743176771Sraj/* 1744176771Sraj * Remove the given range of addresses from the specified map. 1745176771Sraj * 1746176771Sraj * It is assumed that the start and end are properly rounded to the page size. 1747176771Sraj */ 1748176771Srajstatic void 1749176771Srajmmu_booke_remove(mmu_t mmu, pmap_t pmap, vm_offset_t va, vm_offset_t endva) 1750176771Sraj{ 1751176771Sraj pte_t *pte; 1752187151Sraj uint8_t hold_flag; 1753176771Sraj 1754176771Sraj int su = (pmap == kernel_pmap); 1755176771Sraj 1756176771Sraj //debugf("mmu_booke_remove: s (su = %d pmap=0x%08x tid=%d va=0x%08x endva=0x%08x)\n", 1757176771Sraj // su, (u_int32_t)pmap, pmap->pm_tid, va, endva); 1758176771Sraj 1759176771Sraj if (su) { 1760187151Sraj KASSERT(((va >= virtual_avail) && 1761187151Sraj (va <= VM_MAX_KERNEL_ADDRESS)), 1762187151Sraj ("mmu_booke_remove: kernel pmap, non kernel va")); 1763176771Sraj } else { 1764176771Sraj KASSERT((va <= VM_MAXUSER_ADDRESS), 1765187151Sraj ("mmu_booke_remove: user pmap, non user va")); 1766176771Sraj } 1767176771Sraj 1768176771Sraj if (PMAP_REMOVE_DONE(pmap)) { 1769176771Sraj //debugf("mmu_booke_remove: e (empty)\n"); 1770176771Sraj return; 1771176771Sraj } 1772176771Sraj 1773176771Sraj hold_flag = PTBL_HOLD_FLAG(pmap); 1774176771Sraj //debugf("mmu_booke_remove: hold_flag = %d\n", hold_flag); 1775176771Sraj 1776242535Salc rw_wlock(&pvh_global_lock); 1777176771Sraj PMAP_LOCK(pmap); 1778176771Sraj for (; va < endva; va += PAGE_SIZE) { 1779176771Sraj pte = pte_find(mmu, pmap, va); 1780187149Sraj if ((pte != NULL) && PTE_ISVALID(pte)) 1781176771Sraj pte_remove(mmu, pmap, va, hold_flag); 1782176771Sraj } 1783176771Sraj PMAP_UNLOCK(pmap); 1784242535Salc rw_wunlock(&pvh_global_lock); 1785176771Sraj 1786176771Sraj //debugf("mmu_booke_remove: e\n"); 1787176771Sraj} 1788176771Sraj 1789176771Sraj/* 1790176771Sraj * Remove physical page from all pmaps in which it resides. 
1791176771Sraj */
1792176771Srajstatic void
1793176771Srajmmu_booke_remove_all(mmu_t mmu, vm_page_t m)
1794176771Sraj{
1795176771Sraj	pv_entry_t pv, pvn;
1796187151Sraj	uint8_t hold_flag;
1797176771Sraj
1798242535Salc	rw_wlock(&pvh_global_lock);
1799176771Sraj	for (pv = TAILQ_FIRST(&m->md.pv_list); pv != NULL; pv = pvn) {
1800176771Sraj		pvn = TAILQ_NEXT(pv, pv_link);
1801176771Sraj
1802176771Sraj		PMAP_LOCK(pv->pv_pmap);
1803176771Sraj		hold_flag = PTBL_HOLD_FLAG(pv->pv_pmap);
1804176771Sraj		pte_remove(mmu, pv->pv_pmap, pv->pv_va, hold_flag);
1805176771Sraj		PMAP_UNLOCK(pv->pv_pmap);
1806176771Sraj	}
1807225418Skib	vm_page_aflag_clear(m, PGA_WRITEABLE);
1808242535Salc	rw_wunlock(&pvh_global_lock);
1809176771Sraj}
1810176771Sraj
1811176771Sraj/*
1812176771Sraj * Map a range of physical addresses into kernel virtual address space.
1813176771Sraj */
1814176771Srajstatic vm_offset_t
1815235936Srajmmu_booke_map(mmu_t mmu, vm_offset_t *virt, vm_paddr_t pa_start,
1816235936Sraj    vm_paddr_t pa_end, int prot)
1817176771Sraj{
1818176771Sraj	vm_offset_t sva = *virt;
1819176771Sraj	vm_offset_t va = sva;
1820176771Sraj
1821176771Sraj	//debugf("mmu_booke_map: s (sva = 0x%08x pa_start = 0x%08x pa_end = 0x%08x)\n",
1822176771Sraj	//    sva, pa_start, pa_end);
1823176771Sraj
1824176771Sraj	while (pa_start < pa_end) {
1825176771Sraj		mmu_booke_kenter(mmu, va, pa_start);
1826176771Sraj		va += PAGE_SIZE;
1827176771Sraj		pa_start += PAGE_SIZE;
1828176771Sraj	}
1829176771Sraj	*virt = va;
1830176771Sraj
1831176771Sraj	//debugf("mmu_booke_map: e (va = 0x%08x)\n", va);
1832176771Sraj	return (sva);
1833176771Sraj}
1834176771Sraj
1835176771Sraj/*
1836176771Sraj * The pmap must be activated before its address space can be accessed in any
1837176771Sraj * way.
1838176771Sraj */
1839176771Srajstatic void
1840176771Srajmmu_booke_activate(mmu_t mmu, struct thread *td)
1841176771Sraj{
1842176771Sraj	pmap_t pmap;
1843223758Sattilio	u_int cpuid;
1844176771Sraj
1845176771Sraj	pmap = &td->td_proc->p_vmspace->vm_pmap;
1846176771Sraj
1847187149Sraj	CTR5(KTR_PMAP, "%s: s (td = %p, proc = '%s', id = %d, pmap = 0x%08x)",
1848187149Sraj	    __func__, td, td->td_proc->p_comm, td->td_proc->p_pid, pmap);
1849176771Sraj
1850176771Sraj	KASSERT((pmap != kernel_pmap), ("mmu_booke_activate: kernel_pmap!"));
1851176771Sraj
1852176771Sraj	mtx_lock_spin(&sched_lock);
1853176771Sraj
1854223758Sattilio	cpuid = PCPU_GET(cpuid);
1855223758Sattilio	CPU_SET_ATOMIC(cpuid, &pmap->pm_active);
1856176771Sraj	PCPU_SET(curpmap, pmap);
1857187149Sraj
1858223758Sattilio	if (pmap->pm_tid[cpuid] == TID_NONE)
1859176771Sraj		tid_alloc(pmap);
1860176771Sraj
1861176771Sraj	/* Load PID0 register with pmap tid value. */
1862223758Sattilio	mtspr(SPR_PID0, pmap->pm_tid[cpuid]);
1863187149Sraj	__asm __volatile("isync");
1864176771Sraj
1865176771Sraj	mtx_unlock_spin(&sched_lock);
1866176771Sraj
1867187149Sraj	CTR3(KTR_PMAP, "%s: e (tid = %d for '%s')", __func__,
1868187149Sraj	    pmap->pm_tid[PCPU_GET(cpuid)], td->td_proc->p_comm);
1869176771Sraj}
1870176771Sraj
1871176771Sraj/*
1872176771Sraj * Deactivate the specified process's address space.
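 *
 * Only this CPU's bit in pm_active and curpmap are cleared; the TID
 * stays allocated to the pmap, so a later mmu_booke_activate() on this
 * CPU can reuse it without calling tid_alloc() again.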
1873176771Sraj */ 1874176771Srajstatic void 1875176771Srajmmu_booke_deactivate(mmu_t mmu, struct thread *td) 1876176771Sraj{ 1877176771Sraj pmap_t pmap; 1878176771Sraj 1879176771Sraj pmap = &td->td_proc->p_vmspace->vm_pmap; 1880187149Sraj 1881187149Sraj CTR5(KTR_PMAP, "%s: td=%p, proc = '%s', id = %d, pmap = 0x%08x", 1882187149Sraj __func__, td, td->td_proc->p_comm, td->td_proc->p_pid, pmap); 1883187149Sraj 1884223758Sattilio CPU_CLR_ATOMIC(PCPU_GET(cpuid), &pmap->pm_active); 1885176771Sraj PCPU_SET(curpmap, NULL); 1886176771Sraj} 1887176771Sraj 1888176771Sraj/* 1889176771Sraj * Copy the range specified by src_addr/len 1890176771Sraj * from the source map to the range dst_addr/len 1891176771Sraj * in the destination map. 1892176771Sraj * 1893176771Sraj * This routine is only advisory and need not do anything. 1894176771Sraj */ 1895176771Srajstatic void 1896194101Srajmmu_booke_copy(mmu_t mmu, pmap_t dst_pmap, pmap_t src_pmap, 1897194101Sraj vm_offset_t dst_addr, vm_size_t len, vm_offset_t src_addr) 1898176771Sraj{ 1899176771Sraj 1900176771Sraj} 1901176771Sraj 1902176771Sraj/* 1903176771Sraj * Set the physical protection on the specified range of this map as requested. 1904176771Sraj */ 1905176771Srajstatic void 1906176771Srajmmu_booke_protect(mmu_t mmu, pmap_t pmap, vm_offset_t sva, vm_offset_t eva, 1907176771Sraj vm_prot_t prot) 1908176771Sraj{ 1909176771Sraj vm_offset_t va; 1910176771Sraj vm_page_t m; 1911176771Sraj pte_t *pte; 1912176771Sraj 1913176771Sraj if ((prot & VM_PROT_READ) == VM_PROT_NONE) { 1914176771Sraj mmu_booke_remove(mmu, pmap, sva, eva); 1915176771Sraj return; 1916176771Sraj } 1917176771Sraj 1918176771Sraj if (prot & VM_PROT_WRITE) 1919176771Sraj return; 1920176771Sraj 1921176771Sraj PMAP_LOCK(pmap); 1922176771Sraj for (va = sva; va < eva; va += PAGE_SIZE) { 1923176771Sraj if ((pte = pte_find(mmu, pmap, va)) != NULL) { 1924176771Sraj if (PTE_ISVALID(pte)) { 1925176771Sraj m = PHYS_TO_VM_PAGE(PTE_PA(pte)); 1926176771Sraj 1927187149Sraj mtx_lock_spin(&tlbivax_mutex); 1928192532Sraj tlb_miss_lock(); 1929187149Sraj 1930176771Sraj /* Handle modified pages. */ 1931207437Salc if (PTE_ISMODIFIED(pte) && PTE_ISMANAGED(pte)) 1932178626Smarcel vm_page_dirty(m); 1933176771Sraj 1934187149Sraj tlb0_flush_entry(va); 1935207437Salc pte->flags &= ~(PTE_UW | PTE_SW | PTE_MODIFIED); 1936187149Sraj 1937192532Sraj tlb_miss_unlock(); 1938187149Sraj mtx_unlock_spin(&tlbivax_mutex); 1939176771Sraj } 1940176771Sraj } 1941176771Sraj } 1942176771Sraj PMAP_UNLOCK(pmap); 1943176771Sraj} 1944176771Sraj 1945176771Sraj/* 1946176771Sraj * Clear the write and modified bits in each of the given page's mappings. 1947176771Sraj */ 1948176771Srajstatic void 1949176771Srajmmu_booke_remove_write(mmu_t mmu, vm_page_t m) 1950176771Sraj{ 1951176771Sraj pv_entry_t pv; 1952176771Sraj pte_t *pte; 1953176771Sraj 1954224746Skib KASSERT((m->oflags & VPO_UNMANAGED) == 0, 1955208175Salc ("mmu_booke_remove_write: page %p is not managed", m)); 1956208175Salc 1957208175Salc /* 1958254138Sattilio * If the page is not exclusive busied, then PGA_WRITEABLE cannot be 1959254138Sattilio * set by another thread while the object is locked. Thus, 1960254138Sattilio * if PGA_WRITEABLE is clear, no page table entries need updating. 
1961208175Salc */ 1962248084Sattilio VM_OBJECT_ASSERT_WLOCKED(m->object); 1963254138Sattilio if (!vm_page_xbusied(m) && (m->aflags & PGA_WRITEABLE) == 0) 1964176771Sraj return; 1965242535Salc rw_wlock(&pvh_global_lock); 1966176771Sraj TAILQ_FOREACH(pv, &m->md.pv_list, pv_link) { 1967176771Sraj PMAP_LOCK(pv->pv_pmap); 1968176771Sraj if ((pte = pte_find(mmu, pv->pv_pmap, pv->pv_va)) != NULL) { 1969176771Sraj if (PTE_ISVALID(pte)) { 1970176771Sraj m = PHYS_TO_VM_PAGE(PTE_PA(pte)); 1971176771Sraj 1972187149Sraj mtx_lock_spin(&tlbivax_mutex); 1973192532Sraj tlb_miss_lock(); 1974187149Sraj 1975176771Sraj /* Handle modified pages. */ 1976178626Smarcel if (PTE_ISMODIFIED(pte)) 1977178626Smarcel vm_page_dirty(m); 1978176771Sraj 1979176771Sraj /* Flush mapping from TLB0. */ 1980207437Salc pte->flags &= ~(PTE_UW | PTE_SW | PTE_MODIFIED); 1981187149Sraj 1982192532Sraj tlb_miss_unlock(); 1983187149Sraj mtx_unlock_spin(&tlbivax_mutex); 1984176771Sraj } 1985176771Sraj } 1986176771Sraj PMAP_UNLOCK(pv->pv_pmap); 1987176771Sraj } 1988225418Skib vm_page_aflag_clear(m, PGA_WRITEABLE); 1989242535Salc rw_wunlock(&pvh_global_lock); 1990176771Sraj} 1991176771Sraj 1992198341Smarcelstatic void 1993198341Smarcelmmu_booke_sync_icache(mmu_t mmu, pmap_t pm, vm_offset_t va, vm_size_t sz) 1994176771Sraj{ 1995176771Sraj pte_t *pte; 1996198341Smarcel pmap_t pmap; 1997198341Smarcel vm_page_t m; 1998198341Smarcel vm_offset_t addr; 1999198341Smarcel vm_paddr_t pa; 2000198341Smarcel int active, valid; 2001198341Smarcel 2002198341Smarcel va = trunc_page(va); 2003198341Smarcel sz = round_page(sz); 2004176771Sraj 2005242535Salc rw_wlock(&pvh_global_lock); 2006198341Smarcel pmap = PCPU_GET(curpmap); 2007198341Smarcel active = (pm == kernel_pmap || pm == pmap) ? 1 : 0; 2008198341Smarcel while (sz > 0) { 2009198341Smarcel PMAP_LOCK(pm); 2010198341Smarcel pte = pte_find(mmu, pm, va); 2011198341Smarcel valid = (pte != NULL && PTE_ISVALID(pte)) ? 1 : 0; 2012198341Smarcel if (valid) 2013198341Smarcel pa = PTE_PA(pte); 2014198341Smarcel PMAP_UNLOCK(pm); 2015198341Smarcel if (valid) { 2016198341Smarcel if (!active) { 2017198341Smarcel /* Create a mapping in the active pmap. */ 2018198341Smarcel addr = 0; 2019198341Smarcel m = PHYS_TO_VM_PAGE(pa); 2020198341Smarcel PMAP_LOCK(pmap); 2021198341Smarcel pte_enter(mmu, pmap, m, addr, 2022198341Smarcel PTE_SR | PTE_VALID | PTE_UR); 2023198341Smarcel __syncicache((void *)addr, PAGE_SIZE); 2024198341Smarcel pte_remove(mmu, pmap, addr, PTBL_UNHOLD); 2025198341Smarcel PMAP_UNLOCK(pmap); 2026198341Smarcel } else 2027198341Smarcel __syncicache((void *)va, PAGE_SIZE); 2028198341Smarcel } 2029198341Smarcel va += PAGE_SIZE; 2030198341Smarcel sz -= PAGE_SIZE; 2031176771Sraj } 2032242535Salc rw_wunlock(&pvh_global_lock); 2033176771Sraj} 2034176771Sraj 2035176771Sraj/* 2036176771Sraj * Atomically extract and hold the physical page with the given 2037176771Sraj * pmap and virtual address pair if that mapping permits the given 2038176771Sraj * protection. 
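 *
 * The retry loop below relies on vm_page_pa_tryrelock(): when taking
 * the page lock for the PTE's physical address required dropping the
 * pmap lock, it returns non-zero and the PTE is looked up again, since
 * the mapping may have changed in the meantime.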
2039176771Sraj */
2040176771Srajstatic vm_page_t
2041176771Srajmmu_booke_extract_and_hold(mmu_t mmu, pmap_t pmap, vm_offset_t va,
2042176771Sraj    vm_prot_t prot)
2043176771Sraj{
2044176771Sraj	pte_t *pte;
2045176771Sraj	vm_page_t m;
2046187151Sraj	uint32_t pte_wbit;
2047207410Skmacy	vm_paddr_t pa;
2048207410Skmacy
2049176771Sraj	m = NULL;
2050207410Skmacy	pa = 0;
2051176771Sraj	PMAP_LOCK(pmap);
2052207410Skmacyretry:
2053176771Sraj	pte = pte_find(mmu, pmap, va);
2054176771Sraj	if ((pte != NULL) && PTE_ISVALID(pte)) {
2055176771Sraj		if (pmap == kernel_pmap)
2056176771Sraj			pte_wbit = PTE_SW;
2057176771Sraj		else
2058176771Sraj			pte_wbit = PTE_UW;
2059176771Sraj
2060176771Sraj		if ((pte->flags & pte_wbit) || ((prot & VM_PROT_WRITE) == 0)) {
2061207410Skmacy			if (vm_page_pa_tryrelock(pmap, PTE_PA(pte), &pa))
2062207410Skmacy				goto retry;
2063176771Sraj			m = PHYS_TO_VM_PAGE(PTE_PA(pte));
2064176771Sraj			vm_page_hold(m);
2065176771Sraj		}
2066176771Sraj	}
2067176771Sraj
2068207410Skmacy	PA_UNLOCK_COND(pa);
2069176771Sraj	PMAP_UNLOCK(pmap);
2070176771Sraj	return (m);
2071176771Sraj}
2072176771Sraj
2073176771Sraj/*
2074176771Sraj * Initialize a vm_page's machine-dependent fields.
2075176771Sraj */
2076176771Srajstatic void
2077176771Srajmmu_booke_page_init(mmu_t mmu, vm_page_t m)
2078176771Sraj{
2079176771Sraj
2080176771Sraj	TAILQ_INIT(&m->md.pv_list);
2081176771Sraj}
2082176771Sraj
2083176771Sraj/*
2084176771Sraj * mmu_booke_zero_page_area zeros the specified hardware page by
2085176771Sraj * mapping it into virtual memory and using bzero to clear
2086176771Sraj * its contents.
2087176771Sraj *
2088176771Sraj * off and size must reside within a single page.
2089176771Sraj */
2090176771Srajstatic void
2091176771Srajmmu_booke_zero_page_area(mmu_t mmu, vm_page_t m, int off, int size)
2092176771Sraj{
2093176771Sraj	vm_offset_t va;
2094176771Sraj
2095187151Sraj	/* XXX KASSERT off and size are within a single page? */
2096176771Sraj
2097176771Sraj	mtx_lock(&zero_page_mutex);
2098176771Sraj	va = zero_page_va;
2099176771Sraj
2100176771Sraj	mmu_booke_kenter(mmu, va, VM_PAGE_TO_PHYS(m));
2101176771Sraj	bzero((caddr_t)va + off, size);
2102176771Sraj	mmu_booke_kremove(mmu, va);
2103176771Sraj
2104176771Sraj	mtx_unlock(&zero_page_mutex);
2105176771Sraj}
2106176771Sraj
2107176771Sraj/*
2108176771Sraj * mmu_booke_zero_page zeros the specified hardware page.
2109176771Sraj */
2110176771Srajstatic void
2111176771Srajmmu_booke_zero_page(mmu_t mmu, vm_page_t m)
2112176771Sraj{
2113176771Sraj
2114176771Sraj	mmu_booke_zero_page_area(mmu, m, 0, PAGE_SIZE);
2115176771Sraj}
2116176771Sraj
2117176771Sraj/*
2118176771Sraj * mmu_booke_copy_page copies the specified (machine independent) page by
2119176771Sraj * mapping the page into virtual memory and using memcpy to copy the page,
2120176771Sraj * one machine dependent page at a time.
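 *
 * The source and destination pages are mapped at the reserved
 * copy_page_src_va and copy_page_dst_va KVA slots, serialized by
 * copy_page_mutex, so only one page copy can be in progress at a time.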
2121176771Sraj */ 2122176771Srajstatic void 2123176771Srajmmu_booke_copy_page(mmu_t mmu, vm_page_t sm, vm_page_t dm) 2124176771Sraj{ 2125176771Sraj vm_offset_t sva, dva; 2126176771Sraj 2127176771Sraj sva = copy_page_src_va; 2128176771Sraj dva = copy_page_dst_va; 2129176771Sraj 2130187149Sraj mtx_lock(©_page_mutex); 2131176771Sraj mmu_booke_kenter(mmu, sva, VM_PAGE_TO_PHYS(sm)); 2132176771Sraj mmu_booke_kenter(mmu, dva, VM_PAGE_TO_PHYS(dm)); 2133176771Sraj memcpy((caddr_t)dva, (caddr_t)sva, PAGE_SIZE); 2134176771Sraj mmu_booke_kremove(mmu, dva); 2135176771Sraj mmu_booke_kremove(mmu, sva); 2136176771Sraj mtx_unlock(©_page_mutex); 2137176771Sraj} 2138176771Sraj 2139248280Skibstatic inline void 2140248280Skibmmu_booke_copy_pages(mmu_t mmu, vm_page_t *ma, vm_offset_t a_offset, 2141248280Skib vm_page_t *mb, vm_offset_t b_offset, int xfersize) 2142248280Skib{ 2143248280Skib void *a_cp, *b_cp; 2144248280Skib vm_offset_t a_pg_offset, b_pg_offset; 2145248280Skib int cnt; 2146248280Skib 2147248280Skib mtx_lock(©_page_mutex); 2148248280Skib while (xfersize > 0) { 2149248280Skib a_pg_offset = a_offset & PAGE_MASK; 2150248280Skib cnt = min(xfersize, PAGE_SIZE - a_pg_offset); 2151248280Skib mmu_booke_kenter(mmu, copy_page_src_va, 2152248280Skib VM_PAGE_TO_PHYS(ma[a_offset >> PAGE_SHIFT])); 2153248280Skib a_cp = (char *)copy_page_src_va + a_pg_offset; 2154248280Skib b_pg_offset = b_offset & PAGE_MASK; 2155248280Skib cnt = min(cnt, PAGE_SIZE - b_pg_offset); 2156248280Skib mmu_booke_kenter(mmu, copy_page_dst_va, 2157248280Skib VM_PAGE_TO_PHYS(mb[b_offset >> PAGE_SHIFT])); 2158248280Skib b_cp = (char *)copy_page_dst_va + b_pg_offset; 2159248280Skib bcopy(a_cp, b_cp, cnt); 2160248280Skib mmu_booke_kremove(mmu, copy_page_dst_va); 2161248280Skib mmu_booke_kremove(mmu, copy_page_src_va); 2162248280Skib a_offset += cnt; 2163248280Skib b_offset += cnt; 2164248280Skib xfersize -= cnt; 2165248280Skib } 2166248280Skib mtx_unlock(©_page_mutex); 2167248280Skib} 2168248280Skib 2169176771Sraj/* 2170176771Sraj * mmu_booke_zero_page_idle zeros the specified hardware page by mapping it 2171176771Sraj * into virtual memory and using bzero to clear its contents. This is intended 2172176771Sraj * to be called from the vm_pagezero process only and outside of Giant. No 2173176771Sraj * lock is required. 2174176771Sraj */ 2175176771Srajstatic void 2176176771Srajmmu_booke_zero_page_idle(mmu_t mmu, vm_page_t m) 2177176771Sraj{ 2178176771Sraj vm_offset_t va; 2179176771Sraj 2180176771Sraj va = zero_page_idle_va; 2181176771Sraj mmu_booke_kenter(mmu, va, VM_PAGE_TO_PHYS(m)); 2182176771Sraj bzero((caddr_t)va, PAGE_SIZE); 2183176771Sraj mmu_booke_kremove(mmu, va); 2184176771Sraj} 2185176771Sraj 2186176771Sraj/* 2187176771Sraj * Return whether or not the specified physical page was modified 2188176771Sraj * in any of physical maps. 2189176771Sraj */ 2190176771Srajstatic boolean_t 2191176771Srajmmu_booke_is_modified(mmu_t mmu, vm_page_t m) 2192176771Sraj{ 2193176771Sraj pte_t *pte; 2194176771Sraj pv_entry_t pv; 2195208504Salc boolean_t rv; 2196176771Sraj 2197224746Skib KASSERT((m->oflags & VPO_UNMANAGED) == 0, 2198208504Salc ("mmu_booke_is_modified: page %p is not managed", m)); 2199208504Salc rv = FALSE; 2200176771Sraj 2201208504Salc /* 2202254138Sattilio * If the page is not exclusive busied, then PGA_WRITEABLE cannot be 2203225418Skib * concurrently set while the object is locked. Thus, if PGA_WRITEABLE 2204208504Salc * is clear, no PTEs can be modified. 
2205208504Salc	 */
2206248084Sattilio	VM_OBJECT_ASSERT_WLOCKED(m->object);
2207254138Sattilio	if (!vm_page_xbusied(m) && (m->aflags & PGA_WRITEABLE) == 0)
2208208504Salc		return (rv);
2209242535Salc	rw_wlock(&pvh_global_lock);
2210176771Sraj	TAILQ_FOREACH(pv, &m->md.pv_list, pv_link) {
2211176771Sraj		PMAP_LOCK(pv->pv_pmap);
2212208504Salc		if ((pte = pte_find(mmu, pv->pv_pmap, pv->pv_va)) != NULL &&
2213208504Salc		    PTE_ISVALID(pte)) {
2214208504Salc			if (PTE_ISMODIFIED(pte))
2215208504Salc				rv = TRUE;
2216176771Sraj		}
2217176771Sraj		PMAP_UNLOCK(pv->pv_pmap);
2218208504Salc		if (rv)
2219208504Salc			break;
2220176771Sraj	}
2221242535Salc	rw_wunlock(&pvh_global_lock);
2222208504Salc	return (rv);
2223176771Sraj}
2224176771Sraj
2225176771Sraj/*
2226187151Sraj * Return whether or not the specified virtual address is eligible
2227176771Sraj * for prefault.
2228176771Sraj */
2229176771Srajstatic boolean_t
2230176771Srajmmu_booke_is_prefaultable(mmu_t mmu, pmap_t pmap, vm_offset_t addr)
2231176771Sraj{
2232176771Sraj
2233176771Sraj	return (FALSE);
2234176771Sraj}
2235176771Sraj
2236176771Sraj/*
2237207155Salc * Return whether or not the specified physical page was referenced
2238207155Salc * in any physical maps.
2239207155Salc */
2240207155Salcstatic boolean_t
2241207155Salcmmu_booke_is_referenced(mmu_t mmu, vm_page_t m)
2242207155Salc{
2243207155Salc	pte_t *pte;
2244207155Salc	pv_entry_t pv;
2245207155Salc	boolean_t rv;
2246207155Salc
2247224746Skib	KASSERT((m->oflags & VPO_UNMANAGED) == 0,
2248208574Salc	    ("mmu_booke_is_referenced: page %p is not managed", m));
2249207155Salc	rv = FALSE;
2250242535Salc	rw_wlock(&pvh_global_lock);
2251207155Salc	TAILQ_FOREACH(pv, &m->md.pv_list, pv_link) {
2252207155Salc		PMAP_LOCK(pv->pv_pmap);
2253207155Salc		if ((pte = pte_find(mmu, pv->pv_pmap, pv->pv_va)) != NULL &&
2254208574Salc		    PTE_ISVALID(pte)) {
2255208574Salc			if (PTE_ISREFERENCED(pte))
2256208574Salc				rv = TRUE;
2257208574Salc		}
2258207155Salc		PMAP_UNLOCK(pv->pv_pmap);
2259207155Salc		if (rv)
2260207155Salc			break;
2261207155Salc	}
2262242535Salc	rw_wunlock(&pvh_global_lock);
2263207155Salc	return (rv);
2264207155Salc}
2265207155Salc
2266207155Salc/*
2267176771Sraj * Clear the modify bits on the specified physical page.
2268176771Sraj */
2269176771Srajstatic void
2270176771Srajmmu_booke_clear_modify(mmu_t mmu, vm_page_t m)
2271176771Sraj{
2272176771Sraj	pte_t *pte;
2273176771Sraj	pv_entry_t pv;
2274176771Sraj
2275224746Skib	KASSERT((m->oflags & VPO_UNMANAGED) == 0,
2276208504Salc	    ("mmu_booke_clear_modify: page %p is not managed", m));
2277248084Sattilio	VM_OBJECT_ASSERT_WLOCKED(m->object);
2278254138Sattilio	KASSERT(!vm_page_xbusied(m),
2279254138Sattilio	    ("mmu_booke_clear_modify: page %p is exclusive busied", m));
2280208504Salc
2281208504Salc	/*
2282225418Skib	 * If the page is not PGA_WRITEABLE, then no PTEs can be modified.
2283208504Salc	 * If the object containing the page is locked and the page is not
2284254138Sattilio	 * exclusive busied, then PGA_WRITEABLE cannot be concurrently set.
2285208504Salc */ 2286225418Skib if ((m->aflags & PGA_WRITEABLE) == 0) 2287176771Sraj return; 2288242535Salc rw_wlock(&pvh_global_lock); 2289176771Sraj TAILQ_FOREACH(pv, &m->md.pv_list, pv_link) { 2290176771Sraj PMAP_LOCK(pv->pv_pmap); 2291208504Salc if ((pte = pte_find(mmu, pv->pv_pmap, pv->pv_va)) != NULL && 2292208504Salc PTE_ISVALID(pte)) { 2293187149Sraj mtx_lock_spin(&tlbivax_mutex); 2294192532Sraj tlb_miss_lock(); 2295187149Sraj 2296176771Sraj if (pte->flags & (PTE_SW | PTE_UW | PTE_MODIFIED)) { 2297187149Sraj tlb0_flush_entry(pv->pv_va); 2298176771Sraj pte->flags &= ~(PTE_SW | PTE_UW | PTE_MODIFIED | 2299176771Sraj PTE_REFERENCED); 2300176771Sraj } 2301187149Sraj 2302192532Sraj tlb_miss_unlock(); 2303187149Sraj mtx_unlock_spin(&tlbivax_mutex); 2304176771Sraj } 2305176771Sraj PMAP_UNLOCK(pv->pv_pmap); 2306176771Sraj } 2307242535Salc rw_wunlock(&pvh_global_lock); 2308176771Sraj} 2309176771Sraj 2310176771Sraj/* 2311176771Sraj * Return a count of reference bits for a page, clearing those bits. 2312176771Sraj * It is not necessary for every reference bit to be cleared, but it 2313176771Sraj * is necessary that 0 only be returned when there are truly no 2314176771Sraj * reference bits set. 2315176771Sraj * 2316176771Sraj * XXX: The exact number of bits to check and clear is a matter that 2317176771Sraj * should be tested and standardized at some point in the future for 2318176771Sraj * optimal aging of shared pages. 2319176771Sraj */ 2320176771Srajstatic int 2321176771Srajmmu_booke_ts_referenced(mmu_t mmu, vm_page_t m) 2322176771Sraj{ 2323176771Sraj pte_t *pte; 2324176771Sraj pv_entry_t pv; 2325176771Sraj int count; 2326176771Sraj 2327224746Skib KASSERT((m->oflags & VPO_UNMANAGED) == 0, 2328208990Salc ("mmu_booke_ts_referenced: page %p is not managed", m)); 2329176771Sraj count = 0; 2330242535Salc rw_wlock(&pvh_global_lock); 2331176771Sraj TAILQ_FOREACH(pv, &m->md.pv_list, pv_link) { 2332176771Sraj PMAP_LOCK(pv->pv_pmap); 2333208990Salc if ((pte = pte_find(mmu, pv->pv_pmap, pv->pv_va)) != NULL && 2334208990Salc PTE_ISVALID(pte)) { 2335176771Sraj if (PTE_ISREFERENCED(pte)) { 2336187149Sraj mtx_lock_spin(&tlbivax_mutex); 2337192532Sraj tlb_miss_lock(); 2338187149Sraj 2339187149Sraj tlb0_flush_entry(pv->pv_va); 2340176771Sraj pte->flags &= ~PTE_REFERENCED; 2341176771Sraj 2342192532Sraj tlb_miss_unlock(); 2343187149Sraj mtx_unlock_spin(&tlbivax_mutex); 2344187149Sraj 2345176771Sraj if (++count > 4) { 2346176771Sraj PMAP_UNLOCK(pv->pv_pmap); 2347176771Sraj break; 2348176771Sraj } 2349176771Sraj } 2350176771Sraj } 2351176771Sraj PMAP_UNLOCK(pv->pv_pmap); 2352176771Sraj } 2353242535Salc rw_wunlock(&pvh_global_lock); 2354176771Sraj return (count); 2355176771Sraj} 2356176771Sraj 2357176771Sraj/* 2358176771Sraj * Change wiring attribute for a map/virtual-address pair. 
2359176771Sraj */ 2360176771Srajstatic void 2361176771Srajmmu_booke_change_wiring(mmu_t mmu, pmap_t pmap, vm_offset_t va, boolean_t wired) 2362176771Sraj{ 2363201758Smbr pte_t *pte; 2364176771Sraj 2365176771Sraj PMAP_LOCK(pmap); 2366176771Sraj if ((pte = pte_find(mmu, pmap, va)) != NULL) { 2367176771Sraj if (wired) { 2368176771Sraj if (!PTE_ISWIRED(pte)) { 2369176771Sraj pte->flags |= PTE_WIRED; 2370176771Sraj pmap->pm_stats.wired_count++; 2371176771Sraj } 2372176771Sraj } else { 2373176771Sraj if (PTE_ISWIRED(pte)) { 2374176771Sraj pte->flags &= ~PTE_WIRED; 2375176771Sraj pmap->pm_stats.wired_count--; 2376176771Sraj } 2377176771Sraj } 2378176771Sraj } 2379176771Sraj PMAP_UNLOCK(pmap); 2380176771Sraj} 2381176771Sraj 2382176771Sraj/* 2383176771Sraj * Return true if the pmap's pv is one of the first 16 pvs linked to from this 2384176771Sraj * page. This count may be changed upwards or downwards in the future; it is 2385176771Sraj * only necessary that true be returned for a small subset of pmaps for proper 2386176771Sraj * page aging. 2387176771Sraj */ 2388176771Srajstatic boolean_t 2389176771Srajmmu_booke_page_exists_quick(mmu_t mmu, pmap_t pmap, vm_page_t m) 2390176771Sraj{ 2391176771Sraj pv_entry_t pv; 2392176771Sraj int loops; 2393208990Salc boolean_t rv; 2394176771Sraj 2395224746Skib KASSERT((m->oflags & VPO_UNMANAGED) == 0, 2396208990Salc ("mmu_booke_page_exists_quick: page %p is not managed", m)); 2397176771Sraj loops = 0; 2398208990Salc rv = FALSE; 2399242535Salc rw_wlock(&pvh_global_lock); 2400176771Sraj TAILQ_FOREACH(pv, &m->md.pv_list, pv_link) { 2401208990Salc if (pv->pv_pmap == pmap) { 2402208990Salc rv = TRUE; 2403208990Salc break; 2404208990Salc } 2405176771Sraj if (++loops >= 16) 2406176771Sraj break; 2407176771Sraj } 2408242535Salc rw_wunlock(&pvh_global_lock); 2409208990Salc return (rv); 2410176771Sraj} 2411176771Sraj 2412176771Sraj/* 2413176771Sraj * Return the number of managed mappings to the given physical page that are 2414176771Sraj * wired. 2415176771Sraj */ 2416176771Srajstatic int 2417176771Srajmmu_booke_page_wired_mappings(mmu_t mmu, vm_page_t m) 2418176771Sraj{ 2419176771Sraj pv_entry_t pv; 2420176771Sraj pte_t *pte; 2421176771Sraj int count = 0; 2422176771Sraj 2423224746Skib if ((m->oflags & VPO_UNMANAGED) != 0) 2424176771Sraj return (count); 2425242535Salc rw_wlock(&pvh_global_lock); 2426176771Sraj TAILQ_FOREACH(pv, &m->md.pv_list, pv_link) { 2427176771Sraj PMAP_LOCK(pv->pv_pmap); 2428176771Sraj if ((pte = pte_find(mmu, pv->pv_pmap, pv->pv_va)) != NULL) 2429176771Sraj if (PTE_ISVALID(pte) && PTE_ISWIRED(pte)) 2430176771Sraj count++; 2431176771Sraj PMAP_UNLOCK(pv->pv_pmap); 2432176771Sraj } 2433242535Salc rw_wunlock(&pvh_global_lock); 2434176771Sraj return (count); 2435176771Sraj} 2436176771Sraj 2437176771Srajstatic int 2438235936Srajmmu_booke_dev_direct_mapped(mmu_t mmu, vm_paddr_t pa, vm_size_t size) 2439176771Sraj{ 2440176771Sraj int i; 2441176771Sraj vm_offset_t va; 2442176771Sraj 2443176771Sraj /* 2444176771Sraj * This currently does not work for entries that 2445176771Sraj * overlap TLB1 entries. 
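	 * (A return value of 0 means the whole range is encompassed by a
	 * single TLB1 entry; EFAULT means no one entry covers it.)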
2446176771Sraj */ 2447176771Sraj for (i = 0; i < tlb1_idx; i ++) { 2448176771Sraj if (tlb1_iomapped(i, pa, size, &va) == 0) 2449176771Sraj return (0); 2450176771Sraj } 2451176771Sraj 2452176771Sraj return (EFAULT); 2453176771Sraj} 2454176771Sraj 2455190701Smarcelvm_offset_t 2456190701Smarcelmmu_booke_dumpsys_map(mmu_t mmu, struct pmap_md *md, vm_size_t ofs, 2457190701Smarcel vm_size_t *sz) 2458190701Smarcel{ 2459190701Smarcel vm_paddr_t pa, ppa; 2460190701Smarcel vm_offset_t va; 2461190701Smarcel vm_size_t gran; 2462190701Smarcel 2463190701Smarcel /* Raw physical memory dumps don't have a virtual address. */ 2464190701Smarcel if (md->md_vaddr == ~0UL) { 2465190701Smarcel /* We always map a 256MB page at 256M. */ 2466190701Smarcel gran = 256 * 1024 * 1024; 2467190701Smarcel pa = md->md_paddr + ofs; 2468190701Smarcel ppa = pa & ~(gran - 1); 2469190701Smarcel ofs = pa - ppa; 2470190701Smarcel va = gran; 2471190701Smarcel tlb1_set_entry(va, ppa, gran, _TLB_ENTRY_IO); 2472190701Smarcel if (*sz > (gran - ofs)) 2473190701Smarcel *sz = gran - ofs; 2474190701Smarcel return (va + ofs); 2475190701Smarcel } 2476190701Smarcel 2477190701Smarcel /* Minidumps are based on virtual memory addresses. */ 2478190701Smarcel va = md->md_vaddr + ofs; 2479190701Smarcel if (va >= kernstart + kernsize) { 2480190701Smarcel gran = PAGE_SIZE - (va & PAGE_MASK); 2481190701Smarcel if (*sz > gran) 2482190701Smarcel *sz = gran; 2483190701Smarcel } 2484190701Smarcel return (va); 2485190701Smarcel} 2486190701Smarcel 2487190701Smarcelvoid 2488190701Smarcelmmu_booke_dumpsys_unmap(mmu_t mmu, struct pmap_md *md, vm_size_t ofs, 2489190701Smarcel vm_offset_t va) 2490190701Smarcel{ 2491190701Smarcel 2492190701Smarcel /* Raw physical memory dumps don't have a virtual address. */ 2493190701Smarcel if (md->md_vaddr == ~0UL) { 2494190701Smarcel tlb1_idx--; 2495190701Smarcel tlb1[tlb1_idx].mas1 = 0; 2496190701Smarcel tlb1[tlb1_idx].mas2 = 0; 2497190701Smarcel tlb1[tlb1_idx].mas3 = 0; 2498190701Smarcel tlb1_write_entry(tlb1_idx); 2499190701Smarcel return; 2500190701Smarcel } 2501190701Smarcel 2502190701Smarcel /* Minidumps are based on virtual memory addresses. */ 2503190701Smarcel /* Nothing to do... */ 2504190701Smarcel} 2505190701Smarcel 2506190701Smarcelstruct pmap_md * 2507190701Smarcelmmu_booke_scan_md(mmu_t mmu, struct pmap_md *prev) 2508190701Smarcel{ 2509190701Smarcel static struct pmap_md md; 2510190701Smarcel pte_t *pte; 2511190701Smarcel vm_offset_t va; 2512190701Smarcel 2513190701Smarcel if (dumpsys_minidump) { 2514190701Smarcel md.md_paddr = ~0UL; /* Minidumps use virtual addresses. */ 2515190701Smarcel if (prev == NULL) { 2516190701Smarcel /* 1st: kernel .data and .bss. */ 2517190701Smarcel md.md_index = 1; 2518190701Smarcel md.md_vaddr = trunc_page((uintptr_t)_etext); 2519190701Smarcel md.md_size = round_page((uintptr_t)_end) - md.md_vaddr; 2520190701Smarcel return (&md); 2521190701Smarcel } 2522190701Smarcel switch (prev->md_index) { 2523190701Smarcel case 1: 2524190701Smarcel /* 2nd: msgbuf and tables (see pmap_bootstrap()). */ 2525190701Smarcel md.md_index = 2; 2526190701Smarcel md.md_vaddr = data_start; 2527190701Smarcel md.md_size = data_end - data_start; 2528190701Smarcel break; 2529190701Smarcel case 2: 2530190701Smarcel /* 3rd: kernel VM. */ 2531190701Smarcel va = prev->md_vaddr + prev->md_size; 2532190701Smarcel /* Find start of next chunk (from va). */ 2533190701Smarcel while (va < virtual_end) { 2534190701Smarcel /* Don't dump the buffer cache. 
				 */
2535190701Smarcel				if (va >= kmi.buffer_sva &&
2536190701Smarcel				    va < kmi.buffer_eva) {
2537190701Smarcel					va = kmi.buffer_eva;
2538190701Smarcel					continue;
2539190701Smarcel				}
2540190701Smarcel				pte = pte_find(mmu, kernel_pmap, va);
2541190701Smarcel				if (pte != NULL && PTE_ISVALID(pte))
2542190701Smarcel					break;
2543190701Smarcel				va += PAGE_SIZE;
2544190701Smarcel			}
2545190701Smarcel			if (va < virtual_end) {
2546190701Smarcel				md.md_vaddr = va;
2547190701Smarcel				va += PAGE_SIZE;
2548190701Smarcel				/* Find last page in chunk. */
2549190701Smarcel				while (va < virtual_end) {
2550190701Smarcel					/* Don't run into the buffer cache. */
2551190701Smarcel					if (va == kmi.buffer_sva)
2552190701Smarcel						break;
2553190701Smarcel					pte = pte_find(mmu, kernel_pmap, va);
2554190701Smarcel					if (pte == NULL || !PTE_ISVALID(pte))
2555190701Smarcel						break;
2556190701Smarcel					va += PAGE_SIZE;
2557190701Smarcel				}
2558190701Smarcel				md.md_size = va - md.md_vaddr;
2559190701Smarcel				break;
2560190701Smarcel			}
2561190701Smarcel			md.md_index = 3;
2562190701Smarcel			/* FALLTHROUGH */
2563190701Smarcel		default:
2564190701Smarcel			return (NULL);
2565190701Smarcel		}
2566190701Smarcel	} else { /* raw physical memory dumps */
2567209908Sraj		mem_regions(&physmem_regions, &physmem_regions_sz,
2568209908Sraj		    &availmem_regions, &availmem_regions_sz);
2569209908Sraj
2570190701Smarcel		if (prev == NULL) {
2571190701Smarcel			/* First physical chunk. */
2572209908Sraj			md.md_paddr = physmem_regions[0].mr_start;
2573209908Sraj			md.md_size = physmem_regions[0].mr_size;
2574190701Smarcel			md.md_vaddr = ~0UL;
2575190701Smarcel			md.md_index = 1;
2576209908Sraj		} else if (md.md_index < physmem_regions_sz) {
2577209908Sraj			md.md_paddr = physmem_regions[md.md_index].mr_start;
2578209908Sraj			md.md_size = physmem_regions[md.md_index].mr_size;
2579190701Smarcel			md.md_vaddr = ~0UL;
2580190701Smarcel			md.md_index++;
2581190701Smarcel		} else {
2582190701Smarcel			/* There's no next physical chunk. */
2583190701Smarcel			return (NULL);
2584190701Smarcel		}
2585190701Smarcel	}
2586190701Smarcel
2587190701Smarcel	return (&md);
2588190701Smarcel}
2589190701Smarcel
2590176771Sraj/*
2591176771Sraj * Map a set of physical memory pages into the kernel virtual address space.
2592176771Sraj * Return a pointer to where it is mapped.  This routine is intended to be used
2593176771Sraj * for mapping device memory, NOT real memory.
2594176771Sraj */
2595176771Srajstatic void *
2596235936Srajmmu_booke_mapdev(mmu_t mmu, vm_paddr_t pa, vm_size_t size)
2597176771Sraj{
2598184244Smarcel	void *res;
2599176771Sraj	uintptr_t va;
2600184244Smarcel	vm_size_t sz;
2601176771Sraj
2602242526Smarcel	/*
2603242526Smarcel	 * CCSR is premapped.  Note that (pa + size - 1) is there to make sure
2604242526Smarcel	 * we don't wrap around.  Devices on the local bus typically extend all
2605242526Smarcel	 * the way up to and including 0xffffffff.  In that case (pa + size)
2606242526Smarcel	 * would be 0, creating a false positive (i.e. we would think it is
2607242526Smarcel	 * within the CCSR) and no mapping would be created.
2608242526Smarcel	 */
2609242526Smarcel	if (pa >= ccsrbar_pa && (pa + size - 1) < (ccsrbar_pa + CCSRBAR_SIZE)) {
2610242526Smarcel		va = CCSRBAR_VA + (pa - ccsrbar_pa);
2611242526Smarcel		return ((void *)va);
2612242526Smarcel	}
2613242526Smarcel
2614176771Sraj	va = (pa >= 0x80000000) ?
pa : (0xe2000000 + pa); 2615184244Smarcel res = (void *)va; 2616184244Smarcel 2617184244Smarcel do { 2618184244Smarcel sz = 1 << (ilog2(size) & ~1); 2619184244Smarcel if (bootverbose) 2620184244Smarcel printf("Wiring VA=%x to PA=%x (size=%x), " 2621184244Smarcel "using TLB1[%d]\n", va, pa, sz, tlb1_idx); 2622184244Smarcel tlb1_set_entry(va, pa, sz, _TLB_ENTRY_IO); 2623184244Smarcel size -= sz; 2624184244Smarcel pa += sz; 2625184244Smarcel va += sz; 2626184244Smarcel } while (size > 0); 2627184244Smarcel 2628184244Smarcel return (res); 2629176771Sraj} 2630176771Sraj 2631176771Sraj/* 2632176771Sraj * 'Unmap' a range mapped by mmu_booke_mapdev(). 2633176771Sraj */ 2634176771Srajstatic void 2635176771Srajmmu_booke_unmapdev(mmu_t mmu, vm_offset_t va, vm_size_t size) 2636176771Sraj{ 2637176771Sraj vm_offset_t base, offset; 2638176771Sraj 2639176771Sraj /* 2640176771Sraj * Unmap only if this is inside kernel virtual space. 2641176771Sraj */ 2642176771Sraj if ((va >= VM_MIN_KERNEL_ADDRESS) && (va <= VM_MAX_KERNEL_ADDRESS)) { 2643176771Sraj base = trunc_page(va); 2644176771Sraj offset = va & PAGE_MASK; 2645176771Sraj size = roundup(offset + size, PAGE_SIZE); 2646254025Sjeff kva_free(base, size); 2647176771Sraj } 2648176771Sraj} 2649176771Sraj 2650176771Sraj/* 2651187151Sraj * mmu_booke_object_init_pt preloads the ptes for a given object into the 2652187151Sraj * specified pmap. This eliminates the blast of soft faults on process startup 2653187151Sraj * and immediately after an mmap. 2654176771Sraj */ 2655176771Srajstatic void 2656176771Srajmmu_booke_object_init_pt(mmu_t mmu, pmap_t pmap, vm_offset_t addr, 2657176771Sraj vm_object_t object, vm_pindex_t pindex, vm_size_t size) 2658176771Sraj{ 2659187151Sraj 2660248084Sattilio VM_OBJECT_ASSERT_WLOCKED(object); 2661195840Sjhb KASSERT(object->type == OBJT_DEVICE || object->type == OBJT_SG, 2662176771Sraj ("mmu_booke_object_init_pt: non-device object")); 2663176771Sraj} 2664176771Sraj 2665176771Sraj/* 2666176771Sraj * Perform the pmap work for mincore. 2667176771Sraj */ 2668176771Srajstatic int 2669208504Salcmmu_booke_mincore(mmu_t mmu, pmap_t pmap, vm_offset_t addr, 2670208504Salc vm_paddr_t *locked_pa) 2671176771Sraj{ 2672176771Sraj 2673176771Sraj TODO; 2674176771Sraj return (0); 2675176771Sraj} 2676176771Sraj 2677176771Sraj/**************************************************************************/ 2678176771Sraj/* TID handling */ 2679176771Sraj/**************************************************************************/ 2680176771Sraj 2681176771Sraj/* 2682176771Sraj * Allocate a TID. If necessary, steal one from someone else. 2683176771Sraj * The new TID is flushed from the TLB before returning. 
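 *
 * TIDs are handed out round-robin per CPU from tid_next, wrapping from
 * TID_MAX back to TID_MIN; once the counter wraps, an allocation may
 * steal a TID that is still live, in which case every TLB0 entry tagged
 * with it is flushed via tid_flush().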
2684176771Sraj */ 2685176771Srajstatic tlbtid_t 2686176771Srajtid_alloc(pmap_t pmap) 2687176771Sraj{ 2688176771Sraj tlbtid_t tid; 2689187149Sraj int thiscpu; 2690176771Sraj 2691187149Sraj KASSERT((pmap != kernel_pmap), ("tid_alloc: kernel pmap")); 2692176771Sraj 2693187149Sraj CTR2(KTR_PMAP, "%s: s (pmap = %p)", __func__, pmap); 2694176771Sraj 2695187149Sraj thiscpu = PCPU_GET(cpuid); 2696176771Sraj 2697187149Sraj tid = PCPU_GET(tid_next); 2698187149Sraj if (tid > TID_MAX) 2699187149Sraj tid = TID_MIN; 2700187149Sraj PCPU_SET(tid_next, tid + 1); 2701176771Sraj 2702187149Sraj /* If we are stealing TID then clear the relevant pmap's field */ 2703187149Sraj if (tidbusy[thiscpu][tid] != NULL) { 2704176771Sraj 2705187149Sraj CTR2(KTR_PMAP, "%s: warning: stealing tid %d", __func__, tid); 2706187149Sraj 2707187149Sraj tidbusy[thiscpu][tid]->pm_tid[thiscpu] = TID_NONE; 2708176771Sraj 2709187149Sraj /* Flush all entries from TLB0 matching this TID. */ 2710187149Sraj tid_flush(tid); 2711176771Sraj } 2712176771Sraj 2713187149Sraj tidbusy[thiscpu][tid] = pmap; 2714187149Sraj pmap->pm_tid[thiscpu] = tid; 2715187149Sraj __asm __volatile("msync; isync"); 2716176771Sraj 2717187149Sraj CTR3(KTR_PMAP, "%s: e (%02d next = %02d)", __func__, tid, 2718187149Sraj PCPU_GET(tid_next)); 2719176771Sraj 2720176771Sraj return (tid); 2721176771Sraj} 2722176771Sraj 2723176771Sraj/**************************************************************************/ 2724176771Sraj/* TLB0 handling */ 2725176771Sraj/**************************************************************************/ 2726176771Sraj 2727176771Srajstatic void 2728187149Srajtlb_print_entry(int i, uint32_t mas1, uint32_t mas2, uint32_t mas3, 2729187149Sraj uint32_t mas7) 2730176771Sraj{ 2731176771Sraj int as; 2732176771Sraj char desc[3]; 2733176771Sraj tlbtid_t tid; 2734176771Sraj vm_size_t size; 2735176771Sraj unsigned int tsize; 2736176771Sraj 2737176771Sraj desc[2] = '\0'; 2738176771Sraj if (mas1 & MAS1_VALID) 2739176771Sraj desc[0] = 'V'; 2740176771Sraj else 2741176771Sraj desc[0] = ' '; 2742176771Sraj 2743176771Sraj if (mas1 & MAS1_IPROT) 2744176771Sraj desc[1] = 'P'; 2745176771Sraj else 2746176771Sraj desc[1] = ' '; 2747176771Sraj 2748187149Sraj as = (mas1 & MAS1_TS_MASK) ? 1 : 0; 2749176771Sraj tid = MAS1_GETTID(mas1); 2750176771Sraj 2751176771Sraj tsize = (mas1 & MAS1_TSIZE_MASK) >> MAS1_TSIZE_SHIFT; 2752176771Sraj size = 0; 2753176771Sraj if (tsize) 2754176771Sraj size = tsize2size(tsize); 2755176771Sraj 2756176771Sraj debugf("%3d: (%s) [AS=%d] " 2757176771Sraj "sz = 0x%08x tsz = %d tid = %d mas1 = 0x%08x " 2758176771Sraj "mas2(va) = 0x%08x mas3(pa) = 0x%08x mas7 = 0x%08x\n", 2759176771Sraj i, desc, as, size, tsize, tid, mas1, mas2, mas3, mas7); 2760176771Sraj} 2761176771Sraj 2762176771Sraj/* Convert TLB0 va and way number to tlb0[] table index. */ 2763176771Srajstatic inline unsigned int 2764176771Srajtlb0_tableidx(vm_offset_t va, unsigned int way) 2765176771Sraj{ 2766176771Sraj unsigned int idx; 2767176771Sraj 2768176771Sraj idx = (way * TLB0_ENTRIES_PER_WAY); 2769176771Sraj idx += (va & MAS2_TLB0_ENTRY_IDX_MASK) >> MAS2_TLB0_ENTRY_IDX_SHIFT; 2770176771Sraj return (idx); 2771176771Sraj} 2772176771Sraj 2773176771Sraj/* 2774187149Sraj * Invalidate TLB0 entry. 
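 *
 * Must be called with tlbivax_mutex held (asserted below).  On e500
 * class cores tlbivax broadcasts the invalidation to all CPUs, and the
 * isync/msync/tlbsync/msync sequence that follows orders it against
 * other storage accesses.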
2775176771Sraj */ 2776187149Srajstatic inline void 2777187149Srajtlb0_flush_entry(vm_offset_t va) 2778176771Sraj{ 2779176771Sraj 2780187149Sraj CTR2(KTR_PMAP, "%s: s va=0x%08x", __func__, va); 2781176771Sraj 2782187149Sraj mtx_assert(&tlbivax_mutex, MA_OWNED); 2783176771Sraj 2784187149Sraj __asm __volatile("tlbivax 0, %0" :: "r"(va & MAS2_EPN_MASK)); 2785187149Sraj __asm __volatile("isync; msync"); 2786187149Sraj __asm __volatile("tlbsync; msync"); 2787176771Sraj 2788187149Sraj CTR1(KTR_PMAP, "%s: e", __func__); 2789176771Sraj} 2790176771Sraj 2791176771Sraj/* Print out contents of the MAS registers for each TLB0 entry */ 2792187149Srajvoid 2793176771Srajtlb0_print_tlbentries(void) 2794176771Sraj{ 2795187149Sraj uint32_t mas0, mas1, mas2, mas3, mas7; 2796176771Sraj int entryidx, way, idx; 2797176771Sraj 2798176771Sraj debugf("TLB0 entries:\n"); 2799187149Sraj for (way = 0; way < TLB0_WAYS; way ++) 2800176771Sraj for (entryidx = 0; entryidx < TLB0_ENTRIES_PER_WAY; entryidx++) { 2801176771Sraj 2802176771Sraj mas0 = MAS0_TLBSEL(0) | MAS0_ESEL(way); 2803176771Sraj mtspr(SPR_MAS0, mas0); 2804187149Sraj __asm __volatile("isync"); 2805176771Sraj 2806176771Sraj mas2 = entryidx << MAS2_TLB0_ENTRY_IDX_SHIFT; 2807176771Sraj mtspr(SPR_MAS2, mas2); 2808176771Sraj 2809187149Sraj __asm __volatile("isync; tlbre"); 2810176771Sraj 2811176771Sraj mas1 = mfspr(SPR_MAS1); 2812176771Sraj mas2 = mfspr(SPR_MAS2); 2813176771Sraj mas3 = mfspr(SPR_MAS3); 2814176771Sraj mas7 = mfspr(SPR_MAS7); 2815176771Sraj 2816176771Sraj idx = tlb0_tableidx(mas2, way); 2817176771Sraj tlb_print_entry(idx, mas1, mas2, mas3, mas7); 2818176771Sraj } 2819176771Sraj} 2820176771Sraj 2821176771Sraj/**************************************************************************/ 2822176771Sraj/* TLB1 handling */ 2823176771Sraj/**************************************************************************/ 2824187149Sraj 2825176771Sraj/* 2826187149Sraj * TLB1 mapping notes: 2827187149Sraj * 2828187149Sraj * TLB1[0] CCSRBAR 2829187149Sraj * TLB1[1] Kernel text and data. 2830187149Sraj * TLB1[2-15] Additional kernel text and data mappings (if required), PCI 2831187149Sraj * windows, other devices mappings. 2832187149Sraj */ 2833187149Sraj 2834187149Sraj/* 2835176771Sraj * Write given entry to TLB1 hardware. 2836176771Sraj * Use 32 bit pa, clear 4 high-order bits of RPN (mas7). 2837176771Sraj */ 2838176771Srajstatic void 2839176771Srajtlb1_write_entry(unsigned int idx) 2840176771Sraj{ 2841187151Sraj uint32_t mas0, mas7; 2842176771Sraj 2843176771Sraj //debugf("tlb1_write_entry: s\n"); 2844176771Sraj 2845176771Sraj /* Clear high order RPN bits */ 2846176771Sraj mas7 = 0; 2847176771Sraj 2848176771Sraj /* Select entry */ 2849176771Sraj mas0 = MAS0_TLBSEL(1) | MAS0_ESEL(idx); 2850176771Sraj //debugf("tlb1_write_entry: mas0 = 0x%08x\n", mas0); 2851176771Sraj 2852176771Sraj mtspr(SPR_MAS0, mas0); 2853187151Sraj __asm __volatile("isync"); 2854176771Sraj mtspr(SPR_MAS1, tlb1[idx].mas1); 2855187151Sraj __asm __volatile("isync"); 2856176771Sraj mtspr(SPR_MAS2, tlb1[idx].mas2); 2857187151Sraj __asm __volatile("isync"); 2858176771Sraj mtspr(SPR_MAS3, tlb1[idx].mas3); 2859187151Sraj __asm __volatile("isync"); 2860176771Sraj mtspr(SPR_MAS7, mas7); 2861187151Sraj __asm __volatile("isync; tlbwe; isync; msync"); 2862176771Sraj 2863201758Smbr //debugf("tlb1_write_entry: e\n"); 2864176771Sraj} 2865176771Sraj 2866176771Sraj/* 2867176771Sraj * Return the largest uint value log such that 2^log <= num. 
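 *
 * For example, ilog2(0x10000) == 16 and ilog2(0x18000) == 16: cntlzw
 * counts leading zeros, so 31 - lz is the index of the most significant
 * set bit.  ilog2(0) is undefined here (cntlzw yields 32, giving -1).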
2868176771Sraj */
2869176771Srajstatic unsigned int
2870176771Srajilog2(unsigned int num)
2871176771Sraj{
2872176771Sraj	int lz;
2873176771Sraj
2874176771Sraj	__asm ("cntlzw %0, %1" : "=r" (lz) : "r" (num));
2875176771Sraj	return (31 - lz);
2876176771Sraj}
2877176771Sraj
2878176771Sraj/*
2879176771Sraj * Convert TLB TSIZE value to mapped region size.
2880176771Sraj */
2881176771Srajstatic vm_size_t
2882176771Srajtsize2size(unsigned int tsize)
2883176771Sraj{
2884176771Sraj
2885176771Sraj	/*
2886176771Sraj	 * size = 4^tsize KB
2887176771Sraj	 * size = 4^tsize * 2^10 = 2^(2 * tsize + 10)
2888176771Sraj	 */
2889176771Sraj
2890176771Sraj	return ((1 << (2 * tsize)) * 1024);
2891176771Sraj}
2892176771Sraj
2893176771Sraj/*
2894176771Sraj * Convert region size (must be a power of 4) to TLB TSIZE value.
2895176771Sraj */
2896176771Srajstatic unsigned int
2897176771Srajsize2tsize(vm_size_t size)
2898176771Sraj{
2899176771Sraj
2900176771Sraj	return (ilog2(size) / 2 - 5);
2901176771Sraj}
2902176771Sraj
2903176771Sraj/*
2904187149Sraj * Register permanent kernel mapping in TLB1.
2905176771Sraj *
2906187149Sraj * Entries are created starting from index 0 (current free entry is
2907187149Sraj * kept in tlb1_idx) and are not supposed to be invalidated.
2908176771Sraj */
2909187149Srajstatic int
2910187149Srajtlb1_set_entry(vm_offset_t va, vm_offset_t pa, vm_size_t size,
2911187149Sraj    uint32_t flags)
2912176771Sraj{
2913187149Sraj	uint32_t ts, tid;
2914176771Sraj	int tsize;
2915187149Sraj
2916187149Sraj	if (tlb1_idx >= TLB1_ENTRIES) {
2917187149Sraj		printf("tlb1_set_entry: TLB1 full!\n");
2918187149Sraj		return (-1);
2919187149Sraj	}
2920176771Sraj
2921176771Sraj	/* Convert size to TSIZE */
2922176771Sraj	tsize = size2tsize(size);
2923176771Sraj
2924187149Sraj	tid = (TID_KERNEL << MAS1_TID_SHIFT) & MAS1_TID_MASK;
2925187149Sraj	/* XXX TS is hard coded to 0 for now as we only use single address space */
2926187149Sraj	ts = (0 << MAS1_TS_SHIFT) & MAS1_TS_MASK;
2927176771Sraj
2928187149Sraj	/* XXX LOCK tlb1[] */
2929176771Sraj
2930187149Sraj	tlb1[tlb1_idx].mas1 = MAS1_VALID | MAS1_IPROT | ts | tid;
2931187149Sraj	tlb1[tlb1_idx].mas1 |= ((tsize << MAS1_TSIZE_SHIFT) & MAS1_TSIZE_MASK);
2932187149Sraj	tlb1[tlb1_idx].mas2 = (va & MAS2_EPN_MASK) | flags;
2933176771Sraj
2934187149Sraj	/* Set supervisor RWX permission bits */
2935187149Sraj	tlb1[tlb1_idx].mas3 = (pa & MAS3_RPN) | MAS3_SR | MAS3_SW | MAS3_SX;
2936176771Sraj
2937187149Sraj	tlb1_write_entry(tlb1_idx++);
2938176771Sraj
2939187149Sraj	/* XXX UNLOCK tlb1[] */
2940176771Sraj
2941187149Sraj	/*
2942187149Sraj	 * XXX in general TLB1 updates should be propagated between CPUs,
2943187149Sraj	 * since the current design assumes the same TLB1 set-up on all
2944187149Sraj	 * cores.
2945187149Sraj	 */
2946176771Sraj	return (0);
2947176771Sraj}
2948176771Sraj
2949176771Sraj/*
2950187151Sraj * Map a contiguous RAM region into TLB1, using at most
2951176771Sraj * KERNEL_REGION_MAX_TLB_ENTRIES entries.
2952176771Sraj *
2953187151Sraj * If necessary, round up the last entry size and return the total size
2954176771Sraj * used by all allocated entries.
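 *
 * For example (illustrative), a 36MB region is covered by three entries
 * of sizes 16MB + 16MB + 4MB: the candidate page size starts at 64MB
 * and is divided by 4 until it fits the remainder, so every entry gets
 * a power-of-4 size as required by the MAS1 TSIZE encoding.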

/*
 * Map in a contiguous RAM region into TLB1 using at most
 * KERNEL_REGION_MAX_TLB_ENTRIES entries.
 *
 * If necessary, round up the size of the last entry and return the total
 * size consumed by all allocated entries.
 */
vm_size_t
tlb1_mapin_region(vm_offset_t va, vm_paddr_t pa, vm_size_t size)
{
	vm_size_t pgs[KERNEL_REGION_MAX_TLB_ENTRIES];
	vm_size_t mapped, pgsz, base, mask;
	int idx, nents;

	/* Round up to the next 1M */
	size = (size + (1 << 20) - 1) & ~((1 << 20) - 1);

	mapped = 0;
	idx = 0;
	base = va;
	pgsz = 64 * 1024 * 1024;
	while (mapped < size) {
		while (mapped < size && idx < KERNEL_REGION_MAX_TLB_ENTRIES) {
			while (pgsz > (size - mapped))
				pgsz >>= 2;
			pgs[idx++] = pgsz;
			mapped += pgsz;
		}

		/* We under-map. Correct for this. */
		if (mapped < size) {
			while (pgs[idx - 1] == pgsz) {
				idx--;
				mapped -= pgsz;
			}
			/* XXX We may increase beyond our starting point. */
			pgsz <<= 2;
			pgs[idx++] = pgsz;
			mapped += pgsz;
		}
	}

	nents = idx;
	mask = pgs[0] - 1;
	/* Align the address to the largest page size boundary. */
	if (va & mask) {
		va = (va + mask) & ~mask;
		pa = (pa + mask) & ~mask;
	}

	for (idx = 0; idx < nents; idx++) {
		pgsz = pgs[idx];
		debugf("%u: %x -> %x, size=%x\n", idx, pa, va, pgsz);
		tlb1_set_entry(va, pa, pgsz, _TLB_ENTRY_MEM);
		pa += pgsz;
		va += pgsz;
	}

	mapped = (va - base);
	debugf("mapped size 0x%08x (wasted space 0x%08x)\n",
	    mapped, mapped - size);
	return (mapped);
}
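
/*
 * Worked example (illustrative only): for a 72MB kernel region the
 * greedy loop above first selects a 64MB page (64MB <= 72MB), then
 * shrinks pgsz to 4MB for the remaining 8MB and selects two 4MB pages,
 * ending with pgs[] = { 64MB, 4MB, 4MB }: three TLB1 entries, with
 * mapped == size, i.e. no wasted space in this case.
 */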

/*
 * TLB1 initialization routine, to be called after the very first
 * assembler-level setup performed in locore.S.
 */
void
tlb1_init(vm_offset_t ccsrbar)
{
	uint32_t mas0, mas1, mas3;
	uint32_t tsz;
	u_int i;

	ccsrbar_pa = ccsrbar;

	if (bootinfo != NULL && bootinfo[0] != 1) {
		tlb1_idx = *((uint16_t *)(bootinfo + 8));
	} else
		tlb1_idx = 1;

	/* The first entry/entries are used to map the kernel. */
	for (i = 0; i < tlb1_idx; i++) {
		mas0 = MAS0_TLBSEL(1) | MAS0_ESEL(i);
		mtspr(SPR_MAS0, mas0);
		__asm __volatile("isync; tlbre");

		mas1 = mfspr(SPR_MAS1);
		if ((mas1 & MAS1_VALID) == 0)
			continue;

		mas3 = mfspr(SPR_MAS3);

		tlb1[i].mas1 = mas1;
		tlb1[i].mas2 = mfspr(SPR_MAS2);
		tlb1[i].mas3 = mas3;

		if (i == 0)
			kernload = mas3 & MAS3_RPN;

		tsz = (mas1 & MAS1_TSIZE_MASK) >> MAS1_TSIZE_SHIFT;
		kernsize += (tsz > 0) ? tsize2size(tsz) : 0;
	}

	/* Map in CCSRBAR. */
	tlb1_set_entry(CCSRBAR_VA, ccsrbar, CCSRBAR_SIZE, _TLB_ENTRY_IO);

#ifdef SMP
	bp_ntlb1s = tlb1_idx;
#endif

	/* Purge the remaining entries. */
	for (i = tlb1_idx; i < TLB1_ENTRIES; i++)
		tlb1_write_entry(i);

	/* Set up TLB miss defaults. */
	set_mas4_defaults();
}

/*
 * Set up MAS4 defaults.
 * These values are loaded into MAS0-2 on a TLB miss.
 */
static void
set_mas4_defaults(void)
{
	uint32_t mas4;

	/* Defaults: TLB0, PID0, TSIZED=4K */
	mas4 = MAS4_TLBSELD0;
	mas4 |= (TLB_SIZE_4K << MAS4_TSIZED_SHIFT) & MAS4_TSIZED_MASK;
#ifdef SMP
	mas4 |= MAS4_MD;
#endif
	mtspr(SPR_MAS4, mas4);
	__asm __volatile("isync");
}

/*
 * Print out the contents of the MAS registers for each TLB1 entry.
 */
void
tlb1_print_tlbentries(void)
{
	uint32_t mas0, mas1, mas2, mas3, mas7;
	int i;

	debugf("TLB1 entries:\n");
	for (i = 0; i < TLB1_ENTRIES; i++) {

		mas0 = MAS0_TLBSEL(1) | MAS0_ESEL(i);
		mtspr(SPR_MAS0, mas0);

		__asm __volatile("isync; tlbre");

		mas1 = mfspr(SPR_MAS1);
		mas2 = mfspr(SPR_MAS2);
		mas3 = mfspr(SPR_MAS3);
		mas7 = mfspr(SPR_MAS7);

		tlb_print_entry(i, mas1, mas2, mas3, mas7);
	}
}

/*
 * Print out the contents of the in-ram tlb1[] table.
 */
void
tlb1_print_entries(void)
{
	int i;

	debugf("tlb1[] table entries:\n");
	for (i = 0; i < TLB1_ENTRIES; i++)
		tlb_print_entry(i, tlb1[i].mas1, tlb1[i].mas2, tlb1[i].mas3, 0);
}
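
/*
 * Illustrative sketch (compile-disabled): a hypothetical helper showing
 * how the in-ram tlb1[] shadow table mirrors the hardware state that
 * tlb1_init() read back, by counting the slots currently marked valid.
 */
#if 0
static int
tlb1_count_valid(void)
{
	int i, n;

	n = 0;
	for (i = 0; i < TLB1_ENTRIES; i++)
		if (tlb1[i].mas1 & MAS1_VALID)
			n++;
	return (n);
}
#endif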

/*
 * Return 0 if the physical I/O range is encompassed by one of the TLB1
 * entries, otherwise return the related error code.
 */
static int
tlb1_iomapped(int i, vm_paddr_t pa, vm_size_t size, vm_offset_t *va)
{
	uint32_t prot;
	vm_paddr_t pa_start;
	vm_paddr_t pa_end;
	unsigned int entry_tsize;
	vm_size_t entry_size;

	*va = (vm_offset_t)NULL;

	/* Skip invalid entries */
	if (!(tlb1[i].mas1 & MAS1_VALID))
		return (EINVAL);

	/*
	 * The entry must be cache-inhibited, guarded, and r/w
	 * so it can function as an I/O page.
	 */
	prot = tlb1[i].mas2 & (MAS2_I | MAS2_G);
	if (prot != (MAS2_I | MAS2_G))
		return (EPERM);

	prot = tlb1[i].mas3 & (MAS3_SR | MAS3_SW);
	if (prot != (MAS3_SR | MAS3_SW))
		return (EPERM);

	/* The address should be within the entry range. */
	entry_tsize = (tlb1[i].mas1 & MAS1_TSIZE_MASK) >> MAS1_TSIZE_SHIFT;
	KASSERT((entry_tsize), ("tlb1_iomapped: invalid entry tsize"));

	entry_size = tsize2size(entry_tsize);
	pa_start = tlb1[i].mas3 & MAS3_RPN;
	pa_end = pa_start + entry_size - 1;

	/* pa_end is inclusive, so compare against the range's last byte. */
	if ((pa < pa_start) || ((pa + size - 1) > pa_end))
		return (ERANGE);

	/* Return the virtual address of this mapping. */
	*va = (tlb1[i].mas2 & MAS2_EPN_MASK) + (pa - pa_start);
	return (0);
}
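
/*
 * Illustrative sketch (compile-disabled): how a device-mapping path
 * might probe the existing TLB1 windows with tlb1_iomapped() before
 * allocating a fresh mapping. The helper name tlb1_find_iomapping and
 * its 0-on-failure convention are hypothetical.
 */
#if 0
static vm_offset_t
tlb1_find_iomapping(vm_paddr_t pa, vm_size_t size)
{
	vm_offset_t va;
	int i;

	for (i = 0; i < tlb1_idx; i++)
		if (tlb1_iomapped(i, pa, size, &va) == 0)
			return (va);
	return (0);
}
#endif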