pmap.c revision 208504
/*-
 * Copyright (C) 2007-2009 Semihalf, Rafal Jaworowski <raj@semihalf.com>
 * Copyright (C) 2006 Semihalf, Marian Balakowicz <m8@semihalf.com>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN
 * NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
 * TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
 * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
 * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Some hw specific parts of this pmap were derived or influenced
 * by NetBSD's ibm4xx pmap module. More generic code is shared with
 * a few other pmap modules from the FreeBSD tree.
 */

/*
 * VM layout notes:
 *
 * Kernel and user threads run within one common virtual address space
 * defined by AS=0.
 *
 * Virtual address space layout:
 * -----------------------------
 * 0x0000_0000 - 0xafff_ffff   : user process
 * 0xb000_0000 - 0xbfff_ffff   : pmap_mapdev()-ed area (PCI/PCIE etc.)
 * 0xc000_0000 - 0xc0ff_ffff   : kernel reserved
 *   0xc000_0000 - data_end    : kernel code+data, env, metadata etc.
 * 0xc100_0000 - 0xfeef_ffff   : KVA
 *   0xc100_0000 - 0xc100_3fff : reserved for page zero/copy
 *   0xc100_4000 - 0xc200_3fff : reserved for ptbl bufs
 *   0xc200_4000 - 0xc200_8fff : guard page + kstack0
 *   0xc200_9000 - 0xfeef_ffff : actual free KVA space
 * 0xfef0_0000 - 0xffff_ffff   : I/O devices region
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: head/sys/powerpc/booke/pmap.c 208504 2010-05-24 14:26:57Z alc $");

#include <sys/types.h>
#include <sys/param.h>
#include <sys/malloc.h>
#include <sys/ktr.h>
#include <sys/proc.h>
#include <sys/user.h>
#include <sys/queue.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/msgbuf.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/smp.h>
#include <sys/vmmeter.h>

#include <vm/vm.h>
#include <vm/vm_page.h>
#include <vm/vm_kern.h>
#include <vm/vm_pageout.h>
#include <vm/vm_extern.h>
#include <vm/vm_object.h>
#include <vm/vm_param.h>
#include <vm/vm_map.h>
#include <vm/vm_pager.h>
#include <vm/uma.h>

#include <machine/bootinfo.h>
#include <machine/cpu.h>
#include <machine/pcb.h>
#include <machine/platform.h>

#include <machine/tlb.h>
#include <machine/spr.h>
#include <machine/vmparam.h>
#include <machine/md_var.h>
#include <machine/mmuvar.h>
#include <machine/pmap.h>
#include <machine/pte.h>

#include "mmu_if.h"

#define DEBUG
#undef DEBUG

#ifdef DEBUG
#define debugf(fmt, args...) printf(fmt, ##args)
#else
#define debugf(fmt, args...)
#endif

#define TODO panic("%s: not implemented", __func__);

#include "opt_sched.h"
#ifndef SCHED_4BSD
#error "e500 only works with SCHED_4BSD which uses a global scheduler lock."
#endif
extern struct mtx sched_lock;

extern int dumpsys_minidump;

extern unsigned char _etext[];
extern unsigned char _end[];

/* Kernel physical load address. */
extern uint32_t kernload;
vm_offset_t kernstart;
vm_size_t kernsize;

/* Message buffer and tables. */
static vm_offset_t data_start;
static vm_size_t data_end;

/* Phys/avail memory regions. */
static struct mem_region *availmem_regions;
static int availmem_regions_sz;
static struct mem_region *physmem_regions;
static int physmem_regions_sz;

/* Reserved KVA space and mutex for mmu_booke_zero_page.
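 * A single page of KVA is set aside for this: mmu_booke_zero_page_area()
 * maps the target page at zero_page_va, bzero()s the requested range and
 * unmaps it again, with zero_page_mutex serializing concurrent callers.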
*/ 133static vm_offset_t zero_page_va; 134static struct mtx zero_page_mutex; 135 136static struct mtx tlbivax_mutex; 137 138/* 139 * Reserved KVA space for mmu_booke_zero_page_idle. This is used 140 * by idle thred only, no lock required. 141 */ 142static vm_offset_t zero_page_idle_va; 143 144/* Reserved KVA space and mutex for mmu_booke_copy_page. */ 145static vm_offset_t copy_page_src_va; 146static vm_offset_t copy_page_dst_va; 147static struct mtx copy_page_mutex; 148 149/**************************************************************************/ 150/* PMAP */ 151/**************************************************************************/ 152 153static void mmu_booke_enter_locked(mmu_t, pmap_t, vm_offset_t, vm_page_t, 154 vm_prot_t, boolean_t); 155 156unsigned int kptbl_min; /* Index of the first kernel ptbl. */ 157unsigned int kernel_ptbls; /* Number of KVA ptbls. */ 158 159/* 160 * If user pmap is processed with mmu_booke_remove and the resident count 161 * drops to 0, there are no more pages to remove, so we need not continue. 162 */ 163#define PMAP_REMOVE_DONE(pmap) \ 164 ((pmap) != kernel_pmap && (pmap)->pm_stats.resident_count == 0) 165 166extern void tlb_lock(uint32_t *); 167extern void tlb_unlock(uint32_t *); 168extern void tid_flush(tlbtid_t); 169 170/**************************************************************************/ 171/* TLB and TID handling */ 172/**************************************************************************/ 173 174/* Translation ID busy table */ 175static volatile pmap_t tidbusy[MAXCPU][TID_MAX + 1]; 176 177/* 178 * TLB0 capabilities (entry, way numbers etc.). These can vary between e500 179 * core revisions and should be read from h/w registers during early config. 180 */ 181uint32_t tlb0_entries; 182uint32_t tlb0_ways; 183uint32_t tlb0_entries_per_way; 184 185#define TLB0_ENTRIES (tlb0_entries) 186#define TLB0_WAYS (tlb0_ways) 187#define TLB0_ENTRIES_PER_WAY (tlb0_entries_per_way) 188 189#define TLB1_ENTRIES 16 190 191/* In-ram copy of the TLB1 */ 192static tlb_entry_t tlb1[TLB1_ENTRIES]; 193 194/* Next free entry in the TLB1 */ 195static unsigned int tlb1_idx; 196 197static tlbtid_t tid_alloc(struct pmap *); 198 199static void tlb_print_entry(int, uint32_t, uint32_t, uint32_t, uint32_t); 200 201static int tlb1_set_entry(vm_offset_t, vm_offset_t, vm_size_t, uint32_t); 202static void tlb1_write_entry(unsigned int); 203static int tlb1_iomapped(int, vm_paddr_t, vm_size_t, vm_offset_t *); 204static vm_size_t tlb1_mapin_region(vm_offset_t, vm_offset_t, vm_size_t); 205 206static vm_size_t tsize2size(unsigned int); 207static unsigned int size2tsize(vm_size_t); 208static unsigned int ilog2(unsigned int); 209 210static void set_mas4_defaults(void); 211 212static inline void tlb0_flush_entry(vm_offset_t); 213static inline unsigned int tlb0_tableidx(vm_offset_t, unsigned int); 214 215/**************************************************************************/ 216/* Page table management */ 217/**************************************************************************/ 218 219/* Data for the pv entry allocation mechanism */ 220static uma_zone_t pvzone; 221static struct vm_object pvzone_obj; 222static int pv_entry_count = 0, pv_entry_max = 0, pv_entry_high_water = 0; 223 224#define PV_ENTRY_ZONE_MIN 2048 /* min pv entries in uma zone */ 225 226#ifndef PMAP_SHPGPERPROC 227#define PMAP_SHPGPERPROC 200 228#endif 229 230static void ptbl_init(void); 231static struct ptbl_buf *ptbl_buf_alloc(void); 232static void ptbl_buf_free(struct ptbl_buf *); 233static void 
ptbl_free_pmap_ptbl(pmap_t, pte_t *); 234 235static pte_t *ptbl_alloc(mmu_t, pmap_t, unsigned int); 236static void ptbl_free(mmu_t, pmap_t, unsigned int); 237static void ptbl_hold(mmu_t, pmap_t, unsigned int); 238static int ptbl_unhold(mmu_t, pmap_t, unsigned int); 239 240static vm_paddr_t pte_vatopa(mmu_t, pmap_t, vm_offset_t); 241static pte_t *pte_find(mmu_t, pmap_t, vm_offset_t); 242static void pte_enter(mmu_t, pmap_t, vm_page_t, vm_offset_t, uint32_t); 243static int pte_remove(mmu_t, pmap_t, vm_offset_t, uint8_t); 244 245static pv_entry_t pv_alloc(void); 246static void pv_free(pv_entry_t); 247static void pv_insert(pmap_t, vm_offset_t, vm_page_t); 248static void pv_remove(pmap_t, vm_offset_t, vm_page_t); 249 250/* Number of kva ptbl buffers, each covering one ptbl (PTBL_PAGES). */ 251#define PTBL_BUFS (128 * 16) 252 253struct ptbl_buf { 254 TAILQ_ENTRY(ptbl_buf) link; /* list link */ 255 vm_offset_t kva; /* va of mapping */ 256}; 257 258/* ptbl free list and a lock used for access synchronization. */ 259static TAILQ_HEAD(, ptbl_buf) ptbl_buf_freelist; 260static struct mtx ptbl_buf_freelist_lock; 261 262/* Base address of kva space allocated fot ptbl bufs. */ 263static vm_offset_t ptbl_buf_pool_vabase; 264 265/* Pointer to ptbl_buf structures. */ 266static struct ptbl_buf *ptbl_bufs; 267 268void pmap_bootstrap_ap(volatile uint32_t *); 269 270/* 271 * Kernel MMU interface 272 */ 273static void mmu_booke_change_wiring(mmu_t, pmap_t, vm_offset_t, boolean_t); 274static void mmu_booke_clear_modify(mmu_t, vm_page_t); 275static void mmu_booke_clear_reference(mmu_t, vm_page_t); 276static void mmu_booke_copy(mmu_t, pmap_t, pmap_t, vm_offset_t, 277 vm_size_t, vm_offset_t); 278static void mmu_booke_copy_page(mmu_t, vm_page_t, vm_page_t); 279static void mmu_booke_enter(mmu_t, pmap_t, vm_offset_t, vm_page_t, 280 vm_prot_t, boolean_t); 281static void mmu_booke_enter_object(mmu_t, pmap_t, vm_offset_t, vm_offset_t, 282 vm_page_t, vm_prot_t); 283static void mmu_booke_enter_quick(mmu_t, pmap_t, vm_offset_t, vm_page_t, 284 vm_prot_t); 285static vm_paddr_t mmu_booke_extract(mmu_t, pmap_t, vm_offset_t); 286static vm_page_t mmu_booke_extract_and_hold(mmu_t, pmap_t, vm_offset_t, 287 vm_prot_t); 288static void mmu_booke_init(mmu_t); 289static boolean_t mmu_booke_is_modified(mmu_t, vm_page_t); 290static boolean_t mmu_booke_is_prefaultable(mmu_t, pmap_t, vm_offset_t); 291static boolean_t mmu_booke_is_referenced(mmu_t, vm_page_t); 292static boolean_t mmu_booke_ts_referenced(mmu_t, vm_page_t); 293static vm_offset_t mmu_booke_map(mmu_t, vm_offset_t *, vm_offset_t, vm_offset_t, 294 int); 295static int mmu_booke_mincore(mmu_t, pmap_t, vm_offset_t, 296 vm_paddr_t *); 297static void mmu_booke_object_init_pt(mmu_t, pmap_t, vm_offset_t, 298 vm_object_t, vm_pindex_t, vm_size_t); 299static boolean_t mmu_booke_page_exists_quick(mmu_t, pmap_t, vm_page_t); 300static void mmu_booke_page_init(mmu_t, vm_page_t); 301static int mmu_booke_page_wired_mappings(mmu_t, vm_page_t); 302static void mmu_booke_pinit(mmu_t, pmap_t); 303static void mmu_booke_pinit0(mmu_t, pmap_t); 304static void mmu_booke_protect(mmu_t, pmap_t, vm_offset_t, vm_offset_t, 305 vm_prot_t); 306static void mmu_booke_qenter(mmu_t, vm_offset_t, vm_page_t *, int); 307static void mmu_booke_qremove(mmu_t, vm_offset_t, int); 308static void mmu_booke_release(mmu_t, pmap_t); 309static void mmu_booke_remove(mmu_t, pmap_t, vm_offset_t, vm_offset_t); 310static void mmu_booke_remove_all(mmu_t, vm_page_t); 311static void mmu_booke_remove_write(mmu_t, vm_page_t); 312static void 
mmu_booke_zero_page(mmu_t, vm_page_t); 313static void mmu_booke_zero_page_area(mmu_t, vm_page_t, int, int); 314static void mmu_booke_zero_page_idle(mmu_t, vm_page_t); 315static void mmu_booke_activate(mmu_t, struct thread *); 316static void mmu_booke_deactivate(mmu_t, struct thread *); 317static void mmu_booke_bootstrap(mmu_t, vm_offset_t, vm_offset_t); 318static void *mmu_booke_mapdev(mmu_t, vm_offset_t, vm_size_t); 319static void mmu_booke_unmapdev(mmu_t, vm_offset_t, vm_size_t); 320static vm_offset_t mmu_booke_kextract(mmu_t, vm_offset_t); 321static void mmu_booke_kenter(mmu_t, vm_offset_t, vm_offset_t); 322static void mmu_booke_kremove(mmu_t, vm_offset_t); 323static boolean_t mmu_booke_dev_direct_mapped(mmu_t, vm_offset_t, vm_size_t); 324static void mmu_booke_sync_icache(mmu_t, pmap_t, vm_offset_t, 325 vm_size_t); 326static vm_offset_t mmu_booke_dumpsys_map(mmu_t, struct pmap_md *, 327 vm_size_t, vm_size_t *); 328static void mmu_booke_dumpsys_unmap(mmu_t, struct pmap_md *, 329 vm_size_t, vm_offset_t); 330static struct pmap_md *mmu_booke_scan_md(mmu_t, struct pmap_md *); 331 332static mmu_method_t mmu_booke_methods[] = { 333 /* pmap dispatcher interface */ 334 MMUMETHOD(mmu_change_wiring, mmu_booke_change_wiring), 335 MMUMETHOD(mmu_clear_modify, mmu_booke_clear_modify), 336 MMUMETHOD(mmu_clear_reference, mmu_booke_clear_reference), 337 MMUMETHOD(mmu_copy, mmu_booke_copy), 338 MMUMETHOD(mmu_copy_page, mmu_booke_copy_page), 339 MMUMETHOD(mmu_enter, mmu_booke_enter), 340 MMUMETHOD(mmu_enter_object, mmu_booke_enter_object), 341 MMUMETHOD(mmu_enter_quick, mmu_booke_enter_quick), 342 MMUMETHOD(mmu_extract, mmu_booke_extract), 343 MMUMETHOD(mmu_extract_and_hold, mmu_booke_extract_and_hold), 344 MMUMETHOD(mmu_init, mmu_booke_init), 345 MMUMETHOD(mmu_is_modified, mmu_booke_is_modified), 346 MMUMETHOD(mmu_is_prefaultable, mmu_booke_is_prefaultable), 347 MMUMETHOD(mmu_is_referenced, mmu_booke_is_referenced), 348 MMUMETHOD(mmu_ts_referenced, mmu_booke_ts_referenced), 349 MMUMETHOD(mmu_map, mmu_booke_map), 350 MMUMETHOD(mmu_mincore, mmu_booke_mincore), 351 MMUMETHOD(mmu_object_init_pt, mmu_booke_object_init_pt), 352 MMUMETHOD(mmu_page_exists_quick,mmu_booke_page_exists_quick), 353 MMUMETHOD(mmu_page_init, mmu_booke_page_init), 354 MMUMETHOD(mmu_page_wired_mappings, mmu_booke_page_wired_mappings), 355 MMUMETHOD(mmu_pinit, mmu_booke_pinit), 356 MMUMETHOD(mmu_pinit0, mmu_booke_pinit0), 357 MMUMETHOD(mmu_protect, mmu_booke_protect), 358 MMUMETHOD(mmu_qenter, mmu_booke_qenter), 359 MMUMETHOD(mmu_qremove, mmu_booke_qremove), 360 MMUMETHOD(mmu_release, mmu_booke_release), 361 MMUMETHOD(mmu_remove, mmu_booke_remove), 362 MMUMETHOD(mmu_remove_all, mmu_booke_remove_all), 363 MMUMETHOD(mmu_remove_write, mmu_booke_remove_write), 364 MMUMETHOD(mmu_sync_icache, mmu_booke_sync_icache), 365 MMUMETHOD(mmu_zero_page, mmu_booke_zero_page), 366 MMUMETHOD(mmu_zero_page_area, mmu_booke_zero_page_area), 367 MMUMETHOD(mmu_zero_page_idle, mmu_booke_zero_page_idle), 368 MMUMETHOD(mmu_activate, mmu_booke_activate), 369 MMUMETHOD(mmu_deactivate, mmu_booke_deactivate), 370 371 /* Internal interfaces */ 372 MMUMETHOD(mmu_bootstrap, mmu_booke_bootstrap), 373 MMUMETHOD(mmu_dev_direct_mapped,mmu_booke_dev_direct_mapped), 374 MMUMETHOD(mmu_mapdev, mmu_booke_mapdev), 375 MMUMETHOD(mmu_kenter, mmu_booke_kenter), 376 MMUMETHOD(mmu_kextract, mmu_booke_kextract), 377/* MMUMETHOD(mmu_kremove, mmu_booke_kremove), */ 378 MMUMETHOD(mmu_unmapdev, mmu_booke_unmapdev), 379 380 /* dumpsys() support */ 381 MMUMETHOD(mmu_dumpsys_map, 
mmu_booke_dumpsys_map), 382 MMUMETHOD(mmu_dumpsys_unmap, mmu_booke_dumpsys_unmap), 383 MMUMETHOD(mmu_scan_md, mmu_booke_scan_md), 384 385 { 0, 0 } 386}; 387 388static mmu_def_t booke_mmu = { 389 MMU_TYPE_BOOKE, 390 mmu_booke_methods, 391 0 392}; 393MMU_DEF(booke_mmu); 394 395static inline void 396tlb_miss_lock(void) 397{ 398#ifdef SMP 399 struct pcpu *pc; 400 401 if (!smp_started) 402 return; 403 404 SLIST_FOREACH(pc, &cpuhead, pc_allcpu) { 405 if (pc != pcpup) { 406 407 CTR3(KTR_PMAP, "%s: tlb miss LOCK of CPU=%d, " 408 "tlb_lock=%p", __func__, pc->pc_cpuid, pc->pc_booke_tlb_lock); 409 410 KASSERT((pc->pc_cpuid != PCPU_GET(cpuid)), 411 ("tlb_miss_lock: tried to lock self")); 412 413 tlb_lock(pc->pc_booke_tlb_lock); 414 415 CTR1(KTR_PMAP, "%s: locked", __func__); 416 } 417 } 418#endif 419} 420 421static inline void 422tlb_miss_unlock(void) 423{ 424#ifdef SMP 425 struct pcpu *pc; 426 427 if (!smp_started) 428 return; 429 430 SLIST_FOREACH(pc, &cpuhead, pc_allcpu) { 431 if (pc != pcpup) { 432 CTR2(KTR_PMAP, "%s: tlb miss UNLOCK of CPU=%d", 433 __func__, pc->pc_cpuid); 434 435 tlb_unlock(pc->pc_booke_tlb_lock); 436 437 CTR1(KTR_PMAP, "%s: unlocked", __func__); 438 } 439 } 440#endif 441} 442 443/* Return number of entries in TLB0. */ 444static __inline void 445tlb0_get_tlbconf(void) 446{ 447 uint32_t tlb0_cfg; 448 449 tlb0_cfg = mfspr(SPR_TLB0CFG); 450 tlb0_entries = tlb0_cfg & TLBCFG_NENTRY_MASK; 451 tlb0_ways = (tlb0_cfg & TLBCFG_ASSOC_MASK) >> TLBCFG_ASSOC_SHIFT; 452 tlb0_entries_per_way = tlb0_entries / tlb0_ways; 453} 454 455/* Initialize pool of kva ptbl buffers. */ 456static void 457ptbl_init(void) 458{ 459 int i; 460 461 CTR3(KTR_PMAP, "%s: s (ptbl_bufs = 0x%08x size 0x%08x)", __func__, 462 (uint32_t)ptbl_bufs, sizeof(struct ptbl_buf) * PTBL_BUFS); 463 CTR3(KTR_PMAP, "%s: s (ptbl_buf_pool_vabase = 0x%08x size = 0x%08x)", 464 __func__, ptbl_buf_pool_vabase, PTBL_BUFS * PTBL_PAGES * PAGE_SIZE); 465 466 mtx_init(&ptbl_buf_freelist_lock, "ptbl bufs lock", NULL, MTX_DEF); 467 TAILQ_INIT(&ptbl_buf_freelist); 468 469 for (i = 0; i < PTBL_BUFS; i++) { 470 ptbl_bufs[i].kva = ptbl_buf_pool_vabase + i * PTBL_PAGES * PAGE_SIZE; 471 TAILQ_INSERT_TAIL(&ptbl_buf_freelist, &ptbl_bufs[i], link); 472 } 473} 474 475/* Get a ptbl_buf from the freelist. */ 476static struct ptbl_buf * 477ptbl_buf_alloc(void) 478{ 479 struct ptbl_buf *buf; 480 481 mtx_lock(&ptbl_buf_freelist_lock); 482 buf = TAILQ_FIRST(&ptbl_buf_freelist); 483 if (buf != NULL) 484 TAILQ_REMOVE(&ptbl_buf_freelist, buf, link); 485 mtx_unlock(&ptbl_buf_freelist_lock); 486 487 CTR2(KTR_PMAP, "%s: buf = %p", __func__, buf); 488 489 return (buf); 490} 491 492/* Return ptbl buff to free pool. */ 493static void 494ptbl_buf_free(struct ptbl_buf *buf) 495{ 496 497 CTR2(KTR_PMAP, "%s: buf = %p", __func__, buf); 498 499 mtx_lock(&ptbl_buf_freelist_lock); 500 TAILQ_INSERT_TAIL(&ptbl_buf_freelist, buf, link); 501 mtx_unlock(&ptbl_buf_freelist_lock); 502} 503 504/* 505 * Search the list of allocated ptbl bufs and find on list of allocated ptbls 506 */ 507static void 508ptbl_free_pmap_ptbl(pmap_t pmap, pte_t *ptbl) 509{ 510 struct ptbl_buf *pbuf; 511 512 CTR2(KTR_PMAP, "%s: ptbl = %p", __func__, ptbl); 513 514 PMAP_LOCK_ASSERT(pmap, MA_OWNED); 515 516 TAILQ_FOREACH(pbuf, &pmap->pm_ptbl_list, link) 517 if (pbuf->kva == (vm_offset_t)ptbl) { 518 /* Remove from pmap ptbl buf list. */ 519 TAILQ_REMOVE(&pmap->pm_ptbl_list, pbuf, link); 520 521 /* Free corresponding ptbl buf. */ 522 ptbl_buf_free(pbuf); 523 break; 524 } 525} 526 527/* Allocate page table. 
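 * A ptbl is PTBL_PAGES wired pages holding the PTEs for one pdir slot.  The
 * pages are mapped at a KVA window taken from the ptbl_buf free list, the
 * table is zeroed, and the buf is linked onto the pmap's pm_ptbl_list.
 * Note that vm_page_alloc() may sleep (VM_WAIT), so the pmap lock and the
 * page queues lock are dropped and re-taken around the wait.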
*/ 528static pte_t * 529ptbl_alloc(mmu_t mmu, pmap_t pmap, unsigned int pdir_idx) 530{ 531 vm_page_t mtbl[PTBL_PAGES]; 532 vm_page_t m; 533 struct ptbl_buf *pbuf; 534 unsigned int pidx; 535 pte_t *ptbl; 536 int i; 537 538 CTR4(KTR_PMAP, "%s: pmap = %p su = %d pdir_idx = %d", __func__, pmap, 539 (pmap == kernel_pmap), pdir_idx); 540 541 KASSERT((pdir_idx <= (VM_MAXUSER_ADDRESS / PDIR_SIZE)), 542 ("ptbl_alloc: invalid pdir_idx")); 543 KASSERT((pmap->pm_pdir[pdir_idx] == NULL), 544 ("pte_alloc: valid ptbl entry exists!")); 545 546 pbuf = ptbl_buf_alloc(); 547 if (pbuf == NULL) 548 panic("pte_alloc: couldn't alloc kernel virtual memory"); 549 550 ptbl = (pte_t *)pbuf->kva; 551 552 CTR2(KTR_PMAP, "%s: ptbl kva = %p", __func__, ptbl); 553 554 /* Allocate ptbl pages, this will sleep! */ 555 for (i = 0; i < PTBL_PAGES; i++) { 556 pidx = (PTBL_PAGES * pdir_idx) + i; 557 while ((m = vm_page_alloc(NULL, pidx, 558 VM_ALLOC_NOOBJ | VM_ALLOC_WIRED)) == NULL) { 559 560 PMAP_UNLOCK(pmap); 561 vm_page_unlock_queues(); 562 VM_WAIT; 563 vm_page_lock_queues(); 564 PMAP_LOCK(pmap); 565 } 566 mtbl[i] = m; 567 } 568 569 /* Map allocated pages into kernel_pmap. */ 570 mmu_booke_qenter(mmu, (vm_offset_t)ptbl, mtbl, PTBL_PAGES); 571 572 /* Zero whole ptbl. */ 573 bzero((caddr_t)ptbl, PTBL_PAGES * PAGE_SIZE); 574 575 /* Add pbuf to the pmap ptbl bufs list. */ 576 TAILQ_INSERT_TAIL(&pmap->pm_ptbl_list, pbuf, link); 577 578 return (ptbl); 579} 580 581/* Free ptbl pages and invalidate pdir entry. */ 582static void 583ptbl_free(mmu_t mmu, pmap_t pmap, unsigned int pdir_idx) 584{ 585 pte_t *ptbl; 586 vm_paddr_t pa; 587 vm_offset_t va; 588 vm_page_t m; 589 int i; 590 591 CTR4(KTR_PMAP, "%s: pmap = %p su = %d pdir_idx = %d", __func__, pmap, 592 (pmap == kernel_pmap), pdir_idx); 593 594 KASSERT((pdir_idx <= (VM_MAXUSER_ADDRESS / PDIR_SIZE)), 595 ("ptbl_free: invalid pdir_idx")); 596 597 ptbl = pmap->pm_pdir[pdir_idx]; 598 599 CTR2(KTR_PMAP, "%s: ptbl = %p", __func__, ptbl); 600 601 KASSERT((ptbl != NULL), ("ptbl_free: null ptbl")); 602 603 /* 604 * Invalidate the pdir entry as soon as possible, so that other CPUs 605 * don't attempt to look up the page tables we are releasing. 606 */ 607 mtx_lock_spin(&tlbivax_mutex); 608 tlb_miss_lock(); 609 610 pmap->pm_pdir[pdir_idx] = NULL; 611 612 tlb_miss_unlock(); 613 mtx_unlock_spin(&tlbivax_mutex); 614 615 for (i = 0; i < PTBL_PAGES; i++) { 616 va = ((vm_offset_t)ptbl + (i * PAGE_SIZE)); 617 pa = pte_vatopa(mmu, kernel_pmap, va); 618 m = PHYS_TO_VM_PAGE(pa); 619 vm_page_free_zero(m); 620 atomic_subtract_int(&cnt.v_wire_count, 1); 621 mmu_booke_kremove(mmu, va); 622 } 623 624 ptbl_free_pmap_ptbl(pmap, ptbl); 625} 626 627/* 628 * Decrement ptbl pages hold count and attempt to free ptbl pages. 629 * Called when removing pte entry from ptbl. 630 * 631 * Return 1 if ptbl pages were freed. 
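 * (and 0 otherwise).  The hold count is kept in the wire_count of every
 * ptbl page; it is decremented here for each page and, once it reaches
 * zero, the whole ptbl is released through ptbl_free().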
632 */ 633static int 634ptbl_unhold(mmu_t mmu, pmap_t pmap, unsigned int pdir_idx) 635{ 636 pte_t *ptbl; 637 vm_paddr_t pa; 638 vm_page_t m; 639 int i; 640 641 CTR4(KTR_PMAP, "%s: pmap = %p su = %d pdir_idx = %d", __func__, pmap, 642 (pmap == kernel_pmap), pdir_idx); 643 644 KASSERT((pdir_idx <= (VM_MAXUSER_ADDRESS / PDIR_SIZE)), 645 ("ptbl_unhold: invalid pdir_idx")); 646 KASSERT((pmap != kernel_pmap), 647 ("ptbl_unhold: unholding kernel ptbl!")); 648 649 ptbl = pmap->pm_pdir[pdir_idx]; 650 651 //debugf("ptbl_unhold: ptbl = 0x%08x\n", (u_int32_t)ptbl); 652 KASSERT(((vm_offset_t)ptbl >= VM_MIN_KERNEL_ADDRESS), 653 ("ptbl_unhold: non kva ptbl")); 654 655 /* decrement hold count */ 656 for (i = 0; i < PTBL_PAGES; i++) { 657 pa = pte_vatopa(mmu, kernel_pmap, 658 (vm_offset_t)ptbl + (i * PAGE_SIZE)); 659 m = PHYS_TO_VM_PAGE(pa); 660 m->wire_count--; 661 } 662 663 /* 664 * Free ptbl pages if there are no pte etries in this ptbl. 665 * wire_count has the same value for all ptbl pages, so check the last 666 * page. 667 */ 668 if (m->wire_count == 0) { 669 ptbl_free(mmu, pmap, pdir_idx); 670 671 //debugf("ptbl_unhold: e (freed ptbl)\n"); 672 return (1); 673 } 674 675 return (0); 676} 677 678/* 679 * Increment hold count for ptbl pages. This routine is used when a new pte 680 * entry is being inserted into the ptbl. 681 */ 682static void 683ptbl_hold(mmu_t mmu, pmap_t pmap, unsigned int pdir_idx) 684{ 685 vm_paddr_t pa; 686 pte_t *ptbl; 687 vm_page_t m; 688 int i; 689 690 CTR3(KTR_PMAP, "%s: pmap = %p pdir_idx = %d", __func__, pmap, 691 pdir_idx); 692 693 KASSERT((pdir_idx <= (VM_MAXUSER_ADDRESS / PDIR_SIZE)), 694 ("ptbl_hold: invalid pdir_idx")); 695 KASSERT((pmap != kernel_pmap), 696 ("ptbl_hold: holding kernel ptbl!")); 697 698 ptbl = pmap->pm_pdir[pdir_idx]; 699 700 KASSERT((ptbl != NULL), ("ptbl_hold: null ptbl")); 701 702 for (i = 0; i < PTBL_PAGES; i++) { 703 pa = pte_vatopa(mmu, kernel_pmap, 704 (vm_offset_t)ptbl + (i * PAGE_SIZE)); 705 m = PHYS_TO_VM_PAGE(pa); 706 m->wire_count++; 707 } 708} 709 710/* Allocate pv_entry structure. */ 711pv_entry_t 712pv_alloc(void) 713{ 714 pv_entry_t pv; 715 716 pv_entry_count++; 717 if (pv_entry_count > pv_entry_high_water) 718 pagedaemon_wakeup(); 719 pv = uma_zalloc(pvzone, M_NOWAIT); 720 721 return (pv); 722} 723 724/* Free pv_entry structure. */ 725static __inline void 726pv_free(pv_entry_t pve) 727{ 728 729 pv_entry_count--; 730 uma_zfree(pvzone, pve); 731} 732 733 734/* Allocate and initialize pv_entry structure. */ 735static void 736pv_insert(pmap_t pmap, vm_offset_t va, vm_page_t m) 737{ 738 pv_entry_t pve; 739 740 //int su = (pmap == kernel_pmap); 741 //debugf("pv_insert: s (su = %d pmap = 0x%08x va = 0x%08x m = 0x%08x)\n", su, 742 // (u_int32_t)pmap, va, (u_int32_t)m); 743 744 pve = pv_alloc(); 745 if (pve == NULL) 746 panic("pv_insert: no pv entries!"); 747 748 pve->pv_pmap = pmap; 749 pve->pv_va = va; 750 751 /* add to pv_list */ 752 PMAP_LOCK_ASSERT(pmap, MA_OWNED); 753 mtx_assert(&vm_page_queue_mtx, MA_OWNED); 754 755 TAILQ_INSERT_TAIL(&m->md.pv_list, pve, pv_link); 756 757 //debugf("pv_insert: e\n"); 758} 759 760/* Destroy pv entry. 
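 * The (pmap, va) entry is unlinked from the page's pv_list and returned to
 * the pv zone; PG_WRITEABLE is cleared once the list becomes empty.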
*/ 761static void 762pv_remove(pmap_t pmap, vm_offset_t va, vm_page_t m) 763{ 764 pv_entry_t pve; 765 766 //int su = (pmap == kernel_pmap); 767 //debugf("pv_remove: s (su = %d pmap = 0x%08x va = 0x%08x)\n", su, (u_int32_t)pmap, va); 768 769 PMAP_LOCK_ASSERT(pmap, MA_OWNED); 770 mtx_assert(&vm_page_queue_mtx, MA_OWNED); 771 772 /* find pv entry */ 773 TAILQ_FOREACH(pve, &m->md.pv_list, pv_link) { 774 if ((pmap == pve->pv_pmap) && (va == pve->pv_va)) { 775 /* remove from pv_list */ 776 TAILQ_REMOVE(&m->md.pv_list, pve, pv_link); 777 if (TAILQ_EMPTY(&m->md.pv_list)) 778 vm_page_flag_clear(m, PG_WRITEABLE); 779 780 /* free pv entry struct */ 781 pv_free(pve); 782 break; 783 } 784 } 785 786 //debugf("pv_remove: e\n"); 787} 788 789/* 790 * Clean pte entry, try to free page table page if requested. 791 * 792 * Return 1 if ptbl pages were freed, otherwise return 0. 793 */ 794static int 795pte_remove(mmu_t mmu, pmap_t pmap, vm_offset_t va, uint8_t flags) 796{ 797 unsigned int pdir_idx = PDIR_IDX(va); 798 unsigned int ptbl_idx = PTBL_IDX(va); 799 vm_page_t m; 800 pte_t *ptbl; 801 pte_t *pte; 802 803 //int su = (pmap == kernel_pmap); 804 //debugf("pte_remove: s (su = %d pmap = 0x%08x va = 0x%08x flags = %d)\n", 805 // su, (u_int32_t)pmap, va, flags); 806 807 ptbl = pmap->pm_pdir[pdir_idx]; 808 KASSERT(ptbl, ("pte_remove: null ptbl")); 809 810 pte = &ptbl[ptbl_idx]; 811 812 if (pte == NULL || !PTE_ISVALID(pte)) 813 return (0); 814 815 if (PTE_ISWIRED(pte)) 816 pmap->pm_stats.wired_count--; 817 818 /* Handle managed entry. */ 819 if (PTE_ISMANAGED(pte)) { 820 /* Get vm_page_t for mapped pte. */ 821 m = PHYS_TO_VM_PAGE(PTE_PA(pte)); 822 823 if (PTE_ISMODIFIED(pte)) 824 vm_page_dirty(m); 825 826 if (PTE_ISREFERENCED(pte)) 827 vm_page_flag_set(m, PG_REFERENCED); 828 829 pv_remove(pmap, va, m); 830 } 831 832 mtx_lock_spin(&tlbivax_mutex); 833 tlb_miss_lock(); 834 835 tlb0_flush_entry(va); 836 pte->flags = 0; 837 pte->rpn = 0; 838 839 tlb_miss_unlock(); 840 mtx_unlock_spin(&tlbivax_mutex); 841 842 pmap->pm_stats.resident_count--; 843 844 if (flags & PTBL_UNHOLD) { 845 //debugf("pte_remove: e (unhold)\n"); 846 return (ptbl_unhold(mmu, pmap, pdir_idx)); 847 } 848 849 //debugf("pte_remove: e\n"); 850 return (0); 851} 852 853/* 854 * Insert PTE for a given page and virtual address. 855 */ 856static void 857pte_enter(mmu_t mmu, pmap_t pmap, vm_page_t m, vm_offset_t va, uint32_t flags) 858{ 859 unsigned int pdir_idx = PDIR_IDX(va); 860 unsigned int ptbl_idx = PTBL_IDX(va); 861 pte_t *ptbl, *pte; 862 863 CTR4(KTR_PMAP, "%s: su = %d pmap = %p va = %p", __func__, 864 pmap == kernel_pmap, pmap, va); 865 866 /* Get the page table pointer. */ 867 ptbl = pmap->pm_pdir[pdir_idx]; 868 869 if (ptbl == NULL) { 870 /* Allocate page table pages. */ 871 ptbl = ptbl_alloc(mmu, pmap, pdir_idx); 872 } else { 873 /* 874 * Check if there is valid mapping for requested 875 * va, if there is, remove it. 876 */ 877 pte = &pmap->pm_pdir[pdir_idx][ptbl_idx]; 878 if (PTE_ISVALID(pte)) { 879 pte_remove(mmu, pmap, va, PTBL_HOLD); 880 } else { 881 /* 882 * pte is not used, increment hold count 883 * for ptbl pages. 884 */ 885 if (pmap != kernel_pmap) 886 ptbl_hold(mmu, pmap, pdir_idx); 887 } 888 } 889 890 /* 891 * Insert pv_entry into pv_list for mapped page if part of managed 892 * memory. 893 */ 894 if ((m->flags & PG_FICTITIOUS) == 0) { 895 if ((m->flags & PG_UNMANAGED) == 0) { 896 flags |= PTE_MANAGED; 897 898 /* Create and insert pv entry. 
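 * pv_insert() panics if the pv zone is exhausted (pv_alloc() uses M_NOWAIT).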
*/ 899 pv_insert(pmap, va, m); 900 } 901 } 902 903 pmap->pm_stats.resident_count++; 904 905 mtx_lock_spin(&tlbivax_mutex); 906 tlb_miss_lock(); 907 908 tlb0_flush_entry(va); 909 if (pmap->pm_pdir[pdir_idx] == NULL) { 910 /* 911 * If we just allocated a new page table, hook it in 912 * the pdir. 913 */ 914 pmap->pm_pdir[pdir_idx] = ptbl; 915 } 916 pte = &(pmap->pm_pdir[pdir_idx][ptbl_idx]); 917 pte->rpn = VM_PAGE_TO_PHYS(m) & ~PTE_PA_MASK; 918 pte->flags |= (PTE_VALID | flags); 919 920 tlb_miss_unlock(); 921 mtx_unlock_spin(&tlbivax_mutex); 922} 923 924/* Return the pa for the given pmap/va. */ 925static vm_paddr_t 926pte_vatopa(mmu_t mmu, pmap_t pmap, vm_offset_t va) 927{ 928 vm_paddr_t pa = 0; 929 pte_t *pte; 930 931 pte = pte_find(mmu, pmap, va); 932 if ((pte != NULL) && PTE_ISVALID(pte)) 933 pa = (PTE_PA(pte) | (va & PTE_PA_MASK)); 934 return (pa); 935} 936 937/* Get a pointer to a PTE in a page table. */ 938static pte_t * 939pte_find(mmu_t mmu, pmap_t pmap, vm_offset_t va) 940{ 941 unsigned int pdir_idx = PDIR_IDX(va); 942 unsigned int ptbl_idx = PTBL_IDX(va); 943 944 KASSERT((pmap != NULL), ("pte_find: invalid pmap")); 945 946 if (pmap->pm_pdir[pdir_idx]) 947 return (&(pmap->pm_pdir[pdir_idx][ptbl_idx])); 948 949 return (NULL); 950} 951 952/**************************************************************************/ 953/* PMAP related */ 954/**************************************************************************/ 955 956/* 957 * This is called during e500_init, before the system is really initialized. 958 */ 959static void 960mmu_booke_bootstrap(mmu_t mmu, vm_offset_t start, vm_offset_t kernelend) 961{ 962 vm_offset_t phys_kernelend; 963 struct mem_region *mp, *mp1; 964 int cnt, i, j; 965 u_int s, e, sz; 966 u_int phys_avail_count; 967 vm_size_t physsz, hwphyssz, kstack0_sz; 968 vm_offset_t kernel_pdir, kstack0, va; 969 vm_paddr_t kstack0_phys; 970 void *dpcpu; 971 pte_t *pte; 972 973 debugf("mmu_booke_bootstrap: entered\n"); 974 975 /* Initialize invalidation mutex */ 976 mtx_init(&tlbivax_mutex, "tlbivax", NULL, MTX_SPIN); 977 978 /* Read TLB0 size and associativity. */ 979 tlb0_get_tlbconf(); 980 981 /* Align kernel start and end address (kernel image). */ 982 kernstart = trunc_page(start); 983 data_start = round_page(kernelend); 984 kernsize = data_start - kernstart; 985 986 data_end = data_start; 987 988 /* Allocate space for the message buffer. */ 989 msgbufp = (struct msgbuf *)data_end; 990 data_end += MSGBUF_SIZE; 991 debugf(" msgbufp at 0x%08x end = 0x%08x\n", (uint32_t)msgbufp, 992 data_end); 993 994 data_end = round_page(data_end); 995 996 /* Allocate the dynamic per-cpu area. */ 997 dpcpu = (void *)data_end; 998 data_end += DPCPU_SIZE; 999 dpcpu_init(dpcpu, 0); 1000 1001 /* Allocate space for ptbl_bufs. */ 1002 ptbl_bufs = (struct ptbl_buf *)data_end; 1003 data_end += sizeof(struct ptbl_buf) * PTBL_BUFS; 1004 debugf(" ptbl_bufs at 0x%08x end = 0x%08x\n", (uint32_t)ptbl_bufs, 1005 data_end); 1006 1007 data_end = round_page(data_end); 1008 1009 /* Allocate PTE tables for kernel KVA. 
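 * One ptbl (PTBL_PAGES pages) is needed for each PDIR_SIZE-sized chunk of
 * the VM_MIN_KERNEL_ADDRESS..VM_MAX_KERNEL_ADDRESS range; the backing
 * memory is carved out of the area following data_end.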
*/ 1010 kernel_pdir = data_end; 1011 kernel_ptbls = (VM_MAX_KERNEL_ADDRESS - VM_MIN_KERNEL_ADDRESS + 1012 PDIR_SIZE - 1) / PDIR_SIZE; 1013 data_end += kernel_ptbls * PTBL_PAGES * PAGE_SIZE; 1014 debugf(" kernel ptbls: %d\n", kernel_ptbls); 1015 debugf(" kernel pdir at 0x%08x end = 0x%08x\n", kernel_pdir, data_end); 1016 1017 debugf(" data_end: 0x%08x\n", data_end); 1018 if (data_end - kernstart > 0x1000000) { 1019 data_end = (data_end + 0x3fffff) & ~0x3fffff; 1020 tlb1_mapin_region(kernstart + 0x1000000, 1021 kernload + 0x1000000, data_end - kernstart - 0x1000000); 1022 } else 1023 data_end = (data_end + 0xffffff) & ~0xffffff; 1024 1025 debugf(" updated data_end: 0x%08x\n", data_end); 1026 1027 kernsize += data_end - data_start; 1028 1029 /* 1030 * Clear the structures - note we can only do it safely after the 1031 * possible additional TLB1 translations are in place (above) so that 1032 * all range up to the currently calculated 'data_end' is covered. 1033 */ 1034 memset((void *)ptbl_bufs, 0, sizeof(struct ptbl_buf) * PTBL_SIZE); 1035 memset((void *)kernel_pdir, 0, kernel_ptbls * PTBL_PAGES * PAGE_SIZE); 1036 1037 /*******************************************************/ 1038 /* Set the start and end of kva. */ 1039 /*******************************************************/ 1040 virtual_avail = round_page(data_end); 1041 virtual_end = VM_MAX_KERNEL_ADDRESS; 1042 1043 /* Allocate KVA space for page zero/copy operations. */ 1044 zero_page_va = virtual_avail; 1045 virtual_avail += PAGE_SIZE; 1046 zero_page_idle_va = virtual_avail; 1047 virtual_avail += PAGE_SIZE; 1048 copy_page_src_va = virtual_avail; 1049 virtual_avail += PAGE_SIZE; 1050 copy_page_dst_va = virtual_avail; 1051 virtual_avail += PAGE_SIZE; 1052 debugf("zero_page_va = 0x%08x\n", zero_page_va); 1053 debugf("zero_page_idle_va = 0x%08x\n", zero_page_idle_va); 1054 debugf("copy_page_src_va = 0x%08x\n", copy_page_src_va); 1055 debugf("copy_page_dst_va = 0x%08x\n", copy_page_dst_va); 1056 1057 /* Initialize page zero/copy mutexes. */ 1058 mtx_init(&zero_page_mutex, "mmu_booke_zero_page", NULL, MTX_DEF); 1059 mtx_init(©_page_mutex, "mmu_booke_copy_page", NULL, MTX_DEF); 1060 1061 /* Allocate KVA space for ptbl bufs. */ 1062 ptbl_buf_pool_vabase = virtual_avail; 1063 virtual_avail += PTBL_BUFS * PTBL_PAGES * PAGE_SIZE; 1064 debugf("ptbl_buf_pool_vabase = 0x%08x end = 0x%08x\n", 1065 ptbl_buf_pool_vabase, virtual_avail); 1066 1067 /* Calculate corresponding physical addresses for the kernel region. */ 1068 phys_kernelend = kernload + kernsize; 1069 debugf("kernel image and allocated data:\n"); 1070 debugf(" kernload = 0x%08x\n", kernload); 1071 debugf(" kernstart = 0x%08x\n", kernstart); 1072 debugf(" kernsize = 0x%08x\n", kernsize); 1073 1074 if (sizeof(phys_avail) / sizeof(phys_avail[0]) < availmem_regions_sz) 1075 panic("mmu_booke_bootstrap: phys_avail too small"); 1076 1077 /* 1078 * Remove kernel physical address range from avail regions list. Page 1079 * align all regions. Non-page aligned memory isn't very interesting 1080 * to us. Also, sort the entries for ascending addresses. 1081 */ 1082 1083 /* Retrieve phys/avail mem regions */ 1084 mem_regions(&physmem_regions, &physmem_regions_sz, 1085 &availmem_regions, &availmem_regions_sz); 1086 sz = 0; 1087 cnt = availmem_regions_sz; 1088 debugf("processing avail regions:\n"); 1089 for (mp = availmem_regions; mp->mr_size; mp++) { 1090 s = mp->mr_start; 1091 e = mp->mr_start + mp->mr_size; 1092 debugf(" %08x-%08x -> ", s, e); 1093 /* Check whether this region holds all of the kernel. 
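 * If it does, split it: the part above the kernel image is appended as a
 * new region at the end of the list and this region is truncated to end
 * at kernload.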
*/ 1094 if (s < kernload && e > phys_kernelend) { 1095 availmem_regions[cnt].mr_start = phys_kernelend; 1096 availmem_regions[cnt++].mr_size = e - phys_kernelend; 1097 e = kernload; 1098 } 1099 /* Look whether this regions starts within the kernel. */ 1100 if (s >= kernload && s < phys_kernelend) { 1101 if (e <= phys_kernelend) 1102 goto empty; 1103 s = phys_kernelend; 1104 } 1105 /* Now look whether this region ends within the kernel. */ 1106 if (e > kernload && e <= phys_kernelend) { 1107 if (s >= kernload) 1108 goto empty; 1109 e = kernload; 1110 } 1111 /* Now page align the start and size of the region. */ 1112 s = round_page(s); 1113 e = trunc_page(e); 1114 if (e < s) 1115 e = s; 1116 sz = e - s; 1117 debugf("%08x-%08x = %x\n", s, e, sz); 1118 1119 /* Check whether some memory is left here. */ 1120 if (sz == 0) { 1121 empty: 1122 memmove(mp, mp + 1, 1123 (cnt - (mp - availmem_regions)) * sizeof(*mp)); 1124 cnt--; 1125 mp--; 1126 continue; 1127 } 1128 1129 /* Do an insertion sort. */ 1130 for (mp1 = availmem_regions; mp1 < mp; mp1++) 1131 if (s < mp1->mr_start) 1132 break; 1133 if (mp1 < mp) { 1134 memmove(mp1 + 1, mp1, (char *)mp - (char *)mp1); 1135 mp1->mr_start = s; 1136 mp1->mr_size = sz; 1137 } else { 1138 mp->mr_start = s; 1139 mp->mr_size = sz; 1140 } 1141 } 1142 availmem_regions_sz = cnt; 1143 1144 /*******************************************************/ 1145 /* Steal physical memory for kernel stack from the end */ 1146 /* of the first avail region */ 1147 /*******************************************************/ 1148 kstack0_sz = KSTACK_PAGES * PAGE_SIZE; 1149 kstack0_phys = availmem_regions[0].mr_start + 1150 availmem_regions[0].mr_size; 1151 kstack0_phys -= kstack0_sz; 1152 availmem_regions[0].mr_size -= kstack0_sz; 1153 1154 /*******************************************************/ 1155 /* Fill in phys_avail table, based on availmem_regions */ 1156 /*******************************************************/ 1157 phys_avail_count = 0; 1158 physsz = 0; 1159 hwphyssz = 0; 1160 TUNABLE_ULONG_FETCH("hw.physmem", (u_long *) &hwphyssz); 1161 1162 debugf("fill in phys_avail:\n"); 1163 for (i = 0, j = 0; i < availmem_regions_sz; i++, j += 2) { 1164 1165 debugf(" region: 0x%08x - 0x%08x (0x%08x)\n", 1166 availmem_regions[i].mr_start, 1167 availmem_regions[i].mr_start + 1168 availmem_regions[i].mr_size, 1169 availmem_regions[i].mr_size); 1170 1171 if (hwphyssz != 0 && 1172 (physsz + availmem_regions[i].mr_size) >= hwphyssz) { 1173 debugf(" hw.physmem adjust\n"); 1174 if (physsz < hwphyssz) { 1175 phys_avail[j] = availmem_regions[i].mr_start; 1176 phys_avail[j + 1] = 1177 availmem_regions[i].mr_start + 1178 hwphyssz - physsz; 1179 physsz = hwphyssz; 1180 phys_avail_count++; 1181 } 1182 break; 1183 } 1184 1185 phys_avail[j] = availmem_regions[i].mr_start; 1186 phys_avail[j + 1] = availmem_regions[i].mr_start + 1187 availmem_regions[i].mr_size; 1188 phys_avail_count++; 1189 physsz += availmem_regions[i].mr_size; 1190 } 1191 physmem = btoc(physsz); 1192 1193 /* Calculate the last available physical address. */ 1194 for (i = 0; phys_avail[i + 2] != 0; i += 2) 1195 ; 1196 Maxmem = powerpc_btop(phys_avail[i + 1]); 1197 1198 debugf("Maxmem = 0x%08lx\n", Maxmem); 1199 debugf("phys_avail_count = %d\n", phys_avail_count); 1200 debugf("physsz = 0x%08x physmem = %ld (0x%08lx)\n", physsz, physmem, 1201 physmem); 1202 1203 /*******************************************************/ 1204 /* Initialize (statically allocated) kernel pmap. 
*/ 1205 /*******************************************************/ 1206 PMAP_LOCK_INIT(kernel_pmap); 1207 kptbl_min = VM_MIN_KERNEL_ADDRESS / PDIR_SIZE; 1208 1209 debugf("kernel_pmap = 0x%08x\n", (uint32_t)kernel_pmap); 1210 debugf("kptbl_min = %d, kernel_ptbls = %d\n", kptbl_min, kernel_ptbls); 1211 debugf("kernel pdir range: 0x%08x - 0x%08x\n", 1212 kptbl_min * PDIR_SIZE, (kptbl_min + kernel_ptbls) * PDIR_SIZE - 1); 1213 1214 /* Initialize kernel pdir */ 1215 for (i = 0; i < kernel_ptbls; i++) 1216 kernel_pmap->pm_pdir[kptbl_min + i] = 1217 (pte_t *)(kernel_pdir + (i * PAGE_SIZE * PTBL_PAGES)); 1218 1219 for (i = 0; i < MAXCPU; i++) { 1220 kernel_pmap->pm_tid[i] = TID_KERNEL; 1221 1222 /* Initialize each CPU's tidbusy entry 0 with kernel_pmap */ 1223 tidbusy[i][0] = kernel_pmap; 1224 } 1225 1226 /* 1227 * Fill in PTEs covering kernel code and data. They are not required 1228 * for address translation, as this area is covered by static TLB1 1229 * entries, but for pte_vatopa() to work correctly with kernel area 1230 * addresses. 1231 */ 1232 for (va = KERNBASE; va < data_end; va += PAGE_SIZE) { 1233 pte = &(kernel_pmap->pm_pdir[PDIR_IDX(va)][PTBL_IDX(va)]); 1234 pte->rpn = kernload + (va - KERNBASE); 1235 pte->flags = PTE_M | PTE_SR | PTE_SW | PTE_SX | PTE_WIRED | 1236 PTE_VALID; 1237 } 1238 /* Mark kernel_pmap active on all CPUs */ 1239 kernel_pmap->pm_active = ~0; 1240 1241 /*******************************************************/ 1242 /* Final setup */ 1243 /*******************************************************/ 1244 1245 /* Enter kstack0 into kernel map, provide guard page */ 1246 kstack0 = virtual_avail + KSTACK_GUARD_PAGES * PAGE_SIZE; 1247 thread0.td_kstack = kstack0; 1248 thread0.td_kstack_pages = KSTACK_PAGES; 1249 1250 debugf("kstack_sz = 0x%08x\n", kstack0_sz); 1251 debugf("kstack0_phys at 0x%08x - 0x%08x\n", 1252 kstack0_phys, kstack0_phys + kstack0_sz); 1253 debugf("kstack0 at 0x%08x - 0x%08x\n", kstack0, kstack0 + kstack0_sz); 1254 1255 virtual_avail += KSTACK_GUARD_PAGES * PAGE_SIZE + kstack0_sz; 1256 for (i = 0; i < KSTACK_PAGES; i++) { 1257 mmu_booke_kenter(mmu, kstack0, kstack0_phys); 1258 kstack0 += PAGE_SIZE; 1259 kstack0_phys += PAGE_SIZE; 1260 } 1261 1262 debugf("virtual_avail = %08x\n", virtual_avail); 1263 debugf("virtual_end = %08x\n", virtual_end); 1264 1265 debugf("mmu_booke_bootstrap: exit\n"); 1266} 1267 1268void 1269pmap_bootstrap_ap(volatile uint32_t *trcp __unused) 1270{ 1271 int i; 1272 1273 /* 1274 * Finish TLB1 configuration: the BSP already set up its TLB1 and we 1275 * have the snapshot of its contents in the s/w tlb1[] table, so use 1276 * these values directly to (re)program AP's TLB1 hardware. 1277 */ 1278 for (i = 0; i < tlb1_idx; i ++) { 1279 /* Skip invalid entries */ 1280 if (!(tlb1[i].mas1 & MAS1_VALID)) 1281 continue; 1282 1283 tlb1_write_entry(i); 1284 } 1285 1286 set_mas4_defaults(); 1287} 1288 1289/* 1290 * Get the physical page address for the given pmap/virtual address. 1291 */ 1292static vm_paddr_t 1293mmu_booke_extract(mmu_t mmu, pmap_t pmap, vm_offset_t va) 1294{ 1295 vm_paddr_t pa; 1296 1297 PMAP_LOCK(pmap); 1298 pa = pte_vatopa(mmu, pmap, va); 1299 PMAP_UNLOCK(pmap); 1300 1301 return (pa); 1302} 1303 1304/* 1305 * Extract the physical page address associated with the given 1306 * kernel virtual address. 1307 */ 1308static vm_paddr_t 1309mmu_booke_kextract(mmu_t mmu, vm_offset_t va) 1310{ 1311 1312 return (pte_vatopa(mmu, kernel_pmap, va)); 1313} 1314 1315/* 1316 * Initialize the pmap module. 
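 * (it sets up the pv entry UMA zone, its high water mark, and the ptbl
 * buffer pool).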
1317 * Called by vm_init, to initialize any structures that the pmap 1318 * system needs to map virtual memory. 1319 */ 1320static void 1321mmu_booke_init(mmu_t mmu) 1322{ 1323 int shpgperproc = PMAP_SHPGPERPROC; 1324 1325 /* 1326 * Initialize the address space (zone) for the pv entries. Set a 1327 * high water mark so that the system can recover from excessive 1328 * numbers of pv entries. 1329 */ 1330 pvzone = uma_zcreate("PV ENTRY", sizeof(struct pv_entry), NULL, NULL, 1331 NULL, NULL, UMA_ALIGN_PTR, UMA_ZONE_VM | UMA_ZONE_NOFREE); 1332 1333 TUNABLE_INT_FETCH("vm.pmap.shpgperproc", &shpgperproc); 1334 pv_entry_max = shpgperproc * maxproc + cnt.v_page_count; 1335 1336 TUNABLE_INT_FETCH("vm.pmap.pv_entries", &pv_entry_max); 1337 pv_entry_high_water = 9 * (pv_entry_max / 10); 1338 1339 uma_zone_set_obj(pvzone, &pvzone_obj, pv_entry_max); 1340 1341 /* Pre-fill pvzone with initial number of pv entries. */ 1342 uma_prealloc(pvzone, PV_ENTRY_ZONE_MIN); 1343 1344 /* Initialize ptbl allocation. */ 1345 ptbl_init(); 1346} 1347 1348/* 1349 * Map a list of wired pages into kernel virtual address space. This is 1350 * intended for temporary mappings which do not need page modification or 1351 * references recorded. Existing mappings in the region are overwritten. 1352 */ 1353static void 1354mmu_booke_qenter(mmu_t mmu, vm_offset_t sva, vm_page_t *m, int count) 1355{ 1356 vm_offset_t va; 1357 1358 va = sva; 1359 while (count-- > 0) { 1360 mmu_booke_kenter(mmu, va, VM_PAGE_TO_PHYS(*m)); 1361 va += PAGE_SIZE; 1362 m++; 1363 } 1364} 1365 1366/* 1367 * Remove page mappings from kernel virtual address space. Intended for 1368 * temporary mappings entered by mmu_booke_qenter. 1369 */ 1370static void 1371mmu_booke_qremove(mmu_t mmu, vm_offset_t sva, int count) 1372{ 1373 vm_offset_t va; 1374 1375 va = sva; 1376 while (count-- > 0) { 1377 mmu_booke_kremove(mmu, va); 1378 va += PAGE_SIZE; 1379 } 1380} 1381 1382/* 1383 * Map a wired page into kernel virtual address space. 1384 */ 1385static void 1386mmu_booke_kenter(mmu_t mmu, vm_offset_t va, vm_offset_t pa) 1387{ 1388 unsigned int pdir_idx = PDIR_IDX(va); 1389 unsigned int ptbl_idx = PTBL_IDX(va); 1390 uint32_t flags; 1391 pte_t *pte; 1392 1393 KASSERT(((va >= VM_MIN_KERNEL_ADDRESS) && 1394 (va <= VM_MAX_KERNEL_ADDRESS)), ("mmu_booke_kenter: invalid va")); 1395 1396 flags = 0; 1397 flags |= (PTE_SR | PTE_SW | PTE_SX | PTE_WIRED | PTE_VALID); 1398 flags |= PTE_M; 1399 1400 pte = &(kernel_pmap->pm_pdir[pdir_idx][ptbl_idx]); 1401 1402 mtx_lock_spin(&tlbivax_mutex); 1403 tlb_miss_lock(); 1404 1405 if (PTE_ISVALID(pte)) { 1406 1407 CTR1(KTR_PMAP, "%s: replacing entry!", __func__); 1408 1409 /* Flush entry from TLB0 */ 1410 tlb0_flush_entry(va); 1411 } 1412 1413 pte->rpn = pa & ~PTE_PA_MASK; 1414 pte->flags = flags; 1415 1416 //debugf("mmu_booke_kenter: pdir_idx = %d ptbl_idx = %d va=0x%08x " 1417 // "pa=0x%08x rpn=0x%08x flags=0x%08x\n", 1418 // pdir_idx, ptbl_idx, va, pa, pte->rpn, pte->flags); 1419 1420 /* Flush the real memory from the instruction cache. */ 1421 if ((flags & (PTE_I | PTE_G)) == 0) { 1422 __syncicache((void *)va, PAGE_SIZE); 1423 } 1424 1425 tlb_miss_unlock(); 1426 mtx_unlock_spin(&tlbivax_mutex); 1427} 1428 1429/* 1430 * Remove a page from kernel page table. 
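 * The TLB0 entry covering the VA is invalidated (under tlbivax_mutex and
 * the per-CPU tlb_miss locks) and the PTE is then cleared.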
1431 */ 1432static void 1433mmu_booke_kremove(mmu_t mmu, vm_offset_t va) 1434{ 1435 unsigned int pdir_idx = PDIR_IDX(va); 1436 unsigned int ptbl_idx = PTBL_IDX(va); 1437 pte_t *pte; 1438 1439// CTR2(KTR_PMAP,("%s: s (va = 0x%08x)\n", __func__, va)); 1440 1441 KASSERT(((va >= VM_MIN_KERNEL_ADDRESS) && 1442 (va <= VM_MAX_KERNEL_ADDRESS)), 1443 ("mmu_booke_kremove: invalid va")); 1444 1445 pte = &(kernel_pmap->pm_pdir[pdir_idx][ptbl_idx]); 1446 1447 if (!PTE_ISVALID(pte)) { 1448 1449 CTR1(KTR_PMAP, "%s: invalid pte", __func__); 1450 1451 return; 1452 } 1453 1454 mtx_lock_spin(&tlbivax_mutex); 1455 tlb_miss_lock(); 1456 1457 /* Invalidate entry in TLB0, update PTE. */ 1458 tlb0_flush_entry(va); 1459 pte->flags = 0; 1460 pte->rpn = 0; 1461 1462 tlb_miss_unlock(); 1463 mtx_unlock_spin(&tlbivax_mutex); 1464} 1465 1466/* 1467 * Initialize pmap associated with process 0. 1468 */ 1469static void 1470mmu_booke_pinit0(mmu_t mmu, pmap_t pmap) 1471{ 1472 1473 mmu_booke_pinit(mmu, pmap); 1474 PCPU_SET(curpmap, pmap); 1475} 1476 1477/* 1478 * Initialize a preallocated and zeroed pmap structure, 1479 * such as one in a vmspace structure. 1480 */ 1481static void 1482mmu_booke_pinit(mmu_t mmu, pmap_t pmap) 1483{ 1484 int i; 1485 1486 CTR4(KTR_PMAP, "%s: pmap = %p, proc %d '%s'", __func__, pmap, 1487 curthread->td_proc->p_pid, curthread->td_proc->p_comm); 1488 1489 KASSERT((pmap != kernel_pmap), ("pmap_pinit: initializing kernel_pmap")); 1490 1491 PMAP_LOCK_INIT(pmap); 1492 for (i = 0; i < MAXCPU; i++) 1493 pmap->pm_tid[i] = TID_NONE; 1494 pmap->pm_active = 0; 1495 bzero(&pmap->pm_stats, sizeof(pmap->pm_stats)); 1496 bzero(&pmap->pm_pdir, sizeof(pte_t *) * PDIR_NENTRIES); 1497 TAILQ_INIT(&pmap->pm_ptbl_list); 1498} 1499 1500/* 1501 * Release any resources held by the given physical map. 1502 * Called when a pmap initialized by mmu_booke_pinit is being released. 1503 * Should only be called if the map contains no valid mappings. 1504 */ 1505static void 1506mmu_booke_release(mmu_t mmu, pmap_t pmap) 1507{ 1508 1509 printf("mmu_booke_release: s\n"); 1510 1511 KASSERT(pmap->pm_stats.resident_count == 0, 1512 ("pmap_release: pmap resident count %ld != 0", 1513 pmap->pm_stats.resident_count)); 1514 1515 PMAP_LOCK_DESTROY(pmap); 1516} 1517 1518/* 1519 * Insert the given physical page at the specified virtual address in the 1520 * target physical map with the protection requested. If specified the page 1521 * will be wired down. 
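 * This is a thin wrapper that takes the page queues and pmap locks and
 * defers the real work to mmu_booke_enter_locked().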
1522 */ 1523static void 1524mmu_booke_enter(mmu_t mmu, pmap_t pmap, vm_offset_t va, vm_page_t m, 1525 vm_prot_t prot, boolean_t wired) 1526{ 1527 1528 vm_page_lock_queues(); 1529 PMAP_LOCK(pmap); 1530 mmu_booke_enter_locked(mmu, pmap, va, m, prot, wired); 1531 vm_page_unlock_queues(); 1532 PMAP_UNLOCK(pmap); 1533} 1534 1535static void 1536mmu_booke_enter_locked(mmu_t mmu, pmap_t pmap, vm_offset_t va, vm_page_t m, 1537 vm_prot_t prot, boolean_t wired) 1538{ 1539 pte_t *pte; 1540 vm_paddr_t pa; 1541 uint32_t flags; 1542 int su, sync; 1543 1544 pa = VM_PAGE_TO_PHYS(m); 1545 su = (pmap == kernel_pmap); 1546 sync = 0; 1547 1548 //debugf("mmu_booke_enter_locked: s (pmap=0x%08x su=%d tid=%d m=0x%08x va=0x%08x " 1549 // "pa=0x%08x prot=0x%08x wired=%d)\n", 1550 // (u_int32_t)pmap, su, pmap->pm_tid, 1551 // (u_int32_t)m, va, pa, prot, wired); 1552 1553 if (su) { 1554 KASSERT(((va >= virtual_avail) && 1555 (va <= VM_MAX_KERNEL_ADDRESS)), 1556 ("mmu_booke_enter_locked: kernel pmap, non kernel va")); 1557 } else { 1558 KASSERT((va <= VM_MAXUSER_ADDRESS), 1559 ("mmu_booke_enter_locked: user pmap, non user va")); 1560 } 1561 KASSERT((m->oflags & VPO_BUSY) != 0 || VM_OBJECT_LOCKED(m->object), 1562 ("mmu_booke_enter_locked: page %p is not busy", m)); 1563 1564 PMAP_LOCK_ASSERT(pmap, MA_OWNED); 1565 1566 /* 1567 * If there is an existing mapping, and the physical address has not 1568 * changed, must be protection or wiring change. 1569 */ 1570 if (((pte = pte_find(mmu, pmap, va)) != NULL) && 1571 (PTE_ISVALID(pte)) && (PTE_PA(pte) == pa)) { 1572 1573 /* 1574 * Before actually updating pte->flags we calculate and 1575 * prepare its new value in a helper var. 1576 */ 1577 flags = pte->flags; 1578 flags &= ~(PTE_UW | PTE_UX | PTE_SW | PTE_SX | PTE_MODIFIED); 1579 1580 /* Wiring change, just update stats. */ 1581 if (wired) { 1582 if (!PTE_ISWIRED(pte)) { 1583 flags |= PTE_WIRED; 1584 pmap->pm_stats.wired_count++; 1585 } 1586 } else { 1587 if (PTE_ISWIRED(pte)) { 1588 flags &= ~PTE_WIRED; 1589 pmap->pm_stats.wired_count--; 1590 } 1591 } 1592 1593 if (prot & VM_PROT_WRITE) { 1594 /* Add write permissions. */ 1595 flags |= PTE_SW; 1596 if (!su) 1597 flags |= PTE_UW; 1598 1599 vm_page_flag_set(m, PG_WRITEABLE); 1600 } else { 1601 /* Handle modified pages, sense modify status. */ 1602 1603 /* 1604 * The PTE_MODIFIED flag could be set by underlying 1605 * TLB misses since we last read it (above), possibly 1606 * other CPUs could update it so we check in the PTE 1607 * directly rather than rely on that saved local flags 1608 * copy. 1609 */ 1610 if (PTE_ISMODIFIED(pte)) 1611 vm_page_dirty(m); 1612 } 1613 1614 if (prot & VM_PROT_EXECUTE) { 1615 flags |= PTE_SX; 1616 if (!su) 1617 flags |= PTE_UX; 1618 1619 /* 1620 * Check existing flags for execute permissions: if we 1621 * are turning execute permissions on, icache should 1622 * be flushed. 1623 */ 1624 if ((flags & (PTE_UX | PTE_SX)) == 0) 1625 sync++; 1626 } 1627 1628 flags &= ~PTE_REFERENCED; 1629 1630 /* 1631 * The new flags value is all calculated -- only now actually 1632 * update the PTE. 1633 */ 1634 mtx_lock_spin(&tlbivax_mutex); 1635 tlb_miss_lock(); 1636 1637 tlb0_flush_entry(va); 1638 pte->flags = flags; 1639 1640 tlb_miss_unlock(); 1641 mtx_unlock_spin(&tlbivax_mutex); 1642 1643 } else { 1644 /* 1645 * If there is an existing mapping, but it's for a different 1646 * physical address, pte_enter() will delete the old mapping. 
1647 */ 1648 //if ((pte != NULL) && PTE_ISVALID(pte)) 1649 // debugf("mmu_booke_enter_locked: replace\n"); 1650 //else 1651 // debugf("mmu_booke_enter_locked: new\n"); 1652 1653 /* Now set up the flags and install the new mapping. */ 1654 flags = (PTE_SR | PTE_VALID); 1655 flags |= PTE_M; 1656 1657 if (!su) 1658 flags |= PTE_UR; 1659 1660 if (prot & VM_PROT_WRITE) { 1661 flags |= PTE_SW; 1662 if (!su) 1663 flags |= PTE_UW; 1664 1665 vm_page_flag_set(m, PG_WRITEABLE); 1666 } 1667 1668 if (prot & VM_PROT_EXECUTE) { 1669 flags |= PTE_SX; 1670 if (!su) 1671 flags |= PTE_UX; 1672 } 1673 1674 /* If its wired update stats. */ 1675 if (wired) { 1676 pmap->pm_stats.wired_count++; 1677 flags |= PTE_WIRED; 1678 } 1679 1680 pte_enter(mmu, pmap, m, va, flags); 1681 1682 /* Flush the real memory from the instruction cache. */ 1683 if (prot & VM_PROT_EXECUTE) 1684 sync++; 1685 } 1686 1687 if (sync && (su || pmap == PCPU_GET(curpmap))) { 1688 __syncicache((void *)va, PAGE_SIZE); 1689 sync = 0; 1690 } 1691} 1692 1693/* 1694 * Maps a sequence of resident pages belonging to the same object. 1695 * The sequence begins with the given page m_start. This page is 1696 * mapped at the given virtual address start. Each subsequent page is 1697 * mapped at a virtual address that is offset from start by the same 1698 * amount as the page is offset from m_start within the object. The 1699 * last page in the sequence is the page with the largest offset from 1700 * m_start that can be mapped at a virtual address less than the given 1701 * virtual address end. Not every virtual page between start and end 1702 * is mapped; only those for which a resident page exists with the 1703 * corresponding offset from m_start are mapped. 1704 */ 1705static void 1706mmu_booke_enter_object(mmu_t mmu, pmap_t pmap, vm_offset_t start, 1707 vm_offset_t end, vm_page_t m_start, vm_prot_t prot) 1708{ 1709 vm_page_t m; 1710 vm_pindex_t diff, psize; 1711 1712 psize = atop(end - start); 1713 m = m_start; 1714 PMAP_LOCK(pmap); 1715 while (m != NULL && (diff = m->pindex - m_start->pindex) < psize) { 1716 mmu_booke_enter_locked(mmu, pmap, start + ptoa(diff), m, 1717 prot & (VM_PROT_READ | VM_PROT_EXECUTE), FALSE); 1718 m = TAILQ_NEXT(m, listq); 1719 } 1720 PMAP_UNLOCK(pmap); 1721} 1722 1723static void 1724mmu_booke_enter_quick(mmu_t mmu, pmap_t pmap, vm_offset_t va, vm_page_t m, 1725 vm_prot_t prot) 1726{ 1727 1728 vm_page_lock_queues(); 1729 PMAP_LOCK(pmap); 1730 mmu_booke_enter_locked(mmu, pmap, va, m, 1731 prot & (VM_PROT_READ | VM_PROT_EXECUTE), FALSE); 1732 vm_page_unlock_queues(); 1733 PMAP_UNLOCK(pmap); 1734} 1735 1736/* 1737 * Remove the given range of addresses from the specified map. 1738 * 1739 * It is assumed that the start and end are properly rounded to the page size. 
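 * PMAP_REMOVE_DONE() lets us return early once a user pmap has no resident
 * pages left, and hold_flag selects whether pte_remove() should also drop
 * the ptbl hold count so that page tables emptied by this removal get freed.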
1740 */ 1741static void 1742mmu_booke_remove(mmu_t mmu, pmap_t pmap, vm_offset_t va, vm_offset_t endva) 1743{ 1744 pte_t *pte; 1745 uint8_t hold_flag; 1746 1747 int su = (pmap == kernel_pmap); 1748 1749 //debugf("mmu_booke_remove: s (su = %d pmap=0x%08x tid=%d va=0x%08x endva=0x%08x)\n", 1750 // su, (u_int32_t)pmap, pmap->pm_tid, va, endva); 1751 1752 if (su) { 1753 KASSERT(((va >= virtual_avail) && 1754 (va <= VM_MAX_KERNEL_ADDRESS)), 1755 ("mmu_booke_remove: kernel pmap, non kernel va")); 1756 } else { 1757 KASSERT((va <= VM_MAXUSER_ADDRESS), 1758 ("mmu_booke_remove: user pmap, non user va")); 1759 } 1760 1761 if (PMAP_REMOVE_DONE(pmap)) { 1762 //debugf("mmu_booke_remove: e (empty)\n"); 1763 return; 1764 } 1765 1766 hold_flag = PTBL_HOLD_FLAG(pmap); 1767 //debugf("mmu_booke_remove: hold_flag = %d\n", hold_flag); 1768 1769 vm_page_lock_queues(); 1770 PMAP_LOCK(pmap); 1771 for (; va < endva; va += PAGE_SIZE) { 1772 pte = pte_find(mmu, pmap, va); 1773 if ((pte != NULL) && PTE_ISVALID(pte)) 1774 pte_remove(mmu, pmap, va, hold_flag); 1775 } 1776 PMAP_UNLOCK(pmap); 1777 vm_page_unlock_queues(); 1778 1779 //debugf("mmu_booke_remove: e\n"); 1780} 1781 1782/* 1783 * Remove physical page from all pmaps in which it resides. 1784 */ 1785static void 1786mmu_booke_remove_all(mmu_t mmu, vm_page_t m) 1787{ 1788 pv_entry_t pv, pvn; 1789 uint8_t hold_flag; 1790 1791 vm_page_lock_queues(); 1792 for (pv = TAILQ_FIRST(&m->md.pv_list); pv != NULL; pv = pvn) { 1793 pvn = TAILQ_NEXT(pv, pv_link); 1794 1795 PMAP_LOCK(pv->pv_pmap); 1796 hold_flag = PTBL_HOLD_FLAG(pv->pv_pmap); 1797 pte_remove(mmu, pv->pv_pmap, pv->pv_va, hold_flag); 1798 PMAP_UNLOCK(pv->pv_pmap); 1799 } 1800 vm_page_flag_clear(m, PG_WRITEABLE); 1801 vm_page_unlock_queues(); 1802} 1803 1804/* 1805 * Map a range of physical addresses into kernel virtual address space. 1806 */ 1807static vm_offset_t 1808mmu_booke_map(mmu_t mmu, vm_offset_t *virt, vm_offset_t pa_start, 1809 vm_offset_t pa_end, int prot) 1810{ 1811 vm_offset_t sva = *virt; 1812 vm_offset_t va = sva; 1813 1814 //debugf("mmu_booke_map: s (sva = 0x%08x pa_start = 0x%08x pa_end = 0x%08x)\n", 1815 // sva, pa_start, pa_end); 1816 1817 while (pa_start < pa_end) { 1818 mmu_booke_kenter(mmu, va, pa_start); 1819 va += PAGE_SIZE; 1820 pa_start += PAGE_SIZE; 1821 } 1822 *virt = va; 1823 1824 //debugf("mmu_booke_map: e (va = 0x%08x)\n", va); 1825 return (sva); 1826} 1827 1828/* 1829 * The pmap must be activated before it's address space can be accessed in any 1830 * way. 1831 */ 1832static void 1833mmu_booke_activate(mmu_t mmu, struct thread *td) 1834{ 1835 pmap_t pmap; 1836 1837 pmap = &td->td_proc->p_vmspace->vm_pmap; 1838 1839 CTR5(KTR_PMAP, "%s: s (td = %p, proc = '%s', id = %d, pmap = 0x%08x)", 1840 __func__, td, td->td_proc->p_comm, td->td_proc->p_pid, pmap); 1841 1842 KASSERT((pmap != kernel_pmap), ("mmu_booke_activate: kernel_pmap!")); 1843 1844 mtx_lock_spin(&sched_lock); 1845 1846 atomic_set_int(&pmap->pm_active, PCPU_GET(cpumask)); 1847 PCPU_SET(curpmap, pmap); 1848 1849 if (pmap->pm_tid[PCPU_GET(cpuid)] == TID_NONE) 1850 tid_alloc(pmap); 1851 1852 /* Load PID0 register with pmap tid value. */ 1853 mtspr(SPR_PID0, pmap->pm_tid[PCPU_GET(cpuid)]); 1854 __asm __volatile("isync"); 1855 1856 mtx_unlock_spin(&sched_lock); 1857 1858 CTR3(KTR_PMAP, "%s: e (tid = %d for '%s')", __func__, 1859 pmap->pm_tid[PCPU_GET(cpuid)], td->td_proc->p_comm); 1860} 1861 1862/* 1863 * Deactivate the specified process's address space. 
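 * Only the CPU's bit in pm_active and the curpmap pointer are updated; the
 * pmap's TID is not released here.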
 */
static void
mmu_booke_deactivate(mmu_t mmu, struct thread *td)
{
	pmap_t pmap;

	pmap = &td->td_proc->p_vmspace->vm_pmap;

	CTR5(KTR_PMAP, "%s: td=%p, proc = '%s', id = %d, pmap = 0x%08x",
	    __func__, td, td->td_proc->p_comm, td->td_proc->p_pid, pmap);

	atomic_clear_int(&pmap->pm_active, PCPU_GET(cpumask));
	PCPU_SET(curpmap, NULL);
}

/*
 * Copy the range specified by src_addr/len
 * from the source map to the range dst_addr/len
 * in the destination map.
 *
 * This routine is only advisory and need not do anything.
 */
static void
mmu_booke_copy(mmu_t mmu, pmap_t dst_pmap, pmap_t src_pmap,
    vm_offset_t dst_addr, vm_size_t len, vm_offset_t src_addr)
{

}

/*
 * Set the physical protection on the specified range of this map as requested.
 */
static void
mmu_booke_protect(mmu_t mmu, pmap_t pmap, vm_offset_t sva, vm_offset_t eva,
    vm_prot_t prot)
{
	vm_offset_t va;
	vm_page_t m;
	pte_t *pte;

	if ((prot & VM_PROT_READ) == VM_PROT_NONE) {
		mmu_booke_remove(mmu, pmap, sva, eva);
		return;
	}

	if (prot & VM_PROT_WRITE)
		return;

	vm_page_lock_queues();
	PMAP_LOCK(pmap);
	for (va = sva; va < eva; va += PAGE_SIZE) {
		if ((pte = pte_find(mmu, pmap, va)) != NULL) {
			if (PTE_ISVALID(pte)) {
				m = PHYS_TO_VM_PAGE(PTE_PA(pte));

				mtx_lock_spin(&tlbivax_mutex);
				tlb_miss_lock();

				/* Handle modified pages. */
				if (PTE_ISMODIFIED(pte) && PTE_ISMANAGED(pte))
					vm_page_dirty(m);

				tlb0_flush_entry(va);
				pte->flags &= ~(PTE_UW | PTE_SW | PTE_MODIFIED);

				tlb_miss_unlock();
				mtx_unlock_spin(&tlbivax_mutex);
			}
		}
	}
	PMAP_UNLOCK(pmap);
	vm_page_unlock_queues();
}

/*
 * Clear the write and modified bits in each of the given page's mappings.
 */
static void
mmu_booke_remove_write(mmu_t mmu, vm_page_t m)
{
	pv_entry_t pv;
	pte_t *pte;

	KASSERT((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) == 0,
	    ("mmu_booke_remove_write: page %p is not managed", m));

	/*
	 * If the page is not VPO_BUSY, then PG_WRITEABLE cannot be set by
	 * another thread while the object is locked. Thus, if PG_WRITEABLE
	 * is clear, no page table entries need updating.
	 */
	VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED);
	if ((m->oflags & VPO_BUSY) == 0 &&
	    (m->flags & PG_WRITEABLE) == 0)
		return;
	vm_page_lock_queues();
	TAILQ_FOREACH(pv, &m->md.pv_list, pv_link) {
		PMAP_LOCK(pv->pv_pmap);
		if ((pte = pte_find(mmu, pv->pv_pmap, pv->pv_va)) != NULL) {
			if (PTE_ISVALID(pte)) {
				m = PHYS_TO_VM_PAGE(PTE_PA(pte));

				mtx_lock_spin(&tlbivax_mutex);
				tlb_miss_lock();

				/* Handle modified pages. */
				if (PTE_ISMODIFIED(pte))
					vm_page_dirty(m);

				/* Flush mapping from TLB0. */
				pte->flags &= ~(PTE_UW | PTE_SW | PTE_MODIFIED);

				tlb_miss_unlock();
				mtx_unlock_spin(&tlbivax_mutex);
			}
		}
		PMAP_UNLOCK(pv->pv_pmap);
	}
	vm_page_flag_clear(m, PG_WRITEABLE);
	vm_page_unlock_queues();
}

static void
mmu_booke_sync_icache(mmu_t mmu, pmap_t pm, vm_offset_t va, vm_size_t sz)
{
	pte_t *pte;
	pmap_t pmap;
	vm_page_t m;
	vm_offset_t addr;
	vm_paddr_t pa;
	int active, valid;

	va = trunc_page(va);
	sz = round_page(sz);

	vm_page_lock_queues();
	pmap = PCPU_GET(curpmap);
	active = (pm == kernel_pmap || pm == pmap) ? 1 : 0;
	while (sz > 0) {
		PMAP_LOCK(pm);
		pte = pte_find(mmu, pm, va);
		valid = (pte != NULL && PTE_ISVALID(pte)) ? 1 : 0;
		if (valid)
			pa = PTE_PA(pte);
		PMAP_UNLOCK(pm);
		if (valid) {
			if (!active) {
				/* Create a mapping in the active pmap. */
				addr = 0;
				m = PHYS_TO_VM_PAGE(pa);
				PMAP_LOCK(pmap);
				pte_enter(mmu, pmap, m, addr,
				    PTE_SR | PTE_VALID | PTE_UR);
				__syncicache((void *)addr, PAGE_SIZE);
				pte_remove(mmu, pmap, addr, PTBL_UNHOLD);
				PMAP_UNLOCK(pmap);
			} else
				__syncicache((void *)va, PAGE_SIZE);
		}
		va += PAGE_SIZE;
		sz -= PAGE_SIZE;
	}
	vm_page_unlock_queues();
}

/*
 * Atomically extract and hold the physical page with the given
 * pmap and virtual address pair if that mapping permits the given
 * protection.
 */
static vm_page_t
mmu_booke_extract_and_hold(mmu_t mmu, pmap_t pmap, vm_offset_t va,
    vm_prot_t prot)
{
	pte_t *pte;
	vm_page_t m;
	uint32_t pte_wbit;
	vm_paddr_t pa;

	m = NULL;
	pa = 0;
	PMAP_LOCK(pmap);
retry:
	pte = pte_find(mmu, pmap, va);
	if ((pte != NULL) && PTE_ISVALID(pte)) {
		if (pmap == kernel_pmap)
			pte_wbit = PTE_SW;
		else
			pte_wbit = PTE_UW;

		if ((pte->flags & pte_wbit) || ((prot & VM_PROT_WRITE) == 0)) {
			if (vm_page_pa_tryrelock(pmap, PTE_PA(pte), &pa))
				goto retry;
			m = PHYS_TO_VM_PAGE(PTE_PA(pte));
			vm_page_hold(m);
		}
	}

	PA_UNLOCK_COND(pa);
	PMAP_UNLOCK(pmap);
	return (m);
}

/*
 * Initialize a vm_page's machine-dependent fields.
 */
static void
mmu_booke_page_init(mmu_t mmu, vm_page_t m)
{

	TAILQ_INIT(&m->md.pv_list);
}

/*
 * mmu_booke_zero_page_area zeros the specified hardware page by
 * mapping it into virtual memory and using bzero to clear
 * its contents.
 *
 * off and size must reside within a single page.
 */
static void
mmu_booke_zero_page_area(mmu_t mmu, vm_page_t m, int off, int size)
{
	vm_offset_t va;

	/* XXX KASSERT off and size are within a single page? */

	mtx_lock(&zero_page_mutex);
	va = zero_page_va;

	mmu_booke_kenter(mmu, va, VM_PAGE_TO_PHYS(m));
	bzero((caddr_t)va + off, size);
	mmu_booke_kremove(mmu, va);

	mtx_unlock(&zero_page_mutex);
}

/*
 * mmu_booke_zero_page zeros the specified hardware page.
 */
static void
mmu_booke_zero_page(mmu_t mmu, vm_page_t m)
{

	mmu_booke_zero_page_area(mmu, m, 0, PAGE_SIZE);
}

/*
 * mmu_booke_copy_page copies the specified (machine independent) page by
 * mapping the page into virtual memory and using memcpy to copy the page,
 * one machine dependent page at a time.
 */
static void
mmu_booke_copy_page(mmu_t mmu, vm_page_t sm, vm_page_t dm)
{
	vm_offset_t sva, dva;

	sva = copy_page_src_va;
	dva = copy_page_dst_va;

	mtx_lock(&copy_page_mutex);
	mmu_booke_kenter(mmu, sva, VM_PAGE_TO_PHYS(sm));
	mmu_booke_kenter(mmu, dva, VM_PAGE_TO_PHYS(dm));
	memcpy((caddr_t)dva, (caddr_t)sva, PAGE_SIZE);
	mmu_booke_kremove(mmu, dva);
	mmu_booke_kremove(mmu, sva);
	mtx_unlock(&copy_page_mutex);
}

/*
 * mmu_booke_zero_page_idle zeros the specified hardware page by mapping it
 * into virtual memory and using bzero to clear its contents. This is intended
 * to be called from the vm_pagezero process only and outside of Giant. No
 * lock is required.
 */
static void
mmu_booke_zero_page_idle(mmu_t mmu, vm_page_t m)
{
	vm_offset_t va;

	va = zero_page_idle_va;
	mmu_booke_kenter(mmu, va, VM_PAGE_TO_PHYS(m));
	bzero((caddr_t)va, PAGE_SIZE);
	mmu_booke_kremove(mmu, va);
}

/*
 * Return whether or not the specified physical page was modified
 * in any physical map.
 */
static boolean_t
mmu_booke_is_modified(mmu_t mmu, vm_page_t m)
{
	pte_t *pte;
	pv_entry_t pv;
	boolean_t rv;

	KASSERT((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) == 0,
	    ("mmu_booke_is_modified: page %p is not managed", m));
	rv = FALSE;

	/*
	 * If the page is not VPO_BUSY, then PG_WRITEABLE cannot be
	 * concurrently set while the object is locked. Thus, if PG_WRITEABLE
	 * is clear, no PTEs can be modified.
	 */
	VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED);
	if ((m->oflags & VPO_BUSY) == 0 &&
	    (m->flags & PG_WRITEABLE) == 0)
		return (rv);
	vm_page_lock_queues();
	TAILQ_FOREACH(pv, &m->md.pv_list, pv_link) {
		PMAP_LOCK(pv->pv_pmap);
		if ((pte = pte_find(mmu, pv->pv_pmap, pv->pv_va)) != NULL &&
		    PTE_ISVALID(pte)) {
			if (PTE_ISMODIFIED(pte))
				rv = TRUE;
		}
		PMAP_UNLOCK(pv->pv_pmap);
		if (rv)
			break;
	}
	vm_page_unlock_queues();
	return (rv);
}

/*
 * Return whether or not the specified virtual address is eligible
 * for prefault.
 */
static boolean_t
mmu_booke_is_prefaultable(mmu_t mmu, pmap_t pmap, vm_offset_t addr)
{

	return (FALSE);
}

/*
 * Return whether or not the specified physical page was referenced
 * in any physical maps.
 */
static boolean_t
mmu_booke_is_referenced(mmu_t mmu, vm_page_t m)
{
	pte_t *pte;
	pv_entry_t pv;
	boolean_t rv;

	mtx_assert(&vm_page_queue_mtx, MA_OWNED);
	rv = FALSE;
	if ((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) != 0)
		return (rv);
	TAILQ_FOREACH(pv, &m->md.pv_list, pv_link) {
		PMAP_LOCK(pv->pv_pmap);
		if ((pte = pte_find(mmu, pv->pv_pmap, pv->pv_va)) != NULL &&
		    PTE_ISVALID(pte))
			rv = PTE_ISREFERENCED(pte) ? TRUE : FALSE;
		PMAP_UNLOCK(pv->pv_pmap);
		if (rv)
			break;
	}
	return (rv);
}

/*
 * Clear the modify bits on the specified physical page.
 */
static void
mmu_booke_clear_modify(mmu_t mmu, vm_page_t m)
{
	pte_t *pte;
	pv_entry_t pv;

	KASSERT((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) == 0,
	    ("mmu_booke_clear_modify: page %p is not managed", m));
	VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED);
	KASSERT((m->oflags & VPO_BUSY) == 0,
	    ("mmu_booke_clear_modify: page %p is busy", m));

	/*
	 * If the page is not PG_WRITEABLE, then no PTEs can be modified.
	 * If the object containing the page is locked and the page is not
	 * VPO_BUSY, then PG_WRITEABLE cannot be concurrently set.
	 */
	if ((m->flags & PG_WRITEABLE) == 0)
		return;
	vm_page_lock_queues();
	TAILQ_FOREACH(pv, &m->md.pv_list, pv_link) {
		PMAP_LOCK(pv->pv_pmap);
		if ((pte = pte_find(mmu, pv->pv_pmap, pv->pv_va)) != NULL &&
		    PTE_ISVALID(pte)) {
			mtx_lock_spin(&tlbivax_mutex);
			tlb_miss_lock();

			if (pte->flags & (PTE_SW | PTE_UW | PTE_MODIFIED)) {
				tlb0_flush_entry(pv->pv_va);
				pte->flags &= ~(PTE_SW | PTE_UW | PTE_MODIFIED |
				    PTE_REFERENCED);
			}

			tlb_miss_unlock();
			mtx_unlock_spin(&tlbivax_mutex);
		}
		PMAP_UNLOCK(pv->pv_pmap);
	}
	vm_page_unlock_queues();
}

/*
 * Return a count of reference bits for a page, clearing those bits.
 * It is not necessary for every reference bit to be cleared, but it
 * is necessary that 0 only be returned when there are truly no
 * reference bits set.
 *
 * XXX: The exact number of bits to check and clear is a matter that
 * should be tested and standardized at some point in the future for
 * optimal aging of shared pages.
 */
static int
mmu_booke_ts_referenced(mmu_t mmu, vm_page_t m)
{
	pte_t *pte;
	pv_entry_t pv;
	int count;

	mtx_assert(&vm_page_queue_mtx, MA_OWNED);
	if ((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) != 0)
		return (0);

	count = 0;
	TAILQ_FOREACH(pv, &m->md.pv_list, pv_link) {
		PMAP_LOCK(pv->pv_pmap);
		if ((pte = pte_find(mmu, pv->pv_pmap, pv->pv_va)) != NULL) {
			if (!PTE_ISVALID(pte))
				goto make_sure_to_unlock;

			if (PTE_ISREFERENCED(pte)) {
				mtx_lock_spin(&tlbivax_mutex);
				tlb_miss_lock();

				tlb0_flush_entry(pv->pv_va);
				pte->flags &= ~PTE_REFERENCED;

				tlb_miss_unlock();
				mtx_unlock_spin(&tlbivax_mutex);

				if (++count > 4) {
					PMAP_UNLOCK(pv->pv_pmap);
					break;
				}
			}
		}
make_sure_to_unlock:
		PMAP_UNLOCK(pv->pv_pmap);
	}
	return (count);
}

/*
 * Clear the reference bit on the specified physical page.
 */
static void
mmu_booke_clear_reference(mmu_t mmu, vm_page_t m)
{
	pte_t *pte;
	pv_entry_t pv;

	KASSERT((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) == 0,
	    ("mmu_booke_clear_reference: page %p is not managed", m));
	vm_page_lock_queues();
	TAILQ_FOREACH(pv, &m->md.pv_list, pv_link) {
		PMAP_LOCK(pv->pv_pmap);
		if ((pte = pte_find(mmu, pv->pv_pmap, pv->pv_va)) != NULL &&
		    PTE_ISVALID(pte)) {
			if (PTE_ISREFERENCED(pte)) {
				mtx_lock_spin(&tlbivax_mutex);
				tlb_miss_lock();

				tlb0_flush_entry(pv->pv_va);
				pte->flags &= ~PTE_REFERENCED;

				tlb_miss_unlock();
				mtx_unlock_spin(&tlbivax_mutex);
			}
		}
		PMAP_UNLOCK(pv->pv_pmap);
	}
	vm_page_unlock_queues();
}

/*
 * Change wiring attribute for a map/virtual-address pair.
 */
static void
mmu_booke_change_wiring(mmu_t mmu, pmap_t pmap, vm_offset_t va, boolean_t wired)
{
	pte_t *pte;

	PMAP_LOCK(pmap);
	if ((pte = pte_find(mmu, pmap, va)) != NULL) {
		if (wired) {
			if (!PTE_ISWIRED(pte)) {
				pte->flags |= PTE_WIRED;
				pmap->pm_stats.wired_count++;
			}
		} else {
			if (PTE_ISWIRED(pte)) {
				pte->flags &= ~PTE_WIRED;
				pmap->pm_stats.wired_count--;
			}
		}
	}
	PMAP_UNLOCK(pmap);
}

/*
 * Return true if the pmap's pv is one of the first 16 pvs linked to from this
 * page. This count may be changed upwards or downwards in the future; it is
 * only necessary that true be returned for a small subset of pmaps for proper
 * page aging.
 */
static boolean_t
mmu_booke_page_exists_quick(mmu_t mmu, pmap_t pmap, vm_page_t m)
{
	pv_entry_t pv;
	int loops;

	mtx_assert(&vm_page_queue_mtx, MA_OWNED);
	if ((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) != 0)
		return (FALSE);

	loops = 0;
	TAILQ_FOREACH(pv, &m->md.pv_list, pv_link) {
		if (pv->pv_pmap == pmap)
			return (TRUE);

		if (++loops >= 16)
			break;
	}
	return (FALSE);
}

/*
 * Return the number of managed mappings to the given physical page that are
 * wired.
 */
static int
mmu_booke_page_wired_mappings(mmu_t mmu, vm_page_t m)
{
	pv_entry_t pv;
	pte_t *pte;
	int count = 0;

	if ((m->flags & PG_FICTITIOUS) != 0)
		return (count);
	vm_page_lock_queues();
	TAILQ_FOREACH(pv, &m->md.pv_list, pv_link) {
		PMAP_LOCK(pv->pv_pmap);
		if ((pte = pte_find(mmu, pv->pv_pmap, pv->pv_va)) != NULL)
			if (PTE_ISVALID(pte) && PTE_ISWIRED(pte))
				count++;
		PMAP_UNLOCK(pv->pv_pmap);
	}
	vm_page_unlock_queues();
	return (count);
}

static int
mmu_booke_dev_direct_mapped(mmu_t mmu, vm_offset_t pa, vm_size_t size)
{
	int i;
	vm_offset_t va;

	/*
	 * This currently does not work for entries that
	 * overlap TLB1 entries.
	 */
	for (i = 0; i < tlb1_idx; i ++) {
		if (tlb1_iomapped(i, pa, size, &va) == 0)
			return (0);
	}

	return (EFAULT);
}

vm_offset_t
mmu_booke_dumpsys_map(mmu_t mmu, struct pmap_md *md, vm_size_t ofs,
    vm_size_t *sz)
{
	vm_paddr_t pa, ppa;
	vm_offset_t va;
	vm_size_t gran;

	/* Raw physical memory dumps don't have a virtual address. */
	if (md->md_vaddr == ~0UL) {
		/* We always map a 256MB page at 256M. */
		gran = 256 * 1024 * 1024;
		pa = md->md_paddr + ofs;
		ppa = pa & ~(gran - 1);
		ofs = pa - ppa;
		va = gran;
		tlb1_set_entry(va, ppa, gran, _TLB_ENTRY_IO);
		if (*sz > (gran - ofs))
			*sz = gran - ofs;
		return (va + ofs);
	}

	/* Minidumps are based on virtual memory addresses. */
	va = md->md_vaddr + ofs;
	if (va >= kernstart + kernsize) {
		gran = PAGE_SIZE - (va & PAGE_MASK);
		if (*sz > gran)
			*sz = gran;
	}
	return (va);
}

void
mmu_booke_dumpsys_unmap(mmu_t mmu, struct pmap_md *md, vm_size_t ofs,
    vm_offset_t va)
{

	/* Raw physical memory dumps don't have a virtual address. */
	if (md->md_vaddr == ~0UL) {
		tlb1_idx--;
		tlb1[tlb1_idx].mas1 = 0;
		tlb1[tlb1_idx].mas2 = 0;
		tlb1[tlb1_idx].mas3 = 0;
		tlb1_write_entry(tlb1_idx);
		return;
	}

	/* Minidumps are based on virtual memory addresses. */
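	/*
	 * mmu_booke_dumpsys_map() handed back an existing kernel mapping in
	 * this case, so there is no TLB1 entry to release.
	 */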
	/* Nothing to do... */
}

struct pmap_md *
mmu_booke_scan_md(mmu_t mmu, struct pmap_md *prev)
{
	static struct pmap_md md;
	struct bi_mem_region *mr;
	pte_t *pte;
	vm_offset_t va;

	if (dumpsys_minidump) {
		md.md_paddr = ~0UL;	/* Minidumps use virtual addresses. */
		if (prev == NULL) {
			/* 1st: kernel .data and .bss. */
			md.md_index = 1;
			md.md_vaddr = trunc_page((uintptr_t)_etext);
			md.md_size = round_page((uintptr_t)_end) - md.md_vaddr;
			return (&md);
		}
		switch (prev->md_index) {
		case 1:
			/* 2nd: msgbuf and tables (see pmap_bootstrap()). */
			md.md_index = 2;
			md.md_vaddr = data_start;
			md.md_size = data_end - data_start;
			break;
		case 2:
			/* 3rd: kernel VM. */
			va = prev->md_vaddr + prev->md_size;
			/* Find start of next chunk (from va). */
			while (va < virtual_end) {
				/* Don't dump the buffer cache. */
				if (va >= kmi.buffer_sva &&
				    va < kmi.buffer_eva) {
					va = kmi.buffer_eva;
					continue;
				}
				pte = pte_find(mmu, kernel_pmap, va);
				if (pte != NULL && PTE_ISVALID(pte))
					break;
				va += PAGE_SIZE;
			}
			if (va < virtual_end) {
				md.md_vaddr = va;
				va += PAGE_SIZE;
				/* Find last page in chunk. */
				while (va < virtual_end) {
					/* Don't run into the buffer cache. */
					if (va == kmi.buffer_sva)
						break;
					pte = pte_find(mmu, kernel_pmap, va);
					if (pte == NULL || !PTE_ISVALID(pte))
						break;
					va += PAGE_SIZE;
				}
				md.md_size = va - md.md_vaddr;
				break;
			}
			md.md_index = 3;
			/* FALLTHROUGH */
		default:
			return (NULL);
		}
	} else { /* full (physical) memory dump */
		mr = bootinfo_mr();
		if (prev == NULL) {
			/* first physical chunk. */
			md.md_paddr = mr->mem_base;
			md.md_size = mr->mem_size;
			md.md_vaddr = ~0UL;
			md.md_index = 1;
		} else if (md.md_index < bootinfo->bi_mem_reg_no) {
			md.md_paddr = mr[md.md_index].mem_base;
			md.md_size = mr[md.md_index].mem_size;
			md.md_vaddr = ~0UL;
			md.md_index++;
		} else {
			/* There's no next physical chunk. */
			return (NULL);
		}
	}

	return (&md);
}

/*
 * Map a set of physical memory pages into the kernel virtual address space.
 * Return a pointer to where it is mapped. This routine is intended to be used
 * for mapping device memory, NOT real memory.
 */
static void *
mmu_booke_mapdev(mmu_t mmu, vm_offset_t pa, vm_size_t size)
{
	void *res;
	uintptr_t va;
	vm_size_t sz;

	va = (pa >= 0x80000000) ? pa : (0xe2000000 + pa);
	res = (void *)va;

	do {
		sz = 1 << (ilog2(size) & ~1);
		if (bootverbose)
			printf("Wiring VA=%x to PA=%x (size=%x), "
			    "using TLB1[%d]\n", va, pa, sz, tlb1_idx);
		tlb1_set_entry(va, pa, sz, _TLB_ENTRY_IO);
		size -= sz;
		pa += sz;
		va += sz;
	} while (size > 0);

	return (res);
}

/*
 * 'Unmap' a range mapped by mmu_booke_mapdev().
 */
static void
mmu_booke_unmapdev(mmu_t mmu, vm_offset_t va, vm_size_t size)
{
	vm_offset_t base, offset;

	/*
	 * Unmap only if this is inside kernel virtual space.
 */
	if ((va >= VM_MIN_KERNEL_ADDRESS) && (va <= VM_MAX_KERNEL_ADDRESS)) {
		base = trunc_page(va);
		offset = va & PAGE_MASK;
		size = roundup(offset + size, PAGE_SIZE);
		kmem_free(kernel_map, base, size);
	}
}

/*
 * mmu_booke_object_init_pt preloads the ptes for a given object into the
 * specified pmap. This eliminates the blast of soft faults on process startup
 * and immediately after an mmap.
 */
static void
mmu_booke_object_init_pt(mmu_t mmu, pmap_t pmap, vm_offset_t addr,
    vm_object_t object, vm_pindex_t pindex, vm_size_t size)
{

	VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
	KASSERT(object->type == OBJT_DEVICE || object->type == OBJT_SG,
	    ("mmu_booke_object_init_pt: non-device object"));
}

/*
 * Perform the pmap work for mincore.
 */
static int
mmu_booke_mincore(mmu_t mmu, pmap_t pmap, vm_offset_t addr,
    vm_paddr_t *locked_pa)
{

	TODO;
	return (0);
}

/**************************************************************************/
/* TID handling */
/**************************************************************************/

/*
 * Allocate a TID. If necessary, steal one from someone else.
 * The new TID is flushed from the TLB before returning.
 */
static tlbtid_t
tid_alloc(pmap_t pmap)
{
	tlbtid_t tid;
	int thiscpu;

	KASSERT((pmap != kernel_pmap), ("tid_alloc: kernel pmap"));

	CTR2(KTR_PMAP, "%s: s (pmap = %p)", __func__, pmap);

	thiscpu = PCPU_GET(cpuid);

	tid = PCPU_GET(tid_next);
	if (tid > TID_MAX)
		tid = TID_MIN;
	PCPU_SET(tid_next, tid + 1);

	/* If we are stealing a TID, clear the relevant pmap's field. */
	if (tidbusy[thiscpu][tid] != NULL) {

		CTR2(KTR_PMAP, "%s: warning: stealing tid %d", __func__, tid);

		tidbusy[thiscpu][tid]->pm_tid[thiscpu] = TID_NONE;

		/* Flush all entries from TLB0 matching this TID. */
		tid_flush(tid);
	}

	tidbusy[thiscpu][tid] = pmap;
	pmap->pm_tid[thiscpu] = tid;
	__asm __volatile("msync; isync");

	CTR3(KTR_PMAP, "%s: e (%02d next = %02d)", __func__, tid,
	    PCPU_GET(tid_next));

	return (tid);
}

/**************************************************************************/
/* TLB0 handling */
/**************************************************************************/

static void
tlb_print_entry(int i, uint32_t mas1, uint32_t mas2, uint32_t mas3,
    uint32_t mas7)
{
	int as;
	char desc[3];
	tlbtid_t tid;
	vm_size_t size;
	unsigned int tsize;

	desc[2] = '\0';
	if (mas1 & MAS1_VALID)
		desc[0] = 'V';
	else
		desc[0] = ' ';

	if (mas1 & MAS1_IPROT)
		desc[1] = 'P';
	else
		desc[1] = ' ';

	as = (mas1 & MAS1_TS_MASK) ? 1 : 0;
	tid = MAS1_GETTID(mas1);

	tsize = (mas1 & MAS1_TSIZE_MASK) >> MAS1_TSIZE_SHIFT;
	size = 0;
	if (tsize)
		size = tsize2size(tsize);

	debugf("%3d: (%s) [AS=%d] "
	    "sz = 0x%08x tsz = %d tid = %d mas1 = 0x%08x "
	    "mas2(va) = 0x%08x mas3(pa) = 0x%08x mas7 = 0x%08x\n",
	    i, desc, as, size, tsize, tid, mas1, mas2, mas3, mas7);
}

/* Convert TLB0 va and way number to tlb0[] table index. */
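/*
 * The table is laid out way-major: each way owns TLB0_ENTRIES_PER_WAY
 * consecutive slots, and the slot within a way is selected by the
 * entry-index bits of the virtual address.
 */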
static inline unsigned int
tlb0_tableidx(vm_offset_t va, unsigned int way)
{
	unsigned int idx;

	idx = (way * TLB0_ENTRIES_PER_WAY);
	idx += (va & MAS2_TLB0_ENTRY_IDX_MASK) >> MAS2_TLB0_ENTRY_IDX_SHIFT;
	return (idx);
}

/*
 * Invalidate TLB0 entry.
 */
static inline void
tlb0_flush_entry(vm_offset_t va)
{

	CTR2(KTR_PMAP, "%s: s va=0x%08x", __func__, va);

	mtx_assert(&tlbivax_mutex, MA_OWNED);

	__asm __volatile("tlbivax 0, %0" :: "r"(va & MAS2_EPN_MASK));
	__asm __volatile("isync; msync");
	__asm __volatile("tlbsync; msync");

	CTR1(KTR_PMAP, "%s: e", __func__);
}

/* Print out contents of the MAS registers for each TLB0 entry */
void
tlb0_print_tlbentries(void)
{
	uint32_t mas0, mas1, mas2, mas3, mas7;
	int entryidx, way, idx;

	debugf("TLB0 entries:\n");
	for (way = 0; way < TLB0_WAYS; way ++)
		for (entryidx = 0; entryidx < TLB0_ENTRIES_PER_WAY; entryidx++) {

			mas0 = MAS0_TLBSEL(0) | MAS0_ESEL(way);
			mtspr(SPR_MAS0, mas0);
			__asm __volatile("isync");

			mas2 = entryidx << MAS2_TLB0_ENTRY_IDX_SHIFT;
			mtspr(SPR_MAS2, mas2);

			__asm __volatile("isync; tlbre");

			mas1 = mfspr(SPR_MAS1);
			mas2 = mfspr(SPR_MAS2);
			mas3 = mfspr(SPR_MAS3);
			mas7 = mfspr(SPR_MAS7);

			idx = tlb0_tableidx(mas2, way);
			tlb_print_entry(idx, mas1, mas2, mas3, mas7);
		}
}

/**************************************************************************/
/* TLB1 handling */
/**************************************************************************/

/*
 * TLB1 mapping notes:
 *
 * TLB1[0] CCSRBAR
 * TLB1[1] Kernel text and data.
 * TLB1[2-15] Additional kernel text and data mappings (if required), PCI
 *            windows, other devices mappings.
 */

/*
 * Write given entry to TLB1 hardware.
 * Use 32 bit pa, clear 4 high-order bits of RPN (mas7).
 */
static void
tlb1_write_entry(unsigned int idx)
{
	uint32_t mas0, mas7;

	//debugf("tlb1_write_entry: s\n");

	/* Clear high order RPN bits */
	mas7 = 0;

	/* Select entry */
	mas0 = MAS0_TLBSEL(1) | MAS0_ESEL(idx);
	//debugf("tlb1_write_entry: mas0 = 0x%08x\n", mas0);

	mtspr(SPR_MAS0, mas0);
	__asm __volatile("isync");
	mtspr(SPR_MAS1, tlb1[idx].mas1);
	__asm __volatile("isync");
	mtspr(SPR_MAS2, tlb1[idx].mas2);
	__asm __volatile("isync");
	mtspr(SPR_MAS3, tlb1[idx].mas3);
	__asm __volatile("isync");
	mtspr(SPR_MAS7, mas7);
	__asm __volatile("isync; tlbwe; isync; msync");

	//debugf("tlb1_write_entry: e\n");
}

/*
 * Return the largest uint value log such that 2^log <= num.
 */
static unsigned int
ilog2(unsigned int num)
{
	int lz;

	__asm ("cntlzw %0, %1" : "=r" (lz) : "r" (num));
	return (31 - lz);
}

/*
 * Convert TLB TSIZE value to mapped region size.
 */
static vm_size_t
tsize2size(unsigned int tsize)
{

	/*
	 * size = 4^tsize KB
	 * size = 4^tsize * 2^10 = 2^(2 * tsize + 10)
	 */

	return ((1 << (2 * tsize)) * 1024);
}

/*
 * Convert region size (must be power of 4) to TLB TSIZE value.
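 * For example, a 1 MB region gives TSIZE 5, since 4^5 KB = 1 MB
 * (ilog2(0x100000) / 2 - 5 == 20 / 2 - 5 == 5).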
 */
static unsigned int
size2tsize(vm_size_t size)
{

	return (ilog2(size) / 2 - 5);
}

/*
 * Register permanent kernel mapping in TLB1.
 *
 * Entries are created starting from index 0 (current free entry is
 * kept in tlb1_idx) and are not supposed to be invalidated.
 */
static int
tlb1_set_entry(vm_offset_t va, vm_offset_t pa, vm_size_t size,
    uint32_t flags)
{
	uint32_t ts, tid;
	int tsize;

	if (tlb1_idx >= TLB1_ENTRIES) {
		printf("tlb1_set_entry: TLB1 full!\n");
		return (-1);
	}

	/* Convert size to TSIZE */
	tsize = size2tsize(size);

	tid = (TID_KERNEL << MAS1_TID_SHIFT) & MAS1_TID_MASK;
	/* XXX TS is hard coded to 0 for now as we only use single address space */
	ts = (0 << MAS1_TS_SHIFT) & MAS1_TS_MASK;

	/* XXX LOCK tlb1[] */

	tlb1[tlb1_idx].mas1 = MAS1_VALID | MAS1_IPROT | ts | tid;
	tlb1[tlb1_idx].mas1 |= ((tsize << MAS1_TSIZE_SHIFT) & MAS1_TSIZE_MASK);
	tlb1[tlb1_idx].mas2 = (va & MAS2_EPN_MASK) | flags;

	/* Set supervisor RWX permission bits */
	tlb1[tlb1_idx].mas3 = (pa & MAS3_RPN) | MAS3_SR | MAS3_SW | MAS3_SX;

	tlb1_write_entry(tlb1_idx++);

	/* XXX UNLOCK tlb1[] */

	/*
	 * XXX in general TLB1 updates should be propagated between CPUs,
	 * since current design assumes to have the same TLB1 set-up on all
	 * cores.
	 */
	return (0);
}

static int
tlb1_entry_size_cmp(const void *a, const void *b)
{
	const vm_size_t *sza;
	const vm_size_t *szb;

	sza = a;
	szb = b;
	if (*sza > *szb)
		return (-1);
	else if (*sza < *szb)
		return (1);
	else
		return (0);
}

/*
 * Map in contiguous RAM region into the TLB1 using maximum of
 * KERNEL_REGION_MAX_TLB_ENTRIES entries.
 *
 * If necessary round up last entry size and return total size
 * used by all allocated entries.
 */
vm_size_t
tlb1_mapin_region(vm_offset_t va, vm_offset_t pa, vm_size_t size)
{
	vm_size_t entry_size[KERNEL_REGION_MAX_TLB_ENTRIES];
	vm_size_t mapped_size, sz, esz;
	unsigned int log;
	int i;

	CTR4(KTR_PMAP, "%s: region size = 0x%08x va = 0x%08x pa = 0x%08x",
	    __func__, size, va, pa);

	mapped_size = 0;
	sz = size;
	memset(entry_size, 0, sizeof(entry_size));

	/* Calculate entry sizes. */
	for (i = 0; i < KERNEL_REGION_MAX_TLB_ENTRIES && sz > 0; i++) {

		/* Largest region that is power of 4 and fits within size */
		log = ilog2(sz) / 2;
		esz = 1 << (2 * log);

		/* If this is last entry cover remaining size. */
		if (i == KERNEL_REGION_MAX_TLB_ENTRIES - 1) {
			while (esz < sz)
				esz = esz << 2;
		}

		entry_size[i] = esz;
		mapped_size += esz;
		if (esz < sz)
			sz -= esz;
		else
			sz = 0;
	}

	/* Sort entry sizes, required to get proper entry address alignment. */
	qsort(entry_size, KERNEL_REGION_MAX_TLB_ENTRIES,
	    sizeof(vm_size_t), tlb1_entry_size_cmp);

	/* Load TLB1 entries. */
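	/*
	 * Sizes were sorted largest first, so each successive base address
	 * stays naturally aligned to the size of the entry being mapped.
	 */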
	for (i = 0; i < KERNEL_REGION_MAX_TLB_ENTRIES; i++) {
		esz = entry_size[i];
		if (!esz)
			break;

		CTR5(KTR_PMAP, "%s: entry %d: sz = 0x%08x (va = 0x%08x "
		    "pa = 0x%08x)", __func__, tlb1_idx, esz, va, pa);

		tlb1_set_entry(va, pa, esz, _TLB_ENTRY_MEM);

		va += esz;
		pa += esz;
	}

	CTR3(KTR_PMAP, "%s: mapped size 0x%08x (wasted space 0x%08x)",
	    __func__, mapped_size, mapped_size - size);

	return (mapped_size);
}

/*
 * TLB1 initialization routine, to be called after the very first
 * assembler level setup done in locore.S.
 */
void
tlb1_init(vm_offset_t ccsrbar)
{
	uint32_t mas0;

	/* TLB1[1] is used to map the kernel. Save that entry. */
	mas0 = MAS0_TLBSEL(1) | MAS0_ESEL(1);
	mtspr(SPR_MAS0, mas0);
	__asm __volatile("isync; tlbre");

	tlb1[1].mas1 = mfspr(SPR_MAS1);
	tlb1[1].mas2 = mfspr(SPR_MAS2);
	tlb1[1].mas3 = mfspr(SPR_MAS3);

	/* Map in CCSRBAR in TLB1[0] */
	tlb1_idx = 0;
	tlb1_set_entry(CCSRBAR_VA, ccsrbar, CCSRBAR_SIZE, _TLB_ENTRY_IO);
	/*
	 * Set the next available TLB1 entry index. Note TLB1[1] is reserved
	 * for the initial mapping of kernel text+data, which was set up early
	 * in locore; we need to skip this [busy] entry.
	 */
	tlb1_idx = 2;

	/* Setup TLB miss defaults */
	set_mas4_defaults();
}

/*
 * Set up MAS4 defaults.
 * These values are loaded to MAS0-2 on a TLB miss.
 */
static void
set_mas4_defaults(void)
{
	uint32_t mas4;

	/* Defaults: TLB0, PID0, TSIZED=4K */
	mas4 = MAS4_TLBSELD0;
	mas4 |= (TLB_SIZE_4K << MAS4_TSIZED_SHIFT) & MAS4_TSIZED_MASK;
#ifdef SMP
	mas4 |= MAS4_MD;
#endif
	mtspr(SPR_MAS4, mas4);
	__asm __volatile("isync");
}

/*
 * Print out contents of the MAS registers for each TLB1 entry
 */
void
tlb1_print_tlbentries(void)
{
	uint32_t mas0, mas1, mas2, mas3, mas7;
	int i;

	debugf("TLB1 entries:\n");
	for (i = 0; i < TLB1_ENTRIES; i++) {

		mas0 = MAS0_TLBSEL(1) | MAS0_ESEL(i);
		mtspr(SPR_MAS0, mas0);

		__asm __volatile("isync; tlbre");

		mas1 = mfspr(SPR_MAS1);
		mas2 = mfspr(SPR_MAS2);
		mas3 = mfspr(SPR_MAS3);
		mas7 = mfspr(SPR_MAS7);

		tlb_print_entry(i, mas1, mas2, mas3, mas7);
	}
}

/*
 * Print out contents of the in-ram tlb1 table.
 */
void
tlb1_print_entries(void)
{
	int i;

	debugf("tlb1[] table entries:\n");
	for (i = 0; i < TLB1_ENTRIES; i++)
		tlb_print_entry(i, tlb1[i].mas1, tlb1[i].mas2, tlb1[i].mas3, 0);
}

/*
 * Return 0 if the physical IO range is encompassed by one of the
 * TLB1 entries, otherwise return a related error code.
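 * (EINVAL for an invalid entry, EPERM when the protection or cache
 * attributes do not suit an I/O mapping, ERANGE when the range does not
 * fit within the entry.)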
 */
static int
tlb1_iomapped(int i, vm_paddr_t pa, vm_size_t size, vm_offset_t *va)
{
	uint32_t prot;
	vm_paddr_t pa_start;
	vm_paddr_t pa_end;
	unsigned int entry_tsize;
	vm_size_t entry_size;

	*va = (vm_offset_t)NULL;

	/* Skip invalid entries */
	if (!(tlb1[i].mas1 & MAS1_VALID))
		return (EINVAL);

	/*
	 * The entry must be cache-inhibited, guarded, and r/w
	 * so it can function as an i/o page
	 */
	prot = tlb1[i].mas2 & (MAS2_I | MAS2_G);
	if (prot != (MAS2_I | MAS2_G))
		return (EPERM);

	prot = tlb1[i].mas3 & (MAS3_SR | MAS3_SW);
	if (prot != (MAS3_SR | MAS3_SW))
		return (EPERM);

	/* The address should be within the entry range. */
	entry_tsize = (tlb1[i].mas1 & MAS1_TSIZE_MASK) >> MAS1_TSIZE_SHIFT;
	KASSERT((entry_tsize), ("tlb1_iomapped: invalid entry tsize"));

	entry_size = tsize2size(entry_tsize);
	pa_start = tlb1[i].mas3 & MAS3_RPN;
	pa_end = pa_start + entry_size - 1;

	if ((pa < pa_start) || ((pa + size) > pa_end))
		return (ERANGE);

	/* Return virtual address of this mapping. */
	*va = (tlb1[i].mas2 & MAS2_EPN_MASK) + (pa - pa_start);
	return (0);
}