/*-
 * Copyright (C) 2007-2009 Semihalf, Rafal Jaworowski <raj@semihalf.com>
 * Copyright (C) 2006 Semihalf, Marian Balakowicz <m8@semihalf.com>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN
 * NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
 * TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
 * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
 * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Some hw specific parts of this pmap were derived or influenced
 * by NetBSD's ibm4xx pmap module. More generic code is shared with
 * a few other pmap modules from the FreeBSD tree.
 */

/*
 * VM layout notes:
 *
 * Kernel and user threads run within one common virtual address space
 * defined by AS=0.
 *
 * Virtual address space layout:
 * -----------------------------
 * 0x0000_0000 - 0xafff_ffff	: user process
 * 0xb000_0000 - 0xbfff_ffff	: pmap_mapdev()-ed area (PCI/PCIE etc.)
 * 0xc000_0000 - 0xc0ff_ffff	: kernel reserved
 *   0xc000_0000 - data_end	: kernel code+data, env, metadata etc.
 * 0xc100_0000 - 0xfeef_ffff	: KVA
 *   0xc100_0000 - 0xc100_3fff	: reserved for page zero/copy
 *   0xc100_4000 - 0xc200_3fff	: reserved for ptbl bufs
 *   0xc200_4000 - 0xc200_8fff	: guard page + kstack0
 *   0xc200_9000 - 0xfeef_ffff	: actual free KVA space
 * 0xfef0_0000 - 0xffff_ffff	: I/O devices region
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/types.h>
#include <sys/param.h>
#include <sys/malloc.h>
#include <sys/ktr.h>
#include <sys/proc.h>
#include <sys/user.h>
#include <sys/queue.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/linker.h>
#include <sys/msgbuf.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/sched.h>
#include <sys/smp.h>
#include <sys/vmmeter.h>

#include <vm/vm.h>
#include <vm/vm_page.h>
#include <vm/vm_kern.h>
#include <vm/vm_pageout.h>
#include <vm/vm_extern.h>
#include <vm/vm_object.h>
#include <vm/vm_param.h>
#include <vm/vm_map.h>
#include <vm/vm_pager.h>
#include <vm/uma.h>

#include <machine/cpu.h>
#include <machine/pcb.h>
#include <machine/platform.h>

#include <machine/tlb.h>
#include <machine/spr.h>
#include <machine/vmparam.h>
#include <machine/md_var.h>
#include <machine/mmuvar.h>
#include <machine/pmap.h>
#include <machine/pte.h>

#include "mmu_if.h"

#ifdef DEBUG
#define debugf(fmt, args...) printf(fmt, ##args)
#else
#define debugf(fmt, args...)
#endif
#define TODO			panic("%s: not implemented", __func__);

#include "opt_sched.h"
#ifndef SCHED_4BSD
#error "e500 only works with SCHED_4BSD which uses a global scheduler lock."
#endif
extern struct mtx sched_lock;

extern int dumpsys_minidump;

extern unsigned char _etext[];
extern unsigned char _end[];

extern uint32_t *bootinfo;

#ifdef SMP
extern uint32_t kernload_ap;
#endif

vm_paddr_t kernload;
vm_offset_t kernstart;
vm_size_t kernsize;

/* Message buffer and tables. */
static vm_offset_t data_start;
static vm_size_t data_end;

/* Phys/avail memory regions. */
static struct mem_region *availmem_regions;
static int availmem_regions_sz;
static struct mem_region *physmem_regions;
static int physmem_regions_sz;

/* Reserved KVA space and mutex for mmu_booke_zero_page. */
static vm_offset_t zero_page_va;
static struct mtx zero_page_mutex;

static struct mtx tlbivax_mutex;

/*
 * Reserved KVA space for mmu_booke_zero_page_idle. This is used
 * by the idle thread only, no lock required.
 */
static vm_offset_t zero_page_idle_va;

/* Reserved KVA space and mutex for mmu_booke_copy_page. */
static vm_offset_t copy_page_src_va;
static vm_offset_t copy_page_dst_va;
static struct mtx copy_page_mutex;

/**************************************************************************/
/* PMAP */
/**************************************************************************/

static void mmu_booke_enter_locked(mmu_t, pmap_t, vm_offset_t, vm_page_t,
    vm_prot_t, boolean_t);

unsigned int kptbl_min;		/* Index of the first kernel ptbl. */
unsigned int kernel_ptbls;	/* Number of KVA ptbls. */

/*
 * If user pmap is processed with mmu_booke_remove and the resident count
 * drops to 0, there are no more pages to remove, so we need not continue.
 */
#define PMAP_REMOVE_DONE(pmap) \
	((pmap) != kernel_pmap && (pmap)->pm_stats.resident_count == 0)

extern void tid_flush(tlbtid_t);

/**************************************************************************/
/* TLB and TID handling */
/**************************************************************************/

/* Translation ID busy table */
static volatile pmap_t tidbusy[MAXCPU][TID_MAX + 1];

/*
 * TLB0 capabilities (entry, way numbers etc.). These can vary between e500
 * core revisions and should be read from h/w registers during early config.
 */
uint32_t tlb0_entries;
uint32_t tlb0_ways;
uint32_t tlb0_entries_per_way;

#define TLB0_ENTRIES		(tlb0_entries)
#define TLB0_WAYS		(tlb0_ways)
#define TLB0_ENTRIES_PER_WAY	(tlb0_entries_per_way)

#define TLB1_ENTRIES 16

/* In-ram copy of the TLB1 */
static tlb_entry_t tlb1[TLB1_ENTRIES];

/* Next free entry in the TLB1 */
static unsigned int tlb1_idx;

static tlbtid_t tid_alloc(struct pmap *);

static void tlb_print_entry(int, uint32_t, uint32_t, uint32_t, uint32_t);

static int tlb1_set_entry(vm_offset_t, vm_offset_t, vm_size_t, uint32_t);
static void tlb1_write_entry(unsigned int);
static int tlb1_iomapped(int, vm_paddr_t, vm_size_t, vm_offset_t *);
static vm_size_t tlb1_mapin_region(vm_offset_t, vm_paddr_t, vm_size_t);

static vm_size_t tsize2size(unsigned int);
static unsigned int size2tsize(vm_size_t);
static unsigned int ilog2(unsigned int);

static void set_mas4_defaults(void);

static inline void tlb0_flush_entry(vm_offset_t);
static inline unsigned int tlb0_tableidx(vm_offset_t, unsigned int);

/**************************************************************************/
/* Page table management */
/**************************************************************************/

/* Data for the pv entry allocation mechanism */
static uma_zone_t pvzone;
static struct vm_object pvzone_obj;
static int pv_entry_count = 0, pv_entry_max = 0, pv_entry_high_water = 0;

#define PV_ENTRY_ZONE_MIN	2048	/* min pv entries in uma zone */

#ifndef PMAP_SHPGPERPROC
#define PMAP_SHPGPERPROC	200
#endif

static void ptbl_init(void);
static struct ptbl_buf *ptbl_buf_alloc(void);
static void ptbl_buf_free(struct ptbl_buf *);
static void ptbl_free_pmap_ptbl(pmap_t, pte_t *);

static pte_t *ptbl_alloc(mmu_t, pmap_t, unsigned int);
static void ptbl_free(mmu_t, pmap_t, unsigned int);
static void ptbl_hold(mmu_t, pmap_t, unsigned int);
static int ptbl_unhold(mmu_t, pmap_t, unsigned int);

static vm_paddr_t pte_vatopa(mmu_t, pmap_t, vm_offset_t);
static pte_t *pte_find(mmu_t, pmap_t, vm_offset_t);
static void pte_enter(mmu_t, pmap_t, vm_page_t, vm_offset_t, uint32_t);
static int pte_remove(mmu_t, pmap_t, vm_offset_t, uint8_t);

static pv_entry_t pv_alloc(void);
static void pv_free(pv_entry_t);
static void pv_insert(pmap_t, vm_offset_t, vm_page_t);
static void pv_remove(pmap_t, vm_offset_t, vm_page_t);

/* Number of kva ptbl buffers, each covering one ptbl (PTBL_PAGES). */
#define PTBL_BUFS		(128 * 16)

struct ptbl_buf {
	TAILQ_ENTRY(ptbl_buf) link;	/* list link */
	vm_offset_t kva;		/* va of mapping */
};

/* ptbl free list and a lock used for access synchronization. */
static TAILQ_HEAD(, ptbl_buf) ptbl_buf_freelist;
static struct mtx ptbl_buf_freelist_lock;

/* Base address of kva space allocated for ptbl bufs. */
static vm_offset_t ptbl_buf_pool_vabase;

/* Pointer to ptbl_buf structures. */
static struct ptbl_buf *ptbl_bufs;
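/*
 * Illustrative sketch (comment only, not compiled): how a virtual address
 * is decomposed for the two-level page table walk used throughout this
 * file.  The exact shift/mask values live in the PDIR_IDX()/PTBL_IDX()
 * macros from machine/pte.h; the pattern below is the one every
 * pte_find()-style lookup follows:
 *
 *	pdir_idx = PDIR_IDX(va);	selects a ptbl in pmap->pm_pdir[]
 *	ptbl_idx = PTBL_IDX(va);	selects a pte within that ptbl
 *	pte = &pmap->pm_pdir[pdir_idx][ptbl_idx];
 */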
void pmap_bootstrap_ap(volatile uint32_t *);

/*
 * Kernel MMU interface
 */
static void	mmu_booke_change_wiring(mmu_t, pmap_t, vm_offset_t, boolean_t);
static void	mmu_booke_clear_modify(mmu_t, vm_page_t);
static void	mmu_booke_clear_reference(mmu_t, vm_page_t);
static void	mmu_booke_copy(mmu_t, pmap_t, pmap_t, vm_offset_t,
		    vm_size_t, vm_offset_t);
static void	mmu_booke_copy_page(mmu_t, vm_page_t, vm_page_t);
static void	mmu_booke_copy_pages(mmu_t, vm_page_t *,
		    vm_offset_t, vm_page_t *, vm_offset_t, int);
static void	mmu_booke_enter(mmu_t, pmap_t, vm_offset_t, vm_page_t,
		    vm_prot_t, boolean_t);
static void	mmu_booke_enter_object(mmu_t, pmap_t, vm_offset_t, vm_offset_t,
		    vm_page_t, vm_prot_t);
static void	mmu_booke_enter_quick(mmu_t, pmap_t, vm_offset_t, vm_page_t,
		    vm_prot_t);
static vm_paddr_t	mmu_booke_extract(mmu_t, pmap_t, vm_offset_t);
static vm_page_t	mmu_booke_extract_and_hold(mmu_t, pmap_t, vm_offset_t,
		    vm_prot_t);
static void	mmu_booke_init(mmu_t);
static boolean_t	mmu_booke_is_modified(mmu_t, vm_page_t);
static boolean_t	mmu_booke_is_prefaultable(mmu_t, pmap_t, vm_offset_t);
static boolean_t	mmu_booke_is_referenced(mmu_t, vm_page_t);
static boolean_t	mmu_booke_ts_referenced(mmu_t, vm_page_t);
static vm_offset_t	mmu_booke_map(mmu_t, vm_offset_t *, vm_offset_t,
		    vm_offset_t, int);
static int	mmu_booke_mincore(mmu_t, pmap_t, vm_offset_t,
		    vm_paddr_t *);
static void	mmu_booke_object_init_pt(mmu_t, pmap_t, vm_offset_t,
		    vm_object_t, vm_pindex_t, vm_size_t);
static boolean_t	mmu_booke_page_exists_quick(mmu_t, pmap_t, vm_page_t);
static void	mmu_booke_page_init(mmu_t, vm_page_t);
static int	mmu_booke_page_wired_mappings(mmu_t, vm_page_t);
static void	mmu_booke_pinit(mmu_t, pmap_t);
static void	mmu_booke_pinit0(mmu_t, pmap_t);
static void	mmu_booke_protect(mmu_t, pmap_t, vm_offset_t, vm_offset_t,
		    vm_prot_t);
static void	mmu_booke_qenter(mmu_t, vm_offset_t, vm_page_t *, int);
static void	mmu_booke_qremove(mmu_t, vm_offset_t, int);
static void	mmu_booke_release(mmu_t, pmap_t);
static void	mmu_booke_remove(mmu_t, pmap_t, vm_offset_t, vm_offset_t);
static void	mmu_booke_remove_all(mmu_t, vm_page_t);
static void	mmu_booke_remove_write(mmu_t, vm_page_t);
static void	mmu_booke_zero_page(mmu_t, vm_page_t);
static void	mmu_booke_zero_page_area(mmu_t, vm_page_t, int, int);
static void	mmu_booke_zero_page_idle(mmu_t, vm_page_t);
static void	mmu_booke_activate(mmu_t, struct thread *);
static void	mmu_booke_deactivate(mmu_t, struct thread *);
static void	mmu_booke_bootstrap(mmu_t, vm_offset_t, vm_offset_t);
static void	*mmu_booke_mapdev(mmu_t, vm_offset_t, vm_size_t);
static void	mmu_booke_unmapdev(mmu_t, vm_offset_t, vm_size_t);
static vm_paddr_t	mmu_booke_kextract(mmu_t, vm_offset_t);
static void	mmu_booke_kenter(mmu_t, vm_offset_t, vm_offset_t);
static void	mmu_booke_kremove(mmu_t, vm_offset_t);
static boolean_t	mmu_booke_dev_direct_mapped(mmu_t, vm_offset_t,
		    vm_size_t);
static void	mmu_booke_sync_icache(mmu_t, pmap_t, vm_offset_t,
		    vm_size_t);
static vm_offset_t	mmu_booke_dumpsys_map(mmu_t, struct pmap_md *,
		    vm_size_t, vm_size_t *);
static void	mmu_booke_dumpsys_unmap(mmu_t, struct pmap_md *,
		    vm_size_t, vm_offset_t);
static struct pmap_md	*mmu_booke_scan_md(mmu_t, struct pmap_md *);
static mmu_method_t mmu_booke_methods[] = {
	/* pmap dispatcher interface */
	MMUMETHOD(mmu_change_wiring,	mmu_booke_change_wiring),
	MMUMETHOD(mmu_clear_modify,	mmu_booke_clear_modify),
	MMUMETHOD(mmu_clear_reference,	mmu_booke_clear_reference),
	MMUMETHOD(mmu_copy,		mmu_booke_copy),
	MMUMETHOD(mmu_copy_page,	mmu_booke_copy_page),
	MMUMETHOD(mmu_copy_pages,	mmu_booke_copy_pages),
	MMUMETHOD(mmu_enter,		mmu_booke_enter),
	MMUMETHOD(mmu_enter_object,	mmu_booke_enter_object),
	MMUMETHOD(mmu_enter_quick,	mmu_booke_enter_quick),
	MMUMETHOD(mmu_extract,		mmu_booke_extract),
	MMUMETHOD(mmu_extract_and_hold,	mmu_booke_extract_and_hold),
	MMUMETHOD(mmu_init,		mmu_booke_init),
	MMUMETHOD(mmu_is_modified,	mmu_booke_is_modified),
	MMUMETHOD(mmu_is_prefaultable,	mmu_booke_is_prefaultable),
	MMUMETHOD(mmu_is_referenced,	mmu_booke_is_referenced),
	MMUMETHOD(mmu_ts_referenced,	mmu_booke_ts_referenced),
	MMUMETHOD(mmu_map,		mmu_booke_map),
	MMUMETHOD(mmu_mincore,		mmu_booke_mincore),
	MMUMETHOD(mmu_object_init_pt,	mmu_booke_object_init_pt),
	MMUMETHOD(mmu_page_exists_quick,mmu_booke_page_exists_quick),
	MMUMETHOD(mmu_page_init,	mmu_booke_page_init),
	MMUMETHOD(mmu_page_wired_mappings, mmu_booke_page_wired_mappings),
	MMUMETHOD(mmu_pinit,		mmu_booke_pinit),
	MMUMETHOD(mmu_pinit0,		mmu_booke_pinit0),
	MMUMETHOD(mmu_protect,		mmu_booke_protect),
	MMUMETHOD(mmu_qenter,		mmu_booke_qenter),
	MMUMETHOD(mmu_qremove,		mmu_booke_qremove),
	MMUMETHOD(mmu_release,		mmu_booke_release),
	MMUMETHOD(mmu_remove,		mmu_booke_remove),
	MMUMETHOD(mmu_remove_all,	mmu_booke_remove_all),
	MMUMETHOD(mmu_remove_write,	mmu_booke_remove_write),
	MMUMETHOD(mmu_sync_icache,	mmu_booke_sync_icache),
	MMUMETHOD(mmu_zero_page,	mmu_booke_zero_page),
	MMUMETHOD(mmu_zero_page_area,	mmu_booke_zero_page_area),
	MMUMETHOD(mmu_zero_page_idle,	mmu_booke_zero_page_idle),
	MMUMETHOD(mmu_activate,		mmu_booke_activate),
	MMUMETHOD(mmu_deactivate,	mmu_booke_deactivate),

	/* Internal interfaces */
	MMUMETHOD(mmu_bootstrap,	mmu_booke_bootstrap),
	MMUMETHOD(mmu_dev_direct_mapped,mmu_booke_dev_direct_mapped),
	MMUMETHOD(mmu_mapdev,		mmu_booke_mapdev),
	MMUMETHOD(mmu_kenter,		mmu_booke_kenter),
	MMUMETHOD(mmu_kextract,		mmu_booke_kextract),
/*	MMUMETHOD(mmu_kremove,		mmu_booke_kremove),	*/
	MMUMETHOD(mmu_unmapdev,		mmu_booke_unmapdev),

	/* dumpsys() support */
	MMUMETHOD(mmu_dumpsys_map,	mmu_booke_dumpsys_map),
	MMUMETHOD(mmu_dumpsys_unmap,	mmu_booke_dumpsys_unmap),
	MMUMETHOD(mmu_scan_md,		mmu_booke_scan_md),

	{ 0, 0 }
};

MMU_DEF(booke_mmu, MMU_TYPE_BOOKE, mmu_booke_methods, 0);

static inline void
tlb_miss_lock(void)
{
#ifdef SMP
	struct pcpu *pc;

	if (!smp_started)
		return;

	STAILQ_FOREACH(pc, &cpuhead, pc_allcpu) {
		if (pc != pcpup) {
			CTR3(KTR_PMAP, "%s: tlb miss LOCK of CPU=%d, "
			    "tlb_lock=%p", __func__, pc->pc_cpuid,
			    pc->pc_booke_tlb_lock);

			KASSERT((pc->pc_cpuid != PCPU_GET(cpuid)),
			    ("tlb_miss_lock: tried to lock self"));

			tlb_lock(pc->pc_booke_tlb_lock);

			CTR1(KTR_PMAP, "%s: locked", __func__);
		}
	}
#endif
}

static inline void
tlb_miss_unlock(void)
{
#ifdef SMP
	struct pcpu *pc;

	if (!smp_started)
		return;

	STAILQ_FOREACH(pc, &cpuhead, pc_allcpu) {
		if (pc != pcpup) {
			CTR2(KTR_PMAP, "%s: tlb miss UNLOCK of CPU=%d",
			    __func__, pc->pc_cpuid);

			tlb_unlock(pc->pc_booke_tlb_lock);

			CTR1(KTR_PMAP, "%s: unlocked", __func__);
		}
	}
#endif
}
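/*
 * Note (illustrative summary, derived from the functions above and their
 * callers below): every PTE update in this file uses the same bracketing.
 * tlbivax_mutex serializes tlbivax-based invalidations, while
 * tlb_miss_lock() keeps the other CPUs out of their TLB miss handlers so
 * they cannot walk a page table that is being modified:
 *
 *	mtx_lock_spin(&tlbivax_mutex);
 *	tlb_miss_lock();
 *	tlb0_flush_entry(va);
 *	... update pte->flags / pte->rpn ...
 *	tlb_miss_unlock();
 *	mtx_unlock_spin(&tlbivax_mutex);
 */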
/* Read TLB0 geometry (number of entries, associativity) from h/w. */
static __inline void
tlb0_get_tlbconf(void)
{
	uint32_t tlb0_cfg;

	tlb0_cfg = mfspr(SPR_TLB0CFG);
	tlb0_entries = tlb0_cfg & TLBCFG_NENTRY_MASK;
	tlb0_ways = (tlb0_cfg & TLBCFG_ASSOC_MASK) >> TLBCFG_ASSOC_SHIFT;
	tlb0_entries_per_way = tlb0_entries / tlb0_ways;
}

/* Initialize pool of kva ptbl buffers. */
static void
ptbl_init(void)
{
	int i;

	CTR3(KTR_PMAP, "%s: s (ptbl_bufs = 0x%08x size 0x%08x)", __func__,
	    (uint32_t)ptbl_bufs, sizeof(struct ptbl_buf) * PTBL_BUFS);
	CTR3(KTR_PMAP, "%s: s (ptbl_buf_pool_vabase = 0x%08x size = 0x%08x)",
	    __func__, ptbl_buf_pool_vabase, PTBL_BUFS * PTBL_PAGES * PAGE_SIZE);

	mtx_init(&ptbl_buf_freelist_lock, "ptbl bufs lock", NULL, MTX_DEF);
	TAILQ_INIT(&ptbl_buf_freelist);

	for (i = 0; i < PTBL_BUFS; i++) {
		ptbl_bufs[i].kva = ptbl_buf_pool_vabase +
		    i * PTBL_PAGES * PAGE_SIZE;
		TAILQ_INSERT_TAIL(&ptbl_buf_freelist, &ptbl_bufs[i], link);
	}
}

/* Get a ptbl_buf from the freelist. */
static struct ptbl_buf *
ptbl_buf_alloc(void)
{
	struct ptbl_buf *buf;

	mtx_lock(&ptbl_buf_freelist_lock);
	buf = TAILQ_FIRST(&ptbl_buf_freelist);
	if (buf != NULL)
		TAILQ_REMOVE(&ptbl_buf_freelist, buf, link);
	mtx_unlock(&ptbl_buf_freelist_lock);

	CTR2(KTR_PMAP, "%s: buf = %p", __func__, buf);

	return (buf);
}

/* Return ptbl buf to the free pool. */
static void
ptbl_buf_free(struct ptbl_buf *buf)
{

	CTR2(KTR_PMAP, "%s: buf = %p", __func__, buf);

	mtx_lock(&ptbl_buf_freelist_lock);
	TAILQ_INSERT_TAIL(&ptbl_buf_freelist, buf, link);
	mtx_unlock(&ptbl_buf_freelist_lock);
}

/*
 * Search the pmap's list of allocated ptbl bufs for the buf mapping the
 * given ptbl, remove it from the list and return it to the free pool.
 */
static void
ptbl_free_pmap_ptbl(pmap_t pmap, pte_t *ptbl)
{
	struct ptbl_buf *pbuf;

	CTR2(KTR_PMAP, "%s: ptbl = %p", __func__, ptbl);

	PMAP_LOCK_ASSERT(pmap, MA_OWNED);

	TAILQ_FOREACH(pbuf, &pmap->pm_ptbl_list, link)
		if (pbuf->kva == (vm_offset_t)ptbl) {
			/* Remove from pmap ptbl buf list. */
			TAILQ_REMOVE(&pmap->pm_ptbl_list, pbuf, link);

			/* Free corresponding ptbl buf. */
			ptbl_buf_free(pbuf);
			break;
		}
}

/* Allocate page table. */
static pte_t *
ptbl_alloc(mmu_t mmu, pmap_t pmap, unsigned int pdir_idx)
{
	vm_page_t mtbl[PTBL_PAGES];
	vm_page_t m;
	struct ptbl_buf *pbuf;
	unsigned int pidx;
	pte_t *ptbl;
	int i;

	CTR4(KTR_PMAP, "%s: pmap = %p su = %d pdir_idx = %d", __func__, pmap,
	    (pmap == kernel_pmap), pdir_idx);

	KASSERT((pdir_idx <= (VM_MAXUSER_ADDRESS / PDIR_SIZE)),
	    ("ptbl_alloc: invalid pdir_idx"));
	KASSERT((pmap->pm_pdir[pdir_idx] == NULL),
	    ("pte_alloc: valid ptbl entry exists!"));

	pbuf = ptbl_buf_alloc();
	if (pbuf == NULL)
		panic("pte_alloc: couldn't alloc kernel virtual memory");

	ptbl = (pte_t *)pbuf->kva;

	CTR2(KTR_PMAP, "%s: ptbl kva = %p", __func__, ptbl);

	/* Allocate ptbl pages, this will sleep! */
	for (i = 0; i < PTBL_PAGES; i++) {
		pidx = (PTBL_PAGES * pdir_idx) + i;
		while ((m = vm_page_alloc(NULL, pidx,
		    VM_ALLOC_NOOBJ | VM_ALLOC_WIRED)) == NULL) {

			PMAP_UNLOCK(pmap);
			vm_page_unlock_queues();
			VM_WAIT;
			vm_page_lock_queues();
			PMAP_LOCK(pmap);
		}
		mtbl[i] = m;
	}

	/* Map allocated pages into kernel_pmap. */
	mmu_booke_qenter(mmu, (vm_offset_t)ptbl, mtbl, PTBL_PAGES);

	/* Zero whole ptbl. */
	bzero((caddr_t)ptbl, PTBL_PAGES * PAGE_SIZE);

	/* Add pbuf to the pmap ptbl bufs list. */
	TAILQ_INSERT_TAIL(&pmap->pm_ptbl_list, pbuf, link);

	return (ptbl);
}
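/*
 * Illustrative lifecycle of a user page table (assumed typical call
 * sequence, pieced together from the routines in this section):
 *
 *	pte_enter()  -> ptbl_alloc() for the first pte in a PDIR_SIZE
 *	                region, or ptbl_hold() for each additional pte
 *	pte_remove() -> ptbl_unhold() for each removed pte; when the last
 *	                pte goes away ptbl_unhold() calls ptbl_free()
 *
 * The hold count itself is kept in the wire_count of the ptbl's own
 * pages, see ptbl_hold()/ptbl_unhold() below.
 */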
/* Free ptbl pages and invalidate pdir entry. */
static void
ptbl_free(mmu_t mmu, pmap_t pmap, unsigned int pdir_idx)
{
	pte_t *ptbl;
	vm_paddr_t pa;
	vm_offset_t va;
	vm_page_t m;
	int i;

	CTR4(KTR_PMAP, "%s: pmap = %p su = %d pdir_idx = %d", __func__, pmap,
	    (pmap == kernel_pmap), pdir_idx);

	KASSERT((pdir_idx <= (VM_MAXUSER_ADDRESS / PDIR_SIZE)),
	    ("ptbl_free: invalid pdir_idx"));

	ptbl = pmap->pm_pdir[pdir_idx];

	CTR2(KTR_PMAP, "%s: ptbl = %p", __func__, ptbl);

	KASSERT((ptbl != NULL), ("ptbl_free: null ptbl"));

	/*
	 * Invalidate the pdir entry as soon as possible, so that other CPUs
	 * don't attempt to look up the page tables we are releasing.
	 */
	mtx_lock_spin(&tlbivax_mutex);
	tlb_miss_lock();

	pmap->pm_pdir[pdir_idx] = NULL;

	tlb_miss_unlock();
	mtx_unlock_spin(&tlbivax_mutex);

	for (i = 0; i < PTBL_PAGES; i++) {
		va = ((vm_offset_t)ptbl + (i * PAGE_SIZE));
		pa = pte_vatopa(mmu, kernel_pmap, va);
		m = PHYS_TO_VM_PAGE(pa);
		vm_page_free_zero(m);
		atomic_subtract_int(&cnt.v_wire_count, 1);
		mmu_booke_kremove(mmu, va);
	}

	ptbl_free_pmap_ptbl(pmap, ptbl);
}

/*
 * Decrement ptbl pages hold count and attempt to free ptbl pages.
 * Called when removing pte entry from ptbl.
 *
 * Return 1 if ptbl pages were freed.
 */
static int
ptbl_unhold(mmu_t mmu, pmap_t pmap, unsigned int pdir_idx)
{
	pte_t *ptbl;
	vm_paddr_t pa;
	vm_page_t m;
	int i;

	CTR4(KTR_PMAP, "%s: pmap = %p su = %d pdir_idx = %d", __func__, pmap,
	    (pmap == kernel_pmap), pdir_idx);

	KASSERT((pdir_idx <= (VM_MAXUSER_ADDRESS / PDIR_SIZE)),
	    ("ptbl_unhold: invalid pdir_idx"));
	KASSERT((pmap != kernel_pmap),
	    ("ptbl_unhold: unholding kernel ptbl!"));

	ptbl = pmap->pm_pdir[pdir_idx];

	//debugf("ptbl_unhold: ptbl = 0x%08x\n", (u_int32_t)ptbl);
	KASSERT(((vm_offset_t)ptbl >= VM_MIN_KERNEL_ADDRESS),
	    ("ptbl_unhold: non kva ptbl"));

	/* decrement hold count */
	for (i = 0; i < PTBL_PAGES; i++) {
		pa = pte_vatopa(mmu, kernel_pmap,
		    (vm_offset_t)ptbl + (i * PAGE_SIZE));
		m = PHYS_TO_VM_PAGE(pa);
		m->wire_count--;
	}

	/*
	 * Free ptbl pages if there are no pte entries in this ptbl.
	 * wire_count has the same value for all ptbl pages, so check the
	 * last page.
	 */
	if (m->wire_count == 0) {
		ptbl_free(mmu, pmap, pdir_idx);

		//debugf("ptbl_unhold: e (freed ptbl)\n");
		return (1);
	}

	return (0);
}
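/*
 * Worked example (illustrative; the real PTBL_PAGES value comes from
 * machine/pte.h): all PTBL_PAGES pages of a ptbl are held and unheld
 * together, so their wire_count values move in lockstep and equal the
 * number of valid ptes in the ptbl:
 *
 *	3 valid ptes in the ptbl   ->  wire_count == 3 on every ptbl page
 *	pte_remove() of one pte    ->  ptbl_unhold() drops them all to 2
 *	last pte removed           ->  wire_count hits 0, ptbl_free()
 */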
/*
 * Increment hold count for ptbl pages. This routine is used when a new pte
 * entry is being inserted into the ptbl.
 */
static void
ptbl_hold(mmu_t mmu, pmap_t pmap, unsigned int pdir_idx)
{
	vm_paddr_t pa;
	pte_t *ptbl;
	vm_page_t m;
	int i;

	CTR3(KTR_PMAP, "%s: pmap = %p pdir_idx = %d", __func__, pmap,
	    pdir_idx);

	KASSERT((pdir_idx <= (VM_MAXUSER_ADDRESS / PDIR_SIZE)),
	    ("ptbl_hold: invalid pdir_idx"));
	KASSERT((pmap != kernel_pmap),
	    ("ptbl_hold: holding kernel ptbl!"));

	ptbl = pmap->pm_pdir[pdir_idx];

	KASSERT((ptbl != NULL), ("ptbl_hold: null ptbl"));

	for (i = 0; i < PTBL_PAGES; i++) {
		pa = pte_vatopa(mmu, kernel_pmap,
		    (vm_offset_t)ptbl + (i * PAGE_SIZE));
		m = PHYS_TO_VM_PAGE(pa);
		m->wire_count++;
	}
}

/* Allocate pv_entry structure. */
static pv_entry_t
pv_alloc(void)
{
	pv_entry_t pv;

	pv_entry_count++;
	if (pv_entry_count > pv_entry_high_water)
		pagedaemon_wakeup();
	pv = uma_zalloc(pvzone, M_NOWAIT);

	return (pv);
}

/* Free pv_entry structure. */
static __inline void
pv_free(pv_entry_t pve)
{

	pv_entry_count--;
	uma_zfree(pvzone, pve);
}

/* Allocate and initialize pv_entry structure. */
static void
pv_insert(pmap_t pmap, vm_offset_t va, vm_page_t m)
{
	pv_entry_t pve;

	//int su = (pmap == kernel_pmap);
	//debugf("pv_insert: s (su = %d pmap = 0x%08x va = 0x%08x m = 0x%08x)\n", su,
	//    (u_int32_t)pmap, va, (u_int32_t)m);

	pve = pv_alloc();
	if (pve == NULL)
		panic("pv_insert: no pv entries!");

	pve->pv_pmap = pmap;
	pve->pv_va = va;

	/* add to pv_list */
	PMAP_LOCK_ASSERT(pmap, MA_OWNED);
	mtx_assert(&vm_page_queue_mtx, MA_OWNED);

	TAILQ_INSERT_TAIL(&m->md.pv_list, pve, pv_link);

	//debugf("pv_insert: e\n");
}

/* Destroy pv entry. */
static void
pv_remove(pmap_t pmap, vm_offset_t va, vm_page_t m)
{
	pv_entry_t pve;

	//int su = (pmap == kernel_pmap);
	//debugf("pv_remove: s (su = %d pmap = 0x%08x va = 0x%08x)\n", su, (u_int32_t)pmap, va);

	PMAP_LOCK_ASSERT(pmap, MA_OWNED);
	mtx_assert(&vm_page_queue_mtx, MA_OWNED);

	/* find pv entry */
	TAILQ_FOREACH(pve, &m->md.pv_list, pv_link) {
		if ((pmap == pve->pv_pmap) && (va == pve->pv_va)) {
			/* remove from pv_list */
			TAILQ_REMOVE(&m->md.pv_list, pve, pv_link);
			if (TAILQ_EMPTY(&m->md.pv_list))
				vm_page_aflag_clear(m, PGA_WRITEABLE);

			/* free pv entry struct */
			pv_free(pve);
			break;
		}
	}

	//debugf("pv_remove: e\n");
}
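/*
 * Illustrative pairing (derived from pte_enter()/pte_remove() below):
 * every managed mapping gets exactly one pv entry linking the (pmap, va)
 * pair onto the page's md.pv_list, and it is taken off again when the
 * mapping is destroyed:
 *
 *	pv_insert(pmap, va, m);		as part of pte_enter()
 *	...
 *	pv_remove(pmap, va, m);		as part of pte_remove()
 *
 * This list is what lets mmu_booke_remove_all() find every mapping of a
 * given physical page.
 */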
/*
 * Clean pte entry, try to free page table page if requested.
 *
 * Return 1 if ptbl pages were freed, otherwise return 0.
 */
static int
pte_remove(mmu_t mmu, pmap_t pmap, vm_offset_t va, uint8_t flags)
{
	unsigned int pdir_idx = PDIR_IDX(va);
	unsigned int ptbl_idx = PTBL_IDX(va);
	vm_page_t m;
	pte_t *ptbl;
	pte_t *pte;

	//int su = (pmap == kernel_pmap);
	//debugf("pte_remove: s (su = %d pmap = 0x%08x va = 0x%08x flags = %d)\n",
	//    su, (u_int32_t)pmap, va, flags);

	ptbl = pmap->pm_pdir[pdir_idx];
	KASSERT(ptbl, ("pte_remove: null ptbl"));

	pte = &ptbl[ptbl_idx];

	if (pte == NULL || !PTE_ISVALID(pte))
		return (0);

	if (PTE_ISWIRED(pte))
		pmap->pm_stats.wired_count--;

	/* Handle managed entry. */
	if (PTE_ISMANAGED(pte)) {
		/* Get vm_page_t for mapped pte. */
		m = PHYS_TO_VM_PAGE(PTE_PA(pte));

		if (PTE_ISMODIFIED(pte))
			vm_page_dirty(m);

		if (PTE_ISREFERENCED(pte))
			vm_page_aflag_set(m, PGA_REFERENCED);

		pv_remove(pmap, va, m);
	}

	mtx_lock_spin(&tlbivax_mutex);
	tlb_miss_lock();

	tlb0_flush_entry(va);
	pte->flags = 0;
	pte->rpn = 0;

	tlb_miss_unlock();
	mtx_unlock_spin(&tlbivax_mutex);

	pmap->pm_stats.resident_count--;

	if (flags & PTBL_UNHOLD) {
		//debugf("pte_remove: e (unhold)\n");
		return (ptbl_unhold(mmu, pmap, pdir_idx));
	}

	//debugf("pte_remove: e\n");
	return (0);
}

/*
 * Insert PTE for a given page and virtual address.
 */
static void
pte_enter(mmu_t mmu, pmap_t pmap, vm_page_t m, vm_offset_t va, uint32_t flags)
{
	unsigned int pdir_idx = PDIR_IDX(va);
	unsigned int ptbl_idx = PTBL_IDX(va);
	pte_t *ptbl, *pte;

	CTR4(KTR_PMAP, "%s: su = %d pmap = %p va = %p", __func__,
	    pmap == kernel_pmap, pmap, va);

	/* Get the page table pointer. */
	ptbl = pmap->pm_pdir[pdir_idx];

	if (ptbl == NULL) {
		/* Allocate page table pages. */
		ptbl = ptbl_alloc(mmu, pmap, pdir_idx);
	} else {
		/*
		 * Check if there is a valid mapping for the requested va;
		 * if there is, remove it.
		 */
		pte = &pmap->pm_pdir[pdir_idx][ptbl_idx];
		if (PTE_ISVALID(pte)) {
			pte_remove(mmu, pmap, va, PTBL_HOLD);
		} else {
			/*
			 * pte is not used, increment hold count for ptbl
			 * pages.
			 */
			if (pmap != kernel_pmap)
				ptbl_hold(mmu, pmap, pdir_idx);
		}
	}

	/*
	 * Insert pv_entry into pv_list for mapped page if part of managed
	 * memory.
	 */
	if ((m->oflags & VPO_UNMANAGED) == 0) {
		flags |= PTE_MANAGED;

		/* Create and insert pv entry. */
		pv_insert(pmap, va, m);
	}

	pmap->pm_stats.resident_count++;

	mtx_lock_spin(&tlbivax_mutex);
	tlb_miss_lock();

	tlb0_flush_entry(va);
	if (pmap->pm_pdir[pdir_idx] == NULL) {
		/*
		 * If we just allocated a new page table, hook it in
		 * the pdir.
		 */
		pmap->pm_pdir[pdir_idx] = ptbl;
	}
	pte = &(pmap->pm_pdir[pdir_idx][ptbl_idx]);
	pte->rpn = VM_PAGE_TO_PHYS(m) & ~PTE_PA_MASK;
	pte->flags |= (PTE_VALID | flags);

	tlb_miss_unlock();
	mtx_unlock_spin(&tlbivax_mutex);
}

/* Return the pa for the given pmap/va. */
static vm_paddr_t
pte_vatopa(mmu_t mmu, pmap_t pmap, vm_offset_t va)
{
	vm_paddr_t pa = 0;
	pte_t *pte;

	pte = pte_find(mmu, pmap, va);
	if ((pte != NULL) && PTE_ISVALID(pte))
		pa = (PTE_PA(pte) | (va & PTE_PA_MASK));
	return (pa);
}

/* Get a pointer to a PTE in a page table. */
static pte_t *
pte_find(mmu_t mmu, pmap_t pmap, vm_offset_t va)
{
	unsigned int pdir_idx = PDIR_IDX(va);
	unsigned int ptbl_idx = PTBL_IDX(va);

	KASSERT((pmap != NULL), ("pte_find: invalid pmap"));

	if (pmap->pm_pdir[pdir_idx])
		return (&(pmap->pm_pdir[pdir_idx][ptbl_idx]));

	return (NULL);
}
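/*
 * Worked example for pte_vatopa() above (illustrative numbers;
 * PTE_PA_MASK is the page-offset mask from machine/pte.h): with 4K pages,
 * a va of 0xc000_1234 whose pte holds rpn 0x0100_0000 translates as
 *
 *	pa = PTE_PA(pte) | (va & PTE_PA_MASK)
 *	   = 0x0100_0000 | 0x234
 *	   = 0x0100_0234
 *
 * i.e. the page frame comes from the pte and the low bits from the va.
 */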
/**************************************************************************/
/* PMAP related */
/**************************************************************************/

/*
 * This is called during booke_init, before the system is really initialized.
 */
static void
mmu_booke_bootstrap(mmu_t mmu, vm_offset_t start, vm_offset_t kernelend)
{
	vm_offset_t phys_kernelend;
	struct mem_region *mp, *mp1;
	int cnt, i, j;
	u_int s, e, sz;
	u_int phys_avail_count;
	vm_size_t physsz, hwphyssz, kstack0_sz;
	vm_offset_t kernel_pdir, kstack0, va;
	vm_paddr_t kstack0_phys;
	void *dpcpu;
	pte_t *pte;

	debugf("mmu_booke_bootstrap: entered\n");

#ifdef SMP
	kernload_ap = kernload;
#endif

	/* Initialize invalidation mutex */
	mtx_init(&tlbivax_mutex, "tlbivax", NULL, MTX_SPIN);

	/* Read TLB0 size and associativity. */
	tlb0_get_tlbconf();

	/*
	 * Align kernel start and end address (kernel image).
	 * Note that kernel end does not necessarily relate to kernsize.
	 * kernsize is the size of the kernel that is actually mapped.
	 */
	kernstart = trunc_page(start);
	data_start = round_page(kernelend);
	data_end = data_start;

	/*
	 * Addresses of preloaded modules (like file systems) use
	 * physical addresses. Make sure we relocate those into
	 * virtual addresses.
	 */
	preload_addr_relocate = kernstart - kernload;

	/* Allocate the dynamic per-cpu area. */
	dpcpu = (void *)data_end;
	data_end += DPCPU_SIZE;

	/* Allocate space for the message buffer. */
	msgbufp = (struct msgbuf *)data_end;
	data_end += msgbufsize;
	debugf(" msgbufp at 0x%08x end = 0x%08x\n", (uint32_t)msgbufp,
	    data_end);

	data_end = round_page(data_end);

	/* Allocate space for ptbl_bufs. */
	ptbl_bufs = (struct ptbl_buf *)data_end;
	data_end += sizeof(struct ptbl_buf) * PTBL_BUFS;
	debugf(" ptbl_bufs at 0x%08x end = 0x%08x\n", (uint32_t)ptbl_bufs,
	    data_end);

	data_end = round_page(data_end);

	/* Allocate PTE tables for kernel KVA. */
	kernel_pdir = data_end;
	kernel_ptbls = (VM_MAX_KERNEL_ADDRESS - VM_MIN_KERNEL_ADDRESS +
	    PDIR_SIZE - 1) / PDIR_SIZE;
	data_end += kernel_ptbls * PTBL_PAGES * PAGE_SIZE;
	debugf(" kernel ptbls: %d\n", kernel_ptbls);
	debugf(" kernel pdir at 0x%08x end = 0x%08x\n", kernel_pdir, data_end);

	debugf(" data_end: 0x%08x\n", data_end);
	if (data_end - kernstart > kernsize) {
		kernsize += tlb1_mapin_region(kernstart + kernsize,
		    kernload + kernsize, (data_end - kernstart) - kernsize);
	}
	data_end = kernstart + kernsize;
	debugf(" updated data_end: 0x%08x\n", data_end);

	/*
	 * Clear the structures - note we can only do it safely after the
	 * possible additional TLB1 translations are in place (above) so that
	 * the whole range up to the currently calculated 'data_end' is
	 * covered.
	 */
	dpcpu_init(dpcpu, 0);
	memset((void *)ptbl_bufs, 0, sizeof(struct ptbl_buf) * PTBL_BUFS);
	memset((void *)kernel_pdir, 0, kernel_ptbls * PTBL_PAGES * PAGE_SIZE);

	/*******************************************************/
	/* Set the start and end of kva. */
	/*******************************************************/
	virtual_avail = round_page(data_end);
	virtual_end = VM_MAX_KERNEL_ADDRESS;
	/* Allocate KVA space for page zero/copy operations. */
	zero_page_va = virtual_avail;
	virtual_avail += PAGE_SIZE;
	zero_page_idle_va = virtual_avail;
	virtual_avail += PAGE_SIZE;
	copy_page_src_va = virtual_avail;
	virtual_avail += PAGE_SIZE;
	copy_page_dst_va = virtual_avail;
	virtual_avail += PAGE_SIZE;
	debugf("zero_page_va = 0x%08x\n", zero_page_va);
	debugf("zero_page_idle_va = 0x%08x\n", zero_page_idle_va);
	debugf("copy_page_src_va = 0x%08x\n", copy_page_src_va);
	debugf("copy_page_dst_va = 0x%08x\n", copy_page_dst_va);

	/* Initialize page zero/copy mutexes. */
	mtx_init(&zero_page_mutex, "mmu_booke_zero_page", NULL, MTX_DEF);
	mtx_init(&copy_page_mutex, "mmu_booke_copy_page", NULL, MTX_DEF);

	/* Allocate KVA space for ptbl bufs. */
	ptbl_buf_pool_vabase = virtual_avail;
	virtual_avail += PTBL_BUFS * PTBL_PAGES * PAGE_SIZE;
	debugf("ptbl_buf_pool_vabase = 0x%08x end = 0x%08x\n",
	    ptbl_buf_pool_vabase, virtual_avail);

	/* Calculate corresponding physical addresses for the kernel region. */
	phys_kernelend = kernload + kernsize;
	debugf("kernel image and allocated data:\n");
	debugf(" kernload = 0x%08x\n", kernload);
	debugf(" kernstart = 0x%08x\n", kernstart);
	debugf(" kernsize = 0x%08x\n", kernsize);

	if (sizeof(phys_avail) / sizeof(phys_avail[0]) < availmem_regions_sz)
		panic("mmu_booke_bootstrap: phys_avail too small");

	/*
	 * Remove kernel physical address range from avail regions list. Page
	 * align all regions. Non-page aligned memory isn't very interesting
	 * to us. Also, sort the entries for ascending addresses.
	 */

	/* Retrieve phys/avail mem regions */
	mem_regions(&physmem_regions, &physmem_regions_sz,
	    &availmem_regions, &availmem_regions_sz);
	sz = 0;
	cnt = availmem_regions_sz;
	debugf("processing avail regions:\n");
	for (mp = availmem_regions; mp->mr_size; mp++) {
		s = mp->mr_start;
		e = mp->mr_start + mp->mr_size;
		debugf(" %08x-%08x -> ", s, e);
		/* Check whether this region holds all of the kernel. */
		if (s < kernload && e > phys_kernelend) {
			availmem_regions[cnt].mr_start = phys_kernelend;
			availmem_regions[cnt++].mr_size = e - phys_kernelend;
			e = kernload;
		}
		/* Look whether this region starts within the kernel. */
		if (s >= kernload && s < phys_kernelend) {
			if (e <= phys_kernelend)
				goto empty;
			s = phys_kernelend;
		}
		/* Now look whether this region ends within the kernel. */
		if (e > kernload && e <= phys_kernelend) {
			if (s >= kernload)
				goto empty;
			e = kernload;
		}
		/* Now page align the start and size of the region. */
		s = round_page(s);
		e = trunc_page(e);
		if (e < s)
			e = s;
		sz = e - s;
		debugf("%08x-%08x = %x\n", s, e, sz);

		/* Check whether some memory is left here. */
		if (sz == 0) {
		empty:
			memmove(mp, mp + 1,
			    (cnt - (mp - availmem_regions)) * sizeof(*mp));
			cnt--;
			mp--;
			continue;
		}

		/* Do an insertion sort. */
		for (mp1 = availmem_regions; mp1 < mp; mp1++)
			if (s < mp1->mr_start)
				break;
		if (mp1 < mp) {
			memmove(mp1 + 1, mp1, (char *)mp - (char *)mp1);
			mp1->mr_start = s;
			mp1->mr_size = sz;
		} else {
			mp->mr_start = s;
			mp->mr_size = sz;
		}
	}
	availmem_regions_sz = cnt;
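	/*
	 * Worked example of the clipping above (illustrative numbers): with
	 * an avail region 0x0000_0000-0x2000_0000 and a kernel occupying
	 * kernload = 0x0100_0000 .. phys_kernelend = 0x0180_0000, the region
	 * straddles the kernel, so it is split in two:
	 *
	 *	0x0000_0000 - 0x0100_0000	(original entry, end clipped)
	 *	0x0180_0000 - 0x2000_0000	(new entry appended, sorted in)
	 */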
	/*******************************************************/
	/* Steal physical memory for kernel stack from the end */
	/* of the first avail region                           */
	/*******************************************************/
	kstack0_sz = KSTACK_PAGES * PAGE_SIZE;
	kstack0_phys = availmem_regions[0].mr_start +
	    availmem_regions[0].mr_size;
	kstack0_phys -= kstack0_sz;
	availmem_regions[0].mr_size -= kstack0_sz;

	/*******************************************************/
	/* Fill in phys_avail table, based on availmem_regions */
	/*******************************************************/
	phys_avail_count = 0;
	physsz = 0;
	hwphyssz = 0;
	TUNABLE_ULONG_FETCH("hw.physmem", (u_long *) &hwphyssz);

	debugf("fill in phys_avail:\n");
	for (i = 0, j = 0; i < availmem_regions_sz; i++, j += 2) {

		debugf(" region: 0x%08x - 0x%08x (0x%08x)\n",
		    availmem_regions[i].mr_start,
		    availmem_regions[i].mr_start +
		    availmem_regions[i].mr_size,
		    availmem_regions[i].mr_size);

		if (hwphyssz != 0 &&
		    (physsz + availmem_regions[i].mr_size) >= hwphyssz) {
			debugf(" hw.physmem adjust\n");
			if (physsz < hwphyssz) {
				phys_avail[j] = availmem_regions[i].mr_start;
				phys_avail[j + 1] =
				    availmem_regions[i].mr_start +
				    hwphyssz - physsz;
				physsz = hwphyssz;
				phys_avail_count++;
			}
			break;
		}

		phys_avail[j] = availmem_regions[i].mr_start;
		phys_avail[j + 1] = availmem_regions[i].mr_start +
		    availmem_regions[i].mr_size;
		phys_avail_count++;
		physsz += availmem_regions[i].mr_size;
	}
	physmem = btoc(physsz);

	/* Calculate the last available physical address. */
	for (i = 0; phys_avail[i + 2] != 0; i += 2)
		;
	Maxmem = powerpc_btop(phys_avail[i + 1]);

	debugf("Maxmem = 0x%08lx\n", Maxmem);
	debugf("phys_avail_count = %d\n", phys_avail_count);
	debugf("physsz = 0x%08x physmem = %ld (0x%08lx)\n", physsz, physmem,
	    physmem);
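	/*
	 * Illustrative layout (assumed two avail regions): phys_avail[]
	 * stores start/end pairs at even/odd indices, terminated by a zero
	 * entry, e.g.:
	 *
	 *	phys_avail[0] = 0x0180_0000;  phys_avail[1] = 0x1000_0000;
	 *	phys_avail[2] = 0x2000_0000;  phys_avail[3] = 0x3000_0000;
	 *	phys_avail[4] = 0;            (terminator)
	 *
	 * which is exactly the shape the Maxmem scan above relies on.
	 */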
	/*******************************************************/
	/* Initialize (statically allocated) kernel pmap. */
	/*******************************************************/
	PMAP_LOCK_INIT(kernel_pmap);
	kptbl_min = VM_MIN_KERNEL_ADDRESS / PDIR_SIZE;

	debugf("kernel_pmap = 0x%08x\n", (uint32_t)kernel_pmap);
	debugf("kptbl_min = %d, kernel_ptbls = %d\n", kptbl_min, kernel_ptbls);
	debugf("kernel pdir range: 0x%08x - 0x%08x\n",
	    kptbl_min * PDIR_SIZE, (kptbl_min + kernel_ptbls) * PDIR_SIZE - 1);

	/* Initialize kernel pdir */
	for (i = 0; i < kernel_ptbls; i++)
		kernel_pmap->pm_pdir[kptbl_min + i] =
		    (pte_t *)(kernel_pdir + (i * PAGE_SIZE * PTBL_PAGES));

	for (i = 0; i < MAXCPU; i++) {
		kernel_pmap->pm_tid[i] = TID_KERNEL;

		/* Initialize each CPU's tidbusy entry 0 with kernel_pmap */
		tidbusy[i][0] = kernel_pmap;
	}

	/*
	 * Fill in PTEs covering kernel code and data. They are not required
	 * for address translation, as this area is covered by static TLB1
	 * entries, but for pte_vatopa() to work correctly with kernel area
	 * addresses.
	 */
	for (va = KERNBASE; va < data_end; va += PAGE_SIZE) {
		pte = &(kernel_pmap->pm_pdir[PDIR_IDX(va)][PTBL_IDX(va)]);
		pte->rpn = kernload + (va - KERNBASE);
		pte->flags = PTE_M | PTE_SR | PTE_SW | PTE_SX | PTE_WIRED |
		    PTE_VALID;
	}
	/* Mark kernel_pmap active on all CPUs */
	CPU_FILL(&kernel_pmap->pm_active);

	/*******************************************************/
	/* Final setup */
	/*******************************************************/

	/* Enter kstack0 into kernel map, provide guard page */
	kstack0 = virtual_avail + KSTACK_GUARD_PAGES * PAGE_SIZE;
	thread0.td_kstack = kstack0;
	thread0.td_kstack_pages = KSTACK_PAGES;

	debugf("kstack_sz = 0x%08x\n", kstack0_sz);
	debugf("kstack0_phys at 0x%08x - 0x%08x\n",
	    kstack0_phys, kstack0_phys + kstack0_sz);
	debugf("kstack0 at 0x%08x - 0x%08x\n", kstack0, kstack0 + kstack0_sz);

	virtual_avail += KSTACK_GUARD_PAGES * PAGE_SIZE + kstack0_sz;
	for (i = 0; i < KSTACK_PAGES; i++) {
		mmu_booke_kenter(mmu, kstack0, kstack0_phys);
		kstack0 += PAGE_SIZE;
		kstack0_phys += PAGE_SIZE;
	}

	debugf("virtual_avail = %08x\n", virtual_avail);
	debugf("virtual_end = %08x\n", virtual_end);

	debugf("mmu_booke_bootstrap: exit\n");
}

void
pmap_bootstrap_ap(volatile uint32_t *trcp __unused)
{
	int i;

	/*
	 * Finish TLB1 configuration: the BSP already set up its TLB1 and we
	 * have the snapshot of its contents in the s/w tlb1[] table, so use
	 * these values directly to (re)program AP's TLB1 hardware.
	 */
	for (i = 0; i < tlb1_idx; i++) {
		/* Skip invalid entries */
		if (!(tlb1[i].mas1 & MAS1_VALID))
			continue;

		tlb1_write_entry(i);
	}

	set_mas4_defaults();
}

/*
 * Get the physical page address for the given pmap/virtual address.
 */
static vm_paddr_t
mmu_booke_extract(mmu_t mmu, pmap_t pmap, vm_offset_t va)
{
	vm_paddr_t pa;

	PMAP_LOCK(pmap);
	pa = pte_vatopa(mmu, pmap, va);
	PMAP_UNLOCK(pmap);

	return (pa);
}

/*
 * Extract the physical page address associated with the given
 * kernel virtual address.
 */
static vm_paddr_t
mmu_booke_kextract(mmu_t mmu, vm_offset_t va)
{

	return (pte_vatopa(mmu, kernel_pmap, va));
}

/*
 * Initialize the pmap module.
 * Called by vm_init, to initialize any structures that the pmap
 * system needs to map virtual memory.
 */
static void
mmu_booke_init(mmu_t mmu)
{
	int shpgperproc = PMAP_SHPGPERPROC;

	/*
	 * Initialize the address space (zone) for the pv entries. Set a
	 * high water mark so that the system can recover from excessive
	 * numbers of pv entries.
	 */
	pvzone = uma_zcreate("PV ENTRY", sizeof(struct pv_entry), NULL, NULL,
	    NULL, NULL, UMA_ALIGN_PTR, UMA_ZONE_VM | UMA_ZONE_NOFREE);

	TUNABLE_INT_FETCH("vm.pmap.shpgperproc", &shpgperproc);
	pv_entry_max = shpgperproc * maxproc + cnt.v_page_count;

	TUNABLE_INT_FETCH("vm.pmap.pv_entries", &pv_entry_max);
	pv_entry_high_water = 9 * (pv_entry_max / 10);

	uma_zone_set_obj(pvzone, &pvzone_obj, pv_entry_max);

	/* Pre-fill pvzone with initial number of pv entries. */
	uma_prealloc(pvzone, PV_ENTRY_ZONE_MIN);

	/* Initialize ptbl allocation. */
	ptbl_init();
}
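/*
 * Worked sizing example for mmu_booke_init() above (illustrative numbers):
 * with the default PMAP_SHPGPERPROC = 200, maxproc = 1000 and
 * cnt.v_page_count = 262144 (1GB of 4K pages),
 *
 *	pv_entry_max        = 200 * 1000 + 262144 = 462144
 *	pv_entry_high_water = 9 * (462144 / 10)   = 415926
 *
 * pv_alloc() starts waking the pagedaemon once the entry count crosses
 * the high-water mark.
 */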
/*
 * Map a list of wired pages into kernel virtual address space. This is
 * intended for temporary mappings which do not need page modification or
 * references recorded. Existing mappings in the region are overwritten.
 */
static void
mmu_booke_qenter(mmu_t mmu, vm_offset_t sva, vm_page_t *m, int count)
{
	vm_offset_t va;

	va = sva;
	while (count-- > 0) {
		mmu_booke_kenter(mmu, va, VM_PAGE_TO_PHYS(*m));
		va += PAGE_SIZE;
		m++;
	}
}

/*
 * Remove page mappings from kernel virtual address space. Intended for
 * temporary mappings entered by mmu_booke_qenter.
 */
static void
mmu_booke_qremove(mmu_t mmu, vm_offset_t sva, int count)
{
	vm_offset_t va;

	va = sva;
	while (count-- > 0) {
		mmu_booke_kremove(mmu, va);
		va += PAGE_SIZE;
	}
}

/*
 * Map a wired page into kernel virtual address space.
 */
static void
mmu_booke_kenter(mmu_t mmu, vm_offset_t va, vm_offset_t pa)
{
	unsigned int pdir_idx = PDIR_IDX(va);
	unsigned int ptbl_idx = PTBL_IDX(va);
	uint32_t flags;
	pte_t *pte;

	KASSERT(((va >= VM_MIN_KERNEL_ADDRESS) &&
	    (va <= VM_MAX_KERNEL_ADDRESS)), ("mmu_booke_kenter: invalid va"));

	flags = 0;
	flags |= (PTE_SR | PTE_SW | PTE_SX | PTE_WIRED | PTE_VALID);
	flags |= PTE_M;

	pte = &(kernel_pmap->pm_pdir[pdir_idx][ptbl_idx]);

	mtx_lock_spin(&tlbivax_mutex);
	tlb_miss_lock();

	if (PTE_ISVALID(pte)) {

		CTR1(KTR_PMAP, "%s: replacing entry!", __func__);

		/* Flush entry from TLB0 */
		tlb0_flush_entry(va);
	}

	pte->rpn = pa & ~PTE_PA_MASK;
	pte->flags = flags;

	//debugf("mmu_booke_kenter: pdir_idx = %d ptbl_idx = %d va=0x%08x "
	//    "pa=0x%08x rpn=0x%08x flags=0x%08x\n",
	//    pdir_idx, ptbl_idx, va, pa, pte->rpn, pte->flags);

	/* Flush the real memory from the instruction cache. */
	if ((flags & (PTE_I | PTE_G)) == 0) {
		__syncicache((void *)va, PAGE_SIZE);
	}

	tlb_miss_unlock();
	mtx_unlock_spin(&tlbivax_mutex);
}

/*
 * Remove a page from kernel page table.
 */
static void
mmu_booke_kremove(mmu_t mmu, vm_offset_t va)
{
	unsigned int pdir_idx = PDIR_IDX(va);
	unsigned int ptbl_idx = PTBL_IDX(va);
	pte_t *pte;

//	CTR2(KTR_PMAP, ("%s: s (va = 0x%08x)\n", __func__, va));

	KASSERT(((va >= VM_MIN_KERNEL_ADDRESS) &&
	    (va <= VM_MAX_KERNEL_ADDRESS)),
	    ("mmu_booke_kremove: invalid va"));

	pte = &(kernel_pmap->pm_pdir[pdir_idx][ptbl_idx]);

	if (!PTE_ISVALID(pte)) {

		CTR1(KTR_PMAP, "%s: invalid pte", __func__);

		return;
	}

	mtx_lock_spin(&tlbivax_mutex);
	tlb_miss_lock();

	/* Invalidate entry in TLB0, update PTE. */
	tlb0_flush_entry(va);
	pte->flags = 0;
	pte->rpn = 0;

	tlb_miss_unlock();
	mtx_unlock_spin(&tlbivax_mutex);
}
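/*
 * Note on the flags used by mmu_booke_kenter() above (summary of the bits
 * defined in machine/pte.h): kernel mappings are entered with
 *
 *	PTE_SR | PTE_SW | PTE_SX	supervisor read/write/execute
 *	PTE_WIRED			counted in wired_count, never paged
 *	PTE_VALID			translation is live
 *	PTE_M				memory coherence required (SMP)
 *
 * and no PTE_U* bits, so user mode cannot touch kernel KVA.
 */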
/*
 * Initialize pmap associated with process 0.
 */
static void
mmu_booke_pinit0(mmu_t mmu, pmap_t pmap)
{

	mmu_booke_pinit(mmu, pmap);
	PCPU_SET(curpmap, pmap);
}

/*
 * Initialize a preallocated and zeroed pmap structure,
 * such as one in a vmspace structure.
 */
static void
mmu_booke_pinit(mmu_t mmu, pmap_t pmap)
{
	int i;

	CTR4(KTR_PMAP, "%s: pmap = %p, proc %d '%s'", __func__, pmap,
	    curthread->td_proc->p_pid, curthread->td_proc->p_comm);

	KASSERT((pmap != kernel_pmap), ("pmap_pinit: initializing kernel_pmap"));

	PMAP_LOCK_INIT(pmap);
	for (i = 0; i < MAXCPU; i++)
		pmap->pm_tid[i] = TID_NONE;
	CPU_ZERO(&pmap->pm_active);
	bzero(&pmap->pm_stats, sizeof(pmap->pm_stats));
	bzero(&pmap->pm_pdir, sizeof(pte_t *) * PDIR_NENTRIES);
	TAILQ_INIT(&pmap->pm_ptbl_list);
}

/*
 * Release any resources held by the given physical map.
 * Called when a pmap initialized by mmu_booke_pinit is being released.
 * Should only be called if the map contains no valid mappings.
 */
static void
mmu_booke_release(mmu_t mmu, pmap_t pmap)
{

	KASSERT(pmap->pm_stats.resident_count == 0,
	    ("pmap_release: pmap resident count %ld != 0",
	    pmap->pm_stats.resident_count));

	PMAP_LOCK_DESTROY(pmap);
}

/*
 * Insert the given physical page at the specified virtual address in the
 * target physical map with the protection requested. If specified the page
 * will be wired down.
 */
static void
mmu_booke_enter(mmu_t mmu, pmap_t pmap, vm_offset_t va, vm_page_t m,
    vm_prot_t prot, boolean_t wired)
{

	vm_page_lock_queues();
	PMAP_LOCK(pmap);
	mmu_booke_enter_locked(mmu, pmap, va, m, prot, wired);
	vm_page_unlock_queues();
	PMAP_UNLOCK(pmap);
}

static void
mmu_booke_enter_locked(mmu_t mmu, pmap_t pmap, vm_offset_t va, vm_page_t m,
    vm_prot_t prot, boolean_t wired)
{
	pte_t *pte;
	vm_paddr_t pa;
	uint32_t flags;
	int su, sync;

	pa = VM_PAGE_TO_PHYS(m);
	su = (pmap == kernel_pmap);
	sync = 0;

	//debugf("mmu_booke_enter_locked: s (pmap=0x%08x su=%d tid=%d m=0x%08x va=0x%08x "
	//    "pa=0x%08x prot=0x%08x wired=%d)\n",
	//    (u_int32_t)pmap, su, pmap->pm_tid,
	//    (u_int32_t)m, va, pa, prot, wired);

	if (su) {
		KASSERT(((va >= virtual_avail) &&
		    (va <= VM_MAX_KERNEL_ADDRESS)),
		    ("mmu_booke_enter_locked: kernel pmap, non kernel va"));
	} else {
		KASSERT((va <= VM_MAXUSER_ADDRESS),
		    ("mmu_booke_enter_locked: user pmap, non user va"));
	}
	KASSERT((m->oflags & (VPO_UNMANAGED | VPO_BUSY)) != 0 ||
	    VM_OBJECT_LOCKED(m->object),
	    ("mmu_booke_enter_locked: page %p is not busy", m));

	PMAP_LOCK_ASSERT(pmap, MA_OWNED);

	/*
	 * If there is an existing mapping, and the physical address has not
	 * changed, must be protection or wiring change.
	 */
	if (((pte = pte_find(mmu, pmap, va)) != NULL) &&
	    (PTE_ISVALID(pte)) && (PTE_PA(pte) == pa)) {

		/*
		 * Before actually updating pte->flags we calculate and
		 * prepare its new value in a helper var.
		 */
		flags = pte->flags;
		flags &= ~(PTE_UW | PTE_UX | PTE_SW | PTE_SX | PTE_MODIFIED);

		/* Wiring change, just update stats. */
		if (wired) {
			if (!PTE_ISWIRED(pte)) {
				flags |= PTE_WIRED;
				pmap->pm_stats.wired_count++;
			}
		} else {
			if (PTE_ISWIRED(pte)) {
				flags &= ~PTE_WIRED;
				pmap->pm_stats.wired_count--;
			}
		}
		if (prot & VM_PROT_WRITE) {
			/* Add write permissions. */
			flags |= PTE_SW;
			if (!su)
				flags |= PTE_UW;

			if ((flags & PTE_MANAGED) != 0)
				vm_page_aflag_set(m, PGA_WRITEABLE);
		} else {
			/* Handle modified pages, sense modify status. */

			/*
			 * The PTE_MODIFIED flag could be set by underlying
			 * TLB misses since we last read it (above), possibly
			 * other CPUs could update it so we check in the PTE
			 * directly rather than rely on that saved local flags
			 * copy.
			 */
			if (PTE_ISMODIFIED(pte))
				vm_page_dirty(m);
		}

		if (prot & VM_PROT_EXECUTE) {
			flags |= PTE_SX;
			if (!su)
				flags |= PTE_UX;

			/*
			 * Check existing flags for execute permissions: if we
			 * are turning execute permissions on, icache should
			 * be flushed.
			 */
			if ((pte->flags & (PTE_UX | PTE_SX)) == 0)
				sync++;
		}

		flags &= ~PTE_REFERENCED;

		/*
		 * The new flags value is all calculated -- only now actually
		 * update the PTE.
		 */
		mtx_lock_spin(&tlbivax_mutex);
		tlb_miss_lock();

		tlb0_flush_entry(va);
		pte->flags = flags;

		tlb_miss_unlock();
		mtx_unlock_spin(&tlbivax_mutex);

	} else {
		/*
		 * If there is an existing mapping, but it's for a different
		 * physical address, pte_enter() will delete the old mapping.
		 */
		//if ((pte != NULL) && PTE_ISVALID(pte))
		//	debugf("mmu_booke_enter_locked: replace\n");
		//else
		//	debugf("mmu_booke_enter_locked: new\n");

		/* Now set up the flags and install the new mapping. */
		flags = (PTE_SR | PTE_VALID);
		flags |= PTE_M;

		if (!su)
			flags |= PTE_UR;

		if (prot & VM_PROT_WRITE) {
			flags |= PTE_SW;
			if (!su)
				flags |= PTE_UW;

			if ((m->oflags & VPO_UNMANAGED) == 0)
				vm_page_aflag_set(m, PGA_WRITEABLE);
		}

		if (prot & VM_PROT_EXECUTE) {
			flags |= PTE_SX;
			if (!su)
				flags |= PTE_UX;
		}

		/* If it's wired, update stats. */
		if (wired) {
			pmap->pm_stats.wired_count++;
			flags |= PTE_WIRED;
		}

		pte_enter(mmu, pmap, m, va, flags);

		/* Flush the real memory from the instruction cache. */
		if (prot & VM_PROT_EXECUTE)
			sync++;
	}

	if (sync && (su || pmap == PCPU_GET(curpmap))) {
		__syncicache((void *)va, PAGE_SIZE);
		sync = 0;
	}
}
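/*
 * Illustrative flag outcomes of mmu_booke_enter_locked() above (derived
 * from the code; user pmap, new mapping):
 *
 *	VM_PROT_READ		-> PTE_VALID | PTE_M | PTE_SR | PTE_UR
 *	| VM_PROT_WRITE		-> ... | PTE_SW | PTE_UW  (+PGA_WRITEABLE)
 *	| VM_PROT_EXECUTE	-> ... | PTE_SX | PTE_UX  (+icache sync)
 *
 * Kernel (su) mappings get only the PTE_S* variant of each permission.
 */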
/*
 * Maps a sequence of resident pages belonging to the same object.
 * The sequence begins with the given page m_start. This page is
 * mapped at the given virtual address start. Each subsequent page is
 * mapped at a virtual address that is offset from start by the same
 * amount as the page is offset from m_start within the object. The
 * last page in the sequence is the page with the largest offset from
 * m_start that can be mapped at a virtual address less than the given
 * virtual address end. Not every virtual page between start and end
 * is mapped; only those for which a resident page exists with the
 * corresponding offset from m_start are mapped.
 */
static void
mmu_booke_enter_object(mmu_t mmu, pmap_t pmap, vm_offset_t start,
    vm_offset_t end, vm_page_t m_start, vm_prot_t prot)
{
	vm_page_t m;
	vm_pindex_t diff, psize;

	psize = atop(end - start);
	m = m_start;
	vm_page_lock_queues();
	PMAP_LOCK(pmap);
	while (m != NULL && (diff = m->pindex - m_start->pindex) < psize) {
		mmu_booke_enter_locked(mmu, pmap, start + ptoa(diff), m,
		    prot & (VM_PROT_READ | VM_PROT_EXECUTE), FALSE);
		m = TAILQ_NEXT(m, listq);
	}
	vm_page_unlock_queues();
	PMAP_UNLOCK(pmap);
}

static void
mmu_booke_enter_quick(mmu_t mmu, pmap_t pmap, vm_offset_t va, vm_page_t m,
    vm_prot_t prot)
{

	vm_page_lock_queues();
	PMAP_LOCK(pmap);
	mmu_booke_enter_locked(mmu, pmap, va, m,
	    prot & (VM_PROT_READ | VM_PROT_EXECUTE), FALSE);
	vm_page_unlock_queues();
	PMAP_UNLOCK(pmap);
}

/*
 * Remove the given range of addresses from the specified map.
 *
 * It is assumed that the start and end are properly rounded to the page size.
 */
static void
mmu_booke_remove(mmu_t mmu, pmap_t pmap, vm_offset_t va, vm_offset_t endva)
{
	pte_t *pte;
	uint8_t hold_flag;

	int su = (pmap == kernel_pmap);

	//debugf("mmu_booke_remove: s (su = %d pmap=0x%08x tid=%d va=0x%08x endva=0x%08x)\n",
	//    su, (u_int32_t)pmap, pmap->pm_tid, va, endva);

	if (su) {
		KASSERT(((va >= virtual_avail) &&
		    (va <= VM_MAX_KERNEL_ADDRESS)),
		    ("mmu_booke_remove: kernel pmap, non kernel va"));
	} else {
		KASSERT((va <= VM_MAXUSER_ADDRESS),
		    ("mmu_booke_remove: user pmap, non user va"));
	}

	if (PMAP_REMOVE_DONE(pmap)) {
		//debugf("mmu_booke_remove: e (empty)\n");
		return;
	}

	hold_flag = PTBL_HOLD_FLAG(pmap);
	//debugf("mmu_booke_remove: hold_flag = %d\n", hold_flag);

	vm_page_lock_queues();
	PMAP_LOCK(pmap);
	for (; va < endva; va += PAGE_SIZE) {
		pte = pte_find(mmu, pmap, va);
		if ((pte != NULL) && PTE_ISVALID(pte))
			pte_remove(mmu, pmap, va, hold_flag);
	}
	PMAP_UNLOCK(pmap);
	vm_page_unlock_queues();

	//debugf("mmu_booke_remove: e\n");
}

/*
 * Remove physical page from all pmaps in which it resides.
 */
static void
mmu_booke_remove_all(mmu_t mmu, vm_page_t m)
{
	pv_entry_t pv, pvn;
	uint8_t hold_flag;

	vm_page_lock_queues();
	for (pv = TAILQ_FIRST(&m->md.pv_list); pv != NULL; pv = pvn) {
		pvn = TAILQ_NEXT(pv, pv_link);

		PMAP_LOCK(pv->pv_pmap);
		hold_flag = PTBL_HOLD_FLAG(pv->pv_pmap);
		pte_remove(mmu, pv->pv_pmap, pv->pv_va, hold_flag);
		PMAP_UNLOCK(pv->pv_pmap);
	}
	vm_page_aflag_clear(m, PGA_WRITEABLE);
	vm_page_unlock_queues();
}
/*
 * Map a range of physical addresses into kernel virtual address space.
 */
static vm_offset_t
mmu_booke_map(mmu_t mmu, vm_offset_t *virt, vm_offset_t pa_start,
    vm_offset_t pa_end, int prot)
{
	vm_offset_t sva = *virt;
	vm_offset_t va = sva;

	//debugf("mmu_booke_map: s (sva = 0x%08x pa_start = 0x%08x pa_end = 0x%08x)\n",
	//    sva, pa_start, pa_end);

	while (pa_start < pa_end) {
		mmu_booke_kenter(mmu, va, pa_start);
		va += PAGE_SIZE;
		pa_start += PAGE_SIZE;
	}
	*virt = va;

	//debugf("mmu_booke_map: e (va = 0x%08x)\n", va);
	return (sva);
}

/*
 * The pmap must be activated before its address space can be accessed in
 * any way.
 */
static void
mmu_booke_activate(mmu_t mmu, struct thread *td)
{
	pmap_t pmap;
	u_int cpuid;

	pmap = &td->td_proc->p_vmspace->vm_pmap;

	CTR5(KTR_PMAP, "%s: s (td = %p, proc = '%s', id = %d, pmap = 0x%08x)",
	    __func__, td, td->td_proc->p_comm, td->td_proc->p_pid, pmap);

	KASSERT((pmap != kernel_pmap), ("mmu_booke_activate: kernel_pmap!"));

	mtx_lock_spin(&sched_lock);

	cpuid = PCPU_GET(cpuid);
	CPU_SET_ATOMIC(cpuid, &pmap->pm_active);
	PCPU_SET(curpmap, pmap);

	if (pmap->pm_tid[cpuid] == TID_NONE)
		tid_alloc(pmap);

	/* Load PID0 register with pmap tid value. */
	mtspr(SPR_PID0, pmap->pm_tid[cpuid]);
	__asm __volatile("isync");

	mtx_unlock_spin(&sched_lock);

	CTR3(KTR_PMAP, "%s: e (tid = %d for '%s')", __func__,
	    pmap->pm_tid[PCPU_GET(cpuid)], td->td_proc->p_comm);
}

/*
 * Deactivate the specified process's address space.
 */
static void
mmu_booke_deactivate(mmu_t mmu, struct thread *td)
{
	pmap_t pmap;

	pmap = &td->td_proc->p_vmspace->vm_pmap;

	CTR5(KTR_PMAP, "%s: td=%p, proc = '%s', id = %d, pmap = 0x%08x",
	    __func__, td, td->td_proc->p_comm, td->td_proc->p_pid, pmap);

	CPU_CLR_ATOMIC(PCPU_GET(cpuid), &pmap->pm_active);
	PCPU_SET(curpmap, NULL);
}
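/*
 * Note on address-space switching (summary of mmu_booke_activate() above;
 * the tid_alloc() internals live elsewhere in this file): each pmap gets a
 * per-CPU translation ID, and switching address spaces is just a matter of
 * loading that TID into SPR_PID0. No TLB flush is needed on a context
 * switch because TLB entries are tagged with the TID they were entered
 * under; the tidbusy[] table tracks which pmap owns each TID so TIDs can
 * be recycled (with tid_flush()) once the ID space is exhausted.
 */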
/*
 * Clear the write and modified bits in each of the given page's mappings.
 */
static void
mmu_booke_remove_write(mmu_t mmu, vm_page_t m)
{
	pv_entry_t pv;
	pte_t *pte;

	KASSERT((m->oflags & VPO_UNMANAGED) == 0,
	    ("mmu_booke_remove_write: page %p is not managed", m));

	/*
	 * If the page is not VPO_BUSY, then PGA_WRITEABLE cannot be set by
	 * another thread while the object is locked.  Thus, if PGA_WRITEABLE
	 * is clear, no page table entries need updating.
	 */
	VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED);
	if ((m->oflags & VPO_BUSY) == 0 &&
	    (m->aflags & PGA_WRITEABLE) == 0)
		return;
	vm_page_lock_queues();
	TAILQ_FOREACH(pv, &m->md.pv_list, pv_link) {
		PMAP_LOCK(pv->pv_pmap);
		if ((pte = pte_find(mmu, pv->pv_pmap, pv->pv_va)) != NULL) {
			if (PTE_ISVALID(pte)) {
				m = PHYS_TO_VM_PAGE(PTE_PA(pte));

				mtx_lock_spin(&tlbivax_mutex);
				tlb_miss_lock();

				/* Handle modified pages. */
				if (PTE_ISMODIFIED(pte))
					vm_page_dirty(m);

				/* Flush mapping from TLB0. */
				pte->flags &= ~(PTE_UW | PTE_SW | PTE_MODIFIED);

				tlb_miss_unlock();
				mtx_unlock_spin(&tlbivax_mutex);
			}
		}
		PMAP_UNLOCK(pv->pv_pmap);
	}
	vm_page_aflag_clear(m, PGA_WRITEABLE);
	vm_page_unlock_queues();
}

static void
mmu_booke_sync_icache(mmu_t mmu, pmap_t pm, vm_offset_t va, vm_size_t sz)
{
	pte_t *pte;
	pmap_t pmap;
	vm_page_t m;
	vm_offset_t addr;
	vm_paddr_t pa;
	int active, valid;

	va = trunc_page(va);
	sz = round_page(sz);

	vm_page_lock_queues();
	pmap = PCPU_GET(curpmap);
	active = (pm == kernel_pmap || pm == pmap) ? 1 : 0;
	while (sz > 0) {
		PMAP_LOCK(pm);
		pte = pte_find(mmu, pm, va);
		valid = (pte != NULL && PTE_ISVALID(pte)) ? 1 : 0;
		if (valid)
			pa = PTE_PA(pte);
		PMAP_UNLOCK(pm);
		if (valid) {
			if (!active) {
				/* Create a mapping in the active pmap. */
				addr = 0;
				m = PHYS_TO_VM_PAGE(pa);
				PMAP_LOCK(pmap);
				pte_enter(mmu, pmap, m, addr,
				    PTE_SR | PTE_VALID | PTE_UR);
				__syncicache((void *)addr, PAGE_SIZE);
				pte_remove(mmu, pmap, addr, PTBL_UNHOLD);
				PMAP_UNLOCK(pmap);
			} else
				__syncicache((void *)va, PAGE_SIZE);
		}
		va += PAGE_SIZE;
		sz -= PAGE_SIZE;
	}
	vm_page_unlock_queues();
}

/*
 * Atomically extract and hold the physical page with the given
 * pmap and virtual address pair if that mapping permits the given
 * protection.
 */
static vm_page_t
mmu_booke_extract_and_hold(mmu_t mmu, pmap_t pmap, vm_offset_t va,
    vm_prot_t prot)
{
	pte_t *pte;
	vm_page_t m;
	uint32_t pte_wbit;
	vm_paddr_t pa;

	m = NULL;
	pa = 0;
	PMAP_LOCK(pmap);
retry:
	pte = pte_find(mmu, pmap, va);
	if ((pte != NULL) && PTE_ISVALID(pte)) {
		if (pmap == kernel_pmap)
			pte_wbit = PTE_SW;
		else
			pte_wbit = PTE_UW;

		if ((pte->flags & pte_wbit) || ((prot & VM_PROT_WRITE) == 0)) {
			if (vm_page_pa_tryrelock(pmap, PTE_PA(pte), &pa))
				goto retry;
			m = PHYS_TO_VM_PAGE(PTE_PA(pte));
			vm_page_hold(m);
		}
	}

	PA_UNLOCK_COND(pa);
	PMAP_UNLOCK(pmap);
	return (m);
}
/*
 * Initialize a vm_page's machine-dependent fields.
 */
static void
mmu_booke_page_init(mmu_t mmu, vm_page_t m)
{

	TAILQ_INIT(&m->md.pv_list);
}

/*
 * mmu_booke_zero_page_area zeros the specified hardware page by
 * mapping it into virtual memory and using bzero to clear
 * its contents.
 *
 * off and size must reside within a single page.
 */
static void
mmu_booke_zero_page_area(mmu_t mmu, vm_page_t m, int off, int size)
{
	vm_offset_t va;

	/* XXX KASSERT off and size are within a single page? */

	mtx_lock(&zero_page_mutex);
	va = zero_page_va;

	mmu_booke_kenter(mmu, va, VM_PAGE_TO_PHYS(m));
	bzero((caddr_t)va + off, size);
	mmu_booke_kremove(mmu, va);

	mtx_unlock(&zero_page_mutex);
}

/*
 * mmu_booke_zero_page zeros the specified hardware page.
 */
static void
mmu_booke_zero_page(mmu_t mmu, vm_page_t m)
{

	mmu_booke_zero_page_area(mmu, m, 0, PAGE_SIZE);
}

/*
 * mmu_booke_copy_page copies the specified (machine independent) page by
 * mapping the page into virtual memory and using memcpy to copy the page,
 * one machine dependent page at a time.
 */
static void
mmu_booke_copy_page(mmu_t mmu, vm_page_t sm, vm_page_t dm)
{
	vm_offset_t sva, dva;

	sva = copy_page_src_va;
	dva = copy_page_dst_va;

	mtx_lock(&copy_page_mutex);
	mmu_booke_kenter(mmu, sva, VM_PAGE_TO_PHYS(sm));
	mmu_booke_kenter(mmu, dva, VM_PAGE_TO_PHYS(dm));
	memcpy((caddr_t)dva, (caddr_t)sva, PAGE_SIZE);
	mmu_booke_kremove(mmu, dva);
	mmu_booke_kremove(mmu, sva);
	mtx_unlock(&copy_page_mutex);
}

static inline void
mmu_booke_copy_pages(mmu_t mmu, vm_page_t *ma, vm_offset_t a_offset,
    vm_page_t *mb, vm_offset_t b_offset, int xfersize)
{
	void *a_cp, *b_cp;
	vm_offset_t a_pg_offset, b_pg_offset;
	int cnt;

	mtx_lock(&copy_page_mutex);
	while (xfersize > 0) {
		a_pg_offset = a_offset & PAGE_MASK;
		cnt = min(xfersize, PAGE_SIZE - a_pg_offset);
		mmu_booke_kenter(mmu, copy_page_src_va,
		    VM_PAGE_TO_PHYS(ma[a_offset >> PAGE_SHIFT]));
		a_cp = (char *)copy_page_src_va + a_pg_offset;
		b_pg_offset = b_offset & PAGE_MASK;
		cnt = min(cnt, PAGE_SIZE - b_pg_offset);
		mmu_booke_kenter(mmu, copy_page_dst_va,
		    VM_PAGE_TO_PHYS(mb[b_offset >> PAGE_SHIFT]));
		b_cp = (char *)copy_page_dst_va + b_pg_offset;
		bcopy(a_cp, b_cp, cnt);
		mmu_booke_kremove(mmu, copy_page_dst_va);
		mmu_booke_kremove(mmu, copy_page_src_va);
		a_offset += cnt;
		b_offset += cnt;
		xfersize -= cnt;
	}
	mtx_unlock(&copy_page_mutex);
}

/*
 * mmu_booke_zero_page_idle zeros the specified hardware page by mapping it
 * into virtual memory and using bzero to clear its contents.  This is intended
 * to be called from the vm_pagezero process only and outside of Giant.  No
 * lock is required.
 */
static void
mmu_booke_zero_page_idle(mmu_t mmu, vm_page_t m)
{
	vm_offset_t va;

	va = zero_page_idle_va;
	mmu_booke_kenter(mmu, va, VM_PAGE_TO_PHYS(m));
	bzero((caddr_t)va, PAGE_SIZE);
	mmu_booke_kremove(mmu, va);
}
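/*
 * Worked example for the offset arithmetic in mmu_booke_copy_pages() above
 * (illustrative only; assumes 4KB pages, i.e. PAGE_SHIFT = 12, and a
 * page-aligned b_offset so only the source side clamps the count):
 *
 *	a_offset = 0x1f00, xfersize = 0x300
 *	a_pg_offset = a_offset & PAGE_MASK       = 0xf00
 *	source page = ma[a_offset >> PAGE_SHIFT] = ma[1]
 *	cnt = min(0x300, PAGE_SIZE - 0xf00)      = 0x100
 *
 * Only 0x100 bytes are copied on this pass because the source run ends at
 * a page boundary; the loop then advances both offsets and picks up the
 * remaining 0x200 bytes from ma[2] on the next pass.
 */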
/*
 * Return whether or not the specified physical page was modified
 * in any of the physical maps.
 */
static boolean_t
mmu_booke_is_modified(mmu_t mmu, vm_page_t m)
{
	pte_t *pte;
	pv_entry_t pv;
	boolean_t rv;

	KASSERT((m->oflags & VPO_UNMANAGED) == 0,
	    ("mmu_booke_is_modified: page %p is not managed", m));
	rv = FALSE;

	/*
	 * If the page is not VPO_BUSY, then PGA_WRITEABLE cannot be
	 * concurrently set while the object is locked.  Thus, if PGA_WRITEABLE
	 * is clear, no PTEs can be modified.
	 */
	VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED);
	if ((m->oflags & VPO_BUSY) == 0 &&
	    (m->aflags & PGA_WRITEABLE) == 0)
		return (rv);
	vm_page_lock_queues();
	TAILQ_FOREACH(pv, &m->md.pv_list, pv_link) {
		PMAP_LOCK(pv->pv_pmap);
		if ((pte = pte_find(mmu, pv->pv_pmap, pv->pv_va)) != NULL &&
		    PTE_ISVALID(pte)) {
			if (PTE_ISMODIFIED(pte))
				rv = TRUE;
		}
		PMAP_UNLOCK(pv->pv_pmap);
		if (rv)
			break;
	}
	vm_page_unlock_queues();
	return (rv);
}

/*
 * Return whether or not the specified virtual address is eligible
 * for prefault.
 */
static boolean_t
mmu_booke_is_prefaultable(mmu_t mmu, pmap_t pmap, vm_offset_t addr)
{

	return (FALSE);
}

/*
 * Return whether or not the specified physical page was referenced
 * in any physical maps.
 */
static boolean_t
mmu_booke_is_referenced(mmu_t mmu, vm_page_t m)
{
	pte_t *pte;
	pv_entry_t pv;
	boolean_t rv;

	KASSERT((m->oflags & VPO_UNMANAGED) == 0,
	    ("mmu_booke_is_referenced: page %p is not managed", m));
	rv = FALSE;
	vm_page_lock_queues();
	TAILQ_FOREACH(pv, &m->md.pv_list, pv_link) {
		PMAP_LOCK(pv->pv_pmap);
		if ((pte = pte_find(mmu, pv->pv_pmap, pv->pv_va)) != NULL &&
		    PTE_ISVALID(pte)) {
			if (PTE_ISREFERENCED(pte))
				rv = TRUE;
		}
		PMAP_UNLOCK(pv->pv_pmap);
		if (rv)
			break;
	}
	vm_page_unlock_queues();
	return (rv);
}

/*
 * Clear the modify bits on the specified physical page.
 */
static void
mmu_booke_clear_modify(mmu_t mmu, vm_page_t m)
{
	pte_t *pte;
	pv_entry_t pv;

	KASSERT((m->oflags & VPO_UNMANAGED) == 0,
	    ("mmu_booke_clear_modify: page %p is not managed", m));
	VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED);
	KASSERT((m->oflags & VPO_BUSY) == 0,
	    ("mmu_booke_clear_modify: page %p is busy", m));

	/*
	 * If the page is not PGA_WRITEABLE, then no PTEs can be modified.
	 * If the object containing the page is locked and the page is not
	 * VPO_BUSY, then PGA_WRITEABLE cannot be concurrently set.
	 */
	if ((m->aflags & PGA_WRITEABLE) == 0)
		return;
	vm_page_lock_queues();
	TAILQ_FOREACH(pv, &m->md.pv_list, pv_link) {
		PMAP_LOCK(pv->pv_pmap);
		if ((pte = pte_find(mmu, pv->pv_pmap, pv->pv_va)) != NULL &&
		    PTE_ISVALID(pte)) {
			mtx_lock_spin(&tlbivax_mutex);
			tlb_miss_lock();

			if (pte->flags & (PTE_SW | PTE_UW | PTE_MODIFIED)) {
				tlb0_flush_entry(pv->pv_va);
				pte->flags &= ~(PTE_SW | PTE_UW | PTE_MODIFIED |
				    PTE_REFERENCED);
			}

			tlb_miss_unlock();
			mtx_unlock_spin(&tlbivax_mutex);
		}
		PMAP_UNLOCK(pv->pv_pmap);
	}
	vm_page_unlock_queues();
}
/*
 * Return a count of reference bits for a page, clearing those bits.
 * It is not necessary for every reference bit to be cleared, but it
 * is necessary that 0 only be returned when there are truly no
 * reference bits set.
 *
 * XXX: The exact number of bits to check and clear is a matter that
 * should be tested and standardized at some point in the future for
 * optimal aging of shared pages.
 */
static int
mmu_booke_ts_referenced(mmu_t mmu, vm_page_t m)
{
	pte_t *pte;
	pv_entry_t pv;
	int count;

	KASSERT((m->oflags & VPO_UNMANAGED) == 0,
	    ("mmu_booke_ts_referenced: page %p is not managed", m));
	count = 0;
	vm_page_lock_queues();
	TAILQ_FOREACH(pv, &m->md.pv_list, pv_link) {
		PMAP_LOCK(pv->pv_pmap);
		if ((pte = pte_find(mmu, pv->pv_pmap, pv->pv_va)) != NULL &&
		    PTE_ISVALID(pte)) {
			if (PTE_ISREFERENCED(pte)) {
				mtx_lock_spin(&tlbivax_mutex);
				tlb_miss_lock();

				tlb0_flush_entry(pv->pv_va);
				pte->flags &= ~PTE_REFERENCED;

				tlb_miss_unlock();
				mtx_unlock_spin(&tlbivax_mutex);

				if (++count > 4) {
					PMAP_UNLOCK(pv->pv_pmap);
					break;
				}
			}
		}
		PMAP_UNLOCK(pv->pv_pmap);
	}
	vm_page_unlock_queues();
	return (count);
}

/*
 * Clear the reference bit on the specified physical page.
 */
static void
mmu_booke_clear_reference(mmu_t mmu, vm_page_t m)
{
	pte_t *pte;
	pv_entry_t pv;

	KASSERT((m->oflags & VPO_UNMANAGED) == 0,
	    ("mmu_booke_clear_reference: page %p is not managed", m));
	vm_page_lock_queues();
	TAILQ_FOREACH(pv, &m->md.pv_list, pv_link) {
		PMAP_LOCK(pv->pv_pmap);
		if ((pte = pte_find(mmu, pv->pv_pmap, pv->pv_va)) != NULL &&
		    PTE_ISVALID(pte)) {
			if (PTE_ISREFERENCED(pte)) {
				mtx_lock_spin(&tlbivax_mutex);
				tlb_miss_lock();

				tlb0_flush_entry(pv->pv_va);
				pte->flags &= ~PTE_REFERENCED;

				tlb_miss_unlock();
				mtx_unlock_spin(&tlbivax_mutex);
			}
		}
		PMAP_UNLOCK(pv->pv_pmap);
	}
	vm_page_unlock_queues();
}

/*
 * Change wiring attribute for a map/virtual-address pair.
 */
static void
mmu_booke_change_wiring(mmu_t mmu, pmap_t pmap, vm_offset_t va, boolean_t wired)
{
	pte_t *pte;

	PMAP_LOCK(pmap);
	if ((pte = pte_find(mmu, pmap, va)) != NULL) {
		if (wired) {
			if (!PTE_ISWIRED(pte)) {
				pte->flags |= PTE_WIRED;
				pmap->pm_stats.wired_count++;
			}
		} else {
			if (PTE_ISWIRED(pte)) {
				pte->flags &= ~PTE_WIRED;
				pmap->pm_stats.wired_count--;
			}
		}
	}
	PMAP_UNLOCK(pmap);
}
/*
 * Return true if the pmap's pv is one of the first 16 pvs linked to from this
 * page.  This count may be changed upwards or downwards in the future; it is
 * only necessary that true be returned for a small subset of pmaps for proper
 * page aging.
 */
static boolean_t
mmu_booke_page_exists_quick(mmu_t mmu, pmap_t pmap, vm_page_t m)
{
	pv_entry_t pv;
	int loops;
	boolean_t rv;

	KASSERT((m->oflags & VPO_UNMANAGED) == 0,
	    ("mmu_booke_page_exists_quick: page %p is not managed", m));
	loops = 0;
	rv = FALSE;
	vm_page_lock_queues();
	TAILQ_FOREACH(pv, &m->md.pv_list, pv_link) {
		if (pv->pv_pmap == pmap) {
			rv = TRUE;
			break;
		}
		if (++loops >= 16)
			break;
	}
	vm_page_unlock_queues();
	return (rv);
}

/*
 * Return the number of managed mappings to the given physical page that are
 * wired.
 */
static int
mmu_booke_page_wired_mappings(mmu_t mmu, vm_page_t m)
{
	pv_entry_t pv;
	pte_t *pte;
	int count = 0;

	if ((m->oflags & VPO_UNMANAGED) != 0)
		return (count);
	vm_page_lock_queues();
	TAILQ_FOREACH(pv, &m->md.pv_list, pv_link) {
		PMAP_LOCK(pv->pv_pmap);
		if ((pte = pte_find(mmu, pv->pv_pmap, pv->pv_va)) != NULL)
			if (PTE_ISVALID(pte) && PTE_ISWIRED(pte))
				count++;
		PMAP_UNLOCK(pv->pv_pmap);
	}
	vm_page_unlock_queues();
	return (count);
}

static int
mmu_booke_dev_direct_mapped(mmu_t mmu, vm_offset_t pa, vm_size_t size)
{
	int i;
	vm_offset_t va;

	/*
	 * This currently does not work for entries that
	 * overlap TLB1 entries.
	 */
	for (i = 0; i < tlb1_idx; i++) {
		if (tlb1_iomapped(i, pa, size, &va) == 0)
			return (0);
	}

	return (EFAULT);
}

vm_offset_t
mmu_booke_dumpsys_map(mmu_t mmu, struct pmap_md *md, vm_size_t ofs,
    vm_size_t *sz)
{
	vm_paddr_t pa, ppa;
	vm_offset_t va;
	vm_size_t gran;

	/* Raw physical memory dumps don't have a virtual address. */
	if (md->md_vaddr == ~0UL) {
		/* We always map a 256MB page at 256M. */
		gran = 256 * 1024 * 1024;
		pa = md->md_paddr + ofs;
		ppa = pa & ~(gran - 1);
		ofs = pa - ppa;
		va = gran;
		tlb1_set_entry(va, ppa, gran, _TLB_ENTRY_IO);
		if (*sz > (gran - ofs))
			*sz = gran - ofs;
		return (va + ofs);
	}

	/* Minidumps are based on virtual memory addresses. */
	va = md->md_vaddr + ofs;
	if (va >= kernstart + kernsize) {
		gran = PAGE_SIZE - (va & PAGE_MASK);
		if (*sz > gran)
			*sz = gran;
	}
	return (va);
}

void
mmu_booke_dumpsys_unmap(mmu_t mmu, struct pmap_md *md, vm_size_t ofs,
    vm_offset_t va)
{

	/* Raw physical memory dumps don't have a virtual address. */
	if (md->md_vaddr == ~0UL) {
		tlb1_idx--;
		tlb1[tlb1_idx].mas1 = 0;
		tlb1[tlb1_idx].mas2 = 0;
		tlb1[tlb1_idx].mas3 = 0;
		tlb1_write_entry(tlb1_idx);
		return;
	}

	/* Minidumps are based on virtual memory addresses. */
	/* Nothing to do... */
}
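/*
 * Worked example for the raw-dump window arithmetic in
 * mmu_booke_dumpsys_map() above (illustrative numbers only).  With
 * md_paddr = 0x1234_0000 and ofs = 0x10_0000:
 *
 *	gran = 0x1000_0000                      256MB window
 *	pa   = 0x1244_0000                      physical byte to access
 *	ppa  = pa & ~(gran - 1) = 0x1000_0000   256MB-aligned base
 *	ofs  = pa - ppa         = 0x0244_0000   offset within the window
 *	va   = gran             = 0x1000_0000   fixed VA of the window
 *
 * The returned address is va + ofs, and *sz is clipped so the caller
 * never reads past the end of the 256MB mapping.
 */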
struct pmap_md *
mmu_booke_scan_md(mmu_t mmu, struct pmap_md *prev)
{
	static struct pmap_md md;
	pte_t *pte;
	vm_offset_t va;

	if (dumpsys_minidump) {
		md.md_paddr = ~0UL;	/* Minidumps use virtual addresses. */
		if (prev == NULL) {
			/* 1st: kernel .data and .bss. */
			md.md_index = 1;
			md.md_vaddr = trunc_page((uintptr_t)_etext);
			md.md_size = round_page((uintptr_t)_end) - md.md_vaddr;
			return (&md);
		}
		switch (prev->md_index) {
		case 1:
			/* 2nd: msgbuf and tables (see pmap_bootstrap()). */
			md.md_index = 2;
			md.md_vaddr = data_start;
			md.md_size = data_end - data_start;
			break;
		case 2:
			/* 3rd: kernel VM. */
			va = prev->md_vaddr + prev->md_size;
			/* Find start of next chunk (from va). */
			while (va < virtual_end) {
				/* Don't dump the buffer cache. */
				if (va >= kmi.buffer_sva &&
				    va < kmi.buffer_eva) {
					va = kmi.buffer_eva;
					continue;
				}
				pte = pte_find(mmu, kernel_pmap, va);
				if (pte != NULL && PTE_ISVALID(pte))
					break;
				va += PAGE_SIZE;
			}
			if (va < virtual_end) {
				md.md_vaddr = va;
				va += PAGE_SIZE;
				/* Find last page in chunk. */
				while (va < virtual_end) {
					/* Don't run into the buffer cache. */
					if (va == kmi.buffer_sva)
						break;
					pte = pte_find(mmu, kernel_pmap, va);
					if (pte == NULL || !PTE_ISVALID(pte))
						break;
					va += PAGE_SIZE;
				}
				md.md_size = va - md.md_vaddr;
				break;
			}
			md.md_index = 3;
			/* FALLTHROUGH */
		default:
			return (NULL);
		}
	} else { /* raw physical memory dumps */
		mem_regions(&physmem_regions, &physmem_regions_sz,
		    &availmem_regions, &availmem_regions_sz);

		if (prev == NULL) {
			/* first physical chunk. */
			md.md_paddr = physmem_regions[0].mr_start;
			md.md_size = physmem_regions[0].mr_size;
			md.md_vaddr = ~0UL;
			md.md_index = 1;
		} else if (md.md_index < physmem_regions_sz) {
			md.md_paddr = physmem_regions[md.md_index].mr_start;
			md.md_size = physmem_regions[md.md_index].mr_size;
			md.md_vaddr = ~0UL;
			md.md_index++;
		} else {
			/* There's no next physical chunk. */
			return (NULL);
		}
	}

	return (&md);
}

/*
 * Map a set of physical memory pages into the kernel virtual address space.
 * Return a pointer to where it is mapped.  This routine is intended to be used
 * for mapping device memory, NOT real memory.
 */
static void *
mmu_booke_mapdev(mmu_t mmu, vm_offset_t pa, vm_size_t size)
{
	void *res;
	uintptr_t va;
	vm_size_t sz;

	va = (pa >= 0x80000000) ? pa : (0xe2000000 + pa);
	res = (void *)va;

	do {
		sz = 1 << (ilog2(size) & ~1);
		if (bootverbose)
			printf("Wiring VA=%x to PA=%x (size=%x), "
			    "using TLB1[%d]\n", va, pa, sz, tlb1_idx);
		tlb1_set_entry(va, pa, sz, _TLB_ENTRY_IO);
		size -= sz;
		pa += sz;
		va += sz;
	} while (size > 0);

	return (res);
}

/*
 * 'Unmap' a range mapped by mmu_booke_mapdev().
 */
static void
mmu_booke_unmapdev(mmu_t mmu, vm_offset_t va, vm_size_t size)
{
	vm_offset_t base, offset;

	/*
	 * Unmap only if this is inside kernel virtual space.
	 */
	if ((va >= VM_MIN_KERNEL_ADDRESS) && (va <= VM_MAX_KERNEL_ADDRESS)) {
		base = trunc_page(va);
		offset = va & PAGE_MASK;
		size = roundup(offset + size, PAGE_SIZE);
		kmem_free(kernel_map, base, size);
	}
}
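/*
 * Worked example for the TLB1 sizing loop in mmu_booke_mapdev() above
 * (illustrative only).  TLB1 entries cover power-of-4 sizes, so
 * 'sz = 1 << (ilog2(size) & ~1)' rounds the remaining size down to the
 * largest power of 4 that fits.  For a 0x50_0000 (5MB) request:
 *
 *	pass 1: ilog2(0x500000) = 22 -> sz = 1 << 22 = 4MB, 1MB left
 *	pass 2: ilog2(0x100000) = 20 -> sz = 1 << 20 = 1MB, done
 *
 * i.e. the region is wired with two TLB1 entries, 4MB + 1MB.
 */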
/*
 * mmu_booke_object_init_pt preloads the ptes for a given object into the
 * specified pmap.  This eliminates the blast of soft faults on process startup
 * and immediately after an mmap.
 */
static void
mmu_booke_object_init_pt(mmu_t mmu, pmap_t pmap, vm_offset_t addr,
    vm_object_t object, vm_pindex_t pindex, vm_size_t size)
{

	VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
	KASSERT(object->type == OBJT_DEVICE || object->type == OBJT_SG,
	    ("mmu_booke_object_init_pt: non-device object"));
}

/*
 * Perform the pmap work for mincore.
 */
static int
mmu_booke_mincore(mmu_t mmu, pmap_t pmap, vm_offset_t addr,
    vm_paddr_t *locked_pa)
{

	TODO;
	return (0);
}

/**************************************************************************/
/* TID handling */
/**************************************************************************/

/*
 * Allocate a TID.  If necessary, steal one from someone else.
 * The new TID is flushed from the TLB before returning.
 */
static tlbtid_t
tid_alloc(pmap_t pmap)
{
	tlbtid_t tid;
	int thiscpu;

	KASSERT((pmap != kernel_pmap), ("tid_alloc: kernel pmap"));

	CTR2(KTR_PMAP, "%s: s (pmap = %p)", __func__, pmap);

	thiscpu = PCPU_GET(cpuid);

	tid = PCPU_GET(tid_next);
	if (tid > TID_MAX)
		tid = TID_MIN;
	PCPU_SET(tid_next, tid + 1);

	/* If we are stealing a TID, clear the relevant pmap's field. */
	if (tidbusy[thiscpu][tid] != NULL) {

		CTR2(KTR_PMAP, "%s: warning: stealing tid %d", __func__, tid);

		tidbusy[thiscpu][tid]->pm_tid[thiscpu] = TID_NONE;

		/* Flush all entries from TLB0 matching this TID. */
		tid_flush(tid);
	}

	tidbusy[thiscpu][tid] = pmap;
	pmap->pm_tid[thiscpu] = tid;
	__asm __volatile("msync; isync");

	CTR3(KTR_PMAP, "%s: e (%02d next = %02d)", __func__, tid,
	    PCPU_GET(tid_next));

	return (tid);
}

/**************************************************************************/
/* TLB0 handling */
/**************************************************************************/

static void
tlb_print_entry(int i, uint32_t mas1, uint32_t mas2, uint32_t mas3,
    uint32_t mas7)
{
	int as;
	char desc[3];
	tlbtid_t tid;
	vm_size_t size;
	unsigned int tsize;

	desc[2] = '\0';
	if (mas1 & MAS1_VALID)
		desc[0] = 'V';
	else
		desc[0] = ' ';

	if (mas1 & MAS1_IPROT)
		desc[1] = 'P';
	else
		desc[1] = ' ';

	as = (mas1 & MAS1_TS_MASK) ? 1 : 0;
	tid = MAS1_GETTID(mas1);

	tsize = (mas1 & MAS1_TSIZE_MASK) >> MAS1_TSIZE_SHIFT;
	size = 0;
	if (tsize)
		size = tsize2size(tsize);

	debugf("%3d: (%s) [AS=%d] "
	    "sz = 0x%08x tsz = %d tid = %d mas1 = 0x%08x "
	    "mas2(va) = 0x%08x mas3(pa) = 0x%08x mas7 = 0x%08x\n",
	    i, desc, as, size, tsize, tid, mas1, mas2, mas3, mas7);
}

/* Convert TLB0 va and way number to tlb0[] table index. */
static inline unsigned int
tlb0_tableidx(vm_offset_t va, unsigned int way)
{
	unsigned int idx;

	idx = (way * TLB0_ENTRIES_PER_WAY);
	idx += (va & MAS2_TLB0_ENTRY_IDX_MASK) >> MAS2_TLB0_ENTRY_IDX_SHIFT;
	return (idx);
}
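/*
 * Worked example for tlb0_tableidx() above (illustrative only; the numbers
 * assume an e500 TLB0 with 128 entries per way, where
 * MAS2_TLB0_ENTRY_IDX_MASK selects the low EPN bits va[12:18]).  For
 * way = 2 and va = 0xc153_7000:
 *
 *	set = (va >> PAGE_SHIFT) % 128 = 0xc1537 % 0x80 = 0x37
 *	idx = 2 * 128 + 0x37 = 311
 *
 * so the shadow of this hardware entry lives at tlb0[311].  The actual
 * entries-per-way count is read from h/w during early config and varies
 * between core revisions.
 */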
2800 */ 2801static inline void 2802tlb0_flush_entry(vm_offset_t va) 2803{ 2804 2805 CTR2(KTR_PMAP, "%s: s va=0x%08x", __func__, va); 2806 2807 mtx_assert(&tlbivax_mutex, MA_OWNED); 2808 2809 __asm __volatile("tlbivax 0, %0" :: "r"(va & MAS2_EPN_MASK)); 2810 __asm __volatile("isync; msync"); 2811 __asm __volatile("tlbsync; msync"); 2812 2813 CTR1(KTR_PMAP, "%s: e", __func__); 2814} 2815 2816/* Print out contents of the MAS registers for each TLB0 entry */ 2817void 2818tlb0_print_tlbentries(void) 2819{ 2820 uint32_t mas0, mas1, mas2, mas3, mas7; 2821 int entryidx, way, idx; 2822 2823 debugf("TLB0 entries:\n"); 2824 for (way = 0; way < TLB0_WAYS; way ++) 2825 for (entryidx = 0; entryidx < TLB0_ENTRIES_PER_WAY; entryidx++) { 2826 2827 mas0 = MAS0_TLBSEL(0) | MAS0_ESEL(way); 2828 mtspr(SPR_MAS0, mas0); 2829 __asm __volatile("isync"); 2830 2831 mas2 = entryidx << MAS2_TLB0_ENTRY_IDX_SHIFT; 2832 mtspr(SPR_MAS2, mas2); 2833 2834 __asm __volatile("isync; tlbre"); 2835 2836 mas1 = mfspr(SPR_MAS1); 2837 mas2 = mfspr(SPR_MAS2); 2838 mas3 = mfspr(SPR_MAS3); 2839 mas7 = mfspr(SPR_MAS7); 2840 2841 idx = tlb0_tableidx(mas2, way); 2842 tlb_print_entry(idx, mas1, mas2, mas3, mas7); 2843 } 2844} 2845 2846/**************************************************************************/ 2847/* TLB1 handling */ 2848/**************************************************************************/ 2849 2850/* 2851 * TLB1 mapping notes: 2852 * 2853 * TLB1[0] CCSRBAR 2854 * TLB1[1] Kernel text and data. 2855 * TLB1[2-15] Additional kernel text and data mappings (if required), PCI 2856 * windows, other devices mappings. 2857 */ 2858 2859/* 2860 * Write given entry to TLB1 hardware. 2861 * Use 32 bit pa, clear 4 high-order bits of RPN (mas7). 2862 */ 2863static void 2864tlb1_write_entry(unsigned int idx) 2865{ 2866 uint32_t mas0, mas7; 2867 2868 //debugf("tlb1_write_entry: s\n"); 2869 2870 /* Clear high order RPN bits */ 2871 mas7 = 0; 2872 2873 /* Select entry */ 2874 mas0 = MAS0_TLBSEL(1) | MAS0_ESEL(idx); 2875 //debugf("tlb1_write_entry: mas0 = 0x%08x\n", mas0); 2876 2877 mtspr(SPR_MAS0, mas0); 2878 __asm __volatile("isync"); 2879 mtspr(SPR_MAS1, tlb1[idx].mas1); 2880 __asm __volatile("isync"); 2881 mtspr(SPR_MAS2, tlb1[idx].mas2); 2882 __asm __volatile("isync"); 2883 mtspr(SPR_MAS3, tlb1[idx].mas3); 2884 __asm __volatile("isync"); 2885 mtspr(SPR_MAS7, mas7); 2886 __asm __volatile("isync; tlbwe; isync; msync"); 2887 2888 //debugf("tlb1_write_entry: e\n"); 2889} 2890 2891/* 2892 * Return the largest uint value log such that 2^log <= num. 2893 */ 2894static unsigned int 2895ilog2(unsigned int num) 2896{ 2897 int lz; 2898 2899 __asm ("cntlzw %0, %1" : "=r" (lz) : "r" (num)); 2900 return (31 - lz); 2901} 2902 2903/* 2904 * Convert TLB TSIZE value to mapped region size. 2905 */ 2906static vm_size_t 2907tsize2size(unsigned int tsize) 2908{ 2909 2910 /* 2911 * size = 4^tsize KB 2912 * size = 4^tsize * 2^10 = 2^(2 * tsize - 10) 2913 */ 2914 2915 return ((1 << (2 * tsize)) * 1024); 2916} 2917 2918/* 2919 * Convert region size (must be power of 4) to TLB TSIZE value. 2920 */ 2921static unsigned int 2922size2tsize(vm_size_t size) 2923{ 2924 2925 return (ilog2(size) / 2 - 5); 2926} 2927 2928/* 2929 * Register permanent kernel mapping in TLB1. 2930 * 2931 * Entries are created starting from index 0 (current free entry is 2932 * kept in tlb1_idx) and are not supposed to be invalidated. 
2933 */ 2934static int 2935tlb1_set_entry(vm_offset_t va, vm_offset_t pa, vm_size_t size, 2936 uint32_t flags) 2937{ 2938 uint32_t ts, tid; 2939 int tsize; 2940 2941 if (tlb1_idx >= TLB1_ENTRIES) { 2942 printf("tlb1_set_entry: TLB1 full!\n"); 2943 return (-1); 2944 } 2945 2946 /* Convert size to TSIZE */ 2947 tsize = size2tsize(size); 2948 2949 tid = (TID_KERNEL << MAS1_TID_SHIFT) & MAS1_TID_MASK; 2950 /* XXX TS is hard coded to 0 for now as we only use single address space */ 2951 ts = (0 << MAS1_TS_SHIFT) & MAS1_TS_MASK; 2952 2953 /* XXX LOCK tlb1[] */ 2954 2955 tlb1[tlb1_idx].mas1 = MAS1_VALID | MAS1_IPROT | ts | tid; 2956 tlb1[tlb1_idx].mas1 |= ((tsize << MAS1_TSIZE_SHIFT) & MAS1_TSIZE_MASK); 2957 tlb1[tlb1_idx].mas2 = (va & MAS2_EPN_MASK) | flags; 2958 2959 /* Set supervisor RWX permission bits */ 2960 tlb1[tlb1_idx].mas3 = (pa & MAS3_RPN) | MAS3_SR | MAS3_SW | MAS3_SX; 2961 2962 tlb1_write_entry(tlb1_idx++); 2963 2964 /* XXX UNLOCK tlb1[] */ 2965 2966 /* 2967 * XXX in general TLB1 updates should be propagated between CPUs, 2968 * since current design assumes to have the same TLB1 set-up on all 2969 * cores. 2970 */ 2971 return (0); 2972} 2973 2974/* 2975 * Map in contiguous RAM region into the TLB1 using maximum of 2976 * KERNEL_REGION_MAX_TLB_ENTRIES entries. 2977 * 2978 * If necessary round up last entry size and return total size 2979 * used by all allocated entries. 2980 */ 2981vm_size_t 2982tlb1_mapin_region(vm_offset_t va, vm_paddr_t pa, vm_size_t size) 2983{ 2984 vm_size_t pgs[KERNEL_REGION_MAX_TLB_ENTRIES]; 2985 vm_size_t mapped, pgsz, base, mask; 2986 int idx, nents; 2987 2988 /* Round up to the next 1M */ 2989 size = (size + (1 << 20) - 1) & ~((1 << 20) - 1); 2990 2991 mapped = 0; 2992 idx = 0; 2993 base = va; 2994 pgsz = 64*1024*1024; 2995 while (mapped < size) { 2996 while (mapped < size && idx < KERNEL_REGION_MAX_TLB_ENTRIES) { 2997 while (pgsz > (size - mapped)) 2998 pgsz >>= 2; 2999 pgs[idx++] = pgsz; 3000 mapped += pgsz; 3001 } 3002 3003 /* We under-map. Correct for this. */ 3004 if (mapped < size) { 3005 while (pgs[idx - 1] == pgsz) { 3006 idx--; 3007 mapped -= pgsz; 3008 } 3009 /* XXX We may increase beyond out starting point. */ 3010 pgsz <<= 2; 3011 pgs[idx++] = pgsz; 3012 mapped += pgsz; 3013 } 3014 } 3015 3016 nents = idx; 3017 mask = pgs[0] - 1; 3018 /* Align address to the boundary */ 3019 if (va & mask) { 3020 va = (va + mask) & ~mask; 3021 pa = (pa + mask) & ~mask; 3022 } 3023 3024 for (idx = 0; idx < nents; idx++) { 3025 pgsz = pgs[idx]; 3026 debugf("%u: %x -> %x, size=%x\n", idx, pa, va, pgsz); 3027 tlb1_set_entry(va, pa, pgsz, _TLB_ENTRY_MEM); 3028 pa += pgsz; 3029 va += pgsz; 3030 } 3031 3032 mapped = (va - base); 3033 debugf("mapped size 0x%08x (wasted space 0x%08x)\n", 3034 mapped, mapped - size); 3035 return (mapped); 3036} 3037 3038/* 3039 * TLB1 initialization routine, to be called after the very first 3040 * assembler level setup done in locore.S. 3041 */ 3042void 3043tlb1_init(vm_offset_t ccsrbar) 3044{ 3045 uint32_t mas0, mas1, mas3; 3046 uint32_t tsz; 3047 u_int i; 3048 3049 if (bootinfo != NULL && bootinfo[0] != 1) { 3050 tlb1_idx = *((uint16_t *)(bootinfo + 8)); 3051 } else 3052 tlb1_idx = 1; 3053 3054 /* The first entry/entries are used to map the kernel. 
/*
 * TLB1 initialization routine, to be called after the very first
 * assembler level setup done in locore.S.
 */
void
tlb1_init(vm_offset_t ccsrbar)
{
	uint32_t mas0, mas1, mas3;
	uint32_t tsz;
	u_int i;

	if (bootinfo != NULL && bootinfo[0] != 1) {
		tlb1_idx = *((uint16_t *)(bootinfo + 8));
	} else
		tlb1_idx = 1;

	/* The first entry/entries are used to map the kernel. */
	for (i = 0; i < tlb1_idx; i++) {
		mas0 = MAS0_TLBSEL(1) | MAS0_ESEL(i);
		mtspr(SPR_MAS0, mas0);
		__asm __volatile("isync; tlbre");

		mas1 = mfspr(SPR_MAS1);
		if ((mas1 & MAS1_VALID) == 0)
			continue;

		mas3 = mfspr(SPR_MAS3);

		tlb1[i].mas1 = mas1;
		tlb1[i].mas2 = mfspr(SPR_MAS2);
		tlb1[i].mas3 = mas3;

		if (i == 0)
			kernload = mas3 & MAS3_RPN;

		tsz = (mas1 & MAS1_TSIZE_MASK) >> MAS1_TSIZE_SHIFT;
		kernsize += (tsz > 0) ? tsize2size(tsz) : 0;
	}

	/* Map in CCSRBAR. */
	tlb1_set_entry(CCSRBAR_VA, ccsrbar, CCSRBAR_SIZE, _TLB_ENTRY_IO);

	/* Setup TLB miss defaults */
	set_mas4_defaults();
}

/*
 * Setup MAS4 defaults.
 * These values are loaded to MAS0-2 on a TLB miss.
 */
static void
set_mas4_defaults(void)
{
	uint32_t mas4;

	/* Defaults: TLB0, PID0, TSIZED=4K */
	mas4 = MAS4_TLBSELD0;
	mas4 |= (TLB_SIZE_4K << MAS4_TSIZED_SHIFT) & MAS4_TSIZED_MASK;
#ifdef SMP
	mas4 |= MAS4_MD;
#endif
	mtspr(SPR_MAS4, mas4);
	__asm __volatile("isync");
}

/*
 * Print out contents of the MAS registers for each TLB1 entry
 */
void
tlb1_print_tlbentries(void)
{
	uint32_t mas0, mas1, mas2, mas3, mas7;
	int i;

	debugf("TLB1 entries:\n");
	for (i = 0; i < TLB1_ENTRIES; i++) {

		mas0 = MAS0_TLBSEL(1) | MAS0_ESEL(i);
		mtspr(SPR_MAS0, mas0);

		__asm __volatile("isync; tlbre");

		mas1 = mfspr(SPR_MAS1);
		mas2 = mfspr(SPR_MAS2);
		mas3 = mfspr(SPR_MAS3);
		mas7 = mfspr(SPR_MAS7);

		tlb_print_entry(i, mas1, mas2, mas3, mas7);
	}
}

/*
 * Print out contents of the in-ram tlb1 table.
 */
void
tlb1_print_entries(void)
{
	int i;

	debugf("tlb1[] table entries:\n");
	for (i = 0; i < TLB1_ENTRIES; i++)
		tlb_print_entry(i, tlb1[i].mas1, tlb1[i].mas2, tlb1[i].mas3, 0);
}

/*
 * Return 0 if the physical IO range is encompassed by one of the
 * TLB1 entries, otherwise return the related error code.
 */
static int
tlb1_iomapped(int i, vm_paddr_t pa, vm_size_t size, vm_offset_t *va)
{
	uint32_t prot;
	vm_paddr_t pa_start;
	vm_paddr_t pa_end;
	unsigned int entry_tsize;
	vm_size_t entry_size;

	*va = (vm_offset_t)NULL;

	/* Skip invalid entries */
	if (!(tlb1[i].mas1 & MAS1_VALID))
		return (EINVAL);

	/*
	 * The entry must be cache-inhibited, guarded, and r/w
	 * so it can function as an i/o page
	 */
	prot = tlb1[i].mas2 & (MAS2_I | MAS2_G);
	if (prot != (MAS2_I | MAS2_G))
		return (EPERM);

	prot = tlb1[i].mas3 & (MAS3_SR | MAS3_SW);
	if (prot != (MAS3_SR | MAS3_SW))
		return (EPERM);

	/* The address should be within the entry range. */
	entry_tsize = (tlb1[i].mas1 & MAS1_TSIZE_MASK) >> MAS1_TSIZE_SHIFT;
	KASSERT((entry_tsize), ("tlb1_iomapped: invalid entry tsize"));

	entry_size = tsize2size(entry_tsize);
	pa_start = tlb1[i].mas3 & MAS3_RPN;
	pa_end = pa_start + entry_size - 1;

	if ((pa < pa_start) || ((pa + size) > pa_end))
		return (ERANGE);

	/* Return virtual address of this mapping. */
	*va = (tlb1[i].mas2 & MAS2_EPN_MASK) + (pa - pa_start);
	return (0);
}