pmap.c revision 235936
1/*- 2 * Copyright (C) 2007-2009 Semihalf, Rafal Jaworowski <raj@semihalf.com> 3 * Copyright (C) 2006 Semihalf, Marian Balakowicz <m8@semihalf.com> 4 * All rights reserved. 5 * 6 * Redistribution and use in source and binary forms, with or without 7 * modification, are permitted provided that the following conditions 8 * are met: 9 * 1. Redistributions of source code must retain the above copyright 10 * notice, this list of conditions and the following disclaimer. 11 * 2. Redistributions in binary form must reproduce the above copyright 12 * notice, this list of conditions and the following disclaimer in the 13 * documentation and/or other materials provided with the distribution. 14 * 15 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR 16 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES 17 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN 18 * NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 19 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED 20 * TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR 21 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF 22 * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING 23 * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS 24 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 25 * 26 * Some hw specific parts of this pmap were derived or influenced 27 * by NetBSD's ibm4xx pmap module. More generic code is shared with 28 * a few other pmap modules from the FreeBSD tree. 29 */ 30 31 /* 32 * VM layout notes: 33 * 34 * Kernel and user threads run within one common virtual address space 35 * defined by AS=0. 36 * 37 * Virtual address space layout: 38 * ----------------------------- 39 * 0x0000_0000 - 0xafff_ffff : user process 40 * 0xb000_0000 - 0xbfff_ffff : pmap_mapdev()-ed area (PCI/PCIE etc.) 41 * 0xc000_0000 - 0xc0ff_ffff : kernel reserved 42 * 0xc000_0000 - data_end : kernel code+data, env, metadata etc. 
43 * 0xc100_0000 - 0xfeef_ffff : KVA 44 * 0xc100_0000 - 0xc100_3fff : reserved for page zero/copy 45 * 0xc100_4000 - 0xc200_3fff : reserved for ptbl bufs 46 * 0xc200_4000 - 0xc200_8fff : guard page + kstack0 47 * 0xc200_9000 - 0xfeef_ffff : actual free KVA space 48 * 0xfef0_0000 - 0xffff_ffff : I/O devices region 49 */ 50 51#include <sys/cdefs.h> 52__FBSDID("$FreeBSD: head/sys/powerpc/booke/pmap.c 235936 2012-05-24 21:13:24Z raj $"); 53 54#include <sys/types.h> 55#include <sys/param.h> 56#include <sys/malloc.h> 57#include <sys/ktr.h> 58#include <sys/proc.h> 59#include <sys/user.h> 60#include <sys/queue.h> 61#include <sys/systm.h> 62#include <sys/kernel.h> 63#include <sys/linker.h> 64#include <sys/msgbuf.h> 65#include <sys/lock.h> 66#include <sys/mutex.h> 67#include <sys/sched.h> 68#include <sys/smp.h> 69#include <sys/vmmeter.h> 70 71#include <vm/vm.h> 72#include <vm/vm_page.h> 73#include <vm/vm_kern.h> 74#include <vm/vm_pageout.h> 75#include <vm/vm_extern.h> 76#include <vm/vm_object.h> 77#include <vm/vm_param.h> 78#include <vm/vm_map.h> 79#include <vm/vm_pager.h> 80#include <vm/uma.h> 81 82#include <machine/cpu.h> 83#include <machine/pcb.h> 84#include <machine/platform.h> 85 86#include <machine/tlb.h> 87#include <machine/spr.h> 88#include <machine/vmparam.h> 89#include <machine/md_var.h> 90#include <machine/mmuvar.h> 91#include <machine/pmap.h> 92#include <machine/pte.h> 93 94#include "mmu_if.h" 95 96#ifdef DEBUG 97#define debugf(fmt, args...) printf(fmt, ##args) 98#else 99#define debugf(fmt, args...) 100#endif 101 102#define TODO panic("%s: not implemented", __func__); 103 104#include "opt_sched.h" 105#ifndef SCHED_4BSD 106#error "e500 only works with SCHED_4BSD which uses a global scheduler lock." 107#endif 108extern struct mtx sched_lock; 109 110extern int dumpsys_minidump; 111 112extern unsigned char _etext[]; 113extern unsigned char _end[]; 114 115extern uint32_t *bootinfo; 116 117#ifdef SMP 118extern uint32_t bp_kernload; 119#endif 120 121vm_paddr_t kernload; 122vm_offset_t kernstart; 123vm_size_t kernsize; 124 125/* Message buffer and tables. */ 126static vm_offset_t data_start; 127static vm_size_t data_end; 128 129/* Phys/avail memory regions. */ 130static struct mem_region *availmem_regions; 131static int availmem_regions_sz; 132static struct mem_region *physmem_regions; 133static int physmem_regions_sz; 134 135/* Reserved KVA space and mutex for mmu_booke_zero_page. */ 136static vm_offset_t zero_page_va; 137static struct mtx zero_page_mutex; 138 139static struct mtx tlbivax_mutex; 140 141/* 142 * Reserved KVA space for mmu_booke_zero_page_idle. This is used 143 * by idle thred only, no lock required. 144 */ 145static vm_offset_t zero_page_idle_va; 146 147/* Reserved KVA space and mutex for mmu_booke_copy_page. */ 148static vm_offset_t copy_page_src_va; 149static vm_offset_t copy_page_dst_va; 150static struct mtx copy_page_mutex; 151 152/**************************************************************************/ 153/* PMAP */ 154/**************************************************************************/ 155 156static void mmu_booke_enter_locked(mmu_t, pmap_t, vm_offset_t, vm_page_t, 157 vm_prot_t, boolean_t); 158 159unsigned int kptbl_min; /* Index of the first kernel ptbl. */ 160unsigned int kernel_ptbls; /* Number of KVA ptbls. */ 161 162/* 163 * If user pmap is processed with mmu_booke_remove and the resident count 164 * drops to 0, there are no more pages to remove, so we need not continue. 
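 *
 * mmu_booke_remove() tests PMAP_REMOVE_DONE() before walking the requested
 * range, so a user pmap whose resident count has already reached zero is
 * skipped without touching its page tables; the kernel pmap never takes
 * this shortcut.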
165 */ 166#define PMAP_REMOVE_DONE(pmap) \ 167 ((pmap) != kernel_pmap && (pmap)->pm_stats.resident_count == 0) 168 169extern void tid_flush(tlbtid_t); 170 171/**************************************************************************/ 172/* TLB and TID handling */ 173/**************************************************************************/ 174 175/* Translation ID busy table */ 176static volatile pmap_t tidbusy[MAXCPU][TID_MAX + 1]; 177 178/* 179 * TLB0 capabilities (entry, way numbers etc.). These can vary between e500 180 * core revisions and should be read from h/w registers during early config. 181 */ 182uint32_t tlb0_entries; 183uint32_t tlb0_ways; 184uint32_t tlb0_entries_per_way; 185 186#define TLB0_ENTRIES (tlb0_entries) 187#define TLB0_WAYS (tlb0_ways) 188#define TLB0_ENTRIES_PER_WAY (tlb0_entries_per_way) 189 190#define TLB1_ENTRIES 16 191 192/* In-ram copy of the TLB1 */ 193static tlb_entry_t tlb1[TLB1_ENTRIES]; 194 195/* Next free entry in the TLB1 */ 196static unsigned int tlb1_idx; 197 198static tlbtid_t tid_alloc(struct pmap *); 199 200static void tlb_print_entry(int, uint32_t, uint32_t, uint32_t, uint32_t); 201 202static int tlb1_set_entry(vm_offset_t, vm_offset_t, vm_size_t, uint32_t); 203static void tlb1_write_entry(unsigned int); 204static int tlb1_iomapped(int, vm_paddr_t, vm_size_t, vm_offset_t *); 205static vm_size_t tlb1_mapin_region(vm_offset_t, vm_paddr_t, vm_size_t); 206 207static vm_size_t tsize2size(unsigned int); 208static unsigned int size2tsize(vm_size_t); 209static unsigned int ilog2(unsigned int); 210 211static void set_mas4_defaults(void); 212 213static inline void tlb0_flush_entry(vm_offset_t); 214static inline unsigned int tlb0_tableidx(vm_offset_t, unsigned int); 215 216/**************************************************************************/ 217/* Page table management */ 218/**************************************************************************/ 219 220/* Data for the pv entry allocation mechanism */ 221static uma_zone_t pvzone; 222static struct vm_object pvzone_obj; 223static int pv_entry_count = 0, pv_entry_max = 0, pv_entry_high_water = 0; 224 225#define PV_ENTRY_ZONE_MIN 2048 /* min pv entries in uma zone */ 226 227#ifndef PMAP_SHPGPERPROC 228#define PMAP_SHPGPERPROC 200 229#endif 230 231static void ptbl_init(void); 232static struct ptbl_buf *ptbl_buf_alloc(void); 233static void ptbl_buf_free(struct ptbl_buf *); 234static void ptbl_free_pmap_ptbl(pmap_t, pte_t *); 235 236static pte_t *ptbl_alloc(mmu_t, pmap_t, unsigned int); 237static void ptbl_free(mmu_t, pmap_t, unsigned int); 238static void ptbl_hold(mmu_t, pmap_t, unsigned int); 239static int ptbl_unhold(mmu_t, pmap_t, unsigned int); 240 241static vm_paddr_t pte_vatopa(mmu_t, pmap_t, vm_offset_t); 242static pte_t *pte_find(mmu_t, pmap_t, vm_offset_t); 243static void pte_enter(mmu_t, pmap_t, vm_page_t, vm_offset_t, uint32_t); 244static int pte_remove(mmu_t, pmap_t, vm_offset_t, uint8_t); 245 246static pv_entry_t pv_alloc(void); 247static void pv_free(pv_entry_t); 248static void pv_insert(pmap_t, vm_offset_t, vm_page_t); 249static void pv_remove(pmap_t, vm_offset_t, vm_page_t); 250 251/* Number of kva ptbl buffers, each covering one ptbl (PTBL_PAGES). */ 252#define PTBL_BUFS (128 * 16) 253 254struct ptbl_buf { 255 TAILQ_ENTRY(ptbl_buf) link; /* list link */ 256 vm_offset_t kva; /* va of mapping */ 257}; 258 259/* ptbl free list and a lock used for access synchronization. 
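 * Each ptbl_buf describes one PTBL_PAGES-sized slot of KVA taken from the
 * pool at ptbl_buf_pool_vabase (reserved in mmu_booke_bootstrap()).
 * ptbl_init() strings all PTBL_BUFS slots onto this freelist, and
 * ptbl_buf_alloc()/ptbl_buf_free() take and return slots under
 * ptbl_buf_freelist_lock.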
*/ 260static TAILQ_HEAD(, ptbl_buf) ptbl_buf_freelist; 261static struct mtx ptbl_buf_freelist_lock; 262 263/* Base address of kva space allocated fot ptbl bufs. */ 264static vm_offset_t ptbl_buf_pool_vabase; 265 266/* Pointer to ptbl_buf structures. */ 267static struct ptbl_buf *ptbl_bufs; 268 269void pmap_bootstrap_ap(volatile uint32_t *); 270 271/* 272 * Kernel MMU interface 273 */ 274static void mmu_booke_change_wiring(mmu_t, pmap_t, vm_offset_t, boolean_t); 275static void mmu_booke_clear_modify(mmu_t, vm_page_t); 276static void mmu_booke_clear_reference(mmu_t, vm_page_t); 277static void mmu_booke_copy(mmu_t, pmap_t, pmap_t, vm_offset_t, 278 vm_size_t, vm_offset_t); 279static void mmu_booke_copy_page(mmu_t, vm_page_t, vm_page_t); 280static void mmu_booke_enter(mmu_t, pmap_t, vm_offset_t, vm_page_t, 281 vm_prot_t, boolean_t); 282static void mmu_booke_enter_object(mmu_t, pmap_t, vm_offset_t, vm_offset_t, 283 vm_page_t, vm_prot_t); 284static void mmu_booke_enter_quick(mmu_t, pmap_t, vm_offset_t, vm_page_t, 285 vm_prot_t); 286static vm_paddr_t mmu_booke_extract(mmu_t, pmap_t, vm_offset_t); 287static vm_page_t mmu_booke_extract_and_hold(mmu_t, pmap_t, vm_offset_t, 288 vm_prot_t); 289static void mmu_booke_init(mmu_t); 290static boolean_t mmu_booke_is_modified(mmu_t, vm_page_t); 291static boolean_t mmu_booke_is_prefaultable(mmu_t, pmap_t, vm_offset_t); 292static boolean_t mmu_booke_is_referenced(mmu_t, vm_page_t); 293static boolean_t mmu_booke_ts_referenced(mmu_t, vm_page_t); 294static vm_offset_t mmu_booke_map(mmu_t, vm_offset_t *, vm_paddr_t, vm_paddr_t, 295 int); 296static int mmu_booke_mincore(mmu_t, pmap_t, vm_offset_t, 297 vm_paddr_t *); 298static void mmu_booke_object_init_pt(mmu_t, pmap_t, vm_offset_t, 299 vm_object_t, vm_pindex_t, vm_size_t); 300static boolean_t mmu_booke_page_exists_quick(mmu_t, pmap_t, vm_page_t); 301static void mmu_booke_page_init(mmu_t, vm_page_t); 302static int mmu_booke_page_wired_mappings(mmu_t, vm_page_t); 303static void mmu_booke_pinit(mmu_t, pmap_t); 304static void mmu_booke_pinit0(mmu_t, pmap_t); 305static void mmu_booke_protect(mmu_t, pmap_t, vm_offset_t, vm_offset_t, 306 vm_prot_t); 307static void mmu_booke_qenter(mmu_t, vm_offset_t, vm_page_t *, int); 308static void mmu_booke_qremove(mmu_t, vm_offset_t, int); 309static void mmu_booke_release(mmu_t, pmap_t); 310static void mmu_booke_remove(mmu_t, pmap_t, vm_offset_t, vm_offset_t); 311static void mmu_booke_remove_all(mmu_t, vm_page_t); 312static void mmu_booke_remove_write(mmu_t, vm_page_t); 313static void mmu_booke_zero_page(mmu_t, vm_page_t); 314static void mmu_booke_zero_page_area(mmu_t, vm_page_t, int, int); 315static void mmu_booke_zero_page_idle(mmu_t, vm_page_t); 316static void mmu_booke_activate(mmu_t, struct thread *); 317static void mmu_booke_deactivate(mmu_t, struct thread *); 318static void mmu_booke_bootstrap(mmu_t, vm_offset_t, vm_offset_t); 319static void *mmu_booke_mapdev(mmu_t, vm_paddr_t, vm_size_t); 320static void mmu_booke_unmapdev(mmu_t, vm_offset_t, vm_size_t); 321static vm_paddr_t mmu_booke_kextract(mmu_t, vm_offset_t); 322static void mmu_booke_kenter(mmu_t, vm_offset_t, vm_paddr_t); 323static void mmu_booke_kremove(mmu_t, vm_offset_t); 324static boolean_t mmu_booke_dev_direct_mapped(mmu_t, vm_paddr_t, vm_size_t); 325static void mmu_booke_sync_icache(mmu_t, pmap_t, vm_offset_t, 326 vm_size_t); 327static vm_offset_t mmu_booke_dumpsys_map(mmu_t, struct pmap_md *, 328 vm_size_t, vm_size_t *); 329static void mmu_booke_dumpsys_unmap(mmu_t, struct pmap_md *, 330 vm_size_t, vm_offset_t); 
331static struct pmap_md *mmu_booke_scan_md(mmu_t, struct pmap_md *); 332 333static mmu_method_t mmu_booke_methods[] = { 334 /* pmap dispatcher interface */ 335 MMUMETHOD(mmu_change_wiring, mmu_booke_change_wiring), 336 MMUMETHOD(mmu_clear_modify, mmu_booke_clear_modify), 337 MMUMETHOD(mmu_clear_reference, mmu_booke_clear_reference), 338 MMUMETHOD(mmu_copy, mmu_booke_copy), 339 MMUMETHOD(mmu_copy_page, mmu_booke_copy_page), 340 MMUMETHOD(mmu_enter, mmu_booke_enter), 341 MMUMETHOD(mmu_enter_object, mmu_booke_enter_object), 342 MMUMETHOD(mmu_enter_quick, mmu_booke_enter_quick), 343 MMUMETHOD(mmu_extract, mmu_booke_extract), 344 MMUMETHOD(mmu_extract_and_hold, mmu_booke_extract_and_hold), 345 MMUMETHOD(mmu_init, mmu_booke_init), 346 MMUMETHOD(mmu_is_modified, mmu_booke_is_modified), 347 MMUMETHOD(mmu_is_prefaultable, mmu_booke_is_prefaultable), 348 MMUMETHOD(mmu_is_referenced, mmu_booke_is_referenced), 349 MMUMETHOD(mmu_ts_referenced, mmu_booke_ts_referenced), 350 MMUMETHOD(mmu_map, mmu_booke_map), 351 MMUMETHOD(mmu_mincore, mmu_booke_mincore), 352 MMUMETHOD(mmu_object_init_pt, mmu_booke_object_init_pt), 353 MMUMETHOD(mmu_page_exists_quick,mmu_booke_page_exists_quick), 354 MMUMETHOD(mmu_page_init, mmu_booke_page_init), 355 MMUMETHOD(mmu_page_wired_mappings, mmu_booke_page_wired_mappings), 356 MMUMETHOD(mmu_pinit, mmu_booke_pinit), 357 MMUMETHOD(mmu_pinit0, mmu_booke_pinit0), 358 MMUMETHOD(mmu_protect, mmu_booke_protect), 359 MMUMETHOD(mmu_qenter, mmu_booke_qenter), 360 MMUMETHOD(mmu_qremove, mmu_booke_qremove), 361 MMUMETHOD(mmu_release, mmu_booke_release), 362 MMUMETHOD(mmu_remove, mmu_booke_remove), 363 MMUMETHOD(mmu_remove_all, mmu_booke_remove_all), 364 MMUMETHOD(mmu_remove_write, mmu_booke_remove_write), 365 MMUMETHOD(mmu_sync_icache, mmu_booke_sync_icache), 366 MMUMETHOD(mmu_zero_page, mmu_booke_zero_page), 367 MMUMETHOD(mmu_zero_page_area, mmu_booke_zero_page_area), 368 MMUMETHOD(mmu_zero_page_idle, mmu_booke_zero_page_idle), 369 MMUMETHOD(mmu_activate, mmu_booke_activate), 370 MMUMETHOD(mmu_deactivate, mmu_booke_deactivate), 371 372 /* Internal interfaces */ 373 MMUMETHOD(mmu_bootstrap, mmu_booke_bootstrap), 374 MMUMETHOD(mmu_dev_direct_mapped,mmu_booke_dev_direct_mapped), 375 MMUMETHOD(mmu_mapdev, mmu_booke_mapdev), 376 MMUMETHOD(mmu_kenter, mmu_booke_kenter), 377 MMUMETHOD(mmu_kextract, mmu_booke_kextract), 378/* MMUMETHOD(mmu_kremove, mmu_booke_kremove), */ 379 MMUMETHOD(mmu_unmapdev, mmu_booke_unmapdev), 380 381 /* dumpsys() support */ 382 MMUMETHOD(mmu_dumpsys_map, mmu_booke_dumpsys_map), 383 MMUMETHOD(mmu_dumpsys_unmap, mmu_booke_dumpsys_unmap), 384 MMUMETHOD(mmu_scan_md, mmu_booke_scan_md), 385 386 { 0, 0 } 387}; 388 389MMU_DEF(booke_mmu, MMU_TYPE_BOOKE, mmu_booke_methods, 0); 390 391static inline void 392tlb_miss_lock(void) 393{ 394#ifdef SMP 395 struct pcpu *pc; 396 397 if (!smp_started) 398 return; 399 400 STAILQ_FOREACH(pc, &cpuhead, pc_allcpu) { 401 if (pc != pcpup) { 402 403 CTR3(KTR_PMAP, "%s: tlb miss LOCK of CPU=%d, " 404 "tlb_lock=%p", __func__, pc->pc_cpuid, pc->pc_booke_tlb_lock); 405 406 KASSERT((pc->pc_cpuid != PCPU_GET(cpuid)), 407 ("tlb_miss_lock: tried to lock self")); 408 409 tlb_lock(pc->pc_booke_tlb_lock); 410 411 CTR1(KTR_PMAP, "%s: locked", __func__); 412 } 413 } 414#endif 415} 416 417static inline void 418tlb_miss_unlock(void) 419{ 420#ifdef SMP 421 struct pcpu *pc; 422 423 if (!smp_started) 424 return; 425 426 STAILQ_FOREACH(pc, &cpuhead, pc_allcpu) { 427 if (pc != pcpup) { 428 CTR2(KTR_PMAP, "%s: tlb miss UNLOCK of CPU=%d", 429 __func__, pc->pc_cpuid); 
430 431 tlb_unlock(pc->pc_booke_tlb_lock); 432 433 CTR1(KTR_PMAP, "%s: unlocked", __func__); 434 } 435 } 436#endif 437} 438 439/* Return number of entries in TLB0. */ 440static __inline void 441tlb0_get_tlbconf(void) 442{ 443 uint32_t tlb0_cfg; 444 445 tlb0_cfg = mfspr(SPR_TLB0CFG); 446 tlb0_entries = tlb0_cfg & TLBCFG_NENTRY_MASK; 447 tlb0_ways = (tlb0_cfg & TLBCFG_ASSOC_MASK) >> TLBCFG_ASSOC_SHIFT; 448 tlb0_entries_per_way = tlb0_entries / tlb0_ways; 449} 450 451/* Initialize pool of kva ptbl buffers. */ 452static void 453ptbl_init(void) 454{ 455 int i; 456 457 CTR3(KTR_PMAP, "%s: s (ptbl_bufs = 0x%08x size 0x%08x)", __func__, 458 (uint32_t)ptbl_bufs, sizeof(struct ptbl_buf) * PTBL_BUFS); 459 CTR3(KTR_PMAP, "%s: s (ptbl_buf_pool_vabase = 0x%08x size = 0x%08x)", 460 __func__, ptbl_buf_pool_vabase, PTBL_BUFS * PTBL_PAGES * PAGE_SIZE); 461 462 mtx_init(&ptbl_buf_freelist_lock, "ptbl bufs lock", NULL, MTX_DEF); 463 TAILQ_INIT(&ptbl_buf_freelist); 464 465 for (i = 0; i < PTBL_BUFS; i++) { 466 ptbl_bufs[i].kva = ptbl_buf_pool_vabase + i * PTBL_PAGES * PAGE_SIZE; 467 TAILQ_INSERT_TAIL(&ptbl_buf_freelist, &ptbl_bufs[i], link); 468 } 469} 470 471/* Get a ptbl_buf from the freelist. */ 472static struct ptbl_buf * 473ptbl_buf_alloc(void) 474{ 475 struct ptbl_buf *buf; 476 477 mtx_lock(&ptbl_buf_freelist_lock); 478 buf = TAILQ_FIRST(&ptbl_buf_freelist); 479 if (buf != NULL) 480 TAILQ_REMOVE(&ptbl_buf_freelist, buf, link); 481 mtx_unlock(&ptbl_buf_freelist_lock); 482 483 CTR2(KTR_PMAP, "%s: buf = %p", __func__, buf); 484 485 return (buf); 486} 487 488/* Return ptbl buff to free pool. */ 489static void 490ptbl_buf_free(struct ptbl_buf *buf) 491{ 492 493 CTR2(KTR_PMAP, "%s: buf = %p", __func__, buf); 494 495 mtx_lock(&ptbl_buf_freelist_lock); 496 TAILQ_INSERT_TAIL(&ptbl_buf_freelist, buf, link); 497 mtx_unlock(&ptbl_buf_freelist_lock); 498} 499 500/* 501 * Search the list of allocated ptbl bufs and find on list of allocated ptbls 502 */ 503static void 504ptbl_free_pmap_ptbl(pmap_t pmap, pte_t *ptbl) 505{ 506 struct ptbl_buf *pbuf; 507 508 CTR2(KTR_PMAP, "%s: ptbl = %p", __func__, ptbl); 509 510 PMAP_LOCK_ASSERT(pmap, MA_OWNED); 511 512 TAILQ_FOREACH(pbuf, &pmap->pm_ptbl_list, link) 513 if (pbuf->kva == (vm_offset_t)ptbl) { 514 /* Remove from pmap ptbl buf list. */ 515 TAILQ_REMOVE(&pmap->pm_ptbl_list, pbuf, link); 516 517 /* Free corresponding ptbl buf. */ 518 ptbl_buf_free(pbuf); 519 break; 520 } 521} 522 523/* Allocate page table. */ 524static pte_t * 525ptbl_alloc(mmu_t mmu, pmap_t pmap, unsigned int pdir_idx) 526{ 527 vm_page_t mtbl[PTBL_PAGES]; 528 vm_page_t m; 529 struct ptbl_buf *pbuf; 530 unsigned int pidx; 531 pte_t *ptbl; 532 int i; 533 534 CTR4(KTR_PMAP, "%s: pmap = %p su = %d pdir_idx = %d", __func__, pmap, 535 (pmap == kernel_pmap), pdir_idx); 536 537 KASSERT((pdir_idx <= (VM_MAXUSER_ADDRESS / PDIR_SIZE)), 538 ("ptbl_alloc: invalid pdir_idx")); 539 KASSERT((pmap->pm_pdir[pdir_idx] == NULL), 540 ("pte_alloc: valid ptbl entry exists!")); 541 542 pbuf = ptbl_buf_alloc(); 543 if (pbuf == NULL) 544 panic("pte_alloc: couldn't alloc kernel virtual memory"); 545 546 ptbl = (pte_t *)pbuf->kva; 547 548 CTR2(KTR_PMAP, "%s: ptbl kva = %p", __func__, ptbl); 549 550 /* Allocate ptbl pages, this will sleep! 
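 * The allocation loop below drops both the pmap lock and the page queues
 * lock around VM_WAIT, so the allocating thread may sleep until pages
 * become available; both locks are re-taken before vm_page_alloc() is
 * retried.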
*/ 551 for (i = 0; i < PTBL_PAGES; i++) { 552 pidx = (PTBL_PAGES * pdir_idx) + i; 553 while ((m = vm_page_alloc(NULL, pidx, 554 VM_ALLOC_NOOBJ | VM_ALLOC_WIRED)) == NULL) { 555 556 PMAP_UNLOCK(pmap); 557 vm_page_unlock_queues(); 558 VM_WAIT; 559 vm_page_lock_queues(); 560 PMAP_LOCK(pmap); 561 } 562 mtbl[i] = m; 563 } 564 565 /* Map allocated pages into kernel_pmap. */ 566 mmu_booke_qenter(mmu, (vm_offset_t)ptbl, mtbl, PTBL_PAGES); 567 568 /* Zero whole ptbl. */ 569 bzero((caddr_t)ptbl, PTBL_PAGES * PAGE_SIZE); 570 571 /* Add pbuf to the pmap ptbl bufs list. */ 572 TAILQ_INSERT_TAIL(&pmap->pm_ptbl_list, pbuf, link); 573 574 return (ptbl); 575} 576 577/* Free ptbl pages and invalidate pdir entry. */ 578static void 579ptbl_free(mmu_t mmu, pmap_t pmap, unsigned int pdir_idx) 580{ 581 pte_t *ptbl; 582 vm_paddr_t pa; 583 vm_offset_t va; 584 vm_page_t m; 585 int i; 586 587 CTR4(KTR_PMAP, "%s: pmap = %p su = %d pdir_idx = %d", __func__, pmap, 588 (pmap == kernel_pmap), pdir_idx); 589 590 KASSERT((pdir_idx <= (VM_MAXUSER_ADDRESS / PDIR_SIZE)), 591 ("ptbl_free: invalid pdir_idx")); 592 593 ptbl = pmap->pm_pdir[pdir_idx]; 594 595 CTR2(KTR_PMAP, "%s: ptbl = %p", __func__, ptbl); 596 597 KASSERT((ptbl != NULL), ("ptbl_free: null ptbl")); 598 599 /* 600 * Invalidate the pdir entry as soon as possible, so that other CPUs 601 * don't attempt to look up the page tables we are releasing. 602 */ 603 mtx_lock_spin(&tlbivax_mutex); 604 tlb_miss_lock(); 605 606 pmap->pm_pdir[pdir_idx] = NULL; 607 608 tlb_miss_unlock(); 609 mtx_unlock_spin(&tlbivax_mutex); 610 611 for (i = 0; i < PTBL_PAGES; i++) { 612 va = ((vm_offset_t)ptbl + (i * PAGE_SIZE)); 613 pa = pte_vatopa(mmu, kernel_pmap, va); 614 m = PHYS_TO_VM_PAGE(pa); 615 vm_page_free_zero(m); 616 atomic_subtract_int(&cnt.v_wire_count, 1); 617 mmu_booke_kremove(mmu, va); 618 } 619 620 ptbl_free_pmap_ptbl(pmap, ptbl); 621} 622 623/* 624 * Decrement ptbl pages hold count and attempt to free ptbl pages. 625 * Called when removing pte entry from ptbl. 626 * 627 * Return 1 if ptbl pages were freed. 628 */ 629static int 630ptbl_unhold(mmu_t mmu, pmap_t pmap, unsigned int pdir_idx) 631{ 632 pte_t *ptbl; 633 vm_paddr_t pa; 634 vm_page_t m; 635 int i; 636 637 CTR4(KTR_PMAP, "%s: pmap = %p su = %d pdir_idx = %d", __func__, pmap, 638 (pmap == kernel_pmap), pdir_idx); 639 640 KASSERT((pdir_idx <= (VM_MAXUSER_ADDRESS / PDIR_SIZE)), 641 ("ptbl_unhold: invalid pdir_idx")); 642 KASSERT((pmap != kernel_pmap), 643 ("ptbl_unhold: unholding kernel ptbl!")); 644 645 ptbl = pmap->pm_pdir[pdir_idx]; 646 647 //debugf("ptbl_unhold: ptbl = 0x%08x\n", (u_int32_t)ptbl); 648 KASSERT(((vm_offset_t)ptbl >= VM_MIN_KERNEL_ADDRESS), 649 ("ptbl_unhold: non kva ptbl")); 650 651 /* decrement hold count */ 652 for (i = 0; i < PTBL_PAGES; i++) { 653 pa = pte_vatopa(mmu, kernel_pmap, 654 (vm_offset_t)ptbl + (i * PAGE_SIZE)); 655 m = PHYS_TO_VM_PAGE(pa); 656 m->wire_count--; 657 } 658 659 /* 660 * Free ptbl pages if there are no pte etries in this ptbl. 661 * wire_count has the same value for all ptbl pages, so check the last 662 * page. 663 */ 664 if (m->wire_count == 0) { 665 ptbl_free(mmu, pmap, pdir_idx); 666 667 //debugf("ptbl_unhold: e (freed ptbl)\n"); 668 return (1); 669 } 670 671 return (0); 672} 673 674/* 675 * Increment hold count for ptbl pages. This routine is used when a new pte 676 * entry is being inserted into the ptbl. 
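 * The hold count lives in the wire_count of every page backing the ptbl;
 * ptbl_unhold() performs the matching decrement and calls ptbl_free()
 * once the count drops to zero.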
677 */ 678static void 679ptbl_hold(mmu_t mmu, pmap_t pmap, unsigned int pdir_idx) 680{ 681 vm_paddr_t pa; 682 pte_t *ptbl; 683 vm_page_t m; 684 int i; 685 686 CTR3(KTR_PMAP, "%s: pmap = %p pdir_idx = %d", __func__, pmap, 687 pdir_idx); 688 689 KASSERT((pdir_idx <= (VM_MAXUSER_ADDRESS / PDIR_SIZE)), 690 ("ptbl_hold: invalid pdir_idx")); 691 KASSERT((pmap != kernel_pmap), 692 ("ptbl_hold: holding kernel ptbl!")); 693 694 ptbl = pmap->pm_pdir[pdir_idx]; 695 696 KASSERT((ptbl != NULL), ("ptbl_hold: null ptbl")); 697 698 for (i = 0; i < PTBL_PAGES; i++) { 699 pa = pte_vatopa(mmu, kernel_pmap, 700 (vm_offset_t)ptbl + (i * PAGE_SIZE)); 701 m = PHYS_TO_VM_PAGE(pa); 702 m->wire_count++; 703 } 704} 705 706/* Allocate pv_entry structure. */ 707pv_entry_t 708pv_alloc(void) 709{ 710 pv_entry_t pv; 711 712 pv_entry_count++; 713 if (pv_entry_count > pv_entry_high_water) 714 pagedaemon_wakeup(); 715 pv = uma_zalloc(pvzone, M_NOWAIT); 716 717 return (pv); 718} 719 720/* Free pv_entry structure. */ 721static __inline void 722pv_free(pv_entry_t pve) 723{ 724 725 pv_entry_count--; 726 uma_zfree(pvzone, pve); 727} 728 729 730/* Allocate and initialize pv_entry structure. */ 731static void 732pv_insert(pmap_t pmap, vm_offset_t va, vm_page_t m) 733{ 734 pv_entry_t pve; 735 736 //int su = (pmap == kernel_pmap); 737 //debugf("pv_insert: s (su = %d pmap = 0x%08x va = 0x%08x m = 0x%08x)\n", su, 738 // (u_int32_t)pmap, va, (u_int32_t)m); 739 740 pve = pv_alloc(); 741 if (pve == NULL) 742 panic("pv_insert: no pv entries!"); 743 744 pve->pv_pmap = pmap; 745 pve->pv_va = va; 746 747 /* add to pv_list */ 748 PMAP_LOCK_ASSERT(pmap, MA_OWNED); 749 mtx_assert(&vm_page_queue_mtx, MA_OWNED); 750 751 TAILQ_INSERT_TAIL(&m->md.pv_list, pve, pv_link); 752 753 //debugf("pv_insert: e\n"); 754} 755 756/* Destroy pv entry. */ 757static void 758pv_remove(pmap_t pmap, vm_offset_t va, vm_page_t m) 759{ 760 pv_entry_t pve; 761 762 //int su = (pmap == kernel_pmap); 763 //debugf("pv_remove: s (su = %d pmap = 0x%08x va = 0x%08x)\n", su, (u_int32_t)pmap, va); 764 765 PMAP_LOCK_ASSERT(pmap, MA_OWNED); 766 mtx_assert(&vm_page_queue_mtx, MA_OWNED); 767 768 /* find pv entry */ 769 TAILQ_FOREACH(pve, &m->md.pv_list, pv_link) { 770 if ((pmap == pve->pv_pmap) && (va == pve->pv_va)) { 771 /* remove from pv_list */ 772 TAILQ_REMOVE(&m->md.pv_list, pve, pv_link); 773 if (TAILQ_EMPTY(&m->md.pv_list)) 774 vm_page_aflag_clear(m, PGA_WRITEABLE); 775 776 /* free pv entry struct */ 777 pv_free(pve); 778 break; 779 } 780 } 781 782 //debugf("pv_remove: e\n"); 783} 784 785/* 786 * Clean pte entry, try to free page table page if requested. 787 * 788 * Return 1 if ptbl pages were freed, otherwise return 0. 789 */ 790static int 791pte_remove(mmu_t mmu, pmap_t pmap, vm_offset_t va, uint8_t flags) 792{ 793 unsigned int pdir_idx = PDIR_IDX(va); 794 unsigned int ptbl_idx = PTBL_IDX(va); 795 vm_page_t m; 796 pte_t *ptbl; 797 pte_t *pte; 798 799 //int su = (pmap == kernel_pmap); 800 //debugf("pte_remove: s (su = %d pmap = 0x%08x va = 0x%08x flags = %d)\n", 801 // su, (u_int32_t)pmap, va, flags); 802 803 ptbl = pmap->pm_pdir[pdir_idx]; 804 KASSERT(ptbl, ("pte_remove: null ptbl")); 805 806 pte = &ptbl[ptbl_idx]; 807 808 if (pte == NULL || !PTE_ISVALID(pte)) 809 return (0); 810 811 if (PTE_ISWIRED(pte)) 812 pmap->pm_stats.wired_count--; 813 814 /* Handle managed entry. */ 815 if (PTE_ISMANAGED(pte)) { 816 /* Get vm_page_t for mapped pte. 
*/ 817 m = PHYS_TO_VM_PAGE(PTE_PA(pte)); 818 819 if (PTE_ISMODIFIED(pte)) 820 vm_page_dirty(m); 821 822 if (PTE_ISREFERENCED(pte)) 823 vm_page_aflag_set(m, PGA_REFERENCED); 824 825 pv_remove(pmap, va, m); 826 } 827 828 mtx_lock_spin(&tlbivax_mutex); 829 tlb_miss_lock(); 830 831 tlb0_flush_entry(va); 832 pte->flags = 0; 833 pte->rpn = 0; 834 835 tlb_miss_unlock(); 836 mtx_unlock_spin(&tlbivax_mutex); 837 838 pmap->pm_stats.resident_count--; 839 840 if (flags & PTBL_UNHOLD) { 841 //debugf("pte_remove: e (unhold)\n"); 842 return (ptbl_unhold(mmu, pmap, pdir_idx)); 843 } 844 845 //debugf("pte_remove: e\n"); 846 return (0); 847} 848 849/* 850 * Insert PTE for a given page and virtual address. 851 */ 852static void 853pte_enter(mmu_t mmu, pmap_t pmap, vm_page_t m, vm_offset_t va, uint32_t flags) 854{ 855 unsigned int pdir_idx = PDIR_IDX(va); 856 unsigned int ptbl_idx = PTBL_IDX(va); 857 pte_t *ptbl, *pte; 858 859 CTR4(KTR_PMAP, "%s: su = %d pmap = %p va = %p", __func__, 860 pmap == kernel_pmap, pmap, va); 861 862 /* Get the page table pointer. */ 863 ptbl = pmap->pm_pdir[pdir_idx]; 864 865 if (ptbl == NULL) { 866 /* Allocate page table pages. */ 867 ptbl = ptbl_alloc(mmu, pmap, pdir_idx); 868 } else { 869 /* 870 * Check if there is valid mapping for requested 871 * va, if there is, remove it. 872 */ 873 pte = &pmap->pm_pdir[pdir_idx][ptbl_idx]; 874 if (PTE_ISVALID(pte)) { 875 pte_remove(mmu, pmap, va, PTBL_HOLD); 876 } else { 877 /* 878 * pte is not used, increment hold count 879 * for ptbl pages. 880 */ 881 if (pmap != kernel_pmap) 882 ptbl_hold(mmu, pmap, pdir_idx); 883 } 884 } 885 886 /* 887 * Insert pv_entry into pv_list for mapped page if part of managed 888 * memory. 889 */ 890 if ((m->oflags & VPO_UNMANAGED) == 0) { 891 flags |= PTE_MANAGED; 892 893 /* Create and insert pv entry. */ 894 pv_insert(pmap, va, m); 895 } 896 897 pmap->pm_stats.resident_count++; 898 899 mtx_lock_spin(&tlbivax_mutex); 900 tlb_miss_lock(); 901 902 tlb0_flush_entry(va); 903 if (pmap->pm_pdir[pdir_idx] == NULL) { 904 /* 905 * If we just allocated a new page table, hook it in 906 * the pdir. 907 */ 908 pmap->pm_pdir[pdir_idx] = ptbl; 909 } 910 pte = &(pmap->pm_pdir[pdir_idx][ptbl_idx]); 911 pte->rpn = VM_PAGE_TO_PHYS(m) & ~PTE_PA_MASK; 912 pte->flags |= (PTE_VALID | flags); 913 914 tlb_miss_unlock(); 915 mtx_unlock_spin(&tlbivax_mutex); 916} 917 918/* Return the pa for the given pmap/va. */ 919static vm_paddr_t 920pte_vatopa(mmu_t mmu, pmap_t pmap, vm_offset_t va) 921{ 922 vm_paddr_t pa = 0; 923 pte_t *pte; 924 925 pte = pte_find(mmu, pmap, va); 926 if ((pte != NULL) && PTE_ISVALID(pte)) 927 pa = (PTE_PA(pte) | (va & PTE_PA_MASK)); 928 return (pa); 929} 930 931/* Get a pointer to a PTE in a page table. */ 932static pte_t * 933pte_find(mmu_t mmu, pmap_t pmap, vm_offset_t va) 934{ 935 unsigned int pdir_idx = PDIR_IDX(va); 936 unsigned int ptbl_idx = PTBL_IDX(va); 937 938 KASSERT((pmap != NULL), ("pte_find: invalid pmap")); 939 940 if (pmap->pm_pdir[pdir_idx]) 941 return (&(pmap->pm_pdir[pdir_idx][ptbl_idx])); 942 943 return (NULL); 944} 945 946/**************************************************************************/ 947/* PMAP related */ 948/**************************************************************************/ 949 950/* 951 * This is called during booke_init, before the system is really initialized. 
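 *
 * Rough order of operations, as implemented below: read the TLB0 geometry,
 * place the dpcpu area, message buffer, ptbl_buf array and the kernel page
 * tables right after the kernel image (growing the TLB1 mapping if the
 * existing entries do not cover them), set virtual_avail / virtual_end,
 * reserve KVA for the zero/copy pages and the ptbl buf pool, build
 * phys_avail[] from the available memory regions with the kernel carved
 * out, initialize the kernel pmap and its pdir, and finally wire in
 * kstack0 for thread0.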
952 */ 953static void 954mmu_booke_bootstrap(mmu_t mmu, vm_offset_t start, vm_offset_t kernelend) 955{ 956 vm_offset_t phys_kernelend; 957 struct mem_region *mp, *mp1; 958 int cnt, i, j; 959 u_int s, e, sz; 960 u_int phys_avail_count; 961 vm_size_t physsz, hwphyssz, kstack0_sz; 962 vm_offset_t kernel_pdir, kstack0, va; 963 vm_paddr_t kstack0_phys; 964 void *dpcpu; 965 pte_t *pte; 966 967 debugf("mmu_booke_bootstrap: entered\n"); 968 969#ifdef SMP 970 bp_kernload = kernload; 971#endif 972 973 /* Initialize invalidation mutex */ 974 mtx_init(&tlbivax_mutex, "tlbivax", NULL, MTX_SPIN); 975 976 /* Read TLB0 size and associativity. */ 977 tlb0_get_tlbconf(); 978 979 /* 980 * Align kernel start and end address (kernel image). 981 * Note that kernel end does not necessarily relate to kernsize. 982 * kernsize is the size of the kernel that is actually mapped. 983 * Also note that "start - 1" is deliberate. With SMP, the 984 * entry point is exactly a page from the actual load address. 985 * As such, trunc_page() has no effect and we're off by a page. 986 * Since we always have the ELF header between the load address 987 * and the entry point, we can safely subtract 1 to compensate. 988 */ 989 kernstart = trunc_page(start - 1); 990 data_start = round_page(kernelend); 991 data_end = data_start; 992 993 /* 994 * Addresses of preloaded modules (like file systems) use 995 * physical addresses. Make sure we relocate those into 996 * virtual addresses. 997 */ 998 preload_addr_relocate = kernstart - kernload; 999 1000 /* Allocate the dynamic per-cpu area. */ 1001 dpcpu = (void *)data_end; 1002 data_end += DPCPU_SIZE; 1003 1004 /* Allocate space for the message buffer. */ 1005 msgbufp = (struct msgbuf *)data_end; 1006 data_end += msgbufsize; 1007 debugf(" msgbufp at 0x%08x end = 0x%08x\n", (uint32_t)msgbufp, 1008 data_end); 1009 1010 data_end = round_page(data_end); 1011 1012 /* Allocate space for ptbl_bufs. */ 1013 ptbl_bufs = (struct ptbl_buf *)data_end; 1014 data_end += sizeof(struct ptbl_buf) * PTBL_BUFS; 1015 debugf(" ptbl_bufs at 0x%08x end = 0x%08x\n", (uint32_t)ptbl_bufs, 1016 data_end); 1017 1018 data_end = round_page(data_end); 1019 1020 /* Allocate PTE tables for kernel KVA. */ 1021 kernel_pdir = data_end; 1022 kernel_ptbls = (VM_MAX_KERNEL_ADDRESS - VM_MIN_KERNEL_ADDRESS + 1023 PDIR_SIZE - 1) / PDIR_SIZE; 1024 data_end += kernel_ptbls * PTBL_PAGES * PAGE_SIZE; 1025 debugf(" kernel ptbls: %d\n", kernel_ptbls); 1026 debugf(" kernel pdir at 0x%08x end = 0x%08x\n", kernel_pdir, data_end); 1027 1028 debugf(" data_end: 0x%08x\n", data_end); 1029 if (data_end - kernstart > kernsize) { 1030 kernsize += tlb1_mapin_region(kernstart + kernsize, 1031 kernload + kernsize, (data_end - kernstart) - kernsize); 1032 } 1033 data_end = kernstart + kernsize; 1034 debugf(" updated data_end: 0x%08x\n", data_end); 1035 1036 /* 1037 * Clear the structures - note we can only do it safely after the 1038 * possible additional TLB1 translations are in place (above) so that 1039 * all range up to the currently calculated 'data_end' is covered. 1040 */ 1041 dpcpu_init(dpcpu, 0); 1042 memset((void *)ptbl_bufs, 0, sizeof(struct ptbl_buf) * PTBL_SIZE); 1043 memset((void *)kernel_pdir, 0, kernel_ptbls * PTBL_PAGES * PAGE_SIZE); 1044 1045 /*******************************************************/ 1046 /* Set the start and end of kva. 
*/ 1047 /*******************************************************/ 1048 virtual_avail = round_page(data_end); 1049 virtual_end = VM_MAX_KERNEL_ADDRESS; 1050 1051 /* Allocate KVA space for page zero/copy operations. */ 1052 zero_page_va = virtual_avail; 1053 virtual_avail += PAGE_SIZE; 1054 zero_page_idle_va = virtual_avail; 1055 virtual_avail += PAGE_SIZE; 1056 copy_page_src_va = virtual_avail; 1057 virtual_avail += PAGE_SIZE; 1058 copy_page_dst_va = virtual_avail; 1059 virtual_avail += PAGE_SIZE; 1060 debugf("zero_page_va = 0x%08x\n", zero_page_va); 1061 debugf("zero_page_idle_va = 0x%08x\n", zero_page_idle_va); 1062 debugf("copy_page_src_va = 0x%08x\n", copy_page_src_va); 1063 debugf("copy_page_dst_va = 0x%08x\n", copy_page_dst_va); 1064 1065 /* Initialize page zero/copy mutexes. */ 1066 mtx_init(&zero_page_mutex, "mmu_booke_zero_page", NULL, MTX_DEF); 1067 mtx_init(©_page_mutex, "mmu_booke_copy_page", NULL, MTX_DEF); 1068 1069 /* Allocate KVA space for ptbl bufs. */ 1070 ptbl_buf_pool_vabase = virtual_avail; 1071 virtual_avail += PTBL_BUFS * PTBL_PAGES * PAGE_SIZE; 1072 debugf("ptbl_buf_pool_vabase = 0x%08x end = 0x%08x\n", 1073 ptbl_buf_pool_vabase, virtual_avail); 1074 1075 /* Calculate corresponding physical addresses for the kernel region. */ 1076 phys_kernelend = kernload + kernsize; 1077 debugf("kernel image and allocated data:\n"); 1078 debugf(" kernload = 0x%08x\n", kernload); 1079 debugf(" kernstart = 0x%08x\n", kernstart); 1080 debugf(" kernsize = 0x%08x\n", kernsize); 1081 1082 if (sizeof(phys_avail) / sizeof(phys_avail[0]) < availmem_regions_sz) 1083 panic("mmu_booke_bootstrap: phys_avail too small"); 1084 1085 /* 1086 * Remove kernel physical address range from avail regions list. Page 1087 * align all regions. Non-page aligned memory isn't very interesting 1088 * to us. Also, sort the entries for ascending addresses. 1089 */ 1090 1091 /* Retrieve phys/avail mem regions */ 1092 mem_regions(&physmem_regions, &physmem_regions_sz, 1093 &availmem_regions, &availmem_regions_sz); 1094 sz = 0; 1095 cnt = availmem_regions_sz; 1096 debugf("processing avail regions:\n"); 1097 for (mp = availmem_regions; mp->mr_size; mp++) { 1098 s = mp->mr_start; 1099 e = mp->mr_start + mp->mr_size; 1100 debugf(" %08x-%08x -> ", s, e); 1101 /* Check whether this region holds all of the kernel. */ 1102 if (s < kernload && e > phys_kernelend) { 1103 availmem_regions[cnt].mr_start = phys_kernelend; 1104 availmem_regions[cnt++].mr_size = e - phys_kernelend; 1105 e = kernload; 1106 } 1107 /* Look whether this regions starts within the kernel. */ 1108 if (s >= kernload && s < phys_kernelend) { 1109 if (e <= phys_kernelend) 1110 goto empty; 1111 s = phys_kernelend; 1112 } 1113 /* Now look whether this region ends within the kernel. */ 1114 if (e > kernload && e <= phys_kernelend) { 1115 if (s >= kernload) 1116 goto empty; 1117 e = kernload; 1118 } 1119 /* Now page align the start and size of the region. */ 1120 s = round_page(s); 1121 e = trunc_page(e); 1122 if (e < s) 1123 e = s; 1124 sz = e - s; 1125 debugf("%08x-%08x = %x\n", s, e, sz); 1126 1127 /* Check whether some memory is left here. */ 1128 if (sz == 0) { 1129 empty: 1130 memmove(mp, mp + 1, 1131 (cnt - (mp - availmem_regions)) * sizeof(*mp)); 1132 cnt--; 1133 mp--; 1134 continue; 1135 } 1136 1137 /* Do an insertion sort. 
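 * The sort keeps availmem_regions[] ordered by ascending mr_start, so the
 * phys_avail[] table filled in below comes out sorted as well.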
*/ 1138 for (mp1 = availmem_regions; mp1 < mp; mp1++) 1139 if (s < mp1->mr_start) 1140 break; 1141 if (mp1 < mp) { 1142 memmove(mp1 + 1, mp1, (char *)mp - (char *)mp1); 1143 mp1->mr_start = s; 1144 mp1->mr_size = sz; 1145 } else { 1146 mp->mr_start = s; 1147 mp->mr_size = sz; 1148 } 1149 } 1150 availmem_regions_sz = cnt; 1151 1152 /*******************************************************/ 1153 /* Steal physical memory for kernel stack from the end */ 1154 /* of the first avail region */ 1155 /*******************************************************/ 1156 kstack0_sz = KSTACK_PAGES * PAGE_SIZE; 1157 kstack0_phys = availmem_regions[0].mr_start + 1158 availmem_regions[0].mr_size; 1159 kstack0_phys -= kstack0_sz; 1160 availmem_regions[0].mr_size -= kstack0_sz; 1161 1162 /*******************************************************/ 1163 /* Fill in phys_avail table, based on availmem_regions */ 1164 /*******************************************************/ 1165 phys_avail_count = 0; 1166 physsz = 0; 1167 hwphyssz = 0; 1168 TUNABLE_ULONG_FETCH("hw.physmem", (u_long *) &hwphyssz); 1169 1170 debugf("fill in phys_avail:\n"); 1171 for (i = 0, j = 0; i < availmem_regions_sz; i++, j += 2) { 1172 1173 debugf(" region: 0x%08x - 0x%08x (0x%08x)\n", 1174 availmem_regions[i].mr_start, 1175 availmem_regions[i].mr_start + 1176 availmem_regions[i].mr_size, 1177 availmem_regions[i].mr_size); 1178 1179 if (hwphyssz != 0 && 1180 (physsz + availmem_regions[i].mr_size) >= hwphyssz) { 1181 debugf(" hw.physmem adjust\n"); 1182 if (physsz < hwphyssz) { 1183 phys_avail[j] = availmem_regions[i].mr_start; 1184 phys_avail[j + 1] = 1185 availmem_regions[i].mr_start + 1186 hwphyssz - physsz; 1187 physsz = hwphyssz; 1188 phys_avail_count++; 1189 } 1190 break; 1191 } 1192 1193 phys_avail[j] = availmem_regions[i].mr_start; 1194 phys_avail[j + 1] = availmem_regions[i].mr_start + 1195 availmem_regions[i].mr_size; 1196 phys_avail_count++; 1197 physsz += availmem_regions[i].mr_size; 1198 } 1199 physmem = btoc(physsz); 1200 1201 /* Calculate the last available physical address. */ 1202 for (i = 0; phys_avail[i + 2] != 0; i += 2) 1203 ; 1204 Maxmem = powerpc_btop(phys_avail[i + 1]); 1205 1206 debugf("Maxmem = 0x%08lx\n", Maxmem); 1207 debugf("phys_avail_count = %d\n", phys_avail_count); 1208 debugf("physsz = 0x%08x physmem = %ld (0x%08lx)\n", physsz, physmem, 1209 physmem); 1210 1211 /*******************************************************/ 1212 /* Initialize (statically allocated) kernel pmap. */ 1213 /*******************************************************/ 1214 PMAP_LOCK_INIT(kernel_pmap); 1215 kptbl_min = VM_MIN_KERNEL_ADDRESS / PDIR_SIZE; 1216 1217 debugf("kernel_pmap = 0x%08x\n", (uint32_t)kernel_pmap); 1218 debugf("kptbl_min = %d, kernel_ptbls = %d\n", kptbl_min, kernel_ptbls); 1219 debugf("kernel pdir range: 0x%08x - 0x%08x\n", 1220 kptbl_min * PDIR_SIZE, (kptbl_min + kernel_ptbls) * PDIR_SIZE - 1); 1221 1222 /* Initialize kernel pdir */ 1223 for (i = 0; i < kernel_ptbls; i++) 1224 kernel_pmap->pm_pdir[kptbl_min + i] = 1225 (pte_t *)(kernel_pdir + (i * PAGE_SIZE * PTBL_PAGES)); 1226 1227 for (i = 0; i < MAXCPU; i++) { 1228 kernel_pmap->pm_tid[i] = TID_KERNEL; 1229 1230 /* Initialize each CPU's tidbusy entry 0 with kernel_pmap */ 1231 tidbusy[i][0] = kernel_pmap; 1232 } 1233 1234 /* 1235 * Fill in PTEs covering kernel code and data. They are not required 1236 * for address translation, as this area is covered by static TLB1 1237 * entries, but for pte_vatopa() to work correctly with kernel area 1238 * addresses. 
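 * Each PTE below gets rpn = kernload + (va - kernstart) and is marked
 * wired and valid, so mmu_booke_kextract()/pte_vatopa() can resolve
 * kernel image addresses even though translation itself is served by
 * the static TLB1 entries.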
1239 */ 1240 for (va = kernstart; va < data_end; va += PAGE_SIZE) { 1241 pte = &(kernel_pmap->pm_pdir[PDIR_IDX(va)][PTBL_IDX(va)]); 1242 pte->rpn = kernload + (va - kernstart); 1243 pte->flags = PTE_M | PTE_SR | PTE_SW | PTE_SX | PTE_WIRED | 1244 PTE_VALID; 1245 } 1246 /* Mark kernel_pmap active on all CPUs */ 1247 CPU_FILL(&kernel_pmap->pm_active); 1248 1249 /*******************************************************/ 1250 /* Final setup */ 1251 /*******************************************************/ 1252 1253 /* Enter kstack0 into kernel map, provide guard page */ 1254 kstack0 = virtual_avail + KSTACK_GUARD_PAGES * PAGE_SIZE; 1255 thread0.td_kstack = kstack0; 1256 thread0.td_kstack_pages = KSTACK_PAGES; 1257 1258 debugf("kstack_sz = 0x%08x\n", kstack0_sz); 1259 debugf("kstack0_phys at 0x%08x - 0x%08x\n", 1260 kstack0_phys, kstack0_phys + kstack0_sz); 1261 debugf("kstack0 at 0x%08x - 0x%08x\n", kstack0, kstack0 + kstack0_sz); 1262 1263 virtual_avail += KSTACK_GUARD_PAGES * PAGE_SIZE + kstack0_sz; 1264 for (i = 0; i < KSTACK_PAGES; i++) { 1265 mmu_booke_kenter(mmu, kstack0, kstack0_phys); 1266 kstack0 += PAGE_SIZE; 1267 kstack0_phys += PAGE_SIZE; 1268 } 1269 1270 debugf("virtual_avail = %08x\n", virtual_avail); 1271 debugf("virtual_end = %08x\n", virtual_end); 1272 1273 debugf("mmu_booke_bootstrap: exit\n"); 1274} 1275 1276void 1277pmap_bootstrap_ap(volatile uint32_t *trcp __unused) 1278{ 1279 int i; 1280 1281 /* 1282 * Finish TLB1 configuration: the BSP already set up its TLB1 and we 1283 * have the snapshot of its contents in the s/w tlb1[] table, so use 1284 * these values directly to (re)program AP's TLB1 hardware. 1285 */ 1286 for (i = 0; i < tlb1_idx; i ++) { 1287 /* Skip invalid entries */ 1288 if (!(tlb1[i].mas1 & MAS1_VALID)) 1289 continue; 1290 1291 tlb1_write_entry(i); 1292 } 1293 1294 set_mas4_defaults(); 1295} 1296 1297/* 1298 * Get the physical page address for the given pmap/virtual address. 1299 */ 1300static vm_paddr_t 1301mmu_booke_extract(mmu_t mmu, pmap_t pmap, vm_offset_t va) 1302{ 1303 vm_paddr_t pa; 1304 1305 PMAP_LOCK(pmap); 1306 pa = pte_vatopa(mmu, pmap, va); 1307 PMAP_UNLOCK(pmap); 1308 1309 return (pa); 1310} 1311 1312/* 1313 * Extract the physical page address associated with the given 1314 * kernel virtual address. 1315 */ 1316static vm_paddr_t 1317mmu_booke_kextract(mmu_t mmu, vm_offset_t va) 1318{ 1319 1320 return (pte_vatopa(mmu, kernel_pmap, va)); 1321} 1322 1323/* 1324 * Initialize the pmap module. 1325 * Called by vm_init, to initialize any structures that the pmap 1326 * system needs to map virtual memory. 1327 */ 1328static void 1329mmu_booke_init(mmu_t mmu) 1330{ 1331 int shpgperproc = PMAP_SHPGPERPROC; 1332 1333 /* 1334 * Initialize the address space (zone) for the pv entries. Set a 1335 * high water mark so that the system can recover from excessive 1336 * numbers of pv entries. 1337 */ 1338 pvzone = uma_zcreate("PV ENTRY", sizeof(struct pv_entry), NULL, NULL, 1339 NULL, NULL, UMA_ALIGN_PTR, UMA_ZONE_VM | UMA_ZONE_NOFREE); 1340 1341 TUNABLE_INT_FETCH("vm.pmap.shpgperproc", &shpgperproc); 1342 pv_entry_max = shpgperproc * maxproc + cnt.v_page_count; 1343 1344 TUNABLE_INT_FETCH("vm.pmap.pv_entries", &pv_entry_max); 1345 pv_entry_high_water = 9 * (pv_entry_max / 10); 1346 1347 uma_zone_set_obj(pvzone, &pvzone_obj, pv_entry_max); 1348 1349 /* Pre-fill pvzone with initial number of pv entries. */ 1350 uma_prealloc(pvzone, PV_ENTRY_ZONE_MIN); 1351 1352 /* Initialize ptbl allocation. 
*/ 1353 ptbl_init(); 1354} 1355 1356/* 1357 * Map a list of wired pages into kernel virtual address space. This is 1358 * intended for temporary mappings which do not need page modification or 1359 * references recorded. Existing mappings in the region are overwritten. 1360 */ 1361static void 1362mmu_booke_qenter(mmu_t mmu, vm_offset_t sva, vm_page_t *m, int count) 1363{ 1364 vm_offset_t va; 1365 1366 va = sva; 1367 while (count-- > 0) { 1368 mmu_booke_kenter(mmu, va, VM_PAGE_TO_PHYS(*m)); 1369 va += PAGE_SIZE; 1370 m++; 1371 } 1372} 1373 1374/* 1375 * Remove page mappings from kernel virtual address space. Intended for 1376 * temporary mappings entered by mmu_booke_qenter. 1377 */ 1378static void 1379mmu_booke_qremove(mmu_t mmu, vm_offset_t sva, int count) 1380{ 1381 vm_offset_t va; 1382 1383 va = sva; 1384 while (count-- > 0) { 1385 mmu_booke_kremove(mmu, va); 1386 va += PAGE_SIZE; 1387 } 1388} 1389 1390/* 1391 * Map a wired page into kernel virtual address space. 1392 */ 1393static void 1394mmu_booke_kenter(mmu_t mmu, vm_offset_t va, vm_paddr_t pa) 1395{ 1396 unsigned int pdir_idx = PDIR_IDX(va); 1397 unsigned int ptbl_idx = PTBL_IDX(va); 1398 uint32_t flags; 1399 pte_t *pte; 1400 1401 KASSERT(((va >= VM_MIN_KERNEL_ADDRESS) && 1402 (va <= VM_MAX_KERNEL_ADDRESS)), ("mmu_booke_kenter: invalid va")); 1403 1404 flags = PTE_M | PTE_SR | PTE_SW | PTE_SX | PTE_WIRED | PTE_VALID; 1405 1406 pte = &(kernel_pmap->pm_pdir[pdir_idx][ptbl_idx]); 1407 1408 mtx_lock_spin(&tlbivax_mutex); 1409 tlb_miss_lock(); 1410 1411 if (PTE_ISVALID(pte)) { 1412 1413 CTR1(KTR_PMAP, "%s: replacing entry!", __func__); 1414 1415 /* Flush entry from TLB0 */ 1416 tlb0_flush_entry(va); 1417 } 1418 1419 pte->rpn = pa & ~PTE_PA_MASK; 1420 pte->flags = flags; 1421 1422 //debugf("mmu_booke_kenter: pdir_idx = %d ptbl_idx = %d va=0x%08x " 1423 // "pa=0x%08x rpn=0x%08x flags=0x%08x\n", 1424 // pdir_idx, ptbl_idx, va, pa, pte->rpn, pte->flags); 1425 1426 /* Flush the real memory from the instruction cache. */ 1427 if ((flags & (PTE_I | PTE_G)) == 0) { 1428 __syncicache((void *)va, PAGE_SIZE); 1429 } 1430 1431 tlb_miss_unlock(); 1432 mtx_unlock_spin(&tlbivax_mutex); 1433} 1434 1435/* 1436 * Remove a page from kernel page table. 1437 */ 1438static void 1439mmu_booke_kremove(mmu_t mmu, vm_offset_t va) 1440{ 1441 unsigned int pdir_idx = PDIR_IDX(va); 1442 unsigned int ptbl_idx = PTBL_IDX(va); 1443 pte_t *pte; 1444 1445// CTR2(KTR_PMAP,("%s: s (va = 0x%08x)\n", __func__, va)); 1446 1447 KASSERT(((va >= VM_MIN_KERNEL_ADDRESS) && 1448 (va <= VM_MAX_KERNEL_ADDRESS)), 1449 ("mmu_booke_kremove: invalid va")); 1450 1451 pte = &(kernel_pmap->pm_pdir[pdir_idx][ptbl_idx]); 1452 1453 if (!PTE_ISVALID(pte)) { 1454 1455 CTR1(KTR_PMAP, "%s: invalid pte", __func__); 1456 1457 return; 1458 } 1459 1460 mtx_lock_spin(&tlbivax_mutex); 1461 tlb_miss_lock(); 1462 1463 /* Invalidate entry in TLB0, update PTE. */ 1464 tlb0_flush_entry(va); 1465 pte->flags = 0; 1466 pte->rpn = 0; 1467 1468 tlb_miss_unlock(); 1469 mtx_unlock_spin(&tlbivax_mutex); 1470} 1471 1472/* 1473 * Initialize pmap associated with process 0. 1474 */ 1475static void 1476mmu_booke_pinit0(mmu_t mmu, pmap_t pmap) 1477{ 1478 1479 mmu_booke_pinit(mmu, pmap); 1480 PCPU_SET(curpmap, pmap); 1481} 1482 1483/* 1484 * Initialize a preallocated and zeroed pmap structure, 1485 * such as one in a vmspace structure. 
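 *
 * No translation IDs are assigned here: every pm_tid[] slot starts out as
 * TID_NONE and a TID is allocated lazily by tid_alloc() from
 * mmu_booke_activate() the first time the pmap runs on a given CPU.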
1486 */ 1487static void 1488mmu_booke_pinit(mmu_t mmu, pmap_t pmap) 1489{ 1490 int i; 1491 1492 CTR4(KTR_PMAP, "%s: pmap = %p, proc %d '%s'", __func__, pmap, 1493 curthread->td_proc->p_pid, curthread->td_proc->p_comm); 1494 1495 KASSERT((pmap != kernel_pmap), ("pmap_pinit: initializing kernel_pmap")); 1496 1497 PMAP_LOCK_INIT(pmap); 1498 for (i = 0; i < MAXCPU; i++) 1499 pmap->pm_tid[i] = TID_NONE; 1500 CPU_ZERO(&kernel_pmap->pm_active); 1501 bzero(&pmap->pm_stats, sizeof(pmap->pm_stats)); 1502 bzero(&pmap->pm_pdir, sizeof(pte_t *) * PDIR_NENTRIES); 1503 TAILQ_INIT(&pmap->pm_ptbl_list); 1504} 1505 1506/* 1507 * Release any resources held by the given physical map. 1508 * Called when a pmap initialized by mmu_booke_pinit is being released. 1509 * Should only be called if the map contains no valid mappings. 1510 */ 1511static void 1512mmu_booke_release(mmu_t mmu, pmap_t pmap) 1513{ 1514 1515 KASSERT(pmap->pm_stats.resident_count == 0, 1516 ("pmap_release: pmap resident count %ld != 0", 1517 pmap->pm_stats.resident_count)); 1518 1519 PMAP_LOCK_DESTROY(pmap); 1520} 1521 1522/* 1523 * Insert the given physical page at the specified virtual address in the 1524 * target physical map with the protection requested. If specified the page 1525 * will be wired down. 1526 */ 1527static void 1528mmu_booke_enter(mmu_t mmu, pmap_t pmap, vm_offset_t va, vm_page_t m, 1529 vm_prot_t prot, boolean_t wired) 1530{ 1531 1532 vm_page_lock_queues(); 1533 PMAP_LOCK(pmap); 1534 mmu_booke_enter_locked(mmu, pmap, va, m, prot, wired); 1535 vm_page_unlock_queues(); 1536 PMAP_UNLOCK(pmap); 1537} 1538 1539static void 1540mmu_booke_enter_locked(mmu_t mmu, pmap_t pmap, vm_offset_t va, vm_page_t m, 1541 vm_prot_t prot, boolean_t wired) 1542{ 1543 pte_t *pte; 1544 vm_paddr_t pa; 1545 uint32_t flags; 1546 int su, sync; 1547 1548 pa = VM_PAGE_TO_PHYS(m); 1549 su = (pmap == kernel_pmap); 1550 sync = 0; 1551 1552 //debugf("mmu_booke_enter_locked: s (pmap=0x%08x su=%d tid=%d m=0x%08x va=0x%08x " 1553 // "pa=0x%08x prot=0x%08x wired=%d)\n", 1554 // (u_int32_t)pmap, su, pmap->pm_tid, 1555 // (u_int32_t)m, va, pa, prot, wired); 1556 1557 if (su) { 1558 KASSERT(((va >= virtual_avail) && 1559 (va <= VM_MAX_KERNEL_ADDRESS)), 1560 ("mmu_booke_enter_locked: kernel pmap, non kernel va")); 1561 } else { 1562 KASSERT((va <= VM_MAXUSER_ADDRESS), 1563 ("mmu_booke_enter_locked: user pmap, non user va")); 1564 } 1565 KASSERT((m->oflags & (VPO_UNMANAGED | VPO_BUSY)) != 0 || 1566 VM_OBJECT_LOCKED(m->object), 1567 ("mmu_booke_enter_locked: page %p is not busy", m)); 1568 1569 PMAP_LOCK_ASSERT(pmap, MA_OWNED); 1570 1571 /* 1572 * If there is an existing mapping, and the physical address has not 1573 * changed, must be protection or wiring change. 1574 */ 1575 if (((pte = pte_find(mmu, pmap, va)) != NULL) && 1576 (PTE_ISVALID(pte)) && (PTE_PA(pte) == pa)) { 1577 1578 /* 1579 * Before actually updating pte->flags we calculate and 1580 * prepare its new value in a helper var. 1581 */ 1582 flags = pte->flags; 1583 flags &= ~(PTE_UW | PTE_UX | PTE_SW | PTE_SX | PTE_MODIFIED); 1584 1585 /* Wiring change, just update stats. */ 1586 if (wired) { 1587 if (!PTE_ISWIRED(pte)) { 1588 flags |= PTE_WIRED; 1589 pmap->pm_stats.wired_count++; 1590 } 1591 } else { 1592 if (PTE_ISWIRED(pte)) { 1593 flags &= ~PTE_WIRED; 1594 pmap->pm_stats.wired_count--; 1595 } 1596 } 1597 1598 if (prot & VM_PROT_WRITE) { 1599 /* Add write permissions. 
*/ 1600 flags |= PTE_SW; 1601 if (!su) 1602 flags |= PTE_UW; 1603 1604 if ((flags & PTE_MANAGED) != 0) 1605 vm_page_aflag_set(m, PGA_WRITEABLE); 1606 } else { 1607 /* Handle modified pages, sense modify status. */ 1608 1609 /* 1610 * The PTE_MODIFIED flag could be set by underlying 1611 * TLB misses since we last read it (above), possibly 1612 * other CPUs could update it so we check in the PTE 1613 * directly rather than rely on that saved local flags 1614 * copy. 1615 */ 1616 if (PTE_ISMODIFIED(pte)) 1617 vm_page_dirty(m); 1618 } 1619 1620 if (prot & VM_PROT_EXECUTE) { 1621 flags |= PTE_SX; 1622 if (!su) 1623 flags |= PTE_UX; 1624 1625 /* 1626 * Check existing flags for execute permissions: if we 1627 * are turning execute permissions on, icache should 1628 * be flushed. 1629 */ 1630 if ((pte->flags & (PTE_UX | PTE_SX)) == 0) 1631 sync++; 1632 } 1633 1634 flags &= ~PTE_REFERENCED; 1635 1636 /* 1637 * The new flags value is all calculated -- only now actually 1638 * update the PTE. 1639 */ 1640 mtx_lock_spin(&tlbivax_mutex); 1641 tlb_miss_lock(); 1642 1643 tlb0_flush_entry(va); 1644 pte->flags = flags; 1645 1646 tlb_miss_unlock(); 1647 mtx_unlock_spin(&tlbivax_mutex); 1648 1649 } else { 1650 /* 1651 * If there is an existing mapping, but it's for a different 1652 * physical address, pte_enter() will delete the old mapping. 1653 */ 1654 //if ((pte != NULL) && PTE_ISVALID(pte)) 1655 // debugf("mmu_booke_enter_locked: replace\n"); 1656 //else 1657 // debugf("mmu_booke_enter_locked: new\n"); 1658 1659 /* Now set up the flags and install the new mapping. */ 1660 flags = (PTE_SR | PTE_VALID); 1661 flags |= PTE_M; 1662 1663 if (!su) 1664 flags |= PTE_UR; 1665 1666 if (prot & VM_PROT_WRITE) { 1667 flags |= PTE_SW; 1668 if (!su) 1669 flags |= PTE_UW; 1670 1671 if ((m->oflags & VPO_UNMANAGED) == 0) 1672 vm_page_aflag_set(m, PGA_WRITEABLE); 1673 } 1674 1675 if (prot & VM_PROT_EXECUTE) { 1676 flags |= PTE_SX; 1677 if (!su) 1678 flags |= PTE_UX; 1679 } 1680 1681 /* If its wired update stats. */ 1682 if (wired) { 1683 pmap->pm_stats.wired_count++; 1684 flags |= PTE_WIRED; 1685 } 1686 1687 pte_enter(mmu, pmap, m, va, flags); 1688 1689 /* Flush the real memory from the instruction cache. */ 1690 if (prot & VM_PROT_EXECUTE) 1691 sync++; 1692 } 1693 1694 if (sync && (su || pmap == PCPU_GET(curpmap))) { 1695 __syncicache((void *)va, PAGE_SIZE); 1696 sync = 0; 1697 } 1698} 1699 1700/* 1701 * Maps a sequence of resident pages belonging to the same object. 1702 * The sequence begins with the given page m_start. This page is 1703 * mapped at the given virtual address start. Each subsequent page is 1704 * mapped at a virtual address that is offset from start by the same 1705 * amount as the page is offset from m_start within the object. The 1706 * last page in the sequence is the page with the largest offset from 1707 * m_start that can be mapped at a virtual address less than the given 1708 * virtual address end. Not every virtual page between start and end 1709 * is mapped; only those for which a resident page exists with the 1710 * corresponding offset from m_start are mapped. 
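 *
 * For example (numbers purely illustrative): with psize = 4 pages and
 * resident pages at offsets 0, 1 and 5 from m_start, the loop below maps
 * the pages at offsets 0 and 1 and stops when it reaches offset 5, since
 * that page would lie at or beyond 'end'.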
1711 */ 1712static void 1713mmu_booke_enter_object(mmu_t mmu, pmap_t pmap, vm_offset_t start, 1714 vm_offset_t end, vm_page_t m_start, vm_prot_t prot) 1715{ 1716 vm_page_t m; 1717 vm_pindex_t diff, psize; 1718 1719 psize = atop(end - start); 1720 m = m_start; 1721 vm_page_lock_queues(); 1722 PMAP_LOCK(pmap); 1723 while (m != NULL && (diff = m->pindex - m_start->pindex) < psize) { 1724 mmu_booke_enter_locked(mmu, pmap, start + ptoa(diff), m, 1725 prot & (VM_PROT_READ | VM_PROT_EXECUTE), FALSE); 1726 m = TAILQ_NEXT(m, listq); 1727 } 1728 vm_page_unlock_queues(); 1729 PMAP_UNLOCK(pmap); 1730} 1731 1732static void 1733mmu_booke_enter_quick(mmu_t mmu, pmap_t pmap, vm_offset_t va, vm_page_t m, 1734 vm_prot_t prot) 1735{ 1736 1737 vm_page_lock_queues(); 1738 PMAP_LOCK(pmap); 1739 mmu_booke_enter_locked(mmu, pmap, va, m, 1740 prot & (VM_PROT_READ | VM_PROT_EXECUTE), FALSE); 1741 vm_page_unlock_queues(); 1742 PMAP_UNLOCK(pmap); 1743} 1744 1745/* 1746 * Remove the given range of addresses from the specified map. 1747 * 1748 * It is assumed that the start and end are properly rounded to the page size. 1749 */ 1750static void 1751mmu_booke_remove(mmu_t mmu, pmap_t pmap, vm_offset_t va, vm_offset_t endva) 1752{ 1753 pte_t *pte; 1754 uint8_t hold_flag; 1755 1756 int su = (pmap == kernel_pmap); 1757 1758 //debugf("mmu_booke_remove: s (su = %d pmap=0x%08x tid=%d va=0x%08x endva=0x%08x)\n", 1759 // su, (u_int32_t)pmap, pmap->pm_tid, va, endva); 1760 1761 if (su) { 1762 KASSERT(((va >= virtual_avail) && 1763 (va <= VM_MAX_KERNEL_ADDRESS)), 1764 ("mmu_booke_remove: kernel pmap, non kernel va")); 1765 } else { 1766 KASSERT((va <= VM_MAXUSER_ADDRESS), 1767 ("mmu_booke_remove: user pmap, non user va")); 1768 } 1769 1770 if (PMAP_REMOVE_DONE(pmap)) { 1771 //debugf("mmu_booke_remove: e (empty)\n"); 1772 return; 1773 } 1774 1775 hold_flag = PTBL_HOLD_FLAG(pmap); 1776 //debugf("mmu_booke_remove: hold_flag = %d\n", hold_flag); 1777 1778 vm_page_lock_queues(); 1779 PMAP_LOCK(pmap); 1780 for (; va < endva; va += PAGE_SIZE) { 1781 pte = pte_find(mmu, pmap, va); 1782 if ((pte != NULL) && PTE_ISVALID(pte)) 1783 pte_remove(mmu, pmap, va, hold_flag); 1784 } 1785 PMAP_UNLOCK(pmap); 1786 vm_page_unlock_queues(); 1787 1788 //debugf("mmu_booke_remove: e\n"); 1789} 1790 1791/* 1792 * Remove physical page from all pmaps in which it resides. 1793 */ 1794static void 1795mmu_booke_remove_all(mmu_t mmu, vm_page_t m) 1796{ 1797 pv_entry_t pv, pvn; 1798 uint8_t hold_flag; 1799 1800 vm_page_lock_queues(); 1801 for (pv = TAILQ_FIRST(&m->md.pv_list); pv != NULL; pv = pvn) { 1802 pvn = TAILQ_NEXT(pv, pv_link); 1803 1804 PMAP_LOCK(pv->pv_pmap); 1805 hold_flag = PTBL_HOLD_FLAG(pv->pv_pmap); 1806 pte_remove(mmu, pv->pv_pmap, pv->pv_va, hold_flag); 1807 PMAP_UNLOCK(pv->pv_pmap); 1808 } 1809 vm_page_aflag_clear(m, PGA_WRITEABLE); 1810 vm_page_unlock_queues(); 1811} 1812 1813/* 1814 * Map a range of physical addresses into kernel virtual address space. 
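 *
 * The '*virt' cursor is advanced past the new wired mappings and the
 * starting VA is returned.  An illustrative call (the cursor name is just
 * an example, not taken from this file) would be:
 *
 *	va = mmu_booke_map(mmu, &kva_cursor, pa, pa + 3 * PAGE_SIZE, 0);
 *
 * after which 'va' holds the old cursor value and kva_cursor has grown by
 * three pages.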
1815 */ 1816static vm_offset_t 1817mmu_booke_map(mmu_t mmu, vm_offset_t *virt, vm_paddr_t pa_start, 1818 vm_paddr_t pa_end, int prot) 1819{ 1820 vm_offset_t sva = *virt; 1821 vm_offset_t va = sva; 1822 1823 //debugf("mmu_booke_map: s (sva = 0x%08x pa_start = 0x%08x pa_end = 0x%08x)\n", 1824 // sva, pa_start, pa_end); 1825 1826 while (pa_start < pa_end) { 1827 mmu_booke_kenter(mmu, va, pa_start); 1828 va += PAGE_SIZE; 1829 pa_start += PAGE_SIZE; 1830 } 1831 *virt = va; 1832 1833 //debugf("mmu_booke_map: e (va = 0x%08x)\n", va); 1834 return (sva); 1835} 1836 1837/* 1838 * The pmap must be activated before it's address space can be accessed in any 1839 * way. 1840 */ 1841static void 1842mmu_booke_activate(mmu_t mmu, struct thread *td) 1843{ 1844 pmap_t pmap; 1845 u_int cpuid; 1846 1847 pmap = &td->td_proc->p_vmspace->vm_pmap; 1848 1849 CTR5(KTR_PMAP, "%s: s (td = %p, proc = '%s', id = %d, pmap = 0x%08x)", 1850 __func__, td, td->td_proc->p_comm, td->td_proc->p_pid, pmap); 1851 1852 KASSERT((pmap != kernel_pmap), ("mmu_booke_activate: kernel_pmap!")); 1853 1854 mtx_lock_spin(&sched_lock); 1855 1856 cpuid = PCPU_GET(cpuid); 1857 CPU_SET_ATOMIC(cpuid, &pmap->pm_active); 1858 PCPU_SET(curpmap, pmap); 1859 1860 if (pmap->pm_tid[cpuid] == TID_NONE) 1861 tid_alloc(pmap); 1862 1863 /* Load PID0 register with pmap tid value. */ 1864 mtspr(SPR_PID0, pmap->pm_tid[cpuid]); 1865 __asm __volatile("isync"); 1866 1867 mtx_unlock_spin(&sched_lock); 1868 1869 CTR3(KTR_PMAP, "%s: e (tid = %d for '%s')", __func__, 1870 pmap->pm_tid[PCPU_GET(cpuid)], td->td_proc->p_comm); 1871} 1872 1873/* 1874 * Deactivate the specified process's address space. 1875 */ 1876static void 1877mmu_booke_deactivate(mmu_t mmu, struct thread *td) 1878{ 1879 pmap_t pmap; 1880 1881 pmap = &td->td_proc->p_vmspace->vm_pmap; 1882 1883 CTR5(KTR_PMAP, "%s: td=%p, proc = '%s', id = %d, pmap = 0x%08x", 1884 __func__, td, td->td_proc->p_comm, td->td_proc->p_pid, pmap); 1885 1886 CPU_CLR_ATOMIC(PCPU_GET(cpuid), &pmap->pm_active); 1887 PCPU_SET(curpmap, NULL); 1888} 1889 1890/* 1891 * Copy the range specified by src_addr/len 1892 * from the source map to the range dst_addr/len 1893 * in the destination map. 1894 * 1895 * This routine is only advisory and need not do anything. 1896 */ 1897static void 1898mmu_booke_copy(mmu_t mmu, pmap_t dst_pmap, pmap_t src_pmap, 1899 vm_offset_t dst_addr, vm_size_t len, vm_offset_t src_addr) 1900{ 1901 1902} 1903 1904/* 1905 * Set the physical protection on the specified range of this map as requested. 1906 */ 1907static void 1908mmu_booke_protect(mmu_t mmu, pmap_t pmap, vm_offset_t sva, vm_offset_t eva, 1909 vm_prot_t prot) 1910{ 1911 vm_offset_t va; 1912 vm_page_t m; 1913 pte_t *pte; 1914 1915 if ((prot & VM_PROT_READ) == VM_PROT_NONE) { 1916 mmu_booke_remove(mmu, pmap, sva, eva); 1917 return; 1918 } 1919 1920 if (prot & VM_PROT_WRITE) 1921 return; 1922 1923 PMAP_LOCK(pmap); 1924 for (va = sva; va < eva; va += PAGE_SIZE) { 1925 if ((pte = pte_find(mmu, pmap, va)) != NULL) { 1926 if (PTE_ISVALID(pte)) { 1927 m = PHYS_TO_VM_PAGE(PTE_PA(pte)); 1928 1929 mtx_lock_spin(&tlbivax_mutex); 1930 tlb_miss_lock(); 1931 1932 /* Handle modified pages. */ 1933 if (PTE_ISMODIFIED(pte) && PTE_ISMANAGED(pte)) 1934 vm_page_dirty(m); 1935 1936 tlb0_flush_entry(va); 1937 pte->flags &= ~(PTE_UW | PTE_SW | PTE_MODIFIED); 1938 1939 tlb_miss_unlock(); 1940 mtx_unlock_spin(&tlbivax_mutex); 1941 } 1942 } 1943 } 1944 PMAP_UNLOCK(pmap); 1945} 1946 1947/* 1948 * Clear the write and modified bits in each of the given page's mappings. 
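 *
 * Every mapping on the page's pv list is downgraded in place: a set
 * modified bit is folded into vm_page_dirty(), PTE_UW/PTE_SW and
 * PTE_MODIFIED are cleared under the tlbivax mutex, and PGA_WRITEABLE is
 * cleared once all mappings have been processed.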
1949 */ 1950static void 1951mmu_booke_remove_write(mmu_t mmu, vm_page_t m) 1952{ 1953 pv_entry_t pv; 1954 pte_t *pte; 1955 1956 KASSERT((m->oflags & VPO_UNMANAGED) == 0, 1957 ("mmu_booke_remove_write: page %p is not managed", m)); 1958 1959 /* 1960 * If the page is not VPO_BUSY, then PGA_WRITEABLE cannot be set by 1961 * another thread while the object is locked. Thus, if PGA_WRITEABLE 1962 * is clear, no page table entries need updating. 1963 */ 1964 VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED); 1965 if ((m->oflags & VPO_BUSY) == 0 && 1966 (m->aflags & PGA_WRITEABLE) == 0) 1967 return; 1968 vm_page_lock_queues(); 1969 TAILQ_FOREACH(pv, &m->md.pv_list, pv_link) { 1970 PMAP_LOCK(pv->pv_pmap); 1971 if ((pte = pte_find(mmu, pv->pv_pmap, pv->pv_va)) != NULL) { 1972 if (PTE_ISVALID(pte)) { 1973 m = PHYS_TO_VM_PAGE(PTE_PA(pte)); 1974 1975 mtx_lock_spin(&tlbivax_mutex); 1976 tlb_miss_lock(); 1977 1978 /* Handle modified pages. */ 1979 if (PTE_ISMODIFIED(pte)) 1980 vm_page_dirty(m); 1981 1982 /* Flush mapping from TLB0. */ 1983 pte->flags &= ~(PTE_UW | PTE_SW | PTE_MODIFIED); 1984 1985 tlb_miss_unlock(); 1986 mtx_unlock_spin(&tlbivax_mutex); 1987 } 1988 } 1989 PMAP_UNLOCK(pv->pv_pmap); 1990 } 1991 vm_page_aflag_clear(m, PGA_WRITEABLE); 1992 vm_page_unlock_queues(); 1993} 1994 1995static void 1996mmu_booke_sync_icache(mmu_t mmu, pmap_t pm, vm_offset_t va, vm_size_t sz) 1997{ 1998 pte_t *pte; 1999 pmap_t pmap; 2000 vm_page_t m; 2001 vm_offset_t addr; 2002 vm_paddr_t pa; 2003 int active, valid; 2004 2005 va = trunc_page(va); 2006 sz = round_page(sz); 2007 2008 vm_page_lock_queues(); 2009 pmap = PCPU_GET(curpmap); 2010 active = (pm == kernel_pmap || pm == pmap) ? 1 : 0; 2011 while (sz > 0) { 2012 PMAP_LOCK(pm); 2013 pte = pte_find(mmu, pm, va); 2014 valid = (pte != NULL && PTE_ISVALID(pte)) ? 1 : 0; 2015 if (valid) 2016 pa = PTE_PA(pte); 2017 PMAP_UNLOCK(pm); 2018 if (valid) { 2019 if (!active) { 2020 /* Create a mapping in the active pmap. */ 2021 addr = 0; 2022 m = PHYS_TO_VM_PAGE(pa); 2023 PMAP_LOCK(pmap); 2024 pte_enter(mmu, pmap, m, addr, 2025 PTE_SR | PTE_VALID | PTE_UR); 2026 __syncicache((void *)addr, PAGE_SIZE); 2027 pte_remove(mmu, pmap, addr, PTBL_UNHOLD); 2028 PMAP_UNLOCK(pmap); 2029 } else 2030 __syncicache((void *)va, PAGE_SIZE); 2031 } 2032 va += PAGE_SIZE; 2033 sz -= PAGE_SIZE; 2034 } 2035 vm_page_unlock_queues(); 2036} 2037 2038/* 2039 * Atomically extract and hold the physical page with the given 2040 * pmap and virtual address pair if that mapping permits the given 2041 * protection. 2042 */ 2043static vm_page_t 2044mmu_booke_extract_and_hold(mmu_t mmu, pmap_t pmap, vm_offset_t va, 2045 vm_prot_t prot) 2046{ 2047 pte_t *pte; 2048 vm_page_t m; 2049 uint32_t pte_wbit; 2050 vm_paddr_t pa; 2051 2052 m = NULL; 2053 pa = 0; 2054 PMAP_LOCK(pmap); 2055retry: 2056 pte = pte_find(mmu, pmap, va); 2057 if ((pte != NULL) && PTE_ISVALID(pte)) { 2058 if (pmap == kernel_pmap) 2059 pte_wbit = PTE_SW; 2060 else 2061 pte_wbit = PTE_UW; 2062 2063 if ((pte->flags & pte_wbit) || ((prot & VM_PROT_WRITE) == 0)) { 2064 if (vm_page_pa_tryrelock(pmap, PTE_PA(pte), &pa)) 2065 goto retry; 2066 m = PHYS_TO_VM_PAGE(PTE_PA(pte)); 2067 vm_page_hold(m); 2068 } 2069 } 2070 2071 PA_UNLOCK_COND(pa); 2072 PMAP_UNLOCK(pmap); 2073 return (m); 2074} 2075 2076/* 2077 * Initialize a vm_page's machine-dependent fields. 
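 *
 * For this pmap the only machine-dependent state is the page's pv list,
 * which is simply initialized empty below.  The routine is reached through
 * the MI pmap_page_init() hook as vm_page structures are set up, so it has
 * to stay cheap.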
 */
static void
mmu_booke_page_init(mmu_t mmu, vm_page_t m)
{

	TAILQ_INIT(&m->md.pv_list);
}

/*
 * mmu_booke_zero_page_area zeros the specified hardware page by
 * mapping it into virtual memory and using bzero to clear
 * its contents.
 *
 * off and size must reside within a single page.
 */
static void
mmu_booke_zero_page_area(mmu_t mmu, vm_page_t m, int off, int size)
{
	vm_offset_t va;

	/* XXX KASSERT off and size are within a single page? */

	mtx_lock(&zero_page_mutex);
	va = zero_page_va;

	mmu_booke_kenter(mmu, va, VM_PAGE_TO_PHYS(m));
	bzero((caddr_t)va + off, size);
	mmu_booke_kremove(mmu, va);

	mtx_unlock(&zero_page_mutex);
}

/*
 * mmu_booke_zero_page zeros the specified hardware page.
 */
static void
mmu_booke_zero_page(mmu_t mmu, vm_page_t m)
{

	mmu_booke_zero_page_area(mmu, m, 0, PAGE_SIZE);
}

/*
 * mmu_booke_copy_page copies the specified (machine independent) page by
 * mapping the page into virtual memory and using memcpy to copy the page,
 * one machine dependent page at a time.
 */
static void
mmu_booke_copy_page(mmu_t mmu, vm_page_t sm, vm_page_t dm)
{
	vm_offset_t sva, dva;

	sva = copy_page_src_va;
	dva = copy_page_dst_va;

	mtx_lock(&copy_page_mutex);
	mmu_booke_kenter(mmu, sva, VM_PAGE_TO_PHYS(sm));
	mmu_booke_kenter(mmu, dva, VM_PAGE_TO_PHYS(dm));
	memcpy((caddr_t)dva, (caddr_t)sva, PAGE_SIZE);
	mmu_booke_kremove(mmu, dva);
	mmu_booke_kremove(mmu, sva);
	mtx_unlock(&copy_page_mutex);
}

/*
 * mmu_booke_zero_page_idle zeros the specified hardware page by mapping it
 * into virtual memory and using bzero to clear its contents. This is intended
 * to be called from the vm_pagezero process only and outside of Giant. No
 * lock is required.
 */
static void
mmu_booke_zero_page_idle(mmu_t mmu, vm_page_t m)
{
	vm_offset_t va;

	va = zero_page_idle_va;
	mmu_booke_kenter(mmu, va, VM_PAGE_TO_PHYS(m));
	bzero((caddr_t)va, PAGE_SIZE);
	mmu_booke_kremove(mmu, va);
}

/*
 * Return whether or not the specified physical page was modified
 * in any physical map.
 */
static boolean_t
mmu_booke_is_modified(mmu_t mmu, vm_page_t m)
{
	pte_t *pte;
	pv_entry_t pv;
	boolean_t rv;

	KASSERT((m->oflags & VPO_UNMANAGED) == 0,
	    ("mmu_booke_is_modified: page %p is not managed", m));
	rv = FALSE;

	/*
	 * If the page is not VPO_BUSY, then PGA_WRITEABLE cannot be
	 * concurrently set while the object is locked. Thus, if PGA_WRITEABLE
	 * is clear, no PTEs can be modified.
	 */
	VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED);
	if ((m->oflags & VPO_BUSY) == 0 &&
	    (m->aflags & PGA_WRITEABLE) == 0)
		return (rv);
	vm_page_lock_queues();
	TAILQ_FOREACH(pv, &m->md.pv_list, pv_link) {
		PMAP_LOCK(pv->pv_pmap);
		if ((pte = pte_find(mmu, pv->pv_pmap, pv->pv_va)) != NULL &&
		    PTE_ISVALID(pte)) {
			if (PTE_ISMODIFIED(pte))
				rv = TRUE;
		}
		PMAP_UNLOCK(pv->pv_pmap);
		if (rv)
			break;
	}
	vm_page_unlock_queues();
	return (rv);
}

/*
 * Return whether or not the specified virtual address is eligible
 * for prefault.
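 *
 * This implementation always reports FALSE, which tells the fault-ahead
 * code in vm_fault not to attempt speculative (prefault) mappings through
 * this pmap.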
2202 */ 2203static boolean_t 2204mmu_booke_is_prefaultable(mmu_t mmu, pmap_t pmap, vm_offset_t addr) 2205{ 2206 2207 return (FALSE); 2208} 2209 2210/* 2211 * Return whether or not the specified physical page was referenced 2212 * in any physical maps. 2213 */ 2214static boolean_t 2215mmu_booke_is_referenced(mmu_t mmu, vm_page_t m) 2216{ 2217 pte_t *pte; 2218 pv_entry_t pv; 2219 boolean_t rv; 2220 2221 KASSERT((m->oflags & VPO_UNMANAGED) == 0, 2222 ("mmu_booke_is_referenced: page %p is not managed", m)); 2223 rv = FALSE; 2224 vm_page_lock_queues(); 2225 TAILQ_FOREACH(pv, &m->md.pv_list, pv_link) { 2226 PMAP_LOCK(pv->pv_pmap); 2227 if ((pte = pte_find(mmu, pv->pv_pmap, pv->pv_va)) != NULL && 2228 PTE_ISVALID(pte)) { 2229 if (PTE_ISREFERENCED(pte)) 2230 rv = TRUE; 2231 } 2232 PMAP_UNLOCK(pv->pv_pmap); 2233 if (rv) 2234 break; 2235 } 2236 vm_page_unlock_queues(); 2237 return (rv); 2238} 2239 2240/* 2241 * Clear the modify bits on the specified physical page. 2242 */ 2243static void 2244mmu_booke_clear_modify(mmu_t mmu, vm_page_t m) 2245{ 2246 pte_t *pte; 2247 pv_entry_t pv; 2248 2249 KASSERT((m->oflags & VPO_UNMANAGED) == 0, 2250 ("mmu_booke_clear_modify: page %p is not managed", m)); 2251 VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED); 2252 KASSERT((m->oflags & VPO_BUSY) == 0, 2253 ("mmu_booke_clear_modify: page %p is busy", m)); 2254 2255 /* 2256 * If the page is not PG_AWRITEABLE, then no PTEs can be modified. 2257 * If the object containing the page is locked and the page is not 2258 * VPO_BUSY, then PG_AWRITEABLE cannot be concurrently set. 2259 */ 2260 if ((m->aflags & PGA_WRITEABLE) == 0) 2261 return; 2262 vm_page_lock_queues(); 2263 TAILQ_FOREACH(pv, &m->md.pv_list, pv_link) { 2264 PMAP_LOCK(pv->pv_pmap); 2265 if ((pte = pte_find(mmu, pv->pv_pmap, pv->pv_va)) != NULL && 2266 PTE_ISVALID(pte)) { 2267 mtx_lock_spin(&tlbivax_mutex); 2268 tlb_miss_lock(); 2269 2270 if (pte->flags & (PTE_SW | PTE_UW | PTE_MODIFIED)) { 2271 tlb0_flush_entry(pv->pv_va); 2272 pte->flags &= ~(PTE_SW | PTE_UW | PTE_MODIFIED | 2273 PTE_REFERENCED); 2274 } 2275 2276 tlb_miss_unlock(); 2277 mtx_unlock_spin(&tlbivax_mutex); 2278 } 2279 PMAP_UNLOCK(pv->pv_pmap); 2280 } 2281 vm_page_unlock_queues(); 2282} 2283 2284/* 2285 * Return a count of reference bits for a page, clearing those bits. 2286 * It is not necessary for every reference bit to be cleared, but it 2287 * is necessary that 0 only be returned when there are truly no 2288 * reference bits set. 2289 * 2290 * XXX: The exact number of bits to check and clear is a matter that 2291 * should be tested and standardized at some point in the future for 2292 * optimal aging of shared pages. 
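 *
 * As implemented below, the scan stops once five referenced mappings have
 * been counted and cleared, which keeps the cost of aging a widely shared
 * page bounded.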
2293 */ 2294static int 2295mmu_booke_ts_referenced(mmu_t mmu, vm_page_t m) 2296{ 2297 pte_t *pte; 2298 pv_entry_t pv; 2299 int count; 2300 2301 KASSERT((m->oflags & VPO_UNMANAGED) == 0, 2302 ("mmu_booke_ts_referenced: page %p is not managed", m)); 2303 count = 0; 2304 vm_page_lock_queues(); 2305 TAILQ_FOREACH(pv, &m->md.pv_list, pv_link) { 2306 PMAP_LOCK(pv->pv_pmap); 2307 if ((pte = pte_find(mmu, pv->pv_pmap, pv->pv_va)) != NULL && 2308 PTE_ISVALID(pte)) { 2309 if (PTE_ISREFERENCED(pte)) { 2310 mtx_lock_spin(&tlbivax_mutex); 2311 tlb_miss_lock(); 2312 2313 tlb0_flush_entry(pv->pv_va); 2314 pte->flags &= ~PTE_REFERENCED; 2315 2316 tlb_miss_unlock(); 2317 mtx_unlock_spin(&tlbivax_mutex); 2318 2319 if (++count > 4) { 2320 PMAP_UNLOCK(pv->pv_pmap); 2321 break; 2322 } 2323 } 2324 } 2325 PMAP_UNLOCK(pv->pv_pmap); 2326 } 2327 vm_page_unlock_queues(); 2328 return (count); 2329} 2330 2331/* 2332 * Clear the reference bit on the specified physical page. 2333 */ 2334static void 2335mmu_booke_clear_reference(mmu_t mmu, vm_page_t m) 2336{ 2337 pte_t *pte; 2338 pv_entry_t pv; 2339 2340 KASSERT((m->oflags & VPO_UNMANAGED) == 0, 2341 ("mmu_booke_clear_reference: page %p is not managed", m)); 2342 vm_page_lock_queues(); 2343 TAILQ_FOREACH(pv, &m->md.pv_list, pv_link) { 2344 PMAP_LOCK(pv->pv_pmap); 2345 if ((pte = pte_find(mmu, pv->pv_pmap, pv->pv_va)) != NULL && 2346 PTE_ISVALID(pte)) { 2347 if (PTE_ISREFERENCED(pte)) { 2348 mtx_lock_spin(&tlbivax_mutex); 2349 tlb_miss_lock(); 2350 2351 tlb0_flush_entry(pv->pv_va); 2352 pte->flags &= ~PTE_REFERENCED; 2353 2354 tlb_miss_unlock(); 2355 mtx_unlock_spin(&tlbivax_mutex); 2356 } 2357 } 2358 PMAP_UNLOCK(pv->pv_pmap); 2359 } 2360 vm_page_unlock_queues(); 2361} 2362 2363/* 2364 * Change wiring attribute for a map/virtual-address pair. 2365 */ 2366static void 2367mmu_booke_change_wiring(mmu_t mmu, pmap_t pmap, vm_offset_t va, boolean_t wired) 2368{ 2369 pte_t *pte; 2370 2371 PMAP_LOCK(pmap); 2372 if ((pte = pte_find(mmu, pmap, va)) != NULL) { 2373 if (wired) { 2374 if (!PTE_ISWIRED(pte)) { 2375 pte->flags |= PTE_WIRED; 2376 pmap->pm_stats.wired_count++; 2377 } 2378 } else { 2379 if (PTE_ISWIRED(pte)) { 2380 pte->flags &= ~PTE_WIRED; 2381 pmap->pm_stats.wired_count--; 2382 } 2383 } 2384 } 2385 PMAP_UNLOCK(pmap); 2386} 2387 2388/* 2389 * Return true if the pmap's pv is one of the first 16 pvs linked to from this 2390 * page. This count may be changed upwards or downwards in the future; it is 2391 * only necessary that true be returned for a small subset of pmaps for proper 2392 * page aging. 2393 */ 2394static boolean_t 2395mmu_booke_page_exists_quick(mmu_t mmu, pmap_t pmap, vm_page_t m) 2396{ 2397 pv_entry_t pv; 2398 int loops; 2399 boolean_t rv; 2400 2401 KASSERT((m->oflags & VPO_UNMANAGED) == 0, 2402 ("mmu_booke_page_exists_quick: page %p is not managed", m)); 2403 loops = 0; 2404 rv = FALSE; 2405 vm_page_lock_queues(); 2406 TAILQ_FOREACH(pv, &m->md.pv_list, pv_link) { 2407 if (pv->pv_pmap == pmap) { 2408 rv = TRUE; 2409 break; 2410 } 2411 if (++loops >= 16) 2412 break; 2413 } 2414 vm_page_unlock_queues(); 2415 return (rv); 2416} 2417 2418/* 2419 * Return the number of managed mappings to the given physical page that are 2420 * wired. 
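 *
 * Unmanaged pages carry no pv list and therefore always report zero here;
 * for managed pages each pv entry's PTE is checked for PTE_WIRED.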
2421 */ 2422static int 2423mmu_booke_page_wired_mappings(mmu_t mmu, vm_page_t m) 2424{ 2425 pv_entry_t pv; 2426 pte_t *pte; 2427 int count = 0; 2428 2429 if ((m->oflags & VPO_UNMANAGED) != 0) 2430 return (count); 2431 vm_page_lock_queues(); 2432 TAILQ_FOREACH(pv, &m->md.pv_list, pv_link) { 2433 PMAP_LOCK(pv->pv_pmap); 2434 if ((pte = pte_find(mmu, pv->pv_pmap, pv->pv_va)) != NULL) 2435 if (PTE_ISVALID(pte) && PTE_ISWIRED(pte)) 2436 count++; 2437 PMAP_UNLOCK(pv->pv_pmap); 2438 } 2439 vm_page_unlock_queues(); 2440 return (count); 2441} 2442 2443static int 2444mmu_booke_dev_direct_mapped(mmu_t mmu, vm_paddr_t pa, vm_size_t size) 2445{ 2446 int i; 2447 vm_offset_t va; 2448 2449 /* 2450 * This currently does not work for entries that 2451 * overlap TLB1 entries. 2452 */ 2453 for (i = 0; i < tlb1_idx; i ++) { 2454 if (tlb1_iomapped(i, pa, size, &va) == 0) 2455 return (0); 2456 } 2457 2458 return (EFAULT); 2459} 2460 2461vm_offset_t 2462mmu_booke_dumpsys_map(mmu_t mmu, struct pmap_md *md, vm_size_t ofs, 2463 vm_size_t *sz) 2464{ 2465 vm_paddr_t pa, ppa; 2466 vm_offset_t va; 2467 vm_size_t gran; 2468 2469 /* Raw physical memory dumps don't have a virtual address. */ 2470 if (md->md_vaddr == ~0UL) { 2471 /* We always map a 256MB page at 256M. */ 2472 gran = 256 * 1024 * 1024; 2473 pa = md->md_paddr + ofs; 2474 ppa = pa & ~(gran - 1); 2475 ofs = pa - ppa; 2476 va = gran; 2477 tlb1_set_entry(va, ppa, gran, _TLB_ENTRY_IO); 2478 if (*sz > (gran - ofs)) 2479 *sz = gran - ofs; 2480 return (va + ofs); 2481 } 2482 2483 /* Minidumps are based on virtual memory addresses. */ 2484 va = md->md_vaddr + ofs; 2485 if (va >= kernstart + kernsize) { 2486 gran = PAGE_SIZE - (va & PAGE_MASK); 2487 if (*sz > gran) 2488 *sz = gran; 2489 } 2490 return (va); 2491} 2492 2493void 2494mmu_booke_dumpsys_unmap(mmu_t mmu, struct pmap_md *md, vm_size_t ofs, 2495 vm_offset_t va) 2496{ 2497 2498 /* Raw physical memory dumps don't have a virtual address. */ 2499 if (md->md_vaddr == ~0UL) { 2500 tlb1_idx--; 2501 tlb1[tlb1_idx].mas1 = 0; 2502 tlb1[tlb1_idx].mas2 = 0; 2503 tlb1[tlb1_idx].mas3 = 0; 2504 tlb1_write_entry(tlb1_idx); 2505 return; 2506 } 2507 2508 /* Minidumps are based on virtual memory addresses. */ 2509 /* Nothing to do... */ 2510} 2511 2512struct pmap_md * 2513mmu_booke_scan_md(mmu_t mmu, struct pmap_md *prev) 2514{ 2515 static struct pmap_md md; 2516 pte_t *pte; 2517 vm_offset_t va; 2518 2519 if (dumpsys_minidump) { 2520 md.md_paddr = ~0UL; /* Minidumps use virtual addresses. */ 2521 if (prev == NULL) { 2522 /* 1st: kernel .data and .bss. */ 2523 md.md_index = 1; 2524 md.md_vaddr = trunc_page((uintptr_t)_etext); 2525 md.md_size = round_page((uintptr_t)_end) - md.md_vaddr; 2526 return (&md); 2527 } 2528 switch (prev->md_index) { 2529 case 1: 2530 /* 2nd: msgbuf and tables (see pmap_bootstrap()). */ 2531 md.md_index = 2; 2532 md.md_vaddr = data_start; 2533 md.md_size = data_end - data_start; 2534 break; 2535 case 2: 2536 /* 3rd: kernel VM. */ 2537 va = prev->md_vaddr + prev->md_size; 2538 /* Find start of next chunk (from va). */ 2539 while (va < virtual_end) { 2540 /* Don't dump the buffer cache. */ 2541 if (va >= kmi.buffer_sva && 2542 va < kmi.buffer_eva) { 2543 va = kmi.buffer_eva; 2544 continue; 2545 } 2546 pte = pte_find(mmu, kernel_pmap, va); 2547 if (pte != NULL && PTE_ISVALID(pte)) 2548 break; 2549 va += PAGE_SIZE; 2550 } 2551 if (va < virtual_end) { 2552 md.md_vaddr = va; 2553 va += PAGE_SIZE; 2554 /* Find last page in chunk. */ 2555 while (va < virtual_end) { 2556 /* Don't run into the buffer cache. 
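					 * The buffer cache KVA window
					 * [kmi.buffer_sva, kmi.buffer_eva)
					 * is left out of minidumps: the scan
					 * above skips over it, and this scan
					 * ends the current chunk at its start.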
					 */
					if (va == kmi.buffer_sva)
						break;
					pte = pte_find(mmu, kernel_pmap, va);
					if (pte == NULL || !PTE_ISVALID(pte))
						break;
					va += PAGE_SIZE;
				}
				md.md_size = va - md.md_vaddr;
				break;
			}
			md.md_index = 3;
			/* FALLTHROUGH */
		default:
			return (NULL);
		}
	} else { /* Full physical memory dump (!dumpsys_minidump). */
		mem_regions(&physmem_regions, &physmem_regions_sz,
		    &availmem_regions, &availmem_regions_sz);

		if (prev == NULL) {
			/* First physical chunk. */
			md.md_paddr = physmem_regions[0].mr_start;
			md.md_size = physmem_regions[0].mr_size;
			md.md_vaddr = ~0UL;
			md.md_index = 1;
		} else if (md.md_index < physmem_regions_sz) {
			md.md_paddr = physmem_regions[md.md_index].mr_start;
			md.md_size = physmem_regions[md.md_index].mr_size;
			md.md_vaddr = ~0UL;
			md.md_index++;
		} else {
			/* There's no next physical chunk. */
			return (NULL);
		}
	}

	return (&md);
}

/*
 * Map a set of physical memory pages into the kernel virtual address space.
 * Return a pointer to where it is mapped. This routine is intended to be used
 * for mapping device memory, NOT real memory.
 */
static void *
mmu_booke_mapdev(mmu_t mmu, vm_paddr_t pa, vm_size_t size)
{
	void *res;
	uintptr_t va;
	vm_size_t sz;

	va = (pa >= 0x80000000) ? pa : (0xe2000000 + pa);
	res = (void *)va;

	do {
		sz = 1 << (ilog2(size) & ~1);
		if (bootverbose)
			printf("Wiring VA=%x to PA=%x (size=%x), "
			    "using TLB1[%d]\n", va, pa, sz, tlb1_idx);
		tlb1_set_entry(va, pa, sz, _TLB_ENTRY_IO);
		size -= sz;
		pa += sz;
		va += sz;
	} while (size > 0);

	return (res);
}

/*
 * 'Unmap' a range mapped by mmu_booke_mapdev().
 */
static void
mmu_booke_unmapdev(mmu_t mmu, vm_offset_t va, vm_size_t size)
{
	vm_offset_t base, offset;

	/*
	 * Unmap only if this is inside kernel virtual space.
	 */
	if ((va >= VM_MIN_KERNEL_ADDRESS) && (va <= VM_MAX_KERNEL_ADDRESS)) {
		base = trunc_page(va);
		offset = va & PAGE_MASK;
		size = roundup(offset + size, PAGE_SIZE);
		kmem_free(kernel_map, base, size);
	}
}

/*
 * mmu_booke_object_init_pt preloads the ptes for a given object into the
 * specified pmap. This eliminates the blast of soft faults on process startup
 * and immediately after an mmap.
 */
static void
mmu_booke_object_init_pt(mmu_t mmu, pmap_t pmap, vm_offset_t addr,
    vm_object_t object, vm_pindex_t pindex, vm_size_t size)
{

	VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
	KASSERT(object->type == OBJT_DEVICE || object->type == OBJT_SG,
	    ("mmu_booke_object_init_pt: non-device object"));
}

/*
 * Perform the pmap work for mincore.
 */
static int
mmu_booke_mincore(mmu_t mmu, pmap_t pmap, vm_offset_t addr,
    vm_paddr_t *locked_pa)
{

	TODO;
	return (0);
}

/**************************************************************************/
/* TID handling */
/**************************************************************************/

/*
 * Allocate a TID. If necessary, steal one from someone else.
 * The new TID is flushed from the TLB before returning.
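 *
 * TIDs are handed out round-robin per CPU from the [TID_MIN, TID_MAX]
 * range via the per-CPU tid_next counter.  If the candidate TID is still
 * owned by another pmap it is stolen: the victim's pm_tid slot is reset to
 * TID_NONE and any stale TLB0 entries tagged with that TID are flushed by
 * tid_flush() before the TID is handed to the new owner.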
2678 */ 2679static tlbtid_t 2680tid_alloc(pmap_t pmap) 2681{ 2682 tlbtid_t tid; 2683 int thiscpu; 2684 2685 KASSERT((pmap != kernel_pmap), ("tid_alloc: kernel pmap")); 2686 2687 CTR2(KTR_PMAP, "%s: s (pmap = %p)", __func__, pmap); 2688 2689 thiscpu = PCPU_GET(cpuid); 2690 2691 tid = PCPU_GET(tid_next); 2692 if (tid > TID_MAX) 2693 tid = TID_MIN; 2694 PCPU_SET(tid_next, tid + 1); 2695 2696 /* If we are stealing TID then clear the relevant pmap's field */ 2697 if (tidbusy[thiscpu][tid] != NULL) { 2698 2699 CTR2(KTR_PMAP, "%s: warning: stealing tid %d", __func__, tid); 2700 2701 tidbusy[thiscpu][tid]->pm_tid[thiscpu] = TID_NONE; 2702 2703 /* Flush all entries from TLB0 matching this TID. */ 2704 tid_flush(tid); 2705 } 2706 2707 tidbusy[thiscpu][tid] = pmap; 2708 pmap->pm_tid[thiscpu] = tid; 2709 __asm __volatile("msync; isync"); 2710 2711 CTR3(KTR_PMAP, "%s: e (%02d next = %02d)", __func__, tid, 2712 PCPU_GET(tid_next)); 2713 2714 return (tid); 2715} 2716 2717/**************************************************************************/ 2718/* TLB0 handling */ 2719/**************************************************************************/ 2720 2721static void 2722tlb_print_entry(int i, uint32_t mas1, uint32_t mas2, uint32_t mas3, 2723 uint32_t mas7) 2724{ 2725 int as; 2726 char desc[3]; 2727 tlbtid_t tid; 2728 vm_size_t size; 2729 unsigned int tsize; 2730 2731 desc[2] = '\0'; 2732 if (mas1 & MAS1_VALID) 2733 desc[0] = 'V'; 2734 else 2735 desc[0] = ' '; 2736 2737 if (mas1 & MAS1_IPROT) 2738 desc[1] = 'P'; 2739 else 2740 desc[1] = ' '; 2741 2742 as = (mas1 & MAS1_TS_MASK) ? 1 : 0; 2743 tid = MAS1_GETTID(mas1); 2744 2745 tsize = (mas1 & MAS1_TSIZE_MASK) >> MAS1_TSIZE_SHIFT; 2746 size = 0; 2747 if (tsize) 2748 size = tsize2size(tsize); 2749 2750 debugf("%3d: (%s) [AS=%d] " 2751 "sz = 0x%08x tsz = %d tid = %d mas1 = 0x%08x " 2752 "mas2(va) = 0x%08x mas3(pa) = 0x%08x mas7 = 0x%08x\n", 2753 i, desc, as, size, tsize, tid, mas1, mas2, mas3, mas7); 2754} 2755 2756/* Convert TLB0 va and way number to tlb0[] table index. */ 2757static inline unsigned int 2758tlb0_tableidx(vm_offset_t va, unsigned int way) 2759{ 2760 unsigned int idx; 2761 2762 idx = (way * TLB0_ENTRIES_PER_WAY); 2763 idx += (va & MAS2_TLB0_ENTRY_IDX_MASK) >> MAS2_TLB0_ENTRY_IDX_SHIFT; 2764 return (idx); 2765} 2766 2767/* 2768 * Invalidate TLB0 entry. 
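 *
 * The caller must hold tlbivax_mutex (asserted below): the tlbivax/tlbsync
 * pair is intended to propagate the invalidation to the other cores on SMP
 * configurations, and only one such sequence may be in flight at a time.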
 */
static inline void
tlb0_flush_entry(vm_offset_t va)
{

	CTR2(KTR_PMAP, "%s: s va=0x%08x", __func__, va);

	mtx_assert(&tlbivax_mutex, MA_OWNED);

	__asm __volatile("tlbivax 0, %0" :: "r"(va & MAS2_EPN_MASK));
	__asm __volatile("isync; msync");
	__asm __volatile("tlbsync; msync");

	CTR1(KTR_PMAP, "%s: e", __func__);
}

/* Print out contents of the MAS registers for each TLB0 entry */
void
tlb0_print_tlbentries(void)
{
	uint32_t mas0, mas1, mas2, mas3, mas7;
	int entryidx, way, idx;

	debugf("TLB0 entries:\n");
	for (way = 0; way < TLB0_WAYS; way ++)
		for (entryidx = 0; entryidx < TLB0_ENTRIES_PER_WAY; entryidx++) {

			mas0 = MAS0_TLBSEL(0) | MAS0_ESEL(way);
			mtspr(SPR_MAS0, mas0);
			__asm __volatile("isync");

			mas2 = entryidx << MAS2_TLB0_ENTRY_IDX_SHIFT;
			mtspr(SPR_MAS2, mas2);

			__asm __volatile("isync; tlbre");

			mas1 = mfspr(SPR_MAS1);
			mas2 = mfspr(SPR_MAS2);
			mas3 = mfspr(SPR_MAS3);
			mas7 = mfspr(SPR_MAS7);

			idx = tlb0_tableidx(mas2, way);
			tlb_print_entry(idx, mas1, mas2, mas3, mas7);
		}
}

/**************************************************************************/
/* TLB1 handling */
/**************************************************************************/

/*
 * TLB1 mapping notes:
 *
 * TLB1[0] CCSRBAR
 * TLB1[1] Kernel text and data.
 * TLB1[2-15] Additional kernel text and data mappings (if required), PCI
 * windows, other device mappings.
 */

/*
 * Write the given entry to TLB1 hardware.
 * Uses a 32-bit pa; the 4 high-order RPN bits (MAS7) are cleared.
 */
static void
tlb1_write_entry(unsigned int idx)
{
	uint32_t mas0, mas7;

	//debugf("tlb1_write_entry: s\n");

	/* Clear high order RPN bits */
	mas7 = 0;

	/* Select entry */
	mas0 = MAS0_TLBSEL(1) | MAS0_ESEL(idx);
	//debugf("tlb1_write_entry: mas0 = 0x%08x\n", mas0);

	mtspr(SPR_MAS0, mas0);
	__asm __volatile("isync");
	mtspr(SPR_MAS1, tlb1[idx].mas1);
	__asm __volatile("isync");
	mtspr(SPR_MAS2, tlb1[idx].mas2);
	__asm __volatile("isync");
	mtspr(SPR_MAS3, tlb1[idx].mas3);
	__asm __volatile("isync");
	mtspr(SPR_MAS7, mas7);
	__asm __volatile("isync; tlbwe; isync; msync");

	//debugf("tlb1_write_entry: e\n");
}

/*
 * Return the largest uint value log such that 2^log <= num.
 */
static unsigned int
ilog2(unsigned int num)
{
	int lz;

	__asm ("cntlzw %0, %1" : "=r" (lz) : "r" (num));
	return (31 - lz);
}

/*
 * Convert TLB TSIZE value to mapped region size.
 */
static vm_size_t
tsize2size(unsigned int tsize)
{

	/*
	 * size = 4^tsize KB
	 * size = 4^tsize * 2^10 = 2^(2 * tsize + 10)
	 */

	return ((1 << (2 * tsize)) * 1024);
}

/*
 * Convert region size (must be a power of 4) to TLB TSIZE value.
 */
static unsigned int
size2tsize(vm_size_t size)
{

	return (ilog2(size) / 2 - 5);
}

/*
 * Register a permanent kernel mapping in TLB1.
 *
 * Entries are created starting from index 0 (current free entry is
 * kept in tlb1_idx) and are not supposed to be invalidated.
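 *
 * The size passed in must be one that size2tsize() can encode (4 KB, 16 KB,
 * 64 KB, ...).  For example, tlb1_init() below wires the CCSR register
 * block with:
 *
 *	tlb1_set_entry(CCSRBAR_VA, ccsrbar, CCSRBAR_SIZE, _TLB_ENTRY_IO);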
 */
static int
tlb1_set_entry(vm_offset_t va, vm_offset_t pa, vm_size_t size,
    uint32_t flags)
{
	uint32_t ts, tid;
	int tsize;

	if (tlb1_idx >= TLB1_ENTRIES) {
		printf("tlb1_set_entry: TLB1 full!\n");
		return (-1);
	}

	/* Convert size to TSIZE */
	tsize = size2tsize(size);

	tid = (TID_KERNEL << MAS1_TID_SHIFT) & MAS1_TID_MASK;
	/* XXX TS is hard-coded to 0 for now as we only use a single address space. */
	ts = (0 << MAS1_TS_SHIFT) & MAS1_TS_MASK;

	/* XXX LOCK tlb1[] */

	tlb1[tlb1_idx].mas1 = MAS1_VALID | MAS1_IPROT | ts | tid;
	tlb1[tlb1_idx].mas1 |= ((tsize << MAS1_TSIZE_SHIFT) & MAS1_TSIZE_MASK);
	tlb1[tlb1_idx].mas2 = (va & MAS2_EPN_MASK) | flags;

	/* Set supervisor RWX permission bits */
	tlb1[tlb1_idx].mas3 = (pa & MAS3_RPN) | MAS3_SR | MAS3_SW | MAS3_SX;

	tlb1_write_entry(tlb1_idx++);

	/* XXX UNLOCK tlb1[] */

	/*
	 * XXX in general TLB1 updates should be propagated between CPUs,
	 * since the current design assumes the same TLB1 set-up on all
	 * cores.
	 */
	return (0);
}

/*
 * Map a contiguous RAM region into TLB1, using at most
 * KERNEL_REGION_MAX_TLB_ENTRIES entries.
 *
 * If necessary, round up the last entry size and return the total size
 * used by all allocated entries.
 */
vm_size_t
tlb1_mapin_region(vm_offset_t va, vm_paddr_t pa, vm_size_t size)
{
	vm_size_t pgs[KERNEL_REGION_MAX_TLB_ENTRIES];
	vm_size_t mapped, pgsz, base, mask;
	int idx, nents;

	/* Round up to the next 1M */
	size = (size + (1 << 20) - 1) & ~((1 << 20) - 1);

	mapped = 0;
	idx = 0;
	base = va;
	pgsz = 64*1024*1024;
	while (mapped < size) {
		while (mapped < size && idx < KERNEL_REGION_MAX_TLB_ENTRIES) {
			while (pgsz > (size - mapped))
				pgsz >>= 2;
			pgs[idx++] = pgsz;
			mapped += pgsz;
		}

		/* We under-map. Correct for this. */
		if (mapped < size) {
			while (pgs[idx - 1] == pgsz) {
				idx--;
				mapped -= pgsz;
			}
			/* XXX We may increase beyond our starting point. */
			pgsz <<= 2;
			pgs[idx++] = pgsz;
			mapped += pgsz;
		}
	}

	nents = idx;
	mask = pgs[0] - 1;
	/* Align address to the boundary */
	if (va & mask) {
		va = (va + mask) & ~mask;
		pa = (pa + mask) & ~mask;
	}

	for (idx = 0; idx < nents; idx++) {
		pgsz = pgs[idx];
		debugf("%u: %x -> %x, size=%x\n", idx, pa, va, pgsz);
		tlb1_set_entry(va, pa, pgsz, _TLB_ENTRY_MEM);
		pa += pgsz;
		va += pgsz;
	}

	mapped = (va - base);
	debugf("mapped size 0x%08x (wasted space 0x%08x)\n",
	    mapped, mapped - size);
	return (mapped);
}

/*
 * TLB1 initialization routine, to be called after the very first
 * assembler level setup done in locore.S.
 */
void
tlb1_init(vm_offset_t ccsrbar)
{
	uint32_t mas0, mas1, mas3;
	uint32_t tsz;
	u_int i;

	if (bootinfo != NULL && bootinfo[0] != 1) {
		tlb1_idx = *((uint16_t *)(bootinfo + 8));
	} else
		tlb1_idx = 1;

	/* The first entry/entries are used to map the kernel.
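	 * The loop below reads those boot-time entries back from the MAS
	 * registers into the in-memory tlb1[] shadow, records the kernel
	 * load address (kernload) from entry 0 and accumulates their sizes
	 * into kernsize.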
*/ 3024 for (i = 0; i < tlb1_idx; i++) { 3025 mas0 = MAS0_TLBSEL(1) | MAS0_ESEL(i); 3026 mtspr(SPR_MAS0, mas0); 3027 __asm __volatile("isync; tlbre"); 3028 3029 mas1 = mfspr(SPR_MAS1); 3030 if ((mas1 & MAS1_VALID) == 0) 3031 continue; 3032 3033 mas3 = mfspr(SPR_MAS3); 3034 3035 tlb1[i].mas1 = mas1; 3036 tlb1[i].mas2 = mfspr(SPR_MAS2); 3037 tlb1[i].mas3 = mas3; 3038 3039 if (i == 0) 3040 kernload = mas3 & MAS3_RPN; 3041 3042 tsz = (mas1 & MAS1_TSIZE_MASK) >> MAS1_TSIZE_SHIFT; 3043 kernsize += (tsz > 0) ? tsize2size(tsz) : 0; 3044 } 3045 3046 /* Map in CCSRBAR. */ 3047 tlb1_set_entry(CCSRBAR_VA, ccsrbar, CCSRBAR_SIZE, _TLB_ENTRY_IO); 3048 3049 /* Setup TLB miss defaults */ 3050 set_mas4_defaults(); 3051} 3052 3053/* 3054 * Setup MAS4 defaults. 3055 * These values are loaded to MAS0-2 on a TLB miss. 3056 */ 3057static void 3058set_mas4_defaults(void) 3059{ 3060 uint32_t mas4; 3061 3062 /* Defaults: TLB0, PID0, TSIZED=4K */ 3063 mas4 = MAS4_TLBSELD0; 3064 mas4 |= (TLB_SIZE_4K << MAS4_TSIZED_SHIFT) & MAS4_TSIZED_MASK; 3065#ifdef SMP 3066 mas4 |= MAS4_MD; 3067#endif 3068 mtspr(SPR_MAS4, mas4); 3069 __asm __volatile("isync"); 3070} 3071 3072/* 3073 * Print out contents of the MAS registers for each TLB1 entry 3074 */ 3075void 3076tlb1_print_tlbentries(void) 3077{ 3078 uint32_t mas0, mas1, mas2, mas3, mas7; 3079 int i; 3080 3081 debugf("TLB1 entries:\n"); 3082 for (i = 0; i < TLB1_ENTRIES; i++) { 3083 3084 mas0 = MAS0_TLBSEL(1) | MAS0_ESEL(i); 3085 mtspr(SPR_MAS0, mas0); 3086 3087 __asm __volatile("isync; tlbre"); 3088 3089 mas1 = mfspr(SPR_MAS1); 3090 mas2 = mfspr(SPR_MAS2); 3091 mas3 = mfspr(SPR_MAS3); 3092 mas7 = mfspr(SPR_MAS7); 3093 3094 tlb_print_entry(i, mas1, mas2, mas3, mas7); 3095 } 3096} 3097 3098/* 3099 * Print out contents of the in-ram tlb1 table. 3100 */ 3101void 3102tlb1_print_entries(void) 3103{ 3104 int i; 3105 3106 debugf("tlb1[] table entries:\n"); 3107 for (i = 0; i < TLB1_ENTRIES; i++) 3108 tlb_print_entry(i, tlb1[i].mas1, tlb1[i].mas2, tlb1[i].mas3, 0); 3109} 3110 3111/* 3112 * Return 0 if the physical IO range is encompassed by one of the 3113 * the TLB1 entries, otherwise return related error code. 3114 */ 3115static int 3116tlb1_iomapped(int i, vm_paddr_t pa, vm_size_t size, vm_offset_t *va) 3117{ 3118 uint32_t prot; 3119 vm_paddr_t pa_start; 3120 vm_paddr_t pa_end; 3121 unsigned int entry_tsize; 3122 vm_size_t entry_size; 3123 3124 *va = (vm_offset_t)NULL; 3125 3126 /* Skip invalid entries */ 3127 if (!(tlb1[i].mas1 & MAS1_VALID)) 3128 return (EINVAL); 3129 3130 /* 3131 * The entry must be cache-inhibited, guarded, and r/w 3132 * so it can function as an i/o page 3133 */ 3134 prot = tlb1[i].mas2 & (MAS2_I | MAS2_G); 3135 if (prot != (MAS2_I | MAS2_G)) 3136 return (EPERM); 3137 3138 prot = tlb1[i].mas3 & (MAS3_SR | MAS3_SW); 3139 if (prot != (MAS3_SR | MAS3_SW)) 3140 return (EPERM); 3141 3142 /* The address should be within the entry range. */ 3143 entry_tsize = (tlb1[i].mas1 & MAS1_TSIZE_MASK) >> MAS1_TSIZE_SHIFT; 3144 KASSERT((entry_tsize), ("tlb1_iomapped: invalid entry tsize")); 3145 3146 entry_size = tsize2size(entry_tsize); 3147 pa_start = tlb1[i].mas3 & MAS3_RPN; 3148 pa_end = pa_start + entry_size - 1; 3149 3150 if ((pa < pa_start) || ((pa + size) > pa_end)) 3151 return (ERANGE); 3152 3153 /* Return virtual address of this mapping. */ 3154 *va = (tlb1[i].mas2 & MAS2_EPN_MASK) + (pa - pa_start); 3155 return (0); 3156} 3157