pmap.c revision 222069
/*-
 * Copyright (C) 2007-2009 Semihalf, Rafal Jaworowski <raj@semihalf.com>
 * Copyright (C) 2006 Semihalf, Marian Balakowicz <m8@semihalf.com>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN
 * NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
 * TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
 * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
 * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Some hw specific parts of this pmap were derived or influenced
 * by NetBSD's ibm4xx pmap module. More generic code is shared with
 * a few other pmap modules from the FreeBSD tree.
 */

/*
 * VM layout notes:
 *
 * Kernel and user threads run within one common virtual address space
 * defined by AS=0.
 *
 * Virtual address space layout:
 * -----------------------------
 * 0x0000_0000 - 0xafff_ffff	: user process
 * 0xb000_0000 - 0xbfff_ffff	: pmap_mapdev()-ed area (PCI/PCIE etc.)
 * 0xc000_0000 - 0xc0ff_ffff	: kernel reserved
 *   0xc000_0000 - data_end	: kernel code+data, env, metadata etc.
 *   0xc100_0000 - 0xfeef_ffff	: KVA
 *     0xc100_0000 - 0xc100_3fff : reserved for page zero/copy
 *     0xc100_4000 - 0xc200_3fff : reserved for ptbl bufs
 *     0xc200_4000 - 0xc200_8fff : guard page + kstack0
 *     0xc200_9000 - 0xfeef_ffff : actual free KVA space
 * 0xfef0_0000 - 0xffff_ffff	: I/O devices region
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: head/sys/powerpc/booke/pmap.c 222069 2011-05-18 16:42:01Z attilio $");

#include <sys/types.h>
#include <sys/param.h>
#include <sys/malloc.h>
#include <sys/ktr.h>
#include <sys/proc.h>
#include <sys/user.h>
#include <sys/queue.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/msgbuf.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/sched.h>
#include <sys/smp.h>
#include <sys/vmmeter.h>

#include <vm/vm.h>
#include <vm/vm_page.h>
#include <vm/vm_kern.h>
#include <vm/vm_pageout.h>
#include <vm/vm_extern.h>
#include <vm/vm_object.h>
#include <vm/vm_param.h>
#include <vm/vm_map.h>
#include <vm/vm_pager.h>
#include <vm/uma.h>

#include <machine/cpu.h>
#include <machine/pcb.h>
#include <machine/platform.h>

#include <machine/tlb.h>
#include <machine/spr.h>
#include <machine/vmparam.h>
#include <machine/md_var.h>
#include <machine/mmuvar.h>
#include <machine/pmap.h>
#include <machine/pte.h>

#include "mmu_if.h"

#define DEBUG
#undef DEBUG

#ifdef DEBUG
#define debugf(fmt, args...) printf(fmt, ##args)
#else
#define debugf(fmt, args...)
#endif

#define TODO			panic("%s: not implemented", __func__);

#include "opt_sched.h"
#ifndef SCHED_4BSD
#error "e500 only works with SCHED_4BSD which uses a global scheduler lock."
#endif
extern struct mtx sched_lock;

extern int dumpsys_minidump;

extern unsigned char _etext[];
extern unsigned char _end[];

/* Kernel physical load address. */
extern uint32_t kernload;
vm_offset_t kernstart;
vm_size_t kernsize;

/* Message buffer and tables. */
static vm_offset_t data_start;
static vm_size_t data_end;

/* Phys/avail memory regions. */
static struct mem_region *availmem_regions;
static int availmem_regions_sz;
static struct mem_region *physmem_regions;
static int physmem_regions_sz;

/* Reserved KVA space and mutex for mmu_booke_zero_page. */
static vm_offset_t zero_page_va;
static struct mtx zero_page_mutex;

static struct mtx tlbivax_mutex;

/*
 * Reserved KVA space for mmu_booke_zero_page_idle. This is used
 * by the idle thread only, no lock required.
 */
static vm_offset_t zero_page_idle_va;

/* Reserved KVA space and mutex for mmu_booke_copy_page. */
static vm_offset_t copy_page_src_va;
static vm_offset_t copy_page_dst_va;
static struct mtx copy_page_mutex;

/**************************************************************************/
/* PMAP */
/**************************************************************************/

static void mmu_booke_enter_locked(mmu_t, pmap_t, vm_offset_t, vm_page_t,
    vm_prot_t, boolean_t);

unsigned int kptbl_min;		/* Index of the first kernel ptbl. */
unsigned int kernel_ptbls;	/* Number of KVA ptbls. */

/*
 * If the user pmap is processed with mmu_booke_remove and the resident
 * count drops to 0, there are no more pages to remove, so we need not
 * continue.
 */
#define PMAP_REMOVE_DONE(pmap) \
	((pmap) != kernel_pmap && (pmap)->pm_stats.resident_count == 0)

extern void tid_flush(tlbtid_t);

/**************************************************************************/
/* TLB and TID handling */
/**************************************************************************/

/* Translation ID busy table */
static volatile pmap_t tidbusy[MAXCPU][TID_MAX + 1];

/*
 * TLB0 capabilities (entry, way numbers etc.). These can vary between e500
 * core revisions and should be read from h/w registers during early config.
 */
uint32_t tlb0_entries;
uint32_t tlb0_ways;
uint32_t tlb0_entries_per_way;

#define TLB0_ENTRIES		(tlb0_entries)
#define TLB0_WAYS		(tlb0_ways)
#define TLB0_ENTRIES_PER_WAY	(tlb0_entries_per_way)

#define TLB1_ENTRIES 16

/* In-ram copy of the TLB1 */
static tlb_entry_t tlb1[TLB1_ENTRIES];

/* Next free entry in the TLB1 */
static unsigned int tlb1_idx;

static tlbtid_t tid_alloc(struct pmap *);

static void tlb_print_entry(int, uint32_t, uint32_t, uint32_t, uint32_t);

static int tlb1_set_entry(vm_offset_t, vm_offset_t, vm_size_t, uint32_t);
static void tlb1_write_entry(unsigned int);
static int tlb1_iomapped(int, vm_paddr_t, vm_size_t, vm_offset_t *);
static vm_size_t tlb1_mapin_region(vm_offset_t, vm_offset_t, vm_size_t);

static vm_size_t tsize2size(unsigned int);
static unsigned int size2tsize(vm_size_t);
static unsigned int ilog2(unsigned int);

static void set_mas4_defaults(void);

static inline void tlb0_flush_entry(vm_offset_t);
static inline unsigned int tlb0_tableidx(vm_offset_t, unsigned int);

/**************************************************************************/
/* Page table management */
/**************************************************************************/

/* Data for the pv entry allocation mechanism */
static uma_zone_t pvzone;
static struct vm_object pvzone_obj;
static int pv_entry_count = 0, pv_entry_max = 0, pv_entry_high_water = 0;

#define PV_ENTRY_ZONE_MIN	2048	/* min pv entries in uma zone */

#ifndef PMAP_SHPGPERPROC
#define PMAP_SHPGPERPROC	200
#endif

static void ptbl_init(void);
static struct ptbl_buf *ptbl_buf_alloc(void);
static void ptbl_buf_free(struct ptbl_buf *);
static void ptbl_free_pmap_ptbl(pmap_t, pte_t *);

static pte_t *ptbl_alloc(mmu_t, pmap_t, unsigned int);
static void ptbl_free(mmu_t, pmap_t, unsigned int);
static void ptbl_hold(mmu_t, pmap_t, unsigned int);
static int ptbl_unhold(mmu_t, pmap_t, unsigned int);

static vm_paddr_t pte_vatopa(mmu_t, pmap_t, vm_offset_t);
static pte_t *pte_find(mmu_t, pmap_t, vm_offset_t);
static void pte_enter(mmu_t, pmap_t, vm_page_t, vm_offset_t, uint32_t);
static int pte_remove(mmu_t, pmap_t, vm_offset_t, uint8_t);

static pv_entry_t pv_alloc(void);
static void pv_free(pv_entry_t);
static void pv_insert(pmap_t, vm_offset_t, vm_page_t);
static void pv_remove(pmap_t, vm_offset_t, vm_page_t);

/* Number of kva ptbl buffers, each covering one ptbl (PTBL_PAGES). */
#define PTBL_BUFS		(128 * 16)

struct ptbl_buf {
	TAILQ_ENTRY(ptbl_buf) link;	/* list link */
	vm_offset_t kva;		/* va of mapping */
};
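
/*
 * User page tables are two-level: pm_pdir[] holds one pointer per
 * PDIR_SIZE-sized slice of the address space, each pointing to a ptbl of
 * pte_t entries spanning PTBL_PAGES pages. A lookup therefore decomposes
 * a va as
 *
 *	pte = &pmap->pm_pdir[PDIR_IDX(va)][PTBL_IDX(va)];
 *
 * (see pte_find() below). A ptbl_buf merely records the KVA a ptbl is
 * mapped at, so the mapping can be torn down and the buf returned to the
 * freelist when the ptbl is released.
 */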
/* ptbl free list and a lock used for access synchronization. */
static TAILQ_HEAD(, ptbl_buf) ptbl_buf_freelist;
static struct mtx ptbl_buf_freelist_lock;

/* Base address of kva space allocated for ptbl bufs. */
static vm_offset_t ptbl_buf_pool_vabase;

/* Pointer to ptbl_buf structures. */
static struct ptbl_buf *ptbl_bufs;

void pmap_bootstrap_ap(volatile uint32_t *);

/*
 * Kernel MMU interface
 */
static void mmu_booke_change_wiring(mmu_t, pmap_t, vm_offset_t, boolean_t);
static void mmu_booke_clear_modify(mmu_t, vm_page_t);
static void mmu_booke_clear_reference(mmu_t, vm_page_t);
static void mmu_booke_copy(mmu_t, pmap_t, pmap_t, vm_offset_t,
    vm_size_t, vm_offset_t);
static void mmu_booke_copy_page(mmu_t, vm_page_t, vm_page_t);
static void mmu_booke_enter(mmu_t, pmap_t, vm_offset_t, vm_page_t,
    vm_prot_t, boolean_t);
static void mmu_booke_enter_object(mmu_t, pmap_t, vm_offset_t, vm_offset_t,
    vm_page_t, vm_prot_t);
static void mmu_booke_enter_quick(mmu_t, pmap_t, vm_offset_t, vm_page_t,
    vm_prot_t);
static vm_paddr_t mmu_booke_extract(mmu_t, pmap_t, vm_offset_t);
static vm_page_t mmu_booke_extract_and_hold(mmu_t, pmap_t, vm_offset_t,
    vm_prot_t);
static void mmu_booke_init(mmu_t);
static boolean_t mmu_booke_is_modified(mmu_t, vm_page_t);
static boolean_t mmu_booke_is_prefaultable(mmu_t, pmap_t, vm_offset_t);
static boolean_t mmu_booke_is_referenced(mmu_t, vm_page_t);
static boolean_t mmu_booke_ts_referenced(mmu_t, vm_page_t);
static vm_offset_t mmu_booke_map(mmu_t, vm_offset_t *, vm_offset_t, vm_offset_t,
    int);
static int mmu_booke_mincore(mmu_t, pmap_t, vm_offset_t,
    vm_paddr_t *);
static void mmu_booke_object_init_pt(mmu_t, pmap_t, vm_offset_t,
    vm_object_t, vm_pindex_t, vm_size_t);
static boolean_t mmu_booke_page_exists_quick(mmu_t, pmap_t, vm_page_t);
static void mmu_booke_page_init(mmu_t, vm_page_t);
static int mmu_booke_page_wired_mappings(mmu_t, vm_page_t);
static void mmu_booke_pinit(mmu_t, pmap_t);
static void mmu_booke_pinit0(mmu_t, pmap_t);
static void mmu_booke_protect(mmu_t, pmap_t, vm_offset_t, vm_offset_t,
    vm_prot_t);
static void mmu_booke_qenter(mmu_t, vm_offset_t, vm_page_t *, int);
static void mmu_booke_qremove(mmu_t, vm_offset_t, int);
static void mmu_booke_release(mmu_t, pmap_t);
static void mmu_booke_remove(mmu_t, pmap_t, vm_offset_t, vm_offset_t);
static void mmu_booke_remove_all(mmu_t, vm_page_t);
static void mmu_booke_remove_write(mmu_t, vm_page_t);
static void mmu_booke_zero_page(mmu_t, vm_page_t);
static void mmu_booke_zero_page_area(mmu_t, vm_page_t, int, int);
static void mmu_booke_zero_page_idle(mmu_t, vm_page_t);
static void mmu_booke_activate(mmu_t, struct thread *);
static void mmu_booke_deactivate(mmu_t, struct thread *);
static void mmu_booke_bootstrap(mmu_t, vm_offset_t, vm_offset_t);
static void *mmu_booke_mapdev(mmu_t, vm_offset_t, vm_size_t);
static void mmu_booke_unmapdev(mmu_t, vm_offset_t, vm_size_t);
static vm_paddr_t mmu_booke_kextract(mmu_t, vm_offset_t);
static void mmu_booke_kenter(mmu_t, vm_offset_t, vm_offset_t);
static void mmu_booke_kremove(mmu_t, vm_offset_t);
static boolean_t mmu_booke_dev_direct_mapped(mmu_t, vm_offset_t, vm_size_t);
static void mmu_booke_sync_icache(mmu_t, pmap_t, vm_offset_t,
    vm_size_t);
static vm_offset_t mmu_booke_dumpsys_map(mmu_t, struct pmap_md *,
    vm_size_t, vm_size_t *);
static void mmu_booke_dumpsys_unmap(mmu_t, struct pmap_md *,
    vm_size_t, vm_offset_t);
static struct pmap_md *mmu_booke_scan_md(mmu_t, struct pmap_md *);
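
/*
 * The mmu_booke_* functions below are not called directly: they are
 * registered in mmu_booke_methods[] and reached through the mmu_if
 * KOBJ dispatcher (see MMU_DEF() below), so e.g. a pmap_enter() call
 * from the MI VM code resolves to mmu_booke_enter() at run time.
 */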
static mmu_method_t mmu_booke_methods[] = {
	/* pmap dispatcher interface */
	MMUMETHOD(mmu_change_wiring,	mmu_booke_change_wiring),
	MMUMETHOD(mmu_clear_modify,	mmu_booke_clear_modify),
	MMUMETHOD(mmu_clear_reference,	mmu_booke_clear_reference),
	MMUMETHOD(mmu_copy,		mmu_booke_copy),
	MMUMETHOD(mmu_copy_page,	mmu_booke_copy_page),
	MMUMETHOD(mmu_enter,		mmu_booke_enter),
	MMUMETHOD(mmu_enter_object,	mmu_booke_enter_object),
	MMUMETHOD(mmu_enter_quick,	mmu_booke_enter_quick),
	MMUMETHOD(mmu_extract,		mmu_booke_extract),
	MMUMETHOD(mmu_extract_and_hold,	mmu_booke_extract_and_hold),
	MMUMETHOD(mmu_init,		mmu_booke_init),
	MMUMETHOD(mmu_is_modified,	mmu_booke_is_modified),
	MMUMETHOD(mmu_is_prefaultable,	mmu_booke_is_prefaultable),
	MMUMETHOD(mmu_is_referenced,	mmu_booke_is_referenced),
	MMUMETHOD(mmu_ts_referenced,	mmu_booke_ts_referenced),
	MMUMETHOD(mmu_map,		mmu_booke_map),
	MMUMETHOD(mmu_mincore,		mmu_booke_mincore),
	MMUMETHOD(mmu_object_init_pt,	mmu_booke_object_init_pt),
	MMUMETHOD(mmu_page_exists_quick,mmu_booke_page_exists_quick),
	MMUMETHOD(mmu_page_init,	mmu_booke_page_init),
	MMUMETHOD(mmu_page_wired_mappings, mmu_booke_page_wired_mappings),
	MMUMETHOD(mmu_pinit,		mmu_booke_pinit),
	MMUMETHOD(mmu_pinit0,		mmu_booke_pinit0),
	MMUMETHOD(mmu_protect,		mmu_booke_protect),
	MMUMETHOD(mmu_qenter,		mmu_booke_qenter),
	MMUMETHOD(mmu_qremove,		mmu_booke_qremove),
	MMUMETHOD(mmu_release,		mmu_booke_release),
	MMUMETHOD(mmu_remove,		mmu_booke_remove),
	MMUMETHOD(mmu_remove_all,	mmu_booke_remove_all),
	MMUMETHOD(mmu_remove_write,	mmu_booke_remove_write),
	MMUMETHOD(mmu_sync_icache,	mmu_booke_sync_icache),
	MMUMETHOD(mmu_zero_page,	mmu_booke_zero_page),
	MMUMETHOD(mmu_zero_page_area,	mmu_booke_zero_page_area),
	MMUMETHOD(mmu_zero_page_idle,	mmu_booke_zero_page_idle),
	MMUMETHOD(mmu_activate,		mmu_booke_activate),
	MMUMETHOD(mmu_deactivate,	mmu_booke_deactivate),

	/* Internal interfaces */
	MMUMETHOD(mmu_bootstrap,	mmu_booke_bootstrap),
	MMUMETHOD(mmu_dev_direct_mapped,mmu_booke_dev_direct_mapped),
	MMUMETHOD(mmu_mapdev,		mmu_booke_mapdev),
	MMUMETHOD(mmu_kenter,		mmu_booke_kenter),
	MMUMETHOD(mmu_kextract,		mmu_booke_kextract),
/*	MMUMETHOD(mmu_kremove,		mmu_booke_kremove),	*/
	MMUMETHOD(mmu_unmapdev,		mmu_booke_unmapdev),

	/* dumpsys() support */
	MMUMETHOD(mmu_dumpsys_map,	mmu_booke_dumpsys_map),
	MMUMETHOD(mmu_dumpsys_unmap,	mmu_booke_dumpsys_unmap),
	MMUMETHOD(mmu_scan_md,		mmu_booke_scan_md),

	{ 0, 0 }
};

MMU_DEF(booke_mmu, MMU_TYPE_BOOKE, mmu_booke_methods, 0);

static inline void
tlb_miss_lock(void)
{
#ifdef SMP
	struct pcpu *pc;

	if (!smp_started)
		return;

	SLIST_FOREACH(pc, &cpuhead, pc_allcpu) {
		if (pc != pcpup) {

			CTR3(KTR_PMAP, "%s: tlb miss LOCK of CPU=%d, "
			    "tlb_lock=%p", __func__, pc->pc_cpuid,
			    pc->pc_booke_tlb_lock);

			KASSERT((pc->pc_cpuid != PCPU_GET(cpuid)),
			    ("tlb_miss_lock: tried to lock self"));

			tlb_lock(pc->pc_booke_tlb_lock);

			CTR1(KTR_PMAP, "%s: locked", __func__);
		}
	}
#endif
}

static inline void
tlb_miss_unlock(void)
{
#ifdef SMP
	struct pcpu *pc;

	if (!smp_started)
		return;

	SLIST_FOREACH(pc, &cpuhead, pc_allcpu) {
		if (pc != pcpup) {
			CTR2(KTR_PMAP, "%s: tlb miss UNLOCK of CPU=%d",
			    __func__, pc->pc_cpuid);

			tlb_unlock(pc->pc_booke_tlb_lock);

			CTR1(KTR_PMAP, "%s: unlocked", __func__);
		}
	}
#endif
}
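
/*
 * tlb_miss_lock() quiesces the other CPUs' s/w TLB miss handlers by taking
 * every remote pc_booke_tlb_lock, so a PTE and its TLB0 entry can be
 * changed atomically with respect to remote miss handling. Throughout this
 * file the pattern pairs it with the tlbivax mutex:
 *
 *	mtx_lock_spin(&tlbivax_mutex);
 *	tlb_miss_lock();
 *	tlb0_flush_entry(va);
 *	... update pte->flags / pte->rpn ...
 *	tlb_miss_unlock();
 *	mtx_unlock_spin(&tlbivax_mutex);
 */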
/* Return number of entries in TLB0. */
static __inline void
tlb0_get_tlbconf(void)
{
	uint32_t tlb0_cfg;

	tlb0_cfg = mfspr(SPR_TLB0CFG);
	tlb0_entries = tlb0_cfg & TLBCFG_NENTRY_MASK;
	tlb0_ways = (tlb0_cfg & TLBCFG_ASSOC_MASK) >> TLBCFG_ASSOC_SHIFT;
	tlb0_entries_per_way = tlb0_entries / tlb0_ways;
}

/* Initialize pool of kva ptbl buffers. */
static void
ptbl_init(void)
{
	int i;

	CTR3(KTR_PMAP, "%s: s (ptbl_bufs = 0x%08x size 0x%08x)", __func__,
	    (uint32_t)ptbl_bufs, sizeof(struct ptbl_buf) * PTBL_BUFS);
	CTR3(KTR_PMAP, "%s: s (ptbl_buf_pool_vabase = 0x%08x size = 0x%08x)",
	    __func__, ptbl_buf_pool_vabase, PTBL_BUFS * PTBL_PAGES * PAGE_SIZE);

	mtx_init(&ptbl_buf_freelist_lock, "ptbl bufs lock", NULL, MTX_DEF);
	TAILQ_INIT(&ptbl_buf_freelist);

	for (i = 0; i < PTBL_BUFS; i++) {
		ptbl_bufs[i].kva = ptbl_buf_pool_vabase + i * PTBL_PAGES * PAGE_SIZE;
		TAILQ_INSERT_TAIL(&ptbl_buf_freelist, &ptbl_bufs[i], link);
	}
}

/* Get a ptbl_buf from the freelist. */
static struct ptbl_buf *
ptbl_buf_alloc(void)
{
	struct ptbl_buf *buf;

	mtx_lock(&ptbl_buf_freelist_lock);
	buf = TAILQ_FIRST(&ptbl_buf_freelist);
	if (buf != NULL)
		TAILQ_REMOVE(&ptbl_buf_freelist, buf, link);
	mtx_unlock(&ptbl_buf_freelist_lock);

	CTR2(KTR_PMAP, "%s: buf = %p", __func__, buf);

	return (buf);
}

/* Return ptbl buf to free pool. */
static void
ptbl_buf_free(struct ptbl_buf *buf)
{

	CTR2(KTR_PMAP, "%s: buf = %p", __func__, buf);

	mtx_lock(&ptbl_buf_freelist_lock);
	TAILQ_INSERT_TAIL(&ptbl_buf_freelist, buf, link);
	mtx_unlock(&ptbl_buf_freelist_lock);
}

/*
 * Search the pmap's list of allocated ptbl bufs for the buf backing the
 * given ptbl, and free it.
 */
static void
ptbl_free_pmap_ptbl(pmap_t pmap, pte_t *ptbl)
{
	struct ptbl_buf *pbuf;

	CTR2(KTR_PMAP, "%s: ptbl = %p", __func__, ptbl);

	PMAP_LOCK_ASSERT(pmap, MA_OWNED);

	TAILQ_FOREACH(pbuf, &pmap->pm_ptbl_list, link)
		if (pbuf->kva == (vm_offset_t)ptbl) {
			/* Remove from pmap ptbl buf list. */
			TAILQ_REMOVE(&pmap->pm_ptbl_list, pbuf, link);

			/* Free corresponding ptbl buf. */
			ptbl_buf_free(pbuf);
			break;
		}
}

/* Allocate page table. */
static pte_t *
ptbl_alloc(mmu_t mmu, pmap_t pmap, unsigned int pdir_idx)
{
	vm_page_t mtbl[PTBL_PAGES];
	vm_page_t m;
	struct ptbl_buf *pbuf;
	unsigned int pidx;
	pte_t *ptbl;
	int i;

	CTR4(KTR_PMAP, "%s: pmap = %p su = %d pdir_idx = %d", __func__, pmap,
	    (pmap == kernel_pmap), pdir_idx);

	KASSERT((pdir_idx <= (VM_MAXUSER_ADDRESS / PDIR_SIZE)),
	    ("ptbl_alloc: invalid pdir_idx"));
	KASSERT((pmap->pm_pdir[pdir_idx] == NULL),
	    ("pte_alloc: valid ptbl entry exists!"));

	pbuf = ptbl_buf_alloc();
	if (pbuf == NULL)
		panic("pte_alloc: couldn't alloc kernel virtual memory");

	ptbl = (pte_t *)pbuf->kva;

	CTR2(KTR_PMAP, "%s: ptbl kva = %p", __func__, ptbl);

	/* Allocate ptbl pages, this will sleep! */
	for (i = 0; i < PTBL_PAGES; i++) {
		pidx = (PTBL_PAGES * pdir_idx) + i;
		while ((m = vm_page_alloc(NULL, pidx,
		    VM_ALLOC_NOOBJ | VM_ALLOC_WIRED)) == NULL) {

			PMAP_UNLOCK(pmap);
			vm_page_unlock_queues();
			VM_WAIT;
			vm_page_lock_queues();
			PMAP_LOCK(pmap);
		}
		mtbl[i] = m;
	}

	/* Map allocated pages into kernel_pmap. */
	mmu_booke_qenter(mmu, (vm_offset_t)ptbl, mtbl, PTBL_PAGES);

	/* Zero whole ptbl. */
	bzero((caddr_t)ptbl, PTBL_PAGES * PAGE_SIZE);

	/* Add pbuf to the pmap ptbl bufs list. */
	TAILQ_INSERT_TAIL(&pmap->pm_ptbl_list, pbuf, link);

	return (ptbl);
}

/* Free ptbl pages and invalidate pdir entry. */
static void
ptbl_free(mmu_t mmu, pmap_t pmap, unsigned int pdir_idx)
{
	pte_t *ptbl;
	vm_paddr_t pa;
	vm_offset_t va;
	vm_page_t m;
	int i;

	CTR4(KTR_PMAP, "%s: pmap = %p su = %d pdir_idx = %d", __func__, pmap,
	    (pmap == kernel_pmap), pdir_idx);

	KASSERT((pdir_idx <= (VM_MAXUSER_ADDRESS / PDIR_SIZE)),
	    ("ptbl_free: invalid pdir_idx"));

	ptbl = pmap->pm_pdir[pdir_idx];

	CTR2(KTR_PMAP, "%s: ptbl = %p", __func__, ptbl);

	KASSERT((ptbl != NULL), ("ptbl_free: null ptbl"));

	/*
	 * Invalidate the pdir entry as soon as possible, so that other CPUs
	 * don't attempt to look up the page tables we are releasing.
	 */
	mtx_lock_spin(&tlbivax_mutex);
	tlb_miss_lock();

	pmap->pm_pdir[pdir_idx] = NULL;

	tlb_miss_unlock();
	mtx_unlock_spin(&tlbivax_mutex);

	for (i = 0; i < PTBL_PAGES; i++) {
		va = ((vm_offset_t)ptbl + (i * PAGE_SIZE));
		pa = pte_vatopa(mmu, kernel_pmap, va);
		m = PHYS_TO_VM_PAGE(pa);
		vm_page_free_zero(m);
		atomic_subtract_int(&cnt.v_wire_count, 1);
		mmu_booke_kremove(mmu, va);
	}

	ptbl_free_pmap_ptbl(pmap, ptbl);
}

/*
 * Decrement ptbl pages hold count and attempt to free ptbl pages.
 * Called when removing pte entry from ptbl.
 *
 * Return 1 if ptbl pages were freed.
 */
static int
ptbl_unhold(mmu_t mmu, pmap_t pmap, unsigned int pdir_idx)
{
	pte_t *ptbl;
	vm_paddr_t pa;
	vm_page_t m;
	int i;

	CTR4(KTR_PMAP, "%s: pmap = %p su = %d pdir_idx = %d", __func__, pmap,
	    (pmap == kernel_pmap), pdir_idx);

	KASSERT((pdir_idx <= (VM_MAXUSER_ADDRESS / PDIR_SIZE)),
	    ("ptbl_unhold: invalid pdir_idx"));
	KASSERT((pmap != kernel_pmap),
	    ("ptbl_unhold: unholding kernel ptbl!"));

	ptbl = pmap->pm_pdir[pdir_idx];

	//debugf("ptbl_unhold: ptbl = 0x%08x\n", (u_int32_t)ptbl);
	KASSERT(((vm_offset_t)ptbl >= VM_MIN_KERNEL_ADDRESS),
	    ("ptbl_unhold: non kva ptbl"));

	/* decrement hold count */
	for (i = 0; i < PTBL_PAGES; i++) {
		pa = pte_vatopa(mmu, kernel_pmap,
		    (vm_offset_t)ptbl + (i * PAGE_SIZE));
		m = PHYS_TO_VM_PAGE(pa);
		m->wire_count--;
	}

	/*
	 * Free ptbl pages if there are no pte entries in this ptbl.
	 * wire_count has the same value for all ptbl pages, so check the
	 * last page.
	 */
	if (m->wire_count == 0) {
		ptbl_free(mmu, pmap, pdir_idx);

		//debugf("ptbl_unhold: e (freed ptbl)\n");
		return (1);
	}

	return (0);
}
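
/*
 * ptbl reference counting: every pte_enter() into a user ptbl bumps the
 * wire_count of each of its pages via ptbl_hold() below, and every
 * pte_remove() drops it via ptbl_unhold() above; once the count reaches
 * zero the whole ptbl is torn down with ptbl_free(). Kernel ptbls are
 * static and never reference counted.
 */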
/*
 * Increment hold count for ptbl pages. This routine is used when a new pte
 * entry is being inserted into the ptbl.
 */
static void
ptbl_hold(mmu_t mmu, pmap_t pmap, unsigned int pdir_idx)
{
	vm_paddr_t pa;
	pte_t *ptbl;
	vm_page_t m;
	int i;

	CTR3(KTR_PMAP, "%s: pmap = %p pdir_idx = %d", __func__, pmap,
	    pdir_idx);

	KASSERT((pdir_idx <= (VM_MAXUSER_ADDRESS / PDIR_SIZE)),
	    ("ptbl_hold: invalid pdir_idx"));
	KASSERT((pmap != kernel_pmap),
	    ("ptbl_hold: holding kernel ptbl!"));

	ptbl = pmap->pm_pdir[pdir_idx];

	KASSERT((ptbl != NULL), ("ptbl_hold: null ptbl"));

	for (i = 0; i < PTBL_PAGES; i++) {
		pa = pte_vatopa(mmu, kernel_pmap,
		    (vm_offset_t)ptbl + (i * PAGE_SIZE));
		m = PHYS_TO_VM_PAGE(pa);
		m->wire_count++;
	}
}

/* Allocate pv_entry structure. */
pv_entry_t
pv_alloc(void)
{
	pv_entry_t pv;

	pv_entry_count++;
	if (pv_entry_count > pv_entry_high_water)
		pagedaemon_wakeup();
	pv = uma_zalloc(pvzone, M_NOWAIT);

	return (pv);
}

/* Free pv_entry structure. */
static __inline void
pv_free(pv_entry_t pve)
{

	pv_entry_count--;
	uma_zfree(pvzone, pve);
}

/* Allocate and initialize pv_entry structure. */
static void
pv_insert(pmap_t pmap, vm_offset_t va, vm_page_t m)
{
	pv_entry_t pve;

	//int su = (pmap == kernel_pmap);
	//debugf("pv_insert: s (su = %d pmap = 0x%08x va = 0x%08x m = 0x%08x)\n", su,
	//	(u_int32_t)pmap, va, (u_int32_t)m);

	pve = pv_alloc();
	if (pve == NULL)
		panic("pv_insert: no pv entries!");

	pve->pv_pmap = pmap;
	pve->pv_va = va;

	/* add to pv_list */
	PMAP_LOCK_ASSERT(pmap, MA_OWNED);
	mtx_assert(&vm_page_queue_mtx, MA_OWNED);

	TAILQ_INSERT_TAIL(&m->md.pv_list, pve, pv_link);

	//debugf("pv_insert: e\n");
}

/* Destroy pv entry. */
static void
pv_remove(pmap_t pmap, vm_offset_t va, vm_page_t m)
{
	pv_entry_t pve;

	//int su = (pmap == kernel_pmap);
	//debugf("pv_remove: s (su = %d pmap = 0x%08x va = 0x%08x)\n", su, (u_int32_t)pmap, va);

	PMAP_LOCK_ASSERT(pmap, MA_OWNED);
	mtx_assert(&vm_page_queue_mtx, MA_OWNED);

	/* find pv entry */
	TAILQ_FOREACH(pve, &m->md.pv_list, pv_link) {
		if ((pmap == pve->pv_pmap) && (va == pve->pv_va)) {
			/* remove from pv_list */
			TAILQ_REMOVE(&m->md.pv_list, pve, pv_link);
			if (TAILQ_EMPTY(&m->md.pv_list))
				vm_page_flag_clear(m, PG_WRITEABLE);

			/* free pv entry struct */
			pv_free(pve);
			break;
		}
	}

	//debugf("pv_remove: e\n");
}
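
/*
 * pv entries link a managed page to every (pmap, va) pair it is mapped at;
 * mmu_booke_remove_all() and mmu_booke_remove_write() iterate this list.
 * pv_alloc() wakes the pagedaemon once the allocation count crosses
 * pv_entry_high_water.
 */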
/*
 * Clean pte entry, try to free page table page if requested.
 *
 * Return 1 if ptbl pages were freed, otherwise return 0.
 */
static int
pte_remove(mmu_t mmu, pmap_t pmap, vm_offset_t va, uint8_t flags)
{
	unsigned int pdir_idx = PDIR_IDX(va);
	unsigned int ptbl_idx = PTBL_IDX(va);
	vm_page_t m;
	pte_t *ptbl;
	pte_t *pte;

	//int su = (pmap == kernel_pmap);
	//debugf("pte_remove: s (su = %d pmap = 0x%08x va = 0x%08x flags = %d)\n",
	//	su, (u_int32_t)pmap, va, flags);

	ptbl = pmap->pm_pdir[pdir_idx];
	KASSERT(ptbl, ("pte_remove: null ptbl"));

	pte = &ptbl[ptbl_idx];

	if (pte == NULL || !PTE_ISVALID(pte))
		return (0);

	if (PTE_ISWIRED(pte))
		pmap->pm_stats.wired_count--;

	/* Handle managed entry. */
	if (PTE_ISMANAGED(pte)) {
		/* Get vm_page_t for mapped pte. */
		m = PHYS_TO_VM_PAGE(PTE_PA(pte));

		if (PTE_ISMODIFIED(pte))
			vm_page_dirty(m);

		if (PTE_ISREFERENCED(pte))
			vm_page_flag_set(m, PG_REFERENCED);

		pv_remove(pmap, va, m);
	}

	mtx_lock_spin(&tlbivax_mutex);
	tlb_miss_lock();

	tlb0_flush_entry(va);
	pte->flags = 0;
	pte->rpn = 0;

	tlb_miss_unlock();
	mtx_unlock_spin(&tlbivax_mutex);

	pmap->pm_stats.resident_count--;

	if (flags & PTBL_UNHOLD) {
		//debugf("pte_remove: e (unhold)\n");
		return (ptbl_unhold(mmu, pmap, pdir_idx));
	}

	//debugf("pte_remove: e\n");
	return (0);
}

/*
 * Insert PTE for a given page and virtual address.
 */
static void
pte_enter(mmu_t mmu, pmap_t pmap, vm_page_t m, vm_offset_t va, uint32_t flags)
{
	unsigned int pdir_idx = PDIR_IDX(va);
	unsigned int ptbl_idx = PTBL_IDX(va);
	pte_t *ptbl, *pte;

	CTR4(KTR_PMAP, "%s: su = %d pmap = %p va = %p", __func__,
	    pmap == kernel_pmap, pmap, va);

	/* Get the page table pointer. */
	ptbl = pmap->pm_pdir[pdir_idx];

	if (ptbl == NULL) {
		/* Allocate page table pages. */
		ptbl = ptbl_alloc(mmu, pmap, pdir_idx);
	} else {
		/*
		 * Check if there is a valid mapping for the requested va;
		 * if there is, remove it.
		 */
		pte = &pmap->pm_pdir[pdir_idx][ptbl_idx];
		if (PTE_ISVALID(pte)) {
			pte_remove(mmu, pmap, va, PTBL_HOLD);
		} else {
			/*
			 * pte is not used, increment hold count for ptbl
			 * pages.
			 */
			if (pmap != kernel_pmap)
				ptbl_hold(mmu, pmap, pdir_idx);
		}
	}

	/*
	 * Insert pv_entry into pv_list for mapped page if part of managed
	 * memory.
	 */
	if ((m->flags & PG_FICTITIOUS) == 0) {
		if ((m->flags & PG_UNMANAGED) == 0) {
			flags |= PTE_MANAGED;

			/* Create and insert pv entry. */
			pv_insert(pmap, va, m);
		}
	}

	pmap->pm_stats.resident_count++;

	mtx_lock_spin(&tlbivax_mutex);
	tlb_miss_lock();

	tlb0_flush_entry(va);
	if (pmap->pm_pdir[pdir_idx] == NULL) {
		/*
		 * If we just allocated a new page table, hook it in
		 * the pdir.
		 */
		pmap->pm_pdir[pdir_idx] = ptbl;
	}
	pte = &(pmap->pm_pdir[pdir_idx][ptbl_idx]);
	pte->rpn = VM_PAGE_TO_PHYS(m) & ~PTE_PA_MASK;
	pte->flags |= (PTE_VALID | flags);

	tlb_miss_unlock();
	mtx_unlock_spin(&tlbivax_mutex);
}

/* Return the pa for the given pmap/va. */
static vm_paddr_t
pte_vatopa(mmu_t mmu, pmap_t pmap, vm_offset_t va)
{
	vm_paddr_t pa = 0;
	pte_t *pte;

	pte = pte_find(mmu, pmap, va);
	if ((pte != NULL) && PTE_ISVALID(pte))
		pa = (PTE_PA(pte) | (va & PTE_PA_MASK));
	return (pa);
}

/* Get a pointer to a PTE in a page table. */
static pte_t *
pte_find(mmu_t mmu, pmap_t pmap, vm_offset_t va)
{
	unsigned int pdir_idx = PDIR_IDX(va);
	unsigned int ptbl_idx = PTBL_IDX(va);

	KASSERT((pmap != NULL), ("pte_find: invalid pmap"));

	if (pmap->pm_pdir[pdir_idx])
		return (&(pmap->pm_pdir[pdir_idx][ptbl_idx]));

	return (NULL);
}

/**************************************************************************/
/* PMAP related */
/**************************************************************************/

/*
 * This is called during e500_init, before the system is really initialized.
 */
static void
mmu_booke_bootstrap(mmu_t mmu, vm_offset_t start, vm_offset_t kernelend)
{
	vm_offset_t phys_kernelend;
	struct mem_region *mp, *mp1;
	int cnt, i, j;
	u_int s, e, sz;
	u_int phys_avail_count;
	vm_size_t physsz, hwphyssz, kstack0_sz;
	vm_offset_t kernel_pdir, kstack0, va;
	vm_paddr_t kstack0_phys;
	void *dpcpu;
	pte_t *pte;

	debugf("mmu_booke_bootstrap: entered\n");

	/* Initialize invalidation mutex */
	mtx_init(&tlbivax_mutex, "tlbivax", NULL, MTX_SPIN);

	/* Read TLB0 size and associativity. */
	tlb0_get_tlbconf();

	/* Align kernel start and end address (kernel image). */
	kernstart = trunc_page(start);
	data_start = round_page(kernelend);
	kernsize = data_start - kernstart;

	data_end = data_start;

	/* Allocate space for the message buffer. */
	msgbufp = (struct msgbuf *)data_end;
	data_end += msgbufsize;
	debugf(" msgbufp at 0x%08x end = 0x%08x\n", (uint32_t)msgbufp,
	    data_end);

	data_end = round_page(data_end);

	/* Allocate the dynamic per-cpu area. */
	dpcpu = (void *)data_end;
	data_end += DPCPU_SIZE;
	dpcpu_init(dpcpu, 0);

	/* Allocate space for ptbl_bufs. */
	ptbl_bufs = (struct ptbl_buf *)data_end;
	data_end += sizeof(struct ptbl_buf) * PTBL_BUFS;
	debugf(" ptbl_bufs at 0x%08x end = 0x%08x\n", (uint32_t)ptbl_bufs,
	    data_end);

	data_end = round_page(data_end);

	/* Allocate PTE tables for kernel KVA. */
	kernel_pdir = data_end;
	kernel_ptbls = (VM_MAX_KERNEL_ADDRESS - VM_MIN_KERNEL_ADDRESS +
	    PDIR_SIZE - 1) / PDIR_SIZE;
	data_end += kernel_ptbls * PTBL_PAGES * PAGE_SIZE;
	debugf(" kernel ptbls: %d\n", kernel_ptbls);
	debugf(" kernel pdir at 0x%08x end = 0x%08x\n", kernel_pdir, data_end);

	debugf(" data_end: 0x%08x\n", data_end);
	if (data_end - kernstart > 0x1000000) {
		data_end = (data_end + 0x3fffff) & ~0x3fffff;
		tlb1_mapin_region(kernstart + 0x1000000,
		    kernload + 0x1000000, data_end - kernstart - 0x1000000);
	} else
		data_end = (data_end + 0xffffff) & ~0xffffff;

	debugf(" updated data_end: 0x%08x\n", data_end);

	kernsize += data_end - data_start;

	/*
	 * Clear the structures - note we can only do it safely after the
	 * possible additional TLB1 translations are in place (above) so that
	 * all range up to the currently calculated 'data_end' is covered.
	 */
	memset((void *)ptbl_bufs, 0, sizeof(struct ptbl_buf) * PTBL_BUFS);
	memset((void *)kernel_pdir, 0, kernel_ptbls * PTBL_PAGES * PAGE_SIZE);

	/*******************************************************/
	/* Set the start and end of kva. */
	/*******************************************************/
	virtual_avail = round_page(data_end);
	virtual_end = VM_MAX_KERNEL_ADDRESS;

	/* Allocate KVA space for page zero/copy operations. */
	zero_page_va = virtual_avail;
	virtual_avail += PAGE_SIZE;
	zero_page_idle_va = virtual_avail;
	virtual_avail += PAGE_SIZE;
	copy_page_src_va = virtual_avail;
	virtual_avail += PAGE_SIZE;
	copy_page_dst_va = virtual_avail;
	virtual_avail += PAGE_SIZE;
	debugf("zero_page_va = 0x%08x\n", zero_page_va);
	debugf("zero_page_idle_va = 0x%08x\n", zero_page_idle_va);
	debugf("copy_page_src_va = 0x%08x\n", copy_page_src_va);
	debugf("copy_page_dst_va = 0x%08x\n", copy_page_dst_va);
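
	/*
	 * These four reserved pages correspond to the "reserved for page
	 * zero/copy" range in the VM layout notes at the top of the file;
	 * each one is backed on demand by mmu_booke_kenter() of the page
	 * currently being zeroed or copied.
	 */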
	/* Initialize page zero/copy mutexes. */
	mtx_init(&zero_page_mutex, "mmu_booke_zero_page", NULL, MTX_DEF);
	mtx_init(&copy_page_mutex, "mmu_booke_copy_page", NULL, MTX_DEF);

	/* Allocate KVA space for ptbl bufs. */
	ptbl_buf_pool_vabase = virtual_avail;
	virtual_avail += PTBL_BUFS * PTBL_PAGES * PAGE_SIZE;
	debugf("ptbl_buf_pool_vabase = 0x%08x end = 0x%08x\n",
	    ptbl_buf_pool_vabase, virtual_avail);

	/* Calculate corresponding physical addresses for the kernel region. */
	phys_kernelend = kernload + kernsize;
	debugf("kernel image and allocated data:\n");
	debugf(" kernload = 0x%08x\n", kernload);
	debugf(" kernstart = 0x%08x\n", kernstart);
	debugf(" kernsize = 0x%08x\n", kernsize);

	if (sizeof(phys_avail) / sizeof(phys_avail[0]) < availmem_regions_sz)
		panic("mmu_booke_bootstrap: phys_avail too small");

	/*
	 * Remove kernel physical address range from avail regions list. Page
	 * align all regions. Non-page aligned memory isn't very interesting
	 * to us. Also, sort the entries for ascending addresses.
	 */

	/* Retrieve phys/avail mem regions */
	mem_regions(&physmem_regions, &physmem_regions_sz,
	    &availmem_regions, &availmem_regions_sz);
	sz = 0;
	cnt = availmem_regions_sz;
	debugf("processing avail regions:\n");
	for (mp = availmem_regions; mp->mr_size; mp++) {
		s = mp->mr_start;
		e = mp->mr_start + mp->mr_size;
		debugf(" %08x-%08x -> ", s, e);
		/* Check whether this region holds all of the kernel. */
		if (s < kernload && e > phys_kernelend) {
			availmem_regions[cnt].mr_start = phys_kernelend;
			availmem_regions[cnt++].mr_size = e - phys_kernelend;
			e = kernload;
		}
		/* Look whether this region starts within the kernel. */
		if (s >= kernload && s < phys_kernelend) {
			if (e <= phys_kernelend)
				goto empty;
			s = phys_kernelend;
		}
		/* Now look whether this region ends within the kernel. */
		if (e > kernload && e <= phys_kernelend) {
			if (s >= kernload)
				goto empty;
			e = kernload;
		}
		/* Now page align the start and size of the region. */
		s = round_page(s);
		e = trunc_page(e);
		if (e < s)
			e = s;
		sz = e - s;
		debugf("%08x-%08x = %x\n", s, e, sz);

		/* Check whether some memory is left here. */
		if (sz == 0) {
		empty:
			memmove(mp, mp + 1,
			    (cnt - (mp - availmem_regions)) * sizeof(*mp));
			cnt--;
			mp--;
			continue;
		}

		/* Do an insertion sort. */
		for (mp1 = availmem_regions; mp1 < mp; mp1++)
			if (s < mp1->mr_start)
				break;
		if (mp1 < mp) {
			memmove(mp1 + 1, mp1, (char *)mp - (char *)mp1);
			mp1->mr_start = s;
			mp1->mr_size = sz;
		} else {
			mp->mr_start = s;
			mp->mr_size = sz;
		}
	}
	availmem_regions_sz = cnt;

	/*******************************************************/
	/* Steal physical memory for kernel stack from the end */
	/* of the first avail region */
	/*******************************************************/
	kstack0_sz = KSTACK_PAGES * PAGE_SIZE;
	kstack0_phys = availmem_regions[0].mr_start +
	    availmem_regions[0].mr_size;
	kstack0_phys -= kstack0_sz;
	availmem_regions[0].mr_size -= kstack0_sz;

	/*******************************************************/
	/* Fill in phys_avail table, based on availmem_regions */
	/*******************************************************/
	phys_avail_count = 0;
	physsz = 0;
	hwphyssz = 0;
	TUNABLE_ULONG_FETCH("hw.physmem", (u_long *) &hwphyssz);

	debugf("fill in phys_avail:\n");
	for (i = 0, j = 0; i < availmem_regions_sz; i++, j += 2) {

		debugf(" region: 0x%08x - 0x%08x (0x%08x)\n",
		    availmem_regions[i].mr_start,
		    availmem_regions[i].mr_start +
		    availmem_regions[i].mr_size,
		    availmem_regions[i].mr_size);

		if (hwphyssz != 0 &&
		    (physsz + availmem_regions[i].mr_size) >= hwphyssz) {
			debugf(" hw.physmem adjust\n");
			if (physsz < hwphyssz) {
				phys_avail[j] = availmem_regions[i].mr_start;
				phys_avail[j + 1] =
				    availmem_regions[i].mr_start +
				    hwphyssz - physsz;
				physsz = hwphyssz;
				phys_avail_count++;
			}
			break;
		}

		phys_avail[j] = availmem_regions[i].mr_start;
		phys_avail[j + 1] = availmem_regions[i].mr_start +
		    availmem_regions[i].mr_size;
		phys_avail_count++;
		physsz += availmem_regions[i].mr_size;
	}
	physmem = btoc(physsz);

	/* Calculate the last available physical address. */
	for (i = 0; phys_avail[i + 2] != 0; i += 2)
		;
	Maxmem = powerpc_btop(phys_avail[i + 1]);

	debugf("Maxmem = 0x%08lx\n", Maxmem);
	debugf("phys_avail_count = %d\n", phys_avail_count);
	debugf("physsz = 0x%08x physmem = %ld (0x%08lx)\n", physsz, physmem,
	    physmem);

	/*******************************************************/
	/* Initialize (statically allocated) kernel pmap. */
	/*******************************************************/
	PMAP_LOCK_INIT(kernel_pmap);
	kptbl_min = VM_MIN_KERNEL_ADDRESS / PDIR_SIZE;

	debugf("kernel_pmap = 0x%08x\n", (uint32_t)kernel_pmap);
	debugf("kptbl_min = %d, kernel_ptbls = %d\n", kptbl_min, kernel_ptbls);
	debugf("kernel pdir range: 0x%08x - 0x%08x\n",
	    kptbl_min * PDIR_SIZE, (kptbl_min + kernel_ptbls) * PDIR_SIZE - 1);

	/* Initialize kernel pdir */
	for (i = 0; i < kernel_ptbls; i++)
		kernel_pmap->pm_pdir[kptbl_min + i] =
		    (pte_t *)(kernel_pdir + (i * PAGE_SIZE * PTBL_PAGES));

	for (i = 0; i < MAXCPU; i++) {
		kernel_pmap->pm_tid[i] = TID_KERNEL;

		/* Initialize each CPU's tidbusy entry 0 with kernel_pmap */
		tidbusy[i][0] = kernel_pmap;
	}
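
	/*
	 * TID_KERNEL is permanently owned by kernel_pmap on every CPU,
	 * which is why each CPU's tidbusy[][0] slot is seeded here; user
	 * pmaps receive per-CPU TIDs from tid_alloc() when activated.
	 */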
	/*
	 * Fill in PTEs covering kernel code and data. They are not required
	 * for address translation, as this area is covered by static TLB1
	 * entries, but for pte_vatopa() to work correctly with kernel area
	 * addresses.
	 */
	for (va = KERNBASE; va < data_end; va += PAGE_SIZE) {
		pte = &(kernel_pmap->pm_pdir[PDIR_IDX(va)][PTBL_IDX(va)]);
		pte->rpn = kernload + (va - KERNBASE);
		pte->flags = PTE_M | PTE_SR | PTE_SW | PTE_SX | PTE_WIRED |
		    PTE_VALID;
	}
	/* Mark kernel_pmap active on all CPUs */
	CPU_FILL(&kernel_pmap->pm_active);

	/*******************************************************/
	/* Final setup */
	/*******************************************************/

	/* Enter kstack0 into kernel map, provide guard page */
	kstack0 = virtual_avail + KSTACK_GUARD_PAGES * PAGE_SIZE;
	thread0.td_kstack = kstack0;
	thread0.td_kstack_pages = KSTACK_PAGES;

	debugf("kstack_sz = 0x%08x\n", kstack0_sz);
	debugf("kstack0_phys at 0x%08x - 0x%08x\n",
	    kstack0_phys, kstack0_phys + kstack0_sz);
	debugf("kstack0 at 0x%08x - 0x%08x\n", kstack0, kstack0 + kstack0_sz);

	virtual_avail += KSTACK_GUARD_PAGES * PAGE_SIZE + kstack0_sz;
	for (i = 0; i < KSTACK_PAGES; i++) {
		mmu_booke_kenter(mmu, kstack0, kstack0_phys);
		kstack0 += PAGE_SIZE;
		kstack0_phys += PAGE_SIZE;
	}

	debugf("virtual_avail = %08x\n", virtual_avail);
	debugf("virtual_end = %08x\n", virtual_end);

	debugf("mmu_booke_bootstrap: exit\n");
}

void
pmap_bootstrap_ap(volatile uint32_t *trcp __unused)
{
	int i;

	/*
	 * Finish TLB1 configuration: the BSP already set up its TLB1 and we
	 * have the snapshot of its contents in the s/w tlb1[] table, so use
	 * these values directly to (re)program AP's TLB1 hardware.
	 */
	for (i = 0; i < tlb1_idx; i ++) {
		/* Skip invalid entries */
		if (!(tlb1[i].mas1 & MAS1_VALID))
			continue;

		tlb1_write_entry(i);
	}

	set_mas4_defaults();
}

/*
 * Get the physical page address for the given pmap/virtual address.
 */
static vm_paddr_t
mmu_booke_extract(mmu_t mmu, pmap_t pmap, vm_offset_t va)
{
	vm_paddr_t pa;

	PMAP_LOCK(pmap);
	pa = pte_vatopa(mmu, pmap, va);
	PMAP_UNLOCK(pmap);

	return (pa);
}

/*
 * Extract the physical page address associated with the given
 * kernel virtual address.
 */
static vm_paddr_t
mmu_booke_kextract(mmu_t mmu, vm_offset_t va)
{

	return (pte_vatopa(mmu, kernel_pmap, va));
}

/*
 * Initialize the pmap module.
 * Called by vm_init, to initialize any structures that the pmap
 * system needs to map virtual memory.
 */
static void
mmu_booke_init(mmu_t mmu)
{
	int shpgperproc = PMAP_SHPGPERPROC;

	/*
	 * Initialize the address space (zone) for the pv entries. Set a
	 * high water mark so that the system can recover from excessive
	 * numbers of pv entries.
	 */
	pvzone = uma_zcreate("PV ENTRY", sizeof(struct pv_entry), NULL, NULL,
	    NULL, NULL, UMA_ALIGN_PTR, UMA_ZONE_VM | UMA_ZONE_NOFREE);

	TUNABLE_INT_FETCH("vm.pmap.shpgperproc", &shpgperproc);
	pv_entry_max = shpgperproc * maxproc + cnt.v_page_count;

	TUNABLE_INT_FETCH("vm.pmap.pv_entries", &pv_entry_max);
	pv_entry_high_water = 9 * (pv_entry_max / 10);

	uma_zone_set_obj(pvzone, &pvzone_obj, pv_entry_max);

	/* Pre-fill pvzone with initial number of pv entries. */
	uma_prealloc(pvzone, PV_ENTRY_ZONE_MIN);

	/* Initialize ptbl allocation. */
	ptbl_init();
}

/*
 * Map a list of wired pages into kernel virtual address space. This is
 * intended for temporary mappings which do not need page modification or
 * references recorded. Existing mappings in the region are overwritten.
 */
static void
mmu_booke_qenter(mmu_t mmu, vm_offset_t sva, vm_page_t *m, int count)
{
	vm_offset_t va;

	va = sva;
	while (count-- > 0) {
		mmu_booke_kenter(mmu, va, VM_PAGE_TO_PHYS(*m));
		va += PAGE_SIZE;
		m++;
	}
}

/*
 * Remove page mappings from kernel virtual address space. Intended for
 * temporary mappings entered by mmu_booke_qenter.
 */
static void
mmu_booke_qremove(mmu_t mmu, vm_offset_t sva, int count)
{
	vm_offset_t va;

	va = sva;
	while (count-- > 0) {
		mmu_booke_kremove(mmu, va);
		va += PAGE_SIZE;
	}
}

/*
 * Map a wired page into kernel virtual address space.
 */
static void
mmu_booke_kenter(mmu_t mmu, vm_offset_t va, vm_offset_t pa)
{
	unsigned int pdir_idx = PDIR_IDX(va);
	unsigned int ptbl_idx = PTBL_IDX(va);
	uint32_t flags;
	pte_t *pte;

	KASSERT(((va >= VM_MIN_KERNEL_ADDRESS) &&
	    (va <= VM_MAX_KERNEL_ADDRESS)), ("mmu_booke_kenter: invalid va"));

	flags = 0;
	flags |= (PTE_SR | PTE_SW | PTE_SX | PTE_WIRED | PTE_VALID);
	flags |= PTE_M;

	pte = &(kernel_pmap->pm_pdir[pdir_idx][ptbl_idx]);

	mtx_lock_spin(&tlbivax_mutex);
	tlb_miss_lock();

	if (PTE_ISVALID(pte)) {

		CTR1(KTR_PMAP, "%s: replacing entry!", __func__);

		/* Flush entry from TLB0 */
		tlb0_flush_entry(va);
	}

	pte->rpn = pa & ~PTE_PA_MASK;
	pte->flags = flags;

	//debugf("mmu_booke_kenter: pdir_idx = %d ptbl_idx = %d va=0x%08x "
	//	"pa=0x%08x rpn=0x%08x flags=0x%08x\n",
	//	pdir_idx, ptbl_idx, va, pa, pte->rpn, pte->flags);

	/* Flush the real memory from the instruction cache. */
	if ((flags & (PTE_I | PTE_G)) == 0) {
		__syncicache((void *)va, PAGE_SIZE);
	}

	tlb_miss_unlock();
	mtx_unlock_spin(&tlbivax_mutex);
}

/*
 * Remove a page from kernel page table.
 */
static void
mmu_booke_kremove(mmu_t mmu, vm_offset_t va)
{
	unsigned int pdir_idx = PDIR_IDX(va);
	unsigned int ptbl_idx = PTBL_IDX(va);
	pte_t *pte;

//	CTR2(KTR_PMAP,("%s: s (va = 0x%08x)\n", __func__, va));

	KASSERT(((va >= VM_MIN_KERNEL_ADDRESS) &&
	    (va <= VM_MAX_KERNEL_ADDRESS)),
	    ("mmu_booke_kremove: invalid va"));

	pte = &(kernel_pmap->pm_pdir[pdir_idx][ptbl_idx]);

	if (!PTE_ISVALID(pte)) {

		CTR1(KTR_PMAP, "%s: invalid pte", __func__);

		return;
	}

	mtx_lock_spin(&tlbivax_mutex);
	tlb_miss_lock();

	/* Invalidate entry in TLB0, update PTE. */
	tlb0_flush_entry(va);
	pte->flags = 0;
	pte->rpn = 0;

	tlb_miss_unlock();
	mtx_unlock_spin(&tlbivax_mutex);
}

/*
 * Initialize pmap associated with process 0.
 */
static void
mmu_booke_pinit0(mmu_t mmu, pmap_t pmap)
{

	mmu_booke_pinit(mmu, pmap);
	PCPU_SET(curpmap, pmap);
}

/*
 * Initialize a preallocated and zeroed pmap structure,
 * such as one in a vmspace structure.
1473 */ 1474static void 1475mmu_booke_pinit(mmu_t mmu, pmap_t pmap) 1476{ 1477 int i; 1478 1479 CTR4(KTR_PMAP, "%s: pmap = %p, proc %d '%s'", __func__, pmap, 1480 curthread->td_proc->p_pid, curthread->td_proc->p_comm); 1481 1482 KASSERT((pmap != kernel_pmap), ("pmap_pinit: initializing kernel_pmap")); 1483 1484 PMAP_LOCK_INIT(pmap); 1485 for (i = 0; i < MAXCPU; i++) 1486 pmap->pm_tid[i] = TID_NONE; 1487 CPU_ZERO(&kernel_pmap->pm_active); 1488 bzero(&pmap->pm_stats, sizeof(pmap->pm_stats)); 1489 bzero(&pmap->pm_pdir, sizeof(pte_t *) * PDIR_NENTRIES); 1490 TAILQ_INIT(&pmap->pm_ptbl_list); 1491} 1492 1493/* 1494 * Release any resources held by the given physical map. 1495 * Called when a pmap initialized by mmu_booke_pinit is being released. 1496 * Should only be called if the map contains no valid mappings. 1497 */ 1498static void 1499mmu_booke_release(mmu_t mmu, pmap_t pmap) 1500{ 1501 1502 KASSERT(pmap->pm_stats.resident_count == 0, 1503 ("pmap_release: pmap resident count %ld != 0", 1504 pmap->pm_stats.resident_count)); 1505 1506 PMAP_LOCK_DESTROY(pmap); 1507} 1508 1509/* 1510 * Insert the given physical page at the specified virtual address in the 1511 * target physical map with the protection requested. If specified the page 1512 * will be wired down. 1513 */ 1514static void 1515mmu_booke_enter(mmu_t mmu, pmap_t pmap, vm_offset_t va, vm_page_t m, 1516 vm_prot_t prot, boolean_t wired) 1517{ 1518 1519 vm_page_lock_queues(); 1520 PMAP_LOCK(pmap); 1521 mmu_booke_enter_locked(mmu, pmap, va, m, prot, wired); 1522 vm_page_unlock_queues(); 1523 PMAP_UNLOCK(pmap); 1524} 1525 1526static void 1527mmu_booke_enter_locked(mmu_t mmu, pmap_t pmap, vm_offset_t va, vm_page_t m, 1528 vm_prot_t prot, boolean_t wired) 1529{ 1530 pte_t *pte; 1531 vm_paddr_t pa; 1532 uint32_t flags; 1533 int su, sync; 1534 1535 pa = VM_PAGE_TO_PHYS(m); 1536 su = (pmap == kernel_pmap); 1537 sync = 0; 1538 1539 //debugf("mmu_booke_enter_locked: s (pmap=0x%08x su=%d tid=%d m=0x%08x va=0x%08x " 1540 // "pa=0x%08x prot=0x%08x wired=%d)\n", 1541 // (u_int32_t)pmap, su, pmap->pm_tid, 1542 // (u_int32_t)m, va, pa, prot, wired); 1543 1544 if (su) { 1545 KASSERT(((va >= virtual_avail) && 1546 (va <= VM_MAX_KERNEL_ADDRESS)), 1547 ("mmu_booke_enter_locked: kernel pmap, non kernel va")); 1548 } else { 1549 KASSERT((va <= VM_MAXUSER_ADDRESS), 1550 ("mmu_booke_enter_locked: user pmap, non user va")); 1551 } 1552 KASSERT((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) != 0 || 1553 (m->oflags & VPO_BUSY) != 0 || VM_OBJECT_LOCKED(m->object), 1554 ("mmu_booke_enter_locked: page %p is not busy", m)); 1555 1556 PMAP_LOCK_ASSERT(pmap, MA_OWNED); 1557 1558 /* 1559 * If there is an existing mapping, and the physical address has not 1560 * changed, must be protection or wiring change. 1561 */ 1562 if (((pte = pte_find(mmu, pmap, va)) != NULL) && 1563 (PTE_ISVALID(pte)) && (PTE_PA(pte) == pa)) { 1564 1565 /* 1566 * Before actually updating pte->flags we calculate and 1567 * prepare its new value in a helper var. 1568 */ 1569 flags = pte->flags; 1570 flags &= ~(PTE_UW | PTE_UX | PTE_SW | PTE_SX | PTE_MODIFIED); 1571 1572 /* Wiring change, just update stats. */ 1573 if (wired) { 1574 if (!PTE_ISWIRED(pte)) { 1575 flags |= PTE_WIRED; 1576 pmap->pm_stats.wired_count++; 1577 } 1578 } else { 1579 if (PTE_ISWIRED(pte)) { 1580 flags &= ~PTE_WIRED; 1581 pmap->pm_stats.wired_count--; 1582 } 1583 } 1584 1585 if (prot & VM_PROT_WRITE) { 1586 /* Add write permissions. 
			flags |= PTE_SW;
			if (!su)
				flags |= PTE_UW;

			if ((flags & PTE_MANAGED) != 0)
				vm_page_flag_set(m, PG_WRITEABLE);
		} else {
			/* Handle modified pages, sense modify status. */

			/*
			 * The PTE_MODIFIED flag could be set by underlying
			 * TLB misses since we last read it (above), possibly
			 * other CPUs could update it so we check in the PTE
			 * directly rather than rely on that saved local flags
			 * copy.
			 */
			if (PTE_ISMODIFIED(pte))
				vm_page_dirty(m);
		}

		if (prot & VM_PROT_EXECUTE) {
			flags |= PTE_SX;
			if (!su)
				flags |= PTE_UX;

			/*
			 * Check existing flags for execute permissions: if we
			 * are turning execute permissions on, icache should
			 * be flushed.
			 */
			if ((pte->flags & (PTE_UX | PTE_SX)) == 0)
				sync++;
		}

		flags &= ~PTE_REFERENCED;

		/*
		 * The new flags value is all calculated -- only now actually
		 * update the PTE.
		 */
		mtx_lock_spin(&tlbivax_mutex);
		tlb_miss_lock();

		tlb0_flush_entry(va);
		pte->flags = flags;

		tlb_miss_unlock();
		mtx_unlock_spin(&tlbivax_mutex);

	} else {
		/*
		 * If there is an existing mapping, but it's for a different
		 * physical address, pte_enter() will delete the old mapping.
		 */
		//if ((pte != NULL) && PTE_ISVALID(pte))
		//	debugf("mmu_booke_enter_locked: replace\n");
		//else
		//	debugf("mmu_booke_enter_locked: new\n");

		/* Now set up the flags and install the new mapping. */
		flags = (PTE_SR | PTE_VALID);
		flags |= PTE_M;

		if (!su)
			flags |= PTE_UR;

		if (prot & VM_PROT_WRITE) {
			flags |= PTE_SW;
			if (!su)
				flags |= PTE_UW;

			if ((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) == 0)
				vm_page_flag_set(m, PG_WRITEABLE);
		}

		if (prot & VM_PROT_EXECUTE) {
			flags |= PTE_SX;
			if (!su)
				flags |= PTE_UX;
		}

		/* If it's wired, update stats. */
		if (wired) {
			pmap->pm_stats.wired_count++;
			flags |= PTE_WIRED;
		}

		pte_enter(mmu, pmap, m, va, flags);

		/* Flush the real memory from the instruction cache. */
		if (prot & VM_PROT_EXECUTE)
			sync++;
	}

	if (sync && (su || pmap == PCPU_GET(curpmap))) {
		__syncicache((void *)va, PAGE_SIZE);
		sync = 0;
	}
}
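
/*
 * Note that in mmu_booke_enter_locked() the icache flush is deferred until
 * the final flags are settled: the page is flushed only when execute
 * permission is actually being granted, and only if the target pmap is the
 * kernel pmap or the currently active one, since __syncicache() operates
 * on the current address space.
 */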
/*
 * Maps a sequence of resident pages belonging to the same object.
 * The sequence begins with the given page m_start. This page is
 * mapped at the given virtual address start. Each subsequent page is
 * mapped at a virtual address that is offset from start by the same
 * amount as the page is offset from m_start within the object. The
 * last page in the sequence is the page with the largest offset from
 * m_start that can be mapped at a virtual address less than the given
 * virtual address end. Not every virtual page between start and end
 * is mapped; only those for which a resident page exists with the
 * corresponding offset from m_start are mapped.
 */
static void
mmu_booke_enter_object(mmu_t mmu, pmap_t pmap, vm_offset_t start,
    vm_offset_t end, vm_page_t m_start, vm_prot_t prot)
{
	vm_page_t m;
	vm_pindex_t diff, psize;

	psize = atop(end - start);
	m = m_start;
	vm_page_lock_queues();
	PMAP_LOCK(pmap);
	while (m != NULL && (diff = m->pindex - m_start->pindex) < psize) {
		mmu_booke_enter_locked(mmu, pmap, start + ptoa(diff), m,
		    prot & (VM_PROT_READ | VM_PROT_EXECUTE), FALSE);
		m = TAILQ_NEXT(m, listq);
	}
	vm_page_unlock_queues();
	PMAP_UNLOCK(pmap);
}

static void
mmu_booke_enter_quick(mmu_t mmu, pmap_t pmap, vm_offset_t va, vm_page_t m,
    vm_prot_t prot)
{

	vm_page_lock_queues();
	PMAP_LOCK(pmap);
	mmu_booke_enter_locked(mmu, pmap, va, m,
	    prot & (VM_PROT_READ | VM_PROT_EXECUTE), FALSE);
	vm_page_unlock_queues();
	PMAP_UNLOCK(pmap);
}

/*
 * Remove the given range of addresses from the specified map.
 *
 * It is assumed that the start and end are properly rounded to the page
 * size.
 */
static void
mmu_booke_remove(mmu_t mmu, pmap_t pmap, vm_offset_t va, vm_offset_t endva)
{
	pte_t *pte;
	uint8_t hold_flag;

	int su = (pmap == kernel_pmap);

	//debugf("mmu_booke_remove: s (su = %d pmap=0x%08x tid=%d va=0x%08x endva=0x%08x)\n",
	//	su, (u_int32_t)pmap, pmap->pm_tid, va, endva);

	if (su) {
		KASSERT(((va >= virtual_avail) &&
		    (va <= VM_MAX_KERNEL_ADDRESS)),
		    ("mmu_booke_remove: kernel pmap, non kernel va"));
	} else {
		KASSERT((va <= VM_MAXUSER_ADDRESS),
		    ("mmu_booke_remove: user pmap, non user va"));
	}

	if (PMAP_REMOVE_DONE(pmap)) {
		//debugf("mmu_booke_remove: e (empty)\n");
		return;
	}

	hold_flag = PTBL_HOLD_FLAG(pmap);
	//debugf("mmu_booke_remove: hold_flag = %d\n", hold_flag);

	vm_page_lock_queues();
	PMAP_LOCK(pmap);
	for (; va < endva; va += PAGE_SIZE) {
		pte = pte_find(mmu, pmap, va);
		if ((pte != NULL) && PTE_ISVALID(pte))
			pte_remove(mmu, pmap, va, hold_flag);
	}
	PMAP_UNLOCK(pmap);
	vm_page_unlock_queues();

	//debugf("mmu_booke_remove: e\n");
}

/*
 * Remove physical page from all pmaps in which it resides.
 */
static void
mmu_booke_remove_all(mmu_t mmu, vm_page_t m)
{
	pv_entry_t pv, pvn;
	uint8_t hold_flag;

	vm_page_lock_queues();
	for (pv = TAILQ_FIRST(&m->md.pv_list); pv != NULL; pv = pvn) {
		pvn = TAILQ_NEXT(pv, pv_link);

		PMAP_LOCK(pv->pv_pmap);
		hold_flag = PTBL_HOLD_FLAG(pv->pv_pmap);
		pte_remove(mmu, pv->pv_pmap, pv->pv_va, hold_flag);
		PMAP_UNLOCK(pv->pv_pmap);
	}
	vm_page_flag_clear(m, PG_WRITEABLE);
	vm_page_unlock_queues();
}
1802 */ 1803static vm_offset_t 1804mmu_booke_map(mmu_t mmu, vm_offset_t *virt, vm_offset_t pa_start, 1805 vm_offset_t pa_end, int prot) 1806{ 1807 vm_offset_t sva = *virt; 1808 vm_offset_t va = sva; 1809 1810 //debugf("mmu_booke_map: s (sva = 0x%08x pa_start = 0x%08x pa_end = 0x%08x)\n", 1811 // sva, pa_start, pa_end); 1812 1813 while (pa_start < pa_end) { 1814 mmu_booke_kenter(mmu, va, pa_start); 1815 va += PAGE_SIZE; 1816 pa_start += PAGE_SIZE; 1817 } 1818 *virt = va; 1819 1820 //debugf("mmu_booke_map: e (va = 0x%08x)\n", va); 1821 return (sva); 1822} 1823 1824/* 1825 * The pmap must be activated before it's address space can be accessed in any 1826 * way. 1827 */ 1828static void 1829mmu_booke_activate(mmu_t mmu, struct thread *td) 1830{ 1831 pmap_t pmap; 1832 1833 pmap = &td->td_proc->p_vmspace->vm_pmap; 1834 1835 CTR5(KTR_PMAP, "%s: s (td = %p, proc = '%s', id = %d, pmap = 0x%08x)", 1836 __func__, td, td->td_proc->p_comm, td->td_proc->p_pid, pmap); 1837 1838 KASSERT((pmap != kernel_pmap), ("mmu_booke_activate: kernel_pmap!")); 1839 1840 mtx_lock_spin(&sched_lock); 1841 1842 CPU_OR_ATOMIC(&pmap->pm_active, PCPU_PTR(cpumask)); 1843 PCPU_SET(curpmap, pmap); 1844 1845 if (pmap->pm_tid[PCPU_GET(cpuid)] == TID_NONE) 1846 tid_alloc(pmap); 1847 1848 /* Load PID0 register with pmap tid value. */ 1849 mtspr(SPR_PID0, pmap->pm_tid[PCPU_GET(cpuid)]); 1850 __asm __volatile("isync"); 1851 1852 mtx_unlock_spin(&sched_lock); 1853 1854 CTR3(KTR_PMAP, "%s: e (tid = %d for '%s')", __func__, 1855 pmap->pm_tid[PCPU_GET(cpuid)], td->td_proc->p_comm); 1856} 1857 1858/* 1859 * Deactivate the specified process's address space. 1860 */ 1861static void 1862mmu_booke_deactivate(mmu_t mmu, struct thread *td) 1863{ 1864 pmap_t pmap; 1865 1866 pmap = &td->td_proc->p_vmspace->vm_pmap; 1867 1868 CTR5(KTR_PMAP, "%s: td=%p, proc = '%s', id = %d, pmap = 0x%08x", 1869 __func__, td, td->td_proc->p_comm, td->td_proc->p_pid, pmap); 1870 1871 sched_pin(); 1872 CPU_NAND_ATOMIC(&pmap->pm_active, PCPU_PTR(cpumask)); 1873 sched_unpin(); 1874 PCPU_SET(curpmap, NULL); 1875} 1876 1877/* 1878 * Copy the range specified by src_addr/len 1879 * from the source map to the range dst_addr/len 1880 * in the destination map. 1881 * 1882 * This routine is only advisory and need not do anything. 1883 */ 1884static void 1885mmu_booke_copy(mmu_t mmu, pmap_t dst_pmap, pmap_t src_pmap, 1886 vm_offset_t dst_addr, vm_size_t len, vm_offset_t src_addr) 1887{ 1888 1889} 1890 1891/* 1892 * Set the physical protection on the specified range of this map as requested. 1893 */ 1894static void 1895mmu_booke_protect(mmu_t mmu, pmap_t pmap, vm_offset_t sva, vm_offset_t eva, 1896 vm_prot_t prot) 1897{ 1898 vm_offset_t va; 1899 vm_page_t m; 1900 pte_t *pte; 1901 1902 if ((prot & VM_PROT_READ) == VM_PROT_NONE) { 1903 mmu_booke_remove(mmu, pmap, sva, eva); 1904 return; 1905 } 1906 1907 if (prot & VM_PROT_WRITE) 1908 return; 1909 1910 vm_page_lock_queues(); 1911 PMAP_LOCK(pmap); 1912 for (va = sva; va < eva; va += PAGE_SIZE) { 1913 if ((pte = pte_find(mmu, pmap, va)) != NULL) { 1914 if (PTE_ISVALID(pte)) { 1915 m = PHYS_TO_VM_PAGE(PTE_PA(pte)); 1916 1917 mtx_lock_spin(&tlbivax_mutex); 1918 tlb_miss_lock(); 1919 1920 /* Handle modified pages. 
*/ 1921 if (PTE_ISMODIFIED(pte) && PTE_ISMANAGED(pte)) 1922 vm_page_dirty(m); 1923 1924 tlb0_flush_entry(va); 1925 pte->flags &= ~(PTE_UW | PTE_SW | PTE_MODIFIED); 1926 1927 tlb_miss_unlock(); 1928 mtx_unlock_spin(&tlbivax_mutex); 1929 } 1930 } 1931 } 1932 PMAP_UNLOCK(pmap); 1933 vm_page_unlock_queues(); 1934} 1935 1936/* 1937 * Clear the write and modified bits in each of the given page's mappings. 1938 */ 1939static void 1940mmu_booke_remove_write(mmu_t mmu, vm_page_t m) 1941{ 1942 pv_entry_t pv; 1943 pte_t *pte; 1944 1945 KASSERT((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) == 0, 1946 ("mmu_booke_remove_write: page %p is not managed", m)); 1947 1948 /* 1949 * If the page is not VPO_BUSY, then PG_WRITEABLE cannot be set by 1950 * another thread while the object is locked. Thus, if PG_WRITEABLE 1951 * is clear, no page table entries need updating. 1952 */ 1953 VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED); 1954 if ((m->oflags & VPO_BUSY) == 0 && 1955 (m->flags & PG_WRITEABLE) == 0) 1956 return; 1957 vm_page_lock_queues(); 1958 TAILQ_FOREACH(pv, &m->md.pv_list, pv_link) { 1959 PMAP_LOCK(pv->pv_pmap); 1960 if ((pte = pte_find(mmu, pv->pv_pmap, pv->pv_va)) != NULL) { 1961 if (PTE_ISVALID(pte)) { 1962 m = PHYS_TO_VM_PAGE(PTE_PA(pte)); 1963 1964 mtx_lock_spin(&tlbivax_mutex); 1965 tlb_miss_lock(); 1966 1967 /* Handle modified pages. */ 1968 if (PTE_ISMODIFIED(pte)) 1969 vm_page_dirty(m); 1970 1971 /* Flush mapping from TLB0. */ 1972 pte->flags &= ~(PTE_UW | PTE_SW | PTE_MODIFIED); 1973 1974 tlb_miss_unlock(); 1975 mtx_unlock_spin(&tlbivax_mutex); 1976 } 1977 } 1978 PMAP_UNLOCK(pv->pv_pmap); 1979 } 1980 vm_page_flag_clear(m, PG_WRITEABLE); 1981 vm_page_unlock_queues(); 1982} 1983 1984static void 1985mmu_booke_sync_icache(mmu_t mmu, pmap_t pm, vm_offset_t va, vm_size_t sz) 1986{ 1987 pte_t *pte; 1988 pmap_t pmap; 1989 vm_page_t m; 1990 vm_offset_t addr; 1991 vm_paddr_t pa; 1992 int active, valid; 1993 1994 va = trunc_page(va); 1995 sz = round_page(sz); 1996 1997 vm_page_lock_queues(); 1998 pmap = PCPU_GET(curpmap); 1999 active = (pm == kernel_pmap || pm == pmap) ? 1 : 0; 2000 while (sz > 0) { 2001 PMAP_LOCK(pm); 2002 pte = pte_find(mmu, pm, va); 2003 valid = (pte != NULL && PTE_ISVALID(pte)) ? 1 : 0; 2004 if (valid) 2005 pa = PTE_PA(pte); 2006 PMAP_UNLOCK(pm); 2007 if (valid) { 2008 if (!active) { 2009 /* Create a mapping in the active pmap. */ 2010 addr = 0; 2011 m = PHYS_TO_VM_PAGE(pa); 2012 PMAP_LOCK(pmap); 2013 pte_enter(mmu, pmap, m, addr, 2014 PTE_SR | PTE_VALID | PTE_UR); 2015 __syncicache((void *)addr, PAGE_SIZE); 2016 pte_remove(mmu, pmap, addr, PTBL_UNHOLD); 2017 PMAP_UNLOCK(pmap); 2018 } else 2019 __syncicache((void *)va, PAGE_SIZE); 2020 } 2021 va += PAGE_SIZE; 2022 sz -= PAGE_SIZE; 2023 } 2024 vm_page_unlock_queues(); 2025} 2026 2027/* 2028 * Atomically extract and hold the physical page with the given 2029 * pmap and virtual address pair if that mapping permits the given 2030 * protection. 
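 *
 * For example, a VM_PROT_WRITE request succeeds only if the PTE
 * carries the appropriate write bit (PTE_SW for the kernel pmap,
 * PTE_UW otherwise), while a read-only request succeeds for any
 * valid mapping.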
2031 */
2032static vm_page_t
2033mmu_booke_extract_and_hold(mmu_t mmu, pmap_t pmap, vm_offset_t va,
2034 vm_prot_t prot)
2035{
2036 pte_t *pte;
2037 vm_page_t m;
2038 uint32_t pte_wbit;
2039 vm_paddr_t pa;
2040
2041 m = NULL;
2042 pa = 0;
2043 PMAP_LOCK(pmap);
2044retry:
2045 pte = pte_find(mmu, pmap, va);
2046 if ((pte != NULL) && PTE_ISVALID(pte)) {
2047 if (pmap == kernel_pmap)
2048 pte_wbit = PTE_SW;
2049 else
2050 pte_wbit = PTE_UW;
2051
2052 if ((pte->flags & pte_wbit) || ((prot & VM_PROT_WRITE) == 0)) {
2053 if (vm_page_pa_tryrelock(pmap, PTE_PA(pte), &pa))
2054 goto retry;
2055 m = PHYS_TO_VM_PAGE(PTE_PA(pte));
2056 vm_page_hold(m);
2057 }
2058 }
2059
2060 PA_UNLOCK_COND(pa);
2061 PMAP_UNLOCK(pmap);
2062 return (m);
2063}
2064
2065/*
2066 * Initialize a vm_page's machine-dependent fields.
2067 */
2068static void
2069mmu_booke_page_init(mmu_t mmu, vm_page_t m)
2070{
2071
2072 TAILQ_INIT(&m->md.pv_list);
2073}
2074
2075/*
2076 * mmu_booke_zero_page_area zeros the specified hardware page by
2077 * mapping it into virtual memory and using bzero to clear
2078 * its contents.
2079 *
2080 * off and size must reside within a single page.
2081 */
2082static void
2083mmu_booke_zero_page_area(mmu_t mmu, vm_page_t m, int off, int size)
2084{
2085 vm_offset_t va;
2086
2087 /* XXX KASSERT off and size are within a single page? */
2088
2089 mtx_lock(&zero_page_mutex);
2090 va = zero_page_va;
2091
2092 mmu_booke_kenter(mmu, va, VM_PAGE_TO_PHYS(m));
2093 bzero((caddr_t)va + off, size);
2094 mmu_booke_kremove(mmu, va);
2095
2096 mtx_unlock(&zero_page_mutex);
2097}
2098
2099/*
2100 * mmu_booke_zero_page zeros the specified hardware page.
2101 */
2102static void
2103mmu_booke_zero_page(mmu_t mmu, vm_page_t m)
2104{
2105
2106 mmu_booke_zero_page_area(mmu, m, 0, PAGE_SIZE);
2107}
2108
2109/*
2110 * mmu_booke_copy_page copies the specified (machine independent) page by
2111 * mapping the page into virtual memory and using memcpy to copy the page,
2112 * one machine dependent page at a time.
2113 */
2114static void
2115mmu_booke_copy_page(mmu_t mmu, vm_page_t sm, vm_page_t dm)
2116{
2117 vm_offset_t sva, dva;
2118
2119 sva = copy_page_src_va;
2120 dva = copy_page_dst_va;
2121
2122 mtx_lock(&copy_page_mutex);
2123 mmu_booke_kenter(mmu, sva, VM_PAGE_TO_PHYS(sm));
2124 mmu_booke_kenter(mmu, dva, VM_PAGE_TO_PHYS(dm));
2125 memcpy((caddr_t)dva, (caddr_t)sva, PAGE_SIZE);
2126 mmu_booke_kremove(mmu, dva);
2127 mmu_booke_kremove(mmu, sva);
2128 mtx_unlock(&copy_page_mutex);
2129}
2130
2131/*
2132 * mmu_booke_zero_page_idle zeros the specified hardware page by mapping it
2133 * into virtual memory and using bzero to clear its contents. This is intended
2134 * to be called from the vm_pagezero process only and outside of Giant. No
2135 * lock is required.
2136 */
2137static void
2138mmu_booke_zero_page_idle(mmu_t mmu, vm_page_t m)
2139{
2140 vm_offset_t va;
2141
2142 va = zero_page_idle_va;
2143 mmu_booke_kenter(mmu, va, VM_PAGE_TO_PHYS(m));
2144 bzero((caddr_t)va, PAGE_SIZE);
2145 mmu_booke_kremove(mmu, va);
2146}
2147
2148/*
2149 * Return whether or not the specified physical page was modified
2150 * in any of the physical maps.
2151 */ 2152static boolean_t 2153mmu_booke_is_modified(mmu_t mmu, vm_page_t m) 2154{ 2155 pte_t *pte; 2156 pv_entry_t pv; 2157 boolean_t rv; 2158 2159 KASSERT((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) == 0, 2160 ("mmu_booke_is_modified: page %p is not managed", m)); 2161 rv = FALSE; 2162 2163 /* 2164 * If the page is not VPO_BUSY, then PG_WRITEABLE cannot be 2165 * concurrently set while the object is locked. Thus, if PG_WRITEABLE 2166 * is clear, no PTEs can be modified. 2167 */ 2168 VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED); 2169 if ((m->oflags & VPO_BUSY) == 0 && 2170 (m->flags & PG_WRITEABLE) == 0) 2171 return (rv); 2172 vm_page_lock_queues(); 2173 TAILQ_FOREACH(pv, &m->md.pv_list, pv_link) { 2174 PMAP_LOCK(pv->pv_pmap); 2175 if ((pte = pte_find(mmu, pv->pv_pmap, pv->pv_va)) != NULL && 2176 PTE_ISVALID(pte)) { 2177 if (PTE_ISMODIFIED(pte)) 2178 rv = TRUE; 2179 } 2180 PMAP_UNLOCK(pv->pv_pmap); 2181 if (rv) 2182 break; 2183 } 2184 vm_page_unlock_queues(); 2185 return (rv); 2186} 2187 2188/* 2189 * Return whether or not the specified virtual address is eligible 2190 * for prefault. 2191 */ 2192static boolean_t 2193mmu_booke_is_prefaultable(mmu_t mmu, pmap_t pmap, vm_offset_t addr) 2194{ 2195 2196 return (FALSE); 2197} 2198 2199/* 2200 * Return whether or not the specified physical page was referenced 2201 * in any physical maps. 2202 */ 2203static boolean_t 2204mmu_booke_is_referenced(mmu_t mmu, vm_page_t m) 2205{ 2206 pte_t *pte; 2207 pv_entry_t pv; 2208 boolean_t rv; 2209 2210 KASSERT((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) == 0, 2211 ("mmu_booke_is_referenced: page %p is not managed", m)); 2212 rv = FALSE; 2213 vm_page_lock_queues(); 2214 TAILQ_FOREACH(pv, &m->md.pv_list, pv_link) { 2215 PMAP_LOCK(pv->pv_pmap); 2216 if ((pte = pte_find(mmu, pv->pv_pmap, pv->pv_va)) != NULL && 2217 PTE_ISVALID(pte)) { 2218 if (PTE_ISREFERENCED(pte)) 2219 rv = TRUE; 2220 } 2221 PMAP_UNLOCK(pv->pv_pmap); 2222 if (rv) 2223 break; 2224 } 2225 vm_page_unlock_queues(); 2226 return (rv); 2227} 2228 2229/* 2230 * Clear the modify bits on the specified physical page. 2231 */ 2232static void 2233mmu_booke_clear_modify(mmu_t mmu, vm_page_t m) 2234{ 2235 pte_t *pte; 2236 pv_entry_t pv; 2237 2238 KASSERT((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) == 0, 2239 ("mmu_booke_clear_modify: page %p is not managed", m)); 2240 VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED); 2241 KASSERT((m->oflags & VPO_BUSY) == 0, 2242 ("mmu_booke_clear_modify: page %p is busy", m)); 2243 2244 /* 2245 * If the page is not PG_WRITEABLE, then no PTEs can be modified. 2246 * If the object containing the page is locked and the page is not 2247 * VPO_BUSY, then PG_WRITEABLE cannot be concurrently set. 2248 */ 2249 if ((m->flags & PG_WRITEABLE) == 0) 2250 return; 2251 vm_page_lock_queues(); 2252 TAILQ_FOREACH(pv, &m->md.pv_list, pv_link) { 2253 PMAP_LOCK(pv->pv_pmap); 2254 if ((pte = pte_find(mmu, pv->pv_pmap, pv->pv_va)) != NULL && 2255 PTE_ISVALID(pte)) { 2256 mtx_lock_spin(&tlbivax_mutex); 2257 tlb_miss_lock(); 2258 2259 if (pte->flags & (PTE_SW | PTE_UW | PTE_MODIFIED)) { 2260 tlb0_flush_entry(pv->pv_va); 2261 pte->flags &= ~(PTE_SW | PTE_UW | PTE_MODIFIED | 2262 PTE_REFERENCED); 2263 } 2264 2265 tlb_miss_unlock(); 2266 mtx_unlock_spin(&tlbivax_mutex); 2267 } 2268 PMAP_UNLOCK(pv->pv_pmap); 2269 } 2270 vm_page_unlock_queues(); 2271} 2272 2273/* 2274 * Return a count of reference bits for a page, clearing those bits. 
2275 * It is not necessary for every reference bit to be cleared, but it 2276 * is necessary that 0 only be returned when there are truly no 2277 * reference bits set. 2278 * 2279 * XXX: The exact number of bits to check and clear is a matter that 2280 * should be tested and standardized at some point in the future for 2281 * optimal aging of shared pages. 2282 */ 2283static int 2284mmu_booke_ts_referenced(mmu_t mmu, vm_page_t m) 2285{ 2286 pte_t *pte; 2287 pv_entry_t pv; 2288 int count; 2289 2290 KASSERT((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) == 0, 2291 ("mmu_booke_ts_referenced: page %p is not managed", m)); 2292 count = 0; 2293 vm_page_lock_queues(); 2294 TAILQ_FOREACH(pv, &m->md.pv_list, pv_link) { 2295 PMAP_LOCK(pv->pv_pmap); 2296 if ((pte = pte_find(mmu, pv->pv_pmap, pv->pv_va)) != NULL && 2297 PTE_ISVALID(pte)) { 2298 if (PTE_ISREFERENCED(pte)) { 2299 mtx_lock_spin(&tlbivax_mutex); 2300 tlb_miss_lock(); 2301 2302 tlb0_flush_entry(pv->pv_va); 2303 pte->flags &= ~PTE_REFERENCED; 2304 2305 tlb_miss_unlock(); 2306 mtx_unlock_spin(&tlbivax_mutex); 2307 2308 if (++count > 4) { 2309 PMAP_UNLOCK(pv->pv_pmap); 2310 break; 2311 } 2312 } 2313 } 2314 PMAP_UNLOCK(pv->pv_pmap); 2315 } 2316 vm_page_unlock_queues(); 2317 return (count); 2318} 2319 2320/* 2321 * Clear the reference bit on the specified physical page. 2322 */ 2323static void 2324mmu_booke_clear_reference(mmu_t mmu, vm_page_t m) 2325{ 2326 pte_t *pte; 2327 pv_entry_t pv; 2328 2329 KASSERT((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) == 0, 2330 ("mmu_booke_clear_reference: page %p is not managed", m)); 2331 vm_page_lock_queues(); 2332 TAILQ_FOREACH(pv, &m->md.pv_list, pv_link) { 2333 PMAP_LOCK(pv->pv_pmap); 2334 if ((pte = pte_find(mmu, pv->pv_pmap, pv->pv_va)) != NULL && 2335 PTE_ISVALID(pte)) { 2336 if (PTE_ISREFERENCED(pte)) { 2337 mtx_lock_spin(&tlbivax_mutex); 2338 tlb_miss_lock(); 2339 2340 tlb0_flush_entry(pv->pv_va); 2341 pte->flags &= ~PTE_REFERENCED; 2342 2343 tlb_miss_unlock(); 2344 mtx_unlock_spin(&tlbivax_mutex); 2345 } 2346 } 2347 PMAP_UNLOCK(pv->pv_pmap); 2348 } 2349 vm_page_unlock_queues(); 2350} 2351 2352/* 2353 * Change wiring attribute for a map/virtual-address pair. 2354 */ 2355static void 2356mmu_booke_change_wiring(mmu_t mmu, pmap_t pmap, vm_offset_t va, boolean_t wired) 2357{ 2358 pte_t *pte; 2359 2360 PMAP_LOCK(pmap); 2361 if ((pte = pte_find(mmu, pmap, va)) != NULL) { 2362 if (wired) { 2363 if (!PTE_ISWIRED(pte)) { 2364 pte->flags |= PTE_WIRED; 2365 pmap->pm_stats.wired_count++; 2366 } 2367 } else { 2368 if (PTE_ISWIRED(pte)) { 2369 pte->flags &= ~PTE_WIRED; 2370 pmap->pm_stats.wired_count--; 2371 } 2372 } 2373 } 2374 PMAP_UNLOCK(pmap); 2375} 2376 2377/* 2378 * Return true if the pmap's pv is one of the first 16 pvs linked to from this 2379 * page. This count may be changed upwards or downwards in the future; it is 2380 * only necessary that true be returned for a small subset of pmaps for proper 2381 * page aging. 
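 *
 * In other words, the loop below gives up after examining 16 pv
 * entries, so a FALSE return does not prove that the pmap has no
 * mapping for the page.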
2382 */ 2383static boolean_t 2384mmu_booke_page_exists_quick(mmu_t mmu, pmap_t pmap, vm_page_t m) 2385{ 2386 pv_entry_t pv; 2387 int loops; 2388 boolean_t rv; 2389 2390 KASSERT((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) == 0, 2391 ("mmu_booke_page_exists_quick: page %p is not managed", m)); 2392 loops = 0; 2393 rv = FALSE; 2394 vm_page_lock_queues(); 2395 TAILQ_FOREACH(pv, &m->md.pv_list, pv_link) { 2396 if (pv->pv_pmap == pmap) { 2397 rv = TRUE; 2398 break; 2399 } 2400 if (++loops >= 16) 2401 break; 2402 } 2403 vm_page_unlock_queues(); 2404 return (rv); 2405} 2406 2407/* 2408 * Return the number of managed mappings to the given physical page that are 2409 * wired. 2410 */ 2411static int 2412mmu_booke_page_wired_mappings(mmu_t mmu, vm_page_t m) 2413{ 2414 pv_entry_t pv; 2415 pte_t *pte; 2416 int count = 0; 2417 2418 if ((m->flags & PG_FICTITIOUS) != 0) 2419 return (count); 2420 vm_page_lock_queues(); 2421 TAILQ_FOREACH(pv, &m->md.pv_list, pv_link) { 2422 PMAP_LOCK(pv->pv_pmap); 2423 if ((pte = pte_find(mmu, pv->pv_pmap, pv->pv_va)) != NULL) 2424 if (PTE_ISVALID(pte) && PTE_ISWIRED(pte)) 2425 count++; 2426 PMAP_UNLOCK(pv->pv_pmap); 2427 } 2428 vm_page_unlock_queues(); 2429 return (count); 2430} 2431 2432static int 2433mmu_booke_dev_direct_mapped(mmu_t mmu, vm_offset_t pa, vm_size_t size) 2434{ 2435 int i; 2436 vm_offset_t va; 2437 2438 /* 2439 * This currently does not work for entries that 2440 * overlap TLB1 entries. 2441 */ 2442 for (i = 0; i < tlb1_idx; i ++) { 2443 if (tlb1_iomapped(i, pa, size, &va) == 0) 2444 return (0); 2445 } 2446 2447 return (EFAULT); 2448} 2449 2450vm_offset_t 2451mmu_booke_dumpsys_map(mmu_t mmu, struct pmap_md *md, vm_size_t ofs, 2452 vm_size_t *sz) 2453{ 2454 vm_paddr_t pa, ppa; 2455 vm_offset_t va; 2456 vm_size_t gran; 2457 2458 /* Raw physical memory dumps don't have a virtual address. */ 2459 if (md->md_vaddr == ~0UL) { 2460 /* We always map a 256MB page at 256M. */ 2461 gran = 256 * 1024 * 1024; 2462 pa = md->md_paddr + ofs; 2463 ppa = pa & ~(gran - 1); 2464 ofs = pa - ppa; 2465 va = gran; 2466 tlb1_set_entry(va, ppa, gran, _TLB_ENTRY_IO); 2467 if (*sz > (gran - ofs)) 2468 *sz = gran - ofs; 2469 return (va + ofs); 2470 } 2471 2472 /* Minidumps are based on virtual memory addresses. */ 2473 va = md->md_vaddr + ofs; 2474 if (va >= kernstart + kernsize) { 2475 gran = PAGE_SIZE - (va & PAGE_MASK); 2476 if (*sz > gran) 2477 *sz = gran; 2478 } 2479 return (va); 2480} 2481 2482void 2483mmu_booke_dumpsys_unmap(mmu_t mmu, struct pmap_md *md, vm_size_t ofs, 2484 vm_offset_t va) 2485{ 2486 2487 /* Raw physical memory dumps don't have a virtual address. */ 2488 if (md->md_vaddr == ~0UL) { 2489 tlb1_idx--; 2490 tlb1[tlb1_idx].mas1 = 0; 2491 tlb1[tlb1_idx].mas2 = 0; 2492 tlb1[tlb1_idx].mas3 = 0; 2493 tlb1_write_entry(tlb1_idx); 2494 return; 2495 } 2496 2497 /* Minidumps are based on virtual memory addresses. */ 2498 /* Nothing to do... */ 2499} 2500 2501struct pmap_md * 2502mmu_booke_scan_md(mmu_t mmu, struct pmap_md *prev) 2503{ 2504 static struct pmap_md md; 2505 pte_t *pte; 2506 vm_offset_t va; 2507 2508 if (dumpsys_minidump) { 2509 md.md_paddr = ~0UL; /* Minidumps use virtual addresses. */ 2510 if (prev == NULL) { 2511 /* 1st: kernel .data and .bss. */ 2512 md.md_index = 1; 2513 md.md_vaddr = trunc_page((uintptr_t)_etext); 2514 md.md_size = round_page((uintptr_t)_end) - md.md_vaddr; 2515 return (&md); 2516 } 2517 switch (prev->md_index) { 2518 case 1: 2519 /* 2nd: msgbuf and tables (see pmap_bootstrap()). 
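 * The chunk spans [data_start, data_end), the region set
 * aside for the message buffer and tables during bootstrap.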
 */
2520 md.md_index = 2;
2521 md.md_vaddr = data_start;
2522 md.md_size = data_end - data_start;
2523 break;
2524 case 2:
2525 /* 3rd: kernel VM. */
2526 va = prev->md_vaddr + prev->md_size;
2527 /* Find start of next chunk (from va). */
2528 while (va < virtual_end) {
2529 /* Don't dump the buffer cache. */
2530 if (va >= kmi.buffer_sva &&
2531 va < kmi.buffer_eva) {
2532 va = kmi.buffer_eva;
2533 continue;
2534 }
2535 pte = pte_find(mmu, kernel_pmap, va);
2536 if (pte != NULL && PTE_ISVALID(pte))
2537 break;
2538 va += PAGE_SIZE;
2539 }
2540 if (va < virtual_end) {
2541 md.md_vaddr = va;
2542 va += PAGE_SIZE;
2543 /* Find last page in chunk. */
2544 while (va < virtual_end) {
2545 /* Don't run into the buffer cache. */
2546 if (va == kmi.buffer_sva)
2547 break;
2548 pte = pte_find(mmu, kernel_pmap, va);
2549 if (pte == NULL || !PTE_ISVALID(pte))
2550 break;
2551 va += PAGE_SIZE;
2552 }
2553 md.md_size = va - md.md_vaddr;
2554 break;
2555 }
2556 md.md_index = 3;
2557 /* FALLTHROUGH */
2558 default:
2559 return (NULL);
2560 }
2561 } else { /* raw physical memory dumps */
2562 mem_regions(&physmem_regions, &physmem_regions_sz,
2563 &availmem_regions, &availmem_regions_sz);
2564
2565 if (prev == NULL) {
2566 /* First physical chunk. */
2567 md.md_paddr = physmem_regions[0].mr_start;
2568 md.md_size = physmem_regions[0].mr_size;
2569 md.md_vaddr = ~0UL;
2570 md.md_index = 1;
2571 } else if (md.md_index < physmem_regions_sz) {
2572 md.md_paddr = physmem_regions[md.md_index].mr_start;
2573 md.md_size = physmem_regions[md.md_index].mr_size;
2574 md.md_vaddr = ~0UL;
2575 md.md_index++;
2576 } else {
2577 /* There's no next physical chunk. */
2578 return (NULL);
2579 }
2580 }
2581
2582 return (&md);
2583}
2584
2585/*
2586 * Map a set of physical memory pages into the kernel virtual address space.
2587 * Return a pointer to where it is mapped. This routine is intended to be used
2588 * for mapping device memory, NOT real memory.
2589 */
2590static void *
2591mmu_booke_mapdev(mmu_t mmu, vm_offset_t pa, vm_size_t size)
2592{
2593 void *res;
2594 uintptr_t va;
2595 vm_size_t sz;
2596
2597 va = (pa >= 0x80000000) ? pa : (0xe2000000 + pa);
2598 res = (void *)va;
2599
2600 do {
2601 sz = 1 << (ilog2(size) & ~1);
2602 if (bootverbose)
2603 printf("Wiring VA=%x to PA=%x (size=%x), "
2604 "using TLB1[%d]\n", va, pa, sz, tlb1_idx);
2605 tlb1_set_entry(va, pa, sz, _TLB_ENTRY_IO);
2606 size -= sz;
2607 pa += sz;
2608 va += sz;
2609 } while (size > 0);
2610
2611 return (res);
2612}
2613
2614/*
2615 * 'Unmap' a range mapped by mmu_booke_mapdev().
2616 */
2617static void
2618mmu_booke_unmapdev(mmu_t mmu, vm_offset_t va, vm_size_t size)
2619{
2620 vm_offset_t base, offset;
2621
2622 /*
2623 * Unmap only if this is inside kernel virtual space.
2624 */
2625 if ((va >= VM_MIN_KERNEL_ADDRESS) && (va <= VM_MAX_KERNEL_ADDRESS)) {
2626 base = trunc_page(va);
2627 offset = va & PAGE_MASK;
2628 size = roundup(offset + size, PAGE_SIZE);
2629 kmem_free(kernel_map, base, size);
2630 }
2631}
2632
2633/*
2634 * mmu_booke_object_init_pt preloads the ptes for a given object into the
2635 * specified pmap. This eliminates the blast of soft faults on process startup
2636 * and immediately after an mmap.
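 *
 * The Book-E implementation below is effectively a no-op: it only
 * asserts that a device-backed (OBJT_DEVICE or OBJT_SG) object was
 * passed in and preloads nothing.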
2637 */ 2638static void 2639mmu_booke_object_init_pt(mmu_t mmu, pmap_t pmap, vm_offset_t addr, 2640 vm_object_t object, vm_pindex_t pindex, vm_size_t size) 2641{ 2642 2643 VM_OBJECT_LOCK_ASSERT(object, MA_OWNED); 2644 KASSERT(object->type == OBJT_DEVICE || object->type == OBJT_SG, 2645 ("mmu_booke_object_init_pt: non-device object")); 2646} 2647 2648/* 2649 * Perform the pmap work for mincore. 2650 */ 2651static int 2652mmu_booke_mincore(mmu_t mmu, pmap_t pmap, vm_offset_t addr, 2653 vm_paddr_t *locked_pa) 2654{ 2655 2656 TODO; 2657 return (0); 2658} 2659 2660/**************************************************************************/ 2661/* TID handling */ 2662/**************************************************************************/ 2663 2664/* 2665 * Allocate a TID. If necessary, steal one from someone else. 2666 * The new TID is flushed from the TLB before returning. 2667 */ 2668static tlbtid_t 2669tid_alloc(pmap_t pmap) 2670{ 2671 tlbtid_t tid; 2672 int thiscpu; 2673 2674 KASSERT((pmap != kernel_pmap), ("tid_alloc: kernel pmap")); 2675 2676 CTR2(KTR_PMAP, "%s: s (pmap = %p)", __func__, pmap); 2677 2678 thiscpu = PCPU_GET(cpuid); 2679 2680 tid = PCPU_GET(tid_next); 2681 if (tid > TID_MAX) 2682 tid = TID_MIN; 2683 PCPU_SET(tid_next, tid + 1); 2684 2685 /* If we are stealing TID then clear the relevant pmap's field */ 2686 if (tidbusy[thiscpu][tid] != NULL) { 2687 2688 CTR2(KTR_PMAP, "%s: warning: stealing tid %d", __func__, tid); 2689 2690 tidbusy[thiscpu][tid]->pm_tid[thiscpu] = TID_NONE; 2691 2692 /* Flush all entries from TLB0 matching this TID. */ 2693 tid_flush(tid); 2694 } 2695 2696 tidbusy[thiscpu][tid] = pmap; 2697 pmap->pm_tid[thiscpu] = tid; 2698 __asm __volatile("msync; isync"); 2699 2700 CTR3(KTR_PMAP, "%s: e (%02d next = %02d)", __func__, tid, 2701 PCPU_GET(tid_next)); 2702 2703 return (tid); 2704} 2705 2706/**************************************************************************/ 2707/* TLB0 handling */ 2708/**************************************************************************/ 2709 2710static void 2711tlb_print_entry(int i, uint32_t mas1, uint32_t mas2, uint32_t mas3, 2712 uint32_t mas7) 2713{ 2714 int as; 2715 char desc[3]; 2716 tlbtid_t tid; 2717 vm_size_t size; 2718 unsigned int tsize; 2719 2720 desc[2] = '\0'; 2721 if (mas1 & MAS1_VALID) 2722 desc[0] = 'V'; 2723 else 2724 desc[0] = ' '; 2725 2726 if (mas1 & MAS1_IPROT) 2727 desc[1] = 'P'; 2728 else 2729 desc[1] = ' '; 2730 2731 as = (mas1 & MAS1_TS_MASK) ? 1 : 0; 2732 tid = MAS1_GETTID(mas1); 2733 2734 tsize = (mas1 & MAS1_TSIZE_MASK) >> MAS1_TSIZE_SHIFT; 2735 size = 0; 2736 if (tsize) 2737 size = tsize2size(tsize); 2738 2739 debugf("%3d: (%s) [AS=%d] " 2740 "sz = 0x%08x tsz = %d tid = %d mas1 = 0x%08x " 2741 "mas2(va) = 0x%08x mas3(pa) = 0x%08x mas7 = 0x%08x\n", 2742 i, desc, as, size, tsize, tid, mas1, mas2, mas3, mas7); 2743} 2744 2745/* Convert TLB0 va and way number to tlb0[] table index. */ 2746static inline unsigned int 2747tlb0_tableidx(vm_offset_t va, unsigned int way) 2748{ 2749 unsigned int idx; 2750 2751 idx = (way * TLB0_ENTRIES_PER_WAY); 2752 idx += (va & MAS2_TLB0_ENTRY_IDX_MASK) >> MAS2_TLB0_ENTRY_IDX_SHIFT; 2753 return (idx); 2754} 2755 2756/* 2757 * Invalidate TLB0 entry. 
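 *
 * The tlbivax below is broadcast to the other cores, which is why the
 * caller must hold tlbivax_mutex (asserted here) and why the sequence
 * finishes with tlbsync/msync before the invalidation is considered
 * complete.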
2758 */ 2759static inline void 2760tlb0_flush_entry(vm_offset_t va) 2761{ 2762 2763 CTR2(KTR_PMAP, "%s: s va=0x%08x", __func__, va); 2764 2765 mtx_assert(&tlbivax_mutex, MA_OWNED); 2766 2767 __asm __volatile("tlbivax 0, %0" :: "r"(va & MAS2_EPN_MASK)); 2768 __asm __volatile("isync; msync"); 2769 __asm __volatile("tlbsync; msync"); 2770 2771 CTR1(KTR_PMAP, "%s: e", __func__); 2772} 2773 2774/* Print out contents of the MAS registers for each TLB0 entry */ 2775void 2776tlb0_print_tlbentries(void) 2777{ 2778 uint32_t mas0, mas1, mas2, mas3, mas7; 2779 int entryidx, way, idx; 2780 2781 debugf("TLB0 entries:\n"); 2782 for (way = 0; way < TLB0_WAYS; way ++) 2783 for (entryidx = 0; entryidx < TLB0_ENTRIES_PER_WAY; entryidx++) { 2784 2785 mas0 = MAS0_TLBSEL(0) | MAS0_ESEL(way); 2786 mtspr(SPR_MAS0, mas0); 2787 __asm __volatile("isync"); 2788 2789 mas2 = entryidx << MAS2_TLB0_ENTRY_IDX_SHIFT; 2790 mtspr(SPR_MAS2, mas2); 2791 2792 __asm __volatile("isync; tlbre"); 2793 2794 mas1 = mfspr(SPR_MAS1); 2795 mas2 = mfspr(SPR_MAS2); 2796 mas3 = mfspr(SPR_MAS3); 2797 mas7 = mfspr(SPR_MAS7); 2798 2799 idx = tlb0_tableidx(mas2, way); 2800 tlb_print_entry(idx, mas1, mas2, mas3, mas7); 2801 } 2802} 2803 2804/**************************************************************************/ 2805/* TLB1 handling */ 2806/**************************************************************************/ 2807 2808/* 2809 * TLB1 mapping notes: 2810 * 2811 * TLB1[0] CCSRBAR 2812 * TLB1[1] Kernel text and data. 2813 * TLB1[2-15] Additional kernel text and data mappings (if required), PCI 2814 * windows, other devices mappings. 2815 */ 2816 2817/* 2818 * Write given entry to TLB1 hardware. 2819 * Use 32 bit pa, clear 4 high-order bits of RPN (mas7). 2820 */ 2821static void 2822tlb1_write_entry(unsigned int idx) 2823{ 2824 uint32_t mas0, mas7; 2825 2826 //debugf("tlb1_write_entry: s\n"); 2827 2828 /* Clear high order RPN bits */ 2829 mas7 = 0; 2830 2831 /* Select entry */ 2832 mas0 = MAS0_TLBSEL(1) | MAS0_ESEL(idx); 2833 //debugf("tlb1_write_entry: mas0 = 0x%08x\n", mas0); 2834 2835 mtspr(SPR_MAS0, mas0); 2836 __asm __volatile("isync"); 2837 mtspr(SPR_MAS1, tlb1[idx].mas1); 2838 __asm __volatile("isync"); 2839 mtspr(SPR_MAS2, tlb1[idx].mas2); 2840 __asm __volatile("isync"); 2841 mtspr(SPR_MAS3, tlb1[idx].mas3); 2842 __asm __volatile("isync"); 2843 mtspr(SPR_MAS7, mas7); 2844 __asm __volatile("isync; tlbwe; isync; msync"); 2845 2846 //debugf("tlb1_write_entry: e\n"); 2847} 2848 2849/* 2850 * Return the largest uint value log such that 2^log <= num. 2851 */ 2852static unsigned int 2853ilog2(unsigned int num) 2854{ 2855 int lz; 2856 2857 __asm ("cntlzw %0, %1" : "=r" (lz) : "r" (num)); 2858 return (31 - lz); 2859} 2860 2861/* 2862 * Convert TLB TSIZE value to mapped region size. 2863 */ 2864static vm_size_t 2865tsize2size(unsigned int tsize) 2866{ 2867 2868 /* 2869 * size = 4^tsize KB 2870 * size = 4^tsize * 2^10 = 2^(2 * tsize - 10) 2871 */ 2872 2873 return ((1 << (2 * tsize)) * 1024); 2874} 2875 2876/* 2877 * Convert region size (must be power of 4) to TLB TSIZE value. 2878 */ 2879static unsigned int 2880size2tsize(vm_size_t size) 2881{ 2882 2883 return (ilog2(size) / 2 - 5); 2884} 2885 2886/* 2887 * Register permanent kernel mapping in TLB1. 2888 * 2889 * Entries are created starting from index 0 (current free entry is 2890 * kept in tlb1_idx) and are not supposed to be invalidated. 
2891 */ 2892static int 2893tlb1_set_entry(vm_offset_t va, vm_offset_t pa, vm_size_t size, 2894 uint32_t flags) 2895{ 2896 uint32_t ts, tid; 2897 int tsize; 2898 2899 if (tlb1_idx >= TLB1_ENTRIES) { 2900 printf("tlb1_set_entry: TLB1 full!\n"); 2901 return (-1); 2902 } 2903 2904 /* Convert size to TSIZE */ 2905 tsize = size2tsize(size); 2906 2907 tid = (TID_KERNEL << MAS1_TID_SHIFT) & MAS1_TID_MASK; 2908 /* XXX TS is hard coded to 0 for now as we only use single address space */ 2909 ts = (0 << MAS1_TS_SHIFT) & MAS1_TS_MASK; 2910 2911 /* XXX LOCK tlb1[] */ 2912 2913 tlb1[tlb1_idx].mas1 = MAS1_VALID | MAS1_IPROT | ts | tid; 2914 tlb1[tlb1_idx].mas1 |= ((tsize << MAS1_TSIZE_SHIFT) & MAS1_TSIZE_MASK); 2915 tlb1[tlb1_idx].mas2 = (va & MAS2_EPN_MASK) | flags; 2916 2917 /* Set supervisor RWX permission bits */ 2918 tlb1[tlb1_idx].mas3 = (pa & MAS3_RPN) | MAS3_SR | MAS3_SW | MAS3_SX; 2919 2920 tlb1_write_entry(tlb1_idx++); 2921 2922 /* XXX UNLOCK tlb1[] */ 2923 2924 /* 2925 * XXX in general TLB1 updates should be propagated between CPUs, 2926 * since current design assumes to have the same TLB1 set-up on all 2927 * cores. 2928 */ 2929 return (0); 2930} 2931 2932static int 2933tlb1_entry_size_cmp(const void *a, const void *b) 2934{ 2935 const vm_size_t *sza; 2936 const vm_size_t *szb; 2937 2938 sza = a; 2939 szb = b; 2940 if (*sza > *szb) 2941 return (-1); 2942 else if (*sza < *szb) 2943 return (1); 2944 else 2945 return (0); 2946} 2947 2948/* 2949 * Map in contiguous RAM region into the TLB1 using maximum of 2950 * KERNEL_REGION_MAX_TLB_ENTRIES entries. 2951 * 2952 * If necessary round up last entry size and return total size 2953 * used by all allocated entries. 2954 */ 2955vm_size_t 2956tlb1_mapin_region(vm_offset_t va, vm_offset_t pa, vm_size_t size) 2957{ 2958 vm_size_t entry_size[KERNEL_REGION_MAX_TLB_ENTRIES]; 2959 vm_size_t mapped_size, sz, esz; 2960 unsigned int log; 2961 int i; 2962 2963 CTR4(KTR_PMAP, "%s: region size = 0x%08x va = 0x%08x pa = 0x%08x", 2964 __func__, size, va, pa); 2965 2966 mapped_size = 0; 2967 sz = size; 2968 memset(entry_size, 0, sizeof(entry_size)); 2969 2970 /* Calculate entry sizes. */ 2971 for (i = 0; i < KERNEL_REGION_MAX_TLB_ENTRIES && sz > 0; i++) { 2972 2973 /* Largest region that is power of 4 and fits within size */ 2974 log = ilog2(sz) / 2; 2975 esz = 1 << (2 * log); 2976 2977 /* If this is last entry cover remaining size. */ 2978 if (i == KERNEL_REGION_MAX_TLB_ENTRIES - 1) { 2979 while (esz < sz) 2980 esz = esz << 2; 2981 } 2982 2983 entry_size[i] = esz; 2984 mapped_size += esz; 2985 if (esz < sz) 2986 sz -= esz; 2987 else 2988 sz = 0; 2989 } 2990 2991 /* Sort entry sizes, required to get proper entry address alignment. */ 2992 qsort(entry_size, KERNEL_REGION_MAX_TLB_ENTRIES, 2993 sizeof(vm_size_t), tlb1_entry_size_cmp); 2994 2995 /* Load TLB1 entries. */ 2996 for (i = 0; i < KERNEL_REGION_MAX_TLB_ENTRIES; i++) { 2997 esz = entry_size[i]; 2998 if (!esz) 2999 break; 3000 3001 CTR5(KTR_PMAP, "%s: entry %d: sz = 0x%08x (va = 0x%08x " 3002 "pa = 0x%08x)", __func__, tlb1_idx, esz, va, pa); 3003 3004 tlb1_set_entry(va, pa, esz, _TLB_ENTRY_MEM); 3005 3006 va += esz; 3007 pa += esz; 3008 } 3009 3010 CTR3(KTR_PMAP, "%s: mapped size 0x%08x (wasted space 0x%08x)", 3011 __func__, mapped_size, mapped_size - size); 3012 3013 return (mapped_size); 3014} 3015 3016/* 3017 * TLB1 initialization routine, to be called after the very first 3018 * assembler level setup done in locore.S. 
3019 */ 3020void 3021tlb1_init(vm_offset_t ccsrbar) 3022{ 3023 uint32_t mas0; 3024 3025 /* TLB1[1] is used to map the kernel. Save that entry. */ 3026 mas0 = MAS0_TLBSEL(1) | MAS0_ESEL(1); 3027 mtspr(SPR_MAS0, mas0); 3028 __asm __volatile("isync; tlbre"); 3029 3030 tlb1[1].mas1 = mfspr(SPR_MAS1); 3031 tlb1[1].mas2 = mfspr(SPR_MAS2); 3032 tlb1[1].mas3 = mfspr(SPR_MAS3); 3033 3034 /* Map in CCSRBAR in TLB1[0] */ 3035 tlb1_idx = 0; 3036 tlb1_set_entry(CCSRBAR_VA, ccsrbar, CCSRBAR_SIZE, _TLB_ENTRY_IO); 3037 /* 3038 * Set the next available TLB1 entry index. Note TLB[1] is reserved 3039 * for initial mapping of kernel text+data, which was set early in 3040 * locore, we need to skip this [busy] entry. 3041 */ 3042 tlb1_idx = 2; 3043 3044 /* Setup TLB miss defaults */ 3045 set_mas4_defaults(); 3046} 3047 3048/* 3049 * Setup MAS4 defaults. 3050 * These values are loaded to MAS0-2 on a TLB miss. 3051 */ 3052static void 3053set_mas4_defaults(void) 3054{ 3055 uint32_t mas4; 3056 3057 /* Defaults: TLB0, PID0, TSIZED=4K */ 3058 mas4 = MAS4_TLBSELD0; 3059 mas4 |= (TLB_SIZE_4K << MAS4_TSIZED_SHIFT) & MAS4_TSIZED_MASK; 3060#ifdef SMP 3061 mas4 |= MAS4_MD; 3062#endif 3063 mtspr(SPR_MAS4, mas4); 3064 __asm __volatile("isync"); 3065} 3066 3067/* 3068 * Print out contents of the MAS registers for each TLB1 entry 3069 */ 3070void 3071tlb1_print_tlbentries(void) 3072{ 3073 uint32_t mas0, mas1, mas2, mas3, mas7; 3074 int i; 3075 3076 debugf("TLB1 entries:\n"); 3077 for (i = 0; i < TLB1_ENTRIES; i++) { 3078 3079 mas0 = MAS0_TLBSEL(1) | MAS0_ESEL(i); 3080 mtspr(SPR_MAS0, mas0); 3081 3082 __asm __volatile("isync; tlbre"); 3083 3084 mas1 = mfspr(SPR_MAS1); 3085 mas2 = mfspr(SPR_MAS2); 3086 mas3 = mfspr(SPR_MAS3); 3087 mas7 = mfspr(SPR_MAS7); 3088 3089 tlb_print_entry(i, mas1, mas2, mas3, mas7); 3090 } 3091} 3092 3093/* 3094 * Print out contents of the in-ram tlb1 table. 3095 */ 3096void 3097tlb1_print_entries(void) 3098{ 3099 int i; 3100 3101 debugf("tlb1[] table entries:\n"); 3102 for (i = 0; i < TLB1_ENTRIES; i++) 3103 tlb_print_entry(i, tlb1[i].mas1, tlb1[i].mas2, tlb1[i].mas3, 0); 3104} 3105 3106/* 3107 * Return 0 if the physical IO range is encompassed by one of the 3108 * the TLB1 entries, otherwise return related error code. 3109 */ 3110static int 3111tlb1_iomapped(int i, vm_paddr_t pa, vm_size_t size, vm_offset_t *va) 3112{ 3113 uint32_t prot; 3114 vm_paddr_t pa_start; 3115 vm_paddr_t pa_end; 3116 unsigned int entry_tsize; 3117 vm_size_t entry_size; 3118 3119 *va = (vm_offset_t)NULL; 3120 3121 /* Skip invalid entries */ 3122 if (!(tlb1[i].mas1 & MAS1_VALID)) 3123 return (EINVAL); 3124 3125 /* 3126 * The entry must be cache-inhibited, guarded, and r/w 3127 * so it can function as an i/o page 3128 */ 3129 prot = tlb1[i].mas2 & (MAS2_I | MAS2_G); 3130 if (prot != (MAS2_I | MAS2_G)) 3131 return (EPERM); 3132 3133 prot = tlb1[i].mas3 & (MAS3_SR | MAS3_SW); 3134 if (prot != (MAS3_SR | MAS3_SW)) 3135 return (EPERM); 3136 3137 /* The address should be within the entry range. */ 3138 entry_tsize = (tlb1[i].mas1 & MAS1_TSIZE_MASK) >> MAS1_TSIZE_SHIFT; 3139 KASSERT((entry_tsize), ("tlb1_iomapped: invalid entry tsize")); 3140 3141 entry_size = tsize2size(entry_tsize); 3142 pa_start = tlb1[i].mas3 & MAS3_RPN; 3143 pa_end = pa_start + entry_size - 1; 3144 3145 if ((pa < pa_start) || ((pa + size) > pa_end)) 3146 return (ERANGE); 3147 3148 /* Return virtual address of this mapping. */ 3149 *va = (tlb1[i].mas2 & MAS2_EPN_MASK) + (pa - pa_start); 3150 return (0); 3151} 3152