1/*- 2 * Copyright (C) 2007-2009 Semihalf, Rafal Jaworowski <raj@semihalf.com> 3 * Copyright (C) 2006 Semihalf, Marian Balakowicz <m8@semihalf.com> 4 * All rights reserved. 5 * 6 * Redistribution and use in source and binary forms, with or without 7 * modification, are permitted provided that the following conditions 8 * are met: 9 * 1. Redistributions of source code must retain the above copyright 10 * notice, this list of conditions and the following disclaimer. 11 * 2. Redistributions in binary form must reproduce the above copyright 12 * notice, this list of conditions and the following disclaimer in the 13 * documentation and/or other materials provided with the distribution. 14 * 15 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR 16 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES 17 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN 18 * NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 19 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED 20 * TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR 21 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF 22 * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING 23 * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS 24 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 25 * 26 * Some hw specific parts of this pmap were derived or influenced 27 * by NetBSD's ibm4xx pmap module. More generic code is shared with 28 * a few other pmap modules from the FreeBSD tree. 29 */ 30 31 /* 32 * VM layout notes: 33 * 34 * Kernel and user threads run within one common virtual address space 35 * defined by AS=0. 36 * 37 * Virtual address space layout: 38 * ----------------------------- 39 * 0x0000_0000 - 0xafff_ffff : user process 40 * 0xb000_0000 - 0xbfff_ffff : pmap_mapdev()-ed area (PCI/PCIE etc.) 41 * 0xc000_0000 - 0xc0ff_ffff : kernel reserved 42 * 0xc000_0000 - data_end : kernel code+data, env, metadata etc. 43 * 0xc100_0000 - 0xfeef_ffff : KVA 44 * 0xc100_0000 - 0xc100_3fff : reserved for page zero/copy 45 * 0xc100_4000 - 0xc200_3fff : reserved for ptbl bufs 46 * 0xc200_4000 - 0xc200_8fff : guard page + kstack0 47 * 0xc200_9000 - 0xfeef_ffff : actual free KVA space 48 * 0xfef0_0000 - 0xffff_ffff : I/O devices region 49 */ 50 51#include <sys/cdefs.h> 52__FBSDID("$FreeBSD: head/sys/powerpc/booke/pmap.c 247360 2013-02-26 23:35:27Z attilio $"); 53 54#include <sys/param.h> 55#include <sys/malloc.h> 56#include <sys/ktr.h> 57#include <sys/proc.h> 58#include <sys/user.h> 59#include <sys/queue.h> 60#include <sys/systm.h> 61#include <sys/kernel.h> 62#include <sys/linker.h> 63#include <sys/msgbuf.h> 64#include <sys/lock.h> 65#include <sys/mutex.h> 66#include <sys/rwlock.h> 67#include <sys/sched.h> 68#include <sys/smp.h> 69#include <sys/vmmeter.h> 70 71#include <vm/vm.h> 72#include <vm/vm_page.h> 73#include <vm/vm_kern.h> 74#include <vm/vm_pageout.h> 75#include <vm/vm_extern.h> 76#include <vm/vm_object.h> 77#include <vm/vm_param.h> 78#include <vm/vm_map.h> 79#include <vm/vm_pager.h> 80#include <vm/uma.h> 81 82#include <machine/cpu.h> 83#include <machine/pcb.h> 84#include <machine/platform.h> 85 86#include <machine/tlb.h> 87#include <machine/spr.h> 88#include <machine/md_var.h> 89#include <machine/mmuvar.h> 90#include <machine/pmap.h> 91#include <machine/pte.h> 92 93#include "mmu_if.h" 94 95#ifdef DEBUG 96#define debugf(fmt, args...) 
printf(fmt, ##args) 97#else 98#define debugf(fmt, args...) 99#endif 100 101#define TODO panic("%s: not implemented", __func__); 102 103extern struct mtx sched_lock; 104 105extern int dumpsys_minidump; 106 107extern unsigned char _etext[]; 108extern unsigned char _end[]; 109 110extern uint32_t *bootinfo; 111 112#ifdef SMP 113extern uint32_t bp_ntlb1s; 114#endif 115 116vm_paddr_t ccsrbar_pa; 117vm_paddr_t kernload; 118vm_offset_t kernstart; 119vm_size_t kernsize; 120 121/* Message buffer and tables. */ 122static vm_offset_t data_start; 123static vm_size_t data_end; 124 125/* Phys/avail memory regions. */ 126static struct mem_region *availmem_regions; 127static int availmem_regions_sz; 128static struct mem_region *physmem_regions; 129static int physmem_regions_sz; 130 131/* Reserved KVA space and mutex for mmu_booke_zero_page. */ 132static vm_offset_t zero_page_va; 133static struct mtx zero_page_mutex; 134 135static struct mtx tlbivax_mutex; 136 137/* 138 * Reserved KVA space for mmu_booke_zero_page_idle. This is used 139 * by idle thred only, no lock required. 140 */ 141static vm_offset_t zero_page_idle_va; 142 143/* Reserved KVA space and mutex for mmu_booke_copy_page. */ 144static vm_offset_t copy_page_src_va; 145static vm_offset_t copy_page_dst_va; 146static struct mtx copy_page_mutex; 147 148/**************************************************************************/ 149/* PMAP */ 150/**************************************************************************/ 151 152static void mmu_booke_enter_locked(mmu_t, pmap_t, vm_offset_t, vm_page_t, 153 vm_prot_t, boolean_t); 154 155unsigned int kptbl_min; /* Index of the first kernel ptbl. */ 156unsigned int kernel_ptbls; /* Number of KVA ptbls. */ 157 158/* 159 * If user pmap is processed with mmu_booke_remove and the resident count 160 * drops to 0, there are no more pages to remove, so we need not continue. 161 */ 162#define PMAP_REMOVE_DONE(pmap) \ 163 ((pmap) != kernel_pmap && (pmap)->pm_stats.resident_count == 0) 164 165extern void tid_flush(tlbtid_t); 166 167/**************************************************************************/ 168/* TLB and TID handling */ 169/**************************************************************************/ 170 171/* Translation ID busy table */ 172static volatile pmap_t tidbusy[MAXCPU][TID_MAX + 1]; 173 174/* 175 * TLB0 capabilities (entry, way numbers etc.). These can vary between e500 176 * core revisions and should be read from h/w registers during early config. 
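 * For example, a typical e500v2 core reports a 512-entry, 4-way set
 * associative TLB0 in TLB0CFG, which tlb0_get_tlbconf() below decodes as
 * tlb0_entries = 512, tlb0_ways = 4 and tlb0_entries_per_way = 128; other
 * cores report different geometries, so nothing in this file hard-codes
 * those numbers.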
177 */ 178uint32_t tlb0_entries; 179uint32_t tlb0_ways; 180uint32_t tlb0_entries_per_way; 181 182#define TLB0_ENTRIES (tlb0_entries) 183#define TLB0_WAYS (tlb0_ways) 184#define TLB0_ENTRIES_PER_WAY (tlb0_entries_per_way) 185 186#define TLB1_ENTRIES 16 187 188/* In-ram copy of the TLB1 */ 189static tlb_entry_t tlb1[TLB1_ENTRIES]; 190 191/* Next free entry in the TLB1 */ 192static unsigned int tlb1_idx; 193 194static tlbtid_t tid_alloc(struct pmap *); 195 196static void tlb_print_entry(int, uint32_t, uint32_t, uint32_t, uint32_t); 197 198static int tlb1_set_entry(vm_offset_t, vm_offset_t, vm_size_t, uint32_t); 199static void tlb1_write_entry(unsigned int); 200static int tlb1_iomapped(int, vm_paddr_t, vm_size_t, vm_offset_t *); 201static vm_size_t tlb1_mapin_region(vm_offset_t, vm_paddr_t, vm_size_t); 202 203static vm_size_t tsize2size(unsigned int); 204static unsigned int size2tsize(vm_size_t); 205static unsigned int ilog2(unsigned int); 206 207static void set_mas4_defaults(void); 208 209static inline void tlb0_flush_entry(vm_offset_t); 210static inline unsigned int tlb0_tableidx(vm_offset_t, unsigned int); 211 212/**************************************************************************/ 213/* Page table management */ 214/**************************************************************************/ 215 216static struct rwlock_padalign pvh_global_lock; 217 218/* Data for the pv entry allocation mechanism */ 219static uma_zone_t pvzone; 220static int pv_entry_count = 0, pv_entry_max = 0, pv_entry_high_water = 0; 221 222#define PV_ENTRY_ZONE_MIN 2048 /* min pv entries in uma zone */ 223 224#ifndef PMAP_SHPGPERPROC 225#define PMAP_SHPGPERPROC 200 226#endif 227 228static void ptbl_init(void); 229static struct ptbl_buf *ptbl_buf_alloc(void); 230static void ptbl_buf_free(struct ptbl_buf *); 231static void ptbl_free_pmap_ptbl(pmap_t, pte_t *); 232 233static pte_t *ptbl_alloc(mmu_t, pmap_t, unsigned int); 234static void ptbl_free(mmu_t, pmap_t, unsigned int); 235static void ptbl_hold(mmu_t, pmap_t, unsigned int); 236static int ptbl_unhold(mmu_t, pmap_t, unsigned int); 237 238static vm_paddr_t pte_vatopa(mmu_t, pmap_t, vm_offset_t); 239static pte_t *pte_find(mmu_t, pmap_t, vm_offset_t); 240static void pte_enter(mmu_t, pmap_t, vm_page_t, vm_offset_t, uint32_t); 241static int pte_remove(mmu_t, pmap_t, vm_offset_t, uint8_t); 242 243static pv_entry_t pv_alloc(void); 244static void pv_free(pv_entry_t); 245static void pv_insert(pmap_t, vm_offset_t, vm_page_t); 246static void pv_remove(pmap_t, vm_offset_t, vm_page_t); 247 248/* Number of kva ptbl buffers, each covering one ptbl (PTBL_PAGES). */ 249#define PTBL_BUFS (128 * 16) 250 251struct ptbl_buf { 252 TAILQ_ENTRY(ptbl_buf) link; /* list link */ 253 vm_offset_t kva; /* va of mapping */ 254}; 255 256/* ptbl free list and a lock used for access synchronization. */ 257static TAILQ_HEAD(, ptbl_buf) ptbl_buf_freelist; 258static struct mtx ptbl_buf_freelist_lock; 259 260/* Base address of kva space allocated fot ptbl bufs. */ 261static vm_offset_t ptbl_buf_pool_vabase; 262 263/* Pointer to ptbl_buf structures. 
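 * The array itself is carved out of the kernel data area in
 * mmu_booke_bootstrap(); each of its PTBL_BUFS entries describes one
 * PTBL_PAGES-page KVA window starting at ptbl_buf_pool_vabase (see
 * ptbl_init()).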
*/ 264static struct ptbl_buf *ptbl_bufs; 265 266void pmap_bootstrap_ap(volatile uint32_t *); 267 268/* 269 * Kernel MMU interface 270 */ 271static void mmu_booke_change_wiring(mmu_t, pmap_t, vm_offset_t, boolean_t); 272static void mmu_booke_clear_modify(mmu_t, vm_page_t); 273static void mmu_booke_clear_reference(mmu_t, vm_page_t); 274static void mmu_booke_copy(mmu_t, pmap_t, pmap_t, vm_offset_t, 275 vm_size_t, vm_offset_t); 276static void mmu_booke_copy_page(mmu_t, vm_page_t, vm_page_t); 277static void mmu_booke_enter(mmu_t, pmap_t, vm_offset_t, vm_page_t, 278 vm_prot_t, boolean_t); 279static void mmu_booke_enter_object(mmu_t, pmap_t, vm_offset_t, vm_offset_t, 280 vm_page_t, vm_prot_t); 281static void mmu_booke_enter_quick(mmu_t, pmap_t, vm_offset_t, vm_page_t, 282 vm_prot_t); 283static vm_paddr_t mmu_booke_extract(mmu_t, pmap_t, vm_offset_t); 284static vm_page_t mmu_booke_extract_and_hold(mmu_t, pmap_t, vm_offset_t, 285 vm_prot_t); 286static void mmu_booke_init(mmu_t); 287static boolean_t mmu_booke_is_modified(mmu_t, vm_page_t); 288static boolean_t mmu_booke_is_prefaultable(mmu_t, pmap_t, vm_offset_t); 289static boolean_t mmu_booke_is_referenced(mmu_t, vm_page_t); 290static int mmu_booke_ts_referenced(mmu_t, vm_page_t); 291static vm_offset_t mmu_booke_map(mmu_t, vm_offset_t *, vm_paddr_t, vm_paddr_t, 292 int); 293static int mmu_booke_mincore(mmu_t, pmap_t, vm_offset_t, 294 vm_paddr_t *); 295static void mmu_booke_object_init_pt(mmu_t, pmap_t, vm_offset_t, 296 vm_object_t, vm_pindex_t, vm_size_t); 297static boolean_t mmu_booke_page_exists_quick(mmu_t, pmap_t, vm_page_t); 298static void mmu_booke_page_init(mmu_t, vm_page_t); 299static int mmu_booke_page_wired_mappings(mmu_t, vm_page_t); 300static void mmu_booke_pinit(mmu_t, pmap_t); 301static void mmu_booke_pinit0(mmu_t, pmap_t); 302static void mmu_booke_protect(mmu_t, pmap_t, vm_offset_t, vm_offset_t, 303 vm_prot_t); 304static void mmu_booke_qenter(mmu_t, vm_offset_t, vm_page_t *, int); 305static void mmu_booke_qremove(mmu_t, vm_offset_t, int); 306static void mmu_booke_release(mmu_t, pmap_t); 307static void mmu_booke_remove(mmu_t, pmap_t, vm_offset_t, vm_offset_t); 308static void mmu_booke_remove_all(mmu_t, vm_page_t); 309static void mmu_booke_remove_write(mmu_t, vm_page_t); 310static void mmu_booke_zero_page(mmu_t, vm_page_t); 311static void mmu_booke_zero_page_area(mmu_t, vm_page_t, int, int); 312static void mmu_booke_zero_page_idle(mmu_t, vm_page_t); 313static void mmu_booke_activate(mmu_t, struct thread *); 314static void mmu_booke_deactivate(mmu_t, struct thread *); 315static void mmu_booke_bootstrap(mmu_t, vm_offset_t, vm_offset_t); 316static void *mmu_booke_mapdev(mmu_t, vm_paddr_t, vm_size_t); 317static void mmu_booke_unmapdev(mmu_t, vm_offset_t, vm_size_t); 318static vm_paddr_t mmu_booke_kextract(mmu_t, vm_offset_t); 319static void mmu_booke_kenter(mmu_t, vm_offset_t, vm_paddr_t); 320static void mmu_booke_kremove(mmu_t, vm_offset_t); 321static boolean_t mmu_booke_dev_direct_mapped(mmu_t, vm_paddr_t, vm_size_t); 322static void mmu_booke_sync_icache(mmu_t, pmap_t, vm_offset_t, 323 vm_size_t); 324static vm_offset_t mmu_booke_dumpsys_map(mmu_t, struct pmap_md *, 325 vm_size_t, vm_size_t *); 326static void mmu_booke_dumpsys_unmap(mmu_t, struct pmap_md *, 327 vm_size_t, vm_offset_t); 328static struct pmap_md *mmu_booke_scan_md(mmu_t, struct pmap_md *); 329 330static mmu_method_t mmu_booke_methods[] = { 331 /* pmap dispatcher interface */ 332 MMUMETHOD(mmu_change_wiring, mmu_booke_change_wiring), 333 MMUMETHOD(mmu_clear_modify, 
mmu_booke_clear_modify), 334 MMUMETHOD(mmu_clear_reference, mmu_booke_clear_reference), 335 MMUMETHOD(mmu_copy, mmu_booke_copy), 336 MMUMETHOD(mmu_copy_page, mmu_booke_copy_page), 337 MMUMETHOD(mmu_enter, mmu_booke_enter), 338 MMUMETHOD(mmu_enter_object, mmu_booke_enter_object), 339 MMUMETHOD(mmu_enter_quick, mmu_booke_enter_quick), 340 MMUMETHOD(mmu_extract, mmu_booke_extract), 341 MMUMETHOD(mmu_extract_and_hold, mmu_booke_extract_and_hold), 342 MMUMETHOD(mmu_init, mmu_booke_init), 343 MMUMETHOD(mmu_is_modified, mmu_booke_is_modified), 344 MMUMETHOD(mmu_is_prefaultable, mmu_booke_is_prefaultable), 345 MMUMETHOD(mmu_is_referenced, mmu_booke_is_referenced), 346 MMUMETHOD(mmu_ts_referenced, mmu_booke_ts_referenced), 347 MMUMETHOD(mmu_map, mmu_booke_map), 348 MMUMETHOD(mmu_mincore, mmu_booke_mincore), 349 MMUMETHOD(mmu_object_init_pt, mmu_booke_object_init_pt), 350 MMUMETHOD(mmu_page_exists_quick,mmu_booke_page_exists_quick), 351 MMUMETHOD(mmu_page_init, mmu_booke_page_init), 352 MMUMETHOD(mmu_page_wired_mappings, mmu_booke_page_wired_mappings), 353 MMUMETHOD(mmu_pinit, mmu_booke_pinit), 354 MMUMETHOD(mmu_pinit0, mmu_booke_pinit0), 355 MMUMETHOD(mmu_protect, mmu_booke_protect), 356 MMUMETHOD(mmu_qenter, mmu_booke_qenter), 357 MMUMETHOD(mmu_qremove, mmu_booke_qremove), 358 MMUMETHOD(mmu_release, mmu_booke_release), 359 MMUMETHOD(mmu_remove, mmu_booke_remove), 360 MMUMETHOD(mmu_remove_all, mmu_booke_remove_all), 361 MMUMETHOD(mmu_remove_write, mmu_booke_remove_write), 362 MMUMETHOD(mmu_sync_icache, mmu_booke_sync_icache), 363 MMUMETHOD(mmu_zero_page, mmu_booke_zero_page), 364 MMUMETHOD(mmu_zero_page_area, mmu_booke_zero_page_area), 365 MMUMETHOD(mmu_zero_page_idle, mmu_booke_zero_page_idle), 366 MMUMETHOD(mmu_activate, mmu_booke_activate), 367 MMUMETHOD(mmu_deactivate, mmu_booke_deactivate), 368 369 /* Internal interfaces */ 370 MMUMETHOD(mmu_bootstrap, mmu_booke_bootstrap), 371 MMUMETHOD(mmu_dev_direct_mapped,mmu_booke_dev_direct_mapped), 372 MMUMETHOD(mmu_mapdev, mmu_booke_mapdev), 373 MMUMETHOD(mmu_kenter, mmu_booke_kenter), 374 MMUMETHOD(mmu_kextract, mmu_booke_kextract), 375/* MMUMETHOD(mmu_kremove, mmu_booke_kremove), */ 376 MMUMETHOD(mmu_unmapdev, mmu_booke_unmapdev), 377 378 /* dumpsys() support */ 379 MMUMETHOD(mmu_dumpsys_map, mmu_booke_dumpsys_map), 380 MMUMETHOD(mmu_dumpsys_unmap, mmu_booke_dumpsys_unmap), 381 MMUMETHOD(mmu_scan_md, mmu_booke_scan_md), 382 383 { 0, 0 } 384}; 385 386MMU_DEF(booke_mmu, MMU_TYPE_BOOKE, mmu_booke_methods, 0); 387 388static inline void 389tlb_miss_lock(void) 390{ 391#ifdef SMP 392 struct pcpu *pc; 393 394 if (!smp_started) 395 return; 396 397 STAILQ_FOREACH(pc, &cpuhead, pc_allcpu) { 398 if (pc != pcpup) { 399 400 CTR3(KTR_PMAP, "%s: tlb miss LOCK of CPU=%d, " 401 "tlb_lock=%p", __func__, pc->pc_cpuid, pc->pc_booke_tlb_lock); 402 403 KASSERT((pc->pc_cpuid != PCPU_GET(cpuid)), 404 ("tlb_miss_lock: tried to lock self")); 405 406 tlb_lock(pc->pc_booke_tlb_lock); 407 408 CTR1(KTR_PMAP, "%s: locked", __func__); 409 } 410 } 411#endif 412} 413 414static inline void 415tlb_miss_unlock(void) 416{ 417#ifdef SMP 418 struct pcpu *pc; 419 420 if (!smp_started) 421 return; 422 423 STAILQ_FOREACH(pc, &cpuhead, pc_allcpu) { 424 if (pc != pcpup) { 425 CTR2(KTR_PMAP, "%s: tlb miss UNLOCK of CPU=%d", 426 __func__, pc->pc_cpuid); 427 428 tlb_unlock(pc->pc_booke_tlb_lock); 429 430 CTR1(KTR_PMAP, "%s: unlocked", __func__); 431 } 432 } 433#endif 434} 435 436/* Return number of entries in TLB0. 
*/ 437static __inline void 438tlb0_get_tlbconf(void) 439{ 440 uint32_t tlb0_cfg; 441 442 tlb0_cfg = mfspr(SPR_TLB0CFG); 443 tlb0_entries = tlb0_cfg & TLBCFG_NENTRY_MASK; 444 tlb0_ways = (tlb0_cfg & TLBCFG_ASSOC_MASK) >> TLBCFG_ASSOC_SHIFT; 445 tlb0_entries_per_way = tlb0_entries / tlb0_ways; 446} 447 448/* Initialize pool of kva ptbl buffers. */ 449static void 450ptbl_init(void) 451{ 452 int i; 453 454 CTR3(KTR_PMAP, "%s: s (ptbl_bufs = 0x%08x size 0x%08x)", __func__, 455 (uint32_t)ptbl_bufs, sizeof(struct ptbl_buf) * PTBL_BUFS); 456 CTR3(KTR_PMAP, "%s: s (ptbl_buf_pool_vabase = 0x%08x size = 0x%08x)", 457 __func__, ptbl_buf_pool_vabase, PTBL_BUFS * PTBL_PAGES * PAGE_SIZE); 458 459 mtx_init(&ptbl_buf_freelist_lock, "ptbl bufs lock", NULL, MTX_DEF); 460 TAILQ_INIT(&ptbl_buf_freelist); 461 462 for (i = 0; i < PTBL_BUFS; i++) { 463 ptbl_bufs[i].kva = ptbl_buf_pool_vabase + i * PTBL_PAGES * PAGE_SIZE; 464 TAILQ_INSERT_TAIL(&ptbl_buf_freelist, &ptbl_bufs[i], link); 465 } 466} 467 468/* Get a ptbl_buf from the freelist. */ 469static struct ptbl_buf * 470ptbl_buf_alloc(void) 471{ 472 struct ptbl_buf *buf; 473 474 mtx_lock(&ptbl_buf_freelist_lock); 475 buf = TAILQ_FIRST(&ptbl_buf_freelist); 476 if (buf != NULL) 477 TAILQ_REMOVE(&ptbl_buf_freelist, buf, link); 478 mtx_unlock(&ptbl_buf_freelist_lock); 479 480 CTR2(KTR_PMAP, "%s: buf = %p", __func__, buf); 481 482 return (buf); 483} 484 485/* Return ptbl buff to free pool. */ 486static void 487ptbl_buf_free(struct ptbl_buf *buf) 488{ 489 490 CTR2(KTR_PMAP, "%s: buf = %p", __func__, buf); 491 492 mtx_lock(&ptbl_buf_freelist_lock); 493 TAILQ_INSERT_TAIL(&ptbl_buf_freelist, buf, link); 494 mtx_unlock(&ptbl_buf_freelist_lock); 495} 496 497/* 498 * Search the list of allocated ptbl bufs and find on list of allocated ptbls 499 */ 500static void 501ptbl_free_pmap_ptbl(pmap_t pmap, pte_t *ptbl) 502{ 503 struct ptbl_buf *pbuf; 504 505 CTR2(KTR_PMAP, "%s: ptbl = %p", __func__, ptbl); 506 507 PMAP_LOCK_ASSERT(pmap, MA_OWNED); 508 509 TAILQ_FOREACH(pbuf, &pmap->pm_ptbl_list, link) 510 if (pbuf->kva == (vm_offset_t)ptbl) { 511 /* Remove from pmap ptbl buf list. */ 512 TAILQ_REMOVE(&pmap->pm_ptbl_list, pbuf, link); 513 514 /* Free corresponding ptbl buf. */ 515 ptbl_buf_free(pbuf); 516 break; 517 } 518} 519 520/* Allocate page table. */ 521static pte_t * 522ptbl_alloc(mmu_t mmu, pmap_t pmap, unsigned int pdir_idx) 523{ 524 vm_page_t mtbl[PTBL_PAGES]; 525 vm_page_t m; 526 struct ptbl_buf *pbuf; 527 unsigned int pidx; 528 pte_t *ptbl; 529 int i; 530 531 CTR4(KTR_PMAP, "%s: pmap = %p su = %d pdir_idx = %d", __func__, pmap, 532 (pmap == kernel_pmap), pdir_idx); 533 534 KASSERT((pdir_idx <= (VM_MAXUSER_ADDRESS / PDIR_SIZE)), 535 ("ptbl_alloc: invalid pdir_idx")); 536 KASSERT((pmap->pm_pdir[pdir_idx] == NULL), 537 ("pte_alloc: valid ptbl entry exists!")); 538 539 pbuf = ptbl_buf_alloc(); 540 if (pbuf == NULL) 541 panic("pte_alloc: couldn't alloc kernel virtual memory"); 542 543 ptbl = (pte_t *)pbuf->kva; 544 545 CTR2(KTR_PMAP, "%s: ptbl kva = %p", __func__, ptbl); 546 547 /* Allocate ptbl pages, this will sleep! */ 548 for (i = 0; i < PTBL_PAGES; i++) { 549 pidx = (PTBL_PAGES * pdir_idx) + i; 550 while ((m = vm_page_alloc(NULL, pidx, 551 VM_ALLOC_NOOBJ | VM_ALLOC_WIRED)) == NULL) { 552 553 PMAP_UNLOCK(pmap); 554 rw_wunlock(&pvh_global_lock); 555 VM_WAIT; 556 rw_wlock(&pvh_global_lock); 557 PMAP_LOCK(pmap); 558 } 559 mtbl[i] = m; 560 } 561 562 /* Map allocated pages into kernel_pmap. 
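 * The wired pages collected in mtbl[] are entered at the ptbl_buf's
 * reserved KVA window, so the new page table is virtually contiguous even
 * though its physical pages need not be.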
*/ 563 mmu_booke_qenter(mmu, (vm_offset_t)ptbl, mtbl, PTBL_PAGES); 564 565 /* Zero whole ptbl. */ 566 bzero((caddr_t)ptbl, PTBL_PAGES * PAGE_SIZE); 567 568 /* Add pbuf to the pmap ptbl bufs list. */ 569 TAILQ_INSERT_TAIL(&pmap->pm_ptbl_list, pbuf, link); 570 571 return (ptbl); 572} 573 574/* Free ptbl pages and invalidate pdir entry. */ 575static void 576ptbl_free(mmu_t mmu, pmap_t pmap, unsigned int pdir_idx) 577{ 578 pte_t *ptbl; 579 vm_paddr_t pa; 580 vm_offset_t va; 581 vm_page_t m; 582 int i; 583 584 CTR4(KTR_PMAP, "%s: pmap = %p su = %d pdir_idx = %d", __func__, pmap, 585 (pmap == kernel_pmap), pdir_idx); 586 587 KASSERT((pdir_idx <= (VM_MAXUSER_ADDRESS / PDIR_SIZE)), 588 ("ptbl_free: invalid pdir_idx")); 589 590 ptbl = pmap->pm_pdir[pdir_idx]; 591 592 CTR2(KTR_PMAP, "%s: ptbl = %p", __func__, ptbl); 593 594 KASSERT((ptbl != NULL), ("ptbl_free: null ptbl")); 595 596 /* 597 * Invalidate the pdir entry as soon as possible, so that other CPUs 598 * don't attempt to look up the page tables we are releasing. 599 */ 600 mtx_lock_spin(&tlbivax_mutex); 601 tlb_miss_lock(); 602 603 pmap->pm_pdir[pdir_idx] = NULL; 604 605 tlb_miss_unlock(); 606 mtx_unlock_spin(&tlbivax_mutex); 607 608 for (i = 0; i < PTBL_PAGES; i++) { 609 va = ((vm_offset_t)ptbl + (i * PAGE_SIZE)); 610 pa = pte_vatopa(mmu, kernel_pmap, va); 611 m = PHYS_TO_VM_PAGE(pa); 612 vm_page_free_zero(m); 613 atomic_subtract_int(&cnt.v_wire_count, 1); 614 mmu_booke_kremove(mmu, va); 615 } 616 617 ptbl_free_pmap_ptbl(pmap, ptbl); 618} 619 620/* 621 * Decrement ptbl pages hold count and attempt to free ptbl pages. 622 * Called when removing pte entry from ptbl. 623 * 624 * Return 1 if ptbl pages were freed. 625 */ 626static int 627ptbl_unhold(mmu_t mmu, pmap_t pmap, unsigned int pdir_idx) 628{ 629 pte_t *ptbl; 630 vm_paddr_t pa; 631 vm_page_t m; 632 int i; 633 634 CTR4(KTR_PMAP, "%s: pmap = %p su = %d pdir_idx = %d", __func__, pmap, 635 (pmap == kernel_pmap), pdir_idx); 636 637 KASSERT((pdir_idx <= (VM_MAXUSER_ADDRESS / PDIR_SIZE)), 638 ("ptbl_unhold: invalid pdir_idx")); 639 KASSERT((pmap != kernel_pmap), 640 ("ptbl_unhold: unholding kernel ptbl!")); 641 642 ptbl = pmap->pm_pdir[pdir_idx]; 643 644 //debugf("ptbl_unhold: ptbl = 0x%08x\n", (u_int32_t)ptbl); 645 KASSERT(((vm_offset_t)ptbl >= VM_MIN_KERNEL_ADDRESS), 646 ("ptbl_unhold: non kva ptbl")); 647 648 /* decrement hold count */ 649 for (i = 0; i < PTBL_PAGES; i++) { 650 pa = pte_vatopa(mmu, kernel_pmap, 651 (vm_offset_t)ptbl + (i * PAGE_SIZE)); 652 m = PHYS_TO_VM_PAGE(pa); 653 m->wire_count--; 654 } 655 656 /* 657 * Free ptbl pages if there are no pte etries in this ptbl. 658 * wire_count has the same value for all ptbl pages, so check the last 659 * page. 660 */ 661 if (m->wire_count == 0) { 662 ptbl_free(mmu, pmap, pdir_idx); 663 664 //debugf("ptbl_unhold: e (freed ptbl)\n"); 665 return (1); 666 } 667 668 return (0); 669} 670 671/* 672 * Increment hold count for ptbl pages. This routine is used when a new pte 673 * entry is being inserted into the ptbl. 
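 * The hold count is kept in the wire_count field of each ptbl page;
 * ptbl_unhold() above decrements it again when a pte is removed and frees
 * the whole ptbl once the count drops to zero.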
674 */ 675static void 676ptbl_hold(mmu_t mmu, pmap_t pmap, unsigned int pdir_idx) 677{ 678 vm_paddr_t pa; 679 pte_t *ptbl; 680 vm_page_t m; 681 int i; 682 683 CTR3(KTR_PMAP, "%s: pmap = %p pdir_idx = %d", __func__, pmap, 684 pdir_idx); 685 686 KASSERT((pdir_idx <= (VM_MAXUSER_ADDRESS / PDIR_SIZE)), 687 ("ptbl_hold: invalid pdir_idx")); 688 KASSERT((pmap != kernel_pmap), 689 ("ptbl_hold: holding kernel ptbl!")); 690 691 ptbl = pmap->pm_pdir[pdir_idx]; 692 693 KASSERT((ptbl != NULL), ("ptbl_hold: null ptbl")); 694 695 for (i = 0; i < PTBL_PAGES; i++) { 696 pa = pte_vatopa(mmu, kernel_pmap, 697 (vm_offset_t)ptbl + (i * PAGE_SIZE)); 698 m = PHYS_TO_VM_PAGE(pa); 699 m->wire_count++; 700 } 701} 702 703/* Allocate pv_entry structure. */ 704pv_entry_t 705pv_alloc(void) 706{ 707 pv_entry_t pv; 708 709 pv_entry_count++; 710 if (pv_entry_count > pv_entry_high_water) 711 pagedaemon_wakeup(); 712 pv = uma_zalloc(pvzone, M_NOWAIT); 713 714 return (pv); 715} 716 717/* Free pv_entry structure. */ 718static __inline void 719pv_free(pv_entry_t pve) 720{ 721 722 pv_entry_count--; 723 uma_zfree(pvzone, pve); 724} 725 726 727/* Allocate and initialize pv_entry structure. */ 728static void 729pv_insert(pmap_t pmap, vm_offset_t va, vm_page_t m) 730{ 731 pv_entry_t pve; 732 733 //int su = (pmap == kernel_pmap); 734 //debugf("pv_insert: s (su = %d pmap = 0x%08x va = 0x%08x m = 0x%08x)\n", su, 735 // (u_int32_t)pmap, va, (u_int32_t)m); 736 737 pve = pv_alloc(); 738 if (pve == NULL) 739 panic("pv_insert: no pv entries!"); 740 741 pve->pv_pmap = pmap; 742 pve->pv_va = va; 743 744 /* add to pv_list */ 745 PMAP_LOCK_ASSERT(pmap, MA_OWNED); 746 rw_assert(&pvh_global_lock, RA_WLOCKED); 747 748 TAILQ_INSERT_TAIL(&m->md.pv_list, pve, pv_link); 749 750 //debugf("pv_insert: e\n"); 751} 752 753/* Destroy pv entry. */ 754static void 755pv_remove(pmap_t pmap, vm_offset_t va, vm_page_t m) 756{ 757 pv_entry_t pve; 758 759 //int su = (pmap == kernel_pmap); 760 //debugf("pv_remove: s (su = %d pmap = 0x%08x va = 0x%08x)\n", su, (u_int32_t)pmap, va); 761 762 PMAP_LOCK_ASSERT(pmap, MA_OWNED); 763 rw_assert(&pvh_global_lock, RA_WLOCKED); 764 765 /* find pv entry */ 766 TAILQ_FOREACH(pve, &m->md.pv_list, pv_link) { 767 if ((pmap == pve->pv_pmap) && (va == pve->pv_va)) { 768 /* remove from pv_list */ 769 TAILQ_REMOVE(&m->md.pv_list, pve, pv_link); 770 if (TAILQ_EMPTY(&m->md.pv_list)) 771 vm_page_aflag_clear(m, PGA_WRITEABLE); 772 773 /* free pv entry struct */ 774 pv_free(pve); 775 break; 776 } 777 } 778 779 //debugf("pv_remove: e\n"); 780} 781 782/* 783 * Clean pte entry, try to free page table page if requested. 784 * 785 * Return 1 if ptbl pages were freed, otherwise return 0. 786 */ 787static int 788pte_remove(mmu_t mmu, pmap_t pmap, vm_offset_t va, uint8_t flags) 789{ 790 unsigned int pdir_idx = PDIR_IDX(va); 791 unsigned int ptbl_idx = PTBL_IDX(va); 792 vm_page_t m; 793 pte_t *ptbl; 794 pte_t *pte; 795 796 //int su = (pmap == kernel_pmap); 797 //debugf("pte_remove: s (su = %d pmap = 0x%08x va = 0x%08x flags = %d)\n", 798 // su, (u_int32_t)pmap, va, flags); 799 800 ptbl = pmap->pm_pdir[pdir_idx]; 801 KASSERT(ptbl, ("pte_remove: null ptbl")); 802 803 pte = &ptbl[ptbl_idx]; 804 805 if (pte == NULL || !PTE_ISVALID(pte)) 806 return (0); 807 808 if (PTE_ISWIRED(pte)) 809 pmap->pm_stats.wired_count--; 810 811 /* Handle managed entry. */ 812 if (PTE_ISMANAGED(pte)) { 813 /* Get vm_page_t for mapped pte. 
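 * The modified and referenced bits accumulated in the pte are handed back
 * to that page (vm_page_dirty()/PGA_REFERENCED) before its pv entry is
 * removed below.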
*/ 814 m = PHYS_TO_VM_PAGE(PTE_PA(pte)); 815 816 if (PTE_ISMODIFIED(pte)) 817 vm_page_dirty(m); 818 819 if (PTE_ISREFERENCED(pte)) 820 vm_page_aflag_set(m, PGA_REFERENCED); 821 822 pv_remove(pmap, va, m); 823 } 824 825 mtx_lock_spin(&tlbivax_mutex); 826 tlb_miss_lock(); 827 828 tlb0_flush_entry(va); 829 pte->flags = 0; 830 pte->rpn = 0; 831 832 tlb_miss_unlock(); 833 mtx_unlock_spin(&tlbivax_mutex); 834 835 pmap->pm_stats.resident_count--; 836 837 if (flags & PTBL_UNHOLD) { 838 //debugf("pte_remove: e (unhold)\n"); 839 return (ptbl_unhold(mmu, pmap, pdir_idx)); 840 } 841 842 //debugf("pte_remove: e\n"); 843 return (0); 844} 845 846/* 847 * Insert PTE for a given page and virtual address. 848 */ 849static void 850pte_enter(mmu_t mmu, pmap_t pmap, vm_page_t m, vm_offset_t va, uint32_t flags) 851{ 852 unsigned int pdir_idx = PDIR_IDX(va); 853 unsigned int ptbl_idx = PTBL_IDX(va); 854 pte_t *ptbl, *pte; 855 856 CTR4(KTR_PMAP, "%s: su = %d pmap = %p va = %p", __func__, 857 pmap == kernel_pmap, pmap, va); 858 859 /* Get the page table pointer. */ 860 ptbl = pmap->pm_pdir[pdir_idx]; 861 862 if (ptbl == NULL) { 863 /* Allocate page table pages. */ 864 ptbl = ptbl_alloc(mmu, pmap, pdir_idx); 865 } else { 866 /* 867 * Check if there is valid mapping for requested 868 * va, if there is, remove it. 869 */ 870 pte = &pmap->pm_pdir[pdir_idx][ptbl_idx]; 871 if (PTE_ISVALID(pte)) { 872 pte_remove(mmu, pmap, va, PTBL_HOLD); 873 } else { 874 /* 875 * pte is not used, increment hold count 876 * for ptbl pages. 877 */ 878 if (pmap != kernel_pmap) 879 ptbl_hold(mmu, pmap, pdir_idx); 880 } 881 } 882 883 /* 884 * Insert pv_entry into pv_list for mapped page if part of managed 885 * memory. 886 */ 887 if ((m->oflags & VPO_UNMANAGED) == 0) { 888 flags |= PTE_MANAGED; 889 890 /* Create and insert pv entry. */ 891 pv_insert(pmap, va, m); 892 } 893 894 pmap->pm_stats.resident_count++; 895 896 mtx_lock_spin(&tlbivax_mutex); 897 tlb_miss_lock(); 898 899 tlb0_flush_entry(va); 900 if (pmap->pm_pdir[pdir_idx] == NULL) { 901 /* 902 * If we just allocated a new page table, hook it in 903 * the pdir. 904 */ 905 pmap->pm_pdir[pdir_idx] = ptbl; 906 } 907 pte = &(pmap->pm_pdir[pdir_idx][ptbl_idx]); 908 pte->rpn = VM_PAGE_TO_PHYS(m) & ~PTE_PA_MASK; 909 pte->flags |= (PTE_VALID | flags); 910 911 tlb_miss_unlock(); 912 mtx_unlock_spin(&tlbivax_mutex); 913} 914 915/* Return the pa for the given pmap/va. */ 916static vm_paddr_t 917pte_vatopa(mmu_t mmu, pmap_t pmap, vm_offset_t va) 918{ 919 vm_paddr_t pa = 0; 920 pte_t *pte; 921 922 pte = pte_find(mmu, pmap, va); 923 if ((pte != NULL) && PTE_ISVALID(pte)) 924 pa = (PTE_PA(pte) | (va & PTE_PA_MASK)); 925 return (pa); 926} 927 928/* Get a pointer to a PTE in a page table. */ 929static pte_t * 930pte_find(mmu_t mmu, pmap_t pmap, vm_offset_t va) 931{ 932 unsigned int pdir_idx = PDIR_IDX(va); 933 unsigned int ptbl_idx = PTBL_IDX(va); 934 935 KASSERT((pmap != NULL), ("pte_find: invalid pmap")); 936 937 if (pmap->pm_pdir[pdir_idx]) 938 return (&(pmap->pm_pdir[pdir_idx][ptbl_idx])); 939 940 return (NULL); 941} 942 943/**************************************************************************/ 944/* PMAP related */ 945/**************************************************************************/ 946 947/* 948 * This is called during booke_init, before the system is really initialized. 
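 *
 * In outline: read the TLB0 geometry, lay out the message buffer, dpcpu
 * area, ptbl_bufs array and kernel page tables right after the kernel
 * image (growing the TLB1 mapping if they do not fit), set up phys_avail[]
 * from the available regions with the kernel range removed, initialize the
 * statically allocated kernel_pmap, and map kstack0 for thread0.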
949 */ 950static void 951mmu_booke_bootstrap(mmu_t mmu, vm_offset_t start, vm_offset_t kernelend) 952{ 953 vm_offset_t phys_kernelend; 954 struct mem_region *mp, *mp1; 955 int cnt, i, j; 956 u_int s, e, sz; 957 u_int phys_avail_count; 958 vm_size_t physsz, hwphyssz, kstack0_sz; 959 vm_offset_t kernel_pdir, kstack0, va; 960 vm_paddr_t kstack0_phys; 961 void *dpcpu; 962 pte_t *pte; 963 964 debugf("mmu_booke_bootstrap: entered\n"); 965 966 /* Initialize invalidation mutex */ 967 mtx_init(&tlbivax_mutex, "tlbivax", NULL, MTX_SPIN); 968 969 /* Read TLB0 size and associativity. */ 970 tlb0_get_tlbconf(); 971 972 /* 973 * Align kernel start and end address (kernel image). 974 * Note that kernel end does not necessarily relate to kernsize. 975 * kernsize is the size of the kernel that is actually mapped. 976 * Also note that "start - 1" is deliberate. With SMP, the 977 * entry point is exactly a page from the actual load address. 978 * As such, trunc_page() has no effect and we're off by a page. 979 * Since we always have the ELF header between the load address 980 * and the entry point, we can safely subtract 1 to compensate. 981 */ 982 kernstart = trunc_page(start - 1); 983 data_start = round_page(kernelend); 984 data_end = data_start; 985 986 /* 987 * Addresses of preloaded modules (like file systems) use 988 * physical addresses. Make sure we relocate those into 989 * virtual addresses. 990 */ 991 preload_addr_relocate = kernstart - kernload; 992 993 /* Allocate the dynamic per-cpu area. */ 994 dpcpu = (void *)data_end; 995 data_end += DPCPU_SIZE; 996 997 /* Allocate space for the message buffer. */ 998 msgbufp = (struct msgbuf *)data_end; 999 data_end += msgbufsize; 1000 debugf(" msgbufp at 0x%08x end = 0x%08x\n", (uint32_t)msgbufp, 1001 data_end); 1002 1003 data_end = round_page(data_end); 1004 1005 /* Allocate space for ptbl_bufs. */ 1006 ptbl_bufs = (struct ptbl_buf *)data_end; 1007 data_end += sizeof(struct ptbl_buf) * PTBL_BUFS; 1008 debugf(" ptbl_bufs at 0x%08x end = 0x%08x\n", (uint32_t)ptbl_bufs, 1009 data_end); 1010 1011 data_end = round_page(data_end); 1012 1013 /* Allocate PTE tables for kernel KVA. */ 1014 kernel_pdir = data_end; 1015 kernel_ptbls = (VM_MAX_KERNEL_ADDRESS - VM_MIN_KERNEL_ADDRESS + 1016 PDIR_SIZE - 1) / PDIR_SIZE; 1017 data_end += kernel_ptbls * PTBL_PAGES * PAGE_SIZE; 1018 debugf(" kernel ptbls: %d\n", kernel_ptbls); 1019 debugf(" kernel pdir at 0x%08x end = 0x%08x\n", kernel_pdir, data_end); 1020 1021 debugf(" data_end: 0x%08x\n", data_end); 1022 if (data_end - kernstart > kernsize) { 1023 kernsize += tlb1_mapin_region(kernstart + kernsize, 1024 kernload + kernsize, (data_end - kernstart) - kernsize); 1025 } 1026 data_end = kernstart + kernsize; 1027 debugf(" updated data_end: 0x%08x\n", data_end); 1028 1029 /* 1030 * Clear the structures - note we can only do it safely after the 1031 * possible additional TLB1 translations are in place (above) so that 1032 * all range up to the currently calculated 'data_end' is covered. 1033 */ 1034 dpcpu_init(dpcpu, 0); 1035 memset((void *)ptbl_bufs, 0, sizeof(struct ptbl_buf) * PTBL_SIZE); 1036 memset((void *)kernel_pdir, 0, kernel_ptbls * PTBL_PAGES * PAGE_SIZE); 1037 1038 /*******************************************************/ 1039 /* Set the start and end of kva. */ 1040 /*******************************************************/ 1041 virtual_avail = round_page(data_end); 1042 virtual_end = VM_MAX_KERNEL_ADDRESS; 1043 1044 /* Allocate KVA space for page zero/copy operations. 
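 * Four single-page windows are reserved here: one for
 * mmu_booke_zero_page()/zero_page_area() (guarded by zero_page_mutex),
 * one used only by the idle-thread zeroing path, and a source/destination
 * pair for mmu_booke_copy_page() (guarded by copy_page_mutex).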
*/ 1045 zero_page_va = virtual_avail; 1046 virtual_avail += PAGE_SIZE; 1047 zero_page_idle_va = virtual_avail; 1048 virtual_avail += PAGE_SIZE; 1049 copy_page_src_va = virtual_avail; 1050 virtual_avail += PAGE_SIZE; 1051 copy_page_dst_va = virtual_avail; 1052 virtual_avail += PAGE_SIZE; 1053 debugf("zero_page_va = 0x%08x\n", zero_page_va); 1054 debugf("zero_page_idle_va = 0x%08x\n", zero_page_idle_va); 1055 debugf("copy_page_src_va = 0x%08x\n", copy_page_src_va); 1056 debugf("copy_page_dst_va = 0x%08x\n", copy_page_dst_va); 1057 1058 /* Initialize page zero/copy mutexes. */ 1059 mtx_init(&zero_page_mutex, "mmu_booke_zero_page", NULL, MTX_DEF); 1060 mtx_init(&copy_page_mutex, "mmu_booke_copy_page", NULL, MTX_DEF); 1061 1062 /* Allocate KVA space for ptbl bufs. */ 1063 ptbl_buf_pool_vabase = virtual_avail; 1064 virtual_avail += PTBL_BUFS * PTBL_PAGES * PAGE_SIZE; 1065 debugf("ptbl_buf_pool_vabase = 0x%08x end = 0x%08x\n", 1066 ptbl_buf_pool_vabase, virtual_avail); 1067 1068 /* Calculate corresponding physical addresses for the kernel region. */ 1069 phys_kernelend = kernload + kernsize; 1070 debugf("kernel image and allocated data:\n"); 1071 debugf(" kernload = 0x%08x\n", kernload); 1072 debugf(" kernstart = 0x%08x\n", kernstart); 1073 debugf(" kernsize = 0x%08x\n", kernsize); 1074 1075 if (sizeof(phys_avail) / sizeof(phys_avail[0]) < availmem_regions_sz) 1076 panic("mmu_booke_bootstrap: phys_avail too small"); 1077 1078 /* 1079 * Remove kernel physical address range from avail regions list. Page 1080 * align all regions. Non-page aligned memory isn't very interesting 1081 * to us. Also, sort the entries for ascending addresses. 1082 */ 1083 1084 /* Retrieve phys/avail mem regions */ 1085 mem_regions(&physmem_regions, &physmem_regions_sz, 1086 &availmem_regions, &availmem_regions_sz); 1087 sz = 0; 1088 cnt = availmem_regions_sz; 1089 debugf("processing avail regions:\n"); 1090 for (mp = availmem_regions; mp->mr_size; mp++) { 1091 s = mp->mr_start; 1092 e = mp->mr_start + mp->mr_size; 1093 debugf(" %08x-%08x -> ", s, e); 1094 /* Check whether this region holds all of the kernel. */ 1095 if (s < kernload && e > phys_kernelend) { 1096 availmem_regions[cnt].mr_start = phys_kernelend; 1097 availmem_regions[cnt++].mr_size = e - phys_kernelend; 1098 e = kernload; 1099 } 1100 /* Look whether this region starts within the kernel. */ 1101 if (s >= kernload && s < phys_kernelend) { 1102 if (e <= phys_kernelend) 1103 goto empty; 1104 s = phys_kernelend; 1105 } 1106 /* Now look whether this region ends within the kernel. */ 1107 if (e > kernload && e <= phys_kernelend) { 1108 if (s >= kernload) 1109 goto empty; 1110 e = kernload; 1111 } 1112 /* Now page align the start and size of the region. */ 1113 s = round_page(s); 1114 e = trunc_page(e); 1115 if (e < s) 1116 e = s; 1117 sz = e - s; 1118 debugf("%08x-%08x = %x\n", s, e, sz); 1119 1120 /* Check whether some memory is left here. */ 1121 if (sz == 0) { 1122 empty: 1123 memmove(mp, mp + 1, 1124 (cnt - (mp - availmem_regions)) * sizeof(*mp)); 1125 cnt--; 1126 mp--; 1127 continue; 1128 } 1129 1130 /* Do an insertion sort.
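 * The regions already processed (those before 'mp') stay sorted by
 * mr_start, so the current region only has to be slid into place with a
 * single memmove().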
*/ 1131 for (mp1 = availmem_regions; mp1 < mp; mp1++) 1132 if (s < mp1->mr_start) 1133 break; 1134 if (mp1 < mp) { 1135 memmove(mp1 + 1, mp1, (char *)mp - (char *)mp1); 1136 mp1->mr_start = s; 1137 mp1->mr_size = sz; 1138 } else { 1139 mp->mr_start = s; 1140 mp->mr_size = sz; 1141 } 1142 } 1143 availmem_regions_sz = cnt; 1144 1145 /*******************************************************/ 1146 /* Steal physical memory for kernel stack from the end */ 1147 /* of the first avail region */ 1148 /*******************************************************/ 1149 kstack0_sz = KSTACK_PAGES * PAGE_SIZE; 1150 kstack0_phys = availmem_regions[0].mr_start + 1151 availmem_regions[0].mr_size; 1152 kstack0_phys -= kstack0_sz; 1153 availmem_regions[0].mr_size -= kstack0_sz; 1154 1155 /*******************************************************/ 1156 /* Fill in phys_avail table, based on availmem_regions */ 1157 /*******************************************************/ 1158 phys_avail_count = 0; 1159 physsz = 0; 1160 hwphyssz = 0; 1161 TUNABLE_ULONG_FETCH("hw.physmem", (u_long *) &hwphyssz); 1162 1163 debugf("fill in phys_avail:\n"); 1164 for (i = 0, j = 0; i < availmem_regions_sz; i++, j += 2) { 1165 1166 debugf(" region: 0x%08x - 0x%08x (0x%08x)\n", 1167 availmem_regions[i].mr_start, 1168 availmem_regions[i].mr_start + 1169 availmem_regions[i].mr_size, 1170 availmem_regions[i].mr_size); 1171 1172 if (hwphyssz != 0 && 1173 (physsz + availmem_regions[i].mr_size) >= hwphyssz) { 1174 debugf(" hw.physmem adjust\n"); 1175 if (physsz < hwphyssz) { 1176 phys_avail[j] = availmem_regions[i].mr_start; 1177 phys_avail[j + 1] = 1178 availmem_regions[i].mr_start + 1179 hwphyssz - physsz; 1180 physsz = hwphyssz; 1181 phys_avail_count++; 1182 } 1183 break; 1184 } 1185 1186 phys_avail[j] = availmem_regions[i].mr_start; 1187 phys_avail[j + 1] = availmem_regions[i].mr_start + 1188 availmem_regions[i].mr_size; 1189 phys_avail_count++; 1190 physsz += availmem_regions[i].mr_size; 1191 } 1192 physmem = btoc(physsz); 1193 1194 /* Calculate the last available physical address. */ 1195 for (i = 0; phys_avail[i + 2] != 0; i += 2) 1196 ; 1197 Maxmem = powerpc_btop(phys_avail[i + 1]); 1198 1199 debugf("Maxmem = 0x%08lx\n", Maxmem); 1200 debugf("phys_avail_count = %d\n", phys_avail_count); 1201 debugf("physsz = 0x%08x physmem = %ld (0x%08lx)\n", physsz, physmem, 1202 physmem); 1203 1204 /*******************************************************/ 1205 /* Initialize (statically allocated) kernel pmap. */ 1206 /*******************************************************/ 1207 PMAP_LOCK_INIT(kernel_pmap); 1208 kptbl_min = VM_MIN_KERNEL_ADDRESS / PDIR_SIZE; 1209 1210 debugf("kernel_pmap = 0x%08x\n", (uint32_t)kernel_pmap); 1211 debugf("kptbl_min = %d, kernel_ptbls = %d\n", kptbl_min, kernel_ptbls); 1212 debugf("kernel pdir range: 0x%08x - 0x%08x\n", 1213 kptbl_min * PDIR_SIZE, (kptbl_min + kernel_ptbls) * PDIR_SIZE - 1); 1214 1215 /* Initialize kernel pdir */ 1216 for (i = 0; i < kernel_ptbls; i++) 1217 kernel_pmap->pm_pdir[kptbl_min + i] = 1218 (pte_t *)(kernel_pdir + (i * PAGE_SIZE * PTBL_PAGES)); 1219 1220 for (i = 0; i < MAXCPU; i++) { 1221 kernel_pmap->pm_tid[i] = TID_KERNEL; 1222 1223 /* Initialize each CPU's tidbusy entry 0 with kernel_pmap */ 1224 tidbusy[i][0] = kernel_pmap; 1225 } 1226 1227 /* 1228 * Fill in PTEs covering kernel code and data. They are not required 1229 * for address translation, as this area is covered by static TLB1 1230 * entries, but for pte_vatopa() to work correctly with kernel area 1231 * addresses. 
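 * Each of these ptes simply mirrors the linear TLB1 mapping: for a kernel
 * virtual address va the backing physical address is
 * kernload + (va - kernstart). For instance, with an (illustrative)
 * kernload of 0x01000000 and kernstart of 0xc0000000, va 0xc0123000 would
 * get rpn 0x01123000.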
1232 */ 1233 for (va = kernstart; va < data_end; va += PAGE_SIZE) { 1234 pte = &(kernel_pmap->pm_pdir[PDIR_IDX(va)][PTBL_IDX(va)]); 1235 pte->rpn = kernload + (va - kernstart); 1236 pte->flags = PTE_M | PTE_SR | PTE_SW | PTE_SX | PTE_WIRED | 1237 PTE_VALID; 1238 } 1239 /* Mark kernel_pmap active on all CPUs */ 1240 CPU_FILL(&kernel_pmap->pm_active); 1241 1242 /* 1243 * Initialize the global pv list lock. 1244 */ 1245 rw_init(&pvh_global_lock, "pmap pv global"); 1246 1247 /*******************************************************/ 1248 /* Final setup */ 1249 /*******************************************************/ 1250 1251 /* Enter kstack0 into kernel map, provide guard page */ 1252 kstack0 = virtual_avail + KSTACK_GUARD_PAGES * PAGE_SIZE; 1253 thread0.td_kstack = kstack0; 1254 thread0.td_kstack_pages = KSTACK_PAGES; 1255 1256 debugf("kstack_sz = 0x%08x\n", kstack0_sz); 1257 debugf("kstack0_phys at 0x%08x - 0x%08x\n", 1258 kstack0_phys, kstack0_phys + kstack0_sz); 1259 debugf("kstack0 at 0x%08x - 0x%08x\n", kstack0, kstack0 + kstack0_sz); 1260 1261 virtual_avail += KSTACK_GUARD_PAGES * PAGE_SIZE + kstack0_sz; 1262 for (i = 0; i < KSTACK_PAGES; i++) { 1263 mmu_booke_kenter(mmu, kstack0, kstack0_phys); 1264 kstack0 += PAGE_SIZE; 1265 kstack0_phys += PAGE_SIZE; 1266 } 1267 1268 debugf("virtual_avail = %08x\n", virtual_avail); 1269 debugf("virtual_end = %08x\n", virtual_end); 1270 1271 debugf("mmu_booke_bootstrap: exit\n"); 1272} 1273 1274void 1275pmap_bootstrap_ap(volatile uint32_t *trcp __unused) 1276{ 1277 int i; 1278 1279 /* 1280 * Finish TLB1 configuration: the BSP already set up its TLB1 and we 1281 * have the snapshot of its contents in the s/w tlb1[] table, so use 1282 * these values directly to (re)program AP's TLB1 hardware. 1283 */ 1284 for (i = bp_ntlb1s; i < tlb1_idx; i++) { 1285 /* Skip invalid entries */ 1286 if (!(tlb1[i].mas1 & MAS1_VALID)) 1287 continue; 1288 1289 tlb1_write_entry(i); 1290 } 1291 1292 set_mas4_defaults(); 1293} 1294 1295/* 1296 * Get the physical page address for the given pmap/virtual address. 1297 */ 1298static vm_paddr_t 1299mmu_booke_extract(mmu_t mmu, pmap_t pmap, vm_offset_t va) 1300{ 1301 vm_paddr_t pa; 1302 1303 PMAP_LOCK(pmap); 1304 pa = pte_vatopa(mmu, pmap, va); 1305 PMAP_UNLOCK(pmap); 1306 1307 return (pa); 1308} 1309 1310/* 1311 * Extract the physical page address associated with the given 1312 * kernel virtual address. 1313 */ 1314static vm_paddr_t 1315mmu_booke_kextract(mmu_t mmu, vm_offset_t va) 1316{ 1317 1318 return (pte_vatopa(mmu, kernel_pmap, va)); 1319} 1320 1321/* 1322 * Initialize the pmap module. 1323 * Called by vm_init, to initialize any structures that the pmap 1324 * system needs to map virtual memory. 1325 */ 1326static void 1327mmu_booke_init(mmu_t mmu) 1328{ 1329 int shpgperproc = PMAP_SHPGPERPROC; 1330 1331 /* 1332 * Initialize the address space (zone) for the pv entries. Set a 1333 * high water mark so that the system can recover from excessive 1334 * numbers of pv entries. 1335 */ 1336 pvzone = uma_zcreate("PV ENTRY", sizeof(struct pv_entry), NULL, NULL, 1337 NULL, NULL, UMA_ALIGN_PTR, UMA_ZONE_VM | UMA_ZONE_NOFREE); 1338 1339 TUNABLE_INT_FETCH("vm.pmap.shpgperproc", &shpgperproc); 1340 pv_entry_max = shpgperproc * maxproc + cnt.v_page_count; 1341 1342 TUNABLE_INT_FETCH("vm.pmap.pv_entries", &pv_entry_max); 1343 pv_entry_high_water = 9 * (pv_entry_max / 10); 1344 1345 uma_zone_reserve_kva(pvzone, pv_entry_max); 1346 1347 /* Pre-fill pvzone with initial number of pv entries. 
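 * The initial count is PV_ENTRY_ZONE_MIN (2048). pv_alloc() starts waking
 * up the pagedaemon once pv_entry_count exceeds pv_entry_high_water, i.e.
 * 90% of pv_entry_max as computed above.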
*/ 1348 uma_prealloc(pvzone, PV_ENTRY_ZONE_MIN); 1349 1350 /* Initialize ptbl allocation. */ 1351 ptbl_init(); 1352} 1353 1354/* 1355 * Map a list of wired pages into kernel virtual address space. This is 1356 * intended for temporary mappings which do not need page modification or 1357 * references recorded. Existing mappings in the region are overwritten. 1358 */ 1359static void 1360mmu_booke_qenter(mmu_t mmu, vm_offset_t sva, vm_page_t *m, int count) 1361{ 1362 vm_offset_t va; 1363 1364 va = sva; 1365 while (count-- > 0) { 1366 mmu_booke_kenter(mmu, va, VM_PAGE_TO_PHYS(*m)); 1367 va += PAGE_SIZE; 1368 m++; 1369 } 1370} 1371 1372/* 1373 * Remove page mappings from kernel virtual address space. Intended for 1374 * temporary mappings entered by mmu_booke_qenter. 1375 */ 1376static void 1377mmu_booke_qremove(mmu_t mmu, vm_offset_t sva, int count) 1378{ 1379 vm_offset_t va; 1380 1381 va = sva; 1382 while (count-- > 0) { 1383 mmu_booke_kremove(mmu, va); 1384 va += PAGE_SIZE; 1385 } 1386} 1387 1388/* 1389 * Map a wired page into kernel virtual address space. 1390 */ 1391static void 1392mmu_booke_kenter(mmu_t mmu, vm_offset_t va, vm_paddr_t pa) 1393{ 1394 unsigned int pdir_idx = PDIR_IDX(va); 1395 unsigned int ptbl_idx = PTBL_IDX(va); 1396 uint32_t flags; 1397 pte_t *pte; 1398 1399 KASSERT(((va >= VM_MIN_KERNEL_ADDRESS) && 1400 (va <= VM_MAX_KERNEL_ADDRESS)), ("mmu_booke_kenter: invalid va")); 1401 1402 flags = PTE_M | PTE_SR | PTE_SW | PTE_SX | PTE_WIRED | PTE_VALID; 1403 1404 pte = &(kernel_pmap->pm_pdir[pdir_idx][ptbl_idx]); 1405 1406 mtx_lock_spin(&tlbivax_mutex); 1407 tlb_miss_lock(); 1408 1409 if (PTE_ISVALID(pte)) { 1410 1411 CTR1(KTR_PMAP, "%s: replacing entry!", __func__); 1412 1413 /* Flush entry from TLB0 */ 1414 tlb0_flush_entry(va); 1415 } 1416 1417 pte->rpn = pa & ~PTE_PA_MASK; 1418 pte->flags = flags; 1419 1420 //debugf("mmu_booke_kenter: pdir_idx = %d ptbl_idx = %d va=0x%08x " 1421 // "pa=0x%08x rpn=0x%08x flags=0x%08x\n", 1422 // pdir_idx, ptbl_idx, va, pa, pte->rpn, pte->flags); 1423 1424 /* Flush the real memory from the instruction cache. */ 1425 if ((flags & (PTE_I | PTE_G)) == 0) { 1426 __syncicache((void *)va, PAGE_SIZE); 1427 } 1428 1429 tlb_miss_unlock(); 1430 mtx_unlock_spin(&tlbivax_mutex); 1431} 1432 1433/* 1434 * Remove a page from kernel page table. 1435 */ 1436static void 1437mmu_booke_kremove(mmu_t mmu, vm_offset_t va) 1438{ 1439 unsigned int pdir_idx = PDIR_IDX(va); 1440 unsigned int ptbl_idx = PTBL_IDX(va); 1441 pte_t *pte; 1442 1443// CTR2(KTR_PMAP,("%s: s (va = 0x%08x)\n", __func__, va)); 1444 1445 KASSERT(((va >= VM_MIN_KERNEL_ADDRESS) && 1446 (va <= VM_MAX_KERNEL_ADDRESS)), 1447 ("mmu_booke_kremove: invalid va")); 1448 1449 pte = &(kernel_pmap->pm_pdir[pdir_idx][ptbl_idx]); 1450 1451 if (!PTE_ISVALID(pte)) { 1452 1453 CTR1(KTR_PMAP, "%s: invalid pte", __func__); 1454 1455 return; 1456 } 1457 1458 mtx_lock_spin(&tlbivax_mutex); 1459 tlb_miss_lock(); 1460 1461 /* Invalidate entry in TLB0, update PTE. */ 1462 tlb0_flush_entry(va); 1463 pte->flags = 0; 1464 pte->rpn = 0; 1465 1466 tlb_miss_unlock(); 1467 mtx_unlock_spin(&tlbivax_mutex); 1468} 1469 1470/* 1471 * Initialize pmap associated with process 0. 1472 */ 1473static void 1474mmu_booke_pinit0(mmu_t mmu, pmap_t pmap) 1475{ 1476 1477 mmu_booke_pinit(mmu, pmap); 1478 PCPU_SET(curpmap, pmap); 1479} 1480 1481/* 1482 * Initialize a preallocated and zeroed pmap structure, 1483 * such as one in a vmspace structure. 
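 * Note that no translation ID is assigned here: pm_tid[] is set to
 * TID_NONE and a real TID is only allocated by tid_alloc() the first time
 * the pmap is activated on a given CPU (see mmu_booke_activate()).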
1484 */ 1485static void 1486mmu_booke_pinit(mmu_t mmu, pmap_t pmap) 1487{ 1488 int i; 1489 1490 CTR4(KTR_PMAP, "%s: pmap = %p, proc %d '%s'", __func__, pmap, 1491 curthread->td_proc->p_pid, curthread->td_proc->p_comm); 1492 1493 KASSERT((pmap != kernel_pmap), ("pmap_pinit: initializing kernel_pmap")); 1494 1495 PMAP_LOCK_INIT(pmap); 1496 for (i = 0; i < MAXCPU; i++) 1497 pmap->pm_tid[i] = TID_NONE; 1498 CPU_ZERO(&kernel_pmap->pm_active); 1499 bzero(&pmap->pm_stats, sizeof(pmap->pm_stats)); 1500 bzero(&pmap->pm_pdir, sizeof(pte_t *) * PDIR_NENTRIES); 1501 TAILQ_INIT(&pmap->pm_ptbl_list); 1502} 1503 1504/* 1505 * Release any resources held by the given physical map. 1506 * Called when a pmap initialized by mmu_booke_pinit is being released. 1507 * Should only be called if the map contains no valid mappings. 1508 */ 1509static void 1510mmu_booke_release(mmu_t mmu, pmap_t pmap) 1511{ 1512 1513 KASSERT(pmap->pm_stats.resident_count == 0, 1514 ("pmap_release: pmap resident count %ld != 0", 1515 pmap->pm_stats.resident_count)); 1516 1517 PMAP_LOCK_DESTROY(pmap); 1518} 1519 1520/* 1521 * Insert the given physical page at the specified virtual address in the 1522 * target physical map with the protection requested. If specified the page 1523 * will be wired down. 1524 */ 1525static void 1526mmu_booke_enter(mmu_t mmu, pmap_t pmap, vm_offset_t va, vm_page_t m, 1527 vm_prot_t prot, boolean_t wired) 1528{ 1529 1530 rw_wlock(&pvh_global_lock); 1531 PMAP_LOCK(pmap); 1532 mmu_booke_enter_locked(mmu, pmap, va, m, prot, wired); 1533 rw_wunlock(&pvh_global_lock); 1534 PMAP_UNLOCK(pmap); 1535} 1536 1537static void 1538mmu_booke_enter_locked(mmu_t mmu, pmap_t pmap, vm_offset_t va, vm_page_t m, 1539 vm_prot_t prot, boolean_t wired) 1540{ 1541 pte_t *pte; 1542 vm_paddr_t pa; 1543 uint32_t flags; 1544 int su, sync; 1545 1546 pa = VM_PAGE_TO_PHYS(m); 1547 su = (pmap == kernel_pmap); 1548 sync = 0; 1549 1550 //debugf("mmu_booke_enter_locked: s (pmap=0x%08x su=%d tid=%d m=0x%08x va=0x%08x " 1551 // "pa=0x%08x prot=0x%08x wired=%d)\n", 1552 // (u_int32_t)pmap, su, pmap->pm_tid, 1553 // (u_int32_t)m, va, pa, prot, wired); 1554 1555 if (su) { 1556 KASSERT(((va >= virtual_avail) && 1557 (va <= VM_MAX_KERNEL_ADDRESS)), 1558 ("mmu_booke_enter_locked: kernel pmap, non kernel va")); 1559 } else { 1560 KASSERT((va <= VM_MAXUSER_ADDRESS), 1561 ("mmu_booke_enter_locked: user pmap, non user va")); 1562 } 1563 KASSERT((m->oflags & (VPO_UNMANAGED | VPO_BUSY)) != 0 || 1564 VM_OBJECT_LOCKED(m->object), 1565 ("mmu_booke_enter_locked: page %p is not busy", m)); 1566 1567 PMAP_LOCK_ASSERT(pmap, MA_OWNED); 1568 1569 /* 1570 * If there is an existing mapping, and the physical address has not 1571 * changed, must be protection or wiring change. 1572 */ 1573 if (((pte = pte_find(mmu, pmap, va)) != NULL) && 1574 (PTE_ISVALID(pte)) && (PTE_PA(pte) == pa)) { 1575 1576 /* 1577 * Before actually updating pte->flags we calculate and 1578 * prepare its new value in a helper var. 1579 */ 1580 flags = pte->flags; 1581 flags &= ~(PTE_UW | PTE_UX | PTE_SW | PTE_SX | PTE_MODIFIED); 1582 1583 /* Wiring change, just update stats. */ 1584 if (wired) { 1585 if (!PTE_ISWIRED(pte)) { 1586 flags |= PTE_WIRED; 1587 pmap->pm_stats.wired_count++; 1588 } 1589 } else { 1590 if (PTE_ISWIRED(pte)) { 1591 flags &= ~PTE_WIRED; 1592 pmap->pm_stats.wired_count--; 1593 } 1594 } 1595 1596 if (prot & VM_PROT_WRITE) { 1597 /* Add write permissions. 
*/ 1598 flags |= PTE_SW; 1599 if (!su) 1600 flags |= PTE_UW; 1601 1602 if ((flags & PTE_MANAGED) != 0) 1603 vm_page_aflag_set(m, PGA_WRITEABLE); 1604 } else { 1605 /* Handle modified pages, sense modify status. */ 1606 1607 /* 1608 * The PTE_MODIFIED flag could be set by underlying 1609 * TLB misses since we last read it (above), possibly 1610 * other CPUs could update it so we check in the PTE 1611 * directly rather than rely on that saved local flags 1612 * copy. 1613 */ 1614 if (PTE_ISMODIFIED(pte)) 1615 vm_page_dirty(m); 1616 } 1617 1618 if (prot & VM_PROT_EXECUTE) { 1619 flags |= PTE_SX; 1620 if (!su) 1621 flags |= PTE_UX; 1622 1623 /* 1624 * Check existing flags for execute permissions: if we 1625 * are turning execute permissions on, icache should 1626 * be flushed. 1627 */ 1628 if ((pte->flags & (PTE_UX | PTE_SX)) == 0) 1629 sync++; 1630 } 1631 1632 flags &= ~PTE_REFERENCED; 1633 1634 /* 1635 * The new flags value is all calculated -- only now actually 1636 * update the PTE. 1637 */ 1638 mtx_lock_spin(&tlbivax_mutex); 1639 tlb_miss_lock(); 1640 1641 tlb0_flush_entry(va); 1642 pte->flags = flags; 1643 1644 tlb_miss_unlock(); 1645 mtx_unlock_spin(&tlbivax_mutex); 1646 1647 } else { 1648 /* 1649 * If there is an existing mapping, but it's for a different 1650 * physical address, pte_enter() will delete the old mapping. 1651 */ 1652 //if ((pte != NULL) && PTE_ISVALID(pte)) 1653 // debugf("mmu_booke_enter_locked: replace\n"); 1654 //else 1655 // debugf("mmu_booke_enter_locked: new\n"); 1656 1657 /* Now set up the flags and install the new mapping. */ 1658 flags = (PTE_SR | PTE_VALID); 1659 flags |= PTE_M; 1660 1661 if (!su) 1662 flags |= PTE_UR; 1663 1664 if (prot & VM_PROT_WRITE) { 1665 flags |= PTE_SW; 1666 if (!su) 1667 flags |= PTE_UW; 1668 1669 if ((m->oflags & VPO_UNMANAGED) == 0) 1670 vm_page_aflag_set(m, PGA_WRITEABLE); 1671 } 1672 1673 if (prot & VM_PROT_EXECUTE) { 1674 flags |= PTE_SX; 1675 if (!su) 1676 flags |= PTE_UX; 1677 } 1678 1679 /* If its wired update stats. */ 1680 if (wired) { 1681 pmap->pm_stats.wired_count++; 1682 flags |= PTE_WIRED; 1683 } 1684 1685 pte_enter(mmu, pmap, m, va, flags); 1686 1687 /* Flush the real memory from the instruction cache. */ 1688 if (prot & VM_PROT_EXECUTE) 1689 sync++; 1690 } 1691 1692 if (sync && (su || pmap == PCPU_GET(curpmap))) { 1693 __syncicache((void *)va, PAGE_SIZE); 1694 sync = 0; 1695 } 1696} 1697 1698/* 1699 * Maps a sequence of resident pages belonging to the same object. 1700 * The sequence begins with the given page m_start. This page is 1701 * mapped at the given virtual address start. Each subsequent page is 1702 * mapped at a virtual address that is offset from start by the same 1703 * amount as the page is offset from m_start within the object. The 1704 * last page in the sequence is the page with the largest offset from 1705 * m_start that can be mapped at a virtual address less than the given 1706 * virtual address end. Not every virtual page between start and end 1707 * is mapped; only those for which a resident page exists with the 1708 * corresponding offset from m_start are mapped. 
1709 */ 1710static void 1711mmu_booke_enter_object(mmu_t mmu, pmap_t pmap, vm_offset_t start, 1712 vm_offset_t end, vm_page_t m_start, vm_prot_t prot) 1713{ 1714 vm_page_t m; 1715 vm_pindex_t diff, psize; 1716 1717 psize = atop(end - start); 1718 m = m_start; 1719 rw_wlock(&pvh_global_lock); 1720 PMAP_LOCK(pmap); 1721 while (m != NULL && (diff = m->pindex - m_start->pindex) < psize) { 1722 mmu_booke_enter_locked(mmu, pmap, start + ptoa(diff), m, 1723 prot & (VM_PROT_READ | VM_PROT_EXECUTE), FALSE); 1724 m = TAILQ_NEXT(m, listq); 1725 } 1726 rw_wunlock(&pvh_global_lock); 1727 PMAP_UNLOCK(pmap); 1728} 1729 1730static void 1731mmu_booke_enter_quick(mmu_t mmu, pmap_t pmap, vm_offset_t va, vm_page_t m, 1732 vm_prot_t prot) 1733{ 1734 1735 rw_wlock(&pvh_global_lock); 1736 PMAP_LOCK(pmap); 1737 mmu_booke_enter_locked(mmu, pmap, va, m, 1738 prot & (VM_PROT_READ | VM_PROT_EXECUTE), FALSE); 1739 rw_wunlock(&pvh_global_lock); 1740 PMAP_UNLOCK(pmap); 1741} 1742 1743/* 1744 * Remove the given range of addresses from the specified map. 1745 * 1746 * It is assumed that the start and end are properly rounded to the page size. 1747 */ 1748static void 1749mmu_booke_remove(mmu_t mmu, pmap_t pmap, vm_offset_t va, vm_offset_t endva) 1750{ 1751 pte_t *pte; 1752 uint8_t hold_flag; 1753 1754 int su = (pmap == kernel_pmap); 1755 1756 //debugf("mmu_booke_remove: s (su = %d pmap=0x%08x tid=%d va=0x%08x endva=0x%08x)\n", 1757 // su, (u_int32_t)pmap, pmap->pm_tid, va, endva); 1758 1759 if (su) { 1760 KASSERT(((va >= virtual_avail) && 1761 (va <= VM_MAX_KERNEL_ADDRESS)), 1762 ("mmu_booke_remove: kernel pmap, non kernel va")); 1763 } else { 1764 KASSERT((va <= VM_MAXUSER_ADDRESS), 1765 ("mmu_booke_remove: user pmap, non user va")); 1766 } 1767 1768 if (PMAP_REMOVE_DONE(pmap)) { 1769 //debugf("mmu_booke_remove: e (empty)\n"); 1770 return; 1771 } 1772 1773 hold_flag = PTBL_HOLD_FLAG(pmap); 1774 //debugf("mmu_booke_remove: hold_flag = %d\n", hold_flag); 1775 1776 rw_wlock(&pvh_global_lock); 1777 PMAP_LOCK(pmap); 1778 for (; va < endva; va += PAGE_SIZE) { 1779 pte = pte_find(mmu, pmap, va); 1780 if ((pte != NULL) && PTE_ISVALID(pte)) 1781 pte_remove(mmu, pmap, va, hold_flag); 1782 } 1783 PMAP_UNLOCK(pmap); 1784 rw_wunlock(&pvh_global_lock); 1785 1786 //debugf("mmu_booke_remove: e\n"); 1787} 1788 1789/* 1790 * Remove physical page from all pmaps in which it resides. 1791 */ 1792static void 1793mmu_booke_remove_all(mmu_t mmu, vm_page_t m) 1794{ 1795 pv_entry_t pv, pvn; 1796 uint8_t hold_flag; 1797 1798 rw_wlock(&pvh_global_lock); 1799 for (pv = TAILQ_FIRST(&m->md.pv_list); pv != NULL; pv = pvn) { 1800 pvn = TAILQ_NEXT(pv, pv_link); 1801 1802 PMAP_LOCK(pv->pv_pmap); 1803 hold_flag = PTBL_HOLD_FLAG(pv->pv_pmap); 1804 pte_remove(mmu, pv->pv_pmap, pv->pv_va, hold_flag); 1805 PMAP_UNLOCK(pv->pv_pmap); 1806 } 1807 vm_page_aflag_clear(m, PGA_WRITEABLE); 1808 rw_wunlock(&pvh_global_lock); 1809} 1810 1811/* 1812 * Map a range of physical addresses into kernel virtual address space. 
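 * The range [pa_start, pa_end) is entered a page at a time with
 * mmu_booke_kenter() starting at *virt; on return *virt has been advanced
 * past the new mapping and the original starting VA is handed back to the
 * caller.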
1813 */ 1814static vm_offset_t 1815mmu_booke_map(mmu_t mmu, vm_offset_t *virt, vm_paddr_t pa_start, 1816 vm_paddr_t pa_end, int prot) 1817{ 1818 vm_offset_t sva = *virt; 1819 vm_offset_t va = sva; 1820 1821 //debugf("mmu_booke_map: s (sva = 0x%08x pa_start = 0x%08x pa_end = 0x%08x)\n", 1822 // sva, pa_start, pa_end); 1823 1824 while (pa_start < pa_end) { 1825 mmu_booke_kenter(mmu, va, pa_start); 1826 va += PAGE_SIZE; 1827 pa_start += PAGE_SIZE; 1828 } 1829 *virt = va; 1830 1831 //debugf("mmu_booke_map: e (va = 0x%08x)\n", va); 1832 return (sva); 1833} 1834 1835/* 1836 * The pmap must be activated before it's address space can be accessed in any 1837 * way. 1838 */ 1839static void 1840mmu_booke_activate(mmu_t mmu, struct thread *td) 1841{ 1842 pmap_t pmap; 1843 u_int cpuid; 1844 1845 pmap = &td->td_proc->p_vmspace->vm_pmap; 1846 1847 CTR5(KTR_PMAP, "%s: s (td = %p, proc = '%s', id = %d, pmap = 0x%08x)", 1848 __func__, td, td->td_proc->p_comm, td->td_proc->p_pid, pmap); 1849 1850 KASSERT((pmap != kernel_pmap), ("mmu_booke_activate: kernel_pmap!")); 1851 1852 mtx_lock_spin(&sched_lock); 1853 1854 cpuid = PCPU_GET(cpuid); 1855 CPU_SET_ATOMIC(cpuid, &pmap->pm_active); 1856 PCPU_SET(curpmap, pmap); 1857 1858 if (pmap->pm_tid[cpuid] == TID_NONE) 1859 tid_alloc(pmap); 1860 1861 /* Load PID0 register with pmap tid value. */ 1862 mtspr(SPR_PID0, pmap->pm_tid[cpuid]); 1863 __asm __volatile("isync"); 1864 1865 mtx_unlock_spin(&sched_lock); 1866 1867 CTR3(KTR_PMAP, "%s: e (tid = %d for '%s')", __func__, 1868 pmap->pm_tid[PCPU_GET(cpuid)], td->td_proc->p_comm); 1869} 1870 1871/* 1872 * Deactivate the specified process's address space. 1873 */ 1874static void 1875mmu_booke_deactivate(mmu_t mmu, struct thread *td) 1876{ 1877 pmap_t pmap; 1878 1879 pmap = &td->td_proc->p_vmspace->vm_pmap; 1880 1881 CTR5(KTR_PMAP, "%s: td=%p, proc = '%s', id = %d, pmap = 0x%08x", 1882 __func__, td, td->td_proc->p_comm, td->td_proc->p_pid, pmap); 1883 1884 CPU_CLR_ATOMIC(PCPU_GET(cpuid), &pmap->pm_active); 1885 PCPU_SET(curpmap, NULL); 1886} 1887 1888/* 1889 * Copy the range specified by src_addr/len 1890 * from the source map to the range dst_addr/len 1891 * in the destination map. 1892 * 1893 * This routine is only advisory and need not do anything. 1894 */ 1895static void 1896mmu_booke_copy(mmu_t mmu, pmap_t dst_pmap, pmap_t src_pmap, 1897 vm_offset_t dst_addr, vm_size_t len, vm_offset_t src_addr) 1898{ 1899 1900} 1901 1902/* 1903 * Set the physical protection on the specified range of this map as requested. 1904 */ 1905static void 1906mmu_booke_protect(mmu_t mmu, pmap_t pmap, vm_offset_t sva, vm_offset_t eva, 1907 vm_prot_t prot) 1908{ 1909 vm_offset_t va; 1910 vm_page_t m; 1911 pte_t *pte; 1912 1913 if ((prot & VM_PROT_READ) == VM_PROT_NONE) { 1914 mmu_booke_remove(mmu, pmap, sva, eva); 1915 return; 1916 } 1917 1918 if (prot & VM_PROT_WRITE) 1919 return; 1920 1921 PMAP_LOCK(pmap); 1922 for (va = sva; va < eva; va += PAGE_SIZE) { 1923 if ((pte = pte_find(mmu, pmap, va)) != NULL) { 1924 if (PTE_ISVALID(pte)) { 1925 m = PHYS_TO_VM_PAGE(PTE_PA(pte)); 1926 1927 mtx_lock_spin(&tlbivax_mutex); 1928 tlb_miss_lock(); 1929 1930 /* Handle modified pages. */ 1931 if (PTE_ISMODIFIED(pte) && PTE_ISMANAGED(pte)) 1932 vm_page_dirty(m); 1933 1934 tlb0_flush_entry(va); 1935 pte->flags &= ~(PTE_UW | PTE_SW | PTE_MODIFIED); 1936 1937 tlb_miss_unlock(); 1938 mtx_unlock_spin(&tlbivax_mutex); 1939 } 1940 } 1941 } 1942 PMAP_UNLOCK(pmap); 1943} 1944 1945/* 1946 * Clear the write and modified bits in each of the given page's mappings. 
1947 */ 1948static void 1949mmu_booke_remove_write(mmu_t mmu, vm_page_t m) 1950{ 1951 pv_entry_t pv; 1952 pte_t *pte; 1953 1954 KASSERT((m->oflags & VPO_UNMANAGED) == 0, 1955 ("mmu_booke_remove_write: page %p is not managed", m)); 1956 1957 /* 1958 * If the page is not VPO_BUSY, then PGA_WRITEABLE cannot be set by 1959 * another thread while the object is locked. Thus, if PGA_WRITEABLE 1960 * is clear, no page table entries need updating. 1961 */ 1962 VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED); 1963 if ((m->oflags & VPO_BUSY) == 0 && 1964 (m->aflags & PGA_WRITEABLE) == 0) 1965 return; 1966 rw_wlock(&pvh_global_lock); 1967 TAILQ_FOREACH(pv, &m->md.pv_list, pv_link) { 1968 PMAP_LOCK(pv->pv_pmap); 1969 if ((pte = pte_find(mmu, pv->pv_pmap, pv->pv_va)) != NULL) { 1970 if (PTE_ISVALID(pte)) { 1971 m = PHYS_TO_VM_PAGE(PTE_PA(pte)); 1972 1973 mtx_lock_spin(&tlbivax_mutex); 1974 tlb_miss_lock(); 1975 1976 /* Handle modified pages. */ 1977 if (PTE_ISMODIFIED(pte)) 1978 vm_page_dirty(m); 1979 1980 /* Flush mapping from TLB0. */ 1981 pte->flags &= ~(PTE_UW | PTE_SW | PTE_MODIFIED); 1982 1983 tlb_miss_unlock(); 1984 mtx_unlock_spin(&tlbivax_mutex); 1985 } 1986 } 1987 PMAP_UNLOCK(pv->pv_pmap); 1988 } 1989 vm_page_aflag_clear(m, PGA_WRITEABLE); 1990 rw_wunlock(&pvh_global_lock); 1991} 1992 1993static void 1994mmu_booke_sync_icache(mmu_t mmu, pmap_t pm, vm_offset_t va, vm_size_t sz) 1995{ 1996 pte_t *pte; 1997 pmap_t pmap; 1998 vm_page_t m; 1999 vm_offset_t addr; 2000 vm_paddr_t pa; 2001 int active, valid; 2002 2003 va = trunc_page(va); 2004 sz = round_page(sz); 2005 2006 rw_wlock(&pvh_global_lock); 2007 pmap = PCPU_GET(curpmap); 2008 active = (pm == kernel_pmap || pm == pmap) ? 1 : 0; 2009 while (sz > 0) { 2010 PMAP_LOCK(pm); 2011 pte = pte_find(mmu, pm, va); 2012 valid = (pte != NULL && PTE_ISVALID(pte)) ? 1 : 0; 2013 if (valid) 2014 pa = PTE_PA(pte); 2015 PMAP_UNLOCK(pm); 2016 if (valid) { 2017 if (!active) { 2018 /* Create a mapping in the active pmap. */ 2019 addr = 0; 2020 m = PHYS_TO_VM_PAGE(pa); 2021 PMAP_LOCK(pmap); 2022 pte_enter(mmu, pmap, m, addr, 2023 PTE_SR | PTE_VALID | PTE_UR); 2024 __syncicache((void *)addr, PAGE_SIZE); 2025 pte_remove(mmu, pmap, addr, PTBL_UNHOLD); 2026 PMAP_UNLOCK(pmap); 2027 } else 2028 __syncicache((void *)va, PAGE_SIZE); 2029 } 2030 va += PAGE_SIZE; 2031 sz -= PAGE_SIZE; 2032 } 2033 rw_wunlock(&pvh_global_lock); 2034} 2035 2036/* 2037 * Atomically extract and hold the physical page with the given 2038 * pmap and virtual address pair if that mapping permits the given 2039 * protection. 2040 */ 2041static vm_page_t 2042mmu_booke_extract_and_hold(mmu_t mmu, pmap_t pmap, vm_offset_t va, 2043 vm_prot_t prot) 2044{ 2045 pte_t *pte; 2046 vm_page_t m; 2047 uint32_t pte_wbit; 2048 vm_paddr_t pa; 2049 2050 m = NULL; 2051 pa = 0; 2052 PMAP_LOCK(pmap); 2053retry: 2054 pte = pte_find(mmu, pmap, va); 2055 if ((pte != NULL) && PTE_ISVALID(pte)) { 2056 if (pmap == kernel_pmap) 2057 pte_wbit = PTE_SW; 2058 else 2059 pte_wbit = PTE_UW; 2060 2061 if ((pte->flags & pte_wbit) || ((prot & VM_PROT_WRITE) == 0)) { 2062 if (vm_page_pa_tryrelock(pmap, PTE_PA(pte), &pa)) 2063 goto retry; 2064 m = PHYS_TO_VM_PAGE(PTE_PA(pte)); 2065 vm_page_hold(m); 2066 } 2067 } 2068 2069 PA_UNLOCK_COND(pa); 2070 PMAP_UNLOCK(pmap); 2071 return (m); 2072} 2073 2074/* 2075 * Initialize a vm_page's machine-dependent fields. 
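 *
 * The pv list initialized here is the per-page list that the pv walkers
 * above and below (mmu_booke_remove_all(), mmu_booke_remove_write(),
 * mmu_booke_is_modified() and friends) iterate with TAILQ_FOREACH.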
2076 */ 2077static void 2078mmu_booke_page_init(mmu_t mmu, vm_page_t m) 2079{ 2080 2081 TAILQ_INIT(&m->md.pv_list); 2082} 2083 2084/* 2085 * mmu_booke_zero_page_area zeros the specified hardware page by 2086 * mapping it into virtual memory and using bzero to clear 2087 * its contents. 2088 * 2089 * off and size must reside within a single page. 2090 */ 2091static void 2092mmu_booke_zero_page_area(mmu_t mmu, vm_page_t m, int off, int size) 2093{ 2094 vm_offset_t va; 2095 2096 /* XXX KASSERT off and size are within a single page? */ 2097 2098 mtx_lock(&zero_page_mutex); 2099 va = zero_page_va; 2100 2101 mmu_booke_kenter(mmu, va, VM_PAGE_TO_PHYS(m)); 2102 bzero((caddr_t)va + off, size); 2103 mmu_booke_kremove(mmu, va); 2104 2105 mtx_unlock(&zero_page_mutex); 2106} 2107 2108/* 2109 * mmu_booke_zero_page zeros the specified hardware page. 2110 */ 2111static void 2112mmu_booke_zero_page(mmu_t mmu, vm_page_t m) 2113{ 2114 2115 mmu_booke_zero_page_area(mmu, m, 0, PAGE_SIZE); 2116} 2117 2118/* 2119 * mmu_booke_copy_page copies the specified (machine independent) page by 2120 * mapping the page into virtual memory and using memcpy() to copy the page, 2121 * one machine dependent page at a time. 2122 */ 2123static void 2124mmu_booke_copy_page(mmu_t mmu, vm_page_t sm, vm_page_t dm) 2125{ 2126 vm_offset_t sva, dva; 2127 2128 sva = copy_page_src_va; 2129 dva = copy_page_dst_va; 2130 2131 mtx_lock(&copy_page_mutex); 2132 mmu_booke_kenter(mmu, sva, VM_PAGE_TO_PHYS(sm)); 2133 mmu_booke_kenter(mmu, dva, VM_PAGE_TO_PHYS(dm)); 2134 memcpy((caddr_t)dva, (caddr_t)sva, PAGE_SIZE); 2135 mmu_booke_kremove(mmu, dva); 2136 mmu_booke_kremove(mmu, sva); 2137 mtx_unlock(&copy_page_mutex); 2138} 2139 2140/* 2141 * mmu_booke_zero_page_idle zeros the specified hardware page by mapping it 2142 * into virtual memory and using bzero to clear its contents. This is intended 2143 * to be called from the vm_pagezero process only and outside of Giant. No 2144 * lock is required. 2145 */ 2146static void 2147mmu_booke_zero_page_idle(mmu_t mmu, vm_page_t m) 2148{ 2149 vm_offset_t va; 2150 2151 va = zero_page_idle_va; 2152 mmu_booke_kenter(mmu, va, VM_PAGE_TO_PHYS(m)); 2153 bzero((caddr_t)va, PAGE_SIZE); 2154 mmu_booke_kremove(mmu, va); 2155} 2156 2157/* 2158 * Return whether or not the specified physical page was modified 2159 * in any physical maps. 2160 */ 2161static boolean_t 2162mmu_booke_is_modified(mmu_t mmu, vm_page_t m) 2163{ 2164 pte_t *pte; 2165 pv_entry_t pv; 2166 boolean_t rv; 2167 2168 KASSERT((m->oflags & VPO_UNMANAGED) == 0, 2169 ("mmu_booke_is_modified: page %p is not managed", m)); 2170 rv = FALSE; 2171 2172 /* 2173 * If the page is not VPO_BUSY, then PGA_WRITEABLE cannot be 2174 * concurrently set while the object is locked. Thus, if PGA_WRITEABLE 2175 * is clear, no PTEs can be modified. 2176 */ 2177 VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED); 2178 if ((m->oflags & VPO_BUSY) == 0 && 2179 (m->aflags & PGA_WRITEABLE) == 0) 2180 return (rv); 2181 rw_wlock(&pvh_global_lock); 2182 TAILQ_FOREACH(pv, &m->md.pv_list, pv_link) { 2183 PMAP_LOCK(pv->pv_pmap); 2184 if ((pte = pte_find(mmu, pv->pv_pmap, pv->pv_va)) != NULL && 2185 PTE_ISVALID(pte)) { 2186 if (PTE_ISMODIFIED(pte)) 2187 rv = TRUE; 2188 } 2189 PMAP_UNLOCK(pv->pv_pmap); 2190 if (rv) 2191 break; 2192 } 2193 rw_wunlock(&pvh_global_lock); 2194 return (rv); 2195} 2196 2197/* 2198 * Return whether or not the specified virtual address is eligible 2199 * for prefault.
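 *
 * The zero/copy helpers above follow one pattern (the idle variant skips
 * the mutex since it runs only from the vm_pagezero process): lock the
 * mutex guarding a dedicated KVA window, kenter the page into that
 * window, operate on it, kremove it, drop the mutex.  Condensed into a
 * sketch with a hypothetical window (scratch_va and scratch_mutex are
 * placeholders, not symbols from this file):
 *
 *	mtx_lock(&scratch_mutex);
 *	mmu_booke_kenter(mmu, scratch_va, VM_PAGE_TO_PHYS(m));
 *	bzero((caddr_t)scratch_va, PAGE_SIZE);	// or memcpy() for copies
 *	mmu_booke_kremove(mmu, scratch_va);
 *	mtx_unlock(&scratch_mutex);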
2200 */ 2201static boolean_t 2202mmu_booke_is_prefaultable(mmu_t mmu, pmap_t pmap, vm_offset_t addr) 2203{ 2204 2205 return (FALSE); 2206} 2207 2208/* 2209 * Return whether or not the specified physical page was referenced 2210 * in any physical maps. 2211 */ 2212static boolean_t 2213mmu_booke_is_referenced(mmu_t mmu, vm_page_t m) 2214{ 2215 pte_t *pte; 2216 pv_entry_t pv; 2217 boolean_t rv; 2218 2219 KASSERT((m->oflags & VPO_UNMANAGED) == 0, 2220 ("mmu_booke_is_referenced: page %p is not managed", m)); 2221 rv = FALSE; 2222 rw_wlock(&pvh_global_lock); 2223 TAILQ_FOREACH(pv, &m->md.pv_list, pv_link) { 2224 PMAP_LOCK(pv->pv_pmap); 2225 if ((pte = pte_find(mmu, pv->pv_pmap, pv->pv_va)) != NULL && 2226 PTE_ISVALID(pte)) { 2227 if (PTE_ISREFERENCED(pte)) 2228 rv = TRUE; 2229 } 2230 PMAP_UNLOCK(pv->pv_pmap); 2231 if (rv) 2232 break; 2233 } 2234 rw_wunlock(&pvh_global_lock); 2235 return (rv); 2236} 2237 2238/* 2239 * Clear the modify bits on the specified physical page. 2240 */ 2241static void 2242mmu_booke_clear_modify(mmu_t mmu, vm_page_t m) 2243{ 2244 pte_t *pte; 2245 pv_entry_t pv; 2246 2247 KASSERT((m->oflags & VPO_UNMANAGED) == 0, 2248 ("mmu_booke_clear_modify: page %p is not managed", m)); 2249 VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED); 2250 KASSERT((m->oflags & VPO_BUSY) == 0, 2251 ("mmu_booke_clear_modify: page %p is busy", m)); 2252 2253 /* 2254 * If the page is not PG_AWRITEABLE, then no PTEs can be modified. 2255 * If the object containing the page is locked and the page is not 2256 * VPO_BUSY, then PG_AWRITEABLE cannot be concurrently set. 2257 */ 2258 if ((m->aflags & PGA_WRITEABLE) == 0) 2259 return; 2260 rw_wlock(&pvh_global_lock); 2261 TAILQ_FOREACH(pv, &m->md.pv_list, pv_link) { 2262 PMAP_LOCK(pv->pv_pmap); 2263 if ((pte = pte_find(mmu, pv->pv_pmap, pv->pv_va)) != NULL && 2264 PTE_ISVALID(pte)) { 2265 mtx_lock_spin(&tlbivax_mutex); 2266 tlb_miss_lock(); 2267 2268 if (pte->flags & (PTE_SW | PTE_UW | PTE_MODIFIED)) { 2269 tlb0_flush_entry(pv->pv_va); 2270 pte->flags &= ~(PTE_SW | PTE_UW | PTE_MODIFIED | 2271 PTE_REFERENCED); 2272 } 2273 2274 tlb_miss_unlock(); 2275 mtx_unlock_spin(&tlbivax_mutex); 2276 } 2277 PMAP_UNLOCK(pv->pv_pmap); 2278 } 2279 rw_wunlock(&pvh_global_lock); 2280} 2281 2282/* 2283 * Return a count of reference bits for a page, clearing those bits. 2284 * It is not necessary for every reference bit to be cleared, but it 2285 * is necessary that 0 only be returned when there are truly no 2286 * reference bits set. 2287 * 2288 * XXX: The exact number of bits to check and clear is a matter that 2289 * should be tested and standardized at some point in the future for 2290 * optimal aging of shared pages. 
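 *
 * With the implementation below the returned count saturates at 5: the pv
 * walk stops once more than four referenced mappings have been found and
 * cleared.  A hypothetical caller therefore sees only a small count, e.g.:
 *
 *	act_count = mmu_booke_ts_referenced(mmu, m);	// 0..5 here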
2291 */ 2292static int 2293mmu_booke_ts_referenced(mmu_t mmu, vm_page_t m) 2294{ 2295 pte_t *pte; 2296 pv_entry_t pv; 2297 int count; 2298 2299 KASSERT((m->oflags & VPO_UNMANAGED) == 0, 2300 ("mmu_booke_ts_referenced: page %p is not managed", m)); 2301 count = 0; 2302 rw_wlock(&pvh_global_lock); 2303 TAILQ_FOREACH(pv, &m->md.pv_list, pv_link) { 2304 PMAP_LOCK(pv->pv_pmap); 2305 if ((pte = pte_find(mmu, pv->pv_pmap, pv->pv_va)) != NULL && 2306 PTE_ISVALID(pte)) { 2307 if (PTE_ISREFERENCED(pte)) { 2308 mtx_lock_spin(&tlbivax_mutex); 2309 tlb_miss_lock(); 2310 2311 tlb0_flush_entry(pv->pv_va); 2312 pte->flags &= ~PTE_REFERENCED; 2313 2314 tlb_miss_unlock(); 2315 mtx_unlock_spin(&tlbivax_mutex); 2316 2317 if (++count > 4) { 2318 PMAP_UNLOCK(pv->pv_pmap); 2319 break; 2320 } 2321 } 2322 } 2323 PMAP_UNLOCK(pv->pv_pmap); 2324 } 2325 rw_wunlock(&pvh_global_lock); 2326 return (count); 2327} 2328 2329/* 2330 * Clear the reference bit on the specified physical page. 2331 */ 2332static void 2333mmu_booke_clear_reference(mmu_t mmu, vm_page_t m) 2334{ 2335 pte_t *pte; 2336 pv_entry_t pv; 2337 2338 KASSERT((m->oflags & VPO_UNMANAGED) == 0, 2339 ("mmu_booke_clear_reference: page %p is not managed", m)); 2340 rw_wlock(&pvh_global_lock); 2341 TAILQ_FOREACH(pv, &m->md.pv_list, pv_link) { 2342 PMAP_LOCK(pv->pv_pmap); 2343 if ((pte = pte_find(mmu, pv->pv_pmap, pv->pv_va)) != NULL && 2344 PTE_ISVALID(pte)) { 2345 if (PTE_ISREFERENCED(pte)) { 2346 mtx_lock_spin(&tlbivax_mutex); 2347 tlb_miss_lock(); 2348 2349 tlb0_flush_entry(pv->pv_va); 2350 pte->flags &= ~PTE_REFERENCED; 2351 2352 tlb_miss_unlock(); 2353 mtx_unlock_spin(&tlbivax_mutex); 2354 } 2355 } 2356 PMAP_UNLOCK(pv->pv_pmap); 2357 } 2358 rw_wunlock(&pvh_global_lock); 2359} 2360 2361/* 2362 * Change wiring attribute for a map/virtual-address pair. 2363 */ 2364static void 2365mmu_booke_change_wiring(mmu_t mmu, pmap_t pmap, vm_offset_t va, boolean_t wired) 2366{ 2367 pte_t *pte; 2368 2369 PMAP_LOCK(pmap); 2370 if ((pte = pte_find(mmu, pmap, va)) != NULL) { 2371 if (wired) { 2372 if (!PTE_ISWIRED(pte)) { 2373 pte->flags |= PTE_WIRED; 2374 pmap->pm_stats.wired_count++; 2375 } 2376 } else { 2377 if (PTE_ISWIRED(pte)) { 2378 pte->flags &= ~PTE_WIRED; 2379 pmap->pm_stats.wired_count--; 2380 } 2381 } 2382 } 2383 PMAP_UNLOCK(pmap); 2384} 2385 2386/* 2387 * Return true if the pmap's pv is one of the first 16 pvs linked to from this 2388 * page. This count may be changed upwards or downwards in the future; it is 2389 * only necessary that true be returned for a small subset of pmaps for proper 2390 * page aging. 2391 */ 2392static boolean_t 2393mmu_booke_page_exists_quick(mmu_t mmu, pmap_t pmap, vm_page_t m) 2394{ 2395 pv_entry_t pv; 2396 int loops; 2397 boolean_t rv; 2398 2399 KASSERT((m->oflags & VPO_UNMANAGED) == 0, 2400 ("mmu_booke_page_exists_quick: page %p is not managed", m)); 2401 loops = 0; 2402 rv = FALSE; 2403 rw_wlock(&pvh_global_lock); 2404 TAILQ_FOREACH(pv, &m->md.pv_list, pv_link) { 2405 if (pv->pv_pmap == pmap) { 2406 rv = TRUE; 2407 break; 2408 } 2409 if (++loops >= 16) 2410 break; 2411 } 2412 rw_wunlock(&pvh_global_lock); 2413 return (rv); 2414} 2415 2416/* 2417 * Return the number of managed mappings to the given physical page that are 2418 * wired. 
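 *
 * This is a per-page count (a pv list walk); it complements the per-pmap
 * pm_stats.wired_count bookkeeping done in mmu_booke_change_wiring()
 * above, which tracks how many wired PTEs a single pmap holds.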
2419 */ 2420static int 2421mmu_booke_page_wired_mappings(mmu_t mmu, vm_page_t m) 2422{ 2423 pv_entry_t pv; 2424 pte_t *pte; 2425 int count = 0; 2426 2427 if ((m->oflags & VPO_UNMANAGED) != 0) 2428 return (count); 2429 rw_wlock(&pvh_global_lock); 2430 TAILQ_FOREACH(pv, &m->md.pv_list, pv_link) { 2431 PMAP_LOCK(pv->pv_pmap); 2432 if ((pte = pte_find(mmu, pv->pv_pmap, pv->pv_va)) != NULL) 2433 if (PTE_ISVALID(pte) && PTE_ISWIRED(pte)) 2434 count++; 2435 PMAP_UNLOCK(pv->pv_pmap); 2436 } 2437 rw_wunlock(&pvh_global_lock); 2438 return (count); 2439} 2440 2441static int 2442mmu_booke_dev_direct_mapped(mmu_t mmu, vm_paddr_t pa, vm_size_t size) 2443{ 2444 int i; 2445 vm_offset_t va; 2446 2447 /* 2448 * This currently does not work for entries that 2449 * overlap TLB1 entries. 2450 */ 2451 for (i = 0; i < tlb1_idx; i ++) { 2452 if (tlb1_iomapped(i, pa, size, &va) == 0) 2453 return (0); 2454 } 2455 2456 return (EFAULT); 2457} 2458 2459vm_offset_t 2460mmu_booke_dumpsys_map(mmu_t mmu, struct pmap_md *md, vm_size_t ofs, 2461 vm_size_t *sz) 2462{ 2463 vm_paddr_t pa, ppa; 2464 vm_offset_t va; 2465 vm_size_t gran; 2466 2467 /* Raw physical memory dumps don't have a virtual address. */ 2468 if (md->md_vaddr == ~0UL) { 2469 /* We always map a 256MB page at 256M. */ 2470 gran = 256 * 1024 * 1024; 2471 pa = md->md_paddr + ofs; 2472 ppa = pa & ~(gran - 1); 2473 ofs = pa - ppa; 2474 va = gran; 2475 tlb1_set_entry(va, ppa, gran, _TLB_ENTRY_IO); 2476 if (*sz > (gran - ofs)) 2477 *sz = gran - ofs; 2478 return (va + ofs); 2479 } 2480 2481 /* Minidumps are based on virtual memory addresses. */ 2482 va = md->md_vaddr + ofs; 2483 if (va >= kernstart + kernsize) { 2484 gran = PAGE_SIZE - (va & PAGE_MASK); 2485 if (*sz > gran) 2486 *sz = gran; 2487 } 2488 return (va); 2489} 2490 2491void 2492mmu_booke_dumpsys_unmap(mmu_t mmu, struct pmap_md *md, vm_size_t ofs, 2493 vm_offset_t va) 2494{ 2495 2496 /* Raw physical memory dumps don't have a virtual address. */ 2497 if (md->md_vaddr == ~0UL) { 2498 tlb1_idx--; 2499 tlb1[tlb1_idx].mas1 = 0; 2500 tlb1[tlb1_idx].mas2 = 0; 2501 tlb1[tlb1_idx].mas3 = 0; 2502 tlb1_write_entry(tlb1_idx); 2503 return; 2504 } 2505 2506 /* Minidumps are based on virtual memory addresses. */ 2507 /* Nothing to do... */ 2508} 2509 2510struct pmap_md * 2511mmu_booke_scan_md(mmu_t mmu, struct pmap_md *prev) 2512{ 2513 static struct pmap_md md; 2514 pte_t *pte; 2515 vm_offset_t va; 2516 2517 if (dumpsys_minidump) { 2518 md.md_paddr = ~0UL; /* Minidumps use virtual addresses. */ 2519 if (prev == NULL) { 2520 /* 1st: kernel .data and .bss. */ 2521 md.md_index = 1; 2522 md.md_vaddr = trunc_page((uintptr_t)_etext); 2523 md.md_size = round_page((uintptr_t)_end) - md.md_vaddr; 2524 return (&md); 2525 } 2526 switch (prev->md_index) { 2527 case 1: 2528 /* 2nd: msgbuf and tables (see pmap_bootstrap()). */ 2529 md.md_index = 2; 2530 md.md_vaddr = data_start; 2531 md.md_size = data_end - data_start; 2532 break; 2533 case 2: 2534 /* 3rd: kernel VM. */ 2535 va = prev->md_vaddr + prev->md_size; 2536 /* Find start of next chunk (from va). */ 2537 while (va < virtual_end) { 2538 /* Don't dump the buffer cache. */ 2539 if (va >= kmi.buffer_sva && 2540 va < kmi.buffer_eva) { 2541 va = kmi.buffer_eva; 2542 continue; 2543 } 2544 pte = pte_find(mmu, kernel_pmap, va); 2545 if (pte != NULL && PTE_ISVALID(pte)) 2546 break; 2547 va += PAGE_SIZE; 2548 } 2549 if (va < virtual_end) { 2550 md.md_vaddr = va; 2551 va += PAGE_SIZE; 2552 /* Find last page in chunk. 
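 *
 * The loop below keeps extending va one page at a time until it either
 * reaches the start of the buffer cache or hits a missing/invalid PTE;
 * the chunk size recorded is then va - md.md_vaddr.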
*/ 2553 while (va < virtual_end) { 2554 /* Don't run into the buffer cache. */ 2555 if (va == kmi.buffer_sva) 2556 break; 2557 pte = pte_find(mmu, kernel_pmap, va); 2558 if (pte == NULL || !PTE_ISVALID(pte)) 2559 break; 2560 va += PAGE_SIZE; 2561 } 2562 md.md_size = va - md.md_vaddr; 2563 break; 2564 } 2565 md.md_index = 3; 2566 /* FALLTHROUGH */ 2567 default: 2568 return (NULL); 2569 } 2570 } else { /* minidumps */ 2571 mem_regions(&physmem_regions, &physmem_regions_sz, 2572 &availmem_regions, &availmem_regions_sz); 2573 2574 if (prev == NULL) { 2575 /* first physical chunk. */ 2576 md.md_paddr = physmem_regions[0].mr_start; 2577 md.md_size = physmem_regions[0].mr_size; 2578 md.md_vaddr = ~0UL; 2579 md.md_index = 1; 2580 } else if (md.md_index < physmem_regions_sz) { 2581 md.md_paddr = physmem_regions[md.md_index].mr_start; 2582 md.md_size = physmem_regions[md.md_index].mr_size; 2583 md.md_vaddr = ~0UL; 2584 md.md_index++; 2585 } else { 2586 /* There's no next physical chunk. */ 2587 return (NULL); 2588 } 2589 } 2590 2591 return (&md); 2592} 2593 2594/* 2595 * Map a set of physical memory pages into the kernel virtual address space. 2596 * Return a pointer to where it is mapped. This routine is intended to be used 2597 * for mapping device memory, NOT real memory. 2598 */ 2599static void * 2600mmu_booke_mapdev(mmu_t mmu, vm_paddr_t pa, vm_size_t size) 2601{ 2602 void *res; 2603 uintptr_t va; 2604 vm_size_t sz; 2605 2606 /* 2607 * CCSR is premapped. Note that (pa + size - 1) is there to make sure 2608 * we don't wrap around. Devices on the local bus typically extend all 2609 * the way up to and including 0xffffffff. In that case (pa + size) 2610 * would be 0. This creates a false positive (i.e. we think it's 2611 * within the CCSR) and not create a mapping. 2612 */ 2613 if (pa >= ccsrbar_pa && (pa + size - 1) < (ccsrbar_pa + CCSRBAR_SIZE)) { 2614 va = CCSRBAR_VA + (pa - ccsrbar_pa); 2615 return ((void *)va); 2616 } 2617 2618 va = (pa >= 0x80000000) ? pa : (0xe2000000 + pa); 2619 res = (void *)va; 2620 2621 do { 2622 sz = 1 << (ilog2(size) & ~1); 2623 if (bootverbose) 2624 printf("Wiring VA=%x to PA=%x (size=%x), " 2625 "using TLB1[%d]\n", va, pa, sz, tlb1_idx); 2626 tlb1_set_entry(va, pa, sz, _TLB_ENTRY_IO); 2627 size -= sz; 2628 pa += sz; 2629 va += sz; 2630 } while (size > 0); 2631 2632 return (res); 2633} 2634 2635/* 2636 * 'Unmap' a range mapped by mmu_booke_mapdev(). 2637 */ 2638static void 2639mmu_booke_unmapdev(mmu_t mmu, vm_offset_t va, vm_size_t size) 2640{ 2641 vm_offset_t base, offset; 2642 2643 /* 2644 * Unmap only if this is inside kernel virtual space. 2645 */ 2646 if ((va >= VM_MIN_KERNEL_ADDRESS) && (va <= VM_MAX_KERNEL_ADDRESS)) { 2647 base = trunc_page(va); 2648 offset = va & PAGE_MASK; 2649 size = roundup(offset + size, PAGE_SIZE); 2650 kmem_free(kernel_map, base, size); 2651 } 2652} 2653 2654/* 2655 * mmu_booke_object_init_pt preloads the ptes for a given object into the 2656 * specified pmap. This eliminates the blast of soft faults on process startup 2657 * and immediately after an mmap. 2658 */ 2659static void 2660mmu_booke_object_init_pt(mmu_t mmu, pmap_t pmap, vm_offset_t addr, 2661 vm_object_t object, vm_pindex_t pindex, vm_size_t size) 2662{ 2663 2664 VM_OBJECT_LOCK_ASSERT(object, MA_OWNED); 2665 KASSERT(object->type == OBJT_DEVICE || object->type == OBJT_SG, 2666 ("mmu_booke_object_init_pt: non-device object")); 2667} 2668 2669/* 2670 * Perform the pmap work for mincore. 
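 *
 * (A worked example for the TLB1 sizing loop in mmu_booke_mapdev() above,
 * illustrative numbers only: a 0x30000-byte (192KB) region is covered by
 * three 64KB TLB1 entries, because each pass picks
 * sz = 1 << (ilog2(size) & ~1), i.e. the largest power of four not
 * exceeding the remaining size.)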
2671 */ 2672static int 2673mmu_booke_mincore(mmu_t mmu, pmap_t pmap, vm_offset_t addr, 2674 vm_paddr_t *locked_pa) 2675{ 2676 2677 TODO; 2678 return (0); 2679} 2680 2681/**************************************************************************/ 2682/* TID handling */ 2683/**************************************************************************/ 2684 2685/* 2686 * Allocate a TID. If necessary, steal one from someone else. 2687 * The new TID is flushed from the TLB before returning. 2688 */ 2689static tlbtid_t 2690tid_alloc(pmap_t pmap) 2691{ 2692 tlbtid_t tid; 2693 int thiscpu; 2694 2695 KASSERT((pmap != kernel_pmap), ("tid_alloc: kernel pmap")); 2696 2697 CTR2(KTR_PMAP, "%s: s (pmap = %p)", __func__, pmap); 2698 2699 thiscpu = PCPU_GET(cpuid); 2700 2701 tid = PCPU_GET(tid_next); 2702 if (tid > TID_MAX) 2703 tid = TID_MIN; 2704 PCPU_SET(tid_next, tid + 1); 2705 2706 /* If we are stealing TID then clear the relevant pmap's field */ 2707 if (tidbusy[thiscpu][tid] != NULL) { 2708 2709 CTR2(KTR_PMAP, "%s: warning: stealing tid %d", __func__, tid); 2710 2711 tidbusy[thiscpu][tid]->pm_tid[thiscpu] = TID_NONE; 2712 2713 /* Flush all entries from TLB0 matching this TID. */ 2714 tid_flush(tid); 2715 } 2716 2717 tidbusy[thiscpu][tid] = pmap; 2718 pmap->pm_tid[thiscpu] = tid; 2719 __asm __volatile("msync; isync"); 2720 2721 CTR3(KTR_PMAP, "%s: e (%02d next = %02d)", __func__, tid, 2722 PCPU_GET(tid_next)); 2723 2724 return (tid); 2725} 2726 2727/**************************************************************************/ 2728/* TLB0 handling */ 2729/**************************************************************************/ 2730 2731static void 2732tlb_print_entry(int i, uint32_t mas1, uint32_t mas2, uint32_t mas3, 2733 uint32_t mas7) 2734{ 2735 int as; 2736 char desc[3]; 2737 tlbtid_t tid; 2738 vm_size_t size; 2739 unsigned int tsize; 2740 2741 desc[2] = '\0'; 2742 if (mas1 & MAS1_VALID) 2743 desc[0] = 'V'; 2744 else 2745 desc[0] = ' '; 2746 2747 if (mas1 & MAS1_IPROT) 2748 desc[1] = 'P'; 2749 else 2750 desc[1] = ' '; 2751 2752 as = (mas1 & MAS1_TS_MASK) ? 1 : 0; 2753 tid = MAS1_GETTID(mas1); 2754 2755 tsize = (mas1 & MAS1_TSIZE_MASK) >> MAS1_TSIZE_SHIFT; 2756 size = 0; 2757 if (tsize) 2758 size = tsize2size(tsize); 2759 2760 debugf("%3d: (%s) [AS=%d] " 2761 "sz = 0x%08x tsz = %d tid = %d mas1 = 0x%08x " 2762 "mas2(va) = 0x%08x mas3(pa) = 0x%08x mas7 = 0x%08x\n", 2763 i, desc, as, size, tsize, tid, mas1, mas2, mas3, mas7); 2764} 2765 2766/* Convert TLB0 va and way number to tlb0[] table index. */ 2767static inline unsigned int 2768tlb0_tableidx(vm_offset_t va, unsigned int way) 2769{ 2770 unsigned int idx; 2771 2772 idx = (way * TLB0_ENTRIES_PER_WAY); 2773 idx += (va & MAS2_TLB0_ENTRY_IDX_MASK) >> MAS2_TLB0_ENTRY_IDX_SHIFT; 2774 return (idx); 2775} 2776 2777/* 2778 * Invalidate TLB0 entry. 
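 *
 * For reference, tlb0_tableidx() above linearizes (way, set) pairs: with,
 * say, 128 entries per way (illustrative number only), way 1 of the set
 * selected by a given EPN lands at index
 * 128 + ((va & MAS2_TLB0_ENTRY_IDX_MASK) >> MAS2_TLB0_ENTRY_IDX_SHIFT).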
2779 */ 2780static inline void 2781tlb0_flush_entry(vm_offset_t va) 2782{ 2783 2784 CTR2(KTR_PMAP, "%s: s va=0x%08x", __func__, va); 2785 2786 mtx_assert(&tlbivax_mutex, MA_OWNED); 2787 2788 __asm __volatile("tlbivax 0, %0" :: "r"(va & MAS2_EPN_MASK)); 2789 __asm __volatile("isync; msync"); 2790 __asm __volatile("tlbsync; msync"); 2791 2792 CTR1(KTR_PMAP, "%s: e", __func__); 2793} 2794 2795/* Print out contents of the MAS registers for each TLB0 entry */ 2796void 2797tlb0_print_tlbentries(void) 2798{ 2799 uint32_t mas0, mas1, mas2, mas3, mas7; 2800 int entryidx, way, idx; 2801 2802 debugf("TLB0 entries:\n"); 2803 for (way = 0; way < TLB0_WAYS; way ++) 2804 for (entryidx = 0; entryidx < TLB0_ENTRIES_PER_WAY; entryidx++) { 2805 2806 mas0 = MAS0_TLBSEL(0) | MAS0_ESEL(way); 2807 mtspr(SPR_MAS0, mas0); 2808 __asm __volatile("isync"); 2809 2810 mas2 = entryidx << MAS2_TLB0_ENTRY_IDX_SHIFT; 2811 mtspr(SPR_MAS2, mas2); 2812 2813 __asm __volatile("isync; tlbre"); 2814 2815 mas1 = mfspr(SPR_MAS1); 2816 mas2 = mfspr(SPR_MAS2); 2817 mas3 = mfspr(SPR_MAS3); 2818 mas7 = mfspr(SPR_MAS7); 2819 2820 idx = tlb0_tableidx(mas2, way); 2821 tlb_print_entry(idx, mas1, mas2, mas3, mas7); 2822 } 2823} 2824 2825/**************************************************************************/ 2826/* TLB1 handling */ 2827/**************************************************************************/ 2828 2829/* 2830 * TLB1 mapping notes: 2831 * 2832 * TLB1[0] CCSRBAR 2833 * TLB1[1] Kernel text and data. 2834 * TLB1[2-15] Additional kernel text and data mappings (if required), PCI 2835 * windows, other devices mappings. 2836 */ 2837 2838/* 2839 * Write given entry to TLB1 hardware. 2840 * Use 32 bit pa, clear 4 high-order bits of RPN (mas7). 2841 */ 2842static void 2843tlb1_write_entry(unsigned int idx) 2844{ 2845 uint32_t mas0, mas7; 2846 2847 //debugf("tlb1_write_entry: s\n"); 2848 2849 /* Clear high order RPN bits */ 2850 mas7 = 0; 2851 2852 /* Select entry */ 2853 mas0 = MAS0_TLBSEL(1) | MAS0_ESEL(idx); 2854 //debugf("tlb1_write_entry: mas0 = 0x%08x\n", mas0); 2855 2856 mtspr(SPR_MAS0, mas0); 2857 __asm __volatile("isync"); 2858 mtspr(SPR_MAS1, tlb1[idx].mas1); 2859 __asm __volatile("isync"); 2860 mtspr(SPR_MAS2, tlb1[idx].mas2); 2861 __asm __volatile("isync"); 2862 mtspr(SPR_MAS3, tlb1[idx].mas3); 2863 __asm __volatile("isync"); 2864 mtspr(SPR_MAS7, mas7); 2865 __asm __volatile("isync; tlbwe; isync; msync"); 2866 2867 //debugf("tlb1_write_entry: e\n"); 2868} 2869 2870/* 2871 * Return the largest uint value log such that 2^log <= num. 2872 */ 2873static unsigned int 2874ilog2(unsigned int num) 2875{ 2876 int lz; 2877 2878 __asm ("cntlzw %0, %1" : "=r" (lz) : "r" (num)); 2879 return (31 - lz); 2880} 2881 2882/* 2883 * Convert TLB TSIZE value to mapped region size. 2884 */ 2885static vm_size_t 2886tsize2size(unsigned int tsize) 2887{ 2888 2889 /* 2890 * size = 4^tsize KB 2891 * size = 4^tsize * 2^10 = 2^(2 * tsize - 10) 2892 */ 2893 2894 return ((1 << (2 * tsize)) * 1024); 2895} 2896 2897/* 2898 * Convert region size (must be power of 4) to TLB TSIZE value. 2899 */ 2900static unsigned int 2901size2tsize(vm_size_t size) 2902{ 2903 2904 return (ilog2(size) / 2 - 5); 2905} 2906 2907/* 2908 * Register permanent kernel mapping in TLB1. 2909 * 2910 * Entries are created starting from index 0 (current free entry is 2911 * kept in tlb1_idx) and are not supposed to be invalidated. 
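 *
 * TSIZE arithmetic used below (see tsize2size()/size2tsize() above): a
 * TSIZE of t maps 4^t KB, so a 16MB entry uses TSIZE 7 (4^7 KB = 16384 KB)
 * and size2tsize(16MB) = ilog2(16MB)/2 - 5 = 24/2 - 5 = 7.  Callers are
 * expected to pass power-of-4 sizes, as noted at size2tsize().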
2912 */ 2913static int 2914tlb1_set_entry(vm_offset_t va, vm_offset_t pa, vm_size_t size, 2915 uint32_t flags) 2916{ 2917 uint32_t ts, tid; 2918 int tsize; 2919 2920 if (tlb1_idx >= TLB1_ENTRIES) { 2921 printf("tlb1_set_entry: TLB1 full!\n"); 2922 return (-1); 2923 } 2924 2925 /* Convert size to TSIZE */ 2926 tsize = size2tsize(size); 2927 2928 tid = (TID_KERNEL << MAS1_TID_SHIFT) & MAS1_TID_MASK; 2929 /* XXX TS is hard coded to 0 for now as we only use single address space */ 2930 ts = (0 << MAS1_TS_SHIFT) & MAS1_TS_MASK; 2931 2932 /* XXX LOCK tlb1[] */ 2933 2934 tlb1[tlb1_idx].mas1 = MAS1_VALID | MAS1_IPROT | ts | tid; 2935 tlb1[tlb1_idx].mas1 |= ((tsize << MAS1_TSIZE_SHIFT) & MAS1_TSIZE_MASK); 2936 tlb1[tlb1_idx].mas2 = (va & MAS2_EPN_MASK) | flags; 2937 2938 /* Set supervisor RWX permission bits */ 2939 tlb1[tlb1_idx].mas3 = (pa & MAS3_RPN) | MAS3_SR | MAS3_SW | MAS3_SX; 2940 2941 tlb1_write_entry(tlb1_idx++); 2942 2943 /* XXX UNLOCK tlb1[] */ 2944 2945 /* 2946 * XXX in general TLB1 updates should be propagated between CPUs, 2947 * since current design assumes to have the same TLB1 set-up on all 2948 * cores. 2949 */ 2950 return (0); 2951} 2952 2953/* 2954 * Map in contiguous RAM region into the TLB1 using maximum of 2955 * KERNEL_REGION_MAX_TLB_ENTRIES entries. 2956 * 2957 * If necessary round up last entry size and return total size 2958 * used by all allocated entries. 2959 */ 2960vm_size_t 2961tlb1_mapin_region(vm_offset_t va, vm_paddr_t pa, vm_size_t size) 2962{ 2963 vm_size_t pgs[KERNEL_REGION_MAX_TLB_ENTRIES]; 2964 vm_size_t mapped, pgsz, base, mask; 2965 int idx, nents; 2966 2967 /* Round up to the next 1M */ 2968 size = (size + (1 << 20) - 1) & ~((1 << 20) - 1); 2969 2970 mapped = 0; 2971 idx = 0; 2972 base = va; 2973 pgsz = 64*1024*1024; 2974 while (mapped < size) { 2975 while (mapped < size && idx < KERNEL_REGION_MAX_TLB_ENTRIES) { 2976 while (pgsz > (size - mapped)) 2977 pgsz >>= 2; 2978 pgs[idx++] = pgsz; 2979 mapped += pgsz; 2980 } 2981 2982 /* We under-map. Correct for this. */ 2983 if (mapped < size) { 2984 while (pgs[idx - 1] == pgsz) { 2985 idx--; 2986 mapped -= pgsz; 2987 } 2988 /* XXX We may increase beyond out starting point. */ 2989 pgsz <<= 2; 2990 pgs[idx++] = pgsz; 2991 mapped += pgsz; 2992 } 2993 } 2994 2995 nents = idx; 2996 mask = pgs[0] - 1; 2997 /* Align address to the boundary */ 2998 if (va & mask) { 2999 va = (va + mask) & ~mask; 3000 pa = (pa + mask) & ~mask; 3001 } 3002 3003 for (idx = 0; idx < nents; idx++) { 3004 pgsz = pgs[idx]; 3005 debugf("%u: %x -> %x, size=%x\n", idx, pa, va, pgsz); 3006 tlb1_set_entry(va, pa, pgsz, _TLB_ENTRY_MEM); 3007 pa += pgsz; 3008 va += pgsz; 3009 } 3010 3011 mapped = (va - base); 3012 debugf("mapped size 0x%08x (wasted space 0x%08x)\n", 3013 mapped, mapped - size); 3014 return (mapped); 3015} 3016 3017/* 3018 * TLB1 initialization routine, to be called after the very first 3019 * assembler level setup done in locore.S. 3020 */ 3021void 3022tlb1_init(vm_offset_t ccsrbar) 3023{ 3024 uint32_t mas0, mas1, mas3; 3025 uint32_t tsz; 3026 u_int i; 3027 3028 ccsrbar_pa = ccsrbar; 3029 3030 if (bootinfo != NULL && bootinfo[0] != 1) { 3031 tlb1_idx = *((uint16_t *)(bootinfo + 8)); 3032 } else 3033 tlb1_idx = 1; 3034 3035 /* The first entry/entries are used to map the kernel. 
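 *
 * The loop below reads those boot-time entries back through the MAS
 * registers rather than creating new ones: entry 0 supplies the kernel
 * load address (kernload), and every valid entry's TSIZE is accumulated
 * into kernsize.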
*/ 3036 for (i = 0; i < tlb1_idx; i++) { 3037 mas0 = MAS0_TLBSEL(1) | MAS0_ESEL(i); 3038 mtspr(SPR_MAS0, mas0); 3039 __asm __volatile("isync; tlbre"); 3040 3041 mas1 = mfspr(SPR_MAS1); 3042 if ((mas1 & MAS1_VALID) == 0) 3043 continue; 3044 3045 mas3 = mfspr(SPR_MAS3); 3046 3047 tlb1[i].mas1 = mas1; 3048 tlb1[i].mas2 = mfspr(SPR_MAS2); 3049 tlb1[i].mas3 = mas3; 3050 3051 if (i == 0) 3052 kernload = mas3 & MAS3_RPN; 3053 3054 tsz = (mas1 & MAS1_TSIZE_MASK) >> MAS1_TSIZE_SHIFT; 3055 kernsize += (tsz > 0) ? tsize2size(tsz) : 0; 3056 } 3057 3058 /* Map in CCSRBAR. */ 3059 tlb1_set_entry(CCSRBAR_VA, ccsrbar, CCSRBAR_SIZE, _TLB_ENTRY_IO); 3060 3061#ifdef SMP 3062 bp_ntlb1s = tlb1_idx; 3063#endif 3064 3065 /* Purge the remaining entries */ 3066 for (i = tlb1_idx; i < TLB1_ENTRIES; i++) 3067 tlb1_write_entry(i); 3068 3069 /* Setup TLB miss defaults */ 3070 set_mas4_defaults(); 3071} 3072 3073/* 3074 * Setup MAS4 defaults. 3075 * These values are loaded to MAS0-2 on a TLB miss. 3076 */ 3077static void 3078set_mas4_defaults(void) 3079{ 3080 uint32_t mas4; 3081 3082 /* Defaults: TLB0, PID0, TSIZED=4K */ 3083 mas4 = MAS4_TLBSELD0; 3084 mas4 |= (TLB_SIZE_4K << MAS4_TSIZED_SHIFT) & MAS4_TSIZED_MASK; 3085#ifdef SMP 3086 mas4 |= MAS4_MD; 3087#endif 3088 mtspr(SPR_MAS4, mas4); 3089 __asm __volatile("isync"); 3090} 3091 3092/* 3093 * Print out contents of the MAS registers for each TLB1 entry 3094 */ 3095void 3096tlb1_print_tlbentries(void) 3097{ 3098 uint32_t mas0, mas1, mas2, mas3, mas7; 3099 int i; 3100 3101 debugf("TLB1 entries:\n"); 3102 for (i = 0; i < TLB1_ENTRIES; i++) { 3103 3104 mas0 = MAS0_TLBSEL(1) | MAS0_ESEL(i); 3105 mtspr(SPR_MAS0, mas0); 3106 3107 __asm __volatile("isync; tlbre"); 3108 3109 mas1 = mfspr(SPR_MAS1); 3110 mas2 = mfspr(SPR_MAS2); 3111 mas3 = mfspr(SPR_MAS3); 3112 mas7 = mfspr(SPR_MAS7); 3113 3114 tlb_print_entry(i, mas1, mas2, mas3, mas7); 3115 } 3116} 3117 3118/* 3119 * Print out contents of the in-ram tlb1 table. 3120 */ 3121void 3122tlb1_print_entries(void) 3123{ 3124 int i; 3125 3126 debugf("tlb1[] table entries:\n"); 3127 for (i = 0; i < TLB1_ENTRIES; i++) 3128 tlb_print_entry(i, tlb1[i].mas1, tlb1[i].mas2, tlb1[i].mas3, 0); 3129} 3130 3131/* 3132 * Return 0 if the physical IO range is encompassed by one of the 3133 * the TLB1 entries, otherwise return related error code. 3134 */ 3135static int 3136tlb1_iomapped(int i, vm_paddr_t pa, vm_size_t size, vm_offset_t *va) 3137{ 3138 uint32_t prot; 3139 vm_paddr_t pa_start; 3140 vm_paddr_t pa_end; 3141 unsigned int entry_tsize; 3142 vm_size_t entry_size; 3143 3144 *va = (vm_offset_t)NULL; 3145 3146 /* Skip invalid entries */ 3147 if (!(tlb1[i].mas1 & MAS1_VALID)) 3148 return (EINVAL); 3149 3150 /* 3151 * The entry must be cache-inhibited, guarded, and r/w 3152 * so it can function as an i/o page 3153 */ 3154 prot = tlb1[i].mas2 & (MAS2_I | MAS2_G); 3155 if (prot != (MAS2_I | MAS2_G)) 3156 return (EPERM); 3157 3158 prot = tlb1[i].mas3 & (MAS3_SR | MAS3_SW); 3159 if (prot != (MAS3_SR | MAS3_SW)) 3160 return (EPERM); 3161 3162 /* The address should be within the entry range. */ 3163 entry_tsize = (tlb1[i].mas1 & MAS1_TSIZE_MASK) >> MAS1_TSIZE_SHIFT; 3164 KASSERT((entry_tsize), ("tlb1_iomapped: invalid entry tsize")); 3165 3166 entry_size = tsize2size(entry_tsize); 3167 pa_start = tlb1[i].mas3 & MAS3_RPN; 3168 pa_end = pa_start + entry_size - 1; 3169 3170 if ((pa < pa_start) || ((pa + size) > pa_end)) 3171 return (ERANGE); 3172 3173 /* Return virtual address of this mapping. 
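 *
 * That is, the offset of pa within the matched TLB1 entry is preserved:
 * the returned va is the entry's virtual base (EPN) plus (pa - pa_start).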
*/ 3174 *va = (tlb1[i].mas2 & MAS2_EPN_MASK) + (pa - pa_start); 3175 return (0); 3176} 3177