pmap.c revision 242526
1/*- 2 * Copyright (C) 2007-2009 Semihalf, Rafal Jaworowski <raj@semihalf.com> 3 * Copyright (C) 2006 Semihalf, Marian Balakowicz <m8@semihalf.com> 4 * All rights reserved. 5 * 6 * Redistribution and use in source and binary forms, with or without 7 * modification, are permitted provided that the following conditions 8 * are met: 9 * 1. Redistributions of source code must retain the above copyright 10 * notice, this list of conditions and the following disclaimer. 11 * 2. Redistributions in binary form must reproduce the above copyright 12 * notice, this list of conditions and the following disclaimer in the 13 * documentation and/or other materials provided with the distribution. 14 * 15 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR 16 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES 17 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN 18 * NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 19 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED 20 * TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR 21 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF 22 * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING 23 * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS 24 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 25 * 26 * Some hw specific parts of this pmap were derived or influenced 27 * by NetBSD's ibm4xx pmap module. More generic code is shared with 28 * a few other pmap modules from the FreeBSD tree. 29 */ 30 31 /* 32 * VM layout notes: 33 * 34 * Kernel and user threads run within one common virtual address space 35 * defined by AS=0. 36 * 37 * Virtual address space layout: 38 * ----------------------------- 39 * 0x0000_0000 - 0xafff_ffff : user process 40 * 0xb000_0000 - 0xbfff_ffff : pmap_mapdev()-ed area (PCI/PCIE etc.) 41 * 0xc000_0000 - 0xc0ff_ffff : kernel reserved 42 * 0xc000_0000 - data_end : kernel code+data, env, metadata etc. 
43 * 0xc100_0000 - 0xfeef_ffff : KVA 44 * 0xc100_0000 - 0xc100_3fff : reserved for page zero/copy 45 * 0xc100_4000 - 0xc200_3fff : reserved for ptbl bufs 46 * 0xc200_4000 - 0xc200_8fff : guard page + kstack0 47 * 0xc200_9000 - 0xfeef_ffff : actual free KVA space 48 * 0xfef0_0000 - 0xffff_ffff : I/O devices region 49 */ 50 51#include <sys/cdefs.h> 52__FBSDID("$FreeBSD: head/sys/powerpc/booke/pmap.c 242526 2012-11-03 22:02:12Z marcel $"); 53 54#include <sys/types.h> 55#include <sys/param.h> 56#include <sys/malloc.h> 57#include <sys/ktr.h> 58#include <sys/proc.h> 59#include <sys/user.h> 60#include <sys/queue.h> 61#include <sys/systm.h> 62#include <sys/kernel.h> 63#include <sys/linker.h> 64#include <sys/msgbuf.h> 65#include <sys/lock.h> 66#include <sys/mutex.h> 67#include <sys/sched.h> 68#include <sys/smp.h> 69#include <sys/vmmeter.h> 70 71#include <vm/vm.h> 72#include <vm/vm_page.h> 73#include <vm/vm_kern.h> 74#include <vm/vm_pageout.h> 75#include <vm/vm_extern.h> 76#include <vm/vm_object.h> 77#include <vm/vm_param.h> 78#include <vm/vm_map.h> 79#include <vm/vm_pager.h> 80#include <vm/uma.h> 81 82#include <machine/cpu.h> 83#include <machine/pcb.h> 84#include <machine/platform.h> 85 86#include <machine/tlb.h> 87#include <machine/spr.h> 88#include <machine/vmparam.h> 89#include <machine/md_var.h> 90#include <machine/mmuvar.h> 91#include <machine/pmap.h> 92#include <machine/pte.h> 93 94#include "mmu_if.h" 95 96#ifdef DEBUG 97#define debugf(fmt, args...) printf(fmt, ##args) 98#else 99#define debugf(fmt, args...) 100#endif 101 102#define TODO panic("%s: not implemented", __func__); 103 104extern struct mtx sched_lock; 105 106extern int dumpsys_minidump; 107 108extern unsigned char _etext[]; 109extern unsigned char _end[]; 110 111extern uint32_t *bootinfo; 112 113#ifdef SMP 114extern uint32_t bp_ntlb1s; 115#endif 116 117vm_paddr_t ccsrbar_pa; 118vm_paddr_t kernload; 119vm_offset_t kernstart; 120vm_size_t kernsize; 121 122/* Message buffer and tables. */ 123static vm_offset_t data_start; 124static vm_size_t data_end; 125 126/* Phys/avail memory regions. */ 127static struct mem_region *availmem_regions; 128static int availmem_regions_sz; 129static struct mem_region *physmem_regions; 130static int physmem_regions_sz; 131 132/* Reserved KVA space and mutex for mmu_booke_zero_page. */ 133static vm_offset_t zero_page_va; 134static struct mtx zero_page_mutex; 135 136static struct mtx tlbivax_mutex; 137 138/* 139 * Reserved KVA space for mmu_booke_zero_page_idle. This is used 140 * by idle thred only, no lock required. 141 */ 142static vm_offset_t zero_page_idle_va; 143 144/* Reserved KVA space and mutex for mmu_booke_copy_page. */ 145static vm_offset_t copy_page_src_va; 146static vm_offset_t copy_page_dst_va; 147static struct mtx copy_page_mutex; 148 149/**************************************************************************/ 150/* PMAP */ 151/**************************************************************************/ 152 153static void mmu_booke_enter_locked(mmu_t, pmap_t, vm_offset_t, vm_page_t, 154 vm_prot_t, boolean_t); 155 156unsigned int kptbl_min; /* Index of the first kernel ptbl. */ 157unsigned int kernel_ptbls; /* Number of KVA ptbls. */ 158 159/* 160 * If user pmap is processed with mmu_booke_remove and the resident count 161 * drops to 0, there are no more pages to remove, so we need not continue. 
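 * mmu_booke_remove() checks this condition before walking the requested range page by page.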
162 */ 163#define PMAP_REMOVE_DONE(pmap) \ 164 ((pmap) != kernel_pmap && (pmap)->pm_stats.resident_count == 0) 165 166extern void tid_flush(tlbtid_t); 167 168/**************************************************************************/ 169/* TLB and TID handling */ 170/**************************************************************************/ 171 172/* Translation ID busy table */ 173static volatile pmap_t tidbusy[MAXCPU][TID_MAX + 1]; 174 175/* 176 * TLB0 capabilities (entry, way numbers etc.). These can vary between e500 177 * core revisions and should be read from h/w registers during early config. 178 */ 179uint32_t tlb0_entries; 180uint32_t tlb0_ways; 181uint32_t tlb0_entries_per_way; 182 183#define TLB0_ENTRIES (tlb0_entries) 184#define TLB0_WAYS (tlb0_ways) 185#define TLB0_ENTRIES_PER_WAY (tlb0_entries_per_way) 186 187#define TLB1_ENTRIES 16 188 189/* In-ram copy of the TLB1 */ 190static tlb_entry_t tlb1[TLB1_ENTRIES]; 191 192/* Next free entry in the TLB1 */ 193static unsigned int tlb1_idx; 194 195static tlbtid_t tid_alloc(struct pmap *); 196 197static void tlb_print_entry(int, uint32_t, uint32_t, uint32_t, uint32_t); 198 199static int tlb1_set_entry(vm_offset_t, vm_offset_t, vm_size_t, uint32_t); 200static void tlb1_write_entry(unsigned int); 201static int tlb1_iomapped(int, vm_paddr_t, vm_size_t, vm_offset_t *); 202static vm_size_t tlb1_mapin_region(vm_offset_t, vm_paddr_t, vm_size_t); 203 204static vm_size_t tsize2size(unsigned int); 205static unsigned int size2tsize(vm_size_t); 206static unsigned int ilog2(unsigned int); 207 208static void set_mas4_defaults(void); 209 210static inline void tlb0_flush_entry(vm_offset_t); 211static inline unsigned int tlb0_tableidx(vm_offset_t, unsigned int); 212 213/**************************************************************************/ 214/* Page table management */ 215/**************************************************************************/ 216 217/* Data for the pv entry allocation mechanism */ 218static uma_zone_t pvzone; 219static struct vm_object pvzone_obj; 220static int pv_entry_count = 0, pv_entry_max = 0, pv_entry_high_water = 0; 221 222#define PV_ENTRY_ZONE_MIN 2048 /* min pv entries in uma zone */ 223 224#ifndef PMAP_SHPGPERPROC 225#define PMAP_SHPGPERPROC 200 226#endif 227 228static void ptbl_init(void); 229static struct ptbl_buf *ptbl_buf_alloc(void); 230static void ptbl_buf_free(struct ptbl_buf *); 231static void ptbl_free_pmap_ptbl(pmap_t, pte_t *); 232 233static pte_t *ptbl_alloc(mmu_t, pmap_t, unsigned int); 234static void ptbl_free(mmu_t, pmap_t, unsigned int); 235static void ptbl_hold(mmu_t, pmap_t, unsigned int); 236static int ptbl_unhold(mmu_t, pmap_t, unsigned int); 237 238static vm_paddr_t pte_vatopa(mmu_t, pmap_t, vm_offset_t); 239static pte_t *pte_find(mmu_t, pmap_t, vm_offset_t); 240static void pte_enter(mmu_t, pmap_t, vm_page_t, vm_offset_t, uint32_t); 241static int pte_remove(mmu_t, pmap_t, vm_offset_t, uint8_t); 242 243static pv_entry_t pv_alloc(void); 244static void pv_free(pv_entry_t); 245static void pv_insert(pmap_t, vm_offset_t, vm_page_t); 246static void pv_remove(pmap_t, vm_offset_t, vm_page_t); 247 248/* Number of kva ptbl buffers, each covering one ptbl (PTBL_PAGES). */ 249#define PTBL_BUFS (128 * 16) 250 251struct ptbl_buf { 252 TAILQ_ENTRY(ptbl_buf) link; /* list link */ 253 vm_offset_t kva; /* va of mapping */ 254}; 255 256/* ptbl free list and a lock used for access synchronization. 
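 * ptbl_buf_alloc() and ptbl_buf_free() take entries from and return them to this list under the lock.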
*/ 257static TAILQ_HEAD(, ptbl_buf) ptbl_buf_freelist; 258static struct mtx ptbl_buf_freelist_lock; 259 260/* Base address of kva space allocated fot ptbl bufs. */ 261static vm_offset_t ptbl_buf_pool_vabase; 262 263/* Pointer to ptbl_buf structures. */ 264static struct ptbl_buf *ptbl_bufs; 265 266void pmap_bootstrap_ap(volatile uint32_t *); 267 268/* 269 * Kernel MMU interface 270 */ 271static void mmu_booke_change_wiring(mmu_t, pmap_t, vm_offset_t, boolean_t); 272static void mmu_booke_clear_modify(mmu_t, vm_page_t); 273static void mmu_booke_clear_reference(mmu_t, vm_page_t); 274static void mmu_booke_copy(mmu_t, pmap_t, pmap_t, vm_offset_t, 275 vm_size_t, vm_offset_t); 276static void mmu_booke_copy_page(mmu_t, vm_page_t, vm_page_t); 277static void mmu_booke_enter(mmu_t, pmap_t, vm_offset_t, vm_page_t, 278 vm_prot_t, boolean_t); 279static void mmu_booke_enter_object(mmu_t, pmap_t, vm_offset_t, vm_offset_t, 280 vm_page_t, vm_prot_t); 281static void mmu_booke_enter_quick(mmu_t, pmap_t, vm_offset_t, vm_page_t, 282 vm_prot_t); 283static vm_paddr_t mmu_booke_extract(mmu_t, pmap_t, vm_offset_t); 284static vm_page_t mmu_booke_extract_and_hold(mmu_t, pmap_t, vm_offset_t, 285 vm_prot_t); 286static void mmu_booke_init(mmu_t); 287static boolean_t mmu_booke_is_modified(mmu_t, vm_page_t); 288static boolean_t mmu_booke_is_prefaultable(mmu_t, pmap_t, vm_offset_t); 289static boolean_t mmu_booke_is_referenced(mmu_t, vm_page_t); 290static int mmu_booke_ts_referenced(mmu_t, vm_page_t); 291static vm_offset_t mmu_booke_map(mmu_t, vm_offset_t *, vm_paddr_t, vm_paddr_t, 292 int); 293static int mmu_booke_mincore(mmu_t, pmap_t, vm_offset_t, 294 vm_paddr_t *); 295static void mmu_booke_object_init_pt(mmu_t, pmap_t, vm_offset_t, 296 vm_object_t, vm_pindex_t, vm_size_t); 297static boolean_t mmu_booke_page_exists_quick(mmu_t, pmap_t, vm_page_t); 298static void mmu_booke_page_init(mmu_t, vm_page_t); 299static int mmu_booke_page_wired_mappings(mmu_t, vm_page_t); 300static void mmu_booke_pinit(mmu_t, pmap_t); 301static void mmu_booke_pinit0(mmu_t, pmap_t); 302static void mmu_booke_protect(mmu_t, pmap_t, vm_offset_t, vm_offset_t, 303 vm_prot_t); 304static void mmu_booke_qenter(mmu_t, vm_offset_t, vm_page_t *, int); 305static void mmu_booke_qremove(mmu_t, vm_offset_t, int); 306static void mmu_booke_release(mmu_t, pmap_t); 307static void mmu_booke_remove(mmu_t, pmap_t, vm_offset_t, vm_offset_t); 308static void mmu_booke_remove_all(mmu_t, vm_page_t); 309static void mmu_booke_remove_write(mmu_t, vm_page_t); 310static void mmu_booke_zero_page(mmu_t, vm_page_t); 311static void mmu_booke_zero_page_area(mmu_t, vm_page_t, int, int); 312static void mmu_booke_zero_page_idle(mmu_t, vm_page_t); 313static void mmu_booke_activate(mmu_t, struct thread *); 314static void mmu_booke_deactivate(mmu_t, struct thread *); 315static void mmu_booke_bootstrap(mmu_t, vm_offset_t, vm_offset_t); 316static void *mmu_booke_mapdev(mmu_t, vm_paddr_t, vm_size_t); 317static void mmu_booke_unmapdev(mmu_t, vm_offset_t, vm_size_t); 318static vm_paddr_t mmu_booke_kextract(mmu_t, vm_offset_t); 319static void mmu_booke_kenter(mmu_t, vm_offset_t, vm_paddr_t); 320static void mmu_booke_kremove(mmu_t, vm_offset_t); 321static boolean_t mmu_booke_dev_direct_mapped(mmu_t, vm_paddr_t, vm_size_t); 322static void mmu_booke_sync_icache(mmu_t, pmap_t, vm_offset_t, 323 vm_size_t); 324static vm_offset_t mmu_booke_dumpsys_map(mmu_t, struct pmap_md *, 325 vm_size_t, vm_size_t *); 326static void mmu_booke_dumpsys_unmap(mmu_t, struct pmap_md *, 327 vm_size_t, vm_offset_t); 
328static struct pmap_md *mmu_booke_scan_md(mmu_t, struct pmap_md *); 329 330static mmu_method_t mmu_booke_methods[] = { 331 /* pmap dispatcher interface */ 332 MMUMETHOD(mmu_change_wiring, mmu_booke_change_wiring), 333 MMUMETHOD(mmu_clear_modify, mmu_booke_clear_modify), 334 MMUMETHOD(mmu_clear_reference, mmu_booke_clear_reference), 335 MMUMETHOD(mmu_copy, mmu_booke_copy), 336 MMUMETHOD(mmu_copy_page, mmu_booke_copy_page), 337 MMUMETHOD(mmu_enter, mmu_booke_enter), 338 MMUMETHOD(mmu_enter_object, mmu_booke_enter_object), 339 MMUMETHOD(mmu_enter_quick, mmu_booke_enter_quick), 340 MMUMETHOD(mmu_extract, mmu_booke_extract), 341 MMUMETHOD(mmu_extract_and_hold, mmu_booke_extract_and_hold), 342 MMUMETHOD(mmu_init, mmu_booke_init), 343 MMUMETHOD(mmu_is_modified, mmu_booke_is_modified), 344 MMUMETHOD(mmu_is_prefaultable, mmu_booke_is_prefaultable), 345 MMUMETHOD(mmu_is_referenced, mmu_booke_is_referenced), 346 MMUMETHOD(mmu_ts_referenced, mmu_booke_ts_referenced), 347 MMUMETHOD(mmu_map, mmu_booke_map), 348 MMUMETHOD(mmu_mincore, mmu_booke_mincore), 349 MMUMETHOD(mmu_object_init_pt, mmu_booke_object_init_pt), 350 MMUMETHOD(mmu_page_exists_quick,mmu_booke_page_exists_quick), 351 MMUMETHOD(mmu_page_init, mmu_booke_page_init), 352 MMUMETHOD(mmu_page_wired_mappings, mmu_booke_page_wired_mappings), 353 MMUMETHOD(mmu_pinit, mmu_booke_pinit), 354 MMUMETHOD(mmu_pinit0, mmu_booke_pinit0), 355 MMUMETHOD(mmu_protect, mmu_booke_protect), 356 MMUMETHOD(mmu_qenter, mmu_booke_qenter), 357 MMUMETHOD(mmu_qremove, mmu_booke_qremove), 358 MMUMETHOD(mmu_release, mmu_booke_release), 359 MMUMETHOD(mmu_remove, mmu_booke_remove), 360 MMUMETHOD(mmu_remove_all, mmu_booke_remove_all), 361 MMUMETHOD(mmu_remove_write, mmu_booke_remove_write), 362 MMUMETHOD(mmu_sync_icache, mmu_booke_sync_icache), 363 MMUMETHOD(mmu_zero_page, mmu_booke_zero_page), 364 MMUMETHOD(mmu_zero_page_area, mmu_booke_zero_page_area), 365 MMUMETHOD(mmu_zero_page_idle, mmu_booke_zero_page_idle), 366 MMUMETHOD(mmu_activate, mmu_booke_activate), 367 MMUMETHOD(mmu_deactivate, mmu_booke_deactivate), 368 369 /* Internal interfaces */ 370 MMUMETHOD(mmu_bootstrap, mmu_booke_bootstrap), 371 MMUMETHOD(mmu_dev_direct_mapped,mmu_booke_dev_direct_mapped), 372 MMUMETHOD(mmu_mapdev, mmu_booke_mapdev), 373 MMUMETHOD(mmu_kenter, mmu_booke_kenter), 374 MMUMETHOD(mmu_kextract, mmu_booke_kextract), 375/* MMUMETHOD(mmu_kremove, mmu_booke_kremove), */ 376 MMUMETHOD(mmu_unmapdev, mmu_booke_unmapdev), 377 378 /* dumpsys() support */ 379 MMUMETHOD(mmu_dumpsys_map, mmu_booke_dumpsys_map), 380 MMUMETHOD(mmu_dumpsys_unmap, mmu_booke_dumpsys_unmap), 381 MMUMETHOD(mmu_scan_md, mmu_booke_scan_md), 382 383 { 0, 0 } 384}; 385 386MMU_DEF(booke_mmu, MMU_TYPE_BOOKE, mmu_booke_methods, 0); 387 388static inline void 389tlb_miss_lock(void) 390{ 391#ifdef SMP 392 struct pcpu *pc; 393 394 if (!smp_started) 395 return; 396 397 STAILQ_FOREACH(pc, &cpuhead, pc_allcpu) { 398 if (pc != pcpup) { 399 400 CTR3(KTR_PMAP, "%s: tlb miss LOCK of CPU=%d, " 401 "tlb_lock=%p", __func__, pc->pc_cpuid, pc->pc_booke_tlb_lock); 402 403 KASSERT((pc->pc_cpuid != PCPU_GET(cpuid)), 404 ("tlb_miss_lock: tried to lock self")); 405 406 tlb_lock(pc->pc_booke_tlb_lock); 407 408 CTR1(KTR_PMAP, "%s: locked", __func__); 409 } 410 } 411#endif 412} 413 414static inline void 415tlb_miss_unlock(void) 416{ 417#ifdef SMP 418 struct pcpu *pc; 419 420 if (!smp_started) 421 return; 422 423 STAILQ_FOREACH(pc, &cpuhead, pc_allcpu) { 424 if (pc != pcpup) { 425 CTR2(KTR_PMAP, "%s: tlb miss UNLOCK of CPU=%d", 426 __func__, pc->pc_cpuid); 
427 428 tlb_unlock(pc->pc_booke_tlb_lock); 429 430 CTR1(KTR_PMAP, "%s: unlocked", __func__); 431 } 432 } 433#endif 434} 435 436/* Return number of entries in TLB0. */ 437static __inline void 438tlb0_get_tlbconf(void) 439{ 440 uint32_t tlb0_cfg; 441 442 tlb0_cfg = mfspr(SPR_TLB0CFG); 443 tlb0_entries = tlb0_cfg & TLBCFG_NENTRY_MASK; 444 tlb0_ways = (tlb0_cfg & TLBCFG_ASSOC_MASK) >> TLBCFG_ASSOC_SHIFT; 445 tlb0_entries_per_way = tlb0_entries / tlb0_ways; 446} 447 448/* Initialize pool of kva ptbl buffers. */ 449static void 450ptbl_init(void) 451{ 452 int i; 453 454 CTR3(KTR_PMAP, "%s: s (ptbl_bufs = 0x%08x size 0x%08x)", __func__, 455 (uint32_t)ptbl_bufs, sizeof(struct ptbl_buf) * PTBL_BUFS); 456 CTR3(KTR_PMAP, "%s: s (ptbl_buf_pool_vabase = 0x%08x size = 0x%08x)", 457 __func__, ptbl_buf_pool_vabase, PTBL_BUFS * PTBL_PAGES * PAGE_SIZE); 458 459 mtx_init(&ptbl_buf_freelist_lock, "ptbl bufs lock", NULL, MTX_DEF); 460 TAILQ_INIT(&ptbl_buf_freelist); 461 462 for (i = 0; i < PTBL_BUFS; i++) { 463 ptbl_bufs[i].kva = ptbl_buf_pool_vabase + i * PTBL_PAGES * PAGE_SIZE; 464 TAILQ_INSERT_TAIL(&ptbl_buf_freelist, &ptbl_bufs[i], link); 465 } 466} 467 468/* Get a ptbl_buf from the freelist. */ 469static struct ptbl_buf * 470ptbl_buf_alloc(void) 471{ 472 struct ptbl_buf *buf; 473 474 mtx_lock(&ptbl_buf_freelist_lock); 475 buf = TAILQ_FIRST(&ptbl_buf_freelist); 476 if (buf != NULL) 477 TAILQ_REMOVE(&ptbl_buf_freelist, buf, link); 478 mtx_unlock(&ptbl_buf_freelist_lock); 479 480 CTR2(KTR_PMAP, "%s: buf = %p", __func__, buf); 481 482 return (buf); 483} 484 485/* Return ptbl buff to free pool. */ 486static void 487ptbl_buf_free(struct ptbl_buf *buf) 488{ 489 490 CTR2(KTR_PMAP, "%s: buf = %p", __func__, buf); 491 492 mtx_lock(&ptbl_buf_freelist_lock); 493 TAILQ_INSERT_TAIL(&ptbl_buf_freelist, buf, link); 494 mtx_unlock(&ptbl_buf_freelist_lock); 495} 496 497/* 498 * Search the list of allocated ptbl bufs and find on list of allocated ptbls 499 */ 500static void 501ptbl_free_pmap_ptbl(pmap_t pmap, pte_t *ptbl) 502{ 503 struct ptbl_buf *pbuf; 504 505 CTR2(KTR_PMAP, "%s: ptbl = %p", __func__, ptbl); 506 507 PMAP_LOCK_ASSERT(pmap, MA_OWNED); 508 509 TAILQ_FOREACH(pbuf, &pmap->pm_ptbl_list, link) 510 if (pbuf->kva == (vm_offset_t)ptbl) { 511 /* Remove from pmap ptbl buf list. */ 512 TAILQ_REMOVE(&pmap->pm_ptbl_list, pbuf, link); 513 514 /* Free corresponding ptbl buf. */ 515 ptbl_buf_free(pbuf); 516 break; 517 } 518} 519 520/* Allocate page table. */ 521static pte_t * 522ptbl_alloc(mmu_t mmu, pmap_t pmap, unsigned int pdir_idx) 523{ 524 vm_page_t mtbl[PTBL_PAGES]; 525 vm_page_t m; 526 struct ptbl_buf *pbuf; 527 unsigned int pidx; 528 pte_t *ptbl; 529 int i; 530 531 CTR4(KTR_PMAP, "%s: pmap = %p su = %d pdir_idx = %d", __func__, pmap, 532 (pmap == kernel_pmap), pdir_idx); 533 534 KASSERT((pdir_idx <= (VM_MAXUSER_ADDRESS / PDIR_SIZE)), 535 ("ptbl_alloc: invalid pdir_idx")); 536 KASSERT((pmap->pm_pdir[pdir_idx] == NULL), 537 ("pte_alloc: valid ptbl entry exists!")); 538 539 pbuf = ptbl_buf_alloc(); 540 if (pbuf == NULL) 541 panic("pte_alloc: couldn't alloc kernel virtual memory"); 542 543 ptbl = (pte_t *)pbuf->kva; 544 545 CTR2(KTR_PMAP, "%s: ptbl kva = %p", __func__, ptbl); 546 547 /* Allocate ptbl pages, this will sleep! 
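 * If vm_page_alloc() fails, the pmap and page queue locks are dropped around VM_WAIT and re-acquired before retrying.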
*/ 548 for (i = 0; i < PTBL_PAGES; i++) { 549 pidx = (PTBL_PAGES * pdir_idx) + i; 550 while ((m = vm_page_alloc(NULL, pidx, 551 VM_ALLOC_NOOBJ | VM_ALLOC_WIRED)) == NULL) { 552 553 PMAP_UNLOCK(pmap); 554 vm_page_unlock_queues(); 555 VM_WAIT; 556 vm_page_lock_queues(); 557 PMAP_LOCK(pmap); 558 } 559 mtbl[i] = m; 560 } 561 562 /* Map allocated pages into kernel_pmap. */ 563 mmu_booke_qenter(mmu, (vm_offset_t)ptbl, mtbl, PTBL_PAGES); 564 565 /* Zero whole ptbl. */ 566 bzero((caddr_t)ptbl, PTBL_PAGES * PAGE_SIZE); 567 568 /* Add pbuf to the pmap ptbl bufs list. */ 569 TAILQ_INSERT_TAIL(&pmap->pm_ptbl_list, pbuf, link); 570 571 return (ptbl); 572} 573 574/* Free ptbl pages and invalidate pdir entry. */ 575static void 576ptbl_free(mmu_t mmu, pmap_t pmap, unsigned int pdir_idx) 577{ 578 pte_t *ptbl; 579 vm_paddr_t pa; 580 vm_offset_t va; 581 vm_page_t m; 582 int i; 583 584 CTR4(KTR_PMAP, "%s: pmap = %p su = %d pdir_idx = %d", __func__, pmap, 585 (pmap == kernel_pmap), pdir_idx); 586 587 KASSERT((pdir_idx <= (VM_MAXUSER_ADDRESS / PDIR_SIZE)), 588 ("ptbl_free: invalid pdir_idx")); 589 590 ptbl = pmap->pm_pdir[pdir_idx]; 591 592 CTR2(KTR_PMAP, "%s: ptbl = %p", __func__, ptbl); 593 594 KASSERT((ptbl != NULL), ("ptbl_free: null ptbl")); 595 596 /* 597 * Invalidate the pdir entry as soon as possible, so that other CPUs 598 * don't attempt to look up the page tables we are releasing. 599 */ 600 mtx_lock_spin(&tlbivax_mutex); 601 tlb_miss_lock(); 602 603 pmap->pm_pdir[pdir_idx] = NULL; 604 605 tlb_miss_unlock(); 606 mtx_unlock_spin(&tlbivax_mutex); 607 608 for (i = 0; i < PTBL_PAGES; i++) { 609 va = ((vm_offset_t)ptbl + (i * PAGE_SIZE)); 610 pa = pte_vatopa(mmu, kernel_pmap, va); 611 m = PHYS_TO_VM_PAGE(pa); 612 vm_page_free_zero(m); 613 atomic_subtract_int(&cnt.v_wire_count, 1); 614 mmu_booke_kremove(mmu, va); 615 } 616 617 ptbl_free_pmap_ptbl(pmap, ptbl); 618} 619 620/* 621 * Decrement ptbl pages hold count and attempt to free ptbl pages. 622 * Called when removing pte entry from ptbl. 623 * 624 * Return 1 if ptbl pages were freed. 625 */ 626static int 627ptbl_unhold(mmu_t mmu, pmap_t pmap, unsigned int pdir_idx) 628{ 629 pte_t *ptbl; 630 vm_paddr_t pa; 631 vm_page_t m; 632 int i; 633 634 CTR4(KTR_PMAP, "%s: pmap = %p su = %d pdir_idx = %d", __func__, pmap, 635 (pmap == kernel_pmap), pdir_idx); 636 637 KASSERT((pdir_idx <= (VM_MAXUSER_ADDRESS / PDIR_SIZE)), 638 ("ptbl_unhold: invalid pdir_idx")); 639 KASSERT((pmap != kernel_pmap), 640 ("ptbl_unhold: unholding kernel ptbl!")); 641 642 ptbl = pmap->pm_pdir[pdir_idx]; 643 644 //debugf("ptbl_unhold: ptbl = 0x%08x\n", (u_int32_t)ptbl); 645 KASSERT(((vm_offset_t)ptbl >= VM_MIN_KERNEL_ADDRESS), 646 ("ptbl_unhold: non kva ptbl")); 647 648 /* decrement hold count */ 649 for (i = 0; i < PTBL_PAGES; i++) { 650 pa = pte_vatopa(mmu, kernel_pmap, 651 (vm_offset_t)ptbl + (i * PAGE_SIZE)); 652 m = PHYS_TO_VM_PAGE(pa); 653 m->wire_count--; 654 } 655 656 /* 657 * Free ptbl pages if there are no pte etries in this ptbl. 658 * wire_count has the same value for all ptbl pages, so check the last 659 * page. 660 */ 661 if (m->wire_count == 0) { 662 ptbl_free(mmu, pmap, pdir_idx); 663 664 //debugf("ptbl_unhold: e (freed ptbl)\n"); 665 return (1); 666 } 667 668 return (0); 669} 670 671/* 672 * Increment hold count for ptbl pages. This routine is used when a new pte 673 * entry is being inserted into the ptbl. 
674 */ 675static void 676ptbl_hold(mmu_t mmu, pmap_t pmap, unsigned int pdir_idx) 677{ 678 vm_paddr_t pa; 679 pte_t *ptbl; 680 vm_page_t m; 681 int i; 682 683 CTR3(KTR_PMAP, "%s: pmap = %p pdir_idx = %d", __func__, pmap, 684 pdir_idx); 685 686 KASSERT((pdir_idx <= (VM_MAXUSER_ADDRESS / PDIR_SIZE)), 687 ("ptbl_hold: invalid pdir_idx")); 688 KASSERT((pmap != kernel_pmap), 689 ("ptbl_hold: holding kernel ptbl!")); 690 691 ptbl = pmap->pm_pdir[pdir_idx]; 692 693 KASSERT((ptbl != NULL), ("ptbl_hold: null ptbl")); 694 695 for (i = 0; i < PTBL_PAGES; i++) { 696 pa = pte_vatopa(mmu, kernel_pmap, 697 (vm_offset_t)ptbl + (i * PAGE_SIZE)); 698 m = PHYS_TO_VM_PAGE(pa); 699 m->wire_count++; 700 } 701} 702 703/* Allocate pv_entry structure. */ 704pv_entry_t 705pv_alloc(void) 706{ 707 pv_entry_t pv; 708 709 pv_entry_count++; 710 if (pv_entry_count > pv_entry_high_water) 711 pagedaemon_wakeup(); 712 pv = uma_zalloc(pvzone, M_NOWAIT); 713 714 return (pv); 715} 716 717/* Free pv_entry structure. */ 718static __inline void 719pv_free(pv_entry_t pve) 720{ 721 722 pv_entry_count--; 723 uma_zfree(pvzone, pve); 724} 725 726 727/* Allocate and initialize pv_entry structure. */ 728static void 729pv_insert(pmap_t pmap, vm_offset_t va, vm_page_t m) 730{ 731 pv_entry_t pve; 732 733 //int su = (pmap == kernel_pmap); 734 //debugf("pv_insert: s (su = %d pmap = 0x%08x va = 0x%08x m = 0x%08x)\n", su, 735 // (u_int32_t)pmap, va, (u_int32_t)m); 736 737 pve = pv_alloc(); 738 if (pve == NULL) 739 panic("pv_insert: no pv entries!"); 740 741 pve->pv_pmap = pmap; 742 pve->pv_va = va; 743 744 /* add to pv_list */ 745 PMAP_LOCK_ASSERT(pmap, MA_OWNED); 746 mtx_assert(&vm_page_queue_mtx, MA_OWNED); 747 748 TAILQ_INSERT_TAIL(&m->md.pv_list, pve, pv_link); 749 750 //debugf("pv_insert: e\n"); 751} 752 753/* Destroy pv entry. */ 754static void 755pv_remove(pmap_t pmap, vm_offset_t va, vm_page_t m) 756{ 757 pv_entry_t pve; 758 759 //int su = (pmap == kernel_pmap); 760 //debugf("pv_remove: s (su = %d pmap = 0x%08x va = 0x%08x)\n", su, (u_int32_t)pmap, va); 761 762 PMAP_LOCK_ASSERT(pmap, MA_OWNED); 763 mtx_assert(&vm_page_queue_mtx, MA_OWNED); 764 765 /* find pv entry */ 766 TAILQ_FOREACH(pve, &m->md.pv_list, pv_link) { 767 if ((pmap == pve->pv_pmap) && (va == pve->pv_va)) { 768 /* remove from pv_list */ 769 TAILQ_REMOVE(&m->md.pv_list, pve, pv_link); 770 if (TAILQ_EMPTY(&m->md.pv_list)) 771 vm_page_aflag_clear(m, PGA_WRITEABLE); 772 773 /* free pv entry struct */ 774 pv_free(pve); 775 break; 776 } 777 } 778 779 //debugf("pv_remove: e\n"); 780} 781 782/* 783 * Clean pte entry, try to free page table page if requested. 784 * 785 * Return 1 if ptbl pages were freed, otherwise return 0. 786 */ 787static int 788pte_remove(mmu_t mmu, pmap_t pmap, vm_offset_t va, uint8_t flags) 789{ 790 unsigned int pdir_idx = PDIR_IDX(va); 791 unsigned int ptbl_idx = PTBL_IDX(va); 792 vm_page_t m; 793 pte_t *ptbl; 794 pte_t *pte; 795 796 //int su = (pmap == kernel_pmap); 797 //debugf("pte_remove: s (su = %d pmap = 0x%08x va = 0x%08x flags = %d)\n", 798 // su, (u_int32_t)pmap, va, flags); 799 800 ptbl = pmap->pm_pdir[pdir_idx]; 801 KASSERT(ptbl, ("pte_remove: null ptbl")); 802 803 pte = &ptbl[ptbl_idx]; 804 805 if (pte == NULL || !PTE_ISVALID(pte)) 806 return (0); 807 808 if (PTE_ISWIRED(pte)) 809 pmap->pm_stats.wired_count--; 810 811 /* Handle managed entry. */ 812 if (PTE_ISMANAGED(pte)) { 813 /* Get vm_page_t for mapped pte. 
*/ 814 m = PHYS_TO_VM_PAGE(PTE_PA(pte)); 815 816 if (PTE_ISMODIFIED(pte)) 817 vm_page_dirty(m); 818 819 if (PTE_ISREFERENCED(pte)) 820 vm_page_aflag_set(m, PGA_REFERENCED); 821 822 pv_remove(pmap, va, m); 823 } 824 825 mtx_lock_spin(&tlbivax_mutex); 826 tlb_miss_lock(); 827 828 tlb0_flush_entry(va); 829 pte->flags = 0; 830 pte->rpn = 0; 831 832 tlb_miss_unlock(); 833 mtx_unlock_spin(&tlbivax_mutex); 834 835 pmap->pm_stats.resident_count--; 836 837 if (flags & PTBL_UNHOLD) { 838 //debugf("pte_remove: e (unhold)\n"); 839 return (ptbl_unhold(mmu, pmap, pdir_idx)); 840 } 841 842 //debugf("pte_remove: e\n"); 843 return (0); 844} 845 846/* 847 * Insert PTE for a given page and virtual address. 848 */ 849static void 850pte_enter(mmu_t mmu, pmap_t pmap, vm_page_t m, vm_offset_t va, uint32_t flags) 851{ 852 unsigned int pdir_idx = PDIR_IDX(va); 853 unsigned int ptbl_idx = PTBL_IDX(va); 854 pte_t *ptbl, *pte; 855 856 CTR4(KTR_PMAP, "%s: su = %d pmap = %p va = %p", __func__, 857 pmap == kernel_pmap, pmap, va); 858 859 /* Get the page table pointer. */ 860 ptbl = pmap->pm_pdir[pdir_idx]; 861 862 if (ptbl == NULL) { 863 /* Allocate page table pages. */ 864 ptbl = ptbl_alloc(mmu, pmap, pdir_idx); 865 } else { 866 /* 867 * Check if there is valid mapping for requested 868 * va, if there is, remove it. 869 */ 870 pte = &pmap->pm_pdir[pdir_idx][ptbl_idx]; 871 if (PTE_ISVALID(pte)) { 872 pte_remove(mmu, pmap, va, PTBL_HOLD); 873 } else { 874 /* 875 * pte is not used, increment hold count 876 * for ptbl pages. 877 */ 878 if (pmap != kernel_pmap) 879 ptbl_hold(mmu, pmap, pdir_idx); 880 } 881 } 882 883 /* 884 * Insert pv_entry into pv_list for mapped page if part of managed 885 * memory. 886 */ 887 if ((m->oflags & VPO_UNMANAGED) == 0) { 888 flags |= PTE_MANAGED; 889 890 /* Create and insert pv entry. */ 891 pv_insert(pmap, va, m); 892 } 893 894 pmap->pm_stats.resident_count++; 895 896 mtx_lock_spin(&tlbivax_mutex); 897 tlb_miss_lock(); 898 899 tlb0_flush_entry(va); 900 if (pmap->pm_pdir[pdir_idx] == NULL) { 901 /* 902 * If we just allocated a new page table, hook it in 903 * the pdir. 904 */ 905 pmap->pm_pdir[pdir_idx] = ptbl; 906 } 907 pte = &(pmap->pm_pdir[pdir_idx][ptbl_idx]); 908 pte->rpn = VM_PAGE_TO_PHYS(m) & ~PTE_PA_MASK; 909 pte->flags |= (PTE_VALID | flags); 910 911 tlb_miss_unlock(); 912 mtx_unlock_spin(&tlbivax_mutex); 913} 914 915/* Return the pa for the given pmap/va. */ 916static vm_paddr_t 917pte_vatopa(mmu_t mmu, pmap_t pmap, vm_offset_t va) 918{ 919 vm_paddr_t pa = 0; 920 pte_t *pte; 921 922 pte = pte_find(mmu, pmap, va); 923 if ((pte != NULL) && PTE_ISVALID(pte)) 924 pa = (PTE_PA(pte) | (va & PTE_PA_MASK)); 925 return (pa); 926} 927 928/* Get a pointer to a PTE in a page table. */ 929static pte_t * 930pte_find(mmu_t mmu, pmap_t pmap, vm_offset_t va) 931{ 932 unsigned int pdir_idx = PDIR_IDX(va); 933 unsigned int ptbl_idx = PTBL_IDX(va); 934 935 KASSERT((pmap != NULL), ("pte_find: invalid pmap")); 936 937 if (pmap->pm_pdir[pdir_idx]) 938 return (&(pmap->pm_pdir[pdir_idx][ptbl_idx])); 939 940 return (NULL); 941} 942 943/**************************************************************************/ 944/* PMAP related */ 945/**************************************************************************/ 946 947/* 948 * This is called during booke_init, before the system is really initialized. 
949 */ 950static void 951mmu_booke_bootstrap(mmu_t mmu, vm_offset_t start, vm_offset_t kernelend) 952{ 953 vm_offset_t phys_kernelend; 954 struct mem_region *mp, *mp1; 955 int cnt, i, j; 956 u_int s, e, sz; 957 u_int phys_avail_count; 958 vm_size_t physsz, hwphyssz, kstack0_sz; 959 vm_offset_t kernel_pdir, kstack0, va; 960 vm_paddr_t kstack0_phys; 961 void *dpcpu; 962 pte_t *pte; 963 964 debugf("mmu_booke_bootstrap: entered\n"); 965 966 /* Initialize invalidation mutex */ 967 mtx_init(&tlbivax_mutex, "tlbivax", NULL, MTX_SPIN); 968 969 /* Read TLB0 size and associativity. */ 970 tlb0_get_tlbconf(); 971 972 /* 973 * Align kernel start and end address (kernel image). 974 * Note that kernel end does not necessarily relate to kernsize. 975 * kernsize is the size of the kernel that is actually mapped. 976 * Also note that "start - 1" is deliberate. With SMP, the 977 * entry point is exactly a page from the actual load address. 978 * As such, trunc_page() has no effect and we're off by a page. 979 * Since we always have the ELF header between the load address 980 * and the entry point, we can safely subtract 1 to compensate. 981 */ 982 kernstart = trunc_page(start - 1); 983 data_start = round_page(kernelend); 984 data_end = data_start; 985 986 /* 987 * Addresses of preloaded modules (like file systems) use 988 * physical addresses. Make sure we relocate those into 989 * virtual addresses. 990 */ 991 preload_addr_relocate = kernstart - kernload; 992 993 /* Allocate the dynamic per-cpu area. */ 994 dpcpu = (void *)data_end; 995 data_end += DPCPU_SIZE; 996 997 /* Allocate space for the message buffer. */ 998 msgbufp = (struct msgbuf *)data_end; 999 data_end += msgbufsize; 1000 debugf(" msgbufp at 0x%08x end = 0x%08x\n", (uint32_t)msgbufp, 1001 data_end); 1002 1003 data_end = round_page(data_end); 1004 1005 /* Allocate space for ptbl_bufs. */ 1006 ptbl_bufs = (struct ptbl_buf *)data_end; 1007 data_end += sizeof(struct ptbl_buf) * PTBL_BUFS; 1008 debugf(" ptbl_bufs at 0x%08x end = 0x%08x\n", (uint32_t)ptbl_bufs, 1009 data_end); 1010 1011 data_end = round_page(data_end); 1012 1013 /* Allocate PTE tables for kernel KVA. */ 1014 kernel_pdir = data_end; 1015 kernel_ptbls = (VM_MAX_KERNEL_ADDRESS - VM_MIN_KERNEL_ADDRESS + 1016 PDIR_SIZE - 1) / PDIR_SIZE; 1017 data_end += kernel_ptbls * PTBL_PAGES * PAGE_SIZE; 1018 debugf(" kernel ptbls: %d\n", kernel_ptbls); 1019 debugf(" kernel pdir at 0x%08x end = 0x%08x\n", kernel_pdir, data_end); 1020 1021 debugf(" data_end: 0x%08x\n", data_end); 1022 if (data_end - kernstart > kernsize) { 1023 kernsize += tlb1_mapin_region(kernstart + kernsize, 1024 kernload + kernsize, (data_end - kernstart) - kernsize); 1025 } 1026 data_end = kernstart + kernsize; 1027 debugf(" updated data_end: 0x%08x\n", data_end); 1028 1029 /* 1030 * Clear the structures - note we can only do it safely after the 1031 * possible additional TLB1 translations are in place (above) so that 1032 * all range up to the currently calculated 'data_end' is covered. 1033 */ 1034 dpcpu_init(dpcpu, 0); 1035 memset((void *)ptbl_bufs, 0, sizeof(struct ptbl_buf) * PTBL_SIZE); 1036 memset((void *)kernel_pdir, 0, kernel_ptbls * PTBL_PAGES * PAGE_SIZE); 1037 1038 /*******************************************************/ 1039 /* Set the start and end of kva. */ 1040 /*******************************************************/ 1041 virtual_avail = round_page(data_end); 1042 virtual_end = VM_MAX_KERNEL_ADDRESS; 1043 1044 /* Allocate KVA space for page zero/copy operations. 
*/ 1045 zero_page_va = virtual_avail; 1046 virtual_avail += PAGE_SIZE; 1047 zero_page_idle_va = virtual_avail; 1048 virtual_avail += PAGE_SIZE; 1049 copy_page_src_va = virtual_avail; 1050 virtual_avail += PAGE_SIZE; 1051 copy_page_dst_va = virtual_avail; 1052 virtual_avail += PAGE_SIZE; 1053 debugf("zero_page_va = 0x%08x\n", zero_page_va); 1054 debugf("zero_page_idle_va = 0x%08x\n", zero_page_idle_va); 1055 debugf("copy_page_src_va = 0x%08x\n", copy_page_src_va); 1056 debugf("copy_page_dst_va = 0x%08x\n", copy_page_dst_va); 1057 1058 /* Initialize page zero/copy mutexes. */ 1059 mtx_init(&zero_page_mutex, "mmu_booke_zero_page", NULL, MTX_DEF); 1060 mtx_init(©_page_mutex, "mmu_booke_copy_page", NULL, MTX_DEF); 1061 1062 /* Allocate KVA space for ptbl bufs. */ 1063 ptbl_buf_pool_vabase = virtual_avail; 1064 virtual_avail += PTBL_BUFS * PTBL_PAGES * PAGE_SIZE; 1065 debugf("ptbl_buf_pool_vabase = 0x%08x end = 0x%08x\n", 1066 ptbl_buf_pool_vabase, virtual_avail); 1067 1068 /* Calculate corresponding physical addresses for the kernel region. */ 1069 phys_kernelend = kernload + kernsize; 1070 debugf("kernel image and allocated data:\n"); 1071 debugf(" kernload = 0x%08x\n", kernload); 1072 debugf(" kernstart = 0x%08x\n", kernstart); 1073 debugf(" kernsize = 0x%08x\n", kernsize); 1074 1075 if (sizeof(phys_avail) / sizeof(phys_avail[0]) < availmem_regions_sz) 1076 panic("mmu_booke_bootstrap: phys_avail too small"); 1077 1078 /* 1079 * Remove kernel physical address range from avail regions list. Page 1080 * align all regions. Non-page aligned memory isn't very interesting 1081 * to us. Also, sort the entries for ascending addresses. 1082 */ 1083 1084 /* Retrieve phys/avail mem regions */ 1085 mem_regions(&physmem_regions, &physmem_regions_sz, 1086 &availmem_regions, &availmem_regions_sz); 1087 sz = 0; 1088 cnt = availmem_regions_sz; 1089 debugf("processing avail regions:\n"); 1090 for (mp = availmem_regions; mp->mr_size; mp++) { 1091 s = mp->mr_start; 1092 e = mp->mr_start + mp->mr_size; 1093 debugf(" %08x-%08x -> ", s, e); 1094 /* Check whether this region holds all of the kernel. */ 1095 if (s < kernload && e > phys_kernelend) { 1096 availmem_regions[cnt].mr_start = phys_kernelend; 1097 availmem_regions[cnt++].mr_size = e - phys_kernelend; 1098 e = kernload; 1099 } 1100 /* Look whether this regions starts within the kernel. */ 1101 if (s >= kernload && s < phys_kernelend) { 1102 if (e <= phys_kernelend) 1103 goto empty; 1104 s = phys_kernelend; 1105 } 1106 /* Now look whether this region ends within the kernel. */ 1107 if (e > kernload && e <= phys_kernelend) { 1108 if (s >= kernload) 1109 goto empty; 1110 e = kernload; 1111 } 1112 /* Now page align the start and size of the region. */ 1113 s = round_page(s); 1114 e = trunc_page(e); 1115 if (e < s) 1116 e = s; 1117 sz = e - s; 1118 debugf("%08x-%08x = %x\n", s, e, sz); 1119 1120 /* Check whether some memory is left here. */ 1121 if (sz == 0) { 1122 empty: 1123 memmove(mp, mp + 1, 1124 (cnt - (mp - availmem_regions)) * sizeof(*mp)); 1125 cnt--; 1126 mp--; 1127 continue; 1128 } 1129 1130 /* Do an insertion sort. 
*/ 1131 for (mp1 = availmem_regions; mp1 < mp; mp1++) 1132 if (s < mp1->mr_start) 1133 break; 1134 if (mp1 < mp) { 1135 memmove(mp1 + 1, mp1, (char *)mp - (char *)mp1); 1136 mp1->mr_start = s; 1137 mp1->mr_size = sz; 1138 } else { 1139 mp->mr_start = s; 1140 mp->mr_size = sz; 1141 } 1142 } 1143 availmem_regions_sz = cnt; 1144 1145 /*******************************************************/ 1146 /* Steal physical memory for kernel stack from the end */ 1147 /* of the first avail region */ 1148 /*******************************************************/ 1149 kstack0_sz = KSTACK_PAGES * PAGE_SIZE; 1150 kstack0_phys = availmem_regions[0].mr_start + 1151 availmem_regions[0].mr_size; 1152 kstack0_phys -= kstack0_sz; 1153 availmem_regions[0].mr_size -= kstack0_sz; 1154 1155 /*******************************************************/ 1156 /* Fill in phys_avail table, based on availmem_regions */ 1157 /*******************************************************/ 1158 phys_avail_count = 0; 1159 physsz = 0; 1160 hwphyssz = 0; 1161 TUNABLE_ULONG_FETCH("hw.physmem", (u_long *) &hwphyssz); 1162 1163 debugf("fill in phys_avail:\n"); 1164 for (i = 0, j = 0; i < availmem_regions_sz; i++, j += 2) { 1165 1166 debugf(" region: 0x%08x - 0x%08x (0x%08x)\n", 1167 availmem_regions[i].mr_start, 1168 availmem_regions[i].mr_start + 1169 availmem_regions[i].mr_size, 1170 availmem_regions[i].mr_size); 1171 1172 if (hwphyssz != 0 && 1173 (physsz + availmem_regions[i].mr_size) >= hwphyssz) { 1174 debugf(" hw.physmem adjust\n"); 1175 if (physsz < hwphyssz) { 1176 phys_avail[j] = availmem_regions[i].mr_start; 1177 phys_avail[j + 1] = 1178 availmem_regions[i].mr_start + 1179 hwphyssz - physsz; 1180 physsz = hwphyssz; 1181 phys_avail_count++; 1182 } 1183 break; 1184 } 1185 1186 phys_avail[j] = availmem_regions[i].mr_start; 1187 phys_avail[j + 1] = availmem_regions[i].mr_start + 1188 availmem_regions[i].mr_size; 1189 phys_avail_count++; 1190 physsz += availmem_regions[i].mr_size; 1191 } 1192 physmem = btoc(physsz); 1193 1194 /* Calculate the last available physical address. */ 1195 for (i = 0; phys_avail[i + 2] != 0; i += 2) 1196 ; 1197 Maxmem = powerpc_btop(phys_avail[i + 1]); 1198 1199 debugf("Maxmem = 0x%08lx\n", Maxmem); 1200 debugf("phys_avail_count = %d\n", phys_avail_count); 1201 debugf("physsz = 0x%08x physmem = %ld (0x%08lx)\n", physsz, physmem, 1202 physmem); 1203 1204 /*******************************************************/ 1205 /* Initialize (statically allocated) kernel pmap. */ 1206 /*******************************************************/ 1207 PMAP_LOCK_INIT(kernel_pmap); 1208 kptbl_min = VM_MIN_KERNEL_ADDRESS / PDIR_SIZE; 1209 1210 debugf("kernel_pmap = 0x%08x\n", (uint32_t)kernel_pmap); 1211 debugf("kptbl_min = %d, kernel_ptbls = %d\n", kptbl_min, kernel_ptbls); 1212 debugf("kernel pdir range: 0x%08x - 0x%08x\n", 1213 kptbl_min * PDIR_SIZE, (kptbl_min + kernel_ptbls) * PDIR_SIZE - 1); 1214 1215 /* Initialize kernel pdir */ 1216 for (i = 0; i < kernel_ptbls; i++) 1217 kernel_pmap->pm_pdir[kptbl_min + i] = 1218 (pte_t *)(kernel_pdir + (i * PAGE_SIZE * PTBL_PAGES)); 1219 1220 for (i = 0; i < MAXCPU; i++) { 1221 kernel_pmap->pm_tid[i] = TID_KERNEL; 1222 1223 /* Initialize each CPU's tidbusy entry 0 with kernel_pmap */ 1224 tidbusy[i][0] = kernel_pmap; 1225 } 1226 1227 /* 1228 * Fill in PTEs covering kernel code and data. They are not required 1229 * for address translation, as this area is covered by static TLB1 1230 * entries, but for pte_vatopa() to work correctly with kernel area 1231 * addresses. 
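 * Each entry is wired and maps kernload + (va - kernstart).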
1232 */ 1233 for (va = kernstart; va < data_end; va += PAGE_SIZE) { 1234 pte = &(kernel_pmap->pm_pdir[PDIR_IDX(va)][PTBL_IDX(va)]); 1235 pte->rpn = kernload + (va - kernstart); 1236 pte->flags = PTE_M | PTE_SR | PTE_SW | PTE_SX | PTE_WIRED | 1237 PTE_VALID; 1238 } 1239 /* Mark kernel_pmap active on all CPUs */ 1240 CPU_FILL(&kernel_pmap->pm_active); 1241 1242 /*******************************************************/ 1243 /* Final setup */ 1244 /*******************************************************/ 1245 1246 /* Enter kstack0 into kernel map, provide guard page */ 1247 kstack0 = virtual_avail + KSTACK_GUARD_PAGES * PAGE_SIZE; 1248 thread0.td_kstack = kstack0; 1249 thread0.td_kstack_pages = KSTACK_PAGES; 1250 1251 debugf("kstack_sz = 0x%08x\n", kstack0_sz); 1252 debugf("kstack0_phys at 0x%08x - 0x%08x\n", 1253 kstack0_phys, kstack0_phys + kstack0_sz); 1254 debugf("kstack0 at 0x%08x - 0x%08x\n", kstack0, kstack0 + kstack0_sz); 1255 1256 virtual_avail += KSTACK_GUARD_PAGES * PAGE_SIZE + kstack0_sz; 1257 for (i = 0; i < KSTACK_PAGES; i++) { 1258 mmu_booke_kenter(mmu, kstack0, kstack0_phys); 1259 kstack0 += PAGE_SIZE; 1260 kstack0_phys += PAGE_SIZE; 1261 } 1262 1263 debugf("virtual_avail = %08x\n", virtual_avail); 1264 debugf("virtual_end = %08x\n", virtual_end); 1265 1266 debugf("mmu_booke_bootstrap: exit\n"); 1267} 1268 1269void 1270pmap_bootstrap_ap(volatile uint32_t *trcp __unused) 1271{ 1272 int i; 1273 1274 /* 1275 * Finish TLB1 configuration: the BSP already set up its TLB1 and we 1276 * have the snapshot of its contents in the s/w tlb1[] table, so use 1277 * these values directly to (re)program AP's TLB1 hardware. 1278 */ 1279 for (i = bp_ntlb1s; i < tlb1_idx; i++) { 1280 /* Skip invalid entries */ 1281 if (!(tlb1[i].mas1 & MAS1_VALID)) 1282 continue; 1283 1284 tlb1_write_entry(i); 1285 } 1286 1287 set_mas4_defaults(); 1288} 1289 1290/* 1291 * Get the physical page address for the given pmap/virtual address. 1292 */ 1293static vm_paddr_t 1294mmu_booke_extract(mmu_t mmu, pmap_t pmap, vm_offset_t va) 1295{ 1296 vm_paddr_t pa; 1297 1298 PMAP_LOCK(pmap); 1299 pa = pte_vatopa(mmu, pmap, va); 1300 PMAP_UNLOCK(pmap); 1301 1302 return (pa); 1303} 1304 1305/* 1306 * Extract the physical page address associated with the given 1307 * kernel virtual address. 1308 */ 1309static vm_paddr_t 1310mmu_booke_kextract(mmu_t mmu, vm_offset_t va) 1311{ 1312 1313 return (pte_vatopa(mmu, kernel_pmap, va)); 1314} 1315 1316/* 1317 * Initialize the pmap module. 1318 * Called by vm_init, to initialize any structures that the pmap 1319 * system needs to map virtual memory. 1320 */ 1321static void 1322mmu_booke_init(mmu_t mmu) 1323{ 1324 int shpgperproc = PMAP_SHPGPERPROC; 1325 1326 /* 1327 * Initialize the address space (zone) for the pv entries. Set a 1328 * high water mark so that the system can recover from excessive 1329 * numbers of pv entries. 1330 */ 1331 pvzone = uma_zcreate("PV ENTRY", sizeof(struct pv_entry), NULL, NULL, 1332 NULL, NULL, UMA_ALIGN_PTR, UMA_ZONE_VM | UMA_ZONE_NOFREE); 1333 1334 TUNABLE_INT_FETCH("vm.pmap.shpgperproc", &shpgperproc); 1335 pv_entry_max = shpgperproc * maxproc + cnt.v_page_count; 1336 1337 TUNABLE_INT_FETCH("vm.pmap.pv_entries", &pv_entry_max); 1338 pv_entry_high_water = 9 * (pv_entry_max / 10); 1339 1340 uma_zone_set_obj(pvzone, &pvzone_obj, pv_entry_max); 1341 1342 /* Pre-fill pvzone with initial number of pv entries. */ 1343 uma_prealloc(pvzone, PV_ENTRY_ZONE_MIN); 1344 1345 /* Initialize ptbl allocation. 
*/ 1346 ptbl_init(); 1347} 1348 1349/* 1350 * Map a list of wired pages into kernel virtual address space. This is 1351 * intended for temporary mappings which do not need page modification or 1352 * references recorded. Existing mappings in the region are overwritten. 1353 */ 1354static void 1355mmu_booke_qenter(mmu_t mmu, vm_offset_t sva, vm_page_t *m, int count) 1356{ 1357 vm_offset_t va; 1358 1359 va = sva; 1360 while (count-- > 0) { 1361 mmu_booke_kenter(mmu, va, VM_PAGE_TO_PHYS(*m)); 1362 va += PAGE_SIZE; 1363 m++; 1364 } 1365} 1366 1367/* 1368 * Remove page mappings from kernel virtual address space. Intended for 1369 * temporary mappings entered by mmu_booke_qenter. 1370 */ 1371static void 1372mmu_booke_qremove(mmu_t mmu, vm_offset_t sva, int count) 1373{ 1374 vm_offset_t va; 1375 1376 va = sva; 1377 while (count-- > 0) { 1378 mmu_booke_kremove(mmu, va); 1379 va += PAGE_SIZE; 1380 } 1381} 1382 1383/* 1384 * Map a wired page into kernel virtual address space. 1385 */ 1386static void 1387mmu_booke_kenter(mmu_t mmu, vm_offset_t va, vm_paddr_t pa) 1388{ 1389 unsigned int pdir_idx = PDIR_IDX(va); 1390 unsigned int ptbl_idx = PTBL_IDX(va); 1391 uint32_t flags; 1392 pte_t *pte; 1393 1394 KASSERT(((va >= VM_MIN_KERNEL_ADDRESS) && 1395 (va <= VM_MAX_KERNEL_ADDRESS)), ("mmu_booke_kenter: invalid va")); 1396 1397 flags = PTE_M | PTE_SR | PTE_SW | PTE_SX | PTE_WIRED | PTE_VALID; 1398 1399 pte = &(kernel_pmap->pm_pdir[pdir_idx][ptbl_idx]); 1400 1401 mtx_lock_spin(&tlbivax_mutex); 1402 tlb_miss_lock(); 1403 1404 if (PTE_ISVALID(pte)) { 1405 1406 CTR1(KTR_PMAP, "%s: replacing entry!", __func__); 1407 1408 /* Flush entry from TLB0 */ 1409 tlb0_flush_entry(va); 1410 } 1411 1412 pte->rpn = pa & ~PTE_PA_MASK; 1413 pte->flags = flags; 1414 1415 //debugf("mmu_booke_kenter: pdir_idx = %d ptbl_idx = %d va=0x%08x " 1416 // "pa=0x%08x rpn=0x%08x flags=0x%08x\n", 1417 // pdir_idx, ptbl_idx, va, pa, pte->rpn, pte->flags); 1418 1419 /* Flush the real memory from the instruction cache. */ 1420 if ((flags & (PTE_I | PTE_G)) == 0) { 1421 __syncicache((void *)va, PAGE_SIZE); 1422 } 1423 1424 tlb_miss_unlock(); 1425 mtx_unlock_spin(&tlbivax_mutex); 1426} 1427 1428/* 1429 * Remove a page from kernel page table. 1430 */ 1431static void 1432mmu_booke_kremove(mmu_t mmu, vm_offset_t va) 1433{ 1434 unsigned int pdir_idx = PDIR_IDX(va); 1435 unsigned int ptbl_idx = PTBL_IDX(va); 1436 pte_t *pte; 1437 1438// CTR2(KTR_PMAP,("%s: s (va = 0x%08x)\n", __func__, va)); 1439 1440 KASSERT(((va >= VM_MIN_KERNEL_ADDRESS) && 1441 (va <= VM_MAX_KERNEL_ADDRESS)), 1442 ("mmu_booke_kremove: invalid va")); 1443 1444 pte = &(kernel_pmap->pm_pdir[pdir_idx][ptbl_idx]); 1445 1446 if (!PTE_ISVALID(pte)) { 1447 1448 CTR1(KTR_PMAP, "%s: invalid pte", __func__); 1449 1450 return; 1451 } 1452 1453 mtx_lock_spin(&tlbivax_mutex); 1454 tlb_miss_lock(); 1455 1456 /* Invalidate entry in TLB0, update PTE. */ 1457 tlb0_flush_entry(va); 1458 pte->flags = 0; 1459 pte->rpn = 0; 1460 1461 tlb_miss_unlock(); 1462 mtx_unlock_spin(&tlbivax_mutex); 1463} 1464 1465/* 1466 * Initialize pmap associated with process 0. 1467 */ 1468static void 1469mmu_booke_pinit0(mmu_t mmu, pmap_t pmap) 1470{ 1471 1472 mmu_booke_pinit(mmu, pmap); 1473 PCPU_SET(curpmap, pmap); 1474} 1475 1476/* 1477 * Initialize a preallocated and zeroed pmap structure, 1478 * such as one in a vmspace structure. 
1479 */ 1480static void 1481mmu_booke_pinit(mmu_t mmu, pmap_t pmap) 1482{ 1483 int i; 1484 1485 CTR4(KTR_PMAP, "%s: pmap = %p, proc %d '%s'", __func__, pmap, 1486 curthread->td_proc->p_pid, curthread->td_proc->p_comm); 1487 1488 KASSERT((pmap != kernel_pmap), ("pmap_pinit: initializing kernel_pmap")); 1489 1490 PMAP_LOCK_INIT(pmap); 1491 for (i = 0; i < MAXCPU; i++) 1492 pmap->pm_tid[i] = TID_NONE; 1493 CPU_ZERO(&kernel_pmap->pm_active); 1494 bzero(&pmap->pm_stats, sizeof(pmap->pm_stats)); 1495 bzero(&pmap->pm_pdir, sizeof(pte_t *) * PDIR_NENTRIES); 1496 TAILQ_INIT(&pmap->pm_ptbl_list); 1497} 1498 1499/* 1500 * Release any resources held by the given physical map. 1501 * Called when a pmap initialized by mmu_booke_pinit is being released. 1502 * Should only be called if the map contains no valid mappings. 1503 */ 1504static void 1505mmu_booke_release(mmu_t mmu, pmap_t pmap) 1506{ 1507 1508 KASSERT(pmap->pm_stats.resident_count == 0, 1509 ("pmap_release: pmap resident count %ld != 0", 1510 pmap->pm_stats.resident_count)); 1511 1512 PMAP_LOCK_DESTROY(pmap); 1513} 1514 1515/* 1516 * Insert the given physical page at the specified virtual address in the 1517 * target physical map with the protection requested. If specified the page 1518 * will be wired down. 1519 */ 1520static void 1521mmu_booke_enter(mmu_t mmu, pmap_t pmap, vm_offset_t va, vm_page_t m, 1522 vm_prot_t prot, boolean_t wired) 1523{ 1524 1525 vm_page_lock_queues(); 1526 PMAP_LOCK(pmap); 1527 mmu_booke_enter_locked(mmu, pmap, va, m, prot, wired); 1528 vm_page_unlock_queues(); 1529 PMAP_UNLOCK(pmap); 1530} 1531 1532static void 1533mmu_booke_enter_locked(mmu_t mmu, pmap_t pmap, vm_offset_t va, vm_page_t m, 1534 vm_prot_t prot, boolean_t wired) 1535{ 1536 pte_t *pte; 1537 vm_paddr_t pa; 1538 uint32_t flags; 1539 int su, sync; 1540 1541 pa = VM_PAGE_TO_PHYS(m); 1542 su = (pmap == kernel_pmap); 1543 sync = 0; 1544 1545 //debugf("mmu_booke_enter_locked: s (pmap=0x%08x su=%d tid=%d m=0x%08x va=0x%08x " 1546 // "pa=0x%08x prot=0x%08x wired=%d)\n", 1547 // (u_int32_t)pmap, su, pmap->pm_tid, 1548 // (u_int32_t)m, va, pa, prot, wired); 1549 1550 if (su) { 1551 KASSERT(((va >= virtual_avail) && 1552 (va <= VM_MAX_KERNEL_ADDRESS)), 1553 ("mmu_booke_enter_locked: kernel pmap, non kernel va")); 1554 } else { 1555 KASSERT((va <= VM_MAXUSER_ADDRESS), 1556 ("mmu_booke_enter_locked: user pmap, non user va")); 1557 } 1558 KASSERT((m->oflags & (VPO_UNMANAGED | VPO_BUSY)) != 0 || 1559 VM_OBJECT_LOCKED(m->object), 1560 ("mmu_booke_enter_locked: page %p is not busy", m)); 1561 1562 PMAP_LOCK_ASSERT(pmap, MA_OWNED); 1563 1564 /* 1565 * If there is an existing mapping, and the physical address has not 1566 * changed, must be protection or wiring change. 1567 */ 1568 if (((pte = pte_find(mmu, pmap, va)) != NULL) && 1569 (PTE_ISVALID(pte)) && (PTE_PA(pte) == pa)) { 1570 1571 /* 1572 * Before actually updating pte->flags we calculate and 1573 * prepare its new value in a helper var. 1574 */ 1575 flags = pte->flags; 1576 flags &= ~(PTE_UW | PTE_UX | PTE_SW | PTE_SX | PTE_MODIFIED); 1577 1578 /* Wiring change, just update stats. */ 1579 if (wired) { 1580 if (!PTE_ISWIRED(pte)) { 1581 flags |= PTE_WIRED; 1582 pmap->pm_stats.wired_count++; 1583 } 1584 } else { 1585 if (PTE_ISWIRED(pte)) { 1586 flags &= ~PTE_WIRED; 1587 pmap->pm_stats.wired_count--; 1588 } 1589 } 1590 1591 if (prot & VM_PROT_WRITE) { 1592 /* Add write permissions. 
*/ 1593 flags |= PTE_SW; 1594 if (!su) 1595 flags |= PTE_UW; 1596 1597 if ((flags & PTE_MANAGED) != 0) 1598 vm_page_aflag_set(m, PGA_WRITEABLE); 1599 } else { 1600 /* Handle modified pages, sense modify status. */ 1601 1602 /* 1603 * The PTE_MODIFIED flag could be set by underlying 1604 * TLB misses since we last read it (above), possibly 1605 * other CPUs could update it so we check in the PTE 1606 * directly rather than rely on that saved local flags 1607 * copy. 1608 */ 1609 if (PTE_ISMODIFIED(pte)) 1610 vm_page_dirty(m); 1611 } 1612 1613 if (prot & VM_PROT_EXECUTE) { 1614 flags |= PTE_SX; 1615 if (!su) 1616 flags |= PTE_UX; 1617 1618 /* 1619 * Check existing flags for execute permissions: if we 1620 * are turning execute permissions on, icache should 1621 * be flushed. 1622 */ 1623 if ((pte->flags & (PTE_UX | PTE_SX)) == 0) 1624 sync++; 1625 } 1626 1627 flags &= ~PTE_REFERENCED; 1628 1629 /* 1630 * The new flags value is all calculated -- only now actually 1631 * update the PTE. 1632 */ 1633 mtx_lock_spin(&tlbivax_mutex); 1634 tlb_miss_lock(); 1635 1636 tlb0_flush_entry(va); 1637 pte->flags = flags; 1638 1639 tlb_miss_unlock(); 1640 mtx_unlock_spin(&tlbivax_mutex); 1641 1642 } else { 1643 /* 1644 * If there is an existing mapping, but it's for a different 1645 * physical address, pte_enter() will delete the old mapping. 1646 */ 1647 //if ((pte != NULL) && PTE_ISVALID(pte)) 1648 // debugf("mmu_booke_enter_locked: replace\n"); 1649 //else 1650 // debugf("mmu_booke_enter_locked: new\n"); 1651 1652 /* Now set up the flags and install the new mapping. */ 1653 flags = (PTE_SR | PTE_VALID); 1654 flags |= PTE_M; 1655 1656 if (!su) 1657 flags |= PTE_UR; 1658 1659 if (prot & VM_PROT_WRITE) { 1660 flags |= PTE_SW; 1661 if (!su) 1662 flags |= PTE_UW; 1663 1664 if ((m->oflags & VPO_UNMANAGED) == 0) 1665 vm_page_aflag_set(m, PGA_WRITEABLE); 1666 } 1667 1668 if (prot & VM_PROT_EXECUTE) { 1669 flags |= PTE_SX; 1670 if (!su) 1671 flags |= PTE_UX; 1672 } 1673 1674 /* If its wired update stats. */ 1675 if (wired) { 1676 pmap->pm_stats.wired_count++; 1677 flags |= PTE_WIRED; 1678 } 1679 1680 pte_enter(mmu, pmap, m, va, flags); 1681 1682 /* Flush the real memory from the instruction cache. */ 1683 if (prot & VM_PROT_EXECUTE) 1684 sync++; 1685 } 1686 1687 if (sync && (su || pmap == PCPU_GET(curpmap))) { 1688 __syncicache((void *)va, PAGE_SIZE); 1689 sync = 0; 1690 } 1691} 1692 1693/* 1694 * Maps a sequence of resident pages belonging to the same object. 1695 * The sequence begins with the given page m_start. This page is 1696 * mapped at the given virtual address start. Each subsequent page is 1697 * mapped at a virtual address that is offset from start by the same 1698 * amount as the page is offset from m_start within the object. The 1699 * last page in the sequence is the page with the largest offset from 1700 * m_start that can be mapped at a virtual address less than the given 1701 * virtual address end. Not every virtual page between start and end 1702 * is mapped; only those for which a resident page exists with the 1703 * corresponding offset from m_start are mapped. 
1704 */ 1705static void 1706mmu_booke_enter_object(mmu_t mmu, pmap_t pmap, vm_offset_t start, 1707 vm_offset_t end, vm_page_t m_start, vm_prot_t prot) 1708{ 1709 vm_page_t m; 1710 vm_pindex_t diff, psize; 1711 1712 psize = atop(end - start); 1713 m = m_start; 1714 vm_page_lock_queues(); 1715 PMAP_LOCK(pmap); 1716 while (m != NULL && (diff = m->pindex - m_start->pindex) < psize) { 1717 mmu_booke_enter_locked(mmu, pmap, start + ptoa(diff), m, 1718 prot & (VM_PROT_READ | VM_PROT_EXECUTE), FALSE); 1719 m = TAILQ_NEXT(m, listq); 1720 } 1721 vm_page_unlock_queues(); 1722 PMAP_UNLOCK(pmap); 1723} 1724 1725static void 1726mmu_booke_enter_quick(mmu_t mmu, pmap_t pmap, vm_offset_t va, vm_page_t m, 1727 vm_prot_t prot) 1728{ 1729 1730 vm_page_lock_queues(); 1731 PMAP_LOCK(pmap); 1732 mmu_booke_enter_locked(mmu, pmap, va, m, 1733 prot & (VM_PROT_READ | VM_PROT_EXECUTE), FALSE); 1734 vm_page_unlock_queues(); 1735 PMAP_UNLOCK(pmap); 1736} 1737 1738/* 1739 * Remove the given range of addresses from the specified map. 1740 * 1741 * It is assumed that the start and end are properly rounded to the page size. 1742 */ 1743static void 1744mmu_booke_remove(mmu_t mmu, pmap_t pmap, vm_offset_t va, vm_offset_t endva) 1745{ 1746 pte_t *pte; 1747 uint8_t hold_flag; 1748 1749 int su = (pmap == kernel_pmap); 1750 1751 //debugf("mmu_booke_remove: s (su = %d pmap=0x%08x tid=%d va=0x%08x endva=0x%08x)\n", 1752 // su, (u_int32_t)pmap, pmap->pm_tid, va, endva); 1753 1754 if (su) { 1755 KASSERT(((va >= virtual_avail) && 1756 (va <= VM_MAX_KERNEL_ADDRESS)), 1757 ("mmu_booke_remove: kernel pmap, non kernel va")); 1758 } else { 1759 KASSERT((va <= VM_MAXUSER_ADDRESS), 1760 ("mmu_booke_remove: user pmap, non user va")); 1761 } 1762 1763 if (PMAP_REMOVE_DONE(pmap)) { 1764 //debugf("mmu_booke_remove: e (empty)\n"); 1765 return; 1766 } 1767 1768 hold_flag = PTBL_HOLD_FLAG(pmap); 1769 //debugf("mmu_booke_remove: hold_flag = %d\n", hold_flag); 1770 1771 vm_page_lock_queues(); 1772 PMAP_LOCK(pmap); 1773 for (; va < endva; va += PAGE_SIZE) { 1774 pte = pte_find(mmu, pmap, va); 1775 if ((pte != NULL) && PTE_ISVALID(pte)) 1776 pte_remove(mmu, pmap, va, hold_flag); 1777 } 1778 PMAP_UNLOCK(pmap); 1779 vm_page_unlock_queues(); 1780 1781 //debugf("mmu_booke_remove: e\n"); 1782} 1783 1784/* 1785 * Remove physical page from all pmaps in which it resides. 1786 */ 1787static void 1788mmu_booke_remove_all(mmu_t mmu, vm_page_t m) 1789{ 1790 pv_entry_t pv, pvn; 1791 uint8_t hold_flag; 1792 1793 vm_page_lock_queues(); 1794 for (pv = TAILQ_FIRST(&m->md.pv_list); pv != NULL; pv = pvn) { 1795 pvn = TAILQ_NEXT(pv, pv_link); 1796 1797 PMAP_LOCK(pv->pv_pmap); 1798 hold_flag = PTBL_HOLD_FLAG(pv->pv_pmap); 1799 pte_remove(mmu, pv->pv_pmap, pv->pv_va, hold_flag); 1800 PMAP_UNLOCK(pv->pv_pmap); 1801 } 1802 vm_page_aflag_clear(m, PGA_WRITEABLE); 1803 vm_page_unlock_queues(); 1804} 1805 1806/* 1807 * Map a range of physical addresses into kernel virtual address space. 
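 * *virt is advanced past the mapped range and the original starting VA is returned.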
1808 */ 1809static vm_offset_t 1810mmu_booke_map(mmu_t mmu, vm_offset_t *virt, vm_paddr_t pa_start, 1811 vm_paddr_t pa_end, int prot) 1812{ 1813 vm_offset_t sva = *virt; 1814 vm_offset_t va = sva; 1815 1816 //debugf("mmu_booke_map: s (sva = 0x%08x pa_start = 0x%08x pa_end = 0x%08x)\n", 1817 // sva, pa_start, pa_end); 1818 1819 while (pa_start < pa_end) { 1820 mmu_booke_kenter(mmu, va, pa_start); 1821 va += PAGE_SIZE; 1822 pa_start += PAGE_SIZE; 1823 } 1824 *virt = va; 1825 1826 //debugf("mmu_booke_map: e (va = 0x%08x)\n", va); 1827 return (sva); 1828} 1829 1830/* 1831 * The pmap must be activated before it's address space can be accessed in any 1832 * way. 1833 */ 1834static void 1835mmu_booke_activate(mmu_t mmu, struct thread *td) 1836{ 1837 pmap_t pmap; 1838 u_int cpuid; 1839 1840 pmap = &td->td_proc->p_vmspace->vm_pmap; 1841 1842 CTR5(KTR_PMAP, "%s: s (td = %p, proc = '%s', id = %d, pmap = 0x%08x)", 1843 __func__, td, td->td_proc->p_comm, td->td_proc->p_pid, pmap); 1844 1845 KASSERT((pmap != kernel_pmap), ("mmu_booke_activate: kernel_pmap!")); 1846 1847 mtx_lock_spin(&sched_lock); 1848 1849 cpuid = PCPU_GET(cpuid); 1850 CPU_SET_ATOMIC(cpuid, &pmap->pm_active); 1851 PCPU_SET(curpmap, pmap); 1852 1853 if (pmap->pm_tid[cpuid] == TID_NONE) 1854 tid_alloc(pmap); 1855 1856 /* Load PID0 register with pmap tid value. */ 1857 mtspr(SPR_PID0, pmap->pm_tid[cpuid]); 1858 __asm __volatile("isync"); 1859 1860 mtx_unlock_spin(&sched_lock); 1861 1862 CTR3(KTR_PMAP, "%s: e (tid = %d for '%s')", __func__, 1863 pmap->pm_tid[PCPU_GET(cpuid)], td->td_proc->p_comm); 1864} 1865 1866/* 1867 * Deactivate the specified process's address space. 1868 */ 1869static void 1870mmu_booke_deactivate(mmu_t mmu, struct thread *td) 1871{ 1872 pmap_t pmap; 1873 1874 pmap = &td->td_proc->p_vmspace->vm_pmap; 1875 1876 CTR5(KTR_PMAP, "%s: td=%p, proc = '%s', id = %d, pmap = 0x%08x", 1877 __func__, td, td->td_proc->p_comm, td->td_proc->p_pid, pmap); 1878 1879 CPU_CLR_ATOMIC(PCPU_GET(cpuid), &pmap->pm_active); 1880 PCPU_SET(curpmap, NULL); 1881} 1882 1883/* 1884 * Copy the range specified by src_addr/len 1885 * from the source map to the range dst_addr/len 1886 * in the destination map. 1887 * 1888 * This routine is only advisory and need not do anything. 1889 */ 1890static void 1891mmu_booke_copy(mmu_t mmu, pmap_t dst_pmap, pmap_t src_pmap, 1892 vm_offset_t dst_addr, vm_size_t len, vm_offset_t src_addr) 1893{ 1894 1895} 1896 1897/* 1898 * Set the physical protection on the specified range of this map as requested. 1899 */ 1900static void 1901mmu_booke_protect(mmu_t mmu, pmap_t pmap, vm_offset_t sva, vm_offset_t eva, 1902 vm_prot_t prot) 1903{ 1904 vm_offset_t va; 1905 vm_page_t m; 1906 pte_t *pte; 1907 1908 if ((prot & VM_PROT_READ) == VM_PROT_NONE) { 1909 mmu_booke_remove(mmu, pmap, sva, eva); 1910 return; 1911 } 1912 1913 if (prot & VM_PROT_WRITE) 1914 return; 1915 1916 PMAP_LOCK(pmap); 1917 for (va = sva; va < eva; va += PAGE_SIZE) { 1918 if ((pte = pte_find(mmu, pmap, va)) != NULL) { 1919 if (PTE_ISVALID(pte)) { 1920 m = PHYS_TO_VM_PAGE(PTE_PA(pte)); 1921 1922 mtx_lock_spin(&tlbivax_mutex); 1923 tlb_miss_lock(); 1924 1925 /* Handle modified pages. */ 1926 if (PTE_ISMODIFIED(pte) && PTE_ISMANAGED(pte)) 1927 vm_page_dirty(m); 1928 1929 tlb0_flush_entry(va); 1930 pte->flags &= ~(PTE_UW | PTE_SW | PTE_MODIFIED); 1931 1932 tlb_miss_unlock(); 1933 mtx_unlock_spin(&tlbivax_mutex); 1934 } 1935 } 1936 } 1937 PMAP_UNLOCK(pmap); 1938} 1939 1940/* 1941 * Clear the write and modified bits in each of the given page's mappings. 
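 * Modified state is transferred to the vm_page before PTE_UW/PTE_SW are cleared, and
 * PGA_WRITEABLE is cleared once all mappings have been processed.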
1942 */ 1943static void 1944mmu_booke_remove_write(mmu_t mmu, vm_page_t m) 1945{ 1946 pv_entry_t pv; 1947 pte_t *pte; 1948 1949 KASSERT((m->oflags & VPO_UNMANAGED) == 0, 1950 ("mmu_booke_remove_write: page %p is not managed", m)); 1951 1952 /* 1953 * If the page is not VPO_BUSY, then PGA_WRITEABLE cannot be set by 1954 * another thread while the object is locked. Thus, if PGA_WRITEABLE 1955 * is clear, no page table entries need updating. 1956 */ 1957 VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED); 1958 if ((m->oflags & VPO_BUSY) == 0 && 1959 (m->aflags & PGA_WRITEABLE) == 0) 1960 return; 1961 vm_page_lock_queues(); 1962 TAILQ_FOREACH(pv, &m->md.pv_list, pv_link) { 1963 PMAP_LOCK(pv->pv_pmap); 1964 if ((pte = pte_find(mmu, pv->pv_pmap, pv->pv_va)) != NULL) { 1965 if (PTE_ISVALID(pte)) { 1966 m = PHYS_TO_VM_PAGE(PTE_PA(pte)); 1967 1968 mtx_lock_spin(&tlbivax_mutex); 1969 tlb_miss_lock(); 1970 1971 /* Handle modified pages. */ 1972 if (PTE_ISMODIFIED(pte)) 1973 vm_page_dirty(m); 1974 1975 /* Flush mapping from TLB0. */ 1976 pte->flags &= ~(PTE_UW | PTE_SW | PTE_MODIFIED); 1977 1978 tlb_miss_unlock(); 1979 mtx_unlock_spin(&tlbivax_mutex); 1980 } 1981 } 1982 PMAP_UNLOCK(pv->pv_pmap); 1983 } 1984 vm_page_aflag_clear(m, PGA_WRITEABLE); 1985 vm_page_unlock_queues(); 1986} 1987 1988static void 1989mmu_booke_sync_icache(mmu_t mmu, pmap_t pm, vm_offset_t va, vm_size_t sz) 1990{ 1991 pte_t *pte; 1992 pmap_t pmap; 1993 vm_page_t m; 1994 vm_offset_t addr; 1995 vm_paddr_t pa; 1996 int active, valid; 1997 1998 va = trunc_page(va); 1999 sz = round_page(sz); 2000 2001 vm_page_lock_queues(); 2002 pmap = PCPU_GET(curpmap); 2003 active = (pm == kernel_pmap || pm == pmap) ? 1 : 0; 2004 while (sz > 0) { 2005 PMAP_LOCK(pm); 2006 pte = pte_find(mmu, pm, va); 2007 valid = (pte != NULL && PTE_ISVALID(pte)) ? 1 : 0; 2008 if (valid) 2009 pa = PTE_PA(pte); 2010 PMAP_UNLOCK(pm); 2011 if (valid) { 2012 if (!active) { 2013 /* Create a mapping in the active pmap. */ 2014 addr = 0; 2015 m = PHYS_TO_VM_PAGE(pa); 2016 PMAP_LOCK(pmap); 2017 pte_enter(mmu, pmap, m, addr, 2018 PTE_SR | PTE_VALID | PTE_UR); 2019 __syncicache((void *)addr, PAGE_SIZE); 2020 pte_remove(mmu, pmap, addr, PTBL_UNHOLD); 2021 PMAP_UNLOCK(pmap); 2022 } else 2023 __syncicache((void *)va, PAGE_SIZE); 2024 } 2025 va += PAGE_SIZE; 2026 sz -= PAGE_SIZE; 2027 } 2028 vm_page_unlock_queues(); 2029} 2030 2031/* 2032 * Atomically extract and hold the physical page with the given 2033 * pmap and virtual address pair if that mapping permits the given 2034 * protection. 2035 */ 2036static vm_page_t 2037mmu_booke_extract_and_hold(mmu_t mmu, pmap_t pmap, vm_offset_t va, 2038 vm_prot_t prot) 2039{ 2040 pte_t *pte; 2041 vm_page_t m; 2042 uint32_t pte_wbit; 2043 vm_paddr_t pa; 2044 2045 m = NULL; 2046 pa = 0; 2047 PMAP_LOCK(pmap); 2048retry: 2049 pte = pte_find(mmu, pmap, va); 2050 if ((pte != NULL) && PTE_ISVALID(pte)) { 2051 if (pmap == kernel_pmap) 2052 pte_wbit = PTE_SW; 2053 else 2054 pte_wbit = PTE_UW; 2055 2056 if ((pte->flags & pte_wbit) || ((prot & VM_PROT_WRITE) == 0)) { 2057 if (vm_page_pa_tryrelock(pmap, PTE_PA(pte), &pa)) 2058 goto retry; 2059 m = PHYS_TO_VM_PAGE(PTE_PA(pte)); 2060 vm_page_hold(m); 2061 } 2062 } 2063 2064 PA_UNLOCK_COND(pa); 2065 PMAP_UNLOCK(pmap); 2066 return (m); 2067} 2068 2069/* 2070 * Initialize a vm_page's machine-dependent fields. 
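 * Only the pv entry list needs to be initialized here.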
2071 */ 2072static void 2073mmu_booke_page_init(mmu_t mmu, vm_page_t m) 2074{ 2075 2076 TAILQ_INIT(&m->md.pv_list); 2077} 2078 2079/* 2080 * mmu_booke_zero_page_area zeros the specified hardware page by 2081 * mapping it into virtual memory and using bzero to clear 2082 * its contents. 2083 * 2084 * off and size must reside within a single page. 2085 */ 2086static void 2087mmu_booke_zero_page_area(mmu_t mmu, vm_page_t m, int off, int size) 2088{ 2089 vm_offset_t va; 2090 2091 /* XXX KASSERT off and size are within a single page? */ 2092 2093 mtx_lock(&zero_page_mutex); 2094 va = zero_page_va; 2095 2096 mmu_booke_kenter(mmu, va, VM_PAGE_TO_PHYS(m)); 2097 bzero((caddr_t)va + off, size); 2098 mmu_booke_kremove(mmu, va); 2099 2100 mtx_unlock(&zero_page_mutex); 2101} 2102 2103/* 2104 * mmu_booke_zero_page zeros the specified hardware page. 2105 */ 2106static void 2107mmu_booke_zero_page(mmu_t mmu, vm_page_t m) 2108{ 2109 2110 mmu_booke_zero_page_area(mmu, m, 0, PAGE_SIZE); 2111} 2112 2113/* 2114 * mmu_booke_copy_page copies the specified (machine independent) page by 2115 * mapping the page into virtual memory and using memcpy to copy the page, 2116 * one machine dependent page at a time. 2117 */ 2118static void 2119mmu_booke_copy_page(mmu_t mmu, vm_page_t sm, vm_page_t dm) 2120{ 2121 vm_offset_t sva, dva; 2122 2123 sva = copy_page_src_va; 2124 dva = copy_page_dst_va; 2125 2126 mtx_lock(&copy_page_mutex); 2127 mmu_booke_kenter(mmu, sva, VM_PAGE_TO_PHYS(sm)); 2128 mmu_booke_kenter(mmu, dva, VM_PAGE_TO_PHYS(dm)); 2129 memcpy((caddr_t)dva, (caddr_t)sva, PAGE_SIZE); 2130 mmu_booke_kremove(mmu, dva); 2131 mmu_booke_kremove(mmu, sva); 2132 mtx_unlock(&copy_page_mutex); 2133} 2134 2135/* 2136 * mmu_booke_zero_page_idle zeros the specified hardware page by mapping it 2137 * into virtual memory and using bzero to clear its contents. This is intended 2138 * to be called from the vm_pagezero process only and outside of Giant. No 2139 * lock is required. 2140 */ 2141static void 2142mmu_booke_zero_page_idle(mmu_t mmu, vm_page_t m) 2143{ 2144 vm_offset_t va; 2145 2146 va = zero_page_idle_va; 2147 mmu_booke_kenter(mmu, va, VM_PAGE_TO_PHYS(m)); 2148 bzero((caddr_t)va, PAGE_SIZE); 2149 mmu_booke_kremove(mmu, va); 2150} 2151 2152/* 2153 * Return whether or not the specified physical page was modified 2154 * in any of the physical maps. 2155 */ 2156static boolean_t 2157mmu_booke_is_modified(mmu_t mmu, vm_page_t m) 2158{ 2159 pte_t *pte; 2160 pv_entry_t pv; 2161 boolean_t rv; 2162 2163 KASSERT((m->oflags & VPO_UNMANAGED) == 0, 2164 ("mmu_booke_is_modified: page %p is not managed", m)); 2165 rv = FALSE; 2166 2167 /* 2168 * If the page is not VPO_BUSY, then PGA_WRITEABLE cannot be 2169 * concurrently set while the object is locked. Thus, if PGA_WRITEABLE 2170 * is clear, no PTEs can be modified. 2171 */ 2172 VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED); 2173 if ((m->oflags & VPO_BUSY) == 0 && 2174 (m->aflags & PGA_WRITEABLE) == 0) 2175 return (rv); 2176 vm_page_lock_queues(); 2177 TAILQ_FOREACH(pv, &m->md.pv_list, pv_link) { 2178 PMAP_LOCK(pv->pv_pmap); 2179 if ((pte = pte_find(mmu, pv->pv_pmap, pv->pv_va)) != NULL && 2180 PTE_ISVALID(pte)) { 2181 if (PTE_ISMODIFIED(pte)) 2182 rv = TRUE; 2183 } 2184 PMAP_UNLOCK(pv->pv_pmap); 2185 if (rv) 2186 break; 2187 } 2188 vm_page_unlock_queues(); 2189 return (rv); 2190} 2191 2192/* 2193 * Return whether or not the specified virtual address is eligible 2194 * for prefault.
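 * Prefaulting is not implemented by this pmap, so FALSE is always returned.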
2195 */ 2196static boolean_t 2197mmu_booke_is_prefaultable(mmu_t mmu, pmap_t pmap, vm_offset_t addr) 2198{ 2199 2200 return (FALSE); 2201} 2202 2203/* 2204 * Return whether or not the specified physical page was referenced 2205 * in any physical maps. 2206 */ 2207static boolean_t 2208mmu_booke_is_referenced(mmu_t mmu, vm_page_t m) 2209{ 2210 pte_t *pte; 2211 pv_entry_t pv; 2212 boolean_t rv; 2213 2214 KASSERT((m->oflags & VPO_UNMANAGED) == 0, 2215 ("mmu_booke_is_referenced: page %p is not managed", m)); 2216 rv = FALSE; 2217 vm_page_lock_queues(); 2218 TAILQ_FOREACH(pv, &m->md.pv_list, pv_link) { 2219 PMAP_LOCK(pv->pv_pmap); 2220 if ((pte = pte_find(mmu, pv->pv_pmap, pv->pv_va)) != NULL && 2221 PTE_ISVALID(pte)) { 2222 if (PTE_ISREFERENCED(pte)) 2223 rv = TRUE; 2224 } 2225 PMAP_UNLOCK(pv->pv_pmap); 2226 if (rv) 2227 break; 2228 } 2229 vm_page_unlock_queues(); 2230 return (rv); 2231} 2232 2233/* 2234 * Clear the modify bits on the specified physical page. 2235 */ 2236static void 2237mmu_booke_clear_modify(mmu_t mmu, vm_page_t m) 2238{ 2239 pte_t *pte; 2240 pv_entry_t pv; 2241 2242 KASSERT((m->oflags & VPO_UNMANAGED) == 0, 2243 ("mmu_booke_clear_modify: page %p is not managed", m)); 2244 VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED); 2245 KASSERT((m->oflags & VPO_BUSY) == 0, 2246 ("mmu_booke_clear_modify: page %p is busy", m)); 2247 2248 /* 2249 * If the page is not PG_AWRITEABLE, then no PTEs can be modified. 2250 * If the object containing the page is locked and the page is not 2251 * VPO_BUSY, then PG_AWRITEABLE cannot be concurrently set. 2252 */ 2253 if ((m->aflags & PGA_WRITEABLE) == 0) 2254 return; 2255 vm_page_lock_queues(); 2256 TAILQ_FOREACH(pv, &m->md.pv_list, pv_link) { 2257 PMAP_LOCK(pv->pv_pmap); 2258 if ((pte = pte_find(mmu, pv->pv_pmap, pv->pv_va)) != NULL && 2259 PTE_ISVALID(pte)) { 2260 mtx_lock_spin(&tlbivax_mutex); 2261 tlb_miss_lock(); 2262 2263 if (pte->flags & (PTE_SW | PTE_UW | PTE_MODIFIED)) { 2264 tlb0_flush_entry(pv->pv_va); 2265 pte->flags &= ~(PTE_SW | PTE_UW | PTE_MODIFIED | 2266 PTE_REFERENCED); 2267 } 2268 2269 tlb_miss_unlock(); 2270 mtx_unlock_spin(&tlbivax_mutex); 2271 } 2272 PMAP_UNLOCK(pv->pv_pmap); 2273 } 2274 vm_page_unlock_queues(); 2275} 2276 2277/* 2278 * Return a count of reference bits for a page, clearing those bits. 2279 * It is not necessary for every reference bit to be cleared, but it 2280 * is necessary that 0 only be returned when there are truly no 2281 * reference bits set. 2282 * 2283 * XXX: The exact number of bits to check and clear is a matter that 2284 * should be tested and standardized at some point in the future for 2285 * optimal aging of shared pages. 
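 * As implemented below, at most five reference bits are cleared per call.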
2286 */ 2287static int 2288mmu_booke_ts_referenced(mmu_t mmu, vm_page_t m) 2289{ 2290 pte_t *pte; 2291 pv_entry_t pv; 2292 int count; 2293 2294 KASSERT((m->oflags & VPO_UNMANAGED) == 0, 2295 ("mmu_booke_ts_referenced: page %p is not managed", m)); 2296 count = 0; 2297 vm_page_lock_queues(); 2298 TAILQ_FOREACH(pv, &m->md.pv_list, pv_link) { 2299 PMAP_LOCK(pv->pv_pmap); 2300 if ((pte = pte_find(mmu, pv->pv_pmap, pv->pv_va)) != NULL && 2301 PTE_ISVALID(pte)) { 2302 if (PTE_ISREFERENCED(pte)) { 2303 mtx_lock_spin(&tlbivax_mutex); 2304 tlb_miss_lock(); 2305 2306 tlb0_flush_entry(pv->pv_va); 2307 pte->flags &= ~PTE_REFERENCED; 2308 2309 tlb_miss_unlock(); 2310 mtx_unlock_spin(&tlbivax_mutex); 2311 2312 if (++count > 4) { 2313 PMAP_UNLOCK(pv->pv_pmap); 2314 break; 2315 } 2316 } 2317 } 2318 PMAP_UNLOCK(pv->pv_pmap); 2319 } 2320 vm_page_unlock_queues(); 2321 return (count); 2322} 2323 2324/* 2325 * Clear the reference bit on the specified physical page. 2326 */ 2327static void 2328mmu_booke_clear_reference(mmu_t mmu, vm_page_t m) 2329{ 2330 pte_t *pte; 2331 pv_entry_t pv; 2332 2333 KASSERT((m->oflags & VPO_UNMANAGED) == 0, 2334 ("mmu_booke_clear_reference: page %p is not managed", m)); 2335 vm_page_lock_queues(); 2336 TAILQ_FOREACH(pv, &m->md.pv_list, pv_link) { 2337 PMAP_LOCK(pv->pv_pmap); 2338 if ((pte = pte_find(mmu, pv->pv_pmap, pv->pv_va)) != NULL && 2339 PTE_ISVALID(pte)) { 2340 if (PTE_ISREFERENCED(pte)) { 2341 mtx_lock_spin(&tlbivax_mutex); 2342 tlb_miss_lock(); 2343 2344 tlb0_flush_entry(pv->pv_va); 2345 pte->flags &= ~PTE_REFERENCED; 2346 2347 tlb_miss_unlock(); 2348 mtx_unlock_spin(&tlbivax_mutex); 2349 } 2350 } 2351 PMAP_UNLOCK(pv->pv_pmap); 2352 } 2353 vm_page_unlock_queues(); 2354} 2355 2356/* 2357 * Change wiring attribute for a map/virtual-address pair. 2358 */ 2359static void 2360mmu_booke_change_wiring(mmu_t mmu, pmap_t pmap, vm_offset_t va, boolean_t wired) 2361{ 2362 pte_t *pte; 2363 2364 PMAP_LOCK(pmap); 2365 if ((pte = pte_find(mmu, pmap, va)) != NULL) { 2366 if (wired) { 2367 if (!PTE_ISWIRED(pte)) { 2368 pte->flags |= PTE_WIRED; 2369 pmap->pm_stats.wired_count++; 2370 } 2371 } else { 2372 if (PTE_ISWIRED(pte)) { 2373 pte->flags &= ~PTE_WIRED; 2374 pmap->pm_stats.wired_count--; 2375 } 2376 } 2377 } 2378 PMAP_UNLOCK(pmap); 2379} 2380 2381/* 2382 * Return true if the pmap's pv is one of the first 16 pvs linked to from this 2383 * page. This count may be changed upwards or downwards in the future; it is 2384 * only necessary that true be returned for a small subset of pmaps for proper 2385 * page aging. 2386 */ 2387static boolean_t 2388mmu_booke_page_exists_quick(mmu_t mmu, pmap_t pmap, vm_page_t m) 2389{ 2390 pv_entry_t pv; 2391 int loops; 2392 boolean_t rv; 2393 2394 KASSERT((m->oflags & VPO_UNMANAGED) == 0, 2395 ("mmu_booke_page_exists_quick: page %p is not managed", m)); 2396 loops = 0; 2397 rv = FALSE; 2398 vm_page_lock_queues(); 2399 TAILQ_FOREACH(pv, &m->md.pv_list, pv_link) { 2400 if (pv->pv_pmap == pmap) { 2401 rv = TRUE; 2402 break; 2403 } 2404 if (++loops >= 16) 2405 break; 2406 } 2407 vm_page_unlock_queues(); 2408 return (rv); 2409} 2410 2411/* 2412 * Return the number of managed mappings to the given physical page that are 2413 * wired. 
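 * Unmanaged pages are reported as having zero wired mappings.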
2414 */ 2415static int 2416mmu_booke_page_wired_mappings(mmu_t mmu, vm_page_t m) 2417{ 2418 pv_entry_t pv; 2419 pte_t *pte; 2420 int count = 0; 2421 2422 if ((m->oflags & VPO_UNMANAGED) != 0) 2423 return (count); 2424 vm_page_lock_queues(); 2425 TAILQ_FOREACH(pv, &m->md.pv_list, pv_link) { 2426 PMAP_LOCK(pv->pv_pmap); 2427 if ((pte = pte_find(mmu, pv->pv_pmap, pv->pv_va)) != NULL) 2428 if (PTE_ISVALID(pte) && PTE_ISWIRED(pte)) 2429 count++; 2430 PMAP_UNLOCK(pv->pv_pmap); 2431 } 2432 vm_page_unlock_queues(); 2433 return (count); 2434} 2435 2436static int 2437mmu_booke_dev_direct_mapped(mmu_t mmu, vm_paddr_t pa, vm_size_t size) 2438{ 2439 int i; 2440 vm_offset_t va; 2441 2442 /* 2443 * This currently does not work for entries that 2444 * overlap TLB1 entries. 2445 */ 2446 for (i = 0; i < tlb1_idx; i ++) { 2447 if (tlb1_iomapped(i, pa, size, &va) == 0) 2448 return (0); 2449 } 2450 2451 return (EFAULT); 2452} 2453 2454vm_offset_t 2455mmu_booke_dumpsys_map(mmu_t mmu, struct pmap_md *md, vm_size_t ofs, 2456 vm_size_t *sz) 2457{ 2458 vm_paddr_t pa, ppa; 2459 vm_offset_t va; 2460 vm_size_t gran; 2461 2462 /* Raw physical memory dumps don't have a virtual address. */ 2463 if (md->md_vaddr == ~0UL) { 2464 /* We always map a 256MB page at 256M. */ 2465 gran = 256 * 1024 * 1024; 2466 pa = md->md_paddr + ofs; 2467 ppa = pa & ~(gran - 1); 2468 ofs = pa - ppa; 2469 va = gran; 2470 tlb1_set_entry(va, ppa, gran, _TLB_ENTRY_IO); 2471 if (*sz > (gran - ofs)) 2472 *sz = gran - ofs; 2473 return (va + ofs); 2474 } 2475 2476 /* Minidumps are based on virtual memory addresses. */ 2477 va = md->md_vaddr + ofs; 2478 if (va >= kernstart + kernsize) { 2479 gran = PAGE_SIZE - (va & PAGE_MASK); 2480 if (*sz > gran) 2481 *sz = gran; 2482 } 2483 return (va); 2484} 2485 2486void 2487mmu_booke_dumpsys_unmap(mmu_t mmu, struct pmap_md *md, vm_size_t ofs, 2488 vm_offset_t va) 2489{ 2490 2491 /* Raw physical memory dumps don't have a virtual address. */ 2492 if (md->md_vaddr == ~0UL) { 2493 tlb1_idx--; 2494 tlb1[tlb1_idx].mas1 = 0; 2495 tlb1[tlb1_idx].mas2 = 0; 2496 tlb1[tlb1_idx].mas3 = 0; 2497 tlb1_write_entry(tlb1_idx); 2498 return; 2499 } 2500 2501 /* Minidumps are based on virtual memory addresses. */ 2502 /* Nothing to do... */ 2503} 2504 2505struct pmap_md * 2506mmu_booke_scan_md(mmu_t mmu, struct pmap_md *prev) 2507{ 2508 static struct pmap_md md; 2509 pte_t *pte; 2510 vm_offset_t va; 2511 2512 if (dumpsys_minidump) { 2513 md.md_paddr = ~0UL; /* Minidumps use virtual addresses. */ 2514 if (prev == NULL) { 2515 /* 1st: kernel .data and .bss. */ 2516 md.md_index = 1; 2517 md.md_vaddr = trunc_page((uintptr_t)_etext); 2518 md.md_size = round_page((uintptr_t)_end) - md.md_vaddr; 2519 return (&md); 2520 } 2521 switch (prev->md_index) { 2522 case 1: 2523 /* 2nd: msgbuf and tables (see pmap_bootstrap()). */ 2524 md.md_index = 2; 2525 md.md_vaddr = data_start; 2526 md.md_size = data_end - data_start; 2527 break; 2528 case 2: 2529 /* 3rd: kernel VM. */ 2530 va = prev->md_vaddr + prev->md_size; 2531 /* Find start of next chunk (from va). */ 2532 while (va < virtual_end) { 2533 /* Don't dump the buffer cache. */ 2534 if (va >= kmi.buffer_sva && 2535 va < kmi.buffer_eva) { 2536 va = kmi.buffer_eva; 2537 continue; 2538 } 2539 pte = pte_find(mmu, kernel_pmap, va); 2540 if (pte != NULL && PTE_ISVALID(pte)) 2541 break; 2542 va += PAGE_SIZE; 2543 } 2544 if (va < virtual_end) { 2545 md.md_vaddr = va; 2546 va += PAGE_SIZE; 2547 /* Find last page in chunk. */ 2548 while (va < virtual_end) { 2549 /* Don't run into the buffer cache. 
 */ 2550 if (va == kmi.buffer_sva) 2551 break; 2552 pte = pte_find(mmu, kernel_pmap, va); 2553 if (pte == NULL || !PTE_ISVALID(pte)) 2554 break; 2555 va += PAGE_SIZE; 2556 } 2557 md.md_size = va - md.md_vaddr; 2558 break; 2559 } 2560 md.md_index = 3; 2561 /* FALLTHROUGH */ 2562 default: 2563 return (NULL); 2564 } 2565 } else { /* full dumps */ 2566 mem_regions(&physmem_regions, &physmem_regions_sz, 2567 &availmem_regions, &availmem_regions_sz); 2568 2569 if (prev == NULL) { 2570 /* first physical chunk. */ 2571 md.md_paddr = physmem_regions[0].mr_start; 2572 md.md_size = physmem_regions[0].mr_size; 2573 md.md_vaddr = ~0UL; 2574 md.md_index = 1; 2575 } else if (md.md_index < physmem_regions_sz) { 2576 md.md_paddr = physmem_regions[md.md_index].mr_start; 2577 md.md_size = physmem_regions[md.md_index].mr_size; 2578 md.md_vaddr = ~0UL; 2579 md.md_index++; 2580 } else { 2581 /* There's no next physical chunk. */ 2582 return (NULL); 2583 } 2584 } 2585 2586 return (&md); 2587} 2588 2589/* 2590 * Map a set of physical memory pages into the kernel virtual address space. 2591 * Return a pointer to where it is mapped. This routine is intended to be used 2592 * for mapping device memory, NOT real memory. 2593 */ 2594static void * 2595mmu_booke_mapdev(mmu_t mmu, vm_paddr_t pa, vm_size_t size) 2596{ 2597 void *res; 2598 uintptr_t va; 2599 vm_size_t sz; 2600 2601 /* 2602 * CCSR is premapped. Note that (pa + size - 1) is there to make sure 2603 * we don't wrap around. Devices on the local bus typically extend all 2604 * the way up to and including 0xffffffff. In that case (pa + size) 2605 * would be 0. This would create a false positive (i.e., that the address is 2606 * within the CCSR) and no mapping would be created. 2607 */ 2608 if (pa >= ccsrbar_pa && (pa + size - 1) < (ccsrbar_pa + CCSRBAR_SIZE)) { 2609 va = CCSRBAR_VA + (pa - ccsrbar_pa); 2610 return ((void *)va); 2611 } 2612 2613 va = (pa >= 0x80000000) ? pa : (0xe2000000 + pa); 2614 res = (void *)va; 2615 2616 do { 2617 sz = 1 << (ilog2(size) & ~1); 2618 if (bootverbose) 2619 printf("Wiring VA=%x to PA=%x (size=%x), " 2620 "using TLB1[%d]\n", va, pa, sz, tlb1_idx); 2621 tlb1_set_entry(va, pa, sz, _TLB_ENTRY_IO); 2622 size -= sz; 2623 pa += sz; 2624 va += sz; 2625 } while (size > 0); 2626 2627 return (res); 2628} 2629 2630/* 2631 * 'Unmap' a range mapped by mmu_booke_mapdev(). 2632 */ 2633static void 2634mmu_booke_unmapdev(mmu_t mmu, vm_offset_t va, vm_size_t size) 2635{ 2636 vm_offset_t base, offset; 2637 2638 /* 2639 * Unmap only if this is inside kernel virtual space. 2640 */ 2641 if ((va >= VM_MIN_KERNEL_ADDRESS) && (va <= VM_MAX_KERNEL_ADDRESS)) { 2642 base = trunc_page(va); 2643 offset = va & PAGE_MASK; 2644 size = roundup(offset + size, PAGE_SIZE); 2645 kmem_free(kernel_map, base, size); 2646 } 2647} 2648 2649/* 2650 * mmu_booke_object_init_pt preloads the ptes for a given object into the 2651 * specified pmap. This eliminates the blast of soft faults on process startup 2652 * and immediately after an mmap. 2653 */ 2654static void 2655mmu_booke_object_init_pt(mmu_t mmu, pmap_t pmap, vm_offset_t addr, 2656 vm_object_t object, vm_pindex_t pindex, vm_size_t size) 2657{ 2658 2659 VM_OBJECT_LOCK_ASSERT(object, MA_OWNED); 2660 KASSERT(object->type == OBJT_DEVICE || object->type == OBJT_SG, 2661 ("mmu_booke_object_init_pt: non-device object")); 2662} 2663 2664/* 2665 * Perform the pmap work for mincore.
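 * Not implemented; the TODO placeholder below is reached if this is called.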
2666 */ 2667static int 2668mmu_booke_mincore(mmu_t mmu, pmap_t pmap, vm_offset_t addr, 2669 vm_paddr_t *locked_pa) 2670{ 2671 2672 TODO; 2673 return (0); 2674} 2675 2676/**************************************************************************/ 2677/* TID handling */ 2678/**************************************************************************/ 2679 2680/* 2681 * Allocate a TID. If necessary, steal one from someone else. 2682 * The new TID is flushed from the TLB before returning. 2683 */ 2684static tlbtid_t 2685tid_alloc(pmap_t pmap) 2686{ 2687 tlbtid_t tid; 2688 int thiscpu; 2689 2690 KASSERT((pmap != kernel_pmap), ("tid_alloc: kernel pmap")); 2691 2692 CTR2(KTR_PMAP, "%s: s (pmap = %p)", __func__, pmap); 2693 2694 thiscpu = PCPU_GET(cpuid); 2695 2696 tid = PCPU_GET(tid_next); 2697 if (tid > TID_MAX) 2698 tid = TID_MIN; 2699 PCPU_SET(tid_next, tid + 1); 2700 2701 /* If we are stealing TID then clear the relevant pmap's field */ 2702 if (tidbusy[thiscpu][tid] != NULL) { 2703 2704 CTR2(KTR_PMAP, "%s: warning: stealing tid %d", __func__, tid); 2705 2706 tidbusy[thiscpu][tid]->pm_tid[thiscpu] = TID_NONE; 2707 2708 /* Flush all entries from TLB0 matching this TID. */ 2709 tid_flush(tid); 2710 } 2711 2712 tidbusy[thiscpu][tid] = pmap; 2713 pmap->pm_tid[thiscpu] = tid; 2714 __asm __volatile("msync; isync"); 2715 2716 CTR3(KTR_PMAP, "%s: e (%02d next = %02d)", __func__, tid, 2717 PCPU_GET(tid_next)); 2718 2719 return (tid); 2720} 2721 2722/**************************************************************************/ 2723/* TLB0 handling */ 2724/**************************************************************************/ 2725 2726static void 2727tlb_print_entry(int i, uint32_t mas1, uint32_t mas2, uint32_t mas3, 2728 uint32_t mas7) 2729{ 2730 int as; 2731 char desc[3]; 2732 tlbtid_t tid; 2733 vm_size_t size; 2734 unsigned int tsize; 2735 2736 desc[2] = '\0'; 2737 if (mas1 & MAS1_VALID) 2738 desc[0] = 'V'; 2739 else 2740 desc[0] = ' '; 2741 2742 if (mas1 & MAS1_IPROT) 2743 desc[1] = 'P'; 2744 else 2745 desc[1] = ' '; 2746 2747 as = (mas1 & MAS1_TS_MASK) ? 1 : 0; 2748 tid = MAS1_GETTID(mas1); 2749 2750 tsize = (mas1 & MAS1_TSIZE_MASK) >> MAS1_TSIZE_SHIFT; 2751 size = 0; 2752 if (tsize) 2753 size = tsize2size(tsize); 2754 2755 debugf("%3d: (%s) [AS=%d] " 2756 "sz = 0x%08x tsz = %d tid = %d mas1 = 0x%08x " 2757 "mas2(va) = 0x%08x mas3(pa) = 0x%08x mas7 = 0x%08x\n", 2758 i, desc, as, size, tsize, tid, mas1, mas2, mas3, mas7); 2759} 2760 2761/* Convert TLB0 va and way number to tlb0[] table index. */ 2762static inline unsigned int 2763tlb0_tableidx(vm_offset_t va, unsigned int way) 2764{ 2765 unsigned int idx; 2766 2767 idx = (way * TLB0_ENTRIES_PER_WAY); 2768 idx += (va & MAS2_TLB0_ENTRY_IDX_MASK) >> MAS2_TLB0_ENTRY_IDX_SHIFT; 2769 return (idx); 2770} 2771 2772/* 2773 * Invalidate TLB0 entry. 
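 * The caller must hold tlbivax_mutex; the entry is invalidated with a
 * tlbivax/tlbsync sequence.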
2774 */ 2775static inline void 2776tlb0_flush_entry(vm_offset_t va) 2777{ 2778 2779 CTR2(KTR_PMAP, "%s: s va=0x%08x", __func__, va); 2780 2781 mtx_assert(&tlbivax_mutex, MA_OWNED); 2782 2783 __asm __volatile("tlbivax 0, %0" :: "r"(va & MAS2_EPN_MASK)); 2784 __asm __volatile("isync; msync"); 2785 __asm __volatile("tlbsync; msync"); 2786 2787 CTR1(KTR_PMAP, "%s: e", __func__); 2788} 2789 2790/* Print out contents of the MAS registers for each TLB0 entry */ 2791void 2792tlb0_print_tlbentries(void) 2793{ 2794 uint32_t mas0, mas1, mas2, mas3, mas7; 2795 int entryidx, way, idx; 2796 2797 debugf("TLB0 entries:\n"); 2798 for (way = 0; way < TLB0_WAYS; way ++) 2799 for (entryidx = 0; entryidx < TLB0_ENTRIES_PER_WAY; entryidx++) { 2800 2801 mas0 = MAS0_TLBSEL(0) | MAS0_ESEL(way); 2802 mtspr(SPR_MAS0, mas0); 2803 __asm __volatile("isync"); 2804 2805 mas2 = entryidx << MAS2_TLB0_ENTRY_IDX_SHIFT; 2806 mtspr(SPR_MAS2, mas2); 2807 2808 __asm __volatile("isync; tlbre"); 2809 2810 mas1 = mfspr(SPR_MAS1); 2811 mas2 = mfspr(SPR_MAS2); 2812 mas3 = mfspr(SPR_MAS3); 2813 mas7 = mfspr(SPR_MAS7); 2814 2815 idx = tlb0_tableidx(mas2, way); 2816 tlb_print_entry(idx, mas1, mas2, mas3, mas7); 2817 } 2818} 2819 2820/**************************************************************************/ 2821/* TLB1 handling */ 2822/**************************************************************************/ 2823 2824/* 2825 * TLB1 mapping notes: 2826 * 2827 * TLB1[0] CCSRBAR 2828 * TLB1[1] Kernel text and data. 2829 * TLB1[2-15] Additional kernel text and data mappings (if required), PCI 2830 * windows, other devices mappings. 2831 */ 2832 2833/* 2834 * Write given entry to TLB1 hardware. 2835 * Use 32 bit pa, clear 4 high-order bits of RPN (mas7). 2836 */ 2837static void 2838tlb1_write_entry(unsigned int idx) 2839{ 2840 uint32_t mas0, mas7; 2841 2842 //debugf("tlb1_write_entry: s\n"); 2843 2844 /* Clear high order RPN bits */ 2845 mas7 = 0; 2846 2847 /* Select entry */ 2848 mas0 = MAS0_TLBSEL(1) | MAS0_ESEL(idx); 2849 //debugf("tlb1_write_entry: mas0 = 0x%08x\n", mas0); 2850 2851 mtspr(SPR_MAS0, mas0); 2852 __asm __volatile("isync"); 2853 mtspr(SPR_MAS1, tlb1[idx].mas1); 2854 __asm __volatile("isync"); 2855 mtspr(SPR_MAS2, tlb1[idx].mas2); 2856 __asm __volatile("isync"); 2857 mtspr(SPR_MAS3, tlb1[idx].mas3); 2858 __asm __volatile("isync"); 2859 mtspr(SPR_MAS7, mas7); 2860 __asm __volatile("isync; tlbwe; isync; msync"); 2861 2862 //debugf("tlb1_write_entry: e\n"); 2863} 2864 2865/* 2866 * Return the largest uint value log such that 2^log <= num. 2867 */ 2868static unsigned int 2869ilog2(unsigned int num) 2870{ 2871 int lz; 2872 2873 __asm ("cntlzw %0, %1" : "=r" (lz) : "r" (num)); 2874 return (31 - lz); 2875} 2876 2877/* 2878 * Convert TLB TSIZE value to mapped region size. 2879 */ 2880static vm_size_t 2881tsize2size(unsigned int tsize) 2882{ 2883 2884 /* 2885 * size = 4^tsize KB 2886 * size = 4^tsize * 2^10 = 2^(2 * tsize - 10) 2887 */ 2888 2889 return ((1 << (2 * tsize)) * 1024); 2890} 2891 2892/* 2893 * Convert region size (must be power of 4) to TLB TSIZE value. 2894 */ 2895static unsigned int 2896size2tsize(vm_size_t size) 2897{ 2898 2899 return (ilog2(size) / 2 - 5); 2900} 2901 2902/* 2903 * Register permanent kernel mapping in TLB1. 2904 * 2905 * Entries are created starting from index 0 (current free entry is 2906 * kept in tlb1_idx) and are not supposed to be invalidated. 
2907 */ 2908static int 2909tlb1_set_entry(vm_offset_t va, vm_offset_t pa, vm_size_t size, 2910 uint32_t flags) 2911{ 2912 uint32_t ts, tid; 2913 int tsize; 2914 2915 if (tlb1_idx >= TLB1_ENTRIES) { 2916 printf("tlb1_set_entry: TLB1 full!\n"); 2917 return (-1); 2918 } 2919 2920 /* Convert size to TSIZE */ 2921 tsize = size2tsize(size); 2922 2923 tid = (TID_KERNEL << MAS1_TID_SHIFT) & MAS1_TID_MASK; 2924 /* XXX TS is hard coded to 0 for now as we only use a single address space */ 2925 ts = (0 << MAS1_TS_SHIFT) & MAS1_TS_MASK; 2926 2927 /* XXX LOCK tlb1[] */ 2928 2929 tlb1[tlb1_idx].mas1 = MAS1_VALID | MAS1_IPROT | ts | tid; 2930 tlb1[tlb1_idx].mas1 |= ((tsize << MAS1_TSIZE_SHIFT) & MAS1_TSIZE_MASK); 2931 tlb1[tlb1_idx].mas2 = (va & MAS2_EPN_MASK) | flags; 2932 2933 /* Set supervisor RWX permission bits */ 2934 tlb1[tlb1_idx].mas3 = (pa & MAS3_RPN) | MAS3_SR | MAS3_SW | MAS3_SX; 2935 2936 tlb1_write_entry(tlb1_idx++); 2937 2938 /* XXX UNLOCK tlb1[] */ 2939 2940 /* 2941 * XXX in general TLB1 updates should be propagated between CPUs, 2942 * since the current design assumes the same TLB1 set-up on all 2943 * cores. 2944 */ 2945 return (0); 2946} 2947 2948/* 2949 * Map in a contiguous RAM region into the TLB1 using a maximum of 2950 * KERNEL_REGION_MAX_TLB_ENTRIES entries. 2951 * 2952 * If necessary, round up the last entry size and return the total size 2953 * used by all allocated entries. 2954 */ 2955vm_size_t 2956tlb1_mapin_region(vm_offset_t va, vm_paddr_t pa, vm_size_t size) 2957{ 2958 vm_size_t pgs[KERNEL_REGION_MAX_TLB_ENTRIES]; 2959 vm_size_t mapped, pgsz, base, mask; 2960 int idx, nents; 2961 2962 /* Round up to the next 1M */ 2963 size = (size + (1 << 20) - 1) & ~((1 << 20) - 1); 2964 2965 mapped = 0; 2966 idx = 0; 2967 base = va; 2968 pgsz = 64*1024*1024; 2969 while (mapped < size) { 2970 while (mapped < size && idx < KERNEL_REGION_MAX_TLB_ENTRIES) { 2971 while (pgsz > (size - mapped)) 2972 pgsz >>= 2; 2973 pgs[idx++] = pgsz; 2974 mapped += pgsz; 2975 } 2976 2977 /* We under-map. Correct for this. */ 2978 if (mapped < size) { 2979 while (pgs[idx - 1] == pgsz) { 2980 idx--; 2981 mapped -= pgsz; 2982 } 2983 /* XXX We may increase beyond our starting point. */ 2984 pgsz <<= 2; 2985 pgs[idx++] = pgsz; 2986 mapped += pgsz; 2987 } 2988 } 2989 2990 nents = idx; 2991 mask = pgs[0] - 1; 2992 /* Align address to the boundary */ 2993 if (va & mask) { 2994 va = (va + mask) & ~mask; 2995 pa = (pa + mask) & ~mask; 2996 } 2997 2998 for (idx = 0; idx < nents; idx++) { 2999 pgsz = pgs[idx]; 3000 debugf("%u: %x -> %x, size=%x\n", idx, pa, va, pgsz); 3001 tlb1_set_entry(va, pa, pgsz, _TLB_ENTRY_MEM); 3002 pa += pgsz; 3003 va += pgsz; 3004 } 3005 3006 mapped = (va - base); 3007 debugf("mapped size 0x%08x (wasted space 0x%08x)\n", 3008 mapped, mapped - size); 3009 return (mapped); 3010} 3011 3012/* 3013 * TLB1 initialization routine, to be called after the very first 3014 * assembler level setup done in locore.S. 3015 */ 3016void 3017tlb1_init(vm_offset_t ccsrbar) 3018{ 3019 uint32_t mas0, mas1, mas3; 3020 uint32_t tsz; 3021 u_int i; 3022 3023 ccsrbar_pa = ccsrbar; 3024 3025 if (bootinfo != NULL && bootinfo[0] != 1) { 3026 tlb1_idx = *((uint16_t *)(bootinfo + 8)); 3027 } else 3028 tlb1_idx = 1; 3029 3030 /* The first entry/entries are used to map the kernel.
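 * They are read back from the hardware here to record kernload and to
 * accumulate kernsize.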
 */ 3031 for (i = 0; i < tlb1_idx; i++) { 3032 mas0 = MAS0_TLBSEL(1) | MAS0_ESEL(i); 3033 mtspr(SPR_MAS0, mas0); 3034 __asm __volatile("isync; tlbre"); 3035 3036 mas1 = mfspr(SPR_MAS1); 3037 if ((mas1 & MAS1_VALID) == 0) 3038 continue; 3039 3040 mas3 = mfspr(SPR_MAS3); 3041 3042 tlb1[i].mas1 = mas1; 3043 tlb1[i].mas2 = mfspr(SPR_MAS2); 3044 tlb1[i].mas3 = mas3; 3045 3046 if (i == 0) 3047 kernload = mas3 & MAS3_RPN; 3048 3049 tsz = (mas1 & MAS1_TSIZE_MASK) >> MAS1_TSIZE_SHIFT; 3050 kernsize += (tsz > 0) ? tsize2size(tsz) : 0; 3051 } 3052 3053 /* Map in CCSRBAR. */ 3054 tlb1_set_entry(CCSRBAR_VA, ccsrbar, CCSRBAR_SIZE, _TLB_ENTRY_IO); 3055 3056#ifdef SMP 3057 bp_ntlb1s = tlb1_idx; 3058#endif 3059 3060 /* Purge the remaining entries */ 3061 for (i = tlb1_idx; i < TLB1_ENTRIES; i++) 3062 tlb1_write_entry(i); 3063 3064 /* Setup TLB miss defaults */ 3065 set_mas4_defaults(); 3066} 3067 3068/* 3069 * Setup MAS4 defaults. 3070 * These values are loaded to MAS0-2 on a TLB miss. 3071 */ 3072static void 3073set_mas4_defaults(void) 3074{ 3075 uint32_t mas4; 3076 3077 /* Defaults: TLB0, PID0, TSIZED=4K */ 3078 mas4 = MAS4_TLBSELD0; 3079 mas4 |= (TLB_SIZE_4K << MAS4_TSIZED_SHIFT) & MAS4_TSIZED_MASK; 3080#ifdef SMP 3081 mas4 |= MAS4_MD; 3082#endif 3083 mtspr(SPR_MAS4, mas4); 3084 __asm __volatile("isync"); 3085} 3086 3087/* 3088 * Print out contents of the MAS registers for each TLB1 entry 3089 */ 3090void 3091tlb1_print_tlbentries(void) 3092{ 3093 uint32_t mas0, mas1, mas2, mas3, mas7; 3094 int i; 3095 3096 debugf("TLB1 entries:\n"); 3097 for (i = 0; i < TLB1_ENTRIES; i++) { 3098 3099 mas0 = MAS0_TLBSEL(1) | MAS0_ESEL(i); 3100 mtspr(SPR_MAS0, mas0); 3101 3102 __asm __volatile("isync; tlbre"); 3103 3104 mas1 = mfspr(SPR_MAS1); 3105 mas2 = mfspr(SPR_MAS2); 3106 mas3 = mfspr(SPR_MAS3); 3107 mas7 = mfspr(SPR_MAS7); 3108 3109 tlb_print_entry(i, mas1, mas2, mas3, mas7); 3110 } 3111} 3112 3113/* 3114 * Print out contents of the in-ram tlb1 table. 3115 */ 3116void 3117tlb1_print_entries(void) 3118{ 3119 int i; 3120 3121 debugf("tlb1[] table entries:\n"); 3122 for (i = 0; i < TLB1_ENTRIES; i++) 3123 tlb_print_entry(i, tlb1[i].mas1, tlb1[i].mas2, tlb1[i].mas3, 0); 3124} 3125 3126/* 3127 * Return 0 if the physical IO range is encompassed by one of the 3128 * TLB1 entries, otherwise return a related error code. 3129 */ 3130static int 3131tlb1_iomapped(int i, vm_paddr_t pa, vm_size_t size, vm_offset_t *va) 3132{ 3133 uint32_t prot; 3134 vm_paddr_t pa_start; 3135 vm_paddr_t pa_end; 3136 unsigned int entry_tsize; 3137 vm_size_t entry_size; 3138 3139 *va = (vm_offset_t)NULL; 3140 3141 /* Skip invalid entries */ 3142 if (!(tlb1[i].mas1 & MAS1_VALID)) 3143 return (EINVAL); 3144 3145 /* 3146 * The entry must be cache-inhibited, guarded, and r/w 3147 * so it can function as an i/o page 3148 */ 3149 prot = tlb1[i].mas2 & (MAS2_I | MAS2_G); 3150 if (prot != (MAS2_I | MAS2_G)) 3151 return (EPERM); 3152 3153 prot = tlb1[i].mas3 & (MAS3_SR | MAS3_SW); 3154 if (prot != (MAS3_SR | MAS3_SW)) 3155 return (EPERM); 3156 3157 /* The address should be within the entry range. */ 3158 entry_tsize = (tlb1[i].mas1 & MAS1_TSIZE_MASK) >> MAS1_TSIZE_SHIFT; 3159 KASSERT((entry_tsize), ("tlb1_iomapped: invalid entry tsize")); 3160 3161 entry_size = tsize2size(entry_tsize); 3162 pa_start = tlb1[i].mas3 & MAS3_RPN; 3163 pa_end = pa_start + entry_size - 1; 3164 3165 if ((pa < pa_start) || ((pa + size) > pa_end)) 3166 return (ERANGE); 3167 3168 /* Return virtual address of this mapping.
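 * The returned VA is the entry's EPN plus the offset of pa within the
 * mapped range.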
*/ 3169 *va = (tlb1[i].mas2 & MAS2_EPN_MASK) + (pa - pa_start); 3170 return (0); 3171} 3172