/*-
 * Copyright (C) 2007-2009 Semihalf, Rafal Jaworowski <raj@semihalf.com>
 * Copyright (C) 2006 Semihalf, Marian Balakowicz <m8@semihalf.com>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN
 * NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
 * TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
 * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
 * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Some hw specific parts of this pmap were derived or influenced
 * by NetBSD's ibm4xx pmap module. More generic code is shared with
 * a few other pmap modules from the FreeBSD tree.
 */

/*
 * VM layout notes:
 *
 * Kernel and user threads run within one common virtual address space
 * defined by AS=0.
 *
 * Virtual address space layout:
 * -----------------------------
 * 0x0000_0000 - 0xafff_ffff	: user process
 * 0xb000_0000 - 0xbfff_ffff	: pmap_mapdev()-ed area (PCI/PCIE etc.)
 * 0xc000_0000 - 0xc0ff_ffff	: kernel reserved
 *   0xc000_0000 - data_end	: kernel code+data, env, metadata etc.
 * 0xc100_0000 - 0xfeef_ffff	: KVA
 *   0xc100_0000 - 0xc100_3fff	: reserved for page zero/copy
 *   0xc100_4000 - 0xc200_3fff	: reserved for ptbl bufs
 *   0xc200_4000 - 0xc200_8fff	: guard page + kstack0
 *   0xc200_9000 - 0xfeef_ffff	: actual free KVA space
 * 0xfef0_0000 - 0xffff_ffff	: I/O devices region
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: head/sys/powerpc/booke/pmap.c 194123 2009-06-13 18:35:29Z alc $");

#include <sys/types.h>
#include <sys/param.h>
#include <sys/malloc.h>
#include <sys/ktr.h>
#include <sys/proc.h>
#include <sys/user.h>
#include <sys/queue.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/msgbuf.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/smp.h>
#include <sys/vmmeter.h>

#include <vm/vm.h>
#include <vm/vm_page.h>
#include <vm/vm_kern.h>
#include <vm/vm_pageout.h>
#include <vm/vm_extern.h>
#include <vm/vm_object.h>
#include <vm/vm_param.h>
#include <vm/vm_map.h>
#include <vm/vm_pager.h>
#include <vm/uma.h>

#include <machine/bootinfo.h>
#include <machine/cpu.h>
#include <machine/pcb.h>
#include <machine/platform.h>

#include <machine/tlb.h>
#include <machine/spr.h>
#include <machine/vmparam.h>
#include <machine/md_var.h>
#include <machine/mmuvar.h>
#include <machine/pmap.h>
#include <machine/pte.h>

#include "mmu_if.h"

#define DEBUG
#undef DEBUG

#ifdef  DEBUG
#define debugf(fmt, args...) printf(fmt, ##args)
#else
#define debugf(fmt, args...)
#endif

#define TODO			panic("%s: not implemented", __func__);

#include "opt_sched.h"
#ifndef SCHED_4BSD
#error "e500 only works with SCHED_4BSD which uses a global scheduler lock."
#endif
extern struct mtx sched_lock;

extern int dumpsys_minidump;

extern unsigned char _etext[];
extern unsigned char _end[];

/* Kernel physical load address. */
extern uint32_t kernload;
vm_offset_t kernstart;
vm_size_t kernsize;

/* Message buffer and tables. */
static vm_offset_t data_start;
static vm_size_t data_end;

/* Phys/avail memory regions. */
static struct mem_region *availmem_regions;
static int availmem_regions_sz;
static struct mem_region *physmem_regions;
static int physmem_regions_sz;

/* Reserved KVA space and mutex for mmu_booke_zero_page. */
static vm_offset_t zero_page_va;
static struct mtx zero_page_mutex;

static struct mtx tlbivax_mutex;

/*
 * Reserved KVA space for mmu_booke_zero_page_idle. This is used
 * by the idle thread only, no lock required.
 */
static vm_offset_t zero_page_idle_va;

/* Reserved KVA space and mutex for mmu_booke_copy_page. */
static vm_offset_t copy_page_src_va;
static vm_offset_t copy_page_dst_va;
static struct mtx copy_page_mutex;

/**************************************************************************/
/* PMAP */
/**************************************************************************/

static void mmu_booke_enter_locked(mmu_t, pmap_t, vm_offset_t, vm_page_t,
    vm_prot_t, boolean_t);

unsigned int kptbl_min;		/* Index of the first kernel ptbl. */
unsigned int kernel_ptbls;	/* Number of KVA ptbls. */

/*
 * If user pmap is processed with mmu_booke_remove and the resident count
 * drops to 0, there are no more pages to remove, so we need not continue.
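 *
 * Illustrative use (this is how mmu_booke_remove() below applies it):
 *
 *	if (PMAP_REMOVE_DONE(pmap))
 *		return;		/* nothing left to unmap in this pmap */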
162 */ 163#define PMAP_REMOVE_DONE(pmap) \ 164 ((pmap) != kernel_pmap && (pmap)->pm_stats.resident_count == 0) 165 166extern void tlb_lock(uint32_t *); 167extern void tlb_unlock(uint32_t *); 168extern void tid_flush(tlbtid_t); 169 170/**************************************************************************/ 171/* TLB and TID handling */ 172/**************************************************************************/ 173 174/* Translation ID busy table */ 175static volatile pmap_t tidbusy[MAXCPU][TID_MAX + 1]; 176 177/* 178 * TLB0 capabilities (entry, way numbers etc.). These can vary between e500 179 * core revisions and should be read from h/w registers during early config. 180 */ 181uint32_t tlb0_entries; 182uint32_t tlb0_ways; 183uint32_t tlb0_entries_per_way; 184 185#define TLB0_ENTRIES (tlb0_entries) 186#define TLB0_WAYS (tlb0_ways) 187#define TLB0_ENTRIES_PER_WAY (tlb0_entries_per_way) 188 189#define TLB1_ENTRIES 16 190 191/* In-ram copy of the TLB1 */ 192static tlb_entry_t tlb1[TLB1_ENTRIES]; 193 194/* Next free entry in the TLB1 */ 195static unsigned int tlb1_idx; 196 197static tlbtid_t tid_alloc(struct pmap *); 198 199static void tlb_print_entry(int, uint32_t, uint32_t, uint32_t, uint32_t); 200 201static int tlb1_set_entry(vm_offset_t, vm_offset_t, vm_size_t, uint32_t); 202static void tlb1_write_entry(unsigned int); 203static int tlb1_iomapped(int, vm_paddr_t, vm_size_t, vm_offset_t *); 204static vm_size_t tlb1_mapin_region(vm_offset_t, vm_offset_t, vm_size_t); 205 206static vm_size_t tsize2size(unsigned int); 207static unsigned int size2tsize(vm_size_t); 208static unsigned int ilog2(unsigned int); 209 210static void set_mas4_defaults(void); 211 212static inline void tlb0_flush_entry(vm_offset_t); 213static inline unsigned int tlb0_tableidx(vm_offset_t, unsigned int); 214 215/**************************************************************************/ 216/* Page table management */ 217/**************************************************************************/ 218 219/* Data for the pv entry allocation mechanism */ 220static uma_zone_t pvzone; 221static struct vm_object pvzone_obj; 222static int pv_entry_count = 0, pv_entry_max = 0, pv_entry_high_water = 0; 223 224#define PV_ENTRY_ZONE_MIN 2048 /* min pv entries in uma zone */ 225 226#ifndef PMAP_SHPGPERPROC 227#define PMAP_SHPGPERPROC 200 228#endif 229 230static void ptbl_init(void); 231static struct ptbl_buf *ptbl_buf_alloc(void); 232static void ptbl_buf_free(struct ptbl_buf *); 233static void ptbl_free_pmap_ptbl(pmap_t, pte_t *); 234 235static pte_t *ptbl_alloc(mmu_t, pmap_t, unsigned int); 236static void ptbl_free(mmu_t, pmap_t, unsigned int); 237static void ptbl_hold(mmu_t, pmap_t, unsigned int); 238static int ptbl_unhold(mmu_t, pmap_t, unsigned int); 239 240static vm_paddr_t pte_vatopa(mmu_t, pmap_t, vm_offset_t); 241static pte_t *pte_find(mmu_t, pmap_t, vm_offset_t); 242static void pte_enter(mmu_t, pmap_t, vm_page_t, vm_offset_t, uint32_t); 243static int pte_remove(mmu_t, pmap_t, vm_offset_t, uint8_t); 244 245static pv_entry_t pv_alloc(void); 246static void pv_free(pv_entry_t); 247static void pv_insert(pmap_t, vm_offset_t, vm_page_t); 248static void pv_remove(pmap_t, vm_offset_t, vm_page_t); 249 250/* Number of kva ptbl buffers, each covering one ptbl (PTBL_PAGES). */ 251#define PTBL_BUFS (128 * 16) 252 253struct ptbl_buf { 254 TAILQ_ENTRY(ptbl_buf) link; /* list link */ 255 vm_offset_t kva; /* va of mapping */ 256}; 257 258/* ptbl free list and a lock used for access synchronization. 
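 * Buffers are taken from and returned to this list by ptbl_buf_alloc()
 * and ptbl_buf_free() below, always under ptbl_buf_freelist_lock.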
*/ 259static TAILQ_HEAD(, ptbl_buf) ptbl_buf_freelist; 260static struct mtx ptbl_buf_freelist_lock; 261 262/* Base address of kva space allocated fot ptbl bufs. */ 263static vm_offset_t ptbl_buf_pool_vabase; 264 265/* Pointer to ptbl_buf structures. */ 266static struct ptbl_buf *ptbl_bufs; 267 268void pmap_bootstrap_ap(volatile uint32_t *); 269 270/* 271 * Kernel MMU interface 272 */ 273static void mmu_booke_change_wiring(mmu_t, pmap_t, vm_offset_t, boolean_t); 274static void mmu_booke_clear_modify(mmu_t, vm_page_t); 275static void mmu_booke_clear_reference(mmu_t, vm_page_t); 276static void mmu_booke_copy(mmu_t, pmap_t, pmap_t, vm_offset_t, 277 vm_size_t, vm_offset_t); 278static void mmu_booke_copy_page(mmu_t, vm_page_t, vm_page_t); 279static void mmu_booke_enter(mmu_t, pmap_t, vm_offset_t, vm_page_t, 280 vm_prot_t, boolean_t); 281static void mmu_booke_enter_object(mmu_t, pmap_t, vm_offset_t, vm_offset_t, 282 vm_page_t, vm_prot_t); 283static void mmu_booke_enter_quick(mmu_t, pmap_t, vm_offset_t, vm_page_t, 284 vm_prot_t); 285static vm_paddr_t mmu_booke_extract(mmu_t, pmap_t, vm_offset_t); 286static vm_page_t mmu_booke_extract_and_hold(mmu_t, pmap_t, vm_offset_t, 287 vm_prot_t); 288static void mmu_booke_init(mmu_t); 289static boolean_t mmu_booke_is_modified(mmu_t, vm_page_t); 290static boolean_t mmu_booke_is_prefaultable(mmu_t, pmap_t, vm_offset_t); 291static boolean_t mmu_booke_ts_referenced(mmu_t, vm_page_t); 292static vm_offset_t mmu_booke_map(mmu_t, vm_offset_t *, vm_offset_t, vm_offset_t, 293 int); 294static int mmu_booke_mincore(mmu_t, pmap_t, vm_offset_t); 295static void mmu_booke_object_init_pt(mmu_t, pmap_t, vm_offset_t, 296 vm_object_t, vm_pindex_t, vm_size_t); 297static boolean_t mmu_booke_page_exists_quick(mmu_t, pmap_t, vm_page_t); 298static void mmu_booke_page_init(mmu_t, vm_page_t); 299static int mmu_booke_page_wired_mappings(mmu_t, vm_page_t); 300static void mmu_booke_pinit(mmu_t, pmap_t); 301static void mmu_booke_pinit0(mmu_t, pmap_t); 302static void mmu_booke_protect(mmu_t, pmap_t, vm_offset_t, vm_offset_t, 303 vm_prot_t); 304static void mmu_booke_qenter(mmu_t, vm_offset_t, vm_page_t *, int); 305static void mmu_booke_qremove(mmu_t, vm_offset_t, int); 306static void mmu_booke_release(mmu_t, pmap_t); 307static void mmu_booke_remove(mmu_t, pmap_t, vm_offset_t, vm_offset_t); 308static void mmu_booke_remove_all(mmu_t, vm_page_t); 309static void mmu_booke_remove_write(mmu_t, vm_page_t); 310static void mmu_booke_zero_page(mmu_t, vm_page_t); 311static void mmu_booke_zero_page_area(mmu_t, vm_page_t, int, int); 312static void mmu_booke_zero_page_idle(mmu_t, vm_page_t); 313static void mmu_booke_activate(mmu_t, struct thread *); 314static void mmu_booke_deactivate(mmu_t, struct thread *); 315static void mmu_booke_bootstrap(mmu_t, vm_offset_t, vm_offset_t); 316static void *mmu_booke_mapdev(mmu_t, vm_offset_t, vm_size_t); 317static void mmu_booke_unmapdev(mmu_t, vm_offset_t, vm_size_t); 318static vm_offset_t mmu_booke_kextract(mmu_t, vm_offset_t); 319static void mmu_booke_kenter(mmu_t, vm_offset_t, vm_offset_t); 320static void mmu_booke_kremove(mmu_t, vm_offset_t); 321static boolean_t mmu_booke_dev_direct_mapped(mmu_t, vm_offset_t, vm_size_t); 322static boolean_t mmu_booke_page_executable(mmu_t, vm_page_t); 323static vm_offset_t mmu_booke_dumpsys_map(mmu_t, struct pmap_md *, 324 vm_size_t, vm_size_t *); 325static void mmu_booke_dumpsys_unmap(mmu_t, struct pmap_md *, 326 vm_size_t, vm_offset_t); 327static struct pmap_md *mmu_booke_scan_md(mmu_t, struct pmap_md *); 328 329static 
mmu_method_t mmu_booke_methods[] = { 330 /* pmap dispatcher interface */ 331 MMUMETHOD(mmu_change_wiring, mmu_booke_change_wiring), 332 MMUMETHOD(mmu_clear_modify, mmu_booke_clear_modify), 333 MMUMETHOD(mmu_clear_reference, mmu_booke_clear_reference), 334 MMUMETHOD(mmu_copy, mmu_booke_copy), 335 MMUMETHOD(mmu_copy_page, mmu_booke_copy_page), 336 MMUMETHOD(mmu_enter, mmu_booke_enter), 337 MMUMETHOD(mmu_enter_object, mmu_booke_enter_object), 338 MMUMETHOD(mmu_enter_quick, mmu_booke_enter_quick), 339 MMUMETHOD(mmu_extract, mmu_booke_extract), 340 MMUMETHOD(mmu_extract_and_hold, mmu_booke_extract_and_hold), 341 MMUMETHOD(mmu_init, mmu_booke_init), 342 MMUMETHOD(mmu_is_modified, mmu_booke_is_modified), 343 MMUMETHOD(mmu_is_prefaultable, mmu_booke_is_prefaultable), 344 MMUMETHOD(mmu_ts_referenced, mmu_booke_ts_referenced), 345 MMUMETHOD(mmu_map, mmu_booke_map), 346 MMUMETHOD(mmu_mincore, mmu_booke_mincore), 347 MMUMETHOD(mmu_object_init_pt, mmu_booke_object_init_pt), 348 MMUMETHOD(mmu_page_exists_quick,mmu_booke_page_exists_quick), 349 MMUMETHOD(mmu_page_init, mmu_booke_page_init), 350 MMUMETHOD(mmu_page_wired_mappings, mmu_booke_page_wired_mappings), 351 MMUMETHOD(mmu_pinit, mmu_booke_pinit), 352 MMUMETHOD(mmu_pinit0, mmu_booke_pinit0), 353 MMUMETHOD(mmu_protect, mmu_booke_protect), 354 MMUMETHOD(mmu_qenter, mmu_booke_qenter), 355 MMUMETHOD(mmu_qremove, mmu_booke_qremove), 356 MMUMETHOD(mmu_release, mmu_booke_release), 357 MMUMETHOD(mmu_remove, mmu_booke_remove), 358 MMUMETHOD(mmu_remove_all, mmu_booke_remove_all), 359 MMUMETHOD(mmu_remove_write, mmu_booke_remove_write), 360 MMUMETHOD(mmu_zero_page, mmu_booke_zero_page), 361 MMUMETHOD(mmu_zero_page_area, mmu_booke_zero_page_area), 362 MMUMETHOD(mmu_zero_page_idle, mmu_booke_zero_page_idle), 363 MMUMETHOD(mmu_activate, mmu_booke_activate), 364 MMUMETHOD(mmu_deactivate, mmu_booke_deactivate), 365 366 /* Internal interfaces */ 367 MMUMETHOD(mmu_bootstrap, mmu_booke_bootstrap), 368 MMUMETHOD(mmu_dev_direct_mapped,mmu_booke_dev_direct_mapped), 369 MMUMETHOD(mmu_mapdev, mmu_booke_mapdev), 370 MMUMETHOD(mmu_kenter, mmu_booke_kenter), 371 MMUMETHOD(mmu_kextract, mmu_booke_kextract), 372/* MMUMETHOD(mmu_kremove, mmu_booke_kremove), */ 373 MMUMETHOD(mmu_page_executable, mmu_booke_page_executable), 374 MMUMETHOD(mmu_unmapdev, mmu_booke_unmapdev), 375 376 /* dumpsys() support */ 377 MMUMETHOD(mmu_dumpsys_map, mmu_booke_dumpsys_map), 378 MMUMETHOD(mmu_dumpsys_unmap, mmu_booke_dumpsys_unmap), 379 MMUMETHOD(mmu_scan_md, mmu_booke_scan_md), 380 381 { 0, 0 } 382}; 383 384static mmu_def_t booke_mmu = { 385 MMU_TYPE_BOOKE, 386 mmu_booke_methods, 387 0 388}; 389MMU_DEF(booke_mmu); 390 391static inline void 392tlb_miss_lock(void) 393{ 394#ifdef SMP 395 struct pcpu *pc; 396 397 if (!smp_started) 398 return; 399 400 SLIST_FOREACH(pc, &cpuhead, pc_allcpu) { 401 if (pc != pcpup) { 402 403 CTR3(KTR_PMAP, "%s: tlb miss LOCK of CPU=%d, " 404 "tlb_lock=%p", __func__, pc->pc_cpuid, pc->pc_booke_tlb_lock); 405 406 KASSERT((pc->pc_cpuid != PCPU_GET(cpuid)), 407 ("tlb_miss_lock: tried to lock self")); 408 409 tlb_lock(pc->pc_booke_tlb_lock); 410 411 CTR1(KTR_PMAP, "%s: locked", __func__); 412 } 413 } 414#endif 415} 416 417static inline void 418tlb_miss_unlock(void) 419{ 420#ifdef SMP 421 struct pcpu *pc; 422 423 if (!smp_started) 424 return; 425 426 SLIST_FOREACH(pc, &cpuhead, pc_allcpu) { 427 if (pc != pcpup) { 428 CTR2(KTR_PMAP, "%s: tlb miss UNLOCK of CPU=%d", 429 __func__, pc->pc_cpuid); 430 431 tlb_unlock(pc->pc_booke_tlb_lock); 432 433 CTR1(KTR_PMAP, "%s: unlocked", 
__func__); 434 } 435 } 436#endif 437} 438 439/* Return number of entries in TLB0. */ 440static __inline void 441tlb0_get_tlbconf(void) 442{ 443 uint32_t tlb0_cfg; 444 445 tlb0_cfg = mfspr(SPR_TLB0CFG); 446 tlb0_entries = tlb0_cfg & TLBCFG_NENTRY_MASK; 447 tlb0_ways = (tlb0_cfg & TLBCFG_ASSOC_MASK) >> TLBCFG_ASSOC_SHIFT; 448 tlb0_entries_per_way = tlb0_entries / tlb0_ways; 449} 450 451/* Initialize pool of kva ptbl buffers. */ 452static void 453ptbl_init(void) 454{ 455 int i; 456 457 CTR3(KTR_PMAP, "%s: s (ptbl_bufs = 0x%08x size 0x%08x)", __func__, 458 (uint32_t)ptbl_bufs, sizeof(struct ptbl_buf) * PTBL_BUFS); 459 CTR3(KTR_PMAP, "%s: s (ptbl_buf_pool_vabase = 0x%08x size = 0x%08x)", 460 __func__, ptbl_buf_pool_vabase, PTBL_BUFS * PTBL_PAGES * PAGE_SIZE); 461 462 mtx_init(&ptbl_buf_freelist_lock, "ptbl bufs lock", NULL, MTX_DEF); 463 TAILQ_INIT(&ptbl_buf_freelist); 464 465 for (i = 0; i < PTBL_BUFS; i++) { 466 ptbl_bufs[i].kva = ptbl_buf_pool_vabase + i * PTBL_PAGES * PAGE_SIZE; 467 TAILQ_INSERT_TAIL(&ptbl_buf_freelist, &ptbl_bufs[i], link); 468 } 469} 470 471/* Get a ptbl_buf from the freelist. */ 472static struct ptbl_buf * 473ptbl_buf_alloc(void) 474{ 475 struct ptbl_buf *buf; 476 477 mtx_lock(&ptbl_buf_freelist_lock); 478 buf = TAILQ_FIRST(&ptbl_buf_freelist); 479 if (buf != NULL) 480 TAILQ_REMOVE(&ptbl_buf_freelist, buf, link); 481 mtx_unlock(&ptbl_buf_freelist_lock); 482 483 CTR2(KTR_PMAP, "%s: buf = %p", __func__, buf); 484 485 return (buf); 486} 487 488/* Return ptbl buff to free pool. */ 489static void 490ptbl_buf_free(struct ptbl_buf *buf) 491{ 492 493 CTR2(KTR_PMAP, "%s: buf = %p", __func__, buf); 494 495 mtx_lock(&ptbl_buf_freelist_lock); 496 TAILQ_INSERT_TAIL(&ptbl_buf_freelist, buf, link); 497 mtx_unlock(&ptbl_buf_freelist_lock); 498} 499 500/* 501 * Search the list of allocated ptbl bufs and find on list of allocated ptbls 502 */ 503static void 504ptbl_free_pmap_ptbl(pmap_t pmap, pte_t *ptbl) 505{ 506 struct ptbl_buf *pbuf; 507 508 CTR2(KTR_PMAP, "%s: ptbl = %p", __func__, ptbl); 509 510 PMAP_LOCK_ASSERT(pmap, MA_OWNED); 511 512 TAILQ_FOREACH(pbuf, &pmap->pm_ptbl_list, link) 513 if (pbuf->kva == (vm_offset_t)ptbl) { 514 /* Remove from pmap ptbl buf list. */ 515 TAILQ_REMOVE(&pmap->pm_ptbl_list, pbuf, link); 516 517 /* Free corresponding ptbl buf. */ 518 ptbl_buf_free(pbuf); 519 break; 520 } 521} 522 523/* Allocate page table. */ 524static pte_t * 525ptbl_alloc(mmu_t mmu, pmap_t pmap, unsigned int pdir_idx) 526{ 527 vm_page_t mtbl[PTBL_PAGES]; 528 vm_page_t m; 529 struct ptbl_buf *pbuf; 530 unsigned int pidx; 531 pte_t *ptbl; 532 int i; 533 534 CTR4(KTR_PMAP, "%s: pmap = %p su = %d pdir_idx = %d", __func__, pmap, 535 (pmap == kernel_pmap), pdir_idx); 536 537 KASSERT((pdir_idx <= (VM_MAXUSER_ADDRESS / PDIR_SIZE)), 538 ("ptbl_alloc: invalid pdir_idx")); 539 KASSERT((pmap->pm_pdir[pdir_idx] == NULL), 540 ("pte_alloc: valid ptbl entry exists!")); 541 542 pbuf = ptbl_buf_alloc(); 543 if (pbuf == NULL) 544 panic("pte_alloc: couldn't alloc kernel virtual memory"); 545 546 ptbl = (pte_t *)pbuf->kva; 547 548 CTR2(KTR_PMAP, "%s: ptbl kva = %p", __func__, ptbl); 549 550 /* Allocate ptbl pages, this will sleep! */ 551 for (i = 0; i < PTBL_PAGES; i++) { 552 pidx = (PTBL_PAGES * pdir_idx) + i; 553 while ((m = vm_page_alloc(NULL, pidx, 554 VM_ALLOC_NOOBJ | VM_ALLOC_WIRED)) == NULL) { 555 556 PMAP_UNLOCK(pmap); 557 vm_page_unlock_queues(); 558 VM_WAIT; 559 vm_page_lock_queues(); 560 PMAP_LOCK(pmap); 561 } 562 mtbl[i] = m; 563 } 564 565 /* Map allocated pages into kernel_pmap. 
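	 * Sketch of the result: the PTBL_PAGES frames collected in mtbl[]
	 * become one virtually contiguous run starting at pbuf->kva, so the
	 * new page table can be treated as a plain pte_t array below.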
*/ 566 mmu_booke_qenter(mmu, (vm_offset_t)ptbl, mtbl, PTBL_PAGES); 567 568 /* Zero whole ptbl. */ 569 bzero((caddr_t)ptbl, PTBL_PAGES * PAGE_SIZE); 570 571 /* Add pbuf to the pmap ptbl bufs list. */ 572 TAILQ_INSERT_TAIL(&pmap->pm_ptbl_list, pbuf, link); 573 574 return (ptbl); 575} 576 577/* Free ptbl pages and invalidate pdir entry. */ 578static void 579ptbl_free(mmu_t mmu, pmap_t pmap, unsigned int pdir_idx) 580{ 581 pte_t *ptbl; 582 vm_paddr_t pa; 583 vm_offset_t va; 584 vm_page_t m; 585 int i; 586 587 CTR4(KTR_PMAP, "%s: pmap = %p su = %d pdir_idx = %d", __func__, pmap, 588 (pmap == kernel_pmap), pdir_idx); 589 590 KASSERT((pdir_idx <= (VM_MAXUSER_ADDRESS / PDIR_SIZE)), 591 ("ptbl_free: invalid pdir_idx")); 592 593 ptbl = pmap->pm_pdir[pdir_idx]; 594 595 CTR2(KTR_PMAP, "%s: ptbl = %p", __func__, ptbl); 596 597 KASSERT((ptbl != NULL), ("ptbl_free: null ptbl")); 598 599 /* 600 * Invalidate the pdir entry as soon as possible, so that other CPUs 601 * don't attempt to look up the page tables we are releasing. 602 */ 603 mtx_lock_spin(&tlbivax_mutex); 604 tlb_miss_lock(); 605 606 pmap->pm_pdir[pdir_idx] = NULL; 607 608 tlb_miss_unlock(); 609 mtx_unlock_spin(&tlbivax_mutex); 610 611 for (i = 0; i < PTBL_PAGES; i++) { 612 va = ((vm_offset_t)ptbl + (i * PAGE_SIZE)); 613 pa = pte_vatopa(mmu, kernel_pmap, va); 614 m = PHYS_TO_VM_PAGE(pa); 615 vm_page_free_zero(m); 616 atomic_subtract_int(&cnt.v_wire_count, 1); 617 mmu_booke_kremove(mmu, va); 618 } 619 620 ptbl_free_pmap_ptbl(pmap, ptbl); 621} 622 623/* 624 * Decrement ptbl pages hold count and attempt to free ptbl pages. 625 * Called when removing pte entry from ptbl. 626 * 627 * Return 1 if ptbl pages were freed. 628 */ 629static int 630ptbl_unhold(mmu_t mmu, pmap_t pmap, unsigned int pdir_idx) 631{ 632 pte_t *ptbl; 633 vm_paddr_t pa; 634 vm_page_t m; 635 int i; 636 637 CTR4(KTR_PMAP, "%s: pmap = %p su = %d pdir_idx = %d", __func__, pmap, 638 (pmap == kernel_pmap), pdir_idx); 639 640 KASSERT((pdir_idx <= (VM_MAXUSER_ADDRESS / PDIR_SIZE)), 641 ("ptbl_unhold: invalid pdir_idx")); 642 KASSERT((pmap != kernel_pmap), 643 ("ptbl_unhold: unholding kernel ptbl!")); 644 645 ptbl = pmap->pm_pdir[pdir_idx]; 646 647 //debugf("ptbl_unhold: ptbl = 0x%08x\n", (u_int32_t)ptbl); 648 KASSERT(((vm_offset_t)ptbl >= VM_MIN_KERNEL_ADDRESS), 649 ("ptbl_unhold: non kva ptbl")); 650 651 /* decrement hold count */ 652 for (i = 0; i < PTBL_PAGES; i++) { 653 pa = pte_vatopa(mmu, kernel_pmap, 654 (vm_offset_t)ptbl + (i * PAGE_SIZE)); 655 m = PHYS_TO_VM_PAGE(pa); 656 m->wire_count--; 657 } 658 659 /* 660 * Free ptbl pages if there are no pte etries in this ptbl. 661 * wire_count has the same value for all ptbl pages, so check the last 662 * page. 663 */ 664 if (m->wire_count == 0) { 665 ptbl_free(mmu, pmap, pdir_idx); 666 667 //debugf("ptbl_unhold: e (freed ptbl)\n"); 668 return (1); 669 } 670 671 return (0); 672} 673 674/* 675 * Increment hold count for ptbl pages. This routine is used when a new pte 676 * entry is being inserted into the ptbl. 
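 * The hold count lives in the wire_count field of each of the ptbl's
 * constituent pages; ptbl_unhold() above drops it again and frees the
 * whole ptbl via ptbl_free() once the last PTE is gone.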
677 */ 678static void 679ptbl_hold(mmu_t mmu, pmap_t pmap, unsigned int pdir_idx) 680{ 681 vm_paddr_t pa; 682 pte_t *ptbl; 683 vm_page_t m; 684 int i; 685 686 CTR3(KTR_PMAP, "%s: pmap = %p pdir_idx = %d", __func__, pmap, 687 pdir_idx); 688 689 KASSERT((pdir_idx <= (VM_MAXUSER_ADDRESS / PDIR_SIZE)), 690 ("ptbl_hold: invalid pdir_idx")); 691 KASSERT((pmap != kernel_pmap), 692 ("ptbl_hold: holding kernel ptbl!")); 693 694 ptbl = pmap->pm_pdir[pdir_idx]; 695 696 KASSERT((ptbl != NULL), ("ptbl_hold: null ptbl")); 697 698 for (i = 0; i < PTBL_PAGES; i++) { 699 pa = pte_vatopa(mmu, kernel_pmap, 700 (vm_offset_t)ptbl + (i * PAGE_SIZE)); 701 m = PHYS_TO_VM_PAGE(pa); 702 m->wire_count++; 703 } 704} 705 706/* Allocate pv_entry structure. */ 707pv_entry_t 708pv_alloc(void) 709{ 710 pv_entry_t pv; 711 712 pv_entry_count++; 713 if (pv_entry_count > pv_entry_high_water) 714 pagedaemon_wakeup(); 715 pv = uma_zalloc(pvzone, M_NOWAIT); 716 717 return (pv); 718} 719 720/* Free pv_entry structure. */ 721static __inline void 722pv_free(pv_entry_t pve) 723{ 724 725 pv_entry_count--; 726 uma_zfree(pvzone, pve); 727} 728 729 730/* Allocate and initialize pv_entry structure. */ 731static void 732pv_insert(pmap_t pmap, vm_offset_t va, vm_page_t m) 733{ 734 pv_entry_t pve; 735 736 //int su = (pmap == kernel_pmap); 737 //debugf("pv_insert: s (su = %d pmap = 0x%08x va = 0x%08x m = 0x%08x)\n", su, 738 // (u_int32_t)pmap, va, (u_int32_t)m); 739 740 pve = pv_alloc(); 741 if (pve == NULL) 742 panic("pv_insert: no pv entries!"); 743 744 pve->pv_pmap = pmap; 745 pve->pv_va = va; 746 747 /* add to pv_list */ 748 PMAP_LOCK_ASSERT(pmap, MA_OWNED); 749 mtx_assert(&vm_page_queue_mtx, MA_OWNED); 750 751 TAILQ_INSERT_TAIL(&m->md.pv_list, pve, pv_link); 752 753 //debugf("pv_insert: e\n"); 754} 755 756/* Destroy pv entry. */ 757static void 758pv_remove(pmap_t pmap, vm_offset_t va, vm_page_t m) 759{ 760 pv_entry_t pve; 761 762 //int su = (pmap == kernel_pmap); 763 //debugf("pv_remove: s (su = %d pmap = 0x%08x va = 0x%08x)\n", su, (u_int32_t)pmap, va); 764 765 PMAP_LOCK_ASSERT(pmap, MA_OWNED); 766 mtx_assert(&vm_page_queue_mtx, MA_OWNED); 767 768 /* find pv entry */ 769 TAILQ_FOREACH(pve, &m->md.pv_list, pv_link) { 770 if ((pmap == pve->pv_pmap) && (va == pve->pv_va)) { 771 /* remove from pv_list */ 772 TAILQ_REMOVE(&m->md.pv_list, pve, pv_link); 773 if (TAILQ_EMPTY(&m->md.pv_list)) 774 vm_page_flag_clear(m, PG_WRITEABLE); 775 776 /* free pv entry struct */ 777 pv_free(pve); 778 break; 779 } 780 } 781 782 //debugf("pv_remove: e\n"); 783} 784 785/* 786 * Clean pte entry, try to free page table page if requested. 787 * 788 * Return 1 if ptbl pages were freed, otherwise return 0. 789 */ 790static int 791pte_remove(mmu_t mmu, pmap_t pmap, vm_offset_t va, uint8_t flags) 792{ 793 unsigned int pdir_idx = PDIR_IDX(va); 794 unsigned int ptbl_idx = PTBL_IDX(va); 795 vm_page_t m; 796 pte_t *ptbl; 797 pte_t *pte; 798 799 //int su = (pmap == kernel_pmap); 800 //debugf("pte_remove: s (su = %d pmap = 0x%08x va = 0x%08x flags = %d)\n", 801 // su, (u_int32_t)pmap, va, flags); 802 803 ptbl = pmap->pm_pdir[pdir_idx]; 804 KASSERT(ptbl, ("pte_remove: null ptbl")); 805 806 pte = &ptbl[ptbl_idx]; 807 808 if (pte == NULL || !PTE_ISVALID(pte)) 809 return (0); 810 811 if (PTE_ISWIRED(pte)) 812 pmap->pm_stats.wired_count--; 813 814 /* Handle managed entry. */ 815 if (PTE_ISMANAGED(pte)) { 816 /* Get vm_page_t for mapped pte. 
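		 * The modified/referenced bits accumulated in the PTE are
		 * pushed back to the vm_page (vm_page_dirty(), PG_REFERENCED)
		 * before the pv entry is torn down.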
*/ 817 m = PHYS_TO_VM_PAGE(PTE_PA(pte)); 818 819 if (PTE_ISMODIFIED(pte)) 820 vm_page_dirty(m); 821 822 if (PTE_ISREFERENCED(pte)) 823 vm_page_flag_set(m, PG_REFERENCED); 824 825 pv_remove(pmap, va, m); 826 } 827 828 mtx_lock_spin(&tlbivax_mutex); 829 tlb_miss_lock(); 830 831 tlb0_flush_entry(va); 832 pte->flags = 0; 833 pte->rpn = 0; 834 835 tlb_miss_unlock(); 836 mtx_unlock_spin(&tlbivax_mutex); 837 838 pmap->pm_stats.resident_count--; 839 840 if (flags & PTBL_UNHOLD) { 841 //debugf("pte_remove: e (unhold)\n"); 842 return (ptbl_unhold(mmu, pmap, pdir_idx)); 843 } 844 845 //debugf("pte_remove: e\n"); 846 return (0); 847} 848 849/* 850 * Insert PTE for a given page and virtual address. 851 */ 852static void 853pte_enter(mmu_t mmu, pmap_t pmap, vm_page_t m, vm_offset_t va, uint32_t flags) 854{ 855 unsigned int pdir_idx = PDIR_IDX(va); 856 unsigned int ptbl_idx = PTBL_IDX(va); 857 pte_t *ptbl, *pte; 858 859 CTR4(KTR_PMAP, "%s: su = %d pmap = %p va = %p", __func__, 860 pmap == kernel_pmap, pmap, va); 861 862 /* Get the page table pointer. */ 863 ptbl = pmap->pm_pdir[pdir_idx]; 864 865 if (ptbl == NULL) { 866 /* Allocate page table pages. */ 867 ptbl = ptbl_alloc(mmu, pmap, pdir_idx); 868 } else { 869 /* 870 * Check if there is valid mapping for requested 871 * va, if there is, remove it. 872 */ 873 pte = &pmap->pm_pdir[pdir_idx][ptbl_idx]; 874 if (PTE_ISVALID(pte)) { 875 pte_remove(mmu, pmap, va, PTBL_HOLD); 876 } else { 877 /* 878 * pte is not used, increment hold count 879 * for ptbl pages. 880 */ 881 if (pmap != kernel_pmap) 882 ptbl_hold(mmu, pmap, pdir_idx); 883 } 884 } 885 886 /* 887 * Insert pv_entry into pv_list for mapped page if part of managed 888 * memory. 889 */ 890 if ((m->flags & PG_FICTITIOUS) == 0) { 891 if ((m->flags & PG_UNMANAGED) == 0) { 892 flags |= PTE_MANAGED; 893 894 /* Create and insert pv entry. */ 895 pv_insert(pmap, va, m); 896 } 897 } 898 899 pmap->pm_stats.resident_count++; 900 901 mtx_lock_spin(&tlbivax_mutex); 902 tlb_miss_lock(); 903 904 tlb0_flush_entry(va); 905 if (pmap->pm_pdir[pdir_idx] == NULL) { 906 /* 907 * If we just allocated a new page table, hook it in 908 * the pdir. 909 */ 910 pmap->pm_pdir[pdir_idx] = ptbl; 911 } 912 pte = &(pmap->pm_pdir[pdir_idx][ptbl_idx]); 913 pte->rpn = VM_PAGE_TO_PHYS(m) & ~PTE_PA_MASK; 914 pte->flags |= (PTE_VALID | flags); 915 916 tlb_miss_unlock(); 917 mtx_unlock_spin(&tlbivax_mutex); 918} 919 920/* Return the pa for the given pmap/va. */ 921static vm_paddr_t 922pte_vatopa(mmu_t mmu, pmap_t pmap, vm_offset_t va) 923{ 924 vm_paddr_t pa = 0; 925 pte_t *pte; 926 927 pte = pte_find(mmu, pmap, va); 928 if ((pte != NULL) && PTE_ISVALID(pte)) 929 pa = (PTE_PA(pte) | (va & PTE_PA_MASK)); 930 return (pa); 931} 932 933/* Get a pointer to a PTE in a page table. */ 934static pte_t * 935pte_find(mmu_t mmu, pmap_t pmap, vm_offset_t va) 936{ 937 unsigned int pdir_idx = PDIR_IDX(va); 938 unsigned int ptbl_idx = PTBL_IDX(va); 939 940 KASSERT((pmap != NULL), ("pte_find: invalid pmap")); 941 942 if (pmap->pm_pdir[pdir_idx]) 943 return (&(pmap->pm_pdir[pdir_idx][ptbl_idx])); 944 945 return (NULL); 946} 947 948/**************************************************************************/ 949/* PMAP related */ 950/**************************************************************************/ 951 952/* 953 * This is called during e500_init, before the system is really initialized. 
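 *
 * Rough sketch of what is carved out of memory right after the kernel
 * image below (the exact sizes are computed at run time):
 *
 *	data_end -> msgbuf (MSGBUF_SIZE)
 *	         -> ptbl_bufs[] (PTBL_BUFS * sizeof(struct ptbl_buf))
 *	         -> kernel_pdir (kernel_ptbls * PTBL_PAGES * PAGE_SIZE)
 *	         -> rounded up so the range stays covered by TLB1 entries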
954 */ 955static void 956mmu_booke_bootstrap(mmu_t mmu, vm_offset_t start, vm_offset_t kernelend) 957{ 958 vm_offset_t phys_kernelend; 959 struct mem_region *mp, *mp1; 960 int cnt, i, j; 961 u_int s, e, sz; 962 u_int phys_avail_count; 963 vm_size_t physsz, hwphyssz, kstack0_sz; 964 vm_offset_t kernel_pdir, kstack0, va; 965 vm_paddr_t kstack0_phys; 966 pte_t *pte; 967 968 debugf("mmu_booke_bootstrap: entered\n"); 969 970 /* Initialize invalidation mutex */ 971 mtx_init(&tlbivax_mutex, "tlbivax", NULL, MTX_SPIN); 972 973 /* Read TLB0 size and associativity. */ 974 tlb0_get_tlbconf(); 975 976 /* Align kernel start and end address (kernel image). */ 977 kernstart = trunc_page(start); 978 data_start = round_page(kernelend); 979 kernsize = data_start - kernstart; 980 981 data_end = data_start; 982 983 /* Allocate space for the message buffer. */ 984 msgbufp = (struct msgbuf *)data_end; 985 data_end += MSGBUF_SIZE; 986 debugf(" msgbufp at 0x%08x end = 0x%08x\n", (uint32_t)msgbufp, 987 data_end); 988 989 data_end = round_page(data_end); 990 991 /* Allocate space for ptbl_bufs. */ 992 ptbl_bufs = (struct ptbl_buf *)data_end; 993 data_end += sizeof(struct ptbl_buf) * PTBL_BUFS; 994 debugf(" ptbl_bufs at 0x%08x end = 0x%08x\n", (uint32_t)ptbl_bufs, 995 data_end); 996 997 data_end = round_page(data_end); 998 999 /* Allocate PTE tables for kernel KVA. */ 1000 kernel_pdir = data_end; 1001 kernel_ptbls = (VM_MAX_KERNEL_ADDRESS - VM_MIN_KERNEL_ADDRESS + 1002 PDIR_SIZE - 1) / PDIR_SIZE; 1003 data_end += kernel_ptbls * PTBL_PAGES * PAGE_SIZE; 1004 debugf(" kernel ptbls: %d\n", kernel_ptbls); 1005 debugf(" kernel pdir at 0x%08x end = 0x%08x\n", kernel_pdir, data_end); 1006 1007 debugf(" data_end: 0x%08x\n", data_end); 1008 if (data_end - kernstart > 0x1000000) { 1009 data_end = (data_end + 0x3fffff) & ~0x3fffff; 1010 tlb1_mapin_region(kernstart + 0x1000000, 1011 kernload + 0x1000000, data_end - kernstart - 0x1000000); 1012 } else 1013 data_end = (data_end + 0xffffff) & ~0xffffff; 1014 1015 debugf(" updated data_end: 0x%08x\n", data_end); 1016 1017 kernsize += data_end - data_start; 1018 1019 /* 1020 * Clear the structures - note we can only do it safely after the 1021 * possible additional TLB1 translations are in place (above) so that 1022 * all range up to the currently calculated 'data_end' is covered. 1023 */ 1024 memset((void *)ptbl_bufs, 0, sizeof(struct ptbl_buf) * PTBL_SIZE); 1025 memset((void *)kernel_pdir, 0, kernel_ptbls * PTBL_PAGES * PAGE_SIZE); 1026 1027 /*******************************************************/ 1028 /* Set the start and end of kva. */ 1029 /*******************************************************/ 1030 virtual_avail = round_page(data_end); 1031 virtual_end = VM_MAX_KERNEL_ADDRESS; 1032 1033 /* Allocate KVA space for page zero/copy operations. */ 1034 zero_page_va = virtual_avail; 1035 virtual_avail += PAGE_SIZE; 1036 zero_page_idle_va = virtual_avail; 1037 virtual_avail += PAGE_SIZE; 1038 copy_page_src_va = virtual_avail; 1039 virtual_avail += PAGE_SIZE; 1040 copy_page_dst_va = virtual_avail; 1041 virtual_avail += PAGE_SIZE; 1042 debugf("zero_page_va = 0x%08x\n", zero_page_va); 1043 debugf("zero_page_idle_va = 0x%08x\n", zero_page_idle_va); 1044 debugf("copy_page_src_va = 0x%08x\n", copy_page_src_va); 1045 debugf("copy_page_dst_va = 0x%08x\n", copy_page_dst_va); 1046 1047 /* Initialize page zero/copy mutexes. 
*/ 1048 mtx_init(&zero_page_mutex, "mmu_booke_zero_page", NULL, MTX_DEF); 1049 mtx_init(©_page_mutex, "mmu_booke_copy_page", NULL, MTX_DEF); 1050 1051 /* Allocate KVA space for ptbl bufs. */ 1052 ptbl_buf_pool_vabase = virtual_avail; 1053 virtual_avail += PTBL_BUFS * PTBL_PAGES * PAGE_SIZE; 1054 debugf("ptbl_buf_pool_vabase = 0x%08x end = 0x%08x\n", 1055 ptbl_buf_pool_vabase, virtual_avail); 1056 1057 /* Calculate corresponding physical addresses for the kernel region. */ 1058 phys_kernelend = kernload + kernsize; 1059 debugf("kernel image and allocated data:\n"); 1060 debugf(" kernload = 0x%08x\n", kernload); 1061 debugf(" kernstart = 0x%08x\n", kernstart); 1062 debugf(" kernsize = 0x%08x\n", kernsize); 1063 1064 if (sizeof(phys_avail) / sizeof(phys_avail[0]) < availmem_regions_sz) 1065 panic("mmu_booke_bootstrap: phys_avail too small"); 1066 1067 /* 1068 * Remove kernel physical address range from avail regions list. Page 1069 * align all regions. Non-page aligned memory isn't very interesting 1070 * to us. Also, sort the entries for ascending addresses. 1071 */ 1072 1073 /* Retrieve phys/avail mem regions */ 1074 mem_regions(&physmem_regions, &physmem_regions_sz, 1075 &availmem_regions, &availmem_regions_sz); 1076 sz = 0; 1077 cnt = availmem_regions_sz; 1078 debugf("processing avail regions:\n"); 1079 for (mp = availmem_regions; mp->mr_size; mp++) { 1080 s = mp->mr_start; 1081 e = mp->mr_start + mp->mr_size; 1082 debugf(" %08x-%08x -> ", s, e); 1083 /* Check whether this region holds all of the kernel. */ 1084 if (s < kernload && e > phys_kernelend) { 1085 availmem_regions[cnt].mr_start = phys_kernelend; 1086 availmem_regions[cnt++].mr_size = e - phys_kernelend; 1087 e = kernload; 1088 } 1089 /* Look whether this regions starts within the kernel. */ 1090 if (s >= kernload && s < phys_kernelend) { 1091 if (e <= phys_kernelend) 1092 goto empty; 1093 s = phys_kernelend; 1094 } 1095 /* Now look whether this region ends within the kernel. */ 1096 if (e > kernload && e <= phys_kernelend) { 1097 if (s >= kernload) 1098 goto empty; 1099 e = kernload; 1100 } 1101 /* Now page align the start and size of the region. */ 1102 s = round_page(s); 1103 e = trunc_page(e); 1104 if (e < s) 1105 e = s; 1106 sz = e - s; 1107 debugf("%08x-%08x = %x\n", s, e, sz); 1108 1109 /* Check whether some memory is left here. */ 1110 if (sz == 0) { 1111 empty: 1112 memmove(mp, mp + 1, 1113 (cnt - (mp - availmem_regions)) * sizeof(*mp)); 1114 cnt--; 1115 mp--; 1116 continue; 1117 } 1118 1119 /* Do an insertion sort. 
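		 * Keep availmem_regions ordered by ascending mr_start: find
		 * the first already-processed region starting above 's',
		 * shift the tail up one slot with memmove() and drop the
		 * current region into the gap.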
*/ 1120 for (mp1 = availmem_regions; mp1 < mp; mp1++) 1121 if (s < mp1->mr_start) 1122 break; 1123 if (mp1 < mp) { 1124 memmove(mp1 + 1, mp1, (char *)mp - (char *)mp1); 1125 mp1->mr_start = s; 1126 mp1->mr_size = sz; 1127 } else { 1128 mp->mr_start = s; 1129 mp->mr_size = sz; 1130 } 1131 } 1132 availmem_regions_sz = cnt; 1133 1134 /*******************************************************/ 1135 /* Steal physical memory for kernel stack from the end */ 1136 /* of the first avail region */ 1137 /*******************************************************/ 1138 kstack0_sz = KSTACK_PAGES * PAGE_SIZE; 1139 kstack0_phys = availmem_regions[0].mr_start + 1140 availmem_regions[0].mr_size; 1141 kstack0_phys -= kstack0_sz; 1142 availmem_regions[0].mr_size -= kstack0_sz; 1143 1144 /*******************************************************/ 1145 /* Fill in phys_avail table, based on availmem_regions */ 1146 /*******************************************************/ 1147 phys_avail_count = 0; 1148 physsz = 0; 1149 hwphyssz = 0; 1150 TUNABLE_ULONG_FETCH("hw.physmem", (u_long *) &hwphyssz); 1151 1152 debugf("fill in phys_avail:\n"); 1153 for (i = 0, j = 0; i < availmem_regions_sz; i++, j += 2) { 1154 1155 debugf(" region: 0x%08x - 0x%08x (0x%08x)\n", 1156 availmem_regions[i].mr_start, 1157 availmem_regions[i].mr_start + 1158 availmem_regions[i].mr_size, 1159 availmem_regions[i].mr_size); 1160 1161 if (hwphyssz != 0 && 1162 (physsz + availmem_regions[i].mr_size) >= hwphyssz) { 1163 debugf(" hw.physmem adjust\n"); 1164 if (physsz < hwphyssz) { 1165 phys_avail[j] = availmem_regions[i].mr_start; 1166 phys_avail[j + 1] = 1167 availmem_regions[i].mr_start + 1168 hwphyssz - physsz; 1169 physsz = hwphyssz; 1170 phys_avail_count++; 1171 } 1172 break; 1173 } 1174 1175 phys_avail[j] = availmem_regions[i].mr_start; 1176 phys_avail[j + 1] = availmem_regions[i].mr_start + 1177 availmem_regions[i].mr_size; 1178 phys_avail_count++; 1179 physsz += availmem_regions[i].mr_size; 1180 } 1181 physmem = btoc(physsz); 1182 1183 /* Calculate the last available physical address. */ 1184 for (i = 0; phys_avail[i + 2] != 0; i += 2) 1185 ; 1186 Maxmem = powerpc_btop(phys_avail[i + 1]); 1187 1188 debugf("Maxmem = 0x%08lx\n", Maxmem); 1189 debugf("phys_avail_count = %d\n", phys_avail_count); 1190 debugf("physsz = 0x%08x physmem = %ld (0x%08lx)\n", physsz, physmem, 1191 physmem); 1192 1193 /*******************************************************/ 1194 /* Initialize (statically allocated) kernel pmap. */ 1195 /*******************************************************/ 1196 PMAP_LOCK_INIT(kernel_pmap); 1197 kptbl_min = VM_MIN_KERNEL_ADDRESS / PDIR_SIZE; 1198 1199 debugf("kernel_pmap = 0x%08x\n", (uint32_t)kernel_pmap); 1200 debugf("kptbl_min = %d, kernel_ptbls = %d\n", kptbl_min, kernel_ptbls); 1201 debugf("kernel pdir range: 0x%08x - 0x%08x\n", 1202 kptbl_min * PDIR_SIZE, (kptbl_min + kernel_ptbls) * PDIR_SIZE - 1); 1203 1204 /* Initialize kernel pdir */ 1205 for (i = 0; i < kernel_ptbls; i++) 1206 kernel_pmap->pm_pdir[kptbl_min + i] = 1207 (pte_t *)(kernel_pdir + (i * PAGE_SIZE * PTBL_PAGES)); 1208 1209 for (i = 0; i < MAXCPU; i++) { 1210 kernel_pmap->pm_tid[i] = TID_KERNEL; 1211 1212 /* Initialize each CPU's tidbusy entry 0 with kernel_pmap */ 1213 tidbusy[i][0] = kernel_pmap; 1214 } 1215 1216 /* 1217 * Fill in PTEs covering kernel code and data. They are not required 1218 * for address translation, as this area is covered by static TLB1 1219 * entries, but for pte_vatopa() to work correctly with kernel area 1220 * addresses. 
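	 *
	 * E.g. with these entries in place, pte_vatopa(kernel_pmap,
	 * KERNBASE + off) returns (kernload + off) for any offset within
	 * the mapped kernel image.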
1221 */ 1222 for (va = KERNBASE; va < data_end; va += PAGE_SIZE) { 1223 pte = &(kernel_pmap->pm_pdir[PDIR_IDX(va)][PTBL_IDX(va)]); 1224 pte->rpn = kernload + (va - KERNBASE); 1225 pte->flags = PTE_M | PTE_SR | PTE_SW | PTE_SX | PTE_WIRED | 1226 PTE_VALID; 1227 } 1228 /* Mark kernel_pmap active on all CPUs */ 1229 kernel_pmap->pm_active = ~0; 1230 1231 /*******************************************************/ 1232 /* Final setup */ 1233 /*******************************************************/ 1234 1235 /* Enter kstack0 into kernel map, provide guard page */ 1236 kstack0 = virtual_avail + KSTACK_GUARD_PAGES * PAGE_SIZE; 1237 thread0.td_kstack = kstack0; 1238 thread0.td_kstack_pages = KSTACK_PAGES; 1239 1240 debugf("kstack_sz = 0x%08x\n", kstack0_sz); 1241 debugf("kstack0_phys at 0x%08x - 0x%08x\n", 1242 kstack0_phys, kstack0_phys + kstack0_sz); 1243 debugf("kstack0 at 0x%08x - 0x%08x\n", kstack0, kstack0 + kstack0_sz); 1244 1245 virtual_avail += KSTACK_GUARD_PAGES * PAGE_SIZE + kstack0_sz; 1246 for (i = 0; i < KSTACK_PAGES; i++) { 1247 mmu_booke_kenter(mmu, kstack0, kstack0_phys); 1248 kstack0 += PAGE_SIZE; 1249 kstack0_phys += PAGE_SIZE; 1250 } 1251 1252 debugf("virtual_avail = %08x\n", virtual_avail); 1253 debugf("virtual_end = %08x\n", virtual_end); 1254 1255 debugf("mmu_booke_bootstrap: exit\n"); 1256} 1257 1258void 1259pmap_bootstrap_ap(volatile uint32_t *trcp __unused) 1260{ 1261 int i; 1262 1263 /* 1264 * Finish TLB1 configuration: the BSP already set up its TLB1 and we 1265 * have the snapshot of its contents in the s/w tlb1[] table, so use 1266 * these values directly to (re)program AP's TLB1 hardware. 1267 */ 1268 for (i = 0; i < tlb1_idx; i ++) { 1269 /* Skip invalid entries */ 1270 if (!(tlb1[i].mas1 & MAS1_VALID)) 1271 continue; 1272 1273 tlb1_write_entry(i); 1274 } 1275 1276 set_mas4_defaults(); 1277} 1278 1279/* 1280 * Get the physical page address for the given pmap/virtual address. 1281 */ 1282static vm_paddr_t 1283mmu_booke_extract(mmu_t mmu, pmap_t pmap, vm_offset_t va) 1284{ 1285 vm_paddr_t pa; 1286 1287 PMAP_LOCK(pmap); 1288 pa = pte_vatopa(mmu, pmap, va); 1289 PMAP_UNLOCK(pmap); 1290 1291 return (pa); 1292} 1293 1294/* 1295 * Extract the physical page address associated with the given 1296 * kernel virtual address. 1297 */ 1298static vm_paddr_t 1299mmu_booke_kextract(mmu_t mmu, vm_offset_t va) 1300{ 1301 1302 return (pte_vatopa(mmu, kernel_pmap, va)); 1303} 1304 1305/* 1306 * Initialize the pmap module. 1307 * Called by vm_init, to initialize any structures that the pmap 1308 * system needs to map virtual memory. 1309 */ 1310static void 1311mmu_booke_init(mmu_t mmu) 1312{ 1313 int shpgperproc = PMAP_SHPGPERPROC; 1314 1315 /* 1316 * Initialize the address space (zone) for the pv entries. Set a 1317 * high water mark so that the system can recover from excessive 1318 * numbers of pv entries. 1319 */ 1320 pvzone = uma_zcreate("PV ENTRY", sizeof(struct pv_entry), NULL, NULL, 1321 NULL, NULL, UMA_ALIGN_PTR, UMA_ZONE_VM | UMA_ZONE_NOFREE); 1322 1323 TUNABLE_INT_FETCH("vm.pmap.shpgperproc", &shpgperproc); 1324 pv_entry_max = shpgperproc * maxproc + cnt.v_page_count; 1325 1326 TUNABLE_INT_FETCH("vm.pmap.pv_entries", &pv_entry_max); 1327 pv_entry_high_water = 9 * (pv_entry_max / 10); 1328 1329 uma_zone_set_obj(pvzone, &pvzone_obj, pv_entry_max); 1330 1331 /* Pre-fill pvzone with initial number of pv entries. */ 1332 uma_prealloc(pvzone, PV_ENTRY_ZONE_MIN); 1333 1334 /* Initialize ptbl allocation. 
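	 * ptbl_init() threads the PTBL_BUFS buffers, whose KVA was set
	 * aside during bootstrap at ptbl_buf_pool_vabase, onto the free
	 * list consumed by ptbl_alloc().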
*/ 1335 ptbl_init(); 1336} 1337 1338/* 1339 * Map a list of wired pages into kernel virtual address space. This is 1340 * intended for temporary mappings which do not need page modification or 1341 * references recorded. Existing mappings in the region are overwritten. 1342 */ 1343static void 1344mmu_booke_qenter(mmu_t mmu, vm_offset_t sva, vm_page_t *m, int count) 1345{ 1346 vm_offset_t va; 1347 1348 va = sva; 1349 while (count-- > 0) { 1350 mmu_booke_kenter(mmu, va, VM_PAGE_TO_PHYS(*m)); 1351 va += PAGE_SIZE; 1352 m++; 1353 } 1354} 1355 1356/* 1357 * Remove page mappings from kernel virtual address space. Intended for 1358 * temporary mappings entered by mmu_booke_qenter. 1359 */ 1360static void 1361mmu_booke_qremove(mmu_t mmu, vm_offset_t sva, int count) 1362{ 1363 vm_offset_t va; 1364 1365 va = sva; 1366 while (count-- > 0) { 1367 mmu_booke_kremove(mmu, va); 1368 va += PAGE_SIZE; 1369 } 1370} 1371 1372/* 1373 * Map a wired page into kernel virtual address space. 1374 */ 1375static void 1376mmu_booke_kenter(mmu_t mmu, vm_offset_t va, vm_offset_t pa) 1377{ 1378 unsigned int pdir_idx = PDIR_IDX(va); 1379 unsigned int ptbl_idx = PTBL_IDX(va); 1380 uint32_t flags; 1381 pte_t *pte; 1382 1383 KASSERT(((va >= VM_MIN_KERNEL_ADDRESS) && 1384 (va <= VM_MAX_KERNEL_ADDRESS)), ("mmu_booke_kenter: invalid va")); 1385 1386 flags = 0; 1387 flags |= (PTE_SR | PTE_SW | PTE_SX | PTE_WIRED | PTE_VALID); 1388 flags |= PTE_M; 1389 1390 pte = &(kernel_pmap->pm_pdir[pdir_idx][ptbl_idx]); 1391 1392 mtx_lock_spin(&tlbivax_mutex); 1393 tlb_miss_lock(); 1394 1395 if (PTE_ISVALID(pte)) { 1396 1397 CTR1(KTR_PMAP, "%s: replacing entry!", __func__); 1398 1399 /* Flush entry from TLB0 */ 1400 tlb0_flush_entry(va); 1401 } 1402 1403 pte->rpn = pa & ~PTE_PA_MASK; 1404 pte->flags = flags; 1405 1406 //debugf("mmu_booke_kenter: pdir_idx = %d ptbl_idx = %d va=0x%08x " 1407 // "pa=0x%08x rpn=0x%08x flags=0x%08x\n", 1408 // pdir_idx, ptbl_idx, va, pa, pte->rpn, pte->flags); 1409 1410 /* Flush the real memory from the instruction cache. */ 1411 if ((flags & (PTE_I | PTE_G)) == 0) { 1412 __syncicache((void *)va, PAGE_SIZE); 1413 } 1414 1415 tlb_miss_unlock(); 1416 mtx_unlock_spin(&tlbivax_mutex); 1417} 1418 1419/* 1420 * Remove a page from kernel page table. 1421 */ 1422static void 1423mmu_booke_kremove(mmu_t mmu, vm_offset_t va) 1424{ 1425 unsigned int pdir_idx = PDIR_IDX(va); 1426 unsigned int ptbl_idx = PTBL_IDX(va); 1427 pte_t *pte; 1428 1429// CTR2(KTR_PMAP,("%s: s (va = 0x%08x)\n", __func__, va)); 1430 1431 KASSERT(((va >= VM_MIN_KERNEL_ADDRESS) && 1432 (va <= VM_MAX_KERNEL_ADDRESS)), 1433 ("mmu_booke_kremove: invalid va")); 1434 1435 pte = &(kernel_pmap->pm_pdir[pdir_idx][ptbl_idx]); 1436 1437 if (!PTE_ISVALID(pte)) { 1438 1439 CTR1(KTR_PMAP, "%s: invalid pte", __func__); 1440 1441 return; 1442 } 1443 1444 mtx_lock_spin(&tlbivax_mutex); 1445 tlb_miss_lock(); 1446 1447 /* Invalidate entry in TLB0, update PTE. */ 1448 tlb0_flush_entry(va); 1449 pte->flags = 0; 1450 pte->rpn = 0; 1451 1452 tlb_miss_unlock(); 1453 mtx_unlock_spin(&tlbivax_mutex); 1454} 1455 1456/* 1457 * Initialize pmap associated with process 0. 1458 */ 1459static void 1460mmu_booke_pinit0(mmu_t mmu, pmap_t pmap) 1461{ 1462 1463 mmu_booke_pinit(mmu, pmap); 1464 PCPU_SET(curpmap, pmap); 1465} 1466 1467/* 1468 * Initialize a preallocated and zeroed pmap structure, 1469 * such as one in a vmspace structure. 
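 *
 * A fresh pmap starts with no TIDs assigned (TID_NONE on every CPU), an
 * empty page directory and an empty ptbl buf list; a TID is allocated
 * lazily in mmu_booke_activate().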
1470 */ 1471static void 1472mmu_booke_pinit(mmu_t mmu, pmap_t pmap) 1473{ 1474 int i; 1475 1476 CTR4(KTR_PMAP, "%s: pmap = %p, proc %d '%s'", __func__, pmap, 1477 curthread->td_proc->p_pid, curthread->td_proc->p_comm); 1478 1479 KASSERT((pmap != kernel_pmap), ("pmap_pinit: initializing kernel_pmap")); 1480 1481 PMAP_LOCK_INIT(pmap); 1482 for (i = 0; i < MAXCPU; i++) 1483 pmap->pm_tid[i] = TID_NONE; 1484 pmap->pm_active = 0; 1485 bzero(&pmap->pm_stats, sizeof(pmap->pm_stats)); 1486 bzero(&pmap->pm_pdir, sizeof(pte_t *) * PDIR_NENTRIES); 1487 TAILQ_INIT(&pmap->pm_ptbl_list); 1488} 1489 1490/* 1491 * Release any resources held by the given physical map. 1492 * Called when a pmap initialized by mmu_booke_pinit is being released. 1493 * Should only be called if the map contains no valid mappings. 1494 */ 1495static void 1496mmu_booke_release(mmu_t mmu, pmap_t pmap) 1497{ 1498 1499 printf("mmu_booke_release: s\n"); 1500 1501 KASSERT(pmap->pm_stats.resident_count == 0, 1502 ("pmap_release: pmap resident count %ld != 0", 1503 pmap->pm_stats.resident_count)); 1504 1505 PMAP_LOCK_DESTROY(pmap); 1506} 1507 1508/* 1509 * Insert the given physical page at the specified virtual address in the 1510 * target physical map with the protection requested. If specified the page 1511 * will be wired down. 1512 */ 1513static void 1514mmu_booke_enter(mmu_t mmu, pmap_t pmap, vm_offset_t va, vm_page_t m, 1515 vm_prot_t prot, boolean_t wired) 1516{ 1517 1518 vm_page_lock_queues(); 1519 PMAP_LOCK(pmap); 1520 mmu_booke_enter_locked(mmu, pmap, va, m, prot, wired); 1521 vm_page_unlock_queues(); 1522 PMAP_UNLOCK(pmap); 1523} 1524 1525static void 1526mmu_booke_enter_locked(mmu_t mmu, pmap_t pmap, vm_offset_t va, vm_page_t m, 1527 vm_prot_t prot, boolean_t wired) 1528{ 1529 pte_t *pte; 1530 vm_paddr_t pa; 1531 uint32_t flags; 1532 int su, sync; 1533 1534 pa = VM_PAGE_TO_PHYS(m); 1535 su = (pmap == kernel_pmap); 1536 sync = 0; 1537 1538 //debugf("mmu_booke_enter_locked: s (pmap=0x%08x su=%d tid=%d m=0x%08x va=0x%08x " 1539 // "pa=0x%08x prot=0x%08x wired=%d)\n", 1540 // (u_int32_t)pmap, su, pmap->pm_tid, 1541 // (u_int32_t)m, va, pa, prot, wired); 1542 1543 if (su) { 1544 KASSERT(((va >= virtual_avail) && 1545 (va <= VM_MAX_KERNEL_ADDRESS)), 1546 ("mmu_booke_enter_locked: kernel pmap, non kernel va")); 1547 } else { 1548 KASSERT((va <= VM_MAXUSER_ADDRESS), 1549 ("mmu_booke_enter_locked: user pmap, non user va")); 1550 } 1551 1552 PMAP_LOCK_ASSERT(pmap, MA_OWNED); 1553 1554 /* 1555 * If there is an existing mapping, and the physical address has not 1556 * changed, must be protection or wiring change. 1557 */ 1558 if (((pte = pte_find(mmu, pmap, va)) != NULL) && 1559 (PTE_ISVALID(pte)) && (PTE_PA(pte) == pa)) { 1560 1561 /* 1562 * Before actually updating pte->flags we calculate and 1563 * prepare its new value in a helper var. 1564 */ 1565 flags = pte->flags; 1566 flags &= ~(PTE_UW | PTE_UX | PTE_SW | PTE_SX | PTE_MODIFIED); 1567 1568 /* Wiring change, just update stats. */ 1569 if (wired) { 1570 if (!PTE_ISWIRED(pte)) { 1571 flags |= PTE_WIRED; 1572 pmap->pm_stats.wired_count++; 1573 } 1574 } else { 1575 if (PTE_ISWIRED(pte)) { 1576 flags &= ~PTE_WIRED; 1577 pmap->pm_stats.wired_count--; 1578 } 1579 } 1580 1581 if (prot & VM_PROT_WRITE) { 1582 /* Add write permissions. */ 1583 flags |= PTE_SW; 1584 if (!su) 1585 flags |= PTE_UW; 1586 1587 vm_page_flag_set(m, PG_WRITEABLE); 1588 } else { 1589 /* Handle modified pages, sense modify status. 
*/ 1590 1591 /* 1592 * The PTE_MODIFIED flag could be set by underlying 1593 * TLB misses since we last read it (above), possibly 1594 * other CPUs could update it so we check in the PTE 1595 * directly rather than rely on that saved local flags 1596 * copy. 1597 */ 1598 if (PTE_ISMODIFIED(pte)) 1599 vm_page_dirty(m); 1600 } 1601 1602 if (prot & VM_PROT_EXECUTE) { 1603 flags |= PTE_SX; 1604 if (!su) 1605 flags |= PTE_UX; 1606 1607 /* 1608 * Check existing flags for execute permissions: if we 1609 * are turning execute permissions on, icache should 1610 * be flushed. 1611 */ 1612 if ((flags & (PTE_UX | PTE_SX)) == 0) 1613 sync++; 1614 } 1615 1616 flags &= ~PTE_REFERENCED; 1617 1618 /* 1619 * The new flags value is all calculated -- only now actually 1620 * update the PTE. 1621 */ 1622 mtx_lock_spin(&tlbivax_mutex); 1623 tlb_miss_lock(); 1624 1625 tlb0_flush_entry(va); 1626 pte->flags = flags; 1627 1628 tlb_miss_unlock(); 1629 mtx_unlock_spin(&tlbivax_mutex); 1630 1631 } else { 1632 /* 1633 * If there is an existing mapping, but it's for a different 1634 * physical address, pte_enter() will delete the old mapping. 1635 */ 1636 //if ((pte != NULL) && PTE_ISVALID(pte)) 1637 // debugf("mmu_booke_enter_locked: replace\n"); 1638 //else 1639 // debugf("mmu_booke_enter_locked: new\n"); 1640 1641 /* Now set up the flags and install the new mapping. */ 1642 flags = (PTE_SR | PTE_VALID); 1643 flags |= PTE_M; 1644 1645 if (!su) 1646 flags |= PTE_UR; 1647 1648 if (prot & VM_PROT_WRITE) { 1649 flags |= PTE_SW; 1650 if (!su) 1651 flags |= PTE_UW; 1652 1653 vm_page_flag_set(m, PG_WRITEABLE); 1654 } 1655 1656 if (prot & VM_PROT_EXECUTE) { 1657 flags |= PTE_SX; 1658 if (!su) 1659 flags |= PTE_UX; 1660 } 1661 1662 /* If its wired update stats. */ 1663 if (wired) { 1664 pmap->pm_stats.wired_count++; 1665 flags |= PTE_WIRED; 1666 } 1667 1668 pte_enter(mmu, pmap, m, va, flags); 1669 1670 /* Flush the real memory from the instruction cache. */ 1671 if (prot & VM_PROT_EXECUTE) 1672 sync++; 1673 } 1674 1675 if (sync && (su || pmap == PCPU_GET(curpmap))) { 1676 __syncicache((void *)va, PAGE_SIZE); 1677 sync = 0; 1678 } 1679 1680 if (sync) { 1681 /* Create a temporary mapping. */ 1682 pmap = PCPU_GET(curpmap); 1683 1684 va = 0; 1685 pte = pte_find(mmu, pmap, va); 1686 KASSERT(pte == NULL, ("%s:%d", __func__, __LINE__)); 1687 1688 flags = PTE_SR | PTE_VALID | PTE_UR | PTE_M; 1689 1690 pte_enter(mmu, pmap, m, va, flags); 1691 __syncicache((void *)va, PAGE_SIZE); 1692 pte_remove(mmu, pmap, va, PTBL_UNHOLD); 1693 } 1694} 1695 1696/* 1697 * Maps a sequence of resident pages belonging to the same object. 1698 * The sequence begins with the given page m_start. This page is 1699 * mapped at the given virtual address start. Each subsequent page is 1700 * mapped at a virtual address that is offset from start by the same 1701 * amount as the page is offset from m_start within the object. The 1702 * last page in the sequence is the page with the largest offset from 1703 * m_start that can be mapped at a virtual address less than the given 1704 * virtual address end. Not every virtual page between start and end 1705 * is mapped; only those for which a resident page exists with the 1706 * corresponding offset from m_start are mapped. 
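 *
 * E.g. a page with pindex == m_start->pindex + 2 is entered at
 * start + 2 * PAGE_SIZE, provided that address is still below 'end'.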
1707 */ 1708static void 1709mmu_booke_enter_object(mmu_t mmu, pmap_t pmap, vm_offset_t start, 1710 vm_offset_t end, vm_page_t m_start, vm_prot_t prot) 1711{ 1712 vm_page_t m; 1713 vm_pindex_t diff, psize; 1714 1715 psize = atop(end - start); 1716 m = m_start; 1717 PMAP_LOCK(pmap); 1718 while (m != NULL && (diff = m->pindex - m_start->pindex) < psize) { 1719 mmu_booke_enter_locked(mmu, pmap, start + ptoa(diff), m, 1720 prot & (VM_PROT_READ | VM_PROT_EXECUTE), FALSE); 1721 m = TAILQ_NEXT(m, listq); 1722 } 1723 PMAP_UNLOCK(pmap); 1724} 1725 1726static void 1727mmu_booke_enter_quick(mmu_t mmu, pmap_t pmap, vm_offset_t va, vm_page_t m, 1728 vm_prot_t prot) 1729{ 1730 1731 PMAP_LOCK(pmap); 1732 mmu_booke_enter_locked(mmu, pmap, va, m, 1733 prot & (VM_PROT_READ | VM_PROT_EXECUTE), FALSE); 1734 PMAP_UNLOCK(pmap); 1735} 1736 1737/* 1738 * Remove the given range of addresses from the specified map. 1739 * 1740 * It is assumed that the start and end are properly rounded to the page size. 1741 */ 1742static void 1743mmu_booke_remove(mmu_t mmu, pmap_t pmap, vm_offset_t va, vm_offset_t endva) 1744{ 1745 pte_t *pte; 1746 uint8_t hold_flag; 1747 1748 int su = (pmap == kernel_pmap); 1749 1750 //debugf("mmu_booke_remove: s (su = %d pmap=0x%08x tid=%d va=0x%08x endva=0x%08x)\n", 1751 // su, (u_int32_t)pmap, pmap->pm_tid, va, endva); 1752 1753 if (su) { 1754 KASSERT(((va >= virtual_avail) && 1755 (va <= VM_MAX_KERNEL_ADDRESS)), 1756 ("mmu_booke_remove: kernel pmap, non kernel va")); 1757 } else { 1758 KASSERT((va <= VM_MAXUSER_ADDRESS), 1759 ("mmu_booke_remove: user pmap, non user va")); 1760 } 1761 1762 if (PMAP_REMOVE_DONE(pmap)) { 1763 //debugf("mmu_booke_remove: e (empty)\n"); 1764 return; 1765 } 1766 1767 hold_flag = PTBL_HOLD_FLAG(pmap); 1768 //debugf("mmu_booke_remove: hold_flag = %d\n", hold_flag); 1769 1770 vm_page_lock_queues(); 1771 PMAP_LOCK(pmap); 1772 for (; va < endva; va += PAGE_SIZE) { 1773 pte = pte_find(mmu, pmap, va); 1774 if ((pte != NULL) && PTE_ISVALID(pte)) 1775 pte_remove(mmu, pmap, va, hold_flag); 1776 } 1777 PMAP_UNLOCK(pmap); 1778 vm_page_unlock_queues(); 1779 1780 //debugf("mmu_booke_remove: e\n"); 1781} 1782 1783/* 1784 * Remove physical page from all pmaps in which it resides. 1785 */ 1786static void 1787mmu_booke_remove_all(mmu_t mmu, vm_page_t m) 1788{ 1789 pv_entry_t pv, pvn; 1790 uint8_t hold_flag; 1791 1792 mtx_assert(&vm_page_queue_mtx, MA_OWNED); 1793 1794 for (pv = TAILQ_FIRST(&m->md.pv_list); pv != NULL; pv = pvn) { 1795 pvn = TAILQ_NEXT(pv, pv_link); 1796 1797 PMAP_LOCK(pv->pv_pmap); 1798 hold_flag = PTBL_HOLD_FLAG(pv->pv_pmap); 1799 pte_remove(mmu, pv->pv_pmap, pv->pv_va, hold_flag); 1800 PMAP_UNLOCK(pv->pv_pmap); 1801 } 1802 vm_page_flag_clear(m, PG_WRITEABLE); 1803} 1804 1805/* 1806 * Map a range of physical addresses into kernel virtual address space. 1807 */ 1808static vm_offset_t 1809mmu_booke_map(mmu_t mmu, vm_offset_t *virt, vm_offset_t pa_start, 1810 vm_offset_t pa_end, int prot) 1811{ 1812 vm_offset_t sva = *virt; 1813 vm_offset_t va = sva; 1814 1815 //debugf("mmu_booke_map: s (sva = 0x%08x pa_start = 0x%08x pa_end = 0x%08x)\n", 1816 // sva, pa_start, pa_end); 1817 1818 while (pa_start < pa_end) { 1819 mmu_booke_kenter(mmu, va, pa_start); 1820 va += PAGE_SIZE; 1821 pa_start += PAGE_SIZE; 1822 } 1823 *virt = va; 1824 1825 //debugf("mmu_booke_map: e (va = 0x%08x)\n", va); 1826 return (sva); 1827} 1828 1829/* 1830 * The pmap must be activated before it's address space can be accessed in any 1831 * way. 
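 *
 * Activation marks this CPU in pm_active, installs the pmap as the
 * per-CPU curpmap, allocates a TID on first use and loads it into the
 * PID0 SPR, after which translations are done in the new address space.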
1832 */ 1833static void 1834mmu_booke_activate(mmu_t mmu, struct thread *td) 1835{ 1836 pmap_t pmap; 1837 1838 pmap = &td->td_proc->p_vmspace->vm_pmap; 1839 1840 CTR5(KTR_PMAP, "%s: s (td = %p, proc = '%s', id = %d, pmap = 0x%08x)", 1841 __func__, td, td->td_proc->p_comm, td->td_proc->p_pid, pmap); 1842 1843 KASSERT((pmap != kernel_pmap), ("mmu_booke_activate: kernel_pmap!")); 1844 1845 mtx_lock_spin(&sched_lock); 1846 1847 atomic_set_int(&pmap->pm_active, PCPU_GET(cpumask)); 1848 PCPU_SET(curpmap, pmap); 1849 1850 if (pmap->pm_tid[PCPU_GET(cpuid)] == TID_NONE) 1851 tid_alloc(pmap); 1852 1853 /* Load PID0 register with pmap tid value. */ 1854 mtspr(SPR_PID0, pmap->pm_tid[PCPU_GET(cpuid)]); 1855 __asm __volatile("isync"); 1856 1857 mtx_unlock_spin(&sched_lock); 1858 1859 CTR3(KTR_PMAP, "%s: e (tid = %d for '%s')", __func__, 1860 pmap->pm_tid[PCPU_GET(cpuid)], td->td_proc->p_comm); 1861} 1862 1863/* 1864 * Deactivate the specified process's address space. 1865 */ 1866static void 1867mmu_booke_deactivate(mmu_t mmu, struct thread *td) 1868{ 1869 pmap_t pmap; 1870 1871 pmap = &td->td_proc->p_vmspace->vm_pmap; 1872 1873 CTR5(KTR_PMAP, "%s: td=%p, proc = '%s', id = %d, pmap = 0x%08x", 1874 __func__, td, td->td_proc->p_comm, td->td_proc->p_pid, pmap); 1875 1876 atomic_clear_int(&pmap->pm_active, PCPU_GET(cpumask)); 1877 PCPU_SET(curpmap, NULL); 1878} 1879 1880/* 1881 * Copy the range specified by src_addr/len 1882 * from the source map to the range dst_addr/len 1883 * in the destination map. 1884 * 1885 * This routine is only advisory and need not do anything. 1886 */ 1887static void 1888mmu_booke_copy(mmu_t mmu, pmap_t dst_pmap, pmap_t src_pmap, 1889 vm_offset_t dst_addr, vm_size_t len, vm_offset_t src_addr) 1890{ 1891 1892} 1893 1894/* 1895 * Set the physical protection on the specified range of this map as requested. 1896 */ 1897static void 1898mmu_booke_protect(mmu_t mmu, pmap_t pmap, vm_offset_t sva, vm_offset_t eva, 1899 vm_prot_t prot) 1900{ 1901 vm_offset_t va; 1902 vm_page_t m; 1903 pte_t *pte; 1904 1905 if ((prot & VM_PROT_READ) == VM_PROT_NONE) { 1906 mmu_booke_remove(mmu, pmap, sva, eva); 1907 return; 1908 } 1909 1910 if (prot & VM_PROT_WRITE) 1911 return; 1912 1913 vm_page_lock_queues(); 1914 PMAP_LOCK(pmap); 1915 for (va = sva; va < eva; va += PAGE_SIZE) { 1916 if ((pte = pte_find(mmu, pmap, va)) != NULL) { 1917 if (PTE_ISVALID(pte)) { 1918 m = PHYS_TO_VM_PAGE(PTE_PA(pte)); 1919 1920 mtx_lock_spin(&tlbivax_mutex); 1921 tlb_miss_lock(); 1922 1923 /* Handle modified pages. */ 1924 if (PTE_ISMODIFIED(pte)) 1925 vm_page_dirty(m); 1926 1927 /* Referenced pages. */ 1928 if (PTE_ISREFERENCED(pte)) 1929 vm_page_flag_set(m, PG_REFERENCED); 1930 1931 tlb0_flush_entry(va); 1932 pte->flags &= ~(PTE_UW | PTE_SW | PTE_MODIFIED | 1933 PTE_REFERENCED); 1934 1935 tlb_miss_unlock(); 1936 mtx_unlock_spin(&tlbivax_mutex); 1937 } 1938 } 1939 } 1940 PMAP_UNLOCK(pmap); 1941 vm_page_unlock_queues(); 1942} 1943 1944/* 1945 * Clear the write and modified bits in each of the given page's mappings. 
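 * Write access is revoked by clearing PTE_UW/PTE_SW (together with the
 * MODIFIED/REFERENCED bits) in every mapping on the page's pv list,
 * after which PG_WRITEABLE is cleared on the page itself.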
1946 */ 1947static void 1948mmu_booke_remove_write(mmu_t mmu, vm_page_t m) 1949{ 1950 pv_entry_t pv; 1951 pte_t *pte; 1952 1953 mtx_assert(&vm_page_queue_mtx, MA_OWNED); 1954 if ((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) != 0 || 1955 (m->flags & PG_WRITEABLE) == 0) 1956 return; 1957 1958 TAILQ_FOREACH(pv, &m->md.pv_list, pv_link) { 1959 PMAP_LOCK(pv->pv_pmap); 1960 if ((pte = pte_find(mmu, pv->pv_pmap, pv->pv_va)) != NULL) { 1961 if (PTE_ISVALID(pte)) { 1962 m = PHYS_TO_VM_PAGE(PTE_PA(pte)); 1963 1964 mtx_lock_spin(&tlbivax_mutex); 1965 tlb_miss_lock(); 1966 1967 /* Handle modified pages. */ 1968 if (PTE_ISMODIFIED(pte)) 1969 vm_page_dirty(m); 1970 1971 /* Referenced pages. */ 1972 if (PTE_ISREFERENCED(pte)) 1973 vm_page_flag_set(m, PG_REFERENCED); 1974 1975 /* Flush mapping from TLB0. */ 1976 pte->flags &= ~(PTE_UW | PTE_SW | PTE_MODIFIED | 1977 PTE_REFERENCED); 1978 1979 tlb_miss_unlock(); 1980 mtx_unlock_spin(&tlbivax_mutex); 1981 } 1982 } 1983 PMAP_UNLOCK(pv->pv_pmap); 1984 } 1985 vm_page_flag_clear(m, PG_WRITEABLE); 1986} 1987 1988static boolean_t 1989mmu_booke_page_executable(mmu_t mmu, vm_page_t m) 1990{ 1991 pv_entry_t pv; 1992 pte_t *pte; 1993 boolean_t executable; 1994 1995 executable = FALSE; 1996 TAILQ_FOREACH(pv, &m->md.pv_list, pv_link) { 1997 PMAP_LOCK(pv->pv_pmap); 1998 pte = pte_find(mmu, pv->pv_pmap, pv->pv_va); 1999 if (pte != NULL && PTE_ISVALID(pte) && (pte->flags & PTE_UX)) 2000 executable = TRUE; 2001 PMAP_UNLOCK(pv->pv_pmap); 2002 if (executable) 2003 break; 2004 } 2005 2006 return (executable); 2007} 2008 2009/* 2010 * Atomically extract and hold the physical page with the given 2011 * pmap and virtual address pair if that mapping permits the given 2012 * protection. 2013 */ 2014static vm_page_t 2015mmu_booke_extract_and_hold(mmu_t mmu, pmap_t pmap, vm_offset_t va, 2016 vm_prot_t prot) 2017{ 2018 pte_t *pte; 2019 vm_page_t m; 2020 uint32_t pte_wbit; 2021 2022 m = NULL; 2023 vm_page_lock_queues(); 2024 PMAP_LOCK(pmap); 2025 2026 pte = pte_find(mmu, pmap, va); 2027 if ((pte != NULL) && PTE_ISVALID(pte)) { 2028 if (pmap == kernel_pmap) 2029 pte_wbit = PTE_SW; 2030 else 2031 pte_wbit = PTE_UW; 2032 2033 if ((pte->flags & pte_wbit) || ((prot & VM_PROT_WRITE) == 0)) { 2034 m = PHYS_TO_VM_PAGE(PTE_PA(pte)); 2035 vm_page_hold(m); 2036 } 2037 } 2038 2039 vm_page_unlock_queues(); 2040 PMAP_UNLOCK(pmap); 2041 return (m); 2042} 2043 2044/* 2045 * Initialize a vm_page's machine-dependent fields. 2046 */ 2047static void 2048mmu_booke_page_init(mmu_t mmu, vm_page_t m) 2049{ 2050 2051 TAILQ_INIT(&m->md.pv_list); 2052} 2053 2054/* 2055 * mmu_booke_zero_page_area zeros the specified hardware page by 2056 * mapping it into virtual memory and using bzero to clear 2057 * its contents. 2058 * 2059 * off and size must reside within a single page. 2060 */ 2061static void 2062mmu_booke_zero_page_area(mmu_t mmu, vm_page_t m, int off, int size) 2063{ 2064 vm_offset_t va; 2065 2066 /* XXX KASSERT off and size are within a single page? */ 2067 2068 mtx_lock(&zero_page_mutex); 2069 va = zero_page_va; 2070 2071 mmu_booke_kenter(mmu, va, VM_PAGE_TO_PHYS(m)); 2072 bzero((caddr_t)va + off, size); 2073 mmu_booke_kremove(mmu, va); 2074 2075 mtx_unlock(&zero_page_mutex); 2076} 2077 2078/* 2079 * mmu_booke_zero_page zeros the specified hardware page. 
 */
static void
mmu_booke_zero_page(mmu_t mmu, vm_page_t m)
{

	mmu_booke_zero_page_area(mmu, m, 0, PAGE_SIZE);
}

/*
 * mmu_booke_copy_page copies the specified (machine independent) page by
 * mapping the page into virtual memory and using memcpy to copy the page,
 * one machine dependent page at a time.
 */
static void
mmu_booke_copy_page(mmu_t mmu, vm_page_t sm, vm_page_t dm)
{
	vm_offset_t sva, dva;

	sva = copy_page_src_va;
	dva = copy_page_dst_va;

	mtx_lock(&copy_page_mutex);
	mmu_booke_kenter(mmu, sva, VM_PAGE_TO_PHYS(sm));
	mmu_booke_kenter(mmu, dva, VM_PAGE_TO_PHYS(dm));
	memcpy((caddr_t)dva, (caddr_t)sva, PAGE_SIZE);
	mmu_booke_kremove(mmu, dva);
	mmu_booke_kremove(mmu, sva);
	mtx_unlock(&copy_page_mutex);
}

/*
 * mmu_booke_zero_page_idle zeros the specified hardware page by mapping it
 * into virtual memory and using bzero to clear its contents. This is intended
 * to be called from the vm_pagezero process only and outside of Giant. No
 * lock is required.
 */
static void
mmu_booke_zero_page_idle(mmu_t mmu, vm_page_t m)
{
	vm_offset_t va;

	va = zero_page_idle_va;
	mmu_booke_kenter(mmu, va, VM_PAGE_TO_PHYS(m));
	bzero((caddr_t)va, PAGE_SIZE);
	mmu_booke_kremove(mmu, va);
}

/*
 * Return whether or not the specified physical page was modified
 * in any of the physical maps.
 */
static boolean_t
mmu_booke_is_modified(mmu_t mmu, vm_page_t m)
{
	pte_t *pte;
	pv_entry_t pv;

	mtx_assert(&vm_page_queue_mtx, MA_OWNED);
	if ((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) != 0)
		return (FALSE);

	TAILQ_FOREACH(pv, &m->md.pv_list, pv_link) {
		PMAP_LOCK(pv->pv_pmap);
		if ((pte = pte_find(mmu, pv->pv_pmap, pv->pv_va)) != NULL) {
			if (!PTE_ISVALID(pte))
				goto make_sure_to_unlock;

			if (PTE_ISMODIFIED(pte)) {
				PMAP_UNLOCK(pv->pv_pmap);
				return (TRUE);
			}
		}
make_sure_to_unlock:
		PMAP_UNLOCK(pv->pv_pmap);
	}
	return (FALSE);
}

/*
 * Return whether or not the specified virtual address is eligible
 * for prefault.
 */
static boolean_t
mmu_booke_is_prefaultable(mmu_t mmu, pmap_t pmap, vm_offset_t addr)
{

	return (FALSE);
}

/*
 * Clear the modify bits on the specified physical page.
 */
static void
mmu_booke_clear_modify(mmu_t mmu, vm_page_t m)
{
	pte_t *pte;
	pv_entry_t pv;

	mtx_assert(&vm_page_queue_mtx, MA_OWNED);
	if ((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) != 0)
		return;

	TAILQ_FOREACH(pv, &m->md.pv_list, pv_link) {
		PMAP_LOCK(pv->pv_pmap);
		if ((pte = pte_find(mmu, pv->pv_pmap, pv->pv_va)) != NULL) {
			if (!PTE_ISVALID(pte))
				goto make_sure_to_unlock;

			mtx_lock_spin(&tlbivax_mutex);
			tlb_miss_lock();

			if (pte->flags & (PTE_SW | PTE_UW | PTE_MODIFIED)) {
				tlb0_flush_entry(pv->pv_va);
				pte->flags &= ~(PTE_SW | PTE_UW | PTE_MODIFIED |
				    PTE_REFERENCED);
			}

			tlb_miss_unlock();
			mtx_unlock_spin(&tlbivax_mutex);
		}
make_sure_to_unlock:
		PMAP_UNLOCK(pv->pv_pmap);
	}
}

/*
 * Return a count of reference bits for a page, clearing those bits.
 * It is not necessary for every reference bit to be cleared, but it
 * is necessary that 0 only be returned when there are truly no
 * reference bits set.
 *
 * XXX: The exact number of bits to check and clear is a matter that
 * should be tested and standardized at some point in the future for
 * optimal aging of shared pages.
 */
static int
mmu_booke_ts_referenced(mmu_t mmu, vm_page_t m)
{
	pte_t *pte;
	pv_entry_t pv;
	int count;

	mtx_assert(&vm_page_queue_mtx, MA_OWNED);
	if ((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) != 0)
		return (0);

	count = 0;
	TAILQ_FOREACH(pv, &m->md.pv_list, pv_link) {
		PMAP_LOCK(pv->pv_pmap);
		if ((pte = pte_find(mmu, pv->pv_pmap, pv->pv_va)) != NULL) {
			if (!PTE_ISVALID(pte))
				goto make_sure_to_unlock;

			if (PTE_ISREFERENCED(pte)) {
				mtx_lock_spin(&tlbivax_mutex);
				tlb_miss_lock();

				tlb0_flush_entry(pv->pv_va);
				pte->flags &= ~PTE_REFERENCED;

				tlb_miss_unlock();
				mtx_unlock_spin(&tlbivax_mutex);

				if (++count > 4) {
					PMAP_UNLOCK(pv->pv_pmap);
					break;
				}
			}
		}
make_sure_to_unlock:
		PMAP_UNLOCK(pv->pv_pmap);
	}
	return (count);
}

/*
 * Clear the reference bit on the specified physical page.
 */
static void
mmu_booke_clear_reference(mmu_t mmu, vm_page_t m)
{
	pte_t *pte;
	pv_entry_t pv;

	mtx_assert(&vm_page_queue_mtx, MA_OWNED);
	if ((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) != 0)
		return;

	TAILQ_FOREACH(pv, &m->md.pv_list, pv_link) {
		PMAP_LOCK(pv->pv_pmap);
		if ((pte = pte_find(mmu, pv->pv_pmap, pv->pv_va)) != NULL) {
			if (!PTE_ISVALID(pte))
				goto make_sure_to_unlock;

			if (PTE_ISREFERENCED(pte)) {
				mtx_lock_spin(&tlbivax_mutex);
				tlb_miss_lock();

				tlb0_flush_entry(pv->pv_va);
				pte->flags &= ~PTE_REFERENCED;

				tlb_miss_unlock();
				mtx_unlock_spin(&tlbivax_mutex);
			}
		}
make_sure_to_unlock:
		PMAP_UNLOCK(pv->pv_pmap);
	}
}

/*
 * Change wiring attribute for a map/virtual-address pair.
 */
static void
mmu_booke_change_wiring(mmu_t mmu, pmap_t pmap, vm_offset_t va, boolean_t wired)
{
	pte_t *pte;

	PMAP_LOCK(pmap);
	if ((pte = pte_find(mmu, pmap, va)) != NULL) {
		if (wired) {
			if (!PTE_ISWIRED(pte)) {
				pte->flags |= PTE_WIRED;
				pmap->pm_stats.wired_count++;
			}
		} else {
			if (PTE_ISWIRED(pte)) {
				pte->flags &= ~PTE_WIRED;
				pmap->pm_stats.wired_count--;
			}
		}
	}
	PMAP_UNLOCK(pmap);
}

/*
 * Return true if the pmap's pv is one of the first 16 pvs linked to from this
 * page. This count may be changed upwards or downwards in the future; it is
 * only necessary that true be returned for a small subset of pmaps for proper
 * page aging.
 */
static boolean_t
mmu_booke_page_exists_quick(mmu_t mmu, pmap_t pmap, vm_page_t m)
{
	pv_entry_t pv;
	int loops;

	mtx_assert(&vm_page_queue_mtx, MA_OWNED);
	if ((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) != 0)
		return (FALSE);

	loops = 0;
	TAILQ_FOREACH(pv, &m->md.pv_list, pv_link) {
		if (pv->pv_pmap == pmap)
			return (TRUE);

		if (++loops >= 16)
			break;
	}
	return (FALSE);
}

/*
 * Return the number of managed mappings to the given physical page that are
 * wired.
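 *
 * A mapping counts as wired when its PTE carries PTE_WIRED; that flag is
 * toggled by mmu_booke_change_wiring() above and mirrored in the owning
 * pmap's pm_stats.wired_count, e.g. (hypothetical caller sketch):
 *
 *	mmu_booke_change_wiring(mmu, pmap, va, TRUE);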
2345 */ 2346static int 2347mmu_booke_page_wired_mappings(mmu_t mmu, vm_page_t m) 2348{ 2349 pv_entry_t pv; 2350 pte_t *pte; 2351 int count = 0; 2352 2353 if ((m->flags & PG_FICTITIOUS) != 0) 2354 return (count); 2355 mtx_assert(&vm_page_queue_mtx, MA_OWNED); 2356 2357 TAILQ_FOREACH(pv, &m->md.pv_list, pv_link) { 2358 PMAP_LOCK(pv->pv_pmap); 2359 if ((pte = pte_find(mmu, pv->pv_pmap, pv->pv_va)) != NULL) 2360 if (PTE_ISVALID(pte) && PTE_ISWIRED(pte)) 2361 count++; 2362 PMAP_UNLOCK(pv->pv_pmap); 2363 } 2364 2365 return (count); 2366} 2367 2368static int 2369mmu_booke_dev_direct_mapped(mmu_t mmu, vm_offset_t pa, vm_size_t size) 2370{ 2371 int i; 2372 vm_offset_t va; 2373 2374 /* 2375 * This currently does not work for entries that 2376 * overlap TLB1 entries. 2377 */ 2378 for (i = 0; i < tlb1_idx; i ++) { 2379 if (tlb1_iomapped(i, pa, size, &va) == 0) 2380 return (0); 2381 } 2382 2383 return (EFAULT); 2384} 2385 2386vm_offset_t 2387mmu_booke_dumpsys_map(mmu_t mmu, struct pmap_md *md, vm_size_t ofs, 2388 vm_size_t *sz) 2389{ 2390 vm_paddr_t pa, ppa; 2391 vm_offset_t va; 2392 vm_size_t gran; 2393 2394 /* Raw physical memory dumps don't have a virtual address. */ 2395 if (md->md_vaddr == ~0UL) { 2396 /* We always map a 256MB page at 256M. */ 2397 gran = 256 * 1024 * 1024; 2398 pa = md->md_paddr + ofs; 2399 ppa = pa & ~(gran - 1); 2400 ofs = pa - ppa; 2401 va = gran; 2402 tlb1_set_entry(va, ppa, gran, _TLB_ENTRY_IO); 2403 if (*sz > (gran - ofs)) 2404 *sz = gran - ofs; 2405 return (va + ofs); 2406 } 2407 2408 /* Minidumps are based on virtual memory addresses. */ 2409 va = md->md_vaddr + ofs; 2410 if (va >= kernstart + kernsize) { 2411 gran = PAGE_SIZE - (va & PAGE_MASK); 2412 if (*sz > gran) 2413 *sz = gran; 2414 } 2415 return (va); 2416} 2417 2418void 2419mmu_booke_dumpsys_unmap(mmu_t mmu, struct pmap_md *md, vm_size_t ofs, 2420 vm_offset_t va) 2421{ 2422 2423 /* Raw physical memory dumps don't have a virtual address. */ 2424 if (md->md_vaddr == ~0UL) { 2425 tlb1_idx--; 2426 tlb1[tlb1_idx].mas1 = 0; 2427 tlb1[tlb1_idx].mas2 = 0; 2428 tlb1[tlb1_idx].mas3 = 0; 2429 tlb1_write_entry(tlb1_idx); 2430 return; 2431 } 2432 2433 /* Minidumps are based on virtual memory addresses. */ 2434 /* Nothing to do... */ 2435} 2436 2437struct pmap_md * 2438mmu_booke_scan_md(mmu_t mmu, struct pmap_md *prev) 2439{ 2440 static struct pmap_md md; 2441 struct bi_mem_region *mr; 2442 pte_t *pte; 2443 vm_offset_t va; 2444 2445 if (dumpsys_minidump) { 2446 md.md_paddr = ~0UL; /* Minidumps use virtual addresses. */ 2447 if (prev == NULL) { 2448 /* 1st: kernel .data and .bss. */ 2449 md.md_index = 1; 2450 md.md_vaddr = trunc_page((uintptr_t)_etext); 2451 md.md_size = round_page((uintptr_t)_end) - md.md_vaddr; 2452 return (&md); 2453 } 2454 switch (prev->md_index) { 2455 case 1: 2456 /* 2nd: msgbuf and tables (see pmap_bootstrap()). */ 2457 md.md_index = 2; 2458 md.md_vaddr = data_start; 2459 md.md_size = data_end - data_start; 2460 break; 2461 case 2: 2462 /* 3rd: kernel VM. */ 2463 va = prev->md_vaddr + prev->md_size; 2464 /* Find start of next chunk (from va). */ 2465 while (va < virtual_end) { 2466 /* Don't dump the buffer cache. */ 2467 if (va >= kmi.buffer_sva && 2468 va < kmi.buffer_eva) { 2469 va = kmi.buffer_eva; 2470 continue; 2471 } 2472 pte = pte_find(mmu, kernel_pmap, va); 2473 if (pte != NULL && PTE_ISVALID(pte)) 2474 break; 2475 va += PAGE_SIZE; 2476 } 2477 if (va < virtual_end) { 2478 md.md_vaddr = va; 2479 va += PAGE_SIZE; 2480 /* Find last page in chunk. 
*/
				while (va < virtual_end) {
					/* Don't run into the buffer cache. */
					if (va == kmi.buffer_sva)
						break;
					pte = pte_find(mmu, kernel_pmap, va);
					if (pte == NULL || !PTE_ISVALID(pte))
						break;
					va += PAGE_SIZE;
				}
				md.md_size = va - md.md_vaddr;
				break;
			}
			md.md_index = 3;
			/* FALLTHROUGH */
		default:
			return (NULL);
		}
	} else { /* physical (non-minidump) dumps */
		mr = bootinfo_mr();
		if (prev == NULL) {
			/* First physical chunk. */
			md.md_paddr = mr->mem_base;
			md.md_size = mr->mem_size;
			md.md_vaddr = ~0UL;
			md.md_index = 1;
		} else if (md.md_index < bootinfo->bi_mem_reg_no) {
			md.md_paddr = mr[md.md_index].mem_base;
			md.md_size = mr[md.md_index].mem_size;
			md.md_vaddr = ~0UL;
			md.md_index++;
		} else {
			/* There's no next physical chunk. */
			return (NULL);
		}
	}

	return (&md);
}

/*
 * Map a set of physical memory pages into the kernel virtual address space.
 * Return a pointer to where it is mapped. This routine is intended to be used
 * for mapping device memory, NOT real memory.
 */
static void *
mmu_booke_mapdev(mmu_t mmu, vm_offset_t pa, vm_size_t size)
{
	void *res;
	uintptr_t va;
	vm_size_t sz;

	va = (pa >= 0x80000000) ? pa : (0xe2000000 + pa);
	res = (void *)va;

	do {
		sz = 1 << (ilog2(size) & ~1);
		if (bootverbose)
			printf("Wiring VA=%x to PA=%x (size=%x), "
			    "using TLB1[%d]\n", va, pa, sz, tlb1_idx);
		tlb1_set_entry(va, pa, sz, _TLB_ENTRY_IO);
		size -= sz;
		pa += sz;
		va += sz;
	} while (size > 0);

	return (res);
}

/*
 * 'Unmap' a range mapped by mmu_booke_mapdev().
 */
static void
mmu_booke_unmapdev(mmu_t mmu, vm_offset_t va, vm_size_t size)
{
	vm_offset_t base, offset;

	/*
	 * Unmap only if this is inside kernel virtual space.
	 */
	if ((va >= VM_MIN_KERNEL_ADDRESS) && (va <= VM_MAX_KERNEL_ADDRESS)) {
		base = trunc_page(va);
		offset = va & PAGE_MASK;
		size = roundup(offset + size, PAGE_SIZE);
		kmem_free(kernel_map, base, size);
	}
}

/*
 * mmu_booke_object_init_pt preloads the ptes for a given object into the
 * specified pmap. This eliminates the blast of soft faults on process startup
 * and immediately after an mmap.
 */
static void
mmu_booke_object_init_pt(mmu_t mmu, pmap_t pmap, vm_offset_t addr,
    vm_object_t object, vm_pindex_t pindex, vm_size_t size)
{

	VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
	KASSERT(object->type == OBJT_DEVICE,
	    ("mmu_booke_object_init_pt: non-device object"));
}

/*
 * Perform the pmap work for mincore.
 */
static int
mmu_booke_mincore(mmu_t mmu, pmap_t pmap, vm_offset_t addr)
{

	TODO;
	return (0);
}

/**************************************************************************/
/* TID handling */
/**************************************************************************/

/*
 * Allocate a TID. If necessary, steal one from someone else.
 * The new TID is flushed from the TLB before returning.
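 *
 * Allocation is a simple per-CPU round robin over [TID_MIN, TID_MAX]: the
 * cursor lives in the PCPU tid_next field, and when it lands on a TID that
 * is still owned by another pmap (tidbusy[cpu][tid] != NULL) that owner's
 * pm_tid[] slot is reset to TID_NONE and its stale TLB0 entries are flushed
 * with tid_flush() before the TID is handed out again.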
2601 */ 2602static tlbtid_t 2603tid_alloc(pmap_t pmap) 2604{ 2605 tlbtid_t tid; 2606 int thiscpu; 2607 2608 KASSERT((pmap != kernel_pmap), ("tid_alloc: kernel pmap")); 2609 2610 CTR2(KTR_PMAP, "%s: s (pmap = %p)", __func__, pmap); 2611 2612 thiscpu = PCPU_GET(cpuid); 2613 2614 tid = PCPU_GET(tid_next); 2615 if (tid > TID_MAX) 2616 tid = TID_MIN; 2617 PCPU_SET(tid_next, tid + 1); 2618 2619 /* If we are stealing TID then clear the relevant pmap's field */ 2620 if (tidbusy[thiscpu][tid] != NULL) { 2621 2622 CTR2(KTR_PMAP, "%s: warning: stealing tid %d", __func__, tid); 2623 2624 tidbusy[thiscpu][tid]->pm_tid[thiscpu] = TID_NONE; 2625 2626 /* Flush all entries from TLB0 matching this TID. */ 2627 tid_flush(tid); 2628 } 2629 2630 tidbusy[thiscpu][tid] = pmap; 2631 pmap->pm_tid[thiscpu] = tid; 2632 __asm __volatile("msync; isync"); 2633 2634 CTR3(KTR_PMAP, "%s: e (%02d next = %02d)", __func__, tid, 2635 PCPU_GET(tid_next)); 2636 2637 return (tid); 2638} 2639 2640/**************************************************************************/ 2641/* TLB0 handling */ 2642/**************************************************************************/ 2643 2644static void 2645tlb_print_entry(int i, uint32_t mas1, uint32_t mas2, uint32_t mas3, 2646 uint32_t mas7) 2647{ 2648 int as; 2649 char desc[3]; 2650 tlbtid_t tid; 2651 vm_size_t size; 2652 unsigned int tsize; 2653 2654 desc[2] = '\0'; 2655 if (mas1 & MAS1_VALID) 2656 desc[0] = 'V'; 2657 else 2658 desc[0] = ' '; 2659 2660 if (mas1 & MAS1_IPROT) 2661 desc[1] = 'P'; 2662 else 2663 desc[1] = ' '; 2664 2665 as = (mas1 & MAS1_TS_MASK) ? 1 : 0; 2666 tid = MAS1_GETTID(mas1); 2667 2668 tsize = (mas1 & MAS1_TSIZE_MASK) >> MAS1_TSIZE_SHIFT; 2669 size = 0; 2670 if (tsize) 2671 size = tsize2size(tsize); 2672 2673 debugf("%3d: (%s) [AS=%d] " 2674 "sz = 0x%08x tsz = %d tid = %d mas1 = 0x%08x " 2675 "mas2(va) = 0x%08x mas3(pa) = 0x%08x mas7 = 0x%08x\n", 2676 i, desc, as, size, tsize, tid, mas1, mas2, mas3, mas7); 2677} 2678 2679/* Convert TLB0 va and way number to tlb0[] table index. */ 2680static inline unsigned int 2681tlb0_tableidx(vm_offset_t va, unsigned int way) 2682{ 2683 unsigned int idx; 2684 2685 idx = (way * TLB0_ENTRIES_PER_WAY); 2686 idx += (va & MAS2_TLB0_ENTRY_IDX_MASK) >> MAS2_TLB0_ENTRY_IDX_SHIFT; 2687 return (idx); 2688} 2689 2690/* 2691 * Invalidate TLB0 entry. 
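 *
 * Callers must hold tlbivax_mutex (asserted below); the typical pattern used
 * elsewhere in this file is:
 *
 *	mtx_lock_spin(&tlbivax_mutex);
 *	tlb_miss_lock();
 *	tlb0_flush_entry(va);
 *	tlb_miss_unlock();
 *	mtx_unlock_spin(&tlbivax_mutex);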
2692 */ 2693static inline void 2694tlb0_flush_entry(vm_offset_t va) 2695{ 2696 2697 CTR2(KTR_PMAP, "%s: s va=0x%08x", __func__, va); 2698 2699 mtx_assert(&tlbivax_mutex, MA_OWNED); 2700 2701 __asm __volatile("tlbivax 0, %0" :: "r"(va & MAS2_EPN_MASK)); 2702 __asm __volatile("isync; msync"); 2703 __asm __volatile("tlbsync; msync"); 2704 2705 CTR1(KTR_PMAP, "%s: e", __func__); 2706} 2707 2708/* Print out contents of the MAS registers for each TLB0 entry */ 2709void 2710tlb0_print_tlbentries(void) 2711{ 2712 uint32_t mas0, mas1, mas2, mas3, mas7; 2713 int entryidx, way, idx; 2714 2715 debugf("TLB0 entries:\n"); 2716 for (way = 0; way < TLB0_WAYS; way ++) 2717 for (entryidx = 0; entryidx < TLB0_ENTRIES_PER_WAY; entryidx++) { 2718 2719 mas0 = MAS0_TLBSEL(0) | MAS0_ESEL(way); 2720 mtspr(SPR_MAS0, mas0); 2721 __asm __volatile("isync"); 2722 2723 mas2 = entryidx << MAS2_TLB0_ENTRY_IDX_SHIFT; 2724 mtspr(SPR_MAS2, mas2); 2725 2726 __asm __volatile("isync; tlbre"); 2727 2728 mas1 = mfspr(SPR_MAS1); 2729 mas2 = mfspr(SPR_MAS2); 2730 mas3 = mfspr(SPR_MAS3); 2731 mas7 = mfspr(SPR_MAS7); 2732 2733 idx = tlb0_tableidx(mas2, way); 2734 tlb_print_entry(idx, mas1, mas2, mas3, mas7); 2735 } 2736} 2737 2738/**************************************************************************/ 2739/* TLB1 handling */ 2740/**************************************************************************/ 2741 2742/* 2743 * TLB1 mapping notes: 2744 * 2745 * TLB1[0] CCSRBAR 2746 * TLB1[1] Kernel text and data. 2747 * TLB1[2-15] Additional kernel text and data mappings (if required), PCI 2748 * windows, other devices mappings. 2749 */ 2750 2751/* 2752 * Write given entry to TLB1 hardware. 2753 * Use 32 bit pa, clear 4 high-order bits of RPN (mas7). 2754 */ 2755static void 2756tlb1_write_entry(unsigned int idx) 2757{ 2758 uint32_t mas0, mas7; 2759 2760 //debugf("tlb1_write_entry: s\n"); 2761 2762 /* Clear high order RPN bits */ 2763 mas7 = 0; 2764 2765 /* Select entry */ 2766 mas0 = MAS0_TLBSEL(1) | MAS0_ESEL(idx); 2767 //debugf("tlb1_write_entry: mas0 = 0x%08x\n", mas0); 2768 2769 mtspr(SPR_MAS0, mas0); 2770 __asm __volatile("isync"); 2771 mtspr(SPR_MAS1, tlb1[idx].mas1); 2772 __asm __volatile("isync"); 2773 mtspr(SPR_MAS2, tlb1[idx].mas2); 2774 __asm __volatile("isync"); 2775 mtspr(SPR_MAS3, tlb1[idx].mas3); 2776 __asm __volatile("isync"); 2777 mtspr(SPR_MAS7, mas7); 2778 __asm __volatile("isync; tlbwe; isync; msync"); 2779 2780 //debugf("tlb1_write_entry: e\n");; 2781} 2782 2783/* 2784 * Return the largest uint value log such that 2^log <= num. 2785 */ 2786static unsigned int 2787ilog2(unsigned int num) 2788{ 2789 int lz; 2790 2791 __asm ("cntlzw %0, %1" : "=r" (lz) : "r" (num)); 2792 return (31 - lz); 2793} 2794 2795/* 2796 * Convert TLB TSIZE value to mapped region size. 2797 */ 2798static vm_size_t 2799tsize2size(unsigned int tsize) 2800{ 2801 2802 /* 2803 * size = 4^tsize KB 2804 * size = 4^tsize * 2^10 = 2^(2 * tsize - 10) 2805 */ 2806 2807 return ((1 << (2 * tsize)) * 1024); 2808} 2809 2810/* 2811 * Convert region size (must be power of 4) to TLB TSIZE value. 2812 */ 2813static unsigned int 2814size2tsize(vm_size_t size) 2815{ 2816 2817 return (ilog2(size) / 2 - 5); 2818} 2819 2820/* 2821 * Register permanent kernel mapping in TLB1. 2822 * 2823 * Entries are created starting from index 0 (current free entry is 2824 * kept in tlb1_idx) and are not supposed to be invalidated. 
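 *
 * The size must be a power of 4 (see size2tsize() below). Typical use, as in
 * tlb1_init():
 *
 *	tlb1_set_entry(CCSRBAR_VA, ccsrbar, CCSRBAR_SIZE, _TLB_ENTRY_IO);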
2825 */ 2826static int 2827tlb1_set_entry(vm_offset_t va, vm_offset_t pa, vm_size_t size, 2828 uint32_t flags) 2829{ 2830 uint32_t ts, tid; 2831 int tsize; 2832 2833 if (tlb1_idx >= TLB1_ENTRIES) { 2834 printf("tlb1_set_entry: TLB1 full!\n"); 2835 return (-1); 2836 } 2837 2838 /* Convert size to TSIZE */ 2839 tsize = size2tsize(size); 2840 2841 tid = (TID_KERNEL << MAS1_TID_SHIFT) & MAS1_TID_MASK; 2842 /* XXX TS is hard coded to 0 for now as we only use single address space */ 2843 ts = (0 << MAS1_TS_SHIFT) & MAS1_TS_MASK; 2844 2845 /* XXX LOCK tlb1[] */ 2846 2847 tlb1[tlb1_idx].mas1 = MAS1_VALID | MAS1_IPROT | ts | tid; 2848 tlb1[tlb1_idx].mas1 |= ((tsize << MAS1_TSIZE_SHIFT) & MAS1_TSIZE_MASK); 2849 tlb1[tlb1_idx].mas2 = (va & MAS2_EPN_MASK) | flags; 2850 2851 /* Set supervisor RWX permission bits */ 2852 tlb1[tlb1_idx].mas3 = (pa & MAS3_RPN) | MAS3_SR | MAS3_SW | MAS3_SX; 2853 2854 tlb1_write_entry(tlb1_idx++); 2855 2856 /* XXX UNLOCK tlb1[] */ 2857 2858 /* 2859 * XXX in general TLB1 updates should be propagated between CPUs, 2860 * since current design assumes to have the same TLB1 set-up on all 2861 * cores. 2862 */ 2863 return (0); 2864} 2865 2866static int 2867tlb1_entry_size_cmp(const void *a, const void *b) 2868{ 2869 const vm_size_t *sza; 2870 const vm_size_t *szb; 2871 2872 sza = a; 2873 szb = b; 2874 if (*sza > *szb) 2875 return (-1); 2876 else if (*sza < *szb) 2877 return (1); 2878 else 2879 return (0); 2880} 2881 2882/* 2883 * Map in contiguous RAM region into the TLB1 using maximum of 2884 * KERNEL_REGION_MAX_TLB_ENTRIES entries. 2885 * 2886 * If necessary round up last entry size and return total size 2887 * used by all allocated entries. 2888 */ 2889vm_size_t 2890tlb1_mapin_region(vm_offset_t va, vm_offset_t pa, vm_size_t size) 2891{ 2892 vm_size_t entry_size[KERNEL_REGION_MAX_TLB_ENTRIES]; 2893 vm_size_t mapped_size, sz, esz; 2894 unsigned int log; 2895 int i; 2896 2897 CTR4(KTR_PMAP, "%s: region size = 0x%08x va = 0x%08x pa = 0x%08x", 2898 __func__, size, va, pa); 2899 2900 mapped_size = 0; 2901 sz = size; 2902 memset(entry_size, 0, sizeof(entry_size)); 2903 2904 /* Calculate entry sizes. */ 2905 for (i = 0; i < KERNEL_REGION_MAX_TLB_ENTRIES && sz > 0; i++) { 2906 2907 /* Largest region that is power of 4 and fits within size */ 2908 log = ilog2(sz) / 2; 2909 esz = 1 << (2 * log); 2910 2911 /* If this is last entry cover remaining size. */ 2912 if (i == KERNEL_REGION_MAX_TLB_ENTRIES - 1) { 2913 while (esz < sz) 2914 esz = esz << 2; 2915 } 2916 2917 entry_size[i] = esz; 2918 mapped_size += esz; 2919 if (esz < sz) 2920 sz -= esz; 2921 else 2922 sz = 0; 2923 } 2924 2925 /* Sort entry sizes, required to get proper entry address alignment. */ 2926 qsort(entry_size, KERNEL_REGION_MAX_TLB_ENTRIES, 2927 sizeof(vm_size_t), tlb1_entry_size_cmp); 2928 2929 /* Load TLB1 entries. */ 2930 for (i = 0; i < KERNEL_REGION_MAX_TLB_ENTRIES; i++) { 2931 esz = entry_size[i]; 2932 if (!esz) 2933 break; 2934 2935 CTR5(KTR_PMAP, "%s: entry %d: sz = 0x%08x (va = 0x%08x " 2936 "pa = 0x%08x)", __func__, tlb1_idx, esz, va, pa); 2937 2938 tlb1_set_entry(va, pa, esz, _TLB_ENTRY_MEM); 2939 2940 va += esz; 2941 pa += esz; 2942 } 2943 2944 CTR3(KTR_PMAP, "%s: mapped size 0x%08x (wasted space 0x%08x)", 2945 __func__, mapped_size, mapped_size - size); 2946 2947 return (mapped_size); 2948} 2949 2950/* 2951 * TLB1 initialization routine, to be called after the very first 2952 * assembler level setup done in locore.S. 
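 *
 * In outline (see the code that follows): the kernel image entry installed by
 * locore in TLB1[1] is read back and cached in tlb1[], CCSRBAR is mapped at
 * TLB1[0], tlb1_idx is set to 2 so later tlb1_set_entry() calls skip the two
 * busy slots, and the MAS4 TLB-miss defaults are programmed.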
 */
void
tlb1_init(vm_offset_t ccsrbar)
{
	uint32_t mas0;

	/* TLB1[1] is used to map the kernel. Save that entry. */
	mas0 = MAS0_TLBSEL(1) | MAS0_ESEL(1);
	mtspr(SPR_MAS0, mas0);
	__asm __volatile("isync; tlbre");

	tlb1[1].mas1 = mfspr(SPR_MAS1);
	tlb1[1].mas2 = mfspr(SPR_MAS2);
	tlb1[1].mas3 = mfspr(SPR_MAS3);

	/* Map in CCSRBAR in TLB1[0] */
	tlb1_idx = 0;
	tlb1_set_entry(CCSRBAR_VA, ccsrbar, CCSRBAR_SIZE, _TLB_ENTRY_IO);
	/*
	 * Set the next available TLB1 entry index. Note TLB1[1] is reserved
	 * for the initial mapping of kernel text+data, which was set early in
	 * locore; we need to skip this [busy] entry.
	 */
	tlb1_idx = 2;

	/* Setup TLB miss defaults */
	set_mas4_defaults();
}

/*
 * Setup MAS4 defaults.
 * These values are loaded to MAS0-2 on a TLB miss.
 */
static void
set_mas4_defaults(void)
{
	uint32_t mas4;

	/* Defaults: TLB0, PID0, TSIZED=4K */
	mas4 = MAS4_TLBSELD0;
	mas4 |= (TLB_SIZE_4K << MAS4_TSIZED_SHIFT) & MAS4_TSIZED_MASK;
#ifdef SMP
	mas4 |= MAS4_MD;
#endif
	mtspr(SPR_MAS4, mas4);
	__asm __volatile("isync");
}

/*
 * Print out contents of the MAS registers for each TLB1 entry
 */
void
tlb1_print_tlbentries(void)
{
	uint32_t mas0, mas1, mas2, mas3, mas7;
	int i;

	debugf("TLB1 entries:\n");
	for (i = 0; i < TLB1_ENTRIES; i++) {

		mas0 = MAS0_TLBSEL(1) | MAS0_ESEL(i);
		mtspr(SPR_MAS0, mas0);

		__asm __volatile("isync; tlbre");

		mas1 = mfspr(SPR_MAS1);
		mas2 = mfspr(SPR_MAS2);
		mas3 = mfspr(SPR_MAS3);
		mas7 = mfspr(SPR_MAS7);

		tlb_print_entry(i, mas1, mas2, mas3, mas7);
	}
}

/*
 * Print out contents of the in-ram tlb1 table.
 */
void
tlb1_print_entries(void)
{
	int i;

	debugf("tlb1[] table entries:\n");
	for (i = 0; i < TLB1_ENTRIES; i++)
		tlb_print_entry(i, tlb1[i].mas1, tlb1[i].mas2, tlb1[i].mas3, 0);
}

/*
 * Return 0 if the physical IO range is encompassed by one of the
 * TLB1 entries, otherwise return the related error code.
 */
static int
tlb1_iomapped(int i, vm_paddr_t pa, vm_size_t size, vm_offset_t *va)
{
	uint32_t prot;
	vm_paddr_t pa_start;
	vm_paddr_t pa_end;
	unsigned int entry_tsize;
	vm_size_t entry_size;

	*va = (vm_offset_t)NULL;

	/* Skip invalid entries */
	if (!(tlb1[i].mas1 & MAS1_VALID))
		return (EINVAL);

	/*
	 * The entry must be cache-inhibited, guarded, and r/w
	 * so it can function as an i/o page
	 */
	prot = tlb1[i].mas2 & (MAS2_I | MAS2_G);
	if (prot != (MAS2_I | MAS2_G))
		return (EPERM);

	prot = tlb1[i].mas3 & (MAS3_SR | MAS3_SW);
	if (prot != (MAS3_SR | MAS3_SW))
		return (EPERM);

	/* The address should be within the entry range. */
	entry_tsize = (tlb1[i].mas1 & MAS1_TSIZE_MASK) >> MAS1_TSIZE_SHIFT;
	KASSERT((entry_tsize), ("tlb1_iomapped: invalid entry tsize"));

	entry_size = tsize2size(entry_tsize);
	pa_start = tlb1[i].mas3 & MAS3_RPN;
	pa_end = pa_start + entry_size - 1;

	if ((pa < pa_start) || ((pa + size) > pa_end))
		return (ERANGE);

	/* Return virtual address of this mapping. */
	*va = (tlb1[i].mas2 & MAS2_EPN_MASK) + (pa - pa_start);
	return (0);
}
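
/*
 * Worked example of the TSIZE conversions above (illustrative only): with
 * the size = 4^tsize KB rule used by tsize2size(), a 16MB region gives
 *
 *	size2tsize(16 * 1024 * 1024)	= ilog2(16MB) / 2 - 5 = 24 / 2 - 5 = 7
 *	tsize2size(7)			= (1 << 14) * 1024    = 16MB
 */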