pmap.c revision 194101
1/*- 2 * Copyright (C) 2007-2009 Semihalf, Rafal Jaworowski <raj@semihalf.com> 3 * Copyright (C) 2006 Semihalf, Marian Balakowicz <m8@semihalf.com> 4 * All rights reserved. 5 * 6 * Redistribution and use in source and binary forms, with or without 7 * modification, are permitted provided that the following conditions 8 * are met: 9 * 1. Redistributions of source code must retain the above copyright 10 * notice, this list of conditions and the following disclaimer. 11 * 2. Redistributions in binary form must reproduce the above copyright 12 * notice, this list of conditions and the following disclaimer in the 13 * documentation and/or other materials provided with the distribution. 14 * 15 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR 16 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES 17 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN 18 * NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 19 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED 20 * TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR 21 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF 22 * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING 23 * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS 24 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 25 * 26 * Some hw specific parts of this pmap were derived or influenced 27 * by NetBSD's ibm4xx pmap module. More generic code is shared with 28 * a few other pmap modules from the FreeBSD tree. 29 */ 30 31 /* 32 * VM layout notes: 33 * 34 * Kernel and user threads run within one common virtual address space 35 * defined by AS=0. 36 * 37 * Virtual address space layout: 38 * ----------------------------- 39 * 0x0000_0000 - 0xafff_ffff : user process 40 * 0xb000_0000 - 0xbfff_ffff : pmap_mapdev()-ed area (PCI/PCIE etc.) 41 * 0xc000_0000 - 0xc0ff_ffff : kernel reserved 42 * 0xc000_0000 - data_end : kernel code+data, env, metadata etc. 
43 * 0xc100_0000 - 0xfeef_ffff : KVA 44 * 0xc100_0000 - 0xc100_3fff : reserved for page zero/copy 45 * 0xc100_4000 - 0xc200_3fff : reserved for ptbl bufs 46 * 0xc200_4000 - 0xc200_8fff : guard page + kstack0 47 * 0xc200_9000 - 0xfeef_ffff : actual free KVA space 48 * 0xfef0_0000 - 0xffff_ffff : I/O devices region 49 */ 50 51#include <sys/cdefs.h> 52__FBSDID("$FreeBSD: head/sys/powerpc/booke/pmap.c 194101 2009-06-13 08:57:04Z raj $"); 53 54#include <sys/types.h> 55#include <sys/param.h> 56#include <sys/malloc.h> 57#include <sys/ktr.h> 58#include <sys/proc.h> 59#include <sys/user.h> 60#include <sys/queue.h> 61#include <sys/systm.h> 62#include <sys/kernel.h> 63#include <sys/msgbuf.h> 64#include <sys/lock.h> 65#include <sys/mutex.h> 66#include <sys/smp.h> 67#include <sys/vmmeter.h> 68 69#include <vm/vm.h> 70#include <vm/vm_page.h> 71#include <vm/vm_kern.h> 72#include <vm/vm_pageout.h> 73#include <vm/vm_extern.h> 74#include <vm/vm_object.h> 75#include <vm/vm_param.h> 76#include <vm/vm_map.h> 77#include <vm/vm_pager.h> 78#include <vm/uma.h> 79 80#include <machine/bootinfo.h> 81#include <machine/cpu.h> 82#include <machine/pcb.h> 83#include <machine/platform.h> 84 85#include <machine/tlb.h> 86#include <machine/spr.h> 87#include <machine/vmparam.h> 88#include <machine/md_var.h> 89#include <machine/mmuvar.h> 90#include <machine/pmap.h> 91#include <machine/pte.h> 92 93#include "mmu_if.h" 94 95#define DEBUG 96#undef DEBUG 97 98#ifdef DEBUG 99#define debugf(fmt, args...) printf(fmt, ##args) 100#else 101#define debugf(fmt, args...) 102#endif 103 104#define TODO panic("%s: not implemented", __func__); 105 106#include "opt_sched.h" 107#ifndef SCHED_4BSD 108#error "e500 only works with SCHED_4BSD which uses a global scheduler lock." 109#endif 110extern struct mtx sched_lock; 111 112extern int dumpsys_minidump; 113 114extern unsigned char _etext[]; 115extern unsigned char _end[]; 116 117/* Kernel physical load address. */ 118extern uint32_t kernload; 119vm_offset_t kernstart; 120vm_size_t kernsize; 121 122/* Message buffer and tables. */ 123static vm_offset_t data_start; 124static vm_size_t data_end; 125 126/* Phys/avail memory regions. */ 127static struct mem_region *availmem_regions; 128static int availmem_regions_sz; 129static struct mem_region *physmem_regions; 130static int physmem_regions_sz; 131 132/* Reserved KVA space and mutex for mmu_booke_zero_page. */ 133static vm_offset_t zero_page_va; 134static struct mtx zero_page_mutex; 135 136static struct mtx tlbivax_mutex; 137 138/* 139 * Reserved KVA space for mmu_booke_zero_page_idle. This is used 140 * by idle thred only, no lock required. 141 */ 142static vm_offset_t zero_page_idle_va; 143 144/* Reserved KVA space and mutex for mmu_booke_copy_page. */ 145static vm_offset_t copy_page_src_va; 146static vm_offset_t copy_page_dst_va; 147static struct mtx copy_page_mutex; 148 149/**************************************************************************/ 150/* PMAP */ 151/**************************************************************************/ 152 153static void mmu_booke_enter_locked(mmu_t, pmap_t, vm_offset_t, vm_page_t, 154 vm_prot_t, boolean_t); 155 156unsigned int kptbl_min; /* Index of the first kernel ptbl. */ 157unsigned int kernel_ptbls; /* Number of KVA ptbls. */ 158 159static int pagedaemon_waken; 160 161/* 162 * If user pmap is processed with mmu_booke_remove and the resident count 163 * drops to 0, there are no more pages to remove, so we need not continue. 
164 */ 165#define PMAP_REMOVE_DONE(pmap) \ 166 ((pmap) != kernel_pmap && (pmap)->pm_stats.resident_count == 0) 167 168extern void tlb_lock(uint32_t *); 169extern void tlb_unlock(uint32_t *); 170extern void tid_flush(tlbtid_t); 171 172/**************************************************************************/ 173/* TLB and TID handling */ 174/**************************************************************************/ 175 176/* Translation ID busy table */ 177static volatile pmap_t tidbusy[MAXCPU][TID_MAX + 1]; 178 179/* 180 * TLB0 capabilities (entry, way numbers etc.). These can vary between e500 181 * core revisions and should be read from h/w registers during early config. 182 */ 183uint32_t tlb0_entries; 184uint32_t tlb0_ways; 185uint32_t tlb0_entries_per_way; 186 187#define TLB0_ENTRIES (tlb0_entries) 188#define TLB0_WAYS (tlb0_ways) 189#define TLB0_ENTRIES_PER_WAY (tlb0_entries_per_way) 190 191#define TLB1_ENTRIES 16 192 193/* In-ram copy of the TLB1 */ 194static tlb_entry_t tlb1[TLB1_ENTRIES]; 195 196/* Next free entry in the TLB1 */ 197static unsigned int tlb1_idx; 198 199static tlbtid_t tid_alloc(struct pmap *); 200 201static void tlb_print_entry(int, uint32_t, uint32_t, uint32_t, uint32_t); 202 203static int tlb1_set_entry(vm_offset_t, vm_offset_t, vm_size_t, uint32_t); 204static void tlb1_write_entry(unsigned int); 205static int tlb1_iomapped(int, vm_paddr_t, vm_size_t, vm_offset_t *); 206static vm_size_t tlb1_mapin_region(vm_offset_t, vm_offset_t, vm_size_t); 207 208static vm_size_t tsize2size(unsigned int); 209static unsigned int size2tsize(vm_size_t); 210static unsigned int ilog2(unsigned int); 211 212static void set_mas4_defaults(void); 213 214static inline void tlb0_flush_entry(vm_offset_t); 215static inline unsigned int tlb0_tableidx(vm_offset_t, unsigned int); 216 217/**************************************************************************/ 218/* Page table management */ 219/**************************************************************************/ 220 221/* Data for the pv entry allocation mechanism */ 222static uma_zone_t pvzone; 223static struct vm_object pvzone_obj; 224static int pv_entry_count = 0, pv_entry_max = 0, pv_entry_high_water = 0; 225 226#define PV_ENTRY_ZONE_MIN 2048 /* min pv entries in uma zone */ 227 228#ifndef PMAP_SHPGPERPROC 229#define PMAP_SHPGPERPROC 200 230#endif 231 232static void ptbl_init(void); 233static struct ptbl_buf *ptbl_buf_alloc(void); 234static void ptbl_buf_free(struct ptbl_buf *); 235static void ptbl_free_pmap_ptbl(pmap_t, pte_t *); 236 237static pte_t *ptbl_alloc(mmu_t, pmap_t, unsigned int); 238static void ptbl_free(mmu_t, pmap_t, unsigned int); 239static void ptbl_hold(mmu_t, pmap_t, unsigned int); 240static int ptbl_unhold(mmu_t, pmap_t, unsigned int); 241 242static vm_paddr_t pte_vatopa(mmu_t, pmap_t, vm_offset_t); 243static pte_t *pte_find(mmu_t, pmap_t, vm_offset_t); 244static void pte_enter(mmu_t, pmap_t, vm_page_t, vm_offset_t, uint32_t); 245static int pte_remove(mmu_t, pmap_t, vm_offset_t, uint8_t); 246 247static pv_entry_t pv_alloc(void); 248static void pv_free(pv_entry_t); 249static void pv_insert(pmap_t, vm_offset_t, vm_page_t); 250static void pv_remove(pmap_t, vm_offset_t, vm_page_t); 251 252/* Number of kva ptbl buffers, each covering one ptbl (PTBL_PAGES). */ 253#define PTBL_BUFS (128 * 16) 254 255struct ptbl_buf { 256 TAILQ_ENTRY(ptbl_buf) link; /* list link */ 257 vm_offset_t kva; /* va of mapping */ 258}; 259 260/* ptbl free list and a lock used for access synchronization. 
*/ 261static TAILQ_HEAD(, ptbl_buf) ptbl_buf_freelist; 262static struct mtx ptbl_buf_freelist_lock; 263 264/* Base address of kva space allocated fot ptbl bufs. */ 265static vm_offset_t ptbl_buf_pool_vabase; 266 267/* Pointer to ptbl_buf structures. */ 268static struct ptbl_buf *ptbl_bufs; 269 270void pmap_bootstrap_ap(volatile uint32_t *); 271 272/* 273 * Kernel MMU interface 274 */ 275static void mmu_booke_change_wiring(mmu_t, pmap_t, vm_offset_t, boolean_t); 276static void mmu_booke_clear_modify(mmu_t, vm_page_t); 277static void mmu_booke_clear_reference(mmu_t, vm_page_t); 278static void mmu_booke_copy(mmu_t, pmap_t, pmap_t, vm_offset_t, 279 vm_size_t, vm_offset_t); 280static void mmu_booke_copy_page(mmu_t, vm_page_t, vm_page_t); 281static void mmu_booke_enter(mmu_t, pmap_t, vm_offset_t, vm_page_t, 282 vm_prot_t, boolean_t); 283static void mmu_booke_enter_object(mmu_t, pmap_t, vm_offset_t, vm_offset_t, 284 vm_page_t, vm_prot_t); 285static void mmu_booke_enter_quick(mmu_t, pmap_t, vm_offset_t, vm_page_t, 286 vm_prot_t); 287static vm_paddr_t mmu_booke_extract(mmu_t, pmap_t, vm_offset_t); 288static vm_page_t mmu_booke_extract_and_hold(mmu_t, pmap_t, vm_offset_t, 289 vm_prot_t); 290static void mmu_booke_init(mmu_t); 291static boolean_t mmu_booke_is_modified(mmu_t, vm_page_t); 292static boolean_t mmu_booke_is_prefaultable(mmu_t, pmap_t, vm_offset_t); 293static boolean_t mmu_booke_ts_referenced(mmu_t, vm_page_t); 294static vm_offset_t mmu_booke_map(mmu_t, vm_offset_t *, vm_offset_t, vm_offset_t, 295 int); 296static int mmu_booke_mincore(mmu_t, pmap_t, vm_offset_t); 297static void mmu_booke_object_init_pt(mmu_t, pmap_t, vm_offset_t, 298 vm_object_t, vm_pindex_t, vm_size_t); 299static boolean_t mmu_booke_page_exists_quick(mmu_t, pmap_t, vm_page_t); 300static void mmu_booke_page_init(mmu_t, vm_page_t); 301static int mmu_booke_page_wired_mappings(mmu_t, vm_page_t); 302static void mmu_booke_pinit(mmu_t, pmap_t); 303static void mmu_booke_pinit0(mmu_t, pmap_t); 304static void mmu_booke_protect(mmu_t, pmap_t, vm_offset_t, vm_offset_t, 305 vm_prot_t); 306static void mmu_booke_qenter(mmu_t, vm_offset_t, vm_page_t *, int); 307static void mmu_booke_qremove(mmu_t, vm_offset_t, int); 308static void mmu_booke_release(mmu_t, pmap_t); 309static void mmu_booke_remove(mmu_t, pmap_t, vm_offset_t, vm_offset_t); 310static void mmu_booke_remove_all(mmu_t, vm_page_t); 311static void mmu_booke_remove_write(mmu_t, vm_page_t); 312static void mmu_booke_zero_page(mmu_t, vm_page_t); 313static void mmu_booke_zero_page_area(mmu_t, vm_page_t, int, int); 314static void mmu_booke_zero_page_idle(mmu_t, vm_page_t); 315static void mmu_booke_activate(mmu_t, struct thread *); 316static void mmu_booke_deactivate(mmu_t, struct thread *); 317static void mmu_booke_bootstrap(mmu_t, vm_offset_t, vm_offset_t); 318static void *mmu_booke_mapdev(mmu_t, vm_offset_t, vm_size_t); 319static void mmu_booke_unmapdev(mmu_t, vm_offset_t, vm_size_t); 320static vm_offset_t mmu_booke_kextract(mmu_t, vm_offset_t); 321static void mmu_booke_kenter(mmu_t, vm_offset_t, vm_offset_t); 322static void mmu_booke_kremove(mmu_t, vm_offset_t); 323static boolean_t mmu_booke_dev_direct_mapped(mmu_t, vm_offset_t, vm_size_t); 324static boolean_t mmu_booke_page_executable(mmu_t, vm_page_t); 325static vm_offset_t mmu_booke_dumpsys_map(mmu_t, struct pmap_md *, 326 vm_size_t, vm_size_t *); 327static void mmu_booke_dumpsys_unmap(mmu_t, struct pmap_md *, 328 vm_size_t, vm_offset_t); 329static struct pmap_md *mmu_booke_scan_md(mmu_t, struct pmap_md *); 330 331static 
mmu_method_t mmu_booke_methods[] = { 332 /* pmap dispatcher interface */ 333 MMUMETHOD(mmu_change_wiring, mmu_booke_change_wiring), 334 MMUMETHOD(mmu_clear_modify, mmu_booke_clear_modify), 335 MMUMETHOD(mmu_clear_reference, mmu_booke_clear_reference), 336 MMUMETHOD(mmu_copy, mmu_booke_copy), 337 MMUMETHOD(mmu_copy_page, mmu_booke_copy_page), 338 MMUMETHOD(mmu_enter, mmu_booke_enter), 339 MMUMETHOD(mmu_enter_object, mmu_booke_enter_object), 340 MMUMETHOD(mmu_enter_quick, mmu_booke_enter_quick), 341 MMUMETHOD(mmu_extract, mmu_booke_extract), 342 MMUMETHOD(mmu_extract_and_hold, mmu_booke_extract_and_hold), 343 MMUMETHOD(mmu_init, mmu_booke_init), 344 MMUMETHOD(mmu_is_modified, mmu_booke_is_modified), 345 MMUMETHOD(mmu_is_prefaultable, mmu_booke_is_prefaultable), 346 MMUMETHOD(mmu_ts_referenced, mmu_booke_ts_referenced), 347 MMUMETHOD(mmu_map, mmu_booke_map), 348 MMUMETHOD(mmu_mincore, mmu_booke_mincore), 349 MMUMETHOD(mmu_object_init_pt, mmu_booke_object_init_pt), 350 MMUMETHOD(mmu_page_exists_quick,mmu_booke_page_exists_quick), 351 MMUMETHOD(mmu_page_init, mmu_booke_page_init), 352 MMUMETHOD(mmu_page_wired_mappings, mmu_booke_page_wired_mappings), 353 MMUMETHOD(mmu_pinit, mmu_booke_pinit), 354 MMUMETHOD(mmu_pinit0, mmu_booke_pinit0), 355 MMUMETHOD(mmu_protect, mmu_booke_protect), 356 MMUMETHOD(mmu_qenter, mmu_booke_qenter), 357 MMUMETHOD(mmu_qremove, mmu_booke_qremove), 358 MMUMETHOD(mmu_release, mmu_booke_release), 359 MMUMETHOD(mmu_remove, mmu_booke_remove), 360 MMUMETHOD(mmu_remove_all, mmu_booke_remove_all), 361 MMUMETHOD(mmu_remove_write, mmu_booke_remove_write), 362 MMUMETHOD(mmu_zero_page, mmu_booke_zero_page), 363 MMUMETHOD(mmu_zero_page_area, mmu_booke_zero_page_area), 364 MMUMETHOD(mmu_zero_page_idle, mmu_booke_zero_page_idle), 365 MMUMETHOD(mmu_activate, mmu_booke_activate), 366 MMUMETHOD(mmu_deactivate, mmu_booke_deactivate), 367 368 /* Internal interfaces */ 369 MMUMETHOD(mmu_bootstrap, mmu_booke_bootstrap), 370 MMUMETHOD(mmu_dev_direct_mapped,mmu_booke_dev_direct_mapped), 371 MMUMETHOD(mmu_mapdev, mmu_booke_mapdev), 372 MMUMETHOD(mmu_kenter, mmu_booke_kenter), 373 MMUMETHOD(mmu_kextract, mmu_booke_kextract), 374/* MMUMETHOD(mmu_kremove, mmu_booke_kremove), */ 375 MMUMETHOD(mmu_page_executable, mmu_booke_page_executable), 376 MMUMETHOD(mmu_unmapdev, mmu_booke_unmapdev), 377 378 /* dumpsys() support */ 379 MMUMETHOD(mmu_dumpsys_map, mmu_booke_dumpsys_map), 380 MMUMETHOD(mmu_dumpsys_unmap, mmu_booke_dumpsys_unmap), 381 MMUMETHOD(mmu_scan_md, mmu_booke_scan_md), 382 383 { 0, 0 } 384}; 385 386static mmu_def_t booke_mmu = { 387 MMU_TYPE_BOOKE, 388 mmu_booke_methods, 389 0 390}; 391MMU_DEF(booke_mmu); 392 393static inline void 394tlb_miss_lock(void) 395{ 396#ifdef SMP 397 struct pcpu *pc; 398 399 if (!smp_started) 400 return; 401 402 SLIST_FOREACH(pc, &cpuhead, pc_allcpu) { 403 if (pc != pcpup) { 404 405 CTR3(KTR_PMAP, "%s: tlb miss LOCK of CPU=%d, " 406 "tlb_lock=%p", __func__, pc->pc_cpuid, pc->pc_booke_tlb_lock); 407 408 KASSERT((pc->pc_cpuid != PCPU_GET(cpuid)), 409 ("tlb_miss_lock: tried to lock self")); 410 411 tlb_lock(pc->pc_booke_tlb_lock); 412 413 CTR1(KTR_PMAP, "%s: locked", __func__); 414 } 415 } 416#endif 417} 418 419static inline void 420tlb_miss_unlock(void) 421{ 422#ifdef SMP 423 struct pcpu *pc; 424 425 if (!smp_started) 426 return; 427 428 SLIST_FOREACH(pc, &cpuhead, pc_allcpu) { 429 if (pc != pcpup) { 430 CTR2(KTR_PMAP, "%s: tlb miss UNLOCK of CPU=%d", 431 __func__, pc->pc_cpuid); 432 433 tlb_unlock(pc->pc_booke_tlb_lock); 434 435 CTR1(KTR_PMAP, "%s: unlocked", 
__func__); 436 } 437 } 438#endif 439} 440 441/* Return number of entries in TLB0. */ 442static __inline void 443tlb0_get_tlbconf(void) 444{ 445 uint32_t tlb0_cfg; 446 447 tlb0_cfg = mfspr(SPR_TLB0CFG); 448 tlb0_entries = tlb0_cfg & TLBCFG_NENTRY_MASK; 449 tlb0_ways = (tlb0_cfg & TLBCFG_ASSOC_MASK) >> TLBCFG_ASSOC_SHIFT; 450 tlb0_entries_per_way = tlb0_entries / tlb0_ways; 451} 452 453/* Initialize pool of kva ptbl buffers. */ 454static void 455ptbl_init(void) 456{ 457 int i; 458 459 CTR3(KTR_PMAP, "%s: s (ptbl_bufs = 0x%08x size 0x%08x)", __func__, 460 (uint32_t)ptbl_bufs, sizeof(struct ptbl_buf) * PTBL_BUFS); 461 CTR3(KTR_PMAP, "%s: s (ptbl_buf_pool_vabase = 0x%08x size = 0x%08x)", 462 __func__, ptbl_buf_pool_vabase, PTBL_BUFS * PTBL_PAGES * PAGE_SIZE); 463 464 mtx_init(&ptbl_buf_freelist_lock, "ptbl bufs lock", NULL, MTX_DEF); 465 TAILQ_INIT(&ptbl_buf_freelist); 466 467 for (i = 0; i < PTBL_BUFS; i++) { 468 ptbl_bufs[i].kva = ptbl_buf_pool_vabase + i * PTBL_PAGES * PAGE_SIZE; 469 TAILQ_INSERT_TAIL(&ptbl_buf_freelist, &ptbl_bufs[i], link); 470 } 471} 472 473/* Get a ptbl_buf from the freelist. */ 474static struct ptbl_buf * 475ptbl_buf_alloc(void) 476{ 477 struct ptbl_buf *buf; 478 479 mtx_lock(&ptbl_buf_freelist_lock); 480 buf = TAILQ_FIRST(&ptbl_buf_freelist); 481 if (buf != NULL) 482 TAILQ_REMOVE(&ptbl_buf_freelist, buf, link); 483 mtx_unlock(&ptbl_buf_freelist_lock); 484 485 CTR2(KTR_PMAP, "%s: buf = %p", __func__, buf); 486 487 return (buf); 488} 489 490/* Return ptbl buff to free pool. */ 491static void 492ptbl_buf_free(struct ptbl_buf *buf) 493{ 494 495 CTR2(KTR_PMAP, "%s: buf = %p", __func__, buf); 496 497 mtx_lock(&ptbl_buf_freelist_lock); 498 TAILQ_INSERT_TAIL(&ptbl_buf_freelist, buf, link); 499 mtx_unlock(&ptbl_buf_freelist_lock); 500} 501 502/* 503 * Search the list of allocated ptbl bufs and find on list of allocated ptbls 504 */ 505static void 506ptbl_free_pmap_ptbl(pmap_t pmap, pte_t *ptbl) 507{ 508 struct ptbl_buf *pbuf; 509 510 CTR2(KTR_PMAP, "%s: ptbl = %p", __func__, ptbl); 511 512 PMAP_LOCK_ASSERT(pmap, MA_OWNED); 513 514 TAILQ_FOREACH(pbuf, &pmap->pm_ptbl_list, link) 515 if (pbuf->kva == (vm_offset_t)ptbl) { 516 /* Remove from pmap ptbl buf list. */ 517 TAILQ_REMOVE(&pmap->pm_ptbl_list, pbuf, link); 518 519 /* Free corresponding ptbl buf. */ 520 ptbl_buf_free(pbuf); 521 break; 522 } 523} 524 525/* Allocate page table. */ 526static pte_t * 527ptbl_alloc(mmu_t mmu, pmap_t pmap, unsigned int pdir_idx) 528{ 529 vm_page_t mtbl[PTBL_PAGES]; 530 vm_page_t m; 531 struct ptbl_buf *pbuf; 532 unsigned int pidx; 533 pte_t *ptbl; 534 int i; 535 536 CTR4(KTR_PMAP, "%s: pmap = %p su = %d pdir_idx = %d", __func__, pmap, 537 (pmap == kernel_pmap), pdir_idx); 538 539 KASSERT((pdir_idx <= (VM_MAXUSER_ADDRESS / PDIR_SIZE)), 540 ("ptbl_alloc: invalid pdir_idx")); 541 KASSERT((pmap->pm_pdir[pdir_idx] == NULL), 542 ("pte_alloc: valid ptbl entry exists!")); 543 544 pbuf = ptbl_buf_alloc(); 545 if (pbuf == NULL) 546 panic("pte_alloc: couldn't alloc kernel virtual memory"); 547 548 ptbl = (pte_t *)pbuf->kva; 549 550 CTR2(KTR_PMAP, "%s: ptbl kva = %p", __func__, ptbl); 551 552 /* Allocate ptbl pages, this will sleep! */ 553 for (i = 0; i < PTBL_PAGES; i++) { 554 pidx = (PTBL_PAGES * pdir_idx) + i; 555 while ((m = vm_page_alloc(NULL, pidx, 556 VM_ALLOC_NOOBJ | VM_ALLOC_WIRED)) == NULL) { 557 558 PMAP_UNLOCK(pmap); 559 vm_page_unlock_queues(); 560 VM_WAIT; 561 vm_page_lock_queues(); 562 PMAP_LOCK(pmap); 563 } 564 mtbl[i] = m; 565 } 566 567 /* Map allocated pages into kernel_pmap. 
*/ 568 mmu_booke_qenter(mmu, (vm_offset_t)ptbl, mtbl, PTBL_PAGES); 569 570 /* Zero whole ptbl. */ 571 bzero((caddr_t)ptbl, PTBL_PAGES * PAGE_SIZE); 572 573 /* Add pbuf to the pmap ptbl bufs list. */ 574 TAILQ_INSERT_TAIL(&pmap->pm_ptbl_list, pbuf, link); 575 576 return (ptbl); 577} 578 579/* Free ptbl pages and invalidate pdir entry. */ 580static void 581ptbl_free(mmu_t mmu, pmap_t pmap, unsigned int pdir_idx) 582{ 583 pte_t *ptbl; 584 vm_paddr_t pa; 585 vm_offset_t va; 586 vm_page_t m; 587 int i; 588 589 CTR4(KTR_PMAP, "%s: pmap = %p su = %d pdir_idx = %d", __func__, pmap, 590 (pmap == kernel_pmap), pdir_idx); 591 592 KASSERT((pdir_idx <= (VM_MAXUSER_ADDRESS / PDIR_SIZE)), 593 ("ptbl_free: invalid pdir_idx")); 594 595 ptbl = pmap->pm_pdir[pdir_idx]; 596 597 CTR2(KTR_PMAP, "%s: ptbl = %p", __func__, ptbl); 598 599 KASSERT((ptbl != NULL), ("ptbl_free: null ptbl")); 600 601 /* 602 * Invalidate the pdir entry as soon as possible, so that other CPUs 603 * don't attempt to look up the page tables we are releasing. 604 */ 605 mtx_lock_spin(&tlbivax_mutex); 606 tlb_miss_lock(); 607 608 pmap->pm_pdir[pdir_idx] = NULL; 609 610 tlb_miss_unlock(); 611 mtx_unlock_spin(&tlbivax_mutex); 612 613 for (i = 0; i < PTBL_PAGES; i++) { 614 va = ((vm_offset_t)ptbl + (i * PAGE_SIZE)); 615 pa = pte_vatopa(mmu, kernel_pmap, va); 616 m = PHYS_TO_VM_PAGE(pa); 617 vm_page_free_zero(m); 618 atomic_subtract_int(&cnt.v_wire_count, 1); 619 mmu_booke_kremove(mmu, va); 620 } 621 622 ptbl_free_pmap_ptbl(pmap, ptbl); 623} 624 625/* 626 * Decrement ptbl pages hold count and attempt to free ptbl pages. 627 * Called when removing pte entry from ptbl. 628 * 629 * Return 1 if ptbl pages were freed. 630 */ 631static int 632ptbl_unhold(mmu_t mmu, pmap_t pmap, unsigned int pdir_idx) 633{ 634 pte_t *ptbl; 635 vm_paddr_t pa; 636 vm_page_t m; 637 int i; 638 639 CTR4(KTR_PMAP, "%s: pmap = %p su = %d pdir_idx = %d", __func__, pmap, 640 (pmap == kernel_pmap), pdir_idx); 641 642 KASSERT((pdir_idx <= (VM_MAXUSER_ADDRESS / PDIR_SIZE)), 643 ("ptbl_unhold: invalid pdir_idx")); 644 KASSERT((pmap != kernel_pmap), 645 ("ptbl_unhold: unholding kernel ptbl!")); 646 647 ptbl = pmap->pm_pdir[pdir_idx]; 648 649 //debugf("ptbl_unhold: ptbl = 0x%08x\n", (u_int32_t)ptbl); 650 KASSERT(((vm_offset_t)ptbl >= VM_MIN_KERNEL_ADDRESS), 651 ("ptbl_unhold: non kva ptbl")); 652 653 /* decrement hold count */ 654 for (i = 0; i < PTBL_PAGES; i++) { 655 pa = pte_vatopa(mmu, kernel_pmap, 656 (vm_offset_t)ptbl + (i * PAGE_SIZE)); 657 m = PHYS_TO_VM_PAGE(pa); 658 m->wire_count--; 659 } 660 661 /* 662 * Free ptbl pages if there are no pte etries in this ptbl. 663 * wire_count has the same value for all ptbl pages, so check the last 664 * page. 665 */ 666 if (m->wire_count == 0) { 667 ptbl_free(mmu, pmap, pdir_idx); 668 669 //debugf("ptbl_unhold: e (freed ptbl)\n"); 670 return (1); 671 } 672 673 return (0); 674} 675 676/* 677 * Increment hold count for ptbl pages. This routine is used when a new pte 678 * entry is being inserted into the ptbl. 
679 */ 680static void 681ptbl_hold(mmu_t mmu, pmap_t pmap, unsigned int pdir_idx) 682{ 683 vm_paddr_t pa; 684 pte_t *ptbl; 685 vm_page_t m; 686 int i; 687 688 CTR3(KTR_PMAP, "%s: pmap = %p pdir_idx = %d", __func__, pmap, 689 pdir_idx); 690 691 KASSERT((pdir_idx <= (VM_MAXUSER_ADDRESS / PDIR_SIZE)), 692 ("ptbl_hold: invalid pdir_idx")); 693 KASSERT((pmap != kernel_pmap), 694 ("ptbl_hold: holding kernel ptbl!")); 695 696 ptbl = pmap->pm_pdir[pdir_idx]; 697 698 KASSERT((ptbl != NULL), ("ptbl_hold: null ptbl")); 699 700 for (i = 0; i < PTBL_PAGES; i++) { 701 pa = pte_vatopa(mmu, kernel_pmap, 702 (vm_offset_t)ptbl + (i * PAGE_SIZE)); 703 m = PHYS_TO_VM_PAGE(pa); 704 m->wire_count++; 705 } 706} 707 708/* Allocate pv_entry structure. */ 709pv_entry_t 710pv_alloc(void) 711{ 712 pv_entry_t pv; 713 714 pv_entry_count++; 715 if ((pv_entry_count > pv_entry_high_water) && 716 (pagedaemon_waken == 0)) { 717 pagedaemon_waken = 1; 718 wakeup(&vm_pages_needed); 719 } 720 pv = uma_zalloc(pvzone, M_NOWAIT); 721 722 return (pv); 723} 724 725/* Free pv_entry structure. */ 726static __inline void 727pv_free(pv_entry_t pve) 728{ 729 730 pv_entry_count--; 731 uma_zfree(pvzone, pve); 732} 733 734 735/* Allocate and initialize pv_entry structure. */ 736static void 737pv_insert(pmap_t pmap, vm_offset_t va, vm_page_t m) 738{ 739 pv_entry_t pve; 740 741 //int su = (pmap == kernel_pmap); 742 //debugf("pv_insert: s (su = %d pmap = 0x%08x va = 0x%08x m = 0x%08x)\n", su, 743 // (u_int32_t)pmap, va, (u_int32_t)m); 744 745 pve = pv_alloc(); 746 if (pve == NULL) 747 panic("pv_insert: no pv entries!"); 748 749 pve->pv_pmap = pmap; 750 pve->pv_va = va; 751 752 /* add to pv_list */ 753 PMAP_LOCK_ASSERT(pmap, MA_OWNED); 754 mtx_assert(&vm_page_queue_mtx, MA_OWNED); 755 756 TAILQ_INSERT_TAIL(&m->md.pv_list, pve, pv_link); 757 758 //debugf("pv_insert: e\n"); 759} 760 761/* Destroy pv entry. */ 762static void 763pv_remove(pmap_t pmap, vm_offset_t va, vm_page_t m) 764{ 765 pv_entry_t pve; 766 767 //int su = (pmap == kernel_pmap); 768 //debugf("pv_remove: s (su = %d pmap = 0x%08x va = 0x%08x)\n", su, (u_int32_t)pmap, va); 769 770 PMAP_LOCK_ASSERT(pmap, MA_OWNED); 771 mtx_assert(&vm_page_queue_mtx, MA_OWNED); 772 773 /* find pv entry */ 774 TAILQ_FOREACH(pve, &m->md.pv_list, pv_link) { 775 if ((pmap == pve->pv_pmap) && (va == pve->pv_va)) { 776 /* remove from pv_list */ 777 TAILQ_REMOVE(&m->md.pv_list, pve, pv_link); 778 if (TAILQ_EMPTY(&m->md.pv_list)) 779 vm_page_flag_clear(m, PG_WRITEABLE); 780 781 /* free pv entry struct */ 782 pv_free(pve); 783 break; 784 } 785 } 786 787 //debugf("pv_remove: e\n"); 788} 789 790/* 791 * Clean pte entry, try to free page table page if requested. 792 * 793 * Return 1 if ptbl pages were freed, otherwise return 0. 794 */ 795static int 796pte_remove(mmu_t mmu, pmap_t pmap, vm_offset_t va, uint8_t flags) 797{ 798 unsigned int pdir_idx = PDIR_IDX(va); 799 unsigned int ptbl_idx = PTBL_IDX(va); 800 vm_page_t m; 801 pte_t *ptbl; 802 pte_t *pte; 803 804 //int su = (pmap == kernel_pmap); 805 //debugf("pte_remove: s (su = %d pmap = 0x%08x va = 0x%08x flags = %d)\n", 806 // su, (u_int32_t)pmap, va, flags); 807 808 ptbl = pmap->pm_pdir[pdir_idx]; 809 KASSERT(ptbl, ("pte_remove: null ptbl")); 810 811 pte = &ptbl[ptbl_idx]; 812 813 if (pte == NULL || !PTE_ISVALID(pte)) 814 return (0); 815 816 if (PTE_ISWIRED(pte)) 817 pmap->pm_stats.wired_count--; 818 819 /* Handle managed entry. */ 820 if (PTE_ISMANAGED(pte)) { 821 /* Get vm_page_t for mapped pte. 
*/ 822 m = PHYS_TO_VM_PAGE(PTE_PA(pte)); 823 824 if (PTE_ISMODIFIED(pte)) 825 vm_page_dirty(m); 826 827 if (PTE_ISREFERENCED(pte)) 828 vm_page_flag_set(m, PG_REFERENCED); 829 830 pv_remove(pmap, va, m); 831 } 832 833 mtx_lock_spin(&tlbivax_mutex); 834 tlb_miss_lock(); 835 836 tlb0_flush_entry(va); 837 pte->flags = 0; 838 pte->rpn = 0; 839 840 tlb_miss_unlock(); 841 mtx_unlock_spin(&tlbivax_mutex); 842 843 pmap->pm_stats.resident_count--; 844 845 if (flags & PTBL_UNHOLD) { 846 //debugf("pte_remove: e (unhold)\n"); 847 return (ptbl_unhold(mmu, pmap, pdir_idx)); 848 } 849 850 //debugf("pte_remove: e\n"); 851 return (0); 852} 853 854/* 855 * Insert PTE for a given page and virtual address. 856 */ 857static void 858pte_enter(mmu_t mmu, pmap_t pmap, vm_page_t m, vm_offset_t va, uint32_t flags) 859{ 860 unsigned int pdir_idx = PDIR_IDX(va); 861 unsigned int ptbl_idx = PTBL_IDX(va); 862 pte_t *ptbl, *pte; 863 864 CTR4(KTR_PMAP, "%s: su = %d pmap = %p va = %p", __func__, 865 pmap == kernel_pmap, pmap, va); 866 867 /* Get the page table pointer. */ 868 ptbl = pmap->pm_pdir[pdir_idx]; 869 870 if (ptbl == NULL) { 871 /* Allocate page table pages. */ 872 ptbl = ptbl_alloc(mmu, pmap, pdir_idx); 873 } else { 874 /* 875 * Check if there is valid mapping for requested 876 * va, if there is, remove it. 877 */ 878 pte = &pmap->pm_pdir[pdir_idx][ptbl_idx]; 879 if (PTE_ISVALID(pte)) { 880 pte_remove(mmu, pmap, va, PTBL_HOLD); 881 } else { 882 /* 883 * pte is not used, increment hold count 884 * for ptbl pages. 885 */ 886 if (pmap != kernel_pmap) 887 ptbl_hold(mmu, pmap, pdir_idx); 888 } 889 } 890 891 /* 892 * Insert pv_entry into pv_list for mapped page if part of managed 893 * memory. 894 */ 895 if ((m->flags & PG_FICTITIOUS) == 0) { 896 if ((m->flags & PG_UNMANAGED) == 0) { 897 flags |= PTE_MANAGED; 898 899 /* Create and insert pv entry. */ 900 pv_insert(pmap, va, m); 901 } 902 } 903 904 pmap->pm_stats.resident_count++; 905 906 mtx_lock_spin(&tlbivax_mutex); 907 tlb_miss_lock(); 908 909 tlb0_flush_entry(va); 910 if (pmap->pm_pdir[pdir_idx] == NULL) { 911 /* 912 * If we just allocated a new page table, hook it in 913 * the pdir. 914 */ 915 pmap->pm_pdir[pdir_idx] = ptbl; 916 } 917 pte = &(pmap->pm_pdir[pdir_idx][ptbl_idx]); 918 pte->rpn = VM_PAGE_TO_PHYS(m) & ~PTE_PA_MASK; 919 pte->flags |= (PTE_VALID | flags); 920 921 tlb_miss_unlock(); 922 mtx_unlock_spin(&tlbivax_mutex); 923} 924 925/* Return the pa for the given pmap/va. */ 926static vm_paddr_t 927pte_vatopa(mmu_t mmu, pmap_t pmap, vm_offset_t va) 928{ 929 vm_paddr_t pa = 0; 930 pte_t *pte; 931 932 pte = pte_find(mmu, pmap, va); 933 if ((pte != NULL) && PTE_ISVALID(pte)) 934 pa = (PTE_PA(pte) | (va & PTE_PA_MASK)); 935 return (pa); 936} 937 938/* Get a pointer to a PTE in a page table. */ 939static pte_t * 940pte_find(mmu_t mmu, pmap_t pmap, vm_offset_t va) 941{ 942 unsigned int pdir_idx = PDIR_IDX(va); 943 unsigned int ptbl_idx = PTBL_IDX(va); 944 945 KASSERT((pmap != NULL), ("pte_find: invalid pmap")); 946 947 if (pmap->pm_pdir[pdir_idx]) 948 return (&(pmap->pm_pdir[pdir_idx][ptbl_idx])); 949 950 return (NULL); 951} 952 953/**************************************************************************/ 954/* PMAP related */ 955/**************************************************************************/ 956 957/* 958 * This is called during e500_init, before the system is really initialized. 
959 */ 960static void 961mmu_booke_bootstrap(mmu_t mmu, vm_offset_t start, vm_offset_t kernelend) 962{ 963 vm_offset_t phys_kernelend; 964 struct mem_region *mp, *mp1; 965 int cnt, i, j; 966 u_int s, e, sz; 967 u_int phys_avail_count; 968 vm_size_t physsz, hwphyssz, kstack0_sz; 969 vm_offset_t kernel_pdir, kstack0, va; 970 vm_paddr_t kstack0_phys; 971 pte_t *pte; 972 973 debugf("mmu_booke_bootstrap: entered\n"); 974 975 /* Initialize invalidation mutex */ 976 mtx_init(&tlbivax_mutex, "tlbivax", NULL, MTX_SPIN); 977 978 /* Read TLB0 size and associativity. */ 979 tlb0_get_tlbconf(); 980 981 /* Align kernel start and end address (kernel image). */ 982 kernstart = trunc_page(start); 983 data_start = round_page(kernelend); 984 kernsize = data_start - kernstart; 985 986 data_end = data_start; 987 988 /* Allocate space for the message buffer. */ 989 msgbufp = (struct msgbuf *)data_end; 990 data_end += MSGBUF_SIZE; 991 debugf(" msgbufp at 0x%08x end = 0x%08x\n", (uint32_t)msgbufp, 992 data_end); 993 994 data_end = round_page(data_end); 995 996 /* Allocate space for ptbl_bufs. */ 997 ptbl_bufs = (struct ptbl_buf *)data_end; 998 data_end += sizeof(struct ptbl_buf) * PTBL_BUFS; 999 debugf(" ptbl_bufs at 0x%08x end = 0x%08x\n", (uint32_t)ptbl_bufs, 1000 data_end); 1001 1002 data_end = round_page(data_end); 1003 1004 /* Allocate PTE tables for kernel KVA. */ 1005 kernel_pdir = data_end; 1006 kernel_ptbls = (VM_MAX_KERNEL_ADDRESS - VM_MIN_KERNEL_ADDRESS + 1007 PDIR_SIZE - 1) / PDIR_SIZE; 1008 data_end += kernel_ptbls * PTBL_PAGES * PAGE_SIZE; 1009 debugf(" kernel ptbls: %d\n", kernel_ptbls); 1010 debugf(" kernel pdir at 0x%08x end = 0x%08x\n", kernel_pdir, data_end); 1011 1012 debugf(" data_end: 0x%08x\n", data_end); 1013 if (data_end - kernstart > 0x1000000) { 1014 data_end = (data_end + 0x3fffff) & ~0x3fffff; 1015 tlb1_mapin_region(kernstart + 0x1000000, 1016 kernload + 0x1000000, data_end - kernstart - 0x1000000); 1017 } else 1018 data_end = (data_end + 0xffffff) & ~0xffffff; 1019 1020 debugf(" updated data_end: 0x%08x\n", data_end); 1021 1022 kernsize += data_end - data_start; 1023 1024 /* 1025 * Clear the structures - note we can only do it safely after the 1026 * possible additional TLB1 translations are in place (above) so that 1027 * all range up to the currently calculated 'data_end' is covered. 1028 */ 1029 memset((void *)ptbl_bufs, 0, sizeof(struct ptbl_buf) * PTBL_SIZE); 1030 memset((void *)kernel_pdir, 0, kernel_ptbls * PTBL_PAGES * PAGE_SIZE); 1031 1032 /*******************************************************/ 1033 /* Set the start and end of kva. */ 1034 /*******************************************************/ 1035 virtual_avail = round_page(data_end); 1036 virtual_end = VM_MAX_KERNEL_ADDRESS; 1037 1038 /* Allocate KVA space for page zero/copy operations. */ 1039 zero_page_va = virtual_avail; 1040 virtual_avail += PAGE_SIZE; 1041 zero_page_idle_va = virtual_avail; 1042 virtual_avail += PAGE_SIZE; 1043 copy_page_src_va = virtual_avail; 1044 virtual_avail += PAGE_SIZE; 1045 copy_page_dst_va = virtual_avail; 1046 virtual_avail += PAGE_SIZE; 1047 debugf("zero_page_va = 0x%08x\n", zero_page_va); 1048 debugf("zero_page_idle_va = 0x%08x\n", zero_page_idle_va); 1049 debugf("copy_page_src_va = 0x%08x\n", copy_page_src_va); 1050 debugf("copy_page_dst_va = 0x%08x\n", copy_page_dst_va); 1051 1052 /* Initialize page zero/copy mutexes. 
*/ 1053 mtx_init(&zero_page_mutex, "mmu_booke_zero_page", NULL, MTX_DEF); 1054 mtx_init(©_page_mutex, "mmu_booke_copy_page", NULL, MTX_DEF); 1055 1056 /* Allocate KVA space for ptbl bufs. */ 1057 ptbl_buf_pool_vabase = virtual_avail; 1058 virtual_avail += PTBL_BUFS * PTBL_PAGES * PAGE_SIZE; 1059 debugf("ptbl_buf_pool_vabase = 0x%08x end = 0x%08x\n", 1060 ptbl_buf_pool_vabase, virtual_avail); 1061 1062 /* Calculate corresponding physical addresses for the kernel region. */ 1063 phys_kernelend = kernload + kernsize; 1064 debugf("kernel image and allocated data:\n"); 1065 debugf(" kernload = 0x%08x\n", kernload); 1066 debugf(" kernstart = 0x%08x\n", kernstart); 1067 debugf(" kernsize = 0x%08x\n", kernsize); 1068 1069 if (sizeof(phys_avail) / sizeof(phys_avail[0]) < availmem_regions_sz) 1070 panic("mmu_booke_bootstrap: phys_avail too small"); 1071 1072 /* 1073 * Remove kernel physical address range from avail regions list. Page 1074 * align all regions. Non-page aligned memory isn't very interesting 1075 * to us. Also, sort the entries for ascending addresses. 1076 */ 1077 1078 /* Retrieve phys/avail mem regions */ 1079 mem_regions(&physmem_regions, &physmem_regions_sz, 1080 &availmem_regions, &availmem_regions_sz); 1081 sz = 0; 1082 cnt = availmem_regions_sz; 1083 debugf("processing avail regions:\n"); 1084 for (mp = availmem_regions; mp->mr_size; mp++) { 1085 s = mp->mr_start; 1086 e = mp->mr_start + mp->mr_size; 1087 debugf(" %08x-%08x -> ", s, e); 1088 /* Check whether this region holds all of the kernel. */ 1089 if (s < kernload && e > phys_kernelend) { 1090 availmem_regions[cnt].mr_start = phys_kernelend; 1091 availmem_regions[cnt++].mr_size = e - phys_kernelend; 1092 e = kernload; 1093 } 1094 /* Look whether this regions starts within the kernel. */ 1095 if (s >= kernload && s < phys_kernelend) { 1096 if (e <= phys_kernelend) 1097 goto empty; 1098 s = phys_kernelend; 1099 } 1100 /* Now look whether this region ends within the kernel. */ 1101 if (e > kernload && e <= phys_kernelend) { 1102 if (s >= kernload) 1103 goto empty; 1104 e = kernload; 1105 } 1106 /* Now page align the start and size of the region. */ 1107 s = round_page(s); 1108 e = trunc_page(e); 1109 if (e < s) 1110 e = s; 1111 sz = e - s; 1112 debugf("%08x-%08x = %x\n", s, e, sz); 1113 1114 /* Check whether some memory is left here. */ 1115 if (sz == 0) { 1116 empty: 1117 memmove(mp, mp + 1, 1118 (cnt - (mp - availmem_regions)) * sizeof(*mp)); 1119 cnt--; 1120 mp--; 1121 continue; 1122 } 1123 1124 /* Do an insertion sort. 
*/ 1125 for (mp1 = availmem_regions; mp1 < mp; mp1++) 1126 if (s < mp1->mr_start) 1127 break; 1128 if (mp1 < mp) { 1129 memmove(mp1 + 1, mp1, (char *)mp - (char *)mp1); 1130 mp1->mr_start = s; 1131 mp1->mr_size = sz; 1132 } else { 1133 mp->mr_start = s; 1134 mp->mr_size = sz; 1135 } 1136 } 1137 availmem_regions_sz = cnt; 1138 1139 /*******************************************************/ 1140 /* Steal physical memory for kernel stack from the end */ 1141 /* of the first avail region */ 1142 /*******************************************************/ 1143 kstack0_sz = KSTACK_PAGES * PAGE_SIZE; 1144 kstack0_phys = availmem_regions[0].mr_start + 1145 availmem_regions[0].mr_size; 1146 kstack0_phys -= kstack0_sz; 1147 availmem_regions[0].mr_size -= kstack0_sz; 1148 1149 /*******************************************************/ 1150 /* Fill in phys_avail table, based on availmem_regions */ 1151 /*******************************************************/ 1152 phys_avail_count = 0; 1153 physsz = 0; 1154 hwphyssz = 0; 1155 TUNABLE_ULONG_FETCH("hw.physmem", (u_long *) &hwphyssz); 1156 1157 debugf("fill in phys_avail:\n"); 1158 for (i = 0, j = 0; i < availmem_regions_sz; i++, j += 2) { 1159 1160 debugf(" region: 0x%08x - 0x%08x (0x%08x)\n", 1161 availmem_regions[i].mr_start, 1162 availmem_regions[i].mr_start + 1163 availmem_regions[i].mr_size, 1164 availmem_regions[i].mr_size); 1165 1166 if (hwphyssz != 0 && 1167 (physsz + availmem_regions[i].mr_size) >= hwphyssz) { 1168 debugf(" hw.physmem adjust\n"); 1169 if (physsz < hwphyssz) { 1170 phys_avail[j] = availmem_regions[i].mr_start; 1171 phys_avail[j + 1] = 1172 availmem_regions[i].mr_start + 1173 hwphyssz - physsz; 1174 physsz = hwphyssz; 1175 phys_avail_count++; 1176 } 1177 break; 1178 } 1179 1180 phys_avail[j] = availmem_regions[i].mr_start; 1181 phys_avail[j + 1] = availmem_regions[i].mr_start + 1182 availmem_regions[i].mr_size; 1183 phys_avail_count++; 1184 physsz += availmem_regions[i].mr_size; 1185 } 1186 physmem = btoc(physsz); 1187 1188 /* Calculate the last available physical address. */ 1189 for (i = 0; phys_avail[i + 2] != 0; i += 2) 1190 ; 1191 Maxmem = powerpc_btop(phys_avail[i + 1]); 1192 1193 debugf("Maxmem = 0x%08lx\n", Maxmem); 1194 debugf("phys_avail_count = %d\n", phys_avail_count); 1195 debugf("physsz = 0x%08x physmem = %ld (0x%08lx)\n", physsz, physmem, 1196 physmem); 1197 1198 /*******************************************************/ 1199 /* Initialize (statically allocated) kernel pmap. */ 1200 /*******************************************************/ 1201 PMAP_LOCK_INIT(kernel_pmap); 1202 kptbl_min = VM_MIN_KERNEL_ADDRESS / PDIR_SIZE; 1203 1204 debugf("kernel_pmap = 0x%08x\n", (uint32_t)kernel_pmap); 1205 debugf("kptbl_min = %d, kernel_ptbls = %d\n", kptbl_min, kernel_ptbls); 1206 debugf("kernel pdir range: 0x%08x - 0x%08x\n", 1207 kptbl_min * PDIR_SIZE, (kptbl_min + kernel_ptbls) * PDIR_SIZE - 1); 1208 1209 /* Initialize kernel pdir */ 1210 for (i = 0; i < kernel_ptbls; i++) 1211 kernel_pmap->pm_pdir[kptbl_min + i] = 1212 (pte_t *)(kernel_pdir + (i * PAGE_SIZE * PTBL_PAGES)); 1213 1214 for (i = 0; i < MAXCPU; i++) { 1215 kernel_pmap->pm_tid[i] = TID_KERNEL; 1216 1217 /* Initialize each CPU's tidbusy entry 0 with kernel_pmap */ 1218 tidbusy[i][0] = kernel_pmap; 1219 } 1220 1221 /* 1222 * Fill in PTEs covering kernel code and data. They are not required 1223 * for address translation, as this area is covered by static TLB1 1224 * entries, but for pte_vatopa() to work correctly with kernel area 1225 * addresses. 
1226 */ 1227 for (va = KERNBASE; va < data_end; va += PAGE_SIZE) { 1228 pte = &(kernel_pmap->pm_pdir[PDIR_IDX(va)][PTBL_IDX(va)]); 1229 pte->rpn = kernload + (va - KERNBASE); 1230 pte->flags = PTE_M | PTE_SR | PTE_SW | PTE_SX | PTE_WIRED | 1231 PTE_VALID; 1232 } 1233 /* Mark kernel_pmap active on all CPUs */ 1234 kernel_pmap->pm_active = ~0; 1235 1236 /*******************************************************/ 1237 /* Final setup */ 1238 /*******************************************************/ 1239 1240 /* Enter kstack0 into kernel map, provide guard page */ 1241 kstack0 = virtual_avail + KSTACK_GUARD_PAGES * PAGE_SIZE; 1242 thread0.td_kstack = kstack0; 1243 thread0.td_kstack_pages = KSTACK_PAGES; 1244 1245 debugf("kstack_sz = 0x%08x\n", kstack0_sz); 1246 debugf("kstack0_phys at 0x%08x - 0x%08x\n", 1247 kstack0_phys, kstack0_phys + kstack0_sz); 1248 debugf("kstack0 at 0x%08x - 0x%08x\n", kstack0, kstack0 + kstack0_sz); 1249 1250 virtual_avail += KSTACK_GUARD_PAGES * PAGE_SIZE + kstack0_sz; 1251 for (i = 0; i < KSTACK_PAGES; i++) { 1252 mmu_booke_kenter(mmu, kstack0, kstack0_phys); 1253 kstack0 += PAGE_SIZE; 1254 kstack0_phys += PAGE_SIZE; 1255 } 1256 1257 debugf("virtual_avail = %08x\n", virtual_avail); 1258 debugf("virtual_end = %08x\n", virtual_end); 1259 1260 debugf("mmu_booke_bootstrap: exit\n"); 1261} 1262 1263void 1264pmap_bootstrap_ap(volatile uint32_t *trcp __unused) 1265{ 1266 int i; 1267 1268 /* 1269 * Finish TLB1 configuration: the BSP already set up its TLB1 and we 1270 * have the snapshot of its contents in the s/w tlb1[] table, so use 1271 * these values directly to (re)program AP's TLB1 hardware. 1272 */ 1273 for (i = 0; i < tlb1_idx; i ++) { 1274 /* Skip invalid entries */ 1275 if (!(tlb1[i].mas1 & MAS1_VALID)) 1276 continue; 1277 1278 tlb1_write_entry(i); 1279 } 1280 1281 set_mas4_defaults(); 1282} 1283 1284/* 1285 * Get the physical page address for the given pmap/virtual address. 1286 */ 1287static vm_paddr_t 1288mmu_booke_extract(mmu_t mmu, pmap_t pmap, vm_offset_t va) 1289{ 1290 vm_paddr_t pa; 1291 1292 PMAP_LOCK(pmap); 1293 pa = pte_vatopa(mmu, pmap, va); 1294 PMAP_UNLOCK(pmap); 1295 1296 return (pa); 1297} 1298 1299/* 1300 * Extract the physical page address associated with the given 1301 * kernel virtual address. 1302 */ 1303static vm_paddr_t 1304mmu_booke_kextract(mmu_t mmu, vm_offset_t va) 1305{ 1306 1307 return (pte_vatopa(mmu, kernel_pmap, va)); 1308} 1309 1310/* 1311 * Initialize the pmap module. 1312 * Called by vm_init, to initialize any structures that the pmap 1313 * system needs to map virtual memory. 1314 */ 1315static void 1316mmu_booke_init(mmu_t mmu) 1317{ 1318 int shpgperproc = PMAP_SHPGPERPROC; 1319 1320 /* 1321 * Initialize the address space (zone) for the pv entries. Set a 1322 * high water mark so that the system can recover from excessive 1323 * numbers of pv entries. 1324 */ 1325 pvzone = uma_zcreate("PV ENTRY", sizeof(struct pv_entry), NULL, NULL, 1326 NULL, NULL, UMA_ALIGN_PTR, UMA_ZONE_VM | UMA_ZONE_NOFREE); 1327 1328 TUNABLE_INT_FETCH("vm.pmap.shpgperproc", &shpgperproc); 1329 pv_entry_max = shpgperproc * maxproc + cnt.v_page_count; 1330 1331 TUNABLE_INT_FETCH("vm.pmap.pv_entries", &pv_entry_max); 1332 pv_entry_high_water = 9 * (pv_entry_max / 10); 1333 1334 uma_zone_set_obj(pvzone, &pvzone_obj, pv_entry_max); 1335 1336 /* Pre-fill pvzone with initial number of pv entries. */ 1337 uma_prealloc(pvzone, PV_ENTRY_ZONE_MIN); 1338 1339 /* Initialize ptbl allocation. 
*/ 1340 ptbl_init(); 1341} 1342 1343/* 1344 * Map a list of wired pages into kernel virtual address space. This is 1345 * intended for temporary mappings which do not need page modification or 1346 * references recorded. Existing mappings in the region are overwritten. 1347 */ 1348static void 1349mmu_booke_qenter(mmu_t mmu, vm_offset_t sva, vm_page_t *m, int count) 1350{ 1351 vm_offset_t va; 1352 1353 va = sva; 1354 while (count-- > 0) { 1355 mmu_booke_kenter(mmu, va, VM_PAGE_TO_PHYS(*m)); 1356 va += PAGE_SIZE; 1357 m++; 1358 } 1359} 1360 1361/* 1362 * Remove page mappings from kernel virtual address space. Intended for 1363 * temporary mappings entered by mmu_booke_qenter. 1364 */ 1365static void 1366mmu_booke_qremove(mmu_t mmu, vm_offset_t sva, int count) 1367{ 1368 vm_offset_t va; 1369 1370 va = sva; 1371 while (count-- > 0) { 1372 mmu_booke_kremove(mmu, va); 1373 va += PAGE_SIZE; 1374 } 1375} 1376 1377/* 1378 * Map a wired page into kernel virtual address space. 1379 */ 1380static void 1381mmu_booke_kenter(mmu_t mmu, vm_offset_t va, vm_offset_t pa) 1382{ 1383 unsigned int pdir_idx = PDIR_IDX(va); 1384 unsigned int ptbl_idx = PTBL_IDX(va); 1385 uint32_t flags; 1386 pte_t *pte; 1387 1388 KASSERT(((va >= VM_MIN_KERNEL_ADDRESS) && 1389 (va <= VM_MAX_KERNEL_ADDRESS)), ("mmu_booke_kenter: invalid va")); 1390 1391 flags = 0; 1392 flags |= (PTE_SR | PTE_SW | PTE_SX | PTE_WIRED | PTE_VALID); 1393 flags |= PTE_M; 1394 1395 pte = &(kernel_pmap->pm_pdir[pdir_idx][ptbl_idx]); 1396 1397 mtx_lock_spin(&tlbivax_mutex); 1398 tlb_miss_lock(); 1399 1400 if (PTE_ISVALID(pte)) { 1401 1402 CTR1(KTR_PMAP, "%s: replacing entry!", __func__); 1403 1404 /* Flush entry from TLB0 */ 1405 tlb0_flush_entry(va); 1406 } 1407 1408 pte->rpn = pa & ~PTE_PA_MASK; 1409 pte->flags = flags; 1410 1411 //debugf("mmu_booke_kenter: pdir_idx = %d ptbl_idx = %d va=0x%08x " 1412 // "pa=0x%08x rpn=0x%08x flags=0x%08x\n", 1413 // pdir_idx, ptbl_idx, va, pa, pte->rpn, pte->flags); 1414 1415 /* Flush the real memory from the instruction cache. */ 1416 if ((flags & (PTE_I | PTE_G)) == 0) { 1417 __syncicache((void *)va, PAGE_SIZE); 1418 } 1419 1420 tlb_miss_unlock(); 1421 mtx_unlock_spin(&tlbivax_mutex); 1422} 1423 1424/* 1425 * Remove a page from kernel page table. 1426 */ 1427static void 1428mmu_booke_kremove(mmu_t mmu, vm_offset_t va) 1429{ 1430 unsigned int pdir_idx = PDIR_IDX(va); 1431 unsigned int ptbl_idx = PTBL_IDX(va); 1432 pte_t *pte; 1433 1434// CTR2(KTR_PMAP,("%s: s (va = 0x%08x)\n", __func__, va)); 1435 1436 KASSERT(((va >= VM_MIN_KERNEL_ADDRESS) && 1437 (va <= VM_MAX_KERNEL_ADDRESS)), 1438 ("mmu_booke_kremove: invalid va")); 1439 1440 pte = &(kernel_pmap->pm_pdir[pdir_idx][ptbl_idx]); 1441 1442 if (!PTE_ISVALID(pte)) { 1443 1444 CTR1(KTR_PMAP, "%s: invalid pte", __func__); 1445 1446 return; 1447 } 1448 1449 mtx_lock_spin(&tlbivax_mutex); 1450 tlb_miss_lock(); 1451 1452 /* Invalidate entry in TLB0, update PTE. */ 1453 tlb0_flush_entry(va); 1454 pte->flags = 0; 1455 pte->rpn = 0; 1456 1457 tlb_miss_unlock(); 1458 mtx_unlock_spin(&tlbivax_mutex); 1459} 1460 1461/* 1462 * Initialize pmap associated with process 0. 1463 */ 1464static void 1465mmu_booke_pinit0(mmu_t mmu, pmap_t pmap) 1466{ 1467 1468 mmu_booke_pinit(mmu, pmap); 1469 PCPU_SET(curpmap, pmap); 1470} 1471 1472/* 1473 * Initialize a preallocated and zeroed pmap structure, 1474 * such as one in a vmspace structure. 
1475 */ 1476static void 1477mmu_booke_pinit(mmu_t mmu, pmap_t pmap) 1478{ 1479 int i; 1480 1481 CTR4(KTR_PMAP, "%s: pmap = %p, proc %d '%s'", __func__, pmap, 1482 curthread->td_proc->p_pid, curthread->td_proc->p_comm); 1483 1484 KASSERT((pmap != kernel_pmap), ("pmap_pinit: initializing kernel_pmap")); 1485 1486 PMAP_LOCK_INIT(pmap); 1487 for (i = 0; i < MAXCPU; i++) 1488 pmap->pm_tid[i] = TID_NONE; 1489 pmap->pm_active = 0; 1490 bzero(&pmap->pm_stats, sizeof(pmap->pm_stats)); 1491 bzero(&pmap->pm_pdir, sizeof(pte_t *) * PDIR_NENTRIES); 1492 TAILQ_INIT(&pmap->pm_ptbl_list); 1493} 1494 1495/* 1496 * Release any resources held by the given physical map. 1497 * Called when a pmap initialized by mmu_booke_pinit is being released. 1498 * Should only be called if the map contains no valid mappings. 1499 */ 1500static void 1501mmu_booke_release(mmu_t mmu, pmap_t pmap) 1502{ 1503 1504 printf("mmu_booke_release: s\n"); 1505 1506 KASSERT(pmap->pm_stats.resident_count == 0, 1507 ("pmap_release: pmap resident count %ld != 0", 1508 pmap->pm_stats.resident_count)); 1509 1510 PMAP_LOCK_DESTROY(pmap); 1511} 1512 1513/* 1514 * Insert the given physical page at the specified virtual address in the 1515 * target physical map with the protection requested. If specified the page 1516 * will be wired down. 1517 */ 1518static void 1519mmu_booke_enter(mmu_t mmu, pmap_t pmap, vm_offset_t va, vm_page_t m, 1520 vm_prot_t prot, boolean_t wired) 1521{ 1522 1523 vm_page_lock_queues(); 1524 PMAP_LOCK(pmap); 1525 mmu_booke_enter_locked(mmu, pmap, va, m, prot, wired); 1526 vm_page_unlock_queues(); 1527 PMAP_UNLOCK(pmap); 1528} 1529 1530static void 1531mmu_booke_enter_locked(mmu_t mmu, pmap_t pmap, vm_offset_t va, vm_page_t m, 1532 vm_prot_t prot, boolean_t wired) 1533{ 1534 pte_t *pte; 1535 vm_paddr_t pa; 1536 uint32_t flags; 1537 int su, sync; 1538 1539 pa = VM_PAGE_TO_PHYS(m); 1540 su = (pmap == kernel_pmap); 1541 sync = 0; 1542 1543 //debugf("mmu_booke_enter_locked: s (pmap=0x%08x su=%d tid=%d m=0x%08x va=0x%08x " 1544 // "pa=0x%08x prot=0x%08x wired=%d)\n", 1545 // (u_int32_t)pmap, su, pmap->pm_tid, 1546 // (u_int32_t)m, va, pa, prot, wired); 1547 1548 if (su) { 1549 KASSERT(((va >= virtual_avail) && 1550 (va <= VM_MAX_KERNEL_ADDRESS)), 1551 ("mmu_booke_enter_locked: kernel pmap, non kernel va")); 1552 } else { 1553 KASSERT((va <= VM_MAXUSER_ADDRESS), 1554 ("mmu_booke_enter_locked: user pmap, non user va")); 1555 } 1556 1557 PMAP_LOCK_ASSERT(pmap, MA_OWNED); 1558 1559 /* 1560 * If there is an existing mapping, and the physical address has not 1561 * changed, must be protection or wiring change. 1562 */ 1563 if (((pte = pte_find(mmu, pmap, va)) != NULL) && 1564 (PTE_ISVALID(pte)) && (PTE_PA(pte) == pa)) { 1565 1566 /* 1567 * Before actually updating pte->flags we calculate and 1568 * prepare its new value in a helper var. 1569 */ 1570 flags = pte->flags; 1571 flags &= ~(PTE_UW | PTE_UX | PTE_SW | PTE_SX | PTE_MODIFIED); 1572 1573 /* Wiring change, just update stats. */ 1574 if (wired) { 1575 if (!PTE_ISWIRED(pte)) { 1576 flags |= PTE_WIRED; 1577 pmap->pm_stats.wired_count++; 1578 } 1579 } else { 1580 if (PTE_ISWIRED(pte)) { 1581 flags &= ~PTE_WIRED; 1582 pmap->pm_stats.wired_count--; 1583 } 1584 } 1585 1586 if (prot & VM_PROT_WRITE) { 1587 /* Add write permissions. */ 1588 flags |= PTE_SW; 1589 if (!su) 1590 flags |= PTE_UW; 1591 1592 vm_page_flag_set(m, PG_WRITEABLE); 1593 } else { 1594 /* Handle modified pages, sense modify status. 
*/ 1595 1596 /* 1597 * The PTE_MODIFIED flag could be set by underlying 1598 * TLB misses since we last read it (above), possibly 1599 * other CPUs could update it so we check in the PTE 1600 * directly rather than rely on that saved local flags 1601 * copy. 1602 */ 1603 if (PTE_ISMODIFIED(pte)) 1604 vm_page_dirty(m); 1605 } 1606 1607 if (prot & VM_PROT_EXECUTE) { 1608 flags |= PTE_SX; 1609 if (!su) 1610 flags |= PTE_UX; 1611 1612 /* 1613 * Check existing flags for execute permissions: if we 1614 * are turning execute permissions on, icache should 1615 * be flushed. 1616 */ 1617 if ((flags & (PTE_UX | PTE_SX)) == 0) 1618 sync++; 1619 } 1620 1621 flags &= ~PTE_REFERENCED; 1622 1623 /* 1624 * The new flags value is all calculated -- only now actually 1625 * update the PTE. 1626 */ 1627 mtx_lock_spin(&tlbivax_mutex); 1628 tlb_miss_lock(); 1629 1630 tlb0_flush_entry(va); 1631 pte->flags = flags; 1632 1633 tlb_miss_unlock(); 1634 mtx_unlock_spin(&tlbivax_mutex); 1635 1636 } else { 1637 /* 1638 * If there is an existing mapping, but it's for a different 1639 * physical address, pte_enter() will delete the old mapping. 1640 */ 1641 //if ((pte != NULL) && PTE_ISVALID(pte)) 1642 // debugf("mmu_booke_enter_locked: replace\n"); 1643 //else 1644 // debugf("mmu_booke_enter_locked: new\n"); 1645 1646 /* Now set up the flags and install the new mapping. */ 1647 flags = (PTE_SR | PTE_VALID); 1648 flags |= PTE_M; 1649 1650 if (!su) 1651 flags |= PTE_UR; 1652 1653 if (prot & VM_PROT_WRITE) { 1654 flags |= PTE_SW; 1655 if (!su) 1656 flags |= PTE_UW; 1657 1658 vm_page_flag_set(m, PG_WRITEABLE); 1659 } 1660 1661 if (prot & VM_PROT_EXECUTE) { 1662 flags |= PTE_SX; 1663 if (!su) 1664 flags |= PTE_UX; 1665 } 1666 1667 /* If its wired update stats. */ 1668 if (wired) { 1669 pmap->pm_stats.wired_count++; 1670 flags |= PTE_WIRED; 1671 } 1672 1673 pte_enter(mmu, pmap, m, va, flags); 1674 1675 /* Flush the real memory from the instruction cache. */ 1676 if (prot & VM_PROT_EXECUTE) 1677 sync++; 1678 } 1679 1680 if (sync && (su || pmap == PCPU_GET(curpmap))) { 1681 __syncicache((void *)va, PAGE_SIZE); 1682 sync = 0; 1683 } 1684 1685 if (sync) { 1686 /* Create a temporary mapping. */ 1687 pmap = PCPU_GET(curpmap); 1688 1689 va = 0; 1690 pte = pte_find(mmu, pmap, va); 1691 KASSERT(pte == NULL, ("%s:%d", __func__, __LINE__)); 1692 1693 flags = PTE_SR | PTE_VALID | PTE_UR | PTE_M; 1694 1695 pte_enter(mmu, pmap, m, va, flags); 1696 __syncicache((void *)va, PAGE_SIZE); 1697 pte_remove(mmu, pmap, va, PTBL_UNHOLD); 1698 } 1699} 1700 1701/* 1702 * Maps a sequence of resident pages belonging to the same object. 1703 * The sequence begins with the given page m_start. This page is 1704 * mapped at the given virtual address start. Each subsequent page is 1705 * mapped at a virtual address that is offset from start by the same 1706 * amount as the page is offset from m_start within the object. The 1707 * last page in the sequence is the page with the largest offset from 1708 * m_start that can be mapped at a virtual address less than the given 1709 * virtual address end. Not every virtual page between start and end 1710 * is mapped; only those for which a resident page exists with the 1711 * corresponding offset from m_start are mapped. 
1712 */ 1713static void 1714mmu_booke_enter_object(mmu_t mmu, pmap_t pmap, vm_offset_t start, 1715 vm_offset_t end, vm_page_t m_start, vm_prot_t prot) 1716{ 1717 vm_page_t m; 1718 vm_pindex_t diff, psize; 1719 1720 psize = atop(end - start); 1721 m = m_start; 1722 PMAP_LOCK(pmap); 1723 while (m != NULL && (diff = m->pindex - m_start->pindex) < psize) { 1724 mmu_booke_enter_locked(mmu, pmap, start + ptoa(diff), m, 1725 prot & (VM_PROT_READ | VM_PROT_EXECUTE), FALSE); 1726 m = TAILQ_NEXT(m, listq); 1727 } 1728 PMAP_UNLOCK(pmap); 1729} 1730 1731static void 1732mmu_booke_enter_quick(mmu_t mmu, pmap_t pmap, vm_offset_t va, vm_page_t m, 1733 vm_prot_t prot) 1734{ 1735 1736 PMAP_LOCK(pmap); 1737 mmu_booke_enter_locked(mmu, pmap, va, m, 1738 prot & (VM_PROT_READ | VM_PROT_EXECUTE), FALSE); 1739 PMAP_UNLOCK(pmap); 1740} 1741 1742/* 1743 * Remove the given range of addresses from the specified map. 1744 * 1745 * It is assumed that the start and end are properly rounded to the page size. 1746 */ 1747static void 1748mmu_booke_remove(mmu_t mmu, pmap_t pmap, vm_offset_t va, vm_offset_t endva) 1749{ 1750 pte_t *pte; 1751 uint8_t hold_flag; 1752 1753 int su = (pmap == kernel_pmap); 1754 1755 //debugf("mmu_booke_remove: s (su = %d pmap=0x%08x tid=%d va=0x%08x endva=0x%08x)\n", 1756 // su, (u_int32_t)pmap, pmap->pm_tid, va, endva); 1757 1758 if (su) { 1759 KASSERT(((va >= virtual_avail) && 1760 (va <= VM_MAX_KERNEL_ADDRESS)), 1761 ("mmu_booke_remove: kernel pmap, non kernel va")); 1762 } else { 1763 KASSERT((va <= VM_MAXUSER_ADDRESS), 1764 ("mmu_booke_remove: user pmap, non user va")); 1765 } 1766 1767 if (PMAP_REMOVE_DONE(pmap)) { 1768 //debugf("mmu_booke_remove: e (empty)\n"); 1769 return; 1770 } 1771 1772 hold_flag = PTBL_HOLD_FLAG(pmap); 1773 //debugf("mmu_booke_remove: hold_flag = %d\n", hold_flag); 1774 1775 vm_page_lock_queues(); 1776 PMAP_LOCK(pmap); 1777 for (; va < endva; va += PAGE_SIZE) { 1778 pte = pte_find(mmu, pmap, va); 1779 if ((pte != NULL) && PTE_ISVALID(pte)) 1780 pte_remove(mmu, pmap, va, hold_flag); 1781 } 1782 PMAP_UNLOCK(pmap); 1783 vm_page_unlock_queues(); 1784 1785 //debugf("mmu_booke_remove: e\n"); 1786} 1787 1788/* 1789 * Remove physical page from all pmaps in which it resides. 1790 */ 1791static void 1792mmu_booke_remove_all(mmu_t mmu, vm_page_t m) 1793{ 1794 pv_entry_t pv, pvn; 1795 uint8_t hold_flag; 1796 1797 mtx_assert(&vm_page_queue_mtx, MA_OWNED); 1798 1799 for (pv = TAILQ_FIRST(&m->md.pv_list); pv != NULL; pv = pvn) { 1800 pvn = TAILQ_NEXT(pv, pv_link); 1801 1802 PMAP_LOCK(pv->pv_pmap); 1803 hold_flag = PTBL_HOLD_FLAG(pv->pv_pmap); 1804 pte_remove(mmu, pv->pv_pmap, pv->pv_va, hold_flag); 1805 PMAP_UNLOCK(pv->pv_pmap); 1806 } 1807 vm_page_flag_clear(m, PG_WRITEABLE); 1808} 1809 1810/* 1811 * Map a range of physical addresses into kernel virtual address space. 1812 */ 1813static vm_offset_t 1814mmu_booke_map(mmu_t mmu, vm_offset_t *virt, vm_offset_t pa_start, 1815 vm_offset_t pa_end, int prot) 1816{ 1817 vm_offset_t sva = *virt; 1818 vm_offset_t va = sva; 1819 1820 //debugf("mmu_booke_map: s (sva = 0x%08x pa_start = 0x%08x pa_end = 0x%08x)\n", 1821 // sva, pa_start, pa_end); 1822 1823 while (pa_start < pa_end) { 1824 mmu_booke_kenter(mmu, va, pa_start); 1825 va += PAGE_SIZE; 1826 pa_start += PAGE_SIZE; 1827 } 1828 *virt = va; 1829 1830 //debugf("mmu_booke_map: e (va = 0x%08x)\n", va); 1831 return (sva); 1832} 1833 1834/* 1835 * The pmap must be activated before it's address space can be accessed in any 1836 * way. 
1837 */ 1838static void 1839mmu_booke_activate(mmu_t mmu, struct thread *td) 1840{ 1841 pmap_t pmap; 1842 1843 pmap = &td->td_proc->p_vmspace->vm_pmap; 1844 1845 CTR5(KTR_PMAP, "%s: s (td = %p, proc = '%s', id = %d, pmap = 0x%08x)", 1846 __func__, td, td->td_proc->p_comm, td->td_proc->p_pid, pmap); 1847 1848 KASSERT((pmap != kernel_pmap), ("mmu_booke_activate: kernel_pmap!")); 1849 1850 mtx_lock_spin(&sched_lock); 1851 1852 atomic_set_int(&pmap->pm_active, PCPU_GET(cpumask)); 1853 PCPU_SET(curpmap, pmap); 1854 1855 if (pmap->pm_tid[PCPU_GET(cpuid)] == TID_NONE) 1856 tid_alloc(pmap); 1857 1858 /* Load PID0 register with pmap tid value. */ 1859 mtspr(SPR_PID0, pmap->pm_tid[PCPU_GET(cpuid)]); 1860 __asm __volatile("isync"); 1861 1862 mtx_unlock_spin(&sched_lock); 1863 1864 CTR3(KTR_PMAP, "%s: e (tid = %d for '%s')", __func__, 1865 pmap->pm_tid[PCPU_GET(cpuid)], td->td_proc->p_comm); 1866} 1867 1868/* 1869 * Deactivate the specified process's address space. 1870 */ 1871static void 1872mmu_booke_deactivate(mmu_t mmu, struct thread *td) 1873{ 1874 pmap_t pmap; 1875 1876 pmap = &td->td_proc->p_vmspace->vm_pmap; 1877 1878 CTR5(KTR_PMAP, "%s: td=%p, proc = '%s', id = %d, pmap = 0x%08x", 1879 __func__, td, td->td_proc->p_comm, td->td_proc->p_pid, pmap); 1880 1881 atomic_clear_int(&pmap->pm_active, PCPU_GET(cpumask)); 1882 PCPU_SET(curpmap, NULL); 1883} 1884 1885/* 1886 * Copy the range specified by src_addr/len 1887 * from the source map to the range dst_addr/len 1888 * in the destination map. 1889 * 1890 * This routine is only advisory and need not do anything. 1891 */ 1892static void 1893mmu_booke_copy(mmu_t mmu, pmap_t dst_pmap, pmap_t src_pmap, 1894 vm_offset_t dst_addr, vm_size_t len, vm_offset_t src_addr) 1895{ 1896 1897} 1898 1899/* 1900 * Set the physical protection on the specified range of this map as requested. 1901 */ 1902static void 1903mmu_booke_protect(mmu_t mmu, pmap_t pmap, vm_offset_t sva, vm_offset_t eva, 1904 vm_prot_t prot) 1905{ 1906 vm_offset_t va; 1907 vm_page_t m; 1908 pte_t *pte; 1909 1910 if ((prot & VM_PROT_READ) == VM_PROT_NONE) { 1911 mmu_booke_remove(mmu, pmap, sva, eva); 1912 return; 1913 } 1914 1915 if (prot & VM_PROT_WRITE) 1916 return; 1917 1918 vm_page_lock_queues(); 1919 PMAP_LOCK(pmap); 1920 for (va = sva; va < eva; va += PAGE_SIZE) { 1921 if ((pte = pte_find(mmu, pmap, va)) != NULL) { 1922 if (PTE_ISVALID(pte)) { 1923 m = PHYS_TO_VM_PAGE(PTE_PA(pte)); 1924 1925 mtx_lock_spin(&tlbivax_mutex); 1926 tlb_miss_lock(); 1927 1928 /* Handle modified pages. */ 1929 if (PTE_ISMODIFIED(pte)) 1930 vm_page_dirty(m); 1931 1932 /* Referenced pages. */ 1933 if (PTE_ISREFERENCED(pte)) 1934 vm_page_flag_set(m, PG_REFERENCED); 1935 1936 tlb0_flush_entry(va); 1937 pte->flags &= ~(PTE_UW | PTE_SW | PTE_MODIFIED | 1938 PTE_REFERENCED); 1939 1940 tlb_miss_unlock(); 1941 mtx_unlock_spin(&tlbivax_mutex); 1942 } 1943 } 1944 } 1945 PMAP_UNLOCK(pmap); 1946 vm_page_unlock_queues(); 1947} 1948 1949/* 1950 * Clear the write and modified bits in each of the given page's mappings. 
1951 */ 1952static void 1953mmu_booke_remove_write(mmu_t mmu, vm_page_t m) 1954{ 1955 pv_entry_t pv; 1956 pte_t *pte; 1957 1958 mtx_assert(&vm_page_queue_mtx, MA_OWNED); 1959 if ((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) != 0 || 1960 (m->flags & PG_WRITEABLE) == 0) 1961 return; 1962 1963 TAILQ_FOREACH(pv, &m->md.pv_list, pv_link) { 1964 PMAP_LOCK(pv->pv_pmap); 1965 if ((pte = pte_find(mmu, pv->pv_pmap, pv->pv_va)) != NULL) { 1966 if (PTE_ISVALID(pte)) { 1967 m = PHYS_TO_VM_PAGE(PTE_PA(pte)); 1968 1969 mtx_lock_spin(&tlbivax_mutex); 1970 tlb_miss_lock(); 1971 1972 /* Handle modified pages. */ 1973 if (PTE_ISMODIFIED(pte)) 1974 vm_page_dirty(m); 1975 1976 /* Referenced pages. */ 1977 if (PTE_ISREFERENCED(pte)) 1978 vm_page_flag_set(m, PG_REFERENCED); 1979 1980 /* Flush mapping from TLB0. */ 1981 pte->flags &= ~(PTE_UW | PTE_SW | PTE_MODIFIED | 1982 PTE_REFERENCED); 1983 1984 tlb_miss_unlock(); 1985 mtx_unlock_spin(&tlbivax_mutex); 1986 } 1987 } 1988 PMAP_UNLOCK(pv->pv_pmap); 1989 } 1990 vm_page_flag_clear(m, PG_WRITEABLE); 1991} 1992 1993static boolean_t 1994mmu_booke_page_executable(mmu_t mmu, vm_page_t m) 1995{ 1996 pv_entry_t pv; 1997 pte_t *pte; 1998 boolean_t executable; 1999 2000 executable = FALSE; 2001 TAILQ_FOREACH(pv, &m->md.pv_list, pv_link) { 2002 PMAP_LOCK(pv->pv_pmap); 2003 pte = pte_find(mmu, pv->pv_pmap, pv->pv_va); 2004 if (pte != NULL && PTE_ISVALID(pte) && (pte->flags & PTE_UX)) 2005 executable = TRUE; 2006 PMAP_UNLOCK(pv->pv_pmap); 2007 if (executable) 2008 break; 2009 } 2010 2011 return (executable); 2012} 2013 2014/* 2015 * Atomically extract and hold the physical page with the given 2016 * pmap and virtual address pair if that mapping permits the given 2017 * protection. 2018 */ 2019static vm_page_t 2020mmu_booke_extract_and_hold(mmu_t mmu, pmap_t pmap, vm_offset_t va, 2021 vm_prot_t prot) 2022{ 2023 pte_t *pte; 2024 vm_page_t m; 2025 uint32_t pte_wbit; 2026 2027 m = NULL; 2028 vm_page_lock_queues(); 2029 PMAP_LOCK(pmap); 2030 2031 pte = pte_find(mmu, pmap, va); 2032 if ((pte != NULL) && PTE_ISVALID(pte)) { 2033 if (pmap == kernel_pmap) 2034 pte_wbit = PTE_SW; 2035 else 2036 pte_wbit = PTE_UW; 2037 2038 if ((pte->flags & pte_wbit) || ((prot & VM_PROT_WRITE) == 0)) { 2039 m = PHYS_TO_VM_PAGE(PTE_PA(pte)); 2040 vm_page_hold(m); 2041 } 2042 } 2043 2044 vm_page_unlock_queues(); 2045 PMAP_UNLOCK(pmap); 2046 return (m); 2047} 2048 2049/* 2050 * Initialize a vm_page's machine-dependent fields. 2051 */ 2052static void 2053mmu_booke_page_init(mmu_t mmu, vm_page_t m) 2054{ 2055 2056 TAILQ_INIT(&m->md.pv_list); 2057} 2058 2059/* 2060 * mmu_booke_zero_page_area zeros the specified hardware page by 2061 * mapping it into virtual memory and using bzero to clear 2062 * its contents. 2063 * 2064 * off and size must reside within a single page. 2065 */ 2066static void 2067mmu_booke_zero_page_area(mmu_t mmu, vm_page_t m, int off, int size) 2068{ 2069 vm_offset_t va; 2070 2071 /* XXX KASSERT off and size are within a single page? */ 2072 2073 mtx_lock(&zero_page_mutex); 2074 va = zero_page_va; 2075 2076 mmu_booke_kenter(mmu, va, VM_PAGE_TO_PHYS(m)); 2077 bzero((caddr_t)va + off, size); 2078 mmu_booke_kremove(mmu, va); 2079 2080 mtx_unlock(&zero_page_mutex); 2081} 2082 2083/* 2084 * mmu_booke_zero_page zeros the specified hardware page. 
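 *
 * This simply zeroes the whole page through mmu_booke_zero_page_area(),
 * i.e. by temporarily mapping the page at the reserved zero_page_va
 * (under zero_page_mutex) and bzero()ing it.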
2085 */
2086static void
2087mmu_booke_zero_page(mmu_t mmu, vm_page_t m)
2088{
2089
2090 mmu_booke_zero_page_area(mmu, m, 0, PAGE_SIZE);
2091}
2092
2093/*
2094 * mmu_booke_copy_page copies the specified (machine independent) page by
2095 * mapping the page into virtual memory and using memcpy to copy the page,
2096 * one machine dependent page at a time.
2097 */
2098static void
2099mmu_booke_copy_page(mmu_t mmu, vm_page_t sm, vm_page_t dm)
2100{
2101 vm_offset_t sva, dva;
2102
2103 sva = copy_page_src_va;
2104 dva = copy_page_dst_va;
2105
2106 mtx_lock(&copy_page_mutex);
2107 mmu_booke_kenter(mmu, sva, VM_PAGE_TO_PHYS(sm));
2108 mmu_booke_kenter(mmu, dva, VM_PAGE_TO_PHYS(dm));
2109 memcpy((caddr_t)dva, (caddr_t)sva, PAGE_SIZE);
2110 mmu_booke_kremove(mmu, dva);
2111 mmu_booke_kremove(mmu, sva);
2112 mtx_unlock(&copy_page_mutex);
2113}
2114
2115/*
2116 * mmu_booke_zero_page_idle zeros the specified hardware page by mapping it
2117 * into virtual memory and using bzero to clear its contents. This is intended
2118 * to be called from the vm_pagezero process only and outside of Giant. No
2119 * lock is required.
2120 */
2121static void
2122mmu_booke_zero_page_idle(mmu_t mmu, vm_page_t m)
2123{
2124 vm_offset_t va;
2125
2126 va = zero_page_idle_va;
2127 mmu_booke_kenter(mmu, va, VM_PAGE_TO_PHYS(m));
2128 bzero((caddr_t)va, PAGE_SIZE);
2129 mmu_booke_kremove(mmu, va);
2130}
2131
2132/*
2133 * Return whether or not the specified physical page was modified
2134 * in any of the physical maps.
2135 */
2136static boolean_t
2137mmu_booke_is_modified(mmu_t mmu, vm_page_t m)
2138{
2139 pte_t *pte;
2140 pv_entry_t pv;
2141
2142 mtx_assert(&vm_page_queue_mtx, MA_OWNED);
2143 if ((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) != 0)
2144 return (FALSE);
2145
2146 TAILQ_FOREACH(pv, &m->md.pv_list, pv_link) {
2147 PMAP_LOCK(pv->pv_pmap);
2148 if ((pte = pte_find(mmu, pv->pv_pmap, pv->pv_va)) != NULL) {
2149 if (!PTE_ISVALID(pte))
2150 goto make_sure_to_unlock;
2151
2152 if (PTE_ISMODIFIED(pte)) {
2153 PMAP_UNLOCK(pv->pv_pmap);
2154 return (TRUE);
2155 }
2156 }
2157make_sure_to_unlock:
2158 PMAP_UNLOCK(pv->pv_pmap);
2159 }
2160 return (FALSE);
2161}
2162
2163/*
2164 * Return whether or not the specified virtual address is eligible
2165 * for prefault.
2166 */
2167static boolean_t
2168mmu_booke_is_prefaultable(mmu_t mmu, pmap_t pmap, vm_offset_t addr)
2169{
2170
2171 return (FALSE);
2172}
2173
2174/*
2175 * Clear the modify bits on the specified physical page.
2176 */
2177static void
2178mmu_booke_clear_modify(mmu_t mmu, vm_page_t m)
2179{
2180 pte_t *pte;
2181 pv_entry_t pv;
2182
2183 mtx_assert(&vm_page_queue_mtx, MA_OWNED);
2184 if ((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) != 0)
2185 return;
2186
2187 TAILQ_FOREACH(pv, &m->md.pv_list, pv_link) {
2188 PMAP_LOCK(pv->pv_pmap);
2189 if ((pte = pte_find(mmu, pv->pv_pmap, pv->pv_va)) != NULL) {
2190 if (!PTE_ISVALID(pte))
2191 goto make_sure_to_unlock;
2192
2193 mtx_lock_spin(&tlbivax_mutex);
2194 tlb_miss_lock();
2195
2196 if (pte->flags & (PTE_SW | PTE_UW | PTE_MODIFIED)) {
2197 tlb0_flush_entry(pv->pv_va);
2198 pte->flags &= ~(PTE_SW | PTE_UW | PTE_MODIFIED |
2199 PTE_REFERENCED);
2200 }
2201
2202 tlb_miss_unlock();
2203 mtx_unlock_spin(&tlbivax_mutex);
2204 }
2205make_sure_to_unlock:
2206 PMAP_UNLOCK(pv->pv_pmap);
2207 }
2208}
2209
2210/*
2211 * Return a count of reference bits for a page, clearing those bits.
2212 * It is not necessary for every reference bit to be cleared, but it
2213 * is necessary that 0 only be returned when there are truly no
2214 * reference bits set.
2215 *
2216 * XXX: The exact number of bits to check and clear is a matter that
2217 * should be tested and standardized at some point in the future for
2218 * optimal aging of shared pages.
2219 */
2220static int
2221mmu_booke_ts_referenced(mmu_t mmu, vm_page_t m)
2222{
2223 pte_t *pte;
2224 pv_entry_t pv;
2225 int count;
2226
2227 mtx_assert(&vm_page_queue_mtx, MA_OWNED);
2228 if ((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) != 0)
2229 return (0);
2230
2231 count = 0;
2232 TAILQ_FOREACH(pv, &m->md.pv_list, pv_link) {
2233 PMAP_LOCK(pv->pv_pmap);
2234 if ((pte = pte_find(mmu, pv->pv_pmap, pv->pv_va)) != NULL) {
2235 if (!PTE_ISVALID(pte))
2236 goto make_sure_to_unlock;
2237
2238 if (PTE_ISREFERENCED(pte)) {
2239 mtx_lock_spin(&tlbivax_mutex);
2240 tlb_miss_lock();
2241
2242 tlb0_flush_entry(pv->pv_va);
2243 pte->flags &= ~PTE_REFERENCED;
2244
2245 tlb_miss_unlock();
2246 mtx_unlock_spin(&tlbivax_mutex);
2247
2248 if (++count > 4) {
2249 PMAP_UNLOCK(pv->pv_pmap);
2250 break;
2251 }
2252 }
2253 }
2254make_sure_to_unlock:
2255 PMAP_UNLOCK(pv->pv_pmap);
2256 }
2257 return (count);
2258}
2259
2260/*
2261 * Clear the reference bit on the specified physical page.
2262 */
2263static void
2264mmu_booke_clear_reference(mmu_t mmu, vm_page_t m)
2265{
2266 pte_t *pte;
2267 pv_entry_t pv;
2268
2269 mtx_assert(&vm_page_queue_mtx, MA_OWNED);
2270 if ((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) != 0)
2271 return;
2272
2273 TAILQ_FOREACH(pv, &m->md.pv_list, pv_link) {
2274 PMAP_LOCK(pv->pv_pmap);
2275 if ((pte = pte_find(mmu, pv->pv_pmap, pv->pv_va)) != NULL) {
2276 if (!PTE_ISVALID(pte))
2277 goto make_sure_to_unlock;
2278
2279 if (PTE_ISREFERENCED(pte)) {
2280 mtx_lock_spin(&tlbivax_mutex);
2281 tlb_miss_lock();
2282
2283 tlb0_flush_entry(pv->pv_va);
2284 pte->flags &= ~PTE_REFERENCED;
2285
2286 tlb_miss_unlock();
2287 mtx_unlock_spin(&tlbivax_mutex);
2288 }
2289 }
2290make_sure_to_unlock:
2291 PMAP_UNLOCK(pv->pv_pmap);
2292 }
2293}
2294
2295/*
2296 * Change wiring attribute for a map/virtual-address pair.
2297 */
2298static void
2299mmu_booke_change_wiring(mmu_t mmu, pmap_t pmap, vm_offset_t va, boolean_t wired)
2300{
2301 pte_t *pte;
2302
2303 PMAP_LOCK(pmap);
2304 if ((pte = pte_find(mmu, pmap, va)) != NULL) {
2305 if (wired) {
2306 if (!PTE_ISWIRED(pte)) {
2307 pte->flags |= PTE_WIRED;
2308 pmap->pm_stats.wired_count++;
2309 }
2310 } else {
2311 if (PTE_ISWIRED(pte)) {
2312 pte->flags &= ~PTE_WIRED;
2313 pmap->pm_stats.wired_count--;
2314 }
2315 }
2316 }
2317 PMAP_UNLOCK(pmap);
2318}
2319
2320/*
2321 * Return true if the pmap's pv is one of the first 16 pvs linked to from this
2322 * page. This count may be changed upwards or downwards in the future; it is
2323 * only necessary that true be returned for a small subset of pmaps for proper
2324 * page aging.
2325 */
2326static boolean_t
2327mmu_booke_page_exists_quick(mmu_t mmu, pmap_t pmap, vm_page_t m)
2328{
2329 pv_entry_t pv;
2330 int loops;
2331
2332 mtx_assert(&vm_page_queue_mtx, MA_OWNED);
2333 if ((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) != 0)
2334 return (FALSE);
2335
2336 loops = 0;
2337 TAILQ_FOREACH(pv, &m->md.pv_list, pv_link) {
2338 if (pv->pv_pmap == pmap)
2339 return (TRUE);
2340
2341 if (++loops >= 16)
2342 break;
2343 }
2344 return (FALSE);
2345}
2346
2347/*
2348 * Return the number of managed mappings to the given physical page that are
2349 * wired.
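 *
 * The page's pv list is walked and every valid PTE with PTE_WIRED set
 * is counted; each owning pmap is locked only while its PTE is examined.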
2350 */ 2351static int 2352mmu_booke_page_wired_mappings(mmu_t mmu, vm_page_t m) 2353{ 2354 pv_entry_t pv; 2355 pte_t *pte; 2356 int count = 0; 2357 2358 if ((m->flags & PG_FICTITIOUS) != 0) 2359 return (count); 2360 mtx_assert(&vm_page_queue_mtx, MA_OWNED); 2361 2362 TAILQ_FOREACH(pv, &m->md.pv_list, pv_link) { 2363 PMAP_LOCK(pv->pv_pmap); 2364 if ((pte = pte_find(mmu, pv->pv_pmap, pv->pv_va)) != NULL) 2365 if (PTE_ISVALID(pte) && PTE_ISWIRED(pte)) 2366 count++; 2367 PMAP_UNLOCK(pv->pv_pmap); 2368 } 2369 2370 return (count); 2371} 2372 2373static int 2374mmu_booke_dev_direct_mapped(mmu_t mmu, vm_offset_t pa, vm_size_t size) 2375{ 2376 int i; 2377 vm_offset_t va; 2378 2379 /* 2380 * This currently does not work for entries that 2381 * overlap TLB1 entries. 2382 */ 2383 for (i = 0; i < tlb1_idx; i ++) { 2384 if (tlb1_iomapped(i, pa, size, &va) == 0) 2385 return (0); 2386 } 2387 2388 return (EFAULT); 2389} 2390 2391vm_offset_t 2392mmu_booke_dumpsys_map(mmu_t mmu, struct pmap_md *md, vm_size_t ofs, 2393 vm_size_t *sz) 2394{ 2395 vm_paddr_t pa, ppa; 2396 vm_offset_t va; 2397 vm_size_t gran; 2398 2399 /* Raw physical memory dumps don't have a virtual address. */ 2400 if (md->md_vaddr == ~0UL) { 2401 /* We always map a 256MB page at 256M. */ 2402 gran = 256 * 1024 * 1024; 2403 pa = md->md_paddr + ofs; 2404 ppa = pa & ~(gran - 1); 2405 ofs = pa - ppa; 2406 va = gran; 2407 tlb1_set_entry(va, ppa, gran, _TLB_ENTRY_IO); 2408 if (*sz > (gran - ofs)) 2409 *sz = gran - ofs; 2410 return (va + ofs); 2411 } 2412 2413 /* Minidumps are based on virtual memory addresses. */ 2414 va = md->md_vaddr + ofs; 2415 if (va >= kernstart + kernsize) { 2416 gran = PAGE_SIZE - (va & PAGE_MASK); 2417 if (*sz > gran) 2418 *sz = gran; 2419 } 2420 return (va); 2421} 2422 2423void 2424mmu_booke_dumpsys_unmap(mmu_t mmu, struct pmap_md *md, vm_size_t ofs, 2425 vm_offset_t va) 2426{ 2427 2428 /* Raw physical memory dumps don't have a virtual address. */ 2429 if (md->md_vaddr == ~0UL) { 2430 tlb1_idx--; 2431 tlb1[tlb1_idx].mas1 = 0; 2432 tlb1[tlb1_idx].mas2 = 0; 2433 tlb1[tlb1_idx].mas3 = 0; 2434 tlb1_write_entry(tlb1_idx); 2435 return; 2436 } 2437 2438 /* Minidumps are based on virtual memory addresses. */ 2439 /* Nothing to do... */ 2440} 2441 2442struct pmap_md * 2443mmu_booke_scan_md(mmu_t mmu, struct pmap_md *prev) 2444{ 2445 static struct pmap_md md; 2446 struct bi_mem_region *mr; 2447 pte_t *pte; 2448 vm_offset_t va; 2449 2450 if (dumpsys_minidump) { 2451 md.md_paddr = ~0UL; /* Minidumps use virtual addresses. */ 2452 if (prev == NULL) { 2453 /* 1st: kernel .data and .bss. */ 2454 md.md_index = 1; 2455 md.md_vaddr = trunc_page((uintptr_t)_etext); 2456 md.md_size = round_page((uintptr_t)_end) - md.md_vaddr; 2457 return (&md); 2458 } 2459 switch (prev->md_index) { 2460 case 1: 2461 /* 2nd: msgbuf and tables (see pmap_bootstrap()). */ 2462 md.md_index = 2; 2463 md.md_vaddr = data_start; 2464 md.md_size = data_end - data_start; 2465 break; 2466 case 2: 2467 /* 3rd: kernel VM. */ 2468 va = prev->md_vaddr + prev->md_size; 2469 /* Find start of next chunk (from va). */ 2470 while (va < virtual_end) { 2471 /* Don't dump the buffer cache. */ 2472 if (va >= kmi.buffer_sva && 2473 va < kmi.buffer_eva) { 2474 va = kmi.buffer_eva; 2475 continue; 2476 } 2477 pte = pte_find(mmu, kernel_pmap, va); 2478 if (pte != NULL && PTE_ISVALID(pte)) 2479 break; 2480 va += PAGE_SIZE; 2481 } 2482 if (va < virtual_end) { 2483 md.md_vaddr = va; 2484 va += PAGE_SIZE; 2485 /* Find last page in chunk. 
*/
2486 while (va < virtual_end) {
2487 /* Don't run into the buffer cache. */
2488 if (va == kmi.buffer_sva)
2489 break;
2490 pte = pte_find(mmu, kernel_pmap, va);
2491 if (pte == NULL || !PTE_ISVALID(pte))
2492 break;
2493 va += PAGE_SIZE;
2494 }
2495 md.md_size = va - md.md_vaddr;
2496 break;
2497 }
2498 md.md_index = 3;
2499 /* FALLTHROUGH */
2500 default:
2501 return (NULL);
2502 }
2503 } else { /* raw physical memory dump */
2504 mr = bootinfo_mr();
2505 if (prev == NULL) {
2506 /* first physical chunk. */
2507 md.md_paddr = mr->mem_base;
2508 md.md_size = mr->mem_size;
2509 md.md_vaddr = ~0UL;
2510 md.md_index = 1;
2511 } else if (md.md_index < bootinfo->bi_mem_reg_no) {
2512 md.md_paddr = mr[md.md_index].mem_base;
2513 md.md_size = mr[md.md_index].mem_size;
2514 md.md_vaddr = ~0UL;
2515 md.md_index++;
2516 } else {
2517 /* There's no next physical chunk. */
2518 return (NULL);
2519 }
2520 }
2521
2522 return (&md);
2523}
2524
2525/*
2526 * Map a set of physical memory pages into the kernel virtual address space.
2527 * Return a pointer to where it is mapped. This routine is intended to be used
2528 * for mapping device memory, NOT real memory.
2529 */
2530static void *
2531mmu_booke_mapdev(mmu_t mmu, vm_offset_t pa, vm_size_t size)
2532{
2533 void *res;
2534 uintptr_t va;
2535 vm_size_t sz;
2536
2537 va = (pa >= 0x80000000) ? pa : (0xe2000000 + pa);
2538 res = (void *)va;
2539
2540 do {
2541 sz = 1 << (ilog2(size) & ~1);
2542 if (bootverbose)
2543 printf("Wiring VA=%x to PA=%x (size=%x), "
2544 "using TLB1[%d]\n", va, pa, sz, tlb1_idx);
2545 tlb1_set_entry(va, pa, sz, _TLB_ENTRY_IO);
2546 size -= sz;
2547 pa += sz;
2548 va += sz;
2549 } while (size > 0);
2550
2551 return (res);
2552}
2553
2554/*
2555 * 'Unmap' a range mapped by mmu_booke_mapdev().
2556 */
2557static void
2558mmu_booke_unmapdev(mmu_t mmu, vm_offset_t va, vm_size_t size)
2559{
2560 vm_offset_t base, offset;
2561
2562 /*
2563 * Unmap only if this is inside kernel virtual space.
2564 */
2565 if ((va >= VM_MIN_KERNEL_ADDRESS) && (va <= VM_MAX_KERNEL_ADDRESS)) {
2566 base = trunc_page(va);
2567 offset = va & PAGE_MASK;
2568 size = roundup(offset + size, PAGE_SIZE);
2569 kmem_free(kernel_map, base, size);
2570 }
2571}
2572
2573/*
2574 * mmu_booke_object_init_pt preloads the ptes for a given object into the
2575 * specified pmap. This eliminates the blast of soft faults on process startup
2576 * and immediately after an mmap.
2577 */
2578static void
2579mmu_booke_object_init_pt(mmu_t mmu, pmap_t pmap, vm_offset_t addr,
2580 vm_object_t object, vm_pindex_t pindex, vm_size_t size)
2581{
2582
2583 VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
2584 KASSERT(object->type == OBJT_DEVICE,
2585 ("mmu_booke_object_init_pt: non-device object"));
2586}
2587
2588/*
2589 * Perform the pmap work for mincore.
2590 */
2591static int
2592mmu_booke_mincore(mmu_t mmu, pmap_t pmap, vm_offset_t addr)
2593{
2594
2595 TODO;
2596 return (0);
2597}
2598
2599/**************************************************************************/
2600/* TID handling */
2601/**************************************************************************/
2602
2603/*
2604 * Allocate a TID. If necessary, steal one from someone else.
2605 * The new TID is flushed from the TLB before returning.
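 *
 * TIDs are handed out per CPU from tid_next, cycling within
 * [TID_MIN, TID_MAX]. If the chosen TID still belongs to another pmap it
 * is stolen: the previous owner's pm_tid[] slot is reset to TID_NONE (so
 * that pmap will allocate a fresh TID on its next activation) and all
 * TLB0 entries tagged with the TID are flushed via tid_flush().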
2606 */ 2607static tlbtid_t 2608tid_alloc(pmap_t pmap) 2609{ 2610 tlbtid_t tid; 2611 int thiscpu; 2612 2613 KASSERT((pmap != kernel_pmap), ("tid_alloc: kernel pmap")); 2614 2615 CTR2(KTR_PMAP, "%s: s (pmap = %p)", __func__, pmap); 2616 2617 thiscpu = PCPU_GET(cpuid); 2618 2619 tid = PCPU_GET(tid_next); 2620 if (tid > TID_MAX) 2621 tid = TID_MIN; 2622 PCPU_SET(tid_next, tid + 1); 2623 2624 /* If we are stealing TID then clear the relevant pmap's field */ 2625 if (tidbusy[thiscpu][tid] != NULL) { 2626 2627 CTR2(KTR_PMAP, "%s: warning: stealing tid %d", __func__, tid); 2628 2629 tidbusy[thiscpu][tid]->pm_tid[thiscpu] = TID_NONE; 2630 2631 /* Flush all entries from TLB0 matching this TID. */ 2632 tid_flush(tid); 2633 } 2634 2635 tidbusy[thiscpu][tid] = pmap; 2636 pmap->pm_tid[thiscpu] = tid; 2637 __asm __volatile("msync; isync"); 2638 2639 CTR3(KTR_PMAP, "%s: e (%02d next = %02d)", __func__, tid, 2640 PCPU_GET(tid_next)); 2641 2642 return (tid); 2643} 2644 2645/**************************************************************************/ 2646/* TLB0 handling */ 2647/**************************************************************************/ 2648 2649static void 2650tlb_print_entry(int i, uint32_t mas1, uint32_t mas2, uint32_t mas3, 2651 uint32_t mas7) 2652{ 2653 int as; 2654 char desc[3]; 2655 tlbtid_t tid; 2656 vm_size_t size; 2657 unsigned int tsize; 2658 2659 desc[2] = '\0'; 2660 if (mas1 & MAS1_VALID) 2661 desc[0] = 'V'; 2662 else 2663 desc[0] = ' '; 2664 2665 if (mas1 & MAS1_IPROT) 2666 desc[1] = 'P'; 2667 else 2668 desc[1] = ' '; 2669 2670 as = (mas1 & MAS1_TS_MASK) ? 1 : 0; 2671 tid = MAS1_GETTID(mas1); 2672 2673 tsize = (mas1 & MAS1_TSIZE_MASK) >> MAS1_TSIZE_SHIFT; 2674 size = 0; 2675 if (tsize) 2676 size = tsize2size(tsize); 2677 2678 debugf("%3d: (%s) [AS=%d] " 2679 "sz = 0x%08x tsz = %d tid = %d mas1 = 0x%08x " 2680 "mas2(va) = 0x%08x mas3(pa) = 0x%08x mas7 = 0x%08x\n", 2681 i, desc, as, size, tsize, tid, mas1, mas2, mas3, mas7); 2682} 2683 2684/* Convert TLB0 va and way number to tlb0[] table index. */ 2685static inline unsigned int 2686tlb0_tableidx(vm_offset_t va, unsigned int way) 2687{ 2688 unsigned int idx; 2689 2690 idx = (way * TLB0_ENTRIES_PER_WAY); 2691 idx += (va & MAS2_TLB0_ENTRY_IDX_MASK) >> MAS2_TLB0_ENTRY_IDX_SHIFT; 2692 return (idx); 2693} 2694 2695/* 2696 * Invalidate TLB0 entry. 
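 *
 * The caller must hold tlbivax_mutex. The tlbivax instruction invalidates
 * any TLB0 entry matching the effective page number of 'va'; the
 * following sync/tlbsync sequence ensures the invalidation has taken
 * effect before the caller continues.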
2697 */
2698static inline void
2699tlb0_flush_entry(vm_offset_t va)
2700{
2701
2702 CTR2(KTR_PMAP, "%s: s va=0x%08x", __func__, va);
2703
2704 mtx_assert(&tlbivax_mutex, MA_OWNED);
2705
2706 __asm __volatile("tlbivax 0, %0" :: "r"(va & MAS2_EPN_MASK));
2707 __asm __volatile("isync; msync");
2708 __asm __volatile("tlbsync; msync");
2709
2710 CTR1(KTR_PMAP, "%s: e", __func__);
2711}
2712
2713/* Print out contents of the MAS registers for each TLB0 entry */
2714void
2715tlb0_print_tlbentries(void)
2716{
2717 uint32_t mas0, mas1, mas2, mas3, mas7;
2718 int entryidx, way, idx;
2719
2720 debugf("TLB0 entries:\n");
2721 for (way = 0; way < TLB0_WAYS; way ++)
2722 for (entryidx = 0; entryidx < TLB0_ENTRIES_PER_WAY; entryidx++) {
2723
2724 mas0 = MAS0_TLBSEL(0) | MAS0_ESEL(way);
2725 mtspr(SPR_MAS0, mas0);
2726 __asm __volatile("isync");
2727
2728 mas2 = entryidx << MAS2_TLB0_ENTRY_IDX_SHIFT;
2729 mtspr(SPR_MAS2, mas2);
2730
2731 __asm __volatile("isync; tlbre");
2732
2733 mas1 = mfspr(SPR_MAS1);
2734 mas2 = mfspr(SPR_MAS2);
2735 mas3 = mfspr(SPR_MAS3);
2736 mas7 = mfspr(SPR_MAS7);
2737
2738 idx = tlb0_tableidx(mas2, way);
2739 tlb_print_entry(idx, mas1, mas2, mas3, mas7);
2740 }
2741}
2742
2743/**************************************************************************/
2744/* TLB1 handling */
2745/**************************************************************************/
2746
2747/*
2748 * TLB1 mapping notes:
2749 *
2750 * TLB1[0] CCSRBAR
2751 * TLB1[1] Kernel text and data.
2752 * TLB1[2-15] Additional kernel text and data mappings (if required), PCI
2753 * windows, other devices mappings.
2754 */
2755
2756/*
2757 * Write given entry to TLB1 hardware.
2758 * Use 32 bit pa, clear 4 high-order bits of RPN (mas7).
2759 */
2760static void
2761tlb1_write_entry(unsigned int idx)
2762{
2763 uint32_t mas0, mas7;
2764
2765 //debugf("tlb1_write_entry: s\n");
2766
2767 /* Clear high order RPN bits */
2768 mas7 = 0;
2769
2770 /* Select entry */
2771 mas0 = MAS0_TLBSEL(1) | MAS0_ESEL(idx);
2772 //debugf("tlb1_write_entry: mas0 = 0x%08x\n", mas0);
2773
2774 mtspr(SPR_MAS0, mas0);
2775 __asm __volatile("isync");
2776 mtspr(SPR_MAS1, tlb1[idx].mas1);
2777 __asm __volatile("isync");
2778 mtspr(SPR_MAS2, tlb1[idx].mas2);
2779 __asm __volatile("isync");
2780 mtspr(SPR_MAS3, tlb1[idx].mas3);
2781 __asm __volatile("isync");
2782 mtspr(SPR_MAS7, mas7);
2783 __asm __volatile("isync; tlbwe; isync; msync");
2784
2785 //debugf("tlb1_write_entry: e\n");
2786}
2787
2788/*
2789 * Return the largest uint value log such that 2^log <= num.
2790 */
2791static unsigned int
2792ilog2(unsigned int num)
2793{
2794 int lz;
2795
2796 __asm ("cntlzw %0, %1" : "=r" (lz) : "r" (num));
2797 return (31 - lz);
2798}
2799
2800/*
2801 * Convert TLB TSIZE value to mapped region size.
2802 */
2803static vm_size_t
2804tsize2size(unsigned int tsize)
2805{
2806
2807 /*
2808 * size = 4^tsize KB
2809 * size = 4^tsize * 2^10 = 2^(2 * tsize + 10)
2810 */
2811
2812 return ((1 << (2 * tsize)) * 1024);
2813}
2814
2815/*
2816 * Convert region size (must be power of 4) to TLB TSIZE value.
2817 */
2818static unsigned int
2819size2tsize(vm_size_t size)
2820{
2821
2822 return (ilog2(size) / 2 - 5);
2823}
2824
2825/*
2826 * Register permanent kernel mapping in TLB1.
2827 *
2828 * Entries are created starting from index 0 (current free entry is
2829 * kept in tlb1_idx) and are not supposed to be invalidated.
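 *
 * The mapping size is converted to a TSIZE with size2tsize(), so it must
 * be a power of 4. For example, a 16 MB mapping gives
 * tsize = ilog2(16 MB) / 2 - 5 = 24 / 2 - 5 = 7, and conversely
 * tsize2size(7) = 4^7 KB = 16 MB.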
2830 */ 2831static int 2832tlb1_set_entry(vm_offset_t va, vm_offset_t pa, vm_size_t size, 2833 uint32_t flags) 2834{ 2835 uint32_t ts, tid; 2836 int tsize; 2837 2838 if (tlb1_idx >= TLB1_ENTRIES) { 2839 printf("tlb1_set_entry: TLB1 full!\n"); 2840 return (-1); 2841 } 2842 2843 /* Convert size to TSIZE */ 2844 tsize = size2tsize(size); 2845 2846 tid = (TID_KERNEL << MAS1_TID_SHIFT) & MAS1_TID_MASK; 2847 /* XXX TS is hard coded to 0 for now as we only use single address space */ 2848 ts = (0 << MAS1_TS_SHIFT) & MAS1_TS_MASK; 2849 2850 /* XXX LOCK tlb1[] */ 2851 2852 tlb1[tlb1_idx].mas1 = MAS1_VALID | MAS1_IPROT | ts | tid; 2853 tlb1[tlb1_idx].mas1 |= ((tsize << MAS1_TSIZE_SHIFT) & MAS1_TSIZE_MASK); 2854 tlb1[tlb1_idx].mas2 = (va & MAS2_EPN_MASK) | flags; 2855 2856 /* Set supervisor RWX permission bits */ 2857 tlb1[tlb1_idx].mas3 = (pa & MAS3_RPN) | MAS3_SR | MAS3_SW | MAS3_SX; 2858 2859 tlb1_write_entry(tlb1_idx++); 2860 2861 /* XXX UNLOCK tlb1[] */ 2862 2863 /* 2864 * XXX in general TLB1 updates should be propagated between CPUs, 2865 * since current design assumes to have the same TLB1 set-up on all 2866 * cores. 2867 */ 2868 return (0); 2869} 2870 2871static int 2872tlb1_entry_size_cmp(const void *a, const void *b) 2873{ 2874 const vm_size_t *sza; 2875 const vm_size_t *szb; 2876 2877 sza = a; 2878 szb = b; 2879 if (*sza > *szb) 2880 return (-1); 2881 else if (*sza < *szb) 2882 return (1); 2883 else 2884 return (0); 2885} 2886 2887/* 2888 * Map in contiguous RAM region into the TLB1 using maximum of 2889 * KERNEL_REGION_MAX_TLB_ENTRIES entries. 2890 * 2891 * If necessary round up last entry size and return total size 2892 * used by all allocated entries. 2893 */ 2894vm_size_t 2895tlb1_mapin_region(vm_offset_t va, vm_offset_t pa, vm_size_t size) 2896{ 2897 vm_size_t entry_size[KERNEL_REGION_MAX_TLB_ENTRIES]; 2898 vm_size_t mapped_size, sz, esz; 2899 unsigned int log; 2900 int i; 2901 2902 CTR4(KTR_PMAP, "%s: region size = 0x%08x va = 0x%08x pa = 0x%08x", 2903 __func__, size, va, pa); 2904 2905 mapped_size = 0; 2906 sz = size; 2907 memset(entry_size, 0, sizeof(entry_size)); 2908 2909 /* Calculate entry sizes. */ 2910 for (i = 0; i < KERNEL_REGION_MAX_TLB_ENTRIES && sz > 0; i++) { 2911 2912 /* Largest region that is power of 4 and fits within size */ 2913 log = ilog2(sz) / 2; 2914 esz = 1 << (2 * log); 2915 2916 /* If this is last entry cover remaining size. */ 2917 if (i == KERNEL_REGION_MAX_TLB_ENTRIES - 1) { 2918 while (esz < sz) 2919 esz = esz << 2; 2920 } 2921 2922 entry_size[i] = esz; 2923 mapped_size += esz; 2924 if (esz < sz) 2925 sz -= esz; 2926 else 2927 sz = 0; 2928 } 2929 2930 /* Sort entry sizes, required to get proper entry address alignment. */ 2931 qsort(entry_size, KERNEL_REGION_MAX_TLB_ENTRIES, 2932 sizeof(vm_size_t), tlb1_entry_size_cmp); 2933 2934 /* Load TLB1 entries. */ 2935 for (i = 0; i < KERNEL_REGION_MAX_TLB_ENTRIES; i++) { 2936 esz = entry_size[i]; 2937 if (!esz) 2938 break; 2939 2940 CTR5(KTR_PMAP, "%s: entry %d: sz = 0x%08x (va = 0x%08x " 2941 "pa = 0x%08x)", __func__, tlb1_idx, esz, va, pa); 2942 2943 tlb1_set_entry(va, pa, esz, _TLB_ENTRY_MEM); 2944 2945 va += esz; 2946 pa += esz; 2947 } 2948 2949 CTR3(KTR_PMAP, "%s: mapped size 0x%08x (wasted space 0x%08x)", 2950 __func__, mapped_size, mapped_size - size); 2951 2952 return (mapped_size); 2953} 2954 2955/* 2956 * TLB1 initialization routine, to be called after the very first 2957 * assembler level setup done in locore.S. 
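 *
 * In outline: the kernel text+data entry left in TLB1[1] by locore is
 * read back into the tlb1[] shadow table, CCSRBAR is mapped through
 * TLB1[0], tlb1_idx is advanced past both entries, and the MAS4 defaults
 * used on TLB misses are programmed.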
2958 */
2959void
2960tlb1_init(vm_offset_t ccsrbar)
2961{
2962 uint32_t mas0;
2963
2964 /* TLB1[1] is used to map the kernel. Save that entry. */
2965 mas0 = MAS0_TLBSEL(1) | MAS0_ESEL(1);
2966 mtspr(SPR_MAS0, mas0);
2967 __asm __volatile("isync; tlbre");
2968
2969 tlb1[1].mas1 = mfspr(SPR_MAS1);
2970 tlb1[1].mas2 = mfspr(SPR_MAS2);
2971 tlb1[1].mas3 = mfspr(SPR_MAS3);
2972
2973 /* Map in CCSRBAR in TLB1[0] */
2974 tlb1_idx = 0;
2975 tlb1_set_entry(CCSRBAR_VA, ccsrbar, CCSRBAR_SIZE, _TLB_ENTRY_IO);
2976 /*
2977 * Set the next available TLB1 entry index. Note TLB1[1] is reserved
2978 * for the initial mapping of kernel text+data, which was set up early
2979 * in locore; we need to skip this [busy] entry.
2980 */
2981 tlb1_idx = 2;
2982
2983 /* Setup TLB miss defaults */
2984 set_mas4_defaults();
2985}
2986
2987/*
2988 * Setup MAS4 defaults.
2989 * These values are loaded to MAS0-2 on a TLB miss.
2990 */
2991static void
2992set_mas4_defaults(void)
2993{
2994 uint32_t mas4;
2995
2996 /* Defaults: TLB0, PID0, TSIZED=4K */
2997 mas4 = MAS4_TLBSELD0;
2998 mas4 |= (TLB_SIZE_4K << MAS4_TSIZED_SHIFT) & MAS4_TSIZED_MASK;
2999#ifdef SMP
3000 mas4 |= MAS4_MD;
3001#endif
3002 mtspr(SPR_MAS4, mas4);
3003 __asm __volatile("isync");
3004}
3005
3006/*
3007 * Print out contents of the MAS registers for each TLB1 entry
3008 */
3009void
3010tlb1_print_tlbentries(void)
3011{
3012 uint32_t mas0, mas1, mas2, mas3, mas7;
3013 int i;
3014
3015 debugf("TLB1 entries:\n");
3016 for (i = 0; i < TLB1_ENTRIES; i++) {
3017
3018 mas0 = MAS0_TLBSEL(1) | MAS0_ESEL(i);
3019 mtspr(SPR_MAS0, mas0);
3020
3021 __asm __volatile("isync; tlbre");
3022
3023 mas1 = mfspr(SPR_MAS1);
3024 mas2 = mfspr(SPR_MAS2);
3025 mas3 = mfspr(SPR_MAS3);
3026 mas7 = mfspr(SPR_MAS7);
3027
3028 tlb_print_entry(i, mas1, mas2, mas3, mas7);
3029 }
3030}
3031
3032/*
3033 * Print out contents of the in-ram tlb1 table.
3034 */
3035void
3036tlb1_print_entries(void)
3037{
3038 int i;
3039
3040 debugf("tlb1[] table entries:\n");
3041 for (i = 0; i < TLB1_ENTRIES; i++)
3042 tlb_print_entry(i, tlb1[i].mas1, tlb1[i].mas2, tlb1[i].mas3, 0);
3043}
3044
3045/*
3046 * Return 0 if the physical IO range is encompassed by one of the
3047 * TLB1 entries, otherwise return a related error code.
3048 */
3049static int
3050tlb1_iomapped(int i, vm_paddr_t pa, vm_size_t size, vm_offset_t *va)
3051{
3052 uint32_t prot;
3053 vm_paddr_t pa_start;
3054 vm_paddr_t pa_end;
3055 unsigned int entry_tsize;
3056 vm_size_t entry_size;
3057
3058 *va = (vm_offset_t)NULL;
3059
3060 /* Skip invalid entries */
3061 if (!(tlb1[i].mas1 & MAS1_VALID))
3062 return (EINVAL);
3063
3064 /*
3065 * The entry must be cache-inhibited, guarded, and r/w
3066 * so it can function as an i/o page
3067 */
3068 prot = tlb1[i].mas2 & (MAS2_I | MAS2_G);
3069 if (prot != (MAS2_I | MAS2_G))
3070 return (EPERM);
3071
3072 prot = tlb1[i].mas3 & (MAS3_SR | MAS3_SW);
3073 if (prot != (MAS3_SR | MAS3_SW))
3074 return (EPERM);
3075
3076 /* The address should be within the entry range. */
3077 entry_tsize = (tlb1[i].mas1 & MAS1_TSIZE_MASK) >> MAS1_TSIZE_SHIFT;
3078 KASSERT((entry_tsize), ("tlb1_iomapped: invalid entry tsize"));
3079
3080 entry_size = tsize2size(entry_tsize);
3081 pa_start = tlb1[i].mas3 & MAS3_RPN;
3082 pa_end = pa_start + entry_size - 1;
3083
3084 if ((pa < pa_start) || ((pa + size) > pa_end))
3085 return (ERANGE);
3086
3087 /* Return virtual address of this mapping. */
3088 *va = (tlb1[i].mas2 & MAS2_EPN_MASK) + (pa - pa_start);
3089 return (0);
3090}
3091