pmap.c revision 189170
1/*- 2 * Copyright (C) 2007-2008 Semihalf, Rafal Jaworowski <raj@semihalf.com> 3 * Copyright (C) 2006 Semihalf, Marian Balakowicz <m8@semihalf.com> 4 * All rights reserved. 5 * 6 * Redistribution and use in source and binary forms, with or without 7 * modification, are permitted provided that the following conditions 8 * are met: 9 * 1. Redistributions of source code must retain the above copyright 10 * notice, this list of conditions and the following disclaimer. 11 * 2. Redistributions in binary form must reproduce the above copyright 12 * notice, this list of conditions and the following disclaimer in the 13 * documentation and/or other materials provided with the distribution. 14 * 15 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR 16 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES 17 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN 18 * NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 19 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED 20 * TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR 21 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF 22 * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING 23 * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS 24 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 25 * 26 * Some hw specific parts of this pmap were derived or influenced 27 * by NetBSD's ibm4xx pmap module. More generic code is shared with 28 * a few other pmap modules from the FreeBSD tree. 29 */ 30 31 /* 32 * VM layout notes: 33 * 34 * Kernel and user threads run within one common virtual address space 35 * defined by AS=0. 36 * 37 * Virtual address space layout: 38 * ----------------------------- 39 * 0x0000_0000 - 0xafff_ffff : user process 40 * 0xb000_0000 - 0xbfff_ffff : pmap_mapdev()-ed area (PCI/PCIE etc.) 41 * 0xc000_0000 - 0xc0ff_ffff : kernel reserved 42 * 0xc000_0000 - kernelend : kernel code+data, env, metadata etc. 43 * 0xc100_0000 - 0xfeef_ffff : KVA 44 * 0xc100_0000 - 0xc100_3fff : reserved for page zero/copy 45 * 0xc100_4000 - 0xc200_3fff : reserved for ptbl bufs 46 * 0xc200_4000 - 0xc200_8fff : guard page + kstack0 47 * 0xc200_9000 - 0xfeef_ffff : actual free KVA space 48 * 0xfef0_0000 - 0xffff_ffff : I/O devices region 49 */ 50 51#include <sys/cdefs.h> 52__FBSDID("$FreeBSD: head/sys/powerpc/booke/pmap.c 189170 2009-02-28 16:21:25Z ed $"); 53 54#include <sys/types.h> 55#include <sys/param.h> 56#include <sys/malloc.h> 57#include <sys/ktr.h> 58#include <sys/proc.h> 59#include <sys/user.h> 60#include <sys/queue.h> 61#include <sys/systm.h> 62#include <sys/kernel.h> 63#include <sys/msgbuf.h> 64#include <sys/lock.h> 65#include <sys/mutex.h> 66#include <sys/vmmeter.h> 67 68#include <vm/vm.h> 69#include <vm/vm_page.h> 70#include <vm/vm_kern.h> 71#include <vm/vm_pageout.h> 72#include <vm/vm_extern.h> 73#include <vm/vm_object.h> 74#include <vm/vm_param.h> 75#include <vm/vm_map.h> 76#include <vm/vm_pager.h> 77#include <vm/uma.h> 78 79#include <machine/cpu.h> 80#include <machine/pcb.h> 81#include <machine/powerpc.h> 82 83#include <machine/tlb.h> 84#include <machine/spr.h> 85#include <machine/vmparam.h> 86#include <machine/md_var.h> 87#include <machine/mmuvar.h> 88#include <machine/pmap.h> 89#include <machine/pte.h> 90 91#include "mmu_if.h" 92 93#define DEBUG 94#undef DEBUG 95 96#ifdef DEBUG 97#define debugf(fmt, args...) 
printf(fmt, ##args) 98#else 99#define debugf(fmt, args...) 100#endif 101 102#define TODO panic("%s: not implemented", __func__); 103 104#include "opt_sched.h" 105#ifndef SCHED_4BSD 106#error "e500 only works with SCHED_4BSD which uses a global scheduler lock." 107#endif 108extern struct mtx sched_lock; 109 110/* Kernel physical load address. */ 111extern uint32_t kernload; 112 113struct mem_region availmem_regions[MEM_REGIONS]; 114int availmem_regions_sz; 115 116/* Reserved KVA space and mutex for mmu_booke_zero_page. */ 117static vm_offset_t zero_page_va; 118static struct mtx zero_page_mutex; 119 120static struct mtx tlbivax_mutex; 121 122/* 123 * Reserved KVA space for mmu_booke_zero_page_idle. This is used 124 * by idle thred only, no lock required. 125 */ 126static vm_offset_t zero_page_idle_va; 127 128/* Reserved KVA space and mutex for mmu_booke_copy_page. */ 129static vm_offset_t copy_page_src_va; 130static vm_offset_t copy_page_dst_va; 131static struct mtx copy_page_mutex; 132 133/**************************************************************************/ 134/* PMAP */ 135/**************************************************************************/ 136 137static void mmu_booke_enter_locked(mmu_t, pmap_t, vm_offset_t, vm_page_t, 138 vm_prot_t, boolean_t); 139 140unsigned int kptbl_min; /* Index of the first kernel ptbl. */ 141unsigned int kernel_ptbls; /* Number of KVA ptbls. */ 142 143static int pagedaemon_waken; 144 145/* 146 * If user pmap is processed with mmu_booke_remove and the resident count 147 * drops to 0, there are no more pages to remove, so we need not continue. 148 */ 149#define PMAP_REMOVE_DONE(pmap) \ 150 ((pmap) != kernel_pmap && (pmap)->pm_stats.resident_count == 0) 151 152extern void tlb_lock(uint32_t *); 153extern void tlb_unlock(uint32_t *); 154extern void tid_flush(tlbtid_t); 155 156/**************************************************************************/ 157/* TLB and TID handling */ 158/**************************************************************************/ 159 160/* Translation ID busy table */ 161static volatile pmap_t tidbusy[MAXCPU][TID_MAX + 1]; 162 163/* 164 * TLB0 capabilities (entry, way numbers etc.). These can vary between e500 165 * core revisions and should be read from h/w registers during early config. 
166 */ 167uint32_t tlb0_entries; 168uint32_t tlb0_ways; 169uint32_t tlb0_entries_per_way; 170 171#define TLB0_ENTRIES (tlb0_entries) 172#define TLB0_WAYS (tlb0_ways) 173#define TLB0_ENTRIES_PER_WAY (tlb0_entries_per_way) 174 175#define TLB1_ENTRIES 16 176 177/* In-ram copy of the TLB1 */ 178static tlb_entry_t tlb1[TLB1_ENTRIES]; 179 180/* Next free entry in the TLB1 */ 181static unsigned int tlb1_idx; 182 183static tlbtid_t tid_alloc(struct pmap *); 184 185static void tlb_print_entry(int, uint32_t, uint32_t, uint32_t, uint32_t); 186 187static int tlb1_set_entry(vm_offset_t, vm_offset_t, vm_size_t, uint32_t); 188static void tlb1_write_entry(unsigned int); 189static int tlb1_iomapped(int, vm_paddr_t, vm_size_t, vm_offset_t *); 190static vm_size_t tlb1_mapin_region(vm_offset_t, vm_offset_t, vm_size_t); 191 192static vm_size_t tsize2size(unsigned int); 193static unsigned int size2tsize(vm_size_t); 194static unsigned int ilog2(unsigned int); 195 196static void set_mas4_defaults(void); 197 198static inline void tlb0_flush_entry(vm_offset_t); 199static inline unsigned int tlb0_tableidx(vm_offset_t, unsigned int); 200 201/**************************************************************************/ 202/* Page table management */ 203/**************************************************************************/ 204 205/* Data for the pv entry allocation mechanism */ 206static uma_zone_t pvzone; 207static struct vm_object pvzone_obj; 208static int pv_entry_count = 0, pv_entry_max = 0, pv_entry_high_water = 0; 209 210#define PV_ENTRY_ZONE_MIN 2048 /* min pv entries in uma zone */ 211 212#ifndef PMAP_SHPGPERPROC 213#define PMAP_SHPGPERPROC 200 214#endif 215 216static void ptbl_init(void); 217static struct ptbl_buf *ptbl_buf_alloc(void); 218static void ptbl_buf_free(struct ptbl_buf *); 219static void ptbl_free_pmap_ptbl(pmap_t, pte_t *); 220 221static pte_t *ptbl_alloc(mmu_t, pmap_t, unsigned int); 222static void ptbl_free(mmu_t, pmap_t, unsigned int); 223static void ptbl_hold(mmu_t, pmap_t, unsigned int); 224static int ptbl_unhold(mmu_t, pmap_t, unsigned int); 225 226static vm_paddr_t pte_vatopa(mmu_t, pmap_t, vm_offset_t); 227static pte_t *pte_find(mmu_t, pmap_t, vm_offset_t); 228static void pte_enter(mmu_t, pmap_t, vm_page_t, vm_offset_t, uint32_t); 229static int pte_remove(mmu_t, pmap_t, vm_offset_t, uint8_t); 230 231static pv_entry_t pv_alloc(void); 232static void pv_free(pv_entry_t); 233static void pv_insert(pmap_t, vm_offset_t, vm_page_t); 234static void pv_remove(pmap_t, vm_offset_t, vm_page_t); 235 236/* Number of kva ptbl buffers, each covering one ptbl (PTBL_PAGES). */ 237#define PTBL_BUFS (128 * 16) 238 239struct ptbl_buf { 240 TAILQ_ENTRY(ptbl_buf) link; /* list link */ 241 vm_offset_t kva; /* va of mapping */ 242}; 243 244/* ptbl free list and a lock used for access synchronization. */ 245static TAILQ_HEAD(, ptbl_buf) ptbl_buf_freelist; 246static struct mtx ptbl_buf_freelist_lock; 247 248/* Base address of kva space allocated fot ptbl bufs. */ 249static vm_offset_t ptbl_buf_pool_vabase; 250 251/* Pointer to ptbl_buf structures. 
*/ 252static struct ptbl_buf *ptbl_bufs; 253 254/* 255 * Kernel MMU interface 256 */ 257static void mmu_booke_change_wiring(mmu_t, pmap_t, vm_offset_t, boolean_t); 258static void mmu_booke_clear_modify(mmu_t, vm_page_t); 259static void mmu_booke_clear_reference(mmu_t, vm_page_t); 260static void mmu_booke_copy(pmap_t, pmap_t, vm_offset_t, vm_size_t, 261 vm_offset_t); 262static void mmu_booke_copy_page(mmu_t, vm_page_t, vm_page_t); 263static void mmu_booke_enter(mmu_t, pmap_t, vm_offset_t, vm_page_t, 264 vm_prot_t, boolean_t); 265static void mmu_booke_enter_object(mmu_t, pmap_t, vm_offset_t, vm_offset_t, 266 vm_page_t, vm_prot_t); 267static void mmu_booke_enter_quick(mmu_t, pmap_t, vm_offset_t, vm_page_t, 268 vm_prot_t); 269static vm_paddr_t mmu_booke_extract(mmu_t, pmap_t, vm_offset_t); 270static vm_page_t mmu_booke_extract_and_hold(mmu_t, pmap_t, vm_offset_t, 271 vm_prot_t); 272static void mmu_booke_init(mmu_t); 273static boolean_t mmu_booke_is_modified(mmu_t, vm_page_t); 274static boolean_t mmu_booke_is_prefaultable(mmu_t, pmap_t, vm_offset_t); 275static boolean_t mmu_booke_ts_referenced(mmu_t, vm_page_t); 276static vm_offset_t mmu_booke_map(mmu_t, vm_offset_t *, vm_offset_t, vm_offset_t, 277 int); 278static int mmu_booke_mincore(mmu_t, pmap_t, vm_offset_t); 279static void mmu_booke_object_init_pt(mmu_t, pmap_t, vm_offset_t, 280 vm_object_t, vm_pindex_t, vm_size_t); 281static boolean_t mmu_booke_page_exists_quick(mmu_t, pmap_t, vm_page_t); 282static void mmu_booke_page_init(mmu_t, vm_page_t); 283static int mmu_booke_page_wired_mappings(mmu_t, vm_page_t); 284static void mmu_booke_pinit(mmu_t, pmap_t); 285static void mmu_booke_pinit0(mmu_t, pmap_t); 286static void mmu_booke_protect(mmu_t, pmap_t, vm_offset_t, vm_offset_t, 287 vm_prot_t); 288static void mmu_booke_qenter(mmu_t, vm_offset_t, vm_page_t *, int); 289static void mmu_booke_qremove(mmu_t, vm_offset_t, int); 290static void mmu_booke_release(mmu_t, pmap_t); 291static void mmu_booke_remove(mmu_t, pmap_t, vm_offset_t, vm_offset_t); 292static void mmu_booke_remove_all(mmu_t, vm_page_t); 293static void mmu_booke_remove_write(mmu_t, vm_page_t); 294static void mmu_booke_zero_page(mmu_t, vm_page_t); 295static void mmu_booke_zero_page_area(mmu_t, vm_page_t, int, int); 296static void mmu_booke_zero_page_idle(mmu_t, vm_page_t); 297static void mmu_booke_activate(mmu_t, struct thread *); 298static void mmu_booke_deactivate(mmu_t, struct thread *); 299static void mmu_booke_bootstrap(mmu_t, vm_offset_t, vm_offset_t); 300static void *mmu_booke_mapdev(mmu_t, vm_offset_t, vm_size_t); 301static void mmu_booke_unmapdev(mmu_t, vm_offset_t, vm_size_t); 302static vm_offset_t mmu_booke_kextract(mmu_t, vm_offset_t); 303static void mmu_booke_kenter(mmu_t, vm_offset_t, vm_offset_t); 304static void mmu_booke_kremove(mmu_t, vm_offset_t); 305static boolean_t mmu_booke_dev_direct_mapped(mmu_t, vm_offset_t, vm_size_t); 306static boolean_t mmu_booke_page_executable(mmu_t, vm_page_t); 307 308static mmu_method_t mmu_booke_methods[] = { 309 /* pmap dispatcher interface */ 310 MMUMETHOD(mmu_change_wiring, mmu_booke_change_wiring), 311 MMUMETHOD(mmu_clear_modify, mmu_booke_clear_modify), 312 MMUMETHOD(mmu_clear_reference, mmu_booke_clear_reference), 313 MMUMETHOD(mmu_copy, mmu_booke_copy), 314 MMUMETHOD(mmu_copy_page, mmu_booke_copy_page), 315 MMUMETHOD(mmu_enter, mmu_booke_enter), 316 MMUMETHOD(mmu_enter_object, mmu_booke_enter_object), 317 MMUMETHOD(mmu_enter_quick, mmu_booke_enter_quick), 318 MMUMETHOD(mmu_extract, mmu_booke_extract), 319 
MMUMETHOD(mmu_extract_and_hold, mmu_booke_extract_and_hold), 320 MMUMETHOD(mmu_init, mmu_booke_init), 321 MMUMETHOD(mmu_is_modified, mmu_booke_is_modified), 322 MMUMETHOD(mmu_is_prefaultable, mmu_booke_is_prefaultable), 323 MMUMETHOD(mmu_ts_referenced, mmu_booke_ts_referenced), 324 MMUMETHOD(mmu_map, mmu_booke_map), 325 MMUMETHOD(mmu_mincore, mmu_booke_mincore), 326 MMUMETHOD(mmu_object_init_pt, mmu_booke_object_init_pt), 327 MMUMETHOD(mmu_page_exists_quick,mmu_booke_page_exists_quick), 328 MMUMETHOD(mmu_page_init, mmu_booke_page_init), 329 MMUMETHOD(mmu_page_wired_mappings, mmu_booke_page_wired_mappings), 330 MMUMETHOD(mmu_pinit, mmu_booke_pinit), 331 MMUMETHOD(mmu_pinit0, mmu_booke_pinit0), 332 MMUMETHOD(mmu_protect, mmu_booke_protect), 333 MMUMETHOD(mmu_qenter, mmu_booke_qenter), 334 MMUMETHOD(mmu_qremove, mmu_booke_qremove), 335 MMUMETHOD(mmu_release, mmu_booke_release), 336 MMUMETHOD(mmu_remove, mmu_booke_remove), 337 MMUMETHOD(mmu_remove_all, mmu_booke_remove_all), 338 MMUMETHOD(mmu_remove_write, mmu_booke_remove_write), 339 MMUMETHOD(mmu_zero_page, mmu_booke_zero_page), 340 MMUMETHOD(mmu_zero_page_area, mmu_booke_zero_page_area), 341 MMUMETHOD(mmu_zero_page_idle, mmu_booke_zero_page_idle), 342 MMUMETHOD(mmu_activate, mmu_booke_activate), 343 MMUMETHOD(mmu_deactivate, mmu_booke_deactivate), 344 345 /* Internal interfaces */ 346 MMUMETHOD(mmu_bootstrap, mmu_booke_bootstrap), 347 MMUMETHOD(mmu_dev_direct_mapped,mmu_booke_dev_direct_mapped), 348 MMUMETHOD(mmu_mapdev, mmu_booke_mapdev), 349 MMUMETHOD(mmu_kenter, mmu_booke_kenter), 350 MMUMETHOD(mmu_kextract, mmu_booke_kextract), 351/* MMUMETHOD(mmu_kremove, mmu_booke_kremove), */ 352 MMUMETHOD(mmu_page_executable, mmu_booke_page_executable), 353 MMUMETHOD(mmu_unmapdev, mmu_booke_unmapdev), 354 355 { 0, 0 } 356}; 357 358static mmu_def_t booke_mmu = { 359 MMU_TYPE_BOOKE, 360 mmu_booke_methods, 361 0 362}; 363MMU_DEF(booke_mmu); 364 365/* Return number of entries in TLB0. */ 366static __inline void 367tlb0_get_tlbconf(void) 368{ 369 uint32_t tlb0_cfg; 370 371 tlb0_cfg = mfspr(SPR_TLB0CFG); 372 tlb0_entries = tlb0_cfg & TLBCFG_NENTRY_MASK; 373 tlb0_ways = (tlb0_cfg & TLBCFG_ASSOC_MASK) >> TLBCFG_ASSOC_SHIFT; 374 tlb0_entries_per_way = tlb0_entries / tlb0_ways; 375} 376 377/* Initialize pool of kva ptbl buffers. */ 378static void 379ptbl_init(void) 380{ 381 int i; 382 383 CTR3(KTR_PMAP, "%s: s (ptbl_bufs = 0x%08x size 0x%08x)", __func__, 384 (uint32_t)ptbl_bufs, sizeof(struct ptbl_buf) * PTBL_BUFS); 385 CTR3(KTR_PMAP, "%s: s (ptbl_buf_pool_vabase = 0x%08x size = 0x%08x)", 386 __func__, ptbl_buf_pool_vabase, PTBL_BUFS * PTBL_PAGES * PAGE_SIZE); 387 388 mtx_init(&ptbl_buf_freelist_lock, "ptbl bufs lock", NULL, MTX_DEF); 389 TAILQ_INIT(&ptbl_buf_freelist); 390 391 for (i = 0; i < PTBL_BUFS; i++) { 392 ptbl_bufs[i].kva = ptbl_buf_pool_vabase + i * PTBL_PAGES * PAGE_SIZE; 393 TAILQ_INSERT_TAIL(&ptbl_buf_freelist, &ptbl_bufs[i], link); 394 } 395} 396 397/* Get a ptbl_buf from the freelist. */ 398static struct ptbl_buf * 399ptbl_buf_alloc(void) 400{ 401 struct ptbl_buf *buf; 402 403 mtx_lock(&ptbl_buf_freelist_lock); 404 buf = TAILQ_FIRST(&ptbl_buf_freelist); 405 if (buf != NULL) 406 TAILQ_REMOVE(&ptbl_buf_freelist, buf, link); 407 mtx_unlock(&ptbl_buf_freelist_lock); 408 409 CTR2(KTR_PMAP, "%s: buf = %p", __func__, buf); 410 411 return (buf); 412} 413 414/* Return ptbl buff to free pool. 
*/ 415static void 416ptbl_buf_free(struct ptbl_buf *buf) 417{ 418 419 CTR2(KTR_PMAP, "%s: buf = %p", __func__, buf); 420 421 mtx_lock(&ptbl_buf_freelist_lock); 422 TAILQ_INSERT_TAIL(&ptbl_buf_freelist, buf, link); 423 mtx_unlock(&ptbl_buf_freelist_lock); 424} 425 426/* 427 * Search the list of allocated ptbl bufs and find on list of allocated ptbls 428 */ 429static void 430ptbl_free_pmap_ptbl(pmap_t pmap, pte_t *ptbl) 431{ 432 struct ptbl_buf *pbuf; 433 434 CTR2(KTR_PMAP, "%s: ptbl = %p", __func__, ptbl); 435 436 PMAP_LOCK_ASSERT(pmap, MA_OWNED); 437 438 TAILQ_FOREACH(pbuf, &pmap->pm_ptbl_list, link) 439 if (pbuf->kva == (vm_offset_t)ptbl) { 440 /* Remove from pmap ptbl buf list. */ 441 TAILQ_REMOVE(&pmap->pm_ptbl_list, pbuf, link); 442 443 /* Free corresponding ptbl buf. */ 444 ptbl_buf_free(pbuf); 445 break; 446 } 447} 448 449/* Allocate page table. */ 450static pte_t * 451ptbl_alloc(mmu_t mmu, pmap_t pmap, unsigned int pdir_idx) 452{ 453 vm_page_t mtbl[PTBL_PAGES]; 454 vm_page_t m; 455 struct ptbl_buf *pbuf; 456 unsigned int pidx; 457 pte_t *ptbl; 458 int i; 459 460 CTR4(KTR_PMAP, "%s: pmap = %p su = %d pdir_idx = %d", __func__, pmap, 461 (pmap == kernel_pmap), pdir_idx); 462 463 KASSERT((pdir_idx <= (VM_MAXUSER_ADDRESS / PDIR_SIZE)), 464 ("ptbl_alloc: invalid pdir_idx")); 465 KASSERT((pmap->pm_pdir[pdir_idx] == NULL), 466 ("pte_alloc: valid ptbl entry exists!")); 467 468 pbuf = ptbl_buf_alloc(); 469 if (pbuf == NULL) 470 panic("pte_alloc: couldn't alloc kernel virtual memory"); 471 472 ptbl = (pte_t *)pbuf->kva; 473 474 CTR2(KTR_PMAP, "%s: ptbl kva = %p", __func__, ptbl); 475 476 /* Allocate ptbl pages, this will sleep! */ 477 for (i = 0; i < PTBL_PAGES; i++) { 478 pidx = (PTBL_PAGES * pdir_idx) + i; 479 while ((m = vm_page_alloc(NULL, pidx, 480 VM_ALLOC_NOOBJ | VM_ALLOC_WIRED)) == NULL) { 481 482 PMAP_UNLOCK(pmap); 483 vm_page_unlock_queues(); 484 VM_WAIT; 485 vm_page_lock_queues(); 486 PMAP_LOCK(pmap); 487 } 488 mtbl[i] = m; 489 } 490 491 /* Map allocated pages into kernel_pmap. */ 492 mmu_booke_qenter(mmu, (vm_offset_t)ptbl, mtbl, PTBL_PAGES); 493 494 /* Zero whole ptbl. */ 495 bzero((caddr_t)ptbl, PTBL_PAGES * PAGE_SIZE); 496 497 /* Add pbuf to the pmap ptbl bufs list. */ 498 TAILQ_INSERT_TAIL(&pmap->pm_ptbl_list, pbuf, link); 499 500 return (ptbl); 501} 502 503/* Free ptbl pages and invalidate pdir entry. */ 504static void 505ptbl_free(mmu_t mmu, pmap_t pmap, unsigned int pdir_idx) 506{ 507 pte_t *ptbl; 508 vm_paddr_t pa; 509 vm_offset_t va; 510 vm_page_t m; 511 int i; 512 513 CTR4(KTR_PMAP, "%s: pmap = %p su = %d pdir_idx = %d", __func__, pmap, 514 (pmap == kernel_pmap), pdir_idx); 515 516 KASSERT((pdir_idx <= (VM_MAXUSER_ADDRESS / PDIR_SIZE)), 517 ("ptbl_free: invalid pdir_idx")); 518 519 ptbl = pmap->pm_pdir[pdir_idx]; 520 521 CTR2(KTR_PMAP, "%s: ptbl = %p", __func__, ptbl); 522 523 KASSERT((ptbl != NULL), ("ptbl_free: null ptbl")); 524 525 /* 526 * Invalidate the pdir entry as soon as possible, so that other CPUs 527 * don't attempt to look up the page tables we are releasing. 
528 */ 529 mtx_lock_spin(&tlbivax_mutex); 530 531 pmap->pm_pdir[pdir_idx] = NULL; 532 533 mtx_unlock_spin(&tlbivax_mutex); 534 535 for (i = 0; i < PTBL_PAGES; i++) { 536 va = ((vm_offset_t)ptbl + (i * PAGE_SIZE)); 537 pa = pte_vatopa(mmu, kernel_pmap, va); 538 m = PHYS_TO_VM_PAGE(pa); 539 vm_page_free_zero(m); 540 atomic_subtract_int(&cnt.v_wire_count, 1); 541 mmu_booke_kremove(mmu, va); 542 } 543 544 ptbl_free_pmap_ptbl(pmap, ptbl); 545} 546 547/* 548 * Decrement ptbl pages hold count and attempt to free ptbl pages. 549 * Called when removing pte entry from ptbl. 550 * 551 * Return 1 if ptbl pages were freed. 552 */ 553static int 554ptbl_unhold(mmu_t mmu, pmap_t pmap, unsigned int pdir_idx) 555{ 556 pte_t *ptbl; 557 vm_paddr_t pa; 558 vm_page_t m; 559 int i; 560 561 CTR4(KTR_PMAP, "%s: pmap = %p su = %d pdir_idx = %d", __func__, pmap, 562 (pmap == kernel_pmap), pdir_idx); 563 564 KASSERT((pdir_idx <= (VM_MAXUSER_ADDRESS / PDIR_SIZE)), 565 ("ptbl_unhold: invalid pdir_idx")); 566 KASSERT((pmap != kernel_pmap), 567 ("ptbl_unhold: unholding kernel ptbl!")); 568 569 ptbl = pmap->pm_pdir[pdir_idx]; 570 571 //debugf("ptbl_unhold: ptbl = 0x%08x\n", (u_int32_t)ptbl); 572 KASSERT(((vm_offset_t)ptbl >= VM_MIN_KERNEL_ADDRESS), 573 ("ptbl_unhold: non kva ptbl")); 574 575 /* decrement hold count */ 576 for (i = 0; i < PTBL_PAGES; i++) { 577 pa = pte_vatopa(mmu, kernel_pmap, 578 (vm_offset_t)ptbl + (i * PAGE_SIZE)); 579 m = PHYS_TO_VM_PAGE(pa); 580 m->wire_count--; 581 } 582 583 /* 584 * Free ptbl pages if there are no pte etries in this ptbl. 585 * wire_count has the same value for all ptbl pages, so check the last 586 * page. 587 */ 588 if (m->wire_count == 0) { 589 ptbl_free(mmu, pmap, pdir_idx); 590 591 //debugf("ptbl_unhold: e (freed ptbl)\n"); 592 return (1); 593 } 594 595 return (0); 596} 597 598/* 599 * Increment hold count for ptbl pages. This routine is used when a new pte 600 * entry is being inserted into the ptbl. 601 */ 602static void 603ptbl_hold(mmu_t mmu, pmap_t pmap, unsigned int pdir_idx) 604{ 605 vm_paddr_t pa; 606 pte_t *ptbl; 607 vm_page_t m; 608 int i; 609 610 CTR3(KTR_PMAP, "%s: pmap = %p pdir_idx = %d", __func__, pmap, 611 pdir_idx); 612 613 KASSERT((pdir_idx <= (VM_MAXUSER_ADDRESS / PDIR_SIZE)), 614 ("ptbl_hold: invalid pdir_idx")); 615 KASSERT((pmap != kernel_pmap), 616 ("ptbl_hold: holding kernel ptbl!")); 617 618 ptbl = pmap->pm_pdir[pdir_idx]; 619 620 KASSERT((ptbl != NULL), ("ptbl_hold: null ptbl")); 621 622 for (i = 0; i < PTBL_PAGES; i++) { 623 pa = pte_vatopa(mmu, kernel_pmap, 624 (vm_offset_t)ptbl + (i * PAGE_SIZE)); 625 m = PHYS_TO_VM_PAGE(pa); 626 m->wire_count++; 627 } 628} 629 630/* Allocate pv_entry structure. */ 631pv_entry_t 632pv_alloc(void) 633{ 634 pv_entry_t pv; 635 636 pv_entry_count++; 637 if ((pv_entry_count > pv_entry_high_water) && 638 (pagedaemon_waken == 0)) { 639 pagedaemon_waken = 1; 640 wakeup(&vm_pages_needed); 641 } 642 pv = uma_zalloc(pvzone, M_NOWAIT); 643 644 return (pv); 645} 646 647/* Free pv_entry structure. */ 648static __inline void 649pv_free(pv_entry_t pve) 650{ 651 652 pv_entry_count--; 653 uma_zfree(pvzone, pve); 654} 655 656 657/* Allocate and initialize pv_entry structure. 
*/ 658static void 659pv_insert(pmap_t pmap, vm_offset_t va, vm_page_t m) 660{ 661 pv_entry_t pve; 662 663 //int su = (pmap == kernel_pmap); 664 //debugf("pv_insert: s (su = %d pmap = 0x%08x va = 0x%08x m = 0x%08x)\n", su, 665 // (u_int32_t)pmap, va, (u_int32_t)m); 666 667 pve = pv_alloc(); 668 if (pve == NULL) 669 panic("pv_insert: no pv entries!"); 670 671 pve->pv_pmap = pmap; 672 pve->pv_va = va; 673 674 /* add to pv_list */ 675 PMAP_LOCK_ASSERT(pmap, MA_OWNED); 676 mtx_assert(&vm_page_queue_mtx, MA_OWNED); 677 678 TAILQ_INSERT_TAIL(&m->md.pv_list, pve, pv_link); 679 680 //debugf("pv_insert: e\n"); 681} 682 683/* Destroy pv entry. */ 684static void 685pv_remove(pmap_t pmap, vm_offset_t va, vm_page_t m) 686{ 687 pv_entry_t pve; 688 689 //int su = (pmap == kernel_pmap); 690 //debugf("pv_remove: s (su = %d pmap = 0x%08x va = 0x%08x)\n", su, (u_int32_t)pmap, va); 691 692 PMAP_LOCK_ASSERT(pmap, MA_OWNED); 693 mtx_assert(&vm_page_queue_mtx, MA_OWNED); 694 695 /* find pv entry */ 696 TAILQ_FOREACH(pve, &m->md.pv_list, pv_link) { 697 if ((pmap == pve->pv_pmap) && (va == pve->pv_va)) { 698 /* remove from pv_list */ 699 TAILQ_REMOVE(&m->md.pv_list, pve, pv_link); 700 if (TAILQ_EMPTY(&m->md.pv_list)) 701 vm_page_flag_clear(m, PG_WRITEABLE); 702 703 /* free pv entry struct */ 704 pv_free(pve); 705 break; 706 } 707 } 708 709 //debugf("pv_remove: e\n"); 710} 711 712/* 713 * Clean pte entry, try to free page table page if requested. 714 * 715 * Return 1 if ptbl pages were freed, otherwise return 0. 716 */ 717static int 718pte_remove(mmu_t mmu, pmap_t pmap, vm_offset_t va, uint8_t flags) 719{ 720 unsigned int pdir_idx = PDIR_IDX(va); 721 unsigned int ptbl_idx = PTBL_IDX(va); 722 vm_page_t m; 723 pte_t *ptbl; 724 pte_t *pte; 725 726 //int su = (pmap == kernel_pmap); 727 //debugf("pte_remove: s (su = %d pmap = 0x%08x va = 0x%08x flags = %d)\n", 728 // su, (u_int32_t)pmap, va, flags); 729 730 ptbl = pmap->pm_pdir[pdir_idx]; 731 KASSERT(ptbl, ("pte_remove: null ptbl")); 732 733 pte = &ptbl[ptbl_idx]; 734 735 if (pte == NULL || !PTE_ISVALID(pte)) 736 return (0); 737 738 /* Get vm_page_t for mapped pte. */ 739 m = PHYS_TO_VM_PAGE(PTE_PA(pte)); 740 741 if (PTE_ISWIRED(pte)) 742 pmap->pm_stats.wired_count--; 743 744 if (!PTE_ISFAKE(pte)) { 745 /* Handle managed entry. */ 746 if (PTE_ISMANAGED(pte)) { 747 748 /* Handle modified pages. */ 749 if (PTE_ISMODIFIED(pte)) 750 vm_page_dirty(m); 751 752 /* Referenced pages. */ 753 if (PTE_ISREFERENCED(pte)) 754 vm_page_flag_set(m, PG_REFERENCED); 755 756 /* Remove pv_entry from pv_list. */ 757 pv_remove(pmap, va, m); 758 } 759 } 760 761 mtx_lock_spin(&tlbivax_mutex); 762 763 tlb0_flush_entry(va); 764 pte->flags = 0; 765 pte->rpn = 0; 766 767 mtx_unlock_spin(&tlbivax_mutex); 768 769 pmap->pm_stats.resident_count--; 770 771 if (flags & PTBL_UNHOLD) { 772 //debugf("pte_remove: e (unhold)\n"); 773 return (ptbl_unhold(mmu, pmap, pdir_idx)); 774 } 775 776 //debugf("pte_remove: e\n"); 777 return (0); 778} 779 780/* 781 * Insert PTE for a given page and virtual address. 782 */ 783static void 784pte_enter(mmu_t mmu, pmap_t pmap, vm_page_t m, vm_offset_t va, uint32_t flags) 785{ 786 unsigned int pdir_idx = PDIR_IDX(va); 787 unsigned int ptbl_idx = PTBL_IDX(va); 788 pte_t *ptbl, *pte; 789 790 CTR4(KTR_PMAP, "%s: su = %d pmap = %p va = %p", __func__, 791 pmap == kernel_pmap, pmap, va); 792 793 /* Get the page table pointer. */ 794 ptbl = pmap->pm_pdir[pdir_idx]; 795 796 if (ptbl == NULL) { 797 /* Allocate page table pages. 
*/ 798 ptbl = ptbl_alloc(mmu, pmap, pdir_idx); 799 } else { 800 /* 801 * Check if there is valid mapping for requested 802 * va, if there is, remove it. 803 */ 804 pte = &pmap->pm_pdir[pdir_idx][ptbl_idx]; 805 if (PTE_ISVALID(pte)) { 806 pte_remove(mmu, pmap, va, PTBL_HOLD); 807 } else { 808 /* 809 * pte is not used, increment hold count 810 * for ptbl pages. 811 */ 812 if (pmap != kernel_pmap) 813 ptbl_hold(mmu, pmap, pdir_idx); 814 } 815 } 816 817 /* 818 * Insert pv_entry into pv_list for mapped page if part of managed 819 * memory. 820 */ 821 if ((m->flags & PG_FICTITIOUS) == 0) { 822 if ((m->flags & PG_UNMANAGED) == 0) { 823 flags |= PTE_MANAGED; 824 825 /* Create and insert pv entry. */ 826 pv_insert(pmap, va, m); 827 } 828 } else { 829 flags |= PTE_FAKE; 830 } 831 832 pmap->pm_stats.resident_count++; 833 834 mtx_lock_spin(&tlbivax_mutex); 835 836 tlb0_flush_entry(va); 837 if (pmap->pm_pdir[pdir_idx] == NULL) { 838 /* 839 * If we just allocated a new page table, hook it in 840 * the pdir. 841 */ 842 pmap->pm_pdir[pdir_idx] = ptbl; 843 } 844 pte = &(pmap->pm_pdir[pdir_idx][ptbl_idx]); 845 pte->rpn = VM_PAGE_TO_PHYS(m) & ~PTE_PA_MASK; 846 pte->flags |= (PTE_VALID | flags); 847 848 mtx_unlock_spin(&tlbivax_mutex); 849} 850 851/* Return the pa for the given pmap/va. */ 852static vm_paddr_t 853pte_vatopa(mmu_t mmu, pmap_t pmap, vm_offset_t va) 854{ 855 vm_paddr_t pa = 0; 856 pte_t *pte; 857 858 pte = pte_find(mmu, pmap, va); 859 if ((pte != NULL) && PTE_ISVALID(pte)) 860 pa = (PTE_PA(pte) | (va & PTE_PA_MASK)); 861 return (pa); 862} 863 864/* Get a pointer to a PTE in a page table. */ 865static pte_t * 866pte_find(mmu_t mmu, pmap_t pmap, vm_offset_t va) 867{ 868 unsigned int pdir_idx = PDIR_IDX(va); 869 unsigned int ptbl_idx = PTBL_IDX(va); 870 871 KASSERT((pmap != NULL), ("pte_find: invalid pmap")); 872 873 if (pmap->pm_pdir[pdir_idx]) 874 return (&(pmap->pm_pdir[pdir_idx][ptbl_idx])); 875 876 return (NULL); 877} 878 879/**************************************************************************/ 880/* PMAP related */ 881/**************************************************************************/ 882 883/* 884 * This is called during e500_init, before the system is really initialized. 885 */ 886static void 887mmu_booke_bootstrap(mmu_t mmu, vm_offset_t kernelstart, vm_offset_t kernelend) 888{ 889 vm_offset_t phys_kernelend; 890 struct mem_region *mp, *mp1; 891 int cnt, i, j; 892 u_int s, e, sz; 893 u_int phys_avail_count; 894 vm_size_t physsz, hwphyssz, kstack0_sz; 895 vm_offset_t kernel_pdir, kstack0; 896 vm_paddr_t kstack0_phys; 897 898 debugf("mmu_booke_bootstrap: entered\n"); 899 900 /* Initialize invalidation mutex */ 901 mtx_init(&tlbivax_mutex, "tlbivax", NULL, MTX_SPIN); 902 903 /* Read TLB0 size and associativity. */ 904 tlb0_get_tlbconf(); 905 906 /* Align kernel start and end address (kernel image). */ 907 kernelstart = trunc_page(kernelstart); 908 kernelend = round_page(kernelend); 909 910 /* Allocate space for the message buffer. */ 911 msgbufp = (struct msgbuf *)kernelend; 912 kernelend += MSGBUF_SIZE; 913 debugf(" msgbufp at 0x%08x end = 0x%08x\n", (uint32_t)msgbufp, 914 kernelend); 915 916 kernelend = round_page(kernelend); 917 918 /* Allocate space for ptbl_bufs. */ 919 ptbl_bufs = (struct ptbl_buf *)kernelend; 920 kernelend += sizeof(struct ptbl_buf) * PTBL_BUFS; 921 debugf(" ptbl_bufs at 0x%08x end = 0x%08x\n", (uint32_t)ptbl_bufs, 922 kernelend); 923 924 kernelend = round_page(kernelend); 925 926 /* Allocate PTE tables for kernel KVA. 
*/ 927 kernel_pdir = kernelend; 928 kernel_ptbls = (VM_MAX_KERNEL_ADDRESS - VM_MIN_KERNEL_ADDRESS + 929 PDIR_SIZE - 1) / PDIR_SIZE; 930 kernelend += kernel_ptbls * PTBL_PAGES * PAGE_SIZE; 931 debugf(" kernel ptbls: %d\n", kernel_ptbls); 932 debugf(" kernel pdir at 0x%08x end = 0x%08x\n", kernel_pdir, kernelend); 933 934 debugf(" kernelend: 0x%08x\n", kernelend); 935 if (kernelend - kernelstart > 0x1000000) { 936 kernelend = (kernelend + 0x3fffff) & ~0x3fffff; 937 tlb1_mapin_region(kernelstart + 0x1000000, 938 kernload + 0x1000000, kernelend - kernelstart - 0x1000000); 939 } else 940 kernelend = (kernelend + 0xffffff) & ~0xffffff; 941 942 debugf(" updated kernelend: 0x%08x\n", kernelend); 943 944 /* 945 * Clear the structures - note we can only do it safely after the 946 * possible additional TLB1 translations are in place (above) so that 947 * all range up to the currently calculated 'kernelend' is covered. 948 */ 949 memset((void *)ptbl_bufs, 0, sizeof(struct ptbl_buf) * PTBL_SIZE); 950 memset((void *)kernel_pdir, 0, kernel_ptbls * PTBL_PAGES * PAGE_SIZE); 951 952 /*******************************************************/ 953 /* Set the start and end of kva. */ 954 /*******************************************************/ 955 virtual_avail = kernelend; 956 virtual_end = VM_MAX_KERNEL_ADDRESS; 957 958 /* Allocate KVA space for page zero/copy operations. */ 959 zero_page_va = virtual_avail; 960 virtual_avail += PAGE_SIZE; 961 zero_page_idle_va = virtual_avail; 962 virtual_avail += PAGE_SIZE; 963 copy_page_src_va = virtual_avail; 964 virtual_avail += PAGE_SIZE; 965 copy_page_dst_va = virtual_avail; 966 virtual_avail += PAGE_SIZE; 967 debugf("zero_page_va = 0x%08x\n", zero_page_va); 968 debugf("zero_page_idle_va = 0x%08x\n", zero_page_idle_va); 969 debugf("copy_page_src_va = 0x%08x\n", copy_page_src_va); 970 debugf("copy_page_dst_va = 0x%08x\n", copy_page_dst_va); 971 972 /* Initialize page zero/copy mutexes. */ 973 mtx_init(&zero_page_mutex, "mmu_booke_zero_page", NULL, MTX_DEF); 974 mtx_init(©_page_mutex, "mmu_booke_copy_page", NULL, MTX_DEF); 975 976 /* Allocate KVA space for ptbl bufs. */ 977 ptbl_buf_pool_vabase = virtual_avail; 978 virtual_avail += PTBL_BUFS * PTBL_PAGES * PAGE_SIZE; 979 debugf("ptbl_buf_pool_vabase = 0x%08x end = 0x%08x\n", 980 ptbl_buf_pool_vabase, virtual_avail); 981 982 /* Calculate corresponding physical addresses for the kernel region. */ 983 phys_kernelend = kernload + (kernelend - kernelstart); 984 debugf("kernel image and allocated data:\n"); 985 debugf(" kernload = 0x%08x\n", kernload); 986 debugf(" kernelstart = 0x%08x\n", kernelstart); 987 debugf(" kernelend = 0x%08x\n", kernelend); 988 debugf(" kernel size = 0x%08x\n", kernelend - kernelstart); 989 990 if (sizeof(phys_avail) / sizeof(phys_avail[0]) < availmem_regions_sz) 991 panic("mmu_booke_bootstrap: phys_avail too small"); 992 993 /* 994 * Remove kernel physical address range from avail regions list. Page 995 * align all regions. Non-page aligned memory isn't very interesting 996 * to us. Also, sort the entries for ascending addresses. 997 */ 998 sz = 0; 999 cnt = availmem_regions_sz; 1000 debugf("processing avail regions:\n"); 1001 for (mp = availmem_regions; mp->mr_size; mp++) { 1002 s = mp->mr_start; 1003 e = mp->mr_start + mp->mr_size; 1004 debugf(" %08x-%08x -> ", s, e); 1005 /* Check whether this region holds all of the kernel. 
*/ 1006 if (s < kernload && e > phys_kernelend) { 1007 availmem_regions[cnt].mr_start = phys_kernelend; 1008 availmem_regions[cnt++].mr_size = e - phys_kernelend; 1009 e = kernload; 1010 } 1011 /* Look whether this regions starts within the kernel. */ 1012 if (s >= kernload && s < phys_kernelend) { 1013 if (e <= phys_kernelend) 1014 goto empty; 1015 s = phys_kernelend; 1016 } 1017 /* Now look whether this region ends within the kernel. */ 1018 if (e > kernload && e <= phys_kernelend) { 1019 if (s >= kernload) 1020 goto empty; 1021 e = kernload; 1022 } 1023 /* Now page align the start and size of the region. */ 1024 s = round_page(s); 1025 e = trunc_page(e); 1026 if (e < s) 1027 e = s; 1028 sz = e - s; 1029 debugf("%08x-%08x = %x\n", s, e, sz); 1030 1031 /* Check whether some memory is left here. */ 1032 if (sz == 0) { 1033 empty: 1034 memmove(mp, mp + 1, 1035 (cnt - (mp - availmem_regions)) * sizeof(*mp)); 1036 cnt--; 1037 mp--; 1038 continue; 1039 } 1040 1041 /* Do an insertion sort. */ 1042 for (mp1 = availmem_regions; mp1 < mp; mp1++) 1043 if (s < mp1->mr_start) 1044 break; 1045 if (mp1 < mp) { 1046 memmove(mp1 + 1, mp1, (char *)mp - (char *)mp1); 1047 mp1->mr_start = s; 1048 mp1->mr_size = sz; 1049 } else { 1050 mp->mr_start = s; 1051 mp->mr_size = sz; 1052 } 1053 } 1054 availmem_regions_sz = cnt; 1055 1056 /*******************************************************/ 1057 /* Steal physical memory for kernel stack from the end */ 1058 /* of the first avail region */ 1059 /*******************************************************/ 1060 kstack0_sz = KSTACK_PAGES * PAGE_SIZE; 1061 kstack0_phys = availmem_regions[0].mr_start + 1062 availmem_regions[0].mr_size; 1063 kstack0_phys -= kstack0_sz; 1064 availmem_regions[0].mr_size -= kstack0_sz; 1065 1066 /*******************************************************/ 1067 /* Fill in phys_avail table, based on availmem_regions */ 1068 /*******************************************************/ 1069 phys_avail_count = 0; 1070 physsz = 0; 1071 hwphyssz = 0; 1072 TUNABLE_ULONG_FETCH("hw.physmem", (u_long *) &hwphyssz); 1073 1074 debugf("fill in phys_avail:\n"); 1075 for (i = 0, j = 0; i < availmem_regions_sz; i++, j += 2) { 1076 1077 debugf(" region: 0x%08x - 0x%08x (0x%08x)\n", 1078 availmem_regions[i].mr_start, 1079 availmem_regions[i].mr_start + 1080 availmem_regions[i].mr_size, 1081 availmem_regions[i].mr_size); 1082 1083 if (hwphyssz != 0 && 1084 (physsz + availmem_regions[i].mr_size) >= hwphyssz) { 1085 debugf(" hw.physmem adjust\n"); 1086 if (physsz < hwphyssz) { 1087 phys_avail[j] = availmem_regions[i].mr_start; 1088 phys_avail[j + 1] = 1089 availmem_regions[i].mr_start + 1090 hwphyssz - physsz; 1091 physsz = hwphyssz; 1092 phys_avail_count++; 1093 } 1094 break; 1095 } 1096 1097 phys_avail[j] = availmem_regions[i].mr_start; 1098 phys_avail[j + 1] = availmem_regions[i].mr_start + 1099 availmem_regions[i].mr_size; 1100 phys_avail_count++; 1101 physsz += availmem_regions[i].mr_size; 1102 } 1103 physmem = btoc(physsz); 1104 1105 /* Calculate the last available physical address. */ 1106 for (i = 0; phys_avail[i + 2] != 0; i += 2) 1107 ; 1108 Maxmem = powerpc_btop(phys_avail[i + 1]); 1109 1110 debugf("Maxmem = 0x%08lx\n", Maxmem); 1111 debugf("phys_avail_count = %d\n", phys_avail_count); 1112 debugf("physsz = 0x%08x physmem = %ld (0x%08lx)\n", physsz, physmem, 1113 physmem); 1114 1115 /*******************************************************/ 1116 /* Initialize (statically allocated) kernel pmap. 
*/ 1117 /*******************************************************/ 1118 PMAP_LOCK_INIT(kernel_pmap); 1119 kptbl_min = VM_MIN_KERNEL_ADDRESS / PDIR_SIZE; 1120 1121 debugf("kernel_pmap = 0x%08x\n", (uint32_t)kernel_pmap); 1122 debugf("kptbl_min = %d, kernel_ptbls = %d\n", kptbl_min, kernel_ptbls); 1123 debugf("kernel pdir range: 0x%08x - 0x%08x\n", 1124 kptbl_min * PDIR_SIZE, (kptbl_min + kernel_ptbls) * PDIR_SIZE - 1); 1125 1126 /* Initialize kernel pdir */ 1127 for (i = 0; i < kernel_ptbls; i++) 1128 kernel_pmap->pm_pdir[kptbl_min + i] = 1129 (pte_t *)(kernel_pdir + (i * PAGE_SIZE * PTBL_PAGES)); 1130 1131 for (i = 0; i < MAXCPU; i++) { 1132 kernel_pmap->pm_tid[i] = TID_KERNEL; 1133 1134 /* Initialize each CPU's tidbusy entry 0 with kernel_pmap */ 1135 tidbusy[i][0] = kernel_pmap; 1136 } 1137 /* Mark kernel_pmap active on all CPUs */ 1138 kernel_pmap->pm_active = ~0; 1139 1140 /*******************************************************/ 1141 /* Final setup */ 1142 /*******************************************************/ 1143 1144 /* Enter kstack0 into kernel map, provide guard page */ 1145 kstack0 = virtual_avail + KSTACK_GUARD_PAGES * PAGE_SIZE; 1146 thread0.td_kstack = kstack0; 1147 thread0.td_kstack_pages = KSTACK_PAGES; 1148 1149 debugf("kstack_sz = 0x%08x\n", kstack0_sz); 1150 debugf("kstack0_phys at 0x%08x - 0x%08x\n", 1151 kstack0_phys, kstack0_phys + kstack0_sz); 1152 debugf("kstack0 at 0x%08x - 0x%08x\n", kstack0, kstack0 + kstack0_sz); 1153 1154 virtual_avail += KSTACK_GUARD_PAGES * PAGE_SIZE + kstack0_sz; 1155 for (i = 0; i < KSTACK_PAGES; i++) { 1156 mmu_booke_kenter(mmu, kstack0, kstack0_phys); 1157 kstack0 += PAGE_SIZE; 1158 kstack0_phys += PAGE_SIZE; 1159 } 1160 1161 debugf("virtual_avail = %08x\n", virtual_avail); 1162 debugf("virtual_end = %08x\n", virtual_end); 1163 1164 debugf("mmu_booke_bootstrap: exit\n"); 1165} 1166 1167/* 1168 * Get the physical page address for the given pmap/virtual address. 1169 */ 1170static vm_paddr_t 1171mmu_booke_extract(mmu_t mmu, pmap_t pmap, vm_offset_t va) 1172{ 1173 vm_paddr_t pa; 1174 1175 PMAP_LOCK(pmap); 1176 pa = pte_vatopa(mmu, pmap, va); 1177 PMAP_UNLOCK(pmap); 1178 1179 return (pa); 1180} 1181 1182/* 1183 * Extract the physical page address associated with the given 1184 * kernel virtual address. 1185 */ 1186static vm_paddr_t 1187mmu_booke_kextract(mmu_t mmu, vm_offset_t va) 1188{ 1189 1190 return (pte_vatopa(mmu, kernel_pmap, va)); 1191} 1192 1193/* 1194 * Initialize the pmap module. 1195 * Called by vm_init, to initialize any structures that the pmap 1196 * system needs to map virtual memory. 1197 */ 1198static void 1199mmu_booke_init(mmu_t mmu) 1200{ 1201 int shpgperproc = PMAP_SHPGPERPROC; 1202 1203 /* 1204 * Initialize the address space (zone) for the pv entries. Set a 1205 * high water mark so that the system can recover from excessive 1206 * numbers of pv entries. 1207 */ 1208 pvzone = uma_zcreate("PV ENTRY", sizeof(struct pv_entry), NULL, NULL, 1209 NULL, NULL, UMA_ALIGN_PTR, UMA_ZONE_VM | UMA_ZONE_NOFREE); 1210 1211 TUNABLE_INT_FETCH("vm.pmap.shpgperproc", &shpgperproc); 1212 pv_entry_max = shpgperproc * maxproc + cnt.v_page_count; 1213 1214 TUNABLE_INT_FETCH("vm.pmap.pv_entries", &pv_entry_max); 1215 pv_entry_high_water = 9 * (pv_entry_max / 10); 1216 1217 uma_zone_set_obj(pvzone, &pvzone_obj, pv_entry_max); 1218 1219 /* Pre-fill pvzone with initial number of pv entries. */ 1220 uma_prealloc(pvzone, PV_ENTRY_ZONE_MIN); 1221 1222 /* Initialize ptbl allocation. 
*/ 1223 ptbl_init(); 1224} 1225 1226/* 1227 * Map a list of wired pages into kernel virtual address space. This is 1228 * intended for temporary mappings which do not need page modification or 1229 * references recorded. Existing mappings in the region are overwritten. 1230 */ 1231static void 1232mmu_booke_qenter(mmu_t mmu, vm_offset_t sva, vm_page_t *m, int count) 1233{ 1234 vm_offset_t va; 1235 1236 va = sva; 1237 while (count-- > 0) { 1238 mmu_booke_kenter(mmu, va, VM_PAGE_TO_PHYS(*m)); 1239 va += PAGE_SIZE; 1240 m++; 1241 } 1242} 1243 1244/* 1245 * Remove page mappings from kernel virtual address space. Intended for 1246 * temporary mappings entered by mmu_booke_qenter. 1247 */ 1248static void 1249mmu_booke_qremove(mmu_t mmu, vm_offset_t sva, int count) 1250{ 1251 vm_offset_t va; 1252 1253 va = sva; 1254 while (count-- > 0) { 1255 mmu_booke_kremove(mmu, va); 1256 va += PAGE_SIZE; 1257 } 1258} 1259 1260/* 1261 * Map a wired page into kernel virtual address space. 1262 */ 1263static void 1264mmu_booke_kenter(mmu_t mmu, vm_offset_t va, vm_offset_t pa) 1265{ 1266 unsigned int pdir_idx = PDIR_IDX(va); 1267 unsigned int ptbl_idx = PTBL_IDX(va); 1268 uint32_t flags; 1269 pte_t *pte; 1270 1271 KASSERT(((va >= VM_MIN_KERNEL_ADDRESS) && 1272 (va <= VM_MAX_KERNEL_ADDRESS)), ("mmu_booke_kenter: invalid va")); 1273 1274#if 0 1275 /* assume IO mapping, set I, G bits */ 1276 flags = (PTE_G | PTE_I | PTE_FAKE); 1277 1278 /* if mapping is within system memory, do not set I, G bits */ 1279 for (i = 0; i < totalmem_regions_sz; i++) { 1280 if ((pa >= totalmem_regions[i].mr_start) && 1281 (pa < (totalmem_regions[i].mr_start + 1282 totalmem_regions[i].mr_size))) { 1283 flags &= ~(PTE_I | PTE_G | PTE_FAKE); 1284 break; 1285 } 1286 } 1287#else 1288 flags = 0; 1289#endif 1290 1291 flags |= (PTE_SR | PTE_SW | PTE_SX | PTE_WIRED | PTE_VALID); 1292 flags |= PTE_M; 1293 1294 pte = &(kernel_pmap->pm_pdir[pdir_idx][ptbl_idx]); 1295 1296 mtx_lock_spin(&tlbivax_mutex); 1297 1298 if (PTE_ISVALID(pte)) { 1299 1300 CTR1(KTR_PMAP, "%s: replacing entry!", __func__); 1301 1302 /* Flush entry from TLB0 */ 1303 tlb0_flush_entry(va); 1304 } 1305 1306 pte->rpn = pa & ~PTE_PA_MASK; 1307 pte->flags = flags; 1308 1309 //debugf("mmu_booke_kenter: pdir_idx = %d ptbl_idx = %d va=0x%08x " 1310 // "pa=0x%08x rpn=0x%08x flags=0x%08x\n", 1311 // pdir_idx, ptbl_idx, va, pa, pte->rpn, pte->flags); 1312 1313 /* Flush the real memory from the instruction cache. */ 1314 if ((flags & (PTE_I | PTE_G)) == 0) { 1315 __syncicache((void *)va, PAGE_SIZE); 1316 } 1317 1318 mtx_unlock_spin(&tlbivax_mutex); 1319} 1320 1321/* 1322 * Remove a page from kernel page table. 1323 */ 1324static void 1325mmu_booke_kremove(mmu_t mmu, vm_offset_t va) 1326{ 1327 unsigned int pdir_idx = PDIR_IDX(va); 1328 unsigned int ptbl_idx = PTBL_IDX(va); 1329 pte_t *pte; 1330 1331// CTR2(KTR_PMAP,("%s: s (va = 0x%08x)\n", __func__, va)); 1332 1333 KASSERT(((va >= VM_MIN_KERNEL_ADDRESS) && 1334 (va <= VM_MAX_KERNEL_ADDRESS)), 1335 ("mmu_booke_kremove: invalid va")); 1336 1337 pte = &(kernel_pmap->pm_pdir[pdir_idx][ptbl_idx]); 1338 1339 if (!PTE_ISVALID(pte)) { 1340 1341 CTR1(KTR_PMAP, "%s: invalid pte", __func__); 1342 1343 return; 1344 } 1345 1346 mtx_lock_spin(&tlbivax_mutex); 1347 1348 /* Invalidate entry in TLB0, update PTE. */ 1349 tlb0_flush_entry(va); 1350 pte->flags = 0; 1351 pte->rpn = 0; 1352 1353 mtx_unlock_spin(&tlbivax_mutex); 1354} 1355 1356/* 1357 * Initialize pmap associated with process 0. 
1358 */ 1359static void 1360mmu_booke_pinit0(mmu_t mmu, pmap_t pmap) 1361{ 1362 1363 mmu_booke_pinit(mmu, pmap); 1364 PCPU_SET(curpmap, pmap); 1365} 1366 1367/* 1368 * Initialize a preallocated and zeroed pmap structure, 1369 * such as one in a vmspace structure. 1370 */ 1371static void 1372mmu_booke_pinit(mmu_t mmu, pmap_t pmap) 1373{ 1374 int i; 1375 1376 CTR4(KTR_PMAP, "%s: pmap = %p, proc %d '%s'", __func__, pmap, 1377 curthread->td_proc->p_pid, curthread->td_proc->p_comm); 1378 1379 KASSERT((pmap != kernel_pmap), ("pmap_pinit: initializing kernel_pmap")); 1380 1381 PMAP_LOCK_INIT(pmap); 1382 for (i = 0; i < MAXCPU; i++) 1383 pmap->pm_tid[i] = TID_NONE; 1384 pmap->pm_active = 0; 1385 bzero(&pmap->pm_stats, sizeof(pmap->pm_stats)); 1386 bzero(&pmap->pm_pdir, sizeof(pte_t *) * PDIR_NENTRIES); 1387 TAILQ_INIT(&pmap->pm_ptbl_list); 1388} 1389 1390/* 1391 * Release any resources held by the given physical map. 1392 * Called when a pmap initialized by mmu_booke_pinit is being released. 1393 * Should only be called if the map contains no valid mappings. 1394 */ 1395static void 1396mmu_booke_release(mmu_t mmu, pmap_t pmap) 1397{ 1398 1399 printf("mmu_booke_release: s\n"); 1400 1401 KASSERT(pmap->pm_stats.resident_count == 0, 1402 ("pmap_release: pmap resident count %ld != 0", 1403 pmap->pm_stats.resident_count)); 1404 1405 PMAP_LOCK_DESTROY(pmap); 1406} 1407 1408#if 0 1409/* Not needed, kernel page tables are statically allocated. */ 1410void 1411mmu_booke_growkernel(vm_offset_t maxkvaddr) 1412{ 1413} 1414#endif 1415 1416/* 1417 * Insert the given physical page at the specified virtual address in the 1418 * target physical map with the protection requested. If specified the page 1419 * will be wired down. 1420 */ 1421static void 1422mmu_booke_enter(mmu_t mmu, pmap_t pmap, vm_offset_t va, vm_page_t m, 1423 vm_prot_t prot, boolean_t wired) 1424{ 1425 1426 vm_page_lock_queues(); 1427 PMAP_LOCK(pmap); 1428 mmu_booke_enter_locked(mmu, pmap, va, m, prot, wired); 1429 vm_page_unlock_queues(); 1430 PMAP_UNLOCK(pmap); 1431} 1432 1433static void 1434mmu_booke_enter_locked(mmu_t mmu, pmap_t pmap, vm_offset_t va, vm_page_t m, 1435 vm_prot_t prot, boolean_t wired) 1436{ 1437 pte_t *pte; 1438 vm_paddr_t pa; 1439 uint32_t flags; 1440 int su, sync; 1441 1442 pa = VM_PAGE_TO_PHYS(m); 1443 su = (pmap == kernel_pmap); 1444 sync = 0; 1445 1446 //debugf("mmu_booke_enter_locked: s (pmap=0x%08x su=%d tid=%d m=0x%08x va=0x%08x " 1447 // "pa=0x%08x prot=0x%08x wired=%d)\n", 1448 // (u_int32_t)pmap, su, pmap->pm_tid, 1449 // (u_int32_t)m, va, pa, prot, wired); 1450 1451 if (su) { 1452 KASSERT(((va >= virtual_avail) && 1453 (va <= VM_MAX_KERNEL_ADDRESS)), 1454 ("mmu_booke_enter_locked: kernel pmap, non kernel va")); 1455 } else { 1456 KASSERT((va <= VM_MAXUSER_ADDRESS), 1457 ("mmu_booke_enter_locked: user pmap, non user va")); 1458 } 1459 1460 PMAP_LOCK_ASSERT(pmap, MA_OWNED); 1461 1462 /* 1463 * If there is an existing mapping, and the physical address has not 1464 * changed, must be protection or wiring change. 1465 */ 1466 if (((pte = pte_find(mmu, pmap, va)) != NULL) && 1467 (PTE_ISVALID(pte)) && (PTE_PA(pte) == pa)) { 1468 1469 /* 1470 * Before actually updating pte->flags we calculate and 1471 * prepare its new value in a helper var. 1472 */ 1473 flags = pte->flags; 1474 flags &= ~(PTE_UW | PTE_UX | PTE_SW | PTE_SX | PTE_MODIFIED); 1475 1476 /* Wiring change, just update stats. 
*/ 1477 if (wired) { 1478 if (!PTE_ISWIRED(pte)) { 1479 flags |= PTE_WIRED; 1480 pmap->pm_stats.wired_count++; 1481 } 1482 } else { 1483 if (PTE_ISWIRED(pte)) { 1484 flags &= ~PTE_WIRED; 1485 pmap->pm_stats.wired_count--; 1486 } 1487 } 1488 1489 if (prot & VM_PROT_WRITE) { 1490 /* Add write permissions. */ 1491 flags |= PTE_SW; 1492 if (!su) 1493 flags |= PTE_UW; 1494 } else { 1495 /* Handle modified pages, sense modify status. */ 1496 1497 /* 1498 * The PTE_MODIFIED flag could be set by underlying 1499 * TLB misses since we last read it (above), possibly 1500 * other CPUs could update it so we check in the PTE 1501 * directly rather than rely on that saved local flags 1502 * copy. 1503 */ 1504 if (PTE_ISMODIFIED(pte)) 1505 vm_page_dirty(m); 1506 } 1507 1508 if (prot & VM_PROT_EXECUTE) { 1509 flags |= PTE_SX; 1510 if (!su) 1511 flags |= PTE_UX; 1512 1513 /* 1514 * Check existing flags for execute permissions: if we 1515 * are turning execute permissions on, icache should 1516 * be flushed. 1517 */ 1518 if ((flags & (PTE_UX | PTE_SX)) == 0) 1519 sync++; 1520 } 1521 1522 flags &= ~PTE_REFERENCED; 1523 1524 /* 1525 * The new flags value is all calculated -- only now actually 1526 * update the PTE. 1527 */ 1528 mtx_lock_spin(&tlbivax_mutex); 1529 1530 tlb0_flush_entry(va); 1531 pte->flags = flags; 1532 1533 mtx_unlock_spin(&tlbivax_mutex); 1534 1535 } else { 1536 /* 1537 * If there is an existing mapping, but it's for a different 1538 * physical address, pte_enter() will delete the old mapping. 1539 */ 1540 //if ((pte != NULL) && PTE_ISVALID(pte)) 1541 // debugf("mmu_booke_enter_locked: replace\n"); 1542 //else 1543 // debugf("mmu_booke_enter_locked: new\n"); 1544 1545 /* Now set up the flags and install the new mapping. */ 1546 flags = (PTE_SR | PTE_VALID); 1547 flags |= PTE_M; 1548 1549 if (!su) 1550 flags |= PTE_UR; 1551 1552 if (prot & VM_PROT_WRITE) { 1553 flags |= PTE_SW; 1554 if (!su) 1555 flags |= PTE_UW; 1556 } 1557 1558 if (prot & VM_PROT_EXECUTE) { 1559 flags |= PTE_SX; 1560 if (!su) 1561 flags |= PTE_UX; 1562 } 1563 1564 /* If its wired update stats. */ 1565 if (wired) { 1566 pmap->pm_stats.wired_count++; 1567 flags |= PTE_WIRED; 1568 } 1569 1570 pte_enter(mmu, pmap, m, va, flags); 1571 1572 /* Flush the real memory from the instruction cache. */ 1573 if (prot & VM_PROT_EXECUTE) 1574 sync++; 1575 } 1576 1577 if (sync && (su || pmap == PCPU_GET(curpmap))) { 1578 __syncicache((void *)va, PAGE_SIZE); 1579 sync = 0; 1580 } 1581 1582 if (sync) { 1583 /* Create a temporary mapping. */ 1584 pmap = PCPU_GET(curpmap); 1585 1586 va = 0; 1587 pte = pte_find(mmu, pmap, va); 1588 KASSERT(pte == NULL, ("%s:%d", __func__, __LINE__)); 1589 1590 flags = PTE_SR | PTE_VALID | PTE_UR | PTE_M; 1591 1592 pte_enter(mmu, pmap, m, va, flags); 1593 __syncicache((void *)va, PAGE_SIZE); 1594 pte_remove(mmu, pmap, va, PTBL_UNHOLD); 1595 } 1596} 1597 1598/* 1599 * Maps a sequence of resident pages belonging to the same object. 1600 * The sequence begins with the given page m_start. This page is 1601 * mapped at the given virtual address start. Each subsequent page is 1602 * mapped at a virtual address that is offset from start by the same 1603 * amount as the page is offset from m_start within the object. The 1604 * last page in the sequence is the page with the largest offset from 1605 * m_start that can be mapped at a virtual address less than the given 1606 * virtual address end. 
Not every virtual page between start and end 1607 * is mapped; only those for which a resident page exists with the 1608 * corresponding offset from m_start are mapped. 1609 */ 1610static void 1611mmu_booke_enter_object(mmu_t mmu, pmap_t pmap, vm_offset_t start, 1612 vm_offset_t end, vm_page_t m_start, vm_prot_t prot) 1613{ 1614 vm_page_t m; 1615 vm_pindex_t diff, psize; 1616 1617 psize = atop(end - start); 1618 m = m_start; 1619 PMAP_LOCK(pmap); 1620 while (m != NULL && (diff = m->pindex - m_start->pindex) < psize) { 1621 mmu_booke_enter_locked(mmu, pmap, start + ptoa(diff), m, 1622 prot & (VM_PROT_READ | VM_PROT_EXECUTE), FALSE); 1623 m = TAILQ_NEXT(m, listq); 1624 } 1625 PMAP_UNLOCK(pmap); 1626} 1627 1628static void 1629mmu_booke_enter_quick(mmu_t mmu, pmap_t pmap, vm_offset_t va, vm_page_t m, 1630 vm_prot_t prot) 1631{ 1632 1633 PMAP_LOCK(pmap); 1634 mmu_booke_enter_locked(mmu, pmap, va, m, 1635 prot & (VM_PROT_READ | VM_PROT_EXECUTE), FALSE); 1636 PMAP_UNLOCK(pmap); 1637} 1638 1639/* 1640 * Remove the given range of addresses from the specified map. 1641 * 1642 * It is assumed that the start and end are properly rounded to the page size. 1643 */ 1644static void 1645mmu_booke_remove(mmu_t mmu, pmap_t pmap, vm_offset_t va, vm_offset_t endva) 1646{ 1647 pte_t *pte; 1648 uint8_t hold_flag; 1649 1650 int su = (pmap == kernel_pmap); 1651 1652 //debugf("mmu_booke_remove: s (su = %d pmap=0x%08x tid=%d va=0x%08x endva=0x%08x)\n", 1653 // su, (u_int32_t)pmap, pmap->pm_tid, va, endva); 1654 1655 if (su) { 1656 KASSERT(((va >= virtual_avail) && 1657 (va <= VM_MAX_KERNEL_ADDRESS)), 1658 ("mmu_booke_remove: kernel pmap, non kernel va")); 1659 } else { 1660 KASSERT((va <= VM_MAXUSER_ADDRESS), 1661 ("mmu_booke_remove: user pmap, non user va")); 1662 } 1663 1664 if (PMAP_REMOVE_DONE(pmap)) { 1665 //debugf("mmu_booke_remove: e (empty)\n"); 1666 return; 1667 } 1668 1669 hold_flag = PTBL_HOLD_FLAG(pmap); 1670 //debugf("mmu_booke_remove: hold_flag = %d\n", hold_flag); 1671 1672 vm_page_lock_queues(); 1673 PMAP_LOCK(pmap); 1674 for (; va < endva; va += PAGE_SIZE) { 1675 pte = pte_find(mmu, pmap, va); 1676 if ((pte != NULL) && PTE_ISVALID(pte)) 1677 pte_remove(mmu, pmap, va, hold_flag); 1678 } 1679 PMAP_UNLOCK(pmap); 1680 vm_page_unlock_queues(); 1681 1682 //debugf("mmu_booke_remove: e\n"); 1683} 1684 1685/* 1686 * Remove physical page from all pmaps in which it resides. 1687 */ 1688static void 1689mmu_booke_remove_all(mmu_t mmu, vm_page_t m) 1690{ 1691 pv_entry_t pv, pvn; 1692 uint8_t hold_flag; 1693 1694 mtx_assert(&vm_page_queue_mtx, MA_OWNED); 1695 1696 for (pv = TAILQ_FIRST(&m->md.pv_list); pv != NULL; pv = pvn) { 1697 pvn = TAILQ_NEXT(pv, pv_link); 1698 1699 PMAP_LOCK(pv->pv_pmap); 1700 hold_flag = PTBL_HOLD_FLAG(pv->pv_pmap); 1701 pte_remove(mmu, pv->pv_pmap, pv->pv_va, hold_flag); 1702 PMAP_UNLOCK(pv->pv_pmap); 1703 } 1704 vm_page_flag_clear(m, PG_WRITEABLE); 1705} 1706 1707/* 1708 * Map a range of physical addresses into kernel virtual address space. 
1709 */ 1710static vm_offset_t 1711mmu_booke_map(mmu_t mmu, vm_offset_t *virt, vm_offset_t pa_start, 1712 vm_offset_t pa_end, int prot) 1713{ 1714 vm_offset_t sva = *virt; 1715 vm_offset_t va = sva; 1716 1717 //debugf("mmu_booke_map: s (sva = 0x%08x pa_start = 0x%08x pa_end = 0x%08x)\n", 1718 // sva, pa_start, pa_end); 1719 1720 while (pa_start < pa_end) { 1721 mmu_booke_kenter(mmu, va, pa_start); 1722 va += PAGE_SIZE; 1723 pa_start += PAGE_SIZE; 1724 } 1725 *virt = va; 1726 1727 //debugf("mmu_booke_map: e (va = 0x%08x)\n", va); 1728 return (sva); 1729} 1730 1731/* 1732 * The pmap must be activated before it's address space can be accessed in any 1733 * way. 1734 */ 1735static void 1736mmu_booke_activate(mmu_t mmu, struct thread *td) 1737{ 1738 pmap_t pmap; 1739 1740 pmap = &td->td_proc->p_vmspace->vm_pmap; 1741 1742 CTR5(KTR_PMAP, "%s: s (td = %p, proc = '%s', id = %d, pmap = 0x%08x)", 1743 __func__, td, td->td_proc->p_comm, td->td_proc->p_pid, pmap); 1744 1745 KASSERT((pmap != kernel_pmap), ("mmu_booke_activate: kernel_pmap!")); 1746 1747 mtx_lock_spin(&sched_lock); 1748 1749 atomic_set_int(&pmap->pm_active, PCPU_GET(cpumask)); 1750 PCPU_SET(curpmap, pmap); 1751 1752 if (pmap->pm_tid[PCPU_GET(cpuid)] == TID_NONE) 1753 tid_alloc(pmap); 1754 1755 /* Load PID0 register with pmap tid value. */ 1756 mtspr(SPR_PID0, pmap->pm_tid[PCPU_GET(cpuid)]); 1757 __asm __volatile("isync"); 1758 1759 mtx_unlock_spin(&sched_lock); 1760 1761 CTR3(KTR_PMAP, "%s: e (tid = %d for '%s')", __func__, 1762 pmap->pm_tid[PCPU_GET(cpuid)], td->td_proc->p_comm); 1763} 1764 1765/* 1766 * Deactivate the specified process's address space. 1767 */ 1768static void 1769mmu_booke_deactivate(mmu_t mmu, struct thread *td) 1770{ 1771 pmap_t pmap; 1772 1773 pmap = &td->td_proc->p_vmspace->vm_pmap; 1774 1775 CTR5(KTR_PMAP, "%s: td=%p, proc = '%s', id = %d, pmap = 0x%08x", 1776 __func__, td, td->td_proc->p_comm, td->td_proc->p_pid, pmap); 1777 1778 atomic_clear_int(&pmap->pm_active, PCPU_GET(cpumask)); 1779 PCPU_SET(curpmap, NULL); 1780} 1781 1782/* 1783 * Copy the range specified by src_addr/len 1784 * from the source map to the range dst_addr/len 1785 * in the destination map. 1786 * 1787 * This routine is only advisory and need not do anything. 1788 */ 1789static void 1790mmu_booke_copy(pmap_t dst_pmap, pmap_t src_pmap, vm_offset_t dst_addr, 1791 vm_size_t len, vm_offset_t src_addr) 1792{ 1793 1794} 1795 1796/* 1797 * Set the physical protection on the specified range of this map as requested. 1798 */ 1799static void 1800mmu_booke_protect(mmu_t mmu, pmap_t pmap, vm_offset_t sva, vm_offset_t eva, 1801 vm_prot_t prot) 1802{ 1803 vm_offset_t va; 1804 vm_page_t m; 1805 pte_t *pte; 1806 1807 if ((prot & VM_PROT_READ) == VM_PROT_NONE) { 1808 mmu_booke_remove(mmu, pmap, sva, eva); 1809 return; 1810 } 1811 1812 if (prot & VM_PROT_WRITE) 1813 return; 1814 1815 vm_page_lock_queues(); 1816 PMAP_LOCK(pmap); 1817 for (va = sva; va < eva; va += PAGE_SIZE) { 1818 if ((pte = pte_find(mmu, pmap, va)) != NULL) { 1819 if (PTE_ISVALID(pte)) { 1820 m = PHYS_TO_VM_PAGE(PTE_PA(pte)); 1821 1822 mtx_lock_spin(&tlbivax_mutex); 1823 1824 /* Handle modified pages. */ 1825 if (PTE_ISMODIFIED(pte)) 1826 vm_page_dirty(m); 1827 1828 /* Referenced pages. 
*/ 1829 if (PTE_ISREFERENCED(pte)) 1830 vm_page_flag_set(m, PG_REFERENCED); 1831 1832 tlb0_flush_entry(va); 1833 pte->flags &= ~(PTE_UW | PTE_SW | PTE_MODIFIED | 1834 PTE_REFERENCED); 1835 1836 mtx_unlock_spin(&tlbivax_mutex); 1837 } 1838 } 1839 } 1840 PMAP_UNLOCK(pmap); 1841 vm_page_unlock_queues(); 1842} 1843 1844/* 1845 * Clear the write and modified bits in each of the given page's mappings. 1846 */ 1847static void 1848mmu_booke_remove_write(mmu_t mmu, vm_page_t m) 1849{ 1850 pv_entry_t pv; 1851 pte_t *pte; 1852 1853 mtx_assert(&vm_page_queue_mtx, MA_OWNED); 1854 if ((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) != 0 || 1855 (m->flags & PG_WRITEABLE) == 0) 1856 return; 1857 1858 TAILQ_FOREACH(pv, &m->md.pv_list, pv_link) { 1859 PMAP_LOCK(pv->pv_pmap); 1860 if ((pte = pte_find(mmu, pv->pv_pmap, pv->pv_va)) != NULL) { 1861 if (PTE_ISVALID(pte)) { 1862 m = PHYS_TO_VM_PAGE(PTE_PA(pte)); 1863 1864 mtx_lock_spin(&tlbivax_mutex); 1865 1866 /* Handle modified pages. */ 1867 if (PTE_ISMODIFIED(pte)) 1868 vm_page_dirty(m); 1869 1870 /* Referenced pages. */ 1871 if (PTE_ISREFERENCED(pte)) 1872 vm_page_flag_set(m, PG_REFERENCED); 1873 1874 /* Flush mapping from TLB0. */ 1875 pte->flags &= ~(PTE_UW | PTE_SW | PTE_MODIFIED | 1876 PTE_REFERENCED); 1877 1878 mtx_unlock_spin(&tlbivax_mutex); 1879 } 1880 } 1881 PMAP_UNLOCK(pv->pv_pmap); 1882 } 1883 vm_page_flag_clear(m, PG_WRITEABLE); 1884} 1885 1886static boolean_t 1887mmu_booke_page_executable(mmu_t mmu, vm_page_t m) 1888{ 1889 pv_entry_t pv; 1890 pte_t *pte; 1891 boolean_t executable; 1892 1893 executable = FALSE; 1894 TAILQ_FOREACH(pv, &m->md.pv_list, pv_link) { 1895 PMAP_LOCK(pv->pv_pmap); 1896 pte = pte_find(mmu, pv->pv_pmap, pv->pv_va); 1897 if (pte != NULL && PTE_ISVALID(pte) && (pte->flags & PTE_UX)) 1898 executable = TRUE; 1899 PMAP_UNLOCK(pv->pv_pmap); 1900 if (executable) 1901 break; 1902 } 1903 1904 return (executable); 1905} 1906 1907/* 1908 * Atomically extract and hold the physical page with the given 1909 * pmap and virtual address pair if that mapping permits the given 1910 * protection. 1911 */ 1912static vm_page_t 1913mmu_booke_extract_and_hold(mmu_t mmu, pmap_t pmap, vm_offset_t va, 1914 vm_prot_t prot) 1915{ 1916 pte_t *pte; 1917 vm_page_t m; 1918 uint32_t pte_wbit; 1919 1920 m = NULL; 1921 vm_page_lock_queues(); 1922 PMAP_LOCK(pmap); 1923 1924 pte = pte_find(mmu, pmap, va); 1925 if ((pte != NULL) && PTE_ISVALID(pte)) { 1926 if (pmap == kernel_pmap) 1927 pte_wbit = PTE_SW; 1928 else 1929 pte_wbit = PTE_UW; 1930 1931 if ((pte->flags & pte_wbit) || ((prot & VM_PROT_WRITE) == 0)) { 1932 m = PHYS_TO_VM_PAGE(PTE_PA(pte)); 1933 vm_page_hold(m); 1934 } 1935 } 1936 1937 vm_page_unlock_queues(); 1938 PMAP_UNLOCK(pmap); 1939 return (m); 1940} 1941 1942/* 1943 * Initialize a vm_page's machine-dependent fields. 1944 */ 1945static void 1946mmu_booke_page_init(mmu_t mmu, vm_page_t m) 1947{ 1948 1949 TAILQ_INIT(&m->md.pv_list); 1950} 1951 1952/* 1953 * mmu_booke_zero_page_area zeros the specified hardware page by 1954 * mapping it into virtual memory and using bzero to clear 1955 * its contents. 1956 * 1957 * off and size must reside within a single page. 1958 */ 1959static void 1960mmu_booke_zero_page_area(mmu_t mmu, vm_page_t m, int off, int size) 1961{ 1962 vm_offset_t va; 1963 1964 /* XXX KASSERT off and size are within a single page? 
        mtx_lock(&zero_page_mutex);
        va = zero_page_va;

        mmu_booke_kenter(mmu, va, VM_PAGE_TO_PHYS(m));
        bzero((caddr_t)va + off, size);
        mmu_booke_kremove(mmu, va);

        mtx_unlock(&zero_page_mutex);
}

/*
 * mmu_booke_zero_page zeros the specified hardware page.
 */
static void
mmu_booke_zero_page(mmu_t mmu, vm_page_t m)
{

        mmu_booke_zero_page_area(mmu, m, 0, PAGE_SIZE);
}

/*
 * mmu_booke_copy_page copies the specified (machine independent) page by
 * mapping the page into virtual memory and using memcpy to copy the page,
 * one machine dependent page at a time.
 */
static void
mmu_booke_copy_page(mmu_t mmu, vm_page_t sm, vm_page_t dm)
{
        vm_offset_t sva, dva;

        sva = copy_page_src_va;
        dva = copy_page_dst_va;

        mtx_lock(&copy_page_mutex);
        mmu_booke_kenter(mmu, sva, VM_PAGE_TO_PHYS(sm));
        mmu_booke_kenter(mmu, dva, VM_PAGE_TO_PHYS(dm));
        memcpy((caddr_t)dva, (caddr_t)sva, PAGE_SIZE);
        mmu_booke_kremove(mmu, dva);
        mmu_booke_kremove(mmu, sva);
        mtx_unlock(&copy_page_mutex);
}

#if 0
/*
 * Remove all pages from specified address space, this aids process exit
 * speeds. This is much faster than mmu_booke_remove in the case of running
 * down an entire address space. Only works for the current pmap.
 */
void
mmu_booke_remove_pages(pmap_t pmap)
{
}
#endif

/*
 * mmu_booke_zero_page_idle zeros the specified hardware page by mapping it
 * into virtual memory and using bzero to clear its contents. This is intended
 * to be called from the vm_pagezero process only and outside of Giant. No
 * lock is required.
 */
static void
mmu_booke_zero_page_idle(mmu_t mmu, vm_page_t m)
{
        vm_offset_t va;

        va = zero_page_idle_va;
        mmu_booke_kenter(mmu, va, VM_PAGE_TO_PHYS(m));
        bzero((caddr_t)va, PAGE_SIZE);
        mmu_booke_kremove(mmu, va);
}

/*
 * Return whether or not the specified physical page was modified
 * in any of the physical maps.
 */
static boolean_t
mmu_booke_is_modified(mmu_t mmu, vm_page_t m)
{
        pte_t *pte;
        pv_entry_t pv;

        mtx_assert(&vm_page_queue_mtx, MA_OWNED);
        if ((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) != 0)
                return (FALSE);

        TAILQ_FOREACH(pv, &m->md.pv_list, pv_link) {
                PMAP_LOCK(pv->pv_pmap);
                if ((pte = pte_find(mmu, pv->pv_pmap, pv->pv_va)) != NULL) {
                        if (!PTE_ISVALID(pte))
                                goto make_sure_to_unlock;

                        if (PTE_ISMODIFIED(pte)) {
                                PMAP_UNLOCK(pv->pv_pmap);
                                return (TRUE);
                        }
                }
make_sure_to_unlock:
                PMAP_UNLOCK(pv->pv_pmap);
        }
        return (FALSE);
}

/*
 * Return whether or not the specified virtual address is eligible
 * for prefault.
 */
static boolean_t
mmu_booke_is_prefaultable(mmu_t mmu, pmap_t pmap, vm_offset_t addr)
{

        return (FALSE);
}

/*
 * Clear the modify bits on the specified physical page.
 */
static void
mmu_booke_clear_modify(mmu_t mmu, vm_page_t m)
{
        pte_t *pte;
        pv_entry_t pv;

        mtx_assert(&vm_page_queue_mtx, MA_OWNED);
        if ((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) != 0)
                return;

        TAILQ_FOREACH(pv, &m->md.pv_list, pv_link) {
                PMAP_LOCK(pv->pv_pmap);
                if ((pte = pte_find(mmu, pv->pv_pmap, pv->pv_va)) != NULL) {
                        if (!PTE_ISVALID(pte))
                                goto make_sure_to_unlock;

                        mtx_lock_spin(&tlbivax_mutex);

                        if (pte->flags & (PTE_SW | PTE_UW | PTE_MODIFIED)) {
                                tlb0_flush_entry(pv->pv_va);
                                pte->flags &= ~(PTE_SW | PTE_UW | PTE_MODIFIED |
                                    PTE_REFERENCED);
                        }

                        mtx_unlock_spin(&tlbivax_mutex);
                }
make_sure_to_unlock:
                PMAP_UNLOCK(pv->pv_pmap);
        }
}

/*
 * Return a count of reference bits for a page, clearing those bits.
 * It is not necessary for every reference bit to be cleared, but it
 * is necessary that 0 only be returned when there are truly no
 * reference bits set.
 *
 * XXX: The exact number of bits to check and clear is a matter that
 * should be tested and standardized at some point in the future for
 * optimal aging of shared pages.
 */
static int
mmu_booke_ts_referenced(mmu_t mmu, vm_page_t m)
{
        pte_t *pte;
        pv_entry_t pv;
        int count;

        mtx_assert(&vm_page_queue_mtx, MA_OWNED);
        if ((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) != 0)
                return (0);

        count = 0;
        TAILQ_FOREACH(pv, &m->md.pv_list, pv_link) {
                PMAP_LOCK(pv->pv_pmap);
                if ((pte = pte_find(mmu, pv->pv_pmap, pv->pv_va)) != NULL) {
                        if (!PTE_ISVALID(pte))
                                goto make_sure_to_unlock;

                        if (PTE_ISREFERENCED(pte)) {
                                mtx_lock_spin(&tlbivax_mutex);

                                tlb0_flush_entry(pv->pv_va);
                                pte->flags &= ~PTE_REFERENCED;

                                mtx_unlock_spin(&tlbivax_mutex);

                                if (++count > 4) {
                                        PMAP_UNLOCK(pv->pv_pmap);
                                        break;
                                }
                        }
                }
make_sure_to_unlock:
                PMAP_UNLOCK(pv->pv_pmap);
        }
        return (count);
}

/*
 * Clear the reference bit on the specified physical page.
 */
static void
mmu_booke_clear_reference(mmu_t mmu, vm_page_t m)
{
        pte_t *pte;
        pv_entry_t pv;

        mtx_assert(&vm_page_queue_mtx, MA_OWNED);
        if ((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) != 0)
                return;

        TAILQ_FOREACH(pv, &m->md.pv_list, pv_link) {
                PMAP_LOCK(pv->pv_pmap);
                if ((pte = pte_find(mmu, pv->pv_pmap, pv->pv_va)) != NULL) {
                        if (!PTE_ISVALID(pte))
                                goto make_sure_to_unlock;

                        if (PTE_ISREFERENCED(pte)) {
                                mtx_lock_spin(&tlbivax_mutex);

                                tlb0_flush_entry(pv->pv_va);
                                pte->flags &= ~PTE_REFERENCED;

                                mtx_unlock_spin(&tlbivax_mutex);
                        }
                }
make_sure_to_unlock:
                PMAP_UNLOCK(pv->pv_pmap);
        }
}

/*
 * Change wiring attribute for a map/virtual-address pair.
 */
static void
mmu_booke_change_wiring(mmu_t mmu, pmap_t pmap, vm_offset_t va, boolean_t wired)
{
        pte_t *pte;

        PMAP_LOCK(pmap);
        if ((pte = pte_find(mmu, pmap, va)) != NULL) {
                if (wired) {
                        if (!PTE_ISWIRED(pte)) {
                                pte->flags |= PTE_WIRED;
                                pmap->pm_stats.wired_count++;
                        }
                } else {
                        if (PTE_ISWIRED(pte)) {
                                pte->flags &= ~PTE_WIRED;
                                pmap->pm_stats.wired_count--;
                        }
                }
        }
        PMAP_UNLOCK(pmap);
}

/*
 * Return true if the pmap's pv is one of the first 16 pvs linked to from this
 * page. This count may be changed upwards or downwards in the future; it is
 * only necessary that true be returned for a small subset of pmaps for proper
 * page aging.
 */
static boolean_t
mmu_booke_page_exists_quick(mmu_t mmu, pmap_t pmap, vm_page_t m)
{
        pv_entry_t pv;
        int loops;

        mtx_assert(&vm_page_queue_mtx, MA_OWNED);
        if ((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) != 0)
                return (FALSE);

        loops = 0;
        TAILQ_FOREACH(pv, &m->md.pv_list, pv_link) {
                if (pv->pv_pmap == pmap)
                        return (TRUE);

                if (++loops >= 16)
                        break;
        }
        return (FALSE);
}

/*
 * Return the number of managed mappings to the given physical page that are
 * wired.
 */
static int
mmu_booke_page_wired_mappings(mmu_t mmu, vm_page_t m)
{
        pv_entry_t pv;
        pte_t *pte;
        int count = 0;

        if ((m->flags & PG_FICTITIOUS) != 0)
                return (count);
        mtx_assert(&vm_page_queue_mtx, MA_OWNED);

        TAILQ_FOREACH(pv, &m->md.pv_list, pv_link) {
                PMAP_LOCK(pv->pv_pmap);
                if ((pte = pte_find(mmu, pv->pv_pmap, pv->pv_va)) != NULL)
                        if (PTE_ISVALID(pte) && PTE_ISWIRED(pte))
                                count++;
                PMAP_UNLOCK(pv->pv_pmap);
        }

        return (count);
}

static int
mmu_booke_dev_direct_mapped(mmu_t mmu, vm_offset_t pa, vm_size_t size)
{
        int i;
        vm_offset_t va;

        /*
         * This currently does not work for entries that
         * overlap TLB1 entries.
         */
        for (i = 0; i < tlb1_idx; i++) {
                if (tlb1_iomapped(i, pa, size, &va) == 0)
                        return (0);
        }

        return (EFAULT);
}

/*
 * Map a set of physical memory pages into the kernel virtual address space.
 * Return a pointer to where it is mapped. This routine is intended to be used
 * for mapping device memory, NOT real memory.
 */
static void *
mmu_booke_mapdev(mmu_t mmu, vm_offset_t pa, vm_size_t size)
{
        void *res;
        uintptr_t va;
        vm_size_t sz;

        va = (pa >= 0x80000000) ? pa : (0xe2000000 + pa);
        res = (void *)va;

        do {
                sz = 1 << (ilog2(size) & ~1);
                if (bootverbose)
                        printf("Wiring VA=%x to PA=%x (size=%x), "
                            "using TLB1[%d]\n", va, pa, sz, tlb1_idx);
                tlb1_set_entry(va, pa, sz, _TLB_ENTRY_IO);
                size -= sz;
                pa += sz;
                va += sz;
        } while (size > 0);

        return (res);
}
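#if 0
/*
 * Illustrative sketch (not compiled): how mmu_booke_mapdev() above chops a
 * request into power-of-4 chunks, since TLB1 entry sizes are powers of 4.
 * The 5 MB window size below is a made-up example input: it would consume
 * two TLB1 entries, 4 MB followed by 1 MB.
 */
static void
mapdev_split_example(void)
{
        vm_size_t size = 0x00500000;    /* hypothetical 5 MB device window */
        vm_size_t sz;

        while (size > 0) {
                sz = 1 << (ilog2(size) & ~1);   /* largest power of 4 <= size */
                printf("chunk: 0x%08x\n", sz);  /* prints 0x00400000, 0x00100000 */
                size -= sz;
        }
}
#endif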
/*
 * 'Unmap' a range mapped by mmu_booke_mapdev().
 */
static void
mmu_booke_unmapdev(mmu_t mmu, vm_offset_t va, vm_size_t size)
{
        vm_offset_t base, offset;

        /*
         * Unmap only if this is inside kernel virtual space.
         */
        if ((va >= VM_MIN_KERNEL_ADDRESS) && (va <= VM_MAX_KERNEL_ADDRESS)) {
                base = trunc_page(va);
                offset = va & PAGE_MASK;
                size = roundup(offset + size, PAGE_SIZE);
                kmem_free(kernel_map, base, size);
        }
}

/*
 * mmu_booke_object_init_pt preloads the ptes for a given object into the
 * specified pmap. This eliminates the blast of soft faults on process startup
 * and immediately after an mmap.
 */
static void
mmu_booke_object_init_pt(mmu_t mmu, pmap_t pmap, vm_offset_t addr,
    vm_object_t object, vm_pindex_t pindex, vm_size_t size)
{

        VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
        KASSERT(object->type == OBJT_DEVICE,
            ("mmu_booke_object_init_pt: non-device object"));
}

/*
 * Perform the pmap work for mincore.
 */
static int
mmu_booke_mincore(mmu_t mmu, pmap_t pmap, vm_offset_t addr)
{

        TODO;
        return (0);
}

/**************************************************************************/
/* TID handling */
/**************************************************************************/

/*
 * Allocate a TID. If necessary, steal one from someone else.
 * The new TID is flushed from the TLB before returning.
 */
static tlbtid_t
tid_alloc(pmap_t pmap)
{
        tlbtid_t tid;
        int thiscpu;

        KASSERT((pmap != kernel_pmap), ("tid_alloc: kernel pmap"));

        CTR2(KTR_PMAP, "%s: s (pmap = %p)", __func__, pmap);

        thiscpu = PCPU_GET(cpuid);

        tid = PCPU_GET(tid_next);
        if (tid > TID_MAX)
                tid = TID_MIN;
        PCPU_SET(tid_next, tid + 1);

        /* If we are stealing a TID, clear the relevant pmap's field. */
        if (tidbusy[thiscpu][tid] != NULL) {

                CTR2(KTR_PMAP, "%s: warning: stealing tid %d", __func__, tid);

                tidbusy[thiscpu][tid]->pm_tid[thiscpu] = TID_NONE;

                /* Flush all entries from TLB0 matching this TID. */
                tid_flush(tid);
        }

        tidbusy[thiscpu][tid] = pmap;
        pmap->pm_tid[thiscpu] = tid;
        __asm __volatile("msync; isync");

        CTR3(KTR_PMAP, "%s: e (%02d next = %02d)", __func__, tid,
            PCPU_GET(tid_next));

        return (tid);
}
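#if 0
/*
 * Illustrative sketch (not compiled): the invariant that tid_alloc() above
 * maintains between the per-CPU tidbusy[] table and pmap->pm_tid[]. When a
 * TID is stolen, the victim's pm_tid[] slot is reset to TID_NONE before the
 * table entry is reassigned. A debug build could assert this along the
 * following lines; "tid_check_invariant" is a hypothetical helper, not part
 * of this file.
 */
static void
tid_check_invariant(pmap_t pmap)
{
        int cpu = PCPU_GET(cpuid);
        tlbtid_t tid = pmap->pm_tid[cpu];

        if (tid != TID_NONE)
                KASSERT(tidbusy[cpu][tid] == pmap,
                    ("tid %d not owned by pmap %p on cpu %d", tid, pmap, cpu));
}
#endif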
/**************************************************************************/
/* TLB0 handling */
/**************************************************************************/

static void
tlb_print_entry(int i, uint32_t mas1, uint32_t mas2, uint32_t mas3,
    uint32_t mas7)
{
        int as;
        char desc[3];
        tlbtid_t tid;
        vm_size_t size;
        unsigned int tsize;

        desc[2] = '\0';
        if (mas1 & MAS1_VALID)
                desc[0] = 'V';
        else
                desc[0] = ' ';

        if (mas1 & MAS1_IPROT)
                desc[1] = 'P';
        else
                desc[1] = ' ';

        as = (mas1 & MAS1_TS_MASK) ? 1 : 0;
        tid = MAS1_GETTID(mas1);

        tsize = (mas1 & MAS1_TSIZE_MASK) >> MAS1_TSIZE_SHIFT;
        size = 0;
        if (tsize)
                size = tsize2size(tsize);

        debugf("%3d: (%s) [AS=%d] "
            "sz = 0x%08x tsz = %d tid = %d mas1 = 0x%08x "
            "mas2(va) = 0x%08x mas3(pa) = 0x%08x mas7 = 0x%08x\n",
            i, desc, as, size, tsize, tid, mas1, mas2, mas3, mas7);
}

/* Convert TLB0 va and way number to tlb0[] table index. */
static inline unsigned int
tlb0_tableidx(vm_offset_t va, unsigned int way)
{
        unsigned int idx;

        idx = (way * TLB0_ENTRIES_PER_WAY);
        idx += (va & MAS2_TLB0_ENTRY_IDX_MASK) >> MAS2_TLB0_ENTRY_IDX_SHIFT;
        return (idx);
}

/*
 * Invalidate TLB0 entry.
 */
static inline void
tlb0_flush_entry(vm_offset_t va)
{

        CTR2(KTR_PMAP, "%s: s va=0x%08x", __func__, va);

        mtx_assert(&tlbivax_mutex, MA_OWNED);

        __asm __volatile("tlbivax 0, %0" :: "r"(va & MAS2_EPN_MASK));
        __asm __volatile("isync; msync");
        __asm __volatile("tlbsync; msync");

        CTR1(KTR_PMAP, "%s: e", __func__);
}

/* Print out contents of the MAS registers for each TLB0 entry */
void
tlb0_print_tlbentries(void)
{
        uint32_t mas0, mas1, mas2, mas3, mas7;
        int entryidx, way, idx;

        debugf("TLB0 entries:\n");
        for (way = 0; way < TLB0_WAYS; way++)
                for (entryidx = 0; entryidx < TLB0_ENTRIES_PER_WAY; entryidx++) {

                        mas0 = MAS0_TLBSEL(0) | MAS0_ESEL(way);
                        mtspr(SPR_MAS0, mas0);
                        __asm __volatile("isync");

                        mas2 = entryidx << MAS2_TLB0_ENTRY_IDX_SHIFT;
                        mtspr(SPR_MAS2, mas2);

                        __asm __volatile("isync; tlbre");

                        mas1 = mfspr(SPR_MAS1);
                        mas2 = mfspr(SPR_MAS2);
                        mas3 = mfspr(SPR_MAS3);
                        mas7 = mfspr(SPR_MAS7);

                        idx = tlb0_tableidx(mas2, way);
                        tlb_print_entry(idx, mas1, mas2, mas3, mas7);
                }
}

/**************************************************************************/
/* TLB1 handling */
/**************************************************************************/

/*
 * TLB1 mapping notes:
 *
 * TLB1[0]    CCSRBAR
 * TLB1[1]    Kernel text and data.
 * TLB1[2-15] Additional kernel text and data mappings (if required), PCI
 *            windows, other devices mappings.
 */

/*
 * Write given entry to TLB1 hardware.
 * Use 32 bit pa, clear 4 high-order bits of RPN (mas7).
 */
static void
tlb1_write_entry(unsigned int idx)
{
        uint32_t mas0, mas7;

        //debugf("tlb1_write_entry: s\n");

        /* Clear high order RPN bits */
        mas7 = 0;

        /* Select entry */
        mas0 = MAS0_TLBSEL(1) | MAS0_ESEL(idx);
        //debugf("tlb1_write_entry: mas0 = 0x%08x\n", mas0);

        mtspr(SPR_MAS0, mas0);
        __asm __volatile("isync");
        mtspr(SPR_MAS1, tlb1[idx].mas1);
        __asm __volatile("isync");
        mtspr(SPR_MAS2, tlb1[idx].mas2);
        __asm __volatile("isync");
        mtspr(SPR_MAS3, tlb1[idx].mas3);
        __asm __volatile("isync");
        mtspr(SPR_MAS7, mas7);
        __asm __volatile("isync; tlbwe; isync; msync");

        //debugf("tlb1_write_entry: e\n");
}

/*
 * Return the largest uint value log such that 2^log <= num.
 */
static unsigned int
ilog2(unsigned int num)
{
        int lz;

        __asm ("cntlzw %0, %1" : "=r" (lz) : "r" (num));
        return (31 - lz);
}

/*
 * Convert TLB TSIZE value to mapped region size.
 */
static vm_size_t
tsize2size(unsigned int tsize)
{

        /*
         * size = 4^tsize KB
         * size = 4^tsize * 2^10 = 2^(2 * tsize + 10)
         */

        return ((1 << (2 * tsize)) * 1024);
}
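#if 0
/*
 * Illustrative sketch (not compiled): spot-checking the TSIZE encoding used
 * above. TSIZE t maps 4^t KB, so t = 1 is a 4 KB page, t = 5 is 1 MB and
 * t = 7 is 16 MB; size2tsize() below is the inverse for power-of-4 sizes.
 */
static void
tsize_example(void)
{

        KASSERT(tsize2size(1) == 4 * 1024, ("4 KB expected"));
        KASSERT(tsize2size(5) == 1024 * 1024, ("1 MB expected"));
        KASSERT(tsize2size(7) == 16 * 1024 * 1024, ("16 MB expected"));
}
#endif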
/*
 * Convert region size (must be power of 4) to TLB TSIZE value.
 */
static unsigned int
size2tsize(vm_size_t size)
{

        return (ilog2(size) / 2 - 5);
}

/*
 * Register permanent kernel mapping in TLB1.
 *
 * Entries are created starting from index 0 (current free entry is
 * kept in tlb1_idx) and are not supposed to be invalidated.
 */
static int
tlb1_set_entry(vm_offset_t va, vm_offset_t pa, vm_size_t size,
    uint32_t flags)
{
        uint32_t ts, tid;
        int tsize;

        if (tlb1_idx >= TLB1_ENTRIES) {
                printf("tlb1_set_entry: TLB1 full!\n");
                return (-1);
        }

        /* Convert size to TSIZE */
        tsize = size2tsize(size);

        tid = (TID_KERNEL << MAS1_TID_SHIFT) & MAS1_TID_MASK;
        /* XXX TS is hard coded to 0 for now as we only use single address space */
        ts = (0 << MAS1_TS_SHIFT) & MAS1_TS_MASK;

        /* XXX LOCK tlb1[] */

        tlb1[tlb1_idx].mas1 = MAS1_VALID | MAS1_IPROT | ts | tid;
        tlb1[tlb1_idx].mas1 |= ((tsize << MAS1_TSIZE_SHIFT) & MAS1_TSIZE_MASK);
        tlb1[tlb1_idx].mas2 = (va & MAS2_EPN_MASK) | flags;

        /* Set supervisor RWX permission bits */
        tlb1[tlb1_idx].mas3 = (pa & MAS3_RPN) | MAS3_SR | MAS3_SW | MAS3_SX;

        tlb1_write_entry(tlb1_idx++);

        /* XXX UNLOCK tlb1[] */

        /*
         * XXX in general TLB1 updates should be propagated between CPUs,
         * since current design assumes to have the same TLB1 set-up on all
         * cores.
         */
        return (0);
}

static int
tlb1_entry_size_cmp(const void *a, const void *b)
{
        const vm_size_t *sza;
        const vm_size_t *szb;

        sza = a;
        szb = b;
        if (*sza > *szb)
                return (-1);
        else if (*sza < *szb)
                return (1);
        else
                return (0);
}

/*
 * Map in contiguous RAM region into the TLB1 using maximum of
 * KERNEL_REGION_MAX_TLB_ENTRIES entries.
 *
 * If necessary round up last entry size and return total size
 * used by all allocated entries.
 */
vm_size_t
tlb1_mapin_region(vm_offset_t va, vm_offset_t pa, vm_size_t size)
{
        vm_size_t entry_size[KERNEL_REGION_MAX_TLB_ENTRIES];
        vm_size_t mapped_size, sz, esz;
        unsigned int log;
        int i;

        CTR4(KTR_PMAP, "%s: region size = 0x%08x va = 0x%08x pa = 0x%08x",
            __func__, size, va, pa);

        mapped_size = 0;
        sz = size;
        memset(entry_size, 0, sizeof(entry_size));

        /* Calculate entry sizes. */
        for (i = 0; i < KERNEL_REGION_MAX_TLB_ENTRIES && sz > 0; i++) {

                /* Largest region that is power of 4 and fits within size */
                log = ilog2(sz) / 2;
                esz = 1 << (2 * log);

                /* If this is last entry cover remaining size. */
                if (i == KERNEL_REGION_MAX_TLB_ENTRIES - 1) {
                        while (esz < sz)
                                esz = esz << 2;
                }

                entry_size[i] = esz;
                mapped_size += esz;
                if (esz < sz)
                        sz -= esz;
                else
                        sz = 0;
        }

        /* Sort entry sizes, required to get proper entry address alignment. */
        qsort(entry_size, KERNEL_REGION_MAX_TLB_ENTRIES,
            sizeof(vm_size_t), tlb1_entry_size_cmp);

        /* Load TLB1 entries. */
        for (i = 0; i < KERNEL_REGION_MAX_TLB_ENTRIES; i++) {
                esz = entry_size[i];
                if (!esz)
                        break;

                CTR5(KTR_PMAP, "%s: entry %d: sz = 0x%08x (va = 0x%08x "
                    "pa = 0x%08x)", __func__, tlb1_idx, esz, va, pa);

                tlb1_set_entry(va, pa, esz, _TLB_ENTRY_MEM);

                va += esz;
                pa += esz;
        }

        CTR3(KTR_PMAP, "%s: mapped size 0x%08x (wasted space 0x%08x)",
            __func__, mapped_size, mapped_size - size);

        return (mapped_size);
}

/*
 * TLB1 initialization routine, to be called after the very first
 * assembler level setup done in locore.S.
 */
void
tlb1_init(vm_offset_t ccsrbar)
{
        uint32_t mas0;

        /* TLB1[1] is used to map the kernel. Save that entry. */
        mas0 = MAS0_TLBSEL(1) | MAS0_ESEL(1);
        mtspr(SPR_MAS0, mas0);
        __asm __volatile("isync; tlbre");

        tlb1[1].mas1 = mfspr(SPR_MAS1);
        tlb1[1].mas2 = mfspr(SPR_MAS2);
        tlb1[1].mas3 = mfspr(SPR_MAS3);

        /* Map in CCSRBAR in TLB1[0] */
        tlb1_idx = 0;
        tlb1_set_entry(CCSRBAR_VA, ccsrbar, CCSRBAR_SIZE, _TLB_ENTRY_IO);
        /*
         * Set the next available TLB1 entry index. Note TLB1[1] is reserved
         * for the initial mapping of kernel text+data, which was set up early
         * in locore, so we need to skip this [busy] entry.
         */
        tlb1_idx = 2;

        /* Setup TLB miss defaults */
        set_mas4_defaults();
}

/*
 * Setup MAS4 defaults.
 * These values are loaded to MAS0-2 on a TLB miss.
 */
static void
set_mas4_defaults(void)
{
        uint32_t mas4;

        /* Defaults: TLB0, PID0, TSIZED=4K */
        mas4 = MAS4_TLBSELD0;
        mas4 |= (TLB_SIZE_4K << MAS4_TSIZED_SHIFT) & MAS4_TSIZED_MASK;

        mtspr(SPR_MAS4, mas4);
        __asm __volatile("isync");
}

/*
 * Print out contents of the MAS registers for each TLB1 entry
 */
void
tlb1_print_tlbentries(void)
{
        uint32_t mas0, mas1, mas2, mas3, mas7;
        int i;

        debugf("TLB1 entries:\n");
        for (i = 0; i < TLB1_ENTRIES; i++) {

                mas0 = MAS0_TLBSEL(1) | MAS0_ESEL(i);
                mtspr(SPR_MAS0, mas0);

                __asm __volatile("isync; tlbre");

                mas1 = mfspr(SPR_MAS1);
                mas2 = mfspr(SPR_MAS2);
                mas3 = mfspr(SPR_MAS3);
                mas7 = mfspr(SPR_MAS7);

                tlb_print_entry(i, mas1, mas2, mas3, mas7);
        }
}

/*
 * Print out contents of the in-ram tlb1 table.
 */
void
tlb1_print_entries(void)
{
        int i;

        debugf("tlb1[] table entries:\n");
        for (i = 0; i < TLB1_ENTRIES; i++)
                tlb_print_entry(i, tlb1[i].mas1, tlb1[i].mas2, tlb1[i].mas3, 0);
}
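#if 0
/*
 * Illustrative sketch (not compiled): how tlb1_mapin_region() above would
 * split a hypothetical 96 MB kernel RAM region into power-of-4 chunks.
 * 0x06000000 bytes decompose into 64 MB + 16 MB + 16 MB, i.e. three TLB1
 * entries and no wasted space; the descending sort keeps each chunk
 * naturally aligned at its target address.
 */
static void
mapin_region_example(void)
{
        vm_size_t sz = 0x06000000, esz;         /* made-up region size */
        unsigned int log;

        while (sz > 0) {
                log = ilog2(sz) / 2;            /* largest power of 4 <= sz */
                esz = 1 << (2 * log);
                printf("entry size: 0x%08x\n", esz);
                sz -= (esz < sz) ? esz : sz;
        }
}
#endif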
/*
 * Return 0 if the physical IO range is encompassed by one of the
 * TLB1 entries, otherwise return the related error code.
 */
static int
tlb1_iomapped(int i, vm_paddr_t pa, vm_size_t size, vm_offset_t *va)
{
        uint32_t prot;
        vm_paddr_t pa_start;
        vm_paddr_t pa_end;
        unsigned int entry_tsize;
        vm_size_t entry_size;

        *va = (vm_offset_t)NULL;

        /* Skip invalid entries */
        if (!(tlb1[i].mas1 & MAS1_VALID))
                return (EINVAL);

        /*
         * The entry must be cache-inhibited, guarded, and r/w
         * so it can function as an i/o page
         */
        prot = tlb1[i].mas2 & (MAS2_I | MAS2_G);
        if (prot != (MAS2_I | MAS2_G))
                return (EPERM);

        prot = tlb1[i].mas3 & (MAS3_SR | MAS3_SW);
        if (prot != (MAS3_SR | MAS3_SW))
                return (EPERM);

        /* The address should be within the entry range. */
        entry_tsize = (tlb1[i].mas1 & MAS1_TSIZE_MASK) >> MAS1_TSIZE_SHIFT;
        KASSERT((entry_tsize), ("tlb1_iomapped: invalid entry tsize"));

        entry_size = tsize2size(entry_tsize);
        pa_start = tlb1[i].mas3 & MAS3_RPN;
        pa_end = pa_start + entry_size - 1;

        if ((pa < pa_start) || ((pa + size) > pa_end))
                return (ERANGE);

        /* Return virtual address of this mapping. */
        *va = (tlb1[i].mas2 & MAS2_EPN_MASK) + (pa - pa_start);
        return (0);
}
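#if 0
/*
 * Illustrative sketch (not compiled): exercising tlb1_iomapped() above with a
 * made-up 1 MB I/O entry (TSIZE 5 => 4^5 KB = 1 MB). The addresses below are
 * examples only, not real board addresses; a lookup for a 4 KB range inside
 * the window returns 0 and the corresponding virtual address.
 */
static void
tlb1_iomapped_example(void)
{
        vm_offset_t va;
        int idx = tlb1_idx;             /* borrow a free slot for the example */

        tlb1[idx].mas1 = MAS1_VALID | ((5 << MAS1_TSIZE_SHIFT) & MAS1_TSIZE_MASK);
        tlb1[idx].mas2 = (0xfef00000 & MAS2_EPN_MASK) | MAS2_I | MAS2_G;
        tlb1[idx].mas3 = (0xfef00000 & MAS3_RPN) | MAS3_SR | MAS3_SW;

        if (tlb1_iomapped(idx, 0xfef40000, 0x1000, &va) == 0)
                printf("io range mapped at va 0x%08x\n", va);   /* 0xfef40000 */
}
#endif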