pmap.c revision 184244
/*-
 * Copyright (C) 2007 Semihalf, Rafal Jaworowski <raj@semihalf.com>
 * Copyright (C) 2006 Semihalf, Marian Balakowicz <m8@semihalf.com>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN
 * NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
 * TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
 * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
 * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Some hw specific parts of this pmap were derived or influenced
 * by NetBSD's ibm4xx pmap module. More generic code is shared with
 * a few other pmap modules from the FreeBSD tree.
 */

/*
 * VM layout notes:
 *
 * Kernel and user threads run within one common virtual address space
 * defined by AS=0.
 *
 * Virtual address space layout:
 * -----------------------------
 * 0x0000_0000 - 0xbfff_efff : user process
 * 0xc000_0000 - 0xc1ff_ffff : kernel reserved
 *   0xc000_0000 - kernelend : kernel code & data
 *   0xc1ff_c000 - 0xc200_0000 : kstack0
 * 0xc200_0000 - 0xffef_ffff : KVA
 *   0xc200_0000 - 0xc200_3fff : reserved for page zero/copy
 *   0xc200_4000 - ptbl buf end: reserved for ptbl bufs
 *   ptbl buf end- 0xffef_ffff : actual free KVA space
 * 0xfff0_0000 - 0xffff_ffff : I/O devices region
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: head/sys/powerpc/booke/pmap.c 184244 2008-10-25 03:36:21Z marcel $");

#include <sys/types.h>
#include <sys/param.h>
#include <sys/malloc.h>
#include <sys/proc.h>
#include <sys/user.h>
#include <sys/queue.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/msgbuf.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/vmmeter.h>

#include <vm/vm.h>
#include <vm/vm_page.h>
#include <vm/vm_kern.h>
#include <vm/vm_pageout.h>
#include <vm/vm_extern.h>
#include <vm/vm_object.h>
#include <vm/vm_param.h>
#include <vm/vm_map.h>
#include <vm/vm_pager.h>
#include <vm/uma.h>

#include <machine/cpu.h>
#include <machine/pcb.h>
#include <machine/powerpc.h>

#include <machine/tlb.h>
#include <machine/spr.h>
#include <machine/vmparam.h>
#include <machine/md_var.h>
#include <machine/mmuvar.h>
#include <machine/pmap.h>
#include <machine/pte.h>

#include "mmu_if.h"

#define DEBUG
#undef DEBUG

#ifdef DEBUG
#define debugf(fmt, args...)
printf(fmt, ##args) 98#else 99#define debugf(fmt, args...) 100#endif 101 102#define TODO panic("%s: not implemented", __func__); 103#define memmove(d, s, l) bcopy(s, d, l) 104 105#include "opt_sched.h" 106#ifndef SCHED_4BSD 107#error "e500 only works with SCHED_4BSD which uses a global scheduler lock." 108#endif 109extern struct mtx sched_lock; 110 111/* Kernel physical load address. */ 112extern uint32_t kernload; 113 114struct mem_region availmem_regions[MEM_REGIONS]; 115int availmem_regions_sz; 116 117/* Reserved KVA space and mutex for mmu_booke_zero_page. */ 118static vm_offset_t zero_page_va; 119static struct mtx zero_page_mutex; 120 121/* 122 * Reserved KVA space for mmu_booke_zero_page_idle. This is used 123 * by idle thred only, no lock required. 124 */ 125static vm_offset_t zero_page_idle_va; 126 127/* Reserved KVA space and mutex for mmu_booke_copy_page. */ 128static vm_offset_t copy_page_src_va; 129static vm_offset_t copy_page_dst_va; 130static struct mtx copy_page_mutex; 131 132/**************************************************************************/ 133/* PMAP */ 134/**************************************************************************/ 135 136static void mmu_booke_enter_locked(mmu_t, pmap_t, vm_offset_t, vm_page_t, 137 vm_prot_t, boolean_t); 138 139unsigned int kptbl_min; /* Index of the first kernel ptbl. */ 140unsigned int kernel_ptbls; /* Number of KVA ptbls. */ 141 142static int pagedaemon_waken; 143 144/* 145 * If user pmap is processed with mmu_booke_remove and the resident count 146 * drops to 0, there are no more pages to remove, so we need not continue. 147 */ 148#define PMAP_REMOVE_DONE(pmap) \ 149 ((pmap) != kernel_pmap && (pmap)->pm_stats.resident_count == 0) 150 151extern void load_pid0(tlbtid_t); 152 153/**************************************************************************/ 154/* TLB and TID handling */ 155/**************************************************************************/ 156 157/* Translation ID busy table */ 158static volatile pmap_t tidbusy[TID_MAX + 1]; 159 160/* 161 * Actual maximum number of TLB0 entries. 162 * This number differs between e500 core revisions. 163 */ 164u_int32_t tlb0_size; 165u_int32_t tlb0_nways; 166u_int32_t tlb0_nentries_per_way; 167 168#define TLB0_SIZE (tlb0_size) 169#define TLB0_NWAYS (tlb0_nways) 170#define TLB0_ENTRIES_PER_WAY (tlb0_nentries_per_way) 171 172/* Pointer to kernel tlb0 table, allocated in mmu_booke_bootstrap() */ 173tlb_entry_t *tlb0; 174 175/* 176 * Spinlock to assure proper locking between threads and 177 * between tlb miss handler and kernel. 
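 * The mutex itself is set up in mmu_booke_bootstrap() as a recursive
 * spin mutex (MTX_SPIN | MTX_RECURSE).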
178 */ 179static struct mtx tlb0_mutex; 180 181#define TLB1_SIZE 16 182 183/* In-ram copy of the TLB1 */ 184static tlb_entry_t tlb1[TLB1_SIZE]; 185 186/* Next free entry in the TLB1 */ 187static unsigned int tlb1_idx; 188 189static tlbtid_t tid_alloc(struct pmap *); 190static void tid_flush(tlbtid_t); 191 192extern void tlb1_inval_va(vm_offset_t); 193extern void tlb0_inval_va(vm_offset_t); 194 195static void tlb_print_entry(int, u_int32_t, u_int32_t, u_int32_t, u_int32_t); 196 197static int tlb1_set_entry(vm_offset_t, vm_offset_t, vm_size_t, u_int32_t); 198static void __tlb1_set_entry(unsigned int, vm_offset_t, vm_offset_t, 199 vm_size_t, u_int32_t, unsigned int, unsigned int); 200static void tlb1_write_entry(unsigned int); 201static int tlb1_iomapped(int, vm_paddr_t, vm_size_t, vm_offset_t *); 202static vm_size_t tlb1_mapin_region(vm_offset_t, vm_offset_t, vm_size_t); 203 204static vm_size_t tsize2size(unsigned int); 205static unsigned int size2tsize(vm_size_t); 206static unsigned int ilog2(unsigned int); 207 208static void set_mas4_defaults(void); 209 210static void tlb0_inval_entry(vm_offset_t, unsigned int); 211static inline unsigned int tlb0_tableidx(vm_offset_t, unsigned int); 212static void tlb0_write_entry(unsigned int, unsigned int); 213static void tlb0_flush_entry(pmap_t, vm_offset_t); 214static void tlb0_init(void); 215 216/**************************************************************************/ 217/* Page table management */ 218/**************************************************************************/ 219 220/* Data for the pv entry allocation mechanism */ 221static uma_zone_t pvzone; 222static struct vm_object pvzone_obj; 223static int pv_entry_count = 0, pv_entry_max = 0, pv_entry_high_water = 0; 224 225#define PV_ENTRY_ZONE_MIN 2048 /* min pv entries in uma zone */ 226 227#ifndef PMAP_SHPGPERPROC 228#define PMAP_SHPGPERPROC 200 229#endif 230 231static void ptbl_init(void); 232static struct ptbl_buf *ptbl_buf_alloc(void); 233static void ptbl_buf_free(struct ptbl_buf *); 234static void ptbl_free_pmap_ptbl(pmap_t, pte_t *); 235 236static void ptbl_alloc(mmu_t, pmap_t, unsigned int); 237static void ptbl_free(mmu_t, pmap_t, unsigned int); 238static void ptbl_hold(mmu_t, pmap_t, unsigned int); 239static int ptbl_unhold(mmu_t, pmap_t, unsigned int); 240 241static vm_paddr_t pte_vatopa(mmu_t, pmap_t, vm_offset_t); 242static pte_t *pte_find(mmu_t, pmap_t, vm_offset_t); 243void pte_enter(mmu_t, pmap_t, vm_page_t, vm_offset_t, u_int32_t); 244static int pte_remove(mmu_t, pmap_t, vm_offset_t, u_int8_t); 245 246pv_entry_t pv_alloc(void); 247static void pv_free(pv_entry_t); 248static void pv_insert(pmap_t, vm_offset_t, vm_page_t); 249static void pv_remove(pmap_t, vm_offset_t, vm_page_t); 250 251/* Number of kva ptbl buffers, each covering one ptbl (PTBL_PAGES). */ 252#define PTBL_BUFS (128 * 16) 253 254struct ptbl_buf { 255 TAILQ_ENTRY(ptbl_buf) link; /* list link */ 256 vm_offset_t kva; /* va of mapping */ 257}; 258 259/* ptbl free list and a lock used for access synchronization. */ 260static TAILQ_HEAD(, ptbl_buf) ptbl_buf_freelist; 261static struct mtx ptbl_buf_freelist_lock; 262 263/* Base address of kva space allocated fot ptbl bufs. */ 264static vm_offset_t ptbl_buf_pool_vabase; 265 266/* Pointer to ptbl_buf structures. 
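 * The array is carved out of the area following the kernel image in
 * mmu_booke_bootstrap(); ptbl_init() assigns each buf a KVA slice from
 * ptbl_buf_pool_vabase and links it onto ptbl_buf_freelist.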
*/ 267static struct ptbl_buf *ptbl_bufs; 268 269/* 270 * Kernel MMU interface 271 */ 272static void mmu_booke_change_wiring(mmu_t, pmap_t, vm_offset_t, boolean_t); 273static void mmu_booke_clear_modify(mmu_t, vm_page_t); 274static void mmu_booke_clear_reference(mmu_t, vm_page_t); 275static void mmu_booke_copy(pmap_t, pmap_t, vm_offset_t, vm_size_t, 276 vm_offset_t); 277static void mmu_booke_copy_page(mmu_t, vm_page_t, vm_page_t); 278static void mmu_booke_enter(mmu_t, pmap_t, vm_offset_t, vm_page_t, 279 vm_prot_t, boolean_t); 280static void mmu_booke_enter_object(mmu_t, pmap_t, vm_offset_t, vm_offset_t, 281 vm_page_t, vm_prot_t); 282static void mmu_booke_enter_quick(mmu_t, pmap_t, vm_offset_t, vm_page_t, 283 vm_prot_t); 284static vm_paddr_t mmu_booke_extract(mmu_t, pmap_t, vm_offset_t); 285static vm_page_t mmu_booke_extract_and_hold(mmu_t, pmap_t, vm_offset_t, 286 vm_prot_t); 287static void mmu_booke_init(mmu_t); 288static boolean_t mmu_booke_is_modified(mmu_t, vm_page_t); 289static boolean_t mmu_booke_is_prefaultable(mmu_t, pmap_t, vm_offset_t); 290static boolean_t mmu_booke_ts_referenced(mmu_t, vm_page_t); 291static vm_offset_t mmu_booke_map(mmu_t, vm_offset_t *, vm_offset_t, vm_offset_t, 292 int); 293static int mmu_booke_mincore(mmu_t, pmap_t, vm_offset_t); 294static void mmu_booke_object_init_pt(mmu_t, pmap_t, vm_offset_t, 295 vm_object_t, vm_pindex_t, vm_size_t); 296static boolean_t mmu_booke_page_exists_quick(mmu_t, pmap_t, vm_page_t); 297static void mmu_booke_page_init(mmu_t, vm_page_t); 298static int mmu_booke_page_wired_mappings(mmu_t, vm_page_t); 299static void mmu_booke_pinit(mmu_t, pmap_t); 300static void mmu_booke_pinit0(mmu_t, pmap_t); 301static void mmu_booke_protect(mmu_t, pmap_t, vm_offset_t, vm_offset_t, 302 vm_prot_t); 303static void mmu_booke_qenter(mmu_t, vm_offset_t, vm_page_t *, int); 304static void mmu_booke_qremove(mmu_t, vm_offset_t, int); 305static void mmu_booke_release(mmu_t, pmap_t); 306static void mmu_booke_remove(mmu_t, pmap_t, vm_offset_t, vm_offset_t); 307static void mmu_booke_remove_all(mmu_t, vm_page_t); 308static void mmu_booke_remove_write(mmu_t, vm_page_t); 309static void mmu_booke_zero_page(mmu_t, vm_page_t); 310static void mmu_booke_zero_page_area(mmu_t, vm_page_t, int, int); 311static void mmu_booke_zero_page_idle(mmu_t, vm_page_t); 312static void mmu_booke_activate(mmu_t, struct thread *); 313static void mmu_booke_deactivate(mmu_t, struct thread *); 314static void mmu_booke_bootstrap(mmu_t, vm_offset_t, vm_offset_t); 315static void *mmu_booke_mapdev(mmu_t, vm_offset_t, vm_size_t); 316static void mmu_booke_unmapdev(mmu_t, vm_offset_t, vm_size_t); 317static vm_offset_t mmu_booke_kextract(mmu_t, vm_offset_t); 318static void mmu_booke_kenter(mmu_t, vm_offset_t, vm_offset_t); 319static void mmu_booke_kremove(mmu_t, vm_offset_t); 320static boolean_t mmu_booke_dev_direct_mapped(mmu_t, vm_offset_t, vm_size_t); 321static boolean_t mmu_booke_page_executable(mmu_t, vm_page_t); 322 323static mmu_method_t mmu_booke_methods[] = { 324 /* pmap dispatcher interface */ 325 MMUMETHOD(mmu_change_wiring, mmu_booke_change_wiring), 326 MMUMETHOD(mmu_clear_modify, mmu_booke_clear_modify), 327 MMUMETHOD(mmu_clear_reference, mmu_booke_clear_reference), 328 MMUMETHOD(mmu_copy, mmu_booke_copy), 329 MMUMETHOD(mmu_copy_page, mmu_booke_copy_page), 330 MMUMETHOD(mmu_enter, mmu_booke_enter), 331 MMUMETHOD(mmu_enter_object, mmu_booke_enter_object), 332 MMUMETHOD(mmu_enter_quick, mmu_booke_enter_quick), 333 MMUMETHOD(mmu_extract, mmu_booke_extract), 334 
MMUMETHOD(mmu_extract_and_hold, mmu_booke_extract_and_hold), 335 MMUMETHOD(mmu_init, mmu_booke_init), 336 MMUMETHOD(mmu_is_modified, mmu_booke_is_modified), 337 MMUMETHOD(mmu_is_prefaultable, mmu_booke_is_prefaultable), 338 MMUMETHOD(mmu_ts_referenced, mmu_booke_ts_referenced), 339 MMUMETHOD(mmu_map, mmu_booke_map), 340 MMUMETHOD(mmu_mincore, mmu_booke_mincore), 341 MMUMETHOD(mmu_object_init_pt, mmu_booke_object_init_pt), 342 MMUMETHOD(mmu_page_exists_quick,mmu_booke_page_exists_quick), 343 MMUMETHOD(mmu_page_init, mmu_booke_page_init), 344 MMUMETHOD(mmu_page_wired_mappings, mmu_booke_page_wired_mappings), 345 MMUMETHOD(mmu_pinit, mmu_booke_pinit), 346 MMUMETHOD(mmu_pinit0, mmu_booke_pinit0), 347 MMUMETHOD(mmu_protect, mmu_booke_protect), 348 MMUMETHOD(mmu_qenter, mmu_booke_qenter), 349 MMUMETHOD(mmu_qremove, mmu_booke_qremove), 350 MMUMETHOD(mmu_release, mmu_booke_release), 351 MMUMETHOD(mmu_remove, mmu_booke_remove), 352 MMUMETHOD(mmu_remove_all, mmu_booke_remove_all), 353 MMUMETHOD(mmu_remove_write, mmu_booke_remove_write), 354 MMUMETHOD(mmu_zero_page, mmu_booke_zero_page), 355 MMUMETHOD(mmu_zero_page_area, mmu_booke_zero_page_area), 356 MMUMETHOD(mmu_zero_page_idle, mmu_booke_zero_page_idle), 357 MMUMETHOD(mmu_activate, mmu_booke_activate), 358 MMUMETHOD(mmu_deactivate, mmu_booke_deactivate), 359 360 /* Internal interfaces */ 361 MMUMETHOD(mmu_bootstrap, mmu_booke_bootstrap), 362 MMUMETHOD(mmu_dev_direct_mapped,mmu_booke_dev_direct_mapped), 363 MMUMETHOD(mmu_mapdev, mmu_booke_mapdev), 364 MMUMETHOD(mmu_kenter, mmu_booke_kenter), 365 MMUMETHOD(mmu_kextract, mmu_booke_kextract), 366/* MMUMETHOD(mmu_kremove, mmu_booke_kremove), */ 367 MMUMETHOD(mmu_page_executable, mmu_booke_page_executable), 368 MMUMETHOD(mmu_unmapdev, mmu_booke_unmapdev), 369 370 { 0, 0 } 371}; 372 373static mmu_def_t booke_mmu = { 374 MMU_TYPE_BOOKE, 375 mmu_booke_methods, 376 0 377}; 378MMU_DEF(booke_mmu); 379 380/* Return number of entries in TLB0. */ 381static __inline void 382tlb0_get_tlbconf(void) 383{ 384 uint32_t tlb0_cfg; 385 386 tlb0_cfg = mfspr(SPR_TLB0CFG); 387 tlb0_size = tlb0_cfg & TLBCFG_NENTRY_MASK; 388 tlb0_nways = (tlb0_cfg & TLBCFG_ASSOC_MASK) >> TLBCFG_ASSOC_SHIFT; 389 tlb0_nentries_per_way = tlb0_size/tlb0_nways; 390} 391 392/* Initialize pool of kva ptbl buffers. */ 393static void 394ptbl_init(void) 395{ 396 int i; 397 398 //debugf("ptbl_init: s (ptbl_bufs = 0x%08x size 0x%08x)\n", 399 // (u_int32_t)ptbl_bufs, sizeof(struct ptbl_buf) * PTBL_BUFS); 400 //debugf("ptbl_init: s (ptbl_buf_pool_vabase = 0x%08x size = 0x%08x)\n", 401 // ptbl_buf_pool_vabase, PTBL_BUFS * PTBL_PAGES * PAGE_SIZE); 402 403 mtx_init(&ptbl_buf_freelist_lock, "ptbl bufs lock", NULL, MTX_DEF); 404 TAILQ_INIT(&ptbl_buf_freelist); 405 406 for (i = 0; i < PTBL_BUFS; i++) { 407 ptbl_bufs[i].kva = ptbl_buf_pool_vabase + i * PTBL_PAGES * PAGE_SIZE; 408 TAILQ_INSERT_TAIL(&ptbl_buf_freelist, &ptbl_bufs[i], link); 409 } 410 411 //debugf("ptbl_init: e\n"); 412} 413 414/* Get a ptbl_buf from the freelist. */ 415static struct ptbl_buf * 416ptbl_buf_alloc(void) 417{ 418 struct ptbl_buf *buf; 419 420 //debugf("ptbl_buf_alloc: s\n"); 421 422 mtx_lock(&ptbl_buf_freelist_lock); 423 buf = TAILQ_FIRST(&ptbl_buf_freelist); 424 if (buf != NULL) 425 TAILQ_REMOVE(&ptbl_buf_freelist, buf, link); 426 mtx_unlock(&ptbl_buf_freelist_lock); 427 428 //debugf("ptbl_buf_alloc: e (buf = 0x%08x)\n", (u_int32_t)buf); 429 return (buf); 430} 431 432/* Return ptbl buff to free pool. 
*/ 433static void 434ptbl_buf_free(struct ptbl_buf *buf) 435{ 436 437 //debugf("ptbl_buf_free: s (buf = 0x%08x)\n", (u_int32_t)buf); 438 439 mtx_lock(&ptbl_buf_freelist_lock); 440 TAILQ_INSERT_TAIL(&ptbl_buf_freelist, buf, link); 441 mtx_unlock(&ptbl_buf_freelist_lock); 442 443 //debugf("ptbl_buf_free: e\n"); 444} 445 446/* 447 * Search the list of allocated ptbl bufs and find 448 * on list of allocated ptbls 449 */ 450static void 451ptbl_free_pmap_ptbl(pmap_t pmap, pte_t *ptbl) 452{ 453 struct ptbl_buf *pbuf; 454 455 //debugf("ptbl_free_pmap_ptbl: s (pmap = 0x%08x ptbl = 0x%08x)\n", 456 // (u_int32_t)pmap, (u_int32_t)ptbl); 457 458 TAILQ_FOREACH(pbuf, &pmap->ptbl_list, link) { 459 if (pbuf->kva == (vm_offset_t)ptbl) { 460 /* Remove from pmap ptbl buf list. */ 461 TAILQ_REMOVE(&pmap->ptbl_list, pbuf, link); 462 463 /* Free correspondig ptbl buf. */ 464 ptbl_buf_free(pbuf); 465 466 break; 467 } 468 } 469 470 //debugf("ptbl_free_pmap_ptbl: e\n"); 471} 472 473/* Allocate page table. */ 474static void 475ptbl_alloc(mmu_t mmu, pmap_t pmap, unsigned int pdir_idx) 476{ 477 vm_page_t mtbl[PTBL_PAGES]; 478 vm_page_t m; 479 struct ptbl_buf *pbuf; 480 unsigned int pidx; 481 int i; 482 483 //int su = (pmap == kernel_pmap); 484 //debugf("ptbl_alloc: s (pmap = 0x%08x su = %d pdir_idx = %d)\n", (u_int32_t)pmap, su, pdir_idx); 485 486 KASSERT((pdir_idx <= (VM_MAXUSER_ADDRESS / PDIR_SIZE)), 487 ("ptbl_alloc: invalid pdir_idx")); 488 KASSERT((pmap->pm_pdir[pdir_idx] == NULL), 489 ("pte_alloc: valid ptbl entry exists!")); 490 491 pbuf = ptbl_buf_alloc(); 492 if (pbuf == NULL) 493 panic("pte_alloc: couldn't alloc kernel virtual memory"); 494 pmap->pm_pdir[pdir_idx] = (pte_t *)pbuf->kva; 495 //debugf("ptbl_alloc: kva = 0x%08x\n", (u_int32_t)pmap->pm_pdir[pdir_idx]); 496 497 /* Allocate ptbl pages, this will sleep! */ 498 for (i = 0; i < PTBL_PAGES; i++) { 499 pidx = (PTBL_PAGES * pdir_idx) + i; 500 while ((m = vm_page_alloc(NULL, pidx, VM_ALLOC_NOOBJ | VM_ALLOC_WIRED)) == NULL) { 501 PMAP_UNLOCK(pmap); 502 vm_page_unlock_queues(); 503 VM_WAIT; 504 vm_page_lock_queues(); 505 PMAP_LOCK(pmap); 506 } 507 mtbl[i] = m; 508 } 509 510 /* Map in allocated pages into kernel_pmap. */ 511 mmu_booke_qenter(mmu, (vm_offset_t)pmap->pm_pdir[pdir_idx], mtbl, PTBL_PAGES); 512 513 /* Zero whole ptbl. */ 514 bzero((caddr_t)pmap->pm_pdir[pdir_idx], PTBL_PAGES * PAGE_SIZE); 515 516 /* Add pbuf to the pmap ptbl bufs list. */ 517 TAILQ_INSERT_TAIL(&pmap->ptbl_list, pbuf, link); 518 519 //debugf("ptbl_alloc: e\n"); 520} 521 522/* Free ptbl pages and invalidate pdir entry. 
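 * Each ptbl page is translated back to its physical page with
 * pte_vatopa() on the kernel pmap, freed and unwired, and its KVA is
 * unmapped via mmu_booke_kremove(); the buf is then returned through
 * ptbl_free_pmap_ptbl().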
*/ 523static void 524ptbl_free(mmu_t mmu, pmap_t pmap, unsigned int pdir_idx) 525{ 526 pte_t *ptbl; 527 vm_paddr_t pa; 528 vm_offset_t va; 529 vm_page_t m; 530 int i; 531 532 //int su = (pmap == kernel_pmap); 533 //debugf("ptbl_free: s (pmap = 0x%08x su = %d pdir_idx = %d)\n", (u_int32_t)pmap, su, pdir_idx); 534 535 KASSERT((pdir_idx <= (VM_MAXUSER_ADDRESS / PDIR_SIZE)), 536 ("ptbl_free: invalid pdir_idx")); 537 538 ptbl = pmap->pm_pdir[pdir_idx]; 539 540 //debugf("ptbl_free: ptbl = 0x%08x\n", (u_int32_t)ptbl); 541 KASSERT((ptbl != NULL), ("ptbl_free: null ptbl")); 542 543 for (i = 0; i < PTBL_PAGES; i++) { 544 va = ((vm_offset_t)ptbl + (i * PAGE_SIZE)); 545 pa = pte_vatopa(mmu, kernel_pmap, va); 546 m = PHYS_TO_VM_PAGE(pa); 547 vm_page_free_zero(m); 548 atomic_subtract_int(&cnt.v_wire_count, 1); 549 mmu_booke_kremove(mmu, va); 550 } 551 552 ptbl_free_pmap_ptbl(pmap, ptbl); 553 pmap->pm_pdir[pdir_idx] = NULL; 554 555 //debugf("ptbl_free: e\n"); 556} 557 558/* 559 * Decrement ptbl pages hold count and attempt to free ptbl pages. 560 * Called when removing pte entry from ptbl. 561 * 562 * Return 1 if ptbl pages were freed. 563 */ 564static int 565ptbl_unhold(mmu_t mmu, pmap_t pmap, unsigned int pdir_idx) 566{ 567 pte_t *ptbl; 568 vm_paddr_t pa; 569 vm_page_t m; 570 int i; 571 572 //int su = (pmap == kernel_pmap); 573 //debugf("ptbl_unhold: s (pmap = %08x su = %d pdir_idx = %d)\n", 574 // (u_int32_t)pmap, su, pdir_idx); 575 576 KASSERT((pdir_idx <= (VM_MAXUSER_ADDRESS / PDIR_SIZE)), 577 ("ptbl_unhold: invalid pdir_idx")); 578 KASSERT((pmap != kernel_pmap), 579 ("ptbl_unhold: unholding kernel ptbl!")); 580 581 ptbl = pmap->pm_pdir[pdir_idx]; 582 583 //debugf("ptbl_unhold: ptbl = 0x%08x\n", (u_int32_t)ptbl); 584 KASSERT(((vm_offset_t)ptbl >= VM_MIN_KERNEL_ADDRESS), 585 ("ptbl_unhold: non kva ptbl")); 586 587 /* decrement hold count */ 588 for (i = 0; i < PTBL_PAGES; i++) { 589 pa = pte_vatopa(mmu, kernel_pmap, (vm_offset_t)ptbl + (i * PAGE_SIZE)); 590 m = PHYS_TO_VM_PAGE(pa); 591 m->wire_count--; 592 } 593 594 /* 595 * Free ptbl pages if there are no pte etries in this ptbl. 596 * wire_count has the same value for all ptbl pages, so check 597 * the last page. 598 */ 599 if (m->wire_count == 0) { 600 ptbl_free(mmu, pmap, pdir_idx); 601 602 //debugf("ptbl_unhold: e (freed ptbl)\n"); 603 return (1); 604 } 605 606 //debugf("ptbl_unhold: e\n"); 607 return (0); 608} 609 610/* 611 * Increment hold count for ptbl pages. This routine is used when 612 * new pte entry is being inserted into ptbl. 613 */ 614static void 615ptbl_hold(mmu_t mmu, pmap_t pmap, unsigned int pdir_idx) 616{ 617 vm_paddr_t pa; 618 pte_t *ptbl; 619 vm_page_t m; 620 int i; 621 622 //debugf("ptbl_hold: s (pmap = 0x%08x pdir_idx = %d)\n", (u_int32_t)pmap, pdir_idx); 623 624 KASSERT((pdir_idx <= (VM_MAXUSER_ADDRESS / PDIR_SIZE)), 625 ("ptbl_hold: invalid pdir_idx")); 626 KASSERT((pmap != kernel_pmap), 627 ("ptbl_hold: holding kernel ptbl!")); 628 629 ptbl = pmap->pm_pdir[pdir_idx]; 630 631 KASSERT((ptbl != NULL), ("ptbl_hold: null ptbl")); 632 633 for (i = 0; i < PTBL_PAGES; i++) { 634 pa = pte_vatopa(mmu, kernel_pmap, (vm_offset_t)ptbl + (i * PAGE_SIZE)); 635 m = PHYS_TO_VM_PAGE(pa); 636 m->wire_count++; 637 } 638 639 //debugf("ptbl_hold: e\n"); 640} 641 642/* Allocate pv_entry structure. 
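 * Entries come from pvzone with M_NOWAIT; once pv_entry_count climbs
 * past pv_entry_high_water the pagedaemon is woken up.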
*/ 643pv_entry_t 644pv_alloc(void) 645{ 646 pv_entry_t pv; 647 648 debugf("pv_alloc: s\n"); 649 650 pv_entry_count++; 651 if ((pv_entry_count > pv_entry_high_water) && (pagedaemon_waken == 0)) { 652 pagedaemon_waken = 1; 653 wakeup (&vm_pages_needed); 654 } 655 pv = uma_zalloc(pvzone, M_NOWAIT); 656 657 debugf("pv_alloc: e\n"); 658 return (pv); 659} 660 661/* Free pv_entry structure. */ 662static __inline void 663pv_free(pv_entry_t pve) 664{ 665 //debugf("pv_free: s\n"); 666 667 pv_entry_count--; 668 uma_zfree(pvzone, pve); 669 670 //debugf("pv_free: e\n"); 671} 672 673 674/* Allocate and initialize pv_entry structure. */ 675static void 676pv_insert(pmap_t pmap, vm_offset_t va, vm_page_t m) 677{ 678 pv_entry_t pve; 679 680 //int su = (pmap == kernel_pmap); 681 //debugf("pv_insert: s (su = %d pmap = 0x%08x va = 0x%08x m = 0x%08x)\n", su, 682 // (u_int32_t)pmap, va, (u_int32_t)m); 683 684 pve = pv_alloc(); 685 if (pve == NULL) 686 panic("pv_insert: no pv entries!"); 687 688 pve->pv_pmap = pmap; 689 pve->pv_va = va; 690 691 /* add to pv_list */ 692 PMAP_LOCK_ASSERT(pmap, MA_OWNED); 693 mtx_assert(&vm_page_queue_mtx, MA_OWNED); 694 695 TAILQ_INSERT_TAIL(&m->md.pv_list, pve, pv_link); 696 697 //debugf("pv_insert: e\n"); 698} 699 700/* Destroy pv entry. */ 701static void 702pv_remove(pmap_t pmap, vm_offset_t va, vm_page_t m) 703{ 704 pv_entry_t pve; 705 706 //int su = (pmap == kernel_pmap); 707 //debugf("pv_remove: s (su = %d pmap = 0x%08x va = 0x%08x)\n", su, (u_int32_t)pmap, va); 708 709 PMAP_LOCK_ASSERT(pmap, MA_OWNED); 710 mtx_assert(&vm_page_queue_mtx, MA_OWNED); 711 712 /* find pv entry */ 713 TAILQ_FOREACH(pve, &m->md.pv_list, pv_link) { 714 if ((pmap == pve->pv_pmap) && (va == pve->pv_va)) { 715 /* remove from pv_list */ 716 TAILQ_REMOVE(&m->md.pv_list, pve, pv_link); 717 if (TAILQ_EMPTY(&m->md.pv_list)) 718 vm_page_flag_clear(m, PG_WRITEABLE); 719 720 /* free pv entry struct */ 721 pv_free(pve); 722 723 break; 724 } 725 } 726 727 //debugf("pv_remove: e\n"); 728} 729 730/* 731 * Clean pte entry, try to free page table page if requested. 732 * 733 * Return 1 if ptbl pages were freed, otherwise return 0. 734 */ 735static int 736pte_remove(mmu_t mmu, pmap_t pmap, vm_offset_t va, u_int8_t flags) 737{ 738 unsigned int pdir_idx = PDIR_IDX(va); 739 unsigned int ptbl_idx = PTBL_IDX(va); 740 vm_page_t m; 741 pte_t *ptbl; 742 pte_t *pte; 743 744 //int su = (pmap == kernel_pmap); 745 //debugf("pte_remove: s (su = %d pmap = 0x%08x va = 0x%08x flags = %d)\n", 746 // su, (u_int32_t)pmap, va, flags); 747 748 ptbl = pmap->pm_pdir[pdir_idx]; 749 KASSERT(ptbl, ("pte_remove: null ptbl")); 750 751 pte = &ptbl[ptbl_idx]; 752 753 if (pte == NULL || !PTE_ISVALID(pte)) 754 return (0); 755 756 /* Get vm_page_t for mapped pte. */ 757 m = PHYS_TO_VM_PAGE(PTE_PA(pte)); 758 759 if (PTE_ISWIRED(pte)) 760 pmap->pm_stats.wired_count--; 761 762 if (!PTE_ISFAKE(pte)) { 763 /* Handle managed entry. */ 764 if (PTE_ISMANAGED(pte)) { 765 766 /* Handle modified pages. */ 767 if (PTE_ISMODIFIED(pte)) 768 vm_page_dirty(m); 769 770 /* Referenced pages. */ 771 if (PTE_ISREFERENCED(pte)) 772 vm_page_flag_set(m, PG_REFERENCED); 773 774 /* Remove pv_entry from pv_list. 
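 * (pv_remove() also clears PG_WRITEABLE once the page's pv_list
 * becomes empty.)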
*/ 775 pv_remove(pmap, va, m); 776 } 777 } 778 779 pte->flags = 0; 780 pte->rpn = 0; 781 pmap->pm_stats.resident_count--; 782 783 if (flags & PTBL_UNHOLD) { 784 //debugf("pte_remove: e (unhold)\n"); 785 return (ptbl_unhold(mmu, pmap, pdir_idx)); 786 } 787 788 //debugf("pte_remove: e\n"); 789 return (0); 790} 791 792/* 793 * Insert PTE for a given page and virtual address. 794 */ 795void 796pte_enter(mmu_t mmu, pmap_t pmap, vm_page_t m, vm_offset_t va, u_int32_t flags) 797{ 798 unsigned int pdir_idx = PDIR_IDX(va); 799 unsigned int ptbl_idx = PTBL_IDX(va); 800 pte_t *ptbl; 801 pte_t *pte; 802 803 //int su = (pmap == kernel_pmap); 804 //debugf("pte_enter: s (su = %d pmap = 0x%08x va = 0x%08x)\n", su, (u_int32_t)pmap, va); 805 806 /* Get the page table pointer. */ 807 ptbl = pmap->pm_pdir[pdir_idx]; 808 809 if (ptbl) { 810 /* 811 * Check if there is valid mapping for requested 812 * va, if there is, remove it. 813 */ 814 pte = &pmap->pm_pdir[pdir_idx][ptbl_idx]; 815 if (PTE_ISVALID(pte)) { 816 pte_remove(mmu, pmap, va, PTBL_HOLD); 817 } else { 818 /* 819 * pte is not used, increment hold count 820 * for ptbl pages. 821 */ 822 if (pmap != kernel_pmap) 823 ptbl_hold(mmu, pmap, pdir_idx); 824 } 825 } else { 826 /* Allocate page table pages. */ 827 ptbl_alloc(mmu, pmap, pdir_idx); 828 } 829 830 /* Flush entry from TLB. */ 831 tlb0_flush_entry(pmap, va); 832 833 pte = &(pmap->pm_pdir[pdir_idx][ptbl_idx]); 834 835 /* 836 * Insert pv_entry into pv_list for mapped page 837 * if part of managed memory. 838 */ 839 if ((m->flags & PG_FICTITIOUS) == 0) { 840 if ((m->flags & PG_UNMANAGED) == 0) { 841 pte->flags |= PTE_MANAGED; 842 843 /* Create and insert pv entry. */ 844 pv_insert(pmap, va, m); 845 } 846 } else { 847 pte->flags |= PTE_FAKE; 848 } 849 850 pmap->pm_stats.resident_count++; 851 pte->rpn = VM_PAGE_TO_PHYS(m) & ~PTE_PA_MASK; 852 pte->flags |= (PTE_VALID | flags); 853 854 //debugf("pte_enter: e\n"); 855} 856 857/* Return the pa for the given pmap/va. */ 858static vm_paddr_t 859pte_vatopa(mmu_t mmu, pmap_t pmap, vm_offset_t va) 860{ 861 vm_paddr_t pa = 0; 862 pte_t *pte; 863 864 pte = pte_find(mmu, pmap, va); 865 if ((pte != NULL) && PTE_ISVALID(pte)) 866 pa = (PTE_PA(pte) | (va & PTE_PA_MASK)); 867 return (pa); 868} 869 870/* Get a pointer to a PTE in a page table. */ 871static pte_t * 872pte_find(mmu_t mmu, pmap_t pmap, vm_offset_t va) 873{ 874 unsigned int pdir_idx = PDIR_IDX(va); 875 unsigned int ptbl_idx = PTBL_IDX(va); 876 877 KASSERT((pmap != NULL), ("pte_find: invalid pmap")); 878 879 if (pmap->pm_pdir[pdir_idx]) 880 return (&(pmap->pm_pdir[pdir_idx][ptbl_idx])); 881 882 return (NULL); 883} 884 885/**************************************************************************/ 886/* PMAP related */ 887/**************************************************************************/ 888 889/* 890 * This is called during e500_init, before the system is really initialized. 891 */ 892static void 893mmu_booke_bootstrap(mmu_t mmu, vm_offset_t kernelstart, vm_offset_t kernelend) 894{ 895 vm_offset_t phys_kernelend; 896 struct mem_region *mp, *mp1; 897 int cnt, i, j; 898 u_int s, e, sz; 899 u_int phys_avail_count; 900 vm_size_t physsz, hwphyssz, kstack0_sz; 901 vm_offset_t kernel_pdir, kstack0; 902 vm_paddr_t kstack0_phys; 903 904 debugf("mmu_booke_bootstrap: entered\n"); 905 906 /* Align kernel start and end address (kernel image). */ 907 kernelstart = trunc_page(kernelstart); 908 kernelend = round_page(kernelend); 909 910 /* Allocate space for the message buffer. 
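 * Bootstrap-time allocations (msgbuf, tlb0 table, ptbl_bufs, kernel
 * page tables) are all carved out of the region immediately following
 * the kernel image, advancing 'kernelend' past each one.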
*/ 911 msgbufp = (struct msgbuf *)kernelend; 912 kernelend += MSGBUF_SIZE; 913 debugf(" msgbufp at 0x%08x end = 0x%08x\n", (u_int32_t)msgbufp, 914 kernelend); 915 916 kernelend = round_page(kernelend); 917 918 /* Allocate space for tlb0 table. */ 919 tlb0_get_tlbconf(); /* Read TLB0 size and associativity. */ 920 tlb0 = (tlb_entry_t *)kernelend; 921 kernelend += sizeof(tlb_entry_t) * tlb0_size; 922 debugf(" tlb0 at 0x%08x end = 0x%08x\n", (u_int32_t)tlb0, kernelend); 923 924 kernelend = round_page(kernelend); 925 926 /* Allocate space for ptbl_bufs. */ 927 ptbl_bufs = (struct ptbl_buf *)kernelend; 928 kernelend += sizeof(struct ptbl_buf) * PTBL_BUFS; 929 debugf(" ptbl_bufs at 0x%08x end = 0x%08x\n", (u_int32_t)ptbl_bufs, 930 kernelend); 931 932 kernelend = round_page(kernelend); 933 934 /* Allocate PTE tables for kernel KVA. */ 935 kernel_pdir = kernelend; 936 kernel_ptbls = (VM_MAX_KERNEL_ADDRESS - VM_MIN_KERNEL_ADDRESS + 937 PDIR_SIZE - 1) / PDIR_SIZE; 938 kernelend += kernel_ptbls * PTBL_PAGES * PAGE_SIZE; 939 debugf(" kernel ptbls: %d\n", kernel_ptbls); 940 debugf(" kernel pdir at 0x%08x\n", kernel_pdir); 941 942 if (kernelend - kernelstart > 0x1000000) { 943 kernelend = (kernelend + 0x3fffff) & ~0x3fffff; 944 tlb1_mapin_region(kernelstart + 0x1000000, 945 kernload + 0x1000000, kernelend - kernelstart - 0x1000000); 946 } else 947 kernelend = (kernelend + 0xffffff) & ~0xffffff; 948 949 /* 950 * Clear the structures - note we can only do it safely after the 951 * possible additional TLB1 translations are in place so that 952 * all range up to the currently calculated 'kernelend' is covered. 953 */ 954 memset((void *)tlb0, 0, sizeof(tlb_entry_t) * tlb0_size); 955 memset((void *)ptbl_bufs, 0, sizeof(struct ptbl_buf) * PTBL_SIZE); 956 memset((void *)kernel_pdir, 0, kernel_ptbls * PTBL_PAGES * PAGE_SIZE); 957 958 /*******************************************************/ 959 /* Set the start and end of kva. */ 960 /*******************************************************/ 961 virtual_avail = kernelend; 962 virtual_end = VM_MAX_KERNEL_ADDRESS; 963 964 /* Allocate KVA space for page zero/copy operations. */ 965 zero_page_va = virtual_avail; 966 virtual_avail += PAGE_SIZE; 967 zero_page_idle_va = virtual_avail; 968 virtual_avail += PAGE_SIZE; 969 copy_page_src_va = virtual_avail; 970 virtual_avail += PAGE_SIZE; 971 copy_page_dst_va = virtual_avail; 972 virtual_avail += PAGE_SIZE; 973 974 /* Initialize page zero/copy mutexes. */ 975 mtx_init(&zero_page_mutex, "mmu_booke_zero_page", NULL, MTX_DEF); 976 mtx_init(©_page_mutex, "mmu_booke_copy_page", NULL, MTX_DEF); 977 978 /* Initialize tlb0 table mutex. */ 979 mtx_init(&tlb0_mutex, "tlb0", NULL, MTX_SPIN | MTX_RECURSE); 980 981 /* Allocate KVA space for ptbl bufs. */ 982 ptbl_buf_pool_vabase = virtual_avail; 983 virtual_avail += PTBL_BUFS * PTBL_PAGES * PAGE_SIZE; 984 985 debugf("ptbl_buf_pool_vabase = 0x%08x\n", ptbl_buf_pool_vabase); 986 debugf("virtual_avail = %08x\n", virtual_avail); 987 debugf("virtual_end = %08x\n", virtual_end); 988 989 /* Calculate corresponding physical addresses for the kernel region. 
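 * The kernel image is loaded at physical address 'kernload', so the
 * physical end mirrors the virtual extent:
 * kernload + (kernelend - kernelstart).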
*/ 990 phys_kernelend = kernload + (kernelend - kernelstart); 991 992 debugf("kernel image and allocated data:\n"); 993 debugf(" kernload = 0x%08x\n", kernload); 994 debugf(" kernelstart = 0x%08x\n", kernelstart); 995 debugf(" kernelend = 0x%08x\n", kernelend); 996 debugf(" kernel size = 0x%08x\n", kernelend - kernelstart); 997 998 if (sizeof(phys_avail) / sizeof(phys_avail[0]) < availmem_regions_sz) 999 panic("mmu_booke_bootstrap: phys_avail too small"); 1000 1001 /* 1002 * Removed kernel physical address range from avail 1003 * regions list. Page align all regions. 1004 * Non-page aligned memory isn't very interesting to us. 1005 * Also, sort the entries for ascending addresses. 1006 */ 1007 sz = 0; 1008 cnt = availmem_regions_sz; 1009 debugf("processing avail regions:\n"); 1010 for (mp = availmem_regions; mp->mr_size; mp++) { 1011 s = mp->mr_start; 1012 e = mp->mr_start + mp->mr_size; 1013 debugf(" %08x-%08x -> ", s, e); 1014 /* Check whether this region holds all of the kernel. */ 1015 if (s < kernload && e > phys_kernelend) { 1016 availmem_regions[cnt].mr_start = phys_kernelend; 1017 availmem_regions[cnt++].mr_size = e - phys_kernelend; 1018 e = kernload; 1019 } 1020 /* Look whether this regions starts within the kernel. */ 1021 if (s >= kernload && s < phys_kernelend) { 1022 if (e <= phys_kernelend) 1023 goto empty; 1024 s = phys_kernelend; 1025 } 1026 /* Now look whether this region ends within the kernel. */ 1027 if (e > kernload && e <= phys_kernelend) { 1028 if (s >= kernload) 1029 goto empty; 1030 e = kernload; 1031 } 1032 /* Now page align the start and size of the region. */ 1033 s = round_page(s); 1034 e = trunc_page(e); 1035 if (e < s) 1036 e = s; 1037 sz = e - s; 1038 debugf("%08x-%08x = %x\n", s, e, sz); 1039 1040 /* Check whether some memory is left here. */ 1041 if (sz == 0) { 1042 empty: 1043 memmove(mp, mp + 1, 1044 (cnt - (mp - availmem_regions)) * sizeof(*mp)); 1045 cnt--; 1046 mp--; 1047 continue; 1048 } 1049 1050 /* Do an insertion sort. 
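 * Regions before 'mp' are already sorted by mr_start; find the first
 * one that starts above 's' and slide the tail up with memmove() to
 * make room for the current region.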
*/ 1051 for (mp1 = availmem_regions; mp1 < mp; mp1++) 1052 if (s < mp1->mr_start) 1053 break; 1054 if (mp1 < mp) { 1055 memmove(mp1 + 1, mp1, (char *)mp - (char *)mp1); 1056 mp1->mr_start = s; 1057 mp1->mr_size = sz; 1058 } else { 1059 mp->mr_start = s; 1060 mp->mr_size = sz; 1061 } 1062 } 1063 availmem_regions_sz = cnt; 1064 1065 /*******************************************************/ 1066 /* Steal physical memory for kernel stack from the end */ 1067 /* of the first avail region */ 1068 /*******************************************************/ 1069 kstack0_sz = KSTACK_PAGES * PAGE_SIZE; 1070 kstack0_phys = availmem_regions[0].mr_start + 1071 availmem_regions[0].mr_size; 1072 kstack0_phys -= kstack0_sz; 1073 availmem_regions[0].mr_size -= kstack0_sz; 1074 1075 /*******************************************************/ 1076 /* Fill in phys_avail table, based on availmem_regions */ 1077 /*******************************************************/ 1078 phys_avail_count = 0; 1079 physsz = 0; 1080 hwphyssz = 0; 1081 TUNABLE_ULONG_FETCH("hw.physmem", (u_long *) &hwphyssz); 1082 1083 debugf("fill in phys_avail:\n"); 1084 for (i = 0, j = 0; i < availmem_regions_sz; i++, j += 2) { 1085 1086 debugf(" region: 0x%08x - 0x%08x (0x%08x)\n", 1087 availmem_regions[i].mr_start, 1088 availmem_regions[i].mr_start + availmem_regions[i].mr_size, 1089 availmem_regions[i].mr_size); 1090 1091 if (hwphyssz != 0 && 1092 (physsz + availmem_regions[i].mr_size) >= hwphyssz) { 1093 debugf(" hw.physmem adjust\n"); 1094 if (physsz < hwphyssz) { 1095 phys_avail[j] = availmem_regions[i].mr_start; 1096 phys_avail[j + 1] = 1097 availmem_regions[i].mr_start + 1098 hwphyssz - physsz; 1099 physsz = hwphyssz; 1100 phys_avail_count++; 1101 } 1102 break; 1103 } 1104 1105 phys_avail[j] = availmem_regions[i].mr_start; 1106 phys_avail[j + 1] = availmem_regions[i].mr_start + 1107 availmem_regions[i].mr_size; 1108 phys_avail_count++; 1109 physsz += availmem_regions[i].mr_size; 1110 } 1111 physmem = btoc(physsz); 1112 1113 /* Calculate the last available physical address. */ 1114 for (i = 0; phys_avail[i + 2] != 0; i += 2) 1115 ; 1116 Maxmem = powerpc_btop(phys_avail[i + 1]); 1117 1118 debugf("Maxmem = 0x%08lx\n", Maxmem); 1119 debugf("phys_avail_count = %d\n", phys_avail_count); 1120 debugf("physsz = 0x%08x physmem = %ld (0x%08lx)\n", physsz, physmem, physmem); 1121 1122 /*******************************************************/ 1123 /* Initialize (statically allocated) kernel pmap. */ 1124 /*******************************************************/ 1125 PMAP_LOCK_INIT(kernel_pmap); 1126 kptbl_min = VM_MIN_KERNEL_ADDRESS / PDIR_SIZE; 1127 1128 debugf("kernel_pmap = 0x%08x\n", (u_int32_t)kernel_pmap); 1129 debugf("kptbl_min = %d, kernel_kptbls = %d\n", kptbl_min, kernel_ptbls); 1130 debugf("kernel pdir range: 0x%08x - 0x%08x\n", 1131 kptbl_min * PDIR_SIZE, (kptbl_min + kernel_ptbls) * PDIR_SIZE - 1); 1132 1133 /* Initialize kernel pdir */ 1134 for (i = 0; i < kernel_ptbls; i++) 1135 kernel_pmap->pm_pdir[kptbl_min + i] = 1136 (pte_t *)(kernel_pdir + (i * PAGE_SIZE * PTBL_PAGES)); 1137 1138 kernel_pmap->pm_tid = KERNEL_TID; 1139 kernel_pmap->pm_active = ~0; 1140 1141 /* Initialize tidbusy with kenel_pmap entry. 
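 * TID 0 belongs to the kernel pmap (pm_tid was set to KERNEL_TID
 * above); user pmaps get their TIDs later via tid_alloc().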
*/ 1142 tidbusy[0] = kernel_pmap; 1143 1144 /*******************************************************/ 1145 /* Final setup */ 1146 /*******************************************************/ 1147 /* Enter kstack0 into kernel map, provide guard page */ 1148 kstack0 = virtual_avail + KSTACK_GUARD_PAGES * PAGE_SIZE; 1149 thread0.td_kstack = kstack0; 1150 thread0.td_kstack_pages = KSTACK_PAGES; 1151 1152 debugf("kstack_sz = 0x%08x\n", kstack0_sz); 1153 debugf("kstack0_phys at 0x%08x - 0x%08x\n", 1154 kstack0_phys, kstack0_phys + kstack0_sz); 1155 debugf("kstack0 at 0x%08x - 0x%08x\n", kstack0, kstack0 + kstack0_sz); 1156 1157 virtual_avail += KSTACK_GUARD_PAGES * PAGE_SIZE + kstack0_sz; 1158 for (i = 0; i < KSTACK_PAGES; i++) { 1159 mmu_booke_kenter(mmu, kstack0, kstack0_phys); 1160 kstack0 += PAGE_SIZE; 1161 kstack0_phys += PAGE_SIZE; 1162 } 1163 1164 /* Initialize TLB0 handling. */ 1165 tlb0_init(); 1166 1167 debugf("mmu_booke_bootstrap: exit\n"); 1168} 1169 1170/* 1171 * Get the physical page address for the given pmap/virtual address. 1172 */ 1173static vm_paddr_t 1174mmu_booke_extract(mmu_t mmu, pmap_t pmap, vm_offset_t va) 1175{ 1176 vm_paddr_t pa; 1177 1178 PMAP_LOCK(pmap); 1179 pa = pte_vatopa(mmu, pmap, va); 1180 PMAP_UNLOCK(pmap); 1181 1182 return (pa); 1183} 1184 1185/* 1186 * Extract the physical page address associated with the given 1187 * kernel virtual address. 1188 */ 1189static vm_paddr_t 1190mmu_booke_kextract(mmu_t mmu, vm_offset_t va) 1191{ 1192 1193 return (pte_vatopa(mmu, kernel_pmap, va)); 1194} 1195 1196/* 1197 * Initialize the pmap module. 1198 * Called by vm_init, to initialize any structures that the pmap 1199 * system needs to map virtual memory. 1200 */ 1201static void 1202mmu_booke_init(mmu_t mmu) 1203{ 1204 int shpgperproc = PMAP_SHPGPERPROC; 1205 1206 //debugf("mmu_booke_init: s\n"); 1207 1208 /* 1209 * Initialize the address space (zone) for the pv entries. Set a 1210 * high water mark so that the system can recover from excessive 1211 * numbers of pv entries. 1212 */ 1213 pvzone = uma_zcreate("PV ENTRY", sizeof(struct pv_entry), NULL, NULL, 1214 NULL, NULL, UMA_ALIGN_PTR, UMA_ZONE_VM | UMA_ZONE_NOFREE); 1215 1216 TUNABLE_INT_FETCH("vm.pmap.shpgperproc", &shpgperproc); 1217 pv_entry_max = shpgperproc * maxproc + cnt.v_page_count; 1218 1219 TUNABLE_INT_FETCH("vm.pmap.pv_entries", &pv_entry_max); 1220 pv_entry_high_water = 9 * (pv_entry_max / 10); 1221 1222 uma_zone_set_obj(pvzone, &pvzone_obj, pv_entry_max); 1223 1224 /* Pre-fill pvzone with initial number of pv entries. */ 1225 uma_prealloc(pvzone, PV_ENTRY_ZONE_MIN); 1226 1227 /* Initialize ptbl allocation. */ 1228 ptbl_init(); 1229 1230 //debugf("mmu_booke_init: e\n"); 1231} 1232 1233/* 1234 * Map a list of wired pages into kernel virtual address space. This is 1235 * intended for temporary mappings which do not need page modification or 1236 * references recorded. Existing mappings in the region are overwritten. 1237 */ 1238static void 1239mmu_booke_qenter(mmu_t mmu, vm_offset_t sva, vm_page_t *m, int count) 1240{ 1241 vm_offset_t va; 1242 1243 //debugf("mmu_booke_qenter: s (sva = 0x%08x count = %d)\n", sva, count); 1244 1245 va = sva; 1246 while (count-- > 0) { 1247 mmu_booke_kenter(mmu, va, VM_PAGE_TO_PHYS(*m)); 1248 va += PAGE_SIZE; 1249 m++; 1250 } 1251 1252 //debugf("mmu_booke_qenter: e\n"); 1253} 1254 1255/* 1256 * Remove page mappings from kernel virtual address space. Intended for 1257 * temporary mappings entered by mmu_booke_qenter. 
1258 */ 1259static void 1260mmu_booke_qremove(mmu_t mmu, vm_offset_t sva, int count) 1261{ 1262 vm_offset_t va; 1263 1264 //debugf("mmu_booke_qremove: s (sva = 0x%08x count = %d)\n", sva, count); 1265 1266 va = sva; 1267 while (count-- > 0) { 1268 mmu_booke_kremove(mmu, va); 1269 va += PAGE_SIZE; 1270 } 1271 1272 //debugf("mmu_booke_qremove: e\n"); 1273} 1274 1275/* 1276 * Map a wired page into kernel virtual address space. 1277 */ 1278static void 1279mmu_booke_kenter(mmu_t mmu, vm_offset_t va, vm_offset_t pa) 1280{ 1281 unsigned int pdir_idx = PDIR_IDX(va); 1282 unsigned int ptbl_idx = PTBL_IDX(va); 1283 u_int32_t flags; 1284 pte_t *pte; 1285 1286 //debugf("mmu_booke_kenter: s (pdir_idx = %d ptbl_idx = %d va=0x%08x pa=0x%08x)\n", 1287 // pdir_idx, ptbl_idx, va, pa); 1288 1289 KASSERT(((va >= VM_MIN_KERNEL_ADDRESS) && (va <= VM_MAX_KERNEL_ADDRESS)), 1290 ("mmu_booke_kenter: invalid va")); 1291 1292#if 0 1293 /* assume IO mapping, set I, G bits */ 1294 flags = (PTE_G | PTE_I | PTE_FAKE); 1295 1296 /* if mapping is within system memory, do not set I, G bits */ 1297 for (i = 0; i < totalmem_regions_sz; i++) { 1298 if ((pa >= totalmem_regions[i].mr_start) && 1299 (pa < (totalmem_regions[i].mr_start + 1300 totalmem_regions[i].mr_size))) { 1301 flags &= ~(PTE_I | PTE_G | PTE_FAKE); 1302 break; 1303 } 1304 } 1305#else 1306 flags = 0; 1307#endif 1308 1309 flags |= (PTE_SR | PTE_SW | PTE_SX | PTE_WIRED | PTE_VALID); 1310 1311 pte = &(kernel_pmap->pm_pdir[pdir_idx][ptbl_idx]); 1312 1313 if (PTE_ISVALID(pte)) { 1314 //debugf("mmu_booke_kenter: replacing entry!\n"); 1315 1316 /* Flush entry from TLB0 */ 1317 tlb0_flush_entry(kernel_pmap, va); 1318 } 1319 1320 pte->rpn = pa & ~PTE_PA_MASK; 1321 pte->flags = flags; 1322 1323 //debugf("mmu_booke_kenter: pdir_idx = %d ptbl_idx = %d va=0x%08x " 1324 // "pa=0x%08x rpn=0x%08x flags=0x%08x\n", 1325 // pdir_idx, ptbl_idx, va, pa, pte->rpn, pte->flags); 1326 1327 /* Flush the real memory from the instruction cache. */ 1328 if ((flags & (PTE_I | PTE_G)) == 0) { 1329 __syncicache((void *)va, PAGE_SIZE); 1330 } 1331 1332 //debugf("mmu_booke_kenter: e\n"); 1333} 1334 1335/* 1336 * Remove a page from kernel page table. 1337 */ 1338static void 1339mmu_booke_kremove(mmu_t mmu, vm_offset_t va) 1340{ 1341 unsigned int pdir_idx = PDIR_IDX(va); 1342 unsigned int ptbl_idx = PTBL_IDX(va); 1343 pte_t *pte; 1344 1345 //debugf("mmu_booke_kremove: s (va = 0x%08x)\n", va); 1346 1347 KASSERT(((va >= VM_MIN_KERNEL_ADDRESS) && (va <= VM_MAX_KERNEL_ADDRESS)), 1348 ("mmu_booke_kremove: invalid va")); 1349 1350 pte = &(kernel_pmap->pm_pdir[pdir_idx][ptbl_idx]); 1351 1352 if (!PTE_ISVALID(pte)) { 1353 //debugf("mmu_booke_kremove: e (invalid pte)\n"); 1354 return; 1355 } 1356 1357 /* Invalidate entry in TLB0. */ 1358 tlb0_flush_entry(kernel_pmap, va); 1359 1360 pte->flags = 0; 1361 pte->rpn = 0; 1362 1363 //debugf("mmu_booke_kremove: e\n"); 1364} 1365 1366/* 1367 * Initialize pmap associated with process 0. 1368 */ 1369static void 1370mmu_booke_pinit0(mmu_t mmu, pmap_t pmap) 1371{ 1372 //debugf("mmu_booke_pinit0: s (pmap = 0x%08x)\n", (u_int32_t)pmap); 1373 mmu_booke_pinit(mmu, pmap); 1374 PCPU_SET(curpmap, pmap); 1375 //debugf("mmu_booke_pinit0: e\n"); 1376} 1377 1378/* 1379 * Initialize a preallocated and zeroed pmap structure, 1380 * such as one in a vmspace structure. 
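 * The new pmap starts with pm_tid = 0; a real TID is only allocated
 * on first activation in mmu_booke_activate().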
1381 */ 1382static void 1383mmu_booke_pinit(mmu_t mmu, pmap_t pmap) 1384{ 1385 1386 //struct thread *td; 1387 //struct proc *p; 1388 1389 //td = PCPU_GET(curthread); 1390 //p = td->td_proc; 1391 //debugf("mmu_booke_pinit: s (pmap = 0x%08x)\n", (u_int32_t)pmap); 1392 //printf("mmu_booke_pinit: proc %d '%s'\n", p->p_pid, p->p_comm); 1393 1394 KASSERT((pmap != kernel_pmap), ("mmu_booke_pinit: initializing kernel_pmap")); 1395 1396 PMAP_LOCK_INIT(pmap); 1397 pmap->pm_tid = 0; 1398 pmap->pm_active = 0; 1399 bzero(&pmap->pm_stats, sizeof(pmap->pm_stats)); 1400 bzero(&pmap->pm_pdir, sizeof(pte_t *) * PDIR_NENTRIES); 1401 1402 TAILQ_INIT(&pmap->ptbl_list); 1403 1404 //debugf("mmu_booke_pinit: e\n"); 1405} 1406 1407/* 1408 * Release any resources held by the given physical map. 1409 * Called when a pmap initialized by mmu_booke_pinit is being released. 1410 * Should only be called if the map contains no valid mappings. 1411 */ 1412static void 1413mmu_booke_release(mmu_t mmu, pmap_t pmap) 1414{ 1415 1416 //debugf("mmu_booke_release: s\n"); 1417 1418 PMAP_LOCK_DESTROY(pmap); 1419 1420 //debugf("mmu_booke_release: e\n"); 1421} 1422 1423#if 0 1424/* Not needed, kernel page tables are statically allocated. */ 1425void 1426mmu_booke_growkernel(vm_offset_t maxkvaddr) 1427{ 1428} 1429#endif 1430 1431/* 1432 * Insert the given physical page at the specified virtual address in the 1433 * target physical map with the protection requested. If specified the page 1434 * will be wired down. 1435 */ 1436static void 1437mmu_booke_enter(mmu_t mmu, pmap_t pmap, vm_offset_t va, vm_page_t m, 1438 vm_prot_t prot, boolean_t wired) 1439{ 1440 vm_page_lock_queues(); 1441 PMAP_LOCK(pmap); 1442 mmu_booke_enter_locked(mmu, pmap, va, m, prot, wired); 1443 vm_page_unlock_queues(); 1444 PMAP_UNLOCK(pmap); 1445} 1446 1447static void 1448mmu_booke_enter_locked(mmu_t mmu, pmap_t pmap, vm_offset_t va, vm_page_t m, 1449 vm_prot_t prot, boolean_t wired) 1450{ 1451 pte_t *pte; 1452 vm_paddr_t pa; 1453 u_int32_t flags; 1454 int su, sync; 1455 1456 pa = VM_PAGE_TO_PHYS(m); 1457 su = (pmap == kernel_pmap); 1458 sync = 0; 1459 1460 //debugf("mmu_booke_enter_locked: s (pmap=0x%08x su=%d tid=%d m=0x%08x va=0x%08x " 1461 // "pa=0x%08x prot=0x%08x wired=%d)\n", 1462 // (u_int32_t)pmap, su, pmap->pm_tid, 1463 // (u_int32_t)m, va, pa, prot, wired); 1464 1465 if (su) { 1466 KASSERT(((va >= virtual_avail) && (va <= VM_MAX_KERNEL_ADDRESS)), 1467 ("mmu_booke_enter_locked: kernel pmap, non kernel va")); 1468 } else { 1469 KASSERT((va <= VM_MAXUSER_ADDRESS), 1470 ("mmu_booke_enter_locked: user pmap, non user va")); 1471 } 1472 1473 PMAP_LOCK_ASSERT(pmap, MA_OWNED); 1474 1475 /* 1476 * If there is an existing mapping, and the physical address has not 1477 * changed, must be protection or wiring change. 1478 */ 1479 if (((pte = pte_find(mmu, pmap, va)) != NULL) && 1480 (PTE_ISVALID(pte)) && (PTE_PA(pte) == pa)) { 1481 1482 //debugf("mmu_booke_enter_locked: update\n"); 1483 1484 /* Wiring change, just update stats. */ 1485 if (wired) { 1486 if (!PTE_ISWIRED(pte)) { 1487 pte->flags |= PTE_WIRED; 1488 pmap->pm_stats.wired_count++; 1489 } 1490 } else { 1491 if (PTE_ISWIRED(pte)) { 1492 pte->flags &= ~PTE_WIRED; 1493 pmap->pm_stats.wired_count--; 1494 } 1495 } 1496 1497 /* Save the old bits and clear the ones we're interested in. */ 1498 flags = pte->flags; 1499 pte->flags &= ~(PTE_UW | PTE_UX | PTE_SW | PTE_SX | PTE_MODIFIED); 1500 1501 if (prot & VM_PROT_WRITE) { 1502 /* Add write permissions. 
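 * (PTE_SW always; PTE_UW additionally for user mappings.)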
*/ 1503 pte->flags |= PTE_SW; 1504 if (!su) 1505 pte->flags |= PTE_UW; 1506 } else { 1507 /* Handle modified pages, sense modify status. */ 1508 if (PTE_ISMODIFIED(pte)) 1509 vm_page_dirty(m); 1510 } 1511 1512 /* If we're turning on execute permissions, flush the icache. */ 1513 if (prot & VM_PROT_EXECUTE) { 1514 pte->flags |= PTE_SX; 1515 if (!su) 1516 pte->flags |= PTE_UX; 1517 1518 if ((flags & (PTE_UX | PTE_SX)) == 0) 1519 sync++; 1520 } 1521 1522 /* Flush the old mapping from TLB0. */ 1523 pte->flags &= ~PTE_REFERENCED; 1524 tlb0_flush_entry(pmap, va); 1525 } else { 1526 /* 1527 * If there is an existing mapping, but its for a different 1528 * physical address, pte_enter() will delete the old mapping. 1529 */ 1530 //if ((pte != NULL) && PTE_ISVALID(pte)) 1531 // debugf("mmu_booke_enter_locked: replace\n"); 1532 //else 1533 // debugf("mmu_booke_enter_locked: new\n"); 1534 1535 /* Now set up the flags and install the new mapping. */ 1536 flags = (PTE_SR | PTE_VALID); 1537 1538 if (!su) 1539 flags |= PTE_UR; 1540 1541 if (prot & VM_PROT_WRITE) { 1542 flags |= PTE_SW; 1543 if (!su) 1544 flags |= PTE_UW; 1545 } 1546 1547 if (prot & VM_PROT_EXECUTE) { 1548 flags |= PTE_SX; 1549 if (!su) 1550 flags |= PTE_UX; 1551 } 1552 1553 /* If its wired update stats. */ 1554 if (wired) { 1555 pmap->pm_stats.wired_count++; 1556 flags |= PTE_WIRED; 1557 } 1558 1559 pte_enter(mmu, pmap, m, va, flags); 1560 1561 /* Flush the real memory from the instruction cache. */ 1562 if (prot & VM_PROT_EXECUTE) 1563 sync++; 1564 } 1565 1566 if (sync && (su || pmap == PCPU_GET(curpmap))) { 1567 __syncicache((void *)va, PAGE_SIZE); 1568 sync = 0; 1569 } 1570 1571 if (sync) { 1572 /* Create a temporary mapping. */ 1573 pmap = PCPU_GET(curpmap); 1574 1575 va = 0; 1576 pte = pte_find(mmu, pmap, va); 1577 KASSERT(pte == NULL, ("%s:%d", __func__, __LINE__)); 1578 1579 flags = PTE_SR | PTE_VALID | PTE_UR; 1580 pte_enter(mmu, pmap, m, va, flags); 1581 __syncicache((void *)va, PAGE_SIZE); 1582 pte_remove(mmu, pmap, va, PTBL_UNHOLD); 1583 } 1584 1585 //debugf("mmu_booke_enter_locked: e\n"); 1586} 1587 1588/* 1589 * Maps a sequence of resident pages belonging to the same object. 1590 * The sequence begins with the given page m_start. This page is 1591 * mapped at the given virtual address start. Each subsequent page is 1592 * mapped at a virtual address that is offset from start by the same 1593 * amount as the page is offset from m_start within the object. The 1594 * last page in the sequence is the page with the largest offset from 1595 * m_start that can be mapped at a virtual address less than the given 1596 * virtual address end. Not every virtual page between start and end 1597 * is mapped; only those for which a resident page exists with the 1598 * corresponding offset from m_start are mapped. 
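 * Mappings are entered with the protection masked down to
 * read/execute and are never wired; see the arguments passed to
 * mmu_booke_enter_locked() below.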
1599 */ 1600static void 1601mmu_booke_enter_object(mmu_t mmu, pmap_t pmap, vm_offset_t start, 1602 vm_offset_t end, vm_page_t m_start, vm_prot_t prot) 1603{ 1604 vm_page_t m; 1605 vm_pindex_t diff, psize; 1606 1607 psize = atop(end - start); 1608 m = m_start; 1609 PMAP_LOCK(pmap); 1610 while (m != NULL && (diff = m->pindex - m_start->pindex) < psize) { 1611 mmu_booke_enter_locked(mmu, pmap, start + ptoa(diff), m, prot & 1612 (VM_PROT_READ | VM_PROT_EXECUTE), FALSE); 1613 m = TAILQ_NEXT(m, listq); 1614 } 1615 PMAP_UNLOCK(pmap); 1616} 1617 1618static void 1619mmu_booke_enter_quick(mmu_t mmu, pmap_t pmap, vm_offset_t va, vm_page_t m, 1620 vm_prot_t prot) 1621{ 1622 1623 //debugf("mmu_booke_enter_quick: s\n"); 1624 1625 PMAP_LOCK(pmap); 1626 mmu_booke_enter_locked(mmu, pmap, va, m, 1627 prot & (VM_PROT_READ | VM_PROT_EXECUTE), FALSE); 1628 PMAP_UNLOCK(pmap); 1629 1630 //debugf("mmu_booke_enter_quick e\n"); 1631} 1632 1633/* 1634 * Remove the given range of addresses from the specified map. 1635 * 1636 * It is assumed that the start and end are properly rounded to the page size. 1637 */ 1638static void 1639mmu_booke_remove(mmu_t mmu, pmap_t pmap, vm_offset_t va, vm_offset_t endva) 1640{ 1641 pte_t *pte; 1642 u_int8_t hold_flag; 1643 1644 int su = (pmap == kernel_pmap); 1645 1646 //debugf("mmu_booke_remove: s (su = %d pmap=0x%08x tid=%d va=0x%08x endva=0x%08x)\n", 1647 // su, (u_int32_t)pmap, pmap->pm_tid, va, endva); 1648 1649 if (su) { 1650 KASSERT(((va >= virtual_avail) && (va <= VM_MAX_KERNEL_ADDRESS)), 1651 ("mmu_booke_enter: kernel pmap, non kernel va")); 1652 } else { 1653 KASSERT((va <= VM_MAXUSER_ADDRESS), 1654 ("mmu_booke_enter: user pmap, non user va")); 1655 } 1656 1657 if (PMAP_REMOVE_DONE(pmap)) { 1658 //debugf("mmu_booke_remove: e (empty)\n"); 1659 return; 1660 } 1661 1662 hold_flag = PTBL_HOLD_FLAG(pmap); 1663 //debugf("mmu_booke_remove: hold_flag = %d\n", hold_flag); 1664 1665 vm_page_lock_queues(); 1666 PMAP_LOCK(pmap); 1667 for (; va < endva; va += PAGE_SIZE) { 1668 pte = pte_find(mmu, pmap, va); 1669 if ((pte != NULL) && PTE_ISVALID(pte)) { 1670 pte_remove(mmu, pmap, va, hold_flag); 1671 1672 /* Flush mapping from TLB0. */ 1673 tlb0_flush_entry(pmap, va); 1674 } 1675 } 1676 PMAP_UNLOCK(pmap); 1677 vm_page_unlock_queues(); 1678 1679 //debugf("mmu_booke_remove: e\n"); 1680} 1681 1682/* 1683 * Remove physical page from all pmaps in which it resides. 1684 */ 1685static void 1686mmu_booke_remove_all(mmu_t mmu, vm_page_t m) 1687{ 1688 pv_entry_t pv, pvn; 1689 u_int8_t hold_flag; 1690 1691 //debugf("mmu_booke_remove_all: s\n"); 1692 1693 mtx_assert(&vm_page_queue_mtx, MA_OWNED); 1694 1695 for (pv = TAILQ_FIRST(&m->md.pv_list); pv != NULL; pv = pvn) { 1696 pvn = TAILQ_NEXT(pv, pv_link); 1697 1698 PMAP_LOCK(pv->pv_pmap); 1699 hold_flag = PTBL_HOLD_FLAG(pv->pv_pmap); 1700 pte_remove(mmu, pv->pv_pmap, pv->pv_va, hold_flag); 1701 1702 /* Flush mapping from TLB0. */ 1703 tlb0_flush_entry(pv->pv_pmap, pv->pv_va); 1704 PMAP_UNLOCK(pv->pv_pmap); 1705 } 1706 vm_page_flag_clear(m, PG_WRITEABLE); 1707 1708 //debugf("mmu_booke_remove_all: e\n"); 1709} 1710 1711/* 1712 * Map a range of physical addresses into kernel virtual address space. 1713 * 1714 * The value passed in *virt is a suggested virtual address for the mapping. 1715 * Architectures which can support a direct-mapped physical to virtual region 1716 * can return the appropriate address within that region, leaving '*virt' 1717 * unchanged. 
We cannot and therefore do not; *virt is updated with the 1718 * first usable address after the mapped region. 1719 */ 1720static vm_offset_t 1721mmu_booke_map(mmu_t mmu, vm_offset_t *virt, vm_offset_t pa_start, 1722 vm_offset_t pa_end, int prot) 1723{ 1724 vm_offset_t sva = *virt; 1725 vm_offset_t va = sva; 1726 1727 //debugf("mmu_booke_map: s (sva = 0x%08x pa_start = 0x%08x pa_end = 0x%08x)\n", 1728 // sva, pa_start, pa_end); 1729 1730 while (pa_start < pa_end) { 1731 mmu_booke_kenter(mmu, va, pa_start); 1732 va += PAGE_SIZE; 1733 pa_start += PAGE_SIZE; 1734 } 1735 *virt = va; 1736 1737 //debugf("mmu_booke_map: e (va = 0x%08x)\n", va); 1738 return (sva); 1739} 1740 1741/* 1742 * The pmap must be activated before it's address space can be accessed in any 1743 * way. 1744 */ 1745static void 1746mmu_booke_activate(mmu_t mmu, struct thread *td) 1747{ 1748 pmap_t pmap; 1749 1750 pmap = &td->td_proc->p_vmspace->vm_pmap; 1751 1752 //debugf("mmu_booke_activate: s (proc = '%s', id = %d, pmap = 0x%08x)\n", 1753 // td->td_proc->p_comm, td->td_proc->p_pid, pmap); 1754 1755 KASSERT((pmap != kernel_pmap), ("mmu_booke_activate: kernel_pmap!")); 1756 1757 mtx_lock_spin(&sched_lock); 1758 1759 pmap->pm_active |= PCPU_GET(cpumask); 1760 PCPU_SET(curpmap, pmap); 1761 1762 if (!pmap->pm_tid) 1763 tid_alloc(pmap); 1764 1765 /* Load PID0 register with pmap tid value. */ 1766 load_pid0(pmap->pm_tid); 1767 1768 mtx_unlock_spin(&sched_lock); 1769 1770 //debugf("mmu_booke_activate: e (tid = %d for '%s')\n", pmap->pm_tid, 1771 // td->td_proc->p_comm); 1772} 1773 1774/* 1775 * Deactivate the specified process's address space. 1776 */ 1777static void 1778mmu_booke_deactivate(mmu_t mmu, struct thread *td) 1779{ 1780 pmap_t pmap; 1781 1782 pmap = &td->td_proc->p_vmspace->vm_pmap; 1783 pmap->pm_active &= ~(PCPU_GET(cpumask)); 1784 PCPU_SET(curpmap, NULL); 1785} 1786 1787/* 1788 * Copy the range specified by src_addr/len 1789 * from the source map to the range dst_addr/len 1790 * in the destination map. 1791 * 1792 * This routine is only advisory and need not do anything. 1793 */ 1794static void 1795mmu_booke_copy(pmap_t dst_pmap, pmap_t src_pmap, vm_offset_t dst_addr, 1796 vm_size_t len, vm_offset_t src_addr) 1797{ 1798 1799} 1800 1801/* 1802 * Set the physical protection on the specified range of this map as requested. 1803 */ 1804static void 1805mmu_booke_protect(mmu_t mmu, pmap_t pmap, vm_offset_t sva, vm_offset_t eva, 1806 vm_prot_t prot) 1807{ 1808 vm_offset_t va; 1809 vm_page_t m; 1810 pte_t *pte; 1811 1812 if ((prot & VM_PROT_READ) == VM_PROT_NONE) { 1813 mmu_booke_remove(mmu, pmap, sva, eva); 1814 return; 1815 } 1816 1817 if (prot & VM_PROT_WRITE) 1818 return; 1819 1820 vm_page_lock_queues(); 1821 PMAP_LOCK(pmap); 1822 for (va = sva; va < eva; va += PAGE_SIZE) { 1823 if ((pte = pte_find(mmu, pmap, va)) != NULL) { 1824 if (PTE_ISVALID(pte)) { 1825 m = PHYS_TO_VM_PAGE(PTE_PA(pte)); 1826 1827 /* Handle modified pages. */ 1828 if (PTE_ISMODIFIED(pte)) 1829 vm_page_dirty(m); 1830 1831 /* Referenced pages. */ 1832 if (PTE_ISREFERENCED(pte)) 1833 vm_page_flag_set(m, PG_REFERENCED); 1834 1835 /* Flush mapping from TLB0. */ 1836 pte->flags &= ~(PTE_UW | PTE_SW | PTE_MODIFIED | 1837 PTE_REFERENCED); 1838 tlb0_flush_entry(pmap, va); 1839 } 1840 } 1841 } 1842 PMAP_UNLOCK(pmap); 1843 vm_page_unlock_queues(); 1844} 1845 1846/* 1847 * Clear the write and modified bits in each of the given page's mappings. 
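 * For every pv entry the write (PTE_UW/PTE_SW), MODIFIED and
 * REFERENCED bits are cleared and the stale TLB0 entry flushed;
 * PG_WRITEABLE is cleared on the page once all mappings are done.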
1848 */ 1849static void 1850mmu_booke_remove_write(mmu_t mmu, vm_page_t m) 1851{ 1852 pv_entry_t pv; 1853 pte_t *pte; 1854 1855 mtx_assert(&vm_page_queue_mtx, MA_OWNED); 1856 if ((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) != 0 || 1857 (m->flags & PG_WRITEABLE) == 0) 1858 return; 1859 1860 TAILQ_FOREACH(pv, &m->md.pv_list, pv_link) { 1861 PMAP_LOCK(pv->pv_pmap); 1862 if ((pte = pte_find(mmu, pv->pv_pmap, pv->pv_va)) != NULL) { 1863 if (PTE_ISVALID(pte)) { 1864 m = PHYS_TO_VM_PAGE(PTE_PA(pte)); 1865 1866 /* Handle modified pages. */ 1867 if (PTE_ISMODIFIED(pte)) 1868 vm_page_dirty(m); 1869 1870 /* Referenced pages. */ 1871 if (PTE_ISREFERENCED(pte)) 1872 vm_page_flag_set(m, PG_REFERENCED); 1873 1874 /* Flush mapping from TLB0. */ 1875 pte->flags &= ~(PTE_UW | PTE_SW | PTE_MODIFIED | 1876 PTE_REFERENCED); 1877 tlb0_flush_entry(pv->pv_pmap, pv->pv_va); 1878 } 1879 } 1880 PMAP_UNLOCK(pv->pv_pmap); 1881 } 1882 vm_page_flag_clear(m, PG_WRITEABLE); 1883} 1884 1885static boolean_t 1886mmu_booke_page_executable(mmu_t mmu, vm_page_t m) 1887{ 1888 pv_entry_t pv; 1889 pte_t *pte; 1890 boolean_t executable; 1891 1892 executable = FALSE; 1893 TAILQ_FOREACH(pv, &m->md.pv_list, pv_link) { 1894 PMAP_LOCK(pv->pv_pmap); 1895 pte = pte_find(mmu, pv->pv_pmap, pv->pv_va); 1896 if (pte != NULL && PTE_ISVALID(pte) && (pte->flags & PTE_UX)) 1897 executable = TRUE; 1898 PMAP_UNLOCK(pv->pv_pmap); 1899 if (executable) 1900 break; 1901 } 1902 1903 return (executable); 1904} 1905 1906/* 1907 * Atomically extract and hold the physical page with the given 1908 * pmap and virtual address pair if that mapping permits the given 1909 * protection. 1910 */ 1911static vm_page_t 1912mmu_booke_extract_and_hold(mmu_t mmu, pmap_t pmap, vm_offset_t va, 1913 vm_prot_t prot) 1914{ 1915 pte_t *pte; 1916 vm_page_t m; 1917 u_int32_t pte_wbit; 1918 1919 m = NULL; 1920 vm_page_lock_queues(); 1921 PMAP_LOCK(pmap); 1922 pte = pte_find(mmu, pmap, va); 1923 1924 if ((pte != NULL) && PTE_ISVALID(pte)) { 1925 if (pmap == kernel_pmap) 1926 pte_wbit = PTE_SW; 1927 else 1928 pte_wbit = PTE_UW; 1929 1930 if ((pte->flags & pte_wbit) || ((prot & VM_PROT_WRITE) == 0)) { 1931 m = PHYS_TO_VM_PAGE(PTE_PA(pte)); 1932 vm_page_hold(m); 1933 } 1934 } 1935 1936 vm_page_unlock_queues(); 1937 PMAP_UNLOCK(pmap); 1938 return (m); 1939} 1940 1941/* 1942 * Initialize a vm_page's machine-dependent fields. 1943 */ 1944static void 1945mmu_booke_page_init(mmu_t mmu, vm_page_t m) 1946{ 1947 1948 TAILQ_INIT(&m->md.pv_list); 1949} 1950 1951/* 1952 * mmu_booke_zero_page_area zeros the specified hardware page by 1953 * mapping it into virtual memory and using bzero to clear 1954 * its contents. 1955 * 1956 * off and size must reside within a single page. 1957 */ 1958static void 1959mmu_booke_zero_page_area(mmu_t mmu, vm_page_t m, int off, int size) 1960{ 1961 vm_offset_t va; 1962 1963 //debugf("mmu_booke_zero_page_area: s\n"); 1964 1965 mtx_lock(&zero_page_mutex); 1966 va = zero_page_va; 1967 1968 mmu_booke_kenter(mmu, va, VM_PAGE_TO_PHYS(m)); 1969 bzero((caddr_t)va + off, size); 1970 mmu_booke_kremove(mmu, va); 1971 1972 mtx_unlock(&zero_page_mutex); 1973 1974 //debugf("mmu_booke_zero_page_area: e\n"); 1975} 1976 1977/* 1978 * mmu_booke_zero_page zeros the specified hardware page. 
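 *
 * This is a convenience wrapper: the whole page is cleared through
 * mmu_booke_zero_page_area(), which supplies the temporary kernel mapping
 * (zero_page_va) and the serializing zero_page_mutex.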
1979 */ 1980static void 1981mmu_booke_zero_page(mmu_t mmu, vm_page_t m) 1982{ 1983 1984 //debugf("mmu_booke_zero_page: s\n"); 1985 mmu_booke_zero_page_area(mmu, m, 0, PAGE_SIZE); 1986 //debugf("mmu_booke_zero_page: e\n"); 1987} 1988 1989/* 1990 * mmu_booke_copy_page copies the specified (machine independent) page by 1991 * mapping the page into virtual memory and using memcopy to copy the page, 1992 * one machine dependent page at a time. 1993 */ 1994static void 1995mmu_booke_copy_page(mmu_t mmu, vm_page_t sm, vm_page_t dm) 1996{ 1997 vm_offset_t sva, dva; 1998 1999 //debugf("mmu_booke_copy_page: s\n"); 2000 2001 mtx_lock(©_page_mutex); 2002 sva = copy_page_src_va; 2003 dva = copy_page_dst_va; 2004 2005 mmu_booke_kenter(mmu, sva, VM_PAGE_TO_PHYS(sm)); 2006 mmu_booke_kenter(mmu, dva, VM_PAGE_TO_PHYS(dm)); 2007 memcpy((caddr_t)dva, (caddr_t)sva, PAGE_SIZE); 2008 mmu_booke_kremove(mmu, dva); 2009 mmu_booke_kremove(mmu, sva); 2010 2011 mtx_unlock(©_page_mutex); 2012 2013 //debugf("mmu_booke_copy_page: e\n"); 2014} 2015 2016#if 0 2017/* 2018 * Remove all pages from specified address space, this aids process exit 2019 * speeds. This is much faster than mmu_booke_remove in the case of running 2020 * down an entire address space. Only works for the current pmap. 2021 */ 2022void 2023mmu_booke_remove_pages(pmap_t pmap) 2024{ 2025} 2026#endif 2027 2028/* 2029 * mmu_booke_zero_page_idle zeros the specified hardware page by mapping it 2030 * into virtual memory and using bzero to clear its contents. This is intended 2031 * to be called from the vm_pagezero process only and outside of Giant. No 2032 * lock is required. 2033 */ 2034static void 2035mmu_booke_zero_page_idle(mmu_t mmu, vm_page_t m) 2036{ 2037 vm_offset_t va; 2038 2039 //debugf("mmu_booke_zero_page_idle: s\n"); 2040 2041 va = zero_page_idle_va; 2042 mmu_booke_kenter(mmu, va, VM_PAGE_TO_PHYS(m)); 2043 bzero((caddr_t)va, PAGE_SIZE); 2044 mmu_booke_kremove(mmu, va); 2045 2046 //debugf("mmu_booke_zero_page_idle: e\n"); 2047} 2048 2049/* 2050 * Return whether or not the specified physical page was modified 2051 * in any of physical maps. 2052 */ 2053static boolean_t 2054mmu_booke_is_modified(mmu_t mmu, vm_page_t m) 2055{ 2056 pte_t *pte; 2057 pv_entry_t pv; 2058 2059 mtx_assert(&vm_page_queue_mtx, MA_OWNED); 2060 if ((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) != 0) 2061 return (FALSE); 2062 2063 TAILQ_FOREACH(pv, &m->md.pv_list, pv_link) { 2064 PMAP_LOCK(pv->pv_pmap); 2065 if ((pte = pte_find(mmu, pv->pv_pmap, pv->pv_va)) != NULL) { 2066 if (!PTE_ISVALID(pte)) 2067 goto make_sure_to_unlock; 2068 2069 if (PTE_ISMODIFIED(pte)) { 2070 PMAP_UNLOCK(pv->pv_pmap); 2071 return (TRUE); 2072 } 2073 } 2074make_sure_to_unlock: 2075 PMAP_UNLOCK(pv->pv_pmap); 2076 } 2077 return (FALSE); 2078} 2079 2080/* 2081 * Return whether or not the specified virtual address is elgible 2082 * for prefault. 2083 */ 2084static boolean_t 2085mmu_booke_is_prefaultable(mmu_t mmu, pmap_t pmap, vm_offset_t addr) 2086{ 2087 2088 return (FALSE); 2089} 2090 2091/* 2092 * Clear the modify bits on the specified physical page. 
2093 */ 2094static void 2095mmu_booke_clear_modify(mmu_t mmu, vm_page_t m) 2096{ 2097 pte_t *pte; 2098 pv_entry_t pv; 2099 2100 mtx_assert(&vm_page_queue_mtx, MA_OWNED); 2101 if ((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) != 0) 2102 return; 2103 2104 TAILQ_FOREACH(pv, &m->md.pv_list, pv_link) { 2105 PMAP_LOCK(pv->pv_pmap); 2106 if ((pte = pte_find(mmu, pv->pv_pmap, pv->pv_va)) != NULL) { 2107 if (!PTE_ISVALID(pte)) 2108 goto make_sure_to_unlock; 2109 2110 if (pte->flags & (PTE_SW | PTE_UW | PTE_MODIFIED)) { 2111 pte->flags &= ~(PTE_SW | PTE_UW | PTE_MODIFIED | 2112 PTE_REFERENCED); 2113 tlb0_flush_entry(pv->pv_pmap, pv->pv_va); 2114 } 2115 } 2116make_sure_to_unlock: 2117 PMAP_UNLOCK(pv->pv_pmap); 2118 } 2119} 2120 2121/* 2122 * Return a count of reference bits for a page, clearing those bits. 2123 * It is not necessary for every reference bit to be cleared, but it 2124 * is necessary that 0 only be returned when there are truly no 2125 * reference bits set. 2126 * 2127 * XXX: The exact number of bits to check and clear is a matter that 2128 * should be tested and standardized at some point in the future for 2129 * optimal aging of shared pages. 2130 */ 2131static int 2132mmu_booke_ts_referenced(mmu_t mmu, vm_page_t m) 2133{ 2134 pte_t *pte; 2135 pv_entry_t pv; 2136 int count; 2137 2138 mtx_assert(&vm_page_queue_mtx, MA_OWNED); 2139 if ((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) != 0) 2140 return (0); 2141 2142 count = 0; 2143 TAILQ_FOREACH(pv, &m->md.pv_list, pv_link) { 2144 PMAP_LOCK(pv->pv_pmap); 2145 if ((pte = pte_find(mmu, pv->pv_pmap, pv->pv_va)) != NULL) { 2146 if (!PTE_ISVALID(pte)) 2147 goto make_sure_to_unlock; 2148 2149 if (PTE_ISREFERENCED(pte)) { 2150 pte->flags &= ~PTE_REFERENCED; 2151 tlb0_flush_entry(pv->pv_pmap, pv->pv_va); 2152 2153 if (++count > 4) { 2154 PMAP_UNLOCK(pv->pv_pmap); 2155 break; 2156 } 2157 } 2158 } 2159make_sure_to_unlock: 2160 PMAP_UNLOCK(pv->pv_pmap); 2161 } 2162 return (count); 2163} 2164 2165/* 2166 * Clear the reference bit on the specified physical page. 2167 */ 2168static void 2169mmu_booke_clear_reference(mmu_t mmu, vm_page_t m) 2170{ 2171 pte_t *pte; 2172 pv_entry_t pv; 2173 2174 mtx_assert(&vm_page_queue_mtx, MA_OWNED); 2175 if ((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) != 0) 2176 return; 2177 2178 TAILQ_FOREACH(pv, &m->md.pv_list, pv_link) { 2179 PMAP_LOCK(pv->pv_pmap); 2180 if ((pte = pte_find(mmu, pv->pv_pmap, pv->pv_va)) != NULL) { 2181 if (!PTE_ISVALID(pte)) 2182 goto make_sure_to_unlock; 2183 2184 if (PTE_ISREFERENCED(pte)) { 2185 pte->flags &= ~PTE_REFERENCED; 2186 tlb0_flush_entry(pv->pv_pmap, pv->pv_va); 2187 } 2188 } 2189make_sure_to_unlock: 2190 PMAP_UNLOCK(pv->pv_pmap); 2191 } 2192} 2193 2194/* 2195 * Change wiring attribute for a map/virtual-address pair. 2196 */ 2197static void 2198mmu_booke_change_wiring(mmu_t mmu, pmap_t pmap, vm_offset_t va, boolean_t wired) 2199{ 2200 pte_t *pte;; 2201 2202 PMAP_LOCK(pmap); 2203 if ((pte = pte_find(mmu, pmap, va)) != NULL) { 2204 if (wired) { 2205 if (!PTE_ISWIRED(pte)) { 2206 pte->flags |= PTE_WIRED; 2207 pmap->pm_stats.wired_count++; 2208 } 2209 } else { 2210 if (PTE_ISWIRED(pte)) { 2211 pte->flags &= ~PTE_WIRED; 2212 pmap->pm_stats.wired_count--; 2213 } 2214 } 2215 } 2216 PMAP_UNLOCK(pmap); 2217} 2218 2219/* 2220 * Return true if the pmap's pv is one of the first 16 pvs linked to from this 2221 * page. This count may be changed upwards or downwards in the future; it is 2222 * only necessary that true be returned for a small subset of pmaps for proper 2223 * page aging. 
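 *
 * Only the pv_pmap pointers are compared here, under the page queues lock
 * and without taking any per-pmap lock; a FALSE return merely means the
 * pmap was not found among the first 16 pv entries examined.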
2224 */ 2225static boolean_t 2226mmu_booke_page_exists_quick(mmu_t mmu, pmap_t pmap, vm_page_t m) 2227{ 2228 pv_entry_t pv; 2229 int loops; 2230 2231 mtx_assert(&vm_page_queue_mtx, MA_OWNED); 2232 if ((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) != 0) 2233 return (FALSE); 2234 2235 loops = 0; 2236 TAILQ_FOREACH(pv, &m->md.pv_list, pv_link) { 2237 2238 if (pv->pv_pmap == pmap) 2239 return (TRUE); 2240 2241 if (++loops >= 16) 2242 break; 2243 } 2244 return (FALSE); 2245} 2246 2247/* 2248 * Return the number of managed mappings to the given physical page that are 2249 * wired. 2250 */ 2251static int 2252mmu_booke_page_wired_mappings(mmu_t mmu, vm_page_t m) 2253{ 2254 pv_entry_t pv; 2255 pte_t *pte; 2256 int count = 0; 2257 2258 if ((m->flags & PG_FICTITIOUS) != 0) 2259 return (count); 2260 mtx_assert(&vm_page_queue_mtx, MA_OWNED); 2261 2262 TAILQ_FOREACH(pv, &m->md.pv_list, pv_link) { 2263 PMAP_LOCK(pv->pv_pmap); 2264 if ((pte = pte_find(mmu, pv->pv_pmap, pv->pv_va)) != NULL) 2265 if (PTE_ISVALID(pte) && PTE_ISWIRED(pte)) 2266 count++; 2267 PMAP_UNLOCK(pv->pv_pmap); 2268 } 2269 2270 return (count); 2271} 2272 2273static int 2274mmu_booke_dev_direct_mapped(mmu_t mmu, vm_offset_t pa, vm_size_t size) 2275{ 2276 int i; 2277 vm_offset_t va; 2278 2279 /* 2280 * This currently does not work for entries that 2281 * overlap TLB1 entries. 2282 */ 2283 for (i = 0; i < tlb1_idx; i ++) { 2284 if (tlb1_iomapped(i, pa, size, &va) == 0) 2285 return (0); 2286 } 2287 2288 return (EFAULT); 2289} 2290 2291/* 2292 * Map a set of physical memory pages into the kernel virtual address space. 2293 * Return a pointer to where it is mapped. This routine is intended to be used 2294 * for mapping device memory, NOT real memory. 2295 */ 2296static void * 2297mmu_booke_mapdev(mmu_t mmu, vm_offset_t pa, vm_size_t size) 2298{ 2299 void *res; 2300 uintptr_t va; 2301 vm_size_t sz; 2302 2303 va = (pa >= 0x80000000) ? pa : (0xe2000000 + pa); 2304 res = (void *)va; 2305 2306 do { 2307 sz = 1 << (ilog2(size) & ~1); 2308 if (bootverbose) 2309 printf("Wiring VA=%x to PA=%x (size=%x), " 2310 "using TLB1[%d]\n", va, pa, sz, tlb1_idx); 2311 tlb1_set_entry(va, pa, sz, _TLB_ENTRY_IO); 2312 size -= sz; 2313 pa += sz; 2314 va += sz; 2315 } while (size > 0); 2316 2317 return (res); 2318} 2319 2320/* 2321 * 'Unmap' a range mapped by mmu_booke_mapdev(). 2322 */ 2323static void 2324mmu_booke_unmapdev(mmu_t mmu, vm_offset_t va, vm_size_t size) 2325{ 2326 vm_offset_t base, offset; 2327 2328 //debugf("mmu_booke_unmapdev: s (va = 0x%08x)\n", va); 2329 2330 /* 2331 * Unmap only if this is inside kernel virtual space. 2332 */ 2333 if ((va >= VM_MIN_KERNEL_ADDRESS) && (va <= VM_MAX_KERNEL_ADDRESS)) { 2334 base = trunc_page(va); 2335 offset = va & PAGE_MASK; 2336 size = roundup(offset + size, PAGE_SIZE); 2337 kmem_free(kernel_map, base, size); 2338 } 2339 2340 //debugf("mmu_booke_unmapdev: e\n"); 2341} 2342 2343/* 2344 * mmu_booke_object_init_pt preloads the ptes for a given object 2345 * into the specified pmap. This eliminates the blast of soft 2346 * faults on process startup and immediately after an mmap. 2347 */ 2348static void 2349mmu_booke_object_init_pt(mmu_t mmu, pmap_t pmap, vm_offset_t addr, 2350 vm_object_t object, vm_pindex_t pindex, vm_size_t size) 2351{ 2352 VM_OBJECT_LOCK_ASSERT(object, MA_OWNED); 2353 KASSERT(object->type == OBJT_DEVICE, 2354 ("mmu_booke_object_init_pt: non-device object")); 2355} 2356 2357/* 2358 * Perform the pmap work for mincore. 
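 *
 * Not implemented on Book-E yet; the body below is only the TODO
 * placeholder.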
2359 */ 2360static int 2361mmu_booke_mincore(mmu_t mmu, pmap_t pmap, vm_offset_t addr) 2362{ 2363 2364 TODO; 2365 return (0); 2366} 2367 2368/**************************************************************************/ 2369/* TID handling */ 2370/**************************************************************************/ 2371/* 2372 * Flush all entries from TLB0 matching given tid. 2373 */ 2374static void 2375tid_flush(tlbtid_t tid) 2376{ 2377 int i, entryidx, way; 2378 2379 //debugf("tid_flush: s (tid = %d)\n", tid); 2380 2381 mtx_lock_spin(&tlb0_mutex); 2382 2383 for (i = 0; i < TLB0_SIZE; i++) { 2384 if (MAS1_GETTID(tlb0[i].mas1) == tid) { 2385 way = i / TLB0_ENTRIES_PER_WAY; 2386 entryidx = i - (way * TLB0_ENTRIES_PER_WAY); 2387 2388 //debugf("tid_flush: inval tlb0 entry %d\n", i); 2389 tlb0_inval_entry(entryidx << MAS2_TLB0_ENTRY_IDX_SHIFT, way); 2390 } 2391 } 2392 2393 mtx_unlock_spin(&tlb0_mutex); 2394 2395 //debugf("tid_flush: e\n"); 2396} 2397 2398/* 2399 * Allocate a TID. If necessary, steal one from someone else. 2400 * The new TID is flushed from the TLB before returning. 2401 */ 2402static tlbtid_t 2403tid_alloc(pmap_t pmap) 2404{ 2405 tlbtid_t tid; 2406 static tlbtid_t next_tid = TID_MIN; 2407 2408 //struct thread *td; 2409 //struct proc *p; 2410 2411 //td = PCPU_GET(curthread); 2412 //p = td->td_proc; 2413 //debugf("tid_alloc: s (pmap = 0x%08x)\n", (u_int32_t)pmap); 2414 //printf("tid_alloc: proc %d '%s'\n", p->p_pid, p->p_comm); 2415 2416 KASSERT((pmap != kernel_pmap), ("tid_alloc: kernel pmap")); 2417 2418 /* 2419 * Find a likely TID, allocate unused if possible, 2420 * skip reserved entries. 2421 */ 2422 tid = next_tid; 2423 while (tidbusy[tid] != NULL) { 2424 if (tid == next_tid) 2425 break; 2426 2427 if (tid == TID_MAX) 2428 tid = TID_MIN; 2429 else 2430 tid++; 2431 2432 } 2433 2434 /* Now clean it out */ 2435 tid_flush(tid); 2436 2437 /* If we are stealing pmap then clear its tid */ 2438 if (tidbusy[tid]) { 2439 //debugf("warning: stealing tid %d\n", tid); 2440 tidbusy[tid]->pm_tid = 0; 2441 } 2442 2443 /* Calculate next tid */ 2444 if (tid == TID_MAX) 2445 next_tid = TID_MIN; 2446 else 2447 next_tid = tid + 1; 2448 2449 tidbusy[tid] = pmap; 2450 pmap->pm_tid = tid; 2451 2452 //debugf("tid_alloc: e (%02d next = %02d)\n", tid, next_tid); 2453 return (tid); 2454} 2455 2456#if 0 2457/* 2458 * Free this pmap's TID. 
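 *
 * (Currently compiled out.)  The TID is returned to the allocator by
 * clearing its tidbusy[] slot, and tid_flush() invalidates any TLB0 entries
 * still tagged with it.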
2459 */ 2460static void 2461tid_free(pmap_t pmap) 2462{ 2463 tlbtid_t oldtid; 2464 2465 oldtid = pmap->pm_tid; 2466 2467 if (oldtid == 0) { 2468 panic("tid_free: freeing kernel tid"); 2469 } 2470 2471#ifdef DEBUG 2472 if (tidbusy[oldtid] == 0) 2473 debugf("tid_free: freeing free tid %d\n", oldtid); 2474 if (tidbusy[oldtid] != pmap) { 2475 debugf("tid_free: freeing someone esle's tid\n " 2476 "tidbusy[%d] = 0x%08x pmap = 0x%08x\n", 2477 oldtid, (u_int32_t)tidbusy[oldtid], (u_int32_t)pmap); 2478 } 2479#endif 2480 2481 tidbusy[oldtid] = NULL; 2482 tid_flush(oldtid); 2483} 2484#endif 2485 2486#if 0 2487#if DEBUG 2488static void 2489tid_print_busy(void) 2490{ 2491 int i; 2492 2493 for (i = 0; i < TID_MAX; i++) { 2494 debugf("tid %d = pmap 0x%08x", i, (u_int32_t)tidbusy[i]); 2495 if (tidbusy[i]) 2496 debugf(" pmap->tid = %d", tidbusy[i]->pm_tid); 2497 debugf("\n"); 2498 } 2499 2500} 2501#endif /* DEBUG */ 2502#endif 2503 2504/**************************************************************************/ 2505/* TLB0 handling */ 2506/**************************************************************************/ 2507 2508static void 2509tlb_print_entry(int i, u_int32_t mas1, u_int32_t mas2, u_int32_t mas3, u_int32_t mas7) 2510{ 2511 int as; 2512 char desc[3]; 2513 tlbtid_t tid; 2514 vm_size_t size; 2515 unsigned int tsize; 2516 2517 desc[2] = '\0'; 2518 if (mas1 & MAS1_VALID) 2519 desc[0] = 'V'; 2520 else 2521 desc[0] = ' '; 2522 2523 if (mas1 & MAS1_IPROT) 2524 desc[1] = 'P'; 2525 else 2526 desc[1] = ' '; 2527 2528 as = (mas1 & MAS1_TS) ? 1 : 0; 2529 tid = MAS1_GETTID(mas1); 2530 2531 tsize = (mas1 & MAS1_TSIZE_MASK) >> MAS1_TSIZE_SHIFT; 2532 size = 0; 2533 if (tsize) 2534 size = tsize2size(tsize); 2535 2536 debugf("%3d: (%s) [AS=%d] " 2537 "sz = 0x%08x tsz = %d tid = %d mas1 = 0x%08x " 2538 "mas2(va) = 0x%08x mas3(pa) = 0x%08x mas7 = 0x%08x\n", 2539 i, desc, as, size, tsize, tid, mas1, mas2, mas3, mas7); 2540} 2541 2542/* Convert TLB0 va and way number to tlb0[] table index. */ 2543static inline unsigned int 2544tlb0_tableidx(vm_offset_t va, unsigned int way) 2545{ 2546 unsigned int idx; 2547 2548 idx = (way * TLB0_ENTRIES_PER_WAY); 2549 idx += (va & MAS2_TLB0_ENTRY_IDX_MASK) >> MAS2_TLB0_ENTRY_IDX_SHIFT; 2550 return (idx); 2551} 2552 2553/* 2554 * Write given entry to TLB0 hardware. 2555 * Use 32 bit pa, clear 4 high-order bits of RPN (mas7). 2556 */ 2557static void 2558tlb0_write_entry(unsigned int idx, unsigned int way) 2559{ 2560 u_int32_t mas0, mas7, nv; 2561 2562 /* Clear high order RPN bits. */ 2563 mas7 = 0; 2564 2565 /* Preserve NV. */ 2566 mas0 = mfspr(SPR_MAS0); 2567 nv = mas0 & (TLB0_NWAYS - 1); 2568 2569 /* Select entry. */ 2570 mas0 = MAS0_TLBSEL(0) | MAS0_ESEL(way) | nv; 2571 2572 //debugf("tlb0_write_entry: s (idx=%d way=%d mas0=0x%08x " 2573 // "mas1=0x%08x mas2=0x%08x mas3=0x%08x)\n", 2574 // idx, way, mas0, tlb0[idx].mas1, 2575 // tlb0[idx].mas2, tlb0[idx].mas3); 2576 2577 mtspr(SPR_MAS0, mas0); 2578 __asm volatile("isync"); 2579 mtspr(SPR_MAS1, tlb0[idx].mas1); 2580 __asm volatile("isync"); 2581 mtspr(SPR_MAS2, tlb0[idx].mas2); 2582 __asm volatile("isync"); 2583 mtspr(SPR_MAS3, tlb0[idx].mas3); 2584 __asm volatile("isync"); 2585 mtspr(SPR_MAS7, mas7); 2586 __asm volatile("isync; tlbwe; isync; msync"); 2587 2588 //debugf("tlb0_write_entry: e\n"); 2589} 2590 2591/* 2592 * Invalidate TLB0 entry, clear correspondig tlb0 table element. 
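 *
 * The (va, way) pair is converted to a tlb0[] index with tlb0_tableidx();
 * the shadow entry is rewritten with MAS1_VALID clear and then pushed to
 * the hardware by tlb0_write_entry(), which preserves the NV hint in MAS0.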
2593 */ 2594static void 2595tlb0_inval_entry(vm_offset_t va, unsigned int way) 2596{ 2597 int idx = tlb0_tableidx(va, way); 2598 2599 //debugf("tlb0_inval_entry: s (va=0x%08x way=%d idx=%d)\n", 2600 // va, way, idx); 2601 2602 tlb0[idx].mas1 = 1 << MAS1_TSIZE_SHIFT; /* !MAS1_VALID */ 2603 tlb0[idx].mas2 = va & MAS2_EPN; 2604 tlb0[idx].mas3 = 0; 2605 2606 tlb0_write_entry(idx, way); 2607 2608 //debugf("tlb0_inval_entry: e\n"); 2609} 2610 2611/* 2612 * Invalidate TLB0 entry that corresponds to pmap/va. 2613 */ 2614static void 2615tlb0_flush_entry(pmap_t pmap, vm_offset_t va) 2616{ 2617 int idx, way; 2618 2619 //debugf("tlb0_flush_entry: s (pmap=0x%08x va=0x%08x)\n", 2620 // (u_int32_t)pmap, va); 2621 2622 mtx_lock_spin(&tlb0_mutex); 2623 2624 /* Check all TLB0 ways. */ 2625 for (way = 0; way < TLB0_NWAYS; way ++) { 2626 idx = tlb0_tableidx(va, way); 2627 2628 /* Invalidate only if entry matches va and pmap tid. */ 2629 if (((MAS1_GETTID(tlb0[idx].mas1) == pmap->pm_tid) && 2630 ((tlb0[idx].mas2 & MAS2_EPN) == va))) { 2631 tlb0_inval_entry(va, way); 2632 } 2633 } 2634 2635 mtx_unlock_spin(&tlb0_mutex); 2636 2637 //debugf("tlb0_flush_entry: e\n"); 2638} 2639 2640/* Clean TLB0 hardware and tlb0[] table. */ 2641static void 2642tlb0_init(void) 2643{ 2644 int entryidx, way; 2645 2646 debugf("tlb0_init: TLB0_SIZE = %d TLB0_NWAYS = %d\n", 2647 TLB0_SIZE, TLB0_NWAYS); 2648 2649 mtx_lock_spin(&tlb0_mutex); 2650 2651 for (way = 0; way < TLB0_NWAYS; way ++) { 2652 for (entryidx = 0; entryidx < TLB0_ENTRIES_PER_WAY; entryidx++) { 2653 tlb0_inval_entry(entryidx << MAS2_TLB0_ENTRY_IDX_SHIFT, way); 2654 } 2655 } 2656 2657 mtx_unlock_spin(&tlb0_mutex); 2658} 2659 2660#if 0 2661#if DEBUG 2662/* Print out tlb0 entries for given va. */ 2663static void 2664tlb0_print_tlbentries_va(vm_offset_t va) 2665{ 2666 u_int32_t mas0, mas1, mas2, mas3, mas7; 2667 int way, idx; 2668 2669 debugf("TLB0 entries for va = 0x%08x:\n", va); 2670 for (way = 0; way < TLB0_NWAYS; way ++) { 2671 mas0 = MAS0_TLBSEL(0) | MAS0_ESEL(way); 2672 mtspr(SPR_MAS0, mas0); 2673 __asm volatile("isync"); 2674 2675 mas2 = va & MAS2_EPN; 2676 mtspr(SPR_MAS2, mas2); 2677 __asm volatile("isync; tlbre"); 2678 2679 mas1 = mfspr(SPR_MAS1); 2680 mas2 = mfspr(SPR_MAS2); 2681 mas3 = mfspr(SPR_MAS3); 2682 mas7 = mfspr(SPR_MAS7); 2683 2684 idx = tlb0_tableidx(va, way); 2685 tlb_print_entry(idx, mas1, mas2, mas3, mas7); 2686 } 2687} 2688 2689/* Print out contents of the MAS registers for each TLB0 entry */ 2690static void 2691tlb0_print_tlbentries(void) 2692{ 2693 u_int32_t mas0, mas1, mas2, mas3, mas7; 2694 int entryidx, way, idx; 2695 2696 debugf("TLB0 entries:\n"); 2697 for (way = 0; way < TLB0_NWAYS; way ++) { 2698 for (entryidx = 0; entryidx < TLB0_ENTRIES_PER_WAY; entryidx++) { 2699 2700 mas0 = MAS0_TLBSEL(0) | MAS0_ESEL(way); 2701 mtspr(SPR_MAS0, mas0); 2702 __asm volatile("isync"); 2703 2704 mas2 = entryidx << MAS2_TLB0_ENTRY_IDX_SHIFT; 2705 mtspr(SPR_MAS2, mas2); 2706 2707 __asm volatile("isync; tlbre"); 2708 2709 mas1 = mfspr(SPR_MAS1); 2710 mas2 = mfspr(SPR_MAS2); 2711 mas3 = mfspr(SPR_MAS3); 2712 mas7 = mfspr(SPR_MAS7); 2713 2714 idx = tlb0_tableidx(mas2, way); 2715 tlb_print_entry(idx, mas1, mas2, mas3, mas7); 2716 } 2717 } 2718} 2719 2720/* Print out kernel tlb0[] table. 
 */
static void
tlb0_print_entries(void)
{
	int i;

	debugf("tlb0[] table entries:\n");
	for (i = 0; i < TLB0_SIZE; i++) {
		tlb_print_entry(i, tlb0[i].mas1,
		    tlb0[i].mas2, tlb0[i].mas3, 0);
	}
}
#endif /* DEBUG */
#endif

/**************************************************************************/
/* TLB1 handling */
/**************************************************************************/
/*
 * Write given entry to TLB1 hardware.
 * Use 32 bit pa, clear 4 high-order bits of RPN (mas7).
 */
static void
tlb1_write_entry(unsigned int idx)
{
	u_int32_t mas0, mas7;

	//debugf("tlb1_write_entry: s\n");

	/* Clear high order RPN bits */
	mas7 = 0;

	/* Select entry */
	mas0 = MAS0_TLBSEL(1) | MAS0_ESEL(idx);
	//debugf("tlb1_write_entry: mas0 = 0x%08x\n", mas0);

	mtspr(SPR_MAS0, mas0);
	__asm volatile("isync");
	mtspr(SPR_MAS1, tlb1[idx].mas1);
	__asm volatile("isync");
	mtspr(SPR_MAS2, tlb1[idx].mas2);
	__asm volatile("isync");
	mtspr(SPR_MAS3, tlb1[idx].mas3);
	__asm volatile("isync");
	mtspr(SPR_MAS7, mas7);
	__asm volatile("isync; tlbwe; isync; msync");

	//debugf("tlb1_write_entry: e\n");
}

/*
 * Return the largest uint value log such that 2^log <= num.
 */
static unsigned int
ilog2(unsigned int num)
{
	int lz;

	__asm ("cntlzw %0, %1" : "=r" (lz) : "r" (num));
	return (31 - lz);
}

/*
 * Convert TLB TSIZE value to mapped region size.
 */
static vm_size_t
tsize2size(unsigned int tsize)
{

	/*
	 * size = 4^tsize KB
	 * size = 4^tsize * 2^10 = 2^(2 * tsize + 10)
	 */

	return ((1 << (2 * tsize)) * 1024);
}

/*
 * Convert region size (must be power of 4) to TLB TSIZE value.
 */
static unsigned int
size2tsize(vm_size_t size)
{

	/*
	 * tsize = log2(size) / 2 - 5
	 */

	return (ilog2(size) / 2 - 5);
}

/*
 * Setup entry in a sw tlb1 table, write entry to TLB1 hardware.
 * This routine is used for low level operations on the TLB1,
 * for creating temporary as well as permanent mappings (tlb1_set_entry).
 *
 * We assume kernel mappings only, thus all entries created have supervisor
 * permission bits set and user permission bits cleared.
 *
 * Provided mapping size must be a power of 4.
 * Mapping flags must be a combination of MAS2_[WIMG].
 * Entry TID is set to _tid which must not exceed 8 bit value.
 * Entry TS is set to either 0 or MAS1_TS based on provided _ts.
 */
static void
__tlb1_set_entry(unsigned int idx, vm_offset_t va, vm_offset_t pa,
    vm_size_t size, u_int32_t flags, unsigned int _tid, unsigned int _ts)
{
	int tsize;
	u_int32_t ts, tid;

	//debugf("__tlb1_set_entry: s (idx = %d va = 0x%08x pa = 0x%08x "
	//    "size = 0x%08x flags = 0x%08x _tid = %d _ts = %d\n",
	//    idx, va, pa, size, flags, _tid, _ts);

	/* Convert size to TSIZE */
	tsize = size2tsize(size);
	//debugf("__tlb1_set_entry: tsize = %d\n", tsize);

	tid = (_tid << MAS1_TID_SHIFT) & MAS1_TID_MASK;
	ts = (_ts) ?
MAS1_TS : 0; 2841 tlb1[idx].mas1 = MAS1_VALID | MAS1_IPROT | ts | tid; 2842 tlb1[idx].mas1 |= ((tsize << MAS1_TSIZE_SHIFT) & MAS1_TSIZE_MASK); 2843 2844 tlb1[idx].mas2 = (va & MAS2_EPN) | flags; 2845 2846 /* Set supervisor rwx permission bits */ 2847 tlb1[idx].mas3 = (pa & MAS3_RPN) | MAS3_SR | MAS3_SW | MAS3_SX; 2848 2849 //debugf("__tlb1_set_entry: mas1 = %08x mas2 = %08x mas3 = 0x%08x\n", 2850 // tlb1[idx].mas1, tlb1[idx].mas2, tlb1[idx].mas3); 2851 2852 tlb1_write_entry(idx); 2853 //debugf("__tlb1_set_entry: e\n"); 2854} 2855 2856/* 2857 * Register permanent kernel mapping in TLB1. 2858 * 2859 * Entries are created starting from index 0 (current free entry is 2860 * kept in tlb1_idx) and are not supposed to be invalidated. 2861 */ 2862static int 2863tlb1_set_entry(vm_offset_t va, vm_offset_t pa, vm_size_t size, u_int32_t flags) 2864{ 2865 //debugf("tlb1_set_entry: s (tlb1_idx = %d va = 0x%08x pa = 0x%08x " 2866 // "size = 0x%08x flags = 0x%08x\n", 2867 // tlb1_idx, va, pa, size, flags); 2868 2869 if (tlb1_idx >= TLB1_SIZE) { 2870 //debugf("tlb1_set_entry: e (tlb1 full!)\n"); 2871 return (-1); 2872 } 2873 2874 /* TS = 0, TID = 0 */ 2875 __tlb1_set_entry(tlb1_idx++, va, pa, size, flags, KERNEL_TID, 0); 2876 //debugf("tlb1_set_entry: e\n"); 2877 return (0); 2878} 2879 2880/* 2881 * Invalidate TLB1 entry, clear correspondig tlb1 table element. 2882 * This routine is used to clear temporary entries created 2883 * early in a locore.S or through the use of __tlb1_set_entry(). 2884 */ 2885void 2886tlb1_inval_entry(unsigned int idx) 2887{ 2888 vm_offset_t va; 2889 2890 va = tlb1[idx].mas2 & MAS2_EPN; 2891 2892 tlb1[idx].mas1 = 0; /* !MAS1_VALID */ 2893 tlb1[idx].mas2 = 0; 2894 tlb1[idx].mas3 = 0; 2895 2896 tlb1_write_entry(idx); 2897} 2898 2899static int 2900tlb1_entry_size_cmp(const void *a, const void *b) 2901{ 2902 const vm_size_t *sza; 2903 const vm_size_t *szb; 2904 2905 sza = a; 2906 szb = b; 2907 if (*sza > *szb) 2908 return (-1); 2909 else if (*sza < *szb) 2910 return (1); 2911 else 2912 return (0); 2913} 2914 2915/* 2916 * Mapin contiguous RAM region into the TLB1 using maximum of 2917 * KERNEL_REGION_MAX_TLB_ENTRIES entries. 2918 * 2919 * If necessarry round up last entry size and return total size 2920 * used by all allocated entries. 2921 */ 2922vm_size_t 2923tlb1_mapin_region(vm_offset_t va, vm_offset_t pa, vm_size_t size) 2924{ 2925 vm_size_t entry_size[KERNEL_REGION_MAX_TLB_ENTRIES]; 2926 vm_size_t mapped_size, sz, esz; 2927 unsigned int log; 2928 int i; 2929 2930 debugf("tlb1_mapin_region:\n"); 2931 debugf(" region size = 0x%08x va = 0x%08x pa = 0x%08x\n", size, va, pa); 2932 2933 mapped_size = 0; 2934 sz = size; 2935 memset(entry_size, 0, sizeof(entry_size)); 2936 2937 /* Calculate entry sizes. */ 2938 for (i = 0; i < KERNEL_REGION_MAX_TLB_ENTRIES && sz > 0; i++) { 2939 2940 /* Largest region that is power of 4 and fits within size */ 2941 log = ilog2(sz)/2; 2942 esz = 1 << (2 * log); 2943 2944 /* Minimum region size is 4KB */ 2945 if (esz < (1 << 12)) 2946 esz = 1 << 12; 2947 2948 /* If this is last entry cover remaining size. */ 2949 if (i == KERNEL_REGION_MAX_TLB_ENTRIES - 1) { 2950 while (esz < sz) 2951 esz = esz << 2; 2952 } 2953 2954 entry_size[i] = esz; 2955 mapped_size += esz; 2956 if (esz < sz) 2957 sz -= esz; 2958 else 2959 sz = 0; 2960 } 2961 2962 /* Sort entry sizes, required to get proper entry address alignment. */ 2963 qsort(entry_size, KERNEL_REGION_MAX_TLB_ENTRIES, 2964 sizeof(vm_size_t), tlb1_entry_size_cmp); 2965 2966 /* Load TLB1 entries. 
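 *
 * Each precomputed entry_size[] slot becomes one TLB1 entry via
 * tlb1_set_entry().  Sizes are powers of 4, so, with enough free slots, a
 * 24 MB region for example ends up as one 16 MB and two 4 MB entries; the
 * descending sort above keeps va and pa suitably aligned for each
 * successive entry.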
*/ 2967 for (i = 0; i < KERNEL_REGION_MAX_TLB_ENTRIES; i++) { 2968 esz = entry_size[i]; 2969 if (!esz) 2970 break; 2971 debugf(" entry %d: sz = 0x%08x (va = 0x%08x pa = 0x%08x)\n", 2972 tlb1_idx, esz, va, pa); 2973 tlb1_set_entry(va, pa, esz, _TLB_ENTRY_MEM); 2974 2975 va += esz; 2976 pa += esz; 2977 } 2978 2979 debugf(" mapped size 0x%08x (wasted space 0x%08x)\n", 2980 mapped_size, mapped_size - size); 2981 2982 return (mapped_size); 2983} 2984 2985/* 2986 * TLB1 initialization routine, to be called after the very first 2987 * assembler level setup done in locore.S. 2988 */ 2989void 2990tlb1_init(vm_offset_t ccsrbar) 2991{ 2992 uint32_t mas0; 2993 2994 /* TBL1[1] is used to map the kernel. Save that entry. */ 2995 mas0 = MAS0_TLBSEL(1) | MAS0_ESEL(1); 2996 mtspr(SPR_MAS0, mas0); 2997 __asm __volatile("isync; tlbre"); 2998 2999 tlb1[1].mas1 = mfspr(SPR_MAS1); 3000 tlb1[1].mas2 = mfspr(SPR_MAS2); 3001 tlb1[1].mas3 = mfspr(SPR_MAS3); 3002 3003 /* Mapin CCSRBAR in TLB1[0] */ 3004 __tlb1_set_entry(0, CCSRBAR_VA, ccsrbar, CCSRBAR_SIZE, 3005 _TLB_ENTRY_IO, KERNEL_TID, 0); 3006 3007 /* Setup TLB miss defaults */ 3008 set_mas4_defaults(); 3009 3010 /* Reset next available TLB1 entry index. */ 3011 tlb1_idx = 2; 3012} 3013 3014/* 3015 * Setup MAS4 defaults. 3016 * These values are loaded to MAS0-2 on a TLB miss. 3017 */ 3018static void 3019set_mas4_defaults(void) 3020{ 3021 u_int32_t mas4; 3022 3023 /* Defaults: TLB0, PID0, TSIZED=4K */ 3024 mas4 = MAS4_TLBSELD0; 3025 mas4 |= (TLB_SIZE_4K << MAS4_TSIZED_SHIFT) & MAS4_TSIZED_MASK; 3026 3027 mtspr(SPR_MAS4, mas4); 3028 __asm volatile("isync"); 3029} 3030 3031/* 3032 * Print out contents of the MAS registers for each TLB1 entry 3033 */ 3034void 3035tlb1_print_tlbentries(void) 3036{ 3037 u_int32_t mas0, mas1, mas2, mas3, mas7; 3038 int i; 3039 3040 debugf("TLB1 entries:\n"); 3041 for (i = 0; i < TLB1_SIZE; i++) { 3042 3043 mas0 = MAS0_TLBSEL(1) | MAS0_ESEL(i); 3044 mtspr(SPR_MAS0, mas0); 3045 3046 __asm volatile("isync; tlbre"); 3047 3048 mas1 = mfspr(SPR_MAS1); 3049 mas2 = mfspr(SPR_MAS2); 3050 mas3 = mfspr(SPR_MAS3); 3051 mas7 = mfspr(SPR_MAS7); 3052 3053 tlb_print_entry(i, mas1, mas2, mas3, mas7); 3054 } 3055} 3056 3057/* 3058 * Print out contents of the in-ram tlb1 table. 3059 */ 3060void 3061tlb1_print_entries(void) 3062{ 3063 int i; 3064 3065 debugf("tlb1[] table entries:\n"); 3066 for (i = 0; i < TLB1_SIZE; i++) 3067 tlb_print_entry(i, tlb1[i].mas1, tlb1[i].mas2, tlb1[i].mas3, 0); 3068} 3069 3070/* 3071 * Return 0 if the physical IO range is encompassed by one of the 3072 * the TLB1 entries, otherwise return related error code. 3073 */ 3074static int 3075tlb1_iomapped(int i, vm_paddr_t pa, vm_size_t size, vm_offset_t *va) 3076{ 3077 u_int32_t prot; 3078 vm_paddr_t pa_start; 3079 vm_paddr_t pa_end; 3080 unsigned int entry_tsize; 3081 vm_size_t entry_size; 3082 3083 *va = (vm_offset_t)NULL; 3084 3085 /* Skip invalid entries */ 3086 if (!(tlb1[i].mas1 & MAS1_VALID)) 3087 return (EINVAL); 3088 3089 /* 3090 * The entry must be cache-inhibited, guarded, and r/w 3091 * so it can function as an i/o page 3092 */ 3093 prot = tlb1[i].mas2 & (MAS2_I | MAS2_G); 3094 if (prot != (MAS2_I | MAS2_G)) 3095 return (EPERM); 3096 3097 prot = tlb1[i].mas3 & (MAS3_SR | MAS3_SW); 3098 if (prot != (MAS3_SR | MAS3_SW)) 3099 return (EPERM); 3100 3101 /* The address should be within the entry range. 
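 *
 * i.e. the requested [pa, pa + size) window must lie inside the physical
 * range implied by the entry's TSIZE, otherwise ERANGE is returned below.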
*/ 3102 entry_tsize = (tlb1[i].mas1 & MAS1_TSIZE_MASK) >> MAS1_TSIZE_SHIFT; 3103 KASSERT((entry_tsize), ("tlb1_iomapped: invalid entry tsize")); 3104 3105 entry_size = tsize2size(entry_tsize); 3106 pa_start = tlb1[i].mas3 & MAS3_RPN; 3107 pa_end = pa_start + entry_size - 1; 3108 3109 if ((pa < pa_start) || ((pa + size) > pa_end)) 3110 return (ERANGE); 3111 3112 /* Return virtual address of this mapping. */ 3113 *va = (tlb1[i].mas2 & MAS2_EPN) + (pa - pa_start); 3114 return (0); 3115} 3116
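
/*
 * Note on the TSIZE encoding used above (illustrative example): a TLB entry
 * maps 4^TSIZE KB, so tsize2size() and size2tsize() round-trip for
 * power-of-4 sizes, e.g.
 *
 *	tsize2size(7)     == (1 << 14) * 1024 == 16 MB
 *	size2tsize(16 MB) == ilog2(1 << 24) / 2 - 5 == 12 - 5 == 7
 */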