pmap.c revision 192067
1/*- 2 * Copyright (C) 2007-2008 Semihalf, Rafal Jaworowski <raj@semihalf.com> 3 * Copyright (C) 2006 Semihalf, Marian Balakowicz <m8@semihalf.com> 4 * All rights reserved. 5 * 6 * Redistribution and use in source and binary forms, with or without 7 * modification, are permitted provided that the following conditions 8 * are met: 9 * 1. Redistributions of source code must retain the above copyright 10 * notice, this list of conditions and the following disclaimer. 11 * 2. Redistributions in binary form must reproduce the above copyright 12 * notice, this list of conditions and the following disclaimer in the 13 * documentation and/or other materials provided with the distribution. 14 * 15 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR 16 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES 17 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN 18 * NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 19 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED 20 * TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR 21 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF 22 * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING 23 * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS 24 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 25 * 26 * Some hw specific parts of this pmap were derived or influenced 27 * by NetBSD's ibm4xx pmap module. More generic code is shared with 28 * a few other pmap modules from the FreeBSD tree. 29 */ 30 31 /* 32 * VM layout notes: 33 * 34 * Kernel and user threads run within one common virtual address space 35 * defined by AS=0. 36 * 37 * Virtual address space layout: 38 * ----------------------------- 39 * 0x0000_0000 - 0xafff_ffff : user process 40 * 0xb000_0000 - 0xbfff_ffff : pmap_mapdev()-ed area (PCI/PCIE etc.) 41 * 0xc000_0000 - 0xc0ff_ffff : kernel reserved 42 * 0xc000_0000 - data_end : kernel code+data, env, metadata etc. 
43 * 0xc100_0000 - 0xfeef_ffff : KVA 44 * 0xc100_0000 - 0xc100_3fff : reserved for page zero/copy 45 * 0xc100_4000 - 0xc200_3fff : reserved for ptbl bufs 46 * 0xc200_4000 - 0xc200_8fff : guard page + kstack0 47 * 0xc200_9000 - 0xfeef_ffff : actual free KVA space 48 * 0xfef0_0000 - 0xffff_ffff : I/O devices region 49 */ 50 51#include <sys/cdefs.h> 52__FBSDID("$FreeBSD: head/sys/powerpc/booke/pmap.c 192067 2009-05-14 00:34:26Z nwhitehorn $"); 53 54#include <sys/types.h> 55#include <sys/param.h> 56#include <sys/malloc.h> 57#include <sys/ktr.h> 58#include <sys/proc.h> 59#include <sys/user.h> 60#include <sys/queue.h> 61#include <sys/systm.h> 62#include <sys/kernel.h> 63#include <sys/msgbuf.h> 64#include <sys/lock.h> 65#include <sys/mutex.h> 66#include <sys/vmmeter.h> 67 68#include <vm/vm.h> 69#include <vm/vm_page.h> 70#include <vm/vm_kern.h> 71#include <vm/vm_pageout.h> 72#include <vm/vm_extern.h> 73#include <vm/vm_object.h> 74#include <vm/vm_param.h> 75#include <vm/vm_map.h> 76#include <vm/vm_pager.h> 77#include <vm/uma.h> 78 79#include <machine/bootinfo.h> 80#include <machine/cpu.h> 81#include <machine/pcb.h> 82#include <machine/platform.h> 83 84#include <machine/tlb.h> 85#include <machine/spr.h> 86#include <machine/vmparam.h> 87#include <machine/md_var.h> 88#include <machine/mmuvar.h> 89#include <machine/pmap.h> 90#include <machine/pte.h> 91 92#include "mmu_if.h" 93 94#define DEBUG 95#undef DEBUG 96 97#ifdef DEBUG 98#define debugf(fmt, args...) printf(fmt, ##args) 99#else 100#define debugf(fmt, args...) 101#endif 102 103#define TODO panic("%s: not implemented", __func__); 104 105#include "opt_sched.h" 106#ifndef SCHED_4BSD 107#error "e500 only works with SCHED_4BSD which uses a global scheduler lock." 108#endif 109extern struct mtx sched_lock; 110 111extern int dumpsys_minidump; 112 113extern unsigned char _etext[]; 114extern unsigned char _end[]; 115 116/* Kernel physical load address. */ 117extern uint32_t kernload; 118vm_offset_t kernstart; 119vm_size_t kernsize; 120 121/* Message buffer and tables. */ 122static vm_offset_t data_start; 123static vm_size_t data_end; 124 125/* Phys/avail memory regions. */ 126static struct mem_region *availmem_regions; 127static int availmem_regions_sz; 128static struct mem_region *physmem_regions; 129static int physmem_regions_sz; 130 131/* Reserved KVA space and mutex for mmu_booke_zero_page. */ 132static vm_offset_t zero_page_va; 133static struct mtx zero_page_mutex; 134 135static struct mtx tlbivax_mutex; 136 137/* 138 * Reserved KVA space for mmu_booke_zero_page_idle. This is used 139 * by the idle thread only, no lock required. 140 */ 141static vm_offset_t zero_page_idle_va; 142 143/* Reserved KVA space and mutex for mmu_booke_copy_page. */ 144static vm_offset_t copy_page_src_va; 145static vm_offset_t copy_page_dst_va; 146static struct mtx copy_page_mutex; 147 148/**************************************************************************/ 149/* PMAP */ 150/**************************************************************************/ 151 152static void mmu_booke_enter_locked(mmu_t, pmap_t, vm_offset_t, vm_page_t, 153 vm_prot_t, boolean_t); 154 155unsigned int kptbl_min; /* Index of the first kernel ptbl. */ 156unsigned int kernel_ptbls; /* Number of KVA ptbls. */ 157 158static int pagedaemon_waken; 159 160/* 161 * If user pmap is processed with mmu_booke_remove and the resident count 162 * drops to 0, there are no more pages to remove, so we need not continue. 
163 */ 164#define PMAP_REMOVE_DONE(pmap) \ 165 ((pmap) != kernel_pmap && (pmap)->pm_stats.resident_count == 0) 166 167extern void tlb_lock(uint32_t *); 168extern void tlb_unlock(uint32_t *); 169extern void tid_flush(tlbtid_t); 170 171/**************************************************************************/ 172/* TLB and TID handling */ 173/**************************************************************************/ 174 175/* Translation ID busy table */ 176static volatile pmap_t tidbusy[MAXCPU][TID_MAX + 1]; 177 178/* 179 * TLB0 capabilities (entry, way numbers etc.). These can vary between e500 180 * core revisions and should be read from h/w registers during early config. 181 */ 182uint32_t tlb0_entries; 183uint32_t tlb0_ways; 184uint32_t tlb0_entries_per_way; 185 186#define TLB0_ENTRIES (tlb0_entries) 187#define TLB0_WAYS (tlb0_ways) 188#define TLB0_ENTRIES_PER_WAY (tlb0_entries_per_way) 189 190#define TLB1_ENTRIES 16 191 192/* In-ram copy of the TLB1 */ 193static tlb_entry_t tlb1[TLB1_ENTRIES]; 194 195/* Next free entry in the TLB1 */ 196static unsigned int tlb1_idx; 197 198static tlbtid_t tid_alloc(struct pmap *); 199 200static void tlb_print_entry(int, uint32_t, uint32_t, uint32_t, uint32_t); 201 202static int tlb1_set_entry(vm_offset_t, vm_offset_t, vm_size_t, uint32_t); 203static void tlb1_write_entry(unsigned int); 204static int tlb1_iomapped(int, vm_paddr_t, vm_size_t, vm_offset_t *); 205static vm_size_t tlb1_mapin_region(vm_offset_t, vm_offset_t, vm_size_t); 206 207static vm_size_t tsize2size(unsigned int); 208static unsigned int size2tsize(vm_size_t); 209static unsigned int ilog2(unsigned int); 210 211static void set_mas4_defaults(void); 212 213static inline void tlb0_flush_entry(vm_offset_t); 214static inline unsigned int tlb0_tableidx(vm_offset_t, unsigned int); 215 216/**************************************************************************/ 217/* Page table management */ 218/**************************************************************************/ 219 220/* Data for the pv entry allocation mechanism */ 221static uma_zone_t pvzone; 222static struct vm_object pvzone_obj; 223static int pv_entry_count = 0, pv_entry_max = 0, pv_entry_high_water = 0; 224 225#define PV_ENTRY_ZONE_MIN 2048 /* min pv entries in uma zone */ 226 227#ifndef PMAP_SHPGPERPROC 228#define PMAP_SHPGPERPROC 200 229#endif 230 231static void ptbl_init(void); 232static struct ptbl_buf *ptbl_buf_alloc(void); 233static void ptbl_buf_free(struct ptbl_buf *); 234static void ptbl_free_pmap_ptbl(pmap_t, pte_t *); 235 236static pte_t *ptbl_alloc(mmu_t, pmap_t, unsigned int); 237static void ptbl_free(mmu_t, pmap_t, unsigned int); 238static void ptbl_hold(mmu_t, pmap_t, unsigned int); 239static int ptbl_unhold(mmu_t, pmap_t, unsigned int); 240 241static vm_paddr_t pte_vatopa(mmu_t, pmap_t, vm_offset_t); 242static pte_t *pte_find(mmu_t, pmap_t, vm_offset_t); 243static void pte_enter(mmu_t, pmap_t, vm_page_t, vm_offset_t, uint32_t); 244static int pte_remove(mmu_t, pmap_t, vm_offset_t, uint8_t); 245 246static pv_entry_t pv_alloc(void); 247static void pv_free(pv_entry_t); 248static void pv_insert(pmap_t, vm_offset_t, vm_page_t); 249static void pv_remove(pmap_t, vm_offset_t, vm_page_t); 250 251/* Number of kva ptbl buffers, each covering one ptbl (PTBL_PAGES). */ 252#define PTBL_BUFS (128 * 16) 253 254struct ptbl_buf { 255 TAILQ_ENTRY(ptbl_buf) link; /* list link */ 256 vm_offset_t kva; /* va of mapping */ 257}; 258 259/* ptbl free list and a lock used for access synchronization. 
*/ 260static TAILQ_HEAD(, ptbl_buf) ptbl_buf_freelist; 261static struct mtx ptbl_buf_freelist_lock; 262 263/* Base address of kva space allocated for ptbl bufs. */ 264static vm_offset_t ptbl_buf_pool_vabase; 265 266/* Pointer to ptbl_buf structures. */ 267static struct ptbl_buf *ptbl_bufs; 268 269/* 270 * Kernel MMU interface 271 */ 272static void mmu_booke_change_wiring(mmu_t, pmap_t, vm_offset_t, boolean_t); 273static void mmu_booke_clear_modify(mmu_t, vm_page_t); 274static void mmu_booke_clear_reference(mmu_t, vm_page_t); 275static void mmu_booke_copy(pmap_t, pmap_t, vm_offset_t, vm_size_t, 276 vm_offset_t); 277static void mmu_booke_copy_page(mmu_t, vm_page_t, vm_page_t); 278static void mmu_booke_enter(mmu_t, pmap_t, vm_offset_t, vm_page_t, 279 vm_prot_t, boolean_t); 280static void mmu_booke_enter_object(mmu_t, pmap_t, vm_offset_t, vm_offset_t, 281 vm_page_t, vm_prot_t); 282static void mmu_booke_enter_quick(mmu_t, pmap_t, vm_offset_t, vm_page_t, 283 vm_prot_t); 284static vm_paddr_t mmu_booke_extract(mmu_t, pmap_t, vm_offset_t); 285static vm_page_t mmu_booke_extract_and_hold(mmu_t, pmap_t, vm_offset_t, 286 vm_prot_t); 287static void mmu_booke_init(mmu_t); 288static boolean_t mmu_booke_is_modified(mmu_t, vm_page_t); 289static boolean_t mmu_booke_is_prefaultable(mmu_t, pmap_t, vm_offset_t); 290static boolean_t mmu_booke_ts_referenced(mmu_t, vm_page_t); 291static vm_offset_t mmu_booke_map(mmu_t, vm_offset_t *, vm_offset_t, vm_offset_t, 292 int); 293static int mmu_booke_mincore(mmu_t, pmap_t, vm_offset_t); 294static void mmu_booke_object_init_pt(mmu_t, pmap_t, vm_offset_t, 295 vm_object_t, vm_pindex_t, vm_size_t); 296static boolean_t mmu_booke_page_exists_quick(mmu_t, pmap_t, vm_page_t); 297static void mmu_booke_page_init(mmu_t, vm_page_t); 298static int mmu_booke_page_wired_mappings(mmu_t, vm_page_t); 299static void mmu_booke_pinit(mmu_t, pmap_t); 300static void mmu_booke_pinit0(mmu_t, pmap_t); 301static void mmu_booke_protect(mmu_t, pmap_t, vm_offset_t, vm_offset_t, 302 vm_prot_t); 303static void mmu_booke_qenter(mmu_t, vm_offset_t, vm_page_t *, int); 304static void mmu_booke_qremove(mmu_t, vm_offset_t, int); 305static void mmu_booke_release(mmu_t, pmap_t); 306static void mmu_booke_remove(mmu_t, pmap_t, vm_offset_t, vm_offset_t); 307static void mmu_booke_remove_all(mmu_t, vm_page_t); 308static void mmu_booke_remove_write(mmu_t, vm_page_t); 309static void mmu_booke_zero_page(mmu_t, vm_page_t); 310static void mmu_booke_zero_page_area(mmu_t, vm_page_t, int, int); 311static void mmu_booke_zero_page_idle(mmu_t, vm_page_t); 312static void mmu_booke_activate(mmu_t, struct thread *); 313static void mmu_booke_deactivate(mmu_t, struct thread *); 314static void mmu_booke_bootstrap(mmu_t, vm_offset_t, vm_offset_t); 315static void *mmu_booke_mapdev(mmu_t, vm_offset_t, vm_size_t); 316static void mmu_booke_unmapdev(mmu_t, vm_offset_t, vm_size_t); 317static vm_offset_t mmu_booke_kextract(mmu_t, vm_offset_t); 318static void mmu_booke_kenter(mmu_t, vm_offset_t, vm_offset_t); 319static void mmu_booke_kremove(mmu_t, vm_offset_t); 320static boolean_t mmu_booke_dev_direct_mapped(mmu_t, vm_offset_t, vm_size_t); 321static boolean_t mmu_booke_page_executable(mmu_t, vm_page_t); 322static vm_offset_t mmu_booke_dumpsys_map(mmu_t, struct pmap_md *, 323 vm_size_t, vm_size_t *); 324static void mmu_booke_dumpsys_unmap(mmu_t, struct pmap_md *, 325 vm_size_t, vm_offset_t); 326static struct pmap_md *mmu_booke_scan_md(mmu_t, struct pmap_md *); 327 328static mmu_method_t mmu_booke_methods[] = { 329 /* pmap dispatcher 
interface */ 330 MMUMETHOD(mmu_change_wiring, mmu_booke_change_wiring), 331 MMUMETHOD(mmu_clear_modify, mmu_booke_clear_modify), 332 MMUMETHOD(mmu_clear_reference, mmu_booke_clear_reference), 333 MMUMETHOD(mmu_copy, mmu_booke_copy), 334 MMUMETHOD(mmu_copy_page, mmu_booke_copy_page), 335 MMUMETHOD(mmu_enter, mmu_booke_enter), 336 MMUMETHOD(mmu_enter_object, mmu_booke_enter_object), 337 MMUMETHOD(mmu_enter_quick, mmu_booke_enter_quick), 338 MMUMETHOD(mmu_extract, mmu_booke_extract), 339 MMUMETHOD(mmu_extract_and_hold, mmu_booke_extract_and_hold), 340 MMUMETHOD(mmu_init, mmu_booke_init), 341 MMUMETHOD(mmu_is_modified, mmu_booke_is_modified), 342 MMUMETHOD(mmu_is_prefaultable, mmu_booke_is_prefaultable), 343 MMUMETHOD(mmu_ts_referenced, mmu_booke_ts_referenced), 344 MMUMETHOD(mmu_map, mmu_booke_map), 345 MMUMETHOD(mmu_mincore, mmu_booke_mincore), 346 MMUMETHOD(mmu_object_init_pt, mmu_booke_object_init_pt), 347 MMUMETHOD(mmu_page_exists_quick,mmu_booke_page_exists_quick), 348 MMUMETHOD(mmu_page_init, mmu_booke_page_init), 349 MMUMETHOD(mmu_page_wired_mappings, mmu_booke_page_wired_mappings), 350 MMUMETHOD(mmu_pinit, mmu_booke_pinit), 351 MMUMETHOD(mmu_pinit0, mmu_booke_pinit0), 352 MMUMETHOD(mmu_protect, mmu_booke_protect), 353 MMUMETHOD(mmu_qenter, mmu_booke_qenter), 354 MMUMETHOD(mmu_qremove, mmu_booke_qremove), 355 MMUMETHOD(mmu_release, mmu_booke_release), 356 MMUMETHOD(mmu_remove, mmu_booke_remove), 357 MMUMETHOD(mmu_remove_all, mmu_booke_remove_all), 358 MMUMETHOD(mmu_remove_write, mmu_booke_remove_write), 359 MMUMETHOD(mmu_zero_page, mmu_booke_zero_page), 360 MMUMETHOD(mmu_zero_page_area, mmu_booke_zero_page_area), 361 MMUMETHOD(mmu_zero_page_idle, mmu_booke_zero_page_idle), 362 MMUMETHOD(mmu_activate, mmu_booke_activate), 363 MMUMETHOD(mmu_deactivate, mmu_booke_deactivate), 364 365 /* Internal interfaces */ 366 MMUMETHOD(mmu_bootstrap, mmu_booke_bootstrap), 367 MMUMETHOD(mmu_dev_direct_mapped,mmu_booke_dev_direct_mapped), 368 MMUMETHOD(mmu_mapdev, mmu_booke_mapdev), 369 MMUMETHOD(mmu_kenter, mmu_booke_kenter), 370 MMUMETHOD(mmu_kextract, mmu_booke_kextract), 371/* MMUMETHOD(mmu_kremove, mmu_booke_kremove), */ 372 MMUMETHOD(mmu_page_executable, mmu_booke_page_executable), 373 MMUMETHOD(mmu_unmapdev, mmu_booke_unmapdev), 374 375 /* dumpsys() support */ 376 MMUMETHOD(mmu_dumpsys_map, mmu_booke_dumpsys_map), 377 MMUMETHOD(mmu_dumpsys_unmap, mmu_booke_dumpsys_unmap), 378 MMUMETHOD(mmu_scan_md, mmu_booke_scan_md), 379 380 { 0, 0 } 381}; 382 383static mmu_def_t booke_mmu = { 384 MMU_TYPE_BOOKE, 385 mmu_booke_methods, 386 0 387}; 388MMU_DEF(booke_mmu); 389 390/* Return number of entries in TLB0. */ 391static __inline void 392tlb0_get_tlbconf(void) 393{ 394 uint32_t tlb0_cfg; 395 396 tlb0_cfg = mfspr(SPR_TLB0CFG); 397 tlb0_entries = tlb0_cfg & TLBCFG_NENTRY_MASK; 398 tlb0_ways = (tlb0_cfg & TLBCFG_ASSOC_MASK) >> TLBCFG_ASSOC_SHIFT; 399 tlb0_entries_per_way = tlb0_entries / tlb0_ways; 400} 401 402/* Initialize pool of kva ptbl buffers. 
*/ 403static void 404ptbl_init(void) 405{ 406 int i; 407 408 CTR3(KTR_PMAP, "%s: s (ptbl_bufs = 0x%08x size 0x%08x)", __func__, 409 (uint32_t)ptbl_bufs, sizeof(struct ptbl_buf) * PTBL_BUFS); 410 CTR3(KTR_PMAP, "%s: s (ptbl_buf_pool_vabase = 0x%08x size = 0x%08x)", 411 __func__, ptbl_buf_pool_vabase, PTBL_BUFS * PTBL_PAGES * PAGE_SIZE); 412 413 mtx_init(&ptbl_buf_freelist_lock, "ptbl bufs lock", NULL, MTX_DEF); 414 TAILQ_INIT(&ptbl_buf_freelist); 415 416 for (i = 0; i < PTBL_BUFS; i++) { 417 ptbl_bufs[i].kva = ptbl_buf_pool_vabase + i * PTBL_PAGES * PAGE_SIZE; 418 TAILQ_INSERT_TAIL(&ptbl_buf_freelist, &ptbl_bufs[i], link); 419 } 420} 421 422/* Get a ptbl_buf from the freelist. */ 423static struct ptbl_buf * 424ptbl_buf_alloc(void) 425{ 426 struct ptbl_buf *buf; 427 428 mtx_lock(&ptbl_buf_freelist_lock); 429 buf = TAILQ_FIRST(&ptbl_buf_freelist); 430 if (buf != NULL) 431 TAILQ_REMOVE(&ptbl_buf_freelist, buf, link); 432 mtx_unlock(&ptbl_buf_freelist_lock); 433 434 CTR2(KTR_PMAP, "%s: buf = %p", __func__, buf); 435 436 return (buf); 437} 438 439/* Return ptbl buf to the free pool. */ 440static void 441ptbl_buf_free(struct ptbl_buf *buf) 442{ 443 444 CTR2(KTR_PMAP, "%s: buf = %p", __func__, buf); 445 446 mtx_lock(&ptbl_buf_freelist_lock); 447 TAILQ_INSERT_TAIL(&ptbl_buf_freelist, buf, link); 448 mtx_unlock(&ptbl_buf_freelist_lock); 449} 450 451/* 452 * Search the pmap's list of allocated ptbl bufs and free the one that maps the given ptbl. 453 */ 454static void 455ptbl_free_pmap_ptbl(pmap_t pmap, pte_t *ptbl) 456{ 457 struct ptbl_buf *pbuf; 458 459 CTR2(KTR_PMAP, "%s: ptbl = %p", __func__, ptbl); 460 461 PMAP_LOCK_ASSERT(pmap, MA_OWNED); 462 463 TAILQ_FOREACH(pbuf, &pmap->pm_ptbl_list, link) 464 if (pbuf->kva == (vm_offset_t)ptbl) { 465 /* Remove from pmap ptbl buf list. */ 466 TAILQ_REMOVE(&pmap->pm_ptbl_list, pbuf, link); 467 468 /* Free corresponding ptbl buf. */ 469 ptbl_buf_free(pbuf); 470 break; 471 } 472} 473 474/* Allocate page table. */ 475static pte_t * 476ptbl_alloc(mmu_t mmu, pmap_t pmap, unsigned int pdir_idx) 477{ 478 vm_page_t mtbl[PTBL_PAGES]; 479 vm_page_t m; 480 struct ptbl_buf *pbuf; 481 unsigned int pidx; 482 pte_t *ptbl; 483 int i; 484 485 CTR4(KTR_PMAP, "%s: pmap = %p su = %d pdir_idx = %d", __func__, pmap, 486 (pmap == kernel_pmap), pdir_idx); 487 488 KASSERT((pdir_idx <= (VM_MAXUSER_ADDRESS / PDIR_SIZE)), 489 ("ptbl_alloc: invalid pdir_idx")); 490 KASSERT((pmap->pm_pdir[pdir_idx] == NULL), 491 ("pte_alloc: valid ptbl entry exists!")); 492 493 pbuf = ptbl_buf_alloc(); 494 if (pbuf == NULL) 495 panic("pte_alloc: couldn't alloc kernel virtual memory"); 496 497 ptbl = (pte_t *)pbuf->kva; 498 499 CTR2(KTR_PMAP, "%s: ptbl kva = %p", __func__, ptbl); 500 501 /* Allocate ptbl pages, this will sleep! */ 502 for (i = 0; i < PTBL_PAGES; i++) { 503 pidx = (PTBL_PAGES * pdir_idx) + i; 504 while ((m = vm_page_alloc(NULL, pidx, 505 VM_ALLOC_NOOBJ | VM_ALLOC_WIRED)) == NULL) { 506 507 PMAP_UNLOCK(pmap); 508 vm_page_unlock_queues(); 509 VM_WAIT; 510 vm_page_lock_queues(); 511 PMAP_LOCK(pmap); 512 } 513 mtbl[i] = m; 514 } 515 516 /* Map allocated pages into kernel_pmap. */ 517 mmu_booke_qenter(mmu, (vm_offset_t)ptbl, mtbl, PTBL_PAGES); 518 519 /* Zero whole ptbl. */ 520 bzero((caddr_t)ptbl, PTBL_PAGES * PAGE_SIZE); 521 522 /* Add pbuf to the pmap ptbl bufs list. */ 523 TAILQ_INSERT_TAIL(&pmap->pm_ptbl_list, pbuf, link); 524 525 return (ptbl); 526} 527 528/* Free ptbl pages and invalidate pdir entry. 
*/ 529static void 530ptbl_free(mmu_t mmu, pmap_t pmap, unsigned int pdir_idx) 531{ 532 pte_t *ptbl; 533 vm_paddr_t pa; 534 vm_offset_t va; 535 vm_page_t m; 536 int i; 537 538 CTR4(KTR_PMAP, "%s: pmap = %p su = %d pdir_idx = %d", __func__, pmap, 539 (pmap == kernel_pmap), pdir_idx); 540 541 KASSERT((pdir_idx <= (VM_MAXUSER_ADDRESS / PDIR_SIZE)), 542 ("ptbl_free: invalid pdir_idx")); 543 544 ptbl = pmap->pm_pdir[pdir_idx]; 545 546 CTR2(KTR_PMAP, "%s: ptbl = %p", __func__, ptbl); 547 548 KASSERT((ptbl != NULL), ("ptbl_free: null ptbl")); 549 550 /* 551 * Invalidate the pdir entry as soon as possible, so that other CPUs 552 * don't attempt to look up the page tables we are releasing. 553 */ 554 mtx_lock_spin(&tlbivax_mutex); 555 556 pmap->pm_pdir[pdir_idx] = NULL; 557 558 mtx_unlock_spin(&tlbivax_mutex); 559 560 for (i = 0; i < PTBL_PAGES; i++) { 561 va = ((vm_offset_t)ptbl + (i * PAGE_SIZE)); 562 pa = pte_vatopa(mmu, kernel_pmap, va); 563 m = PHYS_TO_VM_PAGE(pa); 564 vm_page_free_zero(m); 565 atomic_subtract_int(&cnt.v_wire_count, 1); 566 mmu_booke_kremove(mmu, va); 567 } 568 569 ptbl_free_pmap_ptbl(pmap, ptbl); 570} 571 572/* 573 * Decrement ptbl pages hold count and attempt to free ptbl pages. 574 * Called when removing pte entry from ptbl. 575 * 576 * Return 1 if ptbl pages were freed. 577 */ 578static int 579ptbl_unhold(mmu_t mmu, pmap_t pmap, unsigned int pdir_idx) 580{ 581 pte_t *ptbl; 582 vm_paddr_t pa; 583 vm_page_t m; 584 int i; 585 586 CTR4(KTR_PMAP, "%s: pmap = %p su = %d pdir_idx = %d", __func__, pmap, 587 (pmap == kernel_pmap), pdir_idx); 588 589 KASSERT((pdir_idx <= (VM_MAXUSER_ADDRESS / PDIR_SIZE)), 590 ("ptbl_unhold: invalid pdir_idx")); 591 KASSERT((pmap != kernel_pmap), 592 ("ptbl_unhold: unholding kernel ptbl!")); 593 594 ptbl = pmap->pm_pdir[pdir_idx]; 595 596 //debugf("ptbl_unhold: ptbl = 0x%08x\n", (u_int32_t)ptbl); 597 KASSERT(((vm_offset_t)ptbl >= VM_MIN_KERNEL_ADDRESS), 598 ("ptbl_unhold: non kva ptbl")); 599 600 /* decrement hold count */ 601 for (i = 0; i < PTBL_PAGES; i++) { 602 pa = pte_vatopa(mmu, kernel_pmap, 603 (vm_offset_t)ptbl + (i * PAGE_SIZE)); 604 m = PHYS_TO_VM_PAGE(pa); 605 m->wire_count--; 606 } 607 608 /* 609 * Free ptbl pages if there are no pte entries in this ptbl. 610 * wire_count has the same value for all ptbl pages, so check the last 611 * page. 612 */ 613 if (m->wire_count == 0) { 614 ptbl_free(mmu, pmap, pdir_idx); 615 616 //debugf("ptbl_unhold: e (freed ptbl)\n"); 617 return (1); 618 } 619 620 return (0); 621} 622 623/* 624 * Increment hold count for ptbl pages. This routine is used when a new pte 625 * entry is being inserted into the ptbl. 626 */ 627static void 628ptbl_hold(mmu_t mmu, pmap_t pmap, unsigned int pdir_idx) 629{ 630 vm_paddr_t pa; 631 pte_t *ptbl; 632 vm_page_t m; 633 int i; 634 635 CTR3(KTR_PMAP, "%s: pmap = %p pdir_idx = %d", __func__, pmap, 636 pdir_idx); 637 638 KASSERT((pdir_idx <= (VM_MAXUSER_ADDRESS / PDIR_SIZE)), 639 ("ptbl_hold: invalid pdir_idx")); 640 KASSERT((pmap != kernel_pmap), 641 ("ptbl_hold: holding kernel ptbl!")); 642 643 ptbl = pmap->pm_pdir[pdir_idx]; 644 645 KASSERT((ptbl != NULL), ("ptbl_hold: null ptbl")); 646 647 for (i = 0; i < PTBL_PAGES; i++) { 648 pa = pte_vatopa(mmu, kernel_pmap, 649 (vm_offset_t)ptbl + (i * PAGE_SIZE)); 650 m = PHYS_TO_VM_PAGE(pa); 651 m->wire_count++; 652 } 653} 654 655/* Allocate pv_entry structure. 
*/ 656pv_entry_t 657pv_alloc(void) 658{ 659 pv_entry_t pv; 660 661 pv_entry_count++; 662 if ((pv_entry_count > pv_entry_high_water) && 663 (pagedaemon_waken == 0)) { 664 pagedaemon_waken = 1; 665 wakeup(&vm_pages_needed); 666 } 667 pv = uma_zalloc(pvzone, M_NOWAIT); 668 669 return (pv); 670} 671 672/* Free pv_entry structure. */ 673static __inline void 674pv_free(pv_entry_t pve) 675{ 676 677 pv_entry_count--; 678 uma_zfree(pvzone, pve); 679} 680 681 682/* Allocate and initialize pv_entry structure. */ 683static void 684pv_insert(pmap_t pmap, vm_offset_t va, vm_page_t m) 685{ 686 pv_entry_t pve; 687 688 //int su = (pmap == kernel_pmap); 689 //debugf("pv_insert: s (su = %d pmap = 0x%08x va = 0x%08x m = 0x%08x)\n", su, 690 // (u_int32_t)pmap, va, (u_int32_t)m); 691 692 pve = pv_alloc(); 693 if (pve == NULL) 694 panic("pv_insert: no pv entries!"); 695 696 pve->pv_pmap = pmap; 697 pve->pv_va = va; 698 699 /* add to pv_list */ 700 PMAP_LOCK_ASSERT(pmap, MA_OWNED); 701 mtx_assert(&vm_page_queue_mtx, MA_OWNED); 702 703 TAILQ_INSERT_TAIL(&m->md.pv_list, pve, pv_link); 704 705 //debugf("pv_insert: e\n"); 706} 707 708/* Destroy pv entry. */ 709static void 710pv_remove(pmap_t pmap, vm_offset_t va, vm_page_t m) 711{ 712 pv_entry_t pve; 713 714 //int su = (pmap == kernel_pmap); 715 //debugf("pv_remove: s (su = %d pmap = 0x%08x va = 0x%08x)\n", su, (u_int32_t)pmap, va); 716 717 PMAP_LOCK_ASSERT(pmap, MA_OWNED); 718 mtx_assert(&vm_page_queue_mtx, MA_OWNED); 719 720 /* find pv entry */ 721 TAILQ_FOREACH(pve, &m->md.pv_list, pv_link) { 722 if ((pmap == pve->pv_pmap) && (va == pve->pv_va)) { 723 /* remove from pv_list */ 724 TAILQ_REMOVE(&m->md.pv_list, pve, pv_link); 725 if (TAILQ_EMPTY(&m->md.pv_list)) 726 vm_page_flag_clear(m, PG_WRITEABLE); 727 728 /* free pv entry struct */ 729 pv_free(pve); 730 break; 731 } 732 } 733 734 //debugf("pv_remove: e\n"); 735} 736 737/* 738 * Clean pte entry, try to free page table page if requested. 739 * 740 * Return 1 if ptbl pages were freed, otherwise return 0. 741 */ 742static int 743pte_remove(mmu_t mmu, pmap_t pmap, vm_offset_t va, uint8_t flags) 744{ 745 unsigned int pdir_idx = PDIR_IDX(va); 746 unsigned int ptbl_idx = PTBL_IDX(va); 747 vm_page_t m; 748 pte_t *ptbl; 749 pte_t *pte; 750 751 //int su = (pmap == kernel_pmap); 752 //debugf("pte_remove: s (su = %d pmap = 0x%08x va = 0x%08x flags = %d)\n", 753 // su, (u_int32_t)pmap, va, flags); 754 755 ptbl = pmap->pm_pdir[pdir_idx]; 756 KASSERT(ptbl, ("pte_remove: null ptbl")); 757 758 pte = &ptbl[ptbl_idx]; 759 760 if (pte == NULL || !PTE_ISVALID(pte)) 761 return (0); 762 763 if (PTE_ISWIRED(pte)) 764 pmap->pm_stats.wired_count--; 765 766 /* Handle managed entry. */ 767 if (PTE_ISMANAGED(pte)) { 768 /* Get vm_page_t for mapped pte. */ 769 m = PHYS_TO_VM_PAGE(PTE_PA(pte)); 770 771 if (PTE_ISMODIFIED(pte)) 772 vm_page_dirty(m); 773 774 if (PTE_ISREFERENCED(pte)) 775 vm_page_flag_set(m, PG_REFERENCED); 776 777 pv_remove(pmap, va, m); 778 } 779 780 mtx_lock_spin(&tlbivax_mutex); 781 782 tlb0_flush_entry(va); 783 pte->flags = 0; 784 pte->rpn = 0; 785 786 mtx_unlock_spin(&tlbivax_mutex); 787 788 pmap->pm_stats.resident_count--; 789 790 if (flags & PTBL_UNHOLD) { 791 //debugf("pte_remove: e (unhold)\n"); 792 return (ptbl_unhold(mmu, pmap, pdir_idx)); 793 } 794 795 //debugf("pte_remove: e\n"); 796 return (0); 797} 798 799/* 800 * Insert PTE for a given page and virtual address. 
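 * (Translation here is two-level: pm_pdir[PDIR_IDX(va)] selects the page table and PTBL_IDX(va) indexes the pte_t within it; when no page table exists yet for the pdir slot, ptbl_alloc() below creates one.)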
801 */ 802static void 803pte_enter(mmu_t mmu, pmap_t pmap, vm_page_t m, vm_offset_t va, uint32_t flags) 804{ 805 unsigned int pdir_idx = PDIR_IDX(va); 806 unsigned int ptbl_idx = PTBL_IDX(va); 807 pte_t *ptbl, *pte; 808 809 CTR4(KTR_PMAP, "%s: su = %d pmap = %p va = %p", __func__, 810 pmap == kernel_pmap, pmap, va); 811 812 /* Get the page table pointer. */ 813 ptbl = pmap->pm_pdir[pdir_idx]; 814 815 if (ptbl == NULL) { 816 /* Allocate page table pages. */ 817 ptbl = ptbl_alloc(mmu, pmap, pdir_idx); 818 } else { 819 /* 820 * Check if there is valid mapping for requested 821 * va, if there is, remove it. 822 */ 823 pte = &pmap->pm_pdir[pdir_idx][ptbl_idx]; 824 if (PTE_ISVALID(pte)) { 825 pte_remove(mmu, pmap, va, PTBL_HOLD); 826 } else { 827 /* 828 * pte is not used, increment hold count 829 * for ptbl pages. 830 */ 831 if (pmap != kernel_pmap) 832 ptbl_hold(mmu, pmap, pdir_idx); 833 } 834 } 835 836 /* 837 * Insert pv_entry into pv_list for mapped page if part of managed 838 * memory. 839 */ 840 if ((m->flags & PG_FICTITIOUS) == 0) { 841 if ((m->flags & PG_UNMANAGED) == 0) { 842 flags |= PTE_MANAGED; 843 844 /* Create and insert pv entry. */ 845 pv_insert(pmap, va, m); 846 } 847 } 848 849 pmap->pm_stats.resident_count++; 850 851 mtx_lock_spin(&tlbivax_mutex); 852 853 tlb0_flush_entry(va); 854 if (pmap->pm_pdir[pdir_idx] == NULL) { 855 /* 856 * If we just allocated a new page table, hook it in 857 * the pdir. 858 */ 859 pmap->pm_pdir[pdir_idx] = ptbl; 860 } 861 pte = &(pmap->pm_pdir[pdir_idx][ptbl_idx]); 862 pte->rpn = VM_PAGE_TO_PHYS(m) & ~PTE_PA_MASK; 863 pte->flags |= (PTE_VALID | flags); 864 865 mtx_unlock_spin(&tlbivax_mutex); 866} 867 868/* Return the pa for the given pmap/va. */ 869static vm_paddr_t 870pte_vatopa(mmu_t mmu, pmap_t pmap, vm_offset_t va) 871{ 872 vm_paddr_t pa = 0; 873 pte_t *pte; 874 875 pte = pte_find(mmu, pmap, va); 876 if ((pte != NULL) && PTE_ISVALID(pte)) 877 pa = (PTE_PA(pte) | (va & PTE_PA_MASK)); 878 return (pa); 879} 880 881/* Get a pointer to a PTE in a page table. */ 882static pte_t * 883pte_find(mmu_t mmu, pmap_t pmap, vm_offset_t va) 884{ 885 unsigned int pdir_idx = PDIR_IDX(va); 886 unsigned int ptbl_idx = PTBL_IDX(va); 887 888 KASSERT((pmap != NULL), ("pte_find: invalid pmap")); 889 890 if (pmap->pm_pdir[pdir_idx]) 891 return (&(pmap->pm_pdir[pdir_idx][ptbl_idx])); 892 893 return (NULL); 894} 895 896/**************************************************************************/ 897/* PMAP related */ 898/**************************************************************************/ 899 900/* 901 * This is called during e500_init, before the system is really initialized. 902 */ 903static void 904mmu_booke_bootstrap(mmu_t mmu, vm_offset_t start, vm_offset_t kernelend) 905{ 906 vm_offset_t phys_kernelend; 907 struct mem_region *mp, *mp1; 908 int cnt, i, j; 909 u_int s, e, sz; 910 u_int phys_avail_count; 911 vm_size_t physsz, hwphyssz, kstack0_sz; 912 vm_offset_t kernel_pdir, kstack0; 913 vm_paddr_t kstack0_phys; 914 915 debugf("mmu_booke_bootstrap: entered\n"); 916 917 /* Initialize invalidation mutex */ 918 mtx_init(&tlbivax_mutex, "tlbivax", NULL, MTX_SPIN); 919 920 /* Read TLB0 size and associativity. */ 921 tlb0_get_tlbconf(); 922 923 /* Align kernel start and end address (kernel image). */ 924 kernstart = trunc_page(start); 925 data_start = round_page(kernelend); 926 kernsize = data_start - kernstart; 927 928 data_end = data_start; 929 930 /* Allocate space for the message buffer. 
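 * (The bootstrap allocations below are carved sequentially from data_end: the message buffer, then ptbl_bufs, then the kernel page tables, with page alignment between the steps.)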
*/ 931 msgbufp = (struct msgbuf *)data_end; 932 data_end += MSGBUF_SIZE; 933 debugf(" msgbufp at 0x%08x end = 0x%08x\n", (uint32_t)msgbufp, 934 data_end); 935 936 data_end = round_page(data_end); 937 938 /* Allocate space for ptbl_bufs. */ 939 ptbl_bufs = (struct ptbl_buf *)data_end; 940 data_end += sizeof(struct ptbl_buf) * PTBL_BUFS; 941 debugf(" ptbl_bufs at 0x%08x end = 0x%08x\n", (uint32_t)ptbl_bufs, 942 data_end); 943 944 data_end = round_page(data_end); 945 946 /* Allocate PTE tables for kernel KVA. */ 947 kernel_pdir = data_end; 948 kernel_ptbls = (VM_MAX_KERNEL_ADDRESS - VM_MIN_KERNEL_ADDRESS + 949 PDIR_SIZE - 1) / PDIR_SIZE; 950 data_end += kernel_ptbls * PTBL_PAGES * PAGE_SIZE; 951 debugf(" kernel ptbls: %d\n", kernel_ptbls); 952 debugf(" kernel pdir at 0x%08x end = 0x%08x\n", kernel_pdir, data_end); 953 954 debugf(" data_end: 0x%08x\n", data_end); 955 if (data_end - kernstart > 0x1000000) { 956 data_end = (data_end + 0x3fffff) & ~0x3fffff; 957 tlb1_mapin_region(kernstart + 0x1000000, 958 kernload + 0x1000000, data_end - kernstart - 0x1000000); 959 } else 960 data_end = (data_end + 0xffffff) & ~0xffffff; 961 962 debugf(" updated data_end: 0x%08x\n", data_end); 963 964 kernsize += data_end - data_start; 965 966 /* 967 * Clear the structures - note we can only do it safely after the 968 * possible additional TLB1 translations are in place (above) so that 969 * the whole range up to the currently calculated 'data_end' is covered. 970 */ 971 memset((void *)ptbl_bufs, 0, sizeof(struct ptbl_buf) * PTBL_SIZE); 972 memset((void *)kernel_pdir, 0, kernel_ptbls * PTBL_PAGES * PAGE_SIZE); 973 974 /*******************************************************/ 975 /* Set the start and end of kva. */ 976 /*******************************************************/ 977 virtual_avail = round_page(data_end); 978 virtual_end = VM_MAX_KERNEL_ADDRESS; 979 980 /* Allocate KVA space for page zero/copy operations. */ 981 zero_page_va = virtual_avail; 982 virtual_avail += PAGE_SIZE; 983 zero_page_idle_va = virtual_avail; 984 virtual_avail += PAGE_SIZE; 985 copy_page_src_va = virtual_avail; 986 virtual_avail += PAGE_SIZE; 987 copy_page_dst_va = virtual_avail; 988 virtual_avail += PAGE_SIZE; 989 debugf("zero_page_va = 0x%08x\n", zero_page_va); 990 debugf("zero_page_idle_va = 0x%08x\n", zero_page_idle_va); 991 debugf("copy_page_src_va = 0x%08x\n", copy_page_src_va); 992 debugf("copy_page_dst_va = 0x%08x\n", copy_page_dst_va); 993 994 /* Initialize page zero/copy mutexes. */ 995 mtx_init(&zero_page_mutex, "mmu_booke_zero_page", NULL, MTX_DEF); 996 mtx_init(&copy_page_mutex, "mmu_booke_copy_page", NULL, MTX_DEF); 997 998 /* Allocate KVA space for ptbl bufs. */ 999 ptbl_buf_pool_vabase = virtual_avail; 1000 virtual_avail += PTBL_BUFS * PTBL_PAGES * PAGE_SIZE; 1001 debugf("ptbl_buf_pool_vabase = 0x%08x end = 0x%08x\n", 1002 ptbl_buf_pool_vabase, virtual_avail); 1003 1004 /* Calculate corresponding physical addresses for the kernel region. */ 1005 phys_kernelend = kernload + kernsize; 1006 debugf("kernel image and allocated data:\n"); 1007 debugf(" kernload = 0x%08x\n", kernload); 1008 debugf(" kernstart = 0x%08x\n", kernstart); 1009 debugf(" kernsize = 0x%08x\n", kernsize); 1010 1011 if (sizeof(phys_avail) / sizeof(phys_avail[0]) < availmem_regions_sz) 1012 panic("mmu_booke_bootstrap: phys_avail too small"); 1013 1014 /* 1015 * Remove kernel physical address range from avail regions list. Page 1016 * align all regions. Non-page aligned memory isn't very interesting 1017 * to us. Also, sort the entries for ascending addresses. 
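 * For example (illustrative values only): an avail region 0x0000_0000 - 0x4000_0000 containing a kernel at 0x0100_0000 - 0x0140_0000 is split by the loop below into 0x0000_0000 - 0x0100_0000 and 0x0140_0000 - 0x4000_0000.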
1018 */ 1019 1020 /* Retrieve phys/avail mem regions */ 1021 mem_regions(&physmem_regions, &physmem_regions_sz, 1022 &availmem_regions, &availmem_regions_sz); 1023 sz = 0; 1024 cnt = availmem_regions_sz; 1025 debugf("processing avail regions:\n"); 1026 for (mp = availmem_regions; mp->mr_size; mp++) { 1027 s = mp->mr_start; 1028 e = mp->mr_start + mp->mr_size; 1029 debugf(" %08x-%08x -> ", s, e); 1030 /* Check whether this region holds all of the kernel. */ 1031 if (s < kernload && e > phys_kernelend) { 1032 availmem_regions[cnt].mr_start = phys_kernelend; 1033 availmem_regions[cnt++].mr_size = e - phys_kernelend; 1034 e = kernload; 1035 } 1036 /* Look whether this region starts within the kernel. */ 1037 if (s >= kernload && s < phys_kernelend) { 1038 if (e <= phys_kernelend) 1039 goto empty; 1040 s = phys_kernelend; 1041 } 1042 /* Now look whether this region ends within the kernel. */ 1043 if (e > kernload && e <= phys_kernelend) { 1044 if (s >= kernload) 1045 goto empty; 1046 e = kernload; 1047 } 1048 /* Now page align the start and size of the region. */ 1049 s = round_page(s); 1050 e = trunc_page(e); 1051 if (e < s) 1052 e = s; 1053 sz = e - s; 1054 debugf("%08x-%08x = %x\n", s, e, sz); 1055 1056 /* Check whether some memory is left here. */ 1057 if (sz == 0) { 1058 empty: 1059 memmove(mp, mp + 1, 1060 (cnt - (mp - availmem_regions)) * sizeof(*mp)); 1061 cnt--; 1062 mp--; 1063 continue; 1064 } 1065 1066 /* Do an insertion sort. */ 1067 for (mp1 = availmem_regions; mp1 < mp; mp1++) 1068 if (s < mp1->mr_start) 1069 break; 1070 if (mp1 < mp) { 1071 memmove(mp1 + 1, mp1, (char *)mp - (char *)mp1); 1072 mp1->mr_start = s; 1073 mp1->mr_size = sz; 1074 } else { 1075 mp->mr_start = s; 1076 mp->mr_size = sz; 1077 } 1078 } 1079 availmem_regions_sz = cnt; 1080 1081 /*******************************************************/ 1082 /* Steal physical memory for kernel stack from the end */ 1083 /* of the first avail region */ 1084 /*******************************************************/ 1085 kstack0_sz = KSTACK_PAGES * PAGE_SIZE; 1086 kstack0_phys = availmem_regions[0].mr_start + 1087 availmem_regions[0].mr_size; 1088 kstack0_phys -= kstack0_sz; 1089 availmem_regions[0].mr_size -= kstack0_sz; 1090 1091 /*******************************************************/ 1092 /* Fill in phys_avail table, based on availmem_regions */ 1093 /*******************************************************/ 1094 phys_avail_count = 0; 1095 physsz = 0; 1096 hwphyssz = 0; 1097 TUNABLE_ULONG_FETCH("hw.physmem", (u_long *) &hwphyssz); 1098 1099 debugf("fill in phys_avail:\n"); 1100 for (i = 0, j = 0; i < availmem_regions_sz; i++, j += 2) { 1101 1102 debugf(" region: 0x%08x - 0x%08x (0x%08x)\n", 1103 availmem_regions[i].mr_start, 1104 availmem_regions[i].mr_start + 1105 availmem_regions[i].mr_size, 1106 availmem_regions[i].mr_size); 1107 1108 if (hwphyssz != 0 && 1109 (physsz + availmem_regions[i].mr_size) >= hwphyssz) { 1110 debugf(" hw.physmem adjust\n"); 1111 if (physsz < hwphyssz) { 1112 phys_avail[j] = availmem_regions[i].mr_start; 1113 phys_avail[j + 1] = 1114 availmem_regions[i].mr_start + 1115 hwphyssz - physsz; 1116 physsz = hwphyssz; 1117 phys_avail_count++; 1118 } 1119 break; 1120 } 1121 1122 phys_avail[j] = availmem_regions[i].mr_start; 1123 phys_avail[j + 1] = availmem_regions[i].mr_start + 1124 availmem_regions[i].mr_size; 1125 phys_avail_count++; 1126 physsz += availmem_regions[i].mr_size; 1127 } 1128 physmem = btoc(physsz); 1129 1130 /* Calculate the last available physical address. 
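 * (phys_avail[] holds start/end address pairs terminated by zero entries, so the loop below walks to the last populated pair and derives Maxmem from its end address.)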
*/ 1131 for (i = 0; phys_avail[i + 2] != 0; i += 2) 1132 ; 1133 Maxmem = powerpc_btop(phys_avail[i + 1]); 1134 1135 debugf("Maxmem = 0x%08lx\n", Maxmem); 1136 debugf("phys_avail_count = %d\n", phys_avail_count); 1137 debugf("physsz = 0x%08x physmem = %ld (0x%08lx)\n", physsz, physmem, 1138 physmem); 1139 1140 /*******************************************************/ 1141 /* Initialize (statically allocated) kernel pmap. */ 1142 /*******************************************************/ 1143 PMAP_LOCK_INIT(kernel_pmap); 1144 kptbl_min = VM_MIN_KERNEL_ADDRESS / PDIR_SIZE; 1145 1146 debugf("kernel_pmap = 0x%08x\n", (uint32_t)kernel_pmap); 1147 debugf("kptbl_min = %d, kernel_ptbls = %d\n", kptbl_min, kernel_ptbls); 1148 debugf("kernel pdir range: 0x%08x - 0x%08x\n", 1149 kptbl_min * PDIR_SIZE, (kptbl_min + kernel_ptbls) * PDIR_SIZE - 1); 1150 1151 /* Initialize kernel pdir */ 1152 for (i = 0; i < kernel_ptbls; i++) 1153 kernel_pmap->pm_pdir[kptbl_min + i] = 1154 (pte_t *)(kernel_pdir + (i * PAGE_SIZE * PTBL_PAGES)); 1155 1156 for (i = 0; i < MAXCPU; i++) { 1157 kernel_pmap->pm_tid[i] = TID_KERNEL; 1158 1159 /* Initialize each CPU's tidbusy entry 0 with kernel_pmap */ 1160 tidbusy[i][0] = kernel_pmap; 1161 } 1162 /* Mark kernel_pmap active on all CPUs */ 1163 kernel_pmap->pm_active = ~0; 1164 1165 /*******************************************************/ 1166 /* Final setup */ 1167 /*******************************************************/ 1168 1169 /* Enter kstack0 into kernel map, provide guard page */ 1170 kstack0 = virtual_avail + KSTACK_GUARD_PAGES * PAGE_SIZE; 1171 thread0.td_kstack = kstack0; 1172 thread0.td_kstack_pages = KSTACK_PAGES; 1173 1174 debugf("kstack_sz = 0x%08x\n", kstack0_sz); 1175 debugf("kstack0_phys at 0x%08x - 0x%08x\n", 1176 kstack0_phys, kstack0_phys + kstack0_sz); 1177 debugf("kstack0 at 0x%08x - 0x%08x\n", kstack0, kstack0 + kstack0_sz); 1178 1179 virtual_avail += KSTACK_GUARD_PAGES * PAGE_SIZE + kstack0_sz; 1180 for (i = 0; i < KSTACK_PAGES; i++) { 1181 mmu_booke_kenter(mmu, kstack0, kstack0_phys); 1182 kstack0 += PAGE_SIZE; 1183 kstack0_phys += PAGE_SIZE; 1184 } 1185 1186 debugf("virtual_avail = %08x\n", virtual_avail); 1187 debugf("virtual_end = %08x\n", virtual_end); 1188 1189 debugf("mmu_booke_bootstrap: exit\n"); 1190} 1191 1192/* 1193 * Get the physical page address for the given pmap/virtual address. 1194 */ 1195static vm_paddr_t 1196mmu_booke_extract(mmu_t mmu, pmap_t pmap, vm_offset_t va) 1197{ 1198 vm_paddr_t pa; 1199 1200 PMAP_LOCK(pmap); 1201 pa = pte_vatopa(mmu, pmap, va); 1202 PMAP_UNLOCK(pmap); 1203 1204 return (pa); 1205} 1206 1207/* 1208 * Extract the physical page address associated with the given 1209 * kernel virtual address. 1210 */ 1211static vm_paddr_t 1212mmu_booke_kextract(mmu_t mmu, vm_offset_t va) 1213{ 1214 1215 return (pte_vatopa(mmu, kernel_pmap, va)); 1216} 1217 1218/* 1219 * Initialize the pmap module. 1220 * Called by vm_init, to initialize any structures that the pmap 1221 * system needs to map virtual memory. 1222 */ 1223static void 1224mmu_booke_init(mmu_t mmu) 1225{ 1226 int shpgperproc = PMAP_SHPGPERPROC; 1227 1228 /* 1229 * Initialize the address space (zone) for the pv entries. Set a 1230 * high water mark so that the system can recover from excessive 1231 * numbers of pv entries. 
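 * (As computed below, pv_entry_max defaults to shpgperproc * maxproc + cnt.v_page_count and the high water mark to 90% of pv_entry_max; both can be overridden with the vm.pmap.shpgperproc and vm.pmap.pv_entries tunables.)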
1232 */ 1233 pvzone = uma_zcreate("PV ENTRY", sizeof(struct pv_entry), NULL, NULL, 1234 NULL, NULL, UMA_ALIGN_PTR, UMA_ZONE_VM | UMA_ZONE_NOFREE); 1235 1236 TUNABLE_INT_FETCH("vm.pmap.shpgperproc", &shpgperproc); 1237 pv_entry_max = shpgperproc * maxproc + cnt.v_page_count; 1238 1239 TUNABLE_INT_FETCH("vm.pmap.pv_entries", &pv_entry_max); 1240 pv_entry_high_water = 9 * (pv_entry_max / 10); 1241 1242 uma_zone_set_obj(pvzone, &pvzone_obj, pv_entry_max); 1243 1244 /* Pre-fill pvzone with initial number of pv entries. */ 1245 uma_prealloc(pvzone, PV_ENTRY_ZONE_MIN); 1246 1247 /* Initialize ptbl allocation. */ 1248 ptbl_init(); 1249} 1250 1251/* 1252 * Map a list of wired pages into kernel virtual address space. This is 1253 * intended for temporary mappings which do not need page modification or 1254 * references recorded. Existing mappings in the region are overwritten. 1255 */ 1256static void 1257mmu_booke_qenter(mmu_t mmu, vm_offset_t sva, vm_page_t *m, int count) 1258{ 1259 vm_offset_t va; 1260 1261 va = sva; 1262 while (count-- > 0) { 1263 mmu_booke_kenter(mmu, va, VM_PAGE_TO_PHYS(*m)); 1264 va += PAGE_SIZE; 1265 m++; 1266 } 1267} 1268 1269/* 1270 * Remove page mappings from kernel virtual address space. Intended for 1271 * temporary mappings entered by mmu_booke_qenter. 1272 */ 1273static void 1274mmu_booke_qremove(mmu_t mmu, vm_offset_t sva, int count) 1275{ 1276 vm_offset_t va; 1277 1278 va = sva; 1279 while (count-- > 0) { 1280 mmu_booke_kremove(mmu, va); 1281 va += PAGE_SIZE; 1282 } 1283} 1284 1285/* 1286 * Map a wired page into kernel virtual address space. 1287 */ 1288static void 1289mmu_booke_kenter(mmu_t mmu, vm_offset_t va, vm_offset_t pa) 1290{ 1291 unsigned int pdir_idx = PDIR_IDX(va); 1292 unsigned int ptbl_idx = PTBL_IDX(va); 1293 uint32_t flags; 1294 pte_t *pte; 1295 1296 KASSERT(((va >= VM_MIN_KERNEL_ADDRESS) && 1297 (va <= VM_MAX_KERNEL_ADDRESS)), ("mmu_booke_kenter: invalid va")); 1298 1299 flags = 0; 1300 flags |= (PTE_SR | PTE_SW | PTE_SX | PTE_WIRED | PTE_VALID); 1301 flags |= PTE_M; 1302 1303 pte = &(kernel_pmap->pm_pdir[pdir_idx][ptbl_idx]); 1304 1305 mtx_lock_spin(&tlbivax_mutex); 1306 1307 if (PTE_ISVALID(pte)) { 1308 1309 CTR1(KTR_PMAP, "%s: replacing entry!", __func__); 1310 1311 /* Flush entry from TLB0 */ 1312 tlb0_flush_entry(va); 1313 } 1314 1315 pte->rpn = pa & ~PTE_PA_MASK; 1316 pte->flags = flags; 1317 1318 //debugf("mmu_booke_kenter: pdir_idx = %d ptbl_idx = %d va=0x%08x " 1319 // "pa=0x%08x rpn=0x%08x flags=0x%08x\n", 1320 // pdir_idx, ptbl_idx, va, pa, pte->rpn, pte->flags); 1321 1322 /* Flush the real memory from the instruction cache. */ 1323 if ((flags & (PTE_I | PTE_G)) == 0) { 1324 __syncicache((void *)va, PAGE_SIZE); 1325 } 1326 1327 mtx_unlock_spin(&tlbivax_mutex); 1328} 1329 1330/* 1331 * Remove a page from kernel page table. 1332 */ 1333static void 1334mmu_booke_kremove(mmu_t mmu, vm_offset_t va) 1335{ 1336 unsigned int pdir_idx = PDIR_IDX(va); 1337 unsigned int ptbl_idx = PTBL_IDX(va); 1338 pte_t *pte; 1339 1340// CTR2(KTR_PMAP,("%s: s (va = 0x%08x)\n", __func__, va)); 1341 1342 KASSERT(((va >= VM_MIN_KERNEL_ADDRESS) && 1343 (va <= VM_MAX_KERNEL_ADDRESS)), 1344 ("mmu_booke_kremove: invalid va")); 1345 1346 pte = &(kernel_pmap->pm_pdir[pdir_idx][ptbl_idx]); 1347 1348 if (!PTE_ISVALID(pte)) { 1349 1350 CTR1(KTR_PMAP, "%s: invalid pte", __func__); 1351 1352 return; 1353 } 1354 1355 mtx_lock_spin(&tlbivax_mutex); 1356 1357 /* Invalidate entry in TLB0, update PTE. 
*/ 1358 tlb0_flush_entry(va); 1359 pte->flags = 0; 1360 pte->rpn = 0; 1361 1362 mtx_unlock_spin(&tlbivax_mutex); 1363} 1364 1365/* 1366 * Initialize pmap associated with process 0. 1367 */ 1368static void 1369mmu_booke_pinit0(mmu_t mmu, pmap_t pmap) 1370{ 1371 1372 mmu_booke_pinit(mmu, pmap); 1373 PCPU_SET(curpmap, pmap); 1374} 1375 1376/* 1377 * Initialize a preallocated and zeroed pmap structure, 1378 * such as one in a vmspace structure. 1379 */ 1380static void 1381mmu_booke_pinit(mmu_t mmu, pmap_t pmap) 1382{ 1383 int i; 1384 1385 CTR4(KTR_PMAP, "%s: pmap = %p, proc %d '%s'", __func__, pmap, 1386 curthread->td_proc->p_pid, curthread->td_proc->p_comm); 1387 1388 KASSERT((pmap != kernel_pmap), ("pmap_pinit: initializing kernel_pmap")); 1389 1390 PMAP_LOCK_INIT(pmap); 1391 for (i = 0; i < MAXCPU; i++) 1392 pmap->pm_tid[i] = TID_NONE; 1393 pmap->pm_active = 0; 1394 bzero(&pmap->pm_stats, sizeof(pmap->pm_stats)); 1395 bzero(&pmap->pm_pdir, sizeof(pte_t *) * PDIR_NENTRIES); 1396 TAILQ_INIT(&pmap->pm_ptbl_list); 1397} 1398 1399/* 1400 * Release any resources held by the given physical map. 1401 * Called when a pmap initialized by mmu_booke_pinit is being released. 1402 * Should only be called if the map contains no valid mappings. 1403 */ 1404static void 1405mmu_booke_release(mmu_t mmu, pmap_t pmap) 1406{ 1407 1408 printf("mmu_booke_release: s\n"); 1409 1410 KASSERT(pmap->pm_stats.resident_count == 0, 1411 ("pmap_release: pmap resident count %ld != 0", 1412 pmap->pm_stats.resident_count)); 1413 1414 PMAP_LOCK_DESTROY(pmap); 1415} 1416 1417/* 1418 * Insert the given physical page at the specified virtual address in the 1419 * target physical map with the protection requested. If specified the page 1420 * will be wired down. 1421 */ 1422static void 1423mmu_booke_enter(mmu_t mmu, pmap_t pmap, vm_offset_t va, vm_page_t m, 1424 vm_prot_t prot, boolean_t wired) 1425{ 1426 1427 vm_page_lock_queues(); 1428 PMAP_LOCK(pmap); 1429 mmu_booke_enter_locked(mmu, pmap, va, m, prot, wired); 1430 vm_page_unlock_queues(); 1431 PMAP_UNLOCK(pmap); 1432} 1433 1434static void 1435mmu_booke_enter_locked(mmu_t mmu, pmap_t pmap, vm_offset_t va, vm_page_t m, 1436 vm_prot_t prot, boolean_t wired) 1437{ 1438 pte_t *pte; 1439 vm_paddr_t pa; 1440 uint32_t flags; 1441 int su, sync; 1442 1443 pa = VM_PAGE_TO_PHYS(m); 1444 su = (pmap == kernel_pmap); 1445 sync = 0; 1446 1447 //debugf("mmu_booke_enter_locked: s (pmap=0x%08x su=%d tid=%d m=0x%08x va=0x%08x " 1448 // "pa=0x%08x prot=0x%08x wired=%d)\n", 1449 // (u_int32_t)pmap, su, pmap->pm_tid, 1450 // (u_int32_t)m, va, pa, prot, wired); 1451 1452 if (su) { 1453 KASSERT(((va >= virtual_avail) && 1454 (va <= VM_MAX_KERNEL_ADDRESS)), 1455 ("mmu_booke_enter_locked: kernel pmap, non kernel va")); 1456 } else { 1457 KASSERT((va <= VM_MAXUSER_ADDRESS), 1458 ("mmu_booke_enter_locked: user pmap, non user va")); 1459 } 1460 1461 PMAP_LOCK_ASSERT(pmap, MA_OWNED); 1462 1463 /* 1464 * If there is an existing mapping, and the physical address has not 1465 * changed, must be protection or wiring change. 1466 */ 1467 if (((pte = pte_find(mmu, pmap, va)) != NULL) && 1468 (PTE_ISVALID(pte)) && (PTE_PA(pte) == pa)) { 1469 1470 /* 1471 * Before actually updating pte->flags we calculate and 1472 * prepare its new value in a helper var. 1473 */ 1474 flags = pte->flags; 1475 flags &= ~(PTE_UW | PTE_UX | PTE_SW | PTE_SX | PTE_MODIFIED); 1476 1477 /* Wiring change, just update stats. 
*/ 1478 if (wired) { 1479 if (!PTE_ISWIRED(pte)) { 1480 flags |= PTE_WIRED; 1481 pmap->pm_stats.wired_count++; 1482 } 1483 } else { 1484 if (PTE_ISWIRED(pte)) { 1485 flags &= ~PTE_WIRED; 1486 pmap->pm_stats.wired_count--; 1487 } 1488 } 1489 1490 if (prot & VM_PROT_WRITE) { 1491 /* Add write permissions. */ 1492 flags |= PTE_SW; 1493 if (!su) 1494 flags |= PTE_UW; 1495 } else { 1496 /* Handle modified pages, sense modify status. */ 1497 1498 /* 1499 * The PTE_MODIFIED flag could be set by underlying 1500 * TLB misses since we last read it (above), possibly 1501 * other CPUs could update it so we check in the PTE 1502 * directly rather than rely on that saved local flags 1503 * copy. 1504 */ 1505 if (PTE_ISMODIFIED(pte)) 1506 vm_page_dirty(m); 1507 } 1508 1509 if (prot & VM_PROT_EXECUTE) { 1510 flags |= PTE_SX; 1511 if (!su) 1512 flags |= PTE_UX; 1513 1514 /* 1515 * Check existing flags for execute permissions: if we 1516 * are turning execute permissions on, icache should 1517 * be flushed. 1518 */ 1519 if ((flags & (PTE_UX | PTE_SX)) == 0) 1520 sync++; 1521 } 1522 1523 flags &= ~PTE_REFERENCED; 1524 1525 /* 1526 * The new flags value is all calculated -- only now actually 1527 * update the PTE. 1528 */ 1529 mtx_lock_spin(&tlbivax_mutex); 1530 1531 tlb0_flush_entry(va); 1532 pte->flags = flags; 1533 1534 mtx_unlock_spin(&tlbivax_mutex); 1535 1536 } else { 1537 /* 1538 * If there is an existing mapping, but it's for a different 1539 * physical address, pte_enter() will delete the old mapping. 1540 */ 1541 //if ((pte != NULL) && PTE_ISVALID(pte)) 1542 // debugf("mmu_booke_enter_locked: replace\n"); 1543 //else 1544 // debugf("mmu_booke_enter_locked: new\n"); 1545 1546 /* Now set up the flags and install the new mapping. */ 1547 flags = (PTE_SR | PTE_VALID); 1548 flags |= PTE_M; 1549 1550 if (!su) 1551 flags |= PTE_UR; 1552 1553 if (prot & VM_PROT_WRITE) { 1554 flags |= PTE_SW; 1555 if (!su) 1556 flags |= PTE_UW; 1557 } 1558 1559 if (prot & VM_PROT_EXECUTE) { 1560 flags |= PTE_SX; 1561 if (!su) 1562 flags |= PTE_UX; 1563 } 1564 1565 /* If it's wired, update stats. */ 1566 if (wired) { 1567 pmap->pm_stats.wired_count++; 1568 flags |= PTE_WIRED; 1569 } 1570 1571 pte_enter(mmu, pmap, m, va, flags); 1572 1573 /* Flush the real memory from the instruction cache. */ 1574 if (prot & VM_PROT_EXECUTE) 1575 sync++; 1576 } 1577 1578 if (sync && (su || pmap == PCPU_GET(curpmap))) { 1579 __syncicache((void *)va, PAGE_SIZE); 1580 sync = 0; 1581 } 1582 1583 if (sync) { 1584 /* Create a temporary mapping. */ 1585 pmap = PCPU_GET(curpmap); 1586 1587 va = 0; 1588 pte = pte_find(mmu, pmap, va); 1589 KASSERT(pte == NULL, ("%s:%d", __func__, __LINE__)); 1590 1591 flags = PTE_SR | PTE_VALID | PTE_UR | PTE_M; 1592 1593 pte_enter(mmu, pmap, m, va, flags); 1594 __syncicache((void *)va, PAGE_SIZE); 1595 pte_remove(mmu, pmap, va, PTBL_UNHOLD); 1596 } 1597} 1598 1599/* 1600 * Maps a sequence of resident pages belonging to the same object. 1601 * The sequence begins with the given page m_start. This page is 1602 * mapped at the given virtual address start. Each subsequent page is 1603 * mapped at a virtual address that is offset from start by the same 1604 * amount as the page is offset from m_start within the object. The 1605 * last page in the sequence is the page with the largest offset from 1606 * m_start that can be mapped at a virtual address less than the given 1607 * virtual address end. 
Not every virtual page between start and end 1608 * is mapped; only those for which a resident page exists with the 1609 * corresponding offset from m_start are mapped. 1610 */ 1611static void 1612mmu_booke_enter_object(mmu_t mmu, pmap_t pmap, vm_offset_t start, 1613 vm_offset_t end, vm_page_t m_start, vm_prot_t prot) 1614{ 1615 vm_page_t m; 1616 vm_pindex_t diff, psize; 1617 1618 psize = atop(end - start); 1619 m = m_start; 1620 PMAP_LOCK(pmap); 1621 while (m != NULL && (diff = m->pindex - m_start->pindex) < psize) { 1622 mmu_booke_enter_locked(mmu, pmap, start + ptoa(diff), m, 1623 prot & (VM_PROT_READ | VM_PROT_EXECUTE), FALSE); 1624 m = TAILQ_NEXT(m, listq); 1625 } 1626 PMAP_UNLOCK(pmap); 1627} 1628 1629static void 1630mmu_booke_enter_quick(mmu_t mmu, pmap_t pmap, vm_offset_t va, vm_page_t m, 1631 vm_prot_t prot) 1632{ 1633 1634 PMAP_LOCK(pmap); 1635 mmu_booke_enter_locked(mmu, pmap, va, m, 1636 prot & (VM_PROT_READ | VM_PROT_EXECUTE), FALSE); 1637 PMAP_UNLOCK(pmap); 1638} 1639 1640/* 1641 * Remove the given range of addresses from the specified map. 1642 * 1643 * It is assumed that the start and end are properly rounded to the page size. 1644 */ 1645static void 1646mmu_booke_remove(mmu_t mmu, pmap_t pmap, vm_offset_t va, vm_offset_t endva) 1647{ 1648 pte_t *pte; 1649 uint8_t hold_flag; 1650 1651 int su = (pmap == kernel_pmap); 1652 1653 //debugf("mmu_booke_remove: s (su = %d pmap=0x%08x tid=%d va=0x%08x endva=0x%08x)\n", 1654 // su, (u_int32_t)pmap, pmap->pm_tid, va, endva); 1655 1656 if (su) { 1657 KASSERT(((va >= virtual_avail) && 1658 (va <= VM_MAX_KERNEL_ADDRESS)), 1659 ("mmu_booke_remove: kernel pmap, non kernel va")); 1660 } else { 1661 KASSERT((va <= VM_MAXUSER_ADDRESS), 1662 ("mmu_booke_remove: user pmap, non user va")); 1663 } 1664 1665 if (PMAP_REMOVE_DONE(pmap)) { 1666 //debugf("mmu_booke_remove: e (empty)\n"); 1667 return; 1668 } 1669 1670 hold_flag = PTBL_HOLD_FLAG(pmap); 1671 //debugf("mmu_booke_remove: hold_flag = %d\n", hold_flag); 1672 1673 vm_page_lock_queues(); 1674 PMAP_LOCK(pmap); 1675 for (; va < endva; va += PAGE_SIZE) { 1676 pte = pte_find(mmu, pmap, va); 1677 if ((pte != NULL) && PTE_ISVALID(pte)) 1678 pte_remove(mmu, pmap, va, hold_flag); 1679 } 1680 PMAP_UNLOCK(pmap); 1681 vm_page_unlock_queues(); 1682 1683 //debugf("mmu_booke_remove: e\n"); 1684} 1685 1686/* 1687 * Remove physical page from all pmaps in which it resides. 1688 */ 1689static void 1690mmu_booke_remove_all(mmu_t mmu, vm_page_t m) 1691{ 1692 pv_entry_t pv, pvn; 1693 uint8_t hold_flag; 1694 1695 mtx_assert(&vm_page_queue_mtx, MA_OWNED); 1696 1697 for (pv = TAILQ_FIRST(&m->md.pv_list); pv != NULL; pv = pvn) { 1698 pvn = TAILQ_NEXT(pv, pv_link); 1699 1700 PMAP_LOCK(pv->pv_pmap); 1701 hold_flag = PTBL_HOLD_FLAG(pv->pv_pmap); 1702 pte_remove(mmu, pv->pv_pmap, pv->pv_va, hold_flag); 1703 PMAP_UNLOCK(pv->pv_pmap); 1704 } 1705 vm_page_flag_clear(m, PG_WRITEABLE); 1706} 1707 1708/* 1709 * Map a range of physical addresses into kernel virtual address space. 
1710 */ 1711static vm_offset_t 1712mmu_booke_map(mmu_t mmu, vm_offset_t *virt, vm_offset_t pa_start, 1713 vm_offset_t pa_end, int prot) 1714{ 1715 vm_offset_t sva = *virt; 1716 vm_offset_t va = sva; 1717 1718 //debugf("mmu_booke_map: s (sva = 0x%08x pa_start = 0x%08x pa_end = 0x%08x)\n", 1719 // sva, pa_start, pa_end); 1720 1721 while (pa_start < pa_end) { 1722 mmu_booke_kenter(mmu, va, pa_start); 1723 va += PAGE_SIZE; 1724 pa_start += PAGE_SIZE; 1725 } 1726 *virt = va; 1727 1728 //debugf("mmu_booke_map: e (va = 0x%08x)\n", va); 1729 return (sva); 1730} 1731 1732/* 1733 * The pmap must be activated before its address space can be accessed in any 1734 * way. 1735 */ 1736static void 1737mmu_booke_activate(mmu_t mmu, struct thread *td) 1738{ 1739 pmap_t pmap; 1740 1741 pmap = &td->td_proc->p_vmspace->vm_pmap; 1742 1743 CTR5(KTR_PMAP, "%s: s (td = %p, proc = '%s', id = %d, pmap = 0x%08x)", 1744 __func__, td, td->td_proc->p_comm, td->td_proc->p_pid, pmap); 1745 1746 KASSERT((pmap != kernel_pmap), ("mmu_booke_activate: kernel_pmap!")); 1747 1748 mtx_lock_spin(&sched_lock); 1749 1750 atomic_set_int(&pmap->pm_active, PCPU_GET(cpumask)); 1751 PCPU_SET(curpmap, pmap); 1752 1753 if (pmap->pm_tid[PCPU_GET(cpuid)] == TID_NONE) 1754 tid_alloc(pmap); 1755 1756 /* Load PID0 register with pmap tid value. */ 1757 mtspr(SPR_PID0, pmap->pm_tid[PCPU_GET(cpuid)]); 1758 __asm __volatile("isync"); 1759 1760 mtx_unlock_spin(&sched_lock); 1761 1762 CTR3(KTR_PMAP, "%s: e (tid = %d for '%s')", __func__, 1763 pmap->pm_tid[PCPU_GET(cpuid)], td->td_proc->p_comm); 1764} 1765 1766/* 1767 * Deactivate the specified process's address space. 1768 */ 1769static void 1770mmu_booke_deactivate(mmu_t mmu, struct thread *td) 1771{ 1772 pmap_t pmap; 1773 1774 pmap = &td->td_proc->p_vmspace->vm_pmap; 1775 1776 CTR5(KTR_PMAP, "%s: td=%p, proc = '%s', id = %d, pmap = 0x%08x", 1777 __func__, td, td->td_proc->p_comm, td->td_proc->p_pid, pmap); 1778 1779 atomic_clear_int(&pmap->pm_active, PCPU_GET(cpumask)); 1780 PCPU_SET(curpmap, NULL); 1781} 1782 1783/* 1784 * Copy the range specified by src_addr/len 1785 * from the source map to the range dst_addr/len 1786 * in the destination map. 1787 * 1788 * This routine is only advisory and need not do anything. 1789 */ 1790static void 1791mmu_booke_copy(pmap_t dst_pmap, pmap_t src_pmap, vm_offset_t dst_addr, 1792 vm_size_t len, vm_offset_t src_addr) 1793{ 1794 1795} 1796 1797/* 1798 * Set the physical protection on the specified range of this map as requested. 1799 */ 1800static void 1801mmu_booke_protect(mmu_t mmu, pmap_t pmap, vm_offset_t sva, vm_offset_t eva, 1802 vm_prot_t prot) 1803{ 1804 vm_offset_t va; 1805 vm_page_t m; 1806 pte_t *pte; 1807 1808 if ((prot & VM_PROT_READ) == VM_PROT_NONE) { 1809 mmu_booke_remove(mmu, pmap, sva, eva); 1810 return; 1811 } 1812 1813 if (prot & VM_PROT_WRITE) 1814 return; 1815 1816 vm_page_lock_queues(); 1817 PMAP_LOCK(pmap); 1818 for (va = sva; va < eva; va += PAGE_SIZE) { 1819 if ((pte = pte_find(mmu, pmap, va)) != NULL) { 1820 if (PTE_ISVALID(pte)) { 1821 m = PHYS_TO_VM_PAGE(PTE_PA(pte)); 1822 1823 mtx_lock_spin(&tlbivax_mutex); 1824 1825 /* Handle modified pages. */ 1826 if (PTE_ISMODIFIED(pte)) 1827 vm_page_dirty(m); 1828 1829 /* Referenced pages. 
*/ 1830 if (PTE_ISREFERENCED(pte)) 1831 vm_page_flag_set(m, PG_REFERENCED); 1832 1833 tlb0_flush_entry(va); 1834 pte->flags &= ~(PTE_UW | PTE_SW | PTE_MODIFIED | 1835 PTE_REFERENCED); 1836 1837 mtx_unlock_spin(&tlbivax_mutex); 1838 } 1839 } 1840 } 1841 PMAP_UNLOCK(pmap); 1842 vm_page_unlock_queues(); 1843} 1844 1845/* 1846 * Clear the write and modified bits in each of the given page's mappings. 1847 */ 1848static void 1849mmu_booke_remove_write(mmu_t mmu, vm_page_t m) 1850{ 1851 pv_entry_t pv; 1852 pte_t *pte; 1853 1854 mtx_assert(&vm_page_queue_mtx, MA_OWNED); 1855 if ((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) != 0 || 1856 (m->flags & PG_WRITEABLE) == 0) 1857 return; 1858 1859 TAILQ_FOREACH(pv, &m->md.pv_list, pv_link) { 1860 PMAP_LOCK(pv->pv_pmap); 1861 if ((pte = pte_find(mmu, pv->pv_pmap, pv->pv_va)) != NULL) { 1862 if (PTE_ISVALID(pte)) { 1863 m = PHYS_TO_VM_PAGE(PTE_PA(pte)); 1864 1865 mtx_lock_spin(&tlbivax_mutex); 1866 1867 /* Handle modified pages. */ 1868 if (PTE_ISMODIFIED(pte)) 1869 vm_page_dirty(m); 1870 1871 /* Referenced pages. */ 1872 if (PTE_ISREFERENCED(pte)) 1873 vm_page_flag_set(m, PG_REFERENCED); 1874 1875 /* Flush mapping from TLB0. */ 1876 pte->flags &= ~(PTE_UW | PTE_SW | PTE_MODIFIED | 1877 PTE_REFERENCED); 1878 1879 mtx_unlock_spin(&tlbivax_mutex); 1880 } 1881 } 1882 PMAP_UNLOCK(pv->pv_pmap); 1883 } 1884 vm_page_flag_clear(m, PG_WRITEABLE); 1885} 1886 1887static boolean_t 1888mmu_booke_page_executable(mmu_t mmu, vm_page_t m) 1889{ 1890 pv_entry_t pv; 1891 pte_t *pte; 1892 boolean_t executable; 1893 1894 executable = FALSE; 1895 TAILQ_FOREACH(pv, &m->md.pv_list, pv_link) { 1896 PMAP_LOCK(pv->pv_pmap); 1897 pte = pte_find(mmu, pv->pv_pmap, pv->pv_va); 1898 if (pte != NULL && PTE_ISVALID(pte) && (pte->flags & PTE_UX)) 1899 executable = TRUE; 1900 PMAP_UNLOCK(pv->pv_pmap); 1901 if (executable) 1902 break; 1903 } 1904 1905 return (executable); 1906} 1907 1908/* 1909 * Atomically extract and hold the physical page with the given 1910 * pmap and virtual address pair if that mapping permits the given 1911 * protection. 1912 */ 1913static vm_page_t 1914mmu_booke_extract_and_hold(mmu_t mmu, pmap_t pmap, vm_offset_t va, 1915 vm_prot_t prot) 1916{ 1917 pte_t *pte; 1918 vm_page_t m; 1919 uint32_t pte_wbit; 1920 1921 m = NULL; 1922 vm_page_lock_queues(); 1923 PMAP_LOCK(pmap); 1924 1925 pte = pte_find(mmu, pmap, va); 1926 if ((pte != NULL) && PTE_ISVALID(pte)) { 1927 if (pmap == kernel_pmap) 1928 pte_wbit = PTE_SW; 1929 else 1930 pte_wbit = PTE_UW; 1931 1932 if ((pte->flags & pte_wbit) || ((prot & VM_PROT_WRITE) == 0)) { 1933 m = PHYS_TO_VM_PAGE(PTE_PA(pte)); 1934 vm_page_hold(m); 1935 } 1936 } 1937 1938 vm_page_unlock_queues(); 1939 PMAP_UNLOCK(pmap); 1940 return (m); 1941} 1942 1943/* 1944 * Initialize a vm_page's machine-dependent fields. 1945 */ 1946static void 1947mmu_booke_page_init(mmu_t mmu, vm_page_t m) 1948{ 1949 1950 TAILQ_INIT(&m->md.pv_list); 1951} 1952 1953/* 1954 * mmu_booke_zero_page_area zeros the specified hardware page by 1955 * mapping it into virtual memory and using bzero to clear 1956 * its contents. 1957 * 1958 * off and size must reside within a single page. 1959 */ 1960static void 1961mmu_booke_zero_page_area(mmu_t mmu, vm_page_t m, int off, int size) 1962{ 1963 vm_offset_t va; 1964 1965 /* XXX KASSERT off and size are within a single page? 
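 * One possible form (sketch only): KASSERT(off + size <= PAGE_SIZE, ("mmu_booke_zero_page_area: off/size exceed a page"));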
*/ 1966 1967 mtx_lock(&zero_page_mutex); 1968 va = zero_page_va; 1969 1970 mmu_booke_kenter(mmu, va, VM_PAGE_TO_PHYS(m)); 1971 bzero((caddr_t)va + off, size); 1972 mmu_booke_kremove(mmu, va); 1973 1974 mtx_unlock(&zero_page_mutex); 1975} 1976 1977/* 1978 * mmu_booke_zero_page zeros the specified hardware page. 1979 */ 1980static void 1981mmu_booke_zero_page(mmu_t mmu, vm_page_t m) 1982{ 1983 1984 mmu_booke_zero_page_area(mmu, m, 0, PAGE_SIZE); 1985} 1986 1987/* 1988 * mmu_booke_copy_page copies the specified (machine independent) page by 1989 * mapping the page into virtual memory and using memcpy to copy the page, 1990 * one machine dependent page at a time. 1991 */ 1992static void 1993mmu_booke_copy_page(mmu_t mmu, vm_page_t sm, vm_page_t dm) 1994{ 1995 vm_offset_t sva, dva; 1996 1997 sva = copy_page_src_va; 1998 dva = copy_page_dst_va; 1999 2000 mtx_lock(&copy_page_mutex); 2001 mmu_booke_kenter(mmu, sva, VM_PAGE_TO_PHYS(sm)); 2002 mmu_booke_kenter(mmu, dva, VM_PAGE_TO_PHYS(dm)); 2003 memcpy((caddr_t)dva, (caddr_t)sva, PAGE_SIZE); 2004 mmu_booke_kremove(mmu, dva); 2005 mmu_booke_kremove(mmu, sva); 2006 mtx_unlock(&copy_page_mutex); 2007} 2008 2009/* 2010 * mmu_booke_zero_page_idle zeros the specified hardware page by mapping it 2011 * into virtual memory and using bzero to clear its contents. This is intended 2012 * to be called from the vm_pagezero process only and outside of Giant. No 2013 * lock is required. 2014 */ 2015static void 2016mmu_booke_zero_page_idle(mmu_t mmu, vm_page_t m) 2017{ 2018 vm_offset_t va; 2019 2020 va = zero_page_idle_va; 2021 mmu_booke_kenter(mmu, va, VM_PAGE_TO_PHYS(m)); 2022 bzero((caddr_t)va, PAGE_SIZE); 2023 mmu_booke_kremove(mmu, va); 2024} 2025 2026/* 2027 * Return whether or not the specified physical page was modified 2028 * in any of the physical maps. 2029 */ 2030static boolean_t 2031mmu_booke_is_modified(mmu_t mmu, vm_page_t m) 2032{ 2033 pte_t *pte; 2034 pv_entry_t pv; 2035 2036 mtx_assert(&vm_page_queue_mtx, MA_OWNED); 2037 if ((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) != 0) 2038 return (FALSE); 2039 2040 TAILQ_FOREACH(pv, &m->md.pv_list, pv_link) { 2041 PMAP_LOCK(pv->pv_pmap); 2042 if ((pte = pte_find(mmu, pv->pv_pmap, pv->pv_va)) != NULL) { 2043 if (!PTE_ISVALID(pte)) 2044 goto make_sure_to_unlock; 2045 2046 if (PTE_ISMODIFIED(pte)) { 2047 PMAP_UNLOCK(pv->pv_pmap); 2048 return (TRUE); 2049 } 2050 } 2051make_sure_to_unlock: 2052 PMAP_UNLOCK(pv->pv_pmap); 2053 } 2054 return (FALSE); 2055} 2056 2057/* 2058 * Return whether or not the specified virtual address is eligible 2059 * for prefault. 2060 */ 2061static boolean_t 2062mmu_booke_is_prefaultable(mmu_t mmu, pmap_t pmap, vm_offset_t addr) 2063{ 2064 2065 return (FALSE); 2066} 2067 2068/* 2069 * Clear the modify bits on the specified physical page.
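On Book-E this amounts to clearing the user/supervisor write bits (PTE_UW/PTE_SW) and PTE_MODIFIED in every managed mapping of the page and flushing the affected TLB0 entries.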
2070 */ 2071static void 2072mmu_booke_clear_modify(mmu_t mmu, vm_page_t m) 2073{ 2074 pte_t *pte; 2075 pv_entry_t pv; 2076 2077 mtx_assert(&vm_page_queue_mtx, MA_OWNED); 2078 if ((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) != 0) 2079 return; 2080 2081 TAILQ_FOREACH(pv, &m->md.pv_list, pv_link) { 2082 PMAP_LOCK(pv->pv_pmap); 2083 if ((pte = pte_find(mmu, pv->pv_pmap, pv->pv_va)) != NULL) { 2084 if (!PTE_ISVALID(pte)) 2085 goto make_sure_to_unlock; 2086 2087 mtx_lock_spin(&tlbivax_mutex); 2088 2089 if (pte->flags & (PTE_SW | PTE_UW | PTE_MODIFIED)) { 2090 tlb0_flush_entry(pv->pv_va); 2091 pte->flags &= ~(PTE_SW | PTE_UW | PTE_MODIFIED | 2092 PTE_REFERENCED); 2093 } 2094 2095 mtx_unlock_spin(&tlbivax_mutex); 2096 } 2097make_sure_to_unlock: 2098 PMAP_UNLOCK(pv->pv_pmap); 2099 } 2100} 2101 2102/* 2103 * Return a count of reference bits for a page, clearing those bits. 2104 * It is not necessary for every reference bit to be cleared, but it 2105 * is necessary that 0 only be returned when there are truly no 2106 * reference bits set. 2107 * 2108 * XXX: The exact number of bits to check and clear is a matter that 2109 * should be tested and standardized at some point in the future for 2110 * optimal aging of shared pages. 2111 */ 2112static int 2113mmu_booke_ts_referenced(mmu_t mmu, vm_page_t m) 2114{ 2115 pte_t *pte; 2116 pv_entry_t pv; 2117 int count; 2118 2119 mtx_assert(&vm_page_queue_mtx, MA_OWNED); 2120 if ((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) != 0) 2121 return (0); 2122 2123 count = 0; 2124 TAILQ_FOREACH(pv, &m->md.pv_list, pv_link) { 2125 PMAP_LOCK(pv->pv_pmap); 2126 if ((pte = pte_find(mmu, pv->pv_pmap, pv->pv_va)) != NULL) { 2127 if (!PTE_ISVALID(pte)) 2128 goto make_sure_to_unlock; 2129 2130 if (PTE_ISREFERENCED(pte)) { 2131 mtx_lock_spin(&tlbivax_mutex); 2132 2133 tlb0_flush_entry(pv->pv_va); 2134 pte->flags &= ~PTE_REFERENCED; 2135 2136 mtx_unlock_spin(&tlbivax_mutex); 2137 2138 if (++count > 4) { 2139 PMAP_UNLOCK(pv->pv_pmap); 2140 break; 2141 } 2142 } 2143 } 2144make_sure_to_unlock: 2145 PMAP_UNLOCK(pv->pv_pmap); 2146 } 2147 return (count); 2148} 2149 2150/* 2151 * Clear the reference bit on the specified physical page. 2152 */ 2153static void 2154mmu_booke_clear_reference(mmu_t mmu, vm_page_t m) 2155{ 2156 pte_t *pte; 2157 pv_entry_t pv; 2158 2159 mtx_assert(&vm_page_queue_mtx, MA_OWNED); 2160 if ((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) != 0) 2161 return; 2162 2163 TAILQ_FOREACH(pv, &m->md.pv_list, pv_link) { 2164 PMAP_LOCK(pv->pv_pmap); 2165 if ((pte = pte_find(mmu, pv->pv_pmap, pv->pv_va)) != NULL) { 2166 if (!PTE_ISVALID(pte)) 2167 goto make_sure_to_unlock; 2168 2169 if (PTE_ISREFERENCED(pte)) { 2170 mtx_lock_spin(&tlbivax_mutex); 2171 2172 tlb0_flush_entry(pv->pv_va); 2173 pte->flags &= ~PTE_REFERENCED; 2174 2175 mtx_unlock_spin(&tlbivax_mutex); 2176 } 2177 } 2178make_sure_to_unlock: 2179 PMAP_UNLOCK(pv->pv_pmap); 2180 } 2181} 2182 2183/* 2184 * Change wiring attribute for a map/virtual-address pair. 
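Only the PTE_WIRED flag and the pmap's wired_count statistic are updated; the translation itself is left intact.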
*/ 2185 2186static void 2187mmu_booke_change_wiring(mmu_t mmu, pmap_t pmap, vm_offset_t va, boolean_t wired) 2188{ 2189 pte_t *pte; 2190 2191 PMAP_LOCK(pmap); 2192 if ((pte = pte_find(mmu, pmap, va)) != NULL) { 2193 if (wired) { 2194 if (!PTE_ISWIRED(pte)) { 2195 pte->flags |= PTE_WIRED; 2196 pmap->pm_stats.wired_count++; 2197 } 2198 } else { 2199 if (PTE_ISWIRED(pte)) { 2200 pte->flags &= ~PTE_WIRED; 2201 pmap->pm_stats.wired_count--; 2202 } 2203 } 2204 } 2205 PMAP_UNLOCK(pmap); 2206} 2207 2208/* 2209 * Return true if the pmap's pv is one of the first 16 pvs linked to from this 2210 * page. This count may be changed upwards or downwards in the future; it is 2211 * only necessary that true be returned for a small subset of pmaps for proper 2212 * page aging. 2213 */ 2214static boolean_t 2215mmu_booke_page_exists_quick(mmu_t mmu, pmap_t pmap, vm_page_t m) 2216{ 2217 pv_entry_t pv; 2218 int loops; 2219 2220 mtx_assert(&vm_page_queue_mtx, MA_OWNED); 2221 if ((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) != 0) 2222 return (FALSE); 2223 2224 loops = 0; 2225 TAILQ_FOREACH(pv, &m->md.pv_list, pv_link) { 2226 if (pv->pv_pmap == pmap) 2227 return (TRUE); 2228 2229 if (++loops >= 16) 2230 break; 2231 } 2232 return (FALSE); 2233} 2234 2235/* 2236 * Return the number of managed mappings to the given physical page that are 2237 * wired. 2238 */ 2239static int 2240mmu_booke_page_wired_mappings(mmu_t mmu, vm_page_t m) 2241{ 2242 pv_entry_t pv; 2243 pte_t *pte; 2244 int count = 0; 2245 2246 if ((m->flags & PG_FICTITIOUS) != 0) 2247 return (count); 2248 mtx_assert(&vm_page_queue_mtx, MA_OWNED); 2249 2250 TAILQ_FOREACH(pv, &m->md.pv_list, pv_link) { 2251 PMAP_LOCK(pv->pv_pmap); 2252 if ((pte = pte_find(mmu, pv->pv_pmap, pv->pv_va)) != NULL) 2253 if (PTE_ISVALID(pte) && PTE_ISWIRED(pte)) 2254 count++; 2255 PMAP_UNLOCK(pv->pv_pmap); 2256 } 2257 2258 return (count); 2259} 2260 2261static int 2262mmu_booke_dev_direct_mapped(mmu_t mmu, vm_offset_t pa, vm_size_t size) 2263{ 2264 int i; 2265 vm_offset_t va; 2266 2267 /* 2268 * This currently does not work for entries that 2269 * overlap TLB1 entries. 2270 */ 2271 for (i = 0; i < tlb1_idx; i ++) { 2272 if (tlb1_iomapped(i, pa, size, &va) == 0) 2273 return (0); 2274 } 2275 2276 return (EFAULT); 2277} 2278 2279vm_offset_t 2280mmu_booke_dumpsys_map(mmu_t mmu, struct pmap_md *md, vm_size_t ofs, 2281 vm_size_t *sz) 2282{ 2283 vm_paddr_t pa, ppa; 2284 vm_offset_t va; 2285 vm_size_t gran; 2286 2287 /* Raw physical memory dumps don't have a virtual address. */ 2288 if (md->md_vaddr == ~0UL) { 2289 /* We always map a 256MB page at 256M. */ 2290 gran = 256 * 1024 * 1024; 2291 pa = md->md_paddr + ofs; 2292 ppa = pa & ~(gran - 1); 2293 ofs = pa - ppa; 2294 va = gran; 2295 tlb1_set_entry(va, ppa, gran, _TLB_ENTRY_IO); 2296 if (*sz > (gran - ofs)) 2297 *sz = gran - ofs; 2298 return (va + ofs); 2299 } 2300 2301 /* Minidumps are based on virtual memory addresses. */ 2302 va = md->md_vaddr + ofs; 2303 if (va >= kernstart + kernsize) { 2304 gran = PAGE_SIZE - (va & PAGE_MASK); 2305 if (*sz > gran) 2306 *sz = gran; 2307 } 2308 return (va); 2309} 2310 2311void 2312mmu_booke_dumpsys_unmap(mmu_t mmu, struct pmap_md *md, vm_size_t ofs, 2313 vm_offset_t va) 2314{ 2315 2316 /* Raw physical memory dumps don't have a virtual address. */ 2317 if (md->md_vaddr == ~0UL) { 2318 tlb1_idx--; 2319 tlb1[tlb1_idx].mas1 = 0; 2320 tlb1[tlb1_idx].mas2 = 0; 2321 tlb1[tlb1_idx].mas3 = 0; 2322 tlb1_write_entry(tlb1_idx); 2323 return; 2324 } 2325 2326 /* Minidumps are based on virtual memory addresses.
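The pages are reached through the regular kernel mappings, so there is nothing to tear down here.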
*/ 2327 /* Nothing to do... */ 2328} 2329 2330struct pmap_md * 2331mmu_booke_scan_md(mmu_t mmu, struct pmap_md *prev) 2332{ 2333 static struct pmap_md md; 2334 struct bi_mem_region *mr; 2335 pte_t *pte; 2336 vm_offset_t va; 2337 2338 if (dumpsys_minidump) { 2339 md.md_paddr = ~0UL; /* Minidumps use virtual addresses. */ 2340 if (prev == NULL) { 2341 /* 1st: kernel .data and .bss. */ 2342 md.md_index = 1; 2343 md.md_vaddr = trunc_page((uintptr_t)_etext); 2344 md.md_size = round_page((uintptr_t)_end) - md.md_vaddr; 2345 return (&md); 2346 } 2347 switch (prev->md_index) { 2348 case 1: 2349 /* 2nd: msgbuf and tables (see pmap_bootstrap()). */ 2350 md.md_index = 2; 2351 md.md_vaddr = data_start; 2352 md.md_size = data_end - data_start; 2353 break; 2354 case 2: 2355 /* 3rd: kernel VM. */ 2356 va = prev->md_vaddr + prev->md_size; 2357 /* Find start of next chunk (from va). */ 2358 while (va < virtual_end) { 2359 /* Don't dump the buffer cache. */ 2360 if (va >= kmi.buffer_sva && 2361 va < kmi.buffer_eva) { 2362 va = kmi.buffer_eva; 2363 continue; 2364 } 2365 pte = pte_find(mmu, kernel_pmap, va); 2366 if (pte != NULL && PTE_ISVALID(pte)) 2367 break; 2368 va += PAGE_SIZE; 2369 } 2370 if (va < virtual_end) { 2371 md.md_vaddr = va; 2372 va += PAGE_SIZE; 2373 /* Find last page in chunk. */ 2374 while (va < virtual_end) { 2375 /* Don't run into the buffer cache. */ 2376 if (va == kmi.buffer_sva) 2377 break; 2378 pte = pte_find(mmu, kernel_pmap, va); 2379 if (pte == NULL || !PTE_ISVALID(pte)) 2380 break; 2381 va += PAGE_SIZE; 2382 } 2383 md.md_size = va - md.md_vaddr; 2384 break; 2385 } 2386 md.md_index = 3; 2387 /* FALLTHROUGH */ 2388 default: 2389 return (NULL); 2390 } 2391 } else { /* Raw physical dump: walk the bootinfo memory regions. */ 2392 mr = bootinfo_mr(); 2393 if (prev == NULL) { 2394 /* First physical chunk. */ 2395 md.md_paddr = mr->mem_base; 2396 md.md_size = mr->mem_size; 2397 md.md_vaddr = ~0UL; 2398 md.md_index = 1; 2399 } else if (md.md_index < bootinfo->bi_mem_reg_no) { 2400 md.md_paddr = mr[md.md_index].mem_base; 2401 md.md_size = mr[md.md_index].mem_size; 2402 md.md_vaddr = ~0UL; 2403 md.md_index++; 2404 } else { 2405 /* There's no next physical chunk. */ 2406 return (NULL); 2407 } 2408 } 2409 2410 return (&md); 2411} 2412 2413/* 2414 * Map a set of physical memory pages into the kernel virtual address space. 2415 * Return a pointer to where it is mapped. This routine is intended to be used 2416 * for mapping device memory, NOT real memory. 2417 */ 2418static void * 2419mmu_booke_mapdev(mmu_t mmu, vm_offset_t pa, vm_size_t size) 2420{ 2421 void *res; 2422 uintptr_t va; 2423 vm_size_t sz; 2424 2425 va = (pa >= 0x80000000) ? pa : (0xe2000000 + pa); 2426 res = (void *)va; 2427 2428 do { 2429 sz = 1 << (ilog2(size) & ~1); 2430 if (bootverbose) 2431 printf("Wiring VA=%x to PA=%x (size=%x), " 2432 "using TLB1[%d]\n", va, pa, sz, tlb1_idx); 2433 tlb1_set_entry(va, pa, sz, _TLB_ENTRY_IO); 2434 size -= sz; 2435 pa += sz; 2436 va += sz; 2437 } while (size > 0); 2438 2439 return (res); 2440} 2441 2442/* 2443 * 'Unmap' a range mapped by mmu_booke_mapdev(). 2444 */ 2445static void 2446mmu_booke_unmapdev(mmu_t mmu, vm_offset_t va, vm_size_t size) 2447{ 2448 vm_offset_t base, offset; 2449 2450 /* 2451 * Unmap only if this is inside kernel virtual space.
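Device ranges that were wired directly through TLB1 (at virtual addresses outside the KVA window) are permanent and are simply left in place.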
2452 */ 2453 if ((va >= VM_MIN_KERNEL_ADDRESS) && (va <= VM_MAX_KERNEL_ADDRESS)) { 2454 base = trunc_page(va); 2455 offset = va & PAGE_MASK; 2456 size = roundup(offset + size, PAGE_SIZE); 2457 kmem_free(kernel_map, base, size); 2458 } 2459} 2460 2461/* 2462 * mmu_booke_object_init_pt preloads the ptes for a given object into the 2463 * specified pmap. This eliminates the blast of soft faults on process startup 2464 * and immediately after an mmap. 2465 */ 2466static void 2467mmu_booke_object_init_pt(mmu_t mmu, pmap_t pmap, vm_offset_t addr, 2468 vm_object_t object, vm_pindex_t pindex, vm_size_t size) 2469{ 2470 2471 VM_OBJECT_LOCK_ASSERT(object, MA_OWNED); 2472 KASSERT(object->type == OBJT_DEVICE, 2473 ("mmu_booke_object_init_pt: non-device object")); 2474} 2475 2476/* 2477 * Perform the pmap work for mincore. 2478 */ 2479static int 2480mmu_booke_mincore(mmu_t mmu, pmap_t pmap, vm_offset_t addr) 2481{ 2482 2483 TODO; 2484 return (0); 2485} 2486 2487/**************************************************************************/ 2488/* TID handling */ 2489/**************************************************************************/ 2490 2491/* 2492 * Allocate a TID. If necessary, steal one from someone else. 2493 * The new TID is flushed from the TLB before returning. 2494 */ 2495static tlbtid_t 2496tid_alloc(pmap_t pmap) 2497{ 2498 tlbtid_t tid; 2499 int thiscpu; 2500 2501 KASSERT((pmap != kernel_pmap), ("tid_alloc: kernel pmap")); 2502 2503 CTR2(KTR_PMAP, "%s: s (pmap = %p)", __func__, pmap); 2504 2505 thiscpu = PCPU_GET(cpuid); 2506 2507 tid = PCPU_GET(tid_next); 2508 if (tid > TID_MAX) 2509 tid = TID_MIN; 2510 PCPU_SET(tid_next, tid + 1); 2511 2512 /* If we are stealing TID then clear the relevant pmap's field */ 2513 if (tidbusy[thiscpu][tid] != NULL) { 2514 2515 CTR2(KTR_PMAP, "%s: warning: stealing tid %d", __func__, tid); 2516 2517 tidbusy[thiscpu][tid]->pm_tid[thiscpu] = TID_NONE; 2518 2519 /* Flush all entries from TLB0 matching this TID. */ 2520 tid_flush(tid); 2521 } 2522 2523 tidbusy[thiscpu][tid] = pmap; 2524 pmap->pm_tid[thiscpu] = tid; 2525 __asm __volatile("msync; isync"); 2526 2527 CTR3(KTR_PMAP, "%s: e (%02d next = %02d)", __func__, tid, 2528 PCPU_GET(tid_next)); 2529 2530 return (tid); 2531} 2532 2533/**************************************************************************/ 2534/* TLB0 handling */ 2535/**************************************************************************/ 2536 2537static void 2538tlb_print_entry(int i, uint32_t mas1, uint32_t mas2, uint32_t mas3, 2539 uint32_t mas7) 2540{ 2541 int as; 2542 char desc[3]; 2543 tlbtid_t tid; 2544 vm_size_t size; 2545 unsigned int tsize; 2546 2547 desc[2] = '\0'; 2548 if (mas1 & MAS1_VALID) 2549 desc[0] = 'V'; 2550 else 2551 desc[0] = ' '; 2552 2553 if (mas1 & MAS1_IPROT) 2554 desc[1] = 'P'; 2555 else 2556 desc[1] = ' '; 2557 2558 as = (mas1 & MAS1_TS_MASK) ? 1 : 0; 2559 tid = MAS1_GETTID(mas1); 2560 2561 tsize = (mas1 & MAS1_TSIZE_MASK) >> MAS1_TSIZE_SHIFT; 2562 size = 0; 2563 if (tsize) 2564 size = tsize2size(tsize); 2565 2566 debugf("%3d: (%s) [AS=%d] " 2567 "sz = 0x%08x tsz = %d tid = %d mas1 = 0x%08x " 2568 "mas2(va) = 0x%08x mas3(pa) = 0x%08x mas7 = 0x%08x\n", 2569 i, desc, as, size, tsize, tid, mas1, mas2, mas3, mas7); 2570} 2571 2572/* Convert TLB0 va and way number to tlb0[] table index. 
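The index is way * TLB0_ENTRIES_PER_WAY plus the set number selected by the low-order EPN bits of va.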
*/ 2573static inline unsigned int 2574tlb0_tableidx(vm_offset_t va, unsigned int way) 2575{ 2576 unsigned int idx; 2577 2578 idx = (way * TLB0_ENTRIES_PER_WAY); 2579 idx += (va & MAS2_TLB0_ENTRY_IDX_MASK) >> MAS2_TLB0_ENTRY_IDX_SHIFT; 2580 return (idx); 2581} 2582 2583/* 2584 * Invalidate TLB0 entry. 2585 */ 2586static inline void 2587tlb0_flush_entry(vm_offset_t va) 2588{ 2589 2590 CTR2(KTR_PMAP, "%s: s va=0x%08x", __func__, va); 2591 2592 mtx_assert(&tlbivax_mutex, MA_OWNED); 2593 2594 __asm __volatile("tlbivax 0, %0" :: "r"(va & MAS2_EPN_MASK)); 2595 __asm __volatile("isync; msync"); 2596 __asm __volatile("tlbsync; msync"); 2597 2598 CTR1(KTR_PMAP, "%s: e", __func__); 2599} 2600 2601/* Print out contents of the MAS registers for each TLB0 entry */ 2602void 2603tlb0_print_tlbentries(void) 2604{ 2605 uint32_t mas0, mas1, mas2, mas3, mas7; 2606 int entryidx, way, idx; 2607 2608 debugf("TLB0 entries:\n"); 2609 for (way = 0; way < TLB0_WAYS; way ++) 2610 for (entryidx = 0; entryidx < TLB0_ENTRIES_PER_WAY; entryidx++) { 2611 2612 mas0 = MAS0_TLBSEL(0) | MAS0_ESEL(way); 2613 mtspr(SPR_MAS0, mas0); 2614 __asm __volatile("isync"); 2615 2616 mas2 = entryidx << MAS2_TLB0_ENTRY_IDX_SHIFT; 2617 mtspr(SPR_MAS2, mas2); 2618 2619 __asm __volatile("isync; tlbre"); 2620 2621 mas1 = mfspr(SPR_MAS1); 2622 mas2 = mfspr(SPR_MAS2); 2623 mas3 = mfspr(SPR_MAS3); 2624 mas7 = mfspr(SPR_MAS7); 2625 2626 idx = tlb0_tableidx(mas2, way); 2627 tlb_print_entry(idx, mas1, mas2, mas3, mas7); 2628 } 2629} 2630 2631/**************************************************************************/ 2632/* TLB1 handling */ 2633/**************************************************************************/ 2634 2635/* 2636 * TLB1 mapping notes: 2637 * 2638 * TLB1[0] CCSRBAR 2639 * TLB1[1] Kernel text and data. 2640 * TLB1[2-15] Additional kernel text and data mappings (if required), PCI 2641 * windows, other device mappings. 2642 */ 2643 2644/* 2645 * Write given entry to TLB1 hardware. 2646 * Use 32 bit pa, clear 4 high-order bits of RPN (mas7). 2647 */ 2648static void 2649tlb1_write_entry(unsigned int idx) 2650{ 2651 uint32_t mas0, mas7; 2652 2653 //debugf("tlb1_write_entry: s\n"); 2654 2655 /* Clear high order RPN bits */ 2656 mas7 = 0; 2657 2658 /* Select entry */ 2659 mas0 = MAS0_TLBSEL(1) | MAS0_ESEL(idx); 2660 //debugf("tlb1_write_entry: mas0 = 0x%08x\n", mas0); 2661 2662 mtspr(SPR_MAS0, mas0); 2663 __asm __volatile("isync"); 2664 mtspr(SPR_MAS1, tlb1[idx].mas1); 2665 __asm __volatile("isync"); 2666 mtspr(SPR_MAS2, tlb1[idx].mas2); 2667 __asm __volatile("isync"); 2668 mtspr(SPR_MAS3, tlb1[idx].mas3); 2669 __asm __volatile("isync"); 2670 mtspr(SPR_MAS7, mas7); 2671 __asm __volatile("isync; tlbwe; isync; msync"); 2672 2673 //debugf("tlb1_write_entry: e\n"); 2674} 2675 2676/* 2677 * Return the largest uint value log such that 2^log <= num. 2678 */ 2679static unsigned int 2680ilog2(unsigned int num) 2681{ 2682 int lz; 2683 2684 __asm ("cntlzw %0, %1" : "=r" (lz) : "r" (num)); 2685 return (31 - lz); 2686} 2687 2688/* 2689 * Convert TLB TSIZE value to mapped region size. 2690 */ 2691static vm_size_t 2692tsize2size(unsigned int tsize) 2693{ 2694 2695 /* 2696 * size = 4^tsize KB 2697 * size = 4^tsize * 2^10 = 2^(2 * tsize + 10) 2698 */ 2699 2700 return ((1 << (2 * tsize)) * 1024); 2701} 2702 2703/* 2704 * Convert region size (must be a power of 4) to TLB TSIZE value.
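For example, a 16 MB region gives ilog2(16 MB) / 2 - 5 = 24 / 2 - 5 = 7, and tsize2size(7) = 4^7 KB = 16 MB, so the two conversions are inverses for valid sizes.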
2705 */ 2706static unsigned int 2707size2tsize(vm_size_t size) 2708{ 2709 2710 return (ilog2(size) / 2 - 5); 2711} 2712 2713/* 2714 * Register permanent kernel mapping in TLB1. 2715 * 2716 * Entries are created starting from index 0 (current free entry is 2717 * kept in tlb1_idx) and are not supposed to be invalidated. 2718 */ 2719static int 2720tlb1_set_entry(vm_offset_t va, vm_offset_t pa, vm_size_t size, 2721 uint32_t flags) 2722{ 2723 uint32_t ts, tid; 2724 int tsize; 2725 2726 if (tlb1_idx >= TLB1_ENTRIES) { 2727 printf("tlb1_set_entry: TLB1 full!\n"); 2728 return (-1); 2729 } 2730 2731 /* Convert size to TSIZE */ 2732 tsize = size2tsize(size); 2733 2734 tid = (TID_KERNEL << MAS1_TID_SHIFT) & MAS1_TID_MASK; 2735 /* XXX TS is hard coded to 0 for now as we only use single address space */ 2736 ts = (0 << MAS1_TS_SHIFT) & MAS1_TS_MASK; 2737 2738 /* XXX LOCK tlb1[] */ 2739 2740 tlb1[tlb1_idx].mas1 = MAS1_VALID | MAS1_IPROT | ts | tid; 2741 tlb1[tlb1_idx].mas1 |= ((tsize << MAS1_TSIZE_SHIFT) & MAS1_TSIZE_MASK); 2742 tlb1[tlb1_idx].mas2 = (va & MAS2_EPN_MASK) | flags; 2743 2744 /* Set supervisor RWX permission bits */ 2745 tlb1[tlb1_idx].mas3 = (pa & MAS3_RPN) | MAS3_SR | MAS3_SW | MAS3_SX; 2746 2747 tlb1_write_entry(tlb1_idx++); 2748 2749 /* XXX UNLOCK tlb1[] */ 2750 2751 /* 2752 * XXX in general TLB1 updates should be propagated between CPUs, 2753 * since current design assumes to have the same TLB1 set-up on all 2754 * cores. 2755 */ 2756 return (0); 2757} 2758 2759static int 2760tlb1_entry_size_cmp(const void *a, const void *b) 2761{ 2762 const vm_size_t *sza; 2763 const vm_size_t *szb; 2764 2765 sza = a; 2766 szb = b; 2767 if (*sza > *szb) 2768 return (-1); 2769 else if (*sza < *szb) 2770 return (1); 2771 else 2772 return (0); 2773} 2774 2775/* 2776 * Map in contiguous RAM region into the TLB1 using maximum of 2777 * KERNEL_REGION_MAX_TLB_ENTRIES entries. 2778 * 2779 * If necessary round up last entry size and return total size 2780 * used by all allocated entries. 2781 */ 2782vm_size_t 2783tlb1_mapin_region(vm_offset_t va, vm_offset_t pa, vm_size_t size) 2784{ 2785 vm_size_t entry_size[KERNEL_REGION_MAX_TLB_ENTRIES]; 2786 vm_size_t mapped_size, sz, esz; 2787 unsigned int log; 2788 int i; 2789 2790 CTR4(KTR_PMAP, "%s: region size = 0x%08x va = 0x%08x pa = 0x%08x", 2791 __func__, size, va, pa); 2792 2793 mapped_size = 0; 2794 sz = size; 2795 memset(entry_size, 0, sizeof(entry_size)); 2796 2797 /* Calculate entry sizes. */ 2798 for (i = 0; i < KERNEL_REGION_MAX_TLB_ENTRIES && sz > 0; i++) { 2799 2800 /* Largest region that is power of 4 and fits within size */ 2801 log = ilog2(sz) / 2; 2802 esz = 1 << (2 * log); 2803 2804 /* If this is last entry cover remaining size. */ 2805 if (i == KERNEL_REGION_MAX_TLB_ENTRIES - 1) { 2806 while (esz < sz) 2807 esz = esz << 2; 2808 } 2809 2810 entry_size[i] = esz; 2811 mapped_size += esz; 2812 if (esz < sz) 2813 sz -= esz; 2814 else 2815 sz = 0; 2816 } 2817 2818 /* Sort entry sizes, required to get proper entry address alignment. */ 2819 qsort(entry_size, KERNEL_REGION_MAX_TLB_ENTRIES, 2820 sizeof(vm_size_t), tlb1_entry_size_cmp); 2821 2822 /* Load TLB1 entries. 
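As an illustration (assuming KERNEL_REGION_MAX_TLB_ENTRIES is at least 3), a 36 MB region is split above into 16 MB + 16 MB + 4 MB entries; mapping the sorted sizes largest-first keeps va and pa naturally aligned for each entry written below.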
*/ 2823 for (i = 0; i < KERNEL_REGION_MAX_TLB_ENTRIES; i++) { 2824 esz = entry_size[i]; 2825 if (!esz) 2826 break; 2827 2828 CTR5(KTR_PMAP, "%s: entry %d: sz = 0x%08x (va = 0x%08x " 2829 "pa = 0x%08x)", __func__, tlb1_idx, esz, va, pa); 2830 2831 tlb1_set_entry(va, pa, esz, _TLB_ENTRY_MEM); 2832 2833 va += esz; 2834 pa += esz; 2835 } 2836 2837 CTR3(KTR_PMAP, "%s: mapped size 0x%08x (wasted space 0x%08x)", 2838 __func__, mapped_size, mapped_size - size); 2839 2840 return (mapped_size); 2841} 2842 2843/* 2844 * TLB1 initialization routine, to be called after the very first 2845 * assembler level setup done in locore.S. 2846 */ 2847void 2848tlb1_init(vm_offset_t ccsrbar) 2849{ 2850 uint32_t mas0; 2851 2852 /* TLB1[1] is used to map the kernel. Save that entry. */ 2853 mas0 = MAS0_TLBSEL(1) | MAS0_ESEL(1); 2854 mtspr(SPR_MAS0, mas0); 2855 __asm __volatile("isync; tlbre"); 2856 2857 tlb1[1].mas1 = mfspr(SPR_MAS1); 2858 tlb1[1].mas2 = mfspr(SPR_MAS2); 2859 tlb1[1].mas3 = mfspr(SPR_MAS3); 2860 2861 /* Map in CCSRBAR in TLB1[0] */ 2862 tlb1_idx = 0; 2863 tlb1_set_entry(CCSRBAR_VA, ccsrbar, CCSRBAR_SIZE, _TLB_ENTRY_IO); 2864 /* 2865 * Set the next available TLB1 entry index. Note TLB1[1] is reserved 2866 * for the initial mapping of kernel text+data, which was set up early in 2867 * locore.S, so we need to skip this [busy] entry. 2868 */ 2869 tlb1_idx = 2; 2870 2871 /* Setup TLB miss defaults */ 2872 set_mas4_defaults(); 2873} 2874 2875/* 2876 * Setup MAS4 defaults. 2877 * These values are loaded to MAS0-2 on a TLB miss. 2878 */ 2879static void 2880set_mas4_defaults(void) 2881{ 2882 uint32_t mas4; 2883 2884 /* Defaults: TLB0, PID0, TSIZED=4K */ 2885 mas4 = MAS4_TLBSELD0; 2886 mas4 |= (TLB_SIZE_4K << MAS4_TSIZED_SHIFT) & MAS4_TSIZED_MASK; 2887 2888 mtspr(SPR_MAS4, mas4); 2889 __asm __volatile("isync"); 2890} 2891 2892/* 2893 * Print out contents of the MAS registers for each TLB1 entry 2894 */ 2895void 2896tlb1_print_tlbentries(void) 2897{ 2898 uint32_t mas0, mas1, mas2, mas3, mas7; 2899 int i; 2900 2901 debugf("TLB1 entries:\n"); 2902 for (i = 0; i < TLB1_ENTRIES; i++) { 2903 2904 mas0 = MAS0_TLBSEL(1) | MAS0_ESEL(i); 2905 mtspr(SPR_MAS0, mas0); 2906 2907 __asm __volatile("isync; tlbre"); 2908 2909 mas1 = mfspr(SPR_MAS1); 2910 mas2 = mfspr(SPR_MAS2); 2911 mas3 = mfspr(SPR_MAS3); 2912 mas7 = mfspr(SPR_MAS7); 2913 2914 tlb_print_entry(i, mas1, mas2, mas3, mas7); 2915 } 2916} 2917 2918/* 2919 * Print out contents of the in-ram tlb1 table. 2920 */ 2921void 2922tlb1_print_entries(void) 2923{ 2924 int i; 2925 2926 debugf("tlb1[] table entries:\n"); 2927 for (i = 0; i < TLB1_ENTRIES; i++) 2928 tlb_print_entry(i, tlb1[i].mas1, tlb1[i].mas2, tlb1[i].mas3, 0); 2929} 2930 2931/* 2932 * Return 0 if the physical IO range is encompassed by one of 2933 * the TLB1 entries, otherwise return a related error code.
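The entry must be valid, cache-inhibited and guarded (MAS2_I | MAS2_G), supervisor read/write, and must cover the whole [pa, pa + size) range; on success *va is set to the virtual address of the mapping.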
*/ 2935static int 2936tlb1_iomapped(int i, vm_paddr_t pa, vm_size_t size, vm_offset_t *va) 2937{ 2938 uint32_t prot; 2939 vm_paddr_t pa_start; 2940 vm_paddr_t pa_end; 2941 unsigned int entry_tsize; 2942 vm_size_t entry_size; 2943 2944 *va = (vm_offset_t)NULL; 2945 2946 /* Skip invalid entries */ 2947 if (!(tlb1[i].mas1 & MAS1_VALID)) 2948 return (EINVAL); 2949 2950 /* 2951 * The entry must be cache-inhibited, guarded, and r/w 2952 * so it can function as an i/o page 2953 */ 2954 prot = tlb1[i].mas2 & (MAS2_I | MAS2_G); 2955 if (prot != (MAS2_I | MAS2_G)) 2956 return (EPERM); 2957 2958 prot = tlb1[i].mas3 & (MAS3_SR | MAS3_SW); 2959 if (prot != (MAS3_SR | MAS3_SW)) 2960 return (EPERM); 2961 2962 /* The address should be within the entry range (pa_end is inclusive). */ 2963 entry_tsize = (tlb1[i].mas1 & MAS1_TSIZE_MASK) >> MAS1_TSIZE_SHIFT; 2964 KASSERT((entry_tsize), ("tlb1_iomapped: invalid entry tsize")); 2965 2966 entry_size = tsize2size(entry_tsize); 2967 pa_start = tlb1[i].mas3 & MAS3_RPN; 2968 pa_end = pa_start + entry_size - 1; 2969 2970 if ((pa < pa_start) || ((pa + size - 1) > pa_end)) 2971 return (ERANGE); 2972 2973 /* Return virtual address of this mapping. */ 2974 *va = (tlb1[i].mas2 & MAS2_EPN_MASK) + (pa - pa_start); 2975 return (0); 2976} 2977