pmap.c revision 191445
1/*- 2 * Copyright (C) 2007-2008 Semihalf, Rafal Jaworowski <raj@semihalf.com> 3 * Copyright (C) 2006 Semihalf, Marian Balakowicz <m8@semihalf.com> 4 * All rights reserved. 5 * 6 * Redistribution and use in source and binary forms, with or without 7 * modification, are permitted provided that the following conditions 8 * are met: 9 * 1. Redistributions of source code must retain the above copyright 10 * notice, this list of conditions and the following disclaimer. 11 * 2. Redistributions in binary form must reproduce the above copyright 12 * notice, this list of conditions and the following disclaimer in the 13 * documentation and/or other materials provided with the distribution. 14 * 15 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR 16 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES 17 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN 18 * NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 19 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED 20 * TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR 21 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF 22 * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING 23 * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS 24 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 25 * 26 * Some hw specific parts of this pmap were derived or influenced 27 * by NetBSD's ibm4xx pmap module. More generic code is shared with 28 * a few other pmap modules from the FreeBSD tree. 29 */ 30 31 /* 32 * VM layout notes: 33 * 34 * Kernel and user threads run within one common virtual address space 35 * defined by AS=0. 36 * 37 * Virtual address space layout: 38 * ----------------------------- 39 * 0x0000_0000 - 0xafff_ffff : user process 40 * 0xb000_0000 - 0xbfff_ffff : pmap_mapdev()-ed area (PCI/PCIE etc.) 41 * 0xc000_0000 - 0xc0ff_ffff : kernel reserved 42 * 0xc000_0000 - data_end : kernel code+data, env, metadata etc. 43 * 0xc100_0000 - 0xfeef_ffff : KVA 44 * 0xc100_0000 - 0xc100_3fff : reserved for page zero/copy 45 * 0xc100_4000 - 0xc200_3fff : reserved for ptbl bufs 46 * 0xc200_4000 - 0xc200_8fff : guard page + kstack0 47 * 0xc200_9000 - 0xfeef_ffff : actual free KVA space 48 * 0xfef0_0000 - 0xffff_ffff : I/O devices region 49 */ 50 51#include <sys/cdefs.h> 52__FBSDID("$FreeBSD: head/sys/powerpc/booke/pmap.c 191445 2009-04-24 02:53:38Z marcel $"); 53 54#include <sys/types.h> 55#include <sys/param.h> 56#include <sys/malloc.h> 57#include <sys/ktr.h> 58#include <sys/proc.h> 59#include <sys/user.h> 60#include <sys/queue.h> 61#include <sys/systm.h> 62#include <sys/kernel.h> 63#include <sys/msgbuf.h> 64#include <sys/lock.h> 65#include <sys/mutex.h> 66#include <sys/vmmeter.h> 67 68#include <vm/vm.h> 69#include <vm/vm_page.h> 70#include <vm/vm_kern.h> 71#include <vm/vm_pageout.h> 72#include <vm/vm_extern.h> 73#include <vm/vm_object.h> 74#include <vm/vm_param.h> 75#include <vm/vm_map.h> 76#include <vm/vm_pager.h> 77#include <vm/uma.h> 78 79#include <machine/bootinfo.h> 80#include <machine/cpu.h> 81#include <machine/pcb.h> 82#include <machine/powerpc.h> 83 84#include <machine/tlb.h> 85#include <machine/spr.h> 86#include <machine/vmparam.h> 87#include <machine/md_var.h> 88#include <machine/mmuvar.h> 89#include <machine/pmap.h> 90#include <machine/pte.h> 91 92#include "mmu_if.h" 93 94#define DEBUG 95#undef DEBUG 96 97#ifdef DEBUG 98#define debugf(fmt, args...) 
printf(fmt, ##args)
#else
#define debugf(fmt, args...)
#endif

#define TODO panic("%s: not implemented", __func__);

#include "opt_sched.h"
#ifndef SCHED_4BSD
#error "e500 only works with SCHED_4BSD which uses a global scheduler lock."
#endif
extern struct mtx sched_lock;

extern int dumpsys_minidump;

extern unsigned char _etext[];
extern unsigned char _end[];

/* Kernel physical load address. */
extern uint32_t kernload;
vm_offset_t kernstart;
vm_size_t kernsize;

/* Message buffer and tables. */
static vm_offset_t data_start;
static vm_size_t data_end;

struct mem_region availmem_regions[MEM_REGIONS];
int availmem_regions_sz;

/* Reserved KVA space and mutex for mmu_booke_zero_page. */
static vm_offset_t zero_page_va;
static struct mtx zero_page_mutex;

static struct mtx tlbivax_mutex;

/*
 * Reserved KVA space for mmu_booke_zero_page_idle. This is used
 * by the idle thread only, no lock required.
 */
static vm_offset_t zero_page_idle_va;

/* Reserved KVA space and mutex for mmu_booke_copy_page. */
static vm_offset_t copy_page_src_va;
static vm_offset_t copy_page_dst_va;
static struct mtx copy_page_mutex;

/**************************************************************************/
/* PMAP */
/**************************************************************************/

static void mmu_booke_enter_locked(mmu_t, pmap_t, vm_offset_t, vm_page_t,
    vm_prot_t, boolean_t);

unsigned int kptbl_min;		/* Index of the first kernel ptbl. */
unsigned int kernel_ptbls;	/* Number of KVA ptbls. */

static int pagedaemon_waken;

/*
 * If user pmap is processed with mmu_booke_remove and the resident count
 * drops to 0, there are no more pages to remove, so we need not continue.
 */
#define PMAP_REMOVE_DONE(pmap) \
	((pmap) != kernel_pmap && (pmap)->pm_stats.resident_count == 0)

extern void tlb_lock(uint32_t *);
extern void tlb_unlock(uint32_t *);
extern void tid_flush(tlbtid_t);

/**************************************************************************/
/* TLB and TID handling */
/**************************************************************************/

/* Translation ID busy table */
static volatile pmap_t tidbusy[MAXCPU][TID_MAX + 1];

/*
 * TLB0 capabilities (entry, way numbers etc.). These can vary between e500
 * core revisions and should be read from h/w registers during early config.
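 *
 * Purely as a hedged illustration of the arithmetic that tlb0_get_tlbconf()
 * performs below (the real values always come from the TLB0CFG register at
 * runtime, so the numbers here are an assumption, not a statement about any
 * particular core): a part reporting 512 entries arranged in 4 ways would
 * end up with
 *
 *	tlb0_entries         = 512;
 *	tlb0_ways            = 4;
 *	tlb0_entries_per_way = 512 / 4;		-- i.e. 128 entries per way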
 */
uint32_t tlb0_entries;
uint32_t tlb0_ways;
uint32_t tlb0_entries_per_way;

#define TLB0_ENTRIES		(tlb0_entries)
#define TLB0_WAYS		(tlb0_ways)
#define TLB0_ENTRIES_PER_WAY	(tlb0_entries_per_way)

#define TLB1_ENTRIES 16

/* In-ram copy of the TLB1 */
static tlb_entry_t tlb1[TLB1_ENTRIES];

/* Next free entry in the TLB1 */
static unsigned int tlb1_idx;

static tlbtid_t tid_alloc(struct pmap *);

static void tlb_print_entry(int, uint32_t, uint32_t, uint32_t, uint32_t);

static int tlb1_set_entry(vm_offset_t, vm_offset_t, vm_size_t, uint32_t);
static void tlb1_write_entry(unsigned int);
static int tlb1_iomapped(int, vm_paddr_t, vm_size_t, vm_offset_t *);
static vm_size_t tlb1_mapin_region(vm_offset_t, vm_offset_t, vm_size_t);

static vm_size_t tsize2size(unsigned int);
static unsigned int size2tsize(vm_size_t);
static unsigned int ilog2(unsigned int);

static void set_mas4_defaults(void);

static inline void tlb0_flush_entry(vm_offset_t);
static inline unsigned int tlb0_tableidx(vm_offset_t, unsigned int);

/**************************************************************************/
/* Page table management */
/**************************************************************************/

/* Data for the pv entry allocation mechanism */
static uma_zone_t pvzone;
static struct vm_object pvzone_obj;
static int pv_entry_count = 0, pv_entry_max = 0, pv_entry_high_water = 0;

#define PV_ENTRY_ZONE_MIN	2048	/* min pv entries in uma zone */

#ifndef PMAP_SHPGPERPROC
#define PMAP_SHPGPERPROC	200
#endif

static void ptbl_init(void);
static struct ptbl_buf *ptbl_buf_alloc(void);
static void ptbl_buf_free(struct ptbl_buf *);
static void ptbl_free_pmap_ptbl(pmap_t, pte_t *);

static pte_t *ptbl_alloc(mmu_t, pmap_t, unsigned int);
static void ptbl_free(mmu_t, pmap_t, unsigned int);
static void ptbl_hold(mmu_t, pmap_t, unsigned int);
static int ptbl_unhold(mmu_t, pmap_t, unsigned int);

static vm_paddr_t pte_vatopa(mmu_t, pmap_t, vm_offset_t);
static pte_t *pte_find(mmu_t, pmap_t, vm_offset_t);
static void pte_enter(mmu_t, pmap_t, vm_page_t, vm_offset_t, uint32_t);
static int pte_remove(mmu_t, pmap_t, vm_offset_t, uint8_t);

static pv_entry_t pv_alloc(void);
static void pv_free(pv_entry_t);
static void pv_insert(pmap_t, vm_offset_t, vm_page_t);
static void pv_remove(pmap_t, vm_offset_t, vm_page_t);

/* Number of kva ptbl buffers, each covering one ptbl (PTBL_PAGES). */
#define PTBL_BUFS		(128 * 16)

struct ptbl_buf {
	TAILQ_ENTRY(ptbl_buf) link;	/* list link */
	vm_offset_t kva;		/* va of mapping */
};

/* ptbl free list and a lock used for access synchronization. */
static TAILQ_HEAD(, ptbl_buf) ptbl_buf_freelist;
static struct mtx ptbl_buf_freelist_lock;

/* Base address of kva space allocated for ptbl bufs. */
static vm_offset_t ptbl_buf_pool_vabase;

/* Pointer to ptbl_buf structures.
*/ 264static struct ptbl_buf *ptbl_bufs; 265 266/* 267 * Kernel MMU interface 268 */ 269static void mmu_booke_change_wiring(mmu_t, pmap_t, vm_offset_t, boolean_t); 270static void mmu_booke_clear_modify(mmu_t, vm_page_t); 271static void mmu_booke_clear_reference(mmu_t, vm_page_t); 272static void mmu_booke_copy(pmap_t, pmap_t, vm_offset_t, vm_size_t, 273 vm_offset_t); 274static void mmu_booke_copy_page(mmu_t, vm_page_t, vm_page_t); 275static void mmu_booke_enter(mmu_t, pmap_t, vm_offset_t, vm_page_t, 276 vm_prot_t, boolean_t); 277static void mmu_booke_enter_object(mmu_t, pmap_t, vm_offset_t, vm_offset_t, 278 vm_page_t, vm_prot_t); 279static void mmu_booke_enter_quick(mmu_t, pmap_t, vm_offset_t, vm_page_t, 280 vm_prot_t); 281static vm_paddr_t mmu_booke_extract(mmu_t, pmap_t, vm_offset_t); 282static vm_page_t mmu_booke_extract_and_hold(mmu_t, pmap_t, vm_offset_t, 283 vm_prot_t); 284static void mmu_booke_init(mmu_t); 285static boolean_t mmu_booke_is_modified(mmu_t, vm_page_t); 286static boolean_t mmu_booke_is_prefaultable(mmu_t, pmap_t, vm_offset_t); 287static boolean_t mmu_booke_ts_referenced(mmu_t, vm_page_t); 288static vm_offset_t mmu_booke_map(mmu_t, vm_offset_t *, vm_offset_t, vm_offset_t, 289 int); 290static int mmu_booke_mincore(mmu_t, pmap_t, vm_offset_t); 291static void mmu_booke_object_init_pt(mmu_t, pmap_t, vm_offset_t, 292 vm_object_t, vm_pindex_t, vm_size_t); 293static boolean_t mmu_booke_page_exists_quick(mmu_t, pmap_t, vm_page_t); 294static void mmu_booke_page_init(mmu_t, vm_page_t); 295static int mmu_booke_page_wired_mappings(mmu_t, vm_page_t); 296static void mmu_booke_pinit(mmu_t, pmap_t); 297static void mmu_booke_pinit0(mmu_t, pmap_t); 298static void mmu_booke_protect(mmu_t, pmap_t, vm_offset_t, vm_offset_t, 299 vm_prot_t); 300static void mmu_booke_qenter(mmu_t, vm_offset_t, vm_page_t *, int); 301static void mmu_booke_qremove(mmu_t, vm_offset_t, int); 302static void mmu_booke_release(mmu_t, pmap_t); 303static void mmu_booke_remove(mmu_t, pmap_t, vm_offset_t, vm_offset_t); 304static void mmu_booke_remove_all(mmu_t, vm_page_t); 305static void mmu_booke_remove_write(mmu_t, vm_page_t); 306static void mmu_booke_zero_page(mmu_t, vm_page_t); 307static void mmu_booke_zero_page_area(mmu_t, vm_page_t, int, int); 308static void mmu_booke_zero_page_idle(mmu_t, vm_page_t); 309static void mmu_booke_activate(mmu_t, struct thread *); 310static void mmu_booke_deactivate(mmu_t, struct thread *); 311static void mmu_booke_bootstrap(mmu_t, vm_offset_t, vm_offset_t); 312static void *mmu_booke_mapdev(mmu_t, vm_offset_t, vm_size_t); 313static void mmu_booke_unmapdev(mmu_t, vm_offset_t, vm_size_t); 314static vm_offset_t mmu_booke_kextract(mmu_t, vm_offset_t); 315static void mmu_booke_kenter(mmu_t, vm_offset_t, vm_offset_t); 316static void mmu_booke_kremove(mmu_t, vm_offset_t); 317static boolean_t mmu_booke_dev_direct_mapped(mmu_t, vm_offset_t, vm_size_t); 318static boolean_t mmu_booke_page_executable(mmu_t, vm_page_t); 319static vm_offset_t mmu_booke_dumpsys_map(mmu_t, struct pmap_md *, 320 vm_size_t, vm_size_t *); 321static void mmu_booke_dumpsys_unmap(mmu_t, struct pmap_md *, 322 vm_size_t, vm_offset_t); 323static struct pmap_md *mmu_booke_scan_md(mmu_t, struct pmap_md *); 324 325static mmu_method_t mmu_booke_methods[] = { 326 /* pmap dispatcher interface */ 327 MMUMETHOD(mmu_change_wiring, mmu_booke_change_wiring), 328 MMUMETHOD(mmu_clear_modify, mmu_booke_clear_modify), 329 MMUMETHOD(mmu_clear_reference, mmu_booke_clear_reference), 330 MMUMETHOD(mmu_copy, mmu_booke_copy), 331 
MMUMETHOD(mmu_copy_page, mmu_booke_copy_page), 332 MMUMETHOD(mmu_enter, mmu_booke_enter), 333 MMUMETHOD(mmu_enter_object, mmu_booke_enter_object), 334 MMUMETHOD(mmu_enter_quick, mmu_booke_enter_quick), 335 MMUMETHOD(mmu_extract, mmu_booke_extract), 336 MMUMETHOD(mmu_extract_and_hold, mmu_booke_extract_and_hold), 337 MMUMETHOD(mmu_init, mmu_booke_init), 338 MMUMETHOD(mmu_is_modified, mmu_booke_is_modified), 339 MMUMETHOD(mmu_is_prefaultable, mmu_booke_is_prefaultable), 340 MMUMETHOD(mmu_ts_referenced, mmu_booke_ts_referenced), 341 MMUMETHOD(mmu_map, mmu_booke_map), 342 MMUMETHOD(mmu_mincore, mmu_booke_mincore), 343 MMUMETHOD(mmu_object_init_pt, mmu_booke_object_init_pt), 344 MMUMETHOD(mmu_page_exists_quick,mmu_booke_page_exists_quick), 345 MMUMETHOD(mmu_page_init, mmu_booke_page_init), 346 MMUMETHOD(mmu_page_wired_mappings, mmu_booke_page_wired_mappings), 347 MMUMETHOD(mmu_pinit, mmu_booke_pinit), 348 MMUMETHOD(mmu_pinit0, mmu_booke_pinit0), 349 MMUMETHOD(mmu_protect, mmu_booke_protect), 350 MMUMETHOD(mmu_qenter, mmu_booke_qenter), 351 MMUMETHOD(mmu_qremove, mmu_booke_qremove), 352 MMUMETHOD(mmu_release, mmu_booke_release), 353 MMUMETHOD(mmu_remove, mmu_booke_remove), 354 MMUMETHOD(mmu_remove_all, mmu_booke_remove_all), 355 MMUMETHOD(mmu_remove_write, mmu_booke_remove_write), 356 MMUMETHOD(mmu_zero_page, mmu_booke_zero_page), 357 MMUMETHOD(mmu_zero_page_area, mmu_booke_zero_page_area), 358 MMUMETHOD(mmu_zero_page_idle, mmu_booke_zero_page_idle), 359 MMUMETHOD(mmu_activate, mmu_booke_activate), 360 MMUMETHOD(mmu_deactivate, mmu_booke_deactivate), 361 362 /* Internal interfaces */ 363 MMUMETHOD(mmu_bootstrap, mmu_booke_bootstrap), 364 MMUMETHOD(mmu_dev_direct_mapped,mmu_booke_dev_direct_mapped), 365 MMUMETHOD(mmu_mapdev, mmu_booke_mapdev), 366 MMUMETHOD(mmu_kenter, mmu_booke_kenter), 367 MMUMETHOD(mmu_kextract, mmu_booke_kextract), 368/* MMUMETHOD(mmu_kremove, mmu_booke_kremove), */ 369 MMUMETHOD(mmu_page_executable, mmu_booke_page_executable), 370 MMUMETHOD(mmu_unmapdev, mmu_booke_unmapdev), 371 372 /* dumpsys() support */ 373 MMUMETHOD(mmu_dumpsys_map, mmu_booke_dumpsys_map), 374 MMUMETHOD(mmu_dumpsys_unmap, mmu_booke_dumpsys_unmap), 375 MMUMETHOD(mmu_scan_md, mmu_booke_scan_md), 376 377 { 0, 0 } 378}; 379 380static mmu_def_t booke_mmu = { 381 MMU_TYPE_BOOKE, 382 mmu_booke_methods, 383 0 384}; 385MMU_DEF(booke_mmu); 386 387/* Return number of entries in TLB0. */ 388static __inline void 389tlb0_get_tlbconf(void) 390{ 391 uint32_t tlb0_cfg; 392 393 tlb0_cfg = mfspr(SPR_TLB0CFG); 394 tlb0_entries = tlb0_cfg & TLBCFG_NENTRY_MASK; 395 tlb0_ways = (tlb0_cfg & TLBCFG_ASSOC_MASK) >> TLBCFG_ASSOC_SHIFT; 396 tlb0_entries_per_way = tlb0_entries / tlb0_ways; 397} 398 399/* Initialize pool of kva ptbl buffers. */ 400static void 401ptbl_init(void) 402{ 403 int i; 404 405 CTR3(KTR_PMAP, "%s: s (ptbl_bufs = 0x%08x size 0x%08x)", __func__, 406 (uint32_t)ptbl_bufs, sizeof(struct ptbl_buf) * PTBL_BUFS); 407 CTR3(KTR_PMAP, "%s: s (ptbl_buf_pool_vabase = 0x%08x size = 0x%08x)", 408 __func__, ptbl_buf_pool_vabase, PTBL_BUFS * PTBL_PAGES * PAGE_SIZE); 409 410 mtx_init(&ptbl_buf_freelist_lock, "ptbl bufs lock", NULL, MTX_DEF); 411 TAILQ_INIT(&ptbl_buf_freelist); 412 413 for (i = 0; i < PTBL_BUFS; i++) { 414 ptbl_bufs[i].kva = ptbl_buf_pool_vabase + i * PTBL_PAGES * PAGE_SIZE; 415 TAILQ_INSERT_TAIL(&ptbl_buf_freelist, &ptbl_bufs[i], link); 416 } 417} 418 419/* Get a ptbl_buf from the freelist. 
 */
static struct ptbl_buf *
ptbl_buf_alloc(void)
{
	struct ptbl_buf *buf;

	mtx_lock(&ptbl_buf_freelist_lock);
	buf = TAILQ_FIRST(&ptbl_buf_freelist);
	if (buf != NULL)
		TAILQ_REMOVE(&ptbl_buf_freelist, buf, link);
	mtx_unlock(&ptbl_buf_freelist_lock);

	CTR2(KTR_PMAP, "%s: buf = %p", __func__, buf);

	return (buf);
}

/* Return ptbl buf to the free pool. */
static void
ptbl_buf_free(struct ptbl_buf *buf)
{

	CTR2(KTR_PMAP, "%s: buf = %p", __func__, buf);

	mtx_lock(&ptbl_buf_freelist_lock);
	TAILQ_INSERT_TAIL(&ptbl_buf_freelist, buf, link);
	mtx_unlock(&ptbl_buf_freelist_lock);
}

/*
 * Search the pmap's list of allocated ptbl bufs for the buf that maps the
 * given ptbl and return it to the free pool.
 */
static void
ptbl_free_pmap_ptbl(pmap_t pmap, pte_t *ptbl)
{
	struct ptbl_buf *pbuf;

	CTR2(KTR_PMAP, "%s: ptbl = %p", __func__, ptbl);

	PMAP_LOCK_ASSERT(pmap, MA_OWNED);

	TAILQ_FOREACH(pbuf, &pmap->pm_ptbl_list, link)
		if (pbuf->kva == (vm_offset_t)ptbl) {
			/* Remove from pmap ptbl buf list. */
			TAILQ_REMOVE(&pmap->pm_ptbl_list, pbuf, link);

			/* Free corresponding ptbl buf. */
			ptbl_buf_free(pbuf);
			break;
		}
}

/* Allocate page table. */
static pte_t *
ptbl_alloc(mmu_t mmu, pmap_t pmap, unsigned int pdir_idx)
{
	vm_page_t mtbl[PTBL_PAGES];
	vm_page_t m;
	struct ptbl_buf *pbuf;
	unsigned int pidx;
	pte_t *ptbl;
	int i;

	CTR4(KTR_PMAP, "%s: pmap = %p su = %d pdir_idx = %d", __func__, pmap,
	    (pmap == kernel_pmap), pdir_idx);

	KASSERT((pdir_idx <= (VM_MAXUSER_ADDRESS / PDIR_SIZE)),
	    ("ptbl_alloc: invalid pdir_idx"));
	KASSERT((pmap->pm_pdir[pdir_idx] == NULL),
	    ("pte_alloc: valid ptbl entry exists!"));

	pbuf = ptbl_buf_alloc();
	if (pbuf == NULL)
		panic("pte_alloc: couldn't alloc kernel virtual memory");

	ptbl = (pte_t *)pbuf->kva;

	CTR2(KTR_PMAP, "%s: ptbl kva = %p", __func__, ptbl);

	/* Allocate ptbl pages, this will sleep! */
	for (i = 0; i < PTBL_PAGES; i++) {
		pidx = (PTBL_PAGES * pdir_idx) + i;
		while ((m = vm_page_alloc(NULL, pidx,
		    VM_ALLOC_NOOBJ | VM_ALLOC_WIRED)) == NULL) {

			PMAP_UNLOCK(pmap);
			vm_page_unlock_queues();
			VM_WAIT;
			vm_page_lock_queues();
			PMAP_LOCK(pmap);
		}
		mtbl[i] = m;
	}

	/* Map allocated pages into kernel_pmap. */
	mmu_booke_qenter(mmu, (vm_offset_t)ptbl, mtbl, PTBL_PAGES);

	/* Zero whole ptbl. */
	bzero((caddr_t)ptbl, PTBL_PAGES * PAGE_SIZE);

	/* Add pbuf to the pmap ptbl bufs list. */
	TAILQ_INSERT_TAIL(&pmap->pm_ptbl_list, pbuf, link);

	return (ptbl);
}

/* Free ptbl pages and invalidate pdir entry. */
static void
ptbl_free(mmu_t mmu, pmap_t pmap, unsigned int pdir_idx)
{
	pte_t *ptbl;
	vm_paddr_t pa;
	vm_offset_t va;
	vm_page_t m;
	int i;

	CTR4(KTR_PMAP, "%s: pmap = %p su = %d pdir_idx = %d", __func__, pmap,
	    (pmap == kernel_pmap), pdir_idx);

	KASSERT((pdir_idx <= (VM_MAXUSER_ADDRESS / PDIR_SIZE)),
	    ("ptbl_free: invalid pdir_idx"));

	ptbl = pmap->pm_pdir[pdir_idx];

	CTR2(KTR_PMAP, "%s: ptbl = %p", __func__, ptbl);

	KASSERT((ptbl != NULL), ("ptbl_free: null ptbl"));

	/*
	 * Invalidate the pdir entry as soon as possible, so that other CPUs
	 * don't attempt to look up the page tables we are releasing.
550 */ 551 mtx_lock_spin(&tlbivax_mutex); 552 553 pmap->pm_pdir[pdir_idx] = NULL; 554 555 mtx_unlock_spin(&tlbivax_mutex); 556 557 for (i = 0; i < PTBL_PAGES; i++) { 558 va = ((vm_offset_t)ptbl + (i * PAGE_SIZE)); 559 pa = pte_vatopa(mmu, kernel_pmap, va); 560 m = PHYS_TO_VM_PAGE(pa); 561 vm_page_free_zero(m); 562 atomic_subtract_int(&cnt.v_wire_count, 1); 563 mmu_booke_kremove(mmu, va); 564 } 565 566 ptbl_free_pmap_ptbl(pmap, ptbl); 567} 568 569/* 570 * Decrement ptbl pages hold count and attempt to free ptbl pages. 571 * Called when removing pte entry from ptbl. 572 * 573 * Return 1 if ptbl pages were freed. 574 */ 575static int 576ptbl_unhold(mmu_t mmu, pmap_t pmap, unsigned int pdir_idx) 577{ 578 pte_t *ptbl; 579 vm_paddr_t pa; 580 vm_page_t m; 581 int i; 582 583 CTR4(KTR_PMAP, "%s: pmap = %p su = %d pdir_idx = %d", __func__, pmap, 584 (pmap == kernel_pmap), pdir_idx); 585 586 KASSERT((pdir_idx <= (VM_MAXUSER_ADDRESS / PDIR_SIZE)), 587 ("ptbl_unhold: invalid pdir_idx")); 588 KASSERT((pmap != kernel_pmap), 589 ("ptbl_unhold: unholding kernel ptbl!")); 590 591 ptbl = pmap->pm_pdir[pdir_idx]; 592 593 //debugf("ptbl_unhold: ptbl = 0x%08x\n", (u_int32_t)ptbl); 594 KASSERT(((vm_offset_t)ptbl >= VM_MIN_KERNEL_ADDRESS), 595 ("ptbl_unhold: non kva ptbl")); 596 597 /* decrement hold count */ 598 for (i = 0; i < PTBL_PAGES; i++) { 599 pa = pte_vatopa(mmu, kernel_pmap, 600 (vm_offset_t)ptbl + (i * PAGE_SIZE)); 601 m = PHYS_TO_VM_PAGE(pa); 602 m->wire_count--; 603 } 604 605 /* 606 * Free ptbl pages if there are no pte etries in this ptbl. 607 * wire_count has the same value for all ptbl pages, so check the last 608 * page. 609 */ 610 if (m->wire_count == 0) { 611 ptbl_free(mmu, pmap, pdir_idx); 612 613 //debugf("ptbl_unhold: e (freed ptbl)\n"); 614 return (1); 615 } 616 617 return (0); 618} 619 620/* 621 * Increment hold count for ptbl pages. This routine is used when a new pte 622 * entry is being inserted into the ptbl. 623 */ 624static void 625ptbl_hold(mmu_t mmu, pmap_t pmap, unsigned int pdir_idx) 626{ 627 vm_paddr_t pa; 628 pte_t *ptbl; 629 vm_page_t m; 630 int i; 631 632 CTR3(KTR_PMAP, "%s: pmap = %p pdir_idx = %d", __func__, pmap, 633 pdir_idx); 634 635 KASSERT((pdir_idx <= (VM_MAXUSER_ADDRESS / PDIR_SIZE)), 636 ("ptbl_hold: invalid pdir_idx")); 637 KASSERT((pmap != kernel_pmap), 638 ("ptbl_hold: holding kernel ptbl!")); 639 640 ptbl = pmap->pm_pdir[pdir_idx]; 641 642 KASSERT((ptbl != NULL), ("ptbl_hold: null ptbl")); 643 644 for (i = 0; i < PTBL_PAGES; i++) { 645 pa = pte_vatopa(mmu, kernel_pmap, 646 (vm_offset_t)ptbl + (i * PAGE_SIZE)); 647 m = PHYS_TO_VM_PAGE(pa); 648 m->wire_count++; 649 } 650} 651 652/* Allocate pv_entry structure. */ 653pv_entry_t 654pv_alloc(void) 655{ 656 pv_entry_t pv; 657 658 pv_entry_count++; 659 if ((pv_entry_count > pv_entry_high_water) && 660 (pagedaemon_waken == 0)) { 661 pagedaemon_waken = 1; 662 wakeup(&vm_pages_needed); 663 } 664 pv = uma_zalloc(pvzone, M_NOWAIT); 665 666 return (pv); 667} 668 669/* Free pv_entry structure. */ 670static __inline void 671pv_free(pv_entry_t pve) 672{ 673 674 pv_entry_count--; 675 uma_zfree(pvzone, pve); 676} 677 678 679/* Allocate and initialize pv_entry structure. 
*/ 680static void 681pv_insert(pmap_t pmap, vm_offset_t va, vm_page_t m) 682{ 683 pv_entry_t pve; 684 685 //int su = (pmap == kernel_pmap); 686 //debugf("pv_insert: s (su = %d pmap = 0x%08x va = 0x%08x m = 0x%08x)\n", su, 687 // (u_int32_t)pmap, va, (u_int32_t)m); 688 689 pve = pv_alloc(); 690 if (pve == NULL) 691 panic("pv_insert: no pv entries!"); 692 693 pve->pv_pmap = pmap; 694 pve->pv_va = va; 695 696 /* add to pv_list */ 697 PMAP_LOCK_ASSERT(pmap, MA_OWNED); 698 mtx_assert(&vm_page_queue_mtx, MA_OWNED); 699 700 TAILQ_INSERT_TAIL(&m->md.pv_list, pve, pv_link); 701 702 //debugf("pv_insert: e\n"); 703} 704 705/* Destroy pv entry. */ 706static void 707pv_remove(pmap_t pmap, vm_offset_t va, vm_page_t m) 708{ 709 pv_entry_t pve; 710 711 //int su = (pmap == kernel_pmap); 712 //debugf("pv_remove: s (su = %d pmap = 0x%08x va = 0x%08x)\n", su, (u_int32_t)pmap, va); 713 714 PMAP_LOCK_ASSERT(pmap, MA_OWNED); 715 mtx_assert(&vm_page_queue_mtx, MA_OWNED); 716 717 /* find pv entry */ 718 TAILQ_FOREACH(pve, &m->md.pv_list, pv_link) { 719 if ((pmap == pve->pv_pmap) && (va == pve->pv_va)) { 720 /* remove from pv_list */ 721 TAILQ_REMOVE(&m->md.pv_list, pve, pv_link); 722 if (TAILQ_EMPTY(&m->md.pv_list)) 723 vm_page_flag_clear(m, PG_WRITEABLE); 724 725 /* free pv entry struct */ 726 pv_free(pve); 727 break; 728 } 729 } 730 731 //debugf("pv_remove: e\n"); 732} 733 734/* 735 * Clean pte entry, try to free page table page if requested. 736 * 737 * Return 1 if ptbl pages were freed, otherwise return 0. 738 */ 739static int 740pte_remove(mmu_t mmu, pmap_t pmap, vm_offset_t va, uint8_t flags) 741{ 742 unsigned int pdir_idx = PDIR_IDX(va); 743 unsigned int ptbl_idx = PTBL_IDX(va); 744 vm_page_t m; 745 pte_t *ptbl; 746 pte_t *pte; 747 748 //int su = (pmap == kernel_pmap); 749 //debugf("pte_remove: s (su = %d pmap = 0x%08x va = 0x%08x flags = %d)\n", 750 // su, (u_int32_t)pmap, va, flags); 751 752 ptbl = pmap->pm_pdir[pdir_idx]; 753 KASSERT(ptbl, ("pte_remove: null ptbl")); 754 755 pte = &ptbl[ptbl_idx]; 756 757 if (pte == NULL || !PTE_ISVALID(pte)) 758 return (0); 759 760 if (PTE_ISWIRED(pte)) 761 pmap->pm_stats.wired_count--; 762 763 /* Handle managed entry. */ 764 if (PTE_ISMANAGED(pte)) { 765 /* Get vm_page_t for mapped pte. */ 766 m = PHYS_TO_VM_PAGE(PTE_PA(pte)); 767 768 if (PTE_ISMODIFIED(pte)) 769 vm_page_dirty(m); 770 771 if (PTE_ISREFERENCED(pte)) 772 vm_page_flag_set(m, PG_REFERENCED); 773 774 pv_remove(pmap, va, m); 775 } 776 777 mtx_lock_spin(&tlbivax_mutex); 778 779 tlb0_flush_entry(va); 780 pte->flags = 0; 781 pte->rpn = 0; 782 783 mtx_unlock_spin(&tlbivax_mutex); 784 785 pmap->pm_stats.resident_count--; 786 787 if (flags & PTBL_UNHOLD) { 788 //debugf("pte_remove: e (unhold)\n"); 789 return (ptbl_unhold(mmu, pmap, pdir_idx)); 790 } 791 792 //debugf("pte_remove: e\n"); 793 return (0); 794} 795 796/* 797 * Insert PTE for a given page and virtual address. 798 */ 799static void 800pte_enter(mmu_t mmu, pmap_t pmap, vm_page_t m, vm_offset_t va, uint32_t flags) 801{ 802 unsigned int pdir_idx = PDIR_IDX(va); 803 unsigned int ptbl_idx = PTBL_IDX(va); 804 pte_t *ptbl, *pte; 805 806 CTR4(KTR_PMAP, "%s: su = %d pmap = %p va = %p", __func__, 807 pmap == kernel_pmap, pmap, va); 808 809 /* Get the page table pointer. */ 810 ptbl = pmap->pm_pdir[pdir_idx]; 811 812 if (ptbl == NULL) { 813 /* Allocate page table pages. */ 814 ptbl = ptbl_alloc(mmu, pmap, pdir_idx); 815 } else { 816 /* 817 * Check if there is valid mapping for requested 818 * va, if there is, remove it. 
819 */ 820 pte = &pmap->pm_pdir[pdir_idx][ptbl_idx]; 821 if (PTE_ISVALID(pte)) { 822 pte_remove(mmu, pmap, va, PTBL_HOLD); 823 } else { 824 /* 825 * pte is not used, increment hold count 826 * for ptbl pages. 827 */ 828 if (pmap != kernel_pmap) 829 ptbl_hold(mmu, pmap, pdir_idx); 830 } 831 } 832 833 /* 834 * Insert pv_entry into pv_list for mapped page if part of managed 835 * memory. 836 */ 837 if ((m->flags & PG_FICTITIOUS) == 0) { 838 if ((m->flags & PG_UNMANAGED) == 0) { 839 flags |= PTE_MANAGED; 840 841 /* Create and insert pv entry. */ 842 pv_insert(pmap, va, m); 843 } 844 } 845 846 pmap->pm_stats.resident_count++; 847 848 mtx_lock_spin(&tlbivax_mutex); 849 850 tlb0_flush_entry(va); 851 if (pmap->pm_pdir[pdir_idx] == NULL) { 852 /* 853 * If we just allocated a new page table, hook it in 854 * the pdir. 855 */ 856 pmap->pm_pdir[pdir_idx] = ptbl; 857 } 858 pte = &(pmap->pm_pdir[pdir_idx][ptbl_idx]); 859 pte->rpn = VM_PAGE_TO_PHYS(m) & ~PTE_PA_MASK; 860 pte->flags |= (PTE_VALID | flags); 861 862 mtx_unlock_spin(&tlbivax_mutex); 863} 864 865/* Return the pa for the given pmap/va. */ 866static vm_paddr_t 867pte_vatopa(mmu_t mmu, pmap_t pmap, vm_offset_t va) 868{ 869 vm_paddr_t pa = 0; 870 pte_t *pte; 871 872 pte = pte_find(mmu, pmap, va); 873 if ((pte != NULL) && PTE_ISVALID(pte)) 874 pa = (PTE_PA(pte) | (va & PTE_PA_MASK)); 875 return (pa); 876} 877 878/* Get a pointer to a PTE in a page table. */ 879static pte_t * 880pte_find(mmu_t mmu, pmap_t pmap, vm_offset_t va) 881{ 882 unsigned int pdir_idx = PDIR_IDX(va); 883 unsigned int ptbl_idx = PTBL_IDX(va); 884 885 KASSERT((pmap != NULL), ("pte_find: invalid pmap")); 886 887 if (pmap->pm_pdir[pdir_idx]) 888 return (&(pmap->pm_pdir[pdir_idx][ptbl_idx])); 889 890 return (NULL); 891} 892 893/**************************************************************************/ 894/* PMAP related */ 895/**************************************************************************/ 896 897/* 898 * This is called during e500_init, before the system is really initialized. 899 */ 900static void 901mmu_booke_bootstrap(mmu_t mmu, vm_offset_t start, vm_offset_t kernelend) 902{ 903 vm_offset_t phys_kernelend; 904 struct mem_region *mp, *mp1; 905 int cnt, i, j; 906 u_int s, e, sz; 907 u_int phys_avail_count; 908 vm_size_t physsz, hwphyssz, kstack0_sz; 909 vm_offset_t kernel_pdir, kstack0; 910 vm_paddr_t kstack0_phys; 911 912 debugf("mmu_booke_bootstrap: entered\n"); 913 914 /* Initialize invalidation mutex */ 915 mtx_init(&tlbivax_mutex, "tlbivax", NULL, MTX_SPIN); 916 917 /* Read TLB0 size and associativity. */ 918 tlb0_get_tlbconf(); 919 920 /* Align kernel start and end address (kernel image). */ 921 kernstart = trunc_page(start); 922 data_start = round_page(kernelend); 923 kernsize = data_start - kernstart; 924 925 data_end = data_start; 926 927 /* Allocate space for the message buffer. */ 928 msgbufp = (struct msgbuf *)data_end; 929 data_end += MSGBUF_SIZE; 930 debugf(" msgbufp at 0x%08x end = 0x%08x\n", (uint32_t)msgbufp, 931 data_end); 932 933 data_end = round_page(data_end); 934 935 /* Allocate space for ptbl_bufs. */ 936 ptbl_bufs = (struct ptbl_buf *)data_end; 937 data_end += sizeof(struct ptbl_buf) * PTBL_BUFS; 938 debugf(" ptbl_bufs at 0x%08x end = 0x%08x\n", (uint32_t)ptbl_bufs, 939 data_end); 940 941 data_end = round_page(data_end); 942 943 /* Allocate PTE tables for kernel KVA. 
*/ 944 kernel_pdir = data_end; 945 kernel_ptbls = (VM_MAX_KERNEL_ADDRESS - VM_MIN_KERNEL_ADDRESS + 946 PDIR_SIZE - 1) / PDIR_SIZE; 947 data_end += kernel_ptbls * PTBL_PAGES * PAGE_SIZE; 948 debugf(" kernel ptbls: %d\n", kernel_ptbls); 949 debugf(" kernel pdir at 0x%08x end = 0x%08x\n", kernel_pdir, data_end); 950 951 debugf(" data_end: 0x%08x\n", data_end); 952 if (data_end - kernstart > 0x1000000) { 953 data_end = (data_end + 0x3fffff) & ~0x3fffff; 954 tlb1_mapin_region(kernstart + 0x1000000, 955 kernload + 0x1000000, data_end - kernstart - 0x1000000); 956 } else 957 data_end = (data_end + 0xffffff) & ~0xffffff; 958 959 debugf(" updated data_end: 0x%08x\n", data_end); 960 961 kernsize += data_end - data_start; 962 963 /* 964 * Clear the structures - note we can only do it safely after the 965 * possible additional TLB1 translations are in place (above) so that 966 * all range up to the currently calculated 'data_end' is covered. 967 */ 968 memset((void *)ptbl_bufs, 0, sizeof(struct ptbl_buf) * PTBL_SIZE); 969 memset((void *)kernel_pdir, 0, kernel_ptbls * PTBL_PAGES * PAGE_SIZE); 970 971 /*******************************************************/ 972 /* Set the start and end of kva. */ 973 /*******************************************************/ 974 virtual_avail = round_page(data_end); 975 virtual_end = VM_MAX_KERNEL_ADDRESS; 976 977 /* Allocate KVA space for page zero/copy operations. */ 978 zero_page_va = virtual_avail; 979 virtual_avail += PAGE_SIZE; 980 zero_page_idle_va = virtual_avail; 981 virtual_avail += PAGE_SIZE; 982 copy_page_src_va = virtual_avail; 983 virtual_avail += PAGE_SIZE; 984 copy_page_dst_va = virtual_avail; 985 virtual_avail += PAGE_SIZE; 986 debugf("zero_page_va = 0x%08x\n", zero_page_va); 987 debugf("zero_page_idle_va = 0x%08x\n", zero_page_idle_va); 988 debugf("copy_page_src_va = 0x%08x\n", copy_page_src_va); 989 debugf("copy_page_dst_va = 0x%08x\n", copy_page_dst_va); 990 991 /* Initialize page zero/copy mutexes. */ 992 mtx_init(&zero_page_mutex, "mmu_booke_zero_page", NULL, MTX_DEF); 993 mtx_init(©_page_mutex, "mmu_booke_copy_page", NULL, MTX_DEF); 994 995 /* Allocate KVA space for ptbl bufs. */ 996 ptbl_buf_pool_vabase = virtual_avail; 997 virtual_avail += PTBL_BUFS * PTBL_PAGES * PAGE_SIZE; 998 debugf("ptbl_buf_pool_vabase = 0x%08x end = 0x%08x\n", 999 ptbl_buf_pool_vabase, virtual_avail); 1000 1001 /* Calculate corresponding physical addresses for the kernel region. */ 1002 phys_kernelend = kernload + kernsize; 1003 debugf("kernel image and allocated data:\n"); 1004 debugf(" kernload = 0x%08x\n", kernload); 1005 debugf(" kernstart = 0x%08x\n", kernstart); 1006 debugf(" kernsize = 0x%08x\n", kernsize); 1007 1008 if (sizeof(phys_avail) / sizeof(phys_avail[0]) < availmem_regions_sz) 1009 panic("mmu_booke_bootstrap: phys_avail too small"); 1010 1011 /* 1012 * Remove kernel physical address range from avail regions list. Page 1013 * align all regions. Non-page aligned memory isn't very interesting 1014 * to us. Also, sort the entries for ascending addresses. 1015 */ 1016 sz = 0; 1017 cnt = availmem_regions_sz; 1018 debugf("processing avail regions:\n"); 1019 for (mp = availmem_regions; mp->mr_size; mp++) { 1020 s = mp->mr_start; 1021 e = mp->mr_start + mp->mr_size; 1022 debugf(" %08x-%08x -> ", s, e); 1023 /* Check whether this region holds all of the kernel. 
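 *
 * A hedged example of the splitting done below, with made-up numbers: given
 * an avail region [0x00000000, 0x40000000) and a kernel image occupying
 * kernload = 0x01000000 .. phys_kernelend = 0x01800000, the first branch
 * appends a new entry [0x01800000, 0x40000000) at the end of
 * availmem_regions and clips the current entry to end at 0x01000000,
 * leaving two pieces that exclude the kernel. The two checks that follow
 * handle regions that only start or only end inside the kernel.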
*/ 1024 if (s < kernload && e > phys_kernelend) { 1025 availmem_regions[cnt].mr_start = phys_kernelend; 1026 availmem_regions[cnt++].mr_size = e - phys_kernelend; 1027 e = kernload; 1028 } 1029 /* Look whether this regions starts within the kernel. */ 1030 if (s >= kernload && s < phys_kernelend) { 1031 if (e <= phys_kernelend) 1032 goto empty; 1033 s = phys_kernelend; 1034 } 1035 /* Now look whether this region ends within the kernel. */ 1036 if (e > kernload && e <= phys_kernelend) { 1037 if (s >= kernload) 1038 goto empty; 1039 e = kernload; 1040 } 1041 /* Now page align the start and size of the region. */ 1042 s = round_page(s); 1043 e = trunc_page(e); 1044 if (e < s) 1045 e = s; 1046 sz = e - s; 1047 debugf("%08x-%08x = %x\n", s, e, sz); 1048 1049 /* Check whether some memory is left here. */ 1050 if (sz == 0) { 1051 empty: 1052 memmove(mp, mp + 1, 1053 (cnt - (mp - availmem_regions)) * sizeof(*mp)); 1054 cnt--; 1055 mp--; 1056 continue; 1057 } 1058 1059 /* Do an insertion sort. */ 1060 for (mp1 = availmem_regions; mp1 < mp; mp1++) 1061 if (s < mp1->mr_start) 1062 break; 1063 if (mp1 < mp) { 1064 memmove(mp1 + 1, mp1, (char *)mp - (char *)mp1); 1065 mp1->mr_start = s; 1066 mp1->mr_size = sz; 1067 } else { 1068 mp->mr_start = s; 1069 mp->mr_size = sz; 1070 } 1071 } 1072 availmem_regions_sz = cnt; 1073 1074 /*******************************************************/ 1075 /* Steal physical memory for kernel stack from the end */ 1076 /* of the first avail region */ 1077 /*******************************************************/ 1078 kstack0_sz = KSTACK_PAGES * PAGE_SIZE; 1079 kstack0_phys = availmem_regions[0].mr_start + 1080 availmem_regions[0].mr_size; 1081 kstack0_phys -= kstack0_sz; 1082 availmem_regions[0].mr_size -= kstack0_sz; 1083 1084 /*******************************************************/ 1085 /* Fill in phys_avail table, based on availmem_regions */ 1086 /*******************************************************/ 1087 phys_avail_count = 0; 1088 physsz = 0; 1089 hwphyssz = 0; 1090 TUNABLE_ULONG_FETCH("hw.physmem", (u_long *) &hwphyssz); 1091 1092 debugf("fill in phys_avail:\n"); 1093 for (i = 0, j = 0; i < availmem_regions_sz; i++, j += 2) { 1094 1095 debugf(" region: 0x%08x - 0x%08x (0x%08x)\n", 1096 availmem_regions[i].mr_start, 1097 availmem_regions[i].mr_start + 1098 availmem_regions[i].mr_size, 1099 availmem_regions[i].mr_size); 1100 1101 if (hwphyssz != 0 && 1102 (physsz + availmem_regions[i].mr_size) >= hwphyssz) { 1103 debugf(" hw.physmem adjust\n"); 1104 if (physsz < hwphyssz) { 1105 phys_avail[j] = availmem_regions[i].mr_start; 1106 phys_avail[j + 1] = 1107 availmem_regions[i].mr_start + 1108 hwphyssz - physsz; 1109 physsz = hwphyssz; 1110 phys_avail_count++; 1111 } 1112 break; 1113 } 1114 1115 phys_avail[j] = availmem_regions[i].mr_start; 1116 phys_avail[j + 1] = availmem_regions[i].mr_start + 1117 availmem_regions[i].mr_size; 1118 phys_avail_count++; 1119 physsz += availmem_regions[i].mr_size; 1120 } 1121 physmem = btoc(physsz); 1122 1123 /* Calculate the last available physical address. */ 1124 for (i = 0; phys_avail[i + 2] != 0; i += 2) 1125 ; 1126 Maxmem = powerpc_btop(phys_avail[i + 1]); 1127 1128 debugf("Maxmem = 0x%08lx\n", Maxmem); 1129 debugf("phys_avail_count = %d\n", phys_avail_count); 1130 debugf("physsz = 0x%08x physmem = %ld (0x%08lx)\n", physsz, physmem, 1131 physmem); 1132 1133 /*******************************************************/ 1134 /* Initialize (statically allocated) kernel pmap. 
*/ 1135 /*******************************************************/ 1136 PMAP_LOCK_INIT(kernel_pmap); 1137 kptbl_min = VM_MIN_KERNEL_ADDRESS / PDIR_SIZE; 1138 1139 debugf("kernel_pmap = 0x%08x\n", (uint32_t)kernel_pmap); 1140 debugf("kptbl_min = %d, kernel_ptbls = %d\n", kptbl_min, kernel_ptbls); 1141 debugf("kernel pdir range: 0x%08x - 0x%08x\n", 1142 kptbl_min * PDIR_SIZE, (kptbl_min + kernel_ptbls) * PDIR_SIZE - 1); 1143 1144 /* Initialize kernel pdir */ 1145 for (i = 0; i < kernel_ptbls; i++) 1146 kernel_pmap->pm_pdir[kptbl_min + i] = 1147 (pte_t *)(kernel_pdir + (i * PAGE_SIZE * PTBL_PAGES)); 1148 1149 for (i = 0; i < MAXCPU; i++) { 1150 kernel_pmap->pm_tid[i] = TID_KERNEL; 1151 1152 /* Initialize each CPU's tidbusy entry 0 with kernel_pmap */ 1153 tidbusy[i][0] = kernel_pmap; 1154 } 1155 /* Mark kernel_pmap active on all CPUs */ 1156 kernel_pmap->pm_active = ~0; 1157 1158 /*******************************************************/ 1159 /* Final setup */ 1160 /*******************************************************/ 1161 1162 /* Enter kstack0 into kernel map, provide guard page */ 1163 kstack0 = virtual_avail + KSTACK_GUARD_PAGES * PAGE_SIZE; 1164 thread0.td_kstack = kstack0; 1165 thread0.td_kstack_pages = KSTACK_PAGES; 1166 1167 debugf("kstack_sz = 0x%08x\n", kstack0_sz); 1168 debugf("kstack0_phys at 0x%08x - 0x%08x\n", 1169 kstack0_phys, kstack0_phys + kstack0_sz); 1170 debugf("kstack0 at 0x%08x - 0x%08x\n", kstack0, kstack0 + kstack0_sz); 1171 1172 virtual_avail += KSTACK_GUARD_PAGES * PAGE_SIZE + kstack0_sz; 1173 for (i = 0; i < KSTACK_PAGES; i++) { 1174 mmu_booke_kenter(mmu, kstack0, kstack0_phys); 1175 kstack0 += PAGE_SIZE; 1176 kstack0_phys += PAGE_SIZE; 1177 } 1178 1179 debugf("virtual_avail = %08x\n", virtual_avail); 1180 debugf("virtual_end = %08x\n", virtual_end); 1181 1182 debugf("mmu_booke_bootstrap: exit\n"); 1183} 1184 1185/* 1186 * Get the physical page address for the given pmap/virtual address. 1187 */ 1188static vm_paddr_t 1189mmu_booke_extract(mmu_t mmu, pmap_t pmap, vm_offset_t va) 1190{ 1191 vm_paddr_t pa; 1192 1193 PMAP_LOCK(pmap); 1194 pa = pte_vatopa(mmu, pmap, va); 1195 PMAP_UNLOCK(pmap); 1196 1197 return (pa); 1198} 1199 1200/* 1201 * Extract the physical page address associated with the given 1202 * kernel virtual address. 1203 */ 1204static vm_paddr_t 1205mmu_booke_kextract(mmu_t mmu, vm_offset_t va) 1206{ 1207 1208 return (pte_vatopa(mmu, kernel_pmap, va)); 1209} 1210 1211/* 1212 * Initialize the pmap module. 1213 * Called by vm_init, to initialize any structures that the pmap 1214 * system needs to map virtual memory. 1215 */ 1216static void 1217mmu_booke_init(mmu_t mmu) 1218{ 1219 int shpgperproc = PMAP_SHPGPERPROC; 1220 1221 /* 1222 * Initialize the address space (zone) for the pv entries. Set a 1223 * high water mark so that the system can recover from excessive 1224 * numbers of pv entries. 1225 */ 1226 pvzone = uma_zcreate("PV ENTRY", sizeof(struct pv_entry), NULL, NULL, 1227 NULL, NULL, UMA_ALIGN_PTR, UMA_ZONE_VM | UMA_ZONE_NOFREE); 1228 1229 TUNABLE_INT_FETCH("vm.pmap.shpgperproc", &shpgperproc); 1230 pv_entry_max = shpgperproc * maxproc + cnt.v_page_count; 1231 1232 TUNABLE_INT_FETCH("vm.pmap.pv_entries", &pv_entry_max); 1233 pv_entry_high_water = 9 * (pv_entry_max / 10); 1234 1235 uma_zone_set_obj(pvzone, &pvzone_obj, pv_entry_max); 1236 1237 /* Pre-fill pvzone with initial number of pv entries. */ 1238 uma_prealloc(pvzone, PV_ENTRY_ZONE_MIN); 1239 1240 /* Initialize ptbl allocation. 
*/ 1241 ptbl_init(); 1242} 1243 1244/* 1245 * Map a list of wired pages into kernel virtual address space. This is 1246 * intended for temporary mappings which do not need page modification or 1247 * references recorded. Existing mappings in the region are overwritten. 1248 */ 1249static void 1250mmu_booke_qenter(mmu_t mmu, vm_offset_t sva, vm_page_t *m, int count) 1251{ 1252 vm_offset_t va; 1253 1254 va = sva; 1255 while (count-- > 0) { 1256 mmu_booke_kenter(mmu, va, VM_PAGE_TO_PHYS(*m)); 1257 va += PAGE_SIZE; 1258 m++; 1259 } 1260} 1261 1262/* 1263 * Remove page mappings from kernel virtual address space. Intended for 1264 * temporary mappings entered by mmu_booke_qenter. 1265 */ 1266static void 1267mmu_booke_qremove(mmu_t mmu, vm_offset_t sva, int count) 1268{ 1269 vm_offset_t va; 1270 1271 va = sva; 1272 while (count-- > 0) { 1273 mmu_booke_kremove(mmu, va); 1274 va += PAGE_SIZE; 1275 } 1276} 1277 1278/* 1279 * Map a wired page into kernel virtual address space. 1280 */ 1281static void 1282mmu_booke_kenter(mmu_t mmu, vm_offset_t va, vm_offset_t pa) 1283{ 1284 unsigned int pdir_idx = PDIR_IDX(va); 1285 unsigned int ptbl_idx = PTBL_IDX(va); 1286 uint32_t flags; 1287 pte_t *pte; 1288 1289 KASSERT(((va >= VM_MIN_KERNEL_ADDRESS) && 1290 (va <= VM_MAX_KERNEL_ADDRESS)), ("mmu_booke_kenter: invalid va")); 1291 1292 flags = 0; 1293 flags |= (PTE_SR | PTE_SW | PTE_SX | PTE_WIRED | PTE_VALID); 1294 flags |= PTE_M; 1295 1296 pte = &(kernel_pmap->pm_pdir[pdir_idx][ptbl_idx]); 1297 1298 mtx_lock_spin(&tlbivax_mutex); 1299 1300 if (PTE_ISVALID(pte)) { 1301 1302 CTR1(KTR_PMAP, "%s: replacing entry!", __func__); 1303 1304 /* Flush entry from TLB0 */ 1305 tlb0_flush_entry(va); 1306 } 1307 1308 pte->rpn = pa & ~PTE_PA_MASK; 1309 pte->flags = flags; 1310 1311 //debugf("mmu_booke_kenter: pdir_idx = %d ptbl_idx = %d va=0x%08x " 1312 // "pa=0x%08x rpn=0x%08x flags=0x%08x\n", 1313 // pdir_idx, ptbl_idx, va, pa, pte->rpn, pte->flags); 1314 1315 /* Flush the real memory from the instruction cache. */ 1316 if ((flags & (PTE_I | PTE_G)) == 0) { 1317 __syncicache((void *)va, PAGE_SIZE); 1318 } 1319 1320 mtx_unlock_spin(&tlbivax_mutex); 1321} 1322 1323/* 1324 * Remove a page from kernel page table. 1325 */ 1326static void 1327mmu_booke_kremove(mmu_t mmu, vm_offset_t va) 1328{ 1329 unsigned int pdir_idx = PDIR_IDX(va); 1330 unsigned int ptbl_idx = PTBL_IDX(va); 1331 pte_t *pte; 1332 1333// CTR2(KTR_PMAP,("%s: s (va = 0x%08x)\n", __func__, va)); 1334 1335 KASSERT(((va >= VM_MIN_KERNEL_ADDRESS) && 1336 (va <= VM_MAX_KERNEL_ADDRESS)), 1337 ("mmu_booke_kremove: invalid va")); 1338 1339 pte = &(kernel_pmap->pm_pdir[pdir_idx][ptbl_idx]); 1340 1341 if (!PTE_ISVALID(pte)) { 1342 1343 CTR1(KTR_PMAP, "%s: invalid pte", __func__); 1344 1345 return; 1346 } 1347 1348 mtx_lock_spin(&tlbivax_mutex); 1349 1350 /* Invalidate entry in TLB0, update PTE. */ 1351 tlb0_flush_entry(va); 1352 pte->flags = 0; 1353 pte->rpn = 0; 1354 1355 mtx_unlock_spin(&tlbivax_mutex); 1356} 1357 1358/* 1359 * Initialize pmap associated with process 0. 1360 */ 1361static void 1362mmu_booke_pinit0(mmu_t mmu, pmap_t pmap) 1363{ 1364 1365 mmu_booke_pinit(mmu, pmap); 1366 PCPU_SET(curpmap, pmap); 1367} 1368 1369/* 1370 * Initialize a preallocated and zeroed pmap structure, 1371 * such as one in a vmspace structure. 
1372 */ 1373static void 1374mmu_booke_pinit(mmu_t mmu, pmap_t pmap) 1375{ 1376 int i; 1377 1378 CTR4(KTR_PMAP, "%s: pmap = %p, proc %d '%s'", __func__, pmap, 1379 curthread->td_proc->p_pid, curthread->td_proc->p_comm); 1380 1381 KASSERT((pmap != kernel_pmap), ("pmap_pinit: initializing kernel_pmap")); 1382 1383 PMAP_LOCK_INIT(pmap); 1384 for (i = 0; i < MAXCPU; i++) 1385 pmap->pm_tid[i] = TID_NONE; 1386 pmap->pm_active = 0; 1387 bzero(&pmap->pm_stats, sizeof(pmap->pm_stats)); 1388 bzero(&pmap->pm_pdir, sizeof(pte_t *) * PDIR_NENTRIES); 1389 TAILQ_INIT(&pmap->pm_ptbl_list); 1390} 1391 1392/* 1393 * Release any resources held by the given physical map. 1394 * Called when a pmap initialized by mmu_booke_pinit is being released. 1395 * Should only be called if the map contains no valid mappings. 1396 */ 1397static void 1398mmu_booke_release(mmu_t mmu, pmap_t pmap) 1399{ 1400 1401 printf("mmu_booke_release: s\n"); 1402 1403 KASSERT(pmap->pm_stats.resident_count == 0, 1404 ("pmap_release: pmap resident count %ld != 0", 1405 pmap->pm_stats.resident_count)); 1406 1407 PMAP_LOCK_DESTROY(pmap); 1408} 1409 1410/* 1411 * Insert the given physical page at the specified virtual address in the 1412 * target physical map with the protection requested. If specified the page 1413 * will be wired down. 1414 */ 1415static void 1416mmu_booke_enter(mmu_t mmu, pmap_t pmap, vm_offset_t va, vm_page_t m, 1417 vm_prot_t prot, boolean_t wired) 1418{ 1419 1420 vm_page_lock_queues(); 1421 PMAP_LOCK(pmap); 1422 mmu_booke_enter_locked(mmu, pmap, va, m, prot, wired); 1423 vm_page_unlock_queues(); 1424 PMAP_UNLOCK(pmap); 1425} 1426 1427static void 1428mmu_booke_enter_locked(mmu_t mmu, pmap_t pmap, vm_offset_t va, vm_page_t m, 1429 vm_prot_t prot, boolean_t wired) 1430{ 1431 pte_t *pte; 1432 vm_paddr_t pa; 1433 uint32_t flags; 1434 int su, sync; 1435 1436 pa = VM_PAGE_TO_PHYS(m); 1437 su = (pmap == kernel_pmap); 1438 sync = 0; 1439 1440 //debugf("mmu_booke_enter_locked: s (pmap=0x%08x su=%d tid=%d m=0x%08x va=0x%08x " 1441 // "pa=0x%08x prot=0x%08x wired=%d)\n", 1442 // (u_int32_t)pmap, su, pmap->pm_tid, 1443 // (u_int32_t)m, va, pa, prot, wired); 1444 1445 if (su) { 1446 KASSERT(((va >= virtual_avail) && 1447 (va <= VM_MAX_KERNEL_ADDRESS)), 1448 ("mmu_booke_enter_locked: kernel pmap, non kernel va")); 1449 } else { 1450 KASSERT((va <= VM_MAXUSER_ADDRESS), 1451 ("mmu_booke_enter_locked: user pmap, non user va")); 1452 } 1453 1454 PMAP_LOCK_ASSERT(pmap, MA_OWNED); 1455 1456 /* 1457 * If there is an existing mapping, and the physical address has not 1458 * changed, must be protection or wiring change. 1459 */ 1460 if (((pte = pte_find(mmu, pmap, va)) != NULL) && 1461 (PTE_ISVALID(pte)) && (PTE_PA(pte) == pa)) { 1462 1463 /* 1464 * Before actually updating pte->flags we calculate and 1465 * prepare its new value in a helper var. 1466 */ 1467 flags = pte->flags; 1468 flags &= ~(PTE_UW | PTE_UX | PTE_SW | PTE_SX | PTE_MODIFIED); 1469 1470 /* Wiring change, just update stats. */ 1471 if (wired) { 1472 if (!PTE_ISWIRED(pte)) { 1473 flags |= PTE_WIRED; 1474 pmap->pm_stats.wired_count++; 1475 } 1476 } else { 1477 if (PTE_ISWIRED(pte)) { 1478 flags &= ~PTE_WIRED; 1479 pmap->pm_stats.wired_count--; 1480 } 1481 } 1482 1483 if (prot & VM_PROT_WRITE) { 1484 /* Add write permissions. */ 1485 flags |= PTE_SW; 1486 if (!su) 1487 flags |= PTE_UW; 1488 } else { 1489 /* Handle modified pages, sense modify status. 
*/ 1490 1491 /* 1492 * The PTE_MODIFIED flag could be set by underlying 1493 * TLB misses since we last read it (above), possibly 1494 * other CPUs could update it so we check in the PTE 1495 * directly rather than rely on that saved local flags 1496 * copy. 1497 */ 1498 if (PTE_ISMODIFIED(pte)) 1499 vm_page_dirty(m); 1500 } 1501 1502 if (prot & VM_PROT_EXECUTE) { 1503 flags |= PTE_SX; 1504 if (!su) 1505 flags |= PTE_UX; 1506 1507 /* 1508 * Check existing flags for execute permissions: if we 1509 * are turning execute permissions on, icache should 1510 * be flushed. 1511 */ 1512 if ((flags & (PTE_UX | PTE_SX)) == 0) 1513 sync++; 1514 } 1515 1516 flags &= ~PTE_REFERENCED; 1517 1518 /* 1519 * The new flags value is all calculated -- only now actually 1520 * update the PTE. 1521 */ 1522 mtx_lock_spin(&tlbivax_mutex); 1523 1524 tlb0_flush_entry(va); 1525 pte->flags = flags; 1526 1527 mtx_unlock_spin(&tlbivax_mutex); 1528 1529 } else { 1530 /* 1531 * If there is an existing mapping, but it's for a different 1532 * physical address, pte_enter() will delete the old mapping. 1533 */ 1534 //if ((pte != NULL) && PTE_ISVALID(pte)) 1535 // debugf("mmu_booke_enter_locked: replace\n"); 1536 //else 1537 // debugf("mmu_booke_enter_locked: new\n"); 1538 1539 /* Now set up the flags and install the new mapping. */ 1540 flags = (PTE_SR | PTE_VALID); 1541 flags |= PTE_M; 1542 1543 if (!su) 1544 flags |= PTE_UR; 1545 1546 if (prot & VM_PROT_WRITE) { 1547 flags |= PTE_SW; 1548 if (!su) 1549 flags |= PTE_UW; 1550 } 1551 1552 if (prot & VM_PROT_EXECUTE) { 1553 flags |= PTE_SX; 1554 if (!su) 1555 flags |= PTE_UX; 1556 } 1557 1558 /* If its wired update stats. */ 1559 if (wired) { 1560 pmap->pm_stats.wired_count++; 1561 flags |= PTE_WIRED; 1562 } 1563 1564 pte_enter(mmu, pmap, m, va, flags); 1565 1566 /* Flush the real memory from the instruction cache. */ 1567 if (prot & VM_PROT_EXECUTE) 1568 sync++; 1569 } 1570 1571 if (sync && (su || pmap == PCPU_GET(curpmap))) { 1572 __syncicache((void *)va, PAGE_SIZE); 1573 sync = 0; 1574 } 1575 1576 if (sync) { 1577 /* Create a temporary mapping. */ 1578 pmap = PCPU_GET(curpmap); 1579 1580 va = 0; 1581 pte = pte_find(mmu, pmap, va); 1582 KASSERT(pte == NULL, ("%s:%d", __func__, __LINE__)); 1583 1584 flags = PTE_SR | PTE_VALID | PTE_UR | PTE_M; 1585 1586 pte_enter(mmu, pmap, m, va, flags); 1587 __syncicache((void *)va, PAGE_SIZE); 1588 pte_remove(mmu, pmap, va, PTBL_UNHOLD); 1589 } 1590} 1591 1592/* 1593 * Maps a sequence of resident pages belonging to the same object. 1594 * The sequence begins with the given page m_start. This page is 1595 * mapped at the given virtual address start. Each subsequent page is 1596 * mapped at a virtual address that is offset from start by the same 1597 * amount as the page is offset from m_start within the object. The 1598 * last page in the sequence is the page with the largest offset from 1599 * m_start that can be mapped at a virtual address less than the given 1600 * virtual address end. Not every virtual page between start and end 1601 * is mapped; only those for which a resident page exists with the 1602 * corresponding offset from m_start are mapped. 
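 *
 * As a small illustration with assumed values (the addresses are made up,
 * not taken from this file): with start = 0x20000000 and
 * m_start->pindex = 4, a resident page whose pindex is 7 gets diff = 3 and
 * is entered at 0x20000000 + ptoa(3), with prot restricted to
 * read/execute and wired passed as FALSE, provided that address is still
 * below end.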
1603 */ 1604static void 1605mmu_booke_enter_object(mmu_t mmu, pmap_t pmap, vm_offset_t start, 1606 vm_offset_t end, vm_page_t m_start, vm_prot_t prot) 1607{ 1608 vm_page_t m; 1609 vm_pindex_t diff, psize; 1610 1611 psize = atop(end - start); 1612 m = m_start; 1613 PMAP_LOCK(pmap); 1614 while (m != NULL && (diff = m->pindex - m_start->pindex) < psize) { 1615 mmu_booke_enter_locked(mmu, pmap, start + ptoa(diff), m, 1616 prot & (VM_PROT_READ | VM_PROT_EXECUTE), FALSE); 1617 m = TAILQ_NEXT(m, listq); 1618 } 1619 PMAP_UNLOCK(pmap); 1620} 1621 1622static void 1623mmu_booke_enter_quick(mmu_t mmu, pmap_t pmap, vm_offset_t va, vm_page_t m, 1624 vm_prot_t prot) 1625{ 1626 1627 PMAP_LOCK(pmap); 1628 mmu_booke_enter_locked(mmu, pmap, va, m, 1629 prot & (VM_PROT_READ | VM_PROT_EXECUTE), FALSE); 1630 PMAP_UNLOCK(pmap); 1631} 1632 1633/* 1634 * Remove the given range of addresses from the specified map. 1635 * 1636 * It is assumed that the start and end are properly rounded to the page size. 1637 */ 1638static void 1639mmu_booke_remove(mmu_t mmu, pmap_t pmap, vm_offset_t va, vm_offset_t endva) 1640{ 1641 pte_t *pte; 1642 uint8_t hold_flag; 1643 1644 int su = (pmap == kernel_pmap); 1645 1646 //debugf("mmu_booke_remove: s (su = %d pmap=0x%08x tid=%d va=0x%08x endva=0x%08x)\n", 1647 // su, (u_int32_t)pmap, pmap->pm_tid, va, endva); 1648 1649 if (su) { 1650 KASSERT(((va >= virtual_avail) && 1651 (va <= VM_MAX_KERNEL_ADDRESS)), 1652 ("mmu_booke_remove: kernel pmap, non kernel va")); 1653 } else { 1654 KASSERT((va <= VM_MAXUSER_ADDRESS), 1655 ("mmu_booke_remove: user pmap, non user va")); 1656 } 1657 1658 if (PMAP_REMOVE_DONE(pmap)) { 1659 //debugf("mmu_booke_remove: e (empty)\n"); 1660 return; 1661 } 1662 1663 hold_flag = PTBL_HOLD_FLAG(pmap); 1664 //debugf("mmu_booke_remove: hold_flag = %d\n", hold_flag); 1665 1666 vm_page_lock_queues(); 1667 PMAP_LOCK(pmap); 1668 for (; va < endva; va += PAGE_SIZE) { 1669 pte = pte_find(mmu, pmap, va); 1670 if ((pte != NULL) && PTE_ISVALID(pte)) 1671 pte_remove(mmu, pmap, va, hold_flag); 1672 } 1673 PMAP_UNLOCK(pmap); 1674 vm_page_unlock_queues(); 1675 1676 //debugf("mmu_booke_remove: e\n"); 1677} 1678 1679/* 1680 * Remove physical page from all pmaps in which it resides. 1681 */ 1682static void 1683mmu_booke_remove_all(mmu_t mmu, vm_page_t m) 1684{ 1685 pv_entry_t pv, pvn; 1686 uint8_t hold_flag; 1687 1688 mtx_assert(&vm_page_queue_mtx, MA_OWNED); 1689 1690 for (pv = TAILQ_FIRST(&m->md.pv_list); pv != NULL; pv = pvn) { 1691 pvn = TAILQ_NEXT(pv, pv_link); 1692 1693 PMAP_LOCK(pv->pv_pmap); 1694 hold_flag = PTBL_HOLD_FLAG(pv->pv_pmap); 1695 pte_remove(mmu, pv->pv_pmap, pv->pv_va, hold_flag); 1696 PMAP_UNLOCK(pv->pv_pmap); 1697 } 1698 vm_page_flag_clear(m, PG_WRITEABLE); 1699} 1700 1701/* 1702 * Map a range of physical addresses into kernel virtual address space. 1703 */ 1704static vm_offset_t 1705mmu_booke_map(mmu_t mmu, vm_offset_t *virt, vm_offset_t pa_start, 1706 vm_offset_t pa_end, int prot) 1707{ 1708 vm_offset_t sva = *virt; 1709 vm_offset_t va = sva; 1710 1711 //debugf("mmu_booke_map: s (sva = 0x%08x pa_start = 0x%08x pa_end = 0x%08x)\n", 1712 // sva, pa_start, pa_end); 1713 1714 while (pa_start < pa_end) { 1715 mmu_booke_kenter(mmu, va, pa_start); 1716 va += PAGE_SIZE; 1717 pa_start += PAGE_SIZE; 1718 } 1719 *virt = va; 1720 1721 //debugf("mmu_booke_map: e (va = 0x%08x)\n", va); 1722 return (sva); 1723} 1724 1725/* 1726 * The pmap must be activated before it's address space can be accessed in any 1727 * way. 
1728 */ 1729static void 1730mmu_booke_activate(mmu_t mmu, struct thread *td) 1731{ 1732 pmap_t pmap; 1733 1734 pmap = &td->td_proc->p_vmspace->vm_pmap; 1735 1736 CTR5(KTR_PMAP, "%s: s (td = %p, proc = '%s', id = %d, pmap = 0x%08x)", 1737 __func__, td, td->td_proc->p_comm, td->td_proc->p_pid, pmap); 1738 1739 KASSERT((pmap != kernel_pmap), ("mmu_booke_activate: kernel_pmap!")); 1740 1741 mtx_lock_spin(&sched_lock); 1742 1743 atomic_set_int(&pmap->pm_active, PCPU_GET(cpumask)); 1744 PCPU_SET(curpmap, pmap); 1745 1746 if (pmap->pm_tid[PCPU_GET(cpuid)] == TID_NONE) 1747 tid_alloc(pmap); 1748 1749 /* Load PID0 register with pmap tid value. */ 1750 mtspr(SPR_PID0, pmap->pm_tid[PCPU_GET(cpuid)]); 1751 __asm __volatile("isync"); 1752 1753 mtx_unlock_spin(&sched_lock); 1754 1755 CTR3(KTR_PMAP, "%s: e (tid = %d for '%s')", __func__, 1756 pmap->pm_tid[PCPU_GET(cpuid)], td->td_proc->p_comm); 1757} 1758 1759/* 1760 * Deactivate the specified process's address space. 1761 */ 1762static void 1763mmu_booke_deactivate(mmu_t mmu, struct thread *td) 1764{ 1765 pmap_t pmap; 1766 1767 pmap = &td->td_proc->p_vmspace->vm_pmap; 1768 1769 CTR5(KTR_PMAP, "%s: td=%p, proc = '%s', id = %d, pmap = 0x%08x", 1770 __func__, td, td->td_proc->p_comm, td->td_proc->p_pid, pmap); 1771 1772 atomic_clear_int(&pmap->pm_active, PCPU_GET(cpumask)); 1773 PCPU_SET(curpmap, NULL); 1774} 1775 1776/* 1777 * Copy the range specified by src_addr/len 1778 * from the source map to the range dst_addr/len 1779 * in the destination map. 1780 * 1781 * This routine is only advisory and need not do anything. 1782 */ 1783static void 1784mmu_booke_copy(pmap_t dst_pmap, pmap_t src_pmap, vm_offset_t dst_addr, 1785 vm_size_t len, vm_offset_t src_addr) 1786{ 1787 1788} 1789 1790/* 1791 * Set the physical protection on the specified range of this map as requested. 1792 */ 1793static void 1794mmu_booke_protect(mmu_t mmu, pmap_t pmap, vm_offset_t sva, vm_offset_t eva, 1795 vm_prot_t prot) 1796{ 1797 vm_offset_t va; 1798 vm_page_t m; 1799 pte_t *pte; 1800 1801 if ((prot & VM_PROT_READ) == VM_PROT_NONE) { 1802 mmu_booke_remove(mmu, pmap, sva, eva); 1803 return; 1804 } 1805 1806 if (prot & VM_PROT_WRITE) 1807 return; 1808 1809 vm_page_lock_queues(); 1810 PMAP_LOCK(pmap); 1811 for (va = sva; va < eva; va += PAGE_SIZE) { 1812 if ((pte = pte_find(mmu, pmap, va)) != NULL) { 1813 if (PTE_ISVALID(pte)) { 1814 m = PHYS_TO_VM_PAGE(PTE_PA(pte)); 1815 1816 mtx_lock_spin(&tlbivax_mutex); 1817 1818 /* Handle modified pages. */ 1819 if (PTE_ISMODIFIED(pte)) 1820 vm_page_dirty(m); 1821 1822 /* Referenced pages. */ 1823 if (PTE_ISREFERENCED(pte)) 1824 vm_page_flag_set(m, PG_REFERENCED); 1825 1826 tlb0_flush_entry(va); 1827 pte->flags &= ~(PTE_UW | PTE_SW | PTE_MODIFIED | 1828 PTE_REFERENCED); 1829 1830 mtx_unlock_spin(&tlbivax_mutex); 1831 } 1832 } 1833 } 1834 PMAP_UNLOCK(pmap); 1835 vm_page_unlock_queues(); 1836} 1837 1838/* 1839 * Clear the write and modified bits in each of the given page's mappings. 1840 */ 1841static void 1842mmu_booke_remove_write(mmu_t mmu, vm_page_t m) 1843{ 1844 pv_entry_t pv; 1845 pte_t *pte; 1846 1847 mtx_assert(&vm_page_queue_mtx, MA_OWNED); 1848 if ((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) != 0 || 1849 (m->flags & PG_WRITEABLE) == 0) 1850 return; 1851 1852 TAILQ_FOREACH(pv, &m->md.pv_list, pv_link) { 1853 PMAP_LOCK(pv->pv_pmap); 1854 if ((pte = pte_find(mmu, pv->pv_pmap, pv->pv_va)) != NULL) { 1855 if (PTE_ISVALID(pte)) { 1856 m = PHYS_TO_VM_PAGE(PTE_PA(pte)); 1857 1858 mtx_lock_spin(&tlbivax_mutex); 1859 1860 /* Handle modified pages. 
*/ 1861 if (PTE_ISMODIFIED(pte)) 1862 vm_page_dirty(m); 1863 1864 /* Referenced pages. */ 1865 if (PTE_ISREFERENCED(pte)) 1866 vm_page_flag_set(m, PG_REFERENCED); 1867 1868 /* Flush mapping from TLB0. */ 1869 pte->flags &= ~(PTE_UW | PTE_SW | PTE_MODIFIED | 1870 PTE_REFERENCED); 1871 1872 mtx_unlock_spin(&tlbivax_mutex); 1873 } 1874 } 1875 PMAP_UNLOCK(pv->pv_pmap); 1876 } 1877 vm_page_flag_clear(m, PG_WRITEABLE); 1878} 1879 1880static boolean_t 1881mmu_booke_page_executable(mmu_t mmu, vm_page_t m) 1882{ 1883 pv_entry_t pv; 1884 pte_t *pte; 1885 boolean_t executable; 1886 1887 executable = FALSE; 1888 TAILQ_FOREACH(pv, &m->md.pv_list, pv_link) { 1889 PMAP_LOCK(pv->pv_pmap); 1890 pte = pte_find(mmu, pv->pv_pmap, pv->pv_va); 1891 if (pte != NULL && PTE_ISVALID(pte) && (pte->flags & PTE_UX)) 1892 executable = TRUE; 1893 PMAP_UNLOCK(pv->pv_pmap); 1894 if (executable) 1895 break; 1896 } 1897 1898 return (executable); 1899} 1900 1901/* 1902 * Atomically extract and hold the physical page with the given 1903 * pmap and virtual address pair if that mapping permits the given 1904 * protection. 1905 */ 1906static vm_page_t 1907mmu_booke_extract_and_hold(mmu_t mmu, pmap_t pmap, vm_offset_t va, 1908 vm_prot_t prot) 1909{ 1910 pte_t *pte; 1911 vm_page_t m; 1912 uint32_t pte_wbit; 1913 1914 m = NULL; 1915 vm_page_lock_queues(); 1916 PMAP_LOCK(pmap); 1917 1918 pte = pte_find(mmu, pmap, va); 1919 if ((pte != NULL) && PTE_ISVALID(pte)) { 1920 if (pmap == kernel_pmap) 1921 pte_wbit = PTE_SW; 1922 else 1923 pte_wbit = PTE_UW; 1924 1925 if ((pte->flags & pte_wbit) || ((prot & VM_PROT_WRITE) == 0)) { 1926 m = PHYS_TO_VM_PAGE(PTE_PA(pte)); 1927 vm_page_hold(m); 1928 } 1929 } 1930 1931 vm_page_unlock_queues(); 1932 PMAP_UNLOCK(pmap); 1933 return (m); 1934} 1935 1936/* 1937 * Initialize a vm_page's machine-dependent fields. 1938 */ 1939static void 1940mmu_booke_page_init(mmu_t mmu, vm_page_t m) 1941{ 1942 1943 TAILQ_INIT(&m->md.pv_list); 1944} 1945 1946/* 1947 * mmu_booke_zero_page_area zeros the specified hardware page by 1948 * mapping it into virtual memory and using bzero to clear 1949 * its contents. 1950 * 1951 * off and size must reside within a single page. 1952 */ 1953static void 1954mmu_booke_zero_page_area(mmu_t mmu, vm_page_t m, int off, int size) 1955{ 1956 vm_offset_t va; 1957 1958 /* XXX KASSERT off and size are within a single page? */ 1959 1960 mtx_lock(&zero_page_mutex); 1961 va = zero_page_va; 1962 1963 mmu_booke_kenter(mmu, va, VM_PAGE_TO_PHYS(m)); 1964 bzero((caddr_t)va + off, size); 1965 mmu_booke_kremove(mmu, va); 1966 1967 mtx_unlock(&zero_page_mutex); 1968} 1969 1970/* 1971 * mmu_booke_zero_page zeros the specified hardware page. 1972 */ 1973static void 1974mmu_booke_zero_page(mmu_t mmu, vm_page_t m) 1975{ 1976 1977 mmu_booke_zero_page_area(mmu, m, 0, PAGE_SIZE); 1978} 1979 1980/* 1981 * mmu_booke_copy_page copies the specified (machine independent) page by 1982 * mapping the page into virtual memory and using memcpy to copy the page, 1983 * one machine dependent page at a time.
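 * The copy is done through two dedicated kernel VA slots (copy_page_src_va
 * and copy_page_dst_va), serialized by copy_page_mutex so that concurrent
 * callers do not overwrite each other's temporary mappings.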
1984 */ 1985static void 1986mmu_booke_copy_page(mmu_t mmu, vm_page_t sm, vm_page_t dm) 1987{ 1988 vm_offset_t sva, dva; 1989 1990 sva = copy_page_src_va; 1991 dva = copy_page_dst_va; 1992 1993 mtx_lock(&copy_page_mutex); 1994 mmu_booke_kenter(mmu, sva, VM_PAGE_TO_PHYS(sm)); 1995 mmu_booke_kenter(mmu, dva, VM_PAGE_TO_PHYS(dm)); 1996 memcpy((caddr_t)dva, (caddr_t)sva, PAGE_SIZE); 1997 mmu_booke_kremove(mmu, dva); 1998 mmu_booke_kremove(mmu, sva); 1999 mtx_unlock(&copy_page_mutex); 2000} 2001 2002/* 2003 * mmu_booke_zero_page_idle zeros the specified hardware page by mapping it 2004 * into virtual memory and using bzero to clear its contents. This is intended 2005 * to be called from the vm_pagezero process only and outside of Giant. No 2006 * lock is required. 2007 */ 2008static void 2009mmu_booke_zero_page_idle(mmu_t mmu, vm_page_t m) 2010{ 2011 vm_offset_t va; 2012 2013 va = zero_page_idle_va; 2014 mmu_booke_kenter(mmu, va, VM_PAGE_TO_PHYS(m)); 2015 bzero((caddr_t)va, PAGE_SIZE); 2016 mmu_booke_kremove(mmu, va); 2017} 2018 2019/* 2020 * Return whether or not the specified physical page was modified 2021 * in any physical map. 2022 */ 2023static boolean_t 2024mmu_booke_is_modified(mmu_t mmu, vm_page_t m) 2025{ 2026 pte_t *pte; 2027 pv_entry_t pv; 2028 2029 mtx_assert(&vm_page_queue_mtx, MA_OWNED); 2030 if ((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) != 0) 2031 return (FALSE); 2032 2033 TAILQ_FOREACH(pv, &m->md.pv_list, pv_link) { 2034 PMAP_LOCK(pv->pv_pmap); 2035 if ((pte = pte_find(mmu, pv->pv_pmap, pv->pv_va)) != NULL) { 2036 if (!PTE_ISVALID(pte)) 2037 goto make_sure_to_unlock; 2038 2039 if (PTE_ISMODIFIED(pte)) { 2040 PMAP_UNLOCK(pv->pv_pmap); 2041 return (TRUE); 2042 } 2043 } 2044make_sure_to_unlock: 2045 PMAP_UNLOCK(pv->pv_pmap); 2046 } 2047 return (FALSE); 2048} 2049 2050/* 2051 * Return whether or not the specified virtual address is eligible 2052 * for prefault. 2053 */ 2054static boolean_t 2055mmu_booke_is_prefaultable(mmu_t mmu, pmap_t pmap, vm_offset_t addr) 2056{ 2057 2058 return (FALSE); 2059} 2060 2061/* 2062 * Clear the modify bits on the specified physical page. 2063 */ 2064static void 2065mmu_booke_clear_modify(mmu_t mmu, vm_page_t m) 2066{ 2067 pte_t *pte; 2068 pv_entry_t pv; 2069 2070 mtx_assert(&vm_page_queue_mtx, MA_OWNED); 2071 if ((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) != 0) 2072 return; 2073 2074 TAILQ_FOREACH(pv, &m->md.pv_list, pv_link) { 2075 PMAP_LOCK(pv->pv_pmap); 2076 if ((pte = pte_find(mmu, pv->pv_pmap, pv->pv_va)) != NULL) { 2077 if (!PTE_ISVALID(pte)) 2078 goto make_sure_to_unlock; 2079 2080 mtx_lock_spin(&tlbivax_mutex); 2081 2082 if (pte->flags & (PTE_SW | PTE_UW | PTE_MODIFIED)) { 2083 tlb0_flush_entry(pv->pv_va); 2084 pte->flags &= ~(PTE_SW | PTE_UW | PTE_MODIFIED | 2085 PTE_REFERENCED); 2086 } 2087 2088 mtx_unlock_spin(&tlbivax_mutex); 2089 } 2090make_sure_to_unlock: 2091 PMAP_UNLOCK(pv->pv_pmap); 2092 } 2093} 2094 2095/* 2096 * Return a count of reference bits for a page, clearing those bits. 2097 * It is not necessary for every reference bit to be cleared, but it 2098 * is necessary that 0 only be returned when there are truly no 2099 * reference bits set. 2100 * 2101 * XXX: The exact number of bits to check and clear is a matter that 2102 * should be tested and standardized at some point in the future for 2103 * optimal aging of shared pages.
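 * As implemented below, at most five referenced mappings are cleared per
 * call (the loop terminates once count exceeds 4), which bounds the TLB
 * invalidation work performed for heavily shared pages.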
2104 */ 2105static int 2106mmu_booke_ts_referenced(mmu_t mmu, vm_page_t m) 2107{ 2108 pte_t *pte; 2109 pv_entry_t pv; 2110 int count; 2111 2112 mtx_assert(&vm_page_queue_mtx, MA_OWNED); 2113 if ((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) != 0) 2114 return (0); 2115 2116 count = 0; 2117 TAILQ_FOREACH(pv, &m->md.pv_list, pv_link) { 2118 PMAP_LOCK(pv->pv_pmap); 2119 if ((pte = pte_find(mmu, pv->pv_pmap, pv->pv_va)) != NULL) { 2120 if (!PTE_ISVALID(pte)) 2121 goto make_sure_to_unlock; 2122 2123 if (PTE_ISREFERENCED(pte)) { 2124 mtx_lock_spin(&tlbivax_mutex); 2125 2126 tlb0_flush_entry(pv->pv_va); 2127 pte->flags &= ~PTE_REFERENCED; 2128 2129 mtx_unlock_spin(&tlbivax_mutex); 2130 2131 if (++count > 4) { 2132 PMAP_UNLOCK(pv->pv_pmap); 2133 break; 2134 } 2135 } 2136 } 2137make_sure_to_unlock: 2138 PMAP_UNLOCK(pv->pv_pmap); 2139 } 2140 return (count); 2141} 2142 2143/* 2144 * Clear the reference bit on the specified physical page. 2145 */ 2146static void 2147mmu_booke_clear_reference(mmu_t mmu, vm_page_t m) 2148{ 2149 pte_t *pte; 2150 pv_entry_t pv; 2151 2152 mtx_assert(&vm_page_queue_mtx, MA_OWNED); 2153 if ((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) != 0) 2154 return; 2155 2156 TAILQ_FOREACH(pv, &m->md.pv_list, pv_link) { 2157 PMAP_LOCK(pv->pv_pmap); 2158 if ((pte = pte_find(mmu, pv->pv_pmap, pv->pv_va)) != NULL) { 2159 if (!PTE_ISVALID(pte)) 2160 goto make_sure_to_unlock; 2161 2162 if (PTE_ISREFERENCED(pte)) { 2163 mtx_lock_spin(&tlbivax_mutex); 2164 2165 tlb0_flush_entry(pv->pv_va); 2166 pte->flags &= ~PTE_REFERENCED; 2167 2168 mtx_unlock_spin(&tlbivax_mutex); 2169 } 2170 } 2171make_sure_to_unlock: 2172 PMAP_UNLOCK(pv->pv_pmap); 2173 } 2174} 2175 2176/* 2177 * Change wiring attribute for a map/virtual-address pair. 2178 */ 2179static void 2180mmu_booke_change_wiring(mmu_t mmu, pmap_t pmap, vm_offset_t va, boolean_t wired) 2181{ 2182 pte_t *pte; 2183 2184 PMAP_LOCK(pmap); 2185 if ((pte = pte_find(mmu, pmap, va)) != NULL) { 2186 if (wired) { 2187 if (!PTE_ISWIRED(pte)) { 2188 pte->flags |= PTE_WIRED; 2189 pmap->pm_stats.wired_count++; 2190 } 2191 } else { 2192 if (PTE_ISWIRED(pte)) { 2193 pte->flags &= ~PTE_WIRED; 2194 pmap->pm_stats.wired_count--; 2195 } 2196 } 2197 } 2198 PMAP_UNLOCK(pmap); 2199} 2200 2201/* 2202 * Return true if the pmap's pv is one of the first 16 pvs linked to from this 2203 * page. This count may be changed upwards or downwards in the future; it is 2204 * only necessary that true be returned for a small subset of pmaps for proper 2205 * page aging. 2206 */ 2207static boolean_t 2208mmu_booke_page_exists_quick(mmu_t mmu, pmap_t pmap, vm_page_t m) 2209{ 2210 pv_entry_t pv; 2211 int loops; 2212 2213 mtx_assert(&vm_page_queue_mtx, MA_OWNED); 2214 if ((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) != 0) 2215 return (FALSE); 2216 2217 loops = 0; 2218 TAILQ_FOREACH(pv, &m->md.pv_list, pv_link) { 2219 if (pv->pv_pmap == pmap) 2220 return (TRUE); 2221 2222 if (++loops >= 16) 2223 break; 2224 } 2225 return (FALSE); 2226} 2227 2228/* 2229 * Return the number of managed mappings to the given physical page that are 2230 * wired.
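 * Only valid translations (PTE_ISVALID) with PTE_WIRED set contribute to the
 * returned count; fictitious pages are reported as having no wired mappings.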
2231 */ 2232static int 2233mmu_booke_page_wired_mappings(mmu_t mmu, vm_page_t m) 2234{ 2235 pv_entry_t pv; 2236 pte_t *pte; 2237 int count = 0; 2238 2239 if ((m->flags & PG_FICTITIOUS) != 0) 2240 return (count); 2241 mtx_assert(&vm_page_queue_mtx, MA_OWNED); 2242 2243 TAILQ_FOREACH(pv, &m->md.pv_list, pv_link) { 2244 PMAP_LOCK(pv->pv_pmap); 2245 if ((pte = pte_find(mmu, pv->pv_pmap, pv->pv_va)) != NULL) 2246 if (PTE_ISVALID(pte) && PTE_ISWIRED(pte)) 2247 count++; 2248 PMAP_UNLOCK(pv->pv_pmap); 2249 } 2250 2251 return (count); 2252} 2253 2254static int 2255mmu_booke_dev_direct_mapped(mmu_t mmu, vm_offset_t pa, vm_size_t size) 2256{ 2257 int i; 2258 vm_offset_t va; 2259 2260 /* 2261 * This currently does not work for entries that 2262 * overlap TLB1 entries. 2263 */ 2264 for (i = 0; i < tlb1_idx; i ++) { 2265 if (tlb1_iomapped(i, pa, size, &va) == 0) 2266 return (0); 2267 } 2268 2269 return (EFAULT); 2270} 2271 2272vm_offset_t 2273mmu_booke_dumpsys_map(mmu_t mmu, struct pmap_md *md, vm_size_t ofs, 2274 vm_size_t *sz) 2275{ 2276 vm_paddr_t pa, ppa; 2277 vm_offset_t va; 2278 vm_size_t gran; 2279 2280 /* Raw physical memory dumps don't have a virtual address. */ 2281 if (md->md_vaddr == ~0UL) { 2282 /* We always map a 256MB page at 256M. */ 2283 gran = 256 * 1024 * 1024; 2284 pa = md->md_paddr + ofs; 2285 ppa = pa & ~(gran - 1); 2286 ofs = pa - ppa; 2287 va = gran; 2288 tlb1_set_entry(va, ppa, gran, _TLB_ENTRY_IO); 2289 if (*sz > (gran - ofs)) 2290 *sz = gran - ofs; 2291 return (va + ofs); 2292 } 2293 2294 /* Minidumps are based on virtual memory addresses. */ 2295 va = md->md_vaddr + ofs; 2296 if (va >= kernstart + kernsize) { 2297 gran = PAGE_SIZE - (va & PAGE_MASK); 2298 if (*sz > gran) 2299 *sz = gran; 2300 } 2301 return (va); 2302} 2303 2304void 2305mmu_booke_dumpsys_unmap(mmu_t mmu, struct pmap_md *md, vm_size_t ofs, 2306 vm_offset_t va) 2307{ 2308 2309 /* Raw physical memory dumps don't have a virtual address. */ 2310 if (md->md_vaddr == ~0UL) { 2311 tlb1_idx--; 2312 tlb1[tlb1_idx].mas1 = 0; 2313 tlb1[tlb1_idx].mas2 = 0; 2314 tlb1[tlb1_idx].mas3 = 0; 2315 tlb1_write_entry(tlb1_idx); 2316 return; 2317 } 2318 2319 /* Minidumps are based on virtual memory addresses. */ 2320 /* Nothing to do... */ 2321} 2322 2323struct pmap_md * 2324mmu_booke_scan_md(mmu_t mmu, struct pmap_md *prev) 2325{ 2326 static struct pmap_md md; 2327 struct bi_mem_region *mr; 2328 pte_t *pte; 2329 vm_offset_t va; 2330 2331 if (dumpsys_minidump) { 2332 md.md_paddr = ~0UL; /* Minidumps use virtual addresses. */ 2333 if (prev == NULL) { 2334 /* 1st: kernel .data and .bss. */ 2335 md.md_index = 1; 2336 md.md_vaddr = trunc_page((uintptr_t)_etext); 2337 md.md_size = round_page((uintptr_t)_end) - md.md_vaddr; 2338 return (&md); 2339 } 2340 switch (prev->md_index) { 2341 case 1: 2342 /* 2nd: msgbuf and tables (see pmap_bootstrap()). */ 2343 md.md_index = 2; 2344 md.md_vaddr = data_start; 2345 md.md_size = data_end - data_start; 2346 break; 2347 case 2: 2348 /* 3rd: kernel VM. */ 2349 va = prev->md_vaddr + prev->md_size; 2350 /* Find start of next chunk (from va). */ 2351 while (va < virtual_end) { 2352 /* Don't dump the buffer cache. */ 2353 if (va >= kmi.buffer_sva && 2354 va < kmi.buffer_eva) { 2355 va = kmi.buffer_eva; 2356 continue; 2357 } 2358 pte = pte_find(mmu, kernel_pmap, va); 2359 if (pte != NULL && PTE_ISVALID(pte)) 2360 break; 2361 va += PAGE_SIZE; 2362 } 2363 if (va < virtual_end) { 2364 md.md_vaddr = va; 2365 va += PAGE_SIZE; 2366 /* Find last page in chunk. 
*/ 2367 while (va < virtual_end) { 2368 /* Don't run into the buffer cache. */ 2369 if (va == kmi.buffer_sva) 2370 break; 2371 pte = pte_find(mmu, kernel_pmap, va); 2372 if (pte == NULL || !PTE_ISVALID(pte)) 2373 break; 2374 va += PAGE_SIZE; 2375 } 2376 md.md_size = va - md.md_vaddr; 2377 break; 2378 } 2379 md.md_index = 3; 2380 /* FALLTHROUGH */ 2381 default: 2382 return (NULL); 2383 } 2384 } else { /* physical dumps */ 2385 mr = bootinfo_mr(); 2386 if (prev == NULL) { 2387 /* first physical chunk. */ 2388 md.md_paddr = mr->mem_base; 2389 md.md_size = mr->mem_size; 2390 md.md_vaddr = ~0UL; 2391 md.md_index = 1; 2392 } else if (md.md_index < bootinfo->bi_mem_reg_no) { 2393 md.md_paddr = mr[md.md_index].mem_base; 2394 md.md_size = mr[md.md_index].mem_size; 2395 md.md_vaddr = ~0UL; 2396 md.md_index++; 2397 } else { 2398 /* There's no next physical chunk. */ 2399 return (NULL); 2400 } 2401 } 2402 2403 return (&md); 2404} 2405 2406/* 2407 * Map a set of physical memory pages into the kernel virtual address space. 2408 * Return a pointer to where it is mapped. This routine is intended to be used 2409 * for mapping device memory, NOT real memory. 2410 */ 2411static void * 2412mmu_booke_mapdev(mmu_t mmu, vm_offset_t pa, vm_size_t size) 2413{ 2414 void *res; 2415 uintptr_t va; 2416 vm_size_t sz; 2417 2418 va = (pa >= 0x80000000) ? pa : (0xe2000000 + pa); 2419 res = (void *)va; 2420 2421 do { 2422 sz = 1 << (ilog2(size) & ~1); 2423 if (bootverbose) 2424 printf("Wiring VA=%x to PA=%x (size=%x), " 2425 "using TLB1[%d]\n", va, pa, sz, tlb1_idx); 2426 tlb1_set_entry(va, pa, sz, _TLB_ENTRY_IO); 2427 size -= sz; 2428 pa += sz; 2429 va += sz; 2430 } while (size > 0); 2431 2432 return (res); 2433} 2434 2435/* 2436 * 'Unmap' a range mapped by mmu_booke_mapdev(). 2437 */ 2438static void 2439mmu_booke_unmapdev(mmu_t mmu, vm_offset_t va, vm_size_t size) 2440{ 2441 vm_offset_t base, offset; 2442 2443 /* 2444 * Unmap only if this is inside kernel virtual space. 2445 */ 2446 if ((va >= VM_MIN_KERNEL_ADDRESS) && (va <= VM_MAX_KERNEL_ADDRESS)) { 2447 base = trunc_page(va); 2448 offset = va & PAGE_MASK; 2449 size = roundup(offset + size, PAGE_SIZE); 2450 kmem_free(kernel_map, base, size); 2451 } 2452} 2453 2454/* 2455 * mmu_booke_object_init_pt preloads the ptes for a given object into the 2456 * specified pmap. This eliminates the blast of soft faults on process startup 2457 * and immediately after an mmap. 2458 */ 2459static void 2460mmu_booke_object_init_pt(mmu_t mmu, pmap_t pmap, vm_offset_t addr, 2461 vm_object_t object, vm_pindex_t pindex, vm_size_t size) 2462{ 2463 2464 VM_OBJECT_LOCK_ASSERT(object, MA_OWNED); 2465 KASSERT(object->type == OBJT_DEVICE, 2466 ("mmu_booke_object_init_pt: non-device object")); 2467} 2468 2469/* 2470 * Perform the pmap work for mincore. 2471 */ 2472static int 2473mmu_booke_mincore(mmu_t mmu, pmap_t pmap, vm_offset_t addr) 2474{ 2475 2476 TODO; 2477 return (0); 2478} 2479 2480/**************************************************************************/ 2481/* TID handling */ 2482/**************************************************************************/ 2483 2484/* 2485 * Allocate a TID. If necessary, steal one from someone else. 2486 * The new TID is flushed from the TLB before returning.
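 * TIDs are handed out round-robin from the per-CPU tid_next counter, which
 * wraps from TID_MAX back to TID_MIN.  If the chosen TID is still recorded
 * in tidbusy[] it is stolen: the previous owner's pm_tid is reset to
 * TID_NONE and any stale TLB0 entries tagged with that TID are flushed.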
2487 */ 2488static tlbtid_t 2489tid_alloc(pmap_t pmap) 2490{ 2491 tlbtid_t tid; 2492 int thiscpu; 2493 2494 KASSERT((pmap != kernel_pmap), ("tid_alloc: kernel pmap")); 2495 2496 CTR2(KTR_PMAP, "%s: s (pmap = %p)", __func__, pmap); 2497 2498 thiscpu = PCPU_GET(cpuid); 2499 2500 tid = PCPU_GET(tid_next); 2501 if (tid > TID_MAX) 2502 tid = TID_MIN; 2503 PCPU_SET(tid_next, tid + 1); 2504 2505 /* If we are stealing TID then clear the relevant pmap's field */ 2506 if (tidbusy[thiscpu][tid] != NULL) { 2507 2508 CTR2(KTR_PMAP, "%s: warning: stealing tid %d", __func__, tid); 2509 2510 tidbusy[thiscpu][tid]->pm_tid[thiscpu] = TID_NONE; 2511 2512 /* Flush all entries from TLB0 matching this TID. */ 2513 tid_flush(tid); 2514 } 2515 2516 tidbusy[thiscpu][tid] = pmap; 2517 pmap->pm_tid[thiscpu] = tid; 2518 __asm __volatile("msync; isync"); 2519 2520 CTR3(KTR_PMAP, "%s: e (%02d next = %02d)", __func__, tid, 2521 PCPU_GET(tid_next)); 2522 2523 return (tid); 2524} 2525 2526/**************************************************************************/ 2527/* TLB0 handling */ 2528/**************************************************************************/ 2529 2530static void 2531tlb_print_entry(int i, uint32_t mas1, uint32_t mas2, uint32_t mas3, 2532 uint32_t mas7) 2533{ 2534 int as; 2535 char desc[3]; 2536 tlbtid_t tid; 2537 vm_size_t size; 2538 unsigned int tsize; 2539 2540 desc[2] = '\0'; 2541 if (mas1 & MAS1_VALID) 2542 desc[0] = 'V'; 2543 else 2544 desc[0] = ' '; 2545 2546 if (mas1 & MAS1_IPROT) 2547 desc[1] = 'P'; 2548 else 2549 desc[1] = ' '; 2550 2551 as = (mas1 & MAS1_TS_MASK) ? 1 : 0; 2552 tid = MAS1_GETTID(mas1); 2553 2554 tsize = (mas1 & MAS1_TSIZE_MASK) >> MAS1_TSIZE_SHIFT; 2555 size = 0; 2556 if (tsize) 2557 size = tsize2size(tsize); 2558 2559 debugf("%3d: (%s) [AS=%d] " 2560 "sz = 0x%08x tsz = %d tid = %d mas1 = 0x%08x " 2561 "mas2(va) = 0x%08x mas3(pa) = 0x%08x mas7 = 0x%08x\n", 2562 i, desc, as, size, tsize, tid, mas1, mas2, mas3, mas7); 2563} 2564 2565/* Convert TLB0 va and way number to tlb0[] table index. */ 2566static inline unsigned int 2567tlb0_tableidx(vm_offset_t va, unsigned int way) 2568{ 2569 unsigned int idx; 2570 2571 idx = (way * TLB0_ENTRIES_PER_WAY); 2572 idx += (va & MAS2_TLB0_ENTRY_IDX_MASK) >> MAS2_TLB0_ENTRY_IDX_SHIFT; 2573 return (idx); 2574} 2575 2576/* 2577 * Invalidate TLB0 entry. 
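 * The caller must hold tlbivax_mutex (asserted below); the tlbivax/tlbsync
 * sequence then invalidates the TLB0 entry covering va's EPN.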
*/ 2579static inline void 2580tlb0_flush_entry(vm_offset_t va) 2581{ 2582 2583 CTR2(KTR_PMAP, "%s: s va=0x%08x", __func__, va); 2584 2585 mtx_assert(&tlbivax_mutex, MA_OWNED); 2586 2587 __asm __volatile("tlbivax 0, %0" :: "r"(va & MAS2_EPN_MASK)); 2588 __asm __volatile("isync; msync"); 2589 __asm __volatile("tlbsync; msync"); 2590 2591 CTR1(KTR_PMAP, "%s: e", __func__); 2592} 2593 2594/* Print out contents of the MAS registers for each TLB0 entry */ 2595void 2596tlb0_print_tlbentries(void) 2597{ 2598 uint32_t mas0, mas1, mas2, mas3, mas7; 2599 int entryidx, way, idx; 2600 2601 debugf("TLB0 entries:\n"); 2602 for (way = 0; way < TLB0_WAYS; way ++) 2603 for (entryidx = 0; entryidx < TLB0_ENTRIES_PER_WAY; entryidx++) { 2604 2605 mas0 = MAS0_TLBSEL(0) | MAS0_ESEL(way); 2606 mtspr(SPR_MAS0, mas0); 2607 __asm __volatile("isync"); 2608 2609 mas2 = entryidx << MAS2_TLB0_ENTRY_IDX_SHIFT; 2610 mtspr(SPR_MAS2, mas2); 2611 2612 __asm __volatile("isync; tlbre"); 2613 2614 mas1 = mfspr(SPR_MAS1); 2615 mas2 = mfspr(SPR_MAS2); 2616 mas3 = mfspr(SPR_MAS3); 2617 mas7 = mfspr(SPR_MAS7); 2618 2619 idx = tlb0_tableidx(mas2, way); 2620 tlb_print_entry(idx, mas1, mas2, mas3, mas7); 2621 } 2622} 2623 2624/**************************************************************************/ 2625/* TLB1 handling */ 2626/**************************************************************************/ 2627 2628/* 2629 * TLB1 mapping notes: 2630 * 2631 * TLB1[0] CCSRBAR 2632 * TLB1[1] Kernel text and data. 2633 * TLB1[2-15] Additional kernel text and data mappings (if required), PCI 2634 * windows, other device mappings. 2635 */ 2636 2637/* 2638 * Write given entry to TLB1 hardware. 2639 * Use 32 bit pa, clear 4 high-order bits of RPN (mas7). 2640 */ 2641static void 2642tlb1_write_entry(unsigned int idx) 2643{ 2644 uint32_t mas0, mas7; 2645 2646 //debugf("tlb1_write_entry: s\n"); 2647 2648 /* Clear high order RPN bits */ 2649 mas7 = 0; 2650 2651 /* Select entry */ 2652 mas0 = MAS0_TLBSEL(1) | MAS0_ESEL(idx); 2653 //debugf("tlb1_write_entry: mas0 = 0x%08x\n", mas0); 2654 2655 mtspr(SPR_MAS0, mas0); 2656 __asm __volatile("isync"); 2657 mtspr(SPR_MAS1, tlb1[idx].mas1); 2658 __asm __volatile("isync"); 2659 mtspr(SPR_MAS2, tlb1[idx].mas2); 2660 __asm __volatile("isync"); 2661 mtspr(SPR_MAS3, tlb1[idx].mas3); 2662 __asm __volatile("isync"); 2663 mtspr(SPR_MAS7, mas7); 2664 __asm __volatile("isync; tlbwe; isync; msync"); 2665 2666 //debugf("tlb1_write_entry: e\n"); 2667} 2668 2669/* 2670 * Return the largest uint value log such that 2^log <= num. 2671 */ 2672static unsigned int 2673ilog2(unsigned int num) 2674{ 2675 int lz; 2676 2677 __asm ("cntlzw %0, %1" : "=r" (lz) : "r" (num)); 2678 return (31 - lz); 2679} 2680 2681/* 2682 * Convert TLB TSIZE value to mapped region size. 2683 */ 2684static vm_size_t 2685tsize2size(unsigned int tsize) 2686{ 2687 2688 /* 2689 * size = 4^tsize KB 2690 * size = 4^tsize * 2^10 = 2^(2 * tsize + 10) 2691 */ 2692 2693 return ((1 << (2 * tsize)) * 1024); 2694} 2695 2696/* 2697 * Convert region size (must be power of 4) to TLB TSIZE value. 2698 */ 2699static unsigned int 2700size2tsize(vm_size_t size) 2701{ 2702 2703 return (ilog2(size) / 2 - 5); 2704} 2705 2706/* 2707 * Register permanent kernel mapping in TLB1. 2708 * 2709 * Entries are created starting from index 0 (current free entry is 2710 * kept in tlb1_idx) and are not supposed to be invalidated.
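 * To illustrate the size/TSIZE conversion used below: a 256MB mapping
 * (size = 0x10000000) yields tsize = ilog2(size) / 2 - 5 = 28 / 2 - 5 = 9,
 * and tsize2size(9) = 4^9 KB = 256MB, so sizes must be powers of 4 for the
 * conversion to be exact.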
2711 */ 2712static int 2713tlb1_set_entry(vm_offset_t va, vm_offset_t pa, vm_size_t size, 2714 uint32_t flags) 2715{ 2716 uint32_t ts, tid; 2717 int tsize; 2718 2719 if (tlb1_idx >= TLB1_ENTRIES) { 2720 printf("tlb1_set_entry: TLB1 full!\n"); 2721 return (-1); 2722 } 2723 2724 /* Convert size to TSIZE */ 2725 tsize = size2tsize(size); 2726 2727 tid = (TID_KERNEL << MAS1_TID_SHIFT) & MAS1_TID_MASK; 2728 /* XXX TS is hard coded to 0 for now as we only use single address space */ 2729 ts = (0 << MAS1_TS_SHIFT) & MAS1_TS_MASK; 2730 2731 /* XXX LOCK tlb1[] */ 2732 2733 tlb1[tlb1_idx].mas1 = MAS1_VALID | MAS1_IPROT | ts | tid; 2734 tlb1[tlb1_idx].mas1 |= ((tsize << MAS1_TSIZE_SHIFT) & MAS1_TSIZE_MASK); 2735 tlb1[tlb1_idx].mas2 = (va & MAS2_EPN_MASK) | flags; 2736 2737 /* Set supervisor RWX permission bits */ 2738 tlb1[tlb1_idx].mas3 = (pa & MAS3_RPN) | MAS3_SR | MAS3_SW | MAS3_SX; 2739 2740 tlb1_write_entry(tlb1_idx++); 2741 2742 /* XXX UNLOCK tlb1[] */ 2743 2744 /* 2745 * XXX in general TLB1 updates should be propagated between CPUs, 2746 * since current design assumes to have the same TLB1 set-up on all 2747 * cores. 2748 */ 2749 return (0); 2750} 2751 2752static int 2753tlb1_entry_size_cmp(const void *a, const void *b) 2754{ 2755 const vm_size_t *sza; 2756 const vm_size_t *szb; 2757 2758 sza = a; 2759 szb = b; 2760 if (*sza > *szb) 2761 return (-1); 2762 else if (*sza < *szb) 2763 return (1); 2764 else 2765 return (0); 2766} 2767 2768/* 2769 * Map in contiguous RAM region into the TLB1 using maximum of 2770 * KERNEL_REGION_MAX_TLB_ENTRIES entries. 2771 * 2772 * If necessary round up last entry size and return total size 2773 * used by all allocated entries. 2774 */ 2775vm_size_t 2776tlb1_mapin_region(vm_offset_t va, vm_offset_t pa, vm_size_t size) 2777{ 2778 vm_size_t entry_size[KERNEL_REGION_MAX_TLB_ENTRIES]; 2779 vm_size_t mapped_size, sz, esz; 2780 unsigned int log; 2781 int i; 2782 2783 CTR4(KTR_PMAP, "%s: region size = 0x%08x va = 0x%08x pa = 0x%08x", 2784 __func__, size, va, pa); 2785 2786 mapped_size = 0; 2787 sz = size; 2788 memset(entry_size, 0, sizeof(entry_size)); 2789 2790 /* Calculate entry sizes. */ 2791 for (i = 0; i < KERNEL_REGION_MAX_TLB_ENTRIES && sz > 0; i++) { 2792 2793 /* Largest region that is power of 4 and fits within size */ 2794 log = ilog2(sz) / 2; 2795 esz = 1 << (2 * log); 2796 2797 /* If this is last entry cover remaining size. */ 2798 if (i == KERNEL_REGION_MAX_TLB_ENTRIES - 1) { 2799 while (esz < sz) 2800 esz = esz << 2; 2801 } 2802 2803 entry_size[i] = esz; 2804 mapped_size += esz; 2805 if (esz < sz) 2806 sz -= esz; 2807 else 2808 sz = 0; 2809 } 2810 2811 /* Sort entry sizes, required to get proper entry address alignment. */ 2812 qsort(entry_size, KERNEL_REGION_MAX_TLB_ENTRIES, 2813 sizeof(vm_size_t), tlb1_entry_size_cmp); 2814 2815 /* Load TLB1 entries. */ 2816 for (i = 0; i < KERNEL_REGION_MAX_TLB_ENTRIES; i++) { 2817 esz = entry_size[i]; 2818 if (!esz) 2819 break; 2820 2821 CTR5(KTR_PMAP, "%s: entry %d: sz = 0x%08x (va = 0x%08x " 2822 "pa = 0x%08x)", __func__, tlb1_idx, esz, va, pa); 2823 2824 tlb1_set_entry(va, pa, esz, _TLB_ENTRY_MEM); 2825 2826 va += esz; 2827 pa += esz; 2828 } 2829 2830 CTR3(KTR_PMAP, "%s: mapped size 0x%08x (wasted space 0x%08x)", 2831 __func__, mapped_size, mapped_size - size); 2832 2833 return (mapped_size); 2834} 2835 2836/* 2837 * TLB1 initialization routine, to be called after the very first 2838 * assembler level setup done in locore.S. 
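 * On entry, TLB1[1] (the kernel text+data mapping created in locore.S) is
 * the only entry relied upon; it is read back into the in-RAM tlb1[] shadow,
 * CCSRBAR is then wired into TLB1[0], and tlb1_idx is advanced past both
 * reserved entries before normal allocation begins.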
2839 */ 2840void 2841tlb1_init(vm_offset_t ccsrbar) 2842{ 2843 uint32_t mas0; 2844 2845 /* TLB1[1] is used to map the kernel. Save that entry. */ 2846 mas0 = MAS0_TLBSEL(1) | MAS0_ESEL(1); 2847 mtspr(SPR_MAS0, mas0); 2848 __asm __volatile("isync; tlbre"); 2849 2850 tlb1[1].mas1 = mfspr(SPR_MAS1); 2851 tlb1[1].mas2 = mfspr(SPR_MAS2); 2852 tlb1[1].mas3 = mfspr(SPR_MAS3); 2853 2854 /* Map in CCSRBAR in TLB1[0] */ 2855 tlb1_idx = 0; 2856 tlb1_set_entry(CCSRBAR_VA, ccsrbar, CCSRBAR_SIZE, _TLB_ENTRY_IO); 2857 /* 2858 * Set the next available TLB1 entry index. Note TLB1[1] is reserved 2859 * for the initial mapping of kernel text+data, which was set up early 2860 * in locore; we need to skip this [busy] entry. 2861 */ 2862 tlb1_idx = 2; 2863 2864 /* Setup TLB miss defaults */ 2865 set_mas4_defaults(); 2866} 2867 2868/* 2869 * Setup MAS4 defaults. 2870 * These values are loaded to MAS0-2 on a TLB miss. 2871 */ 2872static void 2873set_mas4_defaults(void) 2874{ 2875 uint32_t mas4; 2876 2877 /* Defaults: TLB0, PID0, TSIZED=4K */ 2878 mas4 = MAS4_TLBSELD0; 2879 mas4 |= (TLB_SIZE_4K << MAS4_TSIZED_SHIFT) & MAS4_TSIZED_MASK; 2880 2881 mtspr(SPR_MAS4, mas4); 2882 __asm __volatile("isync"); 2883} 2884 2885/* 2886 * Print out contents of the MAS registers for each TLB1 entry 2887 */ 2888void 2889tlb1_print_tlbentries(void) 2890{ 2891 uint32_t mas0, mas1, mas2, mas3, mas7; 2892 int i; 2893 2894 debugf("TLB1 entries:\n"); 2895 for (i = 0; i < TLB1_ENTRIES; i++) { 2896 2897 mas0 = MAS0_TLBSEL(1) | MAS0_ESEL(i); 2898 mtspr(SPR_MAS0, mas0); 2899 2900 __asm __volatile("isync; tlbre"); 2901 2902 mas1 = mfspr(SPR_MAS1); 2903 mas2 = mfspr(SPR_MAS2); 2904 mas3 = mfspr(SPR_MAS3); 2905 mas7 = mfspr(SPR_MAS7); 2906 2907 tlb_print_entry(i, mas1, mas2, mas3, mas7); 2908 } 2909} 2910 2911/* 2912 * Print out contents of the in-ram tlb1 table. 2913 */ 2914void 2915tlb1_print_entries(void) 2916{ 2917 int i; 2918 2919 debugf("tlb1[] table entries:\n"); 2920 for (i = 0; i < TLB1_ENTRIES; i++) 2921 tlb_print_entry(i, tlb1[i].mas1, tlb1[i].mas2, tlb1[i].mas3, 0); 2922} 2923 2924/* 2925 * Return 0 if the physical IO range is encompassed by one of the 2926 * TLB1 entries, otherwise return the related error code. 2927 */ 2928static int 2929tlb1_iomapped(int i, vm_paddr_t pa, vm_size_t size, vm_offset_t *va) 2930{ 2931 uint32_t prot; 2932 vm_paddr_t pa_start; 2933 vm_paddr_t pa_end; 2934 unsigned int entry_tsize; 2935 vm_size_t entry_size; 2936 2937 *va = (vm_offset_t)NULL; 2938 2939 /* Skip invalid entries */ 2940 if (!(tlb1[i].mas1 & MAS1_VALID)) 2941 return (EINVAL); 2942 2943 /* 2944 * The entry must be cache-inhibited, guarded, and r/w 2945 * so it can function as an i/o page 2946 */ 2947 prot = tlb1[i].mas2 & (MAS2_I | MAS2_G); 2948 if (prot != (MAS2_I | MAS2_G)) 2949 return (EPERM); 2950 2951 prot = tlb1[i].mas3 & (MAS3_SR | MAS3_SW); 2952 if (prot != (MAS3_SR | MAS3_SW)) 2953 return (EPERM); 2954 2955 /* The address should be within the entry range. */ 2956 entry_tsize = (tlb1[i].mas1 & MAS1_TSIZE_MASK) >> MAS1_TSIZE_SHIFT; 2957 KASSERT((entry_tsize), ("tlb1_iomapped: invalid entry tsize")); 2958 2959 entry_size = tsize2size(entry_tsize); 2960 pa_start = tlb1[i].mas3 & MAS3_RPN; 2961 pa_end = pa_start + entry_size - 1; 2962 2963 if ((pa < pa_start) || ((pa + size) > pa_end)) 2964 return (ERANGE); 2965 2966 /* Return virtual address of this mapping. */ 2967 *va = (tlb1[i].mas2 & MAS2_EPN_MASK) + (pa - pa_start); 2968 return (0); 2969} 2970