1/*- 2 * Copyright (c) 2001 The NetBSD Foundation, Inc. 3 * All rights reserved. 4 * 5 * This code is derived from software contributed to The NetBSD Foundation 6 * by Matt Thomas <matt@3am-software.com> of Allegro Networks, Inc. 7 * 8 * Redistribution and use in source and binary forms, with or without 9 * modification, are permitted provided that the following conditions 10 * are met: 11 * 1. Redistributions of source code must retain the above copyright 12 * notice, this list of conditions and the following disclaimer. 13 * 2. Redistributions in binary form must reproduce the above copyright 14 * notice, this list of conditions and the following disclaimer in the 15 * documentation and/or other materials provided with the distribution. 16 * 17 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS 18 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED 19 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR 20 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS 21 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 22 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 23 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 24 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 25 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 26 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 27 * POSSIBILITY OF SUCH DAMAGE. 28 */ 29/*- 30 * Copyright (C) 1995, 1996 Wolfgang Solfrank. 31 * Copyright (C) 1995, 1996 TooLs GmbH. 32 * All rights reserved. 33 * 34 * Redistribution and use in source and binary forms, with or without 35 * modification, are permitted provided that the following conditions 36 * are met: 37 * 1. Redistributions of source code must retain the above copyright 38 * notice, this list of conditions and the following disclaimer. 39 * 2. Redistributions in binary form must reproduce the above copyright 40 * notice, this list of conditions and the following disclaimer in the 41 * documentation and/or other materials provided with the distribution. 42 * 3. All advertising materials mentioning features or use of this software 43 * must display the following acknowledgement: 44 * This product includes software developed by TooLs GmbH. 45 * 4. The name of TooLs GmbH may not be used to endorse or promote products 46 * derived from this software without specific prior written permission. 47 * 48 * THIS SOFTWARE IS PROVIDED BY TOOLS GMBH ``AS IS'' AND ANY EXPRESS OR 49 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES 50 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 51 * IN NO EVENT SHALL TOOLS GMBH BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 52 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, 53 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; 54 * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, 55 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR 56 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF 57 * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 58 * 59 * $NetBSD: pmap.c,v 1.28 2000/03/26 20:42:36 kleink Exp $ 60 */ 61/*- 62 * Copyright (C) 2001 Benno Rice. 63 * All rights reserved. 
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY Benno Rice ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL TOOLS GMBH BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
 * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
 * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

/*
 * Manages physical address maps.
 *
 * Since the information managed by this module is also stored by the
 * logical address mapping module, this module may throw away valid virtual
 * to physical mappings at almost any time.  However, invalidations of
 * mappings must be done as requested.
 *
 * In order to cope with hardware architectures which make virtual to
 * physical map invalidates expensive, this module may delay invalidate
 * or reduced protection operations until such time as they are actually
 * necessary.  This module is given full information as to which processors
 * are currently using which maps, and to when physical maps must be made
 * correct.
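 *
 * On the 32-bit PowerPC OEA MMU the hardware page table is a hash table
 * of 64-byte PTE groups (PTEGs).  This module keeps a software PVO entry
 * for every mapping so that PTEs evicted from the hash table can be
 * rebuilt on demand; a mapping's PTEG index is computed as
 * (VSID ^ ((va & ADDR_PIDX) >> ADDR_PIDX_SHFT)) & moea_pteg_mask, as
 * done by va_to_pteg() below.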
103 */ 104 105#include "opt_kstack_pages.h" 106 107#include <sys/param.h> 108#include <sys/kernel.h> 109#include <sys/queue.h> 110#include <sys/cpuset.h> 111#include <sys/ktr.h> 112#include <sys/lock.h> 113#include <sys/msgbuf.h> 114#include <sys/mutex.h> 115#include <sys/proc.h> 116#include <sys/rwlock.h> 117#include <sys/sched.h> 118#include <sys/sysctl.h> 119#include <sys/systm.h> 120#include <sys/vmmeter.h> 121 122#include <dev/ofw/openfirm.h> 123 124#include <vm/vm.h> 125#include <vm/vm_param.h> 126#include <vm/vm_kern.h> 127#include <vm/vm_page.h> 128#include <vm/vm_map.h> 129#include <vm/vm_object.h> 130#include <vm/vm_extern.h> 131#include <vm/vm_pageout.h> 132#include <vm/uma.h> 133 134#include <machine/cpu.h> 135#include <machine/platform.h> 136#include <machine/bat.h> 137#include <machine/frame.h> 138#include <machine/md_var.h> 139#include <machine/psl.h> 140#include <machine/pte.h> 141#include <machine/smp.h> 142#include <machine/sr.h> 143#include <machine/mmuvar.h> 144#include <machine/trap.h> 145 146#include "mmu_if.h" 147 148#define MOEA_DEBUG 149 150#define TODO panic("%s: not implemented", __func__); 151 152#define VSID_MAKE(sr, hash) ((sr) | (((hash) & 0xfffff) << 4)) 153#define VSID_TO_SR(vsid) ((vsid) & 0xf) 154#define VSID_TO_HASH(vsid) (((vsid) >> 4) & 0xfffff) 155 156struct ofw_map { 157 vm_offset_t om_va; 158 vm_size_t om_len; 159 vm_offset_t om_pa; 160 u_int om_mode; 161}; 162 163extern unsigned char _etext[]; 164extern unsigned char _end[]; 165 166extern int dumpsys_minidump; 167 168/* 169 * Map of physical memory regions. 170 */ 171static struct mem_region *regions; 172static struct mem_region *pregions; 173static u_int phys_avail_count; 174static int regions_sz, pregions_sz; 175static struct ofw_map *translations; 176 177/* 178 * Lock for the pteg and pvo tables. 179 */ 180struct mtx moea_table_mutex; 181struct mtx moea_vsid_mutex; 182 183/* tlbie instruction synchronization */ 184static struct mtx tlbie_mtx; 185 186/* 187 * PTEG data. 188 */ 189static struct pteg *moea_pteg_table; 190u_int moea_pteg_count; 191u_int moea_pteg_mask; 192 193/* 194 * PVO data. 195 */ 196struct pvo_head *moea_pvo_table; /* pvo entries by pteg index */ 197struct pvo_head moea_pvo_kunmanaged = 198 LIST_HEAD_INITIALIZER(moea_pvo_kunmanaged); /* list of unmanaged pages */ 199 200static struct rwlock_padalign pvh_global_lock; 201 202uma_zone_t moea_upvo_zone; /* zone for pvo entries for unmanaged pages */ 203uma_zone_t moea_mpvo_zone; /* zone for pvo entries for managed pages */ 204 205#define BPVO_POOL_SIZE 32768 206static struct pvo_entry *moea_bpvo_pool; 207static int moea_bpvo_pool_index = 0; 208 209#define VSID_NBPW (sizeof(u_int32_t) * 8) 210static u_int moea_vsid_bitmap[NPMAPS / VSID_NBPW]; 211 212static boolean_t moea_initialized = FALSE; 213 214/* 215 * Statistics. 
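 * These counters track PTE and PVO activity and are exported read-only
 * via sysctl under the machdep tree (see the SYSCTL_INT() declarations
 * below).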
216 */ 217u_int moea_pte_valid = 0; 218u_int moea_pte_overflow = 0; 219u_int moea_pte_replacements = 0; 220u_int moea_pvo_entries = 0; 221u_int moea_pvo_enter_calls = 0; 222u_int moea_pvo_remove_calls = 0; 223u_int moea_pte_spills = 0; 224SYSCTL_INT(_machdep, OID_AUTO, moea_pte_valid, CTLFLAG_RD, &moea_pte_valid, 225 0, ""); 226SYSCTL_INT(_machdep, OID_AUTO, moea_pte_overflow, CTLFLAG_RD, 227 &moea_pte_overflow, 0, ""); 228SYSCTL_INT(_machdep, OID_AUTO, moea_pte_replacements, CTLFLAG_RD, 229 &moea_pte_replacements, 0, ""); 230SYSCTL_INT(_machdep, OID_AUTO, moea_pvo_entries, CTLFLAG_RD, &moea_pvo_entries, 231 0, ""); 232SYSCTL_INT(_machdep, OID_AUTO, moea_pvo_enter_calls, CTLFLAG_RD, 233 &moea_pvo_enter_calls, 0, ""); 234SYSCTL_INT(_machdep, OID_AUTO, moea_pvo_remove_calls, CTLFLAG_RD, 235 &moea_pvo_remove_calls, 0, ""); 236SYSCTL_INT(_machdep, OID_AUTO, moea_pte_spills, CTLFLAG_RD, 237 &moea_pte_spills, 0, ""); 238 239/* 240 * Allocate physical memory for use in moea_bootstrap. 241 */ 242static vm_offset_t moea_bootstrap_alloc(vm_size_t, u_int); 243 244/* 245 * PTE calls. 246 */ 247static int moea_pte_insert(u_int, struct pte *); 248 249/* 250 * PVO calls. 251 */ 252static int moea_pvo_enter(pmap_t, uma_zone_t, struct pvo_head *, 253 vm_offset_t, vm_offset_t, u_int, int); 254static void moea_pvo_remove(struct pvo_entry *, int); 255static struct pvo_entry *moea_pvo_find_va(pmap_t, vm_offset_t, int *); 256static struct pte *moea_pvo_to_pte(const struct pvo_entry *, int); 257 258/* 259 * Utility routines. 260 */ 261static int moea_enter_locked(pmap_t, vm_offset_t, vm_page_t, 262 vm_prot_t, u_int, int8_t); 263static void moea_syncicache(vm_offset_t, vm_size_t); 264static boolean_t moea_query_bit(vm_page_t, int); 265static u_int moea_clear_bit(vm_page_t, int); 266static void moea_kremove(mmu_t, vm_offset_t); 267int moea_pte_spill(vm_offset_t); 268 269/* 270 * Kernel MMU interface 271 */ 272void moea_clear_modify(mmu_t, vm_page_t); 273void moea_copy_page(mmu_t, vm_page_t, vm_page_t); 274void moea_copy_pages(mmu_t mmu, vm_page_t *ma, vm_offset_t a_offset, 275 vm_page_t *mb, vm_offset_t b_offset, int xfersize); 276int moea_enter(mmu_t, pmap_t, vm_offset_t, vm_page_t, vm_prot_t, u_int, 277 int8_t); 278void moea_enter_object(mmu_t, pmap_t, vm_offset_t, vm_offset_t, vm_page_t, 279 vm_prot_t); 280void moea_enter_quick(mmu_t, pmap_t, vm_offset_t, vm_page_t, vm_prot_t); 281vm_paddr_t moea_extract(mmu_t, pmap_t, vm_offset_t); 282vm_page_t moea_extract_and_hold(mmu_t, pmap_t, vm_offset_t, vm_prot_t); 283void moea_init(mmu_t); 284boolean_t moea_is_modified(mmu_t, vm_page_t); 285boolean_t moea_is_prefaultable(mmu_t, pmap_t, vm_offset_t); 286boolean_t moea_is_referenced(mmu_t, vm_page_t); 287int moea_ts_referenced(mmu_t, vm_page_t); 288vm_offset_t moea_map(mmu_t, vm_offset_t *, vm_paddr_t, vm_paddr_t, int); 289boolean_t moea_page_exists_quick(mmu_t, pmap_t, vm_page_t); 290int moea_page_wired_mappings(mmu_t, vm_page_t); 291void moea_pinit(mmu_t, pmap_t); 292void moea_pinit0(mmu_t, pmap_t); 293void moea_protect(mmu_t, pmap_t, vm_offset_t, vm_offset_t, vm_prot_t); 294void moea_qenter(mmu_t, vm_offset_t, vm_page_t *, int); 295void moea_qremove(mmu_t, vm_offset_t, int); 296void moea_release(mmu_t, pmap_t); 297void moea_remove(mmu_t, pmap_t, vm_offset_t, vm_offset_t); 298void moea_remove_all(mmu_t, vm_page_t); 299void moea_remove_write(mmu_t, vm_page_t); 300void moea_unwire(mmu_t, pmap_t, vm_offset_t, vm_offset_t); 301void moea_zero_page(mmu_t, vm_page_t); 302void moea_zero_page_area(mmu_t, vm_page_t, int, int); 
303void moea_zero_page_idle(mmu_t, vm_page_t); 304void moea_activate(mmu_t, struct thread *); 305void moea_deactivate(mmu_t, struct thread *); 306void moea_cpu_bootstrap(mmu_t, int); 307void moea_bootstrap(mmu_t, vm_offset_t, vm_offset_t); 308void *moea_mapdev(mmu_t, vm_paddr_t, vm_size_t); 309void *moea_mapdev_attr(mmu_t, vm_offset_t, vm_size_t, vm_memattr_t); 310void moea_unmapdev(mmu_t, vm_offset_t, vm_size_t); 311vm_paddr_t moea_kextract(mmu_t, vm_offset_t); 312void moea_kenter_attr(mmu_t, vm_offset_t, vm_offset_t, vm_memattr_t); 313void moea_kenter(mmu_t, vm_offset_t, vm_paddr_t); 314void moea_page_set_memattr(mmu_t mmu, vm_page_t m, vm_memattr_t ma); 315boolean_t moea_dev_direct_mapped(mmu_t, vm_paddr_t, vm_size_t); 316static void moea_sync_icache(mmu_t, pmap_t, vm_offset_t, vm_size_t); 317vm_offset_t moea_dumpsys_map(mmu_t mmu, struct pmap_md *md, vm_size_t ofs, 318 vm_size_t *sz); 319struct pmap_md * moea_scan_md(mmu_t mmu, struct pmap_md *prev); 320 321static mmu_method_t moea_methods[] = { 322 MMUMETHOD(mmu_clear_modify, moea_clear_modify), 323 MMUMETHOD(mmu_copy_page, moea_copy_page), 324 MMUMETHOD(mmu_copy_pages, moea_copy_pages), 325 MMUMETHOD(mmu_enter, moea_enter), 326 MMUMETHOD(mmu_enter_object, moea_enter_object), 327 MMUMETHOD(mmu_enter_quick, moea_enter_quick), 328 MMUMETHOD(mmu_extract, moea_extract), 329 MMUMETHOD(mmu_extract_and_hold, moea_extract_and_hold), 330 MMUMETHOD(mmu_init, moea_init), 331 MMUMETHOD(mmu_is_modified, moea_is_modified), 332 MMUMETHOD(mmu_is_prefaultable, moea_is_prefaultable), 333 MMUMETHOD(mmu_is_referenced, moea_is_referenced), 334 MMUMETHOD(mmu_ts_referenced, moea_ts_referenced), 335 MMUMETHOD(mmu_map, moea_map), 336 MMUMETHOD(mmu_page_exists_quick,moea_page_exists_quick), 337 MMUMETHOD(mmu_page_wired_mappings,moea_page_wired_mappings), 338 MMUMETHOD(mmu_pinit, moea_pinit), 339 MMUMETHOD(mmu_pinit0, moea_pinit0), 340 MMUMETHOD(mmu_protect, moea_protect), 341 MMUMETHOD(mmu_qenter, moea_qenter), 342 MMUMETHOD(mmu_qremove, moea_qremove), 343 MMUMETHOD(mmu_release, moea_release), 344 MMUMETHOD(mmu_remove, moea_remove), 345 MMUMETHOD(mmu_remove_all, moea_remove_all), 346 MMUMETHOD(mmu_remove_write, moea_remove_write), 347 MMUMETHOD(mmu_sync_icache, moea_sync_icache), 348 MMUMETHOD(mmu_unwire, moea_unwire), 349 MMUMETHOD(mmu_zero_page, moea_zero_page), 350 MMUMETHOD(mmu_zero_page_area, moea_zero_page_area), 351 MMUMETHOD(mmu_zero_page_idle, moea_zero_page_idle), 352 MMUMETHOD(mmu_activate, moea_activate), 353 MMUMETHOD(mmu_deactivate, moea_deactivate), 354 MMUMETHOD(mmu_page_set_memattr, moea_page_set_memattr), 355 356 /* Internal interfaces */ 357 MMUMETHOD(mmu_bootstrap, moea_bootstrap), 358 MMUMETHOD(mmu_cpu_bootstrap, moea_cpu_bootstrap), 359 MMUMETHOD(mmu_mapdev_attr, moea_mapdev_attr), 360 MMUMETHOD(mmu_mapdev, moea_mapdev), 361 MMUMETHOD(mmu_unmapdev, moea_unmapdev), 362 MMUMETHOD(mmu_kextract, moea_kextract), 363 MMUMETHOD(mmu_kenter, moea_kenter), 364 MMUMETHOD(mmu_kenter_attr, moea_kenter_attr), 365 MMUMETHOD(mmu_dev_direct_mapped,moea_dev_direct_mapped), 366 MMUMETHOD(mmu_scan_md, moea_scan_md), 367 MMUMETHOD(mmu_dumpsys_map, moea_dumpsys_map), 368 369 { 0, 0 } 370}; 371 372MMU_DEF(oea_mmu, MMU_TYPE_OEA, moea_methods, 0); 373 374static __inline uint32_t 375moea_calc_wimg(vm_offset_t pa, vm_memattr_t ma) 376{ 377 uint32_t pte_lo; 378 int i; 379 380 if (ma != VM_MEMATTR_DEFAULT) { 381 switch (ma) { 382 case VM_MEMATTR_UNCACHEABLE: 383 return (PTE_I | PTE_G); 384 case VM_MEMATTR_WRITE_COMBINING: 385 case VM_MEMATTR_WRITE_BACK: 386 case 
VM_MEMATTR_PREFETCHABLE: 387 return (PTE_I); 388 case VM_MEMATTR_WRITE_THROUGH: 389 return (PTE_W | PTE_M); 390 } 391 } 392 393 /* 394 * Assume the page is cache inhibited and access is guarded unless 395 * it's in our available memory array. 396 */ 397 pte_lo = PTE_I | PTE_G; 398 for (i = 0; i < pregions_sz; i++) { 399 if ((pa >= pregions[i].mr_start) && 400 (pa < (pregions[i].mr_start + pregions[i].mr_size))) { 401 pte_lo = PTE_M; 402 break; 403 } 404 } 405 406 return pte_lo; 407} 408 409static void 410tlbie(vm_offset_t va) 411{ 412 413 mtx_lock_spin(&tlbie_mtx); 414 __asm __volatile("ptesync"); 415 __asm __volatile("tlbie %0" :: "r"(va)); 416 __asm __volatile("eieio; tlbsync; ptesync"); 417 mtx_unlock_spin(&tlbie_mtx); 418} 419 420static void 421tlbia(void) 422{ 423 vm_offset_t va; 424 425 for (va = 0; va < 0x00040000; va += 0x00001000) { 426 __asm __volatile("tlbie %0" :: "r"(va)); 427 powerpc_sync(); 428 } 429 __asm __volatile("tlbsync"); 430 powerpc_sync(); 431} 432 433static __inline int 434va_to_sr(u_int *sr, vm_offset_t va) 435{ 436 return (sr[(uintptr_t)va >> ADDR_SR_SHFT]); 437} 438 439static __inline u_int 440va_to_pteg(u_int sr, vm_offset_t addr) 441{ 442 u_int hash; 443 444 hash = (sr & SR_VSID_MASK) ^ (((u_int)addr & ADDR_PIDX) >> 445 ADDR_PIDX_SHFT); 446 return (hash & moea_pteg_mask); 447} 448 449static __inline struct pvo_head * 450vm_page_to_pvoh(vm_page_t m) 451{ 452 453 return (&m->md.mdpg_pvoh); 454} 455 456static __inline void 457moea_attr_clear(vm_page_t m, int ptebit) 458{ 459 460 rw_assert(&pvh_global_lock, RA_WLOCKED); 461 m->md.mdpg_attrs &= ~ptebit; 462} 463 464static __inline int 465moea_attr_fetch(vm_page_t m) 466{ 467 468 return (m->md.mdpg_attrs); 469} 470 471static __inline void 472moea_attr_save(vm_page_t m, int ptebit) 473{ 474 475 rw_assert(&pvh_global_lock, RA_WLOCKED); 476 m->md.mdpg_attrs |= ptebit; 477} 478 479static __inline int 480moea_pte_compare(const struct pte *pt, const struct pte *pvo_pt) 481{ 482 if (pt->pte_hi == pvo_pt->pte_hi) 483 return (1); 484 485 return (0); 486} 487 488static __inline int 489moea_pte_match(struct pte *pt, u_int sr, vm_offset_t va, int which) 490{ 491 return (pt->pte_hi & ~PTE_VALID) == 492 (((sr & SR_VSID_MASK) << PTE_VSID_SHFT) | 493 ((va >> ADDR_API_SHFT) & PTE_API) | which); 494} 495 496static __inline void 497moea_pte_create(struct pte *pt, u_int sr, vm_offset_t va, u_int pte_lo) 498{ 499 500 mtx_assert(&moea_table_mutex, MA_OWNED); 501 502 /* 503 * Construct a PTE. Default to IMB initially. Valid bit only gets 504 * set when the real pte is set in memory. 505 * 506 * Note: Don't set the valid bit for correct operation of tlb update. 507 */ 508 pt->pte_hi = ((sr & SR_VSID_MASK) << PTE_VSID_SHFT) | 509 (((va & ADDR_PIDX) >> ADDR_API_SHFT) & PTE_API); 510 pt->pte_lo = pte_lo; 511} 512 513static __inline void 514moea_pte_synch(struct pte *pt, struct pte *pvo_pt) 515{ 516 517 mtx_assert(&moea_table_mutex, MA_OWNED); 518 pvo_pt->pte_lo |= pt->pte_lo & (PTE_REF | PTE_CHG); 519} 520 521static __inline void 522moea_pte_clear(struct pte *pt, vm_offset_t va, int ptebit) 523{ 524 525 mtx_assert(&moea_table_mutex, MA_OWNED); 526 527 /* 528 * As shown in Section 7.6.3.2.3 529 */ 530 pt->pte_lo &= ~ptebit; 531 tlbie(va); 532} 533 534static __inline void 535moea_pte_set(struct pte *pt, struct pte *pvo_pt) 536{ 537 538 mtx_assert(&moea_table_mutex, MA_OWNED); 539 pvo_pt->pte_hi |= PTE_VALID; 540 541 /* 542 * Update the PTE as defined in section 7.6.3.1. 
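	 * The low word is stored first, followed by a sync, and only then is
	 * the high word (which carries PTE_VALID) written, so the table
	 * search hardware never sees a valid PTE with a stale low word.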
543 * Note that the REF/CHG bits are from pvo_pt and thus should have 544 * been saved so this routine can restore them (if desired). 545 */ 546 pt->pte_lo = pvo_pt->pte_lo; 547 powerpc_sync(); 548 pt->pte_hi = pvo_pt->pte_hi; 549 powerpc_sync(); 550 moea_pte_valid++; 551} 552 553static __inline void 554moea_pte_unset(struct pte *pt, struct pte *pvo_pt, vm_offset_t va) 555{ 556 557 mtx_assert(&moea_table_mutex, MA_OWNED); 558 pvo_pt->pte_hi &= ~PTE_VALID; 559 560 /* 561 * Force the reg & chg bits back into the PTEs. 562 */ 563 powerpc_sync(); 564 565 /* 566 * Invalidate the pte. 567 */ 568 pt->pte_hi &= ~PTE_VALID; 569 570 tlbie(va); 571 572 /* 573 * Save the reg & chg bits. 574 */ 575 moea_pte_synch(pt, pvo_pt); 576 moea_pte_valid--; 577} 578 579static __inline void 580moea_pte_change(struct pte *pt, struct pte *pvo_pt, vm_offset_t va) 581{ 582 583 /* 584 * Invalidate the PTE 585 */ 586 moea_pte_unset(pt, pvo_pt, va); 587 moea_pte_set(pt, pvo_pt); 588} 589 590/* 591 * Quick sort callout for comparing memory regions. 592 */ 593static int om_cmp(const void *a, const void *b); 594 595static int 596om_cmp(const void *a, const void *b) 597{ 598 const struct ofw_map *mapa; 599 const struct ofw_map *mapb; 600 601 mapa = a; 602 mapb = b; 603 if (mapa->om_pa < mapb->om_pa) 604 return (-1); 605 else if (mapa->om_pa > mapb->om_pa) 606 return (1); 607 else 608 return (0); 609} 610 611void 612moea_cpu_bootstrap(mmu_t mmup, int ap) 613{ 614 u_int sdr; 615 int i; 616 617 if (ap) { 618 powerpc_sync(); 619 __asm __volatile("mtdbatu 0,%0" :: "r"(battable[0].batu)); 620 __asm __volatile("mtdbatl 0,%0" :: "r"(battable[0].batl)); 621 isync(); 622 __asm __volatile("mtibatu 0,%0" :: "r"(battable[0].batu)); 623 __asm __volatile("mtibatl 0,%0" :: "r"(battable[0].batl)); 624 isync(); 625 } 626 627#ifdef WII 628 /* 629 * Special case for the Wii: don't install the PCI BAT. 630 */ 631 if (strcmp(installed_platform(), "wii") != 0) { 632#endif 633 __asm __volatile("mtdbatu 1,%0" :: "r"(battable[8].batu)); 634 __asm __volatile("mtdbatl 1,%0" :: "r"(battable[8].batl)); 635#ifdef WII 636 } 637#endif 638 isync(); 639 640 __asm __volatile("mtibatu 1,%0" :: "r"(0)); 641 __asm __volatile("mtdbatu 2,%0" :: "r"(0)); 642 __asm __volatile("mtibatu 2,%0" :: "r"(0)); 643 __asm __volatile("mtdbatu 3,%0" :: "r"(0)); 644 __asm __volatile("mtibatu 3,%0" :: "r"(0)); 645 isync(); 646 647 for (i = 0; i < 16; i++) 648 mtsrin(i << ADDR_SR_SHFT, kernel_pmap->pm_sr[i]); 649 powerpc_sync(); 650 651 sdr = (u_int)moea_pteg_table | (moea_pteg_mask >> 10); 652 __asm __volatile("mtsdr1 %0" :: "r"(sdr)); 653 isync(); 654 655 tlbia(); 656} 657 658void 659moea_bootstrap(mmu_t mmup, vm_offset_t kernelstart, vm_offset_t kernelend) 660{ 661 ihandle_t mmui; 662 phandle_t chosen, mmu; 663 int sz; 664 int i, j; 665 vm_size_t size, physsz, hwphyssz; 666 vm_offset_t pa, va, off; 667 void *dpcpu; 668 register_t msr; 669 670 /* 671 * Set up BAT0 to map the lowest 256 MB area 672 */ 673 battable[0x0].batl = BATL(0x00000000, BAT_M, BAT_PP_RW); 674 battable[0x0].batu = BATU(0x00000000, BAT_BL_256M, BAT_Vs); 675 676 /* 677 * Map PCI memory space. 
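	 * Four cache-inhibited, guarded 256 MB BAT entries cover the PCI
	 * windows at 0x80000000-0xbfffffff.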
 */
	battable[0x8].batl = BATL(0x80000000, BAT_I|BAT_G, BAT_PP_RW);
	battable[0x8].batu = BATU(0x80000000, BAT_BL_256M, BAT_Vs);

	battable[0x9].batl = BATL(0x90000000, BAT_I|BAT_G, BAT_PP_RW);
	battable[0x9].batu = BATU(0x90000000, BAT_BL_256M, BAT_Vs);

	battable[0xa].batl = BATL(0xa0000000, BAT_I|BAT_G, BAT_PP_RW);
	battable[0xa].batu = BATU(0xa0000000, BAT_BL_256M, BAT_Vs);

	battable[0xb].batl = BATL(0xb0000000, BAT_I|BAT_G, BAT_PP_RW);
	battable[0xb].batu = BATU(0xb0000000, BAT_BL_256M, BAT_Vs);

	/*
	 * Map obio devices.
	 */
	battable[0xf].batl = BATL(0xf0000000, BAT_I|BAT_G, BAT_PP_RW);
	battable[0xf].batu = BATU(0xf0000000, BAT_BL_256M, BAT_Vs);

	/*
	 * Use an IBAT and a DBAT to map the bottom segment of memory
	 * where we are.  Turn off instruction relocation temporarily
	 * to prevent faults while reprogramming the IBAT.
	 */
	msr = mfmsr();
	mtmsr(msr & ~PSL_IR);
	__asm (".balign 32; \n"
	    "mtibatu 0,%0; mtibatl 0,%1; isync; \n"
	    "mtdbatu 0,%0; mtdbatl 0,%1; isync"
	    :: "r"(battable[0].batu), "r"(battable[0].batl));
	mtmsr(msr);

#ifdef WII
	if (strcmp(installed_platform(), "wii") != 0) {
#endif
		/* map pci space */
		__asm __volatile("mtdbatu 1,%0" :: "r"(battable[8].batu));
		__asm __volatile("mtdbatl 1,%0" :: "r"(battable[8].batl));
#ifdef WII
	}
#endif
	isync();

	/* set global direct map flag */
	hw_direct_map = 1;

	mem_regions(&pregions, &pregions_sz, &regions, &regions_sz);
	CTR0(KTR_PMAP, "moea_bootstrap: physical memory");

	for (i = 0; i < pregions_sz; i++) {
		vm_offset_t pa;
		vm_offset_t end;

		CTR3(KTR_PMAP, "physregion: %#x - %#x (%#x)",
		    pregions[i].mr_start,
		    pregions[i].mr_start + pregions[i].mr_size,
		    pregions[i].mr_size);
		/*
		 * Install entries into the BAT table to allow all
		 * of physmem to be covered by on-demand BAT entries.
		 * The loop will sometimes set the same battable element
		 * twice, but that's fine since they won't be used for
		 * a while yet.
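		 * Each BAT entry spans one 256 MB segment, so the segment
		 * number of the address (pa >> ADDR_SR_SHFT) selects the
		 * battable slot to fill.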
741 */ 742 pa = pregions[i].mr_start & 0xf0000000; 743 end = pregions[i].mr_start + pregions[i].mr_size; 744 do { 745 u_int n = pa >> ADDR_SR_SHFT; 746 747 battable[n].batl = BATL(pa, BAT_M, BAT_PP_RW); 748 battable[n].batu = BATU(pa, BAT_BL_256M, BAT_Vs); 749 pa += SEGMENT_LENGTH; 750 } while (pa < end); 751 } 752 753 if (sizeof(phys_avail)/sizeof(phys_avail[0]) < regions_sz) 754 panic("moea_bootstrap: phys_avail too small"); 755 756 phys_avail_count = 0; 757 physsz = 0; 758 hwphyssz = 0; 759 TUNABLE_ULONG_FETCH("hw.physmem", (u_long *) &hwphyssz); 760 for (i = 0, j = 0; i < regions_sz; i++, j += 2) { 761 CTR3(KTR_PMAP, "region: %#x - %#x (%#x)", regions[i].mr_start, 762 regions[i].mr_start + regions[i].mr_size, 763 regions[i].mr_size); 764 if (hwphyssz != 0 && 765 (physsz + regions[i].mr_size) >= hwphyssz) { 766 if (physsz < hwphyssz) { 767 phys_avail[j] = regions[i].mr_start; 768 phys_avail[j + 1] = regions[i].mr_start + 769 hwphyssz - physsz; 770 physsz = hwphyssz; 771 phys_avail_count++; 772 } 773 break; 774 } 775 phys_avail[j] = regions[i].mr_start; 776 phys_avail[j + 1] = regions[i].mr_start + regions[i].mr_size; 777 phys_avail_count++; 778 physsz += regions[i].mr_size; 779 } 780 781 /* Check for overlap with the kernel and exception vectors */ 782 for (j = 0; j < 2*phys_avail_count; j+=2) { 783 if (phys_avail[j] < EXC_LAST) 784 phys_avail[j] += EXC_LAST; 785 786 if (kernelstart >= phys_avail[j] && 787 kernelstart < phys_avail[j+1]) { 788 if (kernelend < phys_avail[j+1]) { 789 phys_avail[2*phys_avail_count] = 790 (kernelend & ~PAGE_MASK) + PAGE_SIZE; 791 phys_avail[2*phys_avail_count + 1] = 792 phys_avail[j+1]; 793 phys_avail_count++; 794 } 795 796 phys_avail[j+1] = kernelstart & ~PAGE_MASK; 797 } 798 799 if (kernelend >= phys_avail[j] && 800 kernelend < phys_avail[j+1]) { 801 if (kernelstart > phys_avail[j]) { 802 phys_avail[2*phys_avail_count] = phys_avail[j]; 803 phys_avail[2*phys_avail_count + 1] = 804 kernelstart & ~PAGE_MASK; 805 phys_avail_count++; 806 } 807 808 phys_avail[j] = (kernelend & ~PAGE_MASK) + PAGE_SIZE; 809 } 810 } 811 812 physmem = btoc(physsz); 813 814 /* 815 * Allocate PTEG table. 816 */ 817#ifdef PTEGCOUNT 818 moea_pteg_count = PTEGCOUNT; 819#else 820 moea_pteg_count = 0x1000; 821 822 while (moea_pteg_count < physmem) 823 moea_pteg_count <<= 1; 824 825 moea_pteg_count >>= 1; 826#endif /* PTEGCOUNT */ 827 828 size = moea_pteg_count * sizeof(struct pteg); 829 CTR2(KTR_PMAP, "moea_bootstrap: %d PTEGs, %d bytes", moea_pteg_count, 830 size); 831 moea_pteg_table = (struct pteg *)moea_bootstrap_alloc(size, size); 832 CTR1(KTR_PMAP, "moea_bootstrap: PTEG table at %p", moea_pteg_table); 833 bzero((void *)moea_pteg_table, moea_pteg_count * sizeof(struct pteg)); 834 moea_pteg_mask = moea_pteg_count - 1; 835 836 /* 837 * Allocate pv/overflow lists. 838 */ 839 size = sizeof(struct pvo_head) * moea_pteg_count; 840 moea_pvo_table = (struct pvo_head *)moea_bootstrap_alloc(size, 841 PAGE_SIZE); 842 CTR1(KTR_PMAP, "moea_bootstrap: PVO table at %p", moea_pvo_table); 843 for (i = 0; i < moea_pteg_count; i++) 844 LIST_INIT(&moea_pvo_table[i]); 845 846 /* 847 * Initialize the lock that synchronizes access to the pteg and pvo 848 * tables. 849 */ 850 mtx_init(&moea_table_mutex, "pmap table", NULL, MTX_DEF | 851 MTX_RECURSE); 852 mtx_init(&moea_vsid_mutex, "VSID table", NULL, MTX_DEF); 853 854 mtx_init(&tlbie_mtx, "tlbie", NULL, MTX_SPIN); 855 856 /* 857 * Initialise the unmanaged pvo pool. 
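	 * This pool is carved out of bootstrap memory because UMA is not up
	 * yet; entries handed out from it are tagged PVO_BOOTSTRAP and are
	 * never returned to a zone.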
858 */ 859 moea_bpvo_pool = (struct pvo_entry *)moea_bootstrap_alloc( 860 BPVO_POOL_SIZE*sizeof(struct pvo_entry), 0); 861 moea_bpvo_pool_index = 0; 862 863 /* 864 * Make sure kernel vsid is allocated as well as VSID 0. 865 */ 866 moea_vsid_bitmap[(KERNEL_VSIDBITS & (NPMAPS - 1)) / VSID_NBPW] 867 |= 1 << (KERNEL_VSIDBITS % VSID_NBPW); 868 moea_vsid_bitmap[0] |= 1; 869 870 /* 871 * Initialize the kernel pmap (which is statically allocated). 872 */ 873 PMAP_LOCK_INIT(kernel_pmap); 874 for (i = 0; i < 16; i++) 875 kernel_pmap->pm_sr[i] = EMPTY_SEGMENT + i; 876 CPU_FILL(&kernel_pmap->pm_active); 877 RB_INIT(&kernel_pmap->pmap_pvo); 878 879 /* 880 * Initialize the global pv list lock. 881 */ 882 rw_init(&pvh_global_lock, "pmap pv global"); 883 884 /* 885 * Set up the Open Firmware mappings 886 */ 887 chosen = OF_finddevice("/chosen"); 888 if (chosen != -1 && OF_getprop(chosen, "mmu", &mmui, 4) != -1 && 889 (mmu = OF_instance_to_package(mmui)) != -1 && 890 (sz = OF_getproplen(mmu, "translations")) != -1) { 891 translations = NULL; 892 for (i = 0; phys_avail[i] != 0; i += 2) { 893 if (phys_avail[i + 1] >= sz) { 894 translations = (struct ofw_map *)phys_avail[i]; 895 break; 896 } 897 } 898 if (translations == NULL) 899 panic("moea_bootstrap: no space to copy translations"); 900 bzero(translations, sz); 901 if (OF_getprop(mmu, "translations", translations, sz) == -1) 902 panic("moea_bootstrap: can't get ofw translations"); 903 CTR0(KTR_PMAP, "moea_bootstrap: translations"); 904 sz /= sizeof(*translations); 905 qsort(translations, sz, sizeof (*translations), om_cmp); 906 for (i = 0; i < sz; i++) { 907 CTR3(KTR_PMAP, "translation: pa=%#x va=%#x len=%#x", 908 translations[i].om_pa, translations[i].om_va, 909 translations[i].om_len); 910 911 /* 912 * If the mapping is 1:1, let the RAM and device 913 * on-demand BAT tables take care of the translation. 914 */ 915 if (translations[i].om_va == translations[i].om_pa) 916 continue; 917 918 /* Enter the pages */ 919 for (off = 0; off < translations[i].om_len; 920 off += PAGE_SIZE) 921 moea_kenter(mmup, translations[i].om_va + off, 922 translations[i].om_pa + off); 923 } 924 } 925 926 /* 927 * Calculate the last available physical address. 928 */ 929 for (i = 0; phys_avail[i + 2] != 0; i += 2) 930 ; 931 Maxmem = powerpc_btop(phys_avail[i + 1]); 932 933 moea_cpu_bootstrap(mmup,0); 934 935 pmap_bootstrapped++; 936 937 /* 938 * Set the start and end of kva. 939 */ 940 virtual_avail = VM_MIN_KERNEL_ADDRESS; 941 virtual_end = VM_MAX_SAFE_KERNEL_ADDRESS; 942 943 /* 944 * Allocate a kernel stack with a guard page for thread0 and map it 945 * into the kernel page map. 946 */ 947 pa = moea_bootstrap_alloc(KSTACK_PAGES * PAGE_SIZE, PAGE_SIZE); 948 va = virtual_avail + KSTACK_GUARD_PAGES * PAGE_SIZE; 949 virtual_avail = va + KSTACK_PAGES * PAGE_SIZE; 950 CTR2(KTR_PMAP, "moea_bootstrap: kstack0 at %#x (%#x)", pa, va); 951 thread0.td_kstack = va; 952 thread0.td_kstack_pages = KSTACK_PAGES; 953 for (i = 0; i < KSTACK_PAGES; i++) { 954 moea_kenter(mmup, va, pa); 955 pa += PAGE_SIZE; 956 va += PAGE_SIZE; 957 } 958 959 /* 960 * Allocate virtual address space for the message buffer. 961 */ 962 pa = msgbuf_phys = moea_bootstrap_alloc(msgbufsize, PAGE_SIZE); 963 msgbufp = (struct msgbuf *)virtual_avail; 964 va = virtual_avail; 965 virtual_avail += round_page(msgbufsize); 966 while (va < virtual_avail) { 967 moea_kenter(mmup, va, pa); 968 pa += PAGE_SIZE; 969 va += PAGE_SIZE; 970 } 971 972 /* 973 * Allocate virtual address space for the dynamic percpu area. 
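	 * As with the message buffer above, the backing pages come from
	 * moea_bootstrap_alloc() and are wired into the kernel pmap with
	 * moea_kenter() one page at a time.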
	 */
	pa = moea_bootstrap_alloc(DPCPU_SIZE, PAGE_SIZE);
	dpcpu = (void *)virtual_avail;
	va = virtual_avail;
	virtual_avail += DPCPU_SIZE;
	while (va < virtual_avail) {
		moea_kenter(mmup, va, pa);
		pa += PAGE_SIZE;
		va += PAGE_SIZE;
	}
	dpcpu_init(dpcpu, 0);
}

/*
 * Activate a user pmap.  The pmap must be activated before its address
 * space can be accessed in any way.
 */
void
moea_activate(mmu_t mmu, struct thread *td)
{
	pmap_t	pm, pmr;

	/*
	 * Load all the data we need up front to encourage the compiler to
	 * not issue any loads while we have interrupts disabled below.
	 */
	pm = &td->td_proc->p_vmspace->vm_pmap;
	pmr = pm->pmap_phys;

	CPU_SET(PCPU_GET(cpuid), &pm->pm_active);
	PCPU_SET(curpmap, pmr);
}

void
moea_deactivate(mmu_t mmu, struct thread *td)
{
	pmap_t	pm;

	pm = &td->td_proc->p_vmspace->vm_pmap;
	CPU_CLR(PCPU_GET(cpuid), &pm->pm_active);
	PCPU_SET(curpmap, NULL);
}

void
moea_unwire(mmu_t mmu, pmap_t pm, vm_offset_t sva, vm_offset_t eva)
{
	struct pvo_entry key, *pvo;

	PMAP_LOCK(pm);
	key.pvo_vaddr = sva;
	for (pvo = RB_NFIND(pvo_tree, &pm->pmap_pvo, &key);
	    pvo != NULL && PVO_VADDR(pvo) < eva;
	    pvo = RB_NEXT(pvo_tree, &pm->pmap_pvo, pvo)) {
		if ((pvo->pvo_vaddr & PVO_WIRED) == 0)
			panic("moea_unwire: pvo %p is missing PVO_WIRED", pvo);
		pvo->pvo_vaddr &= ~PVO_WIRED;
		pm->pm_stats.wired_count--;
	}
	PMAP_UNLOCK(pm);
}

void
moea_copy_page(mmu_t mmu, vm_page_t msrc, vm_page_t mdst)
{
	vm_offset_t	dst;
	vm_offset_t	src;

	dst = VM_PAGE_TO_PHYS(mdst);
	src = VM_PAGE_TO_PHYS(msrc);

	bcopy((void *)src, (void *)dst, PAGE_SIZE);
}

void
moea_copy_pages(mmu_t mmu, vm_page_t *ma, vm_offset_t a_offset,
    vm_page_t *mb, vm_offset_t b_offset, int xfersize)
{
	void *a_cp, *b_cp;
	vm_offset_t a_pg_offset, b_pg_offset;
	int cnt;

	while (xfersize > 0) {
		a_pg_offset = a_offset & PAGE_MASK;
		cnt = min(xfersize, PAGE_SIZE - a_pg_offset);
		a_cp = (char *)VM_PAGE_TO_PHYS(ma[a_offset >> PAGE_SHIFT]) +
		    a_pg_offset;
		b_pg_offset = b_offset & PAGE_MASK;
		cnt = min(cnt, PAGE_SIZE - b_pg_offset);
		b_cp = (char *)VM_PAGE_TO_PHYS(mb[b_offset >> PAGE_SHIFT]) +
		    b_pg_offset;
		bcopy(a_cp, b_cp, cnt);
		a_offset += cnt;
		b_offset += cnt;
		xfersize -= cnt;
	}
}

/*
 * Zero a page of physical memory by temporarily mapping it into the tlb.
 */
void
moea_zero_page(mmu_t mmu, vm_page_t m)
{
	vm_offset_t off, pa = VM_PAGE_TO_PHYS(m);

	for (off = 0; off < PAGE_SIZE; off += cacheline_size)
		__asm __volatile("dcbz 0,%0" :: "r"(pa + off));
}

void
moea_zero_page_area(mmu_t mmu, vm_page_t m, int off, int size)
{
	vm_offset_t pa = VM_PAGE_TO_PHYS(m);
	void *va = (void *)(pa + off);

	bzero(va, size);
}

void
moea_zero_page_idle(mmu_t mmu, vm_page_t m)
{

	moea_zero_page(mmu, m);
}

/*
 * Map the given physical page at the specified virtual address in the
 * target pmap with the protection requested.  If specified the page
 * will be wired down.
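 *
 * The operation can fail transiently with ENOMEM when the PVO zones are
 * exhausted; unless PMAP_ENTER_NOSLEEP is given, the loop below waits for
 * free pages and retries.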
 */
int
moea_enter(mmu_t mmu, pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot,
    u_int flags, int8_t psind)
{
	int error;

	for (;;) {
		rw_wlock(&pvh_global_lock);
		PMAP_LOCK(pmap);
		error = moea_enter_locked(pmap, va, m, prot, flags, psind);
		rw_wunlock(&pvh_global_lock);
		PMAP_UNLOCK(pmap);
		if (error != ENOMEM)
			return (KERN_SUCCESS);
		if ((flags & PMAP_ENTER_NOSLEEP) != 0)
			return (KERN_RESOURCE_SHORTAGE);
		VM_OBJECT_ASSERT_UNLOCKED(m->object);
		VM_WAIT;
	}
}

/*
 * Map the given physical page at the specified virtual address in the
 * target pmap with the protection requested.  If specified the page
 * will be wired down.
 *
 * The page queues and pmap must be locked.
 */
static int
moea_enter_locked(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot,
    u_int flags, int8_t psind __unused)
{
	struct pvo_head *pvo_head;
	uma_zone_t zone;
	vm_page_t pg;
	u_int pte_lo, pvo_flags;
	int error;

	if (!moea_initialized) {
		pvo_head = &moea_pvo_kunmanaged;
		zone = moea_upvo_zone;
		pvo_flags = 0;
		pg = NULL;
	} else {
		pvo_head = vm_page_to_pvoh(m);
		pg = m;
		zone = moea_mpvo_zone;
		pvo_flags = PVO_MANAGED;
	}
	if (pmap_bootstrapped)
		rw_assert(&pvh_global_lock, RA_WLOCKED);
	PMAP_LOCK_ASSERT(pmap, MA_OWNED);
	if ((m->oflags & VPO_UNMANAGED) == 0 && !vm_page_xbusied(m))
		VM_OBJECT_ASSERT_LOCKED(m->object);

	/* XXX change the pvo head for fake pages */
	if ((m->oflags & VPO_UNMANAGED) != 0) {
		pvo_flags &= ~PVO_MANAGED;
		pvo_head = &moea_pvo_kunmanaged;
		zone = moea_upvo_zone;
	}

	pte_lo = moea_calc_wimg(VM_PAGE_TO_PHYS(m), pmap_page_get_memattr(m));

	if (prot & VM_PROT_WRITE) {
		pte_lo |= PTE_BW;
		if (pmap_bootstrapped &&
		    (m->oflags & VPO_UNMANAGED) == 0)
			vm_page_aflag_set(m, PGA_WRITEABLE);
	} else
		pte_lo |= PTE_BR;

	if ((flags & PMAP_ENTER_WIRED) != 0)
		pvo_flags |= PVO_WIRED;

	error = moea_pvo_enter(pmap, zone, pvo_head, va, VM_PAGE_TO_PHYS(m),
	    pte_lo, pvo_flags);

	/*
	 * Flush the real page from the instruction cache.  This has to be
	 * done for all user mappings to prevent information leakage via the
	 * instruction cache.  moea_pvo_enter() returns ENOENT for the first
	 * mapping for a page.
	 */
	if (pmap != kernel_pmap && error == ENOENT &&
	    (pte_lo & (PTE_I | PTE_G)) == 0)
		moea_syncicache(VM_PAGE_TO_PHYS(m), PAGE_SIZE);

	return (error);
}

/*
 * Maps a sequence of resident pages belonging to the same object.
 * The sequence begins with the given page m_start.  This page is
 * mapped at the given virtual address start.  Each subsequent page is
 * mapped at a virtual address that is offset from start by the same
 * amount as the page is offset from m_start within the object.  The
 * last page in the sequence is the page with the largest offset from
 * m_start that can be mapped at a virtual address less than the given
 * virtual address end.  Not every virtual page between start and end
 * is mapped; only those for which a resident page exists with the
 * corresponding offset from m_start are mapped.
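 *
 * The mappings are entered without PMAP_ENTER_WIRED and with write
 * permission masked off, so a later write fault still goes through the
 * full moea_enter() path.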
1206 */ 1207void 1208moea_enter_object(mmu_t mmu, pmap_t pm, vm_offset_t start, vm_offset_t end, 1209 vm_page_t m_start, vm_prot_t prot) 1210{ 1211 vm_page_t m; 1212 vm_pindex_t diff, psize; 1213 1214 VM_OBJECT_ASSERT_LOCKED(m_start->object); 1215 1216 psize = atop(end - start); 1217 m = m_start; 1218 rw_wlock(&pvh_global_lock); 1219 PMAP_LOCK(pm); 1220 while (m != NULL && (diff = m->pindex - m_start->pindex) < psize) { 1221 moea_enter_locked(pm, start + ptoa(diff), m, prot & 1222 (VM_PROT_READ | VM_PROT_EXECUTE), 0, 0); 1223 m = TAILQ_NEXT(m, listq); 1224 } 1225 rw_wunlock(&pvh_global_lock); 1226 PMAP_UNLOCK(pm); 1227} 1228 1229void 1230moea_enter_quick(mmu_t mmu, pmap_t pm, vm_offset_t va, vm_page_t m, 1231 vm_prot_t prot) 1232{ 1233 1234 rw_wlock(&pvh_global_lock); 1235 PMAP_LOCK(pm); 1236 moea_enter_locked(pm, va, m, prot & (VM_PROT_READ | VM_PROT_EXECUTE), 1237 0, 0); 1238 rw_wunlock(&pvh_global_lock); 1239 PMAP_UNLOCK(pm); 1240} 1241 1242vm_paddr_t 1243moea_extract(mmu_t mmu, pmap_t pm, vm_offset_t va) 1244{ 1245 struct pvo_entry *pvo; 1246 vm_paddr_t pa; 1247 1248 PMAP_LOCK(pm); 1249 pvo = moea_pvo_find_va(pm, va & ~ADDR_POFF, NULL); 1250 if (pvo == NULL) 1251 pa = 0; 1252 else 1253 pa = (pvo->pvo_pte.pte.pte_lo & PTE_RPGN) | (va & ADDR_POFF); 1254 PMAP_UNLOCK(pm); 1255 return (pa); 1256} 1257 1258/* 1259 * Atomically extract and hold the physical page with the given 1260 * pmap and virtual address pair if that mapping permits the given 1261 * protection. 1262 */ 1263vm_page_t 1264moea_extract_and_hold(mmu_t mmu, pmap_t pmap, vm_offset_t va, vm_prot_t prot) 1265{ 1266 struct pvo_entry *pvo; 1267 vm_page_t m; 1268 vm_paddr_t pa; 1269 1270 m = NULL; 1271 pa = 0; 1272 PMAP_LOCK(pmap); 1273retry: 1274 pvo = moea_pvo_find_va(pmap, va & ~ADDR_POFF, NULL); 1275 if (pvo != NULL && (pvo->pvo_pte.pte.pte_hi & PTE_VALID) && 1276 ((pvo->pvo_pte.pte.pte_lo & PTE_PP) == PTE_RW || 1277 (prot & VM_PROT_WRITE) == 0)) { 1278 if (vm_page_pa_tryrelock(pmap, pvo->pvo_pte.pte.pte_lo & PTE_RPGN, &pa)) 1279 goto retry; 1280 m = PHYS_TO_VM_PAGE(pvo->pvo_pte.pte.pte_lo & PTE_RPGN); 1281 vm_page_hold(m); 1282 } 1283 PA_UNLOCK_COND(pa); 1284 PMAP_UNLOCK(pmap); 1285 return (m); 1286} 1287 1288void 1289moea_init(mmu_t mmu) 1290{ 1291 1292 moea_upvo_zone = uma_zcreate("UPVO entry", sizeof (struct pvo_entry), 1293 NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 1294 UMA_ZONE_VM | UMA_ZONE_NOFREE); 1295 moea_mpvo_zone = uma_zcreate("MPVO entry", sizeof(struct pvo_entry), 1296 NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 1297 UMA_ZONE_VM | UMA_ZONE_NOFREE); 1298 moea_initialized = TRUE; 1299} 1300 1301boolean_t 1302moea_is_referenced(mmu_t mmu, vm_page_t m) 1303{ 1304 boolean_t rv; 1305 1306 KASSERT((m->oflags & VPO_UNMANAGED) == 0, 1307 ("moea_is_referenced: page %p is not managed", m)); 1308 rw_wlock(&pvh_global_lock); 1309 rv = moea_query_bit(m, PTE_REF); 1310 rw_wunlock(&pvh_global_lock); 1311 return (rv); 1312} 1313 1314boolean_t 1315moea_is_modified(mmu_t mmu, vm_page_t m) 1316{ 1317 boolean_t rv; 1318 1319 KASSERT((m->oflags & VPO_UNMANAGED) == 0, 1320 ("moea_is_modified: page %p is not managed", m)); 1321 1322 /* 1323 * If the page is not exclusive busied, then PGA_WRITEABLE cannot be 1324 * concurrently set while the object is locked. Thus, if PGA_WRITEABLE 1325 * is clear, no PTEs can have PTE_CHG set. 
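	 * In that case the scan of the page's PVO list can be skipped
	 * entirely.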
1326 */ 1327 VM_OBJECT_ASSERT_WLOCKED(m->object); 1328 if (!vm_page_xbusied(m) && (m->aflags & PGA_WRITEABLE) == 0) 1329 return (FALSE); 1330 rw_wlock(&pvh_global_lock); 1331 rv = moea_query_bit(m, PTE_CHG); 1332 rw_wunlock(&pvh_global_lock); 1333 return (rv); 1334} 1335 1336boolean_t 1337moea_is_prefaultable(mmu_t mmu, pmap_t pmap, vm_offset_t va) 1338{ 1339 struct pvo_entry *pvo; 1340 boolean_t rv; 1341 1342 PMAP_LOCK(pmap); 1343 pvo = moea_pvo_find_va(pmap, va & ~ADDR_POFF, NULL); 1344 rv = pvo == NULL || (pvo->pvo_pte.pte.pte_hi & PTE_VALID) == 0; 1345 PMAP_UNLOCK(pmap); 1346 return (rv); 1347} 1348 1349void 1350moea_clear_modify(mmu_t mmu, vm_page_t m) 1351{ 1352 1353 KASSERT((m->oflags & VPO_UNMANAGED) == 0, 1354 ("moea_clear_modify: page %p is not managed", m)); 1355 VM_OBJECT_ASSERT_WLOCKED(m->object); 1356 KASSERT(!vm_page_xbusied(m), 1357 ("moea_clear_modify: page %p is exclusive busy", m)); 1358 1359 /* 1360 * If the page is not PGA_WRITEABLE, then no PTEs can have PTE_CHG 1361 * set. If the object containing the page is locked and the page is 1362 * not exclusive busied, then PGA_WRITEABLE cannot be concurrently set. 1363 */ 1364 if ((m->aflags & PGA_WRITEABLE) == 0) 1365 return; 1366 rw_wlock(&pvh_global_lock); 1367 moea_clear_bit(m, PTE_CHG); 1368 rw_wunlock(&pvh_global_lock); 1369} 1370 1371/* 1372 * Clear the write and modified bits in each of the given page's mappings. 1373 */ 1374void 1375moea_remove_write(mmu_t mmu, vm_page_t m) 1376{ 1377 struct pvo_entry *pvo; 1378 struct pte *pt; 1379 pmap_t pmap; 1380 u_int lo; 1381 1382 KASSERT((m->oflags & VPO_UNMANAGED) == 0, 1383 ("moea_remove_write: page %p is not managed", m)); 1384 1385 /* 1386 * If the page is not exclusive busied, then PGA_WRITEABLE cannot be 1387 * set by another thread while the object is locked. Thus, 1388 * if PGA_WRITEABLE is clear, no page table entries need updating. 1389 */ 1390 VM_OBJECT_ASSERT_WLOCKED(m->object); 1391 if (!vm_page_xbusied(m) && (m->aflags & PGA_WRITEABLE) == 0) 1392 return; 1393 rw_wlock(&pvh_global_lock); 1394 lo = moea_attr_fetch(m); 1395 powerpc_sync(); 1396 LIST_FOREACH(pvo, vm_page_to_pvoh(m), pvo_vlink) { 1397 pmap = pvo->pvo_pmap; 1398 PMAP_LOCK(pmap); 1399 if ((pvo->pvo_pte.pte.pte_lo & PTE_PP) != PTE_BR) { 1400 pt = moea_pvo_to_pte(pvo, -1); 1401 pvo->pvo_pte.pte.pte_lo &= ~PTE_PP; 1402 pvo->pvo_pte.pte.pte_lo |= PTE_BR; 1403 if (pt != NULL) { 1404 moea_pte_synch(pt, &pvo->pvo_pte.pte); 1405 lo |= pvo->pvo_pte.pte.pte_lo; 1406 pvo->pvo_pte.pte.pte_lo &= ~PTE_CHG; 1407 moea_pte_change(pt, &pvo->pvo_pte.pte, 1408 pvo->pvo_vaddr); 1409 mtx_unlock(&moea_table_mutex); 1410 } 1411 } 1412 PMAP_UNLOCK(pmap); 1413 } 1414 if ((lo & PTE_CHG) != 0) { 1415 moea_attr_clear(m, PTE_CHG); 1416 vm_page_dirty(m); 1417 } 1418 vm_page_aflag_clear(m, PGA_WRITEABLE); 1419 rw_wunlock(&pvh_global_lock); 1420} 1421 1422/* 1423 * moea_ts_referenced: 1424 * 1425 * Return a count of reference bits for a page, clearing those bits. 1426 * It is not necessary for every reference bit to be cleared, but it 1427 * is necessary that 0 only be returned when there are truly no 1428 * reference bits set. 1429 * 1430 * XXX: The exact number of bits to check and clear is a matter that 1431 * should be tested and standardized at some point in the future for 1432 * optimal aging of shared pages. 
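 *
 * The current implementation visits every mapping of the page, clearing
 * PTE_REF in each and returning how many had it set.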
1433 */ 1434int 1435moea_ts_referenced(mmu_t mmu, vm_page_t m) 1436{ 1437 int count; 1438 1439 KASSERT((m->oflags & VPO_UNMANAGED) == 0, 1440 ("moea_ts_referenced: page %p is not managed", m)); 1441 rw_wlock(&pvh_global_lock); 1442 count = moea_clear_bit(m, PTE_REF); 1443 rw_wunlock(&pvh_global_lock); 1444 return (count); 1445} 1446 1447/* 1448 * Modify the WIMG settings of all mappings for a page. 1449 */ 1450void 1451moea_page_set_memattr(mmu_t mmu, vm_page_t m, vm_memattr_t ma) 1452{ 1453 struct pvo_entry *pvo; 1454 struct pvo_head *pvo_head; 1455 struct pte *pt; 1456 pmap_t pmap; 1457 u_int lo; 1458 1459 if ((m->oflags & VPO_UNMANAGED) != 0) { 1460 m->md.mdpg_cache_attrs = ma; 1461 return; 1462 } 1463 1464 rw_wlock(&pvh_global_lock); 1465 pvo_head = vm_page_to_pvoh(m); 1466 lo = moea_calc_wimg(VM_PAGE_TO_PHYS(m), ma); 1467 1468 LIST_FOREACH(pvo, pvo_head, pvo_vlink) { 1469 pmap = pvo->pvo_pmap; 1470 PMAP_LOCK(pmap); 1471 pt = moea_pvo_to_pte(pvo, -1); 1472 pvo->pvo_pte.pte.pte_lo &= ~PTE_WIMG; 1473 pvo->pvo_pte.pte.pte_lo |= lo; 1474 if (pt != NULL) { 1475 moea_pte_change(pt, &pvo->pvo_pte.pte, 1476 pvo->pvo_vaddr); 1477 if (pvo->pvo_pmap == kernel_pmap) 1478 isync(); 1479 } 1480 mtx_unlock(&moea_table_mutex); 1481 PMAP_UNLOCK(pmap); 1482 } 1483 m->md.mdpg_cache_attrs = ma; 1484 rw_wunlock(&pvh_global_lock); 1485} 1486 1487/* 1488 * Map a wired page into kernel virtual address space. 1489 */ 1490void 1491moea_kenter(mmu_t mmu, vm_offset_t va, vm_paddr_t pa) 1492{ 1493 1494 moea_kenter_attr(mmu, va, pa, VM_MEMATTR_DEFAULT); 1495} 1496 1497void 1498moea_kenter_attr(mmu_t mmu, vm_offset_t va, vm_offset_t pa, vm_memattr_t ma) 1499{ 1500 u_int pte_lo; 1501 int error; 1502 1503#if 0 1504 if (va < VM_MIN_KERNEL_ADDRESS) 1505 panic("moea_kenter: attempt to enter non-kernel address %#x", 1506 va); 1507#endif 1508 1509 pte_lo = moea_calc_wimg(pa, ma); 1510 1511 PMAP_LOCK(kernel_pmap); 1512 error = moea_pvo_enter(kernel_pmap, moea_upvo_zone, 1513 &moea_pvo_kunmanaged, va, pa, pte_lo, PVO_WIRED); 1514 1515 if (error != 0 && error != ENOENT) 1516 panic("moea_kenter: failed to enter va %#x pa %#x: %d", va, 1517 pa, error); 1518 1519 PMAP_UNLOCK(kernel_pmap); 1520} 1521 1522/* 1523 * Extract the physical page address associated with the given kernel virtual 1524 * address. 1525 */ 1526vm_paddr_t 1527moea_kextract(mmu_t mmu, vm_offset_t va) 1528{ 1529 struct pvo_entry *pvo; 1530 vm_paddr_t pa; 1531 1532 /* 1533 * Allow direct mappings on 32-bit OEA 1534 */ 1535 if (va < VM_MIN_KERNEL_ADDRESS) { 1536 return (va); 1537 } 1538 1539 PMAP_LOCK(kernel_pmap); 1540 pvo = moea_pvo_find_va(kernel_pmap, va & ~ADDR_POFF, NULL); 1541 KASSERT(pvo != NULL, ("moea_kextract: no addr found")); 1542 pa = (pvo->pvo_pte.pte.pte_lo & PTE_RPGN) | (va & ADDR_POFF); 1543 PMAP_UNLOCK(kernel_pmap); 1544 return (pa); 1545} 1546 1547/* 1548 * Remove a wired page from kernel virtual address space. 1549 */ 1550void 1551moea_kremove(mmu_t mmu, vm_offset_t va) 1552{ 1553 1554 moea_remove(mmu, kernel_pmap, va, va + PAGE_SIZE); 1555} 1556 1557/* 1558 * Map a range of physical addresses into kernel virtual address space. 1559 * 1560 * The value passed in *virt is a suggested virtual address for the mapping. 1561 * Architectures which can support a direct-mapped physical to virtual region 1562 * can return the appropriate address within that region, leaving '*virt' 1563 * unchanged. We cannot and therefore do not; *virt is updated with the 1564 * first usable address after the mapped region. 
 */
vm_offset_t
moea_map(mmu_t mmu, vm_offset_t *virt, vm_paddr_t pa_start,
    vm_paddr_t pa_end, int prot)
{
	vm_offset_t	sva, va;

	sva = *virt;
	va = sva;
	for (; pa_start < pa_end; pa_start += PAGE_SIZE, va += PAGE_SIZE)
		moea_kenter(mmu, va, pa_start);
	*virt = va;
	return (sva);
}

/*
 * Returns true if the pmap's pv is one of the first
 * 16 pvs linked to from this page.  This count may
 * be changed upwards or downwards in the future; it
 * is only necessary that true be returned for a small
 * subset of pmaps for proper page aging.
 */
boolean_t
moea_page_exists_quick(mmu_t mmu, pmap_t pmap, vm_page_t m)
{
	int loops;
	struct pvo_entry *pvo;
	boolean_t rv;

	KASSERT((m->oflags & VPO_UNMANAGED) == 0,
	    ("moea_page_exists_quick: page %p is not managed", m));
	loops = 0;
	rv = FALSE;
	rw_wlock(&pvh_global_lock);
	LIST_FOREACH(pvo, vm_page_to_pvoh(m), pvo_vlink) {
		if (pvo->pvo_pmap == pmap) {
			rv = TRUE;
			break;
		}
		if (++loops >= 16)
			break;
	}
	rw_wunlock(&pvh_global_lock);
	return (rv);
}

/*
 * Return the number of managed mappings to the given physical page
 * that are wired.
 */
int
moea_page_wired_mappings(mmu_t mmu, vm_page_t m)
{
	struct pvo_entry *pvo;
	int count;

	count = 0;
	if ((m->oflags & VPO_UNMANAGED) != 0)
		return (count);
	rw_wlock(&pvh_global_lock);
	LIST_FOREACH(pvo, vm_page_to_pvoh(m), pvo_vlink)
		if ((pvo->pvo_vaddr & PVO_WIRED) != 0)
			count++;
	rw_wunlock(&pvh_global_lock);
	return (count);
}

static u_int	moea_vsidcontext;

void
moea_pinit(mmu_t mmu, pmap_t pmap)
{
	int	i, mask;
	u_int	entropy;

	KASSERT((int)pmap < VM_MIN_KERNEL_ADDRESS, ("moea_pinit: virt pmap"));
	RB_INIT(&pmap->pmap_pvo);

	entropy = 0;
	__asm __volatile("mftb %0" : "=r"(entropy));

	if ((pmap->pmap_phys = (pmap_t)moea_kextract(mmu, (vm_offset_t)pmap))
	    == NULL) {
		pmap->pmap_phys = pmap;
	}

	mtx_lock(&moea_vsid_mutex);
	/*
	 * Allocate some segment registers for this pmap.
	 */
	for (i = 0; i < NPMAPS; i += VSID_NBPW) {
		u_int	hash, n;

		/*
		 * Create a new value by multiplying by a prime and adding in
		 * entropy from the timebase register.  This is to make the
		 * VSID more random so that the PT hash function collides
		 * less often.  (Note that the prime causes gcc to do shifts
		 * instead of a multiply.)
		 */
		moea_vsidcontext = (moea_vsidcontext * 0x1105) + entropy;
		hash = moea_vsidcontext & (NPMAPS - 1);
		if (hash == 0)		/* 0 is special, avoid it */
			continue;
		n = hash >> 5;
		mask = 1 << (hash & (VSID_NBPW - 1));
		hash = (moea_vsidcontext & 0xfffff);
		if (moea_vsid_bitmap[n] & mask) {	/* collision? */
			/* anything free in this bucket?
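			 * Each word of moea_vsid_bitmap covers VSID_NBPW (32)
			 * buckets; if this word is full, reseed the entropy
			 * and try another hash.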
*/ 1675 if (moea_vsid_bitmap[n] == 0xffffffff) { 1676 entropy = (moea_vsidcontext >> 20); 1677 continue; 1678 } 1679 i = ffs(~moea_vsid_bitmap[n]) - 1; 1680 mask = 1 << i; 1681 hash &= 0xfffff & ~(VSID_NBPW - 1); 1682 hash |= i; 1683 } 1684 KASSERT(!(moea_vsid_bitmap[n] & mask), 1685 ("Allocating in-use VSID group %#x\n", hash)); 1686 moea_vsid_bitmap[n] |= mask; 1687 for (i = 0; i < 16; i++) 1688 pmap->pm_sr[i] = VSID_MAKE(i, hash); 1689 mtx_unlock(&moea_vsid_mutex); 1690 return; 1691 } 1692 1693 mtx_unlock(&moea_vsid_mutex); 1694 panic("moea_pinit: out of segments"); 1695} 1696 1697/* 1698 * Initialize the pmap associated with process 0. 1699 */ 1700void 1701moea_pinit0(mmu_t mmu, pmap_t pm) 1702{ 1703 1704 PMAP_LOCK_INIT(pm); 1705 moea_pinit(mmu, pm); 1706 bzero(&pm->pm_stats, sizeof(pm->pm_stats)); 1707} 1708 1709/* 1710 * Set the physical protection on the specified range of this map as requested. 1711 */ 1712void 1713moea_protect(mmu_t mmu, pmap_t pm, vm_offset_t sva, vm_offset_t eva, 1714 vm_prot_t prot) 1715{ 1716 struct pvo_entry *pvo, *tpvo, key; 1717 struct pte *pt; 1718 1719 KASSERT(pm == &curproc->p_vmspace->vm_pmap || pm == kernel_pmap, 1720 ("moea_protect: non current pmap")); 1721 1722 if ((prot & VM_PROT_READ) == VM_PROT_NONE) { 1723 moea_remove(mmu, pm, sva, eva); 1724 return; 1725 } 1726 1727 rw_wlock(&pvh_global_lock); 1728 PMAP_LOCK(pm); 1729 key.pvo_vaddr = sva; 1730 for (pvo = RB_NFIND(pvo_tree, &pm->pmap_pvo, &key); 1731 pvo != NULL && PVO_VADDR(pvo) < eva; pvo = tpvo) { 1732 tpvo = RB_NEXT(pvo_tree, &pm->pmap_pvo, pvo); 1733 1734 /* 1735 * Grab the PTE pointer before we diddle with the cached PTE 1736 * copy. 1737 */ 1738 pt = moea_pvo_to_pte(pvo, -1); 1739 /* 1740 * Change the protection of the page. 1741 */ 1742 pvo->pvo_pte.pte.pte_lo &= ~PTE_PP; 1743 pvo->pvo_pte.pte.pte_lo |= PTE_BR; 1744 1745 /* 1746 * If the PVO is in the page table, update that pte as well. 1747 */ 1748 if (pt != NULL) { 1749 moea_pte_change(pt, &pvo->pvo_pte.pte, pvo->pvo_vaddr); 1750 mtx_unlock(&moea_table_mutex); 1751 } 1752 } 1753 rw_wunlock(&pvh_global_lock); 1754 PMAP_UNLOCK(pm); 1755} 1756 1757/* 1758 * Map a list of wired pages into kernel virtual address space. This is 1759 * intended for temporary mappings which do not need page modification or 1760 * references recorded. Existing mappings in the region are overwritten. 1761 */ 1762void 1763moea_qenter(mmu_t mmu, vm_offset_t sva, vm_page_t *m, int count) 1764{ 1765 vm_offset_t va; 1766 1767 va = sva; 1768 while (count-- > 0) { 1769 moea_kenter(mmu, va, VM_PAGE_TO_PHYS(*m)); 1770 va += PAGE_SIZE; 1771 m++; 1772 } 1773} 1774 1775/* 1776 * Remove page mappings from kernel virtual address space. Intended for 1777 * temporary mappings entered by moea_qenter. 1778 */ 1779void 1780moea_qremove(mmu_t mmu, vm_offset_t sva, int count) 1781{ 1782 vm_offset_t va; 1783 1784 va = sva; 1785 while (count-- > 0) { 1786 moea_kremove(mmu, va); 1787 va += PAGE_SIZE; 1788 } 1789} 1790 1791void 1792moea_release(mmu_t mmu, pmap_t pmap) 1793{ 1794 int idx, mask; 1795 1796 /* 1797 * Free segment register's VSID 1798 */ 1799 if (pmap->pm_sr[0] == 0) 1800 panic("moea_release"); 1801 1802 mtx_lock(&moea_vsid_mutex); 1803 idx = VSID_TO_HASH(pmap->pm_sr[0]) & (NPMAPS-1); 1804 mask = 1 << (idx % VSID_NBPW); 1805 idx /= VSID_NBPW; 1806 moea_vsid_bitmap[idx] &= ~mask; 1807 mtx_unlock(&moea_vsid_mutex); 1808} 1809 1810/* 1811 * Remove the given range of addresses from the specified map. 
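 * The PVOs covering the range are found via the pmap's red-black tree
 * (pmap_pvo) rather than by hashing each page address individually.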
1812 */ 1813void 1814moea_remove(mmu_t mmu, pmap_t pm, vm_offset_t sva, vm_offset_t eva) 1815{ 1816 struct pvo_entry *pvo, *tpvo, key; 1817 1818 rw_wlock(&pvh_global_lock); 1819 PMAP_LOCK(pm); 1820 key.pvo_vaddr = sva; 1821 for (pvo = RB_NFIND(pvo_tree, &pm->pmap_pvo, &key); 1822 pvo != NULL && PVO_VADDR(pvo) < eva; pvo = tpvo) { 1823 tpvo = RB_NEXT(pvo_tree, &pm->pmap_pvo, pvo); 1824 moea_pvo_remove(pvo, -1); 1825 } 1826 PMAP_UNLOCK(pm); 1827 rw_wunlock(&pvh_global_lock); 1828} 1829 1830/* 1831 * Remove physical page from all pmaps in which it resides. moea_pvo_remove() 1832 * will reflect changes in pte's back to the vm_page. 1833 */ 1834void 1835moea_remove_all(mmu_t mmu, vm_page_t m) 1836{ 1837 struct pvo_head *pvo_head; 1838 struct pvo_entry *pvo, *next_pvo; 1839 pmap_t pmap; 1840 1841 rw_wlock(&pvh_global_lock); 1842 pvo_head = vm_page_to_pvoh(m); 1843 for (pvo = LIST_FIRST(pvo_head); pvo != NULL; pvo = next_pvo) { 1844 next_pvo = LIST_NEXT(pvo, pvo_vlink); 1845 1846 pmap = pvo->pvo_pmap; 1847 PMAP_LOCK(pmap); 1848 moea_pvo_remove(pvo, -1); 1849 PMAP_UNLOCK(pmap); 1850 } 1851 if ((m->aflags & PGA_WRITEABLE) && moea_query_bit(m, PTE_CHG)) { 1852 moea_attr_clear(m, PTE_CHG); 1853 vm_page_dirty(m); 1854 } 1855 vm_page_aflag_clear(m, PGA_WRITEABLE); 1856 rw_wunlock(&pvh_global_lock); 1857} 1858 1859/* 1860 * Allocate a physical page of memory directly from the phys_avail map. 1861 * Can only be called from moea_bootstrap before avail start and end are 1862 * calculated. 1863 */ 1864static vm_offset_t 1865moea_bootstrap_alloc(vm_size_t size, u_int align) 1866{ 1867 vm_offset_t s, e; 1868 int i, j; 1869 1870 size = round_page(size); 1871 for (i = 0; phys_avail[i + 1] != 0; i += 2) { 1872 if (align != 0) 1873 s = (phys_avail[i] + align - 1) & ~(align - 1); 1874 else 1875 s = phys_avail[i]; 1876 e = s + size; 1877 1878 if (s < phys_avail[i] || e > phys_avail[i + 1]) 1879 continue; 1880 1881 if (s == phys_avail[i]) { 1882 phys_avail[i] += size; 1883 } else if (e == phys_avail[i + 1]) { 1884 phys_avail[i + 1] -= size; 1885 } else { 1886 for (j = phys_avail_count * 2; j > i; j -= 2) { 1887 phys_avail[j] = phys_avail[j - 2]; 1888 phys_avail[j + 1] = phys_avail[j - 1]; 1889 } 1890 1891 phys_avail[i + 3] = phys_avail[i + 1]; 1892 phys_avail[i + 1] = s; 1893 phys_avail[i + 2] = e; 1894 phys_avail_count++; 1895 } 1896 1897 return (s); 1898 } 1899 panic("moea_bootstrap_alloc: could not allocate memory"); 1900} 1901 1902static void 1903moea_syncicache(vm_offset_t pa, vm_size_t len) 1904{ 1905 __syncicache((void *)pa, len); 1906} 1907 1908static int 1909moea_pvo_enter(pmap_t pm, uma_zone_t zone, struct pvo_head *pvo_head, 1910 vm_offset_t va, vm_offset_t pa, u_int pte_lo, int flags) 1911{ 1912 struct pvo_entry *pvo; 1913 u_int sr; 1914 int first; 1915 u_int ptegidx; 1916 int i; 1917 int bootstrap; 1918 1919 moea_pvo_enter_calls++; 1920 first = 0; 1921 bootstrap = 0; 1922 1923 /* 1924 * Compute the PTE Group index. 1925 */ 1926 va &= ~ADDR_POFF; 1927 sr = va_to_sr(pm->pm_sr, va); 1928 ptegidx = va_to_pteg(sr, va); 1929 1930 /* 1931 * Remove any existing mapping for this page. Reuse the pvo entry if 1932 * there is a mapping. 1933 */ 1934 mtx_lock(&moea_table_mutex); 1935 LIST_FOREACH(pvo, &moea_pvo_table[ptegidx], pvo_olink) { 1936 if (pvo->pvo_pmap == pm && PVO_VADDR(pvo) == va) { 1937 if ((pvo->pvo_pte.pte.pte_lo & PTE_RPGN) == pa && 1938 (pvo->pvo_pte.pte.pte_lo & PTE_PP) == 1939 (pte_lo & PTE_PP)) { 1940 /* 1941 * The PTE is not changing. 

static void
moea_syncicache(vm_offset_t pa, vm_size_t len)
{
	__syncicache((void *)pa, len);
}

static int
moea_pvo_enter(pmap_t pm, uma_zone_t zone, struct pvo_head *pvo_head,
    vm_offset_t va, vm_offset_t pa, u_int pte_lo, int flags)
{
	struct pvo_entry *pvo;
	u_int sr;
	int first;
	u_int ptegidx;
	int i;
	int bootstrap;

	moea_pvo_enter_calls++;
	first = 0;
	bootstrap = 0;

	/*
	 * Compute the PTE Group index.
	 */
	va &= ~ADDR_POFF;
	sr = va_to_sr(pm->pm_sr, va);
	ptegidx = va_to_pteg(sr, va);

	/*
	 * Remove any existing mapping for this page.  Reuse the pvo entry if
	 * there is a mapping.
	 */
	mtx_lock(&moea_table_mutex);
	LIST_FOREACH(pvo, &moea_pvo_table[ptegidx], pvo_olink) {
		if (pvo->pvo_pmap == pm && PVO_VADDR(pvo) == va) {
			if ((pvo->pvo_pte.pte.pte_lo & PTE_RPGN) == pa &&
			    (pvo->pvo_pte.pte.pte_lo & PTE_PP) ==
			    (pte_lo & PTE_PP)) {
				/*
				 * The PTE is not changing.  Instead, this may
				 * be a request to change the mapping's wired
				 * attribute.
				 */
				mtx_unlock(&moea_table_mutex);
				if ((flags & PVO_WIRED) != 0 &&
				    (pvo->pvo_vaddr & PVO_WIRED) == 0) {
					pvo->pvo_vaddr |= PVO_WIRED;
					pm->pm_stats.wired_count++;
				} else if ((flags & PVO_WIRED) == 0 &&
				    (pvo->pvo_vaddr & PVO_WIRED) != 0) {
					pvo->pvo_vaddr &= ~PVO_WIRED;
					pm->pm_stats.wired_count--;
				}
				return (0);
			}
			moea_pvo_remove(pvo, -1);
			break;
		}
	}

	/*
	 * If we aren't overwriting a mapping, try to allocate.
	 */
	if (moea_initialized) {
		pvo = uma_zalloc(zone, M_NOWAIT);
	} else {
		if (moea_bpvo_pool_index >= BPVO_POOL_SIZE) {
			panic("moea_enter: bpvo pool exhausted, %d, %d, %d",
			    moea_bpvo_pool_index, BPVO_POOL_SIZE,
			    BPVO_POOL_SIZE * sizeof(struct pvo_entry));
		}
		pvo = &moea_bpvo_pool[moea_bpvo_pool_index];
		moea_bpvo_pool_index++;
		bootstrap = 1;
	}

	if (pvo == NULL) {
		mtx_unlock(&moea_table_mutex);
		return (ENOMEM);
	}

	moea_pvo_entries++;
	pvo->pvo_vaddr = va;
	pvo->pvo_pmap = pm;
	LIST_INSERT_HEAD(&moea_pvo_table[ptegidx], pvo, pvo_olink);
	pvo->pvo_vaddr &= ~ADDR_POFF;
	if (flags & PVO_WIRED)
		pvo->pvo_vaddr |= PVO_WIRED;
	if (pvo_head != &moea_pvo_kunmanaged)
		pvo->pvo_vaddr |= PVO_MANAGED;
	if (bootstrap)
		pvo->pvo_vaddr |= PVO_BOOTSTRAP;

	moea_pte_create(&pvo->pvo_pte.pte, sr, va, pa | pte_lo);

	/*
	 * Add to pmap list.
	 */
	RB_INSERT(pvo_tree, &pm->pmap_pvo, pvo);

	/*
	 * Remember if the list was empty and therefore will be the first
	 * item.
	 */
	if (LIST_FIRST(pvo_head) == NULL)
		first = 1;
	LIST_INSERT_HEAD(pvo_head, pvo, pvo_vlink);

	/* The wired attribute lives in pvo_vaddr, not in the PTE. */
	if (pvo->pvo_vaddr & PVO_WIRED)
		pm->pm_stats.wired_count++;
	pm->pm_stats.resident_count++;

	i = moea_pte_insert(ptegidx, &pvo->pvo_pte.pte);
	KASSERT(i < 8, ("Invalid PTE index"));
	if (i >= 0) {
		PVO_PTEGIDX_SET(pvo, i);
	} else {
		panic("moea_pvo_enter: overflow");
		moea_pte_overflow++;
	}
	mtx_unlock(&moea_table_mutex);

	return (first ? ENOENT : 0);
}
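
/*
 * Illustrative sketch (not compiled): because PVO virtual addresses are page
 * aligned (va &= ~ADDR_POFF above), the low bits of pvo_vaddr are free to
 * carry the per-mapping flags set above (PVO_WIRED, PVO_MANAGED,
 * PVO_BOOTSTRAP), and PVO_VADDR() masks them back off for comparisons.  The
 * helper below only restates that convention; its name is invented.  Note
 * also the return convention above: ENOENT signals that this was the first
 * mapping of the page, which a caller can use to distinguish "new page" from
 * "additional alias".
 */
#if 0
static int
pvo_is_wired_example(const struct pvo_entry *pvo)
{

	/* The wired bit is a pvo_vaddr flag, not a hardware PTE bit. */
	return ((pvo->pvo_vaddr & PVO_WIRED) != 0);
}
#endif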
2067 */ 2068 LIST_REMOVE(pvo, pvo_vlink); 2069 RB_REMOVE(pvo_tree, &pvo->pvo_pmap->pmap_pvo, pvo); 2070 2071 /* 2072 * Remove this from the overflow list and return it to the pool 2073 * if we aren't going to reuse it. 2074 */ 2075 LIST_REMOVE(pvo, pvo_olink); 2076 if (!(pvo->pvo_vaddr & PVO_BOOTSTRAP)) 2077 uma_zfree(pvo->pvo_vaddr & PVO_MANAGED ? moea_mpvo_zone : 2078 moea_upvo_zone, pvo); 2079 moea_pvo_entries--; 2080 moea_pvo_remove_calls++; 2081} 2082 2083static __inline int 2084moea_pvo_pte_index(const struct pvo_entry *pvo, int ptegidx) 2085{ 2086 int pteidx; 2087 2088 /* 2089 * We can find the actual pte entry without searching by grabbing 2090 * the PTEG index from 3 unused bits in pte_lo[11:9] and by 2091 * noticing the HID bit. 2092 */ 2093 pteidx = ptegidx * 8 + PVO_PTEGIDX_GET(pvo); 2094 if (pvo->pvo_pte.pte.pte_hi & PTE_HID) 2095 pteidx ^= moea_pteg_mask * 8; 2096 2097 return (pteidx); 2098} 2099 2100static struct pvo_entry * 2101moea_pvo_find_va(pmap_t pm, vm_offset_t va, int *pteidx_p) 2102{ 2103 struct pvo_entry *pvo; 2104 int ptegidx; 2105 u_int sr; 2106 2107 va &= ~ADDR_POFF; 2108 sr = va_to_sr(pm->pm_sr, va); 2109 ptegidx = va_to_pteg(sr, va); 2110 2111 mtx_lock(&moea_table_mutex); 2112 LIST_FOREACH(pvo, &moea_pvo_table[ptegidx], pvo_olink) { 2113 if (pvo->pvo_pmap == pm && PVO_VADDR(pvo) == va) { 2114 if (pteidx_p) 2115 *pteidx_p = moea_pvo_pte_index(pvo, ptegidx); 2116 break; 2117 } 2118 } 2119 mtx_unlock(&moea_table_mutex); 2120 2121 return (pvo); 2122} 2123 2124static struct pte * 2125moea_pvo_to_pte(const struct pvo_entry *pvo, int pteidx) 2126{ 2127 struct pte *pt; 2128 2129 /* 2130 * If we haven't been supplied the ptegidx, calculate it. 2131 */ 2132 if (pteidx == -1) { 2133 int ptegidx; 2134 u_int sr; 2135 2136 sr = va_to_sr(pvo->pvo_pmap->pm_sr, pvo->pvo_vaddr); 2137 ptegidx = va_to_pteg(sr, pvo->pvo_vaddr); 2138 pteidx = moea_pvo_pte_index(pvo, ptegidx); 2139 } 2140 2141 pt = &moea_pteg_table[pteidx >> 3].pt[pteidx & 7]; 2142 mtx_lock(&moea_table_mutex); 2143 2144 if ((pvo->pvo_pte.pte.pte_hi & PTE_VALID) && !PVO_PTEGIDX_ISSET(pvo)) { 2145 panic("moea_pvo_to_pte: pvo %p has valid pte in pvo but no " 2146 "valid pte index", pvo); 2147 } 2148 2149 if ((pvo->pvo_pte.pte.pte_hi & PTE_VALID) == 0 && PVO_PTEGIDX_ISSET(pvo)) { 2150 panic("moea_pvo_to_pte: pvo %p has valid pte index in pvo " 2151 "pvo but no valid pte", pvo); 2152 } 2153 2154 if ((pt->pte_hi ^ (pvo->pvo_pte.pte.pte_hi & ~PTE_VALID)) == PTE_VALID) { 2155 if ((pvo->pvo_pte.pte.pte_hi & PTE_VALID) == 0) { 2156 panic("moea_pvo_to_pte: pvo %p has valid pte in " 2157 "moea_pteg_table %p but invalid in pvo", pvo, pt); 2158 } 2159 2160 if (((pt->pte_lo ^ pvo->pvo_pte.pte.pte_lo) & ~(PTE_CHG|PTE_REF)) 2161 != 0) { 2162 panic("moea_pvo_to_pte: pvo %p pte does not match " 2163 "pte %p in moea_pteg_table", pvo, pt); 2164 } 2165 2166 mtx_assert(&moea_table_mutex, MA_OWNED); 2167 return (pt); 2168 } 2169 2170 if (pvo->pvo_pte.pte.pte_hi & PTE_VALID) { 2171 panic("moea_pvo_to_pte: pvo %p has invalid pte %p in " 2172 "moea_pteg_table but valid in pvo: %8x, %8x", pvo, pt, pvo->pvo_pte.pte.pte_hi, pt->pte_hi); 2173 } 2174 2175 mtx_unlock(&moea_table_mutex); 2176 return (NULL); 2177} 2178 2179/* 2180 * XXX: THIS STUFF SHOULD BE IN pte.c? 

/*
 * XXX: THIS STUFF SHOULD BE IN pte.c?
 */
int
moea_pte_spill(vm_offset_t addr)
{
	struct pvo_entry *source_pvo, *victim_pvo;
	struct pvo_entry *pvo;
	int ptegidx, i, j;
	u_int sr;
	struct pteg *pteg;
	struct pte *pt;

	moea_pte_spills++;

	sr = mfsrin(addr);
	ptegidx = va_to_pteg(sr, addr);

	/*
	 * Have to substitute some entry.  Use the primary hash for this.
	 * Use low bits of timebase as random generator.
	 */
	pteg = &moea_pteg_table[ptegidx];
	mtx_lock(&moea_table_mutex);
	__asm __volatile("mftb %0" : "=r"(i));
	i &= 7;
	pt = &pteg->pt[i];

	source_pvo = NULL;
	victim_pvo = NULL;
	LIST_FOREACH(pvo, &moea_pvo_table[ptegidx], pvo_olink) {
		/*
		 * We need to find a pvo entry for this address.
		 */
		if (source_pvo == NULL &&
		    moea_pte_match(&pvo->pvo_pte.pte, sr, addr,
		    pvo->pvo_pte.pte.pte_hi & PTE_HID)) {
			/*
			 * Now found an entry to be spilled into the pteg.
			 * The PTE is now valid, so we know it's active.
			 */
			j = moea_pte_insert(ptegidx, &pvo->pvo_pte.pte);

			if (j >= 0) {
				PVO_PTEGIDX_SET(pvo, j);
				moea_pte_overflow--;
				mtx_unlock(&moea_table_mutex);
				return (1);
			}

			source_pvo = pvo;

			if (victim_pvo != NULL)
				break;
		}

		/*
		 * We also need the pvo entry of the victim we are replacing
		 * so save the R & C bits of the PTE.
		 */
		if ((pt->pte_hi & PTE_HID) == 0 && victim_pvo == NULL &&
		    moea_pte_compare(pt, &pvo->pvo_pte.pte)) {
			victim_pvo = pvo;
			if (source_pvo != NULL)
				break;
		}
	}

	if (source_pvo == NULL) {
		mtx_unlock(&moea_table_mutex);
		return (0);
	}

	if (victim_pvo == NULL) {
		if ((pt->pte_hi & PTE_HID) == 0)
			panic("moea_pte_spill: victim p-pte (%p) has no pvo "
			    "entry", pt);

		/*
		 * If this is a secondary PTE, we need to search its primary
		 * pvo bucket for the matching PVO.
		 */
		LIST_FOREACH(pvo, &moea_pvo_table[ptegidx ^ moea_pteg_mask],
		    pvo_olink) {
			/*
			 * We also need the pvo entry of the victim we are
			 * replacing so save the R & C bits of the PTE.
			 */
			if (moea_pte_compare(pt, &pvo->pvo_pte.pte)) {
				victim_pvo = pvo;
				break;
			}
		}

		if (victim_pvo == NULL)
			panic("moea_pte_spill: victim s-pte (%p) has no pvo "
			    "entry", pt);
	}

	/*
	 * We are invalidating the TLB entry for the EA we are replacing even
	 * though it's valid.  If we don't, we lose any ref/chg bit changes
	 * contained in the TLB entry.
	 */
	source_pvo->pvo_pte.pte.pte_hi &= ~PTE_HID;

	moea_pte_unset(pt, &victim_pvo->pvo_pte.pte, victim_pvo->pvo_vaddr);
	moea_pte_set(pt, &source_pvo->pvo_pte.pte);

	PVO_PTEGIDX_CLR(victim_pvo);
	PVO_PTEGIDX_SET(source_pvo, i);
	moea_pte_replacements++;

	mtx_unlock(&moea_table_mutex);
	return (1);
}

static __inline struct pvo_entry *
moea_pte_spillable_ident(u_int ptegidx)
{
	struct pte *pt;
	struct pvo_entry *pvo_walk, *pvo = NULL;

	LIST_FOREACH(pvo_walk, &moea_pvo_table[ptegidx], pvo_olink) {
		if (pvo_walk->pvo_vaddr & PVO_WIRED)
			continue;

		if (!(pvo_walk->pvo_pte.pte.pte_hi & PTE_VALID))
			continue;

		pt = moea_pvo_to_pte(pvo_walk, -1);

		if (pt == NULL)
			continue;

		pvo = pvo_walk;

		mtx_unlock(&moea_table_mutex);
		if (!(pt->pte_lo & PTE_REF))
			return (pvo_walk);
	}

	return (pvo);
}

static int
moea_pte_insert(u_int ptegidx, struct pte *pvo_pt)
{
	struct pte *pt;
	struct pvo_entry *victim_pvo;
	int i;
	int victim_idx;
	u_int pteg_bkpidx = ptegidx;

	mtx_assert(&moea_table_mutex, MA_OWNED);

	/*
	 * First try primary hash.
	 */
	for (pt = moea_pteg_table[ptegidx].pt, i = 0; i < 8; i++, pt++) {
		if ((pt->pte_hi & PTE_VALID) == 0) {
			pvo_pt->pte_hi &= ~PTE_HID;
			moea_pte_set(pt, pvo_pt);
			return (i);
		}
	}

	/*
	 * Now try secondary hash.
	 */
	ptegidx ^= moea_pteg_mask;

	for (pt = moea_pteg_table[ptegidx].pt, i = 0; i < 8; i++, pt++) {
		if ((pt->pte_hi & PTE_VALID) == 0) {
			pvo_pt->pte_hi |= PTE_HID;
			moea_pte_set(pt, pvo_pt);
			return (i);
		}
	}

	/* Try again, but this time try to force a PTE out. */
	ptegidx = pteg_bkpidx;

	victim_pvo = moea_pte_spillable_ident(ptegidx);
	if (victim_pvo == NULL) {
		ptegidx ^= moea_pteg_mask;
		victim_pvo = moea_pte_spillable_ident(ptegidx);
	}

	if (victim_pvo == NULL) {
		panic("moea_pte_insert: overflow");
		return (-1);
	}

	victim_idx = moea_pvo_pte_index(victim_pvo, ptegidx);

	if (pteg_bkpidx == ptegidx)
		pvo_pt->pte_hi &= ~PTE_HID;
	else
		pvo_pt->pte_hi |= PTE_HID;

	/*
	 * Synchronize the sacrifice PTE with its PVO, then mark both
	 * invalid.  The PVO will be reused when/if the VM system comes
	 * here after a fault.
	 */
	pt = &moea_pteg_table[victim_idx >> 3].pt[victim_idx & 7];

	if (pt->pte_hi != victim_pvo->pvo_pte.pte.pte_hi)
		panic("Victim PVO doesn't match PTE! PVO: %8x, PTE: %8x",
		    victim_pvo->pvo_pte.pte.pte_hi, pt->pte_hi);

	/*
	 * Set the new PTE.
	 */
	moea_pte_unset(pt, &victim_pvo->pvo_pte.pte, victim_pvo->pvo_vaddr);
	PVO_PTEGIDX_CLR(victim_pvo);
	moea_pte_overflow++;
	moea_pte_set(pt, pvo_pt);

	return (victim_idx & 7);
}

static boolean_t
moea_query_bit(vm_page_t m, int ptebit)
{
	struct pvo_entry *pvo;
	struct pte *pt;

	rw_assert(&pvh_global_lock, RA_WLOCKED);
	if (moea_attr_fetch(m) & ptebit)
		return (TRUE);

	LIST_FOREACH(pvo, vm_page_to_pvoh(m), pvo_vlink) {

		/*
		 * See if we saved the bit off.  If so, cache it and return
		 * success.
		 */
		if (pvo->pvo_pte.pte.pte_lo & ptebit) {
			moea_attr_save(m, ptebit);
			return (TRUE);
		}
	}

	/*
	 * No luck, now go through the hard part of looking at the PTEs
	 * themselves.  Sync so that any pending REF/CHG bits are flushed to
	 * the PTEs.
	 */
	powerpc_sync();
	LIST_FOREACH(pvo, vm_page_to_pvoh(m), pvo_vlink) {

		/*
		 * See if this pvo has a valid PTE.  If so, fetch the
		 * REF/CHG bits from the valid PTE.  If the appropriate
		 * ptebit is set, cache it and return success.
		 */
		pt = moea_pvo_to_pte(pvo, -1);
		if (pt != NULL) {
			moea_pte_synch(pt, &pvo->pvo_pte.pte);
			mtx_unlock(&moea_table_mutex);
			if (pvo->pvo_pte.pte.pte_lo & ptebit) {
				moea_attr_save(m, ptebit);
				return (TRUE);
			}
		}
	}

	return (FALSE);
}

static u_int
moea_clear_bit(vm_page_t m, int ptebit)
{
	u_int count;
	struct pvo_entry *pvo;
	struct pte *pt;

	rw_assert(&pvh_global_lock, RA_WLOCKED);

	/*
	 * Clear the cached value.
	 */
	moea_attr_clear(m, ptebit);

	/*
	 * Sync so that any pending REF/CHG bits are flushed to the PTEs (so
	 * we can reset the right ones).  Note that since the pvo entries and
	 * list heads are accessed via BAT0 and are never placed in the page
	 * table, we don't have to worry about further accesses setting the
	 * REF/CHG bits.
	 */
	powerpc_sync();

	/*
	 * For each pvo entry, clear the pvo's ptebit.  If this pvo has a
	 * valid pte clear the ptebit from the valid pte.
	 */
	count = 0;
	LIST_FOREACH(pvo, vm_page_to_pvoh(m), pvo_vlink) {
		pt = moea_pvo_to_pte(pvo, -1);
		if (pt != NULL) {
			moea_pte_synch(pt, &pvo->pvo_pte.pte);
			if (pvo->pvo_pte.pte.pte_lo & ptebit) {
				count++;
				moea_pte_clear(pt, PVO_VADDR(pvo), ptebit);
			}
			mtx_unlock(&moea_table_mutex);
		}
		pvo->pvo_pte.pte.pte_lo &= ~ptebit;
	}

	return (count);
}
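
/*
 * Illustrative sketch (not compiled): how a ts_referenced-style consumer
 * would use moea_clear_bit() above.  The function name is invented for
 * illustration; what is real is the locking contract (pvh_global_lock held
 * for writing, as asserted above) and the return value, which counts how
 * many mappings of the page had the bit set before it was cleared.
 */
#if 0
static u_int
page_ts_referenced_example(vm_page_t m)
{
	u_int count;

	rw_wlock(&pvh_global_lock);
	/* Clear PTE_REF in every mapping; report how many were set. */
	count = moea_clear_bit(m, PTE_REF);
	rw_wunlock(&pvh_global_lock);

	return (count);
}
#endif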

/*
 * Return 0 if the physical range is encompassed by battable[idx],
 * otherwise an errno value describing why it is not.
 */
static int
moea_bat_mapped(int idx, vm_offset_t pa, vm_size_t size)
{
	u_int prot;
	u_int32_t start;
	u_int32_t end;
	u_int32_t bat_ble;

	/*
	 * Return immediately if not a valid mapping
	 */
	if (!(battable[idx].batu & BAT_Vs))
		return (EINVAL);

	/*
	 * The BAT entry must be cache-inhibited, guarded, and r/w
	 * so it can function as an i/o page
	 */
	prot = battable[idx].batl & (BAT_I|BAT_G|BAT_PP_RW);
	if (prot != (BAT_I|BAT_G|BAT_PP_RW))
		return (EPERM);

	/*
	 * The address should be within the BAT range.  Assume that the
	 * start address in the BAT has the correct alignment (thus
	 * not requiring masking)
	 */
	start = battable[idx].batl & BAT_PBS;
	bat_ble = (battable[idx].batu & ~(BAT_EBS)) | 0x03;
	end = start | (bat_ble << 15) | 0x7fff;

	if ((pa < start) || ((pa + size) > end))
		return (ERANGE);

	return (0);
}

boolean_t
moea_dev_direct_mapped(mmu_t mmu, vm_paddr_t pa, vm_size_t size)
{
	int i;

	/*
	 * This currently does not work for entries that
	 * overlap 256M BAT segments.
	 */

	for (i = 0; i < 16; i++)
		if (moea_bat_mapped(i, pa, size) == 0)
			return (0);

	return (EFAULT);
}
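
/*
 * Illustrative sketch (not compiled): the bounds arithmetic used by
 * moea_bat_mapped() above.  BAT_PBS masks out the physical block start from
 * the lower BAT register, and the block-length bits from the upper BAT
 * register, shifted left 15 and OR'd with 0x7fff, give the last byte of the
 * block.  The register values below describe a hypothetical 256MB I/O BAT
 * at 0x80000000 and are invented for illustration.
 */
#if 0
static void
bat_bounds_example(void)
{
	u_int32_t start, bat_ble, end;

	start = 0x80000000;			/* batl & BAT_PBS */
	bat_ble = (0x7ff << 2) | 0x03;		/* 256MB block length field */
	end = start | (bat_ble << 15) | 0x7fff;

	/* end is now 0x8fffffff, the last byte covered by this BAT. */
	(void)end;
}
#endif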

/*
 * Map a set of physical memory pages into the kernel virtual
 * address space.  Return a pointer to where it is mapped.  This
 * routine is intended to be used for mapping device memory,
 * NOT real memory.
 */
void *
moea_mapdev(mmu_t mmu, vm_paddr_t pa, vm_size_t size)
{

	return (moea_mapdev_attr(mmu, pa, size, VM_MEMATTR_DEFAULT));
}

void *
moea_mapdev_attr(mmu_t mmu, vm_offset_t pa, vm_size_t size, vm_memattr_t ma)
{
	vm_offset_t va, tmpva, ppa, offset;
	int i;

	ppa = trunc_page(pa);
	offset = pa & PAGE_MASK;
	size = roundup(offset + size, PAGE_SIZE);

	/*
	 * If the physical address lies within a valid BAT table entry,
	 * return the 1:1 mapping.  This currently doesn't work
	 * for regions that overlap 256M BAT segments.
	 */
	for (i = 0; i < 16; i++) {
		if (moea_bat_mapped(i, pa, size) == 0)
			return ((void *) pa);
	}

	va = kva_alloc(size);
	if (!va)
		panic("moea_mapdev: Couldn't alloc kernel virtual memory");

	for (tmpva = va; size > 0;) {
		moea_kenter_attr(mmu, tmpva, ppa, ma);
		tlbie(tmpva);
		size -= PAGE_SIZE;
		tmpva += PAGE_SIZE;
		ppa += PAGE_SIZE;
	}

	return ((void *)(va + offset));
}

void
moea_unmapdev(mmu_t mmu, vm_offset_t va, vm_size_t size)
{
	vm_offset_t base, offset;

	/*
	 * If this is outside kernel virtual space, then it's a
	 * battable entry and doesn't require unmapping
	 */
	if ((va >= VM_MIN_KERNEL_ADDRESS) && (va <= virtual_end)) {
		base = trunc_page(va);
		offset = va & PAGE_MASK;
		size = roundup(offset + size, PAGE_SIZE);
		kva_free(base, size);
	}
}
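
/*
 * Illustrative sketch (not compiled): the usual pairing of the two routines
 * above when temporary access to device registers is needed.  The physical
 * address and length are invented for illustration; callers normally reach
 * this code through the machine-independent pmap_mapdev()/pmap_unmapdev()
 * wrappers rather than calling the moea functions directly.
 */
#if 0
static void
mapdev_usage_example(mmu_t mmu)
{
	volatile uint32_t *regs;

	/* Map one page of (hypothetical) device register space. */
	regs = moea_mapdev(mmu, 0xf0000000, PAGE_SIZE);

	(void)regs[0];		/* read a register through the mapping */

	/* Tear the mapping down; a BAT-covered 1:1 mapping is a no-op here. */
	moea_unmapdev(mmu, (vm_offset_t)regs, PAGE_SIZE);
}
#endif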

static void
moea_sync_icache(mmu_t mmu, pmap_t pm, vm_offset_t va, vm_size_t sz)
{
	struct pvo_entry *pvo;
	vm_offset_t lim;
	vm_paddr_t pa;
	vm_size_t len;

	PMAP_LOCK(pm);
	while (sz > 0) {
		lim = round_page(va);
		len = MIN(lim - va, sz);
		pvo = moea_pvo_find_va(pm, va & ~ADDR_POFF, NULL);
		if (pvo != NULL) {
			pa = (pvo->pvo_pte.pte.pte_lo & PTE_RPGN) |
			    (va & ADDR_POFF);
			moea_syncicache(pa, len);
		}
		va += len;
		sz -= len;
	}
	PMAP_UNLOCK(pm);
}

vm_offset_t
moea_dumpsys_map(mmu_t mmu, struct pmap_md *md, vm_size_t ofs,
    vm_size_t *sz)
{
	if (md->md_vaddr == ~0UL)
		return (md->md_paddr + ofs);
	else
		return (md->md_vaddr + ofs);
}

struct pmap_md *
moea_scan_md(mmu_t mmu, struct pmap_md *prev)
{
	static struct pmap_md md;
	struct pvo_entry *pvo;
	vm_offset_t va;

	if (dumpsys_minidump) {
		md.md_paddr = ~0UL;	/* Minidumps use virtual addresses. */
		if (prev == NULL) {
			/* 1st: kernel .data and .bss. */
			md.md_index = 1;
			md.md_vaddr = trunc_page((uintptr_t)_etext);
			md.md_size = round_page((uintptr_t)_end) - md.md_vaddr;
			return (&md);
		}
		switch (prev->md_index) {
		case 1:
			/* 2nd: msgbuf and tables (see pmap_bootstrap()). */
			md.md_index = 2;
			md.md_vaddr = (vm_offset_t)msgbufp->msg_ptr;
			md.md_size = round_page(msgbufp->msg_size);
			break;
		case 2:
			/* 3rd: kernel VM. */
			va = prev->md_vaddr + prev->md_size;
			/* Find start of next chunk (from va). */
			while (va < virtual_end) {
				/* Don't dump the buffer cache. */
				if (va >= kmi.buffer_sva &&
				    va < kmi.buffer_eva) {
					va = kmi.buffer_eva;
					continue;
				}
				pvo = moea_pvo_find_va(kernel_pmap,
				    va & ~ADDR_POFF, NULL);
				if (pvo != NULL &&
				    (pvo->pvo_pte.pte.pte_hi & PTE_VALID))
					break;
				va += PAGE_SIZE;
			}
			if (va < virtual_end) {
				md.md_vaddr = va;
				va += PAGE_SIZE;
				/* Find last page in chunk. */
				while (va < virtual_end) {
					/* Don't run into the buffer cache. */
					if (va == kmi.buffer_sva)
						break;
					pvo = moea_pvo_find_va(kernel_pmap,
					    va & ~ADDR_POFF, NULL);
					if (pvo == NULL ||
					    !(pvo->pvo_pte.pte.pte_hi & PTE_VALID))
						break;
					va += PAGE_SIZE;
				}
				md.md_size = va - md.md_vaddr;
				break;
			}
			md.md_index = 3;
			/* FALLTHROUGH */
		default:
			return (NULL);
		}
	} else { /* full (physical) memory dump */
		mem_regions(&pregions, &pregions_sz,
		    &regions, &regions_sz);

		if (prev == NULL) {
			/* first physical chunk. */
			md.md_paddr = pregions[0].mr_start;
			md.md_size = pregions[0].mr_size;
			md.md_vaddr = ~0UL;
			md.md_index = 1;
		} else if (md.md_index < pregions_sz) {
			md.md_paddr = pregions[md.md_index].mr_start;
			md.md_size = pregions[md.md_index].mr_size;
			md.md_vaddr = ~0UL;
			md.md_index++;
		} else {
			/* There's no next physical chunk. */
			return (NULL);
		}
	}

	return (&md);
}
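
/*
 * Illustrative sketch (not compiled): how a dump writer would walk the
 * chunks that moea_scan_md() describes, translating each offset with
 * moea_dumpsys_map().  md_vaddr == ~0UL marks a physical chunk, so the
 * translation yields physical addresses for full dumps and virtual ones for
 * minidumps.  The dump_chunk() I/O helper and the function name are invented
 * for illustration.
 */
#if 0
static void
dump_walk_example(mmu_t mmu)
{
	struct pmap_md *md;
	vm_size_t ofs, sz;
	vm_offset_t addr;

	for (md = moea_scan_md(mmu, NULL); md != NULL;
	    md = moea_scan_md(mmu, md)) {
		for (ofs = 0; ofs < md->md_size; ofs += PAGE_SIZE) {
			sz = PAGE_SIZE;
			addr = moea_dumpsys_map(mmu, md, ofs, &sz);
			dump_chunk(addr, sz);	/* hypothetical I/O helper */
		}
	}
}
#endif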