mmu_oea64.c revision 204128
1/*- 2 * Copyright (c) 2001 The NetBSD Foundation, Inc. 3 * All rights reserved. 4 * 5 * This code is derived from software contributed to The NetBSD Foundation 6 * by Matt Thomas <matt@3am-software.com> of Allegro Networks, Inc. 7 * 8 * Redistribution and use in source and binary forms, with or without 9 * modification, are permitted provided that the following conditions 10 * are met: 11 * 1. Redistributions of source code must retain the above copyright 12 * notice, this list of conditions and the following disclaimer. 13 * 2. Redistributions in binary form must reproduce the above copyright 14 * notice, this list of conditions and the following disclaimer in the 15 * documentation and/or other materials provided with the distribution. 16 * 3. All advertising materials mentioning features or use of this software 17 * must display the following acknowledgement: 18 * This product includes software developed by the NetBSD 19 * Foundation, Inc. and its contributors. 20 * 4. Neither the name of The NetBSD Foundation nor the names of its 21 * contributors may be used to endorse or promote products derived 22 * from this software without specific prior written permission. 23 * 24 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS 25 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED 26 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR 27 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS 28 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 29 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 30 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 31 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 32 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 33 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 34 * POSSIBILITY OF SUCH DAMAGE. 35 */ 36/*- 37 * Copyright (C) 1995, 1996 Wolfgang Solfrank. 38 * Copyright (C) 1995, 1996 TooLs GmbH. 39 * All rights reserved. 40 * 41 * Redistribution and use in source and binary forms, with or without 42 * modification, are permitted provided that the following conditions 43 * are met: 44 * 1. Redistributions of source code must retain the above copyright 45 * notice, this list of conditions and the following disclaimer. 46 * 2. Redistributions in binary form must reproduce the above copyright 47 * notice, this list of conditions and the following disclaimer in the 48 * documentation and/or other materials provided with the distribution. 49 * 3. All advertising materials mentioning features or use of this software 50 * must display the following acknowledgement: 51 * This product includes software developed by TooLs GmbH. 52 * 4. The name of TooLs GmbH may not be used to endorse or promote products 53 * derived from this software without specific prior written permission. 54 * 55 * THIS SOFTWARE IS PROVIDED BY TOOLS GMBH ``AS IS'' AND ANY EXPRESS OR 56 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES 57 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
58 * IN NO EVENT SHALL TOOLS GMBH BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 59 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, 60 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; 61 * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, 62 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR 63 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF 64 * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 65 * 66 * $NetBSD: pmap.c,v 1.28 2000/03/26 20:42:36 kleink Exp $ 67 */ 68/*- 69 * Copyright (C) 2001 Benno Rice. 70 * All rights reserved. 71 * 72 * Redistribution and use in source and binary forms, with or without 73 * modification, are permitted provided that the following conditions 74 * are met: 75 * 1. Redistributions of source code must retain the above copyright 76 * notice, this list of conditions and the following disclaimer. 77 * 2. Redistributions in binary form must reproduce the above copyright 78 * notice, this list of conditions and the following disclaimer in the 79 * documentation and/or other materials provided with the distribution. 80 * 81 * THIS SOFTWARE IS PROVIDED BY Benno Rice ``AS IS'' AND ANY EXPRESS OR 82 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES 83 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 84 * IN NO EVENT SHALL TOOLS GMBH BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 85 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, 86 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; 87 * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, 88 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR 89 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF 90 * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 91 */ 92 93#include <sys/cdefs.h> 94__FBSDID("$FreeBSD: head/sys/powerpc/aim/mmu_oea64.c 204128 2010-02-20 16:23:29Z nwhitehorn $"); 95 96/* 97 * Manages physical address maps. 98 * 99 * In addition to hardware address maps, this module is called upon to 100 * provide software-use-only maps which may or may not be stored in the 101 * same form as hardware maps. These pseudo-maps are used to store 102 * intermediate results from copy operations to and from address spaces. 103 * 104 * Since the information managed by this module is also stored by the 105 * logical address mapping module, this module may throw away valid virtual 106 * to physical mappings at almost any time. However, invalidations of 107 * mappings must be done as requested. 108 * 109 * In order to cope with hardware architectures which make virtual to 110 * physical map invalidates expensive, this module may delay invalidate 111 * reduced protection operations until such time as they are actually 112 * necessary. This module is given full information as to which processors 113 * are currently using which maps, and to when physical maps must be made 114 * correct. 
115 */ 116 117#include "opt_kstack_pages.h" 118 119#include <sys/param.h> 120#include <sys/kernel.h> 121#include <sys/ktr.h> 122#include <sys/lock.h> 123#include <sys/msgbuf.h> 124#include <sys/mutex.h> 125#include <sys/proc.h> 126#include <sys/sysctl.h> 127#include <sys/systm.h> 128#include <sys/vmmeter.h> 129 130#include <sys/kdb.h> 131 132#include <dev/ofw/openfirm.h> 133 134#include <vm/vm.h> 135#include <vm/vm_param.h> 136#include <vm/vm_kern.h> 137#include <vm/vm_page.h> 138#include <vm/vm_map.h> 139#include <vm/vm_object.h> 140#include <vm/vm_extern.h> 141#include <vm/vm_pageout.h> 142#include <vm/vm_pager.h> 143#include <vm/uma.h> 144 145#include <machine/cpu.h> 146#include <machine/platform.h> 147#include <machine/frame.h> 148#include <machine/md_var.h> 149#include <machine/psl.h> 150#include <machine/bat.h> 151#include <machine/pte.h> 152#include <machine/sr.h> 153#include <machine/trap.h> 154#include <machine/mmuvar.h> 155 156#include "mmu_if.h" 157 158#define MOEA_DEBUG 159 160#define TODO panic("%s: not implemented", __func__); 161 162static __inline u_int32_t 163cntlzw(volatile u_int32_t a) { 164 u_int32_t b; 165 __asm ("cntlzw %0, %1" : "=r"(b) : "r"(a)); 166 return b; 167} 168 169static __inline uint64_t 170va_to_vsid(pmap_t pm, vm_offset_t va) 171{ 172 return ((pm->pm_sr[(uintptr_t)va >> ADDR_SR_SHFT]) & SR_VSID_MASK); 173} 174 175#define PTESYNC() __asm __volatile("ptesync"); 176#define TLBSYNC() __asm __volatile("tlbsync; ptesync"); 177#define SYNC() __asm __volatile("sync"); 178#define EIEIO() __asm __volatile("eieio"); 179 180/* 181 * The tlbie instruction must be executed in 64-bit mode 182 * so we have to twiddle MSR[SF] around every invocation. 183 * Just to add to the fun, exceptions must be off as well 184 * so that we can't trap in 64-bit mode. What a pain. 
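 * In outline, the TLBIE() wrapper below builds the VPN as
 * (VSID << 28) | (VA & ADDR_PIDX), clears the top 16 bits, and takes
 * tlbie_mutex before touching the MSR.  The spin lock keeps interrupts
 * off across the mode switch and also serializes tlbie itself, since
 * only one tlbie/tlbsync sequence may be in flight system-wide.  The
 * "insrdi %1,%5,1,0" in the asm sets bit 0 of the scratch MSR copy,
 * which is MSR[SF] (64-bit mode); the original MSR is restored before
 * the closing eieio/tlbsync/ptesync.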
185 */ 186struct mtx tlbie_mutex; 187 188static __inline void 189TLBIE(pmap_t pmap, vm_offset_t va) { 190 uint64_t vpn; 191 register_t vpn_hi, vpn_lo; 192 register_t msr; 193 register_t scratch; 194 195 vpn = (uint64_t)(va & ADDR_PIDX); 196 if (pmap != NULL) 197 vpn |= (va_to_vsid(pmap,va) << 28); 198 vpn &= ~(0xffffULL << 48); 199 200 vpn_hi = (uint32_t)(vpn >> 32); 201 vpn_lo = (uint32_t)vpn; 202 203 mtx_lock_spin(&tlbie_mutex); 204 __asm __volatile("\ 205 mfmsr %0; \ 206 mr %1, %0; \ 207 insrdi %1,%5,1,0; \ 208 mtmsrd %1; \ 209 ptesync; \ 210 \ 211 sld %1,%2,%4; \ 212 or %1,%1,%3; \ 213 tlbie %1; \ 214 \ 215 mtmsrd %0; \ 216 eieio; \ 217 tlbsync; \ 218 ptesync;" 219 : "=r"(msr), "=r"(scratch) : "r"(vpn_hi), "r"(vpn_lo), "r"(32), "r"(1) 220 : "memory"); 221 mtx_unlock_spin(&tlbie_mutex); 222} 223 224#define DISABLE_TRANS(msr) msr = mfmsr(); mtmsr(msr & ~PSL_DR); isync() 225#define ENABLE_TRANS(msr) mtmsr(msr); isync() 226 227#define VSID_MAKE(sr, hash) ((sr) | (((hash) & 0xfffff) << 4)) 228#define VSID_TO_SR(vsid) ((vsid) & 0xf) 229#define VSID_TO_HASH(vsid) (((vsid) >> 4) & 0xfffff) 230 231#define PVO_PTEGIDX_MASK 0x007UL /* which PTEG slot */ 232#define PVO_PTEGIDX_VALID 0x008UL /* slot is valid */ 233#define PVO_WIRED 0x010UL /* PVO entry is wired */ 234#define PVO_MANAGED 0x020UL /* PVO entry is managed */ 235#define PVO_BOOTSTRAP 0x080UL /* PVO entry allocated during 236 bootstrap */ 237#define PVO_FAKE 0x100UL /* fictitious phys page */ 238#define PVO_VADDR(pvo) ((pvo)->pvo_vaddr & ~ADDR_POFF) 239#define PVO_ISFAKE(pvo) ((pvo)->pvo_vaddr & PVO_FAKE) 240#define PVO_PTEGIDX_GET(pvo) ((pvo)->pvo_vaddr & PVO_PTEGIDX_MASK) 241#define PVO_PTEGIDX_ISSET(pvo) ((pvo)->pvo_vaddr & PVO_PTEGIDX_VALID) 242#define PVO_PTEGIDX_CLR(pvo) \ 243 ((void)((pvo)->pvo_vaddr &= ~(PVO_PTEGIDX_VALID|PVO_PTEGIDX_MASK))) 244#define PVO_PTEGIDX_SET(pvo, i) \ 245 ((void)((pvo)->pvo_vaddr |= (i)|PVO_PTEGIDX_VALID)) 246 247#define MOEA_PVO_CHECK(pvo) 248 249#define LOCK_TABLE() mtx_lock(&moea64_table_mutex) 250#define UNLOCK_TABLE() mtx_unlock(&moea64_table_mutex); 251#define ASSERT_TABLE_LOCK() mtx_assert(&moea64_table_mutex, MA_OWNED) 252 253struct ofw_map { 254 vm_offset_t om_va; 255 vm_size_t om_len; 256 vm_offset_t om_pa_hi; 257 vm_offset_t om_pa_lo; 258 u_int om_mode; 259}; 260 261/* 262 * Map of physical memory regions. 263 */ 264static struct mem_region *regions; 265static struct mem_region *pregions; 266extern u_int phys_avail_count; 267extern int regions_sz, pregions_sz; 268extern int ofw_real_mode; 269 270extern struct pmap ofw_pmap; 271 272extern void bs_remap_earlyboot(void); 273 274 275/* 276 * Lock for the pteg and pvo tables. 277 */ 278struct mtx moea64_table_mutex; 279 280/* 281 * PTEG data. 282 */ 283static struct lpteg *moea64_pteg_table; 284u_int moea64_pteg_count; 285u_int moea64_pteg_mask; 286 287/* 288 * PVO data. 
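 * Each mapping is described by a struct pvo_entry that sits on two
 * lists at once: pvo_olink links it into moea64_pvo_table[] by PTEG
 * index so a mapping can be found from (pmap, va), and pvo_vlink links
 * it onto the owning vm_page's list (or onto moea64_pvo_kunmanaged for
 * kernel and unmanaged pages) so per-page operations can walk every
 * mapping of a physical page.  Since pvo_vaddr is page aligned, its
 * low bits double as storage for the PVO_* flags and the PTEG slot
 * index defined above.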
289 */ 290struct pvo_head *moea64_pvo_table; /* pvo entries by pteg index */ 291/* lists of unmanaged pages */ 292struct pvo_head moea64_pvo_kunmanaged = 293 LIST_HEAD_INITIALIZER(moea64_pvo_kunmanaged); 294struct pvo_head moea64_pvo_unmanaged = 295 LIST_HEAD_INITIALIZER(moea64_pvo_unmanaged); 296 297uma_zone_t moea64_upvo_zone; /* zone for pvo entries for unmanaged pages */ 298uma_zone_t moea64_mpvo_zone; /* zone for pvo entries for managed pages */ 299 300#define BPVO_POOL_SIZE 327680 301static struct pvo_entry *moea64_bpvo_pool; 302static int moea64_bpvo_pool_index = 0; 303 304#define VSID_NBPW (sizeof(u_int32_t) * 8) 305static u_int moea64_vsid_bitmap[NPMAPS / VSID_NBPW]; 306 307static boolean_t moea64_initialized = FALSE; 308 309/* 310 * Statistics. 311 */ 312u_int moea64_pte_valid = 0; 313u_int moea64_pte_overflow = 0; 314u_int moea64_pvo_entries = 0; 315u_int moea64_pvo_enter_calls = 0; 316u_int moea64_pvo_remove_calls = 0; 317SYSCTL_INT(_machdep, OID_AUTO, moea64_pte_valid, CTLFLAG_RD, 318 &moea64_pte_valid, 0, ""); 319SYSCTL_INT(_machdep, OID_AUTO, moea64_pte_overflow, CTLFLAG_RD, 320 &moea64_pte_overflow, 0, ""); 321SYSCTL_INT(_machdep, OID_AUTO, moea64_pvo_entries, CTLFLAG_RD, 322 &moea64_pvo_entries, 0, ""); 323SYSCTL_INT(_machdep, OID_AUTO, moea64_pvo_enter_calls, CTLFLAG_RD, 324 &moea64_pvo_enter_calls, 0, ""); 325SYSCTL_INT(_machdep, OID_AUTO, moea64_pvo_remove_calls, CTLFLAG_RD, 326 &moea64_pvo_remove_calls, 0, ""); 327 328vm_offset_t moea64_scratchpage_va[2]; 329struct pvo_entry *moea64_scratchpage_pvo[2]; 330struct lpte *moea64_scratchpage_pte[2]; 331struct mtx moea64_scratchpage_mtx; 332 333/* 334 * Allocate physical memory for use in moea64_bootstrap. 335 */ 336static vm_offset_t moea64_bootstrap_alloc(vm_size_t, u_int); 337 338/* 339 * PTE calls. 340 */ 341static int moea64_pte_insert(u_int, struct lpte *); 342 343/* 344 * PVO calls. 345 */ 346static int moea64_pvo_enter(pmap_t, uma_zone_t, struct pvo_head *, 347 vm_offset_t, vm_offset_t, uint64_t, int); 348static void moea64_pvo_remove(struct pvo_entry *, int); 349static struct pvo_entry *moea64_pvo_find_va(pmap_t, vm_offset_t, int *); 350static struct lpte *moea64_pvo_to_pte(const struct pvo_entry *, int); 351 352/* 353 * Utility routines. 
354 */ 355static void moea64_bridge_bootstrap(mmu_t mmup, 356 vm_offset_t kernelstart, vm_offset_t kernelend); 357static void moea64_bridge_cpu_bootstrap(mmu_t, int ap); 358static void moea64_enter_locked(pmap_t, vm_offset_t, vm_page_t, 359 vm_prot_t, boolean_t); 360static boolean_t moea64_query_bit(vm_page_t, u_int64_t); 361static u_int moea64_clear_bit(vm_page_t, u_int64_t, u_int64_t *); 362static void moea64_kremove(mmu_t, vm_offset_t); 363static void moea64_syncicache(pmap_t pmap, vm_offset_t va, 364 vm_offset_t pa, vm_size_t sz); 365static void tlbia(void); 366 367/* 368 * Kernel MMU interface 369 */ 370void moea64_change_wiring(mmu_t, pmap_t, vm_offset_t, boolean_t); 371void moea64_clear_modify(mmu_t, vm_page_t); 372void moea64_clear_reference(mmu_t, vm_page_t); 373void moea64_copy_page(mmu_t, vm_page_t, vm_page_t); 374void moea64_enter(mmu_t, pmap_t, vm_offset_t, vm_page_t, vm_prot_t, boolean_t); 375void moea64_enter_object(mmu_t, pmap_t, vm_offset_t, vm_offset_t, vm_page_t, 376 vm_prot_t); 377void moea64_enter_quick(mmu_t, pmap_t, vm_offset_t, vm_page_t, vm_prot_t); 378vm_paddr_t moea64_extract(mmu_t, pmap_t, vm_offset_t); 379vm_page_t moea64_extract_and_hold(mmu_t, pmap_t, vm_offset_t, vm_prot_t); 380void moea64_init(mmu_t); 381boolean_t moea64_is_modified(mmu_t, vm_page_t); 382boolean_t moea64_ts_referenced(mmu_t, vm_page_t); 383vm_offset_t moea64_map(mmu_t, vm_offset_t *, vm_offset_t, vm_offset_t, int); 384boolean_t moea64_page_exists_quick(mmu_t, pmap_t, vm_page_t); 385int moea64_page_wired_mappings(mmu_t, vm_page_t); 386void moea64_pinit(mmu_t, pmap_t); 387void moea64_pinit0(mmu_t, pmap_t); 388void moea64_protect(mmu_t, pmap_t, vm_offset_t, vm_offset_t, vm_prot_t); 389void moea64_qenter(mmu_t, vm_offset_t, vm_page_t *, int); 390void moea64_qremove(mmu_t, vm_offset_t, int); 391void moea64_release(mmu_t, pmap_t); 392void moea64_remove(mmu_t, pmap_t, vm_offset_t, vm_offset_t); 393void moea64_remove_all(mmu_t, vm_page_t); 394void moea64_remove_write(mmu_t, vm_page_t); 395void moea64_zero_page(mmu_t, vm_page_t); 396void moea64_zero_page_area(mmu_t, vm_page_t, int, int); 397void moea64_zero_page_idle(mmu_t, vm_page_t); 398void moea64_activate(mmu_t, struct thread *); 399void moea64_deactivate(mmu_t, struct thread *); 400void *moea64_mapdev(mmu_t, vm_offset_t, vm_size_t); 401void moea64_unmapdev(mmu_t, vm_offset_t, vm_size_t); 402vm_offset_t moea64_kextract(mmu_t, vm_offset_t); 403void moea64_kenter(mmu_t, vm_offset_t, vm_offset_t); 404boolean_t moea64_dev_direct_mapped(mmu_t, vm_offset_t, vm_size_t); 405static void moea64_sync_icache(mmu_t, pmap_t, vm_offset_t, vm_size_t); 406 407static mmu_method_t moea64_bridge_methods[] = { 408 MMUMETHOD(mmu_change_wiring, moea64_change_wiring), 409 MMUMETHOD(mmu_clear_modify, moea64_clear_modify), 410 MMUMETHOD(mmu_clear_reference, moea64_clear_reference), 411 MMUMETHOD(mmu_copy_page, moea64_copy_page), 412 MMUMETHOD(mmu_enter, moea64_enter), 413 MMUMETHOD(mmu_enter_object, moea64_enter_object), 414 MMUMETHOD(mmu_enter_quick, moea64_enter_quick), 415 MMUMETHOD(mmu_extract, moea64_extract), 416 MMUMETHOD(mmu_extract_and_hold, moea64_extract_and_hold), 417 MMUMETHOD(mmu_init, moea64_init), 418 MMUMETHOD(mmu_is_modified, moea64_is_modified), 419 MMUMETHOD(mmu_ts_referenced, moea64_ts_referenced), 420 MMUMETHOD(mmu_map, moea64_map), 421 MMUMETHOD(mmu_page_exists_quick,moea64_page_exists_quick), 422 MMUMETHOD(mmu_page_wired_mappings,moea64_page_wired_mappings), 423 MMUMETHOD(mmu_pinit, moea64_pinit), 424 MMUMETHOD(mmu_pinit0, moea64_pinit0), 425 
MMUMETHOD(mmu_protect, moea64_protect), 426 MMUMETHOD(mmu_qenter, moea64_qenter), 427 MMUMETHOD(mmu_qremove, moea64_qremove), 428 MMUMETHOD(mmu_release, moea64_release), 429 MMUMETHOD(mmu_remove, moea64_remove), 430 MMUMETHOD(mmu_remove_all, moea64_remove_all), 431 MMUMETHOD(mmu_remove_write, moea64_remove_write), 432 MMUMETHOD(mmu_sync_icache, moea64_sync_icache), 433 MMUMETHOD(mmu_zero_page, moea64_zero_page), 434 MMUMETHOD(mmu_zero_page_area, moea64_zero_page_area), 435 MMUMETHOD(mmu_zero_page_idle, moea64_zero_page_idle), 436 MMUMETHOD(mmu_activate, moea64_activate), 437 MMUMETHOD(mmu_deactivate, moea64_deactivate), 438 439 /* Internal interfaces */ 440 MMUMETHOD(mmu_bootstrap, moea64_bridge_bootstrap), 441 MMUMETHOD(mmu_cpu_bootstrap, moea64_bridge_cpu_bootstrap), 442 MMUMETHOD(mmu_mapdev, moea64_mapdev), 443 MMUMETHOD(mmu_unmapdev, moea64_unmapdev), 444 MMUMETHOD(mmu_kextract, moea64_kextract), 445 MMUMETHOD(mmu_kenter, moea64_kenter), 446 MMUMETHOD(mmu_dev_direct_mapped,moea64_dev_direct_mapped), 447 448 { 0, 0 } 449}; 450 451static mmu_def_t oea64_bridge_mmu = { 452 MMU_TYPE_G5, 453 moea64_bridge_methods, 454 0 455}; 456MMU_DEF(oea64_bridge_mmu); 457 458static __inline u_int 459va_to_pteg(uint64_t vsid, vm_offset_t addr) 460{ 461 u_int hash; 462 463 hash = vsid ^ (((uint64_t)addr & ADDR_PIDX) >> 464 ADDR_PIDX_SHFT); 465 return (hash & moea64_pteg_mask); 466} 467 468static __inline struct pvo_head * 469pa_to_pvoh(vm_offset_t pa, vm_page_t *pg_p) 470{ 471 struct vm_page *pg; 472 473 pg = PHYS_TO_VM_PAGE(pa); 474 475 if (pg_p != NULL) 476 *pg_p = pg; 477 478 if (pg == NULL) 479 return (&moea64_pvo_unmanaged); 480 481 return (&pg->md.mdpg_pvoh); 482} 483 484static __inline struct pvo_head * 485vm_page_to_pvoh(vm_page_t m) 486{ 487 488 return (&m->md.mdpg_pvoh); 489} 490 491static __inline void 492moea64_attr_clear(vm_page_t m, u_int64_t ptebit) 493{ 494 495 mtx_assert(&vm_page_queue_mtx, MA_OWNED); 496 m->md.mdpg_attrs &= ~ptebit; 497} 498 499static __inline u_int64_t 500moea64_attr_fetch(vm_page_t m) 501{ 502 503 return (m->md.mdpg_attrs); 504} 505 506static __inline void 507moea64_attr_save(vm_page_t m, u_int64_t ptebit) 508{ 509 510 mtx_assert(&vm_page_queue_mtx, MA_OWNED); 511 m->md.mdpg_attrs |= ptebit; 512} 513 514static __inline void 515moea64_pte_create(struct lpte *pt, uint64_t vsid, vm_offset_t va, 516 uint64_t pte_lo) 517{ 518 ASSERT_TABLE_LOCK(); 519 520 /* 521 * Construct a PTE. Default to IMB initially. Valid bit only gets 522 * set when the real pte is set in memory. 523 * 524 * Note: Don't set the valid bit for correct operation of tlb update. 525 */ 526 pt->pte_hi = (vsid << LPTE_VSID_SHIFT) | 527 (((uint64_t)(va & ADDR_PIDX) >> ADDR_API_SHFT64) & LPTE_API); 528 529 pt->pte_lo = pte_lo; 530} 531 532static __inline void 533moea64_pte_synch(struct lpte *pt, struct lpte *pvo_pt) 534{ 535 536 ASSERT_TABLE_LOCK(); 537 538 pvo_pt->pte_lo |= pt->pte_lo & (LPTE_REF | LPTE_CHG); 539} 540 541static __inline void 542moea64_pte_clear(struct lpte *pt, pmap_t pmap, vm_offset_t va, u_int64_t ptebit) 543{ 544 ASSERT_TABLE_LOCK(); 545 546 /* 547 * As shown in Section 7.6.3.2.3 548 */ 549 pt->pte_lo &= ~ptebit; 550 TLBIE(pmap,va); 551} 552 553static __inline void 554moea64_pte_set(struct lpte *pt, struct lpte *pvo_pt) 555{ 556 557 ASSERT_TABLE_LOCK(); 558 pvo_pt->pte_hi |= LPTE_VALID; 559 560 /* 561 * Update the PTE as defined in section 7.6.3.1. 562 * Note that the REF/CHG bits are from pvo_pt and thus should have 563 * been saved so this routine can restore them (if desired). 
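 * The store ordering below is what makes the update safe: pte_lo is
 * written first, the eieio orders that store ahead of the pte_hi store
 * that sets LPTE_VALID, and the trailing ptesync ensures both stores
 * have been performed before the caller proceeds.  A PTE can therefore
 * never be observed as valid while its low word is still stale.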
564 */ 565 pt->pte_lo = pvo_pt->pte_lo; 566 EIEIO(); 567 pt->pte_hi = pvo_pt->pte_hi; 568 PTESYNC(); 569 moea64_pte_valid++; 570} 571 572static __inline void 573moea64_pte_unset(struct lpte *pt, struct lpte *pvo_pt, pmap_t pmap, vm_offset_t va) 574{ 575 ASSERT_TABLE_LOCK(); 576 pvo_pt->pte_hi &= ~LPTE_VALID; 577 578 /* 579 * Force the reg & chg bits back into the PTEs. 580 */ 581 SYNC(); 582 583 /* 584 * Invalidate the pte. 585 */ 586 pt->pte_hi &= ~LPTE_VALID; 587 TLBIE(pmap,va); 588 589 /* 590 * Save the reg & chg bits. 591 */ 592 moea64_pte_synch(pt, pvo_pt); 593 moea64_pte_valid--; 594} 595 596static __inline void 597moea64_pte_change(struct lpte *pt, struct lpte *pvo_pt, pmap_t pmap, vm_offset_t va) 598{ 599 600 /* 601 * Invalidate the PTE 602 */ 603 moea64_pte_unset(pt, pvo_pt, pmap, va); 604 moea64_pte_set(pt, pvo_pt); 605 if (pmap == kernel_pmap) 606 isync(); 607} 608 609static __inline uint64_t 610moea64_calc_wimg(vm_offset_t pa) 611{ 612 uint64_t pte_lo; 613 int i; 614 615 /* 616 * Assume the page is cache inhibited and access is guarded unless 617 * it's in our available memory array. 618 */ 619 pte_lo = LPTE_I | LPTE_G; 620 for (i = 0; i < pregions_sz; i++) { 621 if ((pa >= pregions[i].mr_start) && 622 (pa < (pregions[i].mr_start + pregions[i].mr_size))) { 623 pte_lo &= ~(LPTE_I | LPTE_G); 624 pte_lo |= LPTE_M; 625 break; 626 } 627 } 628 629 return pte_lo; 630} 631 632/* 633 * Quick sort callout for comparing memory regions. 634 */ 635static int mr_cmp(const void *a, const void *b); 636static int om_cmp(const void *a, const void *b); 637 638static int 639mr_cmp(const void *a, const void *b) 640{ 641 const struct mem_region *regiona; 642 const struct mem_region *regionb; 643 644 regiona = a; 645 regionb = b; 646 if (regiona->mr_start < regionb->mr_start) 647 return (-1); 648 else if (regiona->mr_start > regionb->mr_start) 649 return (1); 650 else 651 return (0); 652} 653 654static int 655om_cmp(const void *a, const void *b) 656{ 657 const struct ofw_map *mapa; 658 const struct ofw_map *mapb; 659 660 mapa = a; 661 mapb = b; 662 if (mapa->om_pa_hi < mapb->om_pa_hi) 663 return (-1); 664 else if (mapa->om_pa_hi > mapb->om_pa_hi) 665 return (1); 666 else if (mapa->om_pa_lo < mapb->om_pa_lo) 667 return (-1); 668 else if (mapa->om_pa_lo > mapb->om_pa_lo) 669 return (1); 670 else 671 return (0); 672} 673 674static void 675moea64_bridge_cpu_bootstrap(mmu_t mmup, int ap) 676{ 677 int i = 0; 678 679 /* 680 * Initialize segment registers and MMU 681 */ 682 683 mtmsr(mfmsr() & ~PSL_DR & ~PSL_IR); isync(); 684 for (i = 0; i < 16; i++) { 685 mtsrin(i << ADDR_SR_SHFT, kernel_pmap->pm_sr[i]); 686 } 687 __asm __volatile ("ptesync; mtsdr1 %0; isync" 688 :: "r"((u_int)moea64_pteg_table 689 | (32 - cntlzw(moea64_pteg_mask >> 11)))); 690 tlbia(); 691} 692 693static void 694moea64_add_ofw_mappings(mmu_t mmup, phandle_t mmu, size_t sz) 695{ 696 struct ofw_map translations[sz/sizeof(struct ofw_map)]; 697 register_t msr; 698 vm_offset_t off; 699 vm_paddr_t pa_base; 700 int i, ofw_mappings; 701 702 bzero(translations, sz); 703 if (OF_getprop(mmu, "translations", translations, sz) == -1) 704 panic("moea64_bootstrap: can't get ofw translations"); 705 706 CTR0(KTR_PMAP, "moea64_add_ofw_mappings: translations"); 707 sz /= sizeof(*translations); 708 qsort(translations, sz, sizeof (*translations), om_cmp); 709 710 for (i = 0, ofw_mappings = 0; i < sz; i++) { 711 CTR3(KTR_PMAP, "translation: pa=%#x va=%#x len=%#x", 712 (uint32_t)(translations[i].om_pa_lo), translations[i].om_va, 713 translations[i].om_len); 714 
715 if (translations[i].om_pa_lo % PAGE_SIZE) 716 panic("OFW translation not page-aligned!"); 717 718 if (translations[i].om_pa_hi) 719 panic("OFW translations above 32-bit boundary!"); 720 721 pa_base = translations[i].om_pa_lo; 722 723 /* Now enter the pages for this mapping */ 724 725 DISABLE_TRANS(msr); 726 for (off = 0; off < translations[i].om_len; off += PAGE_SIZE) { 727 moea64_kenter(mmup, translations[i].om_va + off, 728 pa_base + off); 729 730 ofw_mappings++; 731 } 732 ENABLE_TRANS(msr); 733 } 734} 735 736static void 737moea64_bridge_bootstrap(mmu_t mmup, vm_offset_t kernelstart, vm_offset_t kernelend) 738{ 739 ihandle_t mmui; 740 phandle_t chosen; 741 phandle_t mmu; 742 size_t sz; 743 int i, j; 744 vm_size_t size, physsz, hwphyssz; 745 vm_offset_t pa, va, off; 746 register_t msr; 747 void *dpcpu; 748 749 /* We don't have a direct map since there is no BAT */ 750 hw_direct_map = 0; 751 752 /* Make sure battable is zero, since we have no BAT */ 753 for (i = 0; i < 16; i++) { 754 battable[i].batu = 0; 755 battable[i].batl = 0; 756 } 757 758 /* Get physical memory regions from firmware */ 759 mem_regions(&pregions, &pregions_sz, ®ions, ®ions_sz); 760 CTR0(KTR_PMAP, "moea64_bootstrap: physical memory"); 761 762 qsort(pregions, pregions_sz, sizeof(*pregions), mr_cmp); 763 if (sizeof(phys_avail)/sizeof(phys_avail[0]) < regions_sz) 764 panic("moea64_bootstrap: phys_avail too small"); 765 qsort(regions, regions_sz, sizeof(*regions), mr_cmp); 766 phys_avail_count = 0; 767 physsz = 0; 768 hwphyssz = 0; 769 TUNABLE_ULONG_FETCH("hw.physmem", (u_long *) &hwphyssz); 770 for (i = 0, j = 0; i < regions_sz; i++, j += 2) { 771 CTR3(KTR_PMAP, "region: %#x - %#x (%#x)", regions[i].mr_start, 772 regions[i].mr_start + regions[i].mr_size, 773 regions[i].mr_size); 774 if (hwphyssz != 0 && 775 (physsz + regions[i].mr_size) >= hwphyssz) { 776 if (physsz < hwphyssz) { 777 phys_avail[j] = regions[i].mr_start; 778 phys_avail[j + 1] = regions[i].mr_start + 779 hwphyssz - physsz; 780 physsz = hwphyssz; 781 phys_avail_count++; 782 } 783 break; 784 } 785 phys_avail[j] = regions[i].mr_start; 786 phys_avail[j + 1] = regions[i].mr_start + regions[i].mr_size; 787 phys_avail_count++; 788 physsz += regions[i].mr_size; 789 } 790 physmem = btoc(physsz); 791 792 /* 793 * Allocate PTEG table. 794 */ 795#ifdef PTEGCOUNT 796 moea64_pteg_count = PTEGCOUNT; 797#else 798 moea64_pteg_count = 0x1000; 799 800 while (moea64_pteg_count < physmem) 801 moea64_pteg_count <<= 1; 802#endif /* PTEGCOUNT */ 803 804 size = moea64_pteg_count * sizeof(struct lpteg); 805 CTR2(KTR_PMAP, "moea64_bootstrap: %d PTEGs, %d bytes", 806 moea64_pteg_count, size); 807 808 /* 809 * We now need to allocate memory. This memory, to be allocated, 810 * has to reside in a page table. The page table we are about to 811 * allocate. We don't have BAT. So drop to data real mode for a minute 812 * as a measure of last resort. We do this a couple times. 813 */ 814 815 moea64_pteg_table = (struct lpteg *)moea64_bootstrap_alloc(size, size); 816 DISABLE_TRANS(msr); 817 bzero((void *)moea64_pteg_table, moea64_pteg_count * sizeof(struct lpteg)); 818 ENABLE_TRANS(msr); 819 820 moea64_pteg_mask = moea64_pteg_count - 1; 821 822 CTR1(KTR_PMAP, "moea64_bootstrap: PTEG table at %p", moea64_pteg_table); 823 824 /* 825 * Allocate pv/overflow lists. 
826 */ 827 size = sizeof(struct pvo_head) * moea64_pteg_count; 828 829 moea64_pvo_table = (struct pvo_head *)moea64_bootstrap_alloc(size, 830 PAGE_SIZE); 831 CTR1(KTR_PMAP, "moea64_bootstrap: PVO table at %p", moea64_pvo_table); 832 833 DISABLE_TRANS(msr); 834 for (i = 0; i < moea64_pteg_count; i++) 835 LIST_INIT(&moea64_pvo_table[i]); 836 ENABLE_TRANS(msr); 837 838 /* 839 * Initialize the lock that synchronizes access to the pteg and pvo 840 * tables. 841 */ 842 mtx_init(&moea64_table_mutex, "pmap table", NULL, MTX_DEF | 843 MTX_RECURSE); 844 845 /* 846 * Initialize the TLBIE lock. TLBIE can only be executed by one CPU. 847 */ 848 mtx_init(&tlbie_mutex, "tlbie mutex", NULL, MTX_SPIN); 849 850 /* 851 * Initialise the unmanaged pvo pool. 852 */ 853 moea64_bpvo_pool = (struct pvo_entry *)moea64_bootstrap_alloc( 854 BPVO_POOL_SIZE*sizeof(struct pvo_entry), 0); 855 moea64_bpvo_pool_index = 0; 856 857 /* 858 * Make sure kernel vsid is allocated as well as VSID 0. 859 */ 860 moea64_vsid_bitmap[(KERNEL_VSIDBITS & (NPMAPS - 1)) / VSID_NBPW] 861 |= 1 << (KERNEL_VSIDBITS % VSID_NBPW); 862 moea64_vsid_bitmap[0] |= 1; 863 864 /* 865 * Initialize the kernel pmap (which is statically allocated). 866 */ 867 for (i = 0; i < 16; i++) 868 kernel_pmap->pm_sr[i] = EMPTY_SEGMENT + i; 869 870 kernel_pmap->pmap_phys = kernel_pmap; 871 kernel_pmap->pm_active = ~0; 872 873 PMAP_LOCK_INIT(kernel_pmap); 874 875 /* 876 * Now map in all the other buffers we allocated earlier 877 */ 878 879 DISABLE_TRANS(msr); 880 size = moea64_pteg_count * sizeof(struct lpteg); 881 off = (vm_offset_t)(moea64_pteg_table); 882 for (pa = off; pa < off + size; pa += PAGE_SIZE) 883 moea64_kenter(mmup, pa, pa); 884 size = sizeof(struct pvo_head) * moea64_pteg_count; 885 off = (vm_offset_t)(moea64_pvo_table); 886 for (pa = off; pa < off + size; pa += PAGE_SIZE) 887 moea64_kenter(mmup, pa, pa); 888 size = BPVO_POOL_SIZE*sizeof(struct pvo_entry); 889 off = (vm_offset_t)(moea64_bpvo_pool); 890 for (pa = off; pa < off + size; pa += PAGE_SIZE) 891 moea64_kenter(mmup, pa, pa); 892 893 /* 894 * Map certain important things, like ourselves. 895 * 896 * NOTE: We do not map the exception vector space. That code is 897 * used only in real mode, and leaving it unmapped allows us to 898 * catch NULL pointer deferences, instead of making NULL a valid 899 * address. 900 */ 901 902 for (pa = kernelstart & ~PAGE_MASK; pa < kernelend; pa += PAGE_SIZE) 903 moea64_kenter(mmup, pa, pa); 904 ENABLE_TRANS(msr); 905 906 if (!ofw_real_mode) { 907 /* 908 * Set up the Open Firmware pmap and add its mappings. 909 */ 910 911 moea64_pinit(mmup, &ofw_pmap); 912 for (i = 0; i < 16; i++) 913 ofw_pmap.pm_sr[i] = kernel_pmap->pm_sr[i]; 914 915 if ((chosen = OF_finddevice("/chosen")) == -1) 916 panic("moea64_bootstrap: can't find /chosen"); 917 OF_getprop(chosen, "mmu", &mmui, 4); 918 if ((mmu = OF_instance_to_package(mmui)) == -1) 919 panic("moea64_bootstrap: can't get mmu package"); 920 if ((sz = OF_getproplen(mmu, "translations")) == -1) 921 panic("moea64_bootstrap: can't get ofw translation count"); 922 if (sz > 6144 /* tmpstksz - 2 KB headroom */) 923 panic("moea64_bootstrap: too many ofw translations"); 924 925 moea64_add_ofw_mappings(mmup, mmu, sz); 926 } 927 928#ifdef SMP 929 TLBSYNC(); 930#endif 931 932 /* 933 * Calculate the last available physical address. 
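 * phys_avail[] holds (start, end) pairs of available physical memory
 * and is terminated by a zero pair, so the loop below simply advances
 * to the last populated pair and Maxmem becomes the page number of its
 * end address.  As an illustrative example (made-up values), a table
 * of { 0x0, 0x9000000, 0xa000000, 0x20000000, 0, 0 } would leave i at
 * 2 and set Maxmem = powerpc_btop(0x20000000).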
934 */ 935 for (i = 0; phys_avail[i + 2] != 0; i += 2) 936 ; 937 Maxmem = powerpc_btop(phys_avail[i + 1]); 938 939 /* 940 * Initialize MMU and remap early physical mappings 941 */ 942 moea64_bridge_cpu_bootstrap(mmup,0); 943 mtmsr(mfmsr() | PSL_DR | PSL_IR); isync(); 944 pmap_bootstrapped++; 945 bs_remap_earlyboot(); 946 947 /* 948 * Set the start and end of kva. 949 */ 950 virtual_avail = VM_MIN_KERNEL_ADDRESS; 951 virtual_end = VM_MAX_SAFE_KERNEL_ADDRESS; 952 953 /* 954 * Figure out how far we can extend virtual_end into segment 16 955 * without running into existing mappings. Segment 16 is guaranteed 956 * to contain neither RAM nor devices (at least on Apple hardware), 957 * but will generally contain some OFW mappings we should not 958 * step on. 959 */ 960 961 PMAP_LOCK(kernel_pmap); 962 while (moea64_pvo_find_va(kernel_pmap, virtual_end+1, NULL) == NULL) 963 virtual_end += PAGE_SIZE; 964 PMAP_UNLOCK(kernel_pmap); 965 966 /* 967 * Allocate some things for page zeroing 968 */ 969 970 mtx_init(&moea64_scratchpage_mtx, "pvo zero page", NULL, MTX_DEF); 971 for (i = 0; i < 2; i++) { 972 moea64_scratchpage_va[i] = virtual_avail; 973 virtual_avail += PAGE_SIZE; 974 975 moea64_kenter(mmup,moea64_scratchpage_va[i],kernelstart); 976 977 LOCK_TABLE(); 978 moea64_scratchpage_pvo[i] = moea64_pvo_find_va(kernel_pmap, 979 moea64_scratchpage_va[i],&j); 980 moea64_scratchpage_pte[i] = moea64_pvo_to_pte( 981 moea64_scratchpage_pvo[i],j); 982 UNLOCK_TABLE(); 983 } 984 985 /* 986 * Allocate a kernel stack with a guard page for thread0 and map it 987 * into the kernel page map. 988 */ 989 pa = moea64_bootstrap_alloc(KSTACK_PAGES * PAGE_SIZE, PAGE_SIZE); 990 va = virtual_avail + KSTACK_GUARD_PAGES * PAGE_SIZE; 991 virtual_avail = va + KSTACK_PAGES * PAGE_SIZE; 992 CTR2(KTR_PMAP, "moea_bootstrap: kstack0 at %#x (%#x)", pa, va); 993 thread0.td_kstack = va; 994 thread0.td_kstack_pages = KSTACK_PAGES; 995 for (i = 0; i < KSTACK_PAGES; i++) { 996 moea64_kenter(mmup, va, pa); 997 pa += PAGE_SIZE; 998 va += PAGE_SIZE; 999 } 1000 1001 /* 1002 * Allocate virtual address space for the message buffer. 1003 */ 1004 pa = msgbuf_phys = moea64_bootstrap_alloc(MSGBUF_SIZE, PAGE_SIZE); 1005 msgbufp = (struct msgbuf *)msgbuf_phys; 1006 while (pa - msgbuf_phys < MSGBUF_SIZE) { 1007 moea64_kenter(mmup, pa, pa); 1008 pa += PAGE_SIZE; 1009 } 1010 1011 /* 1012 * Allocate virtual address space for the dynamic percpu area. 1013 */ 1014 pa = moea64_bootstrap_alloc(DPCPU_SIZE, PAGE_SIZE); 1015 dpcpu = (void *)pa; 1016 while (pa - (vm_offset_t)dpcpu < DPCPU_SIZE) { 1017 moea64_kenter(mmup, pa, pa); 1018 pa += PAGE_SIZE; 1019 } 1020 dpcpu_init(dpcpu, 0); 1021} 1022 1023/* 1024 * Activate a user pmap. The pmap must be activated before it's address 1025 * space can be accessed in any way. 1026 */ 1027void 1028moea64_activate(mmu_t mmu, struct thread *td) 1029{ 1030 pmap_t pm, pmr; 1031 1032 /* 1033 * Load all the data we need up front to encourage the compiler to 1034 * not issue any loads while we have interrupts disabled below. 
1035 */ 1036 pm = &td->td_proc->p_vmspace->vm_pmap; 1037 pmr = pm->pmap_phys; 1038 1039 pm->pm_active |= PCPU_GET(cpumask); 1040 PCPU_SET(curpmap, pmr); 1041} 1042 1043void 1044moea64_deactivate(mmu_t mmu, struct thread *td) 1045{ 1046 pmap_t pm; 1047 1048 pm = &td->td_proc->p_vmspace->vm_pmap; 1049 pm->pm_active &= ~(PCPU_GET(cpumask)); 1050 PCPU_SET(curpmap, NULL); 1051} 1052 1053void 1054moea64_change_wiring(mmu_t mmu, pmap_t pm, vm_offset_t va, boolean_t wired) 1055{ 1056 struct pvo_entry *pvo; 1057 1058 PMAP_LOCK(pm); 1059 pvo = moea64_pvo_find_va(pm, va & ~ADDR_POFF, NULL); 1060 1061 if (pvo != NULL) { 1062 if (wired) { 1063 if ((pvo->pvo_vaddr & PVO_WIRED) == 0) 1064 pm->pm_stats.wired_count++; 1065 pvo->pvo_vaddr |= PVO_WIRED; 1066 } else { 1067 if ((pvo->pvo_vaddr & PVO_WIRED) != 0) 1068 pm->pm_stats.wired_count--; 1069 pvo->pvo_vaddr &= ~PVO_WIRED; 1070 } 1071 } 1072 PMAP_UNLOCK(pm); 1073} 1074 1075/* 1076 * Zero a page of physical memory by temporarily mapping it into the tlb. 1077 */ 1078void 1079moea64_zero_page(mmu_t mmu, vm_page_t m) 1080{ 1081 moea64_zero_page_area(mmu,m,0,PAGE_SIZE); 1082} 1083 1084/* 1085 * This goes through and sets the physical address of our 1086 * special scratch PTE to the PA we want to zero or copy. Because 1087 * of locking issues (this can get called in pvo_enter() by 1088 * the UMA allocator), we can't use most other utility functions here 1089 */ 1090 1091static __inline 1092void moea64_set_scratchpage_pa(int which, vm_offset_t pa) { 1093 moea64_scratchpage_pvo[which]->pvo_pte.lpte.pte_lo &= 1094 (~LPTE_WIMG & ~LPTE_RPGN); 1095 moea64_scratchpage_pvo[which]->pvo_pte.lpte.pte_lo |= 1096 moea64_calc_wimg(pa) | (uint64_t)pa; 1097 1098 moea64_scratchpage_pte[which]->pte_hi &= ~LPTE_VALID; 1099 TLBIE(kernel_pmap, moea64_scratchpage_va[which]); 1100 1101 moea64_scratchpage_pte[which]->pte_lo = 1102 moea64_scratchpage_pvo[which]->pvo_pte.lpte.pte_lo; 1103 EIEIO(); 1104 1105 moea64_scratchpage_pte[which]->pte_hi |= LPTE_VALID; 1106 PTESYNC(); isync(); 1107} 1108 1109void 1110moea64_copy_page(mmu_t mmu, vm_page_t msrc, vm_page_t mdst) 1111{ 1112 vm_offset_t dst; 1113 vm_offset_t src; 1114 1115 dst = VM_PAGE_TO_PHYS(mdst); 1116 src = VM_PAGE_TO_PHYS(msrc); 1117 1118 mtx_lock(&moea64_scratchpage_mtx); 1119 1120 moea64_set_scratchpage_pa(0,src); 1121 moea64_set_scratchpage_pa(1,dst); 1122 1123 kcopy((void *)moea64_scratchpage_va[0], 1124 (void *)moea64_scratchpage_va[1], PAGE_SIZE); 1125 1126 mtx_unlock(&moea64_scratchpage_mtx); 1127} 1128 1129void 1130moea64_zero_page_area(mmu_t mmu, vm_page_t m, int off, int size) 1131{ 1132 vm_offset_t pa = VM_PAGE_TO_PHYS(m); 1133 1134 if (!moea64_initialized) 1135 panic("moea64_zero_page: can't zero pa %#x", pa); 1136 if (size + off > PAGE_SIZE) 1137 panic("moea64_zero_page: size + off > PAGE_SIZE"); 1138 1139 mtx_lock(&moea64_scratchpage_mtx); 1140 1141 moea64_set_scratchpage_pa(0,pa); 1142 bzero((caddr_t)moea64_scratchpage_va[0] + off, size); 1143 mtx_unlock(&moea64_scratchpage_mtx); 1144} 1145 1146void 1147moea64_zero_page_idle(mmu_t mmu, vm_page_t m) 1148{ 1149 1150 moea64_zero_page(mmu, m); 1151} 1152 1153/* 1154 * Map the given physical page at the specified virtual address in the 1155 * target pmap with the protection requested. If specified the page 1156 * will be wired down. 
1157 */ 1158void 1159moea64_enter(mmu_t mmu, pmap_t pmap, vm_offset_t va, vm_page_t m, 1160 vm_prot_t prot, boolean_t wired) 1161{ 1162 1163 vm_page_lock_queues(); 1164 PMAP_LOCK(pmap); 1165 moea64_enter_locked(pmap, va, m, prot, wired); 1166 vm_page_unlock_queues(); 1167 PMAP_UNLOCK(pmap); 1168} 1169 1170/* 1171 * Map the given physical page at the specified virtual address in the 1172 * target pmap with the protection requested. If specified the page 1173 * will be wired down. 1174 * 1175 * The page queues and pmap must be locked. 1176 */ 1177 1178static void 1179moea64_enter_locked(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot, 1180 boolean_t wired) 1181{ 1182 struct pvo_head *pvo_head; 1183 uma_zone_t zone; 1184 vm_page_t pg; 1185 uint64_t pte_lo; 1186 u_int pvo_flags; 1187 int error; 1188 1189 if (!moea64_initialized) { 1190 pvo_head = &moea64_pvo_kunmanaged; 1191 pg = NULL; 1192 zone = moea64_upvo_zone; 1193 pvo_flags = 0; 1194 } else { 1195 pvo_head = vm_page_to_pvoh(m); 1196 pg = m; 1197 zone = moea64_mpvo_zone; 1198 pvo_flags = PVO_MANAGED; 1199 } 1200 1201 if (pmap_bootstrapped) 1202 mtx_assert(&vm_page_queue_mtx, MA_OWNED); 1203 PMAP_LOCK_ASSERT(pmap, MA_OWNED); 1204 1205 /* XXX change the pvo head for fake pages */ 1206 if ((m->flags & PG_FICTITIOUS) == PG_FICTITIOUS) { 1207 pvo_flags &= ~PVO_MANAGED; 1208 pvo_head = &moea64_pvo_kunmanaged; 1209 zone = moea64_upvo_zone; 1210 } 1211 1212 pte_lo = moea64_calc_wimg(VM_PAGE_TO_PHYS(m)); 1213 1214 if (prot & VM_PROT_WRITE) { 1215 pte_lo |= LPTE_BW; 1216 if (pmap_bootstrapped) 1217 vm_page_flag_set(m, PG_WRITEABLE); 1218 } else 1219 pte_lo |= LPTE_BR; 1220 1221 if (prot & VM_PROT_EXECUTE) 1222 pvo_flags |= VM_PROT_EXECUTE; 1223 1224 if (wired) 1225 pvo_flags |= PVO_WIRED; 1226 1227 if ((m->flags & PG_FICTITIOUS) != 0) 1228 pvo_flags |= PVO_FAKE; 1229 1230 error = moea64_pvo_enter(pmap, zone, pvo_head, va, VM_PAGE_TO_PHYS(m), 1231 pte_lo, pvo_flags); 1232 1233 /* 1234 * Flush the page from the instruction cache if this page is 1235 * mapped executable and cacheable. 1236 */ 1237 if ((pte_lo & (LPTE_I | LPTE_G | LPTE_NOEXEC)) == 0) { 1238 moea64_syncicache(pmap, va, VM_PAGE_TO_PHYS(m), PAGE_SIZE); 1239 } 1240} 1241 1242static void 1243moea64_syncicache(pmap_t pmap, vm_offset_t va, vm_offset_t pa, vm_size_t sz) 1244{ 1245 1246 /* 1247 * This is much trickier than on older systems because 1248 * we can't sync the icache on physical addresses directly 1249 * without a direct map. Instead we check a couple of cases 1250 * where the memory is already mapped in and, failing that, 1251 * use the same trick we use for page zeroing to create 1252 * a temporary mapping for this physical address. 1253 */ 1254 1255 if (!pmap_bootstrapped) { 1256 /* 1257 * If PMAP is not bootstrapped, we are likely to be 1258 * in real mode. 1259 */ 1260 __syncicache((void *)pa, sz); 1261 } else if (pmap == kernel_pmap) { 1262 __syncicache((void *)va, sz); 1263 } else { 1264 /* Use the scratch page to set up a temp mapping */ 1265 1266 mtx_lock(&moea64_scratchpage_mtx); 1267 1268 moea64_set_scratchpage_pa(1,pa & ~ADDR_POFF); 1269 __syncicache((void *)(moea64_scratchpage_va[1] + 1270 (va & ADDR_POFF)), sz); 1271 1272 mtx_unlock(&moea64_scratchpage_mtx); 1273 } 1274} 1275 1276/* 1277 * Maps a sequence of resident pages belonging to the same object. 1278 * The sequence begins with the given page m_start. This page is 1279 * mapped at the given virtual address start. 
Each subsequent page is 1280 * mapped at a virtual address that is offset from start by the same 1281 * amount as the page is offset from m_start within the object. The 1282 * last page in the sequence is the page with the largest offset from 1283 * m_start that can be mapped at a virtual address less than the given 1284 * virtual address end. Not every virtual page between start and end 1285 * is mapped; only those for which a resident page exists with the 1286 * corresponding offset from m_start are mapped. 1287 */ 1288void 1289moea64_enter_object(mmu_t mmu, pmap_t pm, vm_offset_t start, vm_offset_t end, 1290 vm_page_t m_start, vm_prot_t prot) 1291{ 1292 vm_page_t m; 1293 vm_pindex_t diff, psize; 1294 1295 psize = atop(end - start); 1296 m = m_start; 1297 PMAP_LOCK(pm); 1298 while (m != NULL && (diff = m->pindex - m_start->pindex) < psize) { 1299 moea64_enter_locked(pm, start + ptoa(diff), m, prot & 1300 (VM_PROT_READ | VM_PROT_EXECUTE), FALSE); 1301 m = TAILQ_NEXT(m, listq); 1302 } 1303 PMAP_UNLOCK(pm); 1304} 1305 1306void 1307moea64_enter_quick(mmu_t mmu, pmap_t pm, vm_offset_t va, vm_page_t m, 1308 vm_prot_t prot) 1309{ 1310 PMAP_LOCK(pm); 1311 moea64_enter_locked(pm, va, m, prot & (VM_PROT_READ | VM_PROT_EXECUTE), 1312 FALSE); 1313 PMAP_UNLOCK(pm); 1314 1315} 1316 1317vm_paddr_t 1318moea64_extract(mmu_t mmu, pmap_t pm, vm_offset_t va) 1319{ 1320 struct pvo_entry *pvo; 1321 vm_paddr_t pa; 1322 1323 PMAP_LOCK(pm); 1324 pvo = moea64_pvo_find_va(pm, va & ~ADDR_POFF, NULL); 1325 if (pvo == NULL) 1326 pa = 0; 1327 else 1328 pa = (pvo->pvo_pte.lpte.pte_lo & LPTE_RPGN) | (va & ADDR_POFF); 1329 PMAP_UNLOCK(pm); 1330 return (pa); 1331} 1332 1333/* 1334 * Atomically extract and hold the physical page with the given 1335 * pmap and virtual address pair if that mapping permits the given 1336 * protection. 1337 */ 1338vm_page_t 1339moea64_extract_and_hold(mmu_t mmu, pmap_t pmap, vm_offset_t va, vm_prot_t prot) 1340{ 1341 struct pvo_entry *pvo; 1342 vm_page_t m; 1343 1344 m = NULL; 1345 vm_page_lock_queues(); 1346 PMAP_LOCK(pmap); 1347 pvo = moea64_pvo_find_va(pmap, va & ~ADDR_POFF, NULL); 1348 if (pvo != NULL && (pvo->pvo_pte.lpte.pte_hi & LPTE_VALID) && 1349 ((pvo->pvo_pte.lpte.pte_lo & LPTE_PP) == LPTE_RW || 1350 (prot & VM_PROT_WRITE) == 0)) { 1351 m = PHYS_TO_VM_PAGE(pvo->pvo_pte.lpte.pte_lo & LPTE_RPGN); 1352 vm_page_hold(m); 1353 } 1354 vm_page_unlock_queues(); 1355 PMAP_UNLOCK(pmap); 1356 return (m); 1357} 1358 1359static void * 1360moea64_uma_page_alloc(uma_zone_t zone, int bytes, u_int8_t *flags, int wait) 1361{ 1362 /* 1363 * This entire routine is a horrible hack to avoid bothering kmem 1364 * for new KVA addresses. Because this can get called from inside 1365 * kmem allocation routines, calling kmem for a new address here 1366 * can lead to multiply locking non-recursive mutexes. 
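 * The workaround used below is to map each page at its own physical
 * address: a wired page comes from vm_page_alloc(), va is simply set
 * to VM_PAGE_TO_PHYS(m), and the 1:1 mapping is entered directly with
 * moea64_pvo_enter() on the kernel pmap (taking the pmap lock only if
 * it is not already held), so no new KVA ever has to be allocated.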
1367 */ 1368 static vm_pindex_t color; 1369 vm_offset_t va; 1370 1371 vm_page_t m; 1372 int pflags, needed_lock; 1373 1374 *flags = UMA_SLAB_PRIV; 1375 needed_lock = !PMAP_LOCKED(kernel_pmap); 1376 1377 if (needed_lock) 1378 PMAP_LOCK(kernel_pmap); 1379 1380 if ((wait & (M_NOWAIT|M_USE_RESERVE)) == M_NOWAIT) 1381 pflags = VM_ALLOC_INTERRUPT | VM_ALLOC_WIRED; 1382 else 1383 pflags = VM_ALLOC_SYSTEM | VM_ALLOC_WIRED; 1384 if (wait & M_ZERO) 1385 pflags |= VM_ALLOC_ZERO; 1386 1387 for (;;) { 1388 m = vm_page_alloc(NULL, color++, pflags | VM_ALLOC_NOOBJ); 1389 if (m == NULL) { 1390 if (wait & M_NOWAIT) 1391 return (NULL); 1392 VM_WAIT; 1393 } else 1394 break; 1395 } 1396 1397 va = VM_PAGE_TO_PHYS(m); 1398 1399 moea64_pvo_enter(kernel_pmap, moea64_upvo_zone, 1400 &moea64_pvo_kunmanaged, va, VM_PAGE_TO_PHYS(m), LPTE_M, 1401 PVO_WIRED | PVO_BOOTSTRAP); 1402 1403 if (needed_lock) 1404 PMAP_UNLOCK(kernel_pmap); 1405 1406 if ((wait & M_ZERO) && (m->flags & PG_ZERO) == 0) 1407 bzero((void *)va, PAGE_SIZE); 1408 1409 return (void *)va; 1410} 1411 1412void 1413moea64_init(mmu_t mmu) 1414{ 1415 1416 CTR0(KTR_PMAP, "moea64_init"); 1417 1418 moea64_upvo_zone = uma_zcreate("UPVO entry", sizeof (struct pvo_entry), 1419 NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 1420 UMA_ZONE_VM | UMA_ZONE_NOFREE); 1421 moea64_mpvo_zone = uma_zcreate("MPVO entry", sizeof(struct pvo_entry), 1422 NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 1423 UMA_ZONE_VM | UMA_ZONE_NOFREE); 1424 1425 if (!hw_direct_map) { 1426 uma_zone_set_allocf(moea64_upvo_zone,moea64_uma_page_alloc); 1427 uma_zone_set_allocf(moea64_mpvo_zone,moea64_uma_page_alloc); 1428 } 1429 1430 moea64_initialized = TRUE; 1431} 1432 1433boolean_t 1434moea64_is_modified(mmu_t mmu, vm_page_t m) 1435{ 1436 1437 if ((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) != 0) 1438 return (FALSE); 1439 1440 return (moea64_query_bit(m, LPTE_CHG)); 1441} 1442 1443void 1444moea64_clear_reference(mmu_t mmu, vm_page_t m) 1445{ 1446 1447 if ((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) != 0) 1448 return; 1449 moea64_clear_bit(m, LPTE_REF, NULL); 1450} 1451 1452void 1453moea64_clear_modify(mmu_t mmu, vm_page_t m) 1454{ 1455 1456 if ((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) != 0) 1457 return; 1458 moea64_clear_bit(m, LPTE_CHG, NULL); 1459} 1460 1461/* 1462 * Clear the write and modified bits in each of the given page's mappings. 
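 * Each writable mapping on the page's PV list is downgraded in place
 * to read-only (LPTE_BR); any LPTE_CHG bit found along the way, in the
 * cached attributes or in the hardware PTEs, causes the page to be
 * marked dirty with vm_page_dirty(), and PG_WRITEABLE is cleared at
 * the end.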
1463 */ 1464void 1465moea64_remove_write(mmu_t mmu, vm_page_t m) 1466{ 1467 struct pvo_entry *pvo; 1468 struct lpte *pt; 1469 pmap_t pmap; 1470 uint64_t lo; 1471 1472 mtx_assert(&vm_page_queue_mtx, MA_OWNED); 1473 if ((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) != 0 || 1474 (m->flags & PG_WRITEABLE) == 0) 1475 return; 1476 lo = moea64_attr_fetch(m); 1477 SYNC(); 1478 LIST_FOREACH(pvo, vm_page_to_pvoh(m), pvo_vlink) { 1479 pmap = pvo->pvo_pmap; 1480 PMAP_LOCK(pmap); 1481 if ((pvo->pvo_pte.lpte.pte_lo & LPTE_PP) != LPTE_BR) { 1482 LOCK_TABLE(); 1483 pt = moea64_pvo_to_pte(pvo, -1); 1484 pvo->pvo_pte.lpte.pte_lo &= ~LPTE_PP; 1485 pvo->pvo_pte.lpte.pte_lo |= LPTE_BR; 1486 if (pt != NULL) { 1487 moea64_pte_synch(pt, &pvo->pvo_pte.lpte); 1488 lo |= pvo->pvo_pte.lpte.pte_lo; 1489 pvo->pvo_pte.lpte.pte_lo &= ~LPTE_CHG; 1490 moea64_pte_change(pt, &pvo->pvo_pte.lpte, 1491 pvo->pvo_pmap, PVO_VADDR(pvo)); 1492 } 1493 UNLOCK_TABLE(); 1494 } 1495 PMAP_UNLOCK(pmap); 1496 } 1497 if ((lo & LPTE_CHG) != 0) { 1498 moea64_attr_clear(m, LPTE_CHG); 1499 vm_page_dirty(m); 1500 } 1501 vm_page_flag_clear(m, PG_WRITEABLE); 1502} 1503 1504/* 1505 * moea64_ts_referenced: 1506 * 1507 * Return a count of reference bits for a page, clearing those bits. 1508 * It is not necessary for every reference bit to be cleared, but it 1509 * is necessary that 0 only be returned when there are truly no 1510 * reference bits set. 1511 * 1512 * XXX: The exact number of bits to check and clear is a matter that 1513 * should be tested and standardized at some point in the future for 1514 * optimal aging of shared pages. 1515 */ 1516boolean_t 1517moea64_ts_referenced(mmu_t mmu, vm_page_t m) 1518{ 1519 int count; 1520 1521 if ((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) != 0) 1522 return (0); 1523 1524 count = moea64_clear_bit(m, LPTE_REF, NULL); 1525 1526 return (count); 1527} 1528 1529/* 1530 * Map a wired page into kernel virtual address space. 1531 */ 1532void 1533moea64_kenter(mmu_t mmu, vm_offset_t va, vm_offset_t pa) 1534{ 1535 uint64_t pte_lo; 1536 int error; 1537 1538#if 0 1539 if (!pmap_bootstrapped) { 1540 if (va >= VM_MIN_KERNEL_ADDRESS && va < virtual_end) 1541 panic("Trying to enter an address in KVA -- %#x!\n",pa); 1542 } 1543#endif 1544 1545 pte_lo = moea64_calc_wimg(pa); 1546 1547 PMAP_LOCK(kernel_pmap); 1548 error = moea64_pvo_enter(kernel_pmap, moea64_upvo_zone, 1549 &moea64_pvo_kunmanaged, va, pa, pte_lo, 1550 PVO_WIRED | VM_PROT_EXECUTE); 1551 1552 if (error != 0 && error != ENOENT) 1553 panic("moea64_kenter: failed to enter va %#x pa %#x: %d", va, 1554 pa, error); 1555 1556 /* 1557 * Flush the memory from the instruction cache. 1558 */ 1559 if ((pte_lo & (LPTE_I | LPTE_G)) == 0) { 1560 __syncicache((void *)va, PAGE_SIZE); 1561 } 1562 PMAP_UNLOCK(kernel_pmap); 1563} 1564 1565/* 1566 * Extract the physical page address associated with the given kernel virtual 1567 * address. 1568 */ 1569vm_offset_t 1570moea64_kextract(mmu_t mmu, vm_offset_t va) 1571{ 1572 struct pvo_entry *pvo; 1573 vm_paddr_t pa; 1574 1575 PMAP_LOCK(kernel_pmap); 1576 pvo = moea64_pvo_find_va(kernel_pmap, va & ~ADDR_POFF, NULL); 1577 KASSERT(pvo != NULL, ("moea64_kextract: no addr found")); 1578 pa = (pvo->pvo_pte.lpte.pte_lo & LPTE_RPGN) | (va & ADDR_POFF); 1579 PMAP_UNLOCK(kernel_pmap); 1580 return (pa); 1581} 1582 1583/* 1584 * Remove a wired page from kernel virtual address space. 
1585 */ 1586void 1587moea64_kremove(mmu_t mmu, vm_offset_t va) 1588{ 1589 moea64_remove(mmu, kernel_pmap, va, va + PAGE_SIZE); 1590} 1591 1592/* 1593 * Map a range of physical addresses into kernel virtual address space. 1594 * 1595 * The value passed in *virt is a suggested virtual address for the mapping. 1596 * Architectures which can support a direct-mapped physical to virtual region 1597 * can return the appropriate address within that region, leaving '*virt' 1598 * unchanged. We cannot and therefore do not; *virt is updated with the 1599 * first usable address after the mapped region. 1600 */ 1601vm_offset_t 1602moea64_map(mmu_t mmu, vm_offset_t *virt, vm_offset_t pa_start, 1603 vm_offset_t pa_end, int prot) 1604{ 1605 vm_offset_t sva, va; 1606 1607 sva = *virt; 1608 va = sva; 1609 for (; pa_start < pa_end; pa_start += PAGE_SIZE, va += PAGE_SIZE) 1610 moea64_kenter(mmu, va, pa_start); 1611 *virt = va; 1612 1613 return (sva); 1614} 1615 1616/* 1617 * Returns true if the pmap's pv is one of the first 1618 * 16 pvs linked to from this page. This count may 1619 * be changed upwards or downwards in the future; it 1620 * is only necessary that true be returned for a small 1621 * subset of pmaps for proper page aging. 1622 */ 1623boolean_t 1624moea64_page_exists_quick(mmu_t mmu, pmap_t pmap, vm_page_t m) 1625{ 1626 int loops; 1627 struct pvo_entry *pvo; 1628 1629 if (!moea64_initialized || (m->flags & PG_FICTITIOUS)) 1630 return FALSE; 1631 1632 loops = 0; 1633 LIST_FOREACH(pvo, vm_page_to_pvoh(m), pvo_vlink) { 1634 if (pvo->pvo_pmap == pmap) 1635 return (TRUE); 1636 if (++loops >= 16) 1637 break; 1638 } 1639 1640 return (FALSE); 1641} 1642 1643/* 1644 * Return the number of managed mappings to the given physical page 1645 * that are wired. 1646 */ 1647int 1648moea64_page_wired_mappings(mmu_t mmu, vm_page_t m) 1649{ 1650 struct pvo_entry *pvo; 1651 int count; 1652 1653 count = 0; 1654 if (!moea64_initialized || (m->flags & PG_FICTITIOUS) != 0) 1655 return (count); 1656 mtx_assert(&vm_page_queue_mtx, MA_OWNED); 1657 LIST_FOREACH(pvo, vm_page_to_pvoh(m), pvo_vlink) 1658 if ((pvo->pvo_vaddr & PVO_WIRED) != 0) 1659 count++; 1660 return (count); 1661} 1662 1663static u_int moea64_vsidcontext; 1664 1665void 1666moea64_pinit(mmu_t mmu, pmap_t pmap) 1667{ 1668 int i, mask; 1669 u_int entropy; 1670 1671 PMAP_LOCK_INIT(pmap); 1672 1673 entropy = 0; 1674 __asm __volatile("mftb %0" : "=r"(entropy)); 1675 1676 if (pmap_bootstrapped) 1677 pmap->pmap_phys = (pmap_t)moea64_kextract(mmu, (vm_offset_t)pmap); 1678 else 1679 pmap->pmap_phys = pmap; 1680 1681 /* 1682 * Allocate some segment registers for this pmap. 1683 */ 1684 for (i = 0; i < NPMAPS; i += VSID_NBPW) { 1685 u_int hash, n; 1686 1687 /* 1688 * Create a new value by mutiplying by a prime and adding in 1689 * entropy from the timebase register. This is to make the 1690 * VSID more random so that the PT hash function collides 1691 * less often. (Note that the prime casues gcc to do shifts 1692 * instead of a multiply.) 1693 */ 1694 moea64_vsidcontext = (moea64_vsidcontext * 0x1105) + entropy; 1695 hash = moea64_vsidcontext & (NPMAPS - 1); 1696 if (hash == 0) /* 0 is special, avoid it */ 1697 continue; 1698 n = hash >> 5; 1699 mask = 1 << (hash & (VSID_NBPW - 1)); 1700 hash = (moea64_vsidcontext & 0xfffff); 1701 if (moea64_vsid_bitmap[n] & mask) { /* collision? */ 1702 /* anything free in this bucket? 
*/ 1703 if (moea64_vsid_bitmap[n] == 0xffffffff) { 1704 entropy = (moea64_vsidcontext >> 20); 1705 continue; 1706 } 1707 i = ffs(~moea64_vsid_bitmap[i]) - 1; 1708 mask = 1 << i; 1709 hash &= 0xfffff & ~(VSID_NBPW - 1); 1710 hash |= i; 1711 } 1712 moea64_vsid_bitmap[n] |= mask; 1713 for (i = 0; i < 16; i++) { 1714 pmap->pm_sr[i] = VSID_MAKE(i, hash); 1715 } 1716 return; 1717 } 1718 1719 panic("moea64_pinit: out of segments"); 1720} 1721 1722/* 1723 * Initialize the pmap associated with process 0. 1724 */ 1725void 1726moea64_pinit0(mmu_t mmu, pmap_t pm) 1727{ 1728 moea64_pinit(mmu, pm); 1729 bzero(&pm->pm_stats, sizeof(pm->pm_stats)); 1730} 1731 1732/* 1733 * Set the physical protection on the specified range of this map as requested. 1734 */ 1735void 1736moea64_protect(mmu_t mmu, pmap_t pm, vm_offset_t sva, vm_offset_t eva, 1737 vm_prot_t prot) 1738{ 1739 struct pvo_entry *pvo; 1740 struct lpte *pt; 1741 int pteidx; 1742 1743 CTR4(KTR_PMAP, "moea64_protect: pm=%p sva=%#x eva=%#x prot=%#x", pm, sva, 1744 eva, prot); 1745 1746 1747 KASSERT(pm == &curproc->p_vmspace->vm_pmap || pm == kernel_pmap, 1748 ("moea64_protect: non current pmap")); 1749 1750 if ((prot & VM_PROT_READ) == VM_PROT_NONE) { 1751 moea64_remove(mmu, pm, sva, eva); 1752 return; 1753 } 1754 1755 vm_page_lock_queues(); 1756 PMAP_LOCK(pm); 1757 for (; sva < eva; sva += PAGE_SIZE) { 1758 pvo = moea64_pvo_find_va(pm, sva, &pteidx); 1759 if (pvo == NULL) 1760 continue; 1761 1762 /* 1763 * Grab the PTE pointer before we diddle with the cached PTE 1764 * copy. 1765 */ 1766 LOCK_TABLE(); 1767 pt = moea64_pvo_to_pte(pvo, pteidx); 1768 1769 /* 1770 * Change the protection of the page. 1771 */ 1772 pvo->pvo_pte.lpte.pte_lo &= ~LPTE_PP; 1773 pvo->pvo_pte.lpte.pte_lo |= LPTE_BR; 1774 pvo->pvo_pte.lpte.pte_lo &= ~LPTE_NOEXEC; 1775 if ((prot & VM_PROT_EXECUTE) == 0) 1776 pvo->pvo_pte.lpte.pte_lo |= LPTE_NOEXEC; 1777 1778 /* 1779 * If the PVO is in the page table, update that pte as well. 1780 */ 1781 if (pt != NULL) { 1782 moea64_pte_change(pt, &pvo->pvo_pte.lpte, 1783 pvo->pvo_pmap, PVO_VADDR(pvo)); 1784 if ((pvo->pvo_pte.lpte.pte_lo & 1785 (LPTE_I | LPTE_G | LPTE_NOEXEC)) == 0) { 1786 moea64_syncicache(pm, sva, 1787 pvo->pvo_pte.lpte.pte_lo & LPTE_RPGN, 1788 PAGE_SIZE); 1789 } 1790 } 1791 UNLOCK_TABLE(); 1792 } 1793 vm_page_unlock_queues(); 1794 PMAP_UNLOCK(pm); 1795} 1796 1797/* 1798 * Map a list of wired pages into kernel virtual address space. This is 1799 * intended for temporary mappings which do not need page modification or 1800 * references recorded. Existing mappings in the region are overwritten. 1801 */ 1802void 1803moea64_qenter(mmu_t mmu, vm_offset_t va, vm_page_t *m, int count) 1804{ 1805 while (count-- > 0) { 1806 moea64_kenter(mmu, va, VM_PAGE_TO_PHYS(*m)); 1807 va += PAGE_SIZE; 1808 m++; 1809 } 1810} 1811 1812/* 1813 * Remove page mappings from kernel virtual address space. Intended for 1814 * temporary mappings entered by moea64_qenter. 
1815 */ 1816void 1817moea64_qremove(mmu_t mmu, vm_offset_t va, int count) 1818{ 1819 while (count-- > 0) { 1820 moea64_kremove(mmu, va); 1821 va += PAGE_SIZE; 1822 } 1823} 1824 1825void 1826moea64_release(mmu_t mmu, pmap_t pmap) 1827{ 1828 int idx, mask; 1829 1830 /* 1831 * Free segment register's VSID 1832 */ 1833 if (pmap->pm_sr[0] == 0) 1834 panic("moea64_release"); 1835 1836 idx = VSID_TO_HASH(pmap->pm_sr[0]) & (NPMAPS-1); 1837 mask = 1 << (idx % VSID_NBPW); 1838 idx /= VSID_NBPW; 1839 moea64_vsid_bitmap[idx] &= ~mask; 1840 PMAP_LOCK_DESTROY(pmap); 1841} 1842 1843/* 1844 * Remove the given range of addresses from the specified map. 1845 */ 1846void 1847moea64_remove(mmu_t mmu, pmap_t pm, vm_offset_t sva, vm_offset_t eva) 1848{ 1849 struct pvo_entry *pvo; 1850 int pteidx; 1851 1852 vm_page_lock_queues(); 1853 PMAP_LOCK(pm); 1854 for (; sva < eva; sva += PAGE_SIZE) { 1855 pvo = moea64_pvo_find_va(pm, sva, &pteidx); 1856 if (pvo != NULL) { 1857 moea64_pvo_remove(pvo, pteidx); 1858 } 1859 } 1860 vm_page_unlock_queues(); 1861 PMAP_UNLOCK(pm); 1862} 1863 1864/* 1865 * Remove physical page from all pmaps in which it resides. moea64_pvo_remove() 1866 * will reflect changes in pte's back to the vm_page. 1867 */ 1868void 1869moea64_remove_all(mmu_t mmu, vm_page_t m) 1870{ 1871 struct pvo_head *pvo_head; 1872 struct pvo_entry *pvo, *next_pvo; 1873 pmap_t pmap; 1874 1875 mtx_assert(&vm_page_queue_mtx, MA_OWNED); 1876 1877 pvo_head = vm_page_to_pvoh(m); 1878 for (pvo = LIST_FIRST(pvo_head); pvo != NULL; pvo = next_pvo) { 1879 next_pvo = LIST_NEXT(pvo, pvo_vlink); 1880 1881 MOEA_PVO_CHECK(pvo); /* sanity check */ 1882 pmap = pvo->pvo_pmap; 1883 PMAP_LOCK(pmap); 1884 moea64_pvo_remove(pvo, -1); 1885 PMAP_UNLOCK(pmap); 1886 } 1887 if ((m->flags & PG_WRITEABLE) && moea64_is_modified(mmu, m)) { 1888 moea64_attr_clear(m, LPTE_CHG); 1889 vm_page_dirty(m); 1890 } 1891 vm_page_flag_clear(m, PG_WRITEABLE); 1892} 1893 1894/* 1895 * Allocate a physical page of memory directly from the phys_avail map. 1896 * Can only be called from moea64_bootstrap before avail start and end are 1897 * calculated. 
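 * The allocator walks the (start, end) pairs in phys_avail[], rounds
 * each candidate start up to 'align', and carves the allocation out by
 * trimming the pair at its front or back, or by splitting it in two
 * when the aligned start lands in the middle.  As an illustrative
 * example (made-up numbers): allocating 0x4000 bytes with align 0x4000
 * from the pair (0x3000, 0x10000) returns 0x4000 and leaves the pairs
 * (0x3000, 0x4000) and (0x8000, 0x10000) behind.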
/*
 * Allocate a physical page of memory directly from the phys_avail map.
 * Can only be called from moea64_bootstrap before avail start and end are
 * calculated.
 */
static vm_offset_t
moea64_bootstrap_alloc(vm_size_t size, u_int align)
{
	vm_offset_t s, e;
	int i, j;

	size = round_page(size);
	for (i = 0; phys_avail[i + 1] != 0; i += 2) {
		if (align != 0)
			s = (phys_avail[i] + align - 1) & ~(align - 1);
		else
			s = phys_avail[i];
		e = s + size;

		if (s < phys_avail[i] || e > phys_avail[i + 1])
			continue;

		if (s == phys_avail[i]) {
			phys_avail[i] += size;
		} else if (e == phys_avail[i + 1]) {
			phys_avail[i + 1] -= size;
		} else {
			for (j = phys_avail_count * 2; j > i; j -= 2) {
				phys_avail[j] = phys_avail[j - 2];
				phys_avail[j + 1] = phys_avail[j - 1];
			}

			phys_avail[i + 3] = phys_avail[i + 1];
			phys_avail[i + 1] = s;
			phys_avail[i + 2] = e;
			phys_avail_count++;
		}

		return (s);
	}
	panic("moea64_bootstrap_alloc: could not allocate memory");
}

static void
tlbia(void)
{
	vm_offset_t i;
	register_t msr, scratch;

	/*
	 * Walk every TLB congruence class, invalidating each with tlbiel.
	 * The MSR copy in 'scratch' has its SF (64-bit mode) bit set around
	 * the tlbiel; the original MSR is then restored and the
	 * eieio/tlbsync/ptesync sequence completes the invalidation.
	 */
	for (i = 0; i < 0xFF000; i += 0x00001000) {
		__asm __volatile("\
		    mfmsr %0; \
		    mr %1, %0; \
		    insrdi %1,%3,1,0; \
		    mtmsrd %1; \
		    ptesync; \
		    \
		    tlbiel %2; \
		    \
		    mtmsrd %0; \
		    eieio; \
		    tlbsync; \
		    ptesync;"
		: "=r"(msr), "=r"(scratch) : "r"(i), "r"(1));
	}
}

static int
moea64_pvo_enter(pmap_t pm, uma_zone_t zone, struct pvo_head *pvo_head,
    vm_offset_t va, vm_offset_t pa, uint64_t pte_lo, int flags)
{
	struct pvo_entry *pvo;
	uint64_t vsid;
	int first;
	u_int ptegidx;
	int i;
	int bootstrap;

	/*
	 * One nasty thing that can happen here is that the UMA calls to
	 * allocate new PVOs need to map more memory, which calls pvo_enter(),
	 * which calls UMA...
	 *
	 * We break the loop by detecting recursion and allocating out of
	 * the bootstrap pool.
	 */

	moea64_pvo_enter_calls++;
	first = 0;
	bootstrap = (flags & PVO_BOOTSTRAP);

	if (!moea64_initialized)
		bootstrap = 1;

	/*
	 * Compute the PTE Group index.
	 */
	va &= ~ADDR_POFF;
	vsid = va_to_vsid(pm, va);
	ptegidx = va_to_pteg(vsid, va);

	/*
	 * Remove any existing mapping for this page. Reuse the pvo entry if
	 * there is a mapping.
	 */
	LOCK_TABLE();

	LIST_FOREACH(pvo, &moea64_pvo_table[ptegidx], pvo_olink) {
		if (pvo->pvo_pmap == pm && PVO_VADDR(pvo) == va) {
			if ((pvo->pvo_pte.lpte.pte_lo & LPTE_RPGN) == pa &&
			    (pvo->pvo_pte.lpte.pte_lo & LPTE_PP) ==
			    (pte_lo & LPTE_PP)) {
				UNLOCK_TABLE();
				return (0);
			}
			moea64_pvo_remove(pvo, -1);
			break;
		}
	}

	/*
	 * If we aren't overwriting a mapping, try to allocate.
	 */
	if (bootstrap) {
		if (moea64_bpvo_pool_index >= BPVO_POOL_SIZE) {
			panic("moea64_enter: bpvo pool exhausted, %d, %d, %d",
			    moea64_bpvo_pool_index, BPVO_POOL_SIZE,
			    BPVO_POOL_SIZE * sizeof(struct pvo_entry));
		}
		pvo = &moea64_bpvo_pool[moea64_bpvo_pool_index];
		moea64_bpvo_pool_index++;
		bootstrap = 1;
	} else {
		/*
		 * Note: drop the table lock around the UMA allocation in
		 * case the UMA allocator needs to manipulate the page
		 * table. The mapping we are working with is already
		 * protected by the PMAP lock.
		 */
		UNLOCK_TABLE();
		pvo = uma_zalloc(zone, M_NOWAIT);
		LOCK_TABLE();
	}

	if (pvo == NULL) {
		UNLOCK_TABLE();
		return (ENOMEM);
	}

	moea64_pvo_entries++;
	pvo->pvo_vaddr = va;
	pvo->pvo_pmap = pm;
	LIST_INSERT_HEAD(&moea64_pvo_table[ptegidx], pvo, pvo_olink);
	pvo->pvo_vaddr &= ~ADDR_POFF;

	if (!(flags & VM_PROT_EXECUTE))
		pte_lo |= LPTE_NOEXEC;
	if (flags & PVO_WIRED)
		pvo->pvo_vaddr |= PVO_WIRED;
	if (pvo_head != &moea64_pvo_kunmanaged)
		pvo->pvo_vaddr |= PVO_MANAGED;
	if (bootstrap)
		pvo->pvo_vaddr |= PVO_BOOTSTRAP;
	if (flags & PVO_FAKE)
		pvo->pvo_vaddr |= PVO_FAKE;

	moea64_pte_create(&pvo->pvo_pte.lpte, vsid, va,
	    (uint64_t)(pa) | pte_lo);

	/*
	 * Remember if the list was empty and therefore will be the first
	 * item.
	 */
	if (LIST_FIRST(pvo_head) == NULL)
		first = 1;
	LIST_INSERT_HEAD(pvo_head, pvo, pvo_vlink);

	if (pvo->pvo_vaddr & PVO_WIRED)
		pm->pm_stats.wired_count++;
	pm->pm_stats.resident_count++;

	/*
	 * We hope this succeeds but it isn't required.
	 */
	i = moea64_pte_insert(ptegidx, &pvo->pvo_pte.lpte);
	if (i >= 0) {
		PVO_PTEGIDX_SET(pvo, i);
	} else {
		panic("moea64_pvo_enter: overflow");
		moea64_pte_overflow++;
	}

	if (pm == kernel_pmap)
		isync();

	UNLOCK_TABLE();

	return (first ? ENOENT : 0);
}

static void
moea64_pvo_remove(struct pvo_entry *pvo, int pteidx)
{
	struct lpte *pt;

	/*
	 * If there is an active pte entry, we need to deactivate it (and
	 * save the ref & chg bits).
	 */
	LOCK_TABLE();
	pt = moea64_pvo_to_pte(pvo, pteidx);
	if (pt != NULL) {
		moea64_pte_unset(pt, &pvo->pvo_pte.lpte, pvo->pvo_pmap,
		    PVO_VADDR(pvo));
		PVO_PTEGIDX_CLR(pvo);
	} else {
		moea64_pte_overflow--;
	}
	UNLOCK_TABLE();

	/*
	 * Update our statistics.
	 */
	pvo->pvo_pmap->pm_stats.resident_count--;
	if (pvo->pvo_vaddr & PVO_WIRED)
		pvo->pvo_pmap->pm_stats.wired_count--;

	/*
	 * Save the REF/CHG bits into their cache if the page is managed.
	 */
	if ((pvo->pvo_vaddr & (PVO_MANAGED|PVO_FAKE)) == PVO_MANAGED) {
		struct vm_page *pg;

		pg = PHYS_TO_VM_PAGE(pvo->pvo_pte.lpte.pte_lo & LPTE_RPGN);
		if (pg != NULL) {
			moea64_attr_save(pg, pvo->pvo_pte.lpte.pte_lo &
			    (LPTE_REF | LPTE_CHG));
		}
	}

	/*
	 * Remove this PVO from the PV list.
	 */
	LIST_REMOVE(pvo, pvo_vlink);

	/*
	 * Remove this from the overflow list and return it to the pool
	 * if we aren't going to reuse it.
	 */
	LIST_REMOVE(pvo, pvo_olink);
	if (!(pvo->pvo_vaddr & PVO_BOOTSTRAP))
		uma_zfree((pvo->pvo_vaddr & PVO_MANAGED) ? moea64_mpvo_zone :
		    moea64_upvo_zone, pvo);
	moea64_pvo_entries--;
	moea64_pvo_remove_calls++;
}

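/*
 * Worked example (hypothetical values) for the index calculation below:
 * a PVO cached in slot 5 of PTEG 0x40 gives pteidx = 0x40 * 8 + 5 = 0x205.
 * If LPTE_HID is set, the entry was inserted under the secondary hash, so
 * the index is flipped into the secondary PTEG by XORing with
 * moea64_pteg_mask * 8.
 */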
static __inline int
moea64_pvo_pte_index(const struct pvo_entry *pvo, int ptegidx)
{
	int pteidx;

	/*
	 * We can find the actual pte entry without searching by using
	 * the PTEG slot number cached in the PVO (PVO_PTEGIDX_GET) and
	 * by noticing the HID bit.
	 */
	pteidx = ptegidx * 8 + PVO_PTEGIDX_GET(pvo);
	if (pvo->pvo_pte.lpte.pte_hi & LPTE_HID)
		pteidx ^= moea64_pteg_mask * 8;

	return (pteidx);
}

static struct pvo_entry *
moea64_pvo_find_va(pmap_t pm, vm_offset_t va, int *pteidx_p)
{
	struct pvo_entry *pvo;
	int ptegidx;
	uint64_t vsid;

	va &= ~ADDR_POFF;
	vsid = va_to_vsid(pm, va);
	ptegidx = va_to_pteg(vsid, va);

	LOCK_TABLE();
	LIST_FOREACH(pvo, &moea64_pvo_table[ptegidx], pvo_olink) {
		if (pvo->pvo_pmap == pm && PVO_VADDR(pvo) == va) {
			if (pteidx_p)
				*pteidx_p = moea64_pvo_pte_index(pvo, ptegidx);
			break;
		}
	}
	UNLOCK_TABLE();

	return (pvo);
}

static struct lpte *
moea64_pvo_to_pte(const struct pvo_entry *pvo, int pteidx)
{
	struct lpte *pt;

	/*
	 * If we haven't been supplied the ptegidx, calculate it.
	 */
	if (pteidx == -1) {
		int ptegidx;
		uint64_t vsid;

		vsid = va_to_vsid(pvo->pvo_pmap, PVO_VADDR(pvo));
		ptegidx = va_to_pteg(vsid, PVO_VADDR(pvo));
		pteidx = moea64_pvo_pte_index(pvo, ptegidx);
	}

	pt = &moea64_pteg_table[pteidx >> 3].pt[pteidx & 7];

	if ((pvo->pvo_pte.lpte.pte_hi & LPTE_VALID) &&
	    !PVO_PTEGIDX_ISSET(pvo)) {
		panic("moea64_pvo_to_pte: pvo %p has valid pte in pvo but no "
		    "valid pte index", pvo);
	}

	if ((pvo->pvo_pte.lpte.pte_hi & LPTE_VALID) == 0 &&
	    PVO_PTEGIDX_ISSET(pvo)) {
		panic("moea64_pvo_to_pte: pvo %p has valid pte index in pvo "
		    "but no valid pte", pvo);
	}

	if ((pt->pte_hi ^ (pvo->pvo_pte.lpte.pte_hi & ~LPTE_VALID)) ==
	    LPTE_VALID) {
		if ((pvo->pvo_pte.lpte.pte_hi & LPTE_VALID) == 0) {
			panic("moea64_pvo_to_pte: pvo %p has valid pte in "
			    "moea64_pteg_table %p but invalid in pvo", pvo, pt);
		}

		if (((pt->pte_lo ^ pvo->pvo_pte.lpte.pte_lo) &
		    ~(LPTE_CHG|LPTE_REF)) != 0) {
			panic("moea64_pvo_to_pte: pvo %p pte does not match "
			    "pte %p in moea64_pteg_table difference is %#x",
			    pvo, pt,
			    (uint32_t)(pt->pte_lo ^ pvo->pvo_pte.lpte.pte_lo));
		}

		ASSERT_TABLE_LOCK();
		return (pt);
	}

	if (pvo->pvo_pte.lpte.pte_hi & LPTE_VALID) {
		panic("moea64_pvo_to_pte: pvo %p has invalid pte %p in "
		    "moea64_pteg_table but valid in pvo", pvo, pt);
	}

	return (NULL);
}

static int
moea64_pte_insert(u_int ptegidx, struct lpte *pvo_pt)
{
	struct lpte *pt;
	int i;

	ASSERT_TABLE_LOCK();

	/*
	 * First try primary hash.
	 */
	for (pt = moea64_pteg_table[ptegidx].pt, i = 0; i < 8; i++, pt++) {
		if ((pt->pte_hi & LPTE_VALID) == 0) {
			pvo_pt->pte_hi &= ~LPTE_HID;
			moea64_pte_set(pt, pvo_pt);
			return (i);
		}
	}

	/*
	 * Now try the secondary hash; LPTE_HID records which hash was
	 * used so that later lookups can locate the entry again.
	 */
	ptegidx ^= moea64_pteg_mask;

	for (pt = moea64_pteg_table[ptegidx].pt, i = 0; i < 8; i++, pt++) {
		if ((pt->pte_hi & LPTE_VALID) == 0) {
			pvo_pt->pte_hi |= LPTE_HID;
			moea64_pte_set(pt, pvo_pt);
			return (i);
		}
	}

	panic("moea64_pte_insert: overflow");
	return (-1);
}

static boolean_t
moea64_query_bit(vm_page_t m, u_int64_t ptebit)
{
	struct pvo_entry *pvo;
	struct lpte *pt;

	if (moea64_attr_fetch(m) & ptebit)
		return (TRUE);

	LIST_FOREACH(pvo, vm_page_to_pvoh(m), pvo_vlink) {
		MOEA_PVO_CHECK(pvo);	/* sanity check */

		/*
		 * See if we saved the bit off. If so, cache it and return
		 * success.
		 */
		if (pvo->pvo_pte.lpte.pte_lo & ptebit) {
			moea64_attr_save(m, ptebit);
			MOEA_PVO_CHECK(pvo);	/* sanity check */
			return (TRUE);
		}
	}

	/*
	 * No luck, now go through the hard part of looking at the PTEs
	 * themselves. Sync so that any pending REF/CHG bits are flushed to
	 * the PTEs.
	 */
	SYNC();
	LIST_FOREACH(pvo, vm_page_to_pvoh(m), pvo_vlink) {
		MOEA_PVO_CHECK(pvo);	/* sanity check */

		/*
		 * See if this pvo has a valid PTE. If so, fetch the
		 * REF/CHG bits from the valid PTE. If the appropriate
		 * ptebit is set, cache it and return success.
		 */
		LOCK_TABLE();
		pt = moea64_pvo_to_pte(pvo, -1);
		if (pt != NULL) {
			moea64_pte_synch(pt, &pvo->pvo_pte.lpte);
			if (pvo->pvo_pte.lpte.pte_lo & ptebit) {
				UNLOCK_TABLE();

				moea64_attr_save(m, ptebit);
				MOEA_PVO_CHECK(pvo);	/* sanity check */
				return (TRUE);
			}
		}
		UNLOCK_TABLE();
	}

	return (FALSE);
}

static u_int
moea64_clear_bit(vm_page_t m, u_int64_t ptebit, u_int64_t *origbit)
{
	u_int count;
	struct pvo_entry *pvo;
	struct lpte *pt;
	uint64_t rv;

	/*
	 * Clear the cached value.
	 */
	rv = moea64_attr_fetch(m);
	moea64_attr_clear(m, ptebit);

	/*
	 * Sync so that any pending REF/CHG bits are flushed to the PTEs (so
	 * we can reset the right ones). Note that since the pvo entries and
	 * list heads are accessed via BAT0 and are never placed in the page
	 * table, we don't have to worry about further accesses setting the
	 * REF/CHG bits.
	 */
	SYNC();

	/*
	 * For each pvo entry, clear the pvo's ptebit. If this pvo has a
	 * valid pte clear the ptebit from the valid pte.
	 */
	count = 0;
	LIST_FOREACH(pvo, vm_page_to_pvoh(m), pvo_vlink) {
		MOEA_PVO_CHECK(pvo);	/* sanity check */

		LOCK_TABLE();
		pt = moea64_pvo_to_pte(pvo, -1);
		if (pt != NULL) {
			moea64_pte_synch(pt, &pvo->pvo_pte.lpte);
			if (pvo->pvo_pte.lpte.pte_lo & ptebit) {
				count++;
				moea64_pte_clear(pt, pvo->pvo_pmap,
				    PVO_VADDR(pvo), ptebit);
			}
		}
		UNLOCK_TABLE();
		rv |= pvo->pvo_pte.lpte.pte_lo;
		pvo->pvo_pte.lpte.pte_lo &= ~ptebit;
		MOEA_PVO_CHECK(pvo);	/* sanity check */
	}

	if (origbit != NULL) {
		*origbit = rv;
	}

	return (count);
}

boolean_t
moea64_dev_direct_mapped(mmu_t mmu, vm_offset_t pa, vm_size_t size)
{
	return (EFAULT);
}

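/*
 * Usage sketch (hypothetical addresses) for the device-mapping pair below:
 * a caller maps a register window with
 *	regs = moea64_mapdev(mmu, 0xf0000000, 0x1000);
 * and later tears it down with
 *	moea64_unmapdev(mmu, (vm_offset_t)regs, 0x1000);
 * The returned pointer preserves the sub-page offset of 'pa'.
 */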
/*
 * Map a set of physical memory pages into the kernel virtual
 * address space. Return a pointer to where it is mapped. This
 * routine is intended to be used for mapping device memory,
 * NOT real memory.
 */
void *
moea64_mapdev(mmu_t mmu, vm_offset_t pa, vm_size_t size)
{
	vm_offset_t va, tmpva, ppa, offset;

	ppa = trunc_page(pa);
	offset = pa & PAGE_MASK;
	size = roundup(offset + size, PAGE_SIZE);

	va = kmem_alloc_nofault(kernel_map, size);

	if (!va)
		panic("moea64_mapdev: Couldn't alloc kernel virtual memory");

	for (tmpva = va; size > 0;) {
		moea64_kenter(mmu, tmpva, ppa);
		size -= PAGE_SIZE;
		tmpva += PAGE_SIZE;
		ppa += PAGE_SIZE;
	}

	return ((void *)(va + offset));
}

void
moea64_unmapdev(mmu_t mmu, vm_offset_t va, vm_size_t size)
{
	vm_offset_t base, offset;

	base = trunc_page(va);
	offset = va & PAGE_MASK;
	size = roundup(offset + size, PAGE_SIZE);

	kmem_free(kernel_map, base, size);
}

static void
moea64_sync_icache(mmu_t mmu, pmap_t pm, vm_offset_t va, vm_size_t sz)
{
	struct pvo_entry *pvo;
	vm_offset_t lim;
	vm_paddr_t pa;
	vm_size_t len;

	PMAP_LOCK(pm);
	while (sz > 0) {
		/* Process one page (or the remainder of it) at a time. */
		lim = round_page(va + 1);
		len = MIN(lim - va, sz);
		pvo = moea64_pvo_find_va(pm, va & ~ADDR_POFF, NULL);
		if (pvo != NULL) {
			pa = (pvo->pvo_pte.lpte.pte_lo & LPTE_RPGN) |
			    (va & ADDR_POFF);
			moea64_syncicache(pm, va, pa, len);
		}
		va += len;
		sz -= len;
	}
	PMAP_UNLOCK(pm);
}