mmu_oea64.c revision 198400
1/*- 2 * Copyright (c) 2001 The NetBSD Foundation, Inc. 3 * All rights reserved. 4 * 5 * This code is derived from software contributed to The NetBSD Foundation 6 * by Matt Thomas <matt@3am-software.com> of Allegro Networks, Inc. 7 * 8 * Redistribution and use in source and binary forms, with or without 9 * modification, are permitted provided that the following conditions 10 * are met: 11 * 1. Redistributions of source code must retain the above copyright 12 * notice, this list of conditions and the following disclaimer. 13 * 2. Redistributions in binary form must reproduce the above copyright 14 * notice, this list of conditions and the following disclaimer in the 15 * documentation and/or other materials provided with the distribution. 16 * 3. All advertising materials mentioning features or use of this software 17 * must display the following acknowledgement: 18 * This product includes software developed by the NetBSD 19 * Foundation, Inc. and its contributors. 20 * 4. Neither the name of The NetBSD Foundation nor the names of its 21 * contributors may be used to endorse or promote products derived 22 * from this software without specific prior written permission. 23 * 24 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS 25 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED 26 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR 27 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS 28 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 29 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 30 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 31 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 32 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 33 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 34 * POSSIBILITY OF SUCH DAMAGE. 35 */ 36/*- 37 * Copyright (C) 1995, 1996 Wolfgang Solfrank. 38 * Copyright (C) 1995, 1996 TooLs GmbH. 39 * All rights reserved. 40 * 41 * Redistribution and use in source and binary forms, with or without 42 * modification, are permitted provided that the following conditions 43 * are met: 44 * 1. Redistributions of source code must retain the above copyright 45 * notice, this list of conditions and the following disclaimer. 46 * 2. Redistributions in binary form must reproduce the above copyright 47 * notice, this list of conditions and the following disclaimer in the 48 * documentation and/or other materials provided with the distribution. 49 * 3. All advertising materials mentioning features or use of this software 50 * must display the following acknowledgement: 51 * This product includes software developed by TooLs GmbH. 52 * 4. The name of TooLs GmbH may not be used to endorse or promote products 53 * derived from this software without specific prior written permission. 54 * 55 * THIS SOFTWARE IS PROVIDED BY TOOLS GMBH ``AS IS'' AND ANY EXPRESS OR 56 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES 57 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
58 * IN NO EVENT SHALL TOOLS GMBH BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 59 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, 60 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; 61 * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, 62 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR 63 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF 64 * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 65 * 66 * $NetBSD: pmap.c,v 1.28 2000/03/26 20:42:36 kleink Exp $ 67 */ 68/*- 69 * Copyright (C) 2001 Benno Rice. 70 * All rights reserved. 71 * 72 * Redistribution and use in source and binary forms, with or without 73 * modification, are permitted provided that the following conditions 74 * are met: 75 * 1. Redistributions of source code must retain the above copyright 76 * notice, this list of conditions and the following disclaimer. 77 * 2. Redistributions in binary form must reproduce the above copyright 78 * notice, this list of conditions and the following disclaimer in the 79 * documentation and/or other materials provided with the distribution. 80 * 81 * THIS SOFTWARE IS PROVIDED BY Benno Rice ``AS IS'' AND ANY EXPRESS OR 82 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES 83 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 84 * IN NO EVENT SHALL TOOLS GMBH BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 85 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, 86 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; 87 * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, 88 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR 89 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF 90 * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 91 */ 92 93#include <sys/cdefs.h> 94__FBSDID("$FreeBSD: head/sys/powerpc/aim/mmu_oea64.c 198400 2009-10-23 14:27:40Z nwhitehorn $"); 95 96/* 97 * Manages physical address maps. 98 * 99 * In addition to hardware address maps, this module is called upon to 100 * provide software-use-only maps which may or may not be stored in the 101 * same form as hardware maps. These pseudo-maps are used to store 102 * intermediate results from copy operations to and from address spaces. 103 * 104 * Since the information managed by this module is also stored by the 105 * logical address mapping module, this module may throw away valid virtual 106 * to physical mappings at almost any time. However, invalidations of 107 * mappings must be done as requested. 108 * 109 * In order to cope with hardware architectures which make virtual to 110 * physical map invalidates expensive, this module may delay invalidate 111 * reduced protection operations until such time as they are actually 112 * necessary. This module is given full information as to which processors 113 * are currently using which maps, and to when physical maps must be made 114 * correct. 
115 */ 116 117#include "opt_kstack_pages.h" 118 119#include <sys/param.h> 120#include <sys/kernel.h> 121#include <sys/ktr.h> 122#include <sys/lock.h> 123#include <sys/msgbuf.h> 124#include <sys/mutex.h> 125#include <sys/proc.h> 126#include <sys/sysctl.h> 127#include <sys/systm.h> 128#include <sys/vmmeter.h> 129 130#include <sys/kdb.h> 131 132#include <dev/ofw/openfirm.h> 133 134#include <vm/vm.h> 135#include <vm/vm_param.h> 136#include <vm/vm_kern.h> 137#include <vm/vm_page.h> 138#include <vm/vm_map.h> 139#include <vm/vm_object.h> 140#include <vm/vm_extern.h> 141#include <vm/vm_pageout.h> 142#include <vm/vm_pager.h> 143#include <vm/uma.h> 144 145#include <machine/cpu.h> 146#include <machine/platform.h> 147#include <machine/frame.h> 148#include <machine/md_var.h> 149#include <machine/psl.h> 150#include <machine/bat.h> 151#include <machine/pte.h> 152#include <machine/sr.h> 153#include <machine/trap.h> 154#include <machine/mmuvar.h> 155 156#include "mmu_if.h" 157 158#define MOEA_DEBUG 159 160#define TODO panic("%s: not implemented", __func__); 161 162static __inline u_int32_t 163cntlzw(volatile u_int32_t a) { 164 u_int32_t b; 165 __asm ("cntlzw %0, %1" : "=r"(b) : "r"(a)); 166 return b; 167} 168 169static __inline uint64_t 170va_to_vsid(pmap_t pm, vm_offset_t va) 171{ 172 return ((pm->pm_sr[(uintptr_t)va >> ADDR_SR_SHFT]) & SR_VSID_MASK); 173} 174 175#define TLBSYNC() __asm __volatile("tlbsync; ptesync"); 176#define SYNC() __asm __volatile("sync"); 177#define EIEIO() __asm __volatile("eieio"); 178 179/* 180 * The tlbie instruction must be executed in 64-bit mode 181 * so we have to twiddle MSR[SF] around every invocation. 182 * Just to add to the fun, exceptions must be off as well 183 * so that we can't trap in 64-bit mode. What a pain. 184 */ 185struct mtx tlbie_mutex; 186 187static __inline void 188TLBIE(pmap_t pmap, vm_offset_t va) { 189 uint64_t vpn; 190 register_t vpn_hi, vpn_lo; 191 register_t msr; 192 register_t scratch; 193 194 vpn = (uint64_t)(va & ADDR_PIDX); 195 if (pmap != NULL) 196 vpn |= (va_to_vsid(pmap,va) << 28); 197 198 vpn_hi = (uint32_t)(vpn >> 32); 199 vpn_lo = (uint32_t)vpn; 200 201 mtx_lock_spin(&tlbie_mutex); 202 __asm __volatile("\ 203 mfmsr %0; \ 204 clrldi %1,%0,49; \ 205 mtmsr %1; \ 206 insrdi %1,%5,1,0; \ 207 mtmsrd %1; \ 208 ptesync; \ 209 \ 210 sld %1,%2,%4; \ 211 or %1,%1,%3; \ 212 tlbie %1; \ 213 \ 214 mtmsrd %0; \ 215 eieio; \ 216 tlbsync; \ 217 ptesync;" 218 : "=r"(msr), "=r"(scratch) : "r"(vpn_hi), "r"(vpn_lo), "r"(32), "r"(1)); 219 mtx_unlock_spin(&tlbie_mutex); 220} 221 222#define DISABLE_TRANS(msr) msr = mfmsr(); mtmsr(msr & ~PSL_DR); isync() 223#define ENABLE_TRANS(msr) mtmsr(msr); isync() 224 225#define VSID_MAKE(sr, hash) ((sr) | (((hash) & 0xfffff) << 4)) 226#define VSID_TO_SR(vsid) ((vsid) & 0xf) 227#define VSID_TO_HASH(vsid) (((vsid) >> 4) & 0xfffff) 228 229#define PVO_PTEGIDX_MASK 0x007 /* which PTEG slot */ 230#define PVO_PTEGIDX_VALID 0x008 /* slot is valid */ 231#define PVO_WIRED 0x010 /* PVO entry is wired */ 232#define PVO_MANAGED 0x020 /* PVO entry is managed */ 233#define PVO_BOOTSTRAP 0x080 /* PVO entry allocated during 234 bootstrap */ 235#define PVO_FAKE 0x100 /* fictitious phys page */ 236#define PVO_VADDR(pvo) ((pvo)->pvo_vaddr & ~ADDR_POFF) 237#define PVO_ISFAKE(pvo) ((pvo)->pvo_vaddr & PVO_FAKE) 238#define PVO_PTEGIDX_GET(pvo) ((pvo)->pvo_vaddr & PVO_PTEGIDX_MASK) 239#define PVO_PTEGIDX_ISSET(pvo) ((pvo)->pvo_vaddr & PVO_PTEGIDX_VALID) 240#define PVO_PTEGIDX_CLR(pvo) \ 241 ((void)((pvo)->pvo_vaddr &= 
~(PVO_PTEGIDX_VALID|PVO_PTEGIDX_MASK))) 242#define PVO_PTEGIDX_SET(pvo, i) \ 243 ((void)((pvo)->pvo_vaddr |= (i)|PVO_PTEGIDX_VALID)) 244 245#define MOEA_PVO_CHECK(pvo) 246 247#define LOCK_TABLE() mtx_lock(&moea64_table_mutex) 248#define UNLOCK_TABLE() mtx_unlock(&moea64_table_mutex); 249#define ASSERT_TABLE_LOCK() mtx_assert(&moea64_table_mutex, MA_OWNED) 250 251struct ofw_map { 252 vm_offset_t om_va; 253 vm_size_t om_len; 254 vm_offset_t om_pa_hi; 255 vm_offset_t om_pa_lo; 256 u_int om_mode; 257}; 258 259/* 260 * Map of physical memory regions. 261 */ 262static struct mem_region *regions; 263static struct mem_region *pregions; 264extern u_int phys_avail_count; 265extern int regions_sz, pregions_sz; 266extern int ofw_real_mode; 267static struct ofw_map translations[64]; 268 269extern struct pmap ofw_pmap; 270 271extern void bs_remap_earlyboot(void); 272 273 274/* 275 * Lock for the pteg and pvo tables. 276 */ 277struct mtx moea64_table_mutex; 278 279/* 280 * PTEG data. 281 */ 282static struct lpteg *moea64_pteg_table; 283u_int moea64_pteg_count; 284u_int moea64_pteg_mask; 285 286/* 287 * PVO data. 288 */ 289struct pvo_head *moea64_pvo_table; /* pvo entries by pteg index */ 290/* lists of unmanaged pages */ 291struct pvo_head moea64_pvo_kunmanaged = 292 LIST_HEAD_INITIALIZER(moea64_pvo_kunmanaged); 293struct pvo_head moea64_pvo_unmanaged = 294 LIST_HEAD_INITIALIZER(moea64_pvo_unmanaged); 295 296uma_zone_t moea64_upvo_zone; /* zone for pvo entries for unmanaged pages */ 297uma_zone_t moea64_mpvo_zone; /* zone for pvo entries for managed pages */ 298 299vm_offset_t pvo_allocator_start; 300vm_offset_t pvo_allocator_end; 301 302#define BPVO_POOL_SIZE 327680 303static struct pvo_entry *moea64_bpvo_pool; 304static int moea64_bpvo_pool_index = 0; 305 306#define VSID_NBPW (sizeof(u_int32_t) * 8) 307static u_int moea64_vsid_bitmap[NPMAPS / VSID_NBPW]; 308 309static boolean_t moea64_initialized = FALSE; 310 311/* 312 * Statistics. 313 */ 314u_int moea64_pte_valid = 0; 315u_int moea64_pte_overflow = 0; 316u_int moea64_pvo_entries = 0; 317u_int moea64_pvo_enter_calls = 0; 318u_int moea64_pvo_remove_calls = 0; 319SYSCTL_INT(_machdep, OID_AUTO, moea64_pte_valid, CTLFLAG_RD, 320 &moea64_pte_valid, 0, ""); 321SYSCTL_INT(_machdep, OID_AUTO, moea64_pte_overflow, CTLFLAG_RD, 322 &moea64_pte_overflow, 0, ""); 323SYSCTL_INT(_machdep, OID_AUTO, moea64_pvo_entries, CTLFLAG_RD, 324 &moea64_pvo_entries, 0, ""); 325SYSCTL_INT(_machdep, OID_AUTO, moea64_pvo_enter_calls, CTLFLAG_RD, 326 &moea64_pvo_enter_calls, 0, ""); 327SYSCTL_INT(_machdep, OID_AUTO, moea64_pvo_remove_calls, CTLFLAG_RD, 328 &moea64_pvo_remove_calls, 0, ""); 329 330vm_offset_t moea64_scratchpage_va[2]; 331struct pvo_entry *moea64_scratchpage_pvo[2]; 332struct lpte *moea64_scratchpage_pte[2]; 333struct mtx moea64_scratchpage_mtx; 334 335/* 336 * Allocate physical memory for use in moea64_bootstrap. 337 */ 338static vm_offset_t moea64_bootstrap_alloc(vm_size_t, u_int); 339 340/* 341 * PTE calls. 342 */ 343static int moea64_pte_insert(u_int, struct lpte *); 344 345/* 346 * PVO calls. 347 */ 348static int moea64_pvo_enter(pmap_t, uma_zone_t, struct pvo_head *, 349 vm_offset_t, vm_offset_t, uint64_t, int); 350static void moea64_pvo_remove(struct pvo_entry *, int); 351static struct pvo_entry *moea64_pvo_find_va(pmap_t, vm_offset_t, int *); 352static struct lpte *moea64_pvo_to_pte(const struct pvo_entry *, int); 353 354/* 355 * Utility routines. 
356 */ 357static void moea64_bridge_bootstrap(mmu_t mmup, 358 vm_offset_t kernelstart, vm_offset_t kernelend); 359static void moea64_bridge_cpu_bootstrap(mmu_t, int ap); 360static void moea64_enter_locked(pmap_t, vm_offset_t, vm_page_t, 361 vm_prot_t, boolean_t); 362static boolean_t moea64_query_bit(vm_page_t, u_int64_t); 363static u_int moea64_clear_bit(vm_page_t, u_int64_t, u_int64_t *); 364static void moea64_kremove(mmu_t, vm_offset_t); 365static void moea64_syncicache(pmap_t pmap, vm_offset_t va, 366 vm_offset_t pa, vm_size_t sz); 367static void tlbia(void); 368 369/* 370 * Kernel MMU interface 371 */ 372void moea64_change_wiring(mmu_t, pmap_t, vm_offset_t, boolean_t); 373void moea64_clear_modify(mmu_t, vm_page_t); 374void moea64_clear_reference(mmu_t, vm_page_t); 375void moea64_copy_page(mmu_t, vm_page_t, vm_page_t); 376void moea64_enter(mmu_t, pmap_t, vm_offset_t, vm_page_t, vm_prot_t, boolean_t); 377void moea64_enter_object(mmu_t, pmap_t, vm_offset_t, vm_offset_t, vm_page_t, 378 vm_prot_t); 379void moea64_enter_quick(mmu_t, pmap_t, vm_offset_t, vm_page_t, vm_prot_t); 380vm_paddr_t moea64_extract(mmu_t, pmap_t, vm_offset_t); 381vm_page_t moea64_extract_and_hold(mmu_t, pmap_t, vm_offset_t, vm_prot_t); 382void moea64_init(mmu_t); 383boolean_t moea64_is_modified(mmu_t, vm_page_t); 384boolean_t moea64_ts_referenced(mmu_t, vm_page_t); 385vm_offset_t moea64_map(mmu_t, vm_offset_t *, vm_offset_t, vm_offset_t, int); 386boolean_t moea64_page_exists_quick(mmu_t, pmap_t, vm_page_t); 387int moea64_page_wired_mappings(mmu_t, vm_page_t); 388void moea64_pinit(mmu_t, pmap_t); 389void moea64_pinit0(mmu_t, pmap_t); 390void moea64_protect(mmu_t, pmap_t, vm_offset_t, vm_offset_t, vm_prot_t); 391void moea64_qenter(mmu_t, vm_offset_t, vm_page_t *, int); 392void moea64_qremove(mmu_t, vm_offset_t, int); 393void moea64_release(mmu_t, pmap_t); 394void moea64_remove(mmu_t, pmap_t, vm_offset_t, vm_offset_t); 395void moea64_remove_all(mmu_t, vm_page_t); 396void moea64_remove_write(mmu_t, vm_page_t); 397void moea64_zero_page(mmu_t, vm_page_t); 398void moea64_zero_page_area(mmu_t, vm_page_t, int, int); 399void moea64_zero_page_idle(mmu_t, vm_page_t); 400void moea64_activate(mmu_t, struct thread *); 401void moea64_deactivate(mmu_t, struct thread *); 402void *moea64_mapdev(mmu_t, vm_offset_t, vm_size_t); 403void moea64_unmapdev(mmu_t, vm_offset_t, vm_size_t); 404vm_offset_t moea64_kextract(mmu_t, vm_offset_t); 405void moea64_kenter(mmu_t, vm_offset_t, vm_offset_t); 406boolean_t moea64_dev_direct_mapped(mmu_t, vm_offset_t, vm_size_t); 407static void moea64_sync_icache(mmu_t, pmap_t, vm_offset_t, vm_size_t); 408 409static mmu_method_t moea64_bridge_methods[] = { 410 MMUMETHOD(mmu_change_wiring, moea64_change_wiring), 411 MMUMETHOD(mmu_clear_modify, moea64_clear_modify), 412 MMUMETHOD(mmu_clear_reference, moea64_clear_reference), 413 MMUMETHOD(mmu_copy_page, moea64_copy_page), 414 MMUMETHOD(mmu_enter, moea64_enter), 415 MMUMETHOD(mmu_enter_object, moea64_enter_object), 416 MMUMETHOD(mmu_enter_quick, moea64_enter_quick), 417 MMUMETHOD(mmu_extract, moea64_extract), 418 MMUMETHOD(mmu_extract_and_hold, moea64_extract_and_hold), 419 MMUMETHOD(mmu_init, moea64_init), 420 MMUMETHOD(mmu_is_modified, moea64_is_modified), 421 MMUMETHOD(mmu_ts_referenced, moea64_ts_referenced), 422 MMUMETHOD(mmu_map, moea64_map), 423 MMUMETHOD(mmu_page_exists_quick,moea64_page_exists_quick), 424 MMUMETHOD(mmu_page_wired_mappings,moea64_page_wired_mappings), 425 MMUMETHOD(mmu_pinit, moea64_pinit), 426 MMUMETHOD(mmu_pinit0, moea64_pinit0), 427 
MMUMETHOD(mmu_protect, moea64_protect), 428 MMUMETHOD(mmu_qenter, moea64_qenter), 429 MMUMETHOD(mmu_qremove, moea64_qremove), 430 MMUMETHOD(mmu_release, moea64_release), 431 MMUMETHOD(mmu_remove, moea64_remove), 432 MMUMETHOD(mmu_remove_all, moea64_remove_all), 433 MMUMETHOD(mmu_remove_write, moea64_remove_write), 434 MMUMETHOD(mmu_sync_icache, moea64_sync_icache), 435 MMUMETHOD(mmu_zero_page, moea64_zero_page), 436 MMUMETHOD(mmu_zero_page_area, moea64_zero_page_area), 437 MMUMETHOD(mmu_zero_page_idle, moea64_zero_page_idle), 438 MMUMETHOD(mmu_activate, moea64_activate), 439 MMUMETHOD(mmu_deactivate, moea64_deactivate), 440 441 /* Internal interfaces */ 442 MMUMETHOD(mmu_bootstrap, moea64_bridge_bootstrap), 443 MMUMETHOD(mmu_cpu_bootstrap, moea64_bridge_cpu_bootstrap), 444 MMUMETHOD(mmu_mapdev, moea64_mapdev), 445 MMUMETHOD(mmu_unmapdev, moea64_unmapdev), 446 MMUMETHOD(mmu_kextract, moea64_kextract), 447 MMUMETHOD(mmu_kenter, moea64_kenter), 448 MMUMETHOD(mmu_dev_direct_mapped,moea64_dev_direct_mapped), 449 450 { 0, 0 } 451}; 452 453static mmu_def_t oea64_bridge_mmu = { 454 MMU_TYPE_G5, 455 moea64_bridge_methods, 456 0 457}; 458MMU_DEF(oea64_bridge_mmu); 459 460static __inline u_int 461va_to_pteg(uint64_t vsid, vm_offset_t addr) 462{ 463 u_int hash; 464 465 hash = vsid ^ (((uint64_t)addr & ADDR_PIDX) >> 466 ADDR_PIDX_SHFT); 467 return (hash & moea64_pteg_mask); 468} 469 470static __inline struct pvo_head * 471pa_to_pvoh(vm_offset_t pa, vm_page_t *pg_p) 472{ 473 struct vm_page *pg; 474 475 pg = PHYS_TO_VM_PAGE(pa); 476 477 if (pg_p != NULL) 478 *pg_p = pg; 479 480 if (pg == NULL) 481 return (&moea64_pvo_unmanaged); 482 483 return (&pg->md.mdpg_pvoh); 484} 485 486static __inline struct pvo_head * 487vm_page_to_pvoh(vm_page_t m) 488{ 489 490 return (&m->md.mdpg_pvoh); 491} 492 493static __inline void 494moea64_attr_clear(vm_page_t m, u_int64_t ptebit) 495{ 496 497 mtx_assert(&vm_page_queue_mtx, MA_OWNED); 498 m->md.mdpg_attrs &= ~ptebit; 499} 500 501static __inline u_int64_t 502moea64_attr_fetch(vm_page_t m) 503{ 504 505 return (m->md.mdpg_attrs); 506} 507 508static __inline void 509moea64_attr_save(vm_page_t m, u_int64_t ptebit) 510{ 511 512 mtx_assert(&vm_page_queue_mtx, MA_OWNED); 513 m->md.mdpg_attrs |= ptebit; 514} 515 516static __inline int 517moea64_pte_compare(const struct lpte *pt, const struct lpte *pvo_pt) 518{ 519 if (pt->pte_hi == pvo_pt->pte_hi) 520 return (1); 521 522 return (0); 523} 524 525static __inline int 526moea64_pte_match(struct lpte *pt, uint64_t vsid, vm_offset_t va, int which) 527{ 528 return (pt->pte_hi & ~LPTE_VALID) == 529 ((vsid << LPTE_VSID_SHIFT) | 530 ((uint64_t)(va >> ADDR_API_SHFT64) & LPTE_API) | which); 531} 532 533static __inline void 534moea64_pte_create(struct lpte *pt, uint64_t vsid, vm_offset_t va, 535 uint64_t pte_lo) 536{ 537 ASSERT_TABLE_LOCK(); 538 539 /* 540 * Construct a PTE. Default to IMB initially. Valid bit only gets 541 * set when the real pte is set in memory. 542 * 543 * Note: Don't set the valid bit for correct operation of tlb update. 
544 */ 545 pt->pte_hi = (vsid << LPTE_VSID_SHIFT) | 546 (((uint64_t)(va & ADDR_PIDX) >> ADDR_API_SHFT64) & LPTE_API); 547 548 pt->pte_lo = pte_lo; 549} 550 551static __inline void 552moea64_pte_synch(struct lpte *pt, struct lpte *pvo_pt) 553{ 554 555 ASSERT_TABLE_LOCK(); 556 557 pvo_pt->pte_lo |= pt->pte_lo & (LPTE_REF | LPTE_CHG); 558} 559 560static __inline void 561moea64_pte_clear(struct lpte *pt, pmap_t pmap, vm_offset_t va, u_int64_t ptebit) 562{ 563 ASSERT_TABLE_LOCK(); 564 565 /* 566 * As shown in Section 7.6.3.2.3 567 */ 568 pt->pte_lo &= ~ptebit; 569 TLBIE(pmap,va); 570} 571 572static __inline void 573moea64_pte_set(struct lpte *pt, struct lpte *pvo_pt) 574{ 575 576 ASSERT_TABLE_LOCK(); 577 pvo_pt->pte_hi |= LPTE_VALID; 578 579 /* 580 * Update the PTE as defined in section 7.6.3.1. 581 * Note that the REF/CHG bits are from pvo_pt and thus should have 582 * been saved so this routine can restore them (if desired). 583 */ 584 pt->pte_lo = pvo_pt->pte_lo; 585 EIEIO(); 586 pt->pte_hi = pvo_pt->pte_hi; 587 SYNC(); 588 moea64_pte_valid++; 589} 590 591static __inline void 592moea64_pte_unset(struct lpte *pt, struct lpte *pvo_pt, pmap_t pmap, vm_offset_t va) 593{ 594 ASSERT_TABLE_LOCK(); 595 pvo_pt->pte_hi &= ~LPTE_VALID; 596 597 /* 598 * Force the reg & chg bits back into the PTEs. 599 */ 600 SYNC(); 601 602 /* 603 * Invalidate the pte. 604 */ 605 pt->pte_hi &= ~LPTE_VALID; 606 607 TLBIE(pmap,va); 608 609 /* 610 * Save the reg & chg bits. 611 */ 612 moea64_pte_synch(pt, pvo_pt); 613 moea64_pte_valid--; 614} 615 616static __inline void 617moea64_pte_change(struct lpte *pt, struct lpte *pvo_pt, pmap_t pmap, vm_offset_t va) 618{ 619 620 /* 621 * Invalidate the PTE 622 */ 623 moea64_pte_unset(pt, pvo_pt, pmap, va); 624 moea64_pte_set(pt, pvo_pt); 625} 626 627static __inline uint64_t 628moea64_calc_wimg(vm_offset_t pa) 629{ 630 uint64_t pte_lo; 631 int i; 632 633 /* 634 * Assume the page is cache inhibited and access is guarded unless 635 * it's in our available memory array. 636 */ 637 pte_lo = LPTE_I | LPTE_G; 638 for (i = 0; i < pregions_sz; i++) { 639 if ((pa >= pregions[i].mr_start) && 640 (pa < (pregions[i].mr_start + pregions[i].mr_size))) { 641 pte_lo &= ~(LPTE_I | LPTE_G); 642 pte_lo |= LPTE_M; 643 break; 644 } 645 } 646 647 return pte_lo; 648} 649 650/* 651 * Quick sort callout for comparing memory regions. 
652 */ 653static int mr_cmp(const void *a, const void *b); 654static int om_cmp(const void *a, const void *b); 655 656static int 657mr_cmp(const void *a, const void *b) 658{ 659 const struct mem_region *regiona; 660 const struct mem_region *regionb; 661 662 regiona = a; 663 regionb = b; 664 if (regiona->mr_start < regionb->mr_start) 665 return (-1); 666 else if (regiona->mr_start > regionb->mr_start) 667 return (1); 668 else 669 return (0); 670} 671 672static int 673om_cmp(const void *a, const void *b) 674{ 675 const struct ofw_map *mapa; 676 const struct ofw_map *mapb; 677 678 mapa = a; 679 mapb = b; 680 if (mapa->om_pa_hi < mapb->om_pa_hi) 681 return (-1); 682 else if (mapa->om_pa_hi > mapb->om_pa_hi) 683 return (1); 684 else if (mapa->om_pa_lo < mapb->om_pa_lo) 685 return (-1); 686 else if (mapa->om_pa_lo > mapb->om_pa_lo) 687 return (1); 688 else 689 return (0); 690} 691 692static void 693moea64_bridge_cpu_bootstrap(mmu_t mmup, int ap) 694{ 695 int i = 0; 696 697 /* 698 * Initialize segment registers and MMU 699 */ 700 701 mtmsr(mfmsr() & ~PSL_DR & ~PSL_IR); isync(); 702 for (i = 0; i < 16; i++) { 703 mtsrin(i << ADDR_SR_SHFT, kernel_pmap->pm_sr[i]); 704 } 705 __asm __volatile ("sync; mtsdr1 %0; isync" 706 :: "r"((u_int)moea64_pteg_table 707 | (32 - cntlzw(moea64_pteg_mask >> 11)))); 708 tlbia(); 709} 710 711static void 712moea64_bridge_bootstrap(mmu_t mmup, vm_offset_t kernelstart, vm_offset_t kernelend) 713{ 714 ihandle_t mmui; 715 phandle_t chosen; 716 phandle_t mmu; 717 int sz; 718 int i, j; 719 int ofw_mappings; 720 vm_size_t size, physsz, hwphyssz; 721 vm_offset_t pa, va, off; 722 uint32_t msr; 723 void *dpcpu; 724 725 /* We don't have a direct map since there is no BAT */ 726 hw_direct_map = 0; 727 728 /* Make sure battable is zero, since we have no BAT */ 729 for (i = 0; i < 16; i++) { 730 battable[i].batu = 0; 731 battable[i].batl = 0; 732 } 733 734 /* Get physical memory regions from firmware */ 735 mem_regions(&pregions, &pregions_sz, ®ions, ®ions_sz); 736 CTR0(KTR_PMAP, "moea64_bootstrap: physical memory"); 737 738 qsort(pregions, pregions_sz, sizeof(*pregions), mr_cmp); 739 if (sizeof(phys_avail)/sizeof(phys_avail[0]) < regions_sz) 740 panic("moea64_bootstrap: phys_avail too small"); 741 qsort(regions, regions_sz, sizeof(*regions), mr_cmp); 742 phys_avail_count = 0; 743 physsz = 0; 744 hwphyssz = 0; 745 TUNABLE_ULONG_FETCH("hw.physmem", (u_long *) &hwphyssz); 746 for (i = 0, j = 0; i < regions_sz; i++, j += 2) { 747 CTR3(KTR_PMAP, "region: %#x - %#x (%#x)", regions[i].mr_start, 748 regions[i].mr_start + regions[i].mr_size, 749 regions[i].mr_size); 750 if (hwphyssz != 0 && 751 (physsz + regions[i].mr_size) >= hwphyssz) { 752 if (physsz < hwphyssz) { 753 phys_avail[j] = regions[i].mr_start; 754 phys_avail[j + 1] = regions[i].mr_start + 755 hwphyssz - physsz; 756 physsz = hwphyssz; 757 phys_avail_count++; 758 } 759 break; 760 } 761 phys_avail[j] = regions[i].mr_start; 762 phys_avail[j + 1] = regions[i].mr_start + regions[i].mr_size; 763 phys_avail_count++; 764 physsz += regions[i].mr_size; 765 } 766 physmem = btoc(physsz); 767 768 /* 769 * Allocate PTEG table. 770 */ 771#ifdef PTEGCOUNT 772 moea64_pteg_count = PTEGCOUNT; 773#else 774 moea64_pteg_count = 0x1000; 775 776 while (moea64_pteg_count < physmem) 777 moea64_pteg_count <<= 1; 778#endif /* PTEGCOUNT */ 779 780 size = moea64_pteg_count * sizeof(struct lpteg); 781 CTR2(KTR_PMAP, "moea64_bootstrap: %d PTEGs, %d bytes", 782 moea64_pteg_count, size); 783 784 /* 785 * We now need to allocate memory. 
This memory, to be allocated, 786 * has to reside in a page table. The page table we are about to 787 * allocate. We don't have BAT. So drop to data real mode for a minute 788 * as a measure of last resort. We do this a couple times. 789 */ 790 791 moea64_pteg_table = (struct lpteg *)moea64_bootstrap_alloc(size, size); 792 DISABLE_TRANS(msr); 793 bzero((void *)moea64_pteg_table, moea64_pteg_count * sizeof(struct lpteg)); 794 ENABLE_TRANS(msr); 795 796 moea64_pteg_mask = moea64_pteg_count - 1; 797 798 CTR1(KTR_PMAP, "moea64_bootstrap: PTEG table at %p", moea64_pteg_table); 799 800 /* 801 * Allocate pv/overflow lists. 802 */ 803 size = sizeof(struct pvo_head) * moea64_pteg_count; 804 805 moea64_pvo_table = (struct pvo_head *)moea64_bootstrap_alloc(size, 806 PAGE_SIZE); 807 CTR1(KTR_PMAP, "moea64_bootstrap: PVO table at %p", moea64_pvo_table); 808 809 DISABLE_TRANS(msr); 810 for (i = 0; i < moea64_pteg_count; i++) 811 LIST_INIT(&moea64_pvo_table[i]); 812 ENABLE_TRANS(msr); 813 814 /* 815 * Initialize the lock that synchronizes access to the pteg and pvo 816 * tables. 817 */ 818 mtx_init(&moea64_table_mutex, "pmap table", NULL, MTX_DEF | 819 MTX_RECURSE); 820 821 /* 822 * Initialize the TLBIE lock. TLBIE can only be executed by one CPU. 823 */ 824 mtx_init(&tlbie_mutex, "tlbie mutex", NULL, MTX_SPIN); 825 826 /* 827 * Initialise the unmanaged pvo pool. 828 */ 829 moea64_bpvo_pool = (struct pvo_entry *)moea64_bootstrap_alloc( 830 BPVO_POOL_SIZE*sizeof(struct pvo_entry), 0); 831 moea64_bpvo_pool_index = 0; 832 833 /* 834 * Make sure kernel vsid is allocated as well as VSID 0. 835 */ 836 moea64_vsid_bitmap[(KERNEL_VSIDBITS & (NPMAPS - 1)) / VSID_NBPW] 837 |= 1 << (KERNEL_VSIDBITS % VSID_NBPW); 838 moea64_vsid_bitmap[0] |= 1; 839 840 /* 841 * Initialize the kernel pmap (which is statically allocated). 842 */ 843 for (i = 0; i < 16; i++) 844 kernel_pmap->pm_sr[i] = EMPTY_SEGMENT + i; 845 846 kernel_pmap->pmap_phys = kernel_pmap; 847 kernel_pmap->pm_active = ~0; 848 849 PMAP_LOCK_INIT(kernel_pmap); 850 851 /* 852 * Now map in all the other buffers we allocated earlier 853 */ 854 855 DISABLE_TRANS(msr); 856 size = moea64_pteg_count * sizeof(struct lpteg); 857 off = (vm_offset_t)(moea64_pteg_table); 858 for (pa = off; pa < off + size; pa += PAGE_SIZE) 859 moea64_kenter(mmup, pa, pa); 860 size = sizeof(struct pvo_head) * moea64_pteg_count; 861 off = (vm_offset_t)(moea64_pvo_table); 862 for (pa = off; pa < off + size; pa += PAGE_SIZE) 863 moea64_kenter(mmup, pa, pa); 864 size = BPVO_POOL_SIZE*sizeof(struct pvo_entry); 865 off = (vm_offset_t)(moea64_bpvo_pool); 866 for (pa = off; pa < off + size; pa += PAGE_SIZE) 867 moea64_kenter(mmup, pa, pa); 868 ENABLE_TRANS(msr); 869 870 /* 871 * Map certain important things, like ourselves. 872 * 873 * NOTE: We do not map the exception vector space. That code is 874 * used only in real mode, and leaving it unmapped allows us to 875 * catch NULL pointer deferences, instead of making NULL a valid 876 * address. 877 */ 878 879 DISABLE_TRANS(msr); 880 for (pa = kernelstart & ~PAGE_MASK; pa < kernelend; pa += PAGE_SIZE) 881 moea64_kenter(mmup, pa, pa); 882 ENABLE_TRANS(msr); 883 884 if (!ofw_real_mode) { 885 /* 886 * Set up the Open Firmware pmap and add its mappings. 
887 */ 888 889 moea64_pinit(mmup, &ofw_pmap); 890 ofw_pmap.pm_sr[KERNEL_SR] = kernel_pmap->pm_sr[KERNEL_SR]; 891 ofw_pmap.pm_sr[KERNEL2_SR] = kernel_pmap->pm_sr[KERNEL2_SR]; 892 893 if ((chosen = OF_finddevice("/chosen")) == -1) 894 panic("moea64_bootstrap: can't find /chosen"); 895 OF_getprop(chosen, "mmu", &mmui, 4); 896 if ((mmu = OF_instance_to_package(mmui)) == -1) 897 panic("moea64_bootstrap: can't get mmu package"); 898 if ((sz = OF_getproplen(mmu, "translations")) == -1) 899 panic("moea64_bootstrap: can't get ofw translation count"); 900 901 bzero(translations, sz); 902 if (OF_getprop(mmu, "translations", translations, sz) == -1) 903 panic("moea64_bootstrap: can't get ofw translations"); 904 905 CTR0(KTR_PMAP, "moea64_bootstrap: translations"); 906 sz /= sizeof(*translations); 907 qsort(translations, sz, sizeof (*translations), om_cmp); 908 909 for (i = 0, ofw_mappings = 0; i < sz; i++) { 910 CTR3(KTR_PMAP, "translation: pa=%#x va=%#x len=%#x", 911 (uint32_t)(translations[i].om_pa_lo), translations[i].om_va, 912 translations[i].om_len); 913 914 if (translations[i].om_pa_lo % PAGE_SIZE) 915 panic("OFW translation not page-aligned!"); 916 917 if (translations[i].om_pa_hi) 918 panic("OFW translations above 32-bit boundary!"); 919 920 /* Now enter the pages for this mapping */ 921 922 /* 923 * Lock the ofw pmap. pmap_kenter(), which we use for the 924 * pages the kernel also needs, does its own locking. 925 */ 926 PMAP_LOCK(&ofw_pmap); 927 DISABLE_TRANS(msr); 928 for (off = 0; off < translations[i].om_len; off += PAGE_SIZE) { 929 struct vm_page m; 930 931 /* Map low memory mappings into the kernel pmap, too. 932 * These are typically mappings made by the loader, 933 * so we need them if we want to keep executing. */ 934 935 if (translations[i].om_va + off < SEGMENT_LENGTH) 936 moea64_kenter(mmup, translations[i].om_va + off, 937 translations[i].om_va + off); 938 939 m.phys_addr = translations[i].om_pa_lo + off; 940 moea64_enter_locked(&ofw_pmap, 941 translations[i].om_va + off, &m, VM_PROT_ALL, 1); 942 943 ofw_mappings++; 944 } 945 ENABLE_TRANS(msr); 946 PMAP_UNLOCK(&ofw_pmap); 947 } 948 } 949 950#ifdef SMP 951 TLBSYNC(); 952#endif 953 954 /* 955 * Calculate the last available physical address. 956 */ 957 for (i = 0; phys_avail[i + 2] != 0; i += 2) 958 ; 959 Maxmem = powerpc_btop(phys_avail[i + 1]); 960 961 /* 962 * Initialize MMU and remap early physical mappings 963 */ 964 moea64_bridge_cpu_bootstrap(mmup,0); 965 mtmsr(mfmsr() | PSL_DR | PSL_IR); isync(); 966 pmap_bootstrapped++; 967 bs_remap_earlyboot(); 968 969 /* 970 * Set the start and end of kva. 971 */ 972 virtual_avail = VM_MIN_KERNEL_ADDRESS; 973 virtual_end = VM_MAX_KERNEL_ADDRESS; 974 975 /* 976 * Allocate some stupid buffer regions. 977 */ 978 979 pvo_allocator_start = virtual_avail; 980 virtual_avail += SEGMENT_LENGTH/4; 981 pvo_allocator_end = virtual_avail; 982 983 /* 984 * Allocate some things for page zeroing 985 */ 986 987 mtx_init(&moea64_scratchpage_mtx, "pvo zero page", NULL, MTX_DEF); 988 for (i = 0; i < 2; i++) { 989 moea64_scratchpage_va[i] = virtual_avail; 990 virtual_avail += PAGE_SIZE; 991 992 moea64_kenter(mmup,moea64_scratchpage_va[i],kernelstart); 993 994 LOCK_TABLE(); 995 moea64_scratchpage_pvo[i] = moea64_pvo_find_va(kernel_pmap, 996 moea64_scratchpage_va[i],&j); 997 moea64_scratchpage_pte[i] = moea64_pvo_to_pte( 998 moea64_scratchpage_pvo[i],j); 999 UNLOCK_TABLE(); 1000 } 1001 1002 /* 1003 * Allocate a kernel stack with a guard page for thread0 and map it 1004 * into the kernel page map. 
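 * Only the KSTACK_PAGES pages themselves are entered below; the
 * KSTACK_GUARD_PAGES of KVA skipped underneath them are deliberately
 * left unmapped, so that running off the end of the stack faults
 * instead of silently corrupting whatever follows.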
1005 */ 1006 pa = moea64_bootstrap_alloc(KSTACK_PAGES * PAGE_SIZE, PAGE_SIZE); 1007 va = virtual_avail + KSTACK_GUARD_PAGES * PAGE_SIZE; 1008 virtual_avail = va + KSTACK_PAGES * PAGE_SIZE; 1009 CTR2(KTR_PMAP, "moea64_bootstrap: kstack0 at %#x (%#x)", pa, va); 1010 thread0.td_kstack = va; 1011 thread0.td_kstack_pages = KSTACK_PAGES; 1012 for (i = 0; i < KSTACK_PAGES; i++) { 1013 moea64_kenter(mmup, va, pa); 1014 pa += PAGE_SIZE; 1015 va += PAGE_SIZE; 1016 } 1017 1018 /* 1019 * Allocate virtual address space for the message buffer. 1020 */ 1021 pa = msgbuf_phys = moea64_bootstrap_alloc(MSGBUF_SIZE, PAGE_SIZE); 1022 msgbufp = (struct msgbuf *)virtual_avail; 1023 va = virtual_avail; 1024 virtual_avail += round_page(MSGBUF_SIZE); 1025 while (va < virtual_avail) { 1026 moea64_kenter(mmup, va, pa); 1027 pa += PAGE_SIZE; 1028 va += PAGE_SIZE; 1029 } 1030 1031 /* 1032 * Allocate virtual address space for the dynamic percpu area. 1033 */ 1034 pa = moea64_bootstrap_alloc(DPCPU_SIZE, PAGE_SIZE); 1035 dpcpu = (void *)virtual_avail; 1036 va = virtual_avail; 1037 virtual_avail += DPCPU_SIZE; 1038 while (va < virtual_avail) { 1039 moea64_kenter(mmup, va, pa); 1040 pa += PAGE_SIZE; 1041 va += PAGE_SIZE; 1042 } 1043 dpcpu_init(dpcpu, 0); 1044} 1045 1046/* 1047 * Activate a user pmap. The pmap must be activated before its address 1048 * space can be accessed in any way. 1049 */ 1050void 1051moea64_activate(mmu_t mmu, struct thread *td) 1052{ 1053 pmap_t pm, pmr; 1054 1055 /* 1056 * Load all the data we need up front to encourage the compiler to 1057 * not issue any loads while we have interrupts disabled below. 1058 */ 1059 pm = &td->td_proc->p_vmspace->vm_pmap; 1060 pmr = pm->pmap_phys; 1061 1062 pm->pm_active |= PCPU_GET(cpumask); 1063 PCPU_SET(curpmap, pmr); 1064} 1065 1066void 1067moea64_deactivate(mmu_t mmu, struct thread *td) 1068{ 1069 pmap_t pm; 1070 1071 pm = &td->td_proc->p_vmspace->vm_pmap; 1072 pm->pm_active &= ~(PCPU_GET(cpumask)); 1073 PCPU_SET(curpmap, NULL); 1074} 1075 1076void 1077moea64_change_wiring(mmu_t mmu, pmap_t pm, vm_offset_t va, boolean_t wired) 1078{ 1079 struct pvo_entry *pvo; 1080 1081 PMAP_LOCK(pm); 1082 pvo = moea64_pvo_find_va(pm, va & ~ADDR_POFF, NULL); 1083 1084 if (pvo != NULL) { 1085 if (wired) { 1086 if ((pvo->pvo_vaddr & PVO_WIRED) == 0) 1087 pm->pm_stats.wired_count++; 1088 pvo->pvo_vaddr |= PVO_WIRED; 1089 } else { 1090 if ((pvo->pvo_vaddr & PVO_WIRED) != 0) 1091 pm->pm_stats.wired_count--; 1092 pvo->pvo_vaddr &= ~PVO_WIRED; 1093 } 1094 } 1095 PMAP_UNLOCK(pm); 1096} 1097 1098/* 1099 * Zero a page of physical memory by temporarily mapping it into the tlb. 1100 */ 1101void 1102moea64_zero_page(mmu_t mmu, vm_page_t m) 1103{ 1104 moea64_zero_page_area(mmu,m,0,PAGE_SIZE); 1105} 1106 1107/* 1108 * This goes through and sets the physical address of our 1109 * special scratch PTE to the PA we want to zero or copy.
Because 1110 * of locking issues (this can get called in pvo_enter() by 1111 * the UMA allocator), we can't use most other utility functions here 1112 */ 1113 1114static __inline 1115void moea64_set_scratchpage_pa(int which, vm_offset_t pa) { 1116 moea64_scratchpage_pvo[which]->pvo_pte.lpte.pte_lo &= 1117 (~LPTE_WIMG & ~LPTE_RPGN); 1118 moea64_scratchpage_pvo[which]->pvo_pte.lpte.pte_lo |= 1119 moea64_calc_wimg(pa) | (uint64_t)pa; 1120 1121 moea64_scratchpage_pte[which]->pte_hi &= ~LPTE_VALID; 1122 TLBIE(kernel_pmap, moea64_scratchpage_va[which]); 1123 1124 moea64_scratchpage_pte[which]->pte_lo = 1125 moea64_scratchpage_pvo[which]->pvo_pte.lpte.pte_lo; 1126 EIEIO(); 1127 1128 moea64_scratchpage_pte[which]->pte_hi |= LPTE_VALID; 1129 TLBIE(kernel_pmap, moea64_scratchpage_va[which]); 1130} 1131 1132void 1133moea64_copy_page(mmu_t mmu, vm_page_t msrc, vm_page_t mdst) 1134{ 1135 vm_offset_t dst; 1136 vm_offset_t src; 1137 1138 dst = VM_PAGE_TO_PHYS(mdst); 1139 src = VM_PAGE_TO_PHYS(msrc); 1140 1141 mtx_lock(&moea64_scratchpage_mtx); 1142 1143 moea64_set_scratchpage_pa(0,src); 1144 moea64_set_scratchpage_pa(1,dst); 1145 1146 kcopy((void *)moea64_scratchpage_va[0], 1147 (void *)moea64_scratchpage_va[1], PAGE_SIZE); 1148 1149 __syncicache((void *)moea64_scratchpage_va[1],PAGE_SIZE); 1150 1151 mtx_unlock(&moea64_scratchpage_mtx); 1152} 1153 1154void 1155moea64_zero_page_area(mmu_t mmu, vm_page_t m, int off, int size) 1156{ 1157 vm_offset_t pa = VM_PAGE_TO_PHYS(m); 1158 1159 if (!moea64_initialized) 1160 panic("moea64_zero_page: can't zero pa %#x", pa); 1161 if (size + off > PAGE_SIZE) 1162 panic("moea64_zero_page: size + off > PAGE_SIZE"); 1163 1164 mtx_lock(&moea64_scratchpage_mtx); 1165 1166 moea64_set_scratchpage_pa(0,pa); 1167 bzero((caddr_t)moea64_scratchpage_va[0] + off, size); 1168 __syncicache((void *)moea64_scratchpage_va[0],PAGE_SIZE); 1169 1170 mtx_unlock(&moea64_scratchpage_mtx); 1171} 1172 1173void 1174moea64_zero_page_idle(mmu_t mmu, vm_page_t m) 1175{ 1176 1177 moea64_zero_page(mmu, m); 1178} 1179 1180/* 1181 * Map the given physical page at the specified virtual address in the 1182 * target pmap with the protection requested. If specified the page 1183 * will be wired down. 1184 */ 1185void 1186moea64_enter(mmu_t mmu, pmap_t pmap, vm_offset_t va, vm_page_t m, 1187 vm_prot_t prot, boolean_t wired) 1188{ 1189 1190 vm_page_lock_queues(); 1191 PMAP_LOCK(pmap); 1192 moea64_enter_locked(pmap, va, m, prot, wired); 1193 vm_page_unlock_queues(); 1194 PMAP_UNLOCK(pmap); 1195} 1196 1197/* 1198 * Map the given physical page at the specified virtual address in the 1199 * target pmap with the protection requested. If specified the page 1200 * will be wired down. 1201 * 1202 * The page queues and pmap must be locked. 
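 *
 * For illustration, moea64_enter() above already shows the locking
 * sequence callers are expected to follow:
 *
 *	vm_page_lock_queues();
 *	PMAP_LOCK(pmap);
 *	moea64_enter_locked(pmap, va, m, prot, wired);
 *	vm_page_unlock_queues();
 *	PMAP_UNLOCK(pmap);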
1203 */ 1204 1205static void 1206moea64_enter_locked(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot, 1207 boolean_t wired) 1208{ 1209 struct pvo_head *pvo_head; 1210 uma_zone_t zone; 1211 vm_page_t pg; 1212 uint64_t pte_lo; 1213 u_int pvo_flags; 1214 int error; 1215 1216 if (!moea64_initialized) { 1217 pvo_head = &moea64_pvo_kunmanaged; 1218 pg = NULL; 1219 zone = moea64_upvo_zone; 1220 pvo_flags = 0; 1221 } else { 1222 pvo_head = vm_page_to_pvoh(m); 1223 pg = m; 1224 zone = moea64_mpvo_zone; 1225 pvo_flags = PVO_MANAGED; 1226 } 1227 1228 if (pmap_bootstrapped) 1229 mtx_assert(&vm_page_queue_mtx, MA_OWNED); 1230 PMAP_LOCK_ASSERT(pmap, MA_OWNED); 1231 1232 /* XXX change the pvo head for fake pages */ 1233 if ((m->flags & PG_FICTITIOUS) == PG_FICTITIOUS) { 1234 pvo_flags &= ~PVO_MANAGED; 1235 pvo_head = &moea64_pvo_kunmanaged; 1236 zone = moea64_upvo_zone; 1237 } 1238 1239 pte_lo = moea64_calc_wimg(VM_PAGE_TO_PHYS(m)); 1240 1241 if (prot & VM_PROT_WRITE) { 1242 pte_lo |= LPTE_BW; 1243 if (pmap_bootstrapped) 1244 vm_page_flag_set(m, PG_WRITEABLE); 1245 } else 1246 pte_lo |= LPTE_BR; 1247 1248 if (prot & VM_PROT_EXECUTE) 1249 pvo_flags |= VM_PROT_EXECUTE; 1250 1251 if (wired) 1252 pvo_flags |= PVO_WIRED; 1253 1254 if ((m->flags & PG_FICTITIOUS) != 0) 1255 pvo_flags |= PVO_FAKE; 1256 1257 error = moea64_pvo_enter(pmap, zone, pvo_head, va, VM_PAGE_TO_PHYS(m), 1258 pte_lo, pvo_flags); 1259 1260 if (pmap == kernel_pmap) 1261 TLBIE(pmap, va); 1262 1263 /* 1264 * Flush the page from the instruction cache if this page is 1265 * mapped executable and cacheable. 1266 */ 1267 if ((pte_lo & (LPTE_I | LPTE_G | LPTE_NOEXEC)) == 0) { 1268 moea64_syncicache(pmap, va, VM_PAGE_TO_PHYS(m), PAGE_SIZE); 1269 } 1270} 1271 1272static void 1273moea64_syncicache(pmap_t pmap, vm_offset_t va, vm_offset_t pa, vm_size_t sz) 1274{ 1275 /* 1276 * This is much trickier than on older systems because 1277 * we can't sync the icache on physical addresses directly 1278 * without a direct map. Instead we check a couple of cases 1279 * where the memory is already mapped in and, failing that, 1280 * use the same trick we use for page zeroing to create 1281 * a temporary mapping for this physical address. 1282 */ 1283 1284 if (!pmap_bootstrapped) { 1285 /* 1286 * If PMAP is not bootstrapped, we are likely to be 1287 * in real mode. 1288 */ 1289 __syncicache((void *)pa, sz); 1290 } else if (pmap == kernel_pmap) { 1291 __syncicache((void *)va, sz); 1292 } else { 1293 /* Use the scratch page to set up a temp mapping */ 1294 1295 mtx_lock(&moea64_scratchpage_mtx); 1296 1297 moea64_set_scratchpage_pa(1,pa); 1298 __syncicache((void *)moea64_scratchpage_va[1], sz); 1299 1300 mtx_unlock(&moea64_scratchpage_mtx); 1301 } 1302} 1303 1304/* 1305 * Maps a sequence of resident pages belonging to the same object. 1306 * The sequence begins with the given page m_start. This page is 1307 * mapped at the given virtual address start. Each subsequent page is 1308 * mapped at a virtual address that is offset from start by the same 1309 * amount as the page is offset from m_start within the object. The 1310 * last page in the sequence is the page with the largest offset from 1311 * m_start that can be mapped at a virtual address less than the given 1312 * virtual address end. Not every virtual page between start and end 1313 * is mapped; only those for which a resident page exists with the 1314 * corresponding offset from m_start are mapped. 
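 *
 * For example, if start maps the page at pindex p and the object also
 * has resident pages at pindexes p + 1 and p + 3, those pages are
 * entered at start + PAGE_SIZE and start + 3 * PAGE_SIZE respectively,
 * provided those addresses are still below end; the hole at p + 2 is
 * simply skipped.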
1315 */ 1316void 1317moea64_enter_object(mmu_t mmu, pmap_t pm, vm_offset_t start, vm_offset_t end, 1318 vm_page_t m_start, vm_prot_t prot) 1319{ 1320 vm_page_t m; 1321 vm_pindex_t diff, psize; 1322 1323 psize = atop(end - start); 1324 m = m_start; 1325 PMAP_LOCK(pm); 1326 while (m != NULL && (diff = m->pindex - m_start->pindex) < psize) { 1327 moea64_enter_locked(pm, start + ptoa(diff), m, prot & 1328 (VM_PROT_READ | VM_PROT_EXECUTE), FALSE); 1329 m = TAILQ_NEXT(m, listq); 1330 } 1331 PMAP_UNLOCK(pm); 1332} 1333 1334void 1335moea64_enter_quick(mmu_t mmu, pmap_t pm, vm_offset_t va, vm_page_t m, 1336 vm_prot_t prot) 1337{ 1338 PMAP_LOCK(pm); 1339 moea64_enter_locked(pm, va, m, prot & (VM_PROT_READ | VM_PROT_EXECUTE), 1340 FALSE); 1341 PMAP_UNLOCK(pm); 1342 1343} 1344 1345vm_paddr_t 1346moea64_extract(mmu_t mmu, pmap_t pm, vm_offset_t va) 1347{ 1348 struct pvo_entry *pvo; 1349 vm_paddr_t pa; 1350 1351 PMAP_LOCK(pm); 1352 pvo = moea64_pvo_find_va(pm, va & ~ADDR_POFF, NULL); 1353 if (pvo == NULL) 1354 pa = 0; 1355 else 1356 pa = (pvo->pvo_pte.lpte.pte_lo & LPTE_RPGN) | (va & ADDR_POFF); 1357 PMAP_UNLOCK(pm); 1358 return (pa); 1359} 1360 1361/* 1362 * Atomically extract and hold the physical page with the given 1363 * pmap and virtual address pair if that mapping permits the given 1364 * protection. 1365 */ 1366vm_page_t 1367moea64_extract_and_hold(mmu_t mmu, pmap_t pmap, vm_offset_t va, vm_prot_t prot) 1368{ 1369 struct pvo_entry *pvo; 1370 vm_page_t m; 1371 1372 m = NULL; 1373 vm_page_lock_queues(); 1374 PMAP_LOCK(pmap); 1375 pvo = moea64_pvo_find_va(pmap, va & ~ADDR_POFF, NULL); 1376 if (pvo != NULL && (pvo->pvo_pte.lpte.pte_hi & LPTE_VALID) && 1377 ((pvo->pvo_pte.lpte.pte_lo & LPTE_PP) == LPTE_RW || 1378 (prot & VM_PROT_WRITE) == 0)) { 1379 m = PHYS_TO_VM_PAGE(pvo->pvo_pte.lpte.pte_lo & LPTE_RPGN); 1380 vm_page_hold(m); 1381 } 1382 vm_page_unlock_queues(); 1383 PMAP_UNLOCK(pmap); 1384 return (m); 1385} 1386 1387static void * 1388moea64_uma_page_alloc(uma_zone_t zone, int bytes, u_int8_t *flags, int wait) 1389{ 1390 /* 1391 * This entire routine is a horrible hack to avoid bothering kmem 1392 * for new KVA addresses. Because this can get called from inside 1393 * kmem allocation routines, calling kmem for a new address here 1394 * can lead to multiply locking non-recursive mutexes. 
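 *
 * Instead, a page is taken straight from vm_page_alloc() and mapped with
 * moea64_pvo_enter() at the next free address in the window between
 * pvo_allocator_start and pvo_allocator_end that was reserved during
 * bootstrap.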
1395 */ 1396 static vm_pindex_t color; 1397 vm_offset_t va; 1398 1399 vm_page_t m; 1400 int pflags, needed_lock; 1401 1402 *flags = UMA_SLAB_PRIV; 1403 needed_lock = !PMAP_LOCKED(kernel_pmap); 1404 1405 if (needed_lock) 1406 PMAP_LOCK(kernel_pmap); 1407 1408 if ((wait & (M_NOWAIT|M_USE_RESERVE)) == M_NOWAIT) 1409 pflags = VM_ALLOC_INTERRUPT | VM_ALLOC_WIRED; 1410 else 1411 pflags = VM_ALLOC_SYSTEM | VM_ALLOC_WIRED; 1412 if (wait & M_ZERO) 1413 pflags |= VM_ALLOC_ZERO; 1414 1415 for (;;) { 1416 m = vm_page_alloc(NULL, color++, pflags | VM_ALLOC_NOOBJ); 1417 if (m == NULL) { 1418 if (wait & M_NOWAIT) 1419 return (NULL); 1420 VM_WAIT; 1421 } else 1422 break; 1423 } 1424 1425 va = pvo_allocator_start; 1426 pvo_allocator_start += PAGE_SIZE; 1427 1428 if (pvo_allocator_start >= pvo_allocator_end) 1429 panic("Ran out of PVO allocator buffer space!"); 1430 1431 moea64_pvo_enter(kernel_pmap, moea64_upvo_zone, 1432 &moea64_pvo_kunmanaged, va, VM_PAGE_TO_PHYS(m), LPTE_M, 1433 PVO_WIRED | PVO_BOOTSTRAP); 1434 1435 TLBIE(kernel_pmap, va); 1436 1437 if (needed_lock) 1438 PMAP_UNLOCK(kernel_pmap); 1439 1440 if ((wait & M_ZERO) && (m->flags & PG_ZERO) == 0) 1441 bzero((void *)va, PAGE_SIZE); 1442 1443 return (void *)va; 1444} 1445 1446void 1447moea64_init(mmu_t mmu) 1448{ 1449 1450 CTR0(KTR_PMAP, "moea64_init"); 1451 1452 moea64_upvo_zone = uma_zcreate("UPVO entry", sizeof (struct pvo_entry), 1453 NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 1454 UMA_ZONE_VM | UMA_ZONE_NOFREE); 1455 moea64_mpvo_zone = uma_zcreate("MPVO entry", sizeof(struct pvo_entry), 1456 NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 1457 UMA_ZONE_VM | UMA_ZONE_NOFREE); 1458 1459 if (!hw_direct_map) { 1460 uma_zone_set_allocf(moea64_upvo_zone,moea64_uma_page_alloc); 1461 uma_zone_set_allocf(moea64_mpvo_zone,moea64_uma_page_alloc); 1462 } 1463 1464 moea64_initialized = TRUE; 1465} 1466 1467boolean_t 1468moea64_is_modified(mmu_t mmu, vm_page_t m) 1469{ 1470 1471 if ((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) != 0) 1472 return (FALSE); 1473 1474 return (moea64_query_bit(m, LPTE_CHG)); 1475} 1476 1477void 1478moea64_clear_reference(mmu_t mmu, vm_page_t m) 1479{ 1480 1481 if ((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) != 0) 1482 return; 1483 moea64_clear_bit(m, LPTE_REF, NULL); 1484} 1485 1486void 1487moea64_clear_modify(mmu_t mmu, vm_page_t m) 1488{ 1489 1490 if ((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) != 0) 1491 return; 1492 moea64_clear_bit(m, LPTE_CHG, NULL); 1493} 1494 1495/* 1496 * Clear the write and modified bits in each of the given page's mappings. 
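 * Each writable mapping is downgraded to read-only (LPTE_BR), and any
 * change bit accumulated in the meantime is pushed back to the page via
 * vm_page_dirty() before PG_WRITEABLE is cleared.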
1497 */ 1498void 1499moea64_remove_write(mmu_t mmu, vm_page_t m) 1500{ 1501 struct pvo_entry *pvo; 1502 struct lpte *pt; 1503 pmap_t pmap; 1504 uint64_t lo; 1505 1506 mtx_assert(&vm_page_queue_mtx, MA_OWNED); 1507 if ((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) != 0 || 1508 (m->flags & PG_WRITEABLE) == 0) 1509 return; 1510 lo = moea64_attr_fetch(m); 1511 SYNC(); 1512 LIST_FOREACH(pvo, vm_page_to_pvoh(m), pvo_vlink) { 1513 pmap = pvo->pvo_pmap; 1514 PMAP_LOCK(pmap); 1515 if ((pvo->pvo_pte.lpte.pte_lo & LPTE_PP) != LPTE_BR) { 1516 LOCK_TABLE(); 1517 pt = moea64_pvo_to_pte(pvo, -1); 1518 pvo->pvo_pte.lpte.pte_lo &= ~LPTE_PP; 1519 pvo->pvo_pte.lpte.pte_lo |= LPTE_BR; 1520 if (pt != NULL) { 1521 moea64_pte_synch(pt, &pvo->pvo_pte.lpte); 1522 lo |= pvo->pvo_pte.lpte.pte_lo; 1523 pvo->pvo_pte.lpte.pte_lo &= ~LPTE_CHG; 1524 moea64_pte_change(pt, &pvo->pvo_pte.lpte, 1525 pvo->pvo_pmap, pvo->pvo_vaddr); 1526 } 1527 UNLOCK_TABLE(); 1528 } 1529 PMAP_UNLOCK(pmap); 1530 } 1531 if ((lo & LPTE_CHG) != 0) { 1532 moea64_attr_clear(m, LPTE_CHG); 1533 vm_page_dirty(m); 1534 } 1535 vm_page_flag_clear(m, PG_WRITEABLE); 1536} 1537 1538/* 1539 * moea64_ts_referenced: 1540 * 1541 * Return a count of reference bits for a page, clearing those bits. 1542 * It is not necessary for every reference bit to be cleared, but it 1543 * is necessary that 0 only be returned when there are truly no 1544 * reference bits set. 1545 * 1546 * XXX: The exact number of bits to check and clear is a matter that 1547 * should be tested and standardized at some point in the future for 1548 * optimal aging of shared pages. 1549 */ 1550boolean_t 1551moea64_ts_referenced(mmu_t mmu, vm_page_t m) 1552{ 1553 int count; 1554 1555 if ((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) != 0) 1556 return (0); 1557 1558 count = moea64_clear_bit(m, LPTE_REF, NULL); 1559 1560 return (count); 1561} 1562 1563/* 1564 * Map a wired page into kernel virtual address space. 1565 */ 1566void 1567moea64_kenter(mmu_t mmu, vm_offset_t va, vm_offset_t pa) 1568{ 1569 uint64_t pte_lo; 1570 int error; 1571 1572 if (!pmap_bootstrapped) { 1573 if (va >= VM_MIN_KERNEL_ADDRESS && va < VM_MAX_KERNEL_ADDRESS) 1574 panic("Trying to enter an address in KVA -- %#x!\n",pa); 1575 } 1576 1577 pte_lo = moea64_calc_wimg(pa); 1578 1579 PMAP_LOCK(kernel_pmap); 1580 error = moea64_pvo_enter(kernel_pmap, moea64_upvo_zone, 1581 &moea64_pvo_kunmanaged, va, pa, pte_lo, 1582 PVO_WIRED | VM_PROT_EXECUTE); 1583 1584 TLBIE(kernel_pmap, va); 1585 1586 if (error != 0 && error != ENOENT) 1587 panic("moea64_kenter: failed to enter va %#x pa %#x: %d", va, 1588 pa, error); 1589 1590 /* 1591 * Flush the memory from the instruction cache. 1592 */ 1593 if ((pte_lo & (LPTE_I | LPTE_G)) == 0) { 1594 __syncicache((void *)va, PAGE_SIZE); 1595 } 1596 PMAP_UNLOCK(kernel_pmap); 1597} 1598 1599/* 1600 * Extract the physical page address associated with the given kernel virtual 1601 * address. 1602 */ 1603vm_offset_t 1604moea64_kextract(mmu_t mmu, vm_offset_t va) 1605{ 1606 struct pvo_entry *pvo; 1607 vm_paddr_t pa; 1608 1609 PMAP_LOCK(kernel_pmap); 1610 pvo = moea64_pvo_find_va(kernel_pmap, va & ~ADDR_POFF, NULL); 1611 KASSERT(pvo != NULL, ("moea64_kextract: no addr found")); 1612 pa = (pvo->pvo_pte.lpte.pte_lo & LPTE_RPGN) | (va & ADDR_POFF); 1613 PMAP_UNLOCK(kernel_pmap); 1614 return (pa); 1615} 1616 1617/* 1618 * Remove a wired page from kernel virtual address space. 
1619 */ 1620void 1621moea64_kremove(mmu_t mmu, vm_offset_t va) 1622{ 1623 moea64_remove(mmu, kernel_pmap, va, va + PAGE_SIZE); 1624} 1625 1626/* 1627 * Map a range of physical addresses into kernel virtual address space. 1628 * 1629 * The value passed in *virt is a suggested virtual address for the mapping. 1630 * Architectures which can support a direct-mapped physical to virtual region 1631 * can return the appropriate address within that region, leaving '*virt' 1632 * unchanged. We cannot and therefore do not; *virt is updated with the 1633 * first usable address after the mapped region. 1634 */ 1635vm_offset_t 1636moea64_map(mmu_t mmu, vm_offset_t *virt, vm_offset_t pa_start, 1637 vm_offset_t pa_end, int prot) 1638{ 1639 vm_offset_t sva, va; 1640 1641 sva = *virt; 1642 va = sva; 1643 for (; pa_start < pa_end; pa_start += PAGE_SIZE, va += PAGE_SIZE) 1644 moea64_kenter(mmu, va, pa_start); 1645 *virt = va; 1646 1647 return (sva); 1648} 1649 1650/* 1651 * Returns true if the pmap's pv is one of the first 1652 * 16 pvs linked to from this page. This count may 1653 * be changed upwards or downwards in the future; it 1654 * is only necessary that true be returned for a small 1655 * subset of pmaps for proper page aging. 1656 */ 1657boolean_t 1658moea64_page_exists_quick(mmu_t mmu, pmap_t pmap, vm_page_t m) 1659{ 1660 int loops; 1661 struct pvo_entry *pvo; 1662 1663 if (!moea64_initialized || (m->flags & PG_FICTITIOUS)) 1664 return FALSE; 1665 1666 loops = 0; 1667 LIST_FOREACH(pvo, vm_page_to_pvoh(m), pvo_vlink) { 1668 if (pvo->pvo_pmap == pmap) 1669 return (TRUE); 1670 if (++loops >= 16) 1671 break; 1672 } 1673 1674 return (FALSE); 1675} 1676 1677/* 1678 * Return the number of managed mappings to the given physical page 1679 * that are wired. 1680 */ 1681int 1682moea64_page_wired_mappings(mmu_t mmu, vm_page_t m) 1683{ 1684 struct pvo_entry *pvo; 1685 int count; 1686 1687 count = 0; 1688 if (!moea64_initialized || (m->flags & PG_FICTITIOUS) != 0) 1689 return (count); 1690 mtx_assert(&vm_page_queue_mtx, MA_OWNED); 1691 LIST_FOREACH(pvo, vm_page_to_pvoh(m), pvo_vlink) 1692 if ((pvo->pvo_vaddr & PVO_WIRED) != 0) 1693 count++; 1694 return (count); 1695} 1696 1697static u_int moea64_vsidcontext; 1698 1699void 1700moea64_pinit(mmu_t mmu, pmap_t pmap) 1701{ 1702 int i, mask; 1703 u_int entropy; 1704 1705 PMAP_LOCK_INIT(pmap); 1706 1707 entropy = 0; 1708 __asm __volatile("mftb %0" : "=r"(entropy)); 1709 1710 if (pmap_bootstrapped) 1711 pmap->pmap_phys = (pmap_t)moea64_kextract(mmu, (vm_offset_t)pmap); 1712 else 1713 pmap->pmap_phys = pmap; 1714 1715 /* 1716 * Allocate some segment registers for this pmap. 1717 */ 1718 for (i = 0; i < NPMAPS; i += VSID_NBPW) { 1719 u_int hash, n; 1720 1721 /* 1722 * Create a new value by multiplying by a prime and adding in 1723 * entropy from the timebase register. This is to make the 1724 * VSID more random so that the PT hash function collides 1725 * less often. (Note that the prime causes gcc to do shifts 1726 * instead of a multiply.) 1727 */ 1728 moea64_vsidcontext = (moea64_vsidcontext * 0x1105) + entropy; 1729 hash = moea64_vsidcontext & (NPMAPS - 1); 1730 if (hash == 0) /* 0 is special, avoid it */ 1731 continue; 1732 n = hash >> 5; 1733 mask = 1 << (hash & (VSID_NBPW - 1)); 1734 hash = (moea64_vsidcontext & 0xfffff); 1735 if (moea64_vsid_bitmap[n] & mask) { /* collision? */ 1736 /* anything free in this bucket?
*/ 1737 if (moea64_vsid_bitmap[n] == 0xffffffff) { 1738 entropy = (moea64_vsidcontext >> 20); 1739 continue; 1740 } 1741 i = ffs(~moea64_vsid_bitmap[n]) - 1; 1742 mask = 1 << i; 1743 hash &= 0xfffff & ~(VSID_NBPW - 1); 1744 hash |= i; 1745 } 1746 moea64_vsid_bitmap[n] |= mask; 1747 for (i = 0; i < 16; i++) { 1748 pmap->pm_sr[i] = VSID_MAKE(i, hash); 1749 } 1750 return; 1751 } 1752 1753 panic("moea64_pinit: out of segments"); 1754} 1755 1756/* 1757 * Initialize the pmap associated with process 0. 1758 */ 1759void 1760moea64_pinit0(mmu_t mmu, pmap_t pm) 1761{ 1762 moea64_pinit(mmu, pm); 1763 bzero(&pm->pm_stats, sizeof(pm->pm_stats)); 1764} 1765 1766/* 1767 * Set the physical protection on the specified range of this map as requested. 1768 */ 1769void 1770moea64_protect(mmu_t mmu, pmap_t pm, vm_offset_t sva, vm_offset_t eva, 1771 vm_prot_t prot) 1772{ 1773 struct pvo_entry *pvo; 1774 struct lpte *pt; 1775 int pteidx; 1776 1777 CTR4(KTR_PMAP, "moea64_protect: pm=%p sva=%#x eva=%#x prot=%#x", pm, sva, 1778 eva, prot); 1779 1780 1781 KASSERT(pm == &curproc->p_vmspace->vm_pmap || pm == kernel_pmap, 1782 ("moea64_protect: non current pmap")); 1783 1784 if ((prot & VM_PROT_READ) == VM_PROT_NONE) { 1785 moea64_remove(mmu, pm, sva, eva); 1786 return; 1787 } 1788 1789 vm_page_lock_queues(); 1790 PMAP_LOCK(pm); 1791 for (; sva < eva; sva += PAGE_SIZE) { 1792 pvo = moea64_pvo_find_va(pm, sva, &pteidx); 1793 if (pvo == NULL) 1794 continue; 1795 1796 /* 1797 * Grab the PTE pointer before we diddle with the cached PTE 1798 * copy. 1799 */ 1800 LOCK_TABLE(); 1801 pt = moea64_pvo_to_pte(pvo, pteidx); 1802 1803 /* 1804 * Change the protection of the page. 1805 */ 1806 pvo->pvo_pte.lpte.pte_lo &= ~LPTE_PP; 1807 pvo->pvo_pte.lpte.pte_lo |= LPTE_BR; 1808 pvo->pvo_pte.lpte.pte_lo &= ~LPTE_NOEXEC; 1809 if ((prot & VM_PROT_EXECUTE) == 0) 1810 pvo->pvo_pte.lpte.pte_lo |= LPTE_NOEXEC; 1811 1812 /* 1813 * If the PVO is in the page table, update that pte as well. 1814 */ 1815 if (pt != NULL) { 1816 moea64_pte_change(pt, &pvo->pvo_pte.lpte, 1817 pvo->pvo_pmap, pvo->pvo_vaddr); 1818 if ((pvo->pvo_pte.lpte.pte_lo & 1819 (LPTE_I | LPTE_G | LPTE_NOEXEC)) == 0) { 1820 moea64_syncicache(pm, sva, 1821 pvo->pvo_pte.lpte.pte_lo & LPTE_RPGN, 1822 PAGE_SIZE); 1823 } 1824 } 1825 UNLOCK_TABLE(); 1826 } 1827 vm_page_unlock_queues(); 1828 PMAP_UNLOCK(pm); 1829} 1830 1831/* 1832 * Map a list of wired pages into kernel virtual address space. This is 1833 * intended for temporary mappings which do not need page modification or 1834 * references recorded. Existing mappings in the region are overwritten. 1835 */ 1836void 1837moea64_qenter(mmu_t mmu, vm_offset_t va, vm_page_t *m, int count) 1838{ 1839 while (count-- > 0) { 1840 moea64_kenter(mmu, va, VM_PAGE_TO_PHYS(*m)); 1841 va += PAGE_SIZE; 1842 m++; 1843 } 1844} 1845 1846/* 1847 * Remove page mappings from kernel virtual address space. Intended for 1848 * temporary mappings entered by moea64_qenter.
1849 */ 1850void 1851moea64_qremove(mmu_t mmu, vm_offset_t va, int count) 1852{ 1853 while (count-- > 0) { 1854 moea64_kremove(mmu, va); 1855 va += PAGE_SIZE; 1856 } 1857} 1858 1859void 1860moea64_release(mmu_t mmu, pmap_t pmap) 1861{ 1862 int idx, mask; 1863 1864 /* 1865 * Free segment register's VSID 1866 */ 1867 if (pmap->pm_sr[0] == 0) 1868 panic("moea64_release"); 1869 1870 idx = VSID_TO_HASH(pmap->pm_sr[0]) & (NPMAPS-1); 1871 mask = 1 << (idx % VSID_NBPW); 1872 idx /= VSID_NBPW; 1873 moea64_vsid_bitmap[idx] &= ~mask; 1874 PMAP_LOCK_DESTROY(pmap); 1875} 1876 1877/* 1878 * Remove the given range of addresses from the specified map. 1879 */ 1880void 1881moea64_remove(mmu_t mmu, pmap_t pm, vm_offset_t sva, vm_offset_t eva) 1882{ 1883 struct pvo_entry *pvo; 1884 int pteidx; 1885 1886 vm_page_lock_queues(); 1887 PMAP_LOCK(pm); 1888 for (; sva < eva; sva += PAGE_SIZE) { 1889 pvo = moea64_pvo_find_va(pm, sva, &pteidx); 1890 if (pvo != NULL) { 1891 moea64_pvo_remove(pvo, pteidx); 1892 } 1893 } 1894 vm_page_unlock_queues(); 1895 PMAP_UNLOCK(pm); 1896} 1897 1898/* 1899 * Remove physical page from all pmaps in which it resides. moea64_pvo_remove() 1900 * will reflect changes in pte's back to the vm_page. 1901 */ 1902void 1903moea64_remove_all(mmu_t mmu, vm_page_t m) 1904{ 1905 struct pvo_head *pvo_head; 1906 struct pvo_entry *pvo, *next_pvo; 1907 pmap_t pmap; 1908 1909 mtx_assert(&vm_page_queue_mtx, MA_OWNED); 1910 1911 pvo_head = vm_page_to_pvoh(m); 1912 for (pvo = LIST_FIRST(pvo_head); pvo != NULL; pvo = next_pvo) { 1913 next_pvo = LIST_NEXT(pvo, pvo_vlink); 1914 1915 MOEA_PVO_CHECK(pvo); /* sanity check */ 1916 pmap = pvo->pvo_pmap; 1917 PMAP_LOCK(pmap); 1918 moea64_pvo_remove(pvo, -1); 1919 PMAP_UNLOCK(pmap); 1920 } 1921 vm_page_flag_clear(m, PG_WRITEABLE); 1922} 1923 1924/* 1925 * Allocate a physical page of memory directly from the phys_avail map. 1926 * Can only be called from moea64_bootstrap before avail start and end are 1927 * calculated. 
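 *
 * The request is carved out of one of the phys_avail[] regions: if the
 * allocation lines up with either end of a region that region is simply
 * shrunk, otherwise the region is split in two and a new entry is
 * inserted into the array.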
1928 */ 1929static vm_offset_t 1930moea64_bootstrap_alloc(vm_size_t size, u_int align) 1931{ 1932 vm_offset_t s, e; 1933 int i, j; 1934 1935 size = round_page(size); 1936 for (i = 0; phys_avail[i + 1] != 0; i += 2) { 1937 if (align != 0) 1938 s = (phys_avail[i] + align - 1) & ~(align - 1); 1939 else 1940 s = phys_avail[i]; 1941 e = s + size; 1942 1943 if (s < phys_avail[i] || e > phys_avail[i + 1]) 1944 continue; 1945 1946 if (s == phys_avail[i]) { 1947 phys_avail[i] += size; 1948 } else if (e == phys_avail[i + 1]) { 1949 phys_avail[i + 1] -= size; 1950 } else { 1951 for (j = phys_avail_count * 2; j > i; j -= 2) { 1952 phys_avail[j] = phys_avail[j - 2]; 1953 phys_avail[j + 1] = phys_avail[j - 1]; 1954 } 1955 1956 phys_avail[i + 3] = phys_avail[i + 1]; 1957 phys_avail[i + 1] = s; 1958 phys_avail[i + 2] = e; 1959 phys_avail_count++; 1960 } 1961 1962 return (s); 1963 } 1964 panic("moea64_bootstrap_alloc: could not allocate memory"); 1965} 1966 1967static void 1968tlbia(void) 1969{ 1970 vm_offset_t i; 1971 register_t msr, scratch; 1972 1973 for (i = 0; i < 0xFF000; i += 0x00001000) { 1974 __asm __volatile("\ 1975 mfmsr %0; \ 1976 mr %1, %0; \ 1977 insrdi %1,%3,1,0; \ 1978 mtmsrd %1; \ 1979 ptesync; \ 1980 \ 1981 tlbiel %2; \ 1982 \ 1983 mtmsrd %0; \ 1984 eieio; \ 1985 tlbsync; \ 1986 ptesync;" 1987 : "=r"(msr), "=r"(scratch) : "r"(i), "r"(1)); 1988 } 1989} 1990 1991static int 1992moea64_pvo_enter(pmap_t pm, uma_zone_t zone, struct pvo_head *pvo_head, 1993 vm_offset_t va, vm_offset_t pa, uint64_t pte_lo, int flags) 1994{ 1995 struct pvo_entry *pvo; 1996 uint64_t vsid; 1997 int first; 1998 u_int ptegidx; 1999 int i; 2000 int bootstrap; 2001 2002 /* 2003 * One nasty thing that can happen here is that the UMA calls to 2004 * allocate new PVOs need to map more memory, which calls pvo_enter(), 2005 * which calls UMA... 2006 * 2007 * We break the loop by detecting recursion and allocating out of 2008 * the bootstrap pool. 2009 */ 2010 2011 moea64_pvo_enter_calls++; 2012 first = 0; 2013 bootstrap = (flags & PVO_BOOTSTRAP); 2014 2015 if (!moea64_initialized) 2016 bootstrap = 1; 2017 2018 /* 2019 * Compute the PTE Group index. 2020 */ 2021 va &= ~ADDR_POFF; 2022 vsid = va_to_vsid(pm, va); 2023 ptegidx = va_to_pteg(vsid, va); 2024 2025 /* 2026 * Remove any existing mapping for this page. Reuse the pvo entry if 2027 * there is a mapping. 2028 */ 2029 LOCK_TABLE(); 2030 2031 LIST_FOREACH(pvo, &moea64_pvo_table[ptegidx], pvo_olink) { 2032 if (pvo->pvo_pmap == pm && PVO_VADDR(pvo) == va) { 2033 if ((pvo->pvo_pte.lpte.pte_lo & LPTE_RPGN) == pa && 2034 (pvo->pvo_pte.lpte.pte_lo & LPTE_PP) == 2035 (pte_lo & LPTE_PP)) { 2036 UNLOCK_TABLE(); 2037 return (0); 2038 } 2039 moea64_pvo_remove(pvo, -1); 2040 break; 2041 } 2042 } 2043 2044 /* 2045 * If we aren't overwriting a mapping, try to allocate. 2046 */ 2047 if (bootstrap) { 2048 if (moea64_bpvo_pool_index >= BPVO_POOL_SIZE) { 2049 panic("moea64_enter: bpvo pool exhausted, %d, %d, %d", 2050 moea64_bpvo_pool_index, BPVO_POOL_SIZE, 2051 BPVO_POOL_SIZE * sizeof(struct pvo_entry)); 2052 } 2053 pvo = &moea64_bpvo_pool[moea64_bpvo_pool_index]; 2054 moea64_bpvo_pool_index++; 2055 bootstrap = 1; 2056 } else { 2057 /* 2058 * Note: drop the table around the UMA allocation in 2059 * case the UMA allocator needs to manipulate the page 2060 * table. The mapping we are working with is already 2061 * protected by the PMAP lock. 
                 */
                UNLOCK_TABLE();
                pvo = uma_zalloc(zone, M_NOWAIT);
                LOCK_TABLE();
        }

        if (pvo == NULL) {
                UNLOCK_TABLE();
                return (ENOMEM);
        }

        moea64_pvo_entries++;
        pvo->pvo_vaddr = va;
        pvo->pvo_pmap = pm;
        LIST_INSERT_HEAD(&moea64_pvo_table[ptegidx], pvo, pvo_olink);
        pvo->pvo_vaddr &= ~ADDR_POFF;

        if (!(flags & VM_PROT_EXECUTE))
                pte_lo |= LPTE_NOEXEC;
        if (flags & PVO_WIRED)
                pvo->pvo_vaddr |= PVO_WIRED;
        if (pvo_head != &moea64_pvo_kunmanaged)
                pvo->pvo_vaddr |= PVO_MANAGED;
        if (bootstrap)
                pvo->pvo_vaddr |= PVO_BOOTSTRAP;
        if (flags & PVO_FAKE)
                pvo->pvo_vaddr |= PVO_FAKE;

        moea64_pte_create(&pvo->pvo_pte.lpte, vsid, va,
            (uint64_t)(pa) | pte_lo);

        /*
         * Remember if the list was empty and therefore will be the first
         * item.
         */
        if (LIST_FIRST(pvo_head) == NULL)
                first = 1;
        LIST_INSERT_HEAD(pvo_head, pvo, pvo_vlink);

        if (pvo->pvo_vaddr & PVO_WIRED)
                pm->pm_stats.wired_count++;
        pm->pm_stats.resident_count++;

        /*
         * We hope this succeeds, but it isn't required; a failed insert
         * currently panics rather than being handled gracefully.
         */
        i = moea64_pte_insert(ptegidx, &pvo->pvo_pte.lpte);
        if (i >= 0) {
                PVO_PTEGIDX_SET(pvo, i);
        } else {
                panic("moea64_pvo_enter: overflow");
                moea64_pte_overflow++;
        }

        UNLOCK_TABLE();

        return (first ? ENOENT : 0);
}

static void
moea64_pvo_remove(struct pvo_entry *pvo, int pteidx)
{
        struct lpte *pt;

        /*
         * If there is an active pte entry, we need to deactivate it (and
         * save the ref & chg bits).
         */
        LOCK_TABLE();
        pt = moea64_pvo_to_pte(pvo, pteidx);
        if (pt != NULL) {
                moea64_pte_unset(pt, &pvo->pvo_pte.lpte, pvo->pvo_pmap,
                    pvo->pvo_vaddr);
                PVO_PTEGIDX_CLR(pvo);
        } else {
                moea64_pte_overflow--;
        }
        UNLOCK_TABLE();

        /*
         * Update our statistics.
         */
        pvo->pvo_pmap->pm_stats.resident_count--;
        if (pvo->pvo_vaddr & PVO_WIRED)
                pvo->pvo_pmap->pm_stats.wired_count--;

        /*
         * Save the REF/CHG bits into their cache if the page is managed.
         */
        if ((pvo->pvo_vaddr & (PVO_MANAGED|PVO_FAKE)) == PVO_MANAGED) {
                struct vm_page *pg;

                pg = PHYS_TO_VM_PAGE(pvo->pvo_pte.lpte.pte_lo & LPTE_RPGN);
                if (pg != NULL) {
                        moea64_attr_save(pg, pvo->pvo_pte.lpte.pte_lo &
                            (LPTE_REF | LPTE_CHG));
                }
        }

        /*
         * Remove this PVO from the PV list.
         */
        LIST_REMOVE(pvo, pvo_vlink);

        /*
         * Remove this from the overflow list and return it to the pool
         * if we aren't going to reuse it.
         */
        LIST_REMOVE(pvo, pvo_olink);
        if (!(pvo->pvo_vaddr & PVO_BOOTSTRAP))
                uma_zfree(pvo->pvo_vaddr & PVO_MANAGED ? moea64_mpvo_zone :
                    moea64_upvo_zone, pvo);
        moea64_pvo_entries--;
        moea64_pvo_remove_calls++;
}

static __inline int
moea64_pvo_pte_index(const struct pvo_entry *pvo, int ptegidx)
{
        int pteidx;

        /*
         * We can find the actual pte entry without searching by grabbing
         * the PTEG slot index stored in the PVO and by
         * noticing the HID bit.
2187 */ 2188 pteidx = ptegidx * 8 + PVO_PTEGIDX_GET(pvo); 2189 if (pvo->pvo_pte.lpte.pte_hi & LPTE_HID) 2190 pteidx ^= moea64_pteg_mask * 8; 2191 2192 return (pteidx); 2193} 2194 2195static struct pvo_entry * 2196moea64_pvo_find_va(pmap_t pm, vm_offset_t va, int *pteidx_p) 2197{ 2198 struct pvo_entry *pvo; 2199 int ptegidx; 2200 uint64_t vsid; 2201 2202 va &= ~ADDR_POFF; 2203 vsid = va_to_vsid(pm, va); 2204 ptegidx = va_to_pteg(vsid, va); 2205 2206 LOCK_TABLE(); 2207 LIST_FOREACH(pvo, &moea64_pvo_table[ptegidx], pvo_olink) { 2208 if (pvo->pvo_pmap == pm && PVO_VADDR(pvo) == va) { 2209 if (pteidx_p) 2210 *pteidx_p = moea64_pvo_pte_index(pvo, ptegidx); 2211 break; 2212 } 2213 } 2214 UNLOCK_TABLE(); 2215 2216 return (pvo); 2217} 2218 2219static struct lpte * 2220moea64_pvo_to_pte(const struct pvo_entry *pvo, int pteidx) 2221{ 2222 struct lpte *pt; 2223 2224 /* 2225 * If we haven't been supplied the ptegidx, calculate it. 2226 */ 2227 if (pteidx == -1) { 2228 int ptegidx; 2229 uint64_t vsid; 2230 2231 vsid = va_to_vsid(pvo->pvo_pmap, pvo->pvo_vaddr); 2232 ptegidx = va_to_pteg(vsid, pvo->pvo_vaddr); 2233 pteidx = moea64_pvo_pte_index(pvo, ptegidx); 2234 } 2235 2236 pt = &moea64_pteg_table[pteidx >> 3].pt[pteidx & 7]; 2237 2238 if ((pvo->pvo_pte.lpte.pte_hi & LPTE_VALID) && 2239 !PVO_PTEGIDX_ISSET(pvo)) { 2240 panic("moea64_pvo_to_pte: pvo %p has valid pte in pvo but no " 2241 "valid pte index", pvo); 2242 } 2243 2244 if ((pvo->pvo_pte.lpte.pte_hi & LPTE_VALID) == 0 && 2245 PVO_PTEGIDX_ISSET(pvo)) { 2246 panic("moea64_pvo_to_pte: pvo %p has valid pte index in pvo " 2247 "pvo but no valid pte", pvo); 2248 } 2249 2250 if ((pt->pte_hi ^ (pvo->pvo_pte.lpte.pte_hi & ~LPTE_VALID)) == 2251 LPTE_VALID) { 2252 if ((pvo->pvo_pte.lpte.pte_hi & LPTE_VALID) == 0) { 2253 panic("moea64_pvo_to_pte: pvo %p has valid pte in " 2254 "moea64_pteg_table %p but invalid in pvo", pvo, pt); 2255 } 2256 2257 if (((pt->pte_lo ^ pvo->pvo_pte.lpte.pte_lo) & 2258 ~(LPTE_CHG|LPTE_REF)) != 0) { 2259 panic("moea64_pvo_to_pte: pvo %p pte does not match " 2260 "pte %p in moea64_pteg_table difference is %#x", 2261 pvo, pt, 2262 (uint32_t)(pt->pte_lo ^ pvo->pvo_pte.lpte.pte_lo)); 2263 } 2264 2265 ASSERT_TABLE_LOCK(); 2266 return (pt); 2267 } 2268 2269 if (pvo->pvo_pte.lpte.pte_hi & LPTE_VALID) { 2270 panic("moea64_pvo_to_pte: pvo %p has invalid pte %p in " 2271 "moea64_pteg_table but valid in pvo", pvo, pt); 2272 } 2273 2274 return (NULL); 2275} 2276 2277static int 2278moea64_pte_insert(u_int ptegidx, struct lpte *pvo_pt) 2279{ 2280 struct lpte *pt; 2281 int i; 2282 2283 ASSERT_TABLE_LOCK(); 2284 2285 /* 2286 * First try primary hash. 2287 */ 2288 for (pt = moea64_pteg_table[ptegidx].pt, i = 0; i < 8; i++, pt++) { 2289 if ((pt->pte_hi & LPTE_VALID) == 0) { 2290 pvo_pt->pte_hi &= ~LPTE_HID; 2291 moea64_pte_set(pt, pvo_pt); 2292 return (i); 2293 } 2294 } 2295 2296 /* 2297 * Now try secondary hash. 
2298 */ 2299 ptegidx ^= moea64_pteg_mask; 2300 2301 for (pt = moea64_pteg_table[ptegidx].pt, i = 0; i < 8; i++, pt++) { 2302 if ((pt->pte_hi & LPTE_VALID) == 0) { 2303 pvo_pt->pte_hi |= LPTE_HID; 2304 moea64_pte_set(pt, pvo_pt); 2305 return (i); 2306 } 2307 } 2308 2309 panic("moea64_pte_insert: overflow"); 2310 return (-1); 2311} 2312 2313static boolean_t 2314moea64_query_bit(vm_page_t m, u_int64_t ptebit) 2315{ 2316 struct pvo_entry *pvo; 2317 struct lpte *pt; 2318 2319#if 0 2320 if (moea64_attr_fetch(m) & ptebit) 2321 return (TRUE); 2322#endif 2323 2324 LIST_FOREACH(pvo, vm_page_to_pvoh(m), pvo_vlink) { 2325 MOEA_PVO_CHECK(pvo); /* sanity check */ 2326 2327 /* 2328 * See if we saved the bit off. If so, cache it and return 2329 * success. 2330 */ 2331 if (pvo->pvo_pte.lpte.pte_lo & ptebit) { 2332 moea64_attr_save(m, ptebit); 2333 MOEA_PVO_CHECK(pvo); /* sanity check */ 2334 return (TRUE); 2335 } 2336 } 2337 2338 /* 2339 * No luck, now go through the hard part of looking at the PTEs 2340 * themselves. Sync so that any pending REF/CHG bits are flushed to 2341 * the PTEs. 2342 */ 2343 SYNC(); 2344 LIST_FOREACH(pvo, vm_page_to_pvoh(m), pvo_vlink) { 2345 MOEA_PVO_CHECK(pvo); /* sanity check */ 2346 2347 /* 2348 * See if this pvo has a valid PTE. if so, fetch the 2349 * REF/CHG bits from the valid PTE. If the appropriate 2350 * ptebit is set, cache it and return success. 2351 */ 2352 LOCK_TABLE(); 2353 pt = moea64_pvo_to_pte(pvo, -1); 2354 if (pt != NULL) { 2355 moea64_pte_synch(pt, &pvo->pvo_pte.lpte); 2356 if (pvo->pvo_pte.lpte.pte_lo & ptebit) { 2357 UNLOCK_TABLE(); 2358 2359 moea64_attr_save(m, ptebit); 2360 MOEA_PVO_CHECK(pvo); /* sanity check */ 2361 return (TRUE); 2362 } 2363 } 2364 UNLOCK_TABLE(); 2365 } 2366 2367 return (FALSE); 2368} 2369 2370static u_int 2371moea64_clear_bit(vm_page_t m, u_int64_t ptebit, u_int64_t *origbit) 2372{ 2373 u_int count; 2374 struct pvo_entry *pvo; 2375 struct lpte *pt; 2376 uint64_t rv; 2377 2378 /* 2379 * Clear the cached value. 2380 */ 2381 rv = moea64_attr_fetch(m); 2382 moea64_attr_clear(m, ptebit); 2383 2384 /* 2385 * Sync so that any pending REF/CHG bits are flushed to the PTEs (so 2386 * we can reset the right ones). note that since the pvo entries and 2387 * list heads are accessed via BAT0 and are never placed in the page 2388 * table, we don't have to worry about further accesses setting the 2389 * REF/CHG bits. 2390 */ 2391 SYNC(); 2392 2393 /* 2394 * For each pvo entry, clear the pvo's ptebit. If this pvo has a 2395 * valid pte clear the ptebit from the valid pte. 2396 */ 2397 count = 0; 2398 LIST_FOREACH(pvo, vm_page_to_pvoh(m), pvo_vlink) { 2399 MOEA_PVO_CHECK(pvo); /* sanity check */ 2400 2401 LOCK_TABLE(); 2402 pt = moea64_pvo_to_pte(pvo, -1); 2403 if (pt != NULL) { 2404 moea64_pte_synch(pt, &pvo->pvo_pte.lpte); 2405 if (pvo->pvo_pte.lpte.pte_lo & ptebit) { 2406 count++; 2407 moea64_pte_clear(pt, pvo->pvo_pmap, PVO_VADDR(pvo), ptebit); 2408 } 2409 } 2410 UNLOCK_TABLE(); 2411 rv |= pvo->pvo_pte.lpte.pte_lo; 2412 pvo->pvo_pte.lpte.pte_lo &= ~ptebit; 2413 MOEA_PVO_CHECK(pvo); /* sanity check */ 2414 } 2415 2416 if (origbit != NULL) { 2417 *origbit = rv; 2418 } 2419 2420 return (count); 2421} 2422 2423boolean_t 2424moea64_dev_direct_mapped(mmu_t mmu, vm_offset_t pa, vm_size_t size) 2425{ 2426 return (EFAULT); 2427} 2428 2429/* 2430 * Map a set of physical memory pages into the kernel virtual 2431 * address space. Return a pointer to where it is mapped. 
This
 * routine is intended to be used for mapping device memory,
 * NOT real memory.
 */
void *
moea64_mapdev(mmu_t mmu, vm_offset_t pa, vm_size_t size)
{
        vm_offset_t va, tmpva, ppa, offset;

        ppa = trunc_page(pa);
        offset = pa & PAGE_MASK;
        size = roundup(offset + size, PAGE_SIZE);

        va = kmem_alloc_nofault(kernel_map, size);

        if (!va)
                panic("moea64_mapdev: Couldn't alloc kernel virtual memory");

        for (tmpva = va; size > 0;) {
                moea64_kenter(mmu, tmpva, ppa);
                size -= PAGE_SIZE;
                tmpva += PAGE_SIZE;
                ppa += PAGE_SIZE;
        }

        return ((void *)(va + offset));
}

void
moea64_unmapdev(mmu_t mmu, vm_offset_t va, vm_size_t size)
{
        vm_offset_t base, offset;

        base = trunc_page(va);
        offset = va & PAGE_MASK;
        size = roundup(offset + size, PAGE_SIZE);

        kmem_free(kernel_map, base, size);
}

static void
moea64_sync_icache(mmu_t mmu, pmap_t pm, vm_offset_t va, vm_size_t sz)
{
        struct pvo_entry *pvo;
        vm_offset_t lim;
        vm_paddr_t pa;
        vm_size_t len;

        PMAP_LOCK(pm);
        while (sz > 0) {
                /* Advance to the next page boundary so len is never zero. */
                lim = round_page(va + 1);
                len = MIN(lim - va, sz);
                pvo = moea64_pvo_find_va(pm, va & ~ADDR_POFF, NULL);
                if (pvo != NULL) {
                        pa = (pvo->pvo_pte.lpte.pte_lo & LPTE_RPGN) |
                            (va & ADDR_POFF);
                        moea64_syncicache(pm, va, pa, len);
                }
                va += len;
                sz -= len;
        }
        PMAP_UNLOCK(pm);
}
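The two-location hashed page table used by moea64_pte_insert() and moea64_pvo_pte_index() above can be illustrated in isolation. What follows is a minimal, self-contained user-space sketch, not kernel code: every identifier in it (toy_pte, toy_pteg_table, NPTEGS, TOY_HID, and so on) is made up for illustration and does not exist in mmu_oea64.c. A mapping goes into a free slot of its primary PTE group when possible; otherwise the group index is XORed with the PTEG mask, the entry lands in the secondary group, and the HID bit records that fact so the flat slot index can later be recovered without searching.

#include <stdint.h>
#include <stdio.h>

#define NPTEGS          16              /* must be a power of two */
#define PTEG_MASK       (NPTEGS - 1)    /* plays the role of moea64_pteg_mask */
#define TOY_VALID       0x1
#define TOY_HID         0x2             /* secondary hash was used */

struct toy_pte {
        uint64_t pte_hi;
};

struct toy_pteg {
        struct toy_pte pt[8];           /* eight slots per group */
};

static struct toy_pteg toy_pteg_table[NPTEGS];

/* Try the primary group first, then the secondary (index XOR mask). */
static int
toy_pte_insert(unsigned ptegidx, struct toy_pte *pvo_pt)
{
        struct toy_pte *pt;
        int i;

        for (pt = toy_pteg_table[ptegidx].pt, i = 0; i < 8; i++, pt++) {
                if ((pt->pte_hi & TOY_VALID) == 0) {
                        pvo_pt->pte_hi &= ~TOY_HID;
                        pt->pte_hi = pvo_pt->pte_hi | TOY_VALID;
                        return (i);
                }
        }

        ptegidx ^= PTEG_MASK;
        for (pt = toy_pteg_table[ptegidx].pt, i = 0; i < 8; i++, pt++) {
                if ((pt->pte_hi & TOY_VALID) == 0) {
                        pvo_pt->pte_hi |= TOY_HID;
                        pt->pte_hi = pvo_pt->pte_hi | TOY_VALID;
                        return (i);
                }
        }
        return (-1);                    /* both groups full: overflow */
}

/* Recover the flat slot index the way moea64_pvo_pte_index() does. */
static int
toy_pte_index(unsigned ptegidx, int slot, uint64_t pte_hi)
{
        int pteidx = ptegidx * 8 + slot;

        if (pte_hi & TOY_HID)
                pteidx ^= PTEG_MASK * 8;
        return (pteidx);
}

int
main(void)
{
        struct toy_pte pte = { 0 };
        unsigned ptegidx = 5;           /* stand-in for a va_to_pteg() result */
        int slot = toy_pte_insert(ptegidx, &pte);

        printf("slot %d, flat index %d, HID %s\n", slot,
            toy_pte_index(ptegidx, slot, pte.pte_hi),
            (pte.pte_hi & TOY_HID) ? "set" : "clear");
        return (0);
}

Unlike moea64_pte_insert(), which panics when both groups are full, the sketch simply returns -1; everything else follows the same primary/secondary probe and the same XOR-by-mask recovery of the slot index.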