mmu_oea64.c revision 233434
/*-
 * Copyright (c) 2001 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Matt Thomas <matt@3am-software.com> of Allegro Networks, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *        This product includes software developed by the NetBSD
 *        Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */
/*-
 * Copyright (C) 1995, 1996 Wolfgang Solfrank.
 * Copyright (C) 1995, 1996 TooLs GmbH.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by TooLs GmbH.
 * 4. The name of TooLs GmbH may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY TOOLS GMBH ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL TOOLS GMBH BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
 * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
 * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * $NetBSD: pmap.c,v 1.28 2000/03/26 20:42:36 kleink Exp $
 */
/*-
 * Copyright (C) 2001 Benno Rice.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY Benno Rice ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL TOOLS GMBH BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
 * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
 * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: head/sys/powerpc/aim/mmu_oea64.c 233434 2012-03-24 19:59:14Z nwhitehorn $");

/*
 * Manages physical address maps.
 *
 * In addition to hardware address maps, this module is called upon to
 * provide software-use-only maps which may or may not be stored in the
 * same form as hardware maps.  These pseudo-maps are used to store
 * intermediate results from copy operations to and from address spaces.
 *
 * Since the information managed by this module is also stored by the
 * logical address mapping module, this module may throw away valid virtual
 * to physical mappings at almost any time.  However, invalidations of
 * mappings must be done as requested.
 *
 * In order to cope with hardware architectures which make virtual to
 * physical map invalidates expensive, this module may delay invalidate
 * reduced protection operations until such time as they are actually
 * necessary.  This module is given full information as to which processors
 * are currently using which maps, and to when physical maps must be made
 * correct.
 */

#include "opt_compat.h"
#include "opt_kstack_pages.h"

#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/queue.h>
#include <sys/cpuset.h>
#include <sys/ktr.h>
#include <sys/lock.h>
#include <sys/msgbuf.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/sched.h>
#include <sys/sysctl.h>
#include <sys/systm.h>
#include <sys/vmmeter.h>

#include <sys/kdb.h>

#include <dev/ofw/openfirm.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/vm_kern.h>
#include <vm/vm_page.h>
#include <vm/vm_map.h>
#include <vm/vm_object.h>
#include <vm/vm_extern.h>
#include <vm/vm_pageout.h>
#include <vm/vm_pager.h>
#include <vm/uma.h>

#include <machine/_inttypes.h>
#include <machine/cpu.h>
#include <machine/platform.h>
#include <machine/frame.h>
#include <machine/md_var.h>
#include <machine/psl.h>
#include <machine/bat.h>
#include <machine/hid.h>
#include <machine/pte.h>
#include <machine/sr.h>
#include <machine/trap.h>
#include <machine/mmuvar.h>

#include "mmu_oea64.h"
#include "mmu_if.h"
#include "moea64_if.h"

void moea64_release_vsid(uint64_t vsid);
uintptr_t moea64_get_unique_vsid(void);

#define DISABLE_TRANS(msr)	msr = mfmsr(); mtmsr(msr & ~PSL_DR)
#define ENABLE_TRANS(msr)	mtmsr(msr)

#define	VSID_MAKE(sr, hash)	((sr) | (((hash) & 0xfffff) << 4))
#define	VSID_TO_HASH(vsid)	(((vsid) >> 4) & 0xfffff)
#define	VSID_HASH_MASK		0x0000007fffffffffULL

#define	LOCK_TABLE()	mtx_lock(&moea64_table_mutex)
#define	UNLOCK_TABLE()	mtx_unlock(&moea64_table_mutex);
#define	ASSERT_TABLE_LOCK()	mtx_assert(&moea64_table_mutex, MA_OWNED)

struct ofw_map {
	cell_t	om_va;
	cell_t	om_len;
	cell_t	om_pa_hi;
	cell_t	om_pa_lo;
	cell_t	om_mode;
};

/*
 * Map of physical memory regions.
 */
static struct	mem_region *regions;
static struct	mem_region *pregions;
static u_int	phys_avail_count;
static int	regions_sz, pregions_sz;

extern void bs_remap_earlyboot(void);

/*
 * Lock for the pteg and pvo tables.
 */
struct mtx	moea64_table_mutex;
struct mtx	moea64_slb_mutex;

/*
 * PTEG data.
 */
u_int		moea64_pteg_count;
u_int		moea64_pteg_mask;

/*
 * PVO data.
 */
struct	pvo_head *moea64_pvo_table;		/* pvo entries by pteg index */
struct	pvo_head moea64_pvo_kunmanaged =	/* list of unmanaged pages */
    LIST_HEAD_INITIALIZER(moea64_pvo_kunmanaged);

uma_zone_t	moea64_upvo_zone; /* zone for pvo entries for unmanaged pages */
uma_zone_t	moea64_mpvo_zone; /* zone for pvo entries for managed pages */

#define	BPVO_POOL_SIZE	327680
static struct	pvo_entry *moea64_bpvo_pool;
static int	moea64_bpvo_pool_index = 0;

#define	VSID_NBPW	(sizeof(u_int32_t) * 8)
#ifdef __powerpc64__
#define	NVSIDS		(NPMAPS * 16)
#define	VSID_HASHMASK	0xffffffffUL
#else
#define	NVSIDS		NPMAPS
#define	VSID_HASHMASK	0xfffffUL
#endif
static u_int	moea64_vsid_bitmap[NVSIDS / VSID_NBPW];

static boolean_t moea64_initialized = FALSE;
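/*
 * moea64_initialized is set at the end of moea64_init().  Until then,
 * moea64_enter_locked() treats every page as unmanaged and
 * moea64_pvo_enter() satisfies PVO allocations from the static
 * bootstrap pool above instead of from UMA.
 */
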
/*
 * Statistics.
 */
u_int	moea64_pte_valid = 0;
u_int	moea64_pte_overflow = 0;
u_int	moea64_pvo_entries = 0;
u_int	moea64_pvo_enter_calls = 0;
u_int	moea64_pvo_remove_calls = 0;
SYSCTL_INT(_machdep, OID_AUTO, moea64_pte_valid, CTLFLAG_RD,
    &moea64_pte_valid, 0, "");
SYSCTL_INT(_machdep, OID_AUTO, moea64_pte_overflow, CTLFLAG_RD,
    &moea64_pte_overflow, 0, "");
SYSCTL_INT(_machdep, OID_AUTO, moea64_pvo_entries, CTLFLAG_RD,
    &moea64_pvo_entries, 0, "");
SYSCTL_INT(_machdep, OID_AUTO, moea64_pvo_enter_calls, CTLFLAG_RD,
    &moea64_pvo_enter_calls, 0, "");
SYSCTL_INT(_machdep, OID_AUTO, moea64_pvo_remove_calls, CTLFLAG_RD,
    &moea64_pvo_remove_calls, 0, "");

vm_offset_t	moea64_scratchpage_va[2];
struct pvo_entry *moea64_scratchpage_pvo[2];
uintptr_t	moea64_scratchpage_pte[2];
struct	mtx	moea64_scratchpage_mtx;

uint64_t	moea64_large_page_mask = 0;
int		moea64_large_page_size = 0;
int		moea64_large_page_shift = 0;

/*
 * PVO calls.
 */
static int	moea64_pvo_enter(mmu_t, pmap_t, uma_zone_t, struct pvo_head *,
		    vm_offset_t, vm_offset_t, uint64_t, int);
static void	moea64_pvo_remove(mmu_t, struct pvo_entry *);
static struct	pvo_entry *moea64_pvo_find_va(pmap_t, vm_offset_t);

/*
 * Utility routines.
 */
static void		moea64_enter_locked(mmu_t, pmap_t, vm_offset_t,
			    vm_page_t, vm_prot_t, boolean_t);
static boolean_t	moea64_query_bit(mmu_t, vm_page_t, u_int64_t);
static u_int		moea64_clear_bit(mmu_t, vm_page_t, u_int64_t);
static void		moea64_kremove(mmu_t, vm_offset_t);
static void		moea64_syncicache(mmu_t, pmap_t pmap, vm_offset_t va,
			    vm_offset_t pa, vm_size_t sz);

/*
 * Kernel MMU interface
 */
void moea64_change_wiring(mmu_t, pmap_t, vm_offset_t, boolean_t);
void moea64_clear_modify(mmu_t, vm_page_t);
void moea64_clear_reference(mmu_t, vm_page_t);
void moea64_copy_page(mmu_t, vm_page_t, vm_page_t);
void moea64_enter(mmu_t, pmap_t, vm_offset_t, vm_page_t, vm_prot_t, boolean_t);
void moea64_enter_object(mmu_t, pmap_t, vm_offset_t, vm_offset_t, vm_page_t,
    vm_prot_t);
void moea64_enter_quick(mmu_t, pmap_t, vm_offset_t, vm_page_t, vm_prot_t);
vm_paddr_t moea64_extract(mmu_t, pmap_t, vm_offset_t);
vm_page_t moea64_extract_and_hold(mmu_t, pmap_t, vm_offset_t, vm_prot_t);
void moea64_init(mmu_t);
boolean_t moea64_is_modified(mmu_t, vm_page_t);
boolean_t moea64_is_prefaultable(mmu_t, pmap_t, vm_offset_t);
boolean_t moea64_is_referenced(mmu_t, vm_page_t);
boolean_t moea64_ts_referenced(mmu_t, vm_page_t);
vm_offset_t moea64_map(mmu_t, vm_offset_t *, vm_offset_t, vm_offset_t, int);
boolean_t moea64_page_exists_quick(mmu_t, pmap_t, vm_page_t);
int moea64_page_wired_mappings(mmu_t, vm_page_t);
void moea64_pinit(mmu_t, pmap_t);
void moea64_pinit0(mmu_t, pmap_t);
void moea64_protect(mmu_t, pmap_t, vm_offset_t, vm_offset_t, vm_prot_t);
void moea64_qenter(mmu_t, vm_offset_t, vm_page_t *, int);
void moea64_qremove(mmu_t, vm_offset_t, int);
void moea64_release(mmu_t, pmap_t);
void moea64_remove(mmu_t, pmap_t, vm_offset_t, vm_offset_t);
void moea64_remove_pages(mmu_t, pmap_t);
void moea64_remove_all(mmu_t, vm_page_t);
void moea64_remove_write(mmu_t, vm_page_t);
void moea64_zero_page(mmu_t, vm_page_t);
void moea64_zero_page_area(mmu_t, vm_page_t, int, int);
void moea64_zero_page_idle(mmu_t, vm_page_t);
void moea64_activate(mmu_t, struct thread *);
void moea64_deactivate(mmu_t, struct thread *);
void *moea64_mapdev(mmu_t, vm_offset_t, vm_size_t);
void *moea64_mapdev_attr(mmu_t, vm_offset_t, vm_size_t, vm_memattr_t);
void moea64_unmapdev(mmu_t, vm_offset_t, vm_size_t);
vm_offset_t moea64_kextract(mmu_t, vm_offset_t);
void moea64_page_set_memattr(mmu_t, vm_page_t m, vm_memattr_t ma);
void moea64_kenter_attr(mmu_t, vm_offset_t, vm_offset_t, vm_memattr_t ma);
void moea64_kenter(mmu_t, vm_offset_t, vm_offset_t);
boolean_t moea64_dev_direct_mapped(mmu_t, vm_offset_t, vm_size_t);
static void moea64_sync_icache(mmu_t, pmap_t, vm_offset_t, vm_size_t);

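/*
 * Glue for the kernel MMU interface: the pmap_*() entry points are
 * dispatched through this kobj method table (see mmu_if.m) once
 * MMU_DEF() below has registered the implementation.
 */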
static mmu_method_t moea64_methods[] = {
	MMUMETHOD(mmu_change_wiring,	moea64_change_wiring),
	MMUMETHOD(mmu_clear_modify,	moea64_clear_modify),
	MMUMETHOD(mmu_clear_reference,	moea64_clear_reference),
	MMUMETHOD(mmu_copy_page,	moea64_copy_page),
	MMUMETHOD(mmu_enter,		moea64_enter),
	MMUMETHOD(mmu_enter_object,	moea64_enter_object),
	MMUMETHOD(mmu_enter_quick,	moea64_enter_quick),
	MMUMETHOD(mmu_extract,		moea64_extract),
	MMUMETHOD(mmu_extract_and_hold,	moea64_extract_and_hold),
	MMUMETHOD(mmu_init,		moea64_init),
	MMUMETHOD(mmu_is_modified,	moea64_is_modified),
	MMUMETHOD(mmu_is_prefaultable,	moea64_is_prefaultable),
	MMUMETHOD(mmu_is_referenced,	moea64_is_referenced),
	MMUMETHOD(mmu_ts_referenced,	moea64_ts_referenced),
	MMUMETHOD(mmu_map,		moea64_map),
	MMUMETHOD(mmu_page_exists_quick,moea64_page_exists_quick),
	MMUMETHOD(mmu_page_wired_mappings,moea64_page_wired_mappings),
	MMUMETHOD(mmu_pinit,		moea64_pinit),
	MMUMETHOD(mmu_pinit0,		moea64_pinit0),
	MMUMETHOD(mmu_protect,		moea64_protect),
	MMUMETHOD(mmu_qenter,		moea64_qenter),
	MMUMETHOD(mmu_qremove,		moea64_qremove),
	MMUMETHOD(mmu_release,		moea64_release),
	MMUMETHOD(mmu_remove,		moea64_remove),
	MMUMETHOD(mmu_remove_pages,	moea64_remove_pages),
	MMUMETHOD(mmu_remove_all,	moea64_remove_all),
	MMUMETHOD(mmu_remove_write,	moea64_remove_write),
	MMUMETHOD(mmu_sync_icache,	moea64_sync_icache),
	MMUMETHOD(mmu_zero_page,	moea64_zero_page),
	MMUMETHOD(mmu_zero_page_area,	moea64_zero_page_area),
	MMUMETHOD(mmu_zero_page_idle,	moea64_zero_page_idle),
	MMUMETHOD(mmu_activate,		moea64_activate),
	MMUMETHOD(mmu_deactivate,	moea64_deactivate),
	MMUMETHOD(mmu_page_set_memattr,	moea64_page_set_memattr),

	/* Internal interfaces */
	MMUMETHOD(mmu_mapdev,		moea64_mapdev),
	MMUMETHOD(mmu_mapdev_attr,	moea64_mapdev_attr),
	MMUMETHOD(mmu_unmapdev,		moea64_unmapdev),
	MMUMETHOD(mmu_kextract,		moea64_kextract),
	MMUMETHOD(mmu_kenter,		moea64_kenter),
	MMUMETHOD(mmu_kenter_attr,	moea64_kenter_attr),
	MMUMETHOD(mmu_dev_direct_mapped,moea64_dev_direct_mapped),

	{ 0, 0 }
};

MMU_DEF(oea64_mmu, "mmu_oea64_base", moea64_methods, 0);

static __inline u_int
va_to_pteg(uint64_t vsid, vm_offset_t addr, int large)
{
	uint64_t hash;
	int shift;

	shift = large ? moea64_large_page_shift : ADDR_PIDX_SHFT;
	hash = (vsid & VSID_HASH_MASK) ^ (((uint64_t)addr & ADDR_PIDX) >>
	    shift);
	return (hash & moea64_pteg_mask);
}

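/*
 * Note: each mapping hashes to a page table entry group (PTEG) as
 * computed above: (VSID ^ page index) & moea64_pteg_mask, with the
 * page index taken at large-page granularity for large mappings.  The
 * architecture also defines a secondary group at the one's complement
 * of the primary hash, which PTE insertion may fall back on.
 */
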
static __inline struct pvo_head *
vm_page_to_pvoh(vm_page_t m)
{

	return (&m->md.mdpg_pvoh);
}

static __inline void
moea64_pte_create(struct lpte *pt, uint64_t vsid, vm_offset_t va,
    uint64_t pte_lo, int flags)
{

	ASSERT_TABLE_LOCK();

	/*
	 * Construct a PTE.  Default to IMB initially.  Valid bit only gets
	 * set when the real pte is set in memory.
	 *
	 * Note: Don't set the valid bit for correct operation of tlb update.
	 */
	pt->pte_hi = (vsid << LPTE_VSID_SHIFT) |
	    (((uint64_t)(va & ADDR_PIDX) >> ADDR_API_SHFT64) & LPTE_API);

	if (flags & PVO_LARGE)
		pt->pte_hi |= LPTE_BIG;

	pt->pte_lo = pte_lo;
}

static __inline uint64_t
moea64_calc_wimg(vm_offset_t pa, vm_memattr_t ma)
{
	uint64_t pte_lo;
	int i;

	if (ma != VM_MEMATTR_DEFAULT) {
		switch (ma) {
		case VM_MEMATTR_UNCACHEABLE:
			return (LPTE_I | LPTE_G);
		case VM_MEMATTR_WRITE_COMBINING:
		case VM_MEMATTR_WRITE_BACK:
		case VM_MEMATTR_PREFETCHABLE:
			return (LPTE_I);
		case VM_MEMATTR_WRITE_THROUGH:
			return (LPTE_W | LPTE_M);
		}
	}

	/*
	 * Assume the page is cache inhibited and access is guarded unless
	 * it's in our available memory array.
	 */
	pte_lo = LPTE_I | LPTE_G;
	for (i = 0; i < pregions_sz; i++) {
		if ((pa >= pregions[i].mr_start) &&
		    (pa < (pregions[i].mr_start + pregions[i].mr_size))) {
			pte_lo &= ~(LPTE_I | LPTE_G);
			pte_lo |= LPTE_M;
			break;
		}
	}

	return pte_lo;
}

/*
 * Quicksort callback for comparing memory regions.
 */
static int	om_cmp(const void *a, const void *b);

static int
om_cmp(const void *a, const void *b)
{
	const struct	ofw_map *mapa;
	const struct	ofw_map *mapb;

	mapa = a;
	mapb = b;
	if (mapa->om_pa_hi < mapb->om_pa_hi)
		return (-1);
	else if (mapa->om_pa_hi > mapb->om_pa_hi)
		return (1);
	else if (mapa->om_pa_lo < mapb->om_pa_lo)
		return (-1);
	else if (mapa->om_pa_lo > mapb->om_pa_lo)
		return (1);
	else
		return (0);
}

static void
moea64_add_ofw_mappings(mmu_t mmup, phandle_t mmu, size_t sz)
{
	struct ofw_map	translations[sz/sizeof(struct ofw_map)];
	register_t	msr;
	vm_offset_t	off;
	vm_paddr_t	pa_base;
	int		i;

	bzero(translations, sz);
	if (OF_getprop(mmu, "translations", translations, sz) == -1)
		panic("moea64_bootstrap: can't get ofw translations");

	CTR0(KTR_PMAP, "moea64_add_ofw_mappings: translations");
	sz /= sizeof(*translations);
	qsort(translations, sz, sizeof (*translations), om_cmp);

	for (i = 0; i < sz; i++) {
		CTR3(KTR_PMAP, "translation: pa=%#x va=%#x len=%#x",
		    (uint32_t)(translations[i].om_pa_lo), translations[i].om_va,
		    translations[i].om_len);

		if (translations[i].om_pa_lo % PAGE_SIZE)
			panic("OFW translation not page-aligned!");

		pa_base = translations[i].om_pa_lo;

	#ifdef __powerpc64__
		pa_base += (vm_offset_t)translations[i].om_pa_hi << 32;
	#else
		if (translations[i].om_pa_hi)
			panic("OFW translations above 32-bit boundary!");
	#endif

		/* Now enter the pages for this mapping */

		DISABLE_TRANS(msr);
		for (off = 0; off < translations[i].om_len; off += PAGE_SIZE) {
			if (moea64_pvo_find_va(kernel_pmap,
			    translations[i].om_va + off) != NULL)
				continue;

			moea64_kenter(mmup, translations[i].om_va + off,
			    pa_base + off);
		}
		ENABLE_TRANS(msr);
	}
}

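/*
 * Note: the 970 ("G5") ships with large-page support disabled through
 * HID4; the probe below clears the disable bit and then advertises
 * 16 MB pages, which the direct map set up later takes advantage of.
 * Cell already has large pages enabled.
 */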
#ifdef __powerpc64__
static void
moea64_probe_large_page(void)
{
	uint16_t pvr = mfpvr() >> 16;

	switch (pvr) {
	case IBM970:
	case IBM970FX:
	case IBM970MP:
		powerpc_sync(); isync();
		mtspr(SPR_HID4, mfspr(SPR_HID4) & ~HID4_970_DISABLE_LG_PG);
		powerpc_sync(); isync();

		/* FALLTHROUGH */
	case IBMCELLBE:
		moea64_large_page_size = 0x1000000; /* 16 MB */
		moea64_large_page_shift = 24;
		break;
	default:
		moea64_large_page_size = 0;
	}

	moea64_large_page_mask = moea64_large_page_size - 1;
}

static void
moea64_bootstrap_slb_prefault(vm_offset_t va, int large)
{
	struct slb *cache;
	struct slb entry;
	uint64_t esid, slbe;
	uint64_t i;

	cache = PCPU_GET(slb);
	esid = va >> ADDR_SR_SHFT;
	slbe = (esid << SLBE_ESID_SHIFT) | SLBE_VALID;

	for (i = 0; i < 64; i++) {
		if (cache[i].slbe == (slbe | i))
			return;
	}

	entry.slbe = slbe;
	entry.slbv = KERNEL_VSID(esid) << SLBV_VSID_SHIFT;
	if (large)
		entry.slbv |= SLBV_L;

	slb_insert_kernel(entry.slbe, entry.slbv);
}
#endif

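/*
 * With large-page support, all of physical memory is entered into the
 * kernel pmap as wired 1:1 large mappings.  Without it, only the PVO
 * table, the bootstrap PVO pool, and the kernel image itself are
 * mapped page by page, so that they remain reachable once address
 * translation is switched on.
 */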
static void
moea64_setup_direct_map(mmu_t mmup, vm_offset_t kernelstart,
    vm_offset_t kernelend)
{
	register_t msr;
	vm_paddr_t pa;
	vm_offset_t size, off;
	uint64_t pte_lo;
	int i;

	if (moea64_large_page_size == 0)
		hw_direct_map = 0;

	DISABLE_TRANS(msr);
	if (hw_direct_map) {
		PMAP_LOCK(kernel_pmap);
		for (i = 0; i < pregions_sz; i++) {
		    for (pa = pregions[i].mr_start; pa < pregions[i].mr_start +
		        pregions[i].mr_size; pa += moea64_large_page_size) {
			pte_lo = LPTE_M;

			/*
			 * Set memory access as guarded if prefetch within
			 * the page could run past the available physmem area.
			 */
			if (pa & moea64_large_page_mask) {
				pa &= moea64_large_page_mask;
				pte_lo |= LPTE_G;
			}
			if (pa + moea64_large_page_size >
			    pregions[i].mr_start + pregions[i].mr_size)
				pte_lo |= LPTE_G;

			moea64_pvo_enter(mmup, kernel_pmap, moea64_upvo_zone,
			    &moea64_pvo_kunmanaged, pa, pa,
			    pte_lo, PVO_WIRED | PVO_LARGE);
		    }
		}
		PMAP_UNLOCK(kernel_pmap);
	} else {
		size = sizeof(struct pvo_head) * moea64_pteg_count;
		off = (vm_offset_t)(moea64_pvo_table);
		for (pa = off; pa < off + size; pa += PAGE_SIZE)
			moea64_kenter(mmup, pa, pa);
		size = BPVO_POOL_SIZE*sizeof(struct pvo_entry);
		off = (vm_offset_t)(moea64_bpvo_pool);
		for (pa = off; pa < off + size; pa += PAGE_SIZE)
			moea64_kenter(mmup, pa, pa);

		/*
		 * Map certain important things, like ourselves.
		 *
		 * NOTE: We do not map the exception vector space. That code is
		 * used only in real mode, and leaving it unmapped allows us to
		 * catch NULL pointer dereferences, instead of making NULL a
		 * valid address.
		 */

		for (pa = kernelstart & ~PAGE_MASK; pa < kernelend;
		    pa += PAGE_SIZE)
			moea64_kenter(mmup, pa, pa);
	}
	ENABLE_TRANS(msr);
}

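/*
 * Bootstrap proceeds in three phases.  moea64_early_bootstrap()
 * discovers physical memory and sizes the page table;
 * moea64_mid_bootstrap() allocates the PVO tables and initializes the
 * kernel pmap; moea64_late_bootstrap() enters the Open Firmware
 * mappings, switches translation on, and carves out early KVA for
 * thread0's stack, the message buffer, and the dynamic per-CPU area.
 */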
void
moea64_early_bootstrap(mmu_t mmup, vm_offset_t kernelstart, vm_offset_t kernelend)
{
	int		i, j;
	vm_size_t	physsz, hwphyssz;

#ifndef __powerpc64__
	/* We don't have a direct map since there is no BAT */
	hw_direct_map = 0;

	/* Make sure battable is zero, since we have no BAT */
	for (i = 0; i < 16; i++) {
		battable[i].batu = 0;
		battable[i].batl = 0;
	}
#else
	moea64_probe_large_page();

	/* Use a direct map if we have large page support */
	if (moea64_large_page_size > 0)
		hw_direct_map = 1;
	else
		hw_direct_map = 0;
#endif

	/* Get physical memory regions from firmware */
	mem_regions(&pregions, &pregions_sz, &regions, &regions_sz);
	CTR0(KTR_PMAP, "moea64_bootstrap: physical memory");

	if (sizeof(phys_avail)/sizeof(phys_avail[0]) < regions_sz)
		panic("moea64_bootstrap: phys_avail too small");

	phys_avail_count = 0;
	physsz = 0;
	hwphyssz = 0;
	TUNABLE_ULONG_FETCH("hw.physmem", (u_long *) &hwphyssz);
	for (i = 0, j = 0; i < regions_sz; i++, j += 2) {
		CTR3(KTR_PMAP, "region: %#x - %#x (%#x)", regions[i].mr_start,
		    regions[i].mr_start + regions[i].mr_size,
		    regions[i].mr_size);
		if (hwphyssz != 0 &&
		    (physsz + regions[i].mr_size) >= hwphyssz) {
			if (physsz < hwphyssz) {
				phys_avail[j] = regions[i].mr_start;
				phys_avail[j + 1] = regions[i].mr_start +
				    hwphyssz - physsz;
				physsz = hwphyssz;
				phys_avail_count++;
			}
			break;
		}
		phys_avail[j] = regions[i].mr_start;
		phys_avail[j + 1] = regions[i].mr_start + regions[i].mr_size;
		phys_avail_count++;
		physsz += regions[i].mr_size;
	}

	/* Check for overlap with the kernel and exception vectors */
	for (j = 0; j < 2*phys_avail_count; j+=2) {
		if (phys_avail[j] < EXC_LAST)
			phys_avail[j] += EXC_LAST;

		if (kernelstart >= phys_avail[j] &&
		    kernelstart < phys_avail[j+1]) {
			if (kernelend < phys_avail[j+1]) {
				phys_avail[2*phys_avail_count] =
				    (kernelend & ~PAGE_MASK) + PAGE_SIZE;
				phys_avail[2*phys_avail_count + 1] =
				    phys_avail[j+1];
				phys_avail_count++;
			}

			phys_avail[j+1] = kernelstart & ~PAGE_MASK;
		}

		if (kernelend >= phys_avail[j] &&
		    kernelend < phys_avail[j+1]) {
			if (kernelstart > phys_avail[j]) {
				phys_avail[2*phys_avail_count] = phys_avail[j];
				phys_avail[2*phys_avail_count + 1] =
				    kernelstart & ~PAGE_MASK;
				phys_avail_count++;
			}

			phys_avail[j] = (kernelend & ~PAGE_MASK) + PAGE_SIZE;
		}
	}

	physmem = btoc(physsz);

#ifdef PTEGCOUNT
	moea64_pteg_count = PTEGCOUNT;
#else
	moea64_pteg_count = 0x1000;

	while (moea64_pteg_count < physmem)
		moea64_pteg_count <<= 1;

	moea64_pteg_count >>= 1;
#endif /* PTEGCOUNT */
}

void
moea64_mid_bootstrap(mmu_t mmup, vm_offset_t kernelstart, vm_offset_t kernelend)
{
	vm_size_t	size;
	register_t	msr;
	int		i;

	/*
	 * Set PTEG mask
	 */
	moea64_pteg_mask = moea64_pteg_count - 1;

	/*
	 * Allocate pv/overflow lists.
	 */
	size = sizeof(struct pvo_head) * moea64_pteg_count;

	moea64_pvo_table = (struct pvo_head *)moea64_bootstrap_alloc(size,
	    PAGE_SIZE);
	CTR1(KTR_PMAP, "moea64_bootstrap: PVO table at %p", moea64_pvo_table);

	DISABLE_TRANS(msr);
	for (i = 0; i < moea64_pteg_count; i++)
		LIST_INIT(&moea64_pvo_table[i]);
	ENABLE_TRANS(msr);

	/*
	 * Initialize the lock that synchronizes access to the pteg and pvo
	 * tables.
	 */
	mtx_init(&moea64_table_mutex, "pmap table", NULL, MTX_DEF |
	    MTX_RECURSE);
	mtx_init(&moea64_slb_mutex, "SLB table", NULL, MTX_DEF);

	/*
	 * Initialise the unmanaged pvo pool.
	 */
	moea64_bpvo_pool = (struct pvo_entry *)moea64_bootstrap_alloc(
	    BPVO_POOL_SIZE*sizeof(struct pvo_entry), 0);
	moea64_bpvo_pool_index = 0;

	/*
	 * Make sure kernel vsid is allocated as well as VSID 0.
	 */
	#ifndef __powerpc64__
	moea64_vsid_bitmap[(KERNEL_VSIDBITS & (NVSIDS - 1)) / VSID_NBPW]
	    |= 1 << (KERNEL_VSIDBITS % VSID_NBPW);
	moea64_vsid_bitmap[0] |= 1;
	#endif

	/*
	 * Initialize the kernel pmap (which is statically allocated).
	 */
	#ifdef __powerpc64__
	for (i = 0; i < 64; i++) {
		pcpup->pc_slb[i].slbv = 0;
		pcpup->pc_slb[i].slbe = 0;
	}
	#else
	for (i = 0; i < 16; i++)
		kernel_pmap->pm_sr[i] = EMPTY_SEGMENT + i;
	#endif

	kernel_pmap->pmap_phys = kernel_pmap;
	CPU_FILL(&kernel_pmap->pm_active);
	LIST_INIT(&kernel_pmap->pmap_pvo);

	PMAP_LOCK_INIT(kernel_pmap);

	/*
	 * Now map in all the other buffers we allocated earlier
	 */

	moea64_setup_direct_map(mmup, kernelstart, kernelend);
}

void
moea64_late_bootstrap(mmu_t mmup, vm_offset_t kernelstart, vm_offset_t kernelend)
{
	ihandle_t	mmui;
	phandle_t	chosen;
	phandle_t	mmu;
	size_t		sz;
	int		i;
	vm_offset_t	pa, va;
	void		*dpcpu;

	/*
	 * Set up the Open Firmware pmap and add its mappings if not in real
	 * mode.
	 */

	chosen = OF_finddevice("/chosen");
	if (chosen != -1 && OF_getprop(chosen, "mmu", &mmui, 4) != -1) {
		mmu = OF_instance_to_package(mmui);
		if (mmu == -1 || (sz = OF_getproplen(mmu, "translations")) == -1)
			sz = 0;
		if (sz > 6144 /* tmpstksz - 2 KB headroom */)
			panic("moea64_bootstrap: too many ofw translations");

		if (sz > 0)
			moea64_add_ofw_mappings(mmup, mmu, sz);
	}

	/*
	 * Calculate the last available physical address.
	 */
	for (i = 0; phys_avail[i + 2] != 0; i += 2)
		;
	Maxmem = powerpc_btop(phys_avail[i + 1]);

	/*
	 * Initialize MMU and remap early physical mappings
	 */
	MMU_CPU_BOOTSTRAP(mmup,0);
	mtmsr(mfmsr() | PSL_DR | PSL_IR);
	pmap_bootstrapped++;
	bs_remap_earlyboot();

	/*
	 * Set the start and end of kva.
	 */
	virtual_avail = VM_MIN_KERNEL_ADDRESS;
	virtual_end = VM_MAX_SAFE_KERNEL_ADDRESS;

	/*
	 * Map the entire KVA range into the SLB. We must not fault there.
	 */
	#ifdef __powerpc64__
	for (va = virtual_avail; va < virtual_end; va += SEGMENT_LENGTH)
		moea64_bootstrap_slb_prefault(va, 0);
	#endif

	/*
	 * Figure out how far we can extend virtual_end into segment 16
	 * without running into existing mappings. Segment 16 is guaranteed
	 * to contain neither RAM nor devices (at least on Apple hardware),
	 * but will generally contain some OFW mappings we should not
	 * step on.
	 */

	#ifndef __powerpc64__	/* KVA is in high memory on PPC64 */
	PMAP_LOCK(kernel_pmap);
	while (virtual_end < VM_MAX_KERNEL_ADDRESS &&
	    moea64_pvo_find_va(kernel_pmap, virtual_end+1) == NULL)
		virtual_end += PAGE_SIZE;
	PMAP_UNLOCK(kernel_pmap);
	#endif

	/*
	 * Allocate a kernel stack with a guard page for thread0 and map it
	 * into the kernel page map.
	 */
	pa = moea64_bootstrap_alloc(KSTACK_PAGES * PAGE_SIZE, PAGE_SIZE);
	va = virtual_avail + KSTACK_GUARD_PAGES * PAGE_SIZE;
	virtual_avail = va + KSTACK_PAGES * PAGE_SIZE;
	CTR2(KTR_PMAP, "moea64_bootstrap: kstack0 at %#x (%#x)", pa, va);
	thread0.td_kstack = va;
	thread0.td_kstack_pages = KSTACK_PAGES;
	for (i = 0; i < KSTACK_PAGES; i++) {
		moea64_kenter(mmup, va, pa);
		pa += PAGE_SIZE;
		va += PAGE_SIZE;
	}

	/*
	 * Allocate virtual address space for the message buffer.
	 */
	pa = msgbuf_phys = moea64_bootstrap_alloc(msgbufsize, PAGE_SIZE);
	msgbufp = (struct msgbuf *)virtual_avail;
	va = virtual_avail;
	virtual_avail += round_page(msgbufsize);
	while (va < virtual_avail) {
		moea64_kenter(mmup, va, pa);
		pa += PAGE_SIZE;
		va += PAGE_SIZE;
	}

	/*
	 * Allocate virtual address space for the dynamic percpu area.
	 */
	pa = moea64_bootstrap_alloc(DPCPU_SIZE, PAGE_SIZE);
	dpcpu = (void *)virtual_avail;
	va = virtual_avail;
	virtual_avail += DPCPU_SIZE;
	while (va < virtual_avail) {
		moea64_kenter(mmup, va, pa);
		pa += PAGE_SIZE;
		va += PAGE_SIZE;
	}
	dpcpu_init(dpcpu, 0);

	/*
	 * Allocate some things for page zeroing. We put this directly
	 * in the page table, marked with LPTE_LOCKED, to avoid any
	 * of the PVO book-keeping or other parts of the VM system
	 * from even knowing that this hack exists.
	 */

	if (!hw_direct_map) {
		mtx_init(&moea64_scratchpage_mtx, "pvo zero page", NULL,
		    MTX_DEF);
		for (i = 0; i < 2; i++) {
			moea64_scratchpage_va[i] = (virtual_end+1) - PAGE_SIZE;
			virtual_end -= PAGE_SIZE;

			moea64_kenter(mmup, moea64_scratchpage_va[i], 0);

			moea64_scratchpage_pvo[i] = moea64_pvo_find_va(
			    kernel_pmap, (vm_offset_t)moea64_scratchpage_va[i]);
			LOCK_TABLE();
			moea64_scratchpage_pte[i] = MOEA64_PVO_TO_PTE(
			    mmup, moea64_scratchpage_pvo[i]);
			moea64_scratchpage_pvo[i]->pvo_pte.lpte.pte_hi
			    |= LPTE_LOCKED;
			MOEA64_PTE_CHANGE(mmup, moea64_scratchpage_pte[i],
			    &moea64_scratchpage_pvo[i]->pvo_pte.lpte,
			    moea64_scratchpage_pvo[i]->pvo_vpn);
			UNLOCK_TABLE();
		}
	}
}

/*
 * Activate a user pmap.  The pmap must be activated before its address
 * space can be accessed in any way.
 */
void
moea64_activate(mmu_t mmu, struct thread *td)
{
	pmap_t	pm;

	pm = &td->td_proc->p_vmspace->vm_pmap;
	CPU_SET(PCPU_GET(cpuid), &pm->pm_active);

	#ifdef __powerpc64__
	PCPU_SET(userslb, pm->pm_slb);
	#else
	PCPU_SET(curpmap, pm->pmap_phys);
	#endif
}

void
moea64_deactivate(mmu_t mmu, struct thread *td)
{
	pmap_t	pm;

	pm = &td->td_proc->p_vmspace->vm_pmap;
	CPU_CLR(PCPU_GET(cpuid), &pm->pm_active);
	#ifdef __powerpc64__
	PCPU_SET(userslb, NULL);
	#else
	PCPU_SET(curpmap, NULL);
	#endif
}

void
moea64_change_wiring(mmu_t mmu, pmap_t pm, vm_offset_t va, boolean_t wired)
{
	struct	pvo_entry *pvo;
	uintptr_t pt;
	uint64_t vsid;
	int	i, ptegidx;

	PMAP_LOCK(pm);
	pvo = moea64_pvo_find_va(pm, va & ~ADDR_POFF);

	if (pvo != NULL) {
		LOCK_TABLE();
		pt = MOEA64_PVO_TO_PTE(mmu, pvo);

		if (wired) {
			if ((pvo->pvo_vaddr & PVO_WIRED) == 0)
				pm->pm_stats.wired_count++;
			pvo->pvo_vaddr |= PVO_WIRED;
			pvo->pvo_pte.lpte.pte_hi |= LPTE_WIRED;
		} else {
			if ((pvo->pvo_vaddr & PVO_WIRED) != 0)
				pm->pm_stats.wired_count--;
			pvo->pvo_vaddr &= ~PVO_WIRED;
			pvo->pvo_pte.lpte.pte_hi &= ~LPTE_WIRED;
		}

		if (pt != -1) {
			/* Update wiring flag in page table. */
			MOEA64_PTE_CHANGE(mmu, pt, &pvo->pvo_pte.lpte,
			    pvo->pvo_vpn);
		} else if (wired) {
			/*
			 * If we are wiring the page, and it wasn't in the
			 * page table before, add it.
			 */
			vsid = PVO_VSID(pvo);
			ptegidx = va_to_pteg(vsid, PVO_VADDR(pvo),
			    pvo->pvo_vaddr & PVO_LARGE);

			i = MOEA64_PTE_INSERT(mmu, ptegidx, &pvo->pvo_pte.lpte);

			if (i >= 0) {
				PVO_PTEGIDX_CLR(pvo);
				PVO_PTEGIDX_SET(pvo, i);
			}
		}

		UNLOCK_TABLE();
	}
	PMAP_UNLOCK(pm);
}

/*
 * This goes through and sets the physical address of our
 * special scratch PTE to the PA we want to zero or copy.  Because
Because 1061 * of locking issues (this can get called in pvo_enter() by 1062 * the UMA allocator), we can't use most other utility functions here 1063 */ 1064 1065static __inline 1066void moea64_set_scratchpage_pa(mmu_t mmup, int which, vm_offset_t pa) { 1067 1068 KASSERT(!hw_direct_map, ("Using OEA64 scratchpage with a direct map!")); 1069 mtx_assert(&moea64_scratchpage_mtx, MA_OWNED); 1070 1071 moea64_scratchpage_pvo[which]->pvo_pte.lpte.pte_lo &= 1072 ~(LPTE_WIMG | LPTE_RPGN); 1073 moea64_scratchpage_pvo[which]->pvo_pte.lpte.pte_lo |= 1074 moea64_calc_wimg(pa, VM_MEMATTR_DEFAULT) | (uint64_t)pa; 1075 MOEA64_PTE_CHANGE(mmup, moea64_scratchpage_pte[which], 1076 &moea64_scratchpage_pvo[which]->pvo_pte.lpte, 1077 moea64_scratchpage_pvo[which]->pvo_vpn); 1078 isync(); 1079} 1080 1081void 1082moea64_copy_page(mmu_t mmu, vm_page_t msrc, vm_page_t mdst) 1083{ 1084 vm_offset_t dst; 1085 vm_offset_t src; 1086 1087 dst = VM_PAGE_TO_PHYS(mdst); 1088 src = VM_PAGE_TO_PHYS(msrc); 1089 1090 if (hw_direct_map) { 1091 kcopy((void *)src, (void *)dst, PAGE_SIZE); 1092 } else { 1093 mtx_lock(&moea64_scratchpage_mtx); 1094 1095 moea64_set_scratchpage_pa(mmu, 0, src); 1096 moea64_set_scratchpage_pa(mmu, 1, dst); 1097 1098 kcopy((void *)moea64_scratchpage_va[0], 1099 (void *)moea64_scratchpage_va[1], PAGE_SIZE); 1100 1101 mtx_unlock(&moea64_scratchpage_mtx); 1102 } 1103} 1104 1105void 1106moea64_zero_page_area(mmu_t mmu, vm_page_t m, int off, int size) 1107{ 1108 vm_offset_t pa = VM_PAGE_TO_PHYS(m); 1109 1110 if (size + off > PAGE_SIZE) 1111 panic("moea64_zero_page: size + off > PAGE_SIZE"); 1112 1113 if (hw_direct_map) { 1114 bzero((caddr_t)pa + off, size); 1115 } else { 1116 mtx_lock(&moea64_scratchpage_mtx); 1117 moea64_set_scratchpage_pa(mmu, 0, pa); 1118 bzero((caddr_t)moea64_scratchpage_va[0] + off, size); 1119 mtx_unlock(&moea64_scratchpage_mtx); 1120 } 1121} 1122 1123/* 1124 * Zero a page of physical memory by temporarily mapping it 1125 */ 1126void 1127moea64_zero_page(mmu_t mmu, vm_page_t m) 1128{ 1129 vm_offset_t pa = VM_PAGE_TO_PHYS(m); 1130 vm_offset_t va, off; 1131 1132 if (!hw_direct_map) { 1133 mtx_lock(&moea64_scratchpage_mtx); 1134 1135 moea64_set_scratchpage_pa(mmu, 0, pa); 1136 va = moea64_scratchpage_va[0]; 1137 } else { 1138 va = pa; 1139 } 1140 1141 for (off = 0; off < PAGE_SIZE; off += cacheline_size) 1142 __asm __volatile("dcbz 0,%0" :: "r"(va + off)); 1143 1144 if (!hw_direct_map) 1145 mtx_unlock(&moea64_scratchpage_mtx); 1146} 1147 1148void 1149moea64_zero_page_idle(mmu_t mmu, vm_page_t m) 1150{ 1151 1152 moea64_zero_page(mmu, m); 1153} 1154 1155/* 1156 * Map the given physical page at the specified virtual address in the 1157 * target pmap with the protection requested. If specified the page 1158 * will be wired down. 1159 */ 1160void 1161moea64_enter(mmu_t mmu, pmap_t pmap, vm_offset_t va, vm_page_t m, 1162 vm_prot_t prot, boolean_t wired) 1163{ 1164 1165 vm_page_lock_queues(); 1166 PMAP_LOCK(pmap); 1167 moea64_enter_locked(mmu, pmap, va, m, prot, wired); 1168 vm_page_unlock_queues(); 1169 PMAP_UNLOCK(pmap); 1170} 1171 1172/* 1173 * Map the given physical page at the specified virtual address in the 1174 * target pmap with the protection requested. If specified the page 1175 * will be wired down. 1176 * 1177 * The page queues and pmap must be locked. 
/*
 * Zero a page of physical memory by temporarily mapping it
 */
void
moea64_zero_page(mmu_t mmu, vm_page_t m)
{
	vm_offset_t pa = VM_PAGE_TO_PHYS(m);
	vm_offset_t va, off;

	if (!hw_direct_map) {
		mtx_lock(&moea64_scratchpage_mtx);

		moea64_set_scratchpage_pa(mmu, 0, pa);
		va = moea64_scratchpage_va[0];
	} else {
		va = pa;
	}

	for (off = 0; off < PAGE_SIZE; off += cacheline_size)
		__asm __volatile("dcbz 0,%0" :: "r"(va + off));

	if (!hw_direct_map)
		mtx_unlock(&moea64_scratchpage_mtx);
}

void
moea64_zero_page_idle(mmu_t mmu, vm_page_t m)
{

	moea64_zero_page(mmu, m);
}

/*
 * Map the given physical page at the specified virtual address in the
 * target pmap with the protection requested.  If specified the page
 * will be wired down.
 */
void
moea64_enter(mmu_t mmu, pmap_t pmap, vm_offset_t va, vm_page_t m,
    vm_prot_t prot, boolean_t wired)
{

	vm_page_lock_queues();
	PMAP_LOCK(pmap);
	moea64_enter_locked(mmu, pmap, va, m, prot, wired);
	vm_page_unlock_queues();
	PMAP_UNLOCK(pmap);
}

/*
 * Map the given physical page at the specified virtual address in the
 * target pmap with the protection requested.  If specified the page
 * will be wired down.
 *
 * The page queues and pmap must be locked.
 */

static void
moea64_enter_locked(mmu_t mmu, pmap_t pmap, vm_offset_t va, vm_page_t m,
    vm_prot_t prot, boolean_t wired)
{
	struct		pvo_head *pvo_head;
	uma_zone_t	zone;
	vm_page_t	pg;
	uint64_t	pte_lo;
	u_int		pvo_flags;
	int		error;

	if (!moea64_initialized) {
		pvo_head = &moea64_pvo_kunmanaged;
		pg = NULL;
		zone = moea64_upvo_zone;
		pvo_flags = 0;
	} else {
		pvo_head = vm_page_to_pvoh(m);
		pg = m;
		zone = moea64_mpvo_zone;
		pvo_flags = PVO_MANAGED;
	}

	if (pmap_bootstrapped)
		mtx_assert(&vm_page_queue_mtx, MA_OWNED);
	PMAP_LOCK_ASSERT(pmap, MA_OWNED);
	KASSERT((m->oflags & (VPO_UNMANAGED | VPO_BUSY)) != 0 ||
	    VM_OBJECT_LOCKED(m->object),
	    ("moea64_enter_locked: page %p is not busy", m));

	/* XXX change the pvo head for fake pages */
	if ((m->oflags & VPO_UNMANAGED) != 0) {
		pvo_flags &= ~PVO_MANAGED;
		pvo_head = &moea64_pvo_kunmanaged;
		zone = moea64_upvo_zone;
	}

	pte_lo = moea64_calc_wimg(VM_PAGE_TO_PHYS(m), pmap_page_get_memattr(m));

	if (prot & VM_PROT_WRITE) {
		pte_lo |= LPTE_BW;
		if (pmap_bootstrapped &&
		    (m->oflags & VPO_UNMANAGED) == 0)
			vm_page_aflag_set(m, PGA_WRITEABLE);
	} else
		pte_lo |= LPTE_BR;

	if ((prot & VM_PROT_EXECUTE) == 0)
		pte_lo |= LPTE_NOEXEC;

	if (wired)
		pvo_flags |= PVO_WIRED;

	error = moea64_pvo_enter(mmu, pmap, zone, pvo_head, va,
	    VM_PAGE_TO_PHYS(m), pte_lo, pvo_flags);

	/*
	 * Flush the page from the instruction cache if this page is
	 * mapped executable and cacheable.
	 */
	if ((pte_lo & (LPTE_I | LPTE_G | LPTE_NOEXEC)) == 0)
		moea64_syncicache(mmu, pmap, va, VM_PAGE_TO_PHYS(m), PAGE_SIZE);
}

static void
moea64_syncicache(mmu_t mmu, pmap_t pmap, vm_offset_t va, vm_offset_t pa,
    vm_size_t sz)
{

	/*
	 * This is much trickier than on older systems because
	 * we can't sync the icache on physical addresses directly
	 * without a direct map.  Instead we check a couple of cases
	 * where the memory is already mapped in and, failing that,
	 * use the same trick we use for page zeroing to create
	 * a temporary mapping for this physical address.
	 */

	if (!pmap_bootstrapped) {
		/*
		 * If PMAP is not bootstrapped, we are likely to be
		 * in real mode.
		 */
		__syncicache((void *)pa, sz);
	} else if (pmap == kernel_pmap) {
		__syncicache((void *)va, sz);
	} else if (hw_direct_map) {
		__syncicache((void *)pa, sz);
	} else {
		/* Use the scratch page to set up a temp mapping */

		mtx_lock(&moea64_scratchpage_mtx);

		moea64_set_scratchpage_pa(mmu, 1, pa & ~ADDR_POFF);
		__syncicache((void *)(moea64_scratchpage_va[1] +
		    (va & ADDR_POFF)), sz);

		mtx_unlock(&moea64_scratchpage_mtx);
	}
}

/*
 * Maps a sequence of resident pages belonging to the same object.
 * The sequence begins with the given page m_start.  This page is
 * mapped at the given virtual address start.  Each subsequent page is
 * mapped at a virtual address that is offset from start by the same
 * amount as the page is offset from m_start within the object.  The
 * last page in the sequence is the page with the largest offset from
 * m_start that can be mapped at a virtual address less than the given
 * virtual address end.  Not every virtual page between start and end
 * is mapped; only those for which a resident page exists with the
 * corresponding offset from m_start are mapped.
 */
void
moea64_enter_object(mmu_t mmu, pmap_t pm, vm_offset_t start, vm_offset_t end,
    vm_page_t m_start, vm_prot_t prot)
{
	vm_page_t m;
	vm_pindex_t diff, psize;

	psize = atop(end - start);
	m = m_start;
	vm_page_lock_queues();
	PMAP_LOCK(pm);
	while (m != NULL && (diff = m->pindex - m_start->pindex) < psize) {
		moea64_enter_locked(mmu, pm, start + ptoa(diff), m, prot &
		    (VM_PROT_READ | VM_PROT_EXECUTE), FALSE);
		m = TAILQ_NEXT(m, listq);
	}
	vm_page_unlock_queues();
	PMAP_UNLOCK(pm);
}

void
moea64_enter_quick(mmu_t mmu, pmap_t pm, vm_offset_t va, vm_page_t m,
    vm_prot_t prot)
{

	vm_page_lock_queues();
	PMAP_LOCK(pm);
	moea64_enter_locked(mmu, pm, va, m,
	    prot & (VM_PROT_READ | VM_PROT_EXECUTE), FALSE);
	vm_page_unlock_queues();
	PMAP_UNLOCK(pm);
}

vm_paddr_t
moea64_extract(mmu_t mmu, pmap_t pm, vm_offset_t va)
{
	struct	pvo_entry *pvo;
	vm_paddr_t pa;

	PMAP_LOCK(pm);
	pvo = moea64_pvo_find_va(pm, va);
	if (pvo == NULL)
		pa = 0;
	else
		pa = (pvo->pvo_pte.lpte.pte_lo & LPTE_RPGN) |
		    (va - PVO_VADDR(pvo));
	PMAP_UNLOCK(pm);
	return (pa);
}

/*
 * Atomically extract and hold the physical page with the given
 * pmap and virtual address pair if that mapping permits the given
 * protection.
 */
vm_page_t
moea64_extract_and_hold(mmu_t mmu, pmap_t pmap, vm_offset_t va, vm_prot_t prot)
{
	struct	pvo_entry *pvo;
	vm_page_t m;
	vm_paddr_t pa;

	m = NULL;
	pa = 0;
	PMAP_LOCK(pmap);
retry:
	pvo = moea64_pvo_find_va(pmap, va & ~ADDR_POFF);
	if (pvo != NULL && (pvo->pvo_pte.lpte.pte_hi & LPTE_VALID) &&
	    ((pvo->pvo_pte.lpte.pte_lo & LPTE_PP) == LPTE_RW ||
	     (prot & VM_PROT_WRITE) == 0)) {
		if (vm_page_pa_tryrelock(pmap,
		    pvo->pvo_pte.lpte.pte_lo & LPTE_RPGN, &pa))
			goto retry;
		m = PHYS_TO_VM_PAGE(pvo->pvo_pte.lpte.pte_lo & LPTE_RPGN);
		vm_page_hold(m);
	}
	PA_UNLOCK_COND(pa);
	PMAP_UNLOCK(pmap);
	return (m);
}

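/*
 * installed_mmu is remembered here so that the custom UMA page
 * allocator below has an mmu_t in hand to pass to moea64_pvo_enter()
 * when it is called back from UMA.
 */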
static mmu_t installed_mmu;

static void *
moea64_uma_page_alloc(uma_zone_t zone, int bytes, u_int8_t *flags, int wait)
{
	/*
	 * This entire routine is a horrible hack to avoid bothering kmem
	 * for new KVA addresses. Because this can get called from inside
	 * kmem allocation routines, calling kmem for a new address here
	 * can lead to multiply locking non-recursive mutexes.
	 */
	vm_offset_t va;

	vm_page_t m;
	int pflags, needed_lock;

	*flags = UMA_SLAB_PRIV;
	needed_lock = !PMAP_LOCKED(kernel_pmap);

	if (needed_lock)
		PMAP_LOCK(kernel_pmap);

	if ((wait & (M_NOWAIT|M_USE_RESERVE)) == M_NOWAIT)
		pflags = VM_ALLOC_INTERRUPT | VM_ALLOC_WIRED;
	else
		pflags = VM_ALLOC_SYSTEM | VM_ALLOC_WIRED;
	if (wait & M_ZERO)
		pflags |= VM_ALLOC_ZERO;

	for (;;) {
		m = vm_page_alloc(NULL, 0, pflags | VM_ALLOC_NOOBJ);
		if (m == NULL) {
			if (wait & M_NOWAIT)
				return (NULL);
			VM_WAIT;
		} else
			break;
	}

	va = VM_PAGE_TO_PHYS(m);

	moea64_pvo_enter(installed_mmu, kernel_pmap, moea64_upvo_zone,
	    &moea64_pvo_kunmanaged, va, VM_PAGE_TO_PHYS(m), LPTE_M,
	    PVO_WIRED | PVO_BOOTSTRAP);

	if (needed_lock)
		PMAP_UNLOCK(kernel_pmap);

	if ((wait & M_ZERO) && (m->flags & PG_ZERO) == 0)
		bzero((void *)va, PAGE_SIZE);

	return (void *)va;
}

extern int elf32_nxstack;

void
moea64_init(mmu_t mmu)
{

	CTR0(KTR_PMAP, "moea64_init");

	moea64_upvo_zone = uma_zcreate("UPVO entry", sizeof (struct pvo_entry),
	    NULL, NULL, NULL, NULL, UMA_ALIGN_PTR,
	    UMA_ZONE_VM | UMA_ZONE_NOFREE);
	moea64_mpvo_zone = uma_zcreate("MPVO entry", sizeof(struct pvo_entry),
	    NULL, NULL, NULL, NULL, UMA_ALIGN_PTR,
	    UMA_ZONE_VM | UMA_ZONE_NOFREE);

	if (!hw_direct_map) {
		installed_mmu = mmu;
		uma_zone_set_allocf(moea64_upvo_zone,moea64_uma_page_alloc);
		uma_zone_set_allocf(moea64_mpvo_zone,moea64_uma_page_alloc);
	}

#ifdef COMPAT_FREEBSD32
	elf32_nxstack = 1;
#endif

	moea64_initialized = TRUE;
}

boolean_t
moea64_is_referenced(mmu_t mmu, vm_page_t m)
{

	KASSERT((m->oflags & VPO_UNMANAGED) == 0,
	    ("moea64_is_referenced: page %p is not managed", m));
	return (moea64_query_bit(mmu, m, PTE_REF));
}

boolean_t
moea64_is_modified(mmu_t mmu, vm_page_t m)
{

	KASSERT((m->oflags & VPO_UNMANAGED) == 0,
	    ("moea64_is_modified: page %p is not managed", m));

	/*
	 * If the page is not VPO_BUSY, then PGA_WRITEABLE cannot be
	 * concurrently set while the object is locked.  Thus, if PGA_WRITEABLE
	 * is clear, no PTEs can have LPTE_CHG set.
	 */
	VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED);
	if ((m->oflags & VPO_BUSY) == 0 &&
	    (m->aflags & PGA_WRITEABLE) == 0)
		return (FALSE);
	return (moea64_query_bit(mmu, m, LPTE_CHG));
}

boolean_t
moea64_is_prefaultable(mmu_t mmu, pmap_t pmap, vm_offset_t va)
{
	struct pvo_entry *pvo;
	boolean_t rv;

	PMAP_LOCK(pmap);
	pvo = moea64_pvo_find_va(pmap, va & ~ADDR_POFF);
	rv = pvo == NULL || (pvo->pvo_pte.lpte.pte_hi & LPTE_VALID) == 0;
	PMAP_UNLOCK(pmap);
	return (rv);
}

void
moea64_clear_reference(mmu_t mmu, vm_page_t m)
{

	KASSERT((m->oflags & VPO_UNMANAGED) == 0,
	    ("moea64_clear_reference: page %p is not managed", m));
	moea64_clear_bit(mmu, m, LPTE_REF);
}

void
moea64_clear_modify(mmu_t mmu, vm_page_t m)
{

	KASSERT((m->oflags & VPO_UNMANAGED) == 0,
	    ("moea64_clear_modify: page %p is not managed", m));
	VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED);
	KASSERT((m->oflags & VPO_BUSY) == 0,
	    ("moea64_clear_modify: page %p is busy", m));

	/*
	 * If the page is not PGA_WRITEABLE, then no PTEs can have LPTE_CHG
	 * set.  If the object containing the page is locked and the page is
	 * not VPO_BUSY, then PGA_WRITEABLE cannot be concurrently set.
	 */
	if ((m->aflags & PGA_WRITEABLE) == 0)
		return;
	moea64_clear_bit(mmu, m, LPTE_CHG);
}

/*
 * Clear the write and modified bits in each of the given page's mappings.
 */
void
moea64_remove_write(mmu_t mmu, vm_page_t m)
{
	struct	pvo_entry *pvo;
	uintptr_t pt;
	pmap_t	pmap;
	uint64_t lo = 0;

	KASSERT((m->oflags & VPO_UNMANAGED) == 0,
	    ("moea64_remove_write: page %p is not managed", m));

	/*
	 * If the page is not VPO_BUSY, then PGA_WRITEABLE cannot be set by
	 * another thread while the object is locked.  Thus, if PGA_WRITEABLE
	 * is clear, no page table entries need updating.
	 */
	VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED);
	if ((m->oflags & VPO_BUSY) == 0 &&
	    (m->aflags & PGA_WRITEABLE) == 0)
		return;
	vm_page_lock_queues();
	powerpc_sync();
	LIST_FOREACH(pvo, vm_page_to_pvoh(m), pvo_vlink) {
		pmap = pvo->pvo_pmap;
		PMAP_LOCK(pmap);
		LOCK_TABLE();
		if ((pvo->pvo_pte.lpte.pte_lo & LPTE_PP) != LPTE_BR) {
			pt = MOEA64_PVO_TO_PTE(mmu, pvo);
			pvo->pvo_pte.lpte.pte_lo &= ~LPTE_PP;
			pvo->pvo_pte.lpte.pte_lo |= LPTE_BR;
			if (pt != -1) {
				MOEA64_PTE_SYNCH(mmu, pt, &pvo->pvo_pte.lpte);
				lo |= pvo->pvo_pte.lpte.pte_lo;
				pvo->pvo_pte.lpte.pte_lo &= ~LPTE_CHG;
				MOEA64_PTE_CHANGE(mmu, pt,
				    &pvo->pvo_pte.lpte, pvo->pvo_vpn);
				if (pvo->pvo_pmap == kernel_pmap)
					isync();
			}
		}
		UNLOCK_TABLE();
		PMAP_UNLOCK(pmap);
	}
	if ((lo & LPTE_CHG) != 0)
		vm_page_dirty(m);
	vm_page_aflag_clear(m, PGA_WRITEABLE);
	vm_page_unlock_queues();
}

/*
 *	moea64_ts_referenced:
 *
 *	Return a count of reference bits for a page, clearing those bits.
 *	It is not necessary for every reference bit to be cleared, but it
 *	is necessary that 0 only be returned when there are truly no
 *	reference bits set.
 *
 *	XXX: The exact number of bits to check and clear is a matter that
 *	should be tested and standardized at some point in the future for
 *	optimal aging of shared pages.
 */
boolean_t
moea64_ts_referenced(mmu_t mmu, vm_page_t m)
{

	KASSERT((m->oflags & VPO_UNMANAGED) == 0,
	    ("moea64_ts_referenced: page %p is not managed", m));
	return (moea64_clear_bit(mmu, m, LPTE_REF));
}

/*
 * Modify the WIMG settings of all mappings for a page.
 */
void
moea64_page_set_memattr(mmu_t mmu, vm_page_t m, vm_memattr_t ma)
{
	struct	pvo_entry *pvo;
	struct	pvo_head *pvo_head;
	uintptr_t pt;
	pmap_t	pmap;
	uint64_t lo;

	if ((m->oflags & VPO_UNMANAGED) != 0) {
		m->md.mdpg_cache_attrs = ma;
		return;
	}

	vm_page_lock_queues();
	pvo_head = vm_page_to_pvoh(m);
	lo = moea64_calc_wimg(VM_PAGE_TO_PHYS(m), ma);
	LIST_FOREACH(pvo, pvo_head, pvo_vlink) {
		pmap = pvo->pvo_pmap;
		PMAP_LOCK(pmap);
		LOCK_TABLE();
		pt = MOEA64_PVO_TO_PTE(mmu, pvo);
		pvo->pvo_pte.lpte.pte_lo &= ~LPTE_WIMG;
		pvo->pvo_pte.lpte.pte_lo |= lo;
		if (pt != -1) {
			MOEA64_PTE_CHANGE(mmu, pt, &pvo->pvo_pte.lpte,
			    pvo->pvo_vpn);
			if (pvo->pvo_pmap == kernel_pmap)
				isync();
		}
		UNLOCK_TABLE();
		PMAP_UNLOCK(pmap);
	}
	m->md.mdpg_cache_attrs = ma;
	vm_page_unlock_queues();
}

/*
 * Map a wired page into kernel virtual address space.
 */
void
moea64_kenter_attr(mmu_t mmu, vm_offset_t va, vm_offset_t pa, vm_memattr_t ma)
{
	uint64_t	pte_lo;
	int		error;

	pte_lo = moea64_calc_wimg(pa, ma);

	PMAP_LOCK(kernel_pmap);
	error = moea64_pvo_enter(mmu, kernel_pmap, moea64_upvo_zone,
	    &moea64_pvo_kunmanaged, va, pa, pte_lo, PVO_WIRED);

	if (error != 0 && error != ENOENT)
		panic("moea64_kenter: failed to enter va %#zx pa %#zx: %d", va,
		    pa, error);

	/*
	 * Flush the memory from the instruction cache.
	 */
	if ((pte_lo & (LPTE_I | LPTE_G)) == 0)
		__syncicache((void *)va, PAGE_SIZE);
	PMAP_UNLOCK(kernel_pmap);
}

void
moea64_kenter(mmu_t mmu, vm_offset_t va, vm_offset_t pa)
{

	moea64_kenter_attr(mmu, va, pa, VM_MEMATTR_DEFAULT);
}

/*
 * Extract the physical page address associated with the given kernel virtual
 * address.
 */
vm_offset_t
moea64_kextract(mmu_t mmu, vm_offset_t va)
{
	struct	pvo_entry *pvo;
	vm_paddr_t pa;

	/*
	 * Shortcut the direct-mapped case when applicable.  We never put
	 * anything but 1:1 mappings below VM_MIN_KERNEL_ADDRESS.
	 */
	if (va < VM_MIN_KERNEL_ADDRESS)
		return (va);

	PMAP_LOCK(kernel_pmap);
	pvo = moea64_pvo_find_va(kernel_pmap, va);
	KASSERT(pvo != NULL, ("moea64_kextract: no addr found for %#" PRIxPTR,
	    va));
	pa = (pvo->pvo_pte.lpte.pte_lo & LPTE_RPGN) | (va - PVO_VADDR(pvo));
	PMAP_UNLOCK(kernel_pmap);
	return (pa);
}

/*
 * Remove a wired page from kernel virtual address space.
 */
void
moea64_kremove(mmu_t mmu, vm_offset_t va)
{
	moea64_remove(mmu, kernel_pmap, va, va + PAGE_SIZE);
}

/*
 * Map a range of physical addresses into kernel virtual address space.
 *
 * The value passed in *virt is a suggested virtual address for the mapping.
 * Architectures which can support a direct-mapped physical to virtual region
 * can return the appropriate address within that region, leaving '*virt'
 * unchanged.  We cannot and therefore do not; *virt is updated with the
 * first usable address after the mapped region.
 */
vm_offset_t
moea64_map(mmu_t mmu, vm_offset_t *virt, vm_offset_t pa_start,
    vm_offset_t pa_end, int prot)
{
	vm_offset_t	sva, va;

	sva = *virt;
	va = sva;
	for (; pa_start < pa_end; pa_start += PAGE_SIZE, va += PAGE_SIZE)
		moea64_kenter(mmu, va, pa_start);
	*virt = va;

	return (sva);
}

/*
 * Returns true if the pmap's pv is one of the first
 * 16 pvs linked to from this page.  This count may
 * be changed upwards or downwards in the future; it
 * is only necessary that true be returned for a small
 * subset of pmaps for proper page aging.
 */
boolean_t
moea64_page_exists_quick(mmu_t mmu, pmap_t pmap, vm_page_t m)
{
	int loops;
	struct pvo_entry *pvo;
	boolean_t rv;

	KASSERT((m->oflags & VPO_UNMANAGED) == 0,
	    ("moea64_page_exists_quick: page %p is not managed", m));
	loops = 0;
	rv = FALSE;
	vm_page_lock_queues();
	LIST_FOREACH(pvo, vm_page_to_pvoh(m), pvo_vlink) {
		if (pvo->pvo_pmap == pmap) {
			rv = TRUE;
			break;
		}
		if (++loops >= 16)
			break;
	}
	vm_page_unlock_queues();
	return (rv);
}

/*
 * Return the number of managed mappings to the given physical page
 * that are wired.
 */
int
moea64_page_wired_mappings(mmu_t mmu, vm_page_t m)
{
	struct pvo_entry *pvo;
	int count;

	count = 0;
	if ((m->oflags & VPO_UNMANAGED) != 0)
		return (count);
	vm_page_lock_queues();
	LIST_FOREACH(pvo, vm_page_to_pvoh(m), pvo_vlink)
		if ((pvo->pvo_vaddr & PVO_WIRED) != 0)
			count++;
	vm_page_unlock_queues();
	return (count);
}

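/*
 * VSIDs are allocated out of moea64_vsid_bitmap, VSID_NBPW bits per
 * word.  A pseudo-random probe below picks a bucket; on a collision
 * the first free bit in that bucket is used instead, and allocation
 * only fails once every bucket visited is completely full.
 */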
static uintptr_t	moea64_vsidcontext;

uintptr_t
moea64_get_unique_vsid(void) {
	u_int entropy;
	register_t hash;
	uint32_t mask;
	int i;

	entropy = 0;
	__asm __volatile("mftb %0" : "=r"(entropy));

	mtx_lock(&moea64_slb_mutex);
	for (i = 0; i < NVSIDS; i += VSID_NBPW) {
		u_int	n;

		/*
		 * Create a new value by multiplying by a prime and adding in
		 * entropy from the timebase register.  This is to make the
		 * VSID more random so that the PT hash function collides
		 * less often.  (Note that the prime causes gcc to do shifts
		 * instead of a multiply.)
		 */
		moea64_vsidcontext = (moea64_vsidcontext * 0x1105) + entropy;
		hash = moea64_vsidcontext & (NVSIDS - 1);
		if (hash == 0)		/* 0 is special, avoid it */
			continue;
		n = hash >> 5;
		mask = 1 << (hash & (VSID_NBPW - 1));
		hash = (moea64_vsidcontext & VSID_HASHMASK);
		if (moea64_vsid_bitmap[n] & mask) {	/* collision? */
			/* anything free in this bucket? */
			if (moea64_vsid_bitmap[n] == 0xffffffff) {
				entropy = (moea64_vsidcontext >> 20);
				continue;
			}
			i = ffs(~moea64_vsid_bitmap[n]) - 1;
			mask = 1 << i;
			hash &= VSID_HASHMASK & ~(VSID_NBPW - 1);
			hash |= i;
		}
		KASSERT(!(moea64_vsid_bitmap[n] & mask),
		    ("Allocating in-use VSID %#zx\n", hash));
		moea64_vsid_bitmap[n] |= mask;
		mtx_unlock(&moea64_slb_mutex);
		return (hash);
	}

	mtx_unlock(&moea64_slb_mutex);
	panic("%s: out of segments",__func__);
}

#ifdef __powerpc64__
void
moea64_pinit(mmu_t mmu, pmap_t pmap)
{
	PMAP_LOCK_INIT(pmap);
	LIST_INIT(&pmap->pmap_pvo);

	pmap->pm_slb_tree_root = slb_alloc_tree();
	pmap->pm_slb = slb_alloc_user_cache();
	pmap->pm_slb_len = 0;
}
#else
void
moea64_pinit(mmu_t mmu, pmap_t pmap)
{
	int	i;
	uint32_t hash;

	PMAP_LOCK_INIT(pmap);
	LIST_INIT(&pmap->pmap_pvo);

	if (pmap_bootstrapped)
		pmap->pmap_phys = (pmap_t)moea64_kextract(mmu,
		    (vm_offset_t)pmap);
	else
		pmap->pmap_phys = pmap;

	/*
	 * Allocate some segment registers for this pmap.
	 */
	hash = moea64_get_unique_vsid();

	for (i = 0; i < 16; i++)
		pmap->pm_sr[i] = VSID_MAKE(i, hash);

	KASSERT(pmap->pm_sr[0] != 0, ("moea64_pinit: pm_sr[0] = 0"));
}
#endif

/*
 * Initialize the pmap associated with process 0.
 */
void
moea64_pinit0(mmu_t mmu, pmap_t pm)
{
	moea64_pinit(mmu, pm);
	bzero(&pm->pm_stats, sizeof(pm->pm_stats));
}

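/*
 * Note: moea64_protect() and moea64_remove() below walk the range one
 * page at a time only while the range is smaller than the pmap's
 * resident count; for larger ranges it is cheaper to scan the pmap's
 * PVO list instead.
 */
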
#ifdef __powerpc64__
void
moea64_pinit(mmu_t mmu, pmap_t pmap)
{
	PMAP_LOCK_INIT(pmap);
	LIST_INIT(&pmap->pmap_pvo);

	pmap->pm_slb_tree_root = slb_alloc_tree();
	pmap->pm_slb = slb_alloc_user_cache();
	pmap->pm_slb_len = 0;
}
#else
void
moea64_pinit(mmu_t mmu, pmap_t pmap)
{
	int i;
	uint32_t hash;

	PMAP_LOCK_INIT(pmap);
	LIST_INIT(&pmap->pmap_pvo);

	if (pmap_bootstrapped)
		pmap->pmap_phys = (pmap_t)moea64_kextract(mmu,
		    (vm_offset_t)pmap);
	else
		pmap->pmap_phys = pmap;

	/*
	 * Allocate some segment registers for this pmap.
	 */
	hash = moea64_get_unique_vsid();

	for (i = 0; i < 16; i++)
		pmap->pm_sr[i] = VSID_MAKE(i, hash);

	KASSERT(pmap->pm_sr[0] != 0, ("moea64_pinit: pm_sr[0] = 0"));
}
#endif

/*
 * Initialize the pmap associated with process 0.
 */
void
moea64_pinit0(mmu_t mmu, pmap_t pm)
{
	moea64_pinit(mmu, pm);
	bzero(&pm->pm_stats, sizeof(pm->pm_stats));
}

/*
 * Set the physical protection on the specified range of this map as requested.
 */
static void
moea64_pvo_protect(mmu_t mmu, pmap_t pm, struct pvo_entry *pvo, vm_prot_t prot)
{
	uintptr_t pt;

	/*
	 * Grab the PTE pointer before we diddle with the cached PTE
	 * copy.
	 */
	LOCK_TABLE();
	pt = MOEA64_PVO_TO_PTE(mmu, pvo);

	/*
	 * Change the protection of the page.
	 */
	pvo->pvo_pte.lpte.pte_lo &= ~LPTE_PP;
	pvo->pvo_pte.lpte.pte_lo |= LPTE_BR;
	pvo->pvo_pte.lpte.pte_lo &= ~LPTE_NOEXEC;
	if ((prot & VM_PROT_EXECUTE) == 0)
		pvo->pvo_pte.lpte.pte_lo |= LPTE_NOEXEC;

	/*
	 * If the PVO is in the page table, update that pte as well.
	 */
	if (pt != -1) {
		MOEA64_PTE_CHANGE(mmu, pt, &pvo->pvo_pte.lpte,
		    pvo->pvo_vpn);
		if ((pvo->pvo_pte.lpte.pte_lo &
		    (LPTE_I | LPTE_G | LPTE_NOEXEC)) == 0) {
			moea64_syncicache(mmu, pm, PVO_VADDR(pvo),
			    pvo->pvo_pte.lpte.pte_lo & LPTE_RPGN,
			    PAGE_SIZE);
		}
	}

	/*
	 * Update vm about the REF/CHG bits if the page is managed.
	 */
	if ((pvo->pvo_vaddr & PVO_MANAGED) == PVO_MANAGED) {
		struct vm_page *pg;

		pg = PHYS_TO_VM_PAGE(pvo->pvo_pte.lpte.pte_lo & LPTE_RPGN);
		if (pg != NULL) {
			if (pvo->pvo_pte.lpte.pte_lo & LPTE_CHG)
				vm_page_dirty(pg);
			if (pvo->pvo_pte.lpte.pte_lo & LPTE_REF)
				vm_page_aflag_set(pg, PGA_REFERENCED);
		}
	}
	UNLOCK_TABLE();
}

void
moea64_protect(mmu_t mmu, pmap_t pm, vm_offset_t sva, vm_offset_t eva,
    vm_prot_t prot)
{
	struct pvo_entry *pvo, *tpvo;

	CTR4(KTR_PMAP, "moea64_protect: pm=%p sva=%#x eva=%#x prot=%#x", pm,
	    sva, eva, prot);

	KASSERT(pm == &curproc->p_vmspace->vm_pmap || pm == kernel_pmap,
	    ("moea64_protect: non current pmap"));

	if ((prot & VM_PROT_READ) == VM_PROT_NONE) {
		moea64_remove(mmu, pm, sva, eva);
		return;
	}

	PMAP_LOCK(pm);
	if ((eva - sva)/PAGE_SIZE < pm->pm_stats.resident_count) {
		for (; sva < eva; sva += PAGE_SIZE) {
			pvo = moea64_pvo_find_va(pm, sva);
			if (pvo != NULL)
				moea64_pvo_protect(mmu, pm, pvo, prot);
		}
	} else {
		LIST_FOREACH_SAFE(pvo, &pm->pmap_pvo, pvo_plink, tpvo) {
			if (PVO_VADDR(pvo) < sva || PVO_VADDR(pvo) >= eva)
				continue;
			moea64_pvo_protect(mmu, pm, pvo, prot);
		}
	}
	PMAP_UNLOCK(pm);
}

/*
 * Map a list of wired pages into kernel virtual address space.  This is
 * intended for temporary mappings which do not need page modification or
 * references recorded.  Existing mappings in the region are overwritten.
 */
void
moea64_qenter(mmu_t mmu, vm_offset_t va, vm_page_t *m, int count)
{
	while (count-- > 0) {
		moea64_kenter(mmu, va, VM_PAGE_TO_PHYS(*m));
		va += PAGE_SIZE;
		m++;
	}
}

/*
 * Remove page mappings from kernel virtual address space.  Intended for
 * temporary mappings entered by moea64_qenter.
 */
void
moea64_qremove(mmu_t mmu, vm_offset_t va, int count)
{
	while (count-- > 0) {
		moea64_kremove(mmu, va);
		va += PAGE_SIZE;
	}
}

void
moea64_release_vsid(uint64_t vsid)
{
	int idx, mask;

	mtx_lock(&moea64_slb_mutex);
	idx = vsid & (NVSIDS-1);
	mask = 1 << (idx % VSID_NBPW);
	idx /= VSID_NBPW;
	KASSERT(moea64_vsid_bitmap[idx] & mask,
	    ("Freeing unallocated VSID %#jx", vsid));
	moea64_vsid_bitmap[idx] &= ~mask;
	mtx_unlock(&moea64_slb_mutex);
}

void
moea64_release(mmu_t mmu, pmap_t pmap)
{

	/*
	 * Free segment registers' VSIDs
	 */
    #ifdef __powerpc64__
	slb_free_tree(pmap);
	slb_free_user_cache(pmap->pm_slb);
    #else
	KASSERT(pmap->pm_sr[0] != 0, ("moea64_release: pm_sr[0] = 0"));

	moea64_release_vsid(VSID_TO_HASH(pmap->pm_sr[0]));
    #endif

	PMAP_LOCK_DESTROY(pmap);
}

/*
 * Remove all pages mapped by the specified pmap
 */
void
moea64_remove_pages(mmu_t mmu, pmap_t pm)
{
	struct pvo_entry *pvo, *tpvo;

	vm_page_lock_queues();
	PMAP_LOCK(pm);
	LIST_FOREACH_SAFE(pvo, &pm->pmap_pvo, pvo_plink, tpvo) {
		if (!(pvo->pvo_vaddr & PVO_WIRED))
			moea64_pvo_remove(mmu, pvo);
	}
	vm_page_unlock_queues();
	PMAP_UNLOCK(pm);
}
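/*
 * Note on the traversal strategy used here and in moea64_protect() above:
 * when the range covers fewer pages than the pmap has resident, it is
 * cheaper to probe each page address individually with
 * moea64_pvo_find_va(); otherwise the pmap's whole PVO list is walked and
 * entries outside [sva, eva) are skipped.  Both arrive at the same set of
 * mappings; only the lookup cost differs.
 */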
/*
 * Remove the given range of addresses from the specified map.
 */
void
moea64_remove(mmu_t mmu, pmap_t pm, vm_offset_t sva, vm_offset_t eva)
{
	struct pvo_entry *pvo, *tpvo;

	/*
	 * Perform an unsynchronized read of resident_count.  This is,
	 * however, safe: if the count is zero, there are no mappings to
	 * remove.
	 */
	if (pm->pm_stats.resident_count == 0)
		return;

	vm_page_lock_queues();
	PMAP_LOCK(pm);
	if ((eva - sva)/PAGE_SIZE < pm->pm_stats.resident_count) {
		for (; sva < eva; sva += PAGE_SIZE) {
			pvo = moea64_pvo_find_va(pm, sva);
			if (pvo != NULL)
				moea64_pvo_remove(mmu, pvo);
		}
	} else {
		LIST_FOREACH_SAFE(pvo, &pm->pmap_pvo, pvo_plink, tpvo) {
			if (PVO_VADDR(pvo) < sva || PVO_VADDR(pvo) >= eva)
				continue;
			moea64_pvo_remove(mmu, pvo);
		}
	}
	vm_page_unlock_queues();
	PMAP_UNLOCK(pm);
}

/*
 * Remove physical page from all pmaps in which it resides.
 * moea64_pvo_remove() will reflect changes in PTEs back to the vm_page.
 */
void
moea64_remove_all(mmu_t mmu, vm_page_t m)
{
	struct pvo_head *pvo_head;
	struct pvo_entry *pvo, *next_pvo;
	pmap_t pmap;

	vm_page_lock_queues();
	pvo_head = vm_page_to_pvoh(m);
	for (pvo = LIST_FIRST(pvo_head); pvo != NULL; pvo = next_pvo) {
		next_pvo = LIST_NEXT(pvo, pvo_vlink);

		pmap = pvo->pvo_pmap;
		PMAP_LOCK(pmap);
		moea64_pvo_remove(mmu, pvo);
		PMAP_UNLOCK(pmap);
	}
	if ((m->aflags & PGA_WRITEABLE) && moea64_is_modified(mmu, m))
		vm_page_dirty(m);
	vm_page_aflag_clear(m, PGA_WRITEABLE);
	vm_page_unlock_queues();
}

/*
 * Allocate a physical page of memory directly from the phys_avail map.
 * Can only be called from moea64_bootstrap before avail start and end are
 * calculated.
 */
vm_offset_t
moea64_bootstrap_alloc(vm_size_t size, u_int align)
{
	vm_offset_t s, e;
	int i, j;

	size = round_page(size);
	for (i = 0; phys_avail[i + 1] != 0; i += 2) {
		if (align != 0)
			s = (phys_avail[i] + align - 1) & ~(align - 1);
		else
			s = phys_avail[i];
		e = s + size;

		if (s < phys_avail[i] || e > phys_avail[i + 1])
			continue;

		if (s + size > platform_real_maxaddr())
			continue;

		if (s == phys_avail[i]) {
			phys_avail[i] += size;
		} else if (e == phys_avail[i + 1]) {
			phys_avail[i + 1] -= size;
		} else {
			for (j = phys_avail_count * 2; j > i; j -= 2) {
				phys_avail[j] = phys_avail[j - 2];
				phys_avail[j + 1] = phys_avail[j - 1];
			}

			phys_avail[i + 3] = phys_avail[i + 1];
			phys_avail[i + 1] = s;
			phys_avail[i + 2] = e;
			phys_avail_count++;
		}

		return (s);
	}
	panic("moea64_bootstrap_alloc: could not allocate memory");
}
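/*
 * Return-value convention for moea64_pvo_enter() below: 0 means an
 * identical mapping already existed (or the new mapping is not the first
 * for its physical page), ENOENT means this PVO is the first mapping of
 * the page, and ENOMEM means PVO allocation failed.  Callers can use the
 * ENOENT case to trigger first-mapping work such as instruction-cache
 * synchronization.
 */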
static int
moea64_pvo_enter(mmu_t mmu, pmap_t pm, uma_zone_t zone,
    struct pvo_head *pvo_head, vm_offset_t va, vm_offset_t pa,
    uint64_t pte_lo, int flags)
{
	struct pvo_entry *pvo;
	uint64_t vsid;
	int first;
	u_int ptegidx;
	int i;
	int bootstrap;

	/*
	 * One nasty thing that can happen here is that the UMA calls to
	 * allocate new PVOs need to map more memory, which calls pvo_enter(),
	 * which calls UMA...
	 *
	 * We break the loop by detecting recursion and allocating out of
	 * the bootstrap pool.
	 */

	first = 0;
	bootstrap = (flags & PVO_BOOTSTRAP);

	if (!moea64_initialized)
		bootstrap = 1;

	/*
	 * Compute the PTE Group index.
	 */
	va &= ~ADDR_POFF;
	vsid = va_to_vsid(pm, va);
	ptegidx = va_to_pteg(vsid, va, flags & PVO_LARGE);

	/*
	 * Remove any existing mapping for this page.  Reuse the pvo entry if
	 * there is a mapping.
	 */
	LOCK_TABLE();

	moea64_pvo_enter_calls++;

	LIST_FOREACH(pvo, &moea64_pvo_table[ptegidx], pvo_olink) {
		if (pvo->pvo_pmap == pm && PVO_VADDR(pvo) == va) {
			if ((pvo->pvo_pte.lpte.pte_lo & LPTE_RPGN) == pa &&
			    (pvo->pvo_pte.lpte.pte_lo & (LPTE_NOEXEC | LPTE_PP))
			    == (pte_lo & (LPTE_NOEXEC | LPTE_PP))) {
				if (!(pvo->pvo_pte.lpte.pte_hi & LPTE_VALID)) {
					/* Re-insert if spilled */
					i = MOEA64_PTE_INSERT(mmu, ptegidx,
					    &pvo->pvo_pte.lpte);
					if (i >= 0)
						PVO_PTEGIDX_SET(pvo, i);
					moea64_pte_overflow--;
				}
				UNLOCK_TABLE();
				return (0);
			}
			moea64_pvo_remove(mmu, pvo);
			break;
		}
	}

	/*
	 * If we aren't overwriting a mapping, try to allocate.
	 */
	if (bootstrap) {
		if (moea64_bpvo_pool_index >= BPVO_POOL_SIZE) {
			panic("moea64_enter: bpvo pool exhausted, %d, %d, %zd",
			    moea64_bpvo_pool_index, BPVO_POOL_SIZE,
			    BPVO_POOL_SIZE * sizeof(struct pvo_entry));
		}
		pvo = &moea64_bpvo_pool[moea64_bpvo_pool_index];
		moea64_bpvo_pool_index++;
		bootstrap = 1;
	} else {
		/*
		 * Note: drop the table lock around the UMA allocation in
		 * case the UMA allocator needs to manipulate the page
		 * table.  The mapping we are working with is already
		 * protected by the PMAP lock.
		 */
		UNLOCK_TABLE();
		pvo = uma_zalloc(zone, M_NOWAIT);
		LOCK_TABLE();
	}

	if (pvo == NULL) {
		UNLOCK_TABLE();
		return (ENOMEM);
	}

	moea64_pvo_entries++;
	pvo->pvo_vaddr = va;
	pvo->pvo_vpn = (uint64_t)((va & ADDR_PIDX) >> ADDR_PIDX_SHFT)
	    | (vsid << 16);
	pvo->pvo_pmap = pm;
	LIST_INSERT_HEAD(&moea64_pvo_table[ptegidx], pvo, pvo_olink);
	pvo->pvo_vaddr &= ~ADDR_POFF;

	if (flags & PVO_WIRED)
		pvo->pvo_vaddr |= PVO_WIRED;
	if (pvo_head != &moea64_pvo_kunmanaged)
		pvo->pvo_vaddr |= PVO_MANAGED;
	if (bootstrap)
		pvo->pvo_vaddr |= PVO_BOOTSTRAP;
	if (flags & PVO_LARGE)
		pvo->pvo_vaddr |= PVO_LARGE;

	moea64_pte_create(&pvo->pvo_pte.lpte, vsid, va,
	    (uint64_t)(pa) | pte_lo, flags);

	/*
	 * Add to pmap list
	 */
	LIST_INSERT_HEAD(&pm->pmap_pvo, pvo, pvo_plink);

	/*
	 * Remember if the list was empty; this entry will then be the
	 * first item.
	 */
	if (LIST_FIRST(pvo_head) == NULL)
		first = 1;
	LIST_INSERT_HEAD(pvo_head, pvo, pvo_vlink);

	if (pvo->pvo_vaddr & PVO_WIRED) {
		pvo->pvo_pte.lpte.pte_hi |= LPTE_WIRED;
		pm->pm_stats.wired_count++;
	}
	pm->pm_stats.resident_count++;

	/*
	 * We hope this succeeds but it isn't required.
	 */
	i = MOEA64_PTE_INSERT(mmu, ptegidx, &pvo->pvo_pte.lpte);
	if (i >= 0) {
		PVO_PTEGIDX_SET(pvo, i);
	} else {
		panic("moea64_pvo_enter: overflow");
		moea64_pte_overflow++;
	}

	if (pm == kernel_pmap)
		isync();

	UNLOCK_TABLE();

#ifdef __powerpc64__
	/*
	 * Make sure all our bootstrap mappings are in the SLB as soon
	 * as virtual memory is switched on.
	 */
	if (!pmap_bootstrapped)
		moea64_bootstrap_slb_prefault(va, flags & PVO_LARGE);
#endif

	return (first ? ENOENT : 0);
}
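/*
 * Teardown in moea64_pvo_remove() proceeds in a deliberate order: the
 * hardware PTE is invalidated first (so the REF/CHG bits stop changing),
 * pmap statistics are adjusted, the saved REF/CHG state is pushed back to
 * the vm_page for managed mappings, and only then is the PVO unlinked
 * and, unless it came from the bootstrap pool, returned to its UMA zone.
 */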
static void
moea64_pvo_remove(mmu_t mmu, struct pvo_entry *pvo)
{
	uintptr_t pt;

	/*
	 * If there is an active pte entry, we need to deactivate it (and
	 * save the ref & chg bits).
	 */
	LOCK_TABLE();
	pt = MOEA64_PVO_TO_PTE(mmu, pvo);
	if (pt != -1) {
		MOEA64_PTE_UNSET(mmu, pt, &pvo->pvo_pte.lpte, pvo->pvo_vpn);
		PVO_PTEGIDX_CLR(pvo);
	} else {
		moea64_pte_overflow--;
	}

	/*
	 * Update our statistics.
	 */
	pvo->pvo_pmap->pm_stats.resident_count--;
	if (pvo->pvo_vaddr & PVO_WIRED)
		pvo->pvo_pmap->pm_stats.wired_count--;

	/*
	 * Update vm about the REF/CHG bits if the page is managed.
	 */
	if ((pvo->pvo_vaddr & PVO_MANAGED) == PVO_MANAGED) {
		struct vm_page *pg;

		pg = PHYS_TO_VM_PAGE(pvo->pvo_pte.lpte.pte_lo & LPTE_RPGN);
		if (pg != NULL) {
			if (pvo->pvo_pte.lpte.pte_lo & LPTE_CHG)
				vm_page_dirty(pg);
			if (pvo->pvo_pte.lpte.pte_lo & LPTE_REF)
				vm_page_aflag_set(pg, PGA_REFERENCED);
		}
	}

	/*
	 * Remove this PVO from the PV and pmap lists.
	 */
	LIST_REMOVE(pvo, pvo_vlink);
	LIST_REMOVE(pvo, pvo_plink);

	/*
	 * Remove this from the overflow list and return it to the pool
	 * if we aren't going to reuse it.
	 */
	LIST_REMOVE(pvo, pvo_olink);

	moea64_pvo_entries--;
	moea64_pvo_remove_calls++;

	UNLOCK_TABLE();

	if (!(pvo->pvo_vaddr & PVO_BOOTSTRAP))
		uma_zfree((pvo->pvo_vaddr & PVO_MANAGED) ? moea64_mpvo_zone :
		    moea64_upvo_zone, pvo);
}

static struct pvo_entry *
moea64_pvo_find_va(pmap_t pm, vm_offset_t va)
{
	struct pvo_entry *pvo;
	int ptegidx;
	uint64_t vsid;
    #ifdef __powerpc64__
	uint64_t slbv;

	if (pm == kernel_pmap) {
		slbv = kernel_va_to_slbv(va);
	} else {
		struct slb *slb;
		slb = user_va_to_slb_entry(pm, va);
		/* The page is not mapped if the segment isn't */
		if (slb == NULL)
			return (NULL);
		slbv = slb->slbv;
	}

	vsid = (slbv & SLBV_VSID_MASK) >> SLBV_VSID_SHIFT;
	if (slbv & SLBV_L)
		va &= ~moea64_large_page_mask;
	else
		va &= ~ADDR_POFF;
	ptegidx = va_to_pteg(vsid, va, slbv & SLBV_L);
    #else
	va &= ~ADDR_POFF;
	vsid = va_to_vsid(pm, va);
	ptegidx = va_to_pteg(vsid, va, 0);
    #endif

	LOCK_TABLE();
	LIST_FOREACH(pvo, &moea64_pvo_table[ptegidx], pvo_olink) {
		if (pvo->pvo_pmap == pm && PVO_VADDR(pvo) == va)
			break;
	}
	UNLOCK_TABLE();

	return (pvo);
}
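/*
 * REF/CHG bit queries happen in two passes.  The cached PTE copies held
 * in the PVOs are checked first, since a bit saved there is sufficient
 * for a positive answer.  Only if that fails do we issue powerpc_sync()
 * and consult the hardware page table itself.  The is_modified and
 * is_referenced entry points are expected to reduce to these helpers
 * with LPTE_CHG and LPTE_REF respectively.
 */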
static boolean_t
moea64_query_bit(mmu_t mmu, vm_page_t m, u_int64_t ptebit)
{
	struct pvo_entry *pvo;
	uintptr_t pt;

	vm_page_lock_queues();

	LIST_FOREACH(pvo, vm_page_to_pvoh(m), pvo_vlink) {
		/*
		 * See if we saved the bit off.  If so, return success.
		 */
		if (pvo->pvo_pte.lpte.pte_lo & ptebit) {
			vm_page_unlock_queues();
			return (TRUE);
		}
	}

	/*
	 * No luck, now go through the hard part of looking at the PTEs
	 * themselves.  Sync so that any pending REF/CHG bits are flushed to
	 * the PTEs.
	 */
	powerpc_sync();
	LIST_FOREACH(pvo, vm_page_to_pvoh(m), pvo_vlink) {

		/*
		 * See if this pvo has a valid PTE.  If so, fetch the
		 * REF/CHG bits from the valid PTE.  If the appropriate
		 * ptebit is set, return success.
		 */
		LOCK_TABLE();
		pt = MOEA64_PVO_TO_PTE(mmu, pvo);
		if (pt != -1) {
			MOEA64_PTE_SYNCH(mmu, pt, &pvo->pvo_pte.lpte);
			if (pvo->pvo_pte.lpte.pte_lo & ptebit) {
				UNLOCK_TABLE();
				vm_page_unlock_queues();
				return (TRUE);
			}
		}
		UNLOCK_TABLE();
	}

	vm_page_unlock_queues();
	return (FALSE);
}

static u_int
moea64_clear_bit(mmu_t mmu, vm_page_t m, u_int64_t ptebit)
{
	u_int count;
	struct pvo_entry *pvo;
	uintptr_t pt;

	vm_page_lock_queues();

	/*
	 * Sync so that any pending REF/CHG bits are flushed to the PTEs (so
	 * we can reset the right ones).  Note that since the pvo entries and
	 * list heads are accessed via BAT0 and are never placed in the page
	 * table, we don't have to worry about further accesses setting the
	 * REF/CHG bits.
	 */
	powerpc_sync();

	/*
	 * For each pvo entry, clear the pvo's ptebit.  If this pvo has a
	 * valid pte clear the ptebit from the valid pte.
	 */
	count = 0;
	LIST_FOREACH(pvo, vm_page_to_pvoh(m), pvo_vlink) {

		LOCK_TABLE();
		pt = MOEA64_PVO_TO_PTE(mmu, pvo);
		if (pt != -1) {
			MOEA64_PTE_SYNCH(mmu, pt, &pvo->pvo_pte.lpte);
			if (pvo->pvo_pte.lpte.pte_lo & ptebit) {
				count++;
				MOEA64_PTE_CLEAR(mmu, pt, &pvo->pvo_pte.lpte,
				    pvo->pvo_vpn, ptebit);
			}
		}
		pvo->pvo_pte.lpte.pte_lo &= ~ptebit;
		UNLOCK_TABLE();
	}

	vm_page_unlock_queues();
	return (count);
}

boolean_t
moea64_dev_direct_mapped(mmu_t mmu, vm_offset_t pa, vm_size_t size)
{
	struct pvo_entry *pvo;
	vm_offset_t ppa;
	int error = 0;

	PMAP_LOCK(kernel_pmap);
	for (ppa = pa & ~ADDR_POFF; ppa < pa + size; ppa += PAGE_SIZE) {
		pvo = moea64_pvo_find_va(kernel_pmap, ppa);
		if (pvo == NULL ||
		    (pvo->pvo_pte.lpte.pte_lo & LPTE_RPGN) != ppa) {
			error = EFAULT;
			break;
		}
	}
	PMAP_UNLOCK(kernel_pmap);

	return (error);
}
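/*
 * A usage sketch for the device-mapping routines that follow (the device
 * physical address here is hypothetical, and callers would normally go
 * through the pmap_mapdev()/pmap_unmapdev() MMU interface rather than
 * calling these directly):
 *
 *	void *regs;
 *
 *	regs = moea64_mapdev(mmu, 0xf1000000, PAGE_SIZE);
 *	... access device registers through regs ...
 *	moea64_unmapdev(mmu, (vm_offset_t)regs, PAGE_SIZE);
 */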
/*
 * Map a set of physical memory pages into the kernel virtual
 * address space.  Return a pointer to where it is mapped.  This
 * routine is intended to be used for mapping device memory,
 * NOT real memory.
 */
void *
moea64_mapdev_attr(mmu_t mmu, vm_offset_t pa, vm_size_t size, vm_memattr_t ma)
{
	vm_offset_t va, tmpva, ppa, offset;

	ppa = trunc_page(pa);
	offset = pa & PAGE_MASK;
	size = roundup(offset + size, PAGE_SIZE);

	va = kmem_alloc_nofault(kernel_map, size);

	if (!va)
		panic("moea64_mapdev: Couldn't alloc kernel virtual memory");

	for (tmpva = va; size > 0;) {
		moea64_kenter_attr(mmu, tmpva, ppa, ma);
		size -= PAGE_SIZE;
		tmpva += PAGE_SIZE;
		ppa += PAGE_SIZE;
	}

	return ((void *)(va + offset));
}

void *
moea64_mapdev(mmu_t mmu, vm_offset_t pa, vm_size_t size)
{

	return (moea64_mapdev_attr(mmu, pa, size, VM_MEMATTR_DEFAULT));
}

void
moea64_unmapdev(mmu_t mmu, vm_offset_t va, vm_size_t size)
{
	vm_offset_t base, offset;

	base = trunc_page(va);
	offset = va & PAGE_MASK;
	size = roundup(offset + size, PAGE_SIZE);

	kmem_free(kernel_map, base, size);
}

void
moea64_sync_icache(mmu_t mmu, pmap_t pm, vm_offset_t va, vm_size_t sz)
{
	struct pvo_entry *pvo;
	vm_offset_t lim;
	vm_paddr_t pa;
	vm_size_t len;

	PMAP_LOCK(pm);
	while (sz > 0) {
		lim = round_page(va);
		len = MIN(lim - va, sz);
		pvo = moea64_pvo_find_va(pm, va & ~ADDR_POFF);
		if (pvo != NULL && !(pvo->pvo_pte.lpte.pte_lo & LPTE_I)) {
			pa = (pvo->pvo_pte.lpte.pte_lo & LPTE_RPGN) |
			    (va & ADDR_POFF);
			moea64_syncicache(mmu, pm, va, pa, len);
		}
		va += len;
		sz -= len;
	}
	PMAP_UNLOCK(pm);
}