mmu_oea64.c revision 269388
/*-
 * Copyright (c) 2001 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Matt Thomas <matt@3am-software.com> of Allegro Networks, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */
/*-
 * Copyright (C) 1995, 1996 Wolfgang Solfrank.
 * Copyright (C) 1995, 1996 TooLs GmbH.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by TooLs GmbH.
 * 4. The name of TooLs GmbH may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY TOOLS GMBH ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL TOOLS GMBH BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
 * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
 * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * $NetBSD: pmap.c,v 1.28 2000/03/26 20:42:36 kleink Exp $
 */
/*-
 * Copyright (C) 2001 Benno Rice.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY Benno Rice ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL TOOLS GMBH BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
 * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
 * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: head/sys/powerpc/aim/mmu_oea64.c 269388 2014-08-01 17:09:50Z alc $");

/*
 * Manages physical address maps.
 *
 * Since the information managed by this module is also stored by the
 * logical address mapping module, this module may throw away valid virtual
 * to physical mappings at almost any time. However, invalidations of
 * mappings must be done as requested.
 *
 * In order to cope with hardware architectures which make virtual to
 * physical map invalidates expensive, this module may delay invalidate
 * or reduced protection operations until such time as they are actually
 * necessary. This module is given full information as to which processors
 * are currently using which maps, and to when physical maps must be made
 * correct.
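 *
 * This file is the pmap module for AIM PowerPC CPUs that use the 64-bit
 * hashed page table (it also serves 32-bit kernels running on such CPUs).
 * It is registered with the machine-independent VM layer through the
 * mmu_if kobj interface by MMU_DEF() below.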
103 */ 104 105#include "opt_compat.h" 106#include "opt_kstack_pages.h" 107 108#include <sys/param.h> 109#include <sys/kernel.h> 110#include <sys/queue.h> 111#include <sys/cpuset.h> 112#include <sys/ktr.h> 113#include <sys/lock.h> 114#include <sys/msgbuf.h> 115#include <sys/malloc.h> 116#include <sys/mutex.h> 117#include <sys/proc.h> 118#include <sys/rwlock.h> 119#include <sys/sched.h> 120#include <sys/sysctl.h> 121#include <sys/systm.h> 122#include <sys/vmmeter.h> 123 124#include <sys/kdb.h> 125 126#include <dev/ofw/openfirm.h> 127 128#include <vm/vm.h> 129#include <vm/vm_param.h> 130#include <vm/vm_kern.h> 131#include <vm/vm_page.h> 132#include <vm/vm_map.h> 133#include <vm/vm_object.h> 134#include <vm/vm_extern.h> 135#include <vm/vm_pageout.h> 136#include <vm/uma.h> 137 138#include <machine/_inttypes.h> 139#include <machine/cpu.h> 140#include <machine/platform.h> 141#include <machine/frame.h> 142#include <machine/md_var.h> 143#include <machine/psl.h> 144#include <machine/bat.h> 145#include <machine/hid.h> 146#include <machine/pte.h> 147#include <machine/sr.h> 148#include <machine/trap.h> 149#include <machine/mmuvar.h> 150 151#include "mmu_oea64.h" 152#include "mmu_if.h" 153#include "moea64_if.h" 154 155void moea64_release_vsid(uint64_t vsid); 156uintptr_t moea64_get_unique_vsid(void); 157 158#define DISABLE_TRANS(msr) msr = mfmsr(); mtmsr(msr & ~PSL_DR) 159#define ENABLE_TRANS(msr) mtmsr(msr) 160 161#define VSID_MAKE(sr, hash) ((sr) | (((hash) & 0xfffff) << 4)) 162#define VSID_TO_HASH(vsid) (((vsid) >> 4) & 0xfffff) 163#define VSID_HASH_MASK 0x0000007fffffffffULL 164 165/* 166 * Locking semantics: 167 * -- Read lock: if no modifications are being made to either the PVO lists 168 * or page table or if any modifications being made result in internal 169 * changes (e.g. wiring, protection) such that the existence of the PVOs 170 * is unchanged and they remain associated with the same pmap (in which 171 * case the changes should be protected by the pmap lock) 172 * -- Write lock: required if PTEs/PVOs are being inserted or removed. 173 */ 174 175#define LOCK_TABLE_RD() rw_rlock(&moea64_table_lock) 176#define UNLOCK_TABLE_RD() rw_runlock(&moea64_table_lock) 177#define LOCK_TABLE_WR() rw_wlock(&moea64_table_lock) 178#define UNLOCK_TABLE_WR() rw_wunlock(&moea64_table_lock) 179 180struct ofw_map { 181 cell_t om_va; 182 cell_t om_len; 183 uint64_t om_pa; 184 cell_t om_mode; 185}; 186 187extern unsigned char _etext[]; 188extern unsigned char _end[]; 189 190extern int dumpsys_minidump; 191 192/* 193 * Map of physical memory regions. 194 */ 195static struct mem_region *regions; 196static struct mem_region *pregions; 197static u_int phys_avail_count; 198static int regions_sz, pregions_sz; 199 200extern void bs_remap_earlyboot(void); 201 202/* 203 * Lock for the pteg and pvo tables. 204 */ 205struct rwlock moea64_table_lock; 206struct mtx moea64_slb_mutex; 207 208/* 209 * PTEG data. 210 */ 211u_int moea64_pteg_count; 212u_int moea64_pteg_mask; 213 214/* 215 * PVO data. 
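 *
 * Each active mapping is tracked by a software PVO entry.  A PVO is linked
 * both into this table (hashed by PTEG index) and into a per-page list
 * (see vm_page_to_pvoh()), so a mapping can be found either from a
 * (pmap, virtual address) pair or from the physical page it maps.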
216 */ 217struct pvo_head *moea64_pvo_table; /* pvo entries by pteg index */ 218 219uma_zone_t moea64_upvo_zone; /* zone for pvo entries for unmanaged pages */ 220uma_zone_t moea64_mpvo_zone; /* zone for pvo entries for managed pages */ 221 222#define BPVO_POOL_SIZE 327680 223static struct pvo_entry *moea64_bpvo_pool; 224static int moea64_bpvo_pool_index = 0; 225 226#define VSID_NBPW (sizeof(u_int32_t) * 8) 227#ifdef __powerpc64__ 228#define NVSIDS (NPMAPS * 16) 229#define VSID_HASHMASK 0xffffffffUL 230#else 231#define NVSIDS NPMAPS 232#define VSID_HASHMASK 0xfffffUL 233#endif 234static u_int moea64_vsid_bitmap[NVSIDS / VSID_NBPW]; 235 236static boolean_t moea64_initialized = FALSE; 237 238/* 239 * Statistics. 240 */ 241u_int moea64_pte_valid = 0; 242u_int moea64_pte_overflow = 0; 243u_int moea64_pvo_entries = 0; 244u_int moea64_pvo_enter_calls = 0; 245u_int moea64_pvo_remove_calls = 0; 246SYSCTL_INT(_machdep, OID_AUTO, moea64_pte_valid, CTLFLAG_RD, 247 &moea64_pte_valid, 0, ""); 248SYSCTL_INT(_machdep, OID_AUTO, moea64_pte_overflow, CTLFLAG_RD, 249 &moea64_pte_overflow, 0, ""); 250SYSCTL_INT(_machdep, OID_AUTO, moea64_pvo_entries, CTLFLAG_RD, 251 &moea64_pvo_entries, 0, ""); 252SYSCTL_INT(_machdep, OID_AUTO, moea64_pvo_enter_calls, CTLFLAG_RD, 253 &moea64_pvo_enter_calls, 0, ""); 254SYSCTL_INT(_machdep, OID_AUTO, moea64_pvo_remove_calls, CTLFLAG_RD, 255 &moea64_pvo_remove_calls, 0, ""); 256 257vm_offset_t moea64_scratchpage_va[2]; 258struct pvo_entry *moea64_scratchpage_pvo[2]; 259uintptr_t moea64_scratchpage_pte[2]; 260struct mtx moea64_scratchpage_mtx; 261 262uint64_t moea64_large_page_mask = 0; 263uint64_t moea64_large_page_size = 0; 264int moea64_large_page_shift = 0; 265 266/* 267 * PVO calls. 268 */ 269static int moea64_pvo_enter(mmu_t, pmap_t, uma_zone_t, struct pvo_head *, 270 vm_offset_t, vm_offset_t, uint64_t, int); 271static void moea64_pvo_remove(mmu_t, struct pvo_entry *); 272static struct pvo_entry *moea64_pvo_find_va(pmap_t, vm_offset_t); 273 274/* 275 * Utility routines. 
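 *
 * moea64_query_bit() and moea64_clear_bit() implement referenced/modified
 * tracking: they walk a page's PVO list and test or clear the requested
 * bit (LPTE_REF or LPTE_CHG) in the cached PTE image and, when the mapping
 * is resident, in the hardware PTE as well.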
276 */ 277static boolean_t moea64_query_bit(mmu_t, vm_page_t, u_int64_t); 278static u_int moea64_clear_bit(mmu_t, vm_page_t, u_int64_t); 279static void moea64_kremove(mmu_t, vm_offset_t); 280static void moea64_syncicache(mmu_t, pmap_t pmap, vm_offset_t va, 281 vm_offset_t pa, vm_size_t sz); 282 283/* 284 * Kernel MMU interface 285 */ 286void moea64_change_wiring(mmu_t, pmap_t, vm_offset_t, boolean_t); 287void moea64_clear_modify(mmu_t, vm_page_t); 288void moea64_copy_page(mmu_t, vm_page_t, vm_page_t); 289void moea64_copy_pages(mmu_t mmu, vm_page_t *ma, vm_offset_t a_offset, 290 vm_page_t *mb, vm_offset_t b_offset, int xfersize); 291void moea64_enter(mmu_t, pmap_t, vm_offset_t, vm_page_t, vm_prot_t, boolean_t); 292void moea64_enter_object(mmu_t, pmap_t, vm_offset_t, vm_offset_t, vm_page_t, 293 vm_prot_t); 294void moea64_enter_quick(mmu_t, pmap_t, vm_offset_t, vm_page_t, vm_prot_t); 295vm_paddr_t moea64_extract(mmu_t, pmap_t, vm_offset_t); 296vm_page_t moea64_extract_and_hold(mmu_t, pmap_t, vm_offset_t, vm_prot_t); 297void moea64_init(mmu_t); 298boolean_t moea64_is_modified(mmu_t, vm_page_t); 299boolean_t moea64_is_prefaultable(mmu_t, pmap_t, vm_offset_t); 300boolean_t moea64_is_referenced(mmu_t, vm_page_t); 301int moea64_ts_referenced(mmu_t, vm_page_t); 302vm_offset_t moea64_map(mmu_t, vm_offset_t *, vm_paddr_t, vm_paddr_t, int); 303boolean_t moea64_page_exists_quick(mmu_t, pmap_t, vm_page_t); 304int moea64_page_wired_mappings(mmu_t, vm_page_t); 305void moea64_pinit(mmu_t, pmap_t); 306void moea64_pinit0(mmu_t, pmap_t); 307void moea64_protect(mmu_t, pmap_t, vm_offset_t, vm_offset_t, vm_prot_t); 308void moea64_qenter(mmu_t, vm_offset_t, vm_page_t *, int); 309void moea64_qremove(mmu_t, vm_offset_t, int); 310void moea64_release(mmu_t, pmap_t); 311void moea64_remove(mmu_t, pmap_t, vm_offset_t, vm_offset_t); 312void moea64_remove_pages(mmu_t, pmap_t); 313void moea64_remove_all(mmu_t, vm_page_t); 314void moea64_remove_write(mmu_t, vm_page_t); 315void moea64_unwire(mmu_t, pmap_t, vm_offset_t, vm_offset_t); 316void moea64_zero_page(mmu_t, vm_page_t); 317void moea64_zero_page_area(mmu_t, vm_page_t, int, int); 318void moea64_zero_page_idle(mmu_t, vm_page_t); 319void moea64_activate(mmu_t, struct thread *); 320void moea64_deactivate(mmu_t, struct thread *); 321void *moea64_mapdev(mmu_t, vm_paddr_t, vm_size_t); 322void *moea64_mapdev_attr(mmu_t, vm_offset_t, vm_size_t, vm_memattr_t); 323void moea64_unmapdev(mmu_t, vm_offset_t, vm_size_t); 324vm_paddr_t moea64_kextract(mmu_t, vm_offset_t); 325void moea64_page_set_memattr(mmu_t, vm_page_t m, vm_memattr_t ma); 326void moea64_kenter_attr(mmu_t, vm_offset_t, vm_offset_t, vm_memattr_t ma); 327void moea64_kenter(mmu_t, vm_offset_t, vm_paddr_t); 328boolean_t moea64_dev_direct_mapped(mmu_t, vm_paddr_t, vm_size_t); 329static void moea64_sync_icache(mmu_t, pmap_t, vm_offset_t, vm_size_t); 330vm_offset_t moea64_dumpsys_map(mmu_t mmu, struct pmap_md *md, vm_size_t ofs, 331 vm_size_t *sz); 332struct pmap_md * moea64_scan_md(mmu_t mmu, struct pmap_md *prev); 333 334static mmu_method_t moea64_methods[] = { 335 MMUMETHOD(mmu_change_wiring, moea64_change_wiring), 336 MMUMETHOD(mmu_clear_modify, moea64_clear_modify), 337 MMUMETHOD(mmu_copy_page, moea64_copy_page), 338 MMUMETHOD(mmu_copy_pages, moea64_copy_pages), 339 MMUMETHOD(mmu_enter, moea64_enter), 340 MMUMETHOD(mmu_enter_object, moea64_enter_object), 341 MMUMETHOD(mmu_enter_quick, moea64_enter_quick), 342 MMUMETHOD(mmu_extract, moea64_extract), 343 MMUMETHOD(mmu_extract_and_hold, moea64_extract_and_hold), 344 
MMUMETHOD(mmu_init, moea64_init), 345 MMUMETHOD(mmu_is_modified, moea64_is_modified), 346 MMUMETHOD(mmu_is_prefaultable, moea64_is_prefaultable), 347 MMUMETHOD(mmu_is_referenced, moea64_is_referenced), 348 MMUMETHOD(mmu_ts_referenced, moea64_ts_referenced), 349 MMUMETHOD(mmu_map, moea64_map), 350 MMUMETHOD(mmu_page_exists_quick,moea64_page_exists_quick), 351 MMUMETHOD(mmu_page_wired_mappings,moea64_page_wired_mappings), 352 MMUMETHOD(mmu_pinit, moea64_pinit), 353 MMUMETHOD(mmu_pinit0, moea64_pinit0), 354 MMUMETHOD(mmu_protect, moea64_protect), 355 MMUMETHOD(mmu_qenter, moea64_qenter), 356 MMUMETHOD(mmu_qremove, moea64_qremove), 357 MMUMETHOD(mmu_release, moea64_release), 358 MMUMETHOD(mmu_remove, moea64_remove), 359 MMUMETHOD(mmu_remove_pages, moea64_remove_pages), 360 MMUMETHOD(mmu_remove_all, moea64_remove_all), 361 MMUMETHOD(mmu_remove_write, moea64_remove_write), 362 MMUMETHOD(mmu_sync_icache, moea64_sync_icache), 363 MMUMETHOD(mmu_unwire, moea64_unwire), 364 MMUMETHOD(mmu_zero_page, moea64_zero_page), 365 MMUMETHOD(mmu_zero_page_area, moea64_zero_page_area), 366 MMUMETHOD(mmu_zero_page_idle, moea64_zero_page_idle), 367 MMUMETHOD(mmu_activate, moea64_activate), 368 MMUMETHOD(mmu_deactivate, moea64_deactivate), 369 MMUMETHOD(mmu_page_set_memattr, moea64_page_set_memattr), 370 371 /* Internal interfaces */ 372 MMUMETHOD(mmu_mapdev, moea64_mapdev), 373 MMUMETHOD(mmu_mapdev_attr, moea64_mapdev_attr), 374 MMUMETHOD(mmu_unmapdev, moea64_unmapdev), 375 MMUMETHOD(mmu_kextract, moea64_kextract), 376 MMUMETHOD(mmu_kenter, moea64_kenter), 377 MMUMETHOD(mmu_kenter_attr, moea64_kenter_attr), 378 MMUMETHOD(mmu_dev_direct_mapped,moea64_dev_direct_mapped), 379 MMUMETHOD(mmu_scan_md, moea64_scan_md), 380 MMUMETHOD(mmu_dumpsys_map, moea64_dumpsys_map), 381 382 { 0, 0 } 383}; 384 385MMU_DEF(oea64_mmu, "mmu_oea64_base", moea64_methods, 0); 386 387static __inline u_int 388va_to_pteg(uint64_t vsid, vm_offset_t addr, int large) 389{ 390 uint64_t hash; 391 int shift; 392 393 shift = large ? moea64_large_page_shift : ADDR_PIDX_SHFT; 394 hash = (vsid & VSID_HASH_MASK) ^ (((uint64_t)addr & ADDR_PIDX) >> 395 shift); 396 return (hash & moea64_pteg_mask); 397} 398 399static __inline struct pvo_head * 400vm_page_to_pvoh(vm_page_t m) 401{ 402 403 return (&m->md.mdpg_pvoh); 404} 405 406static __inline void 407moea64_pte_create(struct lpte *pt, uint64_t vsid, vm_offset_t va, 408 uint64_t pte_lo, int flags) 409{ 410 411 /* 412 * Construct a PTE. Default to IMB initially. Valid bit only gets 413 * set when the real pte is set in memory. 414 * 415 * Note: Don't set the valid bit for correct operation of tlb update. 416 */ 417 pt->pte_hi = (vsid << LPTE_VSID_SHIFT) | 418 (((uint64_t)(va & ADDR_PIDX) >> ADDR_API_SHFT64) & LPTE_API); 419 420 if (flags & PVO_LARGE) 421 pt->pte_hi |= LPTE_BIG; 422 423 pt->pte_lo = pte_lo; 424} 425 426static __inline uint64_t 427moea64_calc_wimg(vm_offset_t pa, vm_memattr_t ma) 428{ 429 uint64_t pte_lo; 430 int i; 431 432 if (ma != VM_MEMATTR_DEFAULT) { 433 switch (ma) { 434 case VM_MEMATTR_UNCACHEABLE: 435 return (LPTE_I | LPTE_G); 436 case VM_MEMATTR_WRITE_COMBINING: 437 case VM_MEMATTR_WRITE_BACK: 438 case VM_MEMATTR_PREFETCHABLE: 439 return (LPTE_I); 440 case VM_MEMATTR_WRITE_THROUGH: 441 return (LPTE_W | LPTE_M); 442 } 443 } 444 445 /* 446 * Assume the page is cache inhibited and access is guarded unless 447 * it's in our available memory array. 
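	 *
	 * The WIMG storage attributes are W (write-through), I (caching
	 * inhibited), M (memory coherence required) and G (guarded).  For
	 * example, ordinary RAM listed in the pregions array below ends up
	 * with just LPTE_M, while anything outside it (typically device
	 * memory) keeps LPTE_I | LPTE_G.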
448 */ 449 pte_lo = LPTE_I | LPTE_G; 450 for (i = 0; i < pregions_sz; i++) { 451 if ((pa >= pregions[i].mr_start) && 452 (pa < (pregions[i].mr_start + pregions[i].mr_size))) { 453 pte_lo &= ~(LPTE_I | LPTE_G); 454 pte_lo |= LPTE_M; 455 break; 456 } 457 } 458 459 return pte_lo; 460} 461 462/* 463 * Quick sort callout for comparing memory regions. 464 */ 465static int om_cmp(const void *a, const void *b); 466 467static int 468om_cmp(const void *a, const void *b) 469{ 470 const struct ofw_map *mapa; 471 const struct ofw_map *mapb; 472 473 mapa = a; 474 mapb = b; 475 if (mapa->om_pa < mapb->om_pa) 476 return (-1); 477 else if (mapa->om_pa > mapb->om_pa) 478 return (1); 479 else 480 return (0); 481} 482 483static void 484moea64_add_ofw_mappings(mmu_t mmup, phandle_t mmu, size_t sz) 485{ 486 struct ofw_map translations[sz/(4*sizeof(cell_t))]; /*>= 4 cells per */ 487 pcell_t acells, trans_cells[sz/sizeof(cell_t)]; 488 register_t msr; 489 vm_offset_t off; 490 vm_paddr_t pa_base; 491 int i, j; 492 493 bzero(translations, sz); 494 OF_getprop(OF_finddevice("/"), "#address-cells", &acells, 495 sizeof(acells)); 496 if (OF_getprop(mmu, "translations", trans_cells, sz) == -1) 497 panic("moea64_bootstrap: can't get ofw translations"); 498 499 CTR0(KTR_PMAP, "moea64_add_ofw_mappings: translations"); 500 sz /= sizeof(cell_t); 501 for (i = 0, j = 0; i < sz; j++) { 502 translations[j].om_va = trans_cells[i++]; 503 translations[j].om_len = trans_cells[i++]; 504 translations[j].om_pa = trans_cells[i++]; 505 if (acells == 2) { 506 translations[j].om_pa <<= 32; 507 translations[j].om_pa |= trans_cells[i++]; 508 } 509 translations[j].om_mode = trans_cells[i++]; 510 } 511 KASSERT(i == sz, ("Translations map has incorrect cell count (%d/%zd)", 512 i, sz)); 513 514 sz = j; 515 qsort(translations, sz, sizeof (*translations), om_cmp); 516 517 for (i = 0; i < sz; i++) { 518 pa_base = translations[i].om_pa; 519 #ifndef __powerpc64__ 520 if ((translations[i].om_pa >> 32) != 0) 521 panic("OFW translations above 32-bit boundary!"); 522 #endif 523 524 if (pa_base % PAGE_SIZE) 525 panic("OFW translation not page-aligned (phys)!"); 526 if (translations[i].om_va % PAGE_SIZE) 527 panic("OFW translation not page-aligned (virt)!"); 528 529 CTR3(KTR_PMAP, "translation: pa=%#zx va=%#x len=%#x", 530 pa_base, translations[i].om_va, translations[i].om_len); 531 532 /* Now enter the pages for this mapping */ 533 534 DISABLE_TRANS(msr); 535 for (off = 0; off < translations[i].om_len; off += PAGE_SIZE) { 536 if (moea64_pvo_find_va(kernel_pmap, 537 translations[i].om_va + off) != NULL) 538 continue; 539 540 moea64_kenter(mmup, translations[i].om_va + off, 541 pa_base + off); 542 } 543 ENABLE_TRANS(msr); 544 } 545} 546 547#ifdef __powerpc64__ 548static void 549moea64_probe_large_page(void) 550{ 551 uint16_t pvr = mfpvr() >> 16; 552 553 switch (pvr) { 554 case IBM970: 555 case IBM970FX: 556 case IBM970MP: 557 powerpc_sync(); isync(); 558 mtspr(SPR_HID4, mfspr(SPR_HID4) & ~HID4_970_DISABLE_LG_PG); 559 powerpc_sync(); isync(); 560 561 /* FALLTHROUGH */ 562 default: 563 moea64_large_page_size = 0x1000000; /* 16 MB */ 564 moea64_large_page_shift = 24; 565 } 566 567 moea64_large_page_mask = moea64_large_page_size - 1; 568} 569 570static void 571moea64_bootstrap_slb_prefault(vm_offset_t va, int large) 572{ 573 struct slb *cache; 574 struct slb entry; 575 uint64_t esid, slbe; 576 uint64_t i; 577 578 cache = PCPU_GET(slb); 579 esid = va >> ADDR_SR_SHFT; 580 slbe = (esid << SLBE_ESID_SHIFT) | SLBE_VALID; 581 582 for (i = 0; i < 64; i++) { 583 if 
(cache[i].slbe == (slbe | i))
			return;
	}

	entry.slbe = slbe;
	entry.slbv = KERNEL_VSID(esid) << SLBV_VSID_SHIFT;
	if (large)
		entry.slbv |= SLBV_L;

	slb_insert_kernel(entry.slbe, entry.slbv);
}
#endif

static void
moea64_setup_direct_map(mmu_t mmup, vm_offset_t kernelstart,
    vm_offset_t kernelend)
{
	register_t msr;
	vm_paddr_t pa;
	vm_offset_t size, off;
	uint64_t pte_lo;
	int i;

	if (moea64_large_page_size == 0)
		hw_direct_map = 0;

	DISABLE_TRANS(msr);
	if (hw_direct_map) {
		LOCK_TABLE_WR();
		PMAP_LOCK(kernel_pmap);
		for (i = 0; i < pregions_sz; i++) {
		  for (pa = pregions[i].mr_start; pa < pregions[i].mr_start +
		     pregions[i].mr_size; pa += moea64_large_page_size) {
			pte_lo = LPTE_M;

			/*
			 * Set memory access as guarded if prefetch within
			 * the page could exit the available physmem area.
			 */
			if (pa & moea64_large_page_mask) {
				pa &= moea64_large_page_mask;
				pte_lo |= LPTE_G;
			}
			if (pa + moea64_large_page_size >
			    pregions[i].mr_start + pregions[i].mr_size)
				pte_lo |= LPTE_G;

			moea64_pvo_enter(mmup, kernel_pmap, moea64_upvo_zone,
				    NULL, pa, pa, pte_lo,
				    PVO_WIRED | PVO_LARGE);
		  }
		}
		PMAP_UNLOCK(kernel_pmap);
		UNLOCK_TABLE_WR();
	} else {
		size = sizeof(struct pvo_head) * moea64_pteg_count;
		off = (vm_offset_t)(moea64_pvo_table);
		for (pa = off; pa < off + size; pa += PAGE_SIZE)
			moea64_kenter(mmup, pa, pa);
		size = BPVO_POOL_SIZE*sizeof(struct pvo_entry);
		off = (vm_offset_t)(moea64_bpvo_pool);
		for (pa = off; pa < off + size; pa += PAGE_SIZE)
			moea64_kenter(mmup, pa, pa);

		/*
		 * Map certain important things, like ourselves.
		 *
		 * NOTE: We do not map the exception vector space. That code is
		 * used only in real mode, and leaving it unmapped allows us to
		 * catch NULL pointer dereferences, instead of making NULL a
		 * valid address.
		 */

		for (pa = kernelstart & ~PAGE_MASK; pa < kernelend;
		    pa += PAGE_SIZE)
			moea64_kenter(mmup, pa, pa);
	}
	ENABLE_TRANS(msr);

	/*
	 * Allow user to override unmapped_buf_allowed for testing.
	 * XXXKIB Only direct map implementation was tested.
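	 *
	 * The default simply follows hw_direct_map; if needed it can be
	 * forced from the loader, e.g. by setting vfs.unmapped_buf_allowed=0
	 * in loader.conf.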
665 */ 666 if (!TUNABLE_INT_FETCH("vfs.unmapped_buf_allowed", 667 &unmapped_buf_allowed)) 668 unmapped_buf_allowed = hw_direct_map; 669} 670 671void 672moea64_early_bootstrap(mmu_t mmup, vm_offset_t kernelstart, vm_offset_t kernelend) 673{ 674 int i, j; 675 vm_size_t physsz, hwphyssz; 676 677#ifndef __powerpc64__ 678 /* We don't have a direct map since there is no BAT */ 679 hw_direct_map = 0; 680 681 /* Make sure battable is zero, since we have no BAT */ 682 for (i = 0; i < 16; i++) { 683 battable[i].batu = 0; 684 battable[i].batl = 0; 685 } 686#else 687 moea64_probe_large_page(); 688 689 /* Use a direct map if we have large page support */ 690 if (moea64_large_page_size > 0) 691 hw_direct_map = 1; 692 else 693 hw_direct_map = 0; 694#endif 695 696 /* Get physical memory regions from firmware */ 697 mem_regions(&pregions, &pregions_sz, ®ions, ®ions_sz); 698 CTR0(KTR_PMAP, "moea64_bootstrap: physical memory"); 699 700 if (sizeof(phys_avail)/sizeof(phys_avail[0]) < regions_sz) 701 panic("moea64_bootstrap: phys_avail too small"); 702 703 phys_avail_count = 0; 704 physsz = 0; 705 hwphyssz = 0; 706 TUNABLE_ULONG_FETCH("hw.physmem", (u_long *) &hwphyssz); 707 for (i = 0, j = 0; i < regions_sz; i++, j += 2) { 708 CTR3(KTR_PMAP, "region: %#zx - %#zx (%#zx)", 709 regions[i].mr_start, regions[i].mr_start + 710 regions[i].mr_size, regions[i].mr_size); 711 if (hwphyssz != 0 && 712 (physsz + regions[i].mr_size) >= hwphyssz) { 713 if (physsz < hwphyssz) { 714 phys_avail[j] = regions[i].mr_start; 715 phys_avail[j + 1] = regions[i].mr_start + 716 hwphyssz - physsz; 717 physsz = hwphyssz; 718 phys_avail_count++; 719 } 720 break; 721 } 722 phys_avail[j] = regions[i].mr_start; 723 phys_avail[j + 1] = regions[i].mr_start + regions[i].mr_size; 724 phys_avail_count++; 725 physsz += regions[i].mr_size; 726 } 727 728 /* Check for overlap with the kernel and exception vectors */ 729 for (j = 0; j < 2*phys_avail_count; j+=2) { 730 if (phys_avail[j] < EXC_LAST) 731 phys_avail[j] += EXC_LAST; 732 733 if (kernelstart >= phys_avail[j] && 734 kernelstart < phys_avail[j+1]) { 735 if (kernelend < phys_avail[j+1]) { 736 phys_avail[2*phys_avail_count] = 737 (kernelend & ~PAGE_MASK) + PAGE_SIZE; 738 phys_avail[2*phys_avail_count + 1] = 739 phys_avail[j+1]; 740 phys_avail_count++; 741 } 742 743 phys_avail[j+1] = kernelstart & ~PAGE_MASK; 744 } 745 746 if (kernelend >= phys_avail[j] && 747 kernelend < phys_avail[j+1]) { 748 if (kernelstart > phys_avail[j]) { 749 phys_avail[2*phys_avail_count] = phys_avail[j]; 750 phys_avail[2*phys_avail_count + 1] = 751 kernelstart & ~PAGE_MASK; 752 phys_avail_count++; 753 } 754 755 phys_avail[j] = (kernelend & ~PAGE_MASK) + PAGE_SIZE; 756 } 757 } 758 759 physmem = btoc(physsz); 760 761#ifdef PTEGCOUNT 762 moea64_pteg_count = PTEGCOUNT; 763#else 764 moea64_pteg_count = 0x1000; 765 766 while (moea64_pteg_count < physmem) 767 moea64_pteg_count <<= 1; 768 769 moea64_pteg_count >>= 1; 770#endif /* PTEGCOUNT */ 771} 772 773void 774moea64_mid_bootstrap(mmu_t mmup, vm_offset_t kernelstart, vm_offset_t kernelend) 775{ 776 vm_size_t size; 777 register_t msr; 778 int i; 779 780 /* 781 * Set PTEG mask 782 */ 783 moea64_pteg_mask = moea64_pteg_count - 1; 784 785 /* 786 * Allocate pv/overflow lists. 
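	 *
	 * One list head is allocated per PTEG.  Unless overridden by
	 * PTEGCOUNT, moea64_pteg_count was computed in
	 * moea64_early_bootstrap() by doubling from 0x1000 while below the
	 * physical page count and then halving once; e.g. 4GB of RAM
	 * (0x100000 pages) yields 0x80000 PTEGs.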
787 */ 788 size = sizeof(struct pvo_head) * moea64_pteg_count; 789 790 moea64_pvo_table = (struct pvo_head *)moea64_bootstrap_alloc(size, 791 PAGE_SIZE); 792 CTR1(KTR_PMAP, "moea64_bootstrap: PVO table at %p", moea64_pvo_table); 793 794 DISABLE_TRANS(msr); 795 for (i = 0; i < moea64_pteg_count; i++) 796 LIST_INIT(&moea64_pvo_table[i]); 797 ENABLE_TRANS(msr); 798 799 /* 800 * Initialize the lock that synchronizes access to the pteg and pvo 801 * tables. 802 */ 803 rw_init_flags(&moea64_table_lock, "pmap tables", RW_RECURSE); 804 mtx_init(&moea64_slb_mutex, "SLB table", NULL, MTX_DEF); 805 806 /* 807 * Initialise the unmanaged pvo pool. 808 */ 809 moea64_bpvo_pool = (struct pvo_entry *)moea64_bootstrap_alloc( 810 BPVO_POOL_SIZE*sizeof(struct pvo_entry), 0); 811 moea64_bpvo_pool_index = 0; 812 813 /* 814 * Make sure kernel vsid is allocated as well as VSID 0. 815 */ 816 #ifndef __powerpc64__ 817 moea64_vsid_bitmap[(KERNEL_VSIDBITS & (NVSIDS - 1)) / VSID_NBPW] 818 |= 1 << (KERNEL_VSIDBITS % VSID_NBPW); 819 moea64_vsid_bitmap[0] |= 1; 820 #endif 821 822 /* 823 * Initialize the kernel pmap (which is statically allocated). 824 */ 825 #ifdef __powerpc64__ 826 for (i = 0; i < 64; i++) { 827 pcpup->pc_slb[i].slbv = 0; 828 pcpup->pc_slb[i].slbe = 0; 829 } 830 #else 831 for (i = 0; i < 16; i++) 832 kernel_pmap->pm_sr[i] = EMPTY_SEGMENT + i; 833 #endif 834 835 kernel_pmap->pmap_phys = kernel_pmap; 836 CPU_FILL(&kernel_pmap->pm_active); 837 RB_INIT(&kernel_pmap->pmap_pvo); 838 839 PMAP_LOCK_INIT(kernel_pmap); 840 841 /* 842 * Now map in all the other buffers we allocated earlier 843 */ 844 845 moea64_setup_direct_map(mmup, kernelstart, kernelend); 846} 847 848void 849moea64_late_bootstrap(mmu_t mmup, vm_offset_t kernelstart, vm_offset_t kernelend) 850{ 851 ihandle_t mmui; 852 phandle_t chosen; 853 phandle_t mmu; 854 size_t sz; 855 int i; 856 vm_offset_t pa, va; 857 void *dpcpu; 858 859 /* 860 * Set up the Open Firmware pmap and add its mappings if not in real 861 * mode. 862 */ 863 864 chosen = OF_finddevice("/chosen"); 865 if (chosen != -1 && OF_getprop(chosen, "mmu", &mmui, 4) != -1) { 866 mmu = OF_instance_to_package(mmui); 867 if (mmu == -1 || (sz = OF_getproplen(mmu, "translations")) == -1) 868 sz = 0; 869 if (sz > 6144 /* tmpstksz - 2 KB headroom */) 870 panic("moea64_bootstrap: too many ofw translations"); 871 872 if (sz > 0) 873 moea64_add_ofw_mappings(mmup, mmu, sz); 874 } 875 876 /* 877 * Calculate the last available physical address. 878 */ 879 for (i = 0; phys_avail[i + 2] != 0; i += 2) 880 ; 881 Maxmem = powerpc_btop(phys_avail[i + 1]); 882 883 /* 884 * Initialize MMU and remap early physical mappings 885 */ 886 MMU_CPU_BOOTSTRAP(mmup,0); 887 mtmsr(mfmsr() | PSL_DR | PSL_IR); 888 pmap_bootstrapped++; 889 bs_remap_earlyboot(); 890 891 /* 892 * Set the start and end of kva. 893 */ 894 virtual_avail = VM_MIN_KERNEL_ADDRESS; 895 virtual_end = VM_MAX_SAFE_KERNEL_ADDRESS; 896 897 /* 898 * Map the entire KVA range into the SLB. We must not fault there. 899 */ 900 #ifdef __powerpc64__ 901 for (va = virtual_avail; va < virtual_end; va += SEGMENT_LENGTH) 902 moea64_bootstrap_slb_prefault(va, 0); 903 #endif 904 905 /* 906 * Figure out how far we can extend virtual_end into segment 16 907 * without running into existing mappings. Segment 16 is guaranteed 908 * to contain neither RAM nor devices (at least on Apple hardware), 909 * but will generally contain some OFW mappings we should not 910 * step on. 
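	 *
	 * On 32-bit AIM the 4GB effective address space is divided into
	 * sixteen 256MB segments, so "segment 16" is the range
	 * 0xf0000000 - 0xffffffff.  The loop below probes one page at a
	 * time until it runs into an existing mapping.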
911 */ 912 913 #ifndef __powerpc64__ /* KVA is in high memory on PPC64 */ 914 PMAP_LOCK(kernel_pmap); 915 while (virtual_end < VM_MAX_KERNEL_ADDRESS && 916 moea64_pvo_find_va(kernel_pmap, virtual_end+1) == NULL) 917 virtual_end += PAGE_SIZE; 918 PMAP_UNLOCK(kernel_pmap); 919 #endif 920 921 /* 922 * Allocate a kernel stack with a guard page for thread0 and map it 923 * into the kernel page map. 924 */ 925 pa = moea64_bootstrap_alloc(KSTACK_PAGES * PAGE_SIZE, PAGE_SIZE); 926 va = virtual_avail + KSTACK_GUARD_PAGES * PAGE_SIZE; 927 virtual_avail = va + KSTACK_PAGES * PAGE_SIZE; 928 CTR2(KTR_PMAP, "moea64_bootstrap: kstack0 at %#x (%#x)", pa, va); 929 thread0.td_kstack = va; 930 thread0.td_kstack_pages = KSTACK_PAGES; 931 for (i = 0; i < KSTACK_PAGES; i++) { 932 moea64_kenter(mmup, va, pa); 933 pa += PAGE_SIZE; 934 va += PAGE_SIZE; 935 } 936 937 /* 938 * Allocate virtual address space for the message buffer. 939 */ 940 pa = msgbuf_phys = moea64_bootstrap_alloc(msgbufsize, PAGE_SIZE); 941 msgbufp = (struct msgbuf *)virtual_avail; 942 va = virtual_avail; 943 virtual_avail += round_page(msgbufsize); 944 while (va < virtual_avail) { 945 moea64_kenter(mmup, va, pa); 946 pa += PAGE_SIZE; 947 va += PAGE_SIZE; 948 } 949 950 /* 951 * Allocate virtual address space for the dynamic percpu area. 952 */ 953 pa = moea64_bootstrap_alloc(DPCPU_SIZE, PAGE_SIZE); 954 dpcpu = (void *)virtual_avail; 955 va = virtual_avail; 956 virtual_avail += DPCPU_SIZE; 957 while (va < virtual_avail) { 958 moea64_kenter(mmup, va, pa); 959 pa += PAGE_SIZE; 960 va += PAGE_SIZE; 961 } 962 dpcpu_init(dpcpu, 0); 963 964 /* 965 * Allocate some things for page zeroing. We put this directly 966 * in the page table, marked with LPTE_LOCKED, to avoid any 967 * of the PVO book-keeping or other parts of the VM system 968 * from even knowing that this hack exists. 969 */ 970 971 if (!hw_direct_map) { 972 mtx_init(&moea64_scratchpage_mtx, "pvo zero page", NULL, 973 MTX_DEF); 974 for (i = 0; i < 2; i++) { 975 moea64_scratchpage_va[i] = (virtual_end+1) - PAGE_SIZE; 976 virtual_end -= PAGE_SIZE; 977 978 moea64_kenter(mmup, moea64_scratchpage_va[i], 0); 979 980 moea64_scratchpage_pvo[i] = moea64_pvo_find_va( 981 kernel_pmap, (vm_offset_t)moea64_scratchpage_va[i]); 982 LOCK_TABLE_RD(); 983 moea64_scratchpage_pte[i] = MOEA64_PVO_TO_PTE( 984 mmup, moea64_scratchpage_pvo[i]); 985 moea64_scratchpage_pvo[i]->pvo_pte.lpte.pte_hi 986 |= LPTE_LOCKED; 987 MOEA64_PTE_CHANGE(mmup, moea64_scratchpage_pte[i], 988 &moea64_scratchpage_pvo[i]->pvo_pte.lpte, 989 moea64_scratchpage_pvo[i]->pvo_vpn); 990 UNLOCK_TABLE_RD(); 991 } 992 } 993} 994 995/* 996 * Activate a user pmap. The pmap must be activated before its address 997 * space can be accessed in any way. 
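 *
 * On 64-bit CPUs activation points the per-CPU user SLB cache at the
 * pmap's SLB entries; on 32-bit kernels it records the pmap in curpmap so
 * that the segment registers described by pm_sr are loaded on the way back
 * to user space.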
998 */ 999void 1000moea64_activate(mmu_t mmu, struct thread *td) 1001{ 1002 pmap_t pm; 1003 1004 pm = &td->td_proc->p_vmspace->vm_pmap; 1005 CPU_SET(PCPU_GET(cpuid), &pm->pm_active); 1006 1007 #ifdef __powerpc64__ 1008 PCPU_SET(userslb, pm->pm_slb); 1009 #else 1010 PCPU_SET(curpmap, pm->pmap_phys); 1011 #endif 1012} 1013 1014void 1015moea64_deactivate(mmu_t mmu, struct thread *td) 1016{ 1017 pmap_t pm; 1018 1019 pm = &td->td_proc->p_vmspace->vm_pmap; 1020 CPU_CLR(PCPU_GET(cpuid), &pm->pm_active); 1021 #ifdef __powerpc64__ 1022 PCPU_SET(userslb, NULL); 1023 #else 1024 PCPU_SET(curpmap, NULL); 1025 #endif 1026} 1027 1028void 1029moea64_change_wiring(mmu_t mmu, pmap_t pm, vm_offset_t va, boolean_t wired) 1030{ 1031 struct pvo_entry *pvo; 1032 uintptr_t pt; 1033 uint64_t vsid; 1034 int i, ptegidx; 1035 1036 LOCK_TABLE_WR(); 1037 PMAP_LOCK(pm); 1038 pvo = moea64_pvo_find_va(pm, va & ~ADDR_POFF); 1039 1040 if (pvo != NULL) { 1041 pt = MOEA64_PVO_TO_PTE(mmu, pvo); 1042 1043 if (wired) { 1044 if ((pvo->pvo_vaddr & PVO_WIRED) == 0) 1045 pm->pm_stats.wired_count++; 1046 pvo->pvo_vaddr |= PVO_WIRED; 1047 pvo->pvo_pte.lpte.pte_hi |= LPTE_WIRED; 1048 } else { 1049 if ((pvo->pvo_vaddr & PVO_WIRED) != 0) 1050 pm->pm_stats.wired_count--; 1051 pvo->pvo_vaddr &= ~PVO_WIRED; 1052 pvo->pvo_pte.lpte.pte_hi &= ~LPTE_WIRED; 1053 } 1054 1055 if (pt != -1) { 1056 /* Update wiring flag in page table. */ 1057 MOEA64_PTE_CHANGE(mmu, pt, &pvo->pvo_pte.lpte, 1058 pvo->pvo_vpn); 1059 } else if (wired) { 1060 /* 1061 * If we are wiring the page, and it wasn't in the 1062 * page table before, add it. 1063 */ 1064 vsid = PVO_VSID(pvo); 1065 ptegidx = va_to_pteg(vsid, PVO_VADDR(pvo), 1066 pvo->pvo_vaddr & PVO_LARGE); 1067 1068 i = MOEA64_PTE_INSERT(mmu, ptegidx, &pvo->pvo_pte.lpte); 1069 1070 if (i >= 0) { 1071 PVO_PTEGIDX_CLR(pvo); 1072 PVO_PTEGIDX_SET(pvo, i); 1073 } 1074 } 1075 1076 } 1077 UNLOCK_TABLE_WR(); 1078 PMAP_UNLOCK(pm); 1079} 1080 1081void 1082moea64_unwire(mmu_t mmu, pmap_t pm, vm_offset_t sva, vm_offset_t eva) 1083{ 1084 struct pvo_entry key, *pvo; 1085 uintptr_t pt; 1086 1087 LOCK_TABLE_RD(); 1088 PMAP_LOCK(pm); 1089 key.pvo_vaddr = sva; 1090 for (pvo = RB_NFIND(pvo_tree, &pm->pmap_pvo, &key); 1091 pvo != NULL && PVO_VADDR(pvo) < eva; 1092 pvo = RB_NEXT(pvo_tree, &pm->pmap_pvo, pvo)) { 1093 pt = MOEA64_PVO_TO_PTE(mmu, pvo); 1094 if ((pvo->pvo_vaddr & PVO_WIRED) == 0) 1095 panic("moea64_unwire: pvo %p is missing PVO_WIRED", 1096 pvo); 1097 pvo->pvo_vaddr &= ~PVO_WIRED; 1098 if ((pvo->pvo_pte.lpte.pte_hi & LPTE_WIRED) == 0) 1099 panic("moea64_unwire: pte %p is missing LPTE_WIRED", 1100 &pvo->pvo_pte.lpte); 1101 pvo->pvo_pte.lpte.pte_hi &= ~LPTE_WIRED; 1102 if (pt != -1) { 1103 /* 1104 * The PTE's wired attribute is not a hardware 1105 * feature, so there is no need to invalidate any TLB 1106 * entries. 1107 */ 1108 MOEA64_PTE_CHANGE(mmu, pt, &pvo->pvo_pte.lpte, 1109 pvo->pvo_vpn); 1110 } 1111 pm->pm_stats.wired_count--; 1112 } 1113 UNLOCK_TABLE_RD(); 1114 PMAP_UNLOCK(pm); 1115} 1116 1117/* 1118 * This goes through and sets the physical address of our 1119 * special scratch PTE to the PA we want to zero or copy. 
Because 1120 * of locking issues (this can get called in pvo_enter() by 1121 * the UMA allocator), we can't use most other utility functions here 1122 */ 1123 1124static __inline 1125void moea64_set_scratchpage_pa(mmu_t mmup, int which, vm_offset_t pa) { 1126 1127 KASSERT(!hw_direct_map, ("Using OEA64 scratchpage with a direct map!")); 1128 mtx_assert(&moea64_scratchpage_mtx, MA_OWNED); 1129 1130 moea64_scratchpage_pvo[which]->pvo_pte.lpte.pte_lo &= 1131 ~(LPTE_WIMG | LPTE_RPGN); 1132 moea64_scratchpage_pvo[which]->pvo_pte.lpte.pte_lo |= 1133 moea64_calc_wimg(pa, VM_MEMATTR_DEFAULT) | (uint64_t)pa; 1134 MOEA64_PTE_CHANGE(mmup, moea64_scratchpage_pte[which], 1135 &moea64_scratchpage_pvo[which]->pvo_pte.lpte, 1136 moea64_scratchpage_pvo[which]->pvo_vpn); 1137 isync(); 1138} 1139 1140void 1141moea64_copy_page(mmu_t mmu, vm_page_t msrc, vm_page_t mdst) 1142{ 1143 vm_offset_t dst; 1144 vm_offset_t src; 1145 1146 dst = VM_PAGE_TO_PHYS(mdst); 1147 src = VM_PAGE_TO_PHYS(msrc); 1148 1149 if (hw_direct_map) { 1150 bcopy((void *)src, (void *)dst, PAGE_SIZE); 1151 } else { 1152 mtx_lock(&moea64_scratchpage_mtx); 1153 1154 moea64_set_scratchpage_pa(mmu, 0, src); 1155 moea64_set_scratchpage_pa(mmu, 1, dst); 1156 1157 bcopy((void *)moea64_scratchpage_va[0], 1158 (void *)moea64_scratchpage_va[1], PAGE_SIZE); 1159 1160 mtx_unlock(&moea64_scratchpage_mtx); 1161 } 1162} 1163 1164static inline void 1165moea64_copy_pages_dmap(mmu_t mmu, vm_page_t *ma, vm_offset_t a_offset, 1166 vm_page_t *mb, vm_offset_t b_offset, int xfersize) 1167{ 1168 void *a_cp, *b_cp; 1169 vm_offset_t a_pg_offset, b_pg_offset; 1170 int cnt; 1171 1172 while (xfersize > 0) { 1173 a_pg_offset = a_offset & PAGE_MASK; 1174 cnt = min(xfersize, PAGE_SIZE - a_pg_offset); 1175 a_cp = (char *)VM_PAGE_TO_PHYS(ma[a_offset >> PAGE_SHIFT]) + 1176 a_pg_offset; 1177 b_pg_offset = b_offset & PAGE_MASK; 1178 cnt = min(cnt, PAGE_SIZE - b_pg_offset); 1179 b_cp = (char *)VM_PAGE_TO_PHYS(mb[b_offset >> PAGE_SHIFT]) + 1180 b_pg_offset; 1181 bcopy(a_cp, b_cp, cnt); 1182 a_offset += cnt; 1183 b_offset += cnt; 1184 xfersize -= cnt; 1185 } 1186} 1187 1188static inline void 1189moea64_copy_pages_nodmap(mmu_t mmu, vm_page_t *ma, vm_offset_t a_offset, 1190 vm_page_t *mb, vm_offset_t b_offset, int xfersize) 1191{ 1192 void *a_cp, *b_cp; 1193 vm_offset_t a_pg_offset, b_pg_offset; 1194 int cnt; 1195 1196 mtx_lock(&moea64_scratchpage_mtx); 1197 while (xfersize > 0) { 1198 a_pg_offset = a_offset & PAGE_MASK; 1199 cnt = min(xfersize, PAGE_SIZE - a_pg_offset); 1200 moea64_set_scratchpage_pa(mmu, 0, 1201 VM_PAGE_TO_PHYS(ma[a_offset >> PAGE_SHIFT])); 1202 a_cp = (char *)moea64_scratchpage_va[0] + a_pg_offset; 1203 b_pg_offset = b_offset & PAGE_MASK; 1204 cnt = min(cnt, PAGE_SIZE - b_pg_offset); 1205 moea64_set_scratchpage_pa(mmu, 1, 1206 VM_PAGE_TO_PHYS(mb[b_offset >> PAGE_SHIFT])); 1207 b_cp = (char *)moea64_scratchpage_va[1] + b_pg_offset; 1208 bcopy(a_cp, b_cp, cnt); 1209 a_offset += cnt; 1210 b_offset += cnt; 1211 xfersize -= cnt; 1212 } 1213 mtx_unlock(&moea64_scratchpage_mtx); 1214} 1215 1216void 1217moea64_copy_pages(mmu_t mmu, vm_page_t *ma, vm_offset_t a_offset, 1218 vm_page_t *mb, vm_offset_t b_offset, int xfersize) 1219{ 1220 1221 if (hw_direct_map) { 1222 moea64_copy_pages_dmap(mmu, ma, a_offset, mb, b_offset, 1223 xfersize); 1224 } else { 1225 moea64_copy_pages_nodmap(mmu, ma, a_offset, mb, b_offset, 1226 xfersize); 1227 } 1228} 1229 1230void 1231moea64_zero_page_area(mmu_t mmu, vm_page_t m, int off, int size) 1232{ 1233 vm_offset_t pa = VM_PAGE_TO_PHYS(m); 1234 
1235 if (size + off > PAGE_SIZE) 1236 panic("moea64_zero_page: size + off > PAGE_SIZE"); 1237 1238 if (hw_direct_map) { 1239 bzero((caddr_t)pa + off, size); 1240 } else { 1241 mtx_lock(&moea64_scratchpage_mtx); 1242 moea64_set_scratchpage_pa(mmu, 0, pa); 1243 bzero((caddr_t)moea64_scratchpage_va[0] + off, size); 1244 mtx_unlock(&moea64_scratchpage_mtx); 1245 } 1246} 1247 1248/* 1249 * Zero a page of physical memory by temporarily mapping it 1250 */ 1251void 1252moea64_zero_page(mmu_t mmu, vm_page_t m) 1253{ 1254 vm_offset_t pa = VM_PAGE_TO_PHYS(m); 1255 vm_offset_t va, off; 1256 1257 if (!hw_direct_map) { 1258 mtx_lock(&moea64_scratchpage_mtx); 1259 1260 moea64_set_scratchpage_pa(mmu, 0, pa); 1261 va = moea64_scratchpage_va[0]; 1262 } else { 1263 va = pa; 1264 } 1265 1266 for (off = 0; off < PAGE_SIZE; off += cacheline_size) 1267 __asm __volatile("dcbz 0,%0" :: "r"(va + off)); 1268 1269 if (!hw_direct_map) 1270 mtx_unlock(&moea64_scratchpage_mtx); 1271} 1272 1273void 1274moea64_zero_page_idle(mmu_t mmu, vm_page_t m) 1275{ 1276 1277 moea64_zero_page(mmu, m); 1278} 1279 1280/* 1281 * Map the given physical page at the specified virtual address in the 1282 * target pmap with the protection requested. If specified the page 1283 * will be wired down. 1284 */ 1285 1286void 1287moea64_enter(mmu_t mmu, pmap_t pmap, vm_offset_t va, vm_page_t m, 1288 vm_prot_t prot, boolean_t wired) 1289{ 1290 struct pvo_head *pvo_head; 1291 uma_zone_t zone; 1292 uint64_t pte_lo; 1293 u_int pvo_flags; 1294 int error; 1295 1296 if ((m->oflags & VPO_UNMANAGED) == 0 && !vm_page_xbusied(m)) 1297 VM_OBJECT_ASSERT_LOCKED(m->object); 1298 1299 if ((m->oflags & VPO_UNMANAGED) != 0 || !moea64_initialized) { 1300 pvo_head = NULL; 1301 zone = moea64_upvo_zone; 1302 pvo_flags = 0; 1303 } else { 1304 pvo_head = vm_page_to_pvoh(m); 1305 zone = moea64_mpvo_zone; 1306 pvo_flags = PVO_MANAGED; 1307 } 1308 1309 pte_lo = moea64_calc_wimg(VM_PAGE_TO_PHYS(m), pmap_page_get_memattr(m)); 1310 1311 if (prot & VM_PROT_WRITE) { 1312 pte_lo |= LPTE_BW; 1313 if (pmap_bootstrapped && 1314 (m->oflags & VPO_UNMANAGED) == 0) 1315 vm_page_aflag_set(m, PGA_WRITEABLE); 1316 } else 1317 pte_lo |= LPTE_BR; 1318 1319 if ((prot & VM_PROT_EXECUTE) == 0) 1320 pte_lo |= LPTE_NOEXEC; 1321 1322 if (wired) 1323 pvo_flags |= PVO_WIRED; 1324 1325 LOCK_TABLE_WR(); 1326 PMAP_LOCK(pmap); 1327 error = moea64_pvo_enter(mmu, pmap, zone, pvo_head, va, 1328 VM_PAGE_TO_PHYS(m), pte_lo, pvo_flags); 1329 PMAP_UNLOCK(pmap); 1330 UNLOCK_TABLE_WR(); 1331 1332 /* 1333 * Flush the page from the instruction cache if this page is 1334 * mapped executable and cacheable. 1335 */ 1336 if (pmap != kernel_pmap && !(m->aflags & PGA_EXECUTABLE) && 1337 (pte_lo & (LPTE_I | LPTE_G | LPTE_NOEXEC)) == 0) { 1338 vm_page_aflag_set(m, PGA_EXECUTABLE); 1339 moea64_syncicache(mmu, pmap, va, VM_PAGE_TO_PHYS(m), PAGE_SIZE); 1340 } 1341} 1342 1343static void 1344moea64_syncicache(mmu_t mmu, pmap_t pmap, vm_offset_t va, vm_offset_t pa, 1345 vm_size_t sz) 1346{ 1347 1348 /* 1349 * This is much trickier than on older systems because 1350 * we can't sync the icache on physical addresses directly 1351 * without a direct map. Instead we check a couple of cases 1352 * where the memory is already mapped in and, failing that, 1353 * use the same trick we use for page zeroing to create 1354 * a temporary mapping for this physical address. 1355 */ 1356 1357 if (!pmap_bootstrapped) { 1358 /* 1359 * If PMAP is not bootstrapped, we are likely to be 1360 * in real mode. 
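		 *
		 * With translation off (MSR[IR|DR] clear) effective addresses
		 * are treated as physical, so the physical address can be
		 * passed to __syncicache() directly, just as in the
		 * direct-map case below.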
1361 */ 1362 __syncicache((void *)pa, sz); 1363 } else if (pmap == kernel_pmap) { 1364 __syncicache((void *)va, sz); 1365 } else if (hw_direct_map) { 1366 __syncicache((void *)pa, sz); 1367 } else { 1368 /* Use the scratch page to set up a temp mapping */ 1369 1370 mtx_lock(&moea64_scratchpage_mtx); 1371 1372 moea64_set_scratchpage_pa(mmu, 1, pa & ~ADDR_POFF); 1373 __syncicache((void *)(moea64_scratchpage_va[1] + 1374 (va & ADDR_POFF)), sz); 1375 1376 mtx_unlock(&moea64_scratchpage_mtx); 1377 } 1378} 1379 1380/* 1381 * Maps a sequence of resident pages belonging to the same object. 1382 * The sequence begins with the given page m_start. This page is 1383 * mapped at the given virtual address start. Each subsequent page is 1384 * mapped at a virtual address that is offset from start by the same 1385 * amount as the page is offset from m_start within the object. The 1386 * last page in the sequence is the page with the largest offset from 1387 * m_start that can be mapped at a virtual address less than the given 1388 * virtual address end. Not every virtual page between start and end 1389 * is mapped; only those for which a resident page exists with the 1390 * corresponding offset from m_start are mapped. 1391 */ 1392void 1393moea64_enter_object(mmu_t mmu, pmap_t pm, vm_offset_t start, vm_offset_t end, 1394 vm_page_t m_start, vm_prot_t prot) 1395{ 1396 vm_page_t m; 1397 vm_pindex_t diff, psize; 1398 1399 VM_OBJECT_ASSERT_LOCKED(m_start->object); 1400 1401 psize = atop(end - start); 1402 m = m_start; 1403 while (m != NULL && (diff = m->pindex - m_start->pindex) < psize) { 1404 moea64_enter(mmu, pm, start + ptoa(diff), m, prot & 1405 (VM_PROT_READ | VM_PROT_EXECUTE), FALSE); 1406 m = TAILQ_NEXT(m, listq); 1407 } 1408} 1409 1410void 1411moea64_enter_quick(mmu_t mmu, pmap_t pm, vm_offset_t va, vm_page_t m, 1412 vm_prot_t prot) 1413{ 1414 1415 moea64_enter(mmu, pm, va, m, 1416 prot & (VM_PROT_READ | VM_PROT_EXECUTE), FALSE); 1417} 1418 1419vm_paddr_t 1420moea64_extract(mmu_t mmu, pmap_t pm, vm_offset_t va) 1421{ 1422 struct pvo_entry *pvo; 1423 vm_paddr_t pa; 1424 1425 PMAP_LOCK(pm); 1426 pvo = moea64_pvo_find_va(pm, va); 1427 if (pvo == NULL) 1428 pa = 0; 1429 else 1430 pa = (pvo->pvo_pte.lpte.pte_lo & LPTE_RPGN) | 1431 (va - PVO_VADDR(pvo)); 1432 PMAP_UNLOCK(pm); 1433 return (pa); 1434} 1435 1436/* 1437 * Atomically extract and hold the physical page with the given 1438 * pmap and virtual address pair if that mapping permits the given 1439 * protection. 1440 */ 1441vm_page_t 1442moea64_extract_and_hold(mmu_t mmu, pmap_t pmap, vm_offset_t va, vm_prot_t prot) 1443{ 1444 struct pvo_entry *pvo; 1445 vm_page_t m; 1446 vm_paddr_t pa; 1447 1448 m = NULL; 1449 pa = 0; 1450 PMAP_LOCK(pmap); 1451retry: 1452 pvo = moea64_pvo_find_va(pmap, va & ~ADDR_POFF); 1453 if (pvo != NULL && (pvo->pvo_pte.lpte.pte_hi & LPTE_VALID) && 1454 ((pvo->pvo_pte.lpte.pte_lo & LPTE_PP) == LPTE_RW || 1455 (prot & VM_PROT_WRITE) == 0)) { 1456 if (vm_page_pa_tryrelock(pmap, 1457 pvo->pvo_pte.lpte.pte_lo & LPTE_RPGN, &pa)) 1458 goto retry; 1459 m = PHYS_TO_VM_PAGE(pvo->pvo_pte.lpte.pte_lo & LPTE_RPGN); 1460 vm_page_hold(m); 1461 } 1462 PA_UNLOCK_COND(pa); 1463 PMAP_UNLOCK(pmap); 1464 return (m); 1465} 1466 1467static mmu_t installed_mmu; 1468 1469static void * 1470moea64_uma_page_alloc(uma_zone_t zone, int bytes, u_int8_t *flags, int wait) 1471{ 1472 /* 1473 * This entire routine is a horrible hack to avoid bothering kmem 1474 * for new KVA addresses. 
Because this can get called from inside 1475 * kmem allocation routines, calling kmem for a new address here 1476 * can lead to multiply locking non-recursive mutexes. 1477 */ 1478 vm_offset_t va; 1479 1480 vm_page_t m; 1481 int pflags, needed_lock; 1482 1483 *flags = UMA_SLAB_PRIV; 1484 needed_lock = !PMAP_LOCKED(kernel_pmap); 1485 pflags = malloc2vm_flags(wait) | VM_ALLOC_WIRED; 1486 1487 for (;;) { 1488 m = vm_page_alloc(NULL, 0, pflags | VM_ALLOC_NOOBJ); 1489 if (m == NULL) { 1490 if (wait & M_NOWAIT) 1491 return (NULL); 1492 VM_WAIT; 1493 } else 1494 break; 1495 } 1496 1497 va = VM_PAGE_TO_PHYS(m); 1498 1499 LOCK_TABLE_WR(); 1500 if (needed_lock) 1501 PMAP_LOCK(kernel_pmap); 1502 1503 moea64_pvo_enter(installed_mmu, kernel_pmap, moea64_upvo_zone, 1504 NULL, va, VM_PAGE_TO_PHYS(m), LPTE_M, PVO_WIRED | PVO_BOOTSTRAP); 1505 1506 if (needed_lock) 1507 PMAP_UNLOCK(kernel_pmap); 1508 UNLOCK_TABLE_WR(); 1509 1510 if ((wait & M_ZERO) && (m->flags & PG_ZERO) == 0) 1511 bzero((void *)va, PAGE_SIZE); 1512 1513 return (void *)va; 1514} 1515 1516extern int elf32_nxstack; 1517 1518void 1519moea64_init(mmu_t mmu) 1520{ 1521 1522 CTR0(KTR_PMAP, "moea64_init"); 1523 1524 moea64_upvo_zone = uma_zcreate("UPVO entry", sizeof (struct pvo_entry), 1525 NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 1526 UMA_ZONE_VM | UMA_ZONE_NOFREE); 1527 moea64_mpvo_zone = uma_zcreate("MPVO entry", sizeof(struct pvo_entry), 1528 NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 1529 UMA_ZONE_VM | UMA_ZONE_NOFREE); 1530 1531 if (!hw_direct_map) { 1532 installed_mmu = mmu; 1533 uma_zone_set_allocf(moea64_upvo_zone,moea64_uma_page_alloc); 1534 uma_zone_set_allocf(moea64_mpvo_zone,moea64_uma_page_alloc); 1535 } 1536 1537#ifdef COMPAT_FREEBSD32 1538 elf32_nxstack = 1; 1539#endif 1540 1541 moea64_initialized = TRUE; 1542} 1543 1544boolean_t 1545moea64_is_referenced(mmu_t mmu, vm_page_t m) 1546{ 1547 1548 KASSERT((m->oflags & VPO_UNMANAGED) == 0, 1549 ("moea64_is_referenced: page %p is not managed", m)); 1550 return (moea64_query_bit(mmu, m, PTE_REF)); 1551} 1552 1553boolean_t 1554moea64_is_modified(mmu_t mmu, vm_page_t m) 1555{ 1556 1557 KASSERT((m->oflags & VPO_UNMANAGED) == 0, 1558 ("moea64_is_modified: page %p is not managed", m)); 1559 1560 /* 1561 * If the page is not exclusive busied, then PGA_WRITEABLE cannot be 1562 * concurrently set while the object is locked. Thus, if PGA_WRITEABLE 1563 * is clear, no PTEs can have LPTE_CHG set. 1564 */ 1565 VM_OBJECT_ASSERT_LOCKED(m->object); 1566 if (!vm_page_xbusied(m) && (m->aflags & PGA_WRITEABLE) == 0) 1567 return (FALSE); 1568 return (moea64_query_bit(mmu, m, LPTE_CHG)); 1569} 1570 1571boolean_t 1572moea64_is_prefaultable(mmu_t mmu, pmap_t pmap, vm_offset_t va) 1573{ 1574 struct pvo_entry *pvo; 1575 boolean_t rv; 1576 1577 PMAP_LOCK(pmap); 1578 pvo = moea64_pvo_find_va(pmap, va & ~ADDR_POFF); 1579 rv = pvo == NULL || (pvo->pvo_pte.lpte.pte_hi & LPTE_VALID) == 0; 1580 PMAP_UNLOCK(pmap); 1581 return (rv); 1582} 1583 1584void 1585moea64_clear_modify(mmu_t mmu, vm_page_t m) 1586{ 1587 1588 KASSERT((m->oflags & VPO_UNMANAGED) == 0, 1589 ("moea64_clear_modify: page %p is not managed", m)); 1590 VM_OBJECT_ASSERT_WLOCKED(m->object); 1591 KASSERT(!vm_page_xbusied(m), 1592 ("moea64_clear_modify: page %p is exclusive busied", m)); 1593 1594 /* 1595 * If the page is not PGA_WRITEABLE, then no PTEs can have LPTE_CHG 1596 * set. If the object containing the page is locked and the page is 1597 * not exclusive busied, then PGA_WRITEABLE cannot be concurrently set. 
1598 */ 1599 if ((m->aflags & PGA_WRITEABLE) == 0) 1600 return; 1601 moea64_clear_bit(mmu, m, LPTE_CHG); 1602} 1603 1604/* 1605 * Clear the write and modified bits in each of the given page's mappings. 1606 */ 1607void 1608moea64_remove_write(mmu_t mmu, vm_page_t m) 1609{ 1610 struct pvo_entry *pvo; 1611 uintptr_t pt; 1612 pmap_t pmap; 1613 uint64_t lo = 0; 1614 1615 KASSERT((m->oflags & VPO_UNMANAGED) == 0, 1616 ("moea64_remove_write: page %p is not managed", m)); 1617 1618 /* 1619 * If the page is not exclusive busied, then PGA_WRITEABLE cannot be 1620 * set by another thread while the object is locked. Thus, 1621 * if PGA_WRITEABLE is clear, no page table entries need updating. 1622 */ 1623 VM_OBJECT_ASSERT_WLOCKED(m->object); 1624 if (!vm_page_xbusied(m) && (m->aflags & PGA_WRITEABLE) == 0) 1625 return; 1626 powerpc_sync(); 1627 LOCK_TABLE_RD(); 1628 LIST_FOREACH(pvo, vm_page_to_pvoh(m), pvo_vlink) { 1629 pmap = pvo->pvo_pmap; 1630 PMAP_LOCK(pmap); 1631 if ((pvo->pvo_pte.lpte.pte_lo & LPTE_PP) != LPTE_BR) { 1632 pt = MOEA64_PVO_TO_PTE(mmu, pvo); 1633 pvo->pvo_pte.lpte.pte_lo &= ~LPTE_PP; 1634 pvo->pvo_pte.lpte.pte_lo |= LPTE_BR; 1635 if (pt != -1) { 1636 MOEA64_PTE_SYNCH(mmu, pt, &pvo->pvo_pte.lpte); 1637 lo |= pvo->pvo_pte.lpte.pte_lo; 1638 pvo->pvo_pte.lpte.pte_lo &= ~LPTE_CHG; 1639 MOEA64_PTE_CHANGE(mmu, pt, 1640 &pvo->pvo_pte.lpte, pvo->pvo_vpn); 1641 if (pvo->pvo_pmap == kernel_pmap) 1642 isync(); 1643 } 1644 } 1645 if ((lo & LPTE_CHG) != 0) 1646 vm_page_dirty(m); 1647 PMAP_UNLOCK(pmap); 1648 } 1649 UNLOCK_TABLE_RD(); 1650 vm_page_aflag_clear(m, PGA_WRITEABLE); 1651} 1652 1653/* 1654 * moea64_ts_referenced: 1655 * 1656 * Return a count of reference bits for a page, clearing those bits. 1657 * It is not necessary for every reference bit to be cleared, but it 1658 * is necessary that 0 only be returned when there are truly no 1659 * reference bits set. 1660 * 1661 * XXX: The exact number of bits to check and clear is a matter that 1662 * should be tested and standardized at some point in the future for 1663 * optimal aging of shared pages. 1664 */ 1665int 1666moea64_ts_referenced(mmu_t mmu, vm_page_t m) 1667{ 1668 1669 KASSERT((m->oflags & VPO_UNMANAGED) == 0, 1670 ("moea64_ts_referenced: page %p is not managed", m)); 1671 return (moea64_clear_bit(mmu, m, LPTE_REF)); 1672} 1673 1674/* 1675 * Modify the WIMG settings of all mappings for a page. 1676 */ 1677void 1678moea64_page_set_memattr(mmu_t mmu, vm_page_t m, vm_memattr_t ma) 1679{ 1680 struct pvo_entry *pvo; 1681 struct pvo_head *pvo_head; 1682 uintptr_t pt; 1683 pmap_t pmap; 1684 uint64_t lo; 1685 1686 if ((m->oflags & VPO_UNMANAGED) != 0) { 1687 m->md.mdpg_cache_attrs = ma; 1688 return; 1689 } 1690 1691 pvo_head = vm_page_to_pvoh(m); 1692 lo = moea64_calc_wimg(VM_PAGE_TO_PHYS(m), ma); 1693 LOCK_TABLE_RD(); 1694 LIST_FOREACH(pvo, pvo_head, pvo_vlink) { 1695 pmap = pvo->pvo_pmap; 1696 PMAP_LOCK(pmap); 1697 pt = MOEA64_PVO_TO_PTE(mmu, pvo); 1698 pvo->pvo_pte.lpte.pte_lo &= ~LPTE_WIMG; 1699 pvo->pvo_pte.lpte.pte_lo |= lo; 1700 if (pt != -1) { 1701 MOEA64_PTE_CHANGE(mmu, pt, &pvo->pvo_pte.lpte, 1702 pvo->pvo_vpn); 1703 if (pvo->pvo_pmap == kernel_pmap) 1704 isync(); 1705 } 1706 PMAP_UNLOCK(pmap); 1707 } 1708 UNLOCK_TABLE_RD(); 1709 m->md.mdpg_cache_attrs = ma; 1710} 1711 1712/* 1713 * Map a wired page into kernel virtual address space. 
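 *
 * These methods back the pmap_kenter() and pmap_kenter_attr() entry points
 * (dispatched through mmu_if); for instance, a device page might be mapped
 * uncached with pmap_kenter_attr(va, pa, VM_MEMATTR_UNCACHEABLE).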
1714 */ 1715void 1716moea64_kenter_attr(mmu_t mmu, vm_offset_t va, vm_offset_t pa, vm_memattr_t ma) 1717{ 1718 uint64_t pte_lo; 1719 int error; 1720 1721 pte_lo = moea64_calc_wimg(pa, ma); 1722 1723 LOCK_TABLE_WR(); 1724 PMAP_LOCK(kernel_pmap); 1725 error = moea64_pvo_enter(mmu, kernel_pmap, moea64_upvo_zone, 1726 NULL, va, pa, pte_lo, PVO_WIRED); 1727 PMAP_UNLOCK(kernel_pmap); 1728 UNLOCK_TABLE_WR(); 1729 1730 if (error != 0 && error != ENOENT) 1731 panic("moea64_kenter: failed to enter va %#zx pa %#zx: %d", va, 1732 pa, error); 1733} 1734 1735void 1736moea64_kenter(mmu_t mmu, vm_offset_t va, vm_paddr_t pa) 1737{ 1738 1739 moea64_kenter_attr(mmu, va, pa, VM_MEMATTR_DEFAULT); 1740} 1741 1742/* 1743 * Extract the physical page address associated with the given kernel virtual 1744 * address. 1745 */ 1746vm_paddr_t 1747moea64_kextract(mmu_t mmu, vm_offset_t va) 1748{ 1749 struct pvo_entry *pvo; 1750 vm_paddr_t pa; 1751 1752 /* 1753 * Shortcut the direct-mapped case when applicable. We never put 1754 * anything but 1:1 mappings below VM_MIN_KERNEL_ADDRESS. 1755 */ 1756 if (va < VM_MIN_KERNEL_ADDRESS) 1757 return (va); 1758 1759 PMAP_LOCK(kernel_pmap); 1760 pvo = moea64_pvo_find_va(kernel_pmap, va); 1761 KASSERT(pvo != NULL, ("moea64_kextract: no addr found for %#" PRIxPTR, 1762 va)); 1763 pa = (pvo->pvo_pte.lpte.pte_lo & LPTE_RPGN) | (va - PVO_VADDR(pvo)); 1764 PMAP_UNLOCK(kernel_pmap); 1765 return (pa); 1766} 1767 1768/* 1769 * Remove a wired page from kernel virtual address space. 1770 */ 1771void 1772moea64_kremove(mmu_t mmu, vm_offset_t va) 1773{ 1774 moea64_remove(mmu, kernel_pmap, va, va + PAGE_SIZE); 1775} 1776 1777/* 1778 * Map a range of physical addresses into kernel virtual address space. 1779 * 1780 * The value passed in *virt is a suggested virtual address for the mapping. 1781 * Architectures which can support a direct-mapped physical to virtual region 1782 * can return the appropriate address within that region, leaving '*virt' 1783 * unchanged. We cannot and therefore do not; *virt is updated with the 1784 * first usable address after the mapped region. 1785 */ 1786vm_offset_t 1787moea64_map(mmu_t mmu, vm_offset_t *virt, vm_paddr_t pa_start, 1788 vm_paddr_t pa_end, int prot) 1789{ 1790 vm_offset_t sva, va; 1791 1792 sva = *virt; 1793 va = sva; 1794 for (; pa_start < pa_end; pa_start += PAGE_SIZE, va += PAGE_SIZE) 1795 moea64_kenter(mmu, va, pa_start); 1796 *virt = va; 1797 1798 return (sva); 1799} 1800 1801/* 1802 * Returns true if the pmap's pv is one of the first 1803 * 16 pvs linked to from this page. This count may 1804 * be changed upwards or downwards in the future; it 1805 * is only necessary that true be returned for a small 1806 * subset of pmaps for proper page aging. 1807 */ 1808boolean_t 1809moea64_page_exists_quick(mmu_t mmu, pmap_t pmap, vm_page_t m) 1810{ 1811 int loops; 1812 struct pvo_entry *pvo; 1813 boolean_t rv; 1814 1815 KASSERT((m->oflags & VPO_UNMANAGED) == 0, 1816 ("moea64_page_exists_quick: page %p is not managed", m)); 1817 loops = 0; 1818 rv = FALSE; 1819 LOCK_TABLE_RD(); 1820 LIST_FOREACH(pvo, vm_page_to_pvoh(m), pvo_vlink) { 1821 if (pvo->pvo_pmap == pmap) { 1822 rv = TRUE; 1823 break; 1824 } 1825 if (++loops >= 16) 1826 break; 1827 } 1828 UNLOCK_TABLE_RD(); 1829 return (rv); 1830} 1831 1832/* 1833 * Return the number of managed mappings to the given physical page 1834 * that are wired. 
1835 */ 1836int 1837moea64_page_wired_mappings(mmu_t mmu, vm_page_t m) 1838{ 1839 struct pvo_entry *pvo; 1840 int count; 1841 1842 count = 0; 1843 if ((m->oflags & VPO_UNMANAGED) != 0) 1844 return (count); 1845 LOCK_TABLE_RD(); 1846 LIST_FOREACH(pvo, vm_page_to_pvoh(m), pvo_vlink) 1847 if ((pvo->pvo_vaddr & PVO_WIRED) != 0) 1848 count++; 1849 UNLOCK_TABLE_RD(); 1850 return (count); 1851} 1852 1853static uintptr_t moea64_vsidcontext; 1854 1855uintptr_t 1856moea64_get_unique_vsid(void) { 1857 u_int entropy; 1858 register_t hash; 1859 uint32_t mask; 1860 int i; 1861 1862 entropy = 0; 1863 __asm __volatile("mftb %0" : "=r"(entropy)); 1864 1865 mtx_lock(&moea64_slb_mutex); 1866 for (i = 0; i < NVSIDS; i += VSID_NBPW) { 1867 u_int n; 1868 1869 /* 1870 * Create a new value by mutiplying by a prime and adding in 1871 * entropy from the timebase register. This is to make the 1872 * VSID more random so that the PT hash function collides 1873 * less often. (Note that the prime casues gcc to do shifts 1874 * instead of a multiply.) 1875 */ 1876 moea64_vsidcontext = (moea64_vsidcontext * 0x1105) + entropy; 1877 hash = moea64_vsidcontext & (NVSIDS - 1); 1878 if (hash == 0) /* 0 is special, avoid it */ 1879 continue; 1880 n = hash >> 5; 1881 mask = 1 << (hash & (VSID_NBPW - 1)); 1882 hash = (moea64_vsidcontext & VSID_HASHMASK); 1883 if (moea64_vsid_bitmap[n] & mask) { /* collision? */ 1884 /* anything free in this bucket? */ 1885 if (moea64_vsid_bitmap[n] == 0xffffffff) { 1886 entropy = (moea64_vsidcontext >> 20); 1887 continue; 1888 } 1889 i = ffs(~moea64_vsid_bitmap[n]) - 1; 1890 mask = 1 << i; 1891 hash &= VSID_HASHMASK & ~(VSID_NBPW - 1); 1892 hash |= i; 1893 } 1894 KASSERT(!(moea64_vsid_bitmap[n] & mask), 1895 ("Allocating in-use VSID %#zx\n", hash)); 1896 moea64_vsid_bitmap[n] |= mask; 1897 mtx_unlock(&moea64_slb_mutex); 1898 return (hash); 1899 } 1900 1901 mtx_unlock(&moea64_slb_mutex); 1902 panic("%s: out of segments",__func__); 1903} 1904 1905#ifdef __powerpc64__ 1906void 1907moea64_pinit(mmu_t mmu, pmap_t pmap) 1908{ 1909 1910 RB_INIT(&pmap->pmap_pvo); 1911 1912 pmap->pm_slb_tree_root = slb_alloc_tree(); 1913 pmap->pm_slb = slb_alloc_user_cache(); 1914 pmap->pm_slb_len = 0; 1915} 1916#else 1917void 1918moea64_pinit(mmu_t mmu, pmap_t pmap) 1919{ 1920 int i; 1921 uint32_t hash; 1922 1923 RB_INIT(&pmap->pmap_pvo); 1924 1925 if (pmap_bootstrapped) 1926 pmap->pmap_phys = (pmap_t)moea64_kextract(mmu, 1927 (vm_offset_t)pmap); 1928 else 1929 pmap->pmap_phys = pmap; 1930 1931 /* 1932 * Allocate some segment registers for this pmap. 1933 */ 1934 hash = moea64_get_unique_vsid(); 1935 1936 for (i = 0; i < 16; i++) 1937 pmap->pm_sr[i] = VSID_MAKE(i, hash); 1938 1939 KASSERT(pmap->pm_sr[0] != 0, ("moea64_pinit: pm_sr[0] = 0")); 1940} 1941#endif 1942 1943/* 1944 * Initialize the pmap associated with process 0. 1945 */ 1946void 1947moea64_pinit0(mmu_t mmu, pmap_t pm) 1948{ 1949 1950 PMAP_LOCK_INIT(pm); 1951 moea64_pinit(mmu, pm); 1952 bzero(&pm->pm_stats, sizeof(pm->pm_stats)); 1953} 1954 1955/* 1956 * Set the physical protection on the specified range of this map as requested. 1957 */ 1958static void 1959moea64_pvo_protect(mmu_t mmu, pmap_t pm, struct pvo_entry *pvo, vm_prot_t prot) 1960{ 1961 uintptr_t pt; 1962 struct vm_page *pg; 1963 uint64_t oldlo; 1964 1965 PMAP_LOCK_ASSERT(pm, MA_OWNED); 1966 1967 /* 1968 * Grab the PTE pointer before we diddle with the cached PTE 1969 * copy. 1970 */ 1971 pt = MOEA64_PVO_TO_PTE(mmu, pvo); 1972 1973 /* 1974 * Change the protection of the page. 
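	 *
	 * LPTE_PP carries the page-protection bits: LPTE_BR makes the
	 * mapping read-only and LPTE_BW read/write; execute permission is
	 * withheld separately with LPTE_NOEXEC (the no-execute bit).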
#ifdef __powerpc64__
void
moea64_pinit(mmu_t mmu, pmap_t pmap)
{

	RB_INIT(&pmap->pmap_pvo);

	pmap->pm_slb_tree_root = slb_alloc_tree();
	pmap->pm_slb = slb_alloc_user_cache();
	pmap->pm_slb_len = 0;
}
#else
void
moea64_pinit(mmu_t mmu, pmap_t pmap)
{
	int	i;
	uint32_t hash;

	RB_INIT(&pmap->pmap_pvo);

	if (pmap_bootstrapped)
		pmap->pmap_phys = (pmap_t)moea64_kextract(mmu,
		    (vm_offset_t)pmap);
	else
		pmap->pmap_phys = pmap;

	/*
	 * Allocate some segment registers for this pmap.
	 */
	hash = moea64_get_unique_vsid();

	for (i = 0; i < 16; i++)
		pmap->pm_sr[i] = VSID_MAKE(i, hash);

	KASSERT(pmap->pm_sr[0] != 0, ("moea64_pinit: pm_sr[0] = 0"));
}
#endif

/*
 * Initialize the pmap associated with process 0.
 */
void
moea64_pinit0(mmu_t mmu, pmap_t pm)
{

	PMAP_LOCK_INIT(pm);
	moea64_pinit(mmu, pm);
	bzero(&pm->pm_stats, sizeof(pm->pm_stats));
}

/*
 * Set the physical protection on the specified range of this map as requested.
 */
static void
moea64_pvo_protect(mmu_t mmu, pmap_t pm, struct pvo_entry *pvo, vm_prot_t prot)
{
	uintptr_t pt;
	struct	vm_page *pg;
	uint64_t oldlo;

	PMAP_LOCK_ASSERT(pm, MA_OWNED);

	/*
	 * Grab the PTE pointer before we diddle with the cached PTE
	 * copy.
	 */
	pt = MOEA64_PVO_TO_PTE(mmu, pvo);

	/*
	 * Change the protection of the page.
	 */
	oldlo = pvo->pvo_pte.lpte.pte_lo;
	pvo->pvo_pte.lpte.pte_lo &= ~LPTE_PP;
	pvo->pvo_pte.lpte.pte_lo &= ~LPTE_NOEXEC;
	if ((prot & VM_PROT_EXECUTE) == 0)
		pvo->pvo_pte.lpte.pte_lo |= LPTE_NOEXEC;
	if (prot & VM_PROT_WRITE)
		pvo->pvo_pte.lpte.pte_lo |= LPTE_BW;
	else
		pvo->pvo_pte.lpte.pte_lo |= LPTE_BR;

	pg = PHYS_TO_VM_PAGE(pvo->pvo_pte.lpte.pte_lo & LPTE_RPGN);

	/*
	 * If the PVO is in the page table, update that pte as well.
	 */
	if (pt != -1)
		MOEA64_PTE_CHANGE(mmu, pt, &pvo->pvo_pte.lpte,
		    pvo->pvo_vpn);
	if (pm != kernel_pmap && pg != NULL && !(pg->aflags & PGA_EXECUTABLE) &&
	    (pvo->pvo_pte.lpte.pte_lo & (LPTE_I | LPTE_G | LPTE_NOEXEC)) == 0) {
		if ((pg->oflags & VPO_UNMANAGED) == 0)
			vm_page_aflag_set(pg, PGA_EXECUTABLE);
		moea64_syncicache(mmu, pm, PVO_VADDR(pvo),
		    pvo->pvo_pte.lpte.pte_lo & LPTE_RPGN, PAGE_SIZE);
	}

	/*
	 * Update vm about the REF/CHG bits if the page is managed and we have
	 * removed write access.
	 */
	if ((pvo->pvo_vaddr & PVO_MANAGED) == PVO_MANAGED &&
	    (oldlo & LPTE_PP) != LPTE_BR && !(prot & VM_PROT_WRITE)) {
		if (pg != NULL) {
			if (pvo->pvo_pte.lpte.pte_lo & LPTE_CHG)
				vm_page_dirty(pg);
			if (pvo->pvo_pte.lpte.pte_lo & LPTE_REF)
				vm_page_aflag_set(pg, PGA_REFERENCED);
		}
	}
}

void
moea64_protect(mmu_t mmu, pmap_t pm, vm_offset_t sva, vm_offset_t eva,
    vm_prot_t prot)
{
	struct	pvo_entry *pvo, *tpvo, key;

	CTR4(KTR_PMAP, "moea64_protect: pm=%p sva=%#x eva=%#x prot=%#x", pm,
	    sva, eva, prot);

	KASSERT(pm == &curproc->p_vmspace->vm_pmap || pm == kernel_pmap,
	    ("moea64_protect: non current pmap"));

	if ((prot & VM_PROT_READ) == VM_PROT_NONE) {
		moea64_remove(mmu, pm, sva, eva);
		return;
	}

	LOCK_TABLE_RD();
	PMAP_LOCK(pm);
	key.pvo_vaddr = sva;
	for (pvo = RB_NFIND(pvo_tree, &pm->pmap_pvo, &key);
	    pvo != NULL && PVO_VADDR(pvo) < eva; pvo = tpvo) {
		tpvo = RB_NEXT(pvo_tree, &pm->pmap_pvo, pvo);
		moea64_pvo_protect(mmu, pm, pvo, prot);
	}
	UNLOCK_TABLE_RD();
	PMAP_UNLOCK(pm);
}

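/*
 * The range loop above relies on the per-pmap PVO tree being ordered by
 * virtual address: RB_NFIND returns the first PVO whose address is greater
 * than or equal to sva, and iteration stops as soon as a PVO at or beyond
 * eva is reached.  moea64_remove() below walks ranges the same way, saving
 * the successor in tpvo before the current entry may be freed.
 */
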
/*
 * Map a list of wired pages into kernel virtual address space.  This is
 * intended for temporary mappings which do not need page modification or
 * references recorded.  Existing mappings in the region are overwritten.
 */
void
moea64_qenter(mmu_t mmu, vm_offset_t va, vm_page_t *m, int count)
{
	while (count-- > 0) {
		moea64_kenter(mmu, va, VM_PAGE_TO_PHYS(*m));
		va += PAGE_SIZE;
		m++;
	}
}

/*
 * Remove page mappings from kernel virtual address space.  Intended for
 * temporary mappings entered by moea64_qenter.
 */
void
moea64_qremove(mmu_t mmu, vm_offset_t va, int count)
{
	while (count-- > 0) {
		moea64_kremove(mmu, va);
		va += PAGE_SIZE;
	}
}

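/*
 * A typical (hypothetical) consumer maps a set of pages into a contiguous
 * KVA window, works on them, and then tears the window down with the same
 * page count, e.g.:
 *
 *	moea64_qenter(mmu, buf_kva, pages, npages);
 *	... access the buffer at buf_kva ...
 *	moea64_qremove(mmu, buf_kva, npages);
 *
 * The names buf_kva, pages and npages are illustrative only.
 */
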
void
moea64_release_vsid(uint64_t vsid)
{
	int idx, mask;

	mtx_lock(&moea64_slb_mutex);
	idx = vsid & (NVSIDS-1);
	mask = 1 << (idx % VSID_NBPW);
	idx /= VSID_NBPW;
	KASSERT(moea64_vsid_bitmap[idx] & mask,
	    ("Freeing unallocated VSID %#jx", vsid));
	moea64_vsid_bitmap[idx] &= ~mask;
	mtx_unlock(&moea64_slb_mutex);
}


void
moea64_release(mmu_t mmu, pmap_t pmap)
{

	/*
	 * Free segment registers' VSIDs
	 */
	#ifdef __powerpc64__
	slb_free_tree(pmap);
	slb_free_user_cache(pmap->pm_slb);
	#else
	KASSERT(pmap->pm_sr[0] != 0, ("moea64_release: pm_sr[0] = 0"));

	moea64_release_vsid(VSID_TO_HASH(pmap->pm_sr[0]));
	#endif
}

/*
 * Remove all pages mapped by the specified pmap
 */
void
moea64_remove_pages(mmu_t mmu, pmap_t pm)
{
	struct pvo_entry *pvo, *tpvo;

	LOCK_TABLE_WR();
	PMAP_LOCK(pm);
	RB_FOREACH_SAFE(pvo, pvo_tree, &pm->pmap_pvo, tpvo) {
		if (!(pvo->pvo_vaddr & PVO_WIRED))
			moea64_pvo_remove(mmu, pvo);
	}
	UNLOCK_TABLE_WR();
	PMAP_UNLOCK(pm);
}

/*
 * Remove the given range of addresses from the specified map.
 */
void
moea64_remove(mmu_t mmu, pmap_t pm, vm_offset_t sva, vm_offset_t eva)
{
	struct	pvo_entry *pvo, *tpvo, key;

	/*
	 * Perform an unsynchronized read.  This is, however, safe.
	 */
	if (pm->pm_stats.resident_count == 0)
		return;

	LOCK_TABLE_WR();
	PMAP_LOCK(pm);
	key.pvo_vaddr = sva;
	for (pvo = RB_NFIND(pvo_tree, &pm->pmap_pvo, &key);
	    pvo != NULL && PVO_VADDR(pvo) < eva; pvo = tpvo) {
		tpvo = RB_NEXT(pvo_tree, &pm->pmap_pvo, pvo);
		moea64_pvo_remove(mmu, pvo);
	}
	UNLOCK_TABLE_WR();
	PMAP_UNLOCK(pm);
}

/*
 * Remove physical page from all pmaps in which it resides. moea64_pvo_remove()
 * will reflect changes in pte's back to the vm_page.
 */
void
moea64_remove_all(mmu_t mmu, vm_page_t m)
{
	struct	pvo_entry *pvo, *next_pvo;
	pmap_t	pmap;

	LOCK_TABLE_WR();
	LIST_FOREACH_SAFE(pvo, vm_page_to_pvoh(m), pvo_vlink, next_pvo) {
		pmap = pvo->pvo_pmap;
		PMAP_LOCK(pmap);
		moea64_pvo_remove(mmu, pvo);
		PMAP_UNLOCK(pmap);
	}
	UNLOCK_TABLE_WR();
	if ((m->aflags & PGA_WRITEABLE) && moea64_is_modified(mmu, m))
		vm_page_dirty(m);
	vm_page_aflag_clear(m, PGA_WRITEABLE);
	vm_page_aflag_clear(m, PGA_EXECUTABLE);
}

/*
 * Allocate a physical page of memory directly from the phys_avail map.
 * Can only be called from moea64_bootstrap before avail start and end are
 * calculated.
 */
vm_offset_t
moea64_bootstrap_alloc(vm_size_t size, u_int align)
{
	vm_offset_t	s, e;
	int		i, j;

	size = round_page(size);
	for (i = 0; phys_avail[i + 1] != 0; i += 2) {
		if (align != 0)
			s = (phys_avail[i] + align - 1) & ~(align - 1);
		else
			s = phys_avail[i];
		e = s + size;

		if (s < phys_avail[i] || e > phys_avail[i + 1])
			continue;

		if (s + size > platform_real_maxaddr())
			continue;

		if (s == phys_avail[i]) {
			phys_avail[i] += size;
		} else if (e == phys_avail[i + 1]) {
			phys_avail[i + 1] -= size;
		} else {
			for (j = phys_avail_count * 2; j > i; j -= 2) {
				phys_avail[j] = phys_avail[j - 2];
				phys_avail[j + 1] = phys_avail[j - 1];
			}

			phys_avail[i + 3] = phys_avail[i + 1];
			phys_avail[i + 1] = s;
			phys_avail[i + 2] = e;
			phys_avail_count++;
		}

		return (s);
	}
	panic("moea64_bootstrap_alloc: could not allocate memory");
}

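/*
 * For example (illustrative values only, and assuming the result stays
 * below platform_real_maxaddr()): with phys_avail[] describing a single
 * region [0x3000, 0x20000) and a request for 0x2000 bytes aligned to
 * 0x4000, the carved block is [0x4000, 0x6000).  Since that leaves free
 * memory on both sides, the region is split in place into [0x3000, 0x4000)
 * and [0x6000, 0x20000) and phys_avail_count is bumped by one.
 */
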
static int
moea64_pvo_enter(mmu_t mmu, pmap_t pm, uma_zone_t zone,
    struct pvo_head *pvo_head, vm_offset_t va, vm_offset_t pa,
    uint64_t pte_lo, int flags)
{
	struct	 pvo_entry *pvo;
	uintptr_t pt;
	uint64_t vsid;
	int	 first;
	u_int	 ptegidx;
	int	 i;
	int	 bootstrap;

	/*
	 * One nasty thing that can happen here is that the UMA calls to
	 * allocate new PVOs need to map more memory, which calls pvo_enter(),
	 * which calls UMA...
	 *
	 * We break the loop by detecting recursion and allocating out of
	 * the bootstrap pool.
	 */

	first = 0;
	bootstrap = (flags & PVO_BOOTSTRAP);

	if (!moea64_initialized)
		bootstrap = 1;

	PMAP_LOCK_ASSERT(pm, MA_OWNED);
	rw_assert(&moea64_table_lock, RA_WLOCKED);

	/*
	 * Compute the PTE Group index.
	 */
	va &= ~ADDR_POFF;
	vsid = va_to_vsid(pm, va);
	ptegidx = va_to_pteg(vsid, va, flags & PVO_LARGE);

	/*
	 * Remove any existing mapping for this page.  Reuse the pvo entry if
	 * there is a mapping.
	 */
	moea64_pvo_enter_calls++;

	LIST_FOREACH(pvo, &moea64_pvo_table[ptegidx], pvo_olink) {
		if (pvo->pvo_pmap == pm && PVO_VADDR(pvo) == va) {
			if ((pvo->pvo_pte.lpte.pte_lo & LPTE_RPGN) == pa &&
			    (pvo->pvo_pte.lpte.pte_lo & (LPTE_NOEXEC | LPTE_PP))
			    == (pte_lo & (LPTE_NOEXEC | LPTE_PP))) {
				/*
				 * The physical page and protection are not
				 * changing.  Instead, this may be a request
				 * to change the mapping's wired attribute.
				 */
				pt = -1;
				if ((flags & PVO_WIRED) != 0 &&
				    (pvo->pvo_vaddr & PVO_WIRED) == 0) {
					pt = MOEA64_PVO_TO_PTE(mmu, pvo);
					pvo->pvo_vaddr |= PVO_WIRED;
					pvo->pvo_pte.lpte.pte_hi |= LPTE_WIRED;
					pm->pm_stats.wired_count++;
				} else if ((flags & PVO_WIRED) == 0 &&
				    (pvo->pvo_vaddr & PVO_WIRED) != 0) {
					pt = MOEA64_PVO_TO_PTE(mmu, pvo);
					pvo->pvo_vaddr &= ~PVO_WIRED;
					pvo->pvo_pte.lpte.pte_hi &= ~LPTE_WIRED;
					pm->pm_stats.wired_count--;
				}
				if (!(pvo->pvo_pte.lpte.pte_hi & LPTE_VALID)) {
					KASSERT(pt == -1,
					    ("moea64_pvo_enter: valid pt"));
					/* Re-insert if spilled */
					i = MOEA64_PTE_INSERT(mmu, ptegidx,
					    &pvo->pvo_pte.lpte);
					if (i >= 0)
						PVO_PTEGIDX_SET(pvo, i);
					moea64_pte_overflow--;
				} else if (pt != -1) {
					/*
					 * The PTE's wired attribute is not a
					 * hardware feature, so there is no
					 * need to invalidate any TLB entries.
					 */
					MOEA64_PTE_CHANGE(mmu, pt,
					    &pvo->pvo_pte.lpte, pvo->pvo_vpn);
				}
				return (0);
			}
			moea64_pvo_remove(mmu, pvo);
			break;
		}
	}

	/*
	 * If we aren't overwriting a mapping, try to allocate.
	 */
	if (bootstrap) {
		if (moea64_bpvo_pool_index >= BPVO_POOL_SIZE) {
			panic("moea64_enter: bpvo pool exhausted, %d, %d, %zd",
			      moea64_bpvo_pool_index, BPVO_POOL_SIZE,
			      BPVO_POOL_SIZE * sizeof(struct pvo_entry));
		}
		pvo = &moea64_bpvo_pool[moea64_bpvo_pool_index];
		moea64_bpvo_pool_index++;
		bootstrap = 1;
	} else {
		pvo = uma_zalloc(zone, M_NOWAIT);
	}

	if (pvo == NULL)
		return (ENOMEM);

	moea64_pvo_entries++;
	pvo->pvo_vaddr = va;
	pvo->pvo_vpn = (uint64_t)((va & ADDR_PIDX) >> ADDR_PIDX_SHFT)
	    | (vsid << 16);
	pvo->pvo_pmap = pm;
	LIST_INSERT_HEAD(&moea64_pvo_table[ptegidx], pvo, pvo_olink);
	pvo->pvo_vaddr &= ~ADDR_POFF;

	if (flags & PVO_WIRED)
		pvo->pvo_vaddr |= PVO_WIRED;
	if (pvo_head != NULL)
		pvo->pvo_vaddr |= PVO_MANAGED;
	if (bootstrap)
		pvo->pvo_vaddr |= PVO_BOOTSTRAP;
	if (flags & PVO_LARGE)
		pvo->pvo_vaddr |= PVO_LARGE;

	moea64_pte_create(&pvo->pvo_pte.lpte, vsid, va,
	    (uint64_t)(pa) | pte_lo, flags);

	/*
	 * Add to pmap list
	 */
	RB_INSERT(pvo_tree, &pm->pmap_pvo, pvo);

	/*
	 * Remember if the list was empty and therefore will be the first
	 * item.
	 */
	if (pvo_head != NULL) {
		if (LIST_FIRST(pvo_head) == NULL)
			first = 1;
		LIST_INSERT_HEAD(pvo_head, pvo, pvo_vlink);
	}

	if (pvo->pvo_vaddr & PVO_WIRED) {
		pvo->pvo_pte.lpte.pte_hi |= LPTE_WIRED;
		pm->pm_stats.wired_count++;
	}
	pm->pm_stats.resident_count++;

	/*
	 * We hope this succeeds but it isn't required.
	 */
	i = MOEA64_PTE_INSERT(mmu, ptegidx, &pvo->pvo_pte.lpte);
	if (i >= 0) {
		PVO_PTEGIDX_SET(pvo, i);
	} else {
		panic("moea64_pvo_enter: overflow");
		moea64_pte_overflow++;
	}

	if (pm == kernel_pmap)
		isync();

#ifdef __powerpc64__
	/*
	 * Make sure all our bootstrap mappings are in the SLB as soon
	 * as virtual memory is switched on.
	 */
	if (!pmap_bootstrapped)
		moea64_bootstrap_slb_prefault(va, flags & PVO_LARGE);
#endif

	return (first ? ENOENT : 0);
}

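/*
 * Note that ENOENT is not a failure here: it tells the caller that this
 * was the first mapping inserted on the page's managed PVO list.  For
 * instance, moea64_kenter_attr() above treats ENOENT as success and only
 * panics on any other non-zero return.
 */
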
static void
moea64_pvo_remove(mmu_t mmu, struct pvo_entry *pvo)
{
	struct	vm_page *pg;
	uintptr_t pt;

	PMAP_LOCK_ASSERT(pvo->pvo_pmap, MA_OWNED);
	rw_assert(&moea64_table_lock, RA_WLOCKED);

	/*
	 * If there is an active pte entry, we need to deactivate it (and
	 * save the ref & chg bits).
	 */
	pt = MOEA64_PVO_TO_PTE(mmu, pvo);
	if (pt != -1) {
		MOEA64_PTE_UNSET(mmu, pt, &pvo->pvo_pte.lpte, pvo->pvo_vpn);
		PVO_PTEGIDX_CLR(pvo);
	} else {
		moea64_pte_overflow--;
	}

	/*
	 * Update our statistics.
	 */
	pvo->pvo_pmap->pm_stats.resident_count--;
	if (pvo->pvo_vaddr & PVO_WIRED)
		pvo->pvo_pmap->pm_stats.wired_count--;

	/*
	 * Remove this PVO from the pmap list.
	 */
	RB_REMOVE(pvo_tree, &pvo->pvo_pmap->pmap_pvo, pvo);

	/*
	 * Remove this from the overflow list and return it to the pool
	 * if we aren't going to reuse it.
	 */
	LIST_REMOVE(pvo, pvo_olink);

	/*
	 * Update vm about the REF/CHG bits if the page is managed.
	 */
	pg = PHYS_TO_VM_PAGE(pvo->pvo_pte.lpte.pte_lo & LPTE_RPGN);

	if ((pvo->pvo_vaddr & PVO_MANAGED) == PVO_MANAGED && pg != NULL) {
		LIST_REMOVE(pvo, pvo_vlink);
		if ((pvo->pvo_pte.lpte.pte_lo & LPTE_PP) != LPTE_BR) {
			if (pvo->pvo_pte.lpte.pte_lo & LPTE_CHG)
				vm_page_dirty(pg);
			if (pvo->pvo_pte.lpte.pte_lo & LPTE_REF)
				vm_page_aflag_set(pg, PGA_REFERENCED);
			if (LIST_EMPTY(vm_page_to_pvoh(pg)))
				vm_page_aflag_clear(pg, PGA_WRITEABLE);
		}
		if (LIST_EMPTY(vm_page_to_pvoh(pg)))
			vm_page_aflag_clear(pg, PGA_EXECUTABLE);
	}

	moea64_pvo_entries--;
	moea64_pvo_remove_calls++;

	if (!(pvo->pvo_vaddr & PVO_BOOTSTRAP))
		uma_zfree((pvo->pvo_vaddr & PVO_MANAGED) ? moea64_mpvo_zone :
		    moea64_upvo_zone, pvo);
}

static struct pvo_entry *
moea64_pvo_find_va(pmap_t pm, vm_offset_t va)
{
	struct pvo_entry key;

	key.pvo_vaddr = va & ~ADDR_POFF;
	return (RB_FIND(pvo_tree, &pm->pmap_pvo, &key));
}

static boolean_t
moea64_query_bit(mmu_t mmu, vm_page_t m, u_int64_t ptebit)
{
	struct	pvo_entry *pvo;
	uintptr_t pt;

	LOCK_TABLE_RD();
	LIST_FOREACH(pvo, vm_page_to_pvoh(m), pvo_vlink) {
		/*
		 * See if we saved the bit off.  If so, return success.
		 */
		if (pvo->pvo_pte.lpte.pte_lo & ptebit) {
			UNLOCK_TABLE_RD();
			return (TRUE);
		}
	}

	/*
	 * No luck, now go through the hard part of looking at the PTEs
	 * themselves.  Sync so that any pending REF/CHG bits are flushed to
	 * the PTEs.
	 */
	powerpc_sync();
	LIST_FOREACH(pvo, vm_page_to_pvoh(m), pvo_vlink) {

		/*
		 * See if this pvo has a valid PTE.  If so, fetch the
		 * REF/CHG bits from the valid PTE.  If the appropriate
		 * ptebit is set, return success.
		 */
		PMAP_LOCK(pvo->pvo_pmap);
		pt = MOEA64_PVO_TO_PTE(mmu, pvo);
		if (pt != -1) {
			MOEA64_PTE_SYNCH(mmu, pt, &pvo->pvo_pte.lpte);
			if (pvo->pvo_pte.lpte.pte_lo & ptebit) {
				PMAP_UNLOCK(pvo->pvo_pmap);
				UNLOCK_TABLE_RD();
				return (TRUE);
			}
		}
		PMAP_UNLOCK(pvo->pvo_pmap);
	}

	UNLOCK_TABLE_RD();
	return (FALSE);
}

static u_int
moea64_clear_bit(mmu_t mmu, vm_page_t m, u_int64_t ptebit)
{
	u_int	count;
	struct	pvo_entry *pvo;
	uintptr_t pt;

	/*
	 * Sync so that any pending REF/CHG bits are flushed to the PTEs (so
	 * we can reset the right ones).  Note that since the pvo entries and
	 * list heads are accessed via BAT0 and are never placed in the page
	 * table, we don't have to worry about further accesses setting the
	 * REF/CHG bits.
	 */
	powerpc_sync();

	/*
	 * For each pvo entry, clear the pvo's ptebit.  If this pvo has a
	 * valid pte clear the ptebit from the valid pte.
	 */
	count = 0;
	LOCK_TABLE_RD();
	LIST_FOREACH(pvo, vm_page_to_pvoh(m), pvo_vlink) {
		PMAP_LOCK(pvo->pvo_pmap);
		pt = MOEA64_PVO_TO_PTE(mmu, pvo);
		if (pt != -1) {
			MOEA64_PTE_SYNCH(mmu, pt, &pvo->pvo_pte.lpte);
			if (pvo->pvo_pte.lpte.pte_lo & ptebit) {
				count++;
				MOEA64_PTE_CLEAR(mmu, pt, &pvo->pvo_pte.lpte,
				    pvo->pvo_vpn, ptebit);
			}
		}
		pvo->pvo_pte.lpte.pte_lo &= ~ptebit;
		PMAP_UNLOCK(pvo->pvo_pmap);
	}

	UNLOCK_TABLE_RD();
	return (count);
}

boolean_t
moea64_dev_direct_mapped(mmu_t mmu, vm_paddr_t pa, vm_size_t size)
{
	struct pvo_entry *pvo, key;
	vm_offset_t ppa;
	int error = 0;

	PMAP_LOCK(kernel_pmap);
	key.pvo_vaddr = ppa = pa & ~ADDR_POFF;
	for (pvo = RB_FIND(pvo_tree, &kernel_pmap->pmap_pvo, &key);
	    ppa < pa + size; ppa += PAGE_SIZE,
	    pvo = RB_NEXT(pvo_tree, &kernel_pmap->pmap_pvo, pvo)) {
		if (pvo == NULL ||
		    (pvo->pvo_pte.lpte.pte_lo & LPTE_RPGN) != ppa) {
			error = EFAULT;
			break;
		}
	}
	PMAP_UNLOCK(kernel_pmap);

	return (error);
}

/*
 * Map a set of physical memory pages into the kernel virtual
 * address space.  Return a pointer to where it is mapped.  This
 * routine is intended to be used for mapping device memory,
 * NOT real memory.
 */
void *
moea64_mapdev_attr(mmu_t mmu, vm_offset_t pa, vm_size_t size, vm_memattr_t ma)
{
	vm_offset_t va, tmpva, ppa, offset;

	ppa = trunc_page(pa);
	offset = pa & PAGE_MASK;
	size = roundup2(offset + size, PAGE_SIZE);

	va = kva_alloc(size);

	if (!va)
		panic("moea64_mapdev: Couldn't alloc kernel virtual memory");

	for (tmpva = va; size > 0;) {
		moea64_kenter_attr(mmu, tmpva, ppa, ma);
		size -= PAGE_SIZE;
		tmpva += PAGE_SIZE;
		ppa += PAGE_SIZE;
	}

	return ((void *)(va + offset));
}

void *
moea64_mapdev(mmu_t mmu, vm_paddr_t pa, vm_size_t size)
{

	return moea64_mapdev_attr(mmu, pa, size, VM_MEMATTR_DEFAULT);
}

void
moea64_unmapdev(mmu_t mmu, vm_offset_t va, vm_size_t size)
{
	vm_offset_t base, offset;

	base = trunc_page(va);
	offset = va & PAGE_MASK;
	size = roundup2(offset + size, PAGE_SIZE);

	kva_free(base, size);
}

void
moea64_sync_icache(mmu_t mmu, pmap_t pm, vm_offset_t va, vm_size_t sz)
{
	struct pvo_entry *pvo;
	vm_offset_t lim;
	vm_paddr_t pa;
	vm_size_t len;

	PMAP_LOCK(pm);
	while (sz > 0) {
		lim = round_page(va);
		len = MIN(lim - va, sz);
		pvo = moea64_pvo_find_va(pm, va & ~ADDR_POFF);
		if (pvo != NULL && !(pvo->pvo_pte.lpte.pte_lo & LPTE_I)) {
			pa = (pvo->pvo_pte.lpte.pte_lo & LPTE_RPGN) |
			    (va & ADDR_POFF);
			moea64_syncicache(mmu, pm, va, pa, len);
		}
		va += len;
		sz -= len;
	}
	PMAP_UNLOCK(pm);
}

vm_offset_t
moea64_dumpsys_map(mmu_t mmu, struct pmap_md *md, vm_size_t ofs,
    vm_size_t *sz)
{
	if (md->md_vaddr == ~0UL)
		return (md->md_paddr + ofs);
	else
		return (md->md_vaddr + ofs);
}

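/*
 * A chunk descriptor with md_vaddr == ~0UL denotes a physical chunk (as
 * produced by the non-minidump branch of moea64_scan_md() below), so the
 * dump offset is applied to md_paddr; otherwise the chunk is described by
 * kernel virtual addresses and md_vaddr is used instead.
 */
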
struct pmap_md *
moea64_scan_md(mmu_t mmu, struct pmap_md *prev)
{
	static struct pmap_md md;
	struct pvo_entry *pvo;
	vm_offset_t va;

	if (dumpsys_minidump) {
		md.md_paddr = ~0UL;	/* Minidumps use virtual addresses. */
		if (prev == NULL) {
			/* 1st: kernel .data and .bss. */
			md.md_index = 1;
			md.md_vaddr = trunc_page((uintptr_t)_etext);
			md.md_size = round_page((uintptr_t)_end) - md.md_vaddr;
			return (&md);
		}
		switch (prev->md_index) {
		case 1:
			/* 2nd: msgbuf and tables (see pmap_bootstrap()). */
			md.md_index = 2;
			md.md_vaddr = (vm_offset_t)msgbufp->msg_ptr;
			md.md_size = round_page(msgbufp->msg_size);
			break;
		case 2:
			/* 3rd: kernel VM. */
			va = prev->md_vaddr + prev->md_size;
			/* Find start of next chunk (from va). */
			while (va < virtual_end) {
				/* Don't dump the buffer cache. */
				if (va >= kmi.buffer_sva &&
				    va < kmi.buffer_eva) {
					va = kmi.buffer_eva;
					continue;
				}
				pvo = moea64_pvo_find_va(kernel_pmap,
				    va & ~ADDR_POFF);
				if (pvo != NULL &&
				    (pvo->pvo_pte.lpte.pte_hi & LPTE_VALID))
					break;
				va += PAGE_SIZE;
			}
			if (va < virtual_end) {
				md.md_vaddr = va;
				va += PAGE_SIZE;
				/* Find last page in chunk. */
				while (va < virtual_end) {
					/* Don't run into the buffer cache. */
					if (va == kmi.buffer_sva)
						break;
					pvo = moea64_pvo_find_va(kernel_pmap,
					    va & ~ADDR_POFF);
					if (pvo == NULL ||
					    !(pvo->pvo_pte.lpte.pte_hi & LPTE_VALID))
						break;
					va += PAGE_SIZE;
				}
				md.md_size = va - md.md_vaddr;
				break;
			}
			md.md_index = 3;
			/* FALLTHROUGH */
		default:
			return (NULL);
		}
	} else { /* Full (physical) dump: walk the memory regions. */
		if (prev == NULL) {
			/* first physical chunk. */
			md.md_paddr = pregions[0].mr_start;
			md.md_size = pregions[0].mr_size;
			md.md_vaddr = ~0UL;
			md.md_index = 1;
		} else if (md.md_index < pregions_sz) {
			md.md_paddr = pregions[md.md_index].mr_start;
			md.md_size = pregions[md.md_index].mr_size;
			md.md_vaddr = ~0UL;
			md.md_index++;
		} else {
			/* There's no next physical chunk. */
			return (NULL);
		}
	}

	return (&md);
}

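/*
 * The iteration protocol is cursor-style: a consumer first calls
 * moea64_scan_md(mmu, NULL) to get the initial chunk and then passes each
 * returned descriptor back in until NULL comes back, roughly:
 *
 *	for (md = moea64_scan_md(mmu, NULL); md != NULL;
 *	    md = moea64_scan_md(mmu, md))
 *		dump_chunk(md);
 *
 * where dump_chunk() stands in for whatever the dump code does with each
 * chunk; the name is illustrative only.
 */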