mmu_oea64.c revision 216383
1240616Sjimharris/*- 2240616Sjimharris * Copyright (c) 2001 The NetBSD Foundation, Inc. 3240616Sjimharris * All rights reserved. 4240616Sjimharris * 5240616Sjimharris * This code is derived from software contributed to The NetBSD Foundation 6240616Sjimharris * by Matt Thomas <matt@3am-software.com> of Allegro Networks, Inc. 7240616Sjimharris * 8240616Sjimharris * Redistribution and use in source and binary forms, with or without 9240616Sjimharris * modification, are permitted provided that the following conditions 10240616Sjimharris * are met: 11240616Sjimharris * 1. Redistributions of source code must retain the above copyright 12240616Sjimharris * notice, this list of conditions and the following disclaimer. 13240616Sjimharris * 2. Redistributions in binary form must reproduce the above copyright 14240616Sjimharris * notice, this list of conditions and the following disclaimer in the 15240616Sjimharris * documentation and/or other materials provided with the distribution. 16240616Sjimharris * 3. All advertising materials mentioning features or use of this software 17240616Sjimharris * must display the following acknowledgement: 18240616Sjimharris * This product includes software developed by the NetBSD 19240616Sjimharris * Foundation, Inc. and its contributors. 20240616Sjimharris * 4. Neither the name of The NetBSD Foundation nor the names of its 21240616Sjimharris * contributors may be used to endorse or promote products derived 22240616Sjimharris * from this software without specific prior written permission. 23240616Sjimharris * 24240616Sjimharris * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS 25240616Sjimharris * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED 26240616Sjimharris * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR 27240616Sjimharris * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS 28240616Sjimharris * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 29240616Sjimharris * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 30240616Sjimharris * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 31240616Sjimharris * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 32240616Sjimharris * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 33240616Sjimharris * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 34240616Sjimharris * POSSIBILITY OF SUCH DAMAGE. 35240616Sjimharris */ 36240616Sjimharris/*- 37240616Sjimharris * Copyright (C) 1995, 1996 Wolfgang Solfrank. 38240616Sjimharris * Copyright (C) 1995, 1996 TooLs GmbH. 39240616Sjimharris * All rights reserved. 40240616Sjimharris * 41240616Sjimharris * Redistribution and use in source and binary forms, with or without 42240616Sjimharris * modification, are permitted provided that the following conditions 43240616Sjimharris * are met: 44240616Sjimharris * 1. Redistributions of source code must retain the above copyright 45240616Sjimharris * notice, this list of conditions and the following disclaimer. 46240616Sjimharris * 2. Redistributions in binary form must reproduce the above copyright 47240616Sjimharris * notice, this list of conditions and the following disclaimer in the 48240616Sjimharris * documentation and/or other materials provided with the distribution. 49240616Sjimharris * 3. All advertising materials mentioning features or use of this software 50240616Sjimharris * must display the following acknowledgement: 51240616Sjimharris * This product includes software developed by TooLs GmbH. 52240616Sjimharris * 4. The name of TooLs GmbH may not be used to endorse or promote products 53240616Sjimharris * derived from this software without specific prior written permission. 54240616Sjimharris * 55240616Sjimharris * THIS SOFTWARE IS PROVIDED BY TOOLS GMBH ``AS IS'' AND ANY EXPRESS OR 56240616Sjimharris * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES 57240616Sjimharris * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 58240616Sjimharris * IN NO EVENT SHALL TOOLS GMBH BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 59240616Sjimharris * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, 60240616Sjimharris * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; 61240616Sjimharris * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, 62240616Sjimharris * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR 63240616Sjimharris * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF 64240616Sjimharris * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 65240616Sjimharris * 66240616Sjimharris * $NetBSD: pmap.c,v 1.28 2000/03/26 20:42:36 kleink Exp $ 67240616Sjimharris */ 68240616Sjimharris/*- 69240616Sjimharris * Copyright (C) 2001 Benno Rice. 70240616Sjimharris * All rights reserved. 71240616Sjimharris * 72240616Sjimharris * Redistribution and use in source and binary forms, with or without 73240616Sjimharris * modification, are permitted provided that the following conditions 74240616Sjimharris * are met: 75240616Sjimharris * 1. Redistributions of source code must retain the above copyright 76240616Sjimharris * notice, this list of conditions and the following disclaimer. 77240616Sjimharris * 2. Redistributions in binary form must reproduce the above copyright 78240616Sjimharris * notice, this list of conditions and the following disclaimer in the 79240616Sjimharris * documentation and/or other materials provided with the distribution. 80240616Sjimharris * 81240616Sjimharris * THIS SOFTWARE IS PROVIDED BY Benno Rice ``AS IS'' AND ANY EXPRESS OR 82240616Sjimharris * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES 83240616Sjimharris * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 84240616Sjimharris * IN NO EVENT SHALL TOOLS GMBH BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 85240616Sjimharris * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, 86240616Sjimharris * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; 87240616Sjimharris * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, 88240616Sjimharris * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR 89240616Sjimharris * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF 90240616Sjimharris * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 91240616Sjimharris */ 92240616Sjimharris 93240616Sjimharris#include <sys/cdefs.h> 94240616Sjimharris__FBSDID("$FreeBSD: head/sys/powerpc/aim/mmu_oea64.c 216383 2010-12-11 20:29:52Z nwhitehorn $"); 95240616Sjimharris 96240616Sjimharris/* 97240616Sjimharris * Manages physical address maps. 98240616Sjimharris * 99240616Sjimharris * In addition to hardware address maps, this module is called upon to 100240616Sjimharris * provide software-use-only maps which may or may not be stored in the 101240616Sjimharris * same form as hardware maps. These pseudo-maps are used to store 102240616Sjimharris * intermediate results from copy operations to and from address spaces. 103240616Sjimharris * 104240616Sjimharris * Since the information managed by this module is also stored by the 105240616Sjimharris * logical address mapping module, this module may throw away valid virtual 106240616Sjimharris * to physical mappings at almost any time. However, invalidations of 107240616Sjimharris * mappings must be done as requested. 108240616Sjimharris * 109240616Sjimharris * In order to cope with hardware architectures which make virtual to 110240616Sjimharris * physical map invalidates expensive, this module may delay invalidate 111240616Sjimharris * reduced protection operations until such time as they are actually 112240616Sjimharris * necessary. This module is given full information as to which processors 113240616Sjimharris * are currently using which maps, and to when physical maps must be made 114240616Sjimharris * correct. 115240616Sjimharris */ 116240616Sjimharris 117240616Sjimharris#include "opt_kstack_pages.h" 118240616Sjimharris 119240616Sjimharris#include <sys/param.h> 120240616Sjimharris#include <sys/kernel.h> 121#include <sys/ktr.h> 122#include <sys/lock.h> 123#include <sys/msgbuf.h> 124#include <sys/mutex.h> 125#include <sys/proc.h> 126#include <sys/sysctl.h> 127#include <sys/systm.h> 128#include <sys/vmmeter.h> 129 130#include <sys/kdb.h> 131 132#include <dev/ofw/openfirm.h> 133 134#include <vm/vm.h> 135#include <vm/vm_param.h> 136#include <vm/vm_kern.h> 137#include <vm/vm_page.h> 138#include <vm/vm_map.h> 139#include <vm/vm_object.h> 140#include <vm/vm_extern.h> 141#include <vm/vm_pageout.h> 142#include <vm/vm_pager.h> 143#include <vm/uma.h> 144 145#include <machine/_inttypes.h> 146#include <machine/cpu.h> 147#include <machine/platform.h> 148#include <machine/frame.h> 149#include <machine/md_var.h> 150#include <machine/psl.h> 151#include <machine/bat.h> 152#include <machine/hid.h> 153#include <machine/pte.h> 154#include <machine/sr.h> 155#include <machine/trap.h> 156#include <machine/mmuvar.h> 157 158#include "mmu_oea64.h" 159#include "mmu_if.h" 160#include "moea64_if.h" 161 162void moea64_release_vsid(uint64_t vsid); 163uintptr_t moea64_get_unique_vsid(void); 164 165#define DISABLE_TRANS(msr) msr = mfmsr(); mtmsr(msr & ~PSL_DR); isync() 166#define ENABLE_TRANS(msr) mtmsr(msr); isync() 167 168#define VSID_MAKE(sr, hash) ((sr) | (((hash) & 0xfffff) << 4)) 169#define VSID_TO_HASH(vsid) (((vsid) >> 4) & 0xfffff) 170#define VSID_HASH_MASK 0x0000007fffffffffULL 171 172#define MOEA_PVO_CHECK(pvo) 173 174#define LOCK_TABLE() mtx_lock(&moea64_table_mutex) 175#define UNLOCK_TABLE() mtx_unlock(&moea64_table_mutex); 176#define ASSERT_TABLE_LOCK() mtx_assert(&moea64_table_mutex, MA_OWNED) 177 178struct ofw_map { 179 cell_t om_va; 180 cell_t om_len; 181 cell_t om_pa_hi; 182 cell_t om_pa_lo; 183 cell_t om_mode; 184}; 185 186/* 187 * Map of physical memory regions. 188 */ 189static struct mem_region *regions; 190static struct mem_region *pregions; 191static u_int phys_avail_count; 192static int regions_sz, pregions_sz; 193 194extern void bs_remap_earlyboot(void); 195 196/* 197 * Lock for the pteg and pvo tables. 198 */ 199struct mtx moea64_table_mutex; 200struct mtx moea64_slb_mutex; 201 202/* 203 * PTEG data. 204 */ 205u_int moea64_pteg_count; 206u_int moea64_pteg_mask; 207 208/* 209 * PVO data. 210 */ 211struct pvo_head *moea64_pvo_table; /* pvo entries by pteg index */ 212struct pvo_head moea64_pvo_kunmanaged = /* list of unmanaged pages */ 213 LIST_HEAD_INITIALIZER(moea64_pvo_kunmanaged); 214 215uma_zone_t moea64_upvo_zone; /* zone for pvo entries for unmanaged pages */ 216uma_zone_t moea64_mpvo_zone; /* zone for pvo entries for managed pages */ 217 218#define BPVO_POOL_SIZE 327680 219static struct pvo_entry *moea64_bpvo_pool; 220static int moea64_bpvo_pool_index = 0; 221 222#define VSID_NBPW (sizeof(u_int32_t) * 8) 223#ifdef __powerpc64__ 224#define NVSIDS (NPMAPS * 16) 225#define VSID_HASHMASK 0xffffffffUL 226#else 227#define NVSIDS NPMAPS 228#define VSID_HASHMASK 0xfffffUL 229#endif 230static u_int moea64_vsid_bitmap[NVSIDS / VSID_NBPW]; 231 232static boolean_t moea64_initialized = FALSE; 233 234/* 235 * Statistics. 236 */ 237u_int moea64_pte_valid = 0; 238u_int moea64_pte_overflow = 0; 239u_int moea64_pvo_entries = 0; 240u_int moea64_pvo_enter_calls = 0; 241u_int moea64_pvo_remove_calls = 0; 242SYSCTL_INT(_machdep, OID_AUTO, moea64_pte_valid, CTLFLAG_RD, 243 &moea64_pte_valid, 0, ""); 244SYSCTL_INT(_machdep, OID_AUTO, moea64_pte_overflow, CTLFLAG_RD, 245 &moea64_pte_overflow, 0, ""); 246SYSCTL_INT(_machdep, OID_AUTO, moea64_pvo_entries, CTLFLAG_RD, 247 &moea64_pvo_entries, 0, ""); 248SYSCTL_INT(_machdep, OID_AUTO, moea64_pvo_enter_calls, CTLFLAG_RD, 249 &moea64_pvo_enter_calls, 0, ""); 250SYSCTL_INT(_machdep, OID_AUTO, moea64_pvo_remove_calls, CTLFLAG_RD, 251 &moea64_pvo_remove_calls, 0, ""); 252 253vm_offset_t moea64_scratchpage_va[2]; 254struct pvo_entry *moea64_scratchpage_pvo[2]; 255uintptr_t moea64_scratchpage_pte[2]; 256struct mtx moea64_scratchpage_mtx; 257 258uint64_t moea64_large_page_mask = 0; 259int moea64_large_page_size = 0; 260int moea64_large_page_shift = 0; 261 262/* 263 * PVO calls. 264 */ 265static int moea64_pvo_enter(mmu_t, pmap_t, uma_zone_t, struct pvo_head *, 266 vm_offset_t, vm_offset_t, uint64_t, int); 267static void moea64_pvo_remove(mmu_t, struct pvo_entry *); 268static struct pvo_entry *moea64_pvo_find_va(pmap_t, vm_offset_t); 269 270/* 271 * Utility routines. 272 */ 273static void moea64_enter_locked(mmu_t, pmap_t, vm_offset_t, 274 vm_page_t, vm_prot_t, boolean_t); 275static boolean_t moea64_query_bit(mmu_t, vm_page_t, u_int64_t); 276static u_int moea64_clear_bit(mmu_t, vm_page_t, u_int64_t); 277static void moea64_kremove(mmu_t, vm_offset_t); 278static void moea64_syncicache(mmu_t, pmap_t pmap, vm_offset_t va, 279 vm_offset_t pa, vm_size_t sz); 280 281/* 282 * Kernel MMU interface 283 */ 284void moea64_change_wiring(mmu_t, pmap_t, vm_offset_t, boolean_t); 285void moea64_clear_modify(mmu_t, vm_page_t); 286void moea64_clear_reference(mmu_t, vm_page_t); 287void moea64_copy_page(mmu_t, vm_page_t, vm_page_t); 288void moea64_enter(mmu_t, pmap_t, vm_offset_t, vm_page_t, vm_prot_t, boolean_t); 289void moea64_enter_object(mmu_t, pmap_t, vm_offset_t, vm_offset_t, vm_page_t, 290 vm_prot_t); 291void moea64_enter_quick(mmu_t, pmap_t, vm_offset_t, vm_page_t, vm_prot_t); 292vm_paddr_t moea64_extract(mmu_t, pmap_t, vm_offset_t); 293vm_page_t moea64_extract_and_hold(mmu_t, pmap_t, vm_offset_t, vm_prot_t); 294void moea64_init(mmu_t); 295boolean_t moea64_is_modified(mmu_t, vm_page_t); 296boolean_t moea64_is_prefaultable(mmu_t, pmap_t, vm_offset_t); 297boolean_t moea64_is_referenced(mmu_t, vm_page_t); 298boolean_t moea64_ts_referenced(mmu_t, vm_page_t); 299vm_offset_t moea64_map(mmu_t, vm_offset_t *, vm_offset_t, vm_offset_t, int); 300boolean_t moea64_page_exists_quick(mmu_t, pmap_t, vm_page_t); 301int moea64_page_wired_mappings(mmu_t, vm_page_t); 302void moea64_pinit(mmu_t, pmap_t); 303void moea64_pinit0(mmu_t, pmap_t); 304void moea64_protect(mmu_t, pmap_t, vm_offset_t, vm_offset_t, vm_prot_t); 305void moea64_qenter(mmu_t, vm_offset_t, vm_page_t *, int); 306void moea64_qremove(mmu_t, vm_offset_t, int); 307void moea64_release(mmu_t, pmap_t); 308void moea64_remove(mmu_t, pmap_t, vm_offset_t, vm_offset_t); 309void moea64_remove_all(mmu_t, vm_page_t); 310void moea64_remove_write(mmu_t, vm_page_t); 311void moea64_zero_page(mmu_t, vm_page_t); 312void moea64_zero_page_area(mmu_t, vm_page_t, int, int); 313void moea64_zero_page_idle(mmu_t, vm_page_t); 314void moea64_activate(mmu_t, struct thread *); 315void moea64_deactivate(mmu_t, struct thread *); 316void *moea64_mapdev(mmu_t, vm_offset_t, vm_size_t); 317void *moea64_mapdev_attr(mmu_t, vm_offset_t, vm_size_t, vm_memattr_t); 318void moea64_unmapdev(mmu_t, vm_offset_t, vm_size_t); 319vm_offset_t moea64_kextract(mmu_t, vm_offset_t); 320void moea64_page_set_memattr(mmu_t, vm_page_t m, vm_memattr_t ma); 321void moea64_kenter_attr(mmu_t, vm_offset_t, vm_offset_t, vm_memattr_t ma); 322void moea64_kenter(mmu_t, vm_offset_t, vm_offset_t); 323boolean_t moea64_dev_direct_mapped(mmu_t, vm_offset_t, vm_size_t); 324static void moea64_sync_icache(mmu_t, pmap_t, vm_offset_t, vm_size_t); 325 326static mmu_method_t moea64_methods[] = { 327 MMUMETHOD(mmu_change_wiring, moea64_change_wiring), 328 MMUMETHOD(mmu_clear_modify, moea64_clear_modify), 329 MMUMETHOD(mmu_clear_reference, moea64_clear_reference), 330 MMUMETHOD(mmu_copy_page, moea64_copy_page), 331 MMUMETHOD(mmu_enter, moea64_enter), 332 MMUMETHOD(mmu_enter_object, moea64_enter_object), 333 MMUMETHOD(mmu_enter_quick, moea64_enter_quick), 334 MMUMETHOD(mmu_extract, moea64_extract), 335 MMUMETHOD(mmu_extract_and_hold, moea64_extract_and_hold), 336 MMUMETHOD(mmu_init, moea64_init), 337 MMUMETHOD(mmu_is_modified, moea64_is_modified), 338 MMUMETHOD(mmu_is_prefaultable, moea64_is_prefaultable), 339 MMUMETHOD(mmu_is_referenced, moea64_is_referenced), 340 MMUMETHOD(mmu_ts_referenced, moea64_ts_referenced), 341 MMUMETHOD(mmu_map, moea64_map), 342 MMUMETHOD(mmu_page_exists_quick,moea64_page_exists_quick), 343 MMUMETHOD(mmu_page_wired_mappings,moea64_page_wired_mappings), 344 MMUMETHOD(mmu_pinit, moea64_pinit), 345 MMUMETHOD(mmu_pinit0, moea64_pinit0), 346 MMUMETHOD(mmu_protect, moea64_protect), 347 MMUMETHOD(mmu_qenter, moea64_qenter), 348 MMUMETHOD(mmu_qremove, moea64_qremove), 349 MMUMETHOD(mmu_release, moea64_release), 350 MMUMETHOD(mmu_remove, moea64_remove), 351 MMUMETHOD(mmu_remove_all, moea64_remove_all), 352 MMUMETHOD(mmu_remove_write, moea64_remove_write), 353 MMUMETHOD(mmu_sync_icache, moea64_sync_icache), 354 MMUMETHOD(mmu_zero_page, moea64_zero_page), 355 MMUMETHOD(mmu_zero_page_area, moea64_zero_page_area), 356 MMUMETHOD(mmu_zero_page_idle, moea64_zero_page_idle), 357 MMUMETHOD(mmu_activate, moea64_activate), 358 MMUMETHOD(mmu_deactivate, moea64_deactivate), 359 MMUMETHOD(mmu_page_set_memattr, moea64_page_set_memattr), 360 361 /* Internal interfaces */ 362 MMUMETHOD(mmu_mapdev, moea64_mapdev), 363 MMUMETHOD(mmu_mapdev_attr, moea64_mapdev_attr), 364 MMUMETHOD(mmu_unmapdev, moea64_unmapdev), 365 MMUMETHOD(mmu_kextract, moea64_kextract), 366 MMUMETHOD(mmu_kenter, moea64_kenter), 367 MMUMETHOD(mmu_kenter_attr, moea64_kenter_attr), 368 MMUMETHOD(mmu_dev_direct_mapped,moea64_dev_direct_mapped), 369 370 { 0, 0 } 371}; 372 373MMU_DEF(oea64_mmu, "mmu_oea64_base", moea64_methods, 0); 374 375static __inline u_int 376va_to_pteg(uint64_t vsid, vm_offset_t addr, int large) 377{ 378 uint64_t hash; 379 int shift; 380 381 shift = large ? moea64_large_page_shift : ADDR_PIDX_SHFT; 382 hash = (vsid & VSID_HASH_MASK) ^ (((uint64_t)addr & ADDR_PIDX) >> 383 shift); 384 return (hash & moea64_pteg_mask); 385} 386 387static __inline struct pvo_head * 388vm_page_to_pvoh(vm_page_t m) 389{ 390 391 return (&m->md.mdpg_pvoh); 392} 393 394static __inline void 395moea64_attr_clear(vm_page_t m, u_int64_t ptebit) 396{ 397 398 mtx_assert(&vm_page_queue_mtx, MA_OWNED); 399 m->md.mdpg_attrs &= ~ptebit; 400} 401 402static __inline u_int64_t 403moea64_attr_fetch(vm_page_t m) 404{ 405 406 return (m->md.mdpg_attrs); 407} 408 409static __inline void 410moea64_attr_save(vm_page_t m, u_int64_t ptebit) 411{ 412 413 mtx_assert(&vm_page_queue_mtx, MA_OWNED); 414 m->md.mdpg_attrs |= ptebit; 415} 416 417static __inline void 418moea64_pte_create(struct lpte *pt, uint64_t vsid, vm_offset_t va, 419 uint64_t pte_lo, int flags) 420{ 421 422 ASSERT_TABLE_LOCK(); 423 424 /* 425 * Construct a PTE. Default to IMB initially. Valid bit only gets 426 * set when the real pte is set in memory. 427 * 428 * Note: Don't set the valid bit for correct operation of tlb update. 429 */ 430 pt->pte_hi = (vsid << LPTE_VSID_SHIFT) | 431 (((uint64_t)(va & ADDR_PIDX) >> ADDR_API_SHFT64) & LPTE_API); 432 433 if (flags & PVO_LARGE) 434 pt->pte_hi |= LPTE_BIG; 435 436 pt->pte_lo = pte_lo; 437} 438 439static __inline uint64_t 440moea64_calc_wimg(vm_offset_t pa, vm_memattr_t ma) 441{ 442 uint64_t pte_lo; 443 int i; 444 445 if (ma != VM_MEMATTR_DEFAULT) { 446 switch (ma) { 447 case VM_MEMATTR_UNCACHEABLE: 448 return (LPTE_I | LPTE_G); 449 case VM_MEMATTR_WRITE_COMBINING: 450 case VM_MEMATTR_WRITE_BACK: 451 case VM_MEMATTR_PREFETCHABLE: 452 return (LPTE_I); 453 case VM_MEMATTR_WRITE_THROUGH: 454 return (LPTE_W | LPTE_M); 455 } 456 } 457 458 /* 459 * Assume the page is cache inhibited and access is guarded unless 460 * it's in our available memory array. 461 */ 462 pte_lo = LPTE_I | LPTE_G; 463 for (i = 0; i < pregions_sz; i++) { 464 if ((pa >= pregions[i].mr_start) && 465 (pa < (pregions[i].mr_start + pregions[i].mr_size))) { 466 pte_lo &= ~(LPTE_I | LPTE_G); 467 pte_lo |= LPTE_M; 468 break; 469 } 470 } 471 472 return pte_lo; 473} 474 475/* 476 * Quick sort callout for comparing memory regions. 477 */ 478static int mr_cmp(const void *a, const void *b); 479static int om_cmp(const void *a, const void *b); 480 481static int 482mr_cmp(const void *a, const void *b) 483{ 484 const struct mem_region *regiona; 485 const struct mem_region *regionb; 486 487 regiona = a; 488 regionb = b; 489 if (regiona->mr_start < regionb->mr_start) 490 return (-1); 491 else if (regiona->mr_start > regionb->mr_start) 492 return (1); 493 else 494 return (0); 495} 496 497static int 498om_cmp(const void *a, const void *b) 499{ 500 const struct ofw_map *mapa; 501 const struct ofw_map *mapb; 502 503 mapa = a; 504 mapb = b; 505 if (mapa->om_pa_hi < mapb->om_pa_hi) 506 return (-1); 507 else if (mapa->om_pa_hi > mapb->om_pa_hi) 508 return (1); 509 else if (mapa->om_pa_lo < mapb->om_pa_lo) 510 return (-1); 511 else if (mapa->om_pa_lo > mapb->om_pa_lo) 512 return (1); 513 else 514 return (0); 515} 516 517static void 518moea64_add_ofw_mappings(mmu_t mmup, phandle_t mmu, size_t sz) 519{ 520 struct ofw_map translations[sz/sizeof(struct ofw_map)]; 521 register_t msr; 522 vm_offset_t off; 523 vm_paddr_t pa_base; 524 int i, ofw_mappings; 525 526 bzero(translations, sz); 527 if (OF_getprop(mmu, "translations", translations, sz) == -1) 528 panic("moea64_bootstrap: can't get ofw translations"); 529 530 CTR0(KTR_PMAP, "moea64_add_ofw_mappings: translations"); 531 sz /= sizeof(*translations); 532 qsort(translations, sz, sizeof (*translations), om_cmp); 533 534 for (i = 0, ofw_mappings = 0; i < sz; i++) { 535 CTR3(KTR_PMAP, "translation: pa=%#x va=%#x len=%#x", 536 (uint32_t)(translations[i].om_pa_lo), translations[i].om_va, 537 translations[i].om_len); 538 539 if (translations[i].om_pa_lo % PAGE_SIZE) 540 panic("OFW translation not page-aligned!"); 541 542 pa_base = translations[i].om_pa_lo; 543 544 #ifdef __powerpc64__ 545 pa_base += (vm_offset_t)translations[i].om_pa_hi << 32; 546 #else 547 if (translations[i].om_pa_hi) 548 panic("OFW translations above 32-bit boundary!"); 549 #endif 550 551 /* Now enter the pages for this mapping */ 552 553 DISABLE_TRANS(msr); 554 for (off = 0; off < translations[i].om_len; off += PAGE_SIZE) { 555 if (moea64_pvo_find_va(kernel_pmap, 556 translations[i].om_va + off) != NULL) 557 continue; 558 559 moea64_kenter(mmup, translations[i].om_va + off, 560 pa_base + off); 561 562 ofw_mappings++; 563 } 564 ENABLE_TRANS(msr); 565 } 566} 567 568#ifdef __powerpc64__ 569static void 570moea64_probe_large_page(void) 571{ 572 uint16_t pvr = mfpvr() >> 16; 573 574 switch (pvr) { 575 case IBM970: 576 case IBM970FX: 577 case IBM970MP: 578 powerpc_sync(); isync(); 579 mtspr(SPR_HID4, mfspr(SPR_HID4) & ~HID4_970_DISABLE_LG_PG); 580 powerpc_sync(); isync(); 581 582 /* FALLTHROUGH */ 583 case IBMCELLBE: 584 moea64_large_page_size = 0x1000000; /* 16 MB */ 585 moea64_large_page_shift = 24; 586 break; 587 default: 588 moea64_large_page_size = 0; 589 } 590 591 moea64_large_page_mask = moea64_large_page_size - 1; 592} 593 594static void 595moea64_bootstrap_slb_prefault(vm_offset_t va, int large) 596{ 597 struct slb *cache; 598 struct slb entry; 599 uint64_t esid, slbe; 600 uint64_t i; 601 602 cache = PCPU_GET(slb); 603 esid = va >> ADDR_SR_SHFT; 604 slbe = (esid << SLBE_ESID_SHIFT) | SLBE_VALID; 605 606 for (i = 0; i < 64; i++) { 607 if (cache[i].slbe == (slbe | i)) 608 return; 609 } 610 611 entry.slbe = slbe; 612 entry.slbv = KERNEL_VSID(esid) << SLBV_VSID_SHIFT; 613 if (large) 614 entry.slbv |= SLBV_L; 615 616 slb_insert_kernel(entry.slbe, entry.slbv); 617} 618#endif 619 620static void 621moea64_setup_direct_map(mmu_t mmup, vm_offset_t kernelstart, 622 vm_offset_t kernelend) 623{ 624 register_t msr; 625 vm_paddr_t pa; 626 vm_offset_t size, off; 627 uint64_t pte_lo; 628 int i; 629 630 if (moea64_large_page_size == 0) 631 hw_direct_map = 0; 632 633 DISABLE_TRANS(msr); 634 if (hw_direct_map) { 635 PMAP_LOCK(kernel_pmap); 636 for (i = 0; i < pregions_sz; i++) { 637 for (pa = pregions[i].mr_start; pa < pregions[i].mr_start + 638 pregions[i].mr_size; pa += moea64_large_page_size) { 639 pte_lo = LPTE_M; 640 641 /* 642 * Set memory access as guarded if prefetch within 643 * the page could exit the available physmem area. 644 */ 645 if (pa & moea64_large_page_mask) { 646 pa &= moea64_large_page_mask; 647 pte_lo |= LPTE_G; 648 } 649 if (pa + moea64_large_page_size > 650 pregions[i].mr_start + pregions[i].mr_size) 651 pte_lo |= LPTE_G; 652 653 moea64_pvo_enter(mmup, kernel_pmap, moea64_upvo_zone, 654 &moea64_pvo_kunmanaged, pa, pa, 655 pte_lo, PVO_WIRED | PVO_LARGE | 656 VM_PROT_EXECUTE); 657 } 658 } 659 PMAP_UNLOCK(kernel_pmap); 660 } else { 661 size = sizeof(struct pvo_head) * moea64_pteg_count; 662 off = (vm_offset_t)(moea64_pvo_table); 663 for (pa = off; pa < off + size; pa += PAGE_SIZE) 664 moea64_kenter(mmup, pa, pa); 665 size = BPVO_POOL_SIZE*sizeof(struct pvo_entry); 666 off = (vm_offset_t)(moea64_bpvo_pool); 667 for (pa = off; pa < off + size; pa += PAGE_SIZE) 668 moea64_kenter(mmup, pa, pa); 669 670 /* 671 * Map certain important things, like ourselves. 672 * 673 * NOTE: We do not map the exception vector space. That code is 674 * used only in real mode, and leaving it unmapped allows us to 675 * catch NULL pointer deferences, instead of making NULL a valid 676 * address. 677 */ 678 679 for (pa = kernelstart & ~PAGE_MASK; pa < kernelend; 680 pa += PAGE_SIZE) 681 moea64_kenter(mmup, pa, pa); 682 } 683 ENABLE_TRANS(msr); 684} 685 686void 687moea64_early_bootstrap(mmu_t mmup, vm_offset_t kernelstart, vm_offset_t kernelend) 688{ 689 int i, j; 690 vm_size_t physsz, hwphyssz; 691 692#ifndef __powerpc64__ 693 /* We don't have a direct map since there is no BAT */ 694 hw_direct_map = 0; 695 696 /* Make sure battable is zero, since we have no BAT */ 697 for (i = 0; i < 16; i++) { 698 battable[i].batu = 0; 699 battable[i].batl = 0; 700 } 701#else 702 moea64_probe_large_page(); 703 704 /* Use a direct map if we have large page support */ 705 if (moea64_large_page_size > 0) 706 hw_direct_map = 1; 707 else 708 hw_direct_map = 0; 709#endif 710 711 /* Get physical memory regions from firmware */ 712 mem_regions(&pregions, &pregions_sz, ®ions, ®ions_sz); 713 CTR0(KTR_PMAP, "moea64_bootstrap: physical memory"); 714 715 qsort(pregions, pregions_sz, sizeof(*pregions), mr_cmp); 716 if (sizeof(phys_avail)/sizeof(phys_avail[0]) < regions_sz) 717 panic("moea64_bootstrap: phys_avail too small"); 718 qsort(regions, regions_sz, sizeof(*regions), mr_cmp); 719 phys_avail_count = 0; 720 physsz = 0; 721 hwphyssz = 0; 722 TUNABLE_ULONG_FETCH("hw.physmem", (u_long *) &hwphyssz); 723 for (i = 0, j = 0; i < regions_sz; i++, j += 2) { 724 CTR3(KTR_PMAP, "region: %#x - %#x (%#x)", regions[i].mr_start, 725 regions[i].mr_start + regions[i].mr_size, 726 regions[i].mr_size); 727 if (hwphyssz != 0 && 728 (physsz + regions[i].mr_size) >= hwphyssz) { 729 if (physsz < hwphyssz) { 730 phys_avail[j] = regions[i].mr_start; 731 phys_avail[j + 1] = regions[i].mr_start + 732 hwphyssz - physsz; 733 physsz = hwphyssz; 734 phys_avail_count++; 735 } 736 break; 737 } 738 phys_avail[j] = regions[i].mr_start; 739 phys_avail[j + 1] = regions[i].mr_start + regions[i].mr_size; 740 phys_avail_count++; 741 physsz += regions[i].mr_size; 742 } 743 744 /* Check for overlap with the kernel and exception vectors */ 745 for (j = 0; j < 2*phys_avail_count; j+=2) { 746 if (phys_avail[j] < EXC_LAST) 747 phys_avail[j] += EXC_LAST; 748 749 if (kernelstart >= phys_avail[j] && 750 kernelstart < phys_avail[j+1]) { 751 if (kernelend < phys_avail[j+1]) { 752 phys_avail[2*phys_avail_count] = 753 (kernelend & ~PAGE_MASK) + PAGE_SIZE; 754 phys_avail[2*phys_avail_count + 1] = 755 phys_avail[j+1]; 756 phys_avail_count++; 757 } 758 759 phys_avail[j+1] = kernelstart & ~PAGE_MASK; 760 } 761 762 if (kernelend >= phys_avail[j] && 763 kernelend < phys_avail[j+1]) { 764 if (kernelstart > phys_avail[j]) { 765 phys_avail[2*phys_avail_count] = phys_avail[j]; 766 phys_avail[2*phys_avail_count + 1] = 767 kernelstart & ~PAGE_MASK; 768 phys_avail_count++; 769 } 770 771 phys_avail[j] = (kernelend & ~PAGE_MASK) + PAGE_SIZE; 772 } 773 } 774 775 physmem = btoc(physsz); 776 777#ifdef PTEGCOUNT 778 moea64_pteg_count = PTEGCOUNT; 779#else 780 moea64_pteg_count = 0x1000; 781 782 while (moea64_pteg_count < physmem) 783 moea64_pteg_count <<= 1; 784 785 moea64_pteg_count >>= 1; 786#endif /* PTEGCOUNT */ 787} 788 789void 790moea64_mid_bootstrap(mmu_t mmup, vm_offset_t kernelstart, vm_offset_t kernelend) 791{ 792 vm_size_t size; 793 register_t msr; 794 int i; 795 796 /* 797 * Set PTEG mask 798 */ 799 moea64_pteg_mask = moea64_pteg_count - 1; 800 801 /* 802 * Allocate pv/overflow lists. 803 */ 804 size = sizeof(struct pvo_head) * moea64_pteg_count; 805 806 moea64_pvo_table = (struct pvo_head *)moea64_bootstrap_alloc(size, 807 PAGE_SIZE); 808 CTR1(KTR_PMAP, "moea64_bootstrap: PVO table at %p", moea64_pvo_table); 809 810 DISABLE_TRANS(msr); 811 for (i = 0; i < moea64_pteg_count; i++) 812 LIST_INIT(&moea64_pvo_table[i]); 813 ENABLE_TRANS(msr); 814 815 /* 816 * Initialize the lock that synchronizes access to the pteg and pvo 817 * tables. 818 */ 819 mtx_init(&moea64_table_mutex, "pmap table", NULL, MTX_DEF | 820 MTX_RECURSE); 821 mtx_init(&moea64_slb_mutex, "SLB table", NULL, MTX_DEF); 822 823 /* 824 * Initialise the unmanaged pvo pool. 825 */ 826 moea64_bpvo_pool = (struct pvo_entry *)moea64_bootstrap_alloc( 827 BPVO_POOL_SIZE*sizeof(struct pvo_entry), 0); 828 moea64_bpvo_pool_index = 0; 829 830 /* 831 * Make sure kernel vsid is allocated as well as VSID 0. 832 */ 833 #ifndef __powerpc64__ 834 moea64_vsid_bitmap[(KERNEL_VSIDBITS & (NVSIDS - 1)) / VSID_NBPW] 835 |= 1 << (KERNEL_VSIDBITS % VSID_NBPW); 836 moea64_vsid_bitmap[0] |= 1; 837 #endif 838 839 /* 840 * Initialize the kernel pmap (which is statically allocated). 841 */ 842 #ifdef __powerpc64__ 843 for (i = 0; i < 64; i++) { 844 pcpup->pc_slb[i].slbv = 0; 845 pcpup->pc_slb[i].slbe = 0; 846 } 847 #else 848 for (i = 0; i < 16; i++) 849 kernel_pmap->pm_sr[i] = EMPTY_SEGMENT + i; 850 #endif 851 852 kernel_pmap->pmap_phys = kernel_pmap; 853 kernel_pmap->pm_active = ~0; 854 855 PMAP_LOCK_INIT(kernel_pmap); 856 857 /* 858 * Now map in all the other buffers we allocated earlier 859 */ 860 861 moea64_setup_direct_map(mmup, kernelstart, kernelend); 862} 863 864void 865moea64_late_bootstrap(mmu_t mmup, vm_offset_t kernelstart, vm_offset_t kernelend) 866{ 867 ihandle_t mmui; 868 phandle_t chosen; 869 phandle_t mmu; 870 size_t sz; 871 int i; 872 vm_offset_t pa, va; 873 void *dpcpu; 874 875 /* 876 * Set up the Open Firmware pmap and add its mappings if not in real 877 * mode. 878 */ 879 880 chosen = OF_finddevice("/chosen"); 881 if (chosen != -1 && OF_getprop(chosen, "mmu", &mmui, 4) != -1) { 882 mmu = OF_instance_to_package(mmui); 883 if (mmu == -1 || (sz = OF_getproplen(mmu, "translations")) == -1) 884 sz = 0; 885 if (sz > 6144 /* tmpstksz - 2 KB headroom */) 886 panic("moea64_bootstrap: too many ofw translations"); 887 888 if (sz > 0) 889 moea64_add_ofw_mappings(mmup, mmu, sz); 890 } 891 892 /* 893 * Calculate the last available physical address. 894 */ 895 for (i = 0; phys_avail[i + 2] != 0; i += 2) 896 ; 897 Maxmem = powerpc_btop(phys_avail[i + 1]); 898 899 /* 900 * Initialize MMU and remap early physical mappings 901 */ 902 MMU_CPU_BOOTSTRAP(mmup,0); 903 mtmsr(mfmsr() | PSL_DR | PSL_IR); isync(); 904 pmap_bootstrapped++; 905 bs_remap_earlyboot(); 906 907 /* 908 * Set the start and end of kva. 909 */ 910 virtual_avail = VM_MIN_KERNEL_ADDRESS; 911 virtual_end = VM_MAX_SAFE_KERNEL_ADDRESS; 912 913 /* 914 * Map the entire KVA range into the SLB. We must not fault there. 915 */ 916 #ifdef __powerpc64__ 917 for (va = virtual_avail; va < virtual_end; va += SEGMENT_LENGTH) 918 moea64_bootstrap_slb_prefault(va, 0); 919 #endif 920 921 /* 922 * Figure out how far we can extend virtual_end into segment 16 923 * without running into existing mappings. Segment 16 is guaranteed 924 * to contain neither RAM nor devices (at least on Apple hardware), 925 * but will generally contain some OFW mappings we should not 926 * step on. 927 */ 928 929 #ifndef __powerpc64__ /* KVA is in high memory on PPC64 */ 930 PMAP_LOCK(kernel_pmap); 931 while (virtual_end < VM_MAX_KERNEL_ADDRESS && 932 moea64_pvo_find_va(kernel_pmap, virtual_end+1) == NULL) 933 virtual_end += PAGE_SIZE; 934 PMAP_UNLOCK(kernel_pmap); 935 #endif 936 937 /* 938 * Allocate a kernel stack with a guard page for thread0 and map it 939 * into the kernel page map. 940 */ 941 pa = moea64_bootstrap_alloc(KSTACK_PAGES * PAGE_SIZE, PAGE_SIZE); 942 va = virtual_avail + KSTACK_GUARD_PAGES * PAGE_SIZE; 943 virtual_avail = va + KSTACK_PAGES * PAGE_SIZE; 944 CTR2(KTR_PMAP, "moea_bootstrap: kstack0 at %#x (%#x)", pa, va); 945 thread0.td_kstack = va; 946 thread0.td_kstack_pages = KSTACK_PAGES; 947 for (i = 0; i < KSTACK_PAGES; i++) { 948 moea64_kenter(mmup, va, pa); 949 pa += PAGE_SIZE; 950 va += PAGE_SIZE; 951 } 952 953 /* 954 * Allocate virtual address space for the message buffer. 955 */ 956 pa = msgbuf_phys = moea64_bootstrap_alloc(MSGBUF_SIZE, PAGE_SIZE); 957 msgbufp = (struct msgbuf *)virtual_avail; 958 va = virtual_avail; 959 virtual_avail += round_page(MSGBUF_SIZE); 960 while (va < virtual_avail) { 961 moea64_kenter(mmup, va, pa); 962 pa += PAGE_SIZE; 963 va += PAGE_SIZE; 964 } 965 966 /* 967 * Allocate virtual address space for the dynamic percpu area. 968 */ 969 pa = moea64_bootstrap_alloc(DPCPU_SIZE, PAGE_SIZE); 970 dpcpu = (void *)virtual_avail; 971 va = virtual_avail; 972 virtual_avail += DPCPU_SIZE; 973 while (va < virtual_avail) { 974 moea64_kenter(mmup, va, pa); 975 pa += PAGE_SIZE; 976 va += PAGE_SIZE; 977 } 978 dpcpu_init(dpcpu, 0); 979 980 /* 981 * Allocate some things for page zeroing. We put this directly 982 * in the page table, marked with LPTE_LOCKED, to avoid any 983 * of the PVO book-keeping or other parts of the VM system 984 * from even knowing that this hack exists. 985 */ 986 987 if (!hw_direct_map) { 988 mtx_init(&moea64_scratchpage_mtx, "pvo zero page", NULL, 989 MTX_DEF); 990 for (i = 0; i < 2; i++) { 991 moea64_scratchpage_va[i] = (virtual_end+1) - PAGE_SIZE; 992 virtual_end -= PAGE_SIZE; 993 994 moea64_kenter(mmup, moea64_scratchpage_va[i], 0); 995 996 moea64_scratchpage_pvo[i] = moea64_pvo_find_va( 997 kernel_pmap, (vm_offset_t)moea64_scratchpage_va[i]); 998 LOCK_TABLE(); 999 moea64_scratchpage_pte[i] = MOEA64_PVO_TO_PTE( 1000 mmup, moea64_scratchpage_pvo[i]); 1001 moea64_scratchpage_pvo[i]->pvo_pte.lpte.pte_hi 1002 |= LPTE_LOCKED; 1003 MOEA64_PTE_CHANGE(mmup, moea64_scratchpage_pte[i], 1004 &moea64_scratchpage_pvo[i]->pvo_pte.lpte, 1005 moea64_scratchpage_pvo[i]->pvo_vpn); 1006 UNLOCK_TABLE(); 1007 } 1008 } 1009} 1010 1011/* 1012 * Activate a user pmap. The pmap must be activated before its address 1013 * space can be accessed in any way. 1014 */ 1015void 1016moea64_activate(mmu_t mmu, struct thread *td) 1017{ 1018 pmap_t pm; 1019 1020 pm = &td->td_proc->p_vmspace->vm_pmap; 1021 pm->pm_active |= PCPU_GET(cpumask); 1022 1023 #ifdef __powerpc64__ 1024 PCPU_SET(userslb, pm->pm_slb); 1025 #else 1026 PCPU_SET(curpmap, pm->pmap_phys); 1027 #endif 1028} 1029 1030void 1031moea64_deactivate(mmu_t mmu, struct thread *td) 1032{ 1033 pmap_t pm; 1034 1035 pm = &td->td_proc->p_vmspace->vm_pmap; 1036 pm->pm_active &= ~(PCPU_GET(cpumask)); 1037 #ifdef __powerpc64__ 1038 PCPU_SET(userslb, NULL); 1039 #else 1040 PCPU_SET(curpmap, NULL); 1041 #endif 1042} 1043 1044void 1045moea64_change_wiring(mmu_t mmu, pmap_t pm, vm_offset_t va, boolean_t wired) 1046{ 1047 struct pvo_entry *pvo; 1048 uintptr_t pt; 1049 uint64_t vsid; 1050 int i, ptegidx; 1051 1052 PMAP_LOCK(pm); 1053 pvo = moea64_pvo_find_va(pm, va & ~ADDR_POFF); 1054 1055 if (pvo != NULL) { 1056 LOCK_TABLE(); 1057 pt = MOEA64_PVO_TO_PTE(mmu, pvo); 1058 1059 if (wired) { 1060 if ((pvo->pvo_vaddr & PVO_WIRED) == 0) 1061 pm->pm_stats.wired_count++; 1062 pvo->pvo_vaddr |= PVO_WIRED; 1063 pvo->pvo_pte.lpte.pte_hi |= LPTE_WIRED; 1064 } else { 1065 if ((pvo->pvo_vaddr & PVO_WIRED) != 0) 1066 pm->pm_stats.wired_count--; 1067 pvo->pvo_vaddr &= ~PVO_WIRED; 1068 pvo->pvo_pte.lpte.pte_hi &= ~LPTE_WIRED; 1069 } 1070 1071 if (pt != -1) { 1072 /* Update wiring flag in page table. */ 1073 MOEA64_PTE_CHANGE(mmu, pt, &pvo->pvo_pte.lpte, 1074 pvo->pvo_vpn); 1075 } else if (wired) { 1076 /* 1077 * If we are wiring the page, and it wasn't in the 1078 * page table before, add it. 1079 */ 1080 vsid = PVO_VSID(pvo); 1081 ptegidx = va_to_pteg(vsid, PVO_VADDR(pvo), 1082 pvo->pvo_vaddr & PVO_LARGE); 1083 1084 i = MOEA64_PTE_INSERT(mmu, ptegidx, &pvo->pvo_pte.lpte); 1085 1086 if (i >= 0) { 1087 PVO_PTEGIDX_CLR(pvo); 1088 PVO_PTEGIDX_SET(pvo, i); 1089 } 1090 } 1091 1092 UNLOCK_TABLE(); 1093 } 1094 PMAP_UNLOCK(pm); 1095} 1096 1097/* 1098 * This goes through and sets the physical address of our 1099 * special scratch PTE to the PA we want to zero or copy. Because 1100 * of locking issues (this can get called in pvo_enter() by 1101 * the UMA allocator), we can't use most other utility functions here 1102 */ 1103 1104static __inline 1105void moea64_set_scratchpage_pa(mmu_t mmup, int which, vm_offset_t pa) { 1106 1107 KASSERT(!hw_direct_map, ("Using OEA64 scratchpage with a direct map!")); 1108 mtx_assert(&moea64_scratchpage_mtx, MA_OWNED); 1109 1110 moea64_scratchpage_pvo[which]->pvo_pte.lpte.pte_lo &= 1111 ~(LPTE_WIMG | LPTE_RPGN); 1112 moea64_scratchpage_pvo[which]->pvo_pte.lpte.pte_lo |= 1113 moea64_calc_wimg(pa, VM_MEMATTR_DEFAULT) | (uint64_t)pa; 1114 MOEA64_PTE_CHANGE(mmup, moea64_scratchpage_pte[which], 1115 &moea64_scratchpage_pvo[which]->pvo_pte.lpte, 1116 moea64_scratchpage_pvo[which]->pvo_vpn); 1117 isync(); 1118} 1119 1120void 1121moea64_copy_page(mmu_t mmu, vm_page_t msrc, vm_page_t mdst) 1122{ 1123 vm_offset_t dst; 1124 vm_offset_t src; 1125 1126 dst = VM_PAGE_TO_PHYS(mdst); 1127 src = VM_PAGE_TO_PHYS(msrc); 1128 1129 if (hw_direct_map) { 1130 kcopy((void *)src, (void *)dst, PAGE_SIZE); 1131 } else { 1132 mtx_lock(&moea64_scratchpage_mtx); 1133 1134 moea64_set_scratchpage_pa(mmu, 0, src); 1135 moea64_set_scratchpage_pa(mmu, 1, dst); 1136 1137 kcopy((void *)moea64_scratchpage_va[0], 1138 (void *)moea64_scratchpage_va[1], PAGE_SIZE); 1139 1140 mtx_unlock(&moea64_scratchpage_mtx); 1141 } 1142} 1143 1144void 1145moea64_zero_page_area(mmu_t mmu, vm_page_t m, int off, int size) 1146{ 1147 vm_offset_t pa = VM_PAGE_TO_PHYS(m); 1148 1149 if (!moea64_initialized) 1150 panic("moea64_zero_page: can't zero pa %#" PRIxPTR, pa); 1151 if (size + off > PAGE_SIZE) 1152 panic("moea64_zero_page: size + off > PAGE_SIZE"); 1153 1154 if (hw_direct_map) { 1155 bzero((caddr_t)pa + off, size); 1156 } else { 1157 mtx_lock(&moea64_scratchpage_mtx); 1158 moea64_set_scratchpage_pa(mmu, 0, pa); 1159 bzero((caddr_t)moea64_scratchpage_va[0] + off, size); 1160 mtx_unlock(&moea64_scratchpage_mtx); 1161 } 1162} 1163 1164/* 1165 * Zero a page of physical memory by temporarily mapping it 1166 */ 1167void 1168moea64_zero_page(mmu_t mmu, vm_page_t m) 1169{ 1170 vm_offset_t pa = VM_PAGE_TO_PHYS(m); 1171 vm_offset_t va, off; 1172 1173 if (!moea64_initialized) 1174 panic("moea64_zero_page: can't zero pa %#zx", pa); 1175 1176 if (!hw_direct_map) { 1177 mtx_lock(&moea64_scratchpage_mtx); 1178 1179 moea64_set_scratchpage_pa(mmu, 0, pa); 1180 va = moea64_scratchpage_va[0]; 1181 } else { 1182 va = pa; 1183 } 1184 1185 for (off = 0; off < PAGE_SIZE; off += cacheline_size) 1186 __asm __volatile("dcbz 0,%0" :: "r"(va + off)); 1187 1188 if (!hw_direct_map) 1189 mtx_unlock(&moea64_scratchpage_mtx); 1190} 1191 1192void 1193moea64_zero_page_idle(mmu_t mmu, vm_page_t m) 1194{ 1195 1196 moea64_zero_page(mmu, m); 1197} 1198 1199/* 1200 * Map the given physical page at the specified virtual address in the 1201 * target pmap with the protection requested. If specified the page 1202 * will be wired down. 1203 */ 1204void 1205moea64_enter(mmu_t mmu, pmap_t pmap, vm_offset_t va, vm_page_t m, 1206 vm_prot_t prot, boolean_t wired) 1207{ 1208 1209 vm_page_lock_queues(); 1210 PMAP_LOCK(pmap); 1211 moea64_enter_locked(mmu, pmap, va, m, prot, wired); 1212 vm_page_unlock_queues(); 1213 PMAP_UNLOCK(pmap); 1214} 1215 1216/* 1217 * Map the given physical page at the specified virtual address in the 1218 * target pmap with the protection requested. If specified the page 1219 * will be wired down. 1220 * 1221 * The page queues and pmap must be locked. 1222 */ 1223 1224static void 1225moea64_enter_locked(mmu_t mmu, pmap_t pmap, vm_offset_t va, vm_page_t m, 1226 vm_prot_t prot, boolean_t wired) 1227{ 1228 struct pvo_head *pvo_head; 1229 uma_zone_t zone; 1230 vm_page_t pg; 1231 uint64_t pte_lo; 1232 u_int pvo_flags; 1233 int error; 1234 1235 if (!moea64_initialized) { 1236 pvo_head = &moea64_pvo_kunmanaged; 1237 pg = NULL; 1238 zone = moea64_upvo_zone; 1239 pvo_flags = 0; 1240 } else { 1241 pvo_head = vm_page_to_pvoh(m); 1242 pg = m; 1243 zone = moea64_mpvo_zone; 1244 pvo_flags = PVO_MANAGED; 1245 } 1246 1247 if (pmap_bootstrapped) 1248 mtx_assert(&vm_page_queue_mtx, MA_OWNED); 1249 PMAP_LOCK_ASSERT(pmap, MA_OWNED); 1250 KASSERT((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) != 0 || 1251 (m->oflags & VPO_BUSY) != 0 || VM_OBJECT_LOCKED(m->object), 1252 ("moea64_enter_locked: page %p is not busy", m)); 1253 1254 /* XXX change the pvo head for fake pages */ 1255 if ((m->flags & PG_FICTITIOUS) == PG_FICTITIOUS) { 1256 pvo_flags &= ~PVO_MANAGED; 1257 pvo_head = &moea64_pvo_kunmanaged; 1258 zone = moea64_upvo_zone; 1259 } 1260 1261 pte_lo = moea64_calc_wimg(VM_PAGE_TO_PHYS(m), pmap_page_get_memattr(m)); 1262 1263 if (prot & VM_PROT_WRITE) { 1264 pte_lo |= LPTE_BW; 1265 if (pmap_bootstrapped && 1266 (m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) == 0) 1267 vm_page_flag_set(m, PG_WRITEABLE); 1268 } else 1269 pte_lo |= LPTE_BR; 1270 1271 if (prot & VM_PROT_EXECUTE) 1272 pvo_flags |= VM_PROT_EXECUTE; 1273 1274 if (wired) 1275 pvo_flags |= PVO_WIRED; 1276 1277 if ((m->flags & PG_FICTITIOUS) != 0) 1278 pvo_flags |= PVO_FAKE; 1279 1280 error = moea64_pvo_enter(mmu, pmap, zone, pvo_head, va, 1281 VM_PAGE_TO_PHYS(m), pte_lo, pvo_flags); 1282 1283 /* 1284 * Flush the page from the instruction cache if this page is 1285 * mapped executable and cacheable. 1286 */ 1287 if ((pte_lo & (LPTE_I | LPTE_G | LPTE_NOEXEC)) == 0) 1288 moea64_syncicache(mmu, pmap, va, VM_PAGE_TO_PHYS(m), PAGE_SIZE); 1289} 1290 1291static void 1292moea64_syncicache(mmu_t mmu, pmap_t pmap, vm_offset_t va, vm_offset_t pa, 1293 vm_size_t sz) 1294{ 1295 1296 /* 1297 * This is much trickier than on older systems because 1298 * we can't sync the icache on physical addresses directly 1299 * without a direct map. Instead we check a couple of cases 1300 * where the memory is already mapped in and, failing that, 1301 * use the same trick we use for page zeroing to create 1302 * a temporary mapping for this physical address. 1303 */ 1304 1305 if (!pmap_bootstrapped) { 1306 /* 1307 * If PMAP is not bootstrapped, we are likely to be 1308 * in real mode. 1309 */ 1310 __syncicache((void *)pa, sz); 1311 } else if (pmap == kernel_pmap) { 1312 __syncicache((void *)va, sz); 1313 } else if (hw_direct_map) { 1314 __syncicache((void *)pa, sz); 1315 } else { 1316 /* Use the scratch page to set up a temp mapping */ 1317 1318 mtx_lock(&moea64_scratchpage_mtx); 1319 1320 moea64_set_scratchpage_pa(mmu, 1, pa & ~ADDR_POFF); 1321 __syncicache((void *)(moea64_scratchpage_va[1] + 1322 (va & ADDR_POFF)), sz); 1323 1324 mtx_unlock(&moea64_scratchpage_mtx); 1325 } 1326} 1327 1328/* 1329 * Maps a sequence of resident pages belonging to the same object. 1330 * The sequence begins with the given page m_start. This page is 1331 * mapped at the given virtual address start. Each subsequent page is 1332 * mapped at a virtual address that is offset from start by the same 1333 * amount as the page is offset from m_start within the object. The 1334 * last page in the sequence is the page with the largest offset from 1335 * m_start that can be mapped at a virtual address less than the given 1336 * virtual address end. Not every virtual page between start and end 1337 * is mapped; only those for which a resident page exists with the 1338 * corresponding offset from m_start are mapped. 1339 */ 1340void 1341moea64_enter_object(mmu_t mmu, pmap_t pm, vm_offset_t start, vm_offset_t end, 1342 vm_page_t m_start, vm_prot_t prot) 1343{ 1344 vm_page_t m; 1345 vm_pindex_t diff, psize; 1346 1347 psize = atop(end - start); 1348 m = m_start; 1349 vm_page_lock_queues(); 1350 PMAP_LOCK(pm); 1351 while (m != NULL && (diff = m->pindex - m_start->pindex) < psize) { 1352 moea64_enter_locked(mmu, pm, start + ptoa(diff), m, prot & 1353 (VM_PROT_READ | VM_PROT_EXECUTE), FALSE); 1354 m = TAILQ_NEXT(m, listq); 1355 } 1356 vm_page_unlock_queues(); 1357 PMAP_UNLOCK(pm); 1358} 1359 1360void 1361moea64_enter_quick(mmu_t mmu, pmap_t pm, vm_offset_t va, vm_page_t m, 1362 vm_prot_t prot) 1363{ 1364 1365 vm_page_lock_queues(); 1366 PMAP_LOCK(pm); 1367 moea64_enter_locked(mmu, pm, va, m, 1368 prot & (VM_PROT_READ | VM_PROT_EXECUTE), FALSE); 1369 vm_page_unlock_queues(); 1370 PMAP_UNLOCK(pm); 1371} 1372 1373vm_paddr_t 1374moea64_extract(mmu_t mmu, pmap_t pm, vm_offset_t va) 1375{ 1376 struct pvo_entry *pvo; 1377 vm_paddr_t pa; 1378 1379 PMAP_LOCK(pm); 1380 pvo = moea64_pvo_find_va(pm, va); 1381 if (pvo == NULL) 1382 pa = 0; 1383 else 1384 pa = (pvo->pvo_pte.lpte.pte_lo & LPTE_RPGN) | 1385 (va - PVO_VADDR(pvo)); 1386 PMAP_UNLOCK(pm); 1387 return (pa); 1388} 1389 1390/* 1391 * Atomically extract and hold the physical page with the given 1392 * pmap and virtual address pair if that mapping permits the given 1393 * protection. 1394 */ 1395vm_page_t 1396moea64_extract_and_hold(mmu_t mmu, pmap_t pmap, vm_offset_t va, vm_prot_t prot) 1397{ 1398 struct pvo_entry *pvo; 1399 vm_page_t m; 1400 vm_paddr_t pa; 1401 1402 m = NULL; 1403 pa = 0; 1404 PMAP_LOCK(pmap); 1405retry: 1406 pvo = moea64_pvo_find_va(pmap, va & ~ADDR_POFF); 1407 if (pvo != NULL && (pvo->pvo_pte.lpte.pte_hi & LPTE_VALID) && 1408 ((pvo->pvo_pte.lpte.pte_lo & LPTE_PP) == LPTE_RW || 1409 (prot & VM_PROT_WRITE) == 0)) { 1410 if (vm_page_pa_tryrelock(pmap, 1411 pvo->pvo_pte.lpte.pte_lo & LPTE_RPGN, &pa)) 1412 goto retry; 1413 m = PHYS_TO_VM_PAGE(pvo->pvo_pte.lpte.pte_lo & LPTE_RPGN); 1414 vm_page_hold(m); 1415 } 1416 PA_UNLOCK_COND(pa); 1417 PMAP_UNLOCK(pmap); 1418 return (m); 1419} 1420 1421static mmu_t installed_mmu; 1422 1423static void * 1424moea64_uma_page_alloc(uma_zone_t zone, int bytes, u_int8_t *flags, int wait) 1425{ 1426 /* 1427 * This entire routine is a horrible hack to avoid bothering kmem 1428 * for new KVA addresses. Because this can get called from inside 1429 * kmem allocation routines, calling kmem for a new address here 1430 * can lead to multiply locking non-recursive mutexes. 1431 */ 1432 static vm_pindex_t color; 1433 vm_offset_t va; 1434 1435 vm_page_t m; 1436 int pflags, needed_lock; 1437 1438 *flags = UMA_SLAB_PRIV; 1439 needed_lock = !PMAP_LOCKED(kernel_pmap); 1440 1441 if (needed_lock) 1442 PMAP_LOCK(kernel_pmap); 1443 1444 if ((wait & (M_NOWAIT|M_USE_RESERVE)) == M_NOWAIT) 1445 pflags = VM_ALLOC_INTERRUPT | VM_ALLOC_WIRED; 1446 else 1447 pflags = VM_ALLOC_SYSTEM | VM_ALLOC_WIRED; 1448 if (wait & M_ZERO) 1449 pflags |= VM_ALLOC_ZERO; 1450 1451 for (;;) { 1452 m = vm_page_alloc(NULL, color++, pflags | VM_ALLOC_NOOBJ); 1453 if (m == NULL) { 1454 if (wait & M_NOWAIT) 1455 return (NULL); 1456 VM_WAIT; 1457 } else 1458 break; 1459 } 1460 1461 va = VM_PAGE_TO_PHYS(m); 1462 1463 moea64_pvo_enter(installed_mmu, kernel_pmap, moea64_upvo_zone, 1464 &moea64_pvo_kunmanaged, va, VM_PAGE_TO_PHYS(m), LPTE_M, 1465 PVO_WIRED | PVO_BOOTSTRAP); 1466 1467 if (needed_lock) 1468 PMAP_UNLOCK(kernel_pmap); 1469 1470 if ((wait & M_ZERO) && (m->flags & PG_ZERO) == 0) 1471 bzero((void *)va, PAGE_SIZE); 1472 1473 return (void *)va; 1474} 1475 1476void 1477moea64_init(mmu_t mmu) 1478{ 1479 1480 CTR0(KTR_PMAP, "moea64_init"); 1481 1482 moea64_upvo_zone = uma_zcreate("UPVO entry", sizeof (struct pvo_entry), 1483 NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 1484 UMA_ZONE_VM | UMA_ZONE_NOFREE); 1485 moea64_mpvo_zone = uma_zcreate("MPVO entry", sizeof(struct pvo_entry), 1486 NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 1487 UMA_ZONE_VM | UMA_ZONE_NOFREE); 1488 1489 if (!hw_direct_map) { 1490 installed_mmu = mmu; 1491 uma_zone_set_allocf(moea64_upvo_zone,moea64_uma_page_alloc); 1492 uma_zone_set_allocf(moea64_mpvo_zone,moea64_uma_page_alloc); 1493 } 1494 1495 moea64_initialized = TRUE; 1496} 1497 1498boolean_t 1499moea64_is_referenced(mmu_t mmu, vm_page_t m) 1500{ 1501 1502 KASSERT((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) == 0, 1503 ("moea64_is_referenced: page %p is not managed", m)); 1504 return (moea64_query_bit(mmu, m, PTE_REF)); 1505} 1506 1507boolean_t 1508moea64_is_modified(mmu_t mmu, vm_page_t m) 1509{ 1510 1511 KASSERT((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) == 0, 1512 ("moea64_is_modified: page %p is not managed", m)); 1513 1514 /* 1515 * If the page is not VPO_BUSY, then PG_WRITEABLE cannot be 1516 * concurrently set while the object is locked. Thus, if PG_WRITEABLE 1517 * is clear, no PTEs can have LPTE_CHG set. 1518 */ 1519 VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED); 1520 if ((m->oflags & VPO_BUSY) == 0 && 1521 (m->flags & PG_WRITEABLE) == 0) 1522 return (FALSE); 1523 return (moea64_query_bit(mmu, m, LPTE_CHG)); 1524} 1525 1526boolean_t 1527moea64_is_prefaultable(mmu_t mmu, pmap_t pmap, vm_offset_t va) 1528{ 1529 struct pvo_entry *pvo; 1530 boolean_t rv; 1531 1532 PMAP_LOCK(pmap); 1533 pvo = moea64_pvo_find_va(pmap, va & ~ADDR_POFF); 1534 rv = pvo == NULL || (pvo->pvo_pte.lpte.pte_hi & LPTE_VALID) == 0; 1535 PMAP_UNLOCK(pmap); 1536 return (rv); 1537} 1538 1539void 1540moea64_clear_reference(mmu_t mmu, vm_page_t m) 1541{ 1542 1543 KASSERT((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) == 0, 1544 ("moea64_clear_reference: page %p is not managed", m)); 1545 moea64_clear_bit(mmu, m, LPTE_REF); 1546} 1547 1548void 1549moea64_clear_modify(mmu_t mmu, vm_page_t m) 1550{ 1551 1552 KASSERT((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) == 0, 1553 ("moea64_clear_modify: page %p is not managed", m)); 1554 VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED); 1555 KASSERT((m->oflags & VPO_BUSY) == 0, 1556 ("moea64_clear_modify: page %p is busy", m)); 1557 1558 /* 1559 * If the page is not PG_WRITEABLE, then no PTEs can have LPTE_CHG 1560 * set. If the object containing the page is locked and the page is 1561 * not VPO_BUSY, then PG_WRITEABLE cannot be concurrently set. 1562 */ 1563 if ((m->flags & PG_WRITEABLE) == 0) 1564 return; 1565 moea64_clear_bit(mmu, m, LPTE_CHG); 1566} 1567 1568/* 1569 * Clear the write and modified bits in each of the given page's mappings. 1570 */ 1571void 1572moea64_remove_write(mmu_t mmu, vm_page_t m) 1573{ 1574 struct pvo_entry *pvo; 1575 uintptr_t pt; 1576 pmap_t pmap; 1577 uint64_t lo; 1578 1579 KASSERT((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) == 0, 1580 ("moea64_remove_write: page %p is not managed", m)); 1581 1582 /* 1583 * If the page is not VPO_BUSY, then PG_WRITEABLE cannot be set by 1584 * another thread while the object is locked. Thus, if PG_WRITEABLE 1585 * is clear, no page table entries need updating. 1586 */ 1587 VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED); 1588 if ((m->oflags & VPO_BUSY) == 0 && 1589 (m->flags & PG_WRITEABLE) == 0) 1590 return; 1591 vm_page_lock_queues(); 1592 lo = moea64_attr_fetch(m); 1593 powerpc_sync(); 1594 LIST_FOREACH(pvo, vm_page_to_pvoh(m), pvo_vlink) { 1595 pmap = pvo->pvo_pmap; 1596 PMAP_LOCK(pmap); 1597 LOCK_TABLE(); 1598 if ((pvo->pvo_pte.lpte.pte_lo & LPTE_PP) != LPTE_BR) { 1599 pt = MOEA64_PVO_TO_PTE(mmu, pvo); 1600 pvo->pvo_pte.lpte.pte_lo &= ~LPTE_PP; 1601 pvo->pvo_pte.lpte.pte_lo |= LPTE_BR; 1602 if (pt != -1) { 1603 MOEA64_PTE_SYNCH(mmu, pt, &pvo->pvo_pte.lpte); 1604 lo |= pvo->pvo_pte.lpte.pte_lo; 1605 pvo->pvo_pte.lpte.pte_lo &= ~LPTE_CHG; 1606 MOEA64_PTE_CHANGE(mmu, pt, 1607 &pvo->pvo_pte.lpte, pvo->pvo_vpn); 1608 if (pvo->pvo_pmap == kernel_pmap) 1609 isync(); 1610 } 1611 } 1612 UNLOCK_TABLE(); 1613 PMAP_UNLOCK(pmap); 1614 } 1615 if ((lo & LPTE_CHG) != 0) { 1616 moea64_attr_clear(m, LPTE_CHG); 1617 vm_page_dirty(m); 1618 } 1619 vm_page_flag_clear(m, PG_WRITEABLE); 1620 vm_page_unlock_queues(); 1621} 1622 1623/* 1624 * moea64_ts_referenced: 1625 * 1626 * Return a count of reference bits for a page, clearing those bits. 1627 * It is not necessary for every reference bit to be cleared, but it 1628 * is necessary that 0 only be returned when there are truly no 1629 * reference bits set. 1630 * 1631 * XXX: The exact number of bits to check and clear is a matter that 1632 * should be tested and standardized at some point in the future for 1633 * optimal aging of shared pages. 1634 */ 1635boolean_t 1636moea64_ts_referenced(mmu_t mmu, vm_page_t m) 1637{ 1638 1639 KASSERT((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) == 0, 1640 ("moea64_ts_referenced: page %p is not managed", m)); 1641 return (moea64_clear_bit(mmu, m, LPTE_REF)); 1642} 1643 1644/* 1645 * Modify the WIMG settings of all mappings for a page. 1646 */ 1647void 1648moea64_page_set_memattr(mmu_t mmu, vm_page_t m, vm_memattr_t ma) 1649{ 1650 struct pvo_entry *pvo; 1651 struct pvo_head *pvo_head; 1652 uintptr_t pt; 1653 pmap_t pmap; 1654 uint64_t lo; 1655 1656 if (m->flags & PG_FICTITIOUS) { 1657 m->md.mdpg_cache_attrs = ma; 1658 return; 1659 } 1660 1661 vm_page_lock_queues(); 1662 pvo_head = vm_page_to_pvoh(m); 1663 lo = moea64_calc_wimg(VM_PAGE_TO_PHYS(m), ma); 1664 LIST_FOREACH(pvo, pvo_head, pvo_vlink) { 1665 pmap = pvo->pvo_pmap; 1666 PMAP_LOCK(pmap); 1667 LOCK_TABLE(); 1668 pt = MOEA64_PVO_TO_PTE(mmu, pvo); 1669 pvo->pvo_pte.lpte.pte_lo &= ~LPTE_WIMG; 1670 pvo->pvo_pte.lpte.pte_lo |= lo; 1671 if (pt != -1) { 1672 MOEA64_PTE_CHANGE(mmu, pt, &pvo->pvo_pte.lpte, 1673 pvo->pvo_vpn); 1674 if (pvo->pvo_pmap == kernel_pmap) 1675 isync(); 1676 } 1677 UNLOCK_TABLE(); 1678 PMAP_UNLOCK(pmap); 1679 } 1680 m->md.mdpg_cache_attrs = ma; 1681 vm_page_unlock_queues(); 1682} 1683 1684/* 1685 * Map a wired page into kernel virtual address space. 1686 */ 1687void 1688moea64_kenter_attr(mmu_t mmu, vm_offset_t va, vm_offset_t pa, vm_memattr_t ma) 1689{ 1690 uint64_t pte_lo; 1691 int error; 1692 1693 pte_lo = moea64_calc_wimg(pa, ma); 1694 1695 PMAP_LOCK(kernel_pmap); 1696 error = moea64_pvo_enter(mmu, kernel_pmap, moea64_upvo_zone, 1697 &moea64_pvo_kunmanaged, va, pa, pte_lo, 1698 PVO_WIRED | VM_PROT_EXECUTE); 1699 1700 if (error != 0 && error != ENOENT) 1701 panic("moea64_kenter: failed to enter va %#zx pa %#zx: %d", va, 1702 pa, error); 1703 1704 /* 1705 * Flush the memory from the instruction cache. 1706 */ 1707 if ((pte_lo & (LPTE_I | LPTE_G)) == 0) 1708 __syncicache((void *)va, PAGE_SIZE); 1709 PMAP_UNLOCK(kernel_pmap); 1710} 1711 1712void 1713moea64_kenter(mmu_t mmu, vm_offset_t va, vm_offset_t pa) 1714{ 1715 1716 moea64_kenter_attr(mmu, va, pa, VM_MEMATTR_DEFAULT); 1717} 1718 1719/* 1720 * Extract the physical page address associated with the given kernel virtual 1721 * address. 1722 */ 1723vm_offset_t 1724moea64_kextract(mmu_t mmu, vm_offset_t va) 1725{ 1726 struct pvo_entry *pvo; 1727 vm_paddr_t pa; 1728 1729 /* 1730 * Shortcut the direct-mapped case when applicable. We never put 1731 * anything but 1:1 mappings below VM_MIN_KERNEL_ADDRESS. 1732 */ 1733 if (va < VM_MIN_KERNEL_ADDRESS) 1734 return (va); 1735 1736 PMAP_LOCK(kernel_pmap); 1737 pvo = moea64_pvo_find_va(kernel_pmap, va); 1738 KASSERT(pvo != NULL, ("moea64_kextract: no addr found for %#" PRIxPTR, 1739 va)); 1740 pa = (pvo->pvo_pte.lpte.pte_lo & LPTE_RPGN) + (va - PVO_VADDR(pvo)); 1741 PMAP_UNLOCK(kernel_pmap); 1742 return (pa); 1743} 1744 1745/* 1746 * Remove a wired page from kernel virtual address space. 1747 */ 1748void 1749moea64_kremove(mmu_t mmu, vm_offset_t va) 1750{ 1751 moea64_remove(mmu, kernel_pmap, va, va + PAGE_SIZE); 1752} 1753 1754/* 1755 * Map a range of physical addresses into kernel virtual address space. 1756 * 1757 * The value passed in *virt is a suggested virtual address for the mapping. 1758 * Architectures which can support a direct-mapped physical to virtual region 1759 * can return the appropriate address within that region, leaving '*virt' 1760 * unchanged. We cannot and therefore do not; *virt is updated with the 1761 * first usable address after the mapped region. 1762 */ 1763vm_offset_t 1764moea64_map(mmu_t mmu, vm_offset_t *virt, vm_offset_t pa_start, 1765 vm_offset_t pa_end, int prot) 1766{ 1767 vm_offset_t sva, va; 1768 1769 sva = *virt; 1770 va = sva; 1771 for (; pa_start < pa_end; pa_start += PAGE_SIZE, va += PAGE_SIZE) 1772 moea64_kenter(mmu, va, pa_start); 1773 *virt = va; 1774 1775 return (sva); 1776} 1777 1778/* 1779 * Returns true if the pmap's pv is one of the first 1780 * 16 pvs linked to from this page. This count may 1781 * be changed upwards or downwards in the future; it 1782 * is only necessary that true be returned for a small 1783 * subset of pmaps for proper page aging. 1784 */ 1785boolean_t 1786moea64_page_exists_quick(mmu_t mmu, pmap_t pmap, vm_page_t m) 1787{ 1788 int loops; 1789 struct pvo_entry *pvo; 1790 boolean_t rv; 1791 1792 KASSERT((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) == 0, 1793 ("moea64_page_exists_quick: page %p is not managed", m)); 1794 loops = 0; 1795 rv = FALSE; 1796 vm_page_lock_queues(); 1797 LIST_FOREACH(pvo, vm_page_to_pvoh(m), pvo_vlink) { 1798 if (pvo->pvo_pmap == pmap) { 1799 rv = TRUE; 1800 break; 1801 } 1802 if (++loops >= 16) 1803 break; 1804 } 1805 vm_page_unlock_queues(); 1806 return (rv); 1807} 1808 1809/* 1810 * Return the number of managed mappings to the given physical page 1811 * that are wired. 1812 */ 1813int 1814moea64_page_wired_mappings(mmu_t mmu, vm_page_t m) 1815{ 1816 struct pvo_entry *pvo; 1817 int count; 1818 1819 count = 0; 1820 if ((m->flags & PG_FICTITIOUS) != 0) 1821 return (count); 1822 vm_page_lock_queues(); 1823 LIST_FOREACH(pvo, vm_page_to_pvoh(m), pvo_vlink) 1824 if ((pvo->pvo_vaddr & PVO_WIRED) != 0) 1825 count++; 1826 vm_page_unlock_queues(); 1827 return (count); 1828} 1829 1830static uintptr_t moea64_vsidcontext; 1831 1832uintptr_t 1833moea64_get_unique_vsid(void) { 1834 u_int entropy; 1835 register_t hash; 1836 uint32_t mask; 1837 int i; 1838 1839 entropy = 0; 1840 __asm __volatile("mftb %0" : "=r"(entropy)); 1841 1842 mtx_lock(&moea64_slb_mutex); 1843 for (i = 0; i < NVSIDS; i += VSID_NBPW) { 1844 u_int n; 1845 1846 /* 1847 * Create a new value by mutiplying by a prime and adding in 1848 * entropy from the timebase register. This is to make the 1849 * VSID more random so that the PT hash function collides 1850 * less often. (Note that the prime casues gcc to do shifts 1851 * instead of a multiply.) 1852 */ 1853 moea64_vsidcontext = (moea64_vsidcontext * 0x1105) + entropy; 1854 hash = moea64_vsidcontext & (NVSIDS - 1); 1855 if (hash == 0) /* 0 is special, avoid it */ 1856 continue; 1857 n = hash >> 5; 1858 mask = 1 << (hash & (VSID_NBPW - 1)); 1859 hash = (moea64_vsidcontext & VSID_HASHMASK); 1860 if (moea64_vsid_bitmap[n] & mask) { /* collision? */ 1861 /* anything free in this bucket? */ 1862 if (moea64_vsid_bitmap[n] == 0xffffffff) { 1863 entropy = (moea64_vsidcontext >> 20); 1864 continue; 1865 } 1866 i = ffs(~moea64_vsid_bitmap[n]) - 1; 1867 mask = 1 << i; 1868 hash &= VSID_HASHMASK & ~(VSID_NBPW - 1); 1869 hash |= i; 1870 } 1871 KASSERT(!(moea64_vsid_bitmap[n] & mask), 1872 ("Allocating in-use VSID %#zx\n", hash)); 1873 moea64_vsid_bitmap[n] |= mask; 1874 mtx_unlock(&moea64_slb_mutex); 1875 return (hash); 1876 } 1877 1878 mtx_unlock(&moea64_slb_mutex); 1879 panic("%s: out of segments",__func__); 1880} 1881 1882#ifdef __powerpc64__ 1883void 1884moea64_pinit(mmu_t mmu, pmap_t pmap) 1885{ 1886 PMAP_LOCK_INIT(pmap); 1887 1888 pmap->pm_slb_tree_root = slb_alloc_tree(); 1889 pmap->pm_slb = slb_alloc_user_cache(); 1890 pmap->pm_slb_len = 0; 1891} 1892#else 1893void 1894moea64_pinit(mmu_t mmu, pmap_t pmap) 1895{ 1896 int i; 1897 uint32_t hash; 1898 1899 PMAP_LOCK_INIT(pmap); 1900 1901 if (pmap_bootstrapped) 1902 pmap->pmap_phys = (pmap_t)moea64_kextract(mmu, 1903 (vm_offset_t)pmap); 1904 else 1905 pmap->pmap_phys = pmap; 1906 1907 /* 1908 * Allocate some segment registers for this pmap. 1909 */ 1910 hash = moea64_get_unique_vsid(); 1911 1912 for (i = 0; i < 16; i++) 1913 pmap->pm_sr[i] = VSID_MAKE(i, hash); 1914 1915 KASSERT(pmap->pm_sr[0] != 0, ("moea64_pinit: pm_sr[0] = 0")); 1916} 1917#endif 1918 1919/* 1920 * Initialize the pmap associated with process 0. 1921 */ 1922void 1923moea64_pinit0(mmu_t mmu, pmap_t pm) 1924{ 1925 moea64_pinit(mmu, pm); 1926 bzero(&pm->pm_stats, sizeof(pm->pm_stats)); 1927} 1928 1929/* 1930 * Set the physical protection on the specified range of this map as requested. 1931 */ 1932void 1933moea64_protect(mmu_t mmu, pmap_t pm, vm_offset_t sva, vm_offset_t eva, 1934 vm_prot_t prot) 1935{ 1936 struct pvo_entry *pvo; 1937 uintptr_t pt; 1938 1939 CTR4(KTR_PMAP, "moea64_protect: pm=%p sva=%#x eva=%#x prot=%#x", pm, sva, 1940 eva, prot); 1941 1942 1943 KASSERT(pm == &curproc->p_vmspace->vm_pmap || pm == kernel_pmap, 1944 ("moea64_protect: non current pmap")); 1945 1946 if ((prot & VM_PROT_READ) == VM_PROT_NONE) { 1947 moea64_remove(mmu, pm, sva, eva); 1948 return; 1949 } 1950 1951 vm_page_lock_queues(); 1952 PMAP_LOCK(pm); 1953 for (; sva < eva; sva += PAGE_SIZE) { 1954 pvo = moea64_pvo_find_va(pm, sva); 1955 if (pvo == NULL) 1956 continue; 1957 1958 /* 1959 * Grab the PTE pointer before we diddle with the cached PTE 1960 * copy. 1961 */ 1962 LOCK_TABLE(); 1963 pt = MOEA64_PVO_TO_PTE(mmu, pvo); 1964 1965 /* 1966 * Change the protection of the page. 1967 */ 1968 pvo->pvo_pte.lpte.pte_lo &= ~LPTE_PP; 1969 pvo->pvo_pte.lpte.pte_lo |= LPTE_BR; 1970 pvo->pvo_pte.lpte.pte_lo &= ~LPTE_NOEXEC; 1971 if ((prot & VM_PROT_EXECUTE) == 0) 1972 pvo->pvo_pte.lpte.pte_lo |= LPTE_NOEXEC; 1973 1974 /* 1975 * If the PVO is in the page table, update that pte as well. 1976 */ 1977 if (pt != -1) { 1978 MOEA64_PTE_CHANGE(mmu, pt, &pvo->pvo_pte.lpte, 1979 pvo->pvo_vpn); 1980 if ((pvo->pvo_pte.lpte.pte_lo & 1981 (LPTE_I | LPTE_G | LPTE_NOEXEC)) == 0) { 1982 moea64_syncicache(mmu, pm, sva, 1983 pvo->pvo_pte.lpte.pte_lo & LPTE_RPGN, 1984 PAGE_SIZE); 1985 } 1986 } 1987 UNLOCK_TABLE(); 1988 } 1989 vm_page_unlock_queues(); 1990 PMAP_UNLOCK(pm); 1991} 1992 1993/* 1994 * Map a list of wired pages into kernel virtual address space. This is 1995 * intended for temporary mappings which do not need page modification or 1996 * references recorded. Existing mappings in the region are overwritten. 1997 */ 1998void 1999moea64_qenter(mmu_t mmu, vm_offset_t va, vm_page_t *m, int count) 2000{ 2001 while (count-- > 0) { 2002 moea64_kenter(mmu, va, VM_PAGE_TO_PHYS(*m)); 2003 va += PAGE_SIZE; 2004 m++; 2005 } 2006} 2007 2008/* 2009 * Remove page mappings from kernel virtual address space. Intended for 2010 * temporary mappings entered by moea64_qenter. 2011 */ 2012void 2013moea64_qremove(mmu_t mmu, vm_offset_t va, int count) 2014{ 2015 while (count-- > 0) { 2016 moea64_kremove(mmu, va); 2017 va += PAGE_SIZE; 2018 } 2019} 2020 2021void 2022moea64_release_vsid(uint64_t vsid) 2023{ 2024 int idx, mask; 2025 2026 mtx_lock(&moea64_slb_mutex); 2027 idx = vsid & (NVSIDS-1); 2028 mask = 1 << (idx % VSID_NBPW); 2029 idx /= VSID_NBPW; 2030 KASSERT(moea64_vsid_bitmap[idx] & mask, 2031 ("Freeing unallocated VSID %#jx", vsid)); 2032 moea64_vsid_bitmap[idx] &= ~mask; 2033 mtx_unlock(&moea64_slb_mutex); 2034} 2035 2036 2037void 2038moea64_release(mmu_t mmu, pmap_t pmap) 2039{ 2040 2041 /* 2042 * Free segment registers' VSIDs 2043 */ 2044 #ifdef __powerpc64__ 2045 slb_free_tree(pmap); 2046 slb_free_user_cache(pmap->pm_slb); 2047 #else 2048 KASSERT(pmap->pm_sr[0] != 0, ("moea64_release: pm_sr[0] = 0")); 2049 2050 moea64_release_vsid(VSID_TO_HASH(pmap->pm_sr[0])); 2051 #endif 2052 2053 PMAP_LOCK_DESTROY(pmap); 2054} 2055 2056/* 2057 * Remove the given range of addresses from the specified map. 2058 */ 2059void 2060moea64_remove(mmu_t mmu, pmap_t pm, vm_offset_t sva, vm_offset_t eva) 2061{ 2062 struct pvo_entry *pvo; 2063 2064 vm_page_lock_queues(); 2065 PMAP_LOCK(pm); 2066 for (; sva < eva; sva += PAGE_SIZE) { 2067 pvo = moea64_pvo_find_va(pm, sva); 2068 if (pvo != NULL) 2069 moea64_pvo_remove(mmu, pvo); 2070 } 2071 vm_page_unlock_queues(); 2072 PMAP_UNLOCK(pm); 2073} 2074 2075/* 2076 * Remove physical page from all pmaps in which it resides. moea64_pvo_remove() 2077 * will reflect changes in pte's back to the vm_page. 2078 */ 2079void 2080moea64_remove_all(mmu_t mmu, vm_page_t m) 2081{ 2082 struct pvo_head *pvo_head; 2083 struct pvo_entry *pvo, *next_pvo; 2084 pmap_t pmap; 2085 2086 vm_page_lock_queues(); 2087 pvo_head = vm_page_to_pvoh(m); 2088 for (pvo = LIST_FIRST(pvo_head); pvo != NULL; pvo = next_pvo) { 2089 next_pvo = LIST_NEXT(pvo, pvo_vlink); 2090 2091 MOEA_PVO_CHECK(pvo); /* sanity check */ 2092 pmap = pvo->pvo_pmap; 2093 PMAP_LOCK(pmap); 2094 moea64_pvo_remove(mmu, pvo); 2095 PMAP_UNLOCK(pmap); 2096 } 2097 if ((m->flags & PG_WRITEABLE) && moea64_is_modified(mmu, m)) { 2098 moea64_attr_clear(m, LPTE_CHG); 2099 vm_page_dirty(m); 2100 } 2101 vm_page_flag_clear(m, PG_WRITEABLE); 2102 vm_page_unlock_queues(); 2103} 2104 2105/* 2106 * Allocate a physical page of memory directly from the phys_avail map. 2107 * Can only be called from moea64_bootstrap before avail start and end are 2108 * calculated. 2109 */ 2110vm_offset_t 2111moea64_bootstrap_alloc(vm_size_t size, u_int align) 2112{ 2113 vm_offset_t s, e; 2114 int i, j; 2115 2116 size = round_page(size); 2117 for (i = 0; phys_avail[i + 1] != 0; i += 2) { 2118 if (align != 0) 2119 s = (phys_avail[i] + align - 1) & ~(align - 1); 2120 else 2121 s = phys_avail[i]; 2122 e = s + size; 2123 2124 if (s < phys_avail[i] || e > phys_avail[i + 1]) 2125 continue; 2126 2127 if (s + size > platform_real_maxaddr()) 2128 continue; 2129 2130 if (s == phys_avail[i]) { 2131 phys_avail[i] += size; 2132 } else if (e == phys_avail[i + 1]) { 2133 phys_avail[i + 1] -= size; 2134 } else { 2135 for (j = phys_avail_count * 2; j > i; j -= 2) { 2136 phys_avail[j] = phys_avail[j - 2]; 2137 phys_avail[j + 1] = phys_avail[j - 1]; 2138 } 2139 2140 phys_avail[i + 3] = phys_avail[i + 1]; 2141 phys_avail[i + 1] = s; 2142 phys_avail[i + 2] = e; 2143 phys_avail_count++; 2144 } 2145 2146 return (s); 2147 } 2148 panic("moea64_bootstrap_alloc: could not allocate memory"); 2149} 2150 2151static int 2152moea64_pvo_enter(mmu_t mmu, pmap_t pm, uma_zone_t zone, 2153 struct pvo_head *pvo_head, vm_offset_t va, vm_offset_t pa, 2154 uint64_t pte_lo, int flags) 2155{ 2156 struct pvo_entry *pvo; 2157 uint64_t vsid; 2158 int first; 2159 u_int ptegidx; 2160 int i; 2161 int bootstrap; 2162 2163 /* 2164 * One nasty thing that can happen here is that the UMA calls to 2165 * allocate new PVOs need to map more memory, which calls pvo_enter(), 2166 * which calls UMA... 2167 * 2168 * We break the loop by detecting recursion and allocating out of 2169 * the bootstrap pool. 2170 */ 2171 2172 first = 0; 2173 bootstrap = (flags & PVO_BOOTSTRAP); 2174 2175 if (!moea64_initialized) 2176 bootstrap = 1; 2177 2178 /* 2179 * Compute the PTE Group index. 2180 */ 2181 va &= ~ADDR_POFF; 2182 vsid = va_to_vsid(pm, va); 2183 ptegidx = va_to_pteg(vsid, va, flags & PVO_LARGE); 2184 2185 /* 2186 * Remove any existing mapping for this page. Reuse the pvo entry if 2187 * there is a mapping. 2188 */ 2189 LOCK_TABLE(); 2190 2191 moea64_pvo_enter_calls++; 2192 2193 LIST_FOREACH(pvo, &moea64_pvo_table[ptegidx], pvo_olink) { 2194 if (pvo->pvo_pmap == pm && PVO_VADDR(pvo) == va) { 2195 if ((pvo->pvo_pte.lpte.pte_lo & LPTE_RPGN) == pa && 2196 (pvo->pvo_pte.lpte.pte_lo & LPTE_PP) == 2197 (pte_lo & LPTE_PP)) { 2198 if (!(pvo->pvo_pte.lpte.pte_hi & LPTE_VALID)) { 2199 /* Re-insert if spilled */ 2200 i = MOEA64_PTE_INSERT(mmu, ptegidx, 2201 &pvo->pvo_pte.lpte); 2202 if (i >= 0) 2203 PVO_PTEGIDX_SET(pvo, i); 2204 moea64_pte_overflow--; 2205 } 2206 UNLOCK_TABLE(); 2207 return (0); 2208 } 2209 moea64_pvo_remove(mmu, pvo); 2210 break; 2211 } 2212 } 2213 2214 /* 2215 * If we aren't overwriting a mapping, try to allocate. 2216 */ 2217 if (bootstrap) { 2218 if (moea64_bpvo_pool_index >= BPVO_POOL_SIZE) { 2219 panic("moea64_enter: bpvo pool exhausted, %d, %d, %zd", 2220 moea64_bpvo_pool_index, BPVO_POOL_SIZE, 2221 BPVO_POOL_SIZE * sizeof(struct pvo_entry)); 2222 } 2223 pvo = &moea64_bpvo_pool[moea64_bpvo_pool_index]; 2224 moea64_bpvo_pool_index++; 2225 bootstrap = 1; 2226 } else { 2227 /* 2228 * Note: drop the table lock around the UMA allocation in 2229 * case the UMA allocator needs to manipulate the page 2230 * table. The mapping we are working with is already 2231 * protected by the PMAP lock. 2232 */ 2233 UNLOCK_TABLE(); 2234 pvo = uma_zalloc(zone, M_NOWAIT); 2235 LOCK_TABLE(); 2236 } 2237 2238 if (pvo == NULL) { 2239 UNLOCK_TABLE(); 2240 return (ENOMEM); 2241 } 2242 2243 moea64_pvo_entries++; 2244 pvo->pvo_vaddr = va; 2245 pvo->pvo_vpn = (uint64_t)((va & ADDR_PIDX) >> ADDR_PIDX_SHFT) 2246 | (vsid << 16); 2247 pvo->pvo_pmap = pm; 2248 LIST_INSERT_HEAD(&moea64_pvo_table[ptegidx], pvo, pvo_olink); 2249 pvo->pvo_vaddr &= ~ADDR_POFF; 2250 2251 if (!(flags & VM_PROT_EXECUTE)) 2252 pte_lo |= LPTE_NOEXEC; 2253 if (flags & PVO_WIRED) 2254 pvo->pvo_vaddr |= PVO_WIRED; 2255 if (pvo_head != &moea64_pvo_kunmanaged) 2256 pvo->pvo_vaddr |= PVO_MANAGED; 2257 if (bootstrap) 2258 pvo->pvo_vaddr |= PVO_BOOTSTRAP; 2259 if (flags & PVO_FAKE) 2260 pvo->pvo_vaddr |= PVO_FAKE; 2261 if (flags & PVO_LARGE) 2262 pvo->pvo_vaddr |= PVO_LARGE; 2263 2264 moea64_pte_create(&pvo->pvo_pte.lpte, vsid, va, 2265 (uint64_t)(pa) | pte_lo, flags); 2266 2267 /* 2268 * Remember if the list was empty and therefore will be the first 2269 * item. 2270 */ 2271 if (LIST_FIRST(pvo_head) == NULL) 2272 first = 1; 2273 LIST_INSERT_HEAD(pvo_head, pvo, pvo_vlink); 2274 2275 if (pvo->pvo_vaddr & PVO_WIRED) { 2276 pvo->pvo_pte.lpte.pte_hi |= LPTE_WIRED; 2277 pm->pm_stats.wired_count++; 2278 } 2279 pm->pm_stats.resident_count++; 2280 2281 /* 2282 * We hope this succeeds but it isn't required. 2283 */ 2284 i = MOEA64_PTE_INSERT(mmu, ptegidx, &pvo->pvo_pte.lpte); 2285 if (i >= 0) { 2286 PVO_PTEGIDX_SET(pvo, i); 2287 } else { 2288 panic("moea64_pvo_enter: overflow"); 2289 moea64_pte_overflow++; 2290 } 2291 2292 if (pm == kernel_pmap) 2293 isync(); 2294 2295 UNLOCK_TABLE(); 2296 2297#ifdef __powerpc64__ 2298 /* 2299 * Make sure all our bootstrap mappings are in the SLB as soon 2300 * as virtual memory is switched on. 2301 */ 2302 if (!pmap_bootstrapped) 2303 moea64_bootstrap_slb_prefault(va, flags & PVO_LARGE); 2304#endif 2305 2306 return (first ? ENOENT : 0); 2307} 2308 2309static void 2310moea64_pvo_remove(mmu_t mmu, struct pvo_entry *pvo) 2311{ 2312 uintptr_t pt; 2313 2314 /* 2315 * If there is an active pte entry, we need to deactivate it (and 2316 * save the ref & cfg bits). 2317 */ 2318 LOCK_TABLE(); 2319 pt = MOEA64_PVO_TO_PTE(mmu, pvo); 2320 if (pt != -1) { 2321 MOEA64_PTE_UNSET(mmu, pt, &pvo->pvo_pte.lpte, pvo->pvo_vpn); 2322 PVO_PTEGIDX_CLR(pvo); 2323 } else { 2324 moea64_pte_overflow--; 2325 } 2326 2327 /* 2328 * Update our statistics. 2329 */ 2330 pvo->pvo_pmap->pm_stats.resident_count--; 2331 if (pvo->pvo_vaddr & PVO_WIRED) 2332 pvo->pvo_pmap->pm_stats.wired_count--; 2333 2334 /* 2335 * Save the REF/CHG bits into their cache if the page is managed. 2336 */ 2337 if ((pvo->pvo_vaddr & (PVO_MANAGED|PVO_FAKE)) == PVO_MANAGED) { 2338 struct vm_page *pg; 2339 2340 pg = PHYS_TO_VM_PAGE(pvo->pvo_pte.lpte.pte_lo & LPTE_RPGN); 2341 if (pg != NULL) { 2342 moea64_attr_save(pg, pvo->pvo_pte.lpte.pte_lo & 2343 (LPTE_REF | LPTE_CHG)); 2344 } 2345 } 2346 2347 /* 2348 * Remove this PVO from the PV list. 2349 */ 2350 LIST_REMOVE(pvo, pvo_vlink); 2351 2352 /* 2353 * Remove this from the overflow list and return it to the pool 2354 * if we aren't going to reuse it. 2355 */ 2356 LIST_REMOVE(pvo, pvo_olink); 2357 2358 moea64_pvo_entries--; 2359 moea64_pvo_remove_calls++; 2360 2361 UNLOCK_TABLE(); 2362 2363 if (!(pvo->pvo_vaddr & PVO_BOOTSTRAP)) 2364 uma_zfree((pvo->pvo_vaddr & PVO_MANAGED) ? moea64_mpvo_zone : 2365 moea64_upvo_zone, pvo); 2366} 2367 2368static struct pvo_entry * 2369moea64_pvo_find_va(pmap_t pm, vm_offset_t va) 2370{ 2371 struct pvo_entry *pvo; 2372 int ptegidx; 2373 uint64_t vsid; 2374 #ifdef __powerpc64__ 2375 uint64_t slbv; 2376 2377 if (pm == kernel_pmap) { 2378 slbv = kernel_va_to_slbv(va); 2379 } else { 2380 struct slb *slb; 2381 slb = user_va_to_slb_entry(pm, va); 2382 /* The page is not mapped if the segment isn't */ 2383 if (slb == NULL) 2384 return NULL; 2385 slbv = slb->slbv; 2386 } 2387 2388 vsid = (slbv & SLBV_VSID_MASK) >> SLBV_VSID_SHIFT; 2389 if (slbv & SLBV_L) 2390 va &= ~moea64_large_page_mask; 2391 else 2392 va &= ~ADDR_POFF; 2393 ptegidx = va_to_pteg(vsid, va, slbv & SLBV_L); 2394 #else 2395 va &= ~ADDR_POFF; 2396 vsid = va_to_vsid(pm, va); 2397 ptegidx = va_to_pteg(vsid, va, 0); 2398 #endif 2399 2400 LOCK_TABLE(); 2401 LIST_FOREACH(pvo, &moea64_pvo_table[ptegidx], pvo_olink) { 2402 if (pvo->pvo_pmap == pm && PVO_VADDR(pvo) == va) 2403 break; 2404 } 2405 UNLOCK_TABLE(); 2406 2407 return (pvo); 2408} 2409 2410static boolean_t 2411moea64_query_bit(mmu_t mmu, vm_page_t m, u_int64_t ptebit) 2412{ 2413 struct pvo_entry *pvo; 2414 uintptr_t pt; 2415 2416 if (moea64_attr_fetch(m) & ptebit) 2417 return (TRUE); 2418 2419 vm_page_lock_queues(); 2420 2421 LIST_FOREACH(pvo, vm_page_to_pvoh(m), pvo_vlink) { 2422 MOEA_PVO_CHECK(pvo); /* sanity check */ 2423 2424 /* 2425 * See if we saved the bit off. If so, cache it and return 2426 * success. 2427 */ 2428 if (pvo->pvo_pte.lpte.pte_lo & ptebit) { 2429 moea64_attr_save(m, ptebit); 2430 MOEA_PVO_CHECK(pvo); /* sanity check */ 2431 vm_page_unlock_queues(); 2432 return (TRUE); 2433 } 2434 } 2435 2436 /* 2437 * No luck, now go through the hard part of looking at the PTEs 2438 * themselves. Sync so that any pending REF/CHG bits are flushed to 2439 * the PTEs. 2440 */ 2441 powerpc_sync(); 2442 LIST_FOREACH(pvo, vm_page_to_pvoh(m), pvo_vlink) { 2443 MOEA_PVO_CHECK(pvo); /* sanity check */ 2444 2445 /* 2446 * See if this pvo has a valid PTE. if so, fetch the 2447 * REF/CHG bits from the valid PTE. If the appropriate 2448 * ptebit is set, cache it and return success. 2449 */ 2450 LOCK_TABLE(); 2451 pt = MOEA64_PVO_TO_PTE(mmu, pvo); 2452 if (pt != -1) { 2453 MOEA64_PTE_SYNCH(mmu, pt, &pvo->pvo_pte.lpte); 2454 if (pvo->pvo_pte.lpte.pte_lo & ptebit) { 2455 UNLOCK_TABLE(); 2456 2457 moea64_attr_save(m, ptebit); 2458 MOEA_PVO_CHECK(pvo); /* sanity check */ 2459 vm_page_unlock_queues(); 2460 return (TRUE); 2461 } 2462 } 2463 UNLOCK_TABLE(); 2464 } 2465 2466 vm_page_unlock_queues(); 2467 return (FALSE); 2468} 2469 2470static u_int 2471moea64_clear_bit(mmu_t mmu, vm_page_t m, u_int64_t ptebit) 2472{ 2473 u_int count; 2474 struct pvo_entry *pvo; 2475 uintptr_t pt; 2476 2477 vm_page_lock_queues(); 2478 2479 /* 2480 * Clear the cached value. 2481 */ 2482 moea64_attr_clear(m, ptebit); 2483 2484 /* 2485 * Sync so that any pending REF/CHG bits are flushed to the PTEs (so 2486 * we can reset the right ones). note that since the pvo entries and 2487 * list heads are accessed via BAT0 and are never placed in the page 2488 * table, we don't have to worry about further accesses setting the 2489 * REF/CHG bits. 2490 */ 2491 powerpc_sync(); 2492 2493 /* 2494 * For each pvo entry, clear the pvo's ptebit. If this pvo has a 2495 * valid pte clear the ptebit from the valid pte. 2496 */ 2497 count = 0; 2498 LIST_FOREACH(pvo, vm_page_to_pvoh(m), pvo_vlink) { 2499 MOEA_PVO_CHECK(pvo); /* sanity check */ 2500 2501 LOCK_TABLE(); 2502 pt = MOEA64_PVO_TO_PTE(mmu, pvo); 2503 if (pt != -1) { 2504 MOEA64_PTE_SYNCH(mmu, pt, &pvo->pvo_pte.lpte); 2505 if (pvo->pvo_pte.lpte.pte_lo & ptebit) { 2506 count++; 2507 MOEA64_PTE_CLEAR(mmu, pt, &pvo->pvo_pte.lpte, 2508 pvo->pvo_vpn, ptebit); 2509 } 2510 } 2511 pvo->pvo_pte.lpte.pte_lo &= ~ptebit; 2512 MOEA_PVO_CHECK(pvo); /* sanity check */ 2513 UNLOCK_TABLE(); 2514 } 2515 2516 vm_page_unlock_queues(); 2517 return (count); 2518} 2519 2520boolean_t 2521moea64_dev_direct_mapped(mmu_t mmu, vm_offset_t pa, vm_size_t size) 2522{ 2523 struct pvo_entry *pvo; 2524 vm_offset_t ppa; 2525 int error = 0; 2526 2527 PMAP_LOCK(kernel_pmap); 2528 for (ppa = pa & ~ADDR_POFF; ppa < pa + size; ppa += PAGE_SIZE) { 2529 pvo = moea64_pvo_find_va(kernel_pmap, ppa); 2530 if (pvo == NULL || 2531 (pvo->pvo_pte.lpte.pte_lo & LPTE_RPGN) != ppa) { 2532 error = EFAULT; 2533 break; 2534 } 2535 } 2536 PMAP_UNLOCK(kernel_pmap); 2537 2538 return (error); 2539} 2540 2541/* 2542 * Map a set of physical memory pages into the kernel virtual 2543 * address space. Return a pointer to where it is mapped. This 2544 * routine is intended to be used for mapping device memory, 2545 * NOT real memory. 2546 */ 2547void * 2548moea64_mapdev_attr(mmu_t mmu, vm_offset_t pa, vm_size_t size, vm_memattr_t ma) 2549{ 2550 vm_offset_t va, tmpva, ppa, offset; 2551 2552 ppa = trunc_page(pa); 2553 offset = pa & PAGE_MASK; 2554 size = roundup(offset + size, PAGE_SIZE); 2555 2556 va = kmem_alloc_nofault(kernel_map, size); 2557 2558 if (!va) 2559 panic("moea64_mapdev: Couldn't alloc kernel virtual memory"); 2560 2561 for (tmpva = va; size > 0;) { 2562 moea64_kenter_attr(mmu, tmpva, ppa, ma); 2563 size -= PAGE_SIZE; 2564 tmpva += PAGE_SIZE; 2565 ppa += PAGE_SIZE; 2566 } 2567 2568 return ((void *)(va + offset)); 2569} 2570 2571void * 2572moea64_mapdev(mmu_t mmu, vm_offset_t pa, vm_size_t size) 2573{ 2574 2575 return moea64_mapdev_attr(mmu, pa, size, VM_MEMATTR_DEFAULT); 2576} 2577 2578void 2579moea64_unmapdev(mmu_t mmu, vm_offset_t va, vm_size_t size) 2580{ 2581 vm_offset_t base, offset; 2582 2583 base = trunc_page(va); 2584 offset = va & PAGE_MASK; 2585 size = roundup(offset + size, PAGE_SIZE); 2586 2587 kmem_free(kernel_map, base, size); 2588} 2589 2590void 2591moea64_sync_icache(mmu_t mmu, pmap_t pm, vm_offset_t va, vm_size_t sz) 2592{ 2593 struct pvo_entry *pvo; 2594 vm_offset_t lim; 2595 vm_paddr_t pa; 2596 vm_size_t len; 2597 2598 PMAP_LOCK(pm); 2599 while (sz > 0) { 2600 lim = round_page(va); 2601 len = MIN(lim - va, sz); 2602 pvo = moea64_pvo_find_va(pm, va & ~ADDR_POFF); 2603 if (pvo != NULL) { 2604 pa = (pvo->pvo_pte.pte.pte_lo & LPTE_RPGN) | 2605 (va & ADDR_POFF); 2606 moea64_syncicache(mmu, pm, va, pa, len); 2607 } 2608 va += len; 2609 sz -= len; 2610 } 2611 PMAP_UNLOCK(pm); 2612} 2613