mmu_oea.c revision 92521
1/* 2 * Copyright (c) 2001 The NetBSD Foundation, Inc. 3 * All rights reserved. 4 * 5 * This code is derived from software contributed to The NetBSD Foundation 6 * by Matt Thomas <matt@3am-software.com> of Allegro Networks, Inc. 7 * 8 * Redistribution and use in source and binary forms, with or without 9 * modification, are permitted provided that the following conditions 10 * are met: 11 * 1. Redistributions of source code must retain the above copyright 12 * notice, this list of conditions and the following disclaimer. 13 * 2. Redistributions in binary form must reproduce the above copyright 14 * notice, this list of conditions and the following disclaimer in the 15 * documentation and/or other materials provided with the distribution. 16 * 3. All advertising materials mentioning features or use of this software 17 * must display the following acknowledgement: 18 * This product includes software developed by the NetBSD 19 * Foundation, Inc. and its contributors. 20 * 4. Neither the name of The NetBSD Foundation nor the names of its 21 * contributors may be used to endorse or promote products derived 22 * from this software without specific prior written permission. 23 * 24 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS 25 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED 26 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR 27 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS 28 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 29 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 30 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 31 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 32 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 33 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 34 * POSSIBILITY OF SUCH DAMAGE. 35 */ 36/* 37 * Copyright (C) 1995, 1996 Wolfgang Solfrank. 38 * Copyright (C) 1995, 1996 TooLs GmbH. 39 * All rights reserved. 40 * 41 * Redistribution and use in source and binary forms, with or without 42 * modification, are permitted provided that the following conditions 43 * are met: 44 * 1. Redistributions of source code must retain the above copyright 45 * notice, this list of conditions and the following disclaimer. 46 * 2. Redistributions in binary form must reproduce the above copyright 47 * notice, this list of conditions and the following disclaimer in the 48 * documentation and/or other materials provided with the distribution. 49 * 3. All advertising materials mentioning features or use of this software 50 * must display the following acknowledgement: 51 * This product includes software developed by TooLs GmbH. 52 * 4. The name of TooLs GmbH may not be used to endorse or promote products 53 * derived from this software without specific prior written permission. 54 * 55 * THIS SOFTWARE IS PROVIDED BY TOOLS GMBH ``AS IS'' AND ANY EXPRESS OR 56 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES 57 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
58 * IN NO EVENT SHALL TOOLS GMBH BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 59 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, 60 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; 61 * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, 62 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR 63 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF 64 * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 65 * 66 * $NetBSD: pmap.c,v 1.28 2000/03/26 20:42:36 kleink Exp $ 67 */ 68/* 69 * Copyright (C) 2001 Benno Rice. 70 * All rights reserved. 71 * 72 * Redistribution and use in source and binary forms, with or without 73 * modification, are permitted provided that the following conditions 74 * are met: 75 * 1. Redistributions of source code must retain the above copyright 76 * notice, this list of conditions and the following disclaimer. 77 * 2. Redistributions in binary form must reproduce the above copyright 78 * notice, this list of conditions and the following disclaimer in the 79 * documentation and/or other materials provided with the distribution. 80 * 81 * THIS SOFTWARE IS PROVIDED BY Benno Rice ``AS IS'' AND ANY EXPRESS OR 82 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES 83 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 84 * IN NO EVENT SHALL TOOLS GMBH BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 85 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, 86 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; 87 * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, 88 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR 89 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF 90 * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 91 */ 92 93#ifndef lint 94static const char rcsid[] = 95 "$FreeBSD: head/sys/powerpc/aim/mmu_oea.c 92521 2002-03-17 23:58:12Z benno $"; 96#endif /* not lint */ 97 98/* 99 * Manages physical address maps. 100 * 101 * In addition to hardware address maps, this module is called upon to 102 * provide software-use-only maps which may or may not be stored in the 103 * same form as hardware maps. These pseudo-maps are used to store 104 * intermediate results from copy operations to and from address spaces. 105 * 106 * Since the information managed by this module is also stored by the 107 * logical address mapping module, this module may throw away valid virtual 108 * to physical mappings at almost any time. However, invalidations of 109 * mappings must be done as requested. 110 * 111 * In order to cope with hardware architectures which make virtual to 112 * physical map invalidates expensive, this module may delay invalidate 113 * reduced protection operations until such time as they are actually 114 * necessary. This module is given full information as to which processors 115 * are currently using which maps, and to when physical maps must be made 116 * correct. 
117 */ 118 119#include <sys/param.h> 120#include <sys/kernel.h> 121#include <sys/ktr.h> 122#include <sys/lock.h> 123#include <sys/msgbuf.h> 124#include <sys/mutex.h> 125#include <sys/proc.h> 126#include <sys/sysctl.h> 127#include <sys/systm.h> 128#include <sys/vmmeter.h> 129 130#include <dev/ofw/openfirm.h> 131 132#include <vm/vm.h> 133#include <vm/vm_param.h> 134#include <vm/vm_kern.h> 135#include <vm/vm_page.h> 136#include <vm/vm_map.h> 137#include <vm/vm_object.h> 138#include <vm/vm_extern.h> 139#include <vm/vm_pageout.h> 140#include <vm/vm_pager.h> 141#include <vm/vm_zone.h> 142 143#include <machine/bat.h> 144#include <machine/frame.h> 145#include <machine/md_var.h> 146#include <machine/psl.h> 147#include <machine/pte.h> 148#include <machine/sr.h> 149 150#define PMAP_DEBUG 151 152#define TODO panic("%s: not implemented", __func__); 153 154#define PMAP_LOCK(pm) 155#define PMAP_UNLOCK(pm) 156 157#define TLBIE(va) __asm __volatile("tlbie %0" :: "r"(va)) 158#define TLBSYNC() __asm __volatile("tlbsync"); 159#define SYNC() __asm __volatile("sync"); 160#define EIEIO() __asm __volatile("eieio"); 161 162#define VSID_MAKE(sr, hash) ((sr) | (((hash) & 0xfffff) << 4)) 163#define VSID_TO_SR(vsid) ((vsid) & 0xf) 164#define VSID_TO_HASH(vsid) (((vsid) >> 4) & 0xfffff) 165 166#define PVO_PTEGIDX_MASK 0x0007 /* which PTEG slot */ 167#define PVO_PTEGIDX_VALID 0x0008 /* slot is valid */ 168#define PVO_WIRED 0x0010 /* PVO entry is wired */ 169#define PVO_MANAGED 0x0020 /* PVO entry is managed */ 170#define PVO_EXECUTABLE 0x0040 /* PVO entry is executable */ 171#define PVO_BOOTSTRAP 0x0004 /* PVO entry allocated during 172 bootstrap */ 173#define PVO_VADDR(pvo) ((pvo)->pvo_vaddr & ~ADDR_POFF) 174#define PVO_ISEXECUTABLE(pvo) ((pvo)->pvo_vaddr & PVO_EXECUTABLE) 175#define PVO_PTEGIDX_GET(pvo) ((pvo)->pvo_vaddr & PVO_PTEGIDX_MASK) 176#define PVO_PTEGIDX_ISSET(pvo) ((pvo)->pvo_vaddr & PVO_PTEGIDX_VALID) 177#define PVO_PTEGIDX_CLR(pvo) \ 178 ((void)((pvo)->pvo_vaddr &= ~(PVO_PTEGIDX_VALID|PVO_PTEGIDX_MASK))) 179#define PVO_PTEGIDX_SET(pvo, i) \ 180 ((void)((pvo)->pvo_vaddr |= (i)|PVO_PTEGIDX_VALID)) 181 182#define PMAP_PVO_CHECK(pvo) 183 184struct mem_region { 185 vm_offset_t mr_start; 186 vm_offset_t mr_size; 187}; 188 189struct ofw_map { 190 vm_offset_t om_va; 191 vm_size_t om_len; 192 vm_offset_t om_pa; 193 u_int om_mode; 194}; 195 196int pmap_bootstrapped = 0; 197 198/* 199 * Virtual and physical address of message buffer. 200 */ 201struct msgbuf *msgbufp; 202vm_offset_t msgbuf_phys; 203 204/* 205 * Physical addresses of first and last available physical page. 206 */ 207vm_offset_t avail_start; 208vm_offset_t avail_end; 209 210/* 211 * Map of physical memory regions. 212 */ 213vm_offset_t phys_avail[128]; 214u_int phys_avail_count; 215static struct mem_region regions[128]; 216static struct ofw_map translations[128]; 217static int translations_size; 218 219/* 220 * First and last available kernel virtual addresses. 221 */ 222vm_offset_t virtual_avail; 223vm_offset_t virtual_end; 224vm_offset_t kernel_vm_end; 225 226/* 227 * Kernel pmap. 228 */ 229struct pmap kernel_pmap_store; 230extern struct pmap ofw_pmap; 231 232/* 233 * PTEG data. 234 */ 235static struct pteg *pmap_pteg_table; 236u_int pmap_pteg_count; 237u_int pmap_pteg_mask; 238 239/* 240 * PVO data. 
241 */ 242struct pvo_head *pmap_pvo_table; /* pvo entries by pteg index */ 243struct pvo_head pmap_pvo_kunmanaged = 244 LIST_HEAD_INITIALIZER(pmap_pvo_kunmanaged); /* list of unmanaged pages */ 245struct pvo_head pmap_pvo_unmanaged = 246 LIST_HEAD_INITIALIZER(pmap_pvo_unmanaged); /* list of unmanaged pages */ 247 248vm_zone_t pmap_upvo_zone; /* zone for pvo entries for unmanaged pages */ 249vm_zone_t pmap_mpvo_zone; /* zone for pvo entries for managed pages */ 250struct vm_object pmap_upvo_zone_obj; 251struct vm_object pmap_mpvo_zone_obj; 252 253#define PMAP_PVO_SIZE 1024 254static struct pvo_entry *pmap_bpvo_pool; 255static int pmap_bpvo_pool_index; 256static int pmap_bpvo_pool_count; 257 258#define VSID_NBPW (sizeof(u_int32_t) * 8) 259static u_int pmap_vsid_bitmap[NPMAPS / VSID_NBPW]; 260 261static boolean_t pmap_initialized = FALSE; 262 263/* 264 * Statistics. 265 */ 266u_int pmap_pte_valid = 0; 267u_int pmap_pte_overflow = 0; 268u_int pmap_pte_replacements = 0; 269u_int pmap_pvo_entries = 0; 270u_int pmap_pvo_enter_calls = 0; 271u_int pmap_pvo_remove_calls = 0; 272u_int pmap_pte_spills = 0; 273SYSCTL_INT(_machdep, OID_AUTO, pmap_pte_valid, CTLFLAG_RD, &pmap_pte_valid, 274 0, ""); 275SYSCTL_INT(_machdep, OID_AUTO, pmap_pte_overflow, CTLFLAG_RD, 276 &pmap_pte_overflow, 0, ""); 277SYSCTL_INT(_machdep, OID_AUTO, pmap_pte_replacements, CTLFLAG_RD, 278 &pmap_pte_replacements, 0, ""); 279SYSCTL_INT(_machdep, OID_AUTO, pmap_pvo_entries, CTLFLAG_RD, &pmap_pvo_entries, 280 0, ""); 281SYSCTL_INT(_machdep, OID_AUTO, pmap_pvo_enter_calls, CTLFLAG_RD, 282 &pmap_pvo_enter_calls, 0, ""); 283SYSCTL_INT(_machdep, OID_AUTO, pmap_pvo_remove_calls, CTLFLAG_RD, 284 &pmap_pvo_remove_calls, 0, ""); 285SYSCTL_INT(_machdep, OID_AUTO, pmap_pte_spills, CTLFLAG_RD, 286 &pmap_pte_spills, 0, ""); 287 288struct pvo_entry *pmap_pvo_zeropage; 289 290vm_offset_t pmap_rkva_start = VM_MIN_KERNEL_ADDRESS; 291u_int pmap_rkva_count = 4; 292 293/* 294 * Allocate physical memory for use in pmap_bootstrap. 295 */ 296static vm_offset_t pmap_bootstrap_alloc(vm_size_t, u_int); 297 298/* 299 * PTE calls. 300 */ 301static int pmap_pte_insert(u_int, struct pte *); 302 303/* 304 * PVO calls. 305 */ 306static int pmap_pvo_enter(pmap_t, vm_zone_t, struct pvo_head *, 307 vm_offset_t, vm_offset_t, u_int, int); 308static void pmap_pvo_remove(struct pvo_entry *, int); 309static struct pvo_entry *pmap_pvo_find_va(pmap_t, vm_offset_t, int *); 310static struct pte *pmap_pvo_to_pte(const struct pvo_entry *, int); 311 312/* 313 * Utility routines. 
314 */ 315static struct pvo_entry *pmap_rkva_alloc(void); 316static void pmap_pa_map(struct pvo_entry *, vm_offset_t, 317 struct pte *, int *); 318static void pmap_pa_unmap(struct pvo_entry *, struct pte *, int *); 319static void pmap_syncicache(vm_offset_t, vm_size_t); 320static boolean_t pmap_query_bit(vm_page_t, int); 321static boolean_t pmap_clear_bit(vm_page_t, int); 322static void tlbia(void); 323 324static __inline int 325va_to_sr(u_int *sr, vm_offset_t va) 326{ 327 return (sr[(uintptr_t)va >> ADDR_SR_SHFT]); 328} 329 330static __inline u_int 331va_to_pteg(u_int sr, vm_offset_t addr) 332{ 333 u_int hash; 334 335 hash = (sr & SR_VSID_MASK) ^ (((u_int)addr & ADDR_PIDX) >> 336 ADDR_PIDX_SHFT); 337 return (hash & pmap_pteg_mask); 338} 339 340static __inline struct pvo_head * 341pa_to_pvoh(vm_offset_t pa) 342{ 343 struct vm_page *pg; 344 345 pg = PHYS_TO_VM_PAGE(pa); 346 347 if (pg == NULL) 348 return (&pmap_pvo_unmanaged); 349 350 return (&pg->md.mdpg_pvoh); 351} 352 353static __inline struct pvo_head * 354vm_page_to_pvoh(vm_page_t m) 355{ 356 357 return (&m->md.mdpg_pvoh); 358} 359 360static __inline void 361pmap_attr_clear(vm_page_t m, int ptebit) 362{ 363 364 m->md.mdpg_attrs &= ~ptebit; 365} 366 367static __inline int 368pmap_attr_fetch(vm_page_t m) 369{ 370 371 return (m->md.mdpg_attrs); 372} 373 374static __inline void 375pmap_attr_save(vm_page_t m, int ptebit) 376{ 377 378 m->md.mdpg_attrs |= ptebit; 379} 380 381static __inline int 382pmap_pte_compare(const struct pte *pt, const struct pte *pvo_pt) 383{ 384 if (pt->pte_hi == pvo_pt->pte_hi) 385 return (1); 386 387 return (0); 388} 389 390static __inline int 391pmap_pte_match(struct pte *pt, u_int sr, vm_offset_t va, int which) 392{ 393 return (pt->pte_hi & ~PTE_VALID) == 394 (((sr & SR_VSID_MASK) << PTE_VSID_SHFT) | 395 ((va >> ADDR_API_SHFT) & PTE_API) | which); 396} 397 398static __inline void 399pmap_pte_create(struct pte *pt, u_int sr, vm_offset_t va, u_int pte_lo) 400{ 401 /* 402 * Construct a PTE. Default to IMB initially. Valid bit only gets 403 * set when the real pte is set in memory. 404 * 405 * Note: Don't set the valid bit for correct operation of tlb update. 406 */ 407 pt->pte_hi = ((sr & SR_VSID_MASK) << PTE_VSID_SHFT) | 408 (((va & ADDR_PIDX) >> ADDR_API_SHFT) & PTE_API); 409 pt->pte_lo = pte_lo; 410} 411 412static __inline void 413pmap_pte_synch(struct pte *pt, struct pte *pvo_pt) 414{ 415 416 pvo_pt->pte_lo |= pt->pte_lo & (PTE_REF | PTE_CHG); 417} 418 419static __inline void 420pmap_pte_clear(struct pte *pt, vm_offset_t va, int ptebit) 421{ 422 423 /* 424 * As shown in Section 7.6.3.2.3 425 */ 426 pt->pte_lo &= ~ptebit; 427 TLBIE(va); 428 EIEIO(); 429 TLBSYNC(); 430 SYNC(); 431} 432 433static __inline void 434pmap_pte_set(struct pte *pt, struct pte *pvo_pt) 435{ 436 437 pvo_pt->pte_hi |= PTE_VALID; 438 439 /* 440 * Update the PTE as defined in section 7.6.3.1. 441 * Note that the REF/CHG bits are from pvo_pt and thus should havce 442 * been saved so this routine can restore them (if desired). 443 */ 444 pt->pte_lo = pvo_pt->pte_lo; 445 EIEIO(); 446 pt->pte_hi = pvo_pt->pte_hi; 447 SYNC(); 448 pmap_pte_valid++; 449} 450 451static __inline void 452pmap_pte_unset(struct pte *pt, struct pte *pvo_pt, vm_offset_t va) 453{ 454 455 pvo_pt->pte_hi &= ~PTE_VALID; 456 457 /* 458 * Force the reg & chg bits back into the PTEs. 459 */ 460 SYNC(); 461 462 /* 463 * Invalidate the pte. 
464 */ 465 pt->pte_hi &= ~PTE_VALID; 466 467 SYNC(); 468 TLBIE(va); 469 EIEIO(); 470 TLBSYNC(); 471 SYNC(); 472 473 /* 474 * Save the reg & chg bits. 475 */ 476 pmap_pte_synch(pt, pvo_pt); 477 pmap_pte_valid--; 478} 479 480static __inline void 481pmap_pte_change(struct pte *pt, struct pte *pvo_pt, vm_offset_t va) 482{ 483 484 /* 485 * Invalidate the PTE 486 */ 487 pmap_pte_unset(pt, pvo_pt, va); 488 pmap_pte_set(pt, pvo_pt); 489} 490 491/* 492 * Quick sort callout for comparing memory regions. 493 */ 494static int mr_cmp(const void *a, const void *b); 495static int om_cmp(const void *a, const void *b); 496 497static int 498mr_cmp(const void *a, const void *b) 499{ 500 const struct mem_region *regiona; 501 const struct mem_region *regionb; 502 503 regiona = a; 504 regionb = b; 505 if (regiona->mr_start < regionb->mr_start) 506 return (-1); 507 else if (regiona->mr_start > regionb->mr_start) 508 return (1); 509 else 510 return (0); 511} 512 513static int 514om_cmp(const void *a, const void *b) 515{ 516 const struct ofw_map *mapa; 517 const struct ofw_map *mapb; 518 519 mapa = a; 520 mapb = b; 521 if (mapa->om_pa < mapb->om_pa) 522 return (-1); 523 else if (mapa->om_pa > mapb->om_pa) 524 return (1); 525 else 526 return (0); 527} 528 529void 530pmap_bootstrap(vm_offset_t kernelstart, vm_offset_t kernelend) 531{ 532 ihandle_t pmem, mmui; 533 phandle_t chosen, mmu; 534 int sz; 535 int i, j; 536 vm_size_t size, physsz; 537 vm_offset_t pa, va, off; 538 u_int batl, batu; 539 540 /* 541 * Use an IBAT and a DBAT to map the bottom segment of memory 542 * where we are. 543 */ 544 batu = BATU(0x00000000, BAT_BL_256M, BAT_Vs); 545 batl = BATL(0x00000000, BAT_M, BAT_PP_RW); 546 __asm ("mtibatu 0,%0; mtibatl 0,%1; mtdbatu 0,%0; mtdbatl 0,%1" 547 :: "r"(batu), "r"(batl)); 548#if 0 549 batu = BATU(0x80000000, BAT_BL_256M, BAT_Vs); 550 batl = BATL(0x80000000, BAT_M, BAT_PP_RW); 551 __asm ("mtibatu 1,%0; mtibatl 1,%1; mtdbatu 1,%0; mtdbatl 1,%1" 552 :: "r"(batu), "r"(batl)); 553#endif 554 555 /* 556 * Set the start and end of kva. 557 */ 558 virtual_avail = VM_MIN_KERNEL_ADDRESS; 559 virtual_end = VM_MAX_KERNEL_ADDRESS; 560 561 if ((pmem = OF_finddevice("/memory")) == -1) 562 panic("pmap_bootstrap: can't locate memory device"); 563 if ((sz = OF_getproplen(pmem, "available")) == -1) 564 panic("pmap_bootstrap: can't get length of available memory"); 565 if (sizeof(phys_avail) < sz) 566 panic("pmap_bootstrap: phys_avail too small"); 567 if (sizeof(regions) < sz) 568 panic("pmap_bootstrap: regions too small"); 569 bzero(regions, sz); 570 if (OF_getprop(pmem, "available", regions, sz) == -1) 571 panic("pmap_bootstrap: can't get available memory"); 572 sz /= sizeof(*regions); 573 CTR0(KTR_PMAP, "pmap_bootstrap: physical memory"); 574 qsort(regions, sz, sizeof(*regions), mr_cmp); 575 phys_avail_count = 0; 576 physsz = 0; 577 for (i = 0, j = 0; i < sz; i++, j += 2) { 578 CTR3(KTR_PMAP, "region: %#x - %#x (%#x)", regions[i].mr_start, 579 regions[i].mr_start + regions[i].mr_size, 580 regions[i].mr_size); 581 phys_avail[j] = regions[i].mr_start; 582 phys_avail[j + 1] = regions[i].mr_start + regions[i].mr_size; 583 phys_avail_count++; 584 physsz += regions[i].mr_size; 585 } 586 physmem = btoc(physsz); 587 588 /* 589 * Allocate PTEG table. 
590 */ 591#ifdef PTEGCOUNT 592 pmap_pteg_count = PTEGCOUNT; 593#else 594 pmap_pteg_count = 0x1000; 595 596 while (pmap_pteg_count < physmem) 597 pmap_pteg_count <<= 1; 598 599 pmap_pteg_count >>= 1; 600#endif /* PTEGCOUNT */ 601 602 size = pmap_pteg_count * sizeof(struct pteg); 603 CTR2(KTR_PMAP, "pmap_bootstrap: %d PTEGs, %d bytes", pmap_pteg_count, 604 size); 605 pmap_pteg_table = (struct pteg *)pmap_bootstrap_alloc(size, size); 606 CTR1(KTR_PMAP, "pmap_bootstrap: PTEG table at %p", pmap_pteg_table); 607 bzero((void *)pmap_pteg_table, pmap_pteg_count * sizeof(struct pteg)); 608 pmap_pteg_mask = pmap_pteg_count - 1; 609 610 /* 611 * Allocate PTE overflow lists. 612 */ 613 size = sizeof(struct pvo_head) * pmap_pteg_count; 614 pmap_pvo_table = (struct pvo_head *)pmap_bootstrap_alloc(size, 615 PAGE_SIZE); 616 CTR1(KTR_PMAP, "pmap_bootstrap: PVO table at %p", pmap_pvo_table); 617 for (i = 0; i < pmap_pteg_count; i++) 618 LIST_INIT(&pmap_pvo_table[i]); 619 620 /* 621 * Allocate the message buffer. 622 */ 623 msgbuf_phys = pmap_bootstrap_alloc(MSGBUF_SIZE, 0); 624 625 /* 626 * Initialise the unmanaged pvo pool. 627 */ 628 pmap_bpvo_pool = (struct pvo_entry *)pmap_bootstrap_alloc(PAGE_SIZE, 0); 629 pmap_bpvo_pool_index = 0; 630 pmap_bpvo_pool_count = (int)PAGE_SIZE / sizeof(struct pvo_entry); 631 632 /* 633 * Make sure kernel vsid is allocated as well as VSID 0. 634 */ 635 pmap_vsid_bitmap[(KERNEL_VSIDBITS & (NPMAPS - 1)) / VSID_NBPW] 636 |= 1 << (KERNEL_VSIDBITS % VSID_NBPW); 637 pmap_vsid_bitmap[0] |= 1; 638 639 /* 640 * Set up the OpenFirmware pmap and add it's mappings. 641 */ 642 pmap_pinit(&ofw_pmap); 643 ofw_pmap.pm_sr[KERNEL_SR] = KERNEL_SEGMENT; 644 if ((chosen = OF_finddevice("/chosen")) == -1) 645 panic("pmap_bootstrap: can't find /chosen"); 646 OF_getprop(chosen, "mmu", &mmui, 4); 647 if ((mmu = OF_instance_to_package(mmui)) == -1) 648 panic("pmap_bootstrap: can't get mmu package"); 649 if ((sz = OF_getproplen(mmu, "translations")) == -1) 650 panic("pmap_bootstrap: can't get ofw translation count"); 651 if (sizeof(translations) < sz) 652 panic("pmap_bootstrap: translations too small"); 653 bzero(translations, sz); 654 if (OF_getprop(mmu, "translations", translations, sz) == -1) 655 panic("pmap_bootstrap: can't get ofw translations"); 656 CTR0(KTR_PMAP, "pmap_bootstrap: translations"); 657 qsort(translations, sz, sizeof (*translations), om_cmp); 658 for (i = 0; i < sz; i++) { 659 CTR3(KTR_PMAP, "translation: pa=%#x va=%#x len=%#x", 660 translations[i].om_pa, translations[i].om_va, 661 translations[i].om_len); 662 663 /* Drop stuff below something? */ 664 665 /* Enter the pages? */ 666 for (off = 0; off < translations[i].om_len; off += PAGE_SIZE) { 667 struct vm_page m; 668 669 m.phys_addr = translations[i].om_pa + off; 670 pmap_enter(&ofw_pmap, translations[i].om_va + off, &m, 671 VM_PROT_ALL, 1); 672 } 673 } 674#ifdef SMP 675 TLBSYNC(); 676#endif 677 678 /* 679 * Initialize the kernel pmap (which is statically allocated). 680 */ 681 for (i = 0; i < 16; i++) { 682 kernel_pmap->pm_sr[i] = EMPTY_SEGMENT; 683 } 684 kernel_pmap->pm_sr[KERNEL_SR] = KERNEL_SEGMENT; 685 kernel_pmap->pm_active = ~0; 686 kernel_pmap->pm_count = 1; 687 688 /* 689 * Allocate a kernel stack with a guard page for thread0 and map it 690 * into the kernel page map. 
 */
	pa = pmap_bootstrap_alloc(KSTACK_PAGES * PAGE_SIZE, 0);
	kstack0_phys = pa;
	kstack0 = virtual_avail + (KSTACK_GUARD_PAGES * PAGE_SIZE);
	CTR2(KTR_PMAP, "pmap_bootstrap: kstack0 at %#x (%#x)", kstack0_phys,
	    kstack0);
	virtual_avail += (KSTACK_PAGES + KSTACK_GUARD_PAGES) * PAGE_SIZE;
	for (i = 0; i < KSTACK_PAGES; i++) {
		pa = kstack0_phys + i * PAGE_SIZE;
		va = kstack0 + i * PAGE_SIZE;
		pmap_kenter(va, pa);
		TLBIE(va);
	}

	/*
	 * Calculate the first and last available physical addresses.
	 */
	avail_start = phys_avail[0];
	for (i = 0; phys_avail[i + 2] != 0; i += 2)
		;
	avail_end = phys_avail[i + 1];
	Maxmem = powerpc_btop(avail_end);

	/*
	 * Allocate virtual address space for the message buffer.
	 */
	msgbufp = (struct msgbuf *)virtual_avail;
	virtual_avail += round_page(MSGBUF_SIZE);

	/*
	 * Initialize hardware.
	 */
	for (i = 0; i < 16; i++) {
		__asm __volatile("mtsrin %0,%1"
		    :: "r"(EMPTY_SEGMENT), "r"(i << ADDR_SR_SHFT));
	}
	__asm __volatile ("mtsr %0,%1"
	    :: "n"(KERNEL_SR), "r"(KERNEL_SEGMENT));
	__asm __volatile ("sync; mtsdr1 %0; isync"
	    :: "r"((u_int)pmap_pteg_table | (pmap_pteg_mask >> 10)));
	tlbia();

	pmap_bootstrapped++;
}

/*
 * Activate a user pmap.  The pmap must be activated before its address
 * space can be accessed in any way.
 */
void
pmap_activate(struct thread *td)
{
	pmap_t	pm;
	int	i;

	/*
	 * Load all the data we need up front to encourage the compiler to
	 * not issue any loads while we have interrupts disabled below.
	 */
	pm = &td->td_proc->p_vmspace->vm_pmap;

	KASSERT(pm->pm_active == 0, ("pmap_activate: pmap already active?"));

	pm->pm_active |= PCPU_GET(cpumask);

	/*
	 * XXX: Address this again later?
	 * NetBSD only changes the segment registers on return to userland.
	 */
#if 0
	critical_enter();

	for (i = 0; i < 16; i++) {
		__asm __volatile("mtsr %0,%1" :: "r"(i), "r"(pm->pm_sr[i]));
	}
	__asm __volatile("sync; isync");

	critical_exit();
#endif
}

void
pmap_deactivate(struct thread *td)
{
	pmap_t	pm;

	pm = &td->td_proc->p_vmspace->vm_pmap;
	pm->pm_active &= ~(PCPU_GET(cpumask));
}

vm_offset_t
pmap_addr_hint(vm_object_t object, vm_offset_t va, vm_size_t size)
{
	TODO;
	return (0);
}

void
pmap_change_wiring(pmap_t pmap, vm_offset_t va, boolean_t wired)
{
	TODO;
}

void
pmap_clear_modify(vm_page_t m)
{

	if (m->flags & PG_FICTITIOUS)
		return;
	pmap_clear_bit(m, PTE_CHG);
}

void
pmap_collect(void)
{
	TODO;
}

void
pmap_copy(pmap_t dst_pmap, pmap_t src_pmap, vm_offset_t dst_addr,
	  vm_size_t len, vm_offset_t src_addr)
{
	TODO;
}

void
pmap_copy_page(vm_offset_t src, vm_offset_t dst)
{
	TODO;
}

/*
 * Zero a page of physical memory by temporarily mapping it into the tlb.
824 */ 825void 826pmap_zero_page(vm_offset_t pa) 827{ 828 caddr_t va; 829 int i; 830 831 if (pa < SEGMENT_LENGTH) { 832 va = (caddr_t) pa; 833 } else if (pmap_initialized) { 834 if (pmap_pvo_zeropage == NULL) 835 pmap_pvo_zeropage = pmap_rkva_alloc(); 836 pmap_pa_map(pmap_pvo_zeropage, pa, NULL, NULL); 837 va = (caddr_t)PVO_VADDR(pmap_pvo_zeropage); 838 } else { 839 panic("pmap_zero_page: can't zero pa %#x", pa); 840 } 841 842 bzero(va, PAGE_SIZE); 843 844 for (i = PAGE_SIZE / CACHELINESIZE; i > 0; i--) { 845 __asm __volatile("dcbz 0,%0" :: "r"(va)); 846 va += CACHELINESIZE; 847 } 848 849 if (pa >= SEGMENT_LENGTH) 850 pmap_pa_unmap(pmap_pvo_zeropage, NULL, NULL); 851} 852 853void 854pmap_zero_page_area(vm_offset_t pa, int off, int size) 855{ 856 TODO; 857} 858 859/* 860 * Map the given physical page at the specified virtual address in the 861 * target pmap with the protection requested. If specified the page 862 * will be wired down. 863 */ 864void 865pmap_enter(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot, 866 boolean_t wired) 867{ 868 struct pvo_head *pvo_head; 869 vm_zone_t zone; 870 u_int pte_lo, pvo_flags; 871 int error; 872 873 if (!pmap_initialized) { 874 pvo_head = &pmap_pvo_kunmanaged; 875 zone = pmap_upvo_zone; 876 pvo_flags = 0; 877 } else { 878 pvo_head = pa_to_pvoh(m->phys_addr); 879 zone = pmap_mpvo_zone; 880 pvo_flags = PVO_MANAGED; 881 } 882 883 pte_lo = PTE_I | PTE_G; 884 885 if (prot & VM_PROT_WRITE) 886 pte_lo |= PTE_BW; 887 else 888 pte_lo |= PTE_BR; 889 890 if (prot & VM_PROT_EXECUTE) 891 pvo_flags |= PVO_EXECUTABLE; 892 893 if (wired) 894 pvo_flags |= PVO_WIRED; 895 896 error = pmap_pvo_enter(pmap, zone, pvo_head, va, m->phys_addr, pte_lo, 897 pvo_flags); 898 899 if (error == ENOENT) { 900 /* 901 * Flush the real memory from the cache. 902 */ 903 if ((pvo_flags & PVO_EXECUTABLE) && (pte_lo & PTE_I) == 0) { 904 pmap_syncicache(m->phys_addr, PAGE_SIZE); 905 } 906 } 907} 908 909vm_offset_t 910pmap_extract(pmap_t pmap, vm_offset_t va) 911{ 912 TODO; 913 return (0); 914} 915 916/* 917 * Grow the number of kernel page table entries. Unneeded. 918 */ 919void 920pmap_growkernel(vm_offset_t addr) 921{ 922} 923 924void 925pmap_init(vm_offset_t phys_start, vm_offset_t phys_end) 926{ 927 928 CTR(KTR_PMAP, "pmap_init"); 929} 930 931void 932pmap_init2(void) 933{ 934 935 CTR(KTR_PMAP, "pmap_init2"); 936 937 pmap_upvo_zone = zinit("UPVO entry", sizeof (struct pvo_entry), 938 0, 0, 0); 939 pmap_mpvo_zone = zinit("MPVO entry", sizeof(struct pvo_entry), 940 PMAP_PVO_SIZE, ZONE_INTERRUPT, 1); 941 pmap_initialized = TRUE; 942} 943 944boolean_t 945pmap_is_modified(vm_page_t m) 946{ 947 TODO; 948 return (0); 949} 950 951void 952pmap_clear_reference(vm_page_t m) 953{ 954 TODO; 955} 956 957/* 958 * pmap_ts_referenced: 959 * 960 * Return a count of reference bits for a page, clearing those bits. 961 * It is not necessary for every reference bit to be cleared, but it 962 * is necessary that 0 only be returned when there are truly no 963 * reference bits set. 964 * 965 * XXX: The exact number of bits to check and clear is a matter that 966 * should be tested and standardized at some point in the future for 967 * optimal aging of shared pages. 968 */ 969 970int 971pmap_ts_referenced(vm_page_t m) 972{ 973 TODO; 974 return (0); 975} 976 977/* 978 * Map a wired page into kernel virtual address space. 
979 */ 980void 981pmap_kenter(vm_offset_t va, vm_offset_t pa) 982{ 983 u_int pte_lo; 984 int error; 985 int i; 986 987#if 0 988 if (va < VM_MIN_KERNEL_ADDRESS) 989 panic("pmap_kenter: attempt to enter non-kernel address %#x", 990 va); 991#endif 992 993 pte_lo = PTE_I | PTE_G | PTE_BW; 994 for (i = 0; phys_avail[i + 2] != 0; i += 2) { 995 if (pa >= phys_avail[i] && pa < phys_avail[i + 1]) { 996 pte_lo &= ~(PTE_I | PTE_G); 997 break; 998 } 999 } 1000 1001 error = pmap_pvo_enter(kernel_pmap, pmap_upvo_zone, 1002 &pmap_pvo_kunmanaged, va, pa, pte_lo, PVO_WIRED); 1003 1004 if (error != 0 && error != ENOENT) 1005 panic("pmap_kenter: failed to enter va %#x pa %#x: %d", va, 1006 pa, error); 1007 1008 /* 1009 * Flush the real memory from the instruction cache. 1010 */ 1011 if ((pte_lo & (PTE_I | PTE_G)) == 0) { 1012 pmap_syncicache(pa, PAGE_SIZE); 1013 } 1014} 1015 1016vm_offset_t 1017pmap_kextract(vm_offset_t va) 1018{ 1019 TODO; 1020 return (0); 1021} 1022 1023/* 1024 * Remove a wired page from kernel virtual address space. 1025 */ 1026void 1027pmap_kremove(vm_offset_t va) 1028{ 1029 1030 pmap_remove(kernel_pmap, va, roundup(va, PAGE_SIZE)); 1031} 1032 1033/* 1034 * Map a range of physical addresses into kernel virtual address space. 1035 * 1036 * The value passed in *virt is a suggested virtual address for the mapping. 1037 * Architectures which can support a direct-mapped physical to virtual region 1038 * can return the appropriate address within that region, leaving '*virt' 1039 * unchanged. We cannot and therefore do not; *virt is updated with the 1040 * first usable address after the mapped region. 1041 */ 1042vm_offset_t 1043pmap_map(vm_offset_t *virt, vm_offset_t pa_start, vm_offset_t pa_end, int prot) 1044{ 1045 vm_offset_t sva, va; 1046 1047 sva = *virt; 1048 va = sva; 1049 for (; pa_start < pa_end; pa_start += PAGE_SIZE, va += PAGE_SIZE) 1050 pmap_kenter(va, pa_start); 1051 *virt = va; 1052 return (sva); 1053} 1054 1055int 1056pmap_mincore(pmap_t pmap, vm_offset_t addr) 1057{ 1058 TODO; 1059 return (0); 1060} 1061 1062/* 1063 * Create the uarea for a new process. 1064 * This routine directly affects the fork perf for a process. 1065 */ 1066void 1067pmap_new_proc(struct proc *p) 1068{ 1069 vm_object_t upobj; 1070 vm_offset_t up; 1071 vm_page_t m; 1072 u_int i; 1073 1074 /* 1075 * Allocate the object for the upages. 1076 */ 1077 upobj = p->p_upages_obj; 1078 if (upobj == NULL) { 1079 upobj = vm_object_allocate(OBJT_DEFAULT, UAREA_PAGES); 1080 p->p_upages_obj = upobj; 1081 } 1082 1083 /* 1084 * Get a kernel virtual address for the uarea for this process. 1085 */ 1086 up = (vm_offset_t)p->p_uarea; 1087 if (up == 0) { 1088 up = kmem_alloc_nofault(kernel_map, UAREA_PAGES * PAGE_SIZE); 1089 if (up == 0) 1090 panic("pmap_new_proc: upage allocation failed"); 1091 p->p_uarea = (struct user *)up; 1092 } 1093 1094 for (i = 0; i < UAREA_PAGES; i++) { 1095 /* 1096 * Get a uarea page. 1097 */ 1098 m = vm_page_grab(upobj, i, VM_ALLOC_NORMAL | VM_ALLOC_RETRY); 1099 1100 /* 1101 * Wire the page. 1102 */ 1103 m->wire_count++; 1104 1105 /* 1106 * Enter the page into the kernel address space. 
1107 */ 1108 pmap_kenter(up + i * PAGE_SIZE, VM_PAGE_TO_PHYS(m)); 1109 1110 vm_page_wakeup(m); 1111 vm_page_flag_clear(m, PG_ZERO); 1112 vm_page_flag_set(m, PG_MAPPED | PG_WRITEABLE); 1113 m->valid = VM_PAGE_BITS_ALL; 1114 } 1115} 1116 1117void 1118pmap_object_init_pt(pmap_t pmap, vm_offset_t addr, vm_object_t object, 1119 vm_pindex_t pindex, vm_size_t size, int limit) 1120{ 1121 TODO; 1122} 1123 1124/* 1125 * Lower the permission for all mappings to a given page. 1126 */ 1127void 1128pmap_page_protect(vm_page_t m, vm_prot_t prot) 1129{ 1130 struct pvo_head *pvo_head; 1131 struct pvo_entry *pvo, *next_pvo; 1132 struct pte *pt; 1133 1134 /* 1135 * Since the routine only downgrades protection, if the 1136 * maximal protection is desired, there isn't any change 1137 * to be made. 1138 */ 1139 if ((prot & (VM_PROT_READ|VM_PROT_WRITE)) == 1140 (VM_PROT_READ|VM_PROT_WRITE)) 1141 return; 1142 1143 pvo_head = vm_page_to_pvoh(m); 1144 for (pvo = LIST_FIRST(pvo_head); pvo != NULL; pvo = next_pvo) { 1145 next_pvo = LIST_NEXT(pvo, pvo_vlink); 1146 PMAP_PVO_CHECK(pvo); /* sanity check */ 1147 1148 /* 1149 * Downgrading to no mapping at all, we just remove the entry. 1150 */ 1151 if ((prot & VM_PROT_READ) == 0) { 1152 pmap_pvo_remove(pvo, -1); 1153 continue; 1154 } 1155 1156 /* 1157 * If EXEC permission is being revoked, just clear the flag 1158 * in the PVO. 1159 */ 1160 if ((prot & VM_PROT_EXECUTE) == 0) 1161 pvo->pvo_vaddr &= ~PVO_EXECUTABLE; 1162 1163 /* 1164 * If this entry is already RO, don't diddle with the page 1165 * table. 1166 */ 1167 if ((pvo->pvo_pte.pte_lo & PTE_PP) == PTE_BR) { 1168 PMAP_PVO_CHECK(pvo); 1169 continue; 1170 } 1171 1172 /* 1173 * Grab the PTE before we diddle the bits so pvo_to_pte can 1174 * verify the pte contents are as expected. 1175 */ 1176 pt = pmap_pvo_to_pte(pvo, -1); 1177 pvo->pvo_pte.pte_lo &= ~PTE_PP; 1178 pvo->pvo_pte.pte_lo |= PTE_BR; 1179 if (pt != NULL) 1180 pmap_pte_change(pt, &pvo->pvo_pte, pvo->pvo_vaddr); 1181 PMAP_PVO_CHECK(pvo); /* sanity check */ 1182 } 1183} 1184 1185/* 1186 * Make the specified page pageable (or not). Unneeded. 1187 */ 1188void 1189pmap_pageable(pmap_t pmap, vm_offset_t sva, vm_offset_t eva, 1190 boolean_t pageable) 1191{ 1192} 1193 1194/* 1195 * Returns true if the pmap's pv is one of the first 1196 * 16 pvs linked to from this page. This count may 1197 * be changed upwards or downwards in the future; it 1198 * is only necessary that true be returned for a small 1199 * subset of pmaps for proper page aging. 1200 */ 1201boolean_t 1202pmap_page_exists_quick(pmap_t pmap, vm_page_t m) 1203{ 1204 TODO; 1205 return (0); 1206} 1207 1208static u_int pmap_vsidcontext; 1209 1210void 1211pmap_pinit(pmap_t pmap) 1212{ 1213 int i, mask; 1214 u_int entropy; 1215 1216 entropy = 0; 1217 __asm __volatile("mftb %0" : "=r"(entropy)); 1218 1219 /* 1220 * Allocate some segment registers for this pmap. 1221 */ 1222 pmap->pm_count = 1; 1223 for (i = 0; i < NPMAPS; i += VSID_NBPW) { 1224 u_int hash, n; 1225 1226 /* 1227 * Create a new value by mutiplying by a prime and adding in 1228 * entropy from the timebase register. This is to make the 1229 * VSID more random so that the PT hash function collides 1230 * less often. (Note that the prime casues gcc to do shifts 1231 * instead of a multiply.) 
		 */
		pmap_vsidcontext = (pmap_vsidcontext * 0x1105) + entropy;
		hash = pmap_vsidcontext & (NPMAPS - 1);
		if (hash == 0)		/* 0 is special, avoid it */
			continue;
		n = hash >> 5;
		mask = 1 << (hash & (VSID_NBPW - 1));
		hash = (pmap_vsidcontext & 0xfffff);
		if (pmap_vsid_bitmap[n] & mask) {	/* collision? */
			/* anything free in this bucket? */
			if (pmap_vsid_bitmap[n] == 0xffffffff) {
				entropy = (pmap_vsidcontext >> 20);
				continue;
			}
			i = ffs(~pmap_vsid_bitmap[n]) - 1;
			mask = 1 << i;
			hash &= 0xfffff & ~(VSID_NBPW - 1);
			hash |= i;
		}
		pmap_vsid_bitmap[n] |= mask;
		for (i = 0; i < 16; i++)
			pmap->pm_sr[i] = VSID_MAKE(i, hash);
		return;
	}

	panic("pmap_pinit: out of segments");
}

/*
 * Initialize the pmap associated with process 0.
 */
void
pmap_pinit0(pmap_t pm)
{

	pmap_pinit(pm);
	bzero(&pm->pm_stats, sizeof(pm->pm_stats));
}

void
pmap_pinit2(pmap_t pmap)
{
	/* XXX: Remove this stub when no longer called */
}

void
pmap_prefault(pmap_t pm, vm_offset_t va, vm_map_entry_t entry)
{
	KASSERT(pm == &curproc->p_vmspace->vm_pmap || pm == kernel_pmap,
	    ("pmap_prefault: non current pmap"));
	/* XXX */
}

void
pmap_protect(pmap_t pmap, vm_offset_t sva, vm_offset_t eva, vm_prot_t prot)
{
	TODO;
}

vm_offset_t
pmap_phys_address(int ppn)
{
	TODO;
	return (0);
}

/*
 * Map a list of wired pages into kernel virtual address space.  This is
 * intended for temporary mappings which do not need page modification or
 * references recorded.  Existing mappings in the region are overwritten.
 */
void
pmap_qenter(vm_offset_t va, vm_page_t *m, int count)
{
	int	i;

	for (i = 0; i < count; i++, va += PAGE_SIZE)
		pmap_kenter(va, VM_PAGE_TO_PHYS(m[i]));
}

/*
 * Remove page mappings from kernel virtual address space.  Intended for
 * temporary mappings entered by pmap_qenter.
 */
void
pmap_qremove(vm_offset_t va, int count)
{
	int	i;

	for (i = 0; i < count; i++, va += PAGE_SIZE)
		pmap_kremove(va);
}

/*
 * Add a reference to the specified pmap.
 */
void
pmap_reference(pmap_t pm)
{

	if (pm != NULL)
		pm->pm_count++;
}

void
pmap_release(pmap_t pmap)
{
	TODO;
}

/*
 * Remove the given range of addresses from the specified map.
 */
void
pmap_remove(pmap_t pm, vm_offset_t sva, vm_offset_t eva)
{
	struct	pvo_entry *pvo;
	int	pteidx;

	for (; sva < eva; sva += PAGE_SIZE) {
		pvo = pmap_pvo_find_va(pm, sva, &pteidx);
		if (pvo != NULL) {
			pmap_pvo_remove(pvo, pteidx);
		}
	}
}

void
pmap_remove_pages(pmap_t pmap, vm_offset_t sva, vm_offset_t eva)
{
	TODO;
}

void
pmap_swapin_proc(struct proc *p)
{
	TODO;
}

void
pmap_swapout_proc(struct proc *p)
{
	TODO;
}

/*
 * Create the kernel stack and pcb for a new thread.
 * This routine directly affects the fork perf for a process and
 * create performance for a thread.
 */
void
pmap_new_thread(struct thread *td)
{
	vm_object_t	ksobj;
	vm_offset_t	ks;
	vm_page_t	m;
	u_int		i;

	/*
	 * Allocate object for the kstack.
1392 */ 1393 ksobj = td->td_kstack_obj; 1394 if (ksobj == NULL) { 1395 ksobj = vm_object_allocate(OBJT_DEFAULT, KSTACK_PAGES); 1396 td->td_kstack_obj = ksobj; 1397 } 1398 1399 /* 1400 * Get a kernel virtual address for the kstack for this thread. 1401 */ 1402 ks = td->td_kstack; 1403 if (ks == 0) { 1404 ks = kmem_alloc_nofault(kernel_map, 1405 (KSTACK_PAGES + KSTACK_GUARD_PAGES) * PAGE_SIZE); 1406 if (ks == 0) 1407 panic("pmap_new_thread: kstack allocation failed"); 1408 TLBIE(ks); 1409 ks += KSTACK_GUARD_PAGES * PAGE_SIZE; 1410 td->td_kstack = ks; 1411 } 1412 1413 for (i = 0; i < KSTACK_PAGES; i++) { 1414 /* 1415 * Get a kernel stack page. 1416 */ 1417 m = vm_page_grab(ksobj, i, VM_ALLOC_NORMAL | VM_ALLOC_RETRY); 1418 1419 /* 1420 * Wire the page. 1421 */ 1422 m->wire_count++; 1423 1424 /* 1425 * Enter the page into the kernel address space. 1426 */ 1427 pmap_kenter(ks + i * PAGE_SIZE, VM_PAGE_TO_PHYS(m)); 1428 1429 vm_page_wakeup(m); 1430 vm_page_flag_clear(m, PG_ZERO); 1431 vm_page_flag_set(m, PG_MAPPED | PG_WRITEABLE); 1432 m->valid = VM_PAGE_BITS_ALL; 1433 } 1434} 1435 1436void 1437pmap_dispose_proc(struct proc *p) 1438{ 1439 TODO; 1440} 1441 1442void 1443pmap_dispose_thread(struct thread *td) 1444{ 1445 TODO; 1446} 1447 1448void 1449pmap_swapin_thread(struct thread *td) 1450{ 1451 TODO; 1452} 1453 1454void 1455pmap_swapout_thread(struct thread *td) 1456{ 1457 TODO; 1458} 1459 1460/* 1461 * Allocate a physical page of memory directly from the phys_avail map. 1462 * Can only be called from pmap_bootstrap before avail start and end are 1463 * calculated. 1464 */ 1465static vm_offset_t 1466pmap_bootstrap_alloc(vm_size_t size, u_int align) 1467{ 1468 vm_offset_t s, e; 1469 int i, j; 1470 1471 size = round_page(size); 1472 for (i = 0; phys_avail[i + 1] != 0; i += 2) { 1473 if (align != 0) 1474 s = (phys_avail[i] + align - 1) & ~(align - 1); 1475 else 1476 s = phys_avail[i]; 1477 e = s + size; 1478 1479 if (s < phys_avail[i] || e > phys_avail[i + 1]) 1480 continue; 1481 1482 if (s == phys_avail[i]) { 1483 phys_avail[i] += size; 1484 } else if (e == phys_avail[i + 1]) { 1485 phys_avail[i + 1] -= size; 1486 } else { 1487 for (j = phys_avail_count * 2; j > i; j -= 2) { 1488 phys_avail[j] = phys_avail[j - 2]; 1489 phys_avail[j + 1] = phys_avail[j - 1]; 1490 } 1491 1492 phys_avail[i + 3] = phys_avail[i + 1]; 1493 phys_avail[i + 1] = s; 1494 phys_avail[i + 2] = e; 1495 phys_avail_count++; 1496 } 1497 1498 return (s); 1499 } 1500 panic("pmap_bootstrap_alloc: could not allocate memory"); 1501} 1502 1503/* 1504 * Return an unmapped pvo for a kernel virtual address. 1505 * Used by pmap functions that operate on physical pages. 
1506 */ 1507static struct pvo_entry * 1508pmap_rkva_alloc(void) 1509{ 1510 struct pvo_entry *pvo; 1511 struct pte *pt; 1512 vm_offset_t kva; 1513 int pteidx; 1514 1515 if (pmap_rkva_count == 0) 1516 panic("pmap_rkva_alloc: no more reserved KVAs"); 1517 1518 kva = pmap_rkva_start + (PAGE_SIZE * --pmap_rkva_count); 1519 pmap_kenter(kva, 0); 1520 1521 pvo = pmap_pvo_find_va(kernel_pmap, kva, &pteidx); 1522 1523 if (pvo == NULL) 1524 panic("pmap_kva_alloc: pmap_pvo_find_va failed"); 1525 1526 pt = pmap_pvo_to_pte(pvo, pteidx); 1527 1528 if (pt == NULL) 1529 panic("pmap_kva_alloc: pmap_pvo_to_pte failed"); 1530 1531 pmap_pte_unset(pt, &pvo->pvo_pte, pvo->pvo_vaddr); 1532 PVO_PTEGIDX_CLR(pvo); 1533 1534 pmap_pte_overflow++; 1535 1536 return (pvo); 1537} 1538 1539static void 1540pmap_pa_map(struct pvo_entry *pvo, vm_offset_t pa, struct pte *saved_pt, 1541 int *depth_p) 1542{ 1543 struct pte *pt; 1544 1545 /* 1546 * If this pvo already has a valid pte, we need to save it so it can 1547 * be restored later. We then just reload the new PTE over the old 1548 * slot. 1549 */ 1550 if (saved_pt != NULL) { 1551 pt = pmap_pvo_to_pte(pvo, -1); 1552 1553 if (pt != NULL) { 1554 pmap_pte_unset(pt, &pvo->pvo_pte, pvo->pvo_vaddr); 1555 PVO_PTEGIDX_CLR(pvo); 1556 pmap_pte_overflow++; 1557 } 1558 1559 *saved_pt = pvo->pvo_pte; 1560 1561 pvo->pvo_pte.pte_lo &= ~PTE_RPGN; 1562 } 1563 1564 pvo->pvo_pte.pte_lo |= pa; 1565 1566 if (!pmap_pte_spill(pvo->pvo_vaddr)) 1567 panic("pmap_pa_map: could not spill pvo %p", pvo); 1568 1569 if (depth_p != NULL) 1570 (*depth_p)++; 1571} 1572 1573static void 1574pmap_pa_unmap(struct pvo_entry *pvo, struct pte *saved_pt, int *depth_p) 1575{ 1576 struct pte *pt; 1577 1578 pt = pmap_pvo_to_pte(pvo, -1); 1579 1580 if (pt != NULL) { 1581 pmap_pte_unset(pt, &pvo->pvo_pte, pvo->pvo_vaddr); 1582 PVO_PTEGIDX_CLR(pvo); 1583 pmap_pte_overflow++; 1584 } 1585 1586 pvo->pvo_pte.pte_lo &= ~PTE_RPGN; 1587 1588 /* 1589 * If there is a saved PTE and it's valid, restore it and return. 1590 */ 1591 if (saved_pt != NULL && (saved_pt->pte_lo & PTE_RPGN) != 0) { 1592 if (depth_p != NULL && --(*depth_p) == 0) 1593 panic("pmap_pa_unmap: restoring but depth == 0"); 1594 1595 pvo->pvo_pte = *saved_pt; 1596 1597 if (!pmap_pte_spill(pvo->pvo_vaddr)) 1598 panic("pmap_pa_unmap: could not spill pvo %p", pvo); 1599 } 1600} 1601 1602static void 1603pmap_syncicache(vm_offset_t pa, vm_size_t len) 1604{ 1605 __syncicache((void *)pa, len); 1606} 1607 1608static void 1609tlbia(void) 1610{ 1611 caddr_t i; 1612 1613 SYNC(); 1614 for (i = 0; i < (caddr_t)0x00040000; i += 0x00001000) { 1615 TLBIE(i); 1616 EIEIO(); 1617 } 1618 TLBSYNC(); 1619 SYNC(); 1620} 1621 1622static int 1623pmap_pvo_enter(pmap_t pm, vm_zone_t zone, struct pvo_head *pvo_head, 1624 vm_offset_t va, vm_offset_t pa, u_int pte_lo, int flags) 1625{ 1626 struct pvo_entry *pvo; 1627 u_int sr; 1628 int first; 1629 u_int ptegidx; 1630 int i; 1631 1632 pmap_pvo_enter_calls++; 1633 1634 /* 1635 * Compute the PTE Group index. 1636 */ 1637 va &= ~ADDR_POFF; 1638 sr = va_to_sr(pm->pm_sr, va); 1639 ptegidx = va_to_pteg(sr, va); 1640 1641 /* 1642 * Remove any existing mapping for this page. Reuse the pvo entry if 1643 * there is a mapping. 1644 */ 1645 LIST_FOREACH(pvo, &pmap_pvo_table[ptegidx], pvo_olink) { 1646 if (pvo->pvo_pmap == pm && PVO_VADDR(pvo) == va) { 1647 if ((pvo->pvo_pte.pte_lo & PTE_RPGN) == pa) 1648 return (0); 1649 pmap_pvo_remove(pvo, -1); 1650 break; 1651 } 1652 } 1653 1654 /* 1655 * If we aren't overwriting a mapping, try to allocate. 
	 */
	if (pmap_initialized) {
		pvo = zalloc(zone);
	} else {
		if (pmap_bpvo_pool_index >= pmap_bpvo_pool_count) {
			pmap_bpvo_pool = (struct pvo_entry *)
			    pmap_bootstrap_alloc(PAGE_SIZE, 0);
			pmap_bpvo_pool_index = 0;
		}
		pvo = &pmap_bpvo_pool[pmap_bpvo_pool_index];
		pmap_bpvo_pool_index++;
		pvo->pvo_vaddr |= PVO_BOOTSTRAP;
	}

	if (pvo == NULL) {
		return (ENOMEM);
	}

	pmap_pvo_entries++;
	pvo->pvo_vaddr = va;
	pvo->pvo_pmap = pm;
	LIST_INSERT_HEAD(&pmap_pvo_table[ptegidx], pvo, pvo_olink);
	pvo->pvo_vaddr &= ~ADDR_POFF;
	if (flags & VM_PROT_EXECUTE)
		pvo->pvo_vaddr |= PVO_EXECUTABLE;
	if (flags & PVO_WIRED)
		pvo->pvo_vaddr |= PVO_WIRED;
	if (pvo_head != &pmap_pvo_kunmanaged)
		pvo->pvo_vaddr |= PVO_MANAGED;
	pmap_pte_create(&pvo->pvo_pte, sr, va, pa | pte_lo);

	/*
	 * Remember if the list was empty and therefore will be the first
	 * item.
	 */
	first = LIST_FIRST(pvo_head) == NULL;

	LIST_INSERT_HEAD(pvo_head, pvo, pvo_vlink);
	if (pvo->pvo_vaddr & PVO_WIRED)
		pvo->pvo_pmap->pm_stats.wired_count++;
	pvo->pvo_pmap->pm_stats.resident_count++;

	/*
	 * We hope this succeeds but it isn't required.
	 */
	i = pmap_pte_insert(ptegidx, &pvo->pvo_pte);
	if (i >= 0) {
		PVO_PTEGIDX_SET(pvo, i);
	} else {
		panic("pmap_pvo_enter: overflow");
		pmap_pte_overflow++;
	}

	return (first ? ENOENT : 0);
}

static void
pmap_pvo_remove(struct pvo_entry *pvo, int pteidx)
{
	struct	pte *pt;

	/*
	 * If there is an active pte entry, we need to deactivate it (and
	 * save the ref & chg bits).
	 */
	pt = pmap_pvo_to_pte(pvo, pteidx);
	if (pt != NULL) {
		pmap_pte_unset(pt, &pvo->pvo_pte, pvo->pvo_vaddr);
		PVO_PTEGIDX_CLR(pvo);
	} else {
		pmap_pte_overflow--;
	}

	/*
	 * Update our statistics.
	 */
	pvo->pvo_pmap->pm_stats.resident_count--;
	if (pvo->pvo_vaddr & PVO_WIRED)
		pvo->pvo_pmap->pm_stats.wired_count--;

	/*
	 * Save the REF/CHG bits into their cache if the page is managed.
	 */
	if (pvo->pvo_vaddr & PVO_MANAGED) {
		struct	vm_page *pg;

		pg = PHYS_TO_VM_PAGE(pvo->pvo_pte.pte_lo & PTE_RPGN);
		if (pg != NULL) {
			pmap_attr_save(pg, pvo->pvo_pte.pte_lo &
			    (PTE_REF | PTE_CHG));
		}
	}

	/*
	 * Remove this PVO from the PV list.
	 */
	LIST_REMOVE(pvo, pvo_vlink);

	/*
	 * Remove this from the overflow list and return it to the pool
	 * if we aren't going to reuse it.
	 */
	LIST_REMOVE(pvo, pvo_olink);
	if (!(pvo->pvo_vaddr & PVO_BOOTSTRAP))
		zfree(pvo->pvo_vaddr & PVO_MANAGED ? pmap_mpvo_zone :
		    pmap_upvo_zone, pvo);
	pmap_pvo_entries--;
	pmap_pvo_remove_calls++;
}

static __inline int
pmap_pvo_pte_index(const struct pvo_entry *pvo, int ptegidx)
{
	int	pteidx;

	/*
	 * We can find the actual pte entry without searching by grabbing
	 * the PTEG index from 3 unused bits in pte_lo[11:9] and by
	 * noticing the HID bit.
1775 */ 1776 pteidx = ptegidx * 8 + PVO_PTEGIDX_GET(pvo); 1777 if (pvo->pvo_pte.pte_hi & PTE_HID) 1778 pteidx ^= pmap_pteg_mask * 8; 1779 1780 return (pteidx); 1781} 1782 1783static struct pvo_entry * 1784pmap_pvo_find_va(pmap_t pm, vm_offset_t va, int *pteidx_p) 1785{ 1786 struct pvo_entry *pvo; 1787 int ptegidx; 1788 u_int sr; 1789 1790 va &= ~ADDR_POFF; 1791 sr = va_to_sr(pm->pm_sr, va); 1792 ptegidx = va_to_pteg(sr, va); 1793 1794 LIST_FOREACH(pvo, &pmap_pvo_table[ptegidx], pvo_olink) { 1795 if (pvo->pvo_pmap == pm && PVO_VADDR(pvo) == va) { 1796 if (pteidx_p) 1797 *pteidx_p = pmap_pvo_pte_index(pvo, ptegidx); 1798 return (pvo); 1799 } 1800 } 1801 1802 return (NULL); 1803} 1804 1805static struct pte * 1806pmap_pvo_to_pte(const struct pvo_entry *pvo, int pteidx) 1807{ 1808 struct pte *pt; 1809 1810 /* 1811 * If we haven't been supplied the ptegidx, calculate it. 1812 */ 1813 if (pteidx == -1) { 1814 int ptegidx; 1815 u_int sr; 1816 1817 sr = va_to_sr(pvo->pvo_pmap->pm_sr, pvo->pvo_vaddr); 1818 ptegidx = va_to_pteg(sr, pvo->pvo_vaddr); 1819 pteidx = pmap_pvo_pte_index(pvo, ptegidx); 1820 } 1821 1822 pt = &pmap_pteg_table[pteidx >> 3].pt[pteidx & 7]; 1823 1824 if ((pvo->pvo_pte.pte_hi & PTE_VALID) && !PVO_PTEGIDX_ISSET(pvo)) { 1825 panic("pmap_pvo_to_pte: pvo %p has valid pte in pvo but no " 1826 "valid pte index", pvo); 1827 } 1828 1829 if ((pvo->pvo_pte.pte_hi & PTE_VALID) == 0 && PVO_PTEGIDX_ISSET(pvo)) { 1830 panic("pmap_pvo_to_pte: pvo %p has valid pte index in pvo " 1831 "pvo but no valid pte", pvo); 1832 } 1833 1834 if ((pt->pte_hi ^ (pvo->pvo_pte.pte_hi & ~PTE_VALID)) == PTE_VALID) { 1835 if ((pvo->pvo_pte.pte_hi & PTE_VALID) == 0) { 1836 panic("pmap_pvo_to_pte: pvo %p has valid pte in " 1837 "pmap_pteg_table %p but invalid in pvo", pvo, pt); 1838 } 1839 1840 if (((pt->pte_lo ^ pvo->pvo_pte.pte_lo) & ~(PTE_CHG|PTE_REF)) 1841 != 0) { 1842 panic("pmap_pvo_to_pte: pvo %p pte does not match " 1843 "pte %p in pmap_pteg_table", pvo, pt); 1844 } 1845 1846 return (pt); 1847 } 1848 1849 if (pvo->pvo_pte.pte_hi & PTE_VALID) { 1850 panic("pmap_pvo_to_pte: pvo %p has invalid pte %p in " 1851 "pmap_pteg_table but valid in pvo", pvo, pt); 1852 } 1853 1854 return (NULL); 1855} 1856 1857/* 1858 * XXX: THIS STUFF SHOULD BE IN pte.c? 1859 */ 1860int 1861pmap_pte_spill(vm_offset_t addr) 1862{ 1863 struct pvo_entry *source_pvo, *victim_pvo; 1864 struct pvo_entry *pvo; 1865 int ptegidx, i, j; 1866 u_int sr; 1867 struct pteg *pteg; 1868 struct pte *pt; 1869 1870 pmap_pte_spills++; 1871 1872 __asm __volatile("mfsrin %0,%1" : "=r"(sr) : "r"(addr)); 1873 ptegidx = va_to_pteg(sr, addr); 1874 1875 /* 1876 * Have to substitute some entry. Use the primary hash for this. 1877 * Use low bits of timebase as random generator. 1878 */ 1879 pteg = &pmap_pteg_table[ptegidx]; 1880 __asm __volatile("mftb %0" : "=r"(i)); 1881 i &= 7; 1882 pt = &pteg->pt[i]; 1883 1884 source_pvo = NULL; 1885 victim_pvo = NULL; 1886 LIST_FOREACH(pvo, &pmap_pvo_table[ptegidx], pvo_olink) { 1887 /* 1888 * We need to find a pvo entry for this address. 1889 */ 1890 PMAP_PVO_CHECK(pvo); 1891 if (source_pvo == NULL && 1892 pmap_pte_match(&pvo->pvo_pte, sr, addr, 1893 pvo->pvo_pte.pte_hi & PTE_HID)) { 1894 /* 1895 * Now found an entry to be spilled into the pteg. 1896 * The PTE is now valid, so we know it's active. 
1897 */ 1898 j = pmap_pte_insert(ptegidx, &pvo->pvo_pte); 1899 1900 if (j >= 0) { 1901 PVO_PTEGIDX_SET(pvo, j); 1902 pmap_pte_overflow--; 1903 PMAP_PVO_CHECK(pvo); 1904 return (1); 1905 } 1906 1907 source_pvo = pvo; 1908 1909 if (victim_pvo != NULL) 1910 break; 1911 } 1912 1913 /* 1914 * We also need the pvo entry of the victim we are replacing 1915 * so save the R & C bits of the PTE. 1916 */ 1917 if ((pt->pte_hi & PTE_HID) == 0 && victim_pvo == NULL && 1918 pmap_pte_compare(pt, &pvo->pvo_pte)) { 1919 victim_pvo = pvo; 1920 if (source_pvo != NULL) 1921 break; 1922 } 1923 } 1924 1925 if (source_pvo == NULL) 1926 return (0); 1927 1928 if (victim_pvo == NULL) { 1929 if ((pt->pte_hi & PTE_HID) == 0) 1930 panic("pmap_pte_spill: victim p-pte (%p) has no pvo" 1931 "entry", pt); 1932 1933 /* 1934 * If this is a secondary PTE, we need to search it's primary 1935 * pvo bucket for the matching PVO. 1936 */ 1937 LIST_FOREACH(pvo, &pmap_pvo_table[ptegidx ^ pmap_pteg_mask], 1938 pvo_olink) { 1939 PMAP_PVO_CHECK(pvo); 1940 /* 1941 * We also need the pvo entry of the victim we are 1942 * replacing so save the R & C bits of the PTE. 1943 */ 1944 if (pmap_pte_compare(pt, &pvo->pvo_pte)) { 1945 victim_pvo = pvo; 1946 break; 1947 } 1948 } 1949 1950 if (victim_pvo == NULL) 1951 panic("pmap_pte_spill: victim s-pte (%p) has no pvo" 1952 "entry", pt); 1953 } 1954 1955 /* 1956 * We are invalidating the TLB entry for the EA we are replacing even 1957 * though it's valid. If we don't, we lose any ref/chg bit changes 1958 * contained in the TLB entry. 1959 */ 1960 source_pvo->pvo_pte.pte_hi &= ~PTE_HID; 1961 1962 pmap_pte_unset(pt, &victim_pvo->pvo_pte, victim_pvo->pvo_vaddr); 1963 pmap_pte_set(pt, &source_pvo->pvo_pte); 1964 1965 PVO_PTEGIDX_CLR(victim_pvo); 1966 PVO_PTEGIDX_SET(source_pvo, i); 1967 pmap_pte_replacements++; 1968 1969 PMAP_PVO_CHECK(victim_pvo); 1970 PMAP_PVO_CHECK(source_pvo); 1971 1972 return (1); 1973} 1974 1975static int 1976pmap_pte_insert(u_int ptegidx, struct pte *pvo_pt) 1977{ 1978 struct pte *pt; 1979 int i; 1980 1981 /* 1982 * First try primary hash. 1983 */ 1984 for (pt = pmap_pteg_table[ptegidx].pt, i = 0; i < 8; i++, pt++) { 1985 if ((pt->pte_hi & PTE_VALID) == 0) { 1986 pvo_pt->pte_hi &= ~PTE_HID; 1987 pmap_pte_set(pt, pvo_pt); 1988 return (i); 1989 } 1990 } 1991 1992 /* 1993 * Now try secondary hash. 1994 */ 1995 ptegidx ^= pmap_pteg_mask; 1996 ptegidx++; 1997 for (pt = pmap_pteg_table[ptegidx].pt, i = 0; i < 8; i++, pt++) { 1998 if ((pt->pte_hi & PTE_VALID) == 0) { 1999 pvo_pt->pte_hi |= PTE_HID; 2000 pmap_pte_set(pt, pvo_pt); 2001 return (i); 2002 } 2003 } 2004 2005 panic("pmap_pte_insert: overflow"); 2006 return (-1); 2007} 2008 2009static boolean_t 2010pmap_query_bit(vm_page_t m, int ptebit) 2011{ 2012 struct pvo_entry *pvo; 2013 struct pte *pt; 2014 2015 if (pmap_attr_fetch(m) & ptebit) 2016 return (TRUE); 2017 2018 LIST_FOREACH(pvo, vm_page_to_pvoh(m), pvo_vlink) { 2019 PMAP_PVO_CHECK(pvo); /* sanity check */ 2020 2021 /* 2022 * See if we saved the bit off. If so, cache it and return 2023 * success. 2024 */ 2025 if (pvo->pvo_pte.pte_lo & ptebit) { 2026 pmap_attr_save(m, ptebit); 2027 PMAP_PVO_CHECK(pvo); /* sanity check */ 2028 return (TRUE); 2029 } 2030 } 2031 2032 /* 2033 * No luck, now go through the hard part of looking at the PTEs 2034 * themselves. Sync so that any pending REF/CHG bits are flushed to 2035 * the PTEs. 
2036 */ 2037 SYNC(); 2038 LIST_FOREACH(pvo, vm_page_to_pvoh(m), pvo_vlink) { 2039 PMAP_PVO_CHECK(pvo); /* sanity check */ 2040 2041 /* 2042 * See if this pvo has a valid PTE. if so, fetch the 2043 * REF/CHG bits from the valid PTE. If the appropriate 2044 * ptebit is set, cache it and return success. 2045 */ 2046 pt = pmap_pvo_to_pte(pvo, -1); 2047 if (pt != NULL) { 2048 pmap_pte_synch(pt, &pvo->pvo_pte); 2049 if (pvo->pvo_pte.pte_lo & ptebit) { 2050 pmap_attr_save(m, ptebit); 2051 PMAP_PVO_CHECK(pvo); /* sanity check */ 2052 return (TRUE); 2053 } 2054 } 2055 } 2056 2057 return (TRUE); 2058} 2059 2060static boolean_t 2061pmap_clear_bit(vm_page_t m, int ptebit) 2062{ 2063 struct pvo_entry *pvo; 2064 struct pte *pt; 2065 int rv; 2066 2067 /* 2068 * Clear the cached value. 2069 */ 2070 rv = pmap_attr_fetch(m); 2071 pmap_attr_clear(m, ptebit); 2072 2073 /* 2074 * Sync so that any pending REF/CHG bits are flushed to the PTEs (so 2075 * we can reset the right ones). note that since the pvo entries and 2076 * list heads are accessed via BAT0 and are never placed in the page 2077 * table, we don't have to worry about further accesses setting the 2078 * REF/CHG bits. 2079 */ 2080 SYNC(); 2081 2082 /* 2083 * For each pvo entry, clear the pvo's ptebit. If this pvo has a 2084 * valid pte clear the ptebit from the valid pte. 2085 */ 2086 LIST_FOREACH(pvo, vm_page_to_pvoh(m), pvo_vlink) { 2087 PMAP_PVO_CHECK(pvo); /* sanity check */ 2088 pt = pmap_pvo_to_pte(pvo, -1); 2089 if (pt != NULL) { 2090 pmap_pte_synch(pt, &pvo->pvo_pte); 2091 if (pvo->pvo_pte.pte_lo & ptebit) 2092 pmap_pte_clear(pt, PVO_VADDR(pvo), ptebit); 2093 } 2094 rv |= pvo->pvo_pte.pte_lo; 2095 pvo->pvo_pte.pte_lo &= ~ptebit; 2096 PMAP_PVO_CHECK(pvo); /* sanity check */ 2097 } 2098 2099 return ((rv & ptebit) != 0); 2100} 2101
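
/*
 * Illustrative sketch appended for clarity; it is not part of the original
 * revision and is compiled out.  It restates the hashed page table lookup
 * used throughout this file: va_to_pteg() folds the VSID and the page index
 * of an effective address into a primary PTEG index, and the secondary PTEG
 * index is the (masked) complement of that hash, which is what
 * pmap_pvo_pte_index() derives with "ptegidx ^ pmap_pteg_mask".  The
 * example_* names below are hypothetical and exist only to show the
 * relationship between the two hashes.
 */
#if 0
static u_int
example_primary_pteg(u_int sr, vm_offset_t va)
{
	u_int	hash;

	/* Same hash as va_to_pteg(): VSID xor'd with the page index. */
	hash = (sr & SR_VSID_MASK) ^ (((u_int)va & ADDR_PIDX) >>
	    ADDR_PIDX_SHFT);
	return (hash & pmap_pteg_mask);
}

static u_int
example_secondary_pteg(u_int primary_ptegidx)
{
	/* The secondary PTEG index is the complement within the table. */
	return (primary_ptegidx ^ pmap_pteg_mask);
}
#endif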