/*
 * Copyright (c) 2001 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Matt Thomas <matt@3am-software.com> of Allegro Networks, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *        This product includes software developed by the NetBSD
 *        Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */
/*
 * Copyright (C) 1995, 1996 Wolfgang Solfrank.
 * Copyright (C) 1995, 1996 TooLs GmbH.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *        This product includes software developed by TooLs GmbH.
 * 4. The name of TooLs GmbH may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY TOOLS GMBH ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL TOOLS GMBH BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
 * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
 * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * $NetBSD: pmap.c,v 1.28 2000/03/26 20:42:36 kleink Exp $
 */
/*
 * Copyright (C) 2001 Benno Rice.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY Benno Rice ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL TOOLS GMBH BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
 * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
 * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#ifndef lint
static const char rcsid[] =
  "$FreeBSD: head/sys/powerpc/aim/mmu_oea.c 96353 2002-05-10 14:21:48Z benno $";
#endif /* not lint */

/*
 * Manages physical address maps.
 *
 * In addition to hardware address maps, this module is called upon to
 * provide software-use-only maps which may or may not be stored in the
 * same form as hardware maps.  These pseudo-maps are used to store
 * intermediate results from copy operations to and from address spaces.
 *
 * Since the information managed by this module is also stored by the
 * logical address mapping module, this module may throw away valid virtual
 * to physical mappings at almost any time.  However, invalidations of
 * mappings must be done as requested.
 *
 * In order to cope with hardware architectures which make virtual to
 * physical map invalidates expensive, this module may delay invalidate
 * reduced protection operations until such time as they are actually
 * necessary.  This module is given full information as to which processors
 * are currently using which maps, and to when physical maps must be made
 * correct.
 */

#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/lock.h>
#include <sys/msgbuf.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/sysctl.h>
#include <sys/systm.h>
#include <sys/vmmeter.h>

#include <dev/ofw/openfirm.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/vm_kern.h>
#include <vm/vm_page.h>
#include <vm/vm_map.h>
#include <vm/vm_object.h>
#include <vm/vm_extern.h>
#include <vm/vm_pageout.h>
#include <vm/vm_pager.h>
#include <vm/uma.h>

#include <machine/bat.h>
#include <machine/frame.h>
#include <machine/md_var.h>
#include <machine/psl.h>
#include <machine/pte.h>
#include <machine/sr.h>

#define PMAP_DEBUG

#define TODO    panic("%s: not implemented", __func__);

#define PMAP_LOCK(pm)
#define PMAP_UNLOCK(pm)

#define TLBIE(va)       __asm __volatile("tlbie %0" :: "r"(va))
#define TLBSYNC()       __asm __volatile("tlbsync");
#define SYNC()          __asm __volatile("sync");
#define EIEIO()         __asm __volatile("eieio");

#define VSID_MAKE(sr, hash)     ((sr) | (((hash) & 0xfffff) << 4))
#define VSID_TO_SR(vsid)        ((vsid) & 0xf)
#define VSID_TO_HASH(vsid)      (((vsid) >> 4) & 0xfffff)

#define PVO_PTEGIDX_MASK        0x0007          /* which PTEG slot */
#define PVO_PTEGIDX_VALID       0x0008          /* slot is valid */
#define PVO_WIRED               0x0010          /* PVO entry is wired */
#define PVO_MANAGED             0x0020          /* PVO entry is managed */
#define PVO_EXECUTABLE          0x0040          /* PVO entry is executable */
#define PVO_BOOTSTRAP           0x0080          /* PVO entry allocated during
                                                   bootstrap */
#define PVO_VADDR(pvo)          ((pvo)->pvo_vaddr & ~ADDR_POFF)
#define PVO_ISEXECUTABLE(pvo)   ((pvo)->pvo_vaddr & PVO_EXECUTABLE)
#define PVO_PTEGIDX_GET(pvo)    ((pvo)->pvo_vaddr & PVO_PTEGIDX_MASK)
#define PVO_PTEGIDX_ISSET(pvo)  ((pvo)->pvo_vaddr & PVO_PTEGIDX_VALID)
#define PVO_PTEGIDX_CLR(pvo)    \
        ((void)((pvo)->pvo_vaddr &= ~(PVO_PTEGIDX_VALID|PVO_PTEGIDX_MASK)))
#define PVO_PTEGIDX_SET(pvo, i) \
        ((void)((pvo)->pvo_vaddr |= (i)|PVO_PTEGIDX_VALID))
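
/*
 * Worked example, derived from the flag definitions above: pvo_vaddr does
 * double duty.  Because mapped virtual addresses are page aligned, the low
 * bits of pvo_vaddr are free to hold the flags, e.g. a managed, wired
 * mapping of va 0x2000 sitting in PTEG slot 5 stores
 *
 *      pvo_vaddr = 0x2000 | 5 | PVO_PTEGIDX_VALID | PVO_WIRED | PVO_MANAGED
 *                = 0x203d
 *
 * and PVO_VADDR() masks the flags back off with ~ADDR_POFF to recover the
 * virtual address.
 */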

#define PMAP_PVO_CHECK(pvo)

struct mem_region {
        vm_offset_t mr_start;
        vm_offset_t mr_size;
};

struct ofw_map {
        vm_offset_t om_va;
        vm_size_t   om_len;
        vm_offset_t om_pa;
        u_int       om_mode;
};

int pmap_bootstrapped = 0;

/*
 * Virtual and physical address of message buffer.
 */
struct msgbuf *msgbufp;
vm_offset_t msgbuf_phys;

/*
 * Physical addresses of first and last available physical page.
 */
vm_offset_t avail_start;
vm_offset_t avail_end;

/*
 * Map of physical memory regions.
 */
vm_offset_t phys_avail[128];
u_int phys_avail_count;
static struct mem_region regions[128];
static struct ofw_map translations[128];
static int translations_size;

/*
 * First and last available kernel virtual addresses.
 */
vm_offset_t virtual_avail;
vm_offset_t virtual_end;
vm_offset_t kernel_vm_end;

/*
 * Kernel pmap.
 */
struct pmap kernel_pmap_store;
extern struct pmap ofw_pmap;

/*
 * PTEG data.
 */
static struct pteg *pmap_pteg_table;
u_int pmap_pteg_count;
u_int pmap_pteg_mask;

/*
 * PVO data.
 */
struct pvo_head *pmap_pvo_table;                /* pvo entries by pteg index */
struct pvo_head pmap_pvo_kunmanaged =
    LIST_HEAD_INITIALIZER(pmap_pvo_kunmanaged); /* list of unmanaged pages */
struct pvo_head pmap_pvo_unmanaged =
    LIST_HEAD_INITIALIZER(pmap_pvo_unmanaged);  /* list of unmanaged pages */

uma_zone_t pmap_upvo_zone;      /* zone for pvo entries for unmanaged pages */
uma_zone_t pmap_mpvo_zone;      /* zone for pvo entries for managed pages */
struct vm_object pmap_upvo_zone_obj;
struct vm_object pmap_mpvo_zone_obj;
static vm_object_t pmap_pvo_obj;
static u_int pmap_pvo_count;

#define PMAP_PVO_SIZE   1024
static struct pvo_entry *pmap_bpvo_pool;
static int pmap_bpvo_pool_index;
static int pmap_bpvo_pool_count;

#define VSID_NBPW       (sizeof(u_int32_t) * 8)
static u_int pmap_vsid_bitmap[NPMAPS / VSID_NBPW];

static boolean_t pmap_initialized = FALSE;

/*
 * Statistics.
 */
u_int pmap_pte_valid = 0;
u_int pmap_pte_overflow = 0;
u_int pmap_pte_replacements = 0;
u_int pmap_pvo_entries = 0;
u_int pmap_pvo_enter_calls = 0;
u_int pmap_pvo_remove_calls = 0;
u_int pmap_pte_spills = 0;
SYSCTL_INT(_machdep, OID_AUTO, pmap_pte_valid, CTLFLAG_RD, &pmap_pte_valid,
    0, "");
SYSCTL_INT(_machdep, OID_AUTO, pmap_pte_overflow, CTLFLAG_RD,
    &pmap_pte_overflow, 0, "");
SYSCTL_INT(_machdep, OID_AUTO, pmap_pte_replacements, CTLFLAG_RD,
    &pmap_pte_replacements, 0, "");
SYSCTL_INT(_machdep, OID_AUTO, pmap_pvo_entries, CTLFLAG_RD, &pmap_pvo_entries,
    0, "");
SYSCTL_INT(_machdep, OID_AUTO, pmap_pvo_enter_calls, CTLFLAG_RD,
    &pmap_pvo_enter_calls, 0, "");
SYSCTL_INT(_machdep, OID_AUTO, pmap_pvo_remove_calls, CTLFLAG_RD,
    &pmap_pvo_remove_calls, 0, "");
SYSCTL_INT(_machdep, OID_AUTO, pmap_pte_spills, CTLFLAG_RD,
    &pmap_pte_spills, 0, "");

struct pvo_entry *pmap_pvo_zeropage;

vm_offset_t pmap_rkva_start = VM_MIN_KERNEL_ADDRESS;
u_int pmap_rkva_count = 4;

/*
 * Allocate physical memory for use in pmap_bootstrap.
 */
static vm_offset_t pmap_bootstrap_alloc(vm_size_t, u_int);

/*
 * PTE calls.
 */
static int pmap_pte_insert(u_int, struct pte *);

/*
 * PVO calls.
 */
static int pmap_pvo_enter(pmap_t, uma_zone_t, struct pvo_head *,
                vm_offset_t, vm_offset_t, u_int, int);
static void pmap_pvo_remove(struct pvo_entry *, int);
static struct pvo_entry *pmap_pvo_find_va(pmap_t, vm_offset_t, int *);
static struct pte *pmap_pvo_to_pte(const struct pvo_entry *, int);

/*
 * Utility routines.
 */
static void *pmap_pvo_allocf(uma_zone_t, int, u_int8_t *, int);
static struct pvo_entry *pmap_rkva_alloc(void);
static void pmap_pa_map(struct pvo_entry *, vm_offset_t,
                struct pte *, int *);
static void pmap_pa_unmap(struct pvo_entry *, struct pte *, int *);
static void pmap_syncicache(vm_offset_t, vm_size_t);
static boolean_t pmap_query_bit(vm_page_t, int);
static boolean_t pmap_clear_bit(vm_page_t, int);
static void tlbia(void);

static __inline int
va_to_sr(u_int *sr, vm_offset_t va)
{
        return (sr[(uintptr_t)va >> ADDR_SR_SHFT]);
}

static __inline u_int
va_to_pteg(u_int sr, vm_offset_t addr)
{
        u_int hash;

        hash = (sr & SR_VSID_MASK) ^ (((u_int)addr & ADDR_PIDX) >>
            ADDR_PIDX_SHFT);
        return (hash & pmap_pteg_mask);
}
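
/*
 * Worked example of va_to_pteg(), with illustrative values (assuming the
 * usual OEA layout where ADDR_PIDX selects the 16-bit page index within a
 * segment): with pmap_pteg_count == 0x1000 (so pmap_pteg_mask == 0xfff),
 * a segment register holding VSID 0xabc, and va == 0x12345678:
 *
 *      page index = (va & ADDR_PIDX) >> ADDR_PIDX_SHFT = 0x2345
 *      hash       = (0xabc ^ 0x2345) & 0xfff = 0x29f9 & 0xfff = 0x9f9
 *
 * i.e. the primary PTEG is the XOR of the VSID and the page index,
 * truncated to the table size.  The secondary PTEG used on overflow is
 * the complement hash, ptegidx ^ pmap_pteg_mask.
 */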

static __inline struct pvo_head *
pa_to_pvoh(vm_offset_t pa, vm_page_t *pg_p)
{
        struct vm_page *pg;

        pg = PHYS_TO_VM_PAGE(pa);

        if (pg_p != NULL)
                *pg_p = pg;

        if (pg == NULL)
                return (&pmap_pvo_unmanaged);

        return (&pg->md.mdpg_pvoh);
}

static __inline struct pvo_head *
vm_page_to_pvoh(vm_page_t m)
{

        return (&m->md.mdpg_pvoh);
}

static __inline void
pmap_attr_clear(vm_page_t m, int ptebit)
{

        m->md.mdpg_attrs &= ~ptebit;
}

static __inline int
pmap_attr_fetch(vm_page_t m)
{

        return (m->md.mdpg_attrs);
}

static __inline void
pmap_attr_save(vm_page_t m, int ptebit)
{

        m->md.mdpg_attrs |= ptebit;
}

static __inline int
pmap_pte_compare(const struct pte *pt, const struct pte *pvo_pt)
{
        if (pt->pte_hi == pvo_pt->pte_hi)
                return (1);

        return (0);
}

static __inline int
pmap_pte_match(struct pte *pt, u_int sr, vm_offset_t va, int which)
{
        return (pt->pte_hi & ~PTE_VALID) ==
            (((sr & SR_VSID_MASK) << PTE_VSID_SHFT) |
            ((va >> ADDR_API_SHFT) & PTE_API) | which);
}

static __inline void
pmap_pte_create(struct pte *pt, u_int sr, vm_offset_t va, u_int pte_lo)
{
        /*
         * Construct a PTE.  Default to IMB initially.  Valid bit only gets
         * set when the real pte is set in memory.
         *
         * Note: Don't set the valid bit for correct operation of tlb update.
         */
        pt->pte_hi = ((sr & SR_VSID_MASK) << PTE_VSID_SHFT) |
            (((va & ADDR_PIDX) >> ADDR_API_SHFT) & PTE_API);
        pt->pte_lo = pte_lo;
}

static __inline void
pmap_pte_synch(struct pte *pt, struct pte *pvo_pt)
{

        pvo_pt->pte_lo |= pt->pte_lo & (PTE_REF | PTE_CHG);
}

static __inline void
pmap_pte_clear(struct pte *pt, vm_offset_t va, int ptebit)
{

        /*
         * As shown in Section 7.6.3.2.3
         */
        pt->pte_lo &= ~ptebit;
        TLBIE(va);
        EIEIO();
        TLBSYNC();
        SYNC();
}

static __inline void
pmap_pte_set(struct pte *pt, struct pte *pvo_pt)
{

        pvo_pt->pte_hi |= PTE_VALID;

        /*
         * Update the PTE as defined in section 7.6.3.1.
         * Note that the REF/CHG bits are from pvo_pt and thus should have
         * been saved so this routine can restore them (if desired).
         */
        pt->pte_lo = pvo_pt->pte_lo;
        EIEIO();
        pt->pte_hi = pvo_pt->pte_hi;
        SYNC();
        pmap_pte_valid++;
}

static __inline void
pmap_pte_unset(struct pte *pt, struct pte *pvo_pt, vm_offset_t va)
{

        pvo_pt->pte_hi &= ~PTE_VALID;

        /*
         * Force the ref & chg bits back into the PTEs.
         */
        SYNC();

        /*
         * Invalidate the pte.
         */
        pt->pte_hi &= ~PTE_VALID;

        SYNC();
        TLBIE(va);
        EIEIO();
        TLBSYNC();
        SYNC();

        /*
         * Save the ref & chg bits.
         */
        pmap_pte_synch(pt, pvo_pt);
        pmap_pte_valid--;
}

static __inline void
pmap_pte_change(struct pte *pt, struct pte *pvo_pt, vm_offset_t va)
{

        /*
         * Invalidate the PTE
         */
        pmap_pte_unset(pt, pvo_pt, va);
        pmap_pte_set(pt, pvo_pt);
}
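
/*
 * The ordering in pmap_pte_set()/pmap_pte_unset() above follows the page
 * table update sequences the PEM requires (the sections cited in the
 * comments): when making a PTE valid, pte_lo is written first and an eieio
 * issued so the RPN and protection bits are visible in memory before the
 * valid bit in pte_hi can be observed by a hardware table walk; when
 * invalidating, the valid bit is cleared and the TLB entry flushed
 * (tlbie/tlbsync) before the REF/CHG bits are harvested, so no other CPU
 * can set them behind our back.
 */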

/*
 * Quick sort callout for comparing memory regions.
 */
static int mr_cmp(const void *a, const void *b);
static int om_cmp(const void *a, const void *b);

static int
mr_cmp(const void *a, const void *b)
{
        const struct mem_region *regiona;
        const struct mem_region *regionb;

        regiona = a;
        regionb = b;
        if (regiona->mr_start < regionb->mr_start)
                return (-1);
        else if (regiona->mr_start > regionb->mr_start)
                return (1);
        else
                return (0);
}

static int
om_cmp(const void *a, const void *b)
{
        const struct ofw_map *mapa;
        const struct ofw_map *mapb;

        mapa = a;
        mapb = b;
        if (mapa->om_pa < mapb->om_pa)
                return (-1);
        else if (mapa->om_pa > mapb->om_pa)
                return (1);
        else
                return (0);
}

void
pmap_bootstrap(vm_offset_t kernelstart, vm_offset_t kernelend)
{
        ihandle_t pmem, mmui;
        phandle_t chosen, mmu;
        int sz;
        int i, j;
        vm_size_t size, physsz;
        vm_offset_t pa, va, off;
        u_int batl, batu;

        /*
         * Use an IBAT and a DBAT to map the bottom segment of memory
         * where we are.
         */
        batu = BATU(0x00000000, BAT_BL_256M, BAT_Vs);
        batl = BATL(0x00000000, BAT_M, BAT_PP_RW);
        __asm ("mtibatu 0,%0; mtibatl 0,%1; mtdbatu 0,%0; mtdbatl 0,%1"
            :: "r"(batu), "r"(batl));
#if 0
        batu = BATU(0x80000000, BAT_BL_256M, BAT_Vs);
        batl = BATL(0x80000000, BAT_M, BAT_PP_RW);
        __asm ("mtibatu 1,%0; mtibatl 1,%1; mtdbatu 1,%0; mtdbatl 1,%1"
            :: "r"(batu), "r"(batl));
#endif
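
        /*
         * A note on the BAT values above, assuming the usual meanings of
         * the bat.h macros: BATU(0, BAT_BL_256M, BAT_Vs) yields 0x00001ffe,
         * a supervisor-valid 256MB block at effective address 0, and
         * BATL(0, BAT_M, BAT_PP_RW) yields 0x00000012, pointing it at
         * physical address 0 with memory coherence and read/write access.
         * This 1:1 window is what lets the code below dereference physical
         * addresses (e.g. the PTEG table) before paging is fully set up.
         */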

        /*
         * Set the start and end of kva.
         */
        virtual_avail = VM_MIN_KERNEL_ADDRESS;
        virtual_end = VM_MAX_KERNEL_ADDRESS;

        if ((pmem = OF_finddevice("/memory")) == -1)
                panic("pmap_bootstrap: can't locate memory device");
        if ((sz = OF_getproplen(pmem, "available")) == -1)
                panic("pmap_bootstrap: can't get length of available memory");
        if (sizeof(phys_avail) < sz)
                panic("pmap_bootstrap: phys_avail too small");
        if (sizeof(regions) < sz)
                panic("pmap_bootstrap: regions too small");
        bzero(regions, sz);
        if (OF_getprop(pmem, "available", regions, sz) == -1)
                panic("pmap_bootstrap: can't get available memory");
        sz /= sizeof(*regions);
        CTR0(KTR_PMAP, "pmap_bootstrap: physical memory");
        qsort(regions, sz, sizeof(*regions), mr_cmp);
        phys_avail_count = 0;
        physsz = 0;
        for (i = 0, j = 0; i < sz; i++, j += 2) {
                CTR3(KTR_PMAP, "region: %#x - %#x (%#x)", regions[i].mr_start,
                    regions[i].mr_start + regions[i].mr_size,
                    regions[i].mr_size);
                phys_avail[j] = regions[i].mr_start;
                phys_avail[j + 1] = regions[i].mr_start + regions[i].mr_size;
                phys_avail_count++;
                physsz += regions[i].mr_size;
        }
        physmem = btoc(physsz);

        /*
         * Allocate PTEG table.
         */
#ifdef PTEGCOUNT
        pmap_pteg_count = PTEGCOUNT;
#else
        pmap_pteg_count = 0x1000;

        while (pmap_pteg_count < physmem)
                pmap_pteg_count <<= 1;

        pmap_pteg_count >>= 1;
#endif /* PTEGCOUNT */

        size = pmap_pteg_count * sizeof(struct pteg);
        CTR2(KTR_PMAP, "pmap_bootstrap: %d PTEGs, %d bytes", pmap_pteg_count,
            size);
        pmap_pteg_table = (struct pteg *)pmap_bootstrap_alloc(size, size);
        CTR1(KTR_PMAP, "pmap_bootstrap: PTEG table at %p", pmap_pteg_table);
        bzero((void *)pmap_pteg_table, pmap_pteg_count * sizeof(struct pteg));
        pmap_pteg_mask = pmap_pteg_count - 1;

        /*
         * Allocate pv/overflow lists.
         */
        size = sizeof(struct pvo_head) * pmap_pteg_count;
        pmap_pvo_table = (struct pvo_head *)pmap_bootstrap_alloc(size,
            PAGE_SIZE);
        CTR1(KTR_PMAP, "pmap_bootstrap: PVO table at %p", pmap_pvo_table);
        for (i = 0; i < pmap_pteg_count; i++)
                LIST_INIT(&pmap_pvo_table[i]);

        /*
         * Allocate the message buffer.
         */
        msgbuf_phys = pmap_bootstrap_alloc(MSGBUF_SIZE, 0);

        /*
         * Initialise the unmanaged pvo pool.
         */
        pmap_bpvo_pool = (struct pvo_entry *)pmap_bootstrap_alloc(PAGE_SIZE, 0);
        pmap_bpvo_pool_index = 0;
        pmap_bpvo_pool_count = (int)PAGE_SIZE / sizeof(struct pvo_entry);
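
        /*
         * The pool initialised above exists to break a bootstrap cycle:
         * entering any mapping requires a pvo_entry, but the UMA zones
         * that normally supply them (created later in pmap_init2()) need
         * working mappings themselves.  Until pmap_initialized is set,
         * pmap_pvo_enter() hands out entries from this statically carved
         * page instead, tagging them PVO_BOOTSTRAP so they are never
         * returned to a zone.
         */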

        /*
         * Make sure kernel vsid is allocated as well as VSID 0.
         */
        pmap_vsid_bitmap[(KERNEL_VSIDBITS & (NPMAPS - 1)) / VSID_NBPW]
                |= 1 << (KERNEL_VSIDBITS % VSID_NBPW);
        pmap_vsid_bitmap[0] |= 1;

        /*
         * Set up the OpenFirmware pmap and add its mappings.
         */
        pmap_pinit(&ofw_pmap);
        ofw_pmap.pm_sr[KERNEL_SR] = KERNEL_SEGMENT;
        if ((chosen = OF_finddevice("/chosen")) == -1)
                panic("pmap_bootstrap: can't find /chosen");
        OF_getprop(chosen, "mmu", &mmui, 4);
        if ((mmu = OF_instance_to_package(mmui)) == -1)
                panic("pmap_bootstrap: can't get mmu package");
        if ((sz = OF_getproplen(mmu, "translations")) == -1)
                panic("pmap_bootstrap: can't get ofw translation count");
        if (sizeof(translations) < sz)
                panic("pmap_bootstrap: translations too small");
        bzero(translations, sz);
        if (OF_getprop(mmu, "translations", translations, sz) == -1)
                panic("pmap_bootstrap: can't get ofw translations");
        CTR0(KTR_PMAP, "pmap_bootstrap: translations");
        sz /= sizeof(*translations);
        qsort(translations, sz, sizeof (*translations), om_cmp);
        for (i = 0; i < sz; i++) {
                CTR3(KTR_PMAP, "translation: pa=%#x va=%#x len=%#x",
                    translations[i].om_pa, translations[i].om_va,
                    translations[i].om_len);

                /* Drop stuff below something? */

                /* Enter the pages? */
                for (off = 0; off < translations[i].om_len; off += PAGE_SIZE) {
                        struct vm_page m;

                        m.phys_addr = translations[i].om_pa + off;
                        pmap_enter(&ofw_pmap, translations[i].om_va + off, &m,
                            VM_PROT_ALL, 1);
                }
        }
#ifdef SMP
        TLBSYNC();
#endif

        /*
         * Initialize the kernel pmap (which is statically allocated).
         */
        for (i = 0; i < 16; i++) {
                kernel_pmap->pm_sr[i] = EMPTY_SEGMENT;
        }
        kernel_pmap->pm_sr[KERNEL_SR] = KERNEL_SEGMENT;
        kernel_pmap->pm_active = ~0;

        /*
         * Allocate a kernel stack with a guard page for thread0 and map it
         * into the kernel page map.
         */
        pa = pmap_bootstrap_alloc(KSTACK_PAGES * PAGE_SIZE, 0);
        kstack0_phys = pa;
        kstack0 = virtual_avail + (KSTACK_GUARD_PAGES * PAGE_SIZE);
        CTR2(KTR_PMAP, "pmap_bootstrap: kstack0 at %#x (%#x)", kstack0_phys,
            kstack0);
        virtual_avail += (KSTACK_PAGES + KSTACK_GUARD_PAGES) * PAGE_SIZE;
        for (i = 0; i < KSTACK_PAGES; i++) {
                pa = kstack0_phys + i * PAGE_SIZE;
                va = kstack0 + i * PAGE_SIZE;
                pmap_kenter(va, pa);
                TLBIE(va);
        }
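
        /*
         * The thread0 stack is entered with pmap_kenter() before first use
         * because a data storage exception on the active kernel stack would
         * be unrecoverable.  Note that only the KSTACK_PAGES are mapped:
         * the KSTACK_GUARD_PAGES below kstack0 stay unmapped on purpose,
         * so a kernel stack overflow faults immediately instead of silently
         * overwriting whatever sits below the stack.
         */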

        /*
         * Calculate the first and last available physical addresses.
         */
        avail_start = phys_avail[0];
        for (i = 0; phys_avail[i + 2] != 0; i += 2)
                ;
        avail_end = phys_avail[i + 1];
        Maxmem = powerpc_btop(avail_end);

        /*
         * Allocate virtual address space for the message buffer.
         */
        msgbufp = (struct msgbuf *)virtual_avail;
        virtual_avail += round_page(MSGBUF_SIZE);

        /*
         * Initialize hardware.
         */
        for (i = 0; i < 16; i++) {
                mtsrin(i << ADDR_SR_SHFT, EMPTY_SEGMENT);
        }
        __asm __volatile ("mtsr %0,%1"
            :: "n"(KERNEL_SR), "r"(KERNEL_SEGMENT));
        __asm __volatile ("sync; mtsdr1 %0; isync"
            :: "r"((u_int)pmap_pteg_table | (pmap_pteg_mask >> 10)));
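
        /*
         * SDR1 is HTABORG | HTABMASK: the physical base of the PTEG table
         * or'd with the mask of hash bits beyond the minimum that select a
         * PTEG.  Illustrative numbers, with the default pmap_pteg_count of
         * 0x1000: the table is 256KB (0x1000 PTEGs * 64 bytes), four times
         * the 64KB minimum, so pmap_pteg_mask == 0xfff and
         * pmap_pteg_mask >> 10 == 0x3, telling the table-walk hardware that
         * two additional hash bits participate in PTEG selection.
         */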
        tlbia();

        pmap_bootstrapped++;
}

/*
 * Activate a user pmap.  The pmap must be activated before its address
 * space can be accessed in any way.
 */
void
pmap_activate(struct thread *td)
{
        pmap_t pm, pmr;

        /*
         * Load all the data we need up front to encourage the compiler to
         * not issue any loads while we have interrupts disabled below.
         */
        pm = &td->td_proc->p_vmspace->vm_pmap;

        KASSERT(pm->pm_active == 0, ("pmap_activate: pmap already active?"));

        if ((pmr = (pmap_t)pmap_kextract((vm_offset_t)pm)) == NULL)
                pmr = pm;

        pm->pm_active |= PCPU_GET(cpumask);
        PCPU_SET(curpmap, pmr);
}

void
pmap_deactivate(struct thread *td)
{
        pmap_t pm;

        pm = &td->td_proc->p_vmspace->vm_pmap;
        pm->pm_active &= ~(PCPU_GET(cpumask));
        PCPU_SET(curpmap, NULL);
}

vm_offset_t
pmap_addr_hint(vm_object_t object, vm_offset_t va, vm_size_t size)
{

        return (va);
}

void
pmap_change_wiring(pmap_t pm, vm_offset_t va, boolean_t wired)
{
        struct pvo_entry *pvo;

        pvo = pmap_pvo_find_va(pm, va & ~ADDR_POFF, NULL);

        if (pvo != NULL) {
                if (wired) {
                        if ((pvo->pvo_vaddr & PVO_WIRED) == 0)
                                pm->pm_stats.wired_count++;
                        pvo->pvo_vaddr |= PVO_WIRED;
                } else {
                        if ((pvo->pvo_vaddr & PVO_WIRED) != 0)
                                pm->pm_stats.wired_count--;
                        pvo->pvo_vaddr &= ~PVO_WIRED;
                }
        }
}

void
pmap_clear_modify(vm_page_t m)
{

        if (m->flags & PG_FICTITIOUS)
                return;
        pmap_clear_bit(m, PTE_CHG);
}

void
pmap_collect(void)
{
        TODO;
}

void
pmap_copy(pmap_t dst_pmap, pmap_t src_pmap, vm_offset_t dst_addr,
    vm_size_t len, vm_offset_t src_addr)
{
        TODO;
}

void
pmap_copy_page(vm_page_t src, vm_page_t dst)
{
        TODO;
}

/*
 * Zero a page of physical memory by temporarily mapping it into the tlb.
 */
void
pmap_zero_page(vm_page_t m)
{
        vm_offset_t pa = VM_PAGE_TO_PHYS(m);
        caddr_t va;
        int i;

        if (pa < SEGMENT_LENGTH) {
                va = (caddr_t) pa;
        } else if (pmap_initialized) {
                if (pmap_pvo_zeropage == NULL)
                        pmap_pvo_zeropage = pmap_rkva_alloc();
                pmap_pa_map(pmap_pvo_zeropage, pa, NULL, NULL);
                va = (caddr_t)PVO_VADDR(pmap_pvo_zeropage);
        } else {
                panic("pmap_zero_page: can't zero pa %#x", pa);
        }

        bzero(va, PAGE_SIZE);

        for (i = PAGE_SIZE / CACHELINESIZE; i > 0; i--) {
                __asm __volatile("dcbz 0,%0" :: "r"(va));
                va += CACHELINESIZE;
        }

        if (pa >= SEGMENT_LENGTH)
                pmap_pa_unmap(pmap_pvo_zeropage, NULL, NULL);
}
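
/*
 * A note on pmap_zero_page() above: pages below SEGMENT_LENGTH (256MB) can
 * be touched through the 1:1 BAT window directly; anything higher has to
 * be borrowed into kernel VA via the reserved zeropage pvo.  The dcbz loop
 * zeroes a full cache block per iteration and assumes CACHELINESIZE evenly
 * divides PAGE_SIZE; since the page was already bzero'd, the dcbz pass
 * mainly establishes the lines in the data cache as zeroed without reading
 * them back from memory.
 */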

void
pmap_zero_page_area(vm_page_t m, int off, int size)
{
        TODO;
}

/*
 * Map the given physical page at the specified virtual address in the
 * target pmap with the protection requested.  If specified the page
 * will be wired down.
 */
void
pmap_enter(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot,
    boolean_t wired)
{
        struct pvo_head *pvo_head;
        uma_zone_t zone;
        vm_page_t pg;
        u_int pte_lo, pvo_flags, was_exec, i;
        int error;

        if (!pmap_initialized) {
                pvo_head = &pmap_pvo_kunmanaged;
                zone = pmap_upvo_zone;
                pvo_flags = 0;
                pg = NULL;
                was_exec = PTE_EXEC;
        } else {
                pvo_head = pa_to_pvoh(VM_PAGE_TO_PHYS(m), &pg);
                zone = pmap_mpvo_zone;
                pvo_flags = PVO_MANAGED;
                was_exec = 0;
        }

        /*
         * If this is a managed page, and it's the first reference to the page,
         * clear the execness of the page.  Otherwise fetch the execness.
         */
        if (pg != NULL) {
                if (LIST_EMPTY(pvo_head)) {
                        pmap_attr_clear(pg, PTE_EXEC);
                } else {
                        was_exec = pmap_attr_fetch(pg) & PTE_EXEC;
                }
        }

        /*
         * Assume the page is cache inhibited and access is guarded unless
         * it's in our available memory array.
         */
        pte_lo = PTE_I | PTE_G;
        for (i = 0; i < (phys_avail_count * 2); i += 2) {
                if (VM_PAGE_TO_PHYS(m) >= phys_avail[i] &&
                    VM_PAGE_TO_PHYS(m) <= phys_avail[i + 1]) {
                        pte_lo &= ~(PTE_I | PTE_G);
                        break;
                }
        }

        if (prot & VM_PROT_WRITE)
                pte_lo |= PTE_BW;
        else
                pte_lo |= PTE_BR;

        pvo_flags |= (prot & VM_PROT_EXECUTE);

        if (wired)
                pvo_flags |= PVO_WIRED;

        error = pmap_pvo_enter(pmap, zone, pvo_head, va, VM_PAGE_TO_PHYS(m),
            pte_lo, pvo_flags);

        /*
         * Flush the real page from the instruction cache if this page is
         * mapped executable and cacheable and was not previously mapped (or
         * was not mapped executable).
         */
        if (error == 0 && (prot & VM_PROT_EXECUTE) &&
            (pte_lo & PTE_I) == 0 && was_exec == 0) {
                /*
                 * Flush the real memory from the cache.
                 */
                pmap_syncicache(VM_PAGE_TO_PHYS(m), PAGE_SIZE);
                if (pg != NULL)
                        pmap_attr_save(pg, PTE_EXEC);
        }
}
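
/*
 * PowerPC keeps no hardware coherence between the data and instruction
 * caches, so pmap_enter() above has to flush a page to memory and
 * invalidate its icache lines (pmap_syncicache()) the first time the page
 * becomes executable.  The PTE_EXEC attribute cached in the vm_page via
 * pmap_attr_save() records that this has been done, letting later
 * executable mappings of the same physical page skip the flush.
 */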

vm_offset_t
pmap_extract(pmap_t pm, vm_offset_t va)
{
        struct pvo_entry *pvo;

        pvo = pmap_pvo_find_va(pm, va & ~ADDR_POFF, NULL);

        if (pvo != NULL) {
                return ((pvo->pvo_pte.pte_lo & PTE_RPGN) | (va & ADDR_POFF));
        }

        return (0);
}

/*
 * Grow the number of kernel page table entries.  Unneeded.
 */
void
pmap_growkernel(vm_offset_t addr)
{
}

void
pmap_init(vm_offset_t phys_start, vm_offset_t phys_end)
{

        CTR0(KTR_PMAP, "pmap_init");
}

void
pmap_init2(void)
{

        CTR0(KTR_PMAP, "pmap_init2");

        pmap_pvo_obj = vm_object_allocate(OBJT_PHYS, 16);
        pmap_pvo_count = 0;
        pmap_upvo_zone = uma_zcreate("UPVO entry", sizeof (struct pvo_entry),
            NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 0);
        uma_zone_set_allocf(pmap_upvo_zone, pmap_pvo_allocf);
        pmap_mpvo_zone = uma_zcreate("MPVO entry", sizeof(struct pvo_entry),
            NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 0);
        uma_zone_set_allocf(pmap_mpvo_zone, pmap_pvo_allocf);
        pmap_initialized = TRUE;
}

boolean_t
pmap_is_modified(vm_page_t m)
{

        if (m->flags & PG_FICTITIOUS)
                return (FALSE);

        return (pmap_query_bit(m, PTE_CHG));
}

void
pmap_clear_reference(vm_page_t m)
{
        TODO;
}

/*
 * pmap_ts_referenced:
 *
 *      Return a count of reference bits for a page, clearing those bits.
 *      It is not necessary for every reference bit to be cleared, but it
 *      is necessary that 0 only be returned when there are truly no
 *      reference bits set.
 *
 *      XXX: The exact number of bits to check and clear is a matter that
 *      should be tested and standardized at some point in the future for
 *      optimal aging of shared pages.
 */
int
pmap_ts_referenced(vm_page_t m)
{
        TODO;
        return (0);
}

/*
 * Map a wired page into kernel virtual address space.
 */
void
pmap_kenter(vm_offset_t va, vm_offset_t pa)
{
        u_int pte_lo;
        int error;
        int i;

#if 0
        if (va < VM_MIN_KERNEL_ADDRESS)
                panic("pmap_kenter: attempt to enter non-kernel address %#x",
                    va);
#endif

        pte_lo = PTE_I | PTE_G | PTE_BW;
        for (i = 0; phys_avail[i + 2] != 0; i += 2) {
                if (pa >= phys_avail[i] && pa < phys_avail[i + 1]) {
                        pte_lo &= ~(PTE_I | PTE_G);
                        break;
                }
        }

        error = pmap_pvo_enter(kernel_pmap, pmap_upvo_zone,
            &pmap_pvo_kunmanaged, va, pa, pte_lo, PVO_WIRED);

        if (error != 0 && error != ENOENT)
                panic("pmap_kenter: failed to enter va %#x pa %#x: %d", va,
                    pa, error);

        /*
         * Flush the real memory from the instruction cache.
         */
        if ((pte_lo & (PTE_I | PTE_G)) == 0) {
                pmap_syncicache(pa, PAGE_SIZE);
        }
}

/*
 * Extract the physical page address associated with the given kernel virtual
 * address.
 */
vm_offset_t
pmap_kextract(vm_offset_t va)
{
        struct pvo_entry *pvo;

        pvo = pmap_pvo_find_va(kernel_pmap, va & ~ADDR_POFF, NULL);
        if (pvo == NULL) {
                return (0);
        }

        return ((pvo->pvo_pte.pte_lo & PTE_RPGN) | (va & ADDR_POFF));
}

/*
 * Remove a wired page from kernel virtual address space.
 */
void
pmap_kremove(vm_offset_t va)
{

        pmap_remove(kernel_pmap, va, va + PAGE_SIZE);
}

/*
 * Map a range of physical addresses into kernel virtual address space.
 *
 * The value passed in *virt is a suggested virtual address for the mapping.
 * Architectures which can support a direct-mapped physical to virtual region
 * can return the appropriate address within that region, leaving '*virt'
 * unchanged.  We cannot and therefore do not; *virt is updated with the
 * first usable address after the mapped region.
 */
vm_offset_t
pmap_map(vm_offset_t *virt, vm_offset_t pa_start, vm_offset_t pa_end, int prot)
{
        vm_offset_t sva, va;

        sva = *virt;
        va = sva;
        for (; pa_start < pa_end; pa_start += PAGE_SIZE, va += PAGE_SIZE)
                pmap_kenter(va, pa_start);
        *virt = va;
        return (sva);
}
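
#if 0
/*
 * Usage sketch for pmap_map() (illustrative values, not part of the
 * build): wire four pages of physical memory into KVA during startup.
 */
static void
pmap_map_example(void)
{
        vm_offset_t va, sva;

        va = virtual_avail;
        sva = pmap_map(&va, 0x00100000, 0x00104000,
            VM_PROT_READ | VM_PROT_WRITE);
        /* sva is the start of the new mapping; va advanced by 4 pages. */
        virtual_avail = va;
}
#endif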

int
pmap_mincore(pmap_t pmap, vm_offset_t addr)
{
        TODO;
        return (0);
}

/*
 * Create the uarea for a new process.
 * This routine directly affects the fork perf for a process.
 */
void
pmap_new_proc(struct proc *p)
{
        vm_object_t upobj;
        vm_offset_t up;
        vm_page_t m;
        u_int i;

        /*
         * Allocate the object for the upages.
         */
        upobj = p->p_upages_obj;
        if (upobj == NULL) {
                upobj = vm_object_allocate(OBJT_DEFAULT, UAREA_PAGES);
                p->p_upages_obj = upobj;
        }

        /*
         * Get a kernel virtual address for the uarea for this process.
         */
        up = (vm_offset_t)p->p_uarea;
        if (up == 0) {
                up = kmem_alloc_nofault(kernel_map, UAREA_PAGES * PAGE_SIZE);
                if (up == 0)
                        panic("pmap_new_proc: upage allocation failed");
                p->p_uarea = (struct user *)up;
        }

        for (i = 0; i < UAREA_PAGES; i++) {
                /*
                 * Get a uarea page.
                 */
                m = vm_page_grab(upobj, i, VM_ALLOC_NORMAL | VM_ALLOC_RETRY);

                /*
                 * Wire the page.
                 */
                m->wire_count++;

                /*
                 * Enter the page into the kernel address space.
                 */
                pmap_kenter(up + i * PAGE_SIZE, VM_PAGE_TO_PHYS(m));

                vm_page_wakeup(m);
                vm_page_flag_clear(m, PG_ZERO);
                vm_page_flag_set(m, PG_MAPPED | PG_WRITEABLE);
                m->valid = VM_PAGE_BITS_ALL;
        }
}

void
pmap_object_init_pt(pmap_t pm, vm_offset_t addr, vm_object_t object,
    vm_pindex_t pindex, vm_size_t size, int limit)
{

        KASSERT(pm == &curproc->p_vmspace->vm_pmap || pm == kernel_pmap,
            ("pmap_object_init_pt: non current pmap"));
        /* XXX */
}

/*
 * Lower the permission for all mappings to a given page.
 */
void
pmap_page_protect(vm_page_t m, vm_prot_t prot)
{
        struct pvo_head *pvo_head;
        struct pvo_entry *pvo, *next_pvo;
        struct pte *pt;

        /*
         * Since the routine only downgrades protection, if the
         * maximal protection is desired, there isn't any change
         * to be made.
         */
        if ((prot & (VM_PROT_READ|VM_PROT_WRITE)) ==
            (VM_PROT_READ|VM_PROT_WRITE))
                return;

        pvo_head = vm_page_to_pvoh(m);
        for (pvo = LIST_FIRST(pvo_head); pvo != NULL; pvo = next_pvo) {
                next_pvo = LIST_NEXT(pvo, pvo_vlink);
                PMAP_PVO_CHECK(pvo);    /* sanity check */

                /*
                 * Downgrading to no mapping at all, we just remove the entry.
                 */
                if ((prot & VM_PROT_READ) == 0) {
                        pmap_pvo_remove(pvo, -1);
                        continue;
                }

                /*
                 * If EXEC permission is being revoked, just clear the flag
                 * in the PVO.
                 */
                if ((prot & VM_PROT_EXECUTE) == 0)
                        pvo->pvo_vaddr &= ~PVO_EXECUTABLE;

                /*
                 * If this entry is already RO, don't diddle with the page
                 * table.
                 */
                if ((pvo->pvo_pte.pte_lo & PTE_PP) == PTE_BR) {
                        PMAP_PVO_CHECK(pvo);
                        continue;
                }

                /*
                 * Grab the PTE before we diddle the bits so pvo_to_pte can
                 * verify the pte contents are as expected.
                 */
                pt = pmap_pvo_to_pte(pvo, -1);
                pvo->pvo_pte.pte_lo &= ~PTE_PP;
                pvo->pvo_pte.pte_lo |= PTE_BR;
                if (pt != NULL)
                        pmap_pte_change(pt, &pvo->pvo_pte, pvo->pvo_vaddr);
                PMAP_PVO_CHECK(pvo);    /* sanity check */
        }
}

/*
 * Make the specified page pageable (or not).  Unneeded.
 */
void
pmap_pageable(pmap_t pmap, vm_offset_t sva, vm_offset_t eva,
    boolean_t pageable)
{
}

/*
 * Returns true if the pmap's pv is one of the first
 * 16 pvs linked to from this page.  This count may
 * be changed upwards or downwards in the future; it
 * is only necessary that true be returned for a small
 * subset of pmaps for proper page aging.
 */
boolean_t
pmap_page_exists_quick(pmap_t pmap, vm_page_t m)
{
        TODO;
        return (0);
}

static u_int pmap_vsidcontext;

void
pmap_pinit(pmap_t pmap)
{
        int i, mask;
        u_int entropy;

        entropy = 0;
        __asm __volatile("mftb %0" : "=r"(entropy));

        /*
         * Allocate some segment registers for this pmap.
         */
        for (i = 0; i < NPMAPS; i += VSID_NBPW) {
                u_int hash, n;

                /*
                 * Create a new value by multiplying by a prime and adding in
                 * entropy from the timebase register.  This is to make the
                 * VSID more random so that the PT hash function collides
                 * less often.  (Note that the prime causes gcc to do shifts
                 * instead of a multiply.)
                 */
                pmap_vsidcontext = (pmap_vsidcontext * 0x1105) + entropy;
                hash = pmap_vsidcontext & (NPMAPS - 1);
                if (hash == 0)          /* 0 is special, avoid it */
                        continue;
                n = hash >> 5;
                mask = 1 << (hash & (VSID_NBPW - 1));
                hash = (pmap_vsidcontext & 0xfffff);
                if (pmap_vsid_bitmap[n] & mask) {       /* collision? */
                        /* anything free in this bucket? */
                        if (pmap_vsid_bitmap[n] == 0xffffffff) {
                                entropy = (pmap_vsidcontext >> 20);
                                continue;
                        }
                        i = ffs(~pmap_vsid_bitmap[n]) - 1;
                        mask = 1 << i;
                        hash &= 0xfffff & ~(VSID_NBPW - 1);
                        hash |= i;
                }
                pmap_vsid_bitmap[n] |= mask;
                for (i = 0; i < 16; i++)
                        pmap->pm_sr[i] = VSID_MAKE(i, hash);
                return;
        }

        panic("pmap_pinit: out of segments");
}
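
/*
 * A worked pass through the allocator above (illustrative numbers; NPMAPS
 * and the constants come from the headers): suppose the hashed context
 * value gives hash == 0x4b.  Then n == 0x4b >> 5 == 2 selects the third
 * word of pmap_vsid_bitmap, and mask == 1 << (0x4b & 31) == 1 << 11 marks
 * the slot allocated.  The 20-bit VSID base is then stamped into all 16
 * segment registers as VSID_MAKE(i, hash), so the segments of one pmap
 * differ only in their low 4 bits.  Note that n = hash >> 5 hard-codes
 * VSID_NBPW == 32.
 */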

/*
 * Initialize the pmap associated with process 0.
 */
void
pmap_pinit0(pmap_t pm)
{

        pmap_pinit(pm);
        bzero(&pm->pm_stats, sizeof(pm->pm_stats));
}

void
pmap_pinit2(pmap_t pmap)
{
        /* XXX: Remove this stub when no longer called */
}

void
pmap_prefault(pmap_t pm, vm_offset_t va, vm_map_entry_t entry)
{
        KASSERT(pm == &curproc->p_vmspace->vm_pmap || pm == kernel_pmap,
            ("pmap_prefault: non current pmap"));
        /* XXX */
}

/*
 * Set the physical protection on the specified range of this map as requested.
 */
void
pmap_protect(pmap_t pm, vm_offset_t sva, vm_offset_t eva, vm_prot_t prot)
{
        struct pvo_entry *pvo;
        struct pte *pt;
        int pteidx;

        CTR4(KTR_PMAP, "pmap_protect: pm=%p sva=%#x eva=%#x prot=%#x", pm, sva,
            eva, prot);

        KASSERT(pm == &curproc->p_vmspace->vm_pmap || pm == kernel_pmap,
            ("pmap_protect: non current pmap"));

        if ((prot & VM_PROT_READ) == VM_PROT_NONE) {
                pmap_remove(pm, sva, eva);
                return;
        }

        for (; sva < eva; sva += PAGE_SIZE) {
                pvo = pmap_pvo_find_va(pm, sva, &pteidx);
                if (pvo == NULL)
                        continue;

                if ((prot & VM_PROT_EXECUTE) == 0)
                        pvo->pvo_vaddr &= ~PVO_EXECUTABLE;

                /*
                 * Grab the PTE pointer before we diddle with the cached PTE
                 * copy.
                 */
                pt = pmap_pvo_to_pte(pvo, pteidx);
                /*
                 * Change the protection of the page.
                 */
                pvo->pvo_pte.pte_lo &= ~PTE_PP;
                pvo->pvo_pte.pte_lo |= PTE_BR;

                /*
                 * If the PVO is in the page table, update that pte as well.
                 */
                if (pt != NULL)
                        pmap_pte_change(pt, &pvo->pvo_pte, pvo->pvo_vaddr);
        }
}

vm_offset_t
pmap_phys_address(int ppn)
{
        TODO;
        return (0);
}

/*
 * Map a list of wired pages into kernel virtual address space.  This is
 * intended for temporary mappings which do not need page modification or
 * references recorded.  Existing mappings in the region are overwritten.
 */
void
pmap_qenter(vm_offset_t va, vm_page_t *m, int count)
{
        int i;

        for (i = 0; i < count; i++, va += PAGE_SIZE)
                pmap_kenter(va, VM_PAGE_TO_PHYS(m[i]));
}

/*
 * Remove page mappings from kernel virtual address space.  Intended for
 * temporary mappings entered by pmap_qenter.
 */
void
pmap_qremove(vm_offset_t va, int count)
{
        int i;

        for (i = 0; i < count; i++, va += PAGE_SIZE)
                pmap_kremove(va);
}

void
pmap_release(pmap_t pmap)
{
        TODO;
}

/*
 * Remove the given range of addresses from the specified map.
 */
void
pmap_remove(pmap_t pm, vm_offset_t sva, vm_offset_t eva)
{
        struct pvo_entry *pvo;
        int pteidx;

        for (; sva < eva; sva += PAGE_SIZE) {
                pvo = pmap_pvo_find_va(pm, sva, &pteidx);
                if (pvo != NULL) {
                        pmap_pvo_remove(pvo, pteidx);
                }
        }
}

/*
 * Remove all pages from specified address space, this aids process exit
 * speeds.  This is much faster than pmap_remove in the case of running down
 * an entire address space.  Only works for the current pmap.
 */
void
pmap_remove_pages(pmap_t pm, vm_offset_t sva, vm_offset_t eva)
{

        KASSERT(pm == &curproc->p_vmspace->vm_pmap || pm == kernel_pmap,
            ("pmap_remove_pages: non current pmap"));
        pmap_remove(pm, sva, eva);
}

void
pmap_swapin_proc(struct proc *p)
{
        TODO;
}

void
pmap_swapout_proc(struct proc *p)
{
        TODO;
}

/*
 * Create the kernel stack and pcb for a new thread.
 * This routine directly affects the fork perf for a process and
 * create performance for a thread.
 */
void
pmap_new_thread(struct thread *td)
{
        vm_object_t ksobj;
        vm_offset_t ks;
        vm_page_t m;
        u_int i;

        /*
         * Allocate object for the kstack.
         */
        ksobj = td->td_kstack_obj;
        if (ksobj == NULL) {
                ksobj = vm_object_allocate(OBJT_DEFAULT, KSTACK_PAGES);
                td->td_kstack_obj = ksobj;
        }

        /*
         * Get a kernel virtual address for the kstack for this thread.
         */
        ks = td->td_kstack;
        if (ks == 0) {
                ks = kmem_alloc_nofault(kernel_map,
                    (KSTACK_PAGES + KSTACK_GUARD_PAGES) * PAGE_SIZE);
                if (ks == 0)
                        panic("pmap_new_thread: kstack allocation failed");
                TLBIE(ks);
                ks += KSTACK_GUARD_PAGES * PAGE_SIZE;
                td->td_kstack = ks;
        }

        for (i = 0; i < KSTACK_PAGES; i++) {
                /*
                 * Get a kernel stack page.
                 */
                m = vm_page_grab(ksobj, i, VM_ALLOC_NORMAL | VM_ALLOC_RETRY);

                /*
                 * Wire the page.
                 */
                m->wire_count++;

                /*
                 * Enter the page into the kernel address space.
                 */
                pmap_kenter(ks + i * PAGE_SIZE, VM_PAGE_TO_PHYS(m));

                vm_page_wakeup(m);
                vm_page_flag_clear(m, PG_ZERO);
                vm_page_flag_set(m, PG_MAPPED | PG_WRITEABLE);
                m->valid = VM_PAGE_BITS_ALL;
        }
}

void
pmap_dispose_proc(struct proc *p)
{
        TODO;
}

void
pmap_dispose_thread(struct thread *td)
{
        TODO;
}

void
pmap_swapin_thread(struct thread *td)
{
        TODO;
}

void
pmap_swapout_thread(struct thread *td)
{
        TODO;
}

/*
 * Allocate a physical page of memory directly from the phys_avail map.
 * Can only be called from pmap_bootstrap before avail start and end are
 * calculated.
 */
static vm_offset_t
pmap_bootstrap_alloc(vm_size_t size, u_int align)
{
        vm_offset_t s, e;
        int i, j;

        size = round_page(size);
        for (i = 0; phys_avail[i + 1] != 0; i += 2) {
                if (align != 0)
                        s = (phys_avail[i] + align - 1) & ~(align - 1);
                else
                        s = phys_avail[i];
                e = s + size;

                if (s < phys_avail[i] || e > phys_avail[i + 1])
                        continue;

                if (s == phys_avail[i]) {
                        phys_avail[i] += size;
                } else if (e == phys_avail[i + 1]) {
                        phys_avail[i + 1] -= size;
                } else {
                        for (j = phys_avail_count * 2; j > i; j -= 2) {
                                phys_avail[j] = phys_avail[j - 2];
                                phys_avail[j + 1] = phys_avail[j - 1];
                        }

                        phys_avail[i + 3] = phys_avail[i + 1];
                        phys_avail[i + 1] = s;
                        phys_avail[i + 2] = e;
                        phys_avail_count++;
                }

                return (s);
        }
        panic("pmap_bootstrap_alloc: could not allocate memory");
}
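
/*
 * The three cases in pmap_bootstrap_alloc() above, with illustrative
 * numbers: given a free range [0x10000, 0x30000) and a page-sized request,
 * an allocation at 0x10000 just advances the start of the range; one that
 * ends exactly at 0x30000 pulls the end back; and one forced into the
 * middle by the alignment argument splits the range in two, shifting the
 * rest of phys_avail[] up one pair to make room.
 */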

/*
 * Return an unmapped pvo for a kernel virtual address.
 * Used by pmap functions that operate on physical pages.
 */
static struct pvo_entry *
pmap_rkva_alloc(void)
{
        struct pvo_entry *pvo;
        struct pte *pt;
        vm_offset_t kva;
        int pteidx;

        if (pmap_rkva_count == 0)
                panic("pmap_rkva_alloc: no more reserved KVAs");

        kva = pmap_rkva_start + (PAGE_SIZE * --pmap_rkva_count);
        pmap_kenter(kva, 0);

        pvo = pmap_pvo_find_va(kernel_pmap, kva, &pteidx);

        if (pvo == NULL)
                panic("pmap_rkva_alloc: pmap_pvo_find_va failed");

        pt = pmap_pvo_to_pte(pvo, pteidx);

        if (pt == NULL)
                panic("pmap_rkva_alloc: pmap_pvo_to_pte failed");

        pmap_pte_unset(pt, &pvo->pvo_pte, pvo->pvo_vaddr);
        PVO_PTEGIDX_CLR(pvo);

        pmap_pte_overflow++;

        return (pvo);
}

static void
pmap_pa_map(struct pvo_entry *pvo, vm_offset_t pa, struct pte *saved_pt,
    int *depth_p)
{
        struct pte *pt;

        /*
         * If this pvo already has a valid pte, we need to save it so it can
         * be restored later.  We then just reload the new PTE over the old
         * slot.
         */
        if (saved_pt != NULL) {
                pt = pmap_pvo_to_pte(pvo, -1);

                if (pt != NULL) {
                        pmap_pte_unset(pt, &pvo->pvo_pte, pvo->pvo_vaddr);
                        PVO_PTEGIDX_CLR(pvo);
                        pmap_pte_overflow++;
                }

                *saved_pt = pvo->pvo_pte;

                pvo->pvo_pte.pte_lo &= ~PTE_RPGN;
        }

        pvo->pvo_pte.pte_lo |= pa;

        if (!pmap_pte_spill(pvo->pvo_vaddr))
                panic("pmap_pa_map: could not spill pvo %p", pvo);

        if (depth_p != NULL)
                (*depth_p)++;
}

static void
pmap_pa_unmap(struct pvo_entry *pvo, struct pte *saved_pt, int *depth_p)
{
        struct pte *pt;

        pt = pmap_pvo_to_pte(pvo, -1);

        if (pt != NULL) {
                pmap_pte_unset(pt, &pvo->pvo_pte, pvo->pvo_vaddr);
                PVO_PTEGIDX_CLR(pvo);
                pmap_pte_overflow++;
        }

        pvo->pvo_pte.pte_lo &= ~PTE_RPGN;

        /*
         * If there is a saved PTE and it's valid, restore it and return.
         */
        if (saved_pt != NULL && (saved_pt->pte_lo & PTE_RPGN) != 0) {
                if (depth_p != NULL && --(*depth_p) == 0)
                        panic("pmap_pa_unmap: restoring but depth == 0");

                pvo->pvo_pte = *saved_pt;

                if (!pmap_pte_spill(pvo->pvo_vaddr))
                        panic("pmap_pa_unmap: could not spill pvo %p", pvo);
        }
}

static void
pmap_syncicache(vm_offset_t pa, vm_size_t len)
{
        __syncicache((void *)pa, len);
}

static void
tlbia(void)
{
        caddr_t i;

        SYNC();
        for (i = 0; i < (caddr_t)0x00040000; i += 0x00001000) {
                TLBIE(i);
                EIEIO();
        }
        TLBSYNC();
        SYNC();
}
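
/*
 * tlbia() above: TLBs on this class of processor are indexed by low-order
 * effective address bits, so stepping a tlbie through 64 page-sized
 * strides (0 .. 0x40000 by 0x1000) hits every congruence class and thus
 * flushes the whole TLB regardless of which addresses were actually
 * mapped; the trailing tlbsync/sync make the invalidations visible to
 * other processors.
 */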
static int
pmap_pvo_enter(pmap_t pm, uma_zone_t zone, struct pvo_head *pvo_head,
    vm_offset_t va, vm_offset_t pa, u_int pte_lo, int flags)
{
	struct	pvo_entry *pvo;
	u_int	sr;
	int	first;
	u_int	ptegidx;
	int	i;
	int	bootstrap;

	pmap_pvo_enter_calls++;
	first = 0;
	bootstrap = 0;

	/*
	 * Compute the PTE Group index.
	 */
	va &= ~ADDR_POFF;
	sr = va_to_sr(pm->pm_sr, va);
	ptegidx = va_to_pteg(sr, va);

	/*
	 * Remove any existing mapping for this page.  Reuse the pvo entry if
	 * there is a mapping.
	 */
	LIST_FOREACH(pvo, &pmap_pvo_table[ptegidx], pvo_olink) {
		if (pvo->pvo_pmap == pm && PVO_VADDR(pvo) == va) {
			if ((pvo->pvo_pte.pte_lo & PTE_RPGN) == pa &&
			    (pvo->pvo_pte.pte_lo & PTE_PP) ==
			    (pte_lo & PTE_PP)) {
				return (0);
			}
			pmap_pvo_remove(pvo, -1);
			break;
		}
	}

	/*
	 * If we aren't overwriting a mapping, try to allocate.
	 */
	if (pmap_initialized) {
		pvo = uma_zalloc(zone, M_NOWAIT);
	} else {
		if (pmap_bpvo_pool_index >= pmap_bpvo_pool_count) {
			pmap_bpvo_pool = (struct pvo_entry *)
			    pmap_bootstrap_alloc(PAGE_SIZE, 0);
			pmap_bpvo_pool_index = 0;
		}
		pvo = &pmap_bpvo_pool[pmap_bpvo_pool_index];
		pmap_bpvo_pool_index++;
		bootstrap = 1;
	}

	if (pvo == NULL) {
		return (ENOMEM);
	}

	pmap_pvo_entries++;
	pvo->pvo_vaddr = va;
	pvo->pvo_pmap = pm;
	LIST_INSERT_HEAD(&pmap_pvo_table[ptegidx], pvo, pvo_olink);
	pvo->pvo_vaddr &= ~ADDR_POFF;
	if (flags & VM_PROT_EXECUTE)
		pvo->pvo_vaddr |= PVO_EXECUTABLE;
	if (flags & PVO_WIRED)
		pvo->pvo_vaddr |= PVO_WIRED;
	if (pvo_head != &pmap_pvo_kunmanaged)
		pvo->pvo_vaddr |= PVO_MANAGED;
	/*
	 * Set PVO_BOOTSTRAP only now; doing it before the pvo_vaddr
	 * assignment above would lose the flag.
	 */
	if (bootstrap)
		pvo->pvo_vaddr |= PVO_BOOTSTRAP;
	pmap_pte_create(&pvo->pvo_pte, sr, va, pa | pte_lo);

	/*
	 * Remember if the list was empty; if so, this pvo is the first
	 * mapping of the page.
	 */
	if (LIST_FIRST(pvo_head) == NULL)
		first = 1;

	LIST_INSERT_HEAD(pvo_head, pvo, pvo_vlink);
	if (pvo->pvo_vaddr & PVO_WIRED)
		pvo->pvo_pmap->pm_stats.wired_count++;
	pvo->pvo_pmap->pm_stats.resident_count++;

	/*
	 * We hope this succeeds but it isn't required.  If it fails, the
	 * mapping will be loaded by pmap_pte_spill() on first access.
	 */
	i = pmap_pte_insert(ptegidx, &pvo->pvo_pte);
	if (i >= 0) {
		PVO_PTEGIDX_SET(pvo, i);
	} else {
		pmap_pte_overflow++;
	}

	return (first ? ENOENT : 0);
}

static void
pmap_pvo_remove(struct pvo_entry *pvo, int pteidx)
{
	struct	pte *pt;

	/*
	 * If there is an active pte entry, we need to deactivate it (and
	 * save the ref & chg bits).
	 */
	pt = pmap_pvo_to_pte(pvo, pteidx);
	if (pt != NULL) {
		pmap_pte_unset(pt, &pvo->pvo_pte, pvo->pvo_vaddr);
		PVO_PTEGIDX_CLR(pvo);
	} else {
		pmap_pte_overflow--;
	}

	/*
	 * Update our statistics.
	 */
	pvo->pvo_pmap->pm_stats.resident_count--;
	if (pvo->pvo_vaddr & PVO_WIRED)
		pvo->pvo_pmap->pm_stats.wired_count--;

	/*
	 * Save the REF/CHG bits into their cache if the page is managed.
	 */
	if (pvo->pvo_vaddr & PVO_MANAGED) {
		struct	vm_page *pg;

		pg = PHYS_TO_VM_PAGE(pvo->pvo_pte.pte_lo & PTE_RPGN);
		if (pg != NULL) {
			pmap_attr_save(pg, pvo->pvo_pte.pte_lo &
			    (PTE_REF | PTE_CHG));
		}
	}

	/*
	 * Remove this PVO from the PV list.
	 */
	LIST_REMOVE(pvo, pvo_vlink);

	/*
	 * Remove this from the overflow list and return it to the pool
	 * if we aren't going to reuse it.
	 */
	LIST_REMOVE(pvo, pvo_olink);
	if (!(pvo->pvo_vaddr & PVO_BOOTSTRAP))
		uma_zfree(pvo->pvo_vaddr & PVO_MANAGED ? pmap_mpvo_zone :
		    pmap_upvo_zone, pvo);
	pmap_pvo_entries--;
	pmap_pvo_remove_calls++;
}
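
/*
 * Convert a PTEG index plus the PTE slot saved in a pvo into an index
 * into the global page table.  For example, a pvo with
 * PVO_PTEGIDX_GET(pvo) == 3 in PTEG 5 names the PTE at
 * pmap_pteg_table[5].pt[3], i.e. pteidx 5 * 8 + 3 = 43.  If the pvo
 * was entered under the secondary hash (PTE_HID), the PTEG index is
 * complemented, which is what the XOR with pmap_pteg_mask * 8 below
 * does while preserving the slot number in the low three bits.
 */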
static __inline int
pmap_pvo_pte_index(const struct pvo_entry *pvo, int ptegidx)
{
	int	pteidx;

	/*
	 * We can find the actual pte entry without searching by grabbing
	 * the PTE slot number stashed in unused low-order bits of
	 * pvo_vaddr and by noticing the HID bit.
	 */
	pteidx = ptegidx * 8 + PVO_PTEGIDX_GET(pvo);
	if (pvo->pvo_pte.pte_hi & PTE_HID)
		pteidx ^= pmap_pteg_mask * 8;

	return (pteidx);
}

static struct pvo_entry *
pmap_pvo_find_va(pmap_t pm, vm_offset_t va, int *pteidx_p)
{
	struct	pvo_entry *pvo;
	int	ptegidx;
	u_int	sr;

	va &= ~ADDR_POFF;
	sr = va_to_sr(pm->pm_sr, va);
	ptegidx = va_to_pteg(sr, va);

	LIST_FOREACH(pvo, &pmap_pvo_table[ptegidx], pvo_olink) {
		if (pvo->pvo_pmap == pm && PVO_VADDR(pvo) == va) {
			if (pteidx_p)
				*pteidx_p = pmap_pvo_pte_index(pvo, ptegidx);
			return (pvo);
		}
	}

	return (NULL);
}

static struct pte *
pmap_pvo_to_pte(const struct pvo_entry *pvo, int pteidx)
{
	struct	pte *pt;

	/*
	 * If we haven't been supplied the ptegidx, calculate it.
	 */
	if (pteidx == -1) {
		int	ptegidx;
		u_int	sr;

		sr = va_to_sr(pvo->pvo_pmap->pm_sr, pvo->pvo_vaddr);
		ptegidx = va_to_pteg(sr, pvo->pvo_vaddr);
		pteidx = pmap_pvo_pte_index(pvo, ptegidx);
	}

	pt = &pmap_pteg_table[pteidx >> 3].pt[pteidx & 7];

	if ((pvo->pvo_pte.pte_hi & PTE_VALID) && !PVO_PTEGIDX_ISSET(pvo)) {
		panic("pmap_pvo_to_pte: pvo %p has valid pte in pvo but no "
		    "valid pte index", pvo);
	}

	if ((pvo->pvo_pte.pte_hi & PTE_VALID) == 0 && PVO_PTEGIDX_ISSET(pvo)) {
		panic("pmap_pvo_to_pte: pvo %p has valid pte index in pvo "
		    "but no valid pte", pvo);
	}

	/*
	 * The XOR below is exactly PTE_VALID iff the real PTE matches the
	 * pvo's copy in everything except the valid bit, which is set in
	 * the page table but masked off from the pvo's copy.
	 */
	if ((pt->pte_hi ^ (pvo->pvo_pte.pte_hi & ~PTE_VALID)) == PTE_VALID) {
		if ((pvo->pvo_pte.pte_hi & PTE_VALID) == 0) {
			panic("pmap_pvo_to_pte: pvo %p has valid pte in "
			    "pmap_pteg_table %p but invalid in pvo", pvo, pt);
		}

		if (((pt->pte_lo ^ pvo->pvo_pte.pte_lo) & ~(PTE_CHG|PTE_REF))
		    != 0) {
			panic("pmap_pvo_to_pte: pvo %p pte does not match "
			    "pte %p in pmap_pteg_table", pvo, pt);
		}

		return (pt);
	}

	if (pvo->pvo_pte.pte_hi & PTE_VALID) {
		panic("pmap_pvo_to_pte: pvo %p has invalid pte %p in "
		    "pmap_pteg_table but valid in pvo", pvo, pt);
	}

	return (NULL);
}

static void *
pmap_pvo_allocf(uma_zone_t zone, int bytes, u_int8_t *flags, int wait)
{
	vm_page_t	m;

	if (bytes != PAGE_SIZE)
		panic("pmap_pvo_allocf: benno was shortsighted.  hit him.");

	*flags = UMA_SLAB_PRIV;
	m = vm_page_alloc(pmap_pvo_obj, pmap_pvo_count, VM_ALLOC_SYSTEM);
	if (m == NULL)
		return (NULL);
	pmap_pvo_count++;
	/*
	 * The physical address doubles as a kernel virtual address here
	 * because physical memory is mapped 1:1 through the BATs.
	 */
	return ((void *)VM_PAGE_TO_PHYS(m));
}

/*
 * XXX: THIS STUFF SHOULD BE IN pte.c?
 */
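/*
 * Called (typically from the trap handler) when a page table miss is
 * taken for an address mapped by an overflow pvo, i.e. one holding no
 * PTE slot.  Try to insert the pvo's PTE into its PTEG; if both
 * hashes are full, pick a resident PTE pseudo-randomly (via the
 * timebase) and evict it to the PVO table to make room.  Returns 1 if
 * a translation was installed and the faulting access can simply be
 * restarted, 0 if the fault must be handled by the VM system.
 */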
int
pmap_pte_spill(vm_offset_t addr)
{
	struct	pvo_entry *source_pvo, *victim_pvo;
	struct	pvo_entry *pvo;
	int	ptegidx, i, j;
	u_int	sr;
	struct	pteg *pteg;
	struct	pte *pt;

	pmap_pte_spills++;

	sr = mfsrin(addr);
	ptegidx = va_to_pteg(sr, addr);

	/*
	 * Have to substitute some entry.  Use the primary hash for this.
	 * Use the low bits of the timebase as a pseudo-random victim slot.
	 */
	pteg = &pmap_pteg_table[ptegidx];
	__asm __volatile("mftb %0" : "=r"(i));
	i &= 7;
	pt = &pteg->pt[i];

	source_pvo = NULL;
	victim_pvo = NULL;
	LIST_FOREACH(pvo, &pmap_pvo_table[ptegidx], pvo_olink) {
		/*
		 * We need to find a pvo entry for this address.
		 */
		PMAP_PVO_CHECK(pvo);
		if (source_pvo == NULL &&
		    pmap_pte_match(&pvo->pvo_pte, sr, addr,
		    pvo->pvo_pte.pte_hi & PTE_HID)) {
			/*
			 * Found the entry to be spilled into the pteg.
			 * Try a free slot first; if the insert succeeds,
			 * the PTE is valid and we are done.
			 */
			j = pmap_pte_insert(ptegidx, &pvo->pvo_pte);

			if (j >= 0) {
				PVO_PTEGIDX_SET(pvo, j);
				pmap_pte_overflow--;
				PMAP_PVO_CHECK(pvo);
				return (1);
			}

			source_pvo = pvo;

			if (victim_pvo != NULL)
				break;
		}

		/*
		 * We also need the pvo entry of the victim we are replacing
		 * so we can save the R & C bits of its PTE.
		 */
		if ((pt->pte_hi & PTE_HID) == 0 && victim_pvo == NULL &&
		    pmap_pte_compare(pt, &pvo->pvo_pte)) {
			victim_pvo = pvo;
			if (source_pvo != NULL)
				break;
		}
	}

	if (source_pvo == NULL)
		return (0);

	if (victim_pvo == NULL) {
		if ((pt->pte_hi & PTE_HID) == 0)
			panic("pmap_pte_spill: victim p-pte (%p) has no pvo "
			    "entry", pt);

		/*
		 * If this is a secondary PTE, we need to search its primary
		 * pvo bucket for the matching PVO.
		 */
		LIST_FOREACH(pvo, &pmap_pvo_table[ptegidx ^ pmap_pteg_mask],
		    pvo_olink) {
			PMAP_PVO_CHECK(pvo);
			/*
			 * We also need the pvo entry of the victim we are
			 * replacing so we can save the R & C bits of its PTE.
			 */
			if (pmap_pte_compare(pt, &pvo->pvo_pte)) {
				victim_pvo = pvo;
				break;
			}
		}

		if (victim_pvo == NULL)
			panic("pmap_pte_spill: victim s-pte (%p) has no pvo "
			    "entry", pt);
	}

	/*
	 * We are invalidating the TLB entry for the EA we are replacing even
	 * though it's valid.  If we don't, we lose any ref/chg bit changes
	 * contained in the TLB entry.
	 */
	source_pvo->pvo_pte.pte_hi &= ~PTE_HID;

	pmap_pte_unset(pt, &victim_pvo->pvo_pte, victim_pvo->pvo_vaddr);
	pmap_pte_set(pt, &source_pvo->pvo_pte);

	PVO_PTEGIDX_CLR(victim_pvo);
	PVO_PTEGIDX_SET(source_pvo, i);
	pmap_pte_replacements++;

	PMAP_PVO_CHECK(victim_pvo);
	PMAP_PVO_CHECK(source_pvo);

	return (1);
}

static int
pmap_pte_insert(u_int ptegidx, struct pte *pvo_pt)
{
	struct	pte *pt;
	int	i;

	/*
	 * First try primary hash.
	 */
	for (pt = pmap_pteg_table[ptegidx].pt, i = 0; i < 8; i++, pt++) {
		if ((pt->pte_hi & PTE_VALID) == 0) {
			pvo_pt->pte_hi &= ~PTE_HID;
			pmap_pte_set(pt, pvo_pt);
			return (i);
		}
	}
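
	/*
	 * The secondary PTEG index is the ones-complement of the primary
	 * one: for example, with pmap_pteg_mask == 0x3ff (1024 PTEGs),
	 * primary index 0x123 maps to secondary index 0x2dc
	 * (0x123 ^ 0x3ff).  PTEs installed there carry PTE_HID so that
	 * both the hardware and pmap_pvo_pte_index() know which hash
	 * function found them.
	 */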
	/*
	 * Now try secondary hash.
	 */
	ptegidx ^= pmap_pteg_mask;
	for (pt = pmap_pteg_table[ptegidx].pt, i = 0; i < 8; i++, pt++) {
		if ((pt->pte_hi & PTE_VALID) == 0) {
			pvo_pt->pte_hi |= PTE_HID;
			pmap_pte_set(pt, pvo_pt);
			return (i);
		}
	}

	/*
	 * Both PTEGs are full; the caller must fall back on the PVO
	 * overflow machinery.
	 */
	return (-1);
}

static boolean_t
pmap_query_bit(vm_page_t m, int ptebit)
{
	struct	pvo_entry *pvo;
	struct	pte *pt;

	if (pmap_attr_fetch(m) & ptebit)
		return (TRUE);

	LIST_FOREACH(pvo, vm_page_to_pvoh(m), pvo_vlink) {
		PMAP_PVO_CHECK(pvo);	/* sanity check */

		/*
		 * See if we saved the bit off.  If so, cache it and return
		 * success.
		 */
		if (pvo->pvo_pte.pte_lo & ptebit) {
			pmap_attr_save(m, ptebit);
			PMAP_PVO_CHECK(pvo);	/* sanity check */
			return (TRUE);
		}
	}

	/*
	 * No luck, now go through the hard part of looking at the PTEs
	 * themselves.  Sync so that any pending REF/CHG bits are flushed to
	 * the PTEs.
	 */
	SYNC();
	LIST_FOREACH(pvo, vm_page_to_pvoh(m), pvo_vlink) {
		PMAP_PVO_CHECK(pvo);	/* sanity check */

		/*
		 * See if this pvo has a valid PTE.  If so, fetch the
		 * REF/CHG bits from the valid PTE.  If the appropriate
		 * ptebit is set, cache it and return success.
		 */
		pt = pmap_pvo_to_pte(pvo, -1);
		if (pt != NULL) {
			pmap_pte_synch(pt, &pvo->pvo_pte);
			if (pvo->pvo_pte.pte_lo & ptebit) {
				pmap_attr_save(m, ptebit);
				PMAP_PVO_CHECK(pvo);	/* sanity check */
				return (TRUE);
			}
		}
	}

	return (FALSE);
}

static boolean_t
pmap_clear_bit(vm_page_t m, int ptebit)
{
	struct	pvo_entry *pvo;
	struct	pte *pt;
	int	rv;

	/*
	 * Clear the cached value.
	 */
	rv = pmap_attr_fetch(m);
	pmap_attr_clear(m, ptebit);

	/*
	 * Sync so that any pending REF/CHG bits are flushed to the PTEs (so
	 * we can reset the right ones).  Note that since the pvo entries and
	 * list heads are accessed via BAT0 and are never placed in the page
	 * table, we don't have to worry about further accesses setting the
	 * REF/CHG bits.
	 */
	SYNC();

	/*
	 * For each pvo entry, clear the pvo's ptebit.  If this pvo has a
	 * valid pte, clear the ptebit from the valid pte.
	 */
	LIST_FOREACH(pvo, vm_page_to_pvoh(m), pvo_vlink) {
		PMAP_PVO_CHECK(pvo);	/* sanity check */
		pt = pmap_pvo_to_pte(pvo, -1);
		if (pt != NULL) {
			pmap_pte_synch(pt, &pvo->pvo_pte);
			if (pvo->pvo_pte.pte_lo & ptebit)
				pmap_pte_clear(pt, PVO_VADDR(pvo), ptebit);
		}
		rv |= pvo->pvo_pte.pte_lo;
		pvo->pvo_pte.pte_lo &= ~ptebit;
		PMAP_PVO_CHECK(pvo);	/* sanity check */
	}

	return ((rv & ptebit) != 0);
}
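
/*
 * pmap_query_bit() and pmap_clear_bit() do the work for the exported
 * REF/CHG interfaces (pmap_is_modified(), pmap_clear_modify(),
 * pmap_ts_referenced() and friends); for example, a modified-page
 * query amounts to pmap_query_bit(m, PTE_CHG) and clearing the
 * modified state to pmap_clear_bit(m, PTE_CHG).
 */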