/* mmu_oea.c revision 134535 */
177957Sbenno/* 290643Sbenno * Copyright (c) 2001 The NetBSD Foundation, Inc. 390643Sbenno * All rights reserved. 490643Sbenno * 590643Sbenno * This code is derived from software contributed to The NetBSD Foundation 690643Sbenno * by Matt Thomas <matt@3am-software.com> of Allegro Networks, Inc. 790643Sbenno * 890643Sbenno * Redistribution and use in source and binary forms, with or without 990643Sbenno * modification, are permitted provided that the following conditions 1090643Sbenno * are met: 1190643Sbenno * 1. Redistributions of source code must retain the above copyright 1290643Sbenno * notice, this list of conditions and the following disclaimer. 1390643Sbenno * 2. Redistributions in binary form must reproduce the above copyright 1490643Sbenno * notice, this list of conditions and the following disclaimer in the 1590643Sbenno * documentation and/or other materials provided with the distribution. 1690643Sbenno * 3. All advertising materials mentioning features or use of this software 1790643Sbenno * must display the following acknowledgement: 1890643Sbenno * This product includes software developed by the NetBSD 1990643Sbenno * Foundation, Inc. and its contributors. 2090643Sbenno * 4. Neither the name of The NetBSD Foundation nor the names of its 2190643Sbenno * contributors may be used to endorse or promote products derived 2290643Sbenno * from this software without specific prior written permission. 2390643Sbenno * 2490643Sbenno * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS 2590643Sbenno * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED 2690643Sbenno * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR 2790643Sbenno * PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS 2890643Sbenno * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 2990643Sbenno * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 3090643Sbenno * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 3190643Sbenno * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 3290643Sbenno * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 3390643Sbenno * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 3490643Sbenno * POSSIBILITY OF SUCH DAMAGE. 3590643Sbenno */ 3690643Sbenno/* 3777957Sbenno * Copyright (C) 1995, 1996 Wolfgang Solfrank. 3877957Sbenno * Copyright (C) 1995, 1996 TooLs GmbH. 3977957Sbenno * All rights reserved. 4077957Sbenno * 4177957Sbenno * Redistribution and use in source and binary forms, with or without 4277957Sbenno * modification, are permitted provided that the following conditions 4377957Sbenno * are met: 4477957Sbenno * 1. Redistributions of source code must retain the above copyright 4577957Sbenno * notice, this list of conditions and the following disclaimer. 4677957Sbenno * 2. Redistributions in binary form must reproduce the above copyright 4777957Sbenno * notice, this list of conditions and the following disclaimer in the 4877957Sbenno * documentation and/or other materials provided with the distribution. 4977957Sbenno * 3. All advertising materials mentioning features or use of this software 5077957Sbenno * must display the following acknowledgement: 5177957Sbenno * This product includes software developed by TooLs GmbH. 5277957Sbenno * 4. The name of TooLs GmbH may not be used to endorse or promote products 5377957Sbenno * derived from this software without specific prior written permission. 
5477957Sbenno * 5577957Sbenno * THIS SOFTWARE IS PROVIDED BY TOOLS GMBH ``AS IS'' AND ANY EXPRESS OR 5677957Sbenno * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES 5777957Sbenno * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 5877957Sbenno * IN NO EVENT SHALL TOOLS GMBH BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 5977957Sbenno * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, 6077957Sbenno * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; 6177957Sbenno * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, 6277957Sbenno * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR 6377957Sbenno * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF 6477957Sbenno * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 6577957Sbenno * 6678880Sbenno * $NetBSD: pmap.c,v 1.28 2000/03/26 20:42:36 kleink Exp $ 6777957Sbenno */ 6877957Sbenno/* 6977957Sbenno * Copyright (C) 2001 Benno Rice. 7077957Sbenno * All rights reserved. 7177957Sbenno * 7277957Sbenno * Redistribution and use in source and binary forms, with or without 7377957Sbenno * modification, are permitted provided that the following conditions 7477957Sbenno * are met: 7577957Sbenno * 1. Redistributions of source code must retain the above copyright 7677957Sbenno * notice, this list of conditions and the following disclaimer. 7777957Sbenno * 2. Redistributions in binary form must reproduce the above copyright 7877957Sbenno * notice, this list of conditions and the following disclaimer in the 7977957Sbenno * documentation and/or other materials provided with the distribution. 8077957Sbenno * 8177957Sbenno * THIS SOFTWARE IS PROVIDED BY Benno Rice ``AS IS'' AND ANY EXPRESS OR 8277957Sbenno * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES 8377957Sbenno * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
8477957Sbenno * IN NO EVENT SHALL TOOLS GMBH BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 8577957Sbenno * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, 8677957Sbenno * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; 8777957Sbenno * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, 8877957Sbenno * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR 8977957Sbenno * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF 9077957Sbenno * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 9177957Sbenno */ 9277957Sbenno 93113038Sobrien#include <sys/cdefs.h> 94113038Sobrien__FBSDID("$FreeBSD: head/sys/powerpc/aim/mmu_oea.c 134535 2004-08-30 21:39:22Z alc $"); 9577957Sbenno 9690643Sbenno/* 9790643Sbenno * Manages physical address maps. 9890643Sbenno * 9990643Sbenno * In addition to hardware address maps, this module is called upon to 10090643Sbenno * provide software-use-only maps which may or may not be stored in the 10190643Sbenno * same form as hardware maps. These pseudo-maps are used to store 10290643Sbenno * intermediate results from copy operations to and from address spaces. 10390643Sbenno * 10490643Sbenno * Since the information managed by this module is also stored by the 10590643Sbenno * logical address mapping module, this module may throw away valid virtual 10690643Sbenno * to physical mappings at almost any time. However, invalidations of 10790643Sbenno * mappings must be done as requested. 10890643Sbenno * 10990643Sbenno * In order to cope with hardware architectures which make virtual to 11090643Sbenno * physical map invalidates expensive, this module may delay invalidate 11190643Sbenno * reduced protection operations until such time as they are actually 11290643Sbenno * necessary. 
This module is given full information as to which processors 11390643Sbenno * are currently using which maps, and to when physical maps must be made 11490643Sbenno * correct. 11590643Sbenno */ 11690643Sbenno 117118239Speter#include "opt_kstack_pages.h" 118118239Speter 11977957Sbenno#include <sys/param.h> 12080431Speter#include <sys/kernel.h> 12190643Sbenno#include <sys/ktr.h> 12290643Sbenno#include <sys/lock.h> 12390643Sbenno#include <sys/msgbuf.h> 12490643Sbenno#include <sys/mutex.h> 12577957Sbenno#include <sys/proc.h> 12690643Sbenno#include <sys/sysctl.h> 12790643Sbenno#include <sys/systm.h> 12877957Sbenno#include <sys/vmmeter.h> 12977957Sbenno 13090643Sbenno#include <dev/ofw/openfirm.h> 13190643Sbenno 13290643Sbenno#include <vm/vm.h> 13377957Sbenno#include <vm/vm_param.h> 13477957Sbenno#include <vm/vm_kern.h> 13577957Sbenno#include <vm/vm_page.h> 13677957Sbenno#include <vm/vm_map.h> 13777957Sbenno#include <vm/vm_object.h> 13877957Sbenno#include <vm/vm_extern.h> 13977957Sbenno#include <vm/vm_pageout.h> 14077957Sbenno#include <vm/vm_pager.h> 14192847Sjeff#include <vm/uma.h> 14277957Sbenno 143125687Sgrehan#include <machine/cpu.h> 14497346Sbenno#include <machine/powerpc.h> 14583730Smp#include <machine/bat.h> 14690643Sbenno#include <machine/frame.h> 14790643Sbenno#include <machine/md_var.h> 14890643Sbenno#include <machine/psl.h> 14977957Sbenno#include <machine/pte.h> 15090643Sbenno#include <machine/sr.h> 15177957Sbenno 15290643Sbenno#define PMAP_DEBUG 15377957Sbenno 15490643Sbenno#define TODO panic("%s: not implemented", __func__); 15577957Sbenno 15690643Sbenno#define TLBIE(va) __asm __volatile("tlbie %0" :: "r"(va)) 15790643Sbenno#define TLBSYNC() __asm __volatile("tlbsync"); 15890643Sbenno#define SYNC() __asm __volatile("sync"); 15990643Sbenno#define EIEIO() __asm __volatile("eieio"); 16090643Sbenno 16190643Sbenno#define VSID_MAKE(sr, hash) ((sr) | (((hash) & 0xfffff) << 4)) 16290643Sbenno#define VSID_TO_SR(vsid) ((vsid) & 0xf) 16390643Sbenno#define 
VSID_TO_HASH(vsid) (((vsid) >> 4) & 0xfffff) 16490643Sbenno 16590643Sbenno#define PVO_PTEGIDX_MASK 0x0007 /* which PTEG slot */ 16690643Sbenno#define PVO_PTEGIDX_VALID 0x0008 /* slot is valid */ 16790643Sbenno#define PVO_WIRED 0x0010 /* PVO entry is wired */ 16890643Sbenno#define PVO_MANAGED 0x0020 /* PVO entry is managed */ 16990643Sbenno#define PVO_EXECUTABLE 0x0040 /* PVO entry is executable */ 17094835Sbenno#define PVO_BOOTSTRAP 0x0080 /* PVO entry allocated during 17192521Sbenno bootstrap */ 17290643Sbenno#define PVO_VADDR(pvo) ((pvo)->pvo_vaddr & ~ADDR_POFF) 17390643Sbenno#define PVO_ISEXECUTABLE(pvo) ((pvo)->pvo_vaddr & PVO_EXECUTABLE) 17490643Sbenno#define PVO_PTEGIDX_GET(pvo) ((pvo)->pvo_vaddr & PVO_PTEGIDX_MASK) 17590643Sbenno#define PVO_PTEGIDX_ISSET(pvo) ((pvo)->pvo_vaddr & PVO_PTEGIDX_VALID) 17690643Sbenno#define PVO_PTEGIDX_CLR(pvo) \ 17790643Sbenno ((void)((pvo)->pvo_vaddr &= ~(PVO_PTEGIDX_VALID|PVO_PTEGIDX_MASK))) 17890643Sbenno#define PVO_PTEGIDX_SET(pvo, i) \ 17990643Sbenno ((void)((pvo)->pvo_vaddr |= (i)|PVO_PTEGIDX_VALID)) 18090643Sbenno 18190643Sbenno#define PMAP_PVO_CHECK(pvo) 18290643Sbenno 18390643Sbennostruct ofw_map { 18490643Sbenno vm_offset_t om_va; 18590643Sbenno vm_size_t om_len; 18690643Sbenno vm_offset_t om_pa; 18790643Sbenno u_int om_mode; 18890643Sbenno}; 18977957Sbenno 19090643Sbennoint pmap_bootstrapped = 0; 19177957Sbenno 19290643Sbenno/* 19390643Sbenno * Virtual and physical address of message buffer. 19490643Sbenno */ 19590643Sbennostruct msgbuf *msgbufp; 19690643Sbennovm_offset_t msgbuf_phys; 19777957Sbenno 198110172Sgrehanint pmap_pagedaemon_waken; 199110172Sgrehan 20090643Sbenno/* 20190643Sbenno * Map of physical memory regions. 
20290643Sbenno */ 20390643Sbennovm_offset_t phys_avail[128]; 20490643Sbennou_int phys_avail_count; 20597346Sbennostatic struct mem_region *regions; 20697346Sbennostatic struct mem_region *pregions; 20797346Sbennoint regions_sz, pregions_sz; 208100319Sbennostatic struct ofw_map *translations; 20977957Sbenno 21090643Sbenno/* 21190643Sbenno * First and last available kernel virtual addresses. 21290643Sbenno */ 21390643Sbennovm_offset_t virtual_avail; 21490643Sbennovm_offset_t virtual_end; 21590643Sbennovm_offset_t kernel_vm_end; 21677957Sbenno 21790643Sbenno/* 21890643Sbenno * Kernel pmap. 21990643Sbenno */ 22090643Sbennostruct pmap kernel_pmap_store; 22190643Sbennoextern struct pmap ofw_pmap; 22277957Sbenno 22390643Sbenno/* 224134535Salc * Lock for the pteg and pvo tables. 225134535Salc */ 226134535Salcstruct mtx pmap_table_mutex; 227134535Salc 228134535Salc/* 22990643Sbenno * PTEG data. 23090643Sbenno */ 23190643Sbennostatic struct pteg *pmap_pteg_table; 23290643Sbennou_int pmap_pteg_count; 23390643Sbennou_int pmap_pteg_mask; 23477957Sbenno 23590643Sbenno/* 23690643Sbenno * PVO data. 
23790643Sbenno */ 23890643Sbennostruct pvo_head *pmap_pvo_table; /* pvo entries by pteg index */ 23990643Sbennostruct pvo_head pmap_pvo_kunmanaged = 24090643Sbenno LIST_HEAD_INITIALIZER(pmap_pvo_kunmanaged); /* list of unmanaged pages */ 24190643Sbennostruct pvo_head pmap_pvo_unmanaged = 24290643Sbenno LIST_HEAD_INITIALIZER(pmap_pvo_unmanaged); /* list of unmanaged pages */ 24377957Sbenno 24492847Sjeffuma_zone_t pmap_upvo_zone; /* zone for pvo entries for unmanaged pages */ 24592847Sjeffuma_zone_t pmap_mpvo_zone; /* zone for pvo entries for managed pages */ 24677957Sbenno 24799037Sbenno#define BPVO_POOL_SIZE 32768 24892521Sbennostatic struct pvo_entry *pmap_bpvo_pool; 24999037Sbennostatic int pmap_bpvo_pool_index = 0; 25077957Sbenno 25190643Sbenno#define VSID_NBPW (sizeof(u_int32_t) * 8) 25290643Sbennostatic u_int pmap_vsid_bitmap[NPMAPS / VSID_NBPW]; 25377957Sbenno 25490643Sbennostatic boolean_t pmap_initialized = FALSE; 25577957Sbenno 25690643Sbenno/* 25790643Sbenno * Statistics. 25890643Sbenno */ 25990643Sbennou_int pmap_pte_valid = 0; 26090643Sbennou_int pmap_pte_overflow = 0; 26190643Sbennou_int pmap_pte_replacements = 0; 26290643Sbennou_int pmap_pvo_entries = 0; 26390643Sbennou_int pmap_pvo_enter_calls = 0; 26490643Sbennou_int pmap_pvo_remove_calls = 0; 26590643Sbennou_int pmap_pte_spills = 0; 26690643SbennoSYSCTL_INT(_machdep, OID_AUTO, pmap_pte_valid, CTLFLAG_RD, &pmap_pte_valid, 26790643Sbenno 0, ""); 26890643SbennoSYSCTL_INT(_machdep, OID_AUTO, pmap_pte_overflow, CTLFLAG_RD, 26990643Sbenno &pmap_pte_overflow, 0, ""); 27090643SbennoSYSCTL_INT(_machdep, OID_AUTO, pmap_pte_replacements, CTLFLAG_RD, 27190643Sbenno &pmap_pte_replacements, 0, ""); 27290643SbennoSYSCTL_INT(_machdep, OID_AUTO, pmap_pvo_entries, CTLFLAG_RD, &pmap_pvo_entries, 27390643Sbenno 0, ""); 27490643SbennoSYSCTL_INT(_machdep, OID_AUTO, pmap_pvo_enter_calls, CTLFLAG_RD, 27590643Sbenno &pmap_pvo_enter_calls, 0, ""); 27690643SbennoSYSCTL_INT(_machdep, OID_AUTO, pmap_pvo_remove_calls, 
CTLFLAG_RD, 27790643Sbenno &pmap_pvo_remove_calls, 0, ""); 27890643SbennoSYSCTL_INT(_machdep, OID_AUTO, pmap_pte_spills, CTLFLAG_RD, 27990643Sbenno &pmap_pte_spills, 0, ""); 28077957Sbenno 28190643Sbennostruct pvo_entry *pmap_pvo_zeropage; 28277957Sbenno 28390643Sbennovm_offset_t pmap_rkva_start = VM_MIN_KERNEL_ADDRESS; 28490643Sbennou_int pmap_rkva_count = 4; 28577957Sbenno 28690643Sbenno/* 28790643Sbenno * Allocate physical memory for use in pmap_bootstrap. 28890643Sbenno */ 28990643Sbennostatic vm_offset_t pmap_bootstrap_alloc(vm_size_t, u_int); 29077957Sbenno 29190643Sbenno/* 29290643Sbenno * PTE calls. 29390643Sbenno */ 29490643Sbennostatic int pmap_pte_insert(u_int, struct pte *); 29577957Sbenno 29677957Sbenno/* 29790643Sbenno * PVO calls. 29877957Sbenno */ 29992847Sjeffstatic int pmap_pvo_enter(pmap_t, uma_zone_t, struct pvo_head *, 30090643Sbenno vm_offset_t, vm_offset_t, u_int, int); 30190643Sbennostatic void pmap_pvo_remove(struct pvo_entry *, int); 30290643Sbennostatic struct pvo_entry *pmap_pvo_find_va(pmap_t, vm_offset_t, int *); 30390643Sbennostatic struct pte *pmap_pvo_to_pte(const struct pvo_entry *, int); 30490643Sbenno 30590643Sbenno/* 30690643Sbenno * Utility routines. 
30790643Sbenno */ 30890643Sbennostatic struct pvo_entry *pmap_rkva_alloc(void); 30990643Sbennostatic void pmap_pa_map(struct pvo_entry *, vm_offset_t, 31090643Sbenno struct pte *, int *); 31190643Sbennostatic void pmap_pa_unmap(struct pvo_entry *, struct pte *, int *); 31290643Sbennostatic void pmap_syncicache(vm_offset_t, vm_size_t); 31390643Sbennostatic boolean_t pmap_query_bit(vm_page_t, int); 314110172Sgrehanstatic u_int pmap_clear_bit(vm_page_t, int, int *); 31590643Sbennostatic void tlbia(void); 31690643Sbenno 31790643Sbennostatic __inline int 31890643Sbennova_to_sr(u_int *sr, vm_offset_t va) 31977957Sbenno{ 32090643Sbenno return (sr[(uintptr_t)va >> ADDR_SR_SHFT]); 32190643Sbenno} 32277957Sbenno 32390643Sbennostatic __inline u_int 32490643Sbennova_to_pteg(u_int sr, vm_offset_t addr) 32590643Sbenno{ 32690643Sbenno u_int hash; 32790643Sbenno 32890643Sbenno hash = (sr & SR_VSID_MASK) ^ (((u_int)addr & ADDR_PIDX) >> 32990643Sbenno ADDR_PIDX_SHFT); 33090643Sbenno return (hash & pmap_pteg_mask); 33177957Sbenno} 33277957Sbenno 33390643Sbennostatic __inline struct pvo_head * 33496250Sbennopa_to_pvoh(vm_offset_t pa, vm_page_t *pg_p) 33577957Sbenno{ 33690643Sbenno struct vm_page *pg; 33777957Sbenno 33890643Sbenno pg = PHYS_TO_VM_PAGE(pa); 33990643Sbenno 34096250Sbenno if (pg_p != NULL) 34196250Sbenno *pg_p = pg; 34296250Sbenno 34390643Sbenno if (pg == NULL) 34490643Sbenno return (&pmap_pvo_unmanaged); 34590643Sbenno 34690643Sbenno return (&pg->md.mdpg_pvoh); 34777957Sbenno} 34877957Sbenno 34990643Sbennostatic __inline struct pvo_head * 35090643Sbennovm_page_to_pvoh(vm_page_t m) 35190643Sbenno{ 35290643Sbenno 35390643Sbenno return (&m->md.mdpg_pvoh); 35490643Sbenno} 35590643Sbenno 35677957Sbennostatic __inline void 35790643Sbennopmap_attr_clear(vm_page_t m, int ptebit) 35877957Sbenno{ 35990643Sbenno 36090643Sbenno m->md.mdpg_attrs &= ~ptebit; 36177957Sbenno} 36277957Sbenno 36377957Sbennostatic __inline int 36490643Sbennopmap_attr_fetch(vm_page_t m) 36577957Sbenno{ 
36677957Sbenno 36790643Sbenno return (m->md.mdpg_attrs); 36877957Sbenno} 36977957Sbenno 37090643Sbennostatic __inline void 37190643Sbennopmap_attr_save(vm_page_t m, int ptebit) 37290643Sbenno{ 37390643Sbenno 37490643Sbenno m->md.mdpg_attrs |= ptebit; 37590643Sbenno} 37690643Sbenno 37777957Sbennostatic __inline int 37890643Sbennopmap_pte_compare(const struct pte *pt, const struct pte *pvo_pt) 37977957Sbenno{ 38090643Sbenno if (pt->pte_hi == pvo_pt->pte_hi) 38190643Sbenno return (1); 38290643Sbenno 38390643Sbenno return (0); 38477957Sbenno} 38577957Sbenno 38677957Sbennostatic __inline int 38790643Sbennopmap_pte_match(struct pte *pt, u_int sr, vm_offset_t va, int which) 38877957Sbenno{ 38990643Sbenno return (pt->pte_hi & ~PTE_VALID) == 39090643Sbenno (((sr & SR_VSID_MASK) << PTE_VSID_SHFT) | 39190643Sbenno ((va >> ADDR_API_SHFT) & PTE_API) | which); 39290643Sbenno} 39377957Sbenno 39490643Sbennostatic __inline void 39590643Sbennopmap_pte_create(struct pte *pt, u_int sr, vm_offset_t va, u_int pte_lo) 39690643Sbenno{ 39790643Sbenno /* 39890643Sbenno * Construct a PTE. Default to IMB initially. Valid bit only gets 39990643Sbenno * set when the real pte is set in memory. 40090643Sbenno * 40190643Sbenno * Note: Don't set the valid bit for correct operation of tlb update. 
40290643Sbenno */ 40390643Sbenno pt->pte_hi = ((sr & SR_VSID_MASK) << PTE_VSID_SHFT) | 40490643Sbenno (((va & ADDR_PIDX) >> ADDR_API_SHFT) & PTE_API); 40590643Sbenno pt->pte_lo = pte_lo; 40677957Sbenno} 40777957Sbenno 40890643Sbennostatic __inline void 40990643Sbennopmap_pte_synch(struct pte *pt, struct pte *pvo_pt) 41077957Sbenno{ 41177957Sbenno 41290643Sbenno pvo_pt->pte_lo |= pt->pte_lo & (PTE_REF | PTE_CHG); 41377957Sbenno} 41477957Sbenno 41590643Sbennostatic __inline void 41690643Sbennopmap_pte_clear(struct pte *pt, vm_offset_t va, int ptebit) 41777957Sbenno{ 41877957Sbenno 41990643Sbenno /* 42090643Sbenno * As shown in Section 7.6.3.2.3 42190643Sbenno */ 42290643Sbenno pt->pte_lo &= ~ptebit; 42390643Sbenno TLBIE(va); 42490643Sbenno EIEIO(); 42590643Sbenno TLBSYNC(); 42690643Sbenno SYNC(); 42777957Sbenno} 42877957Sbenno 42990643Sbennostatic __inline void 43090643Sbennopmap_pte_set(struct pte *pt, struct pte *pvo_pt) 43177957Sbenno{ 43277957Sbenno 43390643Sbenno pvo_pt->pte_hi |= PTE_VALID; 43490643Sbenno 43577957Sbenno /* 43690643Sbenno * Update the PTE as defined in section 7.6.3.1. 43790643Sbenno * Note that the REF/CHG bits are from pvo_pt and thus should havce 43890643Sbenno * been saved so this routine can restore them (if desired). 43977957Sbenno */ 44090643Sbenno pt->pte_lo = pvo_pt->pte_lo; 44190643Sbenno EIEIO(); 44290643Sbenno pt->pte_hi = pvo_pt->pte_hi; 44390643Sbenno SYNC(); 44490643Sbenno pmap_pte_valid++; 44590643Sbenno} 44677957Sbenno 44790643Sbennostatic __inline void 44890643Sbennopmap_pte_unset(struct pte *pt, struct pte *pvo_pt, vm_offset_t va) 44990643Sbenno{ 45090643Sbenno 45190643Sbenno pvo_pt->pte_hi &= ~PTE_VALID; 45290643Sbenno 45377957Sbenno /* 45490643Sbenno * Force the reg & chg bits back into the PTEs. 45577957Sbenno */ 45690643Sbenno SYNC(); 45777957Sbenno 45890643Sbenno /* 45990643Sbenno * Invalidate the pte. 
46090643Sbenno */ 46190643Sbenno pt->pte_hi &= ~PTE_VALID; 46277957Sbenno 46390643Sbenno SYNC(); 46490643Sbenno TLBIE(va); 46590643Sbenno EIEIO(); 46690643Sbenno TLBSYNC(); 46790643Sbenno SYNC(); 46877957Sbenno 46990643Sbenno /* 47090643Sbenno * Save the reg & chg bits. 47190643Sbenno */ 47290643Sbenno pmap_pte_synch(pt, pvo_pt); 47390643Sbenno pmap_pte_valid--; 47477957Sbenno} 47577957Sbenno 47690643Sbennostatic __inline void 47790643Sbennopmap_pte_change(struct pte *pt, struct pte *pvo_pt, vm_offset_t va) 47890643Sbenno{ 47990643Sbenno 48090643Sbenno /* 48190643Sbenno * Invalidate the PTE 48290643Sbenno */ 48390643Sbenno pmap_pte_unset(pt, pvo_pt, va); 48490643Sbenno pmap_pte_set(pt, pvo_pt); 48590643Sbenno} 48690643Sbenno 48777957Sbenno/* 48890643Sbenno * Quick sort callout for comparing memory regions. 48977957Sbenno */ 49090643Sbennostatic int mr_cmp(const void *a, const void *b); 49190643Sbennostatic int om_cmp(const void *a, const void *b); 49290643Sbenno 49390643Sbennostatic int 49490643Sbennomr_cmp(const void *a, const void *b) 49577957Sbenno{ 49690643Sbenno const struct mem_region *regiona; 49790643Sbenno const struct mem_region *regionb; 49877957Sbenno 49990643Sbenno regiona = a; 50090643Sbenno regionb = b; 50190643Sbenno if (regiona->mr_start < regionb->mr_start) 50290643Sbenno return (-1); 50390643Sbenno else if (regiona->mr_start > regionb->mr_start) 50490643Sbenno return (1); 50590643Sbenno else 50690643Sbenno return (0); 50790643Sbenno} 50877957Sbenno 50990643Sbennostatic int 51090643Sbennoom_cmp(const void *a, const void *b) 51190643Sbenno{ 51290643Sbenno const struct ofw_map *mapa; 51390643Sbenno const struct ofw_map *mapb; 51490643Sbenno 51590643Sbenno mapa = a; 51690643Sbenno mapb = b; 51790643Sbenno if (mapa->om_pa < mapb->om_pa) 51890643Sbenno return (-1); 51990643Sbenno else if (mapa->om_pa > mapb->om_pa) 52090643Sbenno return (1); 52190643Sbenno else 52290643Sbenno return (0); 52377957Sbenno} 52477957Sbenno 52577957Sbennovoid 
52690643Sbennopmap_bootstrap(vm_offset_t kernelstart, vm_offset_t kernelend) 52777957Sbenno{ 52897346Sbenno ihandle_t mmui; 52990643Sbenno phandle_t chosen, mmu; 53090643Sbenno int sz; 53190643Sbenno int i, j; 532103604Sgrehan int ofw_mappings; 53391793Sbenno vm_size_t size, physsz; 53490643Sbenno vm_offset_t pa, va, off; 53590643Sbenno u_int batl, batu; 53677957Sbenno 53799037Sbenno /* 538103604Sgrehan * Set up BAT0 to map the lowest 256 MB area 53999037Sbenno */ 54099037Sbenno battable[0x0].batl = BATL(0x00000000, BAT_M, BAT_PP_RW); 54199037Sbenno battable[0x0].batu = BATU(0x00000000, BAT_BL_256M, BAT_Vs); 54299037Sbenno 54399037Sbenno /* 54499037Sbenno * Map PCI memory space. 54599037Sbenno */ 54699037Sbenno battable[0x8].batl = BATL(0x80000000, BAT_I|BAT_G, BAT_PP_RW); 54799037Sbenno battable[0x8].batu = BATU(0x80000000, BAT_BL_256M, BAT_Vs); 54899037Sbenno 54999037Sbenno battable[0x9].batl = BATL(0x90000000, BAT_I|BAT_G, BAT_PP_RW); 55099037Sbenno battable[0x9].batu = BATU(0x90000000, BAT_BL_256M, BAT_Vs); 55199037Sbenno 55299037Sbenno battable[0xa].batl = BATL(0xa0000000, BAT_I|BAT_G, BAT_PP_RW); 55399037Sbenno battable[0xa].batu = BATU(0xa0000000, BAT_BL_256M, BAT_Vs); 55499037Sbenno 55599037Sbenno battable[0xb].batl = BATL(0xb0000000, BAT_I|BAT_G, BAT_PP_RW); 55699037Sbenno battable[0xb].batu = BATU(0xb0000000, BAT_BL_256M, BAT_Vs); 55799037Sbenno 55899037Sbenno /* 55999037Sbenno * Map obio devices. 56099037Sbenno */ 56199037Sbenno battable[0xf].batl = BATL(0xf0000000, BAT_I|BAT_G, BAT_PP_RW); 56299037Sbenno battable[0xf].batu = BATU(0xf0000000, BAT_BL_256M, BAT_Vs); 56399037Sbenno 56477957Sbenno /* 56590643Sbenno * Use an IBAT and a DBAT to map the bottom segment of memory 56690643Sbenno * where we are. 
56777957Sbenno */ 56890643Sbenno batu = BATU(0x00000000, BAT_BL_256M, BAT_Vs); 56990643Sbenno batl = BATL(0x00000000, BAT_M, BAT_PP_RW); 570131808Sgrehan __asm ("mtibatu 0,%0; mtibatl 0,%1; isync; \n" 571131808Sgrehan "mtdbatu 0,%0; mtdbatl 0,%1; isync" 57290643Sbenno :: "r"(batu), "r"(batl)); 57399037Sbenno 57490643Sbenno#if 0 57599037Sbenno /* map frame buffer */ 57699037Sbenno batu = BATU(0x90000000, BAT_BL_256M, BAT_Vs); 57799037Sbenno batl = BATL(0x90000000, BAT_I|BAT_G, BAT_PP_RW); 578131808Sgrehan __asm ("mtdbatu 1,%0; mtdbatl 1,%1; isync" 57999037Sbenno :: "r"(batu), "r"(batl)); 58099037Sbenno#endif 58199037Sbenno 58299037Sbenno#if 1 58399037Sbenno /* map pci space */ 58490643Sbenno batu = BATU(0x80000000, BAT_BL_256M, BAT_Vs); 58599037Sbenno batl = BATL(0x80000000, BAT_I|BAT_G, BAT_PP_RW); 586131808Sgrehan __asm ("mtdbatu 1,%0; mtdbatl 1,%1; isync" 58790643Sbenno :: "r"(batu), "r"(batl)); 58890643Sbenno#endif 58977957Sbenno 59077957Sbenno /* 59190643Sbenno * Set the start and end of kva. 59277957Sbenno */ 59390643Sbenno virtual_avail = VM_MIN_KERNEL_ADDRESS; 59490643Sbenno virtual_end = VM_MAX_KERNEL_ADDRESS; 59590643Sbenno 59697346Sbenno mem_regions(&pregions, &pregions_sz, ®ions, ®ions_sz); 59797346Sbenno CTR0(KTR_PMAP, "pmap_bootstrap: physical memory"); 59897346Sbenno 59997346Sbenno qsort(pregions, pregions_sz, sizeof(*pregions), mr_cmp); 60097346Sbenno for (i = 0; i < pregions_sz; i++) { 601103604Sgrehan vm_offset_t pa; 602103604Sgrehan vm_offset_t end; 603103604Sgrehan 60497346Sbenno CTR3(KTR_PMAP, "physregion: %#x - %#x (%#x)", 60597346Sbenno pregions[i].mr_start, 60697346Sbenno pregions[i].mr_start + pregions[i].mr_size, 60797346Sbenno pregions[i].mr_size); 608103604Sgrehan /* 609103604Sgrehan * Install entries into the BAT table to allow all 610103604Sgrehan * of physmem to be convered by on-demand BAT entries. 
611103604Sgrehan * The loop will sometimes set the same battable element 612103604Sgrehan * twice, but that's fine since they won't be used for 613103604Sgrehan * a while yet. 614103604Sgrehan */ 615103604Sgrehan pa = pregions[i].mr_start & 0xf0000000; 616103604Sgrehan end = pregions[i].mr_start + pregions[i].mr_size; 617103604Sgrehan do { 618103604Sgrehan u_int n = pa >> ADDR_SR_SHFT; 619103604Sgrehan 620103604Sgrehan battable[n].batl = BATL(pa, BAT_M, BAT_PP_RW); 621103604Sgrehan battable[n].batu = BATU(pa, BAT_BL_256M, BAT_Vs); 622103604Sgrehan pa += SEGMENT_LENGTH; 623103604Sgrehan } while (pa < end); 62497346Sbenno } 62597346Sbenno 62697346Sbenno if (sizeof(phys_avail)/sizeof(phys_avail[0]) < regions_sz) 62790643Sbenno panic("pmap_bootstrap: phys_avail too small"); 62897346Sbenno qsort(regions, regions_sz, sizeof(*regions), mr_cmp); 62990643Sbenno phys_avail_count = 0; 63091793Sbenno physsz = 0; 63197346Sbenno for (i = 0, j = 0; i < regions_sz; i++, j += 2) { 63290643Sbenno CTR3(KTR_PMAP, "region: %#x - %#x (%#x)", regions[i].mr_start, 63390643Sbenno regions[i].mr_start + regions[i].mr_size, 63490643Sbenno regions[i].mr_size); 63590643Sbenno phys_avail[j] = regions[i].mr_start; 63690643Sbenno phys_avail[j + 1] = regions[i].mr_start + regions[i].mr_size; 63790643Sbenno phys_avail_count++; 63891793Sbenno physsz += regions[i].mr_size; 63977957Sbenno } 64091793Sbenno physmem = btoc(physsz); 64177957Sbenno 64277957Sbenno /* 64390643Sbenno * Allocate PTEG table. 
64477957Sbenno */ 64590643Sbenno#ifdef PTEGCOUNT 64690643Sbenno pmap_pteg_count = PTEGCOUNT; 64790643Sbenno#else 64890643Sbenno pmap_pteg_count = 0x1000; 64977957Sbenno 65090643Sbenno while (pmap_pteg_count < physmem) 65190643Sbenno pmap_pteg_count <<= 1; 65277957Sbenno 65390643Sbenno pmap_pteg_count >>= 1; 65490643Sbenno#endif /* PTEGCOUNT */ 65577957Sbenno 65690643Sbenno size = pmap_pteg_count * sizeof(struct pteg); 65790643Sbenno CTR2(KTR_PMAP, "pmap_bootstrap: %d PTEGs, %d bytes", pmap_pteg_count, 65890643Sbenno size); 65990643Sbenno pmap_pteg_table = (struct pteg *)pmap_bootstrap_alloc(size, size); 66090643Sbenno CTR1(KTR_PMAP, "pmap_bootstrap: PTEG table at %p", pmap_pteg_table); 66190643Sbenno bzero((void *)pmap_pteg_table, pmap_pteg_count * sizeof(struct pteg)); 66290643Sbenno pmap_pteg_mask = pmap_pteg_count - 1; 66377957Sbenno 66490643Sbenno /* 66594839Sbenno * Allocate pv/overflow lists. 66690643Sbenno */ 66790643Sbenno size = sizeof(struct pvo_head) * pmap_pteg_count; 66890643Sbenno pmap_pvo_table = (struct pvo_head *)pmap_bootstrap_alloc(size, 66990643Sbenno PAGE_SIZE); 67090643Sbenno CTR1(KTR_PMAP, "pmap_bootstrap: PVO table at %p", pmap_pvo_table); 67190643Sbenno for (i = 0; i < pmap_pteg_count; i++) 67290643Sbenno LIST_INIT(&pmap_pvo_table[i]); 67377957Sbenno 67490643Sbenno /* 675134535Salc * Initialize the lock that synchronizes access to the pteg and pvo 676134535Salc * tables. 677134535Salc */ 678134535Salc mtx_init(&pmap_table_mutex, "pmap table", NULL, MTX_DEF); 679134535Salc 680134535Salc /* 68190643Sbenno * Allocate the message buffer. 68290643Sbenno */ 68390643Sbenno msgbuf_phys = pmap_bootstrap_alloc(MSGBUF_SIZE, 0); 68477957Sbenno 68590643Sbenno /* 68690643Sbenno * Initialise the unmanaged pvo pool. 
68790643Sbenno */ 68899037Sbenno pmap_bpvo_pool = (struct pvo_entry *)pmap_bootstrap_alloc( 68999037Sbenno BPVO_POOL_SIZE*sizeof(struct pvo_entry), 0); 69092521Sbenno pmap_bpvo_pool_index = 0; 69177957Sbenno 69277957Sbenno /* 69390643Sbenno * Make sure kernel vsid is allocated as well as VSID 0. 69477957Sbenno */ 69590643Sbenno pmap_vsid_bitmap[(KERNEL_VSIDBITS & (NPMAPS - 1)) / VSID_NBPW] 69690643Sbenno |= 1 << (KERNEL_VSIDBITS % VSID_NBPW); 69790643Sbenno pmap_vsid_bitmap[0] |= 1; 69877957Sbenno 69990643Sbenno /* 700133862Smarius * Set up the Open Firmware pmap and add it's mappings. 70190643Sbenno */ 70290643Sbenno pmap_pinit(&ofw_pmap); 70390643Sbenno ofw_pmap.pm_sr[KERNEL_SR] = KERNEL_SEGMENT; 704126478Sgrehan ofw_pmap.pm_sr[KERNEL2_SR] = KERNEL2_SEGMENT; 70590643Sbenno if ((chosen = OF_finddevice("/chosen")) == -1) 70690643Sbenno panic("pmap_bootstrap: can't find /chosen"); 70790643Sbenno OF_getprop(chosen, "mmu", &mmui, 4); 70890643Sbenno if ((mmu = OF_instance_to_package(mmui)) == -1) 70990643Sbenno panic("pmap_bootstrap: can't get mmu package"); 71090643Sbenno if ((sz = OF_getproplen(mmu, "translations")) == -1) 71190643Sbenno panic("pmap_bootstrap: can't get ofw translation count"); 712100319Sbenno translations = NULL; 713131401Sgrehan for (i = 0; phys_avail[i] != 0; i += 2) { 714131401Sgrehan if (phys_avail[i + 1] >= sz) { 715100319Sbenno translations = (struct ofw_map *)phys_avail[i]; 716131401Sgrehan break; 717131401Sgrehan } 718100319Sbenno } 719100319Sbenno if (translations == NULL) 720100319Sbenno panic("pmap_bootstrap: no space to copy translations"); 72190643Sbenno bzero(translations, sz); 72290643Sbenno if (OF_getprop(mmu, "translations", translations, sz) == -1) 72390643Sbenno panic("pmap_bootstrap: can't get ofw translations"); 72490643Sbenno CTR0(KTR_PMAP, "pmap_bootstrap: translations"); 72597346Sbenno sz /= sizeof(*translations); 72690643Sbenno qsort(translations, sz, sizeof (*translations), om_cmp); 727103604Sgrehan for (i = 0, ofw_mappings 
= 0; i < sz; i++) { 72890643Sbenno CTR3(KTR_PMAP, "translation: pa=%#x va=%#x len=%#x", 72990643Sbenno translations[i].om_pa, translations[i].om_va, 73090643Sbenno translations[i].om_len); 73177957Sbenno 732103604Sgrehan /* 733103604Sgrehan * If the mapping is 1:1, let the RAM and device on-demand 734103604Sgrehan * BAT tables take care of the translation. 735103604Sgrehan */ 736103604Sgrehan if (translations[i].om_va == translations[i].om_pa) 737103604Sgrehan continue; 73877957Sbenno 739103604Sgrehan /* Enter the pages */ 74090643Sbenno for (off = 0; off < translations[i].om_len; off += PAGE_SIZE) { 74190643Sbenno struct vm_page m; 74277957Sbenno 74390643Sbenno m.phys_addr = translations[i].om_pa + off; 74490643Sbenno pmap_enter(&ofw_pmap, translations[i].om_va + off, &m, 745103604Sgrehan VM_PROT_ALL, 1); 746103604Sgrehan ofw_mappings++; 74777957Sbenno } 74877957Sbenno } 74990643Sbenno#ifdef SMP 75090643Sbenno TLBSYNC(); 75190643Sbenno#endif 75277957Sbenno 75390643Sbenno /* 75490643Sbenno * Initialize the kernel pmap (which is statically allocated). 75590643Sbenno */ 756134329Salc PMAP_LOCK_INIT(kernel_pmap); 75790643Sbenno for (i = 0; i < 16; i++) { 75890643Sbenno kernel_pmap->pm_sr[i] = EMPTY_SEGMENT; 75977957Sbenno } 76090643Sbenno kernel_pmap->pm_sr[KERNEL_SR] = KERNEL_SEGMENT; 761126478Sgrehan kernel_pmap->pm_sr[KERNEL2_SR] = KERNEL_SEGMENT; 76290643Sbenno kernel_pmap->pm_active = ~0; 76377957Sbenno 76477957Sbenno /* 76590643Sbenno * Allocate a kernel stack with a guard page for thread0 and map it 76690643Sbenno * into the kernel page map. 
76777957Sbenno */ 76890643Sbenno pa = pmap_bootstrap_alloc(KSTACK_PAGES * PAGE_SIZE, 0); 76990643Sbenno kstack0_phys = pa; 77090643Sbenno kstack0 = virtual_avail + (KSTACK_GUARD_PAGES * PAGE_SIZE); 77190643Sbenno CTR2(KTR_PMAP, "pmap_bootstrap: kstack0 at %#x (%#x)", kstack0_phys, 77290643Sbenno kstack0); 77390643Sbenno virtual_avail += (KSTACK_PAGES + KSTACK_GUARD_PAGES) * PAGE_SIZE; 77490643Sbenno for (i = 0; i < KSTACK_PAGES; i++) { 77590643Sbenno pa = kstack0_phys + i * PAGE_SIZE; 77690643Sbenno va = kstack0 + i * PAGE_SIZE; 77790643Sbenno pmap_kenter(va, pa); 77890643Sbenno TLBIE(va); 77977957Sbenno } 78077957Sbenno 78190643Sbenno /* 782127875Salc * Calculate the last available physical address. 78390643Sbenno */ 78490643Sbenno for (i = 0; phys_avail[i + 2] != 0; i += 2) 78590643Sbenno ; 786128103Salc Maxmem = powerpc_btop(phys_avail[i + 1]); 78777957Sbenno 78877957Sbenno /* 78990643Sbenno * Allocate virtual address space for the message buffer. 79077957Sbenno */ 79190643Sbenno msgbufp = (struct msgbuf *)virtual_avail; 79290643Sbenno virtual_avail += round_page(MSGBUF_SIZE); 79377957Sbenno 79477957Sbenno /* 79590643Sbenno * Initialize hardware. 79677957Sbenno */ 79777957Sbenno for (i = 0; i < 16; i++) { 79894836Sbenno mtsrin(i << ADDR_SR_SHFT, EMPTY_SEGMENT); 79977957Sbenno } 80077957Sbenno __asm __volatile ("mtsr %0,%1" 80190643Sbenno :: "n"(KERNEL_SR), "r"(KERNEL_SEGMENT)); 80277957Sbenno __asm __volatile ("sync; mtsdr1 %0; isync" 80390643Sbenno :: "r"((u_int)pmap_pteg_table | (pmap_pteg_mask >> 10))); 80477957Sbenno tlbia(); 80577957Sbenno 80690643Sbenno pmap_bootstrapped++; 80777957Sbenno} 80877957Sbenno 80977957Sbenno/* 81090643Sbenno * Activate a user pmap. The pmap must be activated before it's address 81190643Sbenno * space can be accessed in any way. 
81277957Sbenno */ 81377957Sbennovoid 81490643Sbennopmap_activate(struct thread *td) 81577957Sbenno{ 81696250Sbenno pmap_t pm, pmr; 81777957Sbenno 81877957Sbenno /* 819103604Sgrehan * Load all the data we need up front to encourage the compiler to 82090643Sbenno * not issue any loads while we have interrupts disabled below. 82177957Sbenno */ 82290643Sbenno pm = &td->td_proc->p_vmspace->vm_pmap; 82377957Sbenno 82496250Sbenno if ((pmr = (pmap_t)pmap_kextract((vm_offset_t)pm)) == NULL) 82596250Sbenno pmr = pm; 82696250Sbenno 82790643Sbenno pm->pm_active |= PCPU_GET(cpumask); 82896250Sbenno PCPU_SET(curpmap, pmr); 82977957Sbenno} 83077957Sbenno 83191483Sbennovoid 83291483Sbennopmap_deactivate(struct thread *td) 83391483Sbenno{ 83491483Sbenno pmap_t pm; 83591483Sbenno 83691483Sbenno pm = &td->td_proc->p_vmspace->vm_pmap; 83791483Sbenno pm->pm_active &= ~(PCPU_GET(cpumask)); 83896250Sbenno PCPU_SET(curpmap, NULL); 83991483Sbenno} 84091483Sbenno 84190643Sbennovm_offset_t 84290643Sbennopmap_addr_hint(vm_object_t object, vm_offset_t va, vm_size_t size) 84377957Sbenno{ 84496353Sbenno 84596353Sbenno return (va); 84677957Sbenno} 84777957Sbenno 84877957Sbennovoid 84996353Sbennopmap_change_wiring(pmap_t pm, vm_offset_t va, boolean_t wired) 85077957Sbenno{ 85196353Sbenno struct pvo_entry *pvo; 85296353Sbenno 853134329Salc PMAP_LOCK(pm); 85496353Sbenno pvo = pmap_pvo_find_va(pm, va & ~ADDR_POFF, NULL); 85596353Sbenno 85696353Sbenno if (pvo != NULL) { 85796353Sbenno if (wired) { 85896353Sbenno if ((pvo->pvo_vaddr & PVO_WIRED) == 0) 85996353Sbenno pm->pm_stats.wired_count++; 86096353Sbenno pvo->pvo_vaddr |= PVO_WIRED; 86196353Sbenno } else { 86296353Sbenno if ((pvo->pvo_vaddr & PVO_WIRED) != 0) 86396353Sbenno pm->pm_stats.wired_count--; 86496353Sbenno pvo->pvo_vaddr &= ~PVO_WIRED; 86596353Sbenno } 86696353Sbenno } 867134329Salc PMAP_UNLOCK(pm); 86877957Sbenno} 86977957Sbenno 87077957Sbennovoid 87190643Sbennopmap_copy(pmap_t dst_pmap, pmap_t src_pmap, vm_offset_t dst_addr, 
87290643Sbenno vm_size_t len, vm_offset_t src_addr) 87377957Sbenno{ 87497385Sbenno 87597385Sbenno /* 87697385Sbenno * This is not needed as it's mainly an optimisation. 87797385Sbenno * It may want to be implemented later though. 87897385Sbenno */ 87977957Sbenno} 88077957Sbenno 88177957Sbennovoid 88297385Sbennopmap_copy_page(vm_page_t msrc, vm_page_t mdst) 88377957Sbenno{ 88497385Sbenno vm_offset_t dst; 88597385Sbenno vm_offset_t src; 88697385Sbenno 88797385Sbenno dst = VM_PAGE_TO_PHYS(mdst); 88897385Sbenno src = VM_PAGE_TO_PHYS(msrc); 88997385Sbenno 89097385Sbenno kcopy((void *)src, (void *)dst, PAGE_SIZE); 89177957Sbenno} 89277957Sbenno 89377957Sbenno/* 89490643Sbenno * Zero a page of physical memory by temporarily mapping it into the tlb. 89577957Sbenno */ 89677957Sbennovoid 89794777Speterpmap_zero_page(vm_page_t m) 89877957Sbenno{ 89994777Speter vm_offset_t pa = VM_PAGE_TO_PHYS(m); 900110172Sgrehan caddr_t va; 90177957Sbenno 90290643Sbenno if (pa < SEGMENT_LENGTH) { 90390643Sbenno va = (caddr_t) pa; 90490643Sbenno } else if (pmap_initialized) { 90590643Sbenno if (pmap_pvo_zeropage == NULL) 90690643Sbenno pmap_pvo_zeropage = pmap_rkva_alloc(); 90790643Sbenno pmap_pa_map(pmap_pvo_zeropage, pa, NULL, NULL); 90890643Sbenno va = (caddr_t)PVO_VADDR(pmap_pvo_zeropage); 90990643Sbenno } else { 91090643Sbenno panic("pmap_zero_page: can't zero pa %#x", pa); 91177957Sbenno } 91290643Sbenno 91390643Sbenno bzero(va, PAGE_SIZE); 91490643Sbenno 91590643Sbenno if (pa >= SEGMENT_LENGTH) 91690643Sbenno pmap_pa_unmap(pmap_pvo_zeropage, NULL, NULL); 91777957Sbenno} 91877957Sbenno 91977957Sbennovoid 92094777Speterpmap_zero_page_area(vm_page_t m, int off, int size) 92177957Sbenno{ 92299666Sbenno vm_offset_t pa = VM_PAGE_TO_PHYS(m); 923103604Sgrehan caddr_t va; 92499666Sbenno 92599666Sbenno if (pa < SEGMENT_LENGTH) { 92699666Sbenno va = (caddr_t) pa; 92799666Sbenno } else if (pmap_initialized) { 92899666Sbenno if (pmap_pvo_zeropage == NULL) 92999666Sbenno pmap_pvo_zeropage = 
pmap_rkva_alloc(); 93099666Sbenno pmap_pa_map(pmap_pvo_zeropage, pa, NULL, NULL); 93199666Sbenno va = (caddr_t)PVO_VADDR(pmap_pvo_zeropage); 93299666Sbenno } else { 93399666Sbenno panic("pmap_zero_page: can't zero pa %#x", pa); 93499666Sbenno } 93599666Sbenno 936103604Sgrehan bzero(va + off, size); 93799666Sbenno 93899666Sbenno if (pa >= SEGMENT_LENGTH) 93999666Sbenno pmap_pa_unmap(pmap_pvo_zeropage, NULL, NULL); 94077957Sbenno} 94177957Sbenno 94299571Spetervoid 94399571Speterpmap_zero_page_idle(vm_page_t m) 94499571Speter{ 94599571Speter 94699571Speter /* XXX this is called outside of Giant, is pmap_zero_page safe? */ 94799571Speter /* XXX maybe have a dedicated mapping for this to avoid the problem? */ 94899571Speter mtx_lock(&Giant); 94999571Speter pmap_zero_page(m); 95099571Speter mtx_unlock(&Giant); 95199571Speter} 95299571Speter 95377957Sbenno/* 95490643Sbenno * Map the given physical page at the specified virtual address in the 95590643Sbenno * target pmap with the protection requested. If specified the page 95690643Sbenno * will be wired down. 
 */
void
pmap_enter(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot,
	   boolean_t wired)
{
	struct		pvo_head *pvo_head;
	uma_zone_t	zone;
	vm_page_t	pg;
	u_int		pte_lo, pvo_flags, was_exec, i;
	int		error;

	/*
	 * Before pmap_init has run, every entry is treated as unmanaged and
	 * allocated from the unmanaged-PVO zone; afterwards, managed pages
	 * link their PVOs onto the page's pvo list.
	 */
	if (!pmap_initialized) {
		pvo_head = &pmap_pvo_kunmanaged;
		zone = pmap_upvo_zone;
		pvo_flags = 0;
		pg = NULL;
		was_exec = PTE_EXEC;
	} else {
		pvo_head = vm_page_to_pvoh(m);
		pg = m;
		zone = pmap_mpvo_zone;
		pvo_flags = PVO_MANAGED;
		was_exec = 0;
	}
	/* The page-queues lock only exists after bootstrap. */
	if (pmap_bootstrapped)
		vm_page_lock_queues();
	PMAP_LOCK(pmap);

	/*
	 * If this is a managed page, and it's the first reference to the page,
	 * clear the execness of the page.  Otherwise fetch the execness.
	 */
	if (pg != NULL) {
		if (LIST_EMPTY(pvo_head)) {
			pmap_attr_clear(pg, PTE_EXEC);
		} else {
			was_exec = pmap_attr_fetch(pg) & PTE_EXEC;
		}
	}

	/*
	 * Assume the page is cache inhibited and access is guarded unless
	 * it's in our available memory array.
	 */
	pte_lo = PTE_I | PTE_G;
	for (i = 0; i < pregions_sz; i++) {
		if ((VM_PAGE_TO_PHYS(m) >= pregions[i].mr_start) &&
		    (VM_PAGE_TO_PHYS(m) <
			(pregions[i].mr_start + pregions[i].mr_size))) {
			pte_lo &= ~(PTE_I | PTE_G);
			break;
		}
	}

	if (prot & VM_PROT_WRITE)
		pte_lo |= PTE_BW;
	else
		pte_lo |= PTE_BR;

	pvo_flags |= (prot & VM_PROT_EXECUTE);

	if (wired)
		pvo_flags |= PVO_WIRED;

	error = pmap_pvo_enter(pmap, zone, pvo_head, va, VM_PAGE_TO_PHYS(m),
	    pte_lo, pvo_flags);

	/*
	 * Flush the real page from the instruction cache if this page is
	 * mapped executable and cacheable and was not previously mapped (or
	 * was not mapped executable).
	 */
	if (error == 0 && (pvo_flags & PVO_EXECUTABLE) &&
	    (pte_lo & PTE_I) == 0 && was_exec == 0) {
		/*
		 * Flush the real memory from the cache.
		 */
		pmap_syncicache(VM_PAGE_TO_PHYS(m), PAGE_SIZE);
		if (pg != NULL)
			pmap_attr_save(pg, PTE_EXEC);
	}
	if (pmap_bootstrapped)
		vm_page_unlock_queues();

	/* XXX syncicache always until problems are sorted */
	pmap_syncicache(VM_PAGE_TO_PHYS(m), PAGE_SIZE);
	PMAP_UNLOCK(pmap);
}

/*
 * Enter a read/execute mapping for prefault; wraps pmap_enter under Giant.
 * Always returns NULL (no page-table page to track on this platform).
 */
vm_page_t
pmap_enter_quick(pmap_t pm, vm_offset_t va, vm_page_t m, vm_page_t mpte)
{

	mtx_lock(&Giant);
	pmap_enter(pm, va, m, VM_PROT_READ | VM_PROT_EXECUTE, FALSE);
	mtx_unlock(&Giant);
	return (NULL);
}

/*
 * Return the physical address backing 'va' in the given pmap, or 0 when
 * no mapping exists.
 */
vm_paddr_t
pmap_extract(pmap_t pm, vm_offset_t va)
{
	struct	pvo_entry *pvo;
	vm_paddr_t pa;

	PMAP_LOCK(pm);
	pvo = pmap_pvo_find_va(pm, va & ~ADDR_POFF, NULL);
	if (pvo == NULL)
		pa = 0;
	else
		pa = (pvo->pvo_pte.pte_lo & PTE_RPGN) | (va & ADDR_POFF);
	PMAP_UNLOCK(pm);
	return (pa);
}

/*
 * Atomically extract and hold the physical page with the given
 * pmap and virtual address pair if that mapping permits the given
 * protection.
 */
vm_page_t
pmap_extract_and_hold(pmap_t pmap, vm_offset_t va, vm_prot_t prot)
{
	struct	pvo_entry *pvo;
	vm_page_t m;

	m = NULL;
	mtx_lock(&Giant);
	vm_page_lock_queues();
	PMAP_LOCK(pmap);
	pvo = pmap_pvo_find_va(pmap, va & ~ADDR_POFF, NULL);
	/*
	 * A write request is only satisfied by a currently-writable
	 * (PTE_RW) mapping; reads accept any valid mapping.
	 */
	if (pvo != NULL && (pvo->pvo_pte.pte_hi & PTE_VALID) &&
	    ((pvo->pvo_pte.pte_lo & PTE_PP) == PTE_RW ||
	     (prot & VM_PROT_WRITE) == 0)) {
		m = PHYS_TO_VM_PAGE(pvo->pvo_pte.pte_lo & PTE_RPGN);
		vm_page_hold(m);
	}
	vm_page_unlock_queues();
	PMAP_UNLOCK(pmap);
	mtx_unlock(&Giant);
	return (m);
}

/*
 * Grow the number of kernel page table entries.  Unneeded.
110377957Sbenno */ 110490643Sbennovoid 110590643Sbennopmap_growkernel(vm_offset_t addr) 110677957Sbenno{ 110790643Sbenno} 110877957Sbenno 110990643Sbennovoid 1110127869Salcpmap_init(void) 111190643Sbenno{ 111277957Sbenno 111394753Sbenno CTR0(KTR_PMAP, "pmap_init"); 111477957Sbenno 111592847Sjeff pmap_upvo_zone = uma_zcreate("UPVO entry", sizeof (struct pvo_entry), 1116125442Sgrehan NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 1117125442Sgrehan UMA_ZONE_VM | UMA_ZONE_NOFREE); 111892847Sjeff pmap_mpvo_zone = uma_zcreate("MPVO entry", sizeof(struct pvo_entry), 1119125442Sgrehan NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 1120125442Sgrehan UMA_ZONE_VM | UMA_ZONE_NOFREE); 112190643Sbenno pmap_initialized = TRUE; 112277957Sbenno} 112377957Sbenno 112499037Sbennovoid 112599037Sbennopmap_init2(void) 112699037Sbenno{ 112799037Sbenno 112899037Sbenno CTR0(KTR_PMAP, "pmap_init2"); 112999037Sbenno} 113099037Sbenno 113190643Sbennoboolean_t 113290643Sbennopmap_is_modified(vm_page_t m) 113390643Sbenno{ 113496353Sbenno 1135110172Sgrehan if ((m->flags & (PG_FICTITIOUS |PG_UNMANAGED)) != 0) 113696353Sbenno return (FALSE); 113796353Sbenno 113896353Sbenno return (pmap_query_bit(m, PTE_CHG)); 113990643Sbenno} 114090643Sbenno 1141120722Salc/* 1142120722Salc * pmap_is_prefaultable: 1143120722Salc * 1144120722Salc * Return whether or not the specified virtual address is elgible 1145120722Salc * for prefault. 
1146120722Salc */ 1147120722Salcboolean_t 1148120722Salcpmap_is_prefaultable(pmap_t pmap, vm_offset_t addr) 1149120722Salc{ 1150120722Salc 1151120722Salc return (FALSE); 1152120722Salc} 1153120722Salc 115490643Sbennovoid 115590643Sbennopmap_clear_reference(vm_page_t m) 115690643Sbenno{ 1157110172Sgrehan 1158110172Sgrehan if ((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) != 0) 1159110172Sgrehan return; 1160110172Sgrehan pmap_clear_bit(m, PTE_REF, NULL); 116190643Sbenno} 116290643Sbenno 1163110172Sgrehanvoid 1164110172Sgrehanpmap_clear_modify(vm_page_t m) 1165110172Sgrehan{ 1166110172Sgrehan 1167110172Sgrehan if ((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) != 0) 1168110172Sgrehan return; 1169110172Sgrehan pmap_clear_bit(m, PTE_CHG, NULL); 1170110172Sgrehan} 1171110172Sgrehan 117291403Ssilby/* 117391403Ssilby * pmap_ts_referenced: 117491403Ssilby * 117591403Ssilby * Return a count of reference bits for a page, clearing those bits. 117691403Ssilby * It is not necessary for every reference bit to be cleared, but it 117791403Ssilby * is necessary that 0 only be returned when there are truly no 117891403Ssilby * reference bits set. 117991403Ssilby * 118091403Ssilby * XXX: The exact number of bits to check and clear is a matter that 118191403Ssilby * should be tested and standardized at some point in the future for 118291403Ssilby * optimal aging of shared pages. 118391403Ssilby */ 118490643Sbennoint 118590643Sbennopmap_ts_referenced(vm_page_t m) 118690643Sbenno{ 1187110172Sgrehan int count; 1188110172Sgrehan 1189110172Sgrehan if ((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) != 0) 1190110172Sgrehan return (0); 1191110172Sgrehan 1192110172Sgrehan count = pmap_clear_bit(m, PTE_REF, NULL); 1193110172Sgrehan 1194110172Sgrehan return (count); 119590643Sbenno} 119690643Sbenno 119777957Sbenno/* 119890643Sbenno * Map a wired page into kernel virtual address space. 
 */
void
pmap_kenter(vm_offset_t va, vm_offset_t pa)
{
	u_int		pte_lo;
	int		error;
	int		i;

#if 0
	if (va < VM_MIN_KERNEL_ADDRESS)
		panic("pmap_kenter: attempt to enter non-kernel address %#x",
		    va);
#endif

	/*
	 * Cache-inhibit and guard the mapping unless the physical address
	 * falls within a known RAM region.
	 */
	pte_lo = PTE_I | PTE_G;
	for (i = 0; i < pregions_sz; i++) {
		if ((pa >= pregions[i].mr_start) &&
		    (pa < (pregions[i].mr_start + pregions[i].mr_size))) {
			pte_lo &= ~(PTE_I | PTE_G);
			break;
		}
	}

	error = pmap_pvo_enter(kernel_pmap, pmap_upvo_zone,
	    &pmap_pvo_kunmanaged, va, pa, pte_lo, PVO_WIRED);

	/* ENOENT means the entry already existed; that is acceptable here. */
	if (error != 0 && error != ENOENT)
		panic("pmap_kenter: failed to enter va %#x pa %#x: %d", va,
		    pa, error);

	/*
	 * Flush the real memory from the instruction cache.
	 */
	if ((pte_lo & (PTE_I | PTE_G)) == 0) {
		pmap_syncicache(pa, PAGE_SIZE);
	}
}

/*
 * Extract the physical page address associated with the given kernel virtual
 * address.
 */
vm_offset_t
pmap_kextract(vm_offset_t va)
{
	struct		pvo_entry *pvo;
	vm_paddr_t pa;

#ifdef UMA_MD_SMALL_ALLOC
	/*
	 * Allow direct mappings
	 */
	if (va < VM_MIN_KERNEL_ADDRESS) {
		return (va);
	}
#endif

	PMAP_LOCK(kernel_pmap);
	pvo = pmap_pvo_find_va(kernel_pmap, va & ~ADDR_POFF, NULL);
	KASSERT(pvo != NULL, ("pmap_kextract: no addr found"));
	pa = (pvo->pvo_pte.pte_lo & PTE_RPGN) | (va & ADDR_POFF);
	PMAP_UNLOCK(kernel_pmap);
	return (pa);
}

/*
 * Remove a wired page from kernel virtual address space.
 */
void
pmap_kremove(vm_offset_t va)
{

	pmap_remove(kernel_pmap, va, va + PAGE_SIZE);
}

/*
 * Map a range of physical addresses into kernel virtual address space.
 *
 * The value passed in *virt is a suggested virtual address for the mapping.
 * Architectures which can support a direct-mapped physical to virtual region
 * can return the appropriate address within that region, leaving '*virt'
 * unchanged.  We cannot and therefore do not; *virt is updated with the
 * first usable address after the mapped region.
128277957Sbenno */ 128390643Sbennovm_offset_t 128490643Sbennopmap_map(vm_offset_t *virt, vm_offset_t pa_start, vm_offset_t pa_end, int prot) 128577957Sbenno{ 128690643Sbenno vm_offset_t sva, va; 128777957Sbenno 128890643Sbenno sva = *virt; 128990643Sbenno va = sva; 129090643Sbenno for (; pa_start < pa_end; pa_start += PAGE_SIZE, va += PAGE_SIZE) 129190643Sbenno pmap_kenter(va, pa_start); 129290643Sbenno *virt = va; 129390643Sbenno return (sva); 129477957Sbenno} 129577957Sbenno 129690643Sbennoint 129790643Sbennopmap_mincore(pmap_t pmap, vm_offset_t addr) 129877957Sbenno{ 129990643Sbenno TODO; 130090643Sbenno return (0); 130177957Sbenno} 130277957Sbenno 130377957Sbennovoid 130494838Sbennopmap_object_init_pt(pmap_t pm, vm_offset_t addr, vm_object_t object, 1305117206Salc vm_pindex_t pindex, vm_size_t size) 130690643Sbenno{ 130794838Sbenno 1308117206Salc VM_OBJECT_LOCK_ASSERT(object, MA_OWNED); 1309117206Salc KASSERT(object->type == OBJT_DEVICE, 1310117206Salc ("pmap_object_init_pt: non-device object")); 131194838Sbenno KASSERT(pm == &curproc->p_vmspace->vm_pmap || pm == kernel_pmap, 1312117206Salc ("pmap_object_init_pt: non current pmap")); 131377957Sbenno} 131477957Sbenno 131577957Sbenno/* 131690643Sbenno * Lower the permission for all mappings to a given page. 131777957Sbenno */ 131877957Sbennovoid 131977957Sbennopmap_page_protect(vm_page_t m, vm_prot_t prot) 132077957Sbenno{ 132190643Sbenno struct pvo_head *pvo_head; 132290643Sbenno struct pvo_entry *pvo, *next_pvo; 132390643Sbenno struct pte *pt; 1324134329Salc pmap_t pmap; 132577957Sbenno 132690643Sbenno /* 132790643Sbenno * Since the routine only downgrades protection, if the 132890643Sbenno * maximal protection is desired, there isn't any change 132990643Sbenno * to be made. 
133090643Sbenno */ 133190643Sbenno if ((prot & (VM_PROT_READ|VM_PROT_WRITE)) == 133290643Sbenno (VM_PROT_READ|VM_PROT_WRITE)) 133377957Sbenno return; 133477957Sbenno 133590643Sbenno pvo_head = vm_page_to_pvoh(m); 133690643Sbenno for (pvo = LIST_FIRST(pvo_head); pvo != NULL; pvo = next_pvo) { 133790643Sbenno next_pvo = LIST_NEXT(pvo, pvo_vlink); 133890643Sbenno PMAP_PVO_CHECK(pvo); /* sanity check */ 1339134329Salc pmap = pvo->pvo_pmap; 1340134329Salc PMAP_LOCK(pmap); 134190643Sbenno 134290643Sbenno /* 134390643Sbenno * Downgrading to no mapping at all, we just remove the entry. 134490643Sbenno */ 134590643Sbenno if ((prot & VM_PROT_READ) == 0) { 134690643Sbenno pmap_pvo_remove(pvo, -1); 1347134329Salc PMAP_UNLOCK(pmap); 134890643Sbenno continue; 134977957Sbenno } 135090643Sbenno 135190643Sbenno /* 135290643Sbenno * If EXEC permission is being revoked, just clear the flag 135390643Sbenno * in the PVO. 135490643Sbenno */ 135590643Sbenno if ((prot & VM_PROT_EXECUTE) == 0) 135690643Sbenno pvo->pvo_vaddr &= ~PVO_EXECUTABLE; 135790643Sbenno 135890643Sbenno /* 135990643Sbenno * If this entry is already RO, don't diddle with the page 136090643Sbenno * table. 136190643Sbenno */ 136290643Sbenno if ((pvo->pvo_pte.pte_lo & PTE_PP) == PTE_BR) { 1363134329Salc PMAP_UNLOCK(pmap); 136490643Sbenno PMAP_PVO_CHECK(pvo); 136590643Sbenno continue; 136677957Sbenno } 136790643Sbenno 136890643Sbenno /* 136990643Sbenno * Grab the PTE before we diddle the bits so pvo_to_pte can 137090643Sbenno * verify the pte contents are as expected. 
137190643Sbenno */ 137290643Sbenno pt = pmap_pvo_to_pte(pvo, -1); 137390643Sbenno pvo->pvo_pte.pte_lo &= ~PTE_PP; 137490643Sbenno pvo->pvo_pte.pte_lo |= PTE_BR; 137590643Sbenno if (pt != NULL) 137690643Sbenno pmap_pte_change(pt, &pvo->pvo_pte, pvo->pvo_vaddr); 1377134329Salc PMAP_UNLOCK(pmap); 137890643Sbenno PMAP_PVO_CHECK(pvo); /* sanity check */ 137977957Sbenno } 1380133166Sgrehan 1381133166Sgrehan /* 1382133166Sgrehan * Downgrading from writeable: clear the VM page flag 1383133166Sgrehan */ 1384133166Sgrehan if ((prot & VM_PROT_WRITE) != VM_PROT_WRITE) 1385133166Sgrehan vm_page_flag_clear(m, PG_WRITEABLE); 138677957Sbenno} 138777957Sbenno 138877957Sbenno/* 138991403Ssilby * Returns true if the pmap's pv is one of the first 139091403Ssilby * 16 pvs linked to from this page. This count may 139191403Ssilby * be changed upwards or downwards in the future; it 139291403Ssilby * is only necessary that true be returned for a small 139391403Ssilby * subset of pmaps for proper page aging. 139491403Ssilby */ 139590643Sbennoboolean_t 139691403Ssilbypmap_page_exists_quick(pmap_t pmap, vm_page_t m) 139790643Sbenno{ 1398110172Sgrehan int loops; 1399110172Sgrehan struct pvo_entry *pvo; 1400110172Sgrehan 1401110172Sgrehan if (!pmap_initialized || (m->flags & PG_FICTITIOUS)) 1402110172Sgrehan return FALSE; 1403110172Sgrehan 1404110172Sgrehan loops = 0; 1405110172Sgrehan LIST_FOREACH(pvo, vm_page_to_pvoh(m), pvo_vlink) { 1406110172Sgrehan if (pvo->pvo_pmap == pmap) 1407110172Sgrehan return (TRUE); 1408110172Sgrehan if (++loops >= 16) 1409110172Sgrehan break; 1410110172Sgrehan } 1411110172Sgrehan 1412110172Sgrehan return (FALSE); 141390643Sbenno} 141477957Sbenno 141590643Sbennostatic u_int pmap_vsidcontext; 141677957Sbenno 141790643Sbennovoid 141890643Sbennopmap_pinit(pmap_t pmap) 141990643Sbenno{ 142090643Sbenno int i, mask; 142190643Sbenno u_int entropy; 142277957Sbenno 1423126478Sgrehan KASSERT((int)pmap < VM_MIN_KERNEL_ADDRESS, ("pmap_pinit: virt pmap")); 1424134329Salc 
PMAP_LOCK_INIT(pmap); 1425126478Sgrehan 142690643Sbenno entropy = 0; 142790643Sbenno __asm __volatile("mftb %0" : "=r"(entropy)); 142877957Sbenno 142990643Sbenno /* 143090643Sbenno * Allocate some segment registers for this pmap. 143190643Sbenno */ 143290643Sbenno for (i = 0; i < NPMAPS; i += VSID_NBPW) { 143390643Sbenno u_int hash, n; 143477957Sbenno 143577957Sbenno /* 143690643Sbenno * Create a new value by mutiplying by a prime and adding in 143790643Sbenno * entropy from the timebase register. This is to make the 143890643Sbenno * VSID more random so that the PT hash function collides 143990643Sbenno * less often. (Note that the prime casues gcc to do shifts 144090643Sbenno * instead of a multiply.) 144177957Sbenno */ 144290643Sbenno pmap_vsidcontext = (pmap_vsidcontext * 0x1105) + entropy; 144390643Sbenno hash = pmap_vsidcontext & (NPMAPS - 1); 144490643Sbenno if (hash == 0) /* 0 is special, avoid it */ 144590643Sbenno continue; 144690643Sbenno n = hash >> 5; 144790643Sbenno mask = 1 << (hash & (VSID_NBPW - 1)); 144890643Sbenno hash = (pmap_vsidcontext & 0xfffff); 144990643Sbenno if (pmap_vsid_bitmap[n] & mask) { /* collision? */ 145090643Sbenno /* anything free in this bucket? */ 145190643Sbenno if (pmap_vsid_bitmap[n] == 0xffffffff) { 145290643Sbenno entropy = (pmap_vsidcontext >> 20); 145390643Sbenno continue; 145490643Sbenno } 145590643Sbenno i = ffs(~pmap_vsid_bitmap[i]) - 1; 145690643Sbenno mask = 1 << i; 145790643Sbenno hash &= 0xfffff & ~(VSID_NBPW - 1); 145890643Sbenno hash |= i; 145977957Sbenno } 146090643Sbenno pmap_vsid_bitmap[n] |= mask; 146190643Sbenno for (i = 0; i < 16; i++) 146290643Sbenno pmap->pm_sr[i] = VSID_MAKE(i, hash); 146390643Sbenno return; 146490643Sbenno } 146577957Sbenno 146690643Sbenno panic("pmap_pinit: out of segments"); 146777957Sbenno} 146877957Sbenno 146977957Sbenno/* 147090643Sbenno * Initialize the pmap associated with process 0. 
 */
void
pmap_pinit0(pmap_t pm)
{

	pmap_pinit(pm);
	bzero(&pm->pm_stats, sizeof(pm->pm_stats));
}

/*
 * Set the physical protection on the specified range of this map as requested.
 */
void
pmap_protect(pmap_t pm, vm_offset_t sva, vm_offset_t eva, vm_prot_t prot)
{
	struct	pvo_entry *pvo;
	struct	pte *pt;
	int	pteidx;

	CTR4(KTR_PMAP, "pmap_protect: pm=%p sva=%#x eva=%#x prot=%#x", pm, sva,
	    eva, prot);

	KASSERT(pm == &curproc->p_vmspace->vm_pmap || pm == kernel_pmap,
	    ("pmap_protect: non current pmap"));

	/* Revoking read access entirely means removing the mappings. */
	if ((prot & VM_PROT_READ) == VM_PROT_NONE) {
		mtx_lock(&Giant);
		pmap_remove(pm, sva, eva);
		mtx_unlock(&Giant);
		return;
	}

	mtx_lock(&Giant);
	vm_page_lock_queues();
	PMAP_LOCK(pm);
	for (; sva < eva; sva += PAGE_SIZE) {
		pvo = pmap_pvo_find_va(pm, sva, &pteidx);
		if (pvo == NULL)
			continue;

		if ((prot & VM_PROT_EXECUTE) == 0)
			pvo->pvo_vaddr &= ~PVO_EXECUTABLE;

		/*
		 * Grab the PTE pointer before we diddle with the cached PTE
		 * copy.
		 */
		pt = pmap_pvo_to_pte(pvo, pteidx);
		/*
		 * Change the protection of the page.
		 */
		pvo->pvo_pte.pte_lo &= ~PTE_PP;
		pvo->pvo_pte.pte_lo |= PTE_BR;

		/*
		 * If the PVO is in the page table, update that pte as well.
		 */
		if (pt != NULL)
			pmap_pte_change(pt, &pvo->pvo_pte, pvo->pvo_vaddr);
	}
	vm_page_unlock_queues();
	PMAP_UNLOCK(pm);
	mtx_unlock(&Giant);
}

/*
 * Map a list of wired pages into kernel virtual address space.  This is
 * intended for temporary mappings which do not need page modification or
 * references recorded.  Existing mappings in the region are overwritten.
 */
void
pmap_qenter(vm_offset_t sva, vm_page_t *m, int count)
{
	vm_offset_t va;

	va = sva;
	while (count-- > 0) {
		pmap_kenter(va, VM_PAGE_TO_PHYS(*m));
		va += PAGE_SIZE;
		m++;
	}
}

/*
 * Remove page mappings from kernel virtual address space.  Intended for
 * temporary mappings entered by pmap_qenter.
 */
void
pmap_qremove(vm_offset_t sva, int count)
{
	vm_offset_t va;

	va = sva;
	while (count-- > 0) {
		pmap_kremove(va);
		va += PAGE_SIZE;
	}
}

/*
 * Release the resources held by a pmap: return its VSID to the free
 * bitmap and destroy the pmap lock.
 */
void
pmap_release(pmap_t pmap)
{
	int idx, mask;

	/*
	 * Free segment register's VSID
	 */
	if (pmap->pm_sr[0] == 0)
		panic("pmap_release");

	idx  = VSID_TO_HASH(pmap->pm_sr[0]) & (NPMAPS-1);
	mask = 1 << (idx % VSID_NBPW);
	idx /= VSID_NBPW;
	pmap_vsid_bitmap[idx] &= ~mask;
	PMAP_LOCK_DESTROY(pmap);
}

/*
 * Remove the given range of addresses from the specified map.
 */
void
pmap_remove(pmap_t pm, vm_offset_t sva, vm_offset_t eva)
{
	struct	pvo_entry *pvo;
	int	pteidx;

	vm_page_lock_queues();
	PMAP_LOCK(pm);
	for (; sva < eva; sva += PAGE_SIZE) {
		pvo = pmap_pvo_find_va(pm, sva, &pteidx);
		if (pvo != NULL) {
			pmap_pvo_remove(pvo, pteidx);
		}
	}
	vm_page_unlock_queues();
	PMAP_UNLOCK(pm);
}

/*
 * Remove physical page from all pmaps in which it resides. pmap_pvo_remove()
 * will reflect changes in pte's back to the vm_page.
 */
void
pmap_remove_all(vm_page_t m)
{
	struct  pvo_head *pvo_head;
	struct	pvo_entry *pvo, *next_pvo;
	pmap_t	pmap;

	mtx_assert(&vm_page_queue_mtx, MA_OWNED);

	pvo_head = vm_page_to_pvoh(m);
	/* Walk with a saved 'next' pointer since pmap_pvo_remove unlinks. */
	for (pvo = LIST_FIRST(pvo_head); pvo != NULL; pvo = next_pvo) {
		next_pvo = LIST_NEXT(pvo, pvo_vlink);

		PMAP_PVO_CHECK(pvo);	/* sanity check */
		pmap = pvo->pvo_pmap;
		PMAP_LOCK(pmap);
		pmap_pvo_remove(pvo, -1);
		PMAP_UNLOCK(pmap);
	}
	vm_page_flag_clear(m, PG_WRITEABLE);
}

/*
 * Remove all pages from specified address space, this aids process exit
 * speeds.  This is much faster than pmap_remove in the case of running down
 * an entire address space.  Only works for the current pmap.
 */
void
pmap_remove_pages(pmap_t pm, vm_offset_t sva, vm_offset_t eva)
{
}

/*
 * Allocate a physical page of memory directly from the phys_avail map.
 * Can only be called from pmap_bootstrap before avail start and end are
 * calculated.
165083682Smp */ 165190643Sbennostatic vm_offset_t 165290643Sbennopmap_bootstrap_alloc(vm_size_t size, u_int align) 165383682Smp{ 165490643Sbenno vm_offset_t s, e; 165590643Sbenno int i, j; 165683682Smp 165790643Sbenno size = round_page(size); 165890643Sbenno for (i = 0; phys_avail[i + 1] != 0; i += 2) { 165990643Sbenno if (align != 0) 166090643Sbenno s = (phys_avail[i] + align - 1) & ~(align - 1); 166190643Sbenno else 166290643Sbenno s = phys_avail[i]; 166390643Sbenno e = s + size; 166490643Sbenno 166590643Sbenno if (s < phys_avail[i] || e > phys_avail[i + 1]) 166690643Sbenno continue; 166790643Sbenno 166890643Sbenno if (s == phys_avail[i]) { 166990643Sbenno phys_avail[i] += size; 167090643Sbenno } else if (e == phys_avail[i + 1]) { 167190643Sbenno phys_avail[i + 1] -= size; 167290643Sbenno } else { 167390643Sbenno for (j = phys_avail_count * 2; j > i; j -= 2) { 167490643Sbenno phys_avail[j] = phys_avail[j - 2]; 167590643Sbenno phys_avail[j + 1] = phys_avail[j - 1]; 167690643Sbenno } 167790643Sbenno 167890643Sbenno phys_avail[i + 3] = phys_avail[i + 1]; 167990643Sbenno phys_avail[i + 1] = s; 168090643Sbenno phys_avail[i + 2] = e; 168190643Sbenno phys_avail_count++; 168290643Sbenno } 168390643Sbenno 168490643Sbenno return (s); 168583682Smp } 168690643Sbenno panic("pmap_bootstrap_alloc: could not allocate memory"); 168783682Smp} 168883682Smp 168983682Smp/* 169090643Sbenno * Return an unmapped pvo for a kernel virtual address. 169190643Sbenno * Used by pmap functions that operate on physical pages. 
169283682Smp */ 169390643Sbennostatic struct pvo_entry * 169490643Sbennopmap_rkva_alloc(void) 169583682Smp{ 169690643Sbenno struct pvo_entry *pvo; 169790643Sbenno struct pte *pt; 169890643Sbenno vm_offset_t kva; 169990643Sbenno int pteidx; 170083682Smp 170190643Sbenno if (pmap_rkva_count == 0) 170290643Sbenno panic("pmap_rkva_alloc: no more reserved KVAs"); 170390643Sbenno 170490643Sbenno kva = pmap_rkva_start + (PAGE_SIZE * --pmap_rkva_count); 170590643Sbenno pmap_kenter(kva, 0); 170690643Sbenno 170790643Sbenno pvo = pmap_pvo_find_va(kernel_pmap, kva, &pteidx); 170890643Sbenno 170990643Sbenno if (pvo == NULL) 171090643Sbenno panic("pmap_kva_alloc: pmap_pvo_find_va failed"); 171190643Sbenno 171290643Sbenno pt = pmap_pvo_to_pte(pvo, pteidx); 171390643Sbenno 171490643Sbenno if (pt == NULL) 171590643Sbenno panic("pmap_kva_alloc: pmap_pvo_to_pte failed"); 171690643Sbenno 171790643Sbenno pmap_pte_unset(pt, &pvo->pvo_pte, pvo->pvo_vaddr); 171890643Sbenno PVO_PTEGIDX_CLR(pvo); 171990643Sbenno 172090643Sbenno pmap_pte_overflow++; 172190643Sbenno 172290643Sbenno return (pvo); 172390643Sbenno} 172490643Sbenno 172590643Sbennostatic void 172690643Sbennopmap_pa_map(struct pvo_entry *pvo, vm_offset_t pa, struct pte *saved_pt, 172790643Sbenno int *depth_p) 172890643Sbenno{ 172990643Sbenno struct pte *pt; 173090643Sbenno 173190643Sbenno /* 173290643Sbenno * If this pvo already has a valid pte, we need to save it so it can 173390643Sbenno * be restored later. We then just reload the new PTE over the old 173490643Sbenno * slot. 
173590643Sbenno */ 173690643Sbenno if (saved_pt != NULL) { 173790643Sbenno pt = pmap_pvo_to_pte(pvo, -1); 173890643Sbenno 173990643Sbenno if (pt != NULL) { 174090643Sbenno pmap_pte_unset(pt, &pvo->pvo_pte, pvo->pvo_vaddr); 174190643Sbenno PVO_PTEGIDX_CLR(pvo); 174290643Sbenno pmap_pte_overflow++; 174383682Smp } 174490643Sbenno 174590643Sbenno *saved_pt = pvo->pvo_pte; 174690643Sbenno 174790643Sbenno pvo->pvo_pte.pte_lo &= ~PTE_RPGN; 174883682Smp } 174990643Sbenno 175090643Sbenno pvo->pvo_pte.pte_lo |= pa; 175190643Sbenno 175290643Sbenno if (!pmap_pte_spill(pvo->pvo_vaddr)) 175390643Sbenno panic("pmap_pa_map: could not spill pvo %p", pvo); 175490643Sbenno 175590643Sbenno if (depth_p != NULL) 175690643Sbenno (*depth_p)++; 175783682Smp} 175883682Smp 175990643Sbennostatic void 176090643Sbennopmap_pa_unmap(struct pvo_entry *pvo, struct pte *saved_pt, int *depth_p) 176177957Sbenno{ 176290643Sbenno struct pte *pt; 176377957Sbenno 176490643Sbenno pt = pmap_pvo_to_pte(pvo, -1); 176590643Sbenno 176690643Sbenno if (pt != NULL) { 176790643Sbenno pmap_pte_unset(pt, &pvo->pvo_pte, pvo->pvo_vaddr); 176890643Sbenno PVO_PTEGIDX_CLR(pvo); 176990643Sbenno pmap_pte_overflow++; 177090643Sbenno } 177190643Sbenno 177290643Sbenno pvo->pvo_pte.pte_lo &= ~PTE_RPGN; 177390643Sbenno 177490643Sbenno /* 177590643Sbenno * If there is a saved PTE and it's valid, restore it and return. 
177690643Sbenno */ 177790643Sbenno if (saved_pt != NULL && (saved_pt->pte_lo & PTE_RPGN) != 0) { 177890643Sbenno if (depth_p != NULL && --(*depth_p) == 0) 177990643Sbenno panic("pmap_pa_unmap: restoring but depth == 0"); 178090643Sbenno 178190643Sbenno pvo->pvo_pte = *saved_pt; 178290643Sbenno 178390643Sbenno if (!pmap_pte_spill(pvo->pvo_vaddr)) 178490643Sbenno panic("pmap_pa_unmap: could not spill pvo %p", pvo); 178590643Sbenno } 178677957Sbenno} 178777957Sbenno 178890643Sbennostatic void 178990643Sbennopmap_syncicache(vm_offset_t pa, vm_size_t len) 179077957Sbenno{ 179190643Sbenno __syncicache((void *)pa, len); 179290643Sbenno} 179377957Sbenno 179490643Sbennostatic void 179590643Sbennotlbia(void) 179690643Sbenno{ 179790643Sbenno caddr_t i; 179890643Sbenno 179990643Sbenno SYNC(); 180090643Sbenno for (i = 0; i < (caddr_t)0x00040000; i += 0x00001000) { 180190643Sbenno TLBIE(i); 180290643Sbenno EIEIO(); 180390643Sbenno } 180490643Sbenno TLBSYNC(); 180590643Sbenno SYNC(); 180677957Sbenno} 180777957Sbenno 180890643Sbennostatic int 180992847Sjeffpmap_pvo_enter(pmap_t pm, uma_zone_t zone, struct pvo_head *pvo_head, 181090643Sbenno vm_offset_t va, vm_offset_t pa, u_int pte_lo, int flags) 181177957Sbenno{ 181290643Sbenno struct pvo_entry *pvo; 181390643Sbenno u_int sr; 181490643Sbenno int first; 181590643Sbenno u_int ptegidx; 181690643Sbenno int i; 1817103604Sgrehan int bootstrap; 181877957Sbenno 181990643Sbenno pmap_pvo_enter_calls++; 182096250Sbenno first = 0; 1821103604Sgrehan 1822103604Sgrehan bootstrap = 0; 182390643Sbenno 182490643Sbenno /* 182590643Sbenno * Compute the PTE Group index. 182690643Sbenno */ 182790643Sbenno va &= ~ADDR_POFF; 182890643Sbenno sr = va_to_sr(pm->pm_sr, va); 182990643Sbenno ptegidx = va_to_pteg(sr, va); 183090643Sbenno 183190643Sbenno /* 183290643Sbenno * Remove any existing mapping for this page. Reuse the pvo entry if 183390643Sbenno * there is a mapping. 
183490643Sbenno */ 1835134535Salc mtx_lock(&pmap_table_mutex); 183690643Sbenno LIST_FOREACH(pvo, &pmap_pvo_table[ptegidx], pvo_olink) { 183790643Sbenno if (pvo->pvo_pmap == pm && PVO_VADDR(pvo) == va) { 183896334Sbenno if ((pvo->pvo_pte.pte_lo & PTE_RPGN) == pa && 183996334Sbenno (pvo->pvo_pte.pte_lo & PTE_PP) == 184096334Sbenno (pte_lo & PTE_PP)) { 1841134535Salc mtx_unlock(&pmap_table_mutex); 184292521Sbenno return (0); 184396334Sbenno } 184490643Sbenno pmap_pvo_remove(pvo, -1); 184590643Sbenno break; 184690643Sbenno } 184790643Sbenno } 184890643Sbenno 184990643Sbenno /* 185090643Sbenno * If we aren't overwriting a mapping, try to allocate. 185190643Sbenno */ 185292521Sbenno if (pmap_initialized) { 185392847Sjeff pvo = uma_zalloc(zone, M_NOWAIT); 185492521Sbenno } else { 185599037Sbenno if (pmap_bpvo_pool_index >= BPVO_POOL_SIZE) { 185699037Sbenno panic("pmap_enter: bpvo pool exhausted, %d, %d, %d", 185799037Sbenno pmap_bpvo_pool_index, BPVO_POOL_SIZE, 185899037Sbenno BPVO_POOL_SIZE * sizeof(struct pvo_entry)); 185992521Sbenno } 186092521Sbenno pvo = &pmap_bpvo_pool[pmap_bpvo_pool_index]; 186192521Sbenno pmap_bpvo_pool_index++; 1862103604Sgrehan bootstrap = 1; 186392521Sbenno } 186490643Sbenno 186590643Sbenno if (pvo == NULL) { 1866134535Salc mtx_unlock(&pmap_table_mutex); 186790643Sbenno return (ENOMEM); 186890643Sbenno } 186990643Sbenno 187090643Sbenno pmap_pvo_entries++; 187190643Sbenno pvo->pvo_vaddr = va; 187290643Sbenno pvo->pvo_pmap = pm; 187390643Sbenno LIST_INSERT_HEAD(&pmap_pvo_table[ptegidx], pvo, pvo_olink); 187490643Sbenno pvo->pvo_vaddr &= ~ADDR_POFF; 187590643Sbenno if (flags & VM_PROT_EXECUTE) 187690643Sbenno pvo->pvo_vaddr |= PVO_EXECUTABLE; 187790643Sbenno if (flags & PVO_WIRED) 187890643Sbenno pvo->pvo_vaddr |= PVO_WIRED; 187990643Sbenno if (pvo_head != &pmap_pvo_kunmanaged) 188090643Sbenno pvo->pvo_vaddr |= PVO_MANAGED; 1881103604Sgrehan if (bootstrap) 1882103604Sgrehan pvo->pvo_vaddr |= PVO_BOOTSTRAP; 188390643Sbenno 
pmap_pte_create(&pvo->pvo_pte, sr, va, pa | pte_lo); 188490643Sbenno 188590643Sbenno /* 188690643Sbenno * Remember if the list was empty and therefore will be the first 188790643Sbenno * item. 188890643Sbenno */ 188996250Sbenno if (LIST_FIRST(pvo_head) == NULL) 189096250Sbenno first = 1; 189190643Sbenno 189290643Sbenno LIST_INSERT_HEAD(pvo_head, pvo, pvo_vlink); 189390643Sbenno if (pvo->pvo_pte.pte_lo & PVO_WIRED) 1894134453Salc pm->pm_stats.wired_count++; 1895134453Salc pm->pm_stats.resident_count++; 189690643Sbenno 189790643Sbenno /* 189890643Sbenno * We hope this succeeds but it isn't required. 189990643Sbenno */ 190090643Sbenno i = pmap_pte_insert(ptegidx, &pvo->pvo_pte); 190190643Sbenno if (i >= 0) { 190290643Sbenno PVO_PTEGIDX_SET(pvo, i); 190390643Sbenno } else { 190490643Sbenno panic("pmap_pvo_enter: overflow"); 190590643Sbenno pmap_pte_overflow++; 190690643Sbenno } 190790643Sbenno 1908134535Salc mtx_unlock(&pmap_table_mutex); 190990643Sbenno return (first ? ENOENT : 0); 191077957Sbenno} 191177957Sbenno 191290643Sbennostatic void 191390643Sbennopmap_pvo_remove(struct pvo_entry *pvo, int pteidx) 191477957Sbenno{ 191590643Sbenno struct pte *pt; 191677957Sbenno 191790643Sbenno /* 191890643Sbenno * If there is an active pte entry, we need to deactivate it (and 191990643Sbenno * save the ref & cfg bits). 192090643Sbenno */ 192190643Sbenno pt = pmap_pvo_to_pte(pvo, pteidx); 192290643Sbenno if (pt != NULL) { 192390643Sbenno pmap_pte_unset(pt, &pvo->pvo_pte, pvo->pvo_vaddr); 192490643Sbenno PVO_PTEGIDX_CLR(pvo); 192590643Sbenno } else { 192690643Sbenno pmap_pte_overflow--; 1927110172Sgrehan } 192890643Sbenno 192990643Sbenno /* 193090643Sbenno * Update our statistics. 193190643Sbenno */ 193290643Sbenno pvo->pvo_pmap->pm_stats.resident_count--; 193390643Sbenno if (pvo->pvo_pte.pte_lo & PVO_WIRED) 193490643Sbenno pvo->pvo_pmap->pm_stats.wired_count--; 193590643Sbenno 193690643Sbenno /* 193790643Sbenno * Save the REF/CHG bits into their cache if the page is managed. 
193890643Sbenno */ 193990643Sbenno if (pvo->pvo_vaddr & PVO_MANAGED) { 194090643Sbenno struct vm_page *pg; 194190643Sbenno 194292067Sbenno pg = PHYS_TO_VM_PAGE(pvo->pvo_pte.pte_lo & PTE_RPGN); 194390643Sbenno if (pg != NULL) { 194490643Sbenno pmap_attr_save(pg, pvo->pvo_pte.pte_lo & 194590643Sbenno (PTE_REF | PTE_CHG)); 194690643Sbenno } 194790643Sbenno } 194890643Sbenno 194990643Sbenno /* 195090643Sbenno * Remove this PVO from the PV list. 195190643Sbenno */ 195290643Sbenno LIST_REMOVE(pvo, pvo_vlink); 195390643Sbenno 195490643Sbenno /* 195590643Sbenno * Remove this from the overflow list and return it to the pool 195690643Sbenno * if we aren't going to reuse it. 195790643Sbenno */ 195890643Sbenno LIST_REMOVE(pvo, pvo_olink); 195992521Sbenno if (!(pvo->pvo_vaddr & PVO_BOOTSTRAP)) 196092847Sjeff uma_zfree(pvo->pvo_vaddr & PVO_MANAGED ? pmap_mpvo_zone : 196192521Sbenno pmap_upvo_zone, pvo); 196290643Sbenno pmap_pvo_entries--; 196390643Sbenno pmap_pvo_remove_calls++; 196477957Sbenno} 196577957Sbenno 196690643Sbennostatic __inline int 196790643Sbennopmap_pvo_pte_index(const struct pvo_entry *pvo, int ptegidx) 196877957Sbenno{ 196990643Sbenno int pteidx; 197077957Sbenno 197190643Sbenno /* 197290643Sbenno * We can find the actual pte entry without searching by grabbing 197390643Sbenno * the PTEG index from 3 unused bits in pte_lo[11:9] and by 197490643Sbenno * noticing the HID bit. 
197590643Sbenno */ 197690643Sbenno pteidx = ptegidx * 8 + PVO_PTEGIDX_GET(pvo); 197790643Sbenno if (pvo->pvo_pte.pte_hi & PTE_HID) 197890643Sbenno pteidx ^= pmap_pteg_mask * 8; 197990643Sbenno 198090643Sbenno return (pteidx); 198177957Sbenno} 198277957Sbenno 198390643Sbennostatic struct pvo_entry * 198490643Sbennopmap_pvo_find_va(pmap_t pm, vm_offset_t va, int *pteidx_p) 198577957Sbenno{ 198690643Sbenno struct pvo_entry *pvo; 198790643Sbenno int ptegidx; 198890643Sbenno u_int sr; 198977957Sbenno 199090643Sbenno va &= ~ADDR_POFF; 199190643Sbenno sr = va_to_sr(pm->pm_sr, va); 199290643Sbenno ptegidx = va_to_pteg(sr, va); 199390643Sbenno 1994134535Salc mtx_lock(&pmap_table_mutex); 199590643Sbenno LIST_FOREACH(pvo, &pmap_pvo_table[ptegidx], pvo_olink) { 199690643Sbenno if (pvo->pvo_pmap == pm && PVO_VADDR(pvo) == va) { 199790643Sbenno if (pteidx_p) 199890643Sbenno *pteidx_p = pmap_pvo_pte_index(pvo, ptegidx); 1999134535Salc break; 200090643Sbenno } 200190643Sbenno } 2002134535Salc mtx_unlock(&pmap_table_mutex); 200390643Sbenno 2004134535Salc return (pvo); 200577957Sbenno} 200677957Sbenno 200790643Sbennostatic struct pte * 200890643Sbennopmap_pvo_to_pte(const struct pvo_entry *pvo, int pteidx) 200977957Sbenno{ 201090643Sbenno struct pte *pt; 201177957Sbenno 201290643Sbenno /* 201390643Sbenno * If we haven't been supplied the ptegidx, calculate it. 
201490643Sbenno */ 201590643Sbenno if (pteidx == -1) { 201690643Sbenno int ptegidx; 201790643Sbenno u_int sr; 201877957Sbenno 201990643Sbenno sr = va_to_sr(pvo->pvo_pmap->pm_sr, pvo->pvo_vaddr); 202090643Sbenno ptegidx = va_to_pteg(sr, pvo->pvo_vaddr); 202190643Sbenno pteidx = pmap_pvo_pte_index(pvo, ptegidx); 202290643Sbenno } 202390643Sbenno 202490643Sbenno pt = &pmap_pteg_table[pteidx >> 3].pt[pteidx & 7]; 202590643Sbenno 202690643Sbenno if ((pvo->pvo_pte.pte_hi & PTE_VALID) && !PVO_PTEGIDX_ISSET(pvo)) { 202790643Sbenno panic("pmap_pvo_to_pte: pvo %p has valid pte in pvo but no " 202890643Sbenno "valid pte index", pvo); 202990643Sbenno } 203090643Sbenno 203190643Sbenno if ((pvo->pvo_pte.pte_hi & PTE_VALID) == 0 && PVO_PTEGIDX_ISSET(pvo)) { 203290643Sbenno panic("pmap_pvo_to_pte: pvo %p has valid pte index in pvo " 203390643Sbenno "pvo but no valid pte", pvo); 203490643Sbenno } 203590643Sbenno 203690643Sbenno if ((pt->pte_hi ^ (pvo->pvo_pte.pte_hi & ~PTE_VALID)) == PTE_VALID) { 203790643Sbenno if ((pvo->pvo_pte.pte_hi & PTE_VALID) == 0) { 203890643Sbenno panic("pmap_pvo_to_pte: pvo %p has valid pte in " 203990643Sbenno "pmap_pteg_table %p but invalid in pvo", pvo, pt); 204077957Sbenno } 204190643Sbenno 204290643Sbenno if (((pt->pte_lo ^ pvo->pvo_pte.pte_lo) & ~(PTE_CHG|PTE_REF)) 204390643Sbenno != 0) { 204490643Sbenno panic("pmap_pvo_to_pte: pvo %p pte does not match " 204590643Sbenno "pte %p in pmap_pteg_table", pvo, pt); 204690643Sbenno } 204790643Sbenno 204890643Sbenno return (pt); 204977957Sbenno } 205077957Sbenno 205190643Sbenno if (pvo->pvo_pte.pte_hi & PTE_VALID) { 205290643Sbenno panic("pmap_pvo_to_pte: pvo %p has invalid pte %p in " 205390643Sbenno "pmap_pteg_table but valid in pvo", pvo, pt); 205490643Sbenno } 205577957Sbenno 205690643Sbenno return (NULL); 205777957Sbenno} 205878880Sbenno 205978880Sbenno/* 206090643Sbenno * XXX: THIS STUFF SHOULD BE IN pte.c? 
206178880Sbenno */ 206290643Sbennoint 206390643Sbennopmap_pte_spill(vm_offset_t addr) 206478880Sbenno{ 206590643Sbenno struct pvo_entry *source_pvo, *victim_pvo; 206690643Sbenno struct pvo_entry *pvo; 206790643Sbenno int ptegidx, i, j; 206890643Sbenno u_int sr; 206990643Sbenno struct pteg *pteg; 207090643Sbenno struct pte *pt; 207178880Sbenno 207290643Sbenno pmap_pte_spills++; 207390643Sbenno 207494836Sbenno sr = mfsrin(addr); 207590643Sbenno ptegidx = va_to_pteg(sr, addr); 207690643Sbenno 207778880Sbenno /* 207890643Sbenno * Have to substitute some entry. Use the primary hash for this. 207990643Sbenno * Use low bits of timebase as random generator. 208078880Sbenno */ 208190643Sbenno pteg = &pmap_pteg_table[ptegidx]; 2082134535Salc mtx_lock(&pmap_table_mutex); 208390643Sbenno __asm __volatile("mftb %0" : "=r"(i)); 208490643Sbenno i &= 7; 208590643Sbenno pt = &pteg->pt[i]; 208678880Sbenno 208790643Sbenno source_pvo = NULL; 208890643Sbenno victim_pvo = NULL; 208990643Sbenno LIST_FOREACH(pvo, &pmap_pvo_table[ptegidx], pvo_olink) { 209078880Sbenno /* 209190643Sbenno * We need to find a pvo entry for this address. 209278880Sbenno */ 209390643Sbenno PMAP_PVO_CHECK(pvo); 209490643Sbenno if (source_pvo == NULL && 209590643Sbenno pmap_pte_match(&pvo->pvo_pte, sr, addr, 209690643Sbenno pvo->pvo_pte.pte_hi & PTE_HID)) { 209790643Sbenno /* 209890643Sbenno * Now found an entry to be spilled into the pteg. 209990643Sbenno * The PTE is now valid, so we know it's active. 
210090643Sbenno */ 210190643Sbenno j = pmap_pte_insert(ptegidx, &pvo->pvo_pte); 210278880Sbenno 210390643Sbenno if (j >= 0) { 210490643Sbenno PVO_PTEGIDX_SET(pvo, j); 210590643Sbenno pmap_pte_overflow--; 210690643Sbenno PMAP_PVO_CHECK(pvo); 2107134535Salc mtx_unlock(&pmap_table_mutex); 210890643Sbenno return (1); 210990643Sbenno } 211090643Sbenno 211190643Sbenno source_pvo = pvo; 211290643Sbenno 211390643Sbenno if (victim_pvo != NULL) 211490643Sbenno break; 211590643Sbenno } 211690643Sbenno 211778880Sbenno /* 211890643Sbenno * We also need the pvo entry of the victim we are replacing 211990643Sbenno * so save the R & C bits of the PTE. 212078880Sbenno */ 212190643Sbenno if ((pt->pte_hi & PTE_HID) == 0 && victim_pvo == NULL && 212290643Sbenno pmap_pte_compare(pt, &pvo->pvo_pte)) { 212390643Sbenno victim_pvo = pvo; 212490643Sbenno if (source_pvo != NULL) 212590643Sbenno break; 212690643Sbenno } 212790643Sbenno } 212878880Sbenno 2129134535Salc if (source_pvo == NULL) { 2130134535Salc mtx_unlock(&pmap_table_mutex); 213190643Sbenno return (0); 2132134535Salc } 213390643Sbenno 213490643Sbenno if (victim_pvo == NULL) { 213590643Sbenno if ((pt->pte_hi & PTE_HID) == 0) 213690643Sbenno panic("pmap_pte_spill: victim p-pte (%p) has no pvo" 213790643Sbenno "entry", pt); 213890643Sbenno 213978880Sbenno /* 214090643Sbenno * If this is a secondary PTE, we need to search it's primary 214190643Sbenno * pvo bucket for the matching PVO. 214278880Sbenno */ 214390643Sbenno LIST_FOREACH(pvo, &pmap_pvo_table[ptegidx ^ pmap_pteg_mask], 214490643Sbenno pvo_olink) { 214590643Sbenno PMAP_PVO_CHECK(pvo); 214690643Sbenno /* 214790643Sbenno * We also need the pvo entry of the victim we are 214890643Sbenno * replacing so save the R & C bits of the PTE. 
214990643Sbenno */ 215090643Sbenno if (pmap_pte_compare(pt, &pvo->pvo_pte)) { 215190643Sbenno victim_pvo = pvo; 215290643Sbenno break; 215390643Sbenno } 215490643Sbenno } 215578880Sbenno 215690643Sbenno if (victim_pvo == NULL) 215790643Sbenno panic("pmap_pte_spill: victim s-pte (%p) has no pvo" 215890643Sbenno "entry", pt); 215990643Sbenno } 216078880Sbenno 216190643Sbenno /* 216290643Sbenno * We are invalidating the TLB entry for the EA we are replacing even 216390643Sbenno * though it's valid. If we don't, we lose any ref/chg bit changes 216490643Sbenno * contained in the TLB entry. 216590643Sbenno */ 216690643Sbenno source_pvo->pvo_pte.pte_hi &= ~PTE_HID; 216778880Sbenno 216890643Sbenno pmap_pte_unset(pt, &victim_pvo->pvo_pte, victim_pvo->pvo_vaddr); 216990643Sbenno pmap_pte_set(pt, &source_pvo->pvo_pte); 217090643Sbenno 217190643Sbenno PVO_PTEGIDX_CLR(victim_pvo); 217290643Sbenno PVO_PTEGIDX_SET(source_pvo, i); 217390643Sbenno pmap_pte_replacements++; 217490643Sbenno 217590643Sbenno PMAP_PVO_CHECK(victim_pvo); 217690643Sbenno PMAP_PVO_CHECK(source_pvo); 217790643Sbenno 2178134535Salc mtx_unlock(&pmap_table_mutex); 217990643Sbenno return (1); 218090643Sbenno} 218190643Sbenno 218290643Sbennostatic int 218390643Sbennopmap_pte_insert(u_int ptegidx, struct pte *pvo_pt) 218490643Sbenno{ 218590643Sbenno struct pte *pt; 218690643Sbenno int i; 218790643Sbenno 218890643Sbenno /* 218990643Sbenno * First try primary hash. 219090643Sbenno */ 219190643Sbenno for (pt = pmap_pteg_table[ptegidx].pt, i = 0; i < 8; i++, pt++) { 219290643Sbenno if ((pt->pte_hi & PTE_VALID) == 0) { 219390643Sbenno pvo_pt->pte_hi &= ~PTE_HID; 219490643Sbenno pmap_pte_set(pt, pvo_pt); 219590643Sbenno return (i); 219678880Sbenno } 219790643Sbenno } 219878880Sbenno 219990643Sbenno /* 220090643Sbenno * Now try secondary hash. 
220190643Sbenno */ 220290643Sbenno ptegidx ^= pmap_pteg_mask; 220390643Sbenno ptegidx++; 220490643Sbenno for (pt = pmap_pteg_table[ptegidx].pt, i = 0; i < 8; i++, pt++) { 220590643Sbenno if ((pt->pte_hi & PTE_VALID) == 0) { 220690643Sbenno pvo_pt->pte_hi |= PTE_HID; 220790643Sbenno pmap_pte_set(pt, pvo_pt); 220890643Sbenno return (i); 220990643Sbenno } 221090643Sbenno } 221178880Sbenno 221290643Sbenno panic("pmap_pte_insert: overflow"); 221390643Sbenno return (-1); 221478880Sbenno} 221584921Sbenno 221690643Sbennostatic boolean_t 221790643Sbennopmap_query_bit(vm_page_t m, int ptebit) 221884921Sbenno{ 221990643Sbenno struct pvo_entry *pvo; 222090643Sbenno struct pte *pt; 222184921Sbenno 2222123560Sgrehan#if 0 222390643Sbenno if (pmap_attr_fetch(m) & ptebit) 222490643Sbenno return (TRUE); 2225123560Sgrehan#endif 222684921Sbenno 222790643Sbenno LIST_FOREACH(pvo, vm_page_to_pvoh(m), pvo_vlink) { 222890643Sbenno PMAP_PVO_CHECK(pvo); /* sanity check */ 222984921Sbenno 223090643Sbenno /* 223190643Sbenno * See if we saved the bit off. If so, cache it and return 223290643Sbenno * success. 223390643Sbenno */ 223490643Sbenno if (pvo->pvo_pte.pte_lo & ptebit) { 223590643Sbenno pmap_attr_save(m, ptebit); 223690643Sbenno PMAP_PVO_CHECK(pvo); /* sanity check */ 223790643Sbenno return (TRUE); 223890643Sbenno } 223990643Sbenno } 224084921Sbenno 224190643Sbenno /* 224290643Sbenno * No luck, now go through the hard part of looking at the PTEs 224390643Sbenno * themselves. Sync so that any pending REF/CHG bits are flushed to 224490643Sbenno * the PTEs. 224590643Sbenno */ 224690643Sbenno SYNC(); 224790643Sbenno LIST_FOREACH(pvo, vm_page_to_pvoh(m), pvo_vlink) { 224890643Sbenno PMAP_PVO_CHECK(pvo); /* sanity check */ 224990643Sbenno 225090643Sbenno /* 225190643Sbenno * See if this pvo has a valid PTE. if so, fetch the 225290643Sbenno * REF/CHG bits from the valid PTE. If the appropriate 225390643Sbenno * ptebit is set, cache it and return success. 
225490643Sbenno */ 225590643Sbenno pt = pmap_pvo_to_pte(pvo, -1); 225690643Sbenno if (pt != NULL) { 225790643Sbenno pmap_pte_synch(pt, &pvo->pvo_pte); 225890643Sbenno if (pvo->pvo_pte.pte_lo & ptebit) { 225990643Sbenno pmap_attr_save(m, ptebit); 226090643Sbenno PMAP_PVO_CHECK(pvo); /* sanity check */ 226190643Sbenno return (TRUE); 226290643Sbenno } 226390643Sbenno } 226484921Sbenno } 226584921Sbenno 2266123354Sgallatin return (FALSE); 226784921Sbenno} 226890643Sbenno 2269110172Sgrehanstatic u_int 2270110172Sgrehanpmap_clear_bit(vm_page_t m, int ptebit, int *origbit) 227190643Sbenno{ 2272110172Sgrehan u_int count; 227390643Sbenno struct pvo_entry *pvo; 227490643Sbenno struct pte *pt; 227590643Sbenno int rv; 227690643Sbenno 227790643Sbenno /* 227890643Sbenno * Clear the cached value. 227990643Sbenno */ 228090643Sbenno rv = pmap_attr_fetch(m); 228190643Sbenno pmap_attr_clear(m, ptebit); 228290643Sbenno 228390643Sbenno /* 228490643Sbenno * Sync so that any pending REF/CHG bits are flushed to the PTEs (so 228590643Sbenno * we can reset the right ones). note that since the pvo entries and 228690643Sbenno * list heads are accessed via BAT0 and are never placed in the page 228790643Sbenno * table, we don't have to worry about further accesses setting the 228890643Sbenno * REF/CHG bits. 228990643Sbenno */ 229090643Sbenno SYNC(); 229190643Sbenno 229290643Sbenno /* 229390643Sbenno * For each pvo entry, clear the pvo's ptebit. If this pvo has a 229490643Sbenno * valid pte clear the ptebit from the valid pte. 
229590643Sbenno */ 2296110172Sgrehan count = 0; 229790643Sbenno LIST_FOREACH(pvo, vm_page_to_pvoh(m), pvo_vlink) { 229890643Sbenno PMAP_PVO_CHECK(pvo); /* sanity check */ 229990643Sbenno pt = pmap_pvo_to_pte(pvo, -1); 230090643Sbenno if (pt != NULL) { 230190643Sbenno pmap_pte_synch(pt, &pvo->pvo_pte); 2302110172Sgrehan if (pvo->pvo_pte.pte_lo & ptebit) { 2303110172Sgrehan count++; 230490643Sbenno pmap_pte_clear(pt, PVO_VADDR(pvo), ptebit); 2305110172Sgrehan } 230690643Sbenno } 230790643Sbenno rv |= pvo->pvo_pte.pte_lo; 230890643Sbenno pvo->pvo_pte.pte_lo &= ~ptebit; 230990643Sbenno PMAP_PVO_CHECK(pvo); /* sanity check */ 231090643Sbenno } 231190643Sbenno 2312110172Sgrehan if (origbit != NULL) { 2313110172Sgrehan *origbit = rv; 2314110172Sgrehan } 2315110172Sgrehan 2316110172Sgrehan return (count); 231790643Sbenno} 231899038Sbenno 231999038Sbenno/* 2320103604Sgrehan * Return true if the physical range is encompassed by the battable[idx] 2321103604Sgrehan */ 2322103604Sgrehanstatic int 2323103604Sgrehanpmap_bat_mapped(int idx, vm_offset_t pa, vm_size_t size) 2324103604Sgrehan{ 2325103604Sgrehan u_int prot; 2326103604Sgrehan u_int32_t start; 2327103604Sgrehan u_int32_t end; 2328103604Sgrehan u_int32_t bat_ble; 2329103604Sgrehan 2330103604Sgrehan /* 2331103604Sgrehan * Return immediately if not a valid mapping 2332103604Sgrehan */ 2333103604Sgrehan if (!battable[idx].batu & BAT_Vs) 2334103604Sgrehan return (EINVAL); 2335103604Sgrehan 2336103604Sgrehan /* 2337103604Sgrehan * The BAT entry must be cache-inhibited, guarded, and r/w 2338103604Sgrehan * so it can function as an i/o page 2339103604Sgrehan */ 2340103604Sgrehan prot = battable[idx].batl & (BAT_I|BAT_G|BAT_PP_RW); 2341103604Sgrehan if (prot != (BAT_I|BAT_G|BAT_PP_RW)) 2342103604Sgrehan return (EPERM); 2343103604Sgrehan 2344103604Sgrehan /* 2345103604Sgrehan * The address should be within the BAT range. 
Assume that the 2346103604Sgrehan * start address in the BAT has the correct alignment (thus 2347103604Sgrehan * not requiring masking) 2348103604Sgrehan */ 2349103604Sgrehan start = battable[idx].batl & BAT_PBS; 2350103604Sgrehan bat_ble = (battable[idx].batu & ~(BAT_EBS)) | 0x03; 2351103604Sgrehan end = start | (bat_ble << 15) | 0x7fff; 2352103604Sgrehan 2353103604Sgrehan if ((pa < start) || ((pa + size) > end)) 2354103604Sgrehan return (ERANGE); 2355103604Sgrehan 2356103604Sgrehan return (0); 2357103604Sgrehan} 2358103604Sgrehan 2359133855Sssouhlalint 2360133855Sssouhlalpmap_dev_direct_mapped(vm_offset_t pa, vm_size_t size) 2361133855Sssouhlal{ 2362133855Sssouhlal int i; 2363103604Sgrehan 2364133855Sssouhlal /* 2365133855Sssouhlal * This currently does not work for entries that 2366133855Sssouhlal * overlap 256M BAT segments. 2367133855Sssouhlal */ 2368133855Sssouhlal 2369133855Sssouhlal for(i = 0; i < 16; i++) 2370133855Sssouhlal if (pmap_bat_mapped(i, pa, size) == 0) 2371133855Sssouhlal return (0); 2372133855Sssouhlal 2373133855Sssouhlal return (EFAULT); 2374133855Sssouhlal} 2375133855Sssouhlal 2376103604Sgrehan/* 237799038Sbenno * Map a set of physical memory pages into the kernel virtual 237899038Sbenno * address space. Return a pointer to where it is mapped. This 237999038Sbenno * routine is intended to be used for mapping device memory, 238099038Sbenno * NOT real memory. 238199038Sbenno */ 238299038Sbennovoid * 238399038Sbennopmap_mapdev(vm_offset_t pa, vm_size_t size) 238499038Sbenno{ 2385103604Sgrehan vm_offset_t va, tmpva, ppa, offset; 2386103604Sgrehan int i; 2387103604Sgrehan 2388103604Sgrehan ppa = trunc_page(pa); 238999038Sbenno offset = pa & PAGE_MASK; 239099038Sbenno size = roundup(offset + size, PAGE_SIZE); 239199038Sbenno 239299038Sbenno GIANT_REQUIRED; 239399038Sbenno 2394103604Sgrehan /* 2395103604Sgrehan * If the physical address lies within a valid BAT table entry, 2396103604Sgrehan * return the 1:1 mapping. 
This currently doesn't work 2397103604Sgrehan * for regions that overlap 256M BAT segments. 2398103604Sgrehan */ 2399103604Sgrehan for (i = 0; i < 16; i++) { 2400103604Sgrehan if (pmap_bat_mapped(i, pa, size) == 0) 2401103604Sgrehan return ((void *) pa); 2402103604Sgrehan } 2403103604Sgrehan 2404118365Salc va = kmem_alloc_nofault(kernel_map, size); 240599038Sbenno if (!va) 240699038Sbenno panic("pmap_mapdev: Couldn't alloc kernel virtual memory"); 240799038Sbenno 240899038Sbenno for (tmpva = va; size > 0;) { 2409103604Sgrehan pmap_kenter(tmpva, ppa); 241099038Sbenno TLBIE(tmpva); /* XXX or should it be invalidate-all ? */ 241199038Sbenno size -= PAGE_SIZE; 241299038Sbenno tmpva += PAGE_SIZE; 2413103604Sgrehan ppa += PAGE_SIZE; 241499038Sbenno } 241599038Sbenno 241699038Sbenno return ((void *)(va + offset)); 241799038Sbenno} 241899038Sbenno 241999038Sbennovoid 242099038Sbennopmap_unmapdev(vm_offset_t va, vm_size_t size) 242199038Sbenno{ 242299038Sbenno vm_offset_t base, offset; 242399038Sbenno 2424103604Sgrehan /* 2425103604Sgrehan * If this is outside kernel virtual space, then it's a 2426103604Sgrehan * battable entry and doesn't require unmapping 2427103604Sgrehan */ 2428103604Sgrehan if ((va >= VM_MIN_KERNEL_ADDRESS) && (va <= VM_MAX_KERNEL_ADDRESS)) { 2429103604Sgrehan base = trunc_page(va); 2430103604Sgrehan offset = va & PAGE_MASK; 2431103604Sgrehan size = roundup(offset + size, PAGE_SIZE); 2432103604Sgrehan kmem_free(kernel_map, base, size); 2433103604Sgrehan } 243499038Sbenno} 2435