/*
 * Copyright (c) 2001 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Matt Thomas <matt@3am-software.com> of Allegro Networks, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *        This product includes software developed by the NetBSD
 *        Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */
/*
 * Copyright (C) 1995, 1996 Wolfgang Solfrank.
 * Copyright (C) 1995, 1996 TooLs GmbH.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by TooLs GmbH.
 * 4. The name of TooLs GmbH may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY TOOLS GMBH ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL TOOLS GMBH BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
 * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
 * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * $NetBSD: pmap.c,v 1.28 2000/03/26 20:42:36 kleink Exp $
 */
/*
 * Copyright (C) 2001 Benno Rice.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY Benno Rice ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL TOOLS GMBH BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
 * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
 * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: head/sys/powerpc/aim/mmu_oea.c 134329 2004-08-26 04:15:36Z alc $");

/*
 * Manages physical address maps.
 *
 * In addition to hardware address maps, this module is called upon to
 * provide software-use-only maps which may or may not be stored in the
 * same form as hardware maps.  These pseudo-maps are used to store
 * intermediate results from copy operations to and from address spaces.
 *
 * Since the information managed by this module is also stored by the
 * logical address mapping module, this module may throw away valid virtual
 * to physical mappings at almost any time.  However, invalidations of
 * mappings must be done as requested.
 *
 * In order to cope with hardware architectures which make virtual to
 * physical map invalidates expensive, this module may delay invalidate
 * reduced protection operations until such time as they are actually
 * necessary.  This module is given full information as to which processors
 * are currently using which maps, and to when physical maps must be made
 * correct.
 */

#include "opt_kstack_pages.h"

#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/lock.h>
#include <sys/msgbuf.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/sysctl.h>
#include <sys/systm.h>
#include <sys/vmmeter.h>

#include <dev/ofw/openfirm.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/vm_kern.h>
#include <vm/vm_page.h>
#include <vm/vm_map.h>
#include <vm/vm_object.h>
#include <vm/vm_extern.h>
#include <vm/vm_pageout.h>
#include <vm/vm_pager.h>
#include <vm/uma.h>

#include <machine/cpu.h>
#include <machine/powerpc.h>
#include <machine/bat.h>
#include <machine/frame.h>
#include <machine/md_var.h>
#include <machine/psl.h>
#include <machine/pte.h>
#include <machine/sr.h>

#define	PMAP_DEBUG

#define	TODO	panic("%s: not implemented", __func__);

#define	TLBIE(va)	__asm __volatile("tlbie %0" :: "r"(va))
#define	TLBSYNC()	__asm __volatile("tlbsync");
#define	SYNC()		__asm __volatile("sync");
#define	EIEIO()		__asm __volatile("eieio");

#define	VSID_MAKE(sr, hash)	((sr) | (((hash) & 0xfffff) << 4))
#define	VSID_TO_SR(vsid)	((vsid) & 0xf)
#define	VSID_TO_HASH(vsid)	(((vsid) >> 4) & 0xfffff)

#define	PVO_PTEGIDX_MASK	0x0007		/* which PTEG slot */
#define	PVO_PTEGIDX_VALID	0x0008		/* slot is valid */
#define	PVO_WIRED		0x0010		/* PVO entry is wired */
#define	PVO_MANAGED		0x0020		/* PVO entry is managed */
#define	PVO_EXECUTABLE		0x0040		/* PVO entry is executable */
#define	PVO_BOOTSTRAP		0x0080		/* PVO entry allocated during
						   bootstrap */
#define	PVO_VADDR(pvo)		((pvo)->pvo_vaddr & ~ADDR_POFF)
#define	PVO_ISEXECUTABLE(pvo)	((pvo)->pvo_vaddr & PVO_EXECUTABLE)
#define	PVO_PTEGIDX_GET(pvo)	((pvo)->pvo_vaddr & PVO_PTEGIDX_MASK)
#define	PVO_PTEGIDX_ISSET(pvo)	((pvo)->pvo_vaddr & PVO_PTEGIDX_VALID)
#define	PVO_PTEGIDX_CLR(pvo)	\
	((void)((pvo)->pvo_vaddr &= ~(PVO_PTEGIDX_VALID|PVO_PTEGIDX_MASK)))
#define	PVO_PTEGIDX_SET(pvo, i)	\
	((void)((pvo)->pvo_vaddr |= (i)|PVO_PTEGIDX_VALID))

#define	PMAP_PVO_CHECK(pvo)
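
/*
 * Illustrative note (added; not part of the original source): VSID_MAKE
 * packs a 4-bit segment register number and a 20-bit hash into one value,
 * and the companion macros undo it.  For example, with sr = 3 and
 * hash = 0x12345:
 *
 *	VSID_MAKE(3, 0x12345)	== 0x123453
 *	VSID_TO_SR(0x123453)	== 3
 *	VSID_TO_HASH(0x123453)	== 0x12345
 *
 * The PVO_PTEGIDX_* flags likewise borrow the low bits of pvo_vaddr to
 * record which of the eight PTE slots in a PTEG an entry occupies, which
 * works because mapped addresses are page-aligned.
 */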

struct ofw_map {
	vm_offset_t	om_va;
	vm_size_t	om_len;
	vm_offset_t	om_pa;
	u_int		om_mode;
};

int	pmap_bootstrapped = 0;

/*
 * Virtual and physical address of message buffer.
 */
struct		msgbuf *msgbufp;
vm_offset_t	msgbuf_phys;

int pmap_pagedaemon_waken;

/*
 * Map of physical memory regions.
 */
vm_offset_t	phys_avail[128];
u_int		phys_avail_count;
static struct	mem_region *regions;
static struct	mem_region *pregions;
int		regions_sz, pregions_sz;
static struct	ofw_map *translations;

/*
 * First and last available kernel virtual addresses.
 */
vm_offset_t virtual_avail;
vm_offset_t virtual_end;
vm_offset_t kernel_vm_end;

/*
 * Kernel pmap.
 */
struct pmap kernel_pmap_store;
extern struct pmap ofw_pmap;

/*
 * PTEG data.
 */
static struct	pteg *pmap_pteg_table;
u_int		pmap_pteg_count;
u_int		pmap_pteg_mask;

/*
 * PVO data.
 */
struct	pvo_head *pmap_pvo_table;		/* pvo entries by pteg index */
struct	pvo_head pmap_pvo_kunmanaged =
    LIST_HEAD_INITIALIZER(pmap_pvo_kunmanaged);	/* list of unmanaged pages */
struct	pvo_head pmap_pvo_unmanaged =
    LIST_HEAD_INITIALIZER(pmap_pvo_unmanaged);	/* list of unmanaged pages */

uma_zone_t	pmap_upvo_zone;	/* zone for pvo entries for unmanaged pages */
uma_zone_t	pmap_mpvo_zone;	/* zone for pvo entries for managed pages */

#define	BPVO_POOL_SIZE	32768
static struct	pvo_entry *pmap_bpvo_pool;
static int	pmap_bpvo_pool_index = 0;

#define	VSID_NBPW	(sizeof(u_int32_t) * 8)
static u_int	pmap_vsid_bitmap[NPMAPS / VSID_NBPW];

static boolean_t pmap_initialized = FALSE;

/*
 * Statistics.
 */
u_int	pmap_pte_valid = 0;
u_int	pmap_pte_overflow = 0;
u_int	pmap_pte_replacements = 0;
u_int	pmap_pvo_entries = 0;
u_int	pmap_pvo_enter_calls = 0;
u_int	pmap_pvo_remove_calls = 0;
u_int	pmap_pte_spills = 0;
SYSCTL_INT(_machdep, OID_AUTO, pmap_pte_valid, CTLFLAG_RD, &pmap_pte_valid,
    0, "");
SYSCTL_INT(_machdep, OID_AUTO, pmap_pte_overflow, CTLFLAG_RD,
    &pmap_pte_overflow, 0, "");
SYSCTL_INT(_machdep, OID_AUTO, pmap_pte_replacements, CTLFLAG_RD,
    &pmap_pte_replacements, 0, "");
SYSCTL_INT(_machdep, OID_AUTO, pmap_pvo_entries, CTLFLAG_RD, &pmap_pvo_entries,
    0, "");
SYSCTL_INT(_machdep, OID_AUTO, pmap_pvo_enter_calls, CTLFLAG_RD,
    &pmap_pvo_enter_calls, 0, "");
SYSCTL_INT(_machdep, OID_AUTO, pmap_pvo_remove_calls, CTLFLAG_RD,
    &pmap_pvo_remove_calls, 0, "");
SYSCTL_INT(_machdep, OID_AUTO, pmap_pte_spills, CTLFLAG_RD,
    &pmap_pte_spills, 0, "");

struct	pvo_entry *pmap_pvo_zeropage;

vm_offset_t	pmap_rkva_start = VM_MIN_KERNEL_ADDRESS;
u_int		pmap_rkva_count = 4;

/*
 * Allocate physical memory for use in pmap_bootstrap.
 */
static vm_offset_t	pmap_bootstrap_alloc(vm_size_t, u_int);

/*
 * PTE calls.
 */
static int	pmap_pte_insert(u_int, struct pte *);

/*
 * PVO calls.
 */
static int	pmap_pvo_enter(pmap_t, uma_zone_t, struct pvo_head *,
		    vm_offset_t, vm_offset_t, u_int, int);
static void	pmap_pvo_remove(struct pvo_entry *, int);
static struct	pvo_entry *pmap_pvo_find_va(pmap_t, vm_offset_t, int *);
static struct	pte *pmap_pvo_to_pte(const struct pvo_entry *, int);

/*
 * Utility routines.
 */
static struct		pvo_entry *pmap_rkva_alloc(void);
static void		pmap_pa_map(struct pvo_entry *, vm_offset_t,
			    struct pte *, int *);
static void		pmap_pa_unmap(struct pvo_entry *, struct pte *, int *);
static void		pmap_syncicache(vm_offset_t, vm_size_t);
static boolean_t	pmap_query_bit(vm_page_t, int);
static u_int		pmap_clear_bit(vm_page_t, int, int *);
static void		tlbia(void);

static __inline int
va_to_sr(u_int *sr, vm_offset_t va)
{
	return (sr[(uintptr_t)va >> ADDR_SR_SHFT]);
}

static __inline u_int
va_to_pteg(u_int sr, vm_offset_t addr)
{
	u_int hash;

	hash = (sr & SR_VSID_MASK) ^ (((u_int)addr & ADDR_PIDX) >>
	    ADDR_PIDX_SHFT);
	return (hash & pmap_pteg_mask);
}
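
/*
 * Worked example (added; a sketch assuming the usual OEA constants, where
 * ADDR_PIDX selects the 16-bit page index of the effective address): this
 * is the primary PTEG hash, the low VSID bits XORed with the page index.
 * With sr = 0x00123456, va = 0xdeadb000 and 4096 PTEGs (pmap_pteg_mask
 * 0xfff):
 *
 *	page index = (0xdeadb000 & ADDR_PIDX) >> ADDR_PIDX_SHFT = 0xeadb
 *	hash       = 0x123456 ^ 0xeadb = 0x12de8d
 *	PTEG index = 0x12de8d & 0xfff  = 0xe8d
 *
 * The secondary PTEG used on overflow is reached elsewhere by XORing the
 * index with pmap_pteg_mask.
 */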

static __inline struct pvo_head *
pa_to_pvoh(vm_offset_t pa, vm_page_t *pg_p)
{
	struct	vm_page *pg;

	pg = PHYS_TO_VM_PAGE(pa);

	if (pg_p != NULL)
		*pg_p = pg;

	if (pg == NULL)
		return (&pmap_pvo_unmanaged);

	return (&pg->md.mdpg_pvoh);
}

static __inline struct pvo_head *
vm_page_to_pvoh(vm_page_t m)
{

	return (&m->md.mdpg_pvoh);
}

static __inline void
pmap_attr_clear(vm_page_t m, int ptebit)
{

	m->md.mdpg_attrs &= ~ptebit;
}

static __inline int
pmap_attr_fetch(vm_page_t m)
{

	return (m->md.mdpg_attrs);
}

static __inline void
pmap_attr_save(vm_page_t m, int ptebit)
{

	m->md.mdpg_attrs |= ptebit;
}

static __inline int
pmap_pte_compare(const struct pte *pt, const struct pte *pvo_pt)
{
	if (pt->pte_hi == pvo_pt->pte_hi)
		return (1);

	return (0);
}

static __inline int
pmap_pte_match(struct pte *pt, u_int sr, vm_offset_t va, int which)
{
	return (pt->pte_hi & ~PTE_VALID) ==
	    (((sr & SR_VSID_MASK) << PTE_VSID_SHFT) |
	    ((va >> ADDR_API_SHFT) & PTE_API) | which);
}

static __inline void
pmap_pte_create(struct pte *pt, u_int sr, vm_offset_t va, u_int pte_lo)
{
	/*
	 * Construct a PTE.  Default to IMB initially.  Valid bit only gets
	 * set when the real pte is set in memory.
	 *
	 * Note: Don't set the valid bit for correct operation of tlb update.
	 */
	pt->pte_hi = ((sr & SR_VSID_MASK) << PTE_VSID_SHFT) |
	    (((va & ADDR_PIDX) >> ADDR_API_SHFT) & PTE_API);
	pt->pte_lo = pte_lo;
}
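
/*
 * Illustrative layout (added; a sketch, not authoritative): the high PTE
 * word built above follows the OEA format, with the VSID shifted into
 * place and the API field holding the abbreviated page index bits of the
 * effective address that do not already participate in the hash:
 *
 *	pte_hi = VSID << PTE_VSID_SHFT | API	(PTE_VALID still clear)
 *	pte_lo = RPN | WIMG | PP		(supplied by the caller)
 *
 * pmap_pte_set() below is the only place the valid bit is turned on, once
 * the entry has actually been copied into the page table.
 */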

static __inline void
pmap_pte_synch(struct pte *pt, struct pte *pvo_pt)
{

	pvo_pt->pte_lo |= pt->pte_lo & (PTE_REF | PTE_CHG);
}

static __inline void
pmap_pte_clear(struct pte *pt, vm_offset_t va, int ptebit)
{

	/*
	 * As shown in Section 7.6.3.2.3
	 */
	pt->pte_lo &= ~ptebit;
	TLBIE(va);
	EIEIO();
	TLBSYNC();
	SYNC();
}

static __inline void
pmap_pte_set(struct pte *pt, struct pte *pvo_pt)
{

	pvo_pt->pte_hi |= PTE_VALID;

	/*
	 * Update the PTE as defined in section 7.6.3.1.
	 * Note that the REF/CHG bits are from pvo_pt and thus should have
	 * been saved so this routine can restore them (if desired).
	 */
	pt->pte_lo = pvo_pt->pte_lo;
	EIEIO();
	pt->pte_hi = pvo_pt->pte_hi;
	SYNC();
	pmap_pte_valid++;
}

static __inline void
pmap_pte_unset(struct pte *pt, struct pte *pvo_pt, vm_offset_t va)
{

	pvo_pt->pte_hi &= ~PTE_VALID;

	/*
	 * Force the reg & chg bits back into the PTEs.
	 */
	SYNC();

	/*
	 * Invalidate the pte.
	 */
	pt->pte_hi &= ~PTE_VALID;

	SYNC();
	TLBIE(va);
	EIEIO();
	TLBSYNC();
	SYNC();

	/*
	 * Save the reg & chg bits.
	 */
	pmap_pte_synch(pt, pvo_pt);
	pmap_pte_valid--;
}

static __inline void
pmap_pte_change(struct pte *pt, struct pte *pvo_pt, vm_offset_t va)
{

	/*
	 * Invalidate the PTE
	 */
	pmap_pte_unset(pt, pvo_pt, va);
	pmap_pte_set(pt, pvo_pt);
}
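
/*
 * Note on the helpers above (added for clarity): they follow the
 * architected sequence for modifying a live PTE.  pmap_pte_unset() clears
 * PTE_VALID first, orders the store with sync, then flushes the stale
 * translation with tlbie/eieio/tlbsync/sync before the REF/CHG bits are
 * harvested.  pmap_pte_change() is deliberately an unset followed by a
 * set rather than an in-place rewrite of a valid entry.
 */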

/*
 * Quick sort callout for comparing memory regions.
 */
static int	mr_cmp(const void *a, const void *b);
static int	om_cmp(const void *a, const void *b);

static int
mr_cmp(const void *a, const void *b)
{
	const struct	mem_region *regiona;
	const struct	mem_region *regionb;

	regiona = a;
	regionb = b;
	if (regiona->mr_start < regionb->mr_start)
		return (-1);
	else if (regiona->mr_start > regionb->mr_start)
		return (1);
	else
		return (0);
}

static int
om_cmp(const void *a, const void *b)
{
	const struct	ofw_map *mapa;
	const struct	ofw_map *mapb;

	mapa = a;
	mapb = b;
	if (mapa->om_pa < mapb->om_pa)
		return (-1);
	else if (mapa->om_pa > mapb->om_pa)
		return (1);
	else
		return (0);
}

void
pmap_bootstrap(vm_offset_t kernelstart, vm_offset_t kernelend)
{
	ihandle_t	mmui;
	phandle_t	chosen, mmu;
	int		sz;
	int		i, j;
	int		ofw_mappings;
	vm_size_t	size, physsz;
	vm_offset_t	pa, va, off;
	u_int		batl, batu;

	/*
	 * Set up BAT0 to map the lowest 256 MB area
	 */
	battable[0x0].batl = BATL(0x00000000, BAT_M, BAT_PP_RW);
	battable[0x0].batu = BATU(0x00000000, BAT_BL_256M, BAT_Vs);

	/*
	 * Map PCI memory space.
	 */
	battable[0x8].batl = BATL(0x80000000, BAT_I|BAT_G, BAT_PP_RW);
	battable[0x8].batu = BATU(0x80000000, BAT_BL_256M, BAT_Vs);

	battable[0x9].batl = BATL(0x90000000, BAT_I|BAT_G, BAT_PP_RW);
	battable[0x9].batu = BATU(0x90000000, BAT_BL_256M, BAT_Vs);

	battable[0xa].batl = BATL(0xa0000000, BAT_I|BAT_G, BAT_PP_RW);
	battable[0xa].batu = BATU(0xa0000000, BAT_BL_256M, BAT_Vs);

	battable[0xb].batl = BATL(0xb0000000, BAT_I|BAT_G, BAT_PP_RW);
	battable[0xb].batu = BATU(0xb0000000, BAT_BL_256M, BAT_Vs);

	/*
	 * Map obio devices.
	 */
	battable[0xf].batl = BATL(0xf0000000, BAT_I|BAT_G, BAT_PP_RW);
	battable[0xf].batu = BATU(0xf0000000, BAT_BL_256M, BAT_Vs);
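
	/*
	 * Illustrative note (added; a sketch under the usual BAT
	 * encodings): each battable pair describes one power-of-two block.
	 * For the PCI window above,
	 *
	 *	BATU(0x80000000, BAT_BL_256M, BAT_Vs)
	 *
	 * supplies the effective base, the 256MB block-length mask and the
	 * supervisor-valid bit, while
	 *
	 *	BATL(0x80000000, BAT_I|BAT_G, BAT_PP_RW)
	 *
	 * supplies the physical base, cache-inhibited/guarded attributes
	 * and read/write protection, as is appropriate for device memory.
	 * RAM is instead mapped coherent (BAT_M) and cacheable.
	 */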

	/*
	 * Use an IBAT and a DBAT to map the bottom segment of memory
	 * where we are.
	 */
	batu = BATU(0x00000000, BAT_BL_256M, BAT_Vs);
	batl = BATL(0x00000000, BAT_M, BAT_PP_RW);
	__asm ("mtibatu 0,%0; mtibatl 0,%1; isync; \n"
	       "mtdbatu 0,%0; mtdbatl 0,%1; isync"
	    :: "r"(batu), "r"(batl));

#if 0
	/* map frame buffer */
	batu = BATU(0x90000000, BAT_BL_256M, BAT_Vs);
	batl = BATL(0x90000000, BAT_I|BAT_G, BAT_PP_RW);
	__asm ("mtdbatu 1,%0; mtdbatl 1,%1; isync"
	    :: "r"(batu), "r"(batl));
#endif

#if 1
	/* map pci space */
	batu = BATU(0x80000000, BAT_BL_256M, BAT_Vs);
	batl = BATL(0x80000000, BAT_I|BAT_G, BAT_PP_RW);
	__asm ("mtdbatu 1,%0; mtdbatl 1,%1; isync"
	    :: "r"(batu), "r"(batl));
#endif

	/*
	 * Set the start and end of kva.
	 */
	virtual_avail = VM_MIN_KERNEL_ADDRESS;
	virtual_end = VM_MAX_KERNEL_ADDRESS;

	mem_regions(&pregions, &pregions_sz, &regions, &regions_sz);
	CTR0(KTR_PMAP, "pmap_bootstrap: physical memory");

	qsort(pregions, pregions_sz, sizeof(*pregions), mr_cmp);
	for (i = 0; i < pregions_sz; i++) {
		vm_offset_t pa;
		vm_offset_t end;

		CTR3(KTR_PMAP, "physregion: %#x - %#x (%#x)",
		    pregions[i].mr_start,
		    pregions[i].mr_start + pregions[i].mr_size,
		    pregions[i].mr_size);
		/*
		 * Install entries into the BAT table to allow all
		 * of physmem to be covered by on-demand BAT entries.
		 * The loop will sometimes set the same battable element
		 * twice, but that's fine since they won't be used for
		 * a while yet.
		 */
		pa = pregions[i].mr_start & 0xf0000000;
		end = pregions[i].mr_start + pregions[i].mr_size;
		do {
			u_int n = pa >> ADDR_SR_SHFT;

			battable[n].batl = BATL(pa, BAT_M, BAT_PP_RW);
			battable[n].batu = BATU(pa, BAT_BL_256M, BAT_Vs);
			pa += SEGMENT_LENGTH;
		} while (pa < end);
	}

	if (sizeof(phys_avail)/sizeof(phys_avail[0]) < regions_sz)
		panic("pmap_bootstrap: phys_avail too small");
	qsort(regions, regions_sz, sizeof(*regions), mr_cmp);
	phys_avail_count = 0;
	physsz = 0;
	for (i = 0, j = 0; i < regions_sz; i++, j += 2) {
		CTR3(KTR_PMAP, "region: %#x - %#x (%#x)", regions[i].mr_start,
		    regions[i].mr_start + regions[i].mr_size,
		    regions[i].mr_size);
		phys_avail[j] = regions[i].mr_start;
		phys_avail[j + 1] = regions[i].mr_start + regions[i].mr_size;
		phys_avail_count++;
		physsz += regions[i].mr_size;
	}
	physmem = btoc(physsz);
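
	/*
	 * Illustrative note (added): phys_avail[] now holds start/end
	 * pairs, so a machine with a single 128MB region would end up with
	 *
	 *	phys_avail[0] = 0x00000000;	phys_avail[1] = 0x08000000;
	 *
	 * and physmem holding the total page count.  The pairs are
	 * consumed by the VM system and, below, by pmap_bootstrap_alloc().
	 */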

	/*
	 * Allocate PTEG table.
	 */
#ifdef PTEGCOUNT
	pmap_pteg_count = PTEGCOUNT;
#else
	pmap_pteg_count = 0x1000;

	while (pmap_pteg_count < physmem)
		pmap_pteg_count <<= 1;

	pmap_pteg_count >>= 1;
#endif /* PTEGCOUNT */

	size = pmap_pteg_count * sizeof(struct pteg);
	CTR2(KTR_PMAP, "pmap_bootstrap: %d PTEGs, %d bytes", pmap_pteg_count,
	    size);
	pmap_pteg_table = (struct pteg *)pmap_bootstrap_alloc(size, size);
	CTR1(KTR_PMAP, "pmap_bootstrap: PTEG table at %p", pmap_pteg_table);
	bzero((void *)pmap_pteg_table, pmap_pteg_count * sizeof(struct pteg));
	pmap_pteg_mask = pmap_pteg_count - 1;
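
	/*
	 * Sizing example (added; illustrative): with 128MB of RAM, physmem
	 * is 0x8000 pages, so the loop above stops with pmap_pteg_count at
	 * 0x8000 and the final shift leaves 0x4000 PTEGs.  At eight 8-byte
	 * PTEs per PTEG that is a 1MB table, i.e. four PTE slots per
	 * physical page.
	 */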

	/*
	 * Allocate pv/overflow lists.
	 */
	size = sizeof(struct pvo_head) * pmap_pteg_count;
	pmap_pvo_table = (struct pvo_head *)pmap_bootstrap_alloc(size,
	    PAGE_SIZE);
	CTR1(KTR_PMAP, "pmap_bootstrap: PVO table at %p", pmap_pvo_table);
	for (i = 0; i < pmap_pteg_count; i++)
		LIST_INIT(&pmap_pvo_table[i]);

	/*
	 * Allocate the message buffer.
	 */
	msgbuf_phys = pmap_bootstrap_alloc(MSGBUF_SIZE, 0);

	/*
	 * Initialise the unmanaged pvo pool.
	 */
	pmap_bpvo_pool = (struct pvo_entry *)pmap_bootstrap_alloc(
		BPVO_POOL_SIZE*sizeof(struct pvo_entry), 0);
	pmap_bpvo_pool_index = 0;

	/*
	 * Make sure kernel vsid is allocated as well as VSID 0.
	 */
	pmap_vsid_bitmap[(KERNEL_VSIDBITS & (NPMAPS - 1)) / VSID_NBPW]
		|= 1 << (KERNEL_VSIDBITS % VSID_NBPW);
	pmap_vsid_bitmap[0] |= 1;

	/*
	 * Set up the Open Firmware pmap and add its mappings.
	 */
	pmap_pinit(&ofw_pmap);
	ofw_pmap.pm_sr[KERNEL_SR] = KERNEL_SEGMENT;
	ofw_pmap.pm_sr[KERNEL2_SR] = KERNEL2_SEGMENT;
	if ((chosen = OF_finddevice("/chosen")) == -1)
		panic("pmap_bootstrap: can't find /chosen");
	OF_getprop(chosen, "mmu", &mmui, 4);
	if ((mmu = OF_instance_to_package(mmui)) == -1)
		panic("pmap_bootstrap: can't get mmu package");
	if ((sz = OF_getproplen(mmu, "translations")) == -1)
		panic("pmap_bootstrap: can't get ofw translation count");
	translations = NULL;
	for (i = 0; phys_avail[i] != 0; i += 2) {
		if (phys_avail[i + 1] >= sz) {
			translations = (struct ofw_map *)phys_avail[i];
			break;
		}
	}
	if (translations == NULL)
		panic("pmap_bootstrap: no space to copy translations");
	bzero(translations, sz);
	if (OF_getprop(mmu, "translations", translations, sz) == -1)
		panic("pmap_bootstrap: can't get ofw translations");
	CTR0(KTR_PMAP, "pmap_bootstrap: translations");
	sz /= sizeof(*translations);
	qsort(translations, sz, sizeof (*translations), om_cmp);
	for (i = 0, ofw_mappings = 0; i < sz; i++) {
		CTR3(KTR_PMAP, "translation: pa=%#x va=%#x len=%#x",
		    translations[i].om_pa, translations[i].om_va,
		    translations[i].om_len);

		/*
		 * If the mapping is 1:1, let the RAM and device on-demand
		 * BAT tables take care of the translation.
		 */
		if (translations[i].om_va == translations[i].om_pa)
			continue;

		/* Enter the pages */
		for (off = 0; off < translations[i].om_len; off += PAGE_SIZE) {
			struct	vm_page m;

			m.phys_addr = translations[i].om_pa + off;
			pmap_enter(&ofw_pmap, translations[i].om_va + off, &m,
				   VM_PROT_ALL, 1);
			ofw_mappings++;
		}
	}
#ifdef SMP
	TLBSYNC();
#endif

	/*
	 * Initialize the kernel pmap (which is statically allocated).
	 */
	PMAP_LOCK_INIT(kernel_pmap);
	for (i = 0; i < 16; i++) {
		kernel_pmap->pm_sr[i] = EMPTY_SEGMENT;
	}
	kernel_pmap->pm_sr[KERNEL_SR] = KERNEL_SEGMENT;
	kernel_pmap->pm_sr[KERNEL2_SR] = KERNEL2_SEGMENT;
	kernel_pmap->pm_active = ~0;

	/*
	 * Allocate a kernel stack with a guard page for thread0 and map it
	 * into the kernel page map.
	 */
	pa = pmap_bootstrap_alloc(KSTACK_PAGES * PAGE_SIZE, 0);
	kstack0_phys = pa;
	kstack0 = virtual_avail + (KSTACK_GUARD_PAGES * PAGE_SIZE);
	CTR2(KTR_PMAP, "pmap_bootstrap: kstack0 at %#x (%#x)", kstack0_phys,
	    kstack0);
	virtual_avail += (KSTACK_PAGES + KSTACK_GUARD_PAGES) * PAGE_SIZE;
	for (i = 0; i < KSTACK_PAGES; i++) {
		pa = kstack0_phys + i * PAGE_SIZE;
		va = kstack0 + i * PAGE_SIZE;
		pmap_kenter(va, pa);
		TLBIE(va);
	}

	/*
	 * Calculate the last available physical address.
	 */
	for (i = 0; phys_avail[i + 2] != 0; i += 2)
		;
	Maxmem = powerpc_btop(phys_avail[i + 1]);

	/*
	 * Allocate virtual address space for the message buffer.
	 */
	msgbufp = (struct msgbuf *)virtual_avail;
	virtual_avail += round_page(MSGBUF_SIZE);

	/*
	 * Initialize hardware.
	 */
	for (i = 0; i < 16; i++) {
		mtsrin(i << ADDR_SR_SHFT, EMPTY_SEGMENT);
	}
	__asm __volatile ("mtsr %0,%1"
	    :: "n"(KERNEL_SR), "r"(KERNEL_SEGMENT));
	__asm __volatile ("sync; mtsdr1 %0; isync"
	    :: "r"((u_int)pmap_pteg_table | (pmap_pteg_mask >> 10)));
	tlbia();

	pmap_bootstrapped++;
}
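
/*
 * Note on the mtsdr1 above (added for clarity): SDR1 combines the
 * physical base of the PTEG table with the HTABMASK field describing its
 * size.  The base needs no masking because pmap_bootstrap_alloc() was
 * asked for a size-aligned table; with the 0x4000-PTEG example,
 * pmap_pteg_mask is 0x3fff and pmap_pteg_mask >> 10 leaves 0xf as the
 * mask on the upper hash bits.
 */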

/*
 * Activate a user pmap.  The pmap must be activated before its address
 * space can be accessed in any way.
 */
void
pmap_activate(struct thread *td)
{
	pmap_t	pm, pmr;

	/*
	 * Load all the data we need up front to encourage the compiler to
	 * not issue any loads while we have interrupts disabled below.
	 */
	pm = &td->td_proc->p_vmspace->vm_pmap;

	if ((pmr = (pmap_t)pmap_kextract((vm_offset_t)pm)) == NULL)
		pmr = pm;

	pm->pm_active |= PCPU_GET(cpumask);
	PCPU_SET(curpmap, pmr);
}

void
pmap_deactivate(struct thread *td)
{
	pmap_t	pm;

	pm = &td->td_proc->p_vmspace->vm_pmap;
	pm->pm_active &= ~(PCPU_GET(cpumask));
	PCPU_SET(curpmap, NULL);
}
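
/*
 * Note (added; an inference from the code above, not an authoritative
 * statement): pmap_activate() runs the pmap pointer itself through
 * pmap_kextract(), so curpmap apparently holds the physical address of
 * the pmap structure, which would let low-level trap code reach it before
 * translation is re-enabled.  Early in boot, when no mapping exists yet,
 * the virtual address is used unchanged.
 */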

vm_offset_t
pmap_addr_hint(vm_object_t object, vm_offset_t va, vm_size_t size)
{

	return (va);
}

void
pmap_change_wiring(pmap_t pm, vm_offset_t va, boolean_t wired)
{
	struct	pvo_entry *pvo;

	PMAP_LOCK(pm);
	pvo = pmap_pvo_find_va(pm, va & ~ADDR_POFF, NULL);

	if (pvo != NULL) {
		if (wired) {
			if ((pvo->pvo_vaddr & PVO_WIRED) == 0)
				pm->pm_stats.wired_count++;
			pvo->pvo_vaddr |= PVO_WIRED;
		} else {
			if ((pvo->pvo_vaddr & PVO_WIRED) != 0)
				pm->pm_stats.wired_count--;
			pvo->pvo_vaddr &= ~PVO_WIRED;
		}
	}
	PMAP_UNLOCK(pm);
}

void
pmap_copy(pmap_t dst_pmap, pmap_t src_pmap, vm_offset_t dst_addr,
	  vm_size_t len, vm_offset_t src_addr)
{

	/*
	 * This is not needed as it's mainly an optimisation.
	 * It may want to be implemented later though.
	 */
}

void
pmap_copy_page(vm_page_t msrc, vm_page_t mdst)
{
	vm_offset_t	dst;
	vm_offset_t	src;

	dst = VM_PAGE_TO_PHYS(mdst);
	src = VM_PAGE_TO_PHYS(msrc);

	kcopy((void *)src, (void *)dst, PAGE_SIZE);
}

/*
 * Zero a page of physical memory by temporarily mapping it into the tlb.
 */
void
pmap_zero_page(vm_page_t m)
{
	vm_offset_t pa = VM_PAGE_TO_PHYS(m);
	caddr_t va;

	if (pa < SEGMENT_LENGTH) {
		va = (caddr_t) pa;
	} else if (pmap_initialized) {
		if (pmap_pvo_zeropage == NULL)
			pmap_pvo_zeropage = pmap_rkva_alloc();
		pmap_pa_map(pmap_pvo_zeropage, pa, NULL, NULL);
		va = (caddr_t)PVO_VADDR(pmap_pvo_zeropage);
	} else {
		panic("pmap_zero_page: can't zero pa %#x", pa);
	}

	bzero(va, PAGE_SIZE);

	if (pa >= SEGMENT_LENGTH)
		pmap_pa_unmap(pmap_pvo_zeropage, NULL, NULL);
}

void
pmap_zero_page_area(vm_page_t m, int off, int size)
{
	vm_offset_t pa = VM_PAGE_TO_PHYS(m);
	caddr_t va;

	if (pa < SEGMENT_LENGTH) {
		va = (caddr_t) pa;
	} else if (pmap_initialized) {
		if (pmap_pvo_zeropage == NULL)
			pmap_pvo_zeropage = pmap_rkva_alloc();
		pmap_pa_map(pmap_pvo_zeropage, pa, NULL, NULL);
		va = (caddr_t)PVO_VADDR(pmap_pvo_zeropage);
	} else {
		panic("pmap_zero_page: can't zero pa %#x", pa);
	}

	bzero(va + off, size);

	if (pa >= SEGMENT_LENGTH)
		pmap_pa_unmap(pmap_pvo_zeropage, NULL, NULL);
}

void
pmap_zero_page_idle(vm_page_t m)
{

	/* XXX this is called outside of Giant, is pmap_zero_page safe? */
	/* XXX maybe have a dedicated mapping for this to avoid the problem? */
	mtx_lock(&Giant);
	pmap_zero_page(m);
	mtx_unlock(&Giant);
}

/*
 * Map the given physical page at the specified virtual address in the
 * target pmap with the protection requested.  If specified the page
 * will be wired down.
 */
void
pmap_enter(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot,
	   boolean_t wired)
{
	struct		pvo_head *pvo_head;
	uma_zone_t	zone;
	vm_page_t	pg;
	u_int		pte_lo, pvo_flags, was_exec, i;
	int		error;

	if (!pmap_initialized) {
		pvo_head = &pmap_pvo_kunmanaged;
		zone = pmap_upvo_zone;
		pvo_flags = 0;
		pg = NULL;
		was_exec = PTE_EXEC;
	} else {
		pvo_head = vm_page_to_pvoh(m);
		pg = m;
		zone = pmap_mpvo_zone;
		pvo_flags = PVO_MANAGED;
		was_exec = 0;
	}
	if (pmap_bootstrapped) {
		vm_page_lock_queues();
		PMAP_LOCK(pmap);
	}

	/*
	 * If this is a managed page, and it's the first reference to the page,
	 * clear the execness of the page.  Otherwise fetch the execness.
	 */
	if (pg != NULL) {
		if (LIST_EMPTY(pvo_head)) {
			pmap_attr_clear(pg, PTE_EXEC);
		} else {
			was_exec = pmap_attr_fetch(pg) & PTE_EXEC;
		}
	}

	/*
	 * Assume the page is cache inhibited and access is guarded unless
	 * it's in our available memory array.
	 */
	pte_lo = PTE_I | PTE_G;
	for (i = 0; i < pregions_sz; i++) {
		if ((VM_PAGE_TO_PHYS(m) >= pregions[i].mr_start) &&
		    (VM_PAGE_TO_PHYS(m) <
			(pregions[i].mr_start + pregions[i].mr_size))) {
			pte_lo &= ~(PTE_I | PTE_G);
			break;
		}
	}

	if (prot & VM_PROT_WRITE)
		pte_lo |= PTE_BW;
	else
		pte_lo |= PTE_BR;

	pvo_flags |= (prot & VM_PROT_EXECUTE);

	if (wired)
		pvo_flags |= PVO_WIRED;

	error = pmap_pvo_enter(pmap, zone, pvo_head, va, VM_PAGE_TO_PHYS(m),
	    pte_lo, pvo_flags);

	/*
	 * Flush the real page from the instruction cache if this page is
	 * mapped executable and cacheable and was not previously mapped (or
	 * was not mapped executable).
	 */
	if (error == 0 && (pvo_flags & PVO_EXECUTABLE) &&
	    (pte_lo & PTE_I) == 0 && was_exec == 0) {
		/*
		 * Flush the real memory from the cache.
		 */
		pmap_syncicache(VM_PAGE_TO_PHYS(m), PAGE_SIZE);
		if (pg != NULL)
			pmap_attr_save(pg, PTE_EXEC);
	}
	if (pmap_bootstrapped)
		vm_page_unlock_queues();

	/* XXX syncicache always until problems are sorted */
	pmap_syncicache(VM_PAGE_TO_PHYS(m), PAGE_SIZE);
	if (pmap_bootstrapped)
		PMAP_UNLOCK(pmap);
}

vm_page_t
pmap_enter_quick(pmap_t pm, vm_offset_t va, vm_page_t m, vm_page_t mpte)
{

	mtx_lock(&Giant);
	pmap_enter(pm, va, m, VM_PROT_READ | VM_PROT_EXECUTE, FALSE);
	mtx_unlock(&Giant);
	return (NULL);
}

vm_paddr_t
pmap_extract(pmap_t pm, vm_offset_t va)
{
	struct	pvo_entry *pvo;
	vm_paddr_t pa;

	PMAP_LOCK(pm);
	pvo = pmap_pvo_find_va(pm, va & ~ADDR_POFF, NULL);
	if (pvo == NULL)
		pa = 0;
	else
		pa = (pvo->pvo_pte.pte_lo & PTE_RPGN) | (va & ADDR_POFF);
	PMAP_UNLOCK(pm);
	return (pa);
}
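
/*
 * Usage sketch (added; illustrative only):
 *
 *	vm_paddr_t pa;
 *
 *	pa = pmap_extract(kernel_pmap, va);
 *	if (pa == 0)
 *		panic("va %#x is unmapped", va);
 *
 * Note that 0 serves both as "no mapping" and as a legitimate physical
 * address here; pmap_kextract() below asserts on a missing mapping
 * instead of returning 0.
 */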

/*
 * Atomically extract and hold the physical page with the given
 * pmap and virtual address pair if that mapping permits the given
 * protection.
 */
vm_page_t
pmap_extract_and_hold(pmap_t pmap, vm_offset_t va, vm_prot_t prot)
{
	struct	pvo_entry *pvo;
	vm_page_t m;

	m = NULL;
	mtx_lock(&Giant);
	vm_page_lock_queues();
	PMAP_LOCK(pmap);
	pvo = pmap_pvo_find_va(pmap, va & ~ADDR_POFF, NULL);
	if (pvo != NULL && (pvo->pvo_pte.pte_hi & PTE_VALID) &&
	    ((pvo->pvo_pte.pte_lo & PTE_PP) == PTE_RW ||
	     (prot & VM_PROT_WRITE) == 0)) {
		m = PHYS_TO_VM_PAGE(pvo->pvo_pte.pte_lo & PTE_RPGN);
		vm_page_hold(m);
	}
	vm_page_unlock_queues();
	PMAP_UNLOCK(pmap);
	mtx_unlock(&Giant);
	return (m);
}

/*
 * Grow the number of kernel page table entries.  Unneeded.
 */
void
pmap_growkernel(vm_offset_t addr)
{
}

void
pmap_init(void)
{

	CTR0(KTR_PMAP, "pmap_init");

	pmap_upvo_zone = uma_zcreate("UPVO entry", sizeof (struct pvo_entry),
	    NULL, NULL, NULL, NULL, UMA_ALIGN_PTR,
	    UMA_ZONE_VM | UMA_ZONE_NOFREE);
	pmap_mpvo_zone = uma_zcreate("MPVO entry", sizeof(struct pvo_entry),
	    NULL, NULL, NULL, NULL, UMA_ALIGN_PTR,
	    UMA_ZONE_VM | UMA_ZONE_NOFREE);
	pmap_initialized = TRUE;
}

void
pmap_init2(void)
{

	CTR0(KTR_PMAP, "pmap_init2");
}

boolean_t
pmap_is_modified(vm_page_t m)
{

	if ((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) != 0)
		return (FALSE);

	return (pmap_query_bit(m, PTE_CHG));
}

/*
 * pmap_is_prefaultable:
 *
 *	Return whether or not the specified virtual address is eligible
 *	for prefault.
 */
boolean_t
pmap_is_prefaultable(pmap_t pmap, vm_offset_t addr)
{

	return (FALSE);
}

void
pmap_clear_reference(vm_page_t m)
{

	if ((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) != 0)
		return;
	pmap_clear_bit(m, PTE_REF, NULL);
}

void
pmap_clear_modify(vm_page_t m)
{

	if ((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) != 0)
		return;
	pmap_clear_bit(m, PTE_CHG, NULL);
}

/*
 * pmap_ts_referenced:
 *
 *	Return a count of reference bits for a page, clearing those bits.
 *	It is not necessary for every reference bit to be cleared, but it
 *	is necessary that 0 only be returned when there are truly no
 *	reference bits set.
 *
 *	XXX: The exact number of bits to check and clear is a matter that
 *	should be tested and standardized at some point in the future for
 *	optimal aging of shared pages.
 */
int
pmap_ts_referenced(vm_page_t m)
{
	int count;

	if ((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) != 0)
		return (0);

	count = pmap_clear_bit(m, PTE_REF, NULL);

	return (count);
}

/*
 * Map a wired page into kernel virtual address space.
 */
void
pmap_kenter(vm_offset_t va, vm_offset_t pa)
{
	u_int		pte_lo;
	int		error;
	int		i;

#if 0
	if (va < VM_MIN_KERNEL_ADDRESS)
		panic("pmap_kenter: attempt to enter non-kernel address %#x",
		    va);
#endif

	pte_lo = PTE_I | PTE_G;
	for (i = 0; i < pregions_sz; i++) {
		if ((pa >= pregions[i].mr_start) &&
		    (pa < (pregions[i].mr_start + pregions[i].mr_size))) {
			pte_lo &= ~(PTE_I | PTE_G);
			break;
		}
	}

	error = pmap_pvo_enter(kernel_pmap, pmap_upvo_zone,
	    &pmap_pvo_kunmanaged, va, pa, pte_lo, PVO_WIRED);

	if (error != 0 && error != ENOENT)
		panic("pmap_kenter: failed to enter va %#x pa %#x: %d", va,
		    pa, error);

	/*
	 * Flush the real memory from the instruction cache.
	 */
	if ((pte_lo & (PTE_I | PTE_G)) == 0) {
		pmap_syncicache(pa, PAGE_SIZE);
	}
}

/*
 * Extract the physical page address associated with the given kernel virtual
 * address.
 */
vm_offset_t
pmap_kextract(vm_offset_t va)
{
	struct	pvo_entry *pvo;
	vm_paddr_t pa;

#ifdef UMA_MD_SMALL_ALLOC
	/*
	 * Allow direct mappings
	 */
	if (va < VM_MIN_KERNEL_ADDRESS) {
		return (va);
	}
#endif

	PMAP_LOCK(kernel_pmap);
	pvo = pmap_pvo_find_va(kernel_pmap, va & ~ADDR_POFF, NULL);
	KASSERT(pvo != NULL, ("pmap_kextract: no addr found"));
	pa = (pvo->pvo_pte.pte_lo & PTE_RPGN) | (va & ADDR_POFF);
	PMAP_UNLOCK(kernel_pmap);
	return (pa);
}

/*
 * Remove a wired page from kernel virtual address space.
 */
void
pmap_kremove(vm_offset_t va)
{

	pmap_remove(kernel_pmap, va, va + PAGE_SIZE);
}

/*
 * Map a range of physical addresses into kernel virtual address space.
 *
 * The value passed in *virt is a suggested virtual address for the mapping.
 * Architectures which can support a direct-mapped physical to virtual region
 * can return the appropriate address within that region, leaving '*virt'
 * unchanged.  We cannot and therefore do not; *virt is updated with the
 * first usable address after the mapped region.
 */
vm_offset_t
pmap_map(vm_offset_t *virt, vm_offset_t pa_start, vm_offset_t pa_end, int prot)
{
	vm_offset_t	sva, va;

	sva = *virt;
	va = sva;
	for (; pa_start < pa_end; pa_start += PAGE_SIZE, va += PAGE_SIZE)
		pmap_kenter(va, pa_start);
	*virt = va;
	return (sva);
}

int
pmap_mincore(pmap_t pmap, vm_offset_t addr)
{
	TODO;
	return (0);
}

void
pmap_object_init_pt(pmap_t pm, vm_offset_t addr, vm_object_t object,
		    vm_pindex_t pindex, vm_size_t size)
{

	VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
	KASSERT(object->type == OBJT_DEVICE,
	    ("pmap_object_init_pt: non-device object"));
	KASSERT(pm == &curproc->p_vmspace->vm_pmap || pm == kernel_pmap,
	    ("pmap_object_init_pt: non current pmap"));
}

/*
 * Lower the permission for all mappings to a given page.
 */
void
pmap_page_protect(vm_page_t m, vm_prot_t prot)
{
	struct	pvo_head *pvo_head;
	struct	pvo_entry *pvo, *next_pvo;
	struct	pte *pt;
	pmap_t	pmap;

	/*
	 * Since the routine only downgrades protection, if the
	 * maximal protection is desired, there isn't any change
	 * to be made.
	 */
	if ((prot & (VM_PROT_READ|VM_PROT_WRITE)) ==
	    (VM_PROT_READ|VM_PROT_WRITE))
		return;

	pvo_head = vm_page_to_pvoh(m);
	for (pvo = LIST_FIRST(pvo_head); pvo != NULL; pvo = next_pvo) {
		next_pvo = LIST_NEXT(pvo, pvo_vlink);
		PMAP_PVO_CHECK(pvo);	/* sanity check */
		pmap = pvo->pvo_pmap;
		PMAP_LOCK(pmap);

		/*
		 * Downgrading to no mapping at all, we just remove the entry.
		 */
		if ((prot & VM_PROT_READ) == 0) {
			pmap_pvo_remove(pvo, -1);
			PMAP_UNLOCK(pmap);
			continue;
		}

		/*
		 * If EXEC permission is being revoked, just clear the flag
		 * in the PVO.
		 */
		if ((prot & VM_PROT_EXECUTE) == 0)
			pvo->pvo_vaddr &= ~PVO_EXECUTABLE;

		/*
		 * If this entry is already RO, don't diddle with the page
		 * table.
		 */
		if ((pvo->pvo_pte.pte_lo & PTE_PP) == PTE_BR) {
			PMAP_UNLOCK(pmap);
			PMAP_PVO_CHECK(pvo);
			continue;
		}

		/*
		 * Grab the PTE before we diddle the bits so pvo_to_pte can
		 * verify the pte contents are as expected.
		 */
		pt = pmap_pvo_to_pte(pvo, -1);
		pvo->pvo_pte.pte_lo &= ~PTE_PP;
		pvo->pvo_pte.pte_lo |= PTE_BR;
		if (pt != NULL)
			pmap_pte_change(pt, &pvo->pvo_pte, pvo->pvo_vaddr);
		PMAP_UNLOCK(pmap);
		PMAP_PVO_CHECK(pvo);	/* sanity check */
	}

	/*
	 * Downgrading from writeable: clear the VM page flag
	 */
	if ((prot & VM_PROT_WRITE) != VM_PROT_WRITE)
		vm_page_flag_clear(m, PG_WRITEABLE);
}
135290643Sbenno */ 135390643Sbenno if ((pvo->pvo_pte.pte_lo & PTE_PP) == PTE_BR) { 1354134329Salc PMAP_UNLOCK(pmap); 135590643Sbenno PMAP_PVO_CHECK(pvo); 135690643Sbenno continue; 135777957Sbenno } 135890643Sbenno 135990643Sbenno /* 136090643Sbenno * Grab the PTE before we diddle the bits so pvo_to_pte can 136190643Sbenno * verify the pte contents are as expected. 136290643Sbenno */ 136390643Sbenno pt = pmap_pvo_to_pte(pvo, -1); 136490643Sbenno pvo->pvo_pte.pte_lo &= ~PTE_PP; 136590643Sbenno pvo->pvo_pte.pte_lo |= PTE_BR; 136690643Sbenno if (pt != NULL) 136790643Sbenno pmap_pte_change(pt, &pvo->pvo_pte, pvo->pvo_vaddr); 1368134329Salc PMAP_UNLOCK(pmap); 136990643Sbenno PMAP_PVO_CHECK(pvo); /* sanity check */ 137077957Sbenno } 1371133166Sgrehan 1372133166Sgrehan /* 1373133166Sgrehan * Downgrading from writeable: clear the VM page flag 1374133166Sgrehan */ 1375133166Sgrehan if ((prot & VM_PROT_WRITE) != VM_PROT_WRITE) 1376133166Sgrehan vm_page_flag_clear(m, PG_WRITEABLE); 137777957Sbenno} 137877957Sbenno 137977957Sbenno/* 138091403Ssilby * Returns true if the pmap's pv is one of the first 138191403Ssilby * 16 pvs linked to from this page. This count may 138291403Ssilby * be changed upwards or downwards in the future; it 138391403Ssilby * is only necessary that true be returned for a small 138491403Ssilby * subset of pmaps for proper page aging. 138591403Ssilby */ 138690643Sbennoboolean_t 138791403Ssilbypmap_page_exists_quick(pmap_t pmap, vm_page_t m) 138890643Sbenno{ 1389110172Sgrehan int loops; 1390110172Sgrehan struct pvo_entry *pvo; 1391110172Sgrehan 1392110172Sgrehan if (!pmap_initialized || (m->flags & PG_FICTITIOUS)) 1393110172Sgrehan return FALSE; 1394110172Sgrehan 1395110172Sgrehan loops = 0; 1396110172Sgrehan LIST_FOREACH(pvo, vm_page_to_pvoh(m), pvo_vlink) { 1397110172Sgrehan if (pvo->pvo_pmap == pmap) 1398110172Sgrehan return (TRUE); 1399110172Sgrehan if (++loops >= 16) 1400110172Sgrehan break; 1401110172Sgrehan } 1402110172Sgrehan 1403110172Sgrehan return (FALSE); 140490643Sbenno} 140577957Sbenno 140690643Sbennostatic u_int pmap_vsidcontext; 140777957Sbenno 140890643Sbennovoid 140990643Sbennopmap_pinit(pmap_t pmap) 141090643Sbenno{ 141190643Sbenno int i, mask; 141290643Sbenno u_int entropy; 141377957Sbenno 1414126478Sgrehan KASSERT((int)pmap < VM_MIN_KERNEL_ADDRESS, ("pmap_pinit: virt pmap")); 1415134329Salc PMAP_LOCK_INIT(pmap); 1416126478Sgrehan 141790643Sbenno entropy = 0; 141890643Sbenno __asm __volatile("mftb %0" : "=r"(entropy)); 141977957Sbenno 142090643Sbenno /* 142190643Sbenno * Allocate some segment registers for this pmap. 142290643Sbenno */ 142390643Sbenno for (i = 0; i < NPMAPS; i += VSID_NBPW) { 142490643Sbenno u_int hash, n; 142577957Sbenno 142677957Sbenno /* 142790643Sbenno * Create a new value by mutiplying by a prime and adding in 142890643Sbenno * entropy from the timebase register. This is to make the 142990643Sbenno * VSID more random so that the PT hash function collides 143090643Sbenno * less often. (Note that the prime casues gcc to do shifts 143190643Sbenno * instead of a multiply.) 143277957Sbenno */ 143390643Sbenno pmap_vsidcontext = (pmap_vsidcontext * 0x1105) + entropy; 143490643Sbenno hash = pmap_vsidcontext & (NPMAPS - 1); 143590643Sbenno if (hash == 0) /* 0 is special, avoid it */ 143690643Sbenno continue; 143790643Sbenno n = hash >> 5; 143890643Sbenno mask = 1 << (hash & (VSID_NBPW - 1)); 143990643Sbenno hash = (pmap_vsidcontext & 0xfffff); 144090643Sbenno if (pmap_vsid_bitmap[n] & mask) { /* collision? 

/*
 * Returns true if the pmap's pv is one of the first
 * 16 pvs linked to from this page. This count may
 * be changed upwards or downwards in the future; it
 * is only necessary that true be returned for a small
 * subset of pmaps for proper page aging.
 */
boolean_t
pmap_page_exists_quick(pmap_t pmap, vm_page_t m)
{
	int loops;
	struct pvo_entry *pvo;

	if (!pmap_initialized || (m->flags & PG_FICTITIOUS))
		return (FALSE);

	loops = 0;
	LIST_FOREACH(pvo, vm_page_to_pvoh(m), pvo_vlink) {
		if (pvo->pvo_pmap == pmap)
			return (TRUE);
		if (++loops >= 16)
			break;
	}

	return (FALSE);
}

static u_int	pmap_vsidcontext;

void
pmap_pinit(pmap_t pmap)
{
	int	i, mask;
	u_int	entropy;

	KASSERT((int)pmap < VM_MIN_KERNEL_ADDRESS, ("pmap_pinit: virt pmap"));
	PMAP_LOCK_INIT(pmap);

	entropy = 0;
	__asm __volatile("mftb %0" : "=r"(entropy));

	/*
	 * Allocate some segment registers for this pmap.
	 */
	for (i = 0; i < NPMAPS; i += VSID_NBPW) {
		u_int	hash, n;

		/*
		 * Create a new value by multiplying by a prime and adding in
		 * entropy from the timebase register. This is to make the
		 * VSID more random so that the PT hash function collides
		 * less often. (Note that the prime causes gcc to do shifts
		 * instead of a multiply.)
		 */
		pmap_vsidcontext = (pmap_vsidcontext * 0x1105) + entropy;
		hash = pmap_vsidcontext & (NPMAPS - 1);
		if (hash == 0)		/* 0 is special, avoid it */
			continue;
		n = hash >> 5;
		mask = 1 << (hash & (VSID_NBPW - 1));
		hash = (pmap_vsidcontext & 0xfffff);
		if (pmap_vsid_bitmap[n] & mask) {	/* collision? */
			/* anything free in this bucket? */
			if (pmap_vsid_bitmap[n] == 0xffffffff) {
				entropy = (pmap_vsidcontext >> 20);
				continue;
			}
			i = ffs(~pmap_vsid_bitmap[n]) - 1;
			mask = 1 << i;
			hash &= 0xfffff & ~(VSID_NBPW - 1);
			hash |= i;
		}
		pmap_vsid_bitmap[n] |= mask;
		for (i = 0; i < 16; i++)
			pmap->pm_sr[i] = VSID_MAKE(i, hash);
		return;
	}

	panic("pmap_pinit: out of segments");
}

/*
 * Initialize the pmap associated with process 0.
 */
void
pmap_pinit0(pmap_t pm)
{

	pmap_pinit(pm);
	bzero(&pm->pm_stats, sizeof(pm->pm_stats));
}
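
/*
 * Worked example of the VSID bookkeeping in pmap_pinit() above,
 * assuming VSID_NBPW is 32 (one bit per VSID group in a 32-bit bitmap
 * word): if the hash comes out as 0x2b (43), then n = 43 >> 5 = 1 and
 * mask = 1 << (43 & 31) = 1 << 11, i.e. bit 11 of pmap_vsid_bitmap[1]
 * marks the group as in use.  The 16 segment registers then receive
 * consecutive VSIDs derived from the hash via VSID_MAKE(i, hash).
 */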

/*
 * Set the physical protection on the specified range of this map as requested.
 */
void
pmap_protect(pmap_t pm, vm_offset_t sva, vm_offset_t eva, vm_prot_t prot)
{
	struct	pvo_entry *pvo;
	struct	pte *pt;
	int	pteidx;

	CTR4(KTR_PMAP, "pmap_protect: pm=%p sva=%#x eva=%#x prot=%#x", pm, sva,
	    eva, prot);

	KASSERT(pm == &curproc->p_vmspace->vm_pmap || pm == kernel_pmap,
	    ("pmap_protect: non current pmap"));

	if ((prot & VM_PROT_READ) == VM_PROT_NONE) {
		mtx_lock(&Giant);
		pmap_remove(pm, sva, eva);
		mtx_unlock(&Giant);
		return;
	}

	mtx_lock(&Giant);
	vm_page_lock_queues();
	PMAP_LOCK(pm);
	for (; sva < eva; sva += PAGE_SIZE) {
		pvo = pmap_pvo_find_va(pm, sva, &pteidx);
		if (pvo == NULL)
			continue;

		if ((prot & VM_PROT_EXECUTE) == 0)
			pvo->pvo_vaddr &= ~PVO_EXECUTABLE;

		/*
		 * Grab the PTE pointer before we diddle with the cached PTE
		 * copy.
		 */
		pt = pmap_pvo_to_pte(pvo, pteidx);
		/*
		 * Change the protection of the page.
		 */
		pvo->pvo_pte.pte_lo &= ~PTE_PP;
		pvo->pvo_pte.pte_lo |= PTE_BR;

		/*
		 * If the PVO is in the page table, update that pte as well.
		 */
		if (pt != NULL)
			pmap_pte_change(pt, &pvo->pvo_pte, pvo->pvo_vaddr);
	}
	vm_page_unlock_queues();
	PMAP_UNLOCK(pm);
	mtx_unlock(&Giant);
}

/*
 * Map a list of wired pages into kernel virtual address space. This is
 * intended for temporary mappings which do not need page modification or
 * references recorded. Existing mappings in the region are overwritten.
 */
void
pmap_qenter(vm_offset_t sva, vm_page_t *m, int count)
{
	vm_offset_t va;

	va = sva;
	while (count-- > 0) {
		pmap_kenter(va, VM_PAGE_TO_PHYS(*m));
		va += PAGE_SIZE;
		m++;
	}
}

/*
 * Remove page mappings from kernel virtual address space. Intended for
 * temporary mappings entered by pmap_qenter.
 */
void
pmap_qremove(vm_offset_t sva, int count)
{
	vm_offset_t va;

	va = sva;
	while (count-- > 0) {
		pmap_kremove(va);
		va += PAGE_SIZE;
	}
}
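
/*
 * Hypothetical usage sketch for the pmap_qenter()/pmap_qremove() pair
 * above (kva, pages and npages are placeholder names): a caller maps
 * a short-lived run of pages and later tears it down with a matching
 * count.
 *
 *	pmap_qenter(kva, pages, npages);
 *	-- access the pages through kva --
 *	pmap_qremove(kva, npages);
 */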

void
pmap_release(pmap_t pmap)
{
	int idx, mask;

	/*
	 * Free the VSID allocated to the segment registers.
	 */
	if (pmap->pm_sr[0] == 0)
		panic("pmap_release");

	idx = VSID_TO_HASH(pmap->pm_sr[0]) & (NPMAPS-1);
	mask = 1 << (idx % VSID_NBPW);
	idx /= VSID_NBPW;
	pmap_vsid_bitmap[idx] &= ~mask;
	PMAP_LOCK_DESTROY(pmap);
}

/*
 * Remove the given range of addresses from the specified map.
 */
void
pmap_remove(pmap_t pm, vm_offset_t sva, vm_offset_t eva)
{
	struct	pvo_entry *pvo;
	int	pteidx;

	vm_page_lock_queues();
	PMAP_LOCK(pm);
	for (; sva < eva; sva += PAGE_SIZE) {
		pvo = pmap_pvo_find_va(pm, sva, &pteidx);
		if (pvo != NULL) {
			pmap_pvo_remove(pvo, pteidx);
		}
	}
	vm_page_unlock_queues();
	PMAP_UNLOCK(pm);
}

/*
 * Remove physical page from all pmaps in which it resides. pmap_pvo_remove()
 * will reflect changes in pte's back to the vm_page.
 */
void
pmap_remove_all(vm_page_t m)
{
	struct	pvo_head *pvo_head;
	struct	pvo_entry *pvo, *next_pvo;
	pmap_t	pmap;

	mtx_assert(&vm_page_queue_mtx, MA_OWNED);

	pvo_head = vm_page_to_pvoh(m);
	for (pvo = LIST_FIRST(pvo_head); pvo != NULL; pvo = next_pvo) {
		next_pvo = LIST_NEXT(pvo, pvo_vlink);

		PMAP_PVO_CHECK(pvo);	/* sanity check */
		pmap = pvo->pvo_pmap;
		PMAP_LOCK(pmap);
		pmap_pvo_remove(pvo, -1);
		PMAP_UNLOCK(pmap);
	}
	vm_page_flag_clear(m, PG_WRITEABLE);
}

/*
 * Remove all pages from the specified address space; this aids process exit
 * speeds. This is much faster than pmap_remove in the case of running down
 * an entire address space. Only works for the current pmap.
 */
void
pmap_remove_pages(pmap_t pm, vm_offset_t sva, vm_offset_t eva)
{
}

/*
 * Allocate a physical page of memory directly from the phys_avail map.
 * Can only be called from pmap_bootstrap before avail start and end are
 * calculated.
 */
static vm_offset_t
pmap_bootstrap_alloc(vm_size_t size, u_int align)
{
	vm_offset_t	s, e;
	int		i, j;

	size = round_page(size);
	for (i = 0; phys_avail[i + 1] != 0; i += 2) {
		if (align != 0)
			s = (phys_avail[i] + align - 1) & ~(align - 1);
		else
			s = phys_avail[i];
		e = s + size;

		if (s < phys_avail[i] || e > phys_avail[i + 1])
			continue;

		if (s == phys_avail[i]) {
			phys_avail[i] += size;
		} else if (e == phys_avail[i + 1]) {
			phys_avail[i + 1] -= size;
		} else {
			for (j = phys_avail_count * 2; j > i; j -= 2) {
				phys_avail[j] = phys_avail[j - 2];
				phys_avail[j + 1] = phys_avail[j - 1];
			}

			phys_avail[i + 3] = phys_avail[i + 1];
			phys_avail[i + 1] = s;
			phys_avail[i + 2] = e;
			phys_avail_count++;
		}

		return (s);
	}
	panic("pmap_bootstrap_alloc: could not allocate memory");
}
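
/*
 * Example of the phys_avail bookkeeping above (hypothetical numbers):
 * with phys_avail = { 0x3000, 0x9000, ... } describing the free range
 * [0x3000, 0x9000), allocating one 0x1000-byte page aligned to 0x4000
 * picks s = 0x4000, e = 0x5000. Since that carves a hole out of the
 * middle of the range, the entry is split in place into
 * { 0x3000, 0x4000, 0x5000, 0x9000, ... } and phys_avail_count is
 * bumped; allocations at either end simply shrink the existing entry.
 */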

/*
 * Return an unmapped pvo for a kernel virtual address.
 * Used by pmap functions that operate on physical pages.
 */
static struct pvo_entry *
pmap_rkva_alloc(void)
{
	struct	pvo_entry *pvo;
	struct	pte *pt;
	vm_offset_t	kva;
	int	pteidx;

	if (pmap_rkva_count == 0)
		panic("pmap_rkva_alloc: no more reserved KVAs");

	kva = pmap_rkva_start + (PAGE_SIZE * --pmap_rkva_count);
	pmap_kenter(kva, 0);

	pvo = pmap_pvo_find_va(kernel_pmap, kva, &pteidx);

	if (pvo == NULL)
		panic("pmap_rkva_alloc: pmap_pvo_find_va failed");

	pt = pmap_pvo_to_pte(pvo, pteidx);

	if (pt == NULL)
		panic("pmap_rkva_alloc: pmap_pvo_to_pte failed");

	pmap_pte_unset(pt, &pvo->pvo_pte, pvo->pvo_vaddr);
	PVO_PTEGIDX_CLR(pvo);

	pmap_pte_overflow++;

	return (pvo);
}

static void
pmap_pa_map(struct pvo_entry *pvo, vm_offset_t pa, struct pte *saved_pt,
    int *depth_p)
{
	struct	pte *pt;

	/*
	 * If this pvo already has a valid pte, we need to save it so it can
	 * be restored later. We then just reload the new PTE over the old
	 * slot.
	 */
	if (saved_pt != NULL) {
		pt = pmap_pvo_to_pte(pvo, -1);

		if (pt != NULL) {
			pmap_pte_unset(pt, &pvo->pvo_pte, pvo->pvo_vaddr);
			PVO_PTEGIDX_CLR(pvo);
			pmap_pte_overflow++;
		}

		*saved_pt = pvo->pvo_pte;

		pvo->pvo_pte.pte_lo &= ~PTE_RPGN;
	}

	pvo->pvo_pte.pte_lo |= pa;

	if (!pmap_pte_spill(pvo->pvo_vaddr))
		panic("pmap_pa_map: could not spill pvo %p", pvo);

	if (depth_p != NULL)
		(*depth_p)++;
}

static void
pmap_pa_unmap(struct pvo_entry *pvo, struct pte *saved_pt, int *depth_p)
{
	struct	pte *pt;

	pt = pmap_pvo_to_pte(pvo, -1);

	if (pt != NULL) {
		pmap_pte_unset(pt, &pvo->pvo_pte, pvo->pvo_vaddr);
		PVO_PTEGIDX_CLR(pvo);
		pmap_pte_overflow++;
	}

	pvo->pvo_pte.pte_lo &= ~PTE_RPGN;

	/*
	 * If there is a saved PTE and it's valid, restore it and return.
	 */
	if (saved_pt != NULL && (saved_pt->pte_lo & PTE_RPGN) != 0) {
		if (depth_p != NULL && --(*depth_p) == 0)
			panic("pmap_pa_unmap: restoring but depth == 0");

		pvo->pvo_pte = *saved_pt;

		if (!pmap_pte_spill(pvo->pvo_vaddr))
			panic("pmap_pa_unmap: could not spill pvo %p", pvo);
	}
}
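
/*
 * Sketch of how the two helpers above are meant to nest; this is a
 * plausible pattern inferred from the save/restore arguments, not a
 * verbatim caller. Borrow the reserved pvo, point it at a physical
 * page, do the work, then restore whatever mapping was there before:
 *
 *	struct pte saved;
 *	int depth = 0;
 *
 *	pmap_pa_map(pvo, pa, &saved, &depth);
 *	-- access the page through PVO_VADDR(pvo) --
 *	pmap_pa_unmap(pvo, &saved, &depth);
 */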

static void
pmap_syncicache(vm_offset_t pa, vm_size_t len)
{
	__syncicache((void *)pa, len);
}

static void
tlbia(void)
{
	caddr_t	i;

	SYNC();
	for (i = 0; i < (caddr_t)0x00040000; i += 0x00001000) {
		TLBIE(i);
		EIEIO();
	}
	TLBSYNC();
	SYNC();
}

static int
pmap_pvo_enter(pmap_t pm, uma_zone_t zone, struct pvo_head *pvo_head,
    vm_offset_t va, vm_offset_t pa, u_int pte_lo, int flags)
{
	struct	pvo_entry *pvo;
	u_int	sr;
	int	first;
	u_int	ptegidx;
	int	i;
	int	bootstrap;

	pmap_pvo_enter_calls++;
	first = 0;

	bootstrap = 0;

	/*
	 * Compute the PTE Group index.
	 */
	va &= ~ADDR_POFF;
	sr = va_to_sr(pm->pm_sr, va);
	ptegidx = va_to_pteg(sr, va);

	/*
	 * Remove any existing mapping for this page. Reuse the pvo entry if
	 * there is a mapping.
	 */
	LIST_FOREACH(pvo, &pmap_pvo_table[ptegidx], pvo_olink) {
		if (pvo->pvo_pmap == pm && PVO_VADDR(pvo) == va) {
			if ((pvo->pvo_pte.pte_lo & PTE_RPGN) == pa &&
			    (pvo->pvo_pte.pte_lo & PTE_PP) ==
			    (pte_lo & PTE_PP)) {
				return (0);
			}
			pmap_pvo_remove(pvo, -1);
			break;
		}
	}

	/*
	 * If we aren't overwriting a mapping, try to allocate.
	 */
	if (pmap_initialized) {
		pvo = uma_zalloc(zone, M_NOWAIT);
	} else {
		if (pmap_bpvo_pool_index >= BPVO_POOL_SIZE) {
			panic("pmap_enter: bpvo pool exhausted, %d, %d, %d",
			    pmap_bpvo_pool_index, BPVO_POOL_SIZE,
			    BPVO_POOL_SIZE * sizeof(struct pvo_entry));
		}
		pvo = &pmap_bpvo_pool[pmap_bpvo_pool_index];
		pmap_bpvo_pool_index++;
		bootstrap = 1;
	}

	if (pvo == NULL) {
		return (ENOMEM);
	}

	pmap_pvo_entries++;
	pvo->pvo_vaddr = va;
	pvo->pvo_pmap = pm;
	LIST_INSERT_HEAD(&pmap_pvo_table[ptegidx], pvo, pvo_olink);
	pvo->pvo_vaddr &= ~ADDR_POFF;
	if (flags & VM_PROT_EXECUTE)
		pvo->pvo_vaddr |= PVO_EXECUTABLE;
	if (flags & PVO_WIRED)
		pvo->pvo_vaddr |= PVO_WIRED;
	if (pvo_head != &pmap_pvo_kunmanaged)
		pvo->pvo_vaddr |= PVO_MANAGED;
	if (bootstrap)
		pvo->pvo_vaddr |= PVO_BOOTSTRAP;
	pmap_pte_create(&pvo->pvo_pte, sr, va, pa | pte_lo);

	/*
	 * Remember if the list was empty and therefore will be the first
	 * item.
	 */
	if (LIST_FIRST(pvo_head) == NULL)
		first = 1;

	LIST_INSERT_HEAD(pvo_head, pvo, pvo_vlink);
	if (pvo->pvo_vaddr & PVO_WIRED)
		pvo->pvo_pmap->pm_stats.wired_count++;
	pvo->pvo_pmap->pm_stats.resident_count++;

	/*
	 * We hope this succeeds but it isn't required.
	 */
	i = pmap_pte_insert(ptegidx, &pvo->pvo_pte);
	if (i >= 0) {
		PVO_PTEGIDX_SET(pvo, i);
	} else {
		panic("pmap_pvo_enter: overflow");
		pmap_pte_overflow++;
	}

	return (first ? ENOENT : 0);
}
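
/*
 * Return convention of pmap_pvo_enter() above, as its code implies:
 * ENOENT signals that this was the first mapping entered for the
 * page (the PV list was empty before the insert), ENOMEM means no
 * pvo entry could be allocated, and 0 covers every other success,
 * including the case where an identical mapping was already present.
 */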

static void
pmap_pvo_remove(struct pvo_entry *pvo, int pteidx)
{
	struct	pte *pt;

	/*
	 * If there is an active pte entry, we need to deactivate it (and
	 * save the ref & chg bits).
	 */
	pt = pmap_pvo_to_pte(pvo, pteidx);
	if (pt != NULL) {
		pmap_pte_unset(pt, &pvo->pvo_pte, pvo->pvo_vaddr);
		PVO_PTEGIDX_CLR(pvo);
	} else {
		pmap_pte_overflow--;
	}

	/*
	 * Update our statistics.
	 */
	pvo->pvo_pmap->pm_stats.resident_count--;
	if (pvo->pvo_vaddr & PVO_WIRED)
		pvo->pvo_pmap->pm_stats.wired_count--;

	/*
	 * Save the REF/CHG bits into their cache if the page is managed.
	 */
	if (pvo->pvo_vaddr & PVO_MANAGED) {
		struct	vm_page *pg;

		pg = PHYS_TO_VM_PAGE(pvo->pvo_pte.pte_lo & PTE_RPGN);
		if (pg != NULL) {
			pmap_attr_save(pg, pvo->pvo_pte.pte_lo &
			    (PTE_REF | PTE_CHG));
		}
	}

	/*
	 * Remove this PVO from the PV list.
	 */
	LIST_REMOVE(pvo, pvo_vlink);

	/*
	 * Remove this from the overflow list and return it to the pool
	 * if we aren't going to reuse it.
	 */
	LIST_REMOVE(pvo, pvo_olink);
	if (!(pvo->pvo_vaddr & PVO_BOOTSTRAP))
		uma_zfree(pvo->pvo_vaddr & PVO_MANAGED ? pmap_mpvo_zone :
		    pmap_upvo_zone, pvo);
	pmap_pvo_entries--;
	pmap_pvo_remove_calls++;
}

static __inline int
pmap_pvo_pte_index(const struct pvo_entry *pvo, int ptegidx)
{
	int	pteidx;

	/*
	 * We can find the actual pte entry without searching by grabbing
	 * the PTEG index from 3 unused bits in pte_lo[11:9] and by
	 * noticing the HID bit.
	 */
	pteidx = ptegidx * 8 + PVO_PTEGIDX_GET(pvo);
	if (pvo->pvo_pte.pte_hi & PTE_HID)
		pteidx ^= pmap_pteg_mask * 8;

	return (pteidx);
}

static struct pvo_entry *
pmap_pvo_find_va(pmap_t pm, vm_offset_t va, int *pteidx_p)
{
	struct	pvo_entry *pvo;
	int	ptegidx;
	u_int	sr;

	va &= ~ADDR_POFF;
	sr = va_to_sr(pm->pm_sr, va);
	ptegidx = va_to_pteg(sr, va);

	LIST_FOREACH(pvo, &pmap_pvo_table[ptegidx], pvo_olink) {
		if (pvo->pvo_pmap == pm && PVO_VADDR(pvo) == va) {
			if (pteidx_p)
				*pteidx_p = pmap_pvo_pte_index(pvo, ptegidx);
			return (pvo);
		}
	}

	return (NULL);
}
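
/*
 * Example of the index arithmetic in pmap_pvo_pte_index() above: each
 * PTEG holds 8 PTEs, so slot k of group g lives at flat index
 * g * 8 + k. For an entry installed under the secondary hash (PTE_HID
 * set), the group is the masked complement of the primary one, which
 * the flat index reflects by xoring with pmap_pteg_mask * 8; xoring
 * again with the same value recovers the primary index.
 */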

static struct pte *
pmap_pvo_to_pte(const struct pvo_entry *pvo, int pteidx)
{
	struct	pte *pt;

	/*
	 * If we haven't been supplied the ptegidx, calculate it.
	 */
	if (pteidx == -1) {
		int	ptegidx;
		u_int	sr;

		sr = va_to_sr(pvo->pvo_pmap->pm_sr, pvo->pvo_vaddr);
		ptegidx = va_to_pteg(sr, pvo->pvo_vaddr);
		pteidx = pmap_pvo_pte_index(pvo, ptegidx);
	}

	pt = &pmap_pteg_table[pteidx >> 3].pt[pteidx & 7];

	if ((pvo->pvo_pte.pte_hi & PTE_VALID) && !PVO_PTEGIDX_ISSET(pvo)) {
		panic("pmap_pvo_to_pte: pvo %p has valid pte in pvo but no "
		    "valid pte index", pvo);
	}

	if ((pvo->pvo_pte.pte_hi & PTE_VALID) == 0 && PVO_PTEGIDX_ISSET(pvo)) {
		panic("pmap_pvo_to_pte: pvo %p has valid pte index in pvo "
		    "but no valid pte", pvo);
	}

	if ((pt->pte_hi ^ (pvo->pvo_pte.pte_hi & ~PTE_VALID)) == PTE_VALID) {
		if ((pvo->pvo_pte.pte_hi & PTE_VALID) == 0) {
			panic("pmap_pvo_to_pte: pvo %p has valid pte in "
			    "pmap_pteg_table %p but invalid in pvo", pvo, pt);
		}

		if (((pt->pte_lo ^ pvo->pvo_pte.pte_lo) & ~(PTE_CHG|PTE_REF))
		    != 0) {
			panic("pmap_pvo_to_pte: pvo %p pte does not match "
			    "pte %p in pmap_pteg_table", pvo, pt);
		}

		return (pt);
	}

	if (pvo->pvo_pte.pte_hi & PTE_VALID) {
		panic("pmap_pvo_to_pte: pvo %p has invalid pte %p in "
		    "pmap_pteg_table but valid in pvo", pvo, pt);
	}

	return (NULL);
}
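
/*
 * A note on the xor test above: masking PTE_VALID off the cached
 * pte_hi and xoring it with the hardware slot yields exactly
 * PTE_VALID only when the slot agrees with the cached copy in every
 * other bit and is currently valid. That is the one case in which the
 * slot is accepted as the live copy of this pvo's PTE; every other
 * outcome falls through to the consistency panics or returns NULL.
 */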

/*
 * XXX: THIS STUFF SHOULD BE IN pte.c?
 */
int
pmap_pte_spill(vm_offset_t addr)
{
	struct	pvo_entry *source_pvo, *victim_pvo;
	struct	pvo_entry *pvo;
	int	ptegidx, i, j;
	u_int	sr;
	struct	pteg *pteg;
	struct	pte *pt;

	pmap_pte_spills++;

	sr = mfsrin(addr);
	ptegidx = va_to_pteg(sr, addr);

	/*
	 * Have to substitute some entry. Use the primary hash for this.
	 * Use low bits of timebase as random generator.
	 */
	pteg = &pmap_pteg_table[ptegidx];
	__asm __volatile("mftb %0" : "=r"(i));
	i &= 7;
	pt = &pteg->pt[i];

	source_pvo = NULL;
	victim_pvo = NULL;
	LIST_FOREACH(pvo, &pmap_pvo_table[ptegidx], pvo_olink) {
		/*
		 * We need to find a pvo entry for this address.
		 */
		PMAP_PVO_CHECK(pvo);
		if (source_pvo == NULL &&
		    pmap_pte_match(&pvo->pvo_pte, sr, addr,
		    pvo->pvo_pte.pte_hi & PTE_HID)) {
			/*
			 * Now found an entry to be spilled into the pteg.
			 * The PTE is now valid, so we know it's active.
			 */
			j = pmap_pte_insert(ptegidx, &pvo->pvo_pte);

			if (j >= 0) {
				PVO_PTEGIDX_SET(pvo, j);
				pmap_pte_overflow--;
				PMAP_PVO_CHECK(pvo);
				return (1);
			}

			source_pvo = pvo;

			if (victim_pvo != NULL)
				break;
		}

		/*
		 * We also need the pvo entry of the victim we are replacing
		 * so save the R & C bits of the PTE.
		 */
		if ((pt->pte_hi & PTE_HID) == 0 && victim_pvo == NULL &&
		    pmap_pte_compare(pt, &pvo->pvo_pte)) {
			victim_pvo = pvo;
			if (source_pvo != NULL)
				break;
		}
	}

	if (source_pvo == NULL)
		return (0);

	if (victim_pvo == NULL) {
		if ((pt->pte_hi & PTE_HID) == 0)
			panic("pmap_pte_spill: victim p-pte (%p) has no pvo "
			    "entry", pt);

		/*
		 * If this is a secondary PTE, we need to search its primary
		 * pvo bucket for the matching PVO.
		 */
		LIST_FOREACH(pvo, &pmap_pvo_table[ptegidx ^ pmap_pteg_mask],
		    pvo_olink) {
			PMAP_PVO_CHECK(pvo);
			/*
			 * We also need the pvo entry of the victim we are
			 * replacing so save the R & C bits of the PTE.
			 */
			if (pmap_pte_compare(pt, &pvo->pvo_pte)) {
				victim_pvo = pvo;
				break;
			}
		}

		if (victim_pvo == NULL)
			panic("pmap_pte_spill: victim s-pte (%p) has no pvo "
			    "entry", pt);
	}

	/*
	 * We are invalidating the TLB entry for the EA we are replacing even
	 * though it's valid. If we don't, we lose any ref/chg bit changes
	 * contained in the TLB entry.
	 */
	source_pvo->pvo_pte.pte_hi &= ~PTE_HID;

	pmap_pte_unset(pt, &victim_pvo->pvo_pte, victim_pvo->pvo_vaddr);
	pmap_pte_set(pt, &source_pvo->pvo_pte);

	PVO_PTEGIDX_CLR(victim_pvo);
	PVO_PTEGIDX_SET(source_pvo, i);
	pmap_pte_replacements++;

	PMAP_PVO_CHECK(victim_pvo);
	PMAP_PVO_CHECK(source_pvo);

	return (1);
}
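
/*
 * Hashing reminder for the insert routine below: in the OEA MMU the
 * secondary PTEG of an address is the masked complement of its
 * primary PTEG, i.e. ptegidx ^ pmap_pteg_mask, which is also how the
 * spill code above finds a secondary PTE's primary pvo bucket. A PTE
 * installed in the secondary group carries PTE_HID so that lookups
 * know which hash produced its slot.
 */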

static int
pmap_pte_insert(u_int ptegidx, struct pte *pvo_pt)
{
	struct	pte *pt;
	int	i;

	/*
	 * First try primary hash.
	 */
	for (pt = pmap_pteg_table[ptegidx].pt, i = 0; i < 8; i++, pt++) {
		if ((pt->pte_hi & PTE_VALID) == 0) {
			pvo_pt->pte_hi &= ~PTE_HID;
			pmap_pte_set(pt, pvo_pt);
			return (i);
		}
	}

	/*
	 * Now try secondary hash.
	 */
	ptegidx ^= pmap_pteg_mask;
	for (pt = pmap_pteg_table[ptegidx].pt, i = 0; i < 8; i++, pt++) {
		if ((pt->pte_hi & PTE_VALID) == 0) {
			pvo_pt->pte_hi |= PTE_HID;
			pmap_pte_set(pt, pvo_pt);
			return (i);
		}
	}

	panic("pmap_pte_insert: overflow");
	return (-1);
}

static boolean_t
pmap_query_bit(vm_page_t m, int ptebit)
{
	struct	pvo_entry *pvo;
	struct	pte *pt;

#if 0
	if (pmap_attr_fetch(m) & ptebit)
		return (TRUE);
#endif

	LIST_FOREACH(pvo, vm_page_to_pvoh(m), pvo_vlink) {
		PMAP_PVO_CHECK(pvo);	/* sanity check */

		/*
		 * See if we saved the bit off. If so, cache it and return
		 * success.
		 */
		if (pvo->pvo_pte.pte_lo & ptebit) {
			pmap_attr_save(m, ptebit);
			PMAP_PVO_CHECK(pvo);	/* sanity check */
			return (TRUE);
		}
	}

	/*
	 * No luck, now go through the hard part of looking at the PTEs
	 * themselves. Sync so that any pending REF/CHG bits are flushed to
	 * the PTEs.
	 */
	SYNC();
	LIST_FOREACH(pvo, vm_page_to_pvoh(m), pvo_vlink) {
		PMAP_PVO_CHECK(pvo);	/* sanity check */

		/*
		 * See if this pvo has a valid PTE. If so, fetch the
		 * REF/CHG bits from the valid PTE. If the appropriate
		 * ptebit is set, cache it and return success.
		 */
		pt = pmap_pvo_to_pte(pvo, -1);
		if (pt != NULL) {
			pmap_pte_synch(pt, &pvo->pvo_pte);
			if (pvo->pvo_pte.pte_lo & ptebit) {
				pmap_attr_save(m, ptebit);
				PMAP_PVO_CHECK(pvo);	/* sanity check */
				return (TRUE);
			}
		}
	}

	return (FALSE);
}
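
/*
 * The REF/CHG strategy shared by pmap_query_bit() above and
 * pmap_clear_bit() below: the hardware sets the reference and change
 * bits only in the page-table PTE, so the pmap keeps a per-page
 * software cache (pmap_attr_save/pmap_attr_fetch) plus the
 * per-mapping copy in each pvo. Queries try the cheap cached copies
 * first and fall back to pmap_pte_synch() on the live PTEs; clears
 * must visit every live PTE so that no stale bit survives.
 */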

static u_int
pmap_clear_bit(vm_page_t m, int ptebit, int *origbit)
{
	u_int	count;
	struct	pvo_entry *pvo;
	struct	pte *pt;
	int	rv;

	/*
	 * Clear the cached value.
	 */
	rv = pmap_attr_fetch(m);
	pmap_attr_clear(m, ptebit);

	/*
	 * Sync so that any pending REF/CHG bits are flushed to the PTEs (so
	 * we can reset the right ones). Note that since the pvo entries and
	 * list heads are accessed via BAT0 and are never placed in the page
	 * table, we don't have to worry about further accesses setting the
	 * REF/CHG bits.
	 */
	SYNC();

	/*
	 * For each pvo entry, clear the pvo's ptebit. If this pvo has a
	 * valid pte clear the ptebit from the valid pte.
	 */
	count = 0;
	LIST_FOREACH(pvo, vm_page_to_pvoh(m), pvo_vlink) {
		PMAP_PVO_CHECK(pvo);	/* sanity check */
		pt = pmap_pvo_to_pte(pvo, -1);
		if (pt != NULL) {
			pmap_pte_synch(pt, &pvo->pvo_pte);
			if (pvo->pvo_pte.pte_lo & ptebit) {
				count++;
				pmap_pte_clear(pt, PVO_VADDR(pvo), ptebit);
			}
		}
		rv |= pvo->pvo_pte.pte_lo;
		pvo->pvo_pte.pte_lo &= ~ptebit;
		PMAP_PVO_CHECK(pvo);	/* sanity check */
	}

	if (origbit != NULL) {
		*origbit = rv;
	}

	return (count);
}

/*
 * Return 0 if the physical range is encompassed by battable[idx];
 * otherwise return an errno describing why it is not.
 */
static int
pmap_bat_mapped(int idx, vm_offset_t pa, vm_size_t size)
{
	u_int	prot;
	u_int32_t	start;
	u_int32_t	end;
	u_int32_t	bat_ble;

	/*
	 * Return immediately if not a valid mapping
	 */
	if (!(battable[idx].batu & BAT_Vs))
		return (EINVAL);

	/*
	 * The BAT entry must be cache-inhibited, guarded, and r/w
	 * so it can function as an i/o page
	 */
	prot = battable[idx].batl & (BAT_I|BAT_G|BAT_PP_RW);
	if (prot != (BAT_I|BAT_G|BAT_PP_RW))
		return (EPERM);

	/*
	 * The address should be within the BAT range. Assume that the
	 * start address in the BAT has the correct alignment (thus
	 * not requiring masking)
	 */
	start = battable[idx].batl & BAT_PBS;
	bat_ble = (battable[idx].batu & ~(BAT_EBS)) | 0x03;
	end = start | (bat_ble << 15) | 0x7fff;

	if ((pa < start) || ((pa + size) > end))
		return (ERANGE);

	return (0);
}

int
pmap_dev_direct_mapped(vm_offset_t pa, vm_size_t size)
{
	int i;

	/*
	 * This currently does not work for entries that
	 * overlap 256M BAT segments.
	 */

	for (i = 0; i < 16; i++)
		if (pmap_bat_mapped(i, pa, size) == 0)
			return (0);

	return (EFAULT);
}
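
/*
 * Worked example of the BAT range math in pmap_bat_mapped() above,
 * assuming the standard OEA upper-BAT layout (block-length field plus
 * the two validity bits in the low bits of batu): for a 256MB BAT
 * entry, (bat_ble << 15) | 0x7fff comes to 0x0fffffff, making "end"
 * the last byte of the 256MB block; for the minimum 128KB entry,
 * bat_ble is just the or-ed in 0x03 and the mask is 0x1ffff.
 */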

/*
 * Map a set of physical memory pages into the kernel virtual
 * address space. Return a pointer to where it is mapped. This
 * routine is intended to be used for mapping device memory,
 * NOT real memory.
 */
void *
pmap_mapdev(vm_offset_t pa, vm_size_t size)
{
	vm_offset_t va, tmpva, ppa, offset;
	int i;

	ppa = trunc_page(pa);
	offset = pa & PAGE_MASK;
	size = roundup(offset + size, PAGE_SIZE);

	GIANT_REQUIRED;

	/*
	 * If the physical address lies within a valid BAT table entry,
	 * return the 1:1 mapping. This currently doesn't work
	 * for regions that overlap 256M BAT segments.
	 */
	for (i = 0; i < 16; i++) {
		if (pmap_bat_mapped(i, pa, size) == 0)
			return ((void *) pa);
	}

	va = kmem_alloc_nofault(kernel_map, size);
	if (!va)
		panic("pmap_mapdev: Couldn't alloc kernel virtual memory");

	for (tmpva = va; size > 0;) {
		pmap_kenter(tmpva, ppa);
		TLBIE(tmpva); /* XXX or should it be invalidate-all ? */
		size -= PAGE_SIZE;
		tmpva += PAGE_SIZE;
		ppa += PAGE_SIZE;
	}

	return ((void *)(va + offset));
}

void
pmap_unmapdev(vm_offset_t va, vm_size_t size)
{
	vm_offset_t base, offset;

	/*
	 * If this is outside kernel virtual space, then it's a
	 * battable entry and doesn't require unmapping
	 */
	if ((va >= VM_MIN_KERNEL_ADDRESS) && (va <= VM_MAX_KERNEL_ADDRESS)) {
		base = trunc_page(va);
		offset = va & PAGE_MASK;
		size = roundup(offset + size, PAGE_SIZE);
		kmem_free(kernel_map, base, size);
	}
}
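
/*
 * Hypothetical driver-side sketch of the pair above (regs_pa and
 * REG_WINDOW_SIZE are placeholder names): map a device register
 * window, use it, and tear it down. If the window fell inside a BAT,
 * pmap_unmapdev() recognizes the address as outside kernel virtual
 * space and does nothing.
 *
 *	volatile uint32_t *regs;
 *
 *	regs = pmap_mapdev(regs_pa, REG_WINDOW_SIZE);
 *	-- access device registers through regs --
 *	pmap_unmapdev((vm_offset_t)regs, REG_WINDOW_SIZE);
 */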