/* From: $NetBSD: pmap.c,v 1.148 2004/04/03 04:35:48 bsh Exp $ */
/*-
 * Copyright 2004 Olivier Houchard.
 * Copyright 2003 Wasabi Systems, Inc.
 * All rights reserved.
 *
 * Written by Steve C. Woodford for Wasabi Systems, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed for the NetBSD Project by
 *	Wasabi Systems, Inc.
 * 4. The name of Wasabi Systems, Inc. may not be used to endorse
 *    or promote products derived from this software without specific prior
 *    written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL WASABI SYSTEMS, INC
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*-
 * Copyright (c) 2002-2003 Wasabi Systems, Inc.
 * Copyright (c) 2001 Richard Earnshaw
 * Copyright (c) 2001-2002 Christopher Gilbert
 * All rights reserved.
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. The name of the company nor the name of the author may be used to
 *    endorse or promote products derived from this software without specific
 *    prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
 * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
/*-
 * Copyright (c) 1999 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Charles M. Hannum.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the NetBSD
 *	Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*-
 * Copyright (c) 1994-1998 Mark Brinicombe.
 * Copyright (c) 1994 Brini.
 * All rights reserved.
 *
 * This code is derived from software written for Brini by Mark Brinicombe
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by Mark Brinicombe.
 * 4. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * RiscBSD kernel project
 *
 * pmap.c
 *
 * Machine dependent vm stuff
 *
 * Created      : 20/09/94
 */

/*
 * Special compilation symbols
 * PMAP_DEBUG - Build in pmap_debug_level code
 */

/* Include header files */

#include "opt_vm.h"

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: head/sys/arm/arm/pmap.c 147217 2005-06-10 03:33:36Z alc $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/proc.h>
#include <sys/malloc.h>
#include <sys/msgbuf.h>
#include <sys/vmmeter.h>
#include <sys/mman.h>
#include <sys/smp.h>
#include <sys/sx.h>
#include <sys/sched.h>

#include <vm/vm.h>
#include <vm/uma.h>
#include <vm/pmap.h>
#include <vm/vm_kern.h>
#include <vm/vm_object.h>
#include <vm/vm_map.h>
#include <vm/vm_page.h>
#include <vm/vm_pageout.h>
#include <vm/vm_extern.h>

#include <sys/lock.h>
#include <sys/mutex.h>
#include <machine/md_var.h>
#include <machine/vmparam.h>
#include <machine/cpu.h>
#include <machine/cpufunc.h>
#include <machine/pcb.h>

#ifdef PMAP_DEBUG
#define PDEBUG(_lev_,_stat_) \
	if (pmap_debug_level >= (_lev_)) \
		((_stat_))
#define dprintf printf

int pmap_debug_level = 0;
#define PMAP_INLINE
#else	/* PMAP_DEBUG */
#define PDEBUG(_lev_,_stat_) /* Nothing */
#define dprintf(x, arg...)
#define PMAP_INLINE __inline
#endif	/* PMAP_DEBUG */
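
/*
 * Illustrative only (not part of the original file): PDEBUG() wraps a
 * whole statement, so callers pass the statement itself as the second
 * argument, e.g.
 *
 *	PDEBUG(1, printf("pmap_alloc_l1: pm=%p\n", pm));
 *
 * which prints only when the kernel is built with PMAP_DEBUG and
 * pmap_debug_level is at least 1, and expands to nothing otherwise.
 */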

extern struct pv_addr systempage;
/*
 * Internal function prototypes
 */
static void		pmap_free_pv_entry(pv_entry_t);
static pv_entry_t	pmap_get_pv_entry(void);

static void		pmap_vac_me_harder(struct vm_page *, pmap_t,
    vm_offset_t);
static void		pmap_vac_me_kpmap(struct vm_page *, pmap_t,
    vm_offset_t);
static void		pmap_vac_me_user(struct vm_page *, pmap_t, vm_offset_t);
static void		pmap_alloc_l1(pmap_t);
static void		pmap_free_l1(pmap_t);
static void		pmap_use_l1(pmap_t);

static int		pmap_clearbit(struct vm_page *, u_int);

static struct l2_bucket *pmap_get_l2_bucket(pmap_t, vm_offset_t);
static struct l2_bucket *pmap_alloc_l2_bucket(pmap_t, vm_offset_t);
static void		pmap_free_l2_bucket(pmap_t, struct l2_bucket *, u_int);
static vm_offset_t	kernel_pt_lookup(vm_paddr_t);

static MALLOC_DEFINE(M_VMPMAP, "pmap", "PMAP L1");

vm_offset_t avail_end;		/* PA of last available physical page */
vm_offset_t virtual_avail;	/* VA of first avail page (after kernel bss) */
vm_offset_t virtual_end;	/* VA of last avail page (end of kernel AS) */
vm_offset_t pmap_curmaxkvaddr;

extern void *end;
vm_offset_t kernel_vm_end = 0;

struct pmap kernel_pmap_store;
pmap_t kernel_pmap;

static pt_entry_t *csrc_pte, *cdst_pte;
static vm_offset_t csrcp, cdstp;
static void		pmap_init_l1(struct l1_ttable *, pd_entry_t *);
/*
 * These routines are called when the CPU type is identified to set up
 * the PTE prototypes, cache modes, etc.
 *
 * The variables are always here, just in case LKMs need to reference
 * them (though, they shouldn't).
 */

pt_entry_t	pte_l1_s_cache_mode;
pt_entry_t	pte_l1_s_cache_mode_pt;
pt_entry_t	pte_l1_s_cache_mask;

pt_entry_t	pte_l2_l_cache_mode;
pt_entry_t	pte_l2_l_cache_mode_pt;
pt_entry_t	pte_l2_l_cache_mask;

pt_entry_t	pte_l2_s_cache_mode;
pt_entry_t	pte_l2_s_cache_mode_pt;
pt_entry_t	pte_l2_s_cache_mask;

pt_entry_t	pte_l2_s_prot_u;
pt_entry_t	pte_l2_s_prot_w;
pt_entry_t	pte_l2_s_prot_mask;

pt_entry_t	pte_l1_s_proto;
pt_entry_t	pte_l1_c_proto;
pt_entry_t	pte_l2_s_proto;

void		(*pmap_copy_page_func)(vm_paddr_t, vm_paddr_t);
void		(*pmap_zero_page_func)(vm_paddr_t, int, int);
/*
 * Which pmap is currently 'live' in the cache
 *
 * XXXSCW: Fix for SMP ...
 */
union pmap_cache_state *pmap_cache_state;

LIST_HEAD(pmaplist, pmap);
struct pmaplist allpmaps;

static boolean_t pmap_initialized = FALSE;	/* Has pmap_init completed? */

/* static pt_entry_t *msgbufmap;*/
struct msgbuf *msgbufp = 0;

extern void bcopy_page(vm_offset_t, vm_offset_t);
extern void bzero_page(vm_offset_t);

char *_tmppt;

/*
 * Metadata for L1 translation tables.
 */
struct l1_ttable {
	/* Entry on the L1 Table list */
	SLIST_ENTRY(l1_ttable) l1_link;

	/* Entry on the L1 Least Recently Used list */
	TAILQ_ENTRY(l1_ttable) l1_lru;

	/* Track how many domains are allocated from this L1 */
	volatile u_int l1_domain_use_count;

	/*
	 * A free-list of domain numbers for this L1.
	 * We avoid using ffs() and a bitmap to track domains since ffs()
	 * is slow on ARM.
	 */
	u_int8_t l1_domain_first;
	u_int8_t l1_domain_free[PMAP_DOMAINS];

	/* Physical address of this L1 page table */
	vm_paddr_t l1_physaddr;

	/* KVA of this L1 page table */
	pd_entry_t *l1_kva;
};

/*
 * Convert a virtual address into its L1 table index. That is, the
 * index used to locate the L2 descriptor table pointer in an L1 table.
 * This is basically used to index l1->l1_kva[].
 *
 * Each L2 descriptor table represents 1MB of VA space.
 */
#define	L1_IDX(va)		(((vm_offset_t)(va)) >> L1_S_SHIFT)
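
/*
 * Illustrative only (not part of the original file): with 1MB sections,
 * L1_S_SHIFT is 20, so for example
 *
 *	L1_IDX(0x00345678) == 0x003
 *
 * i.e. the mapping for that VA lives in the fourth L1 slot, and each
 * increment of the index moves forward by one 1MB section of VA space.
 */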

/*
 * L1 Page Tables are tracked using a Least Recently Used list.
 *  - New L1s are allocated from the HEAD.
 *  - Freed L1s are added to the TAIL.
 *  - Recently accessed L1s (where an 'access' is some change to one of
 *    the userland pmaps which owns this L1) are moved to the TAIL.
 */
static TAILQ_HEAD(, l1_ttable) l1_lru_list;
/*
 * A list of all L1 tables
 */
static SLIST_HEAD(, l1_ttable) l1_list;
static struct mtx l1_lru_lock;

/*
 * The l2_dtable tracks L2_BUCKET_SIZE worth of L1 slots.
 *
 * This is normally 16MB worth of L2 page descriptors for any given pmap.
 * Reference counts are maintained for L2 descriptors so they can be
 * freed when empty.
 */
struct l2_dtable {
	/* The number of L2 page descriptors allocated to this l2_dtable */
	u_int l2_occupancy;

	/* List of L2 page descriptors */
	struct l2_bucket {
		pt_entry_t *l2b_kva;	/* KVA of L2 Descriptor Table */
		vm_paddr_t l2b_phys;	/* Physical address of same */
		u_short l2b_l1idx;	/* This L2 table's L1 index */
		u_short l2b_occupancy;	/* How many active descriptors */
	} l2_bucket[L2_BUCKET_SIZE];
};

/* pmap_kenter_internal flags */
#define KENTER_CACHE	0x1
#define KENTER_USER	0x2

/*
 * Given an L1 table index, calculate the corresponding l2_dtable index
 * and bucket index within the l2_dtable.
 */
#define	L2_IDX(l1idx)		(((l1idx) >> L2_BUCKET_LOG2) & \
				 (L2_SIZE - 1))
#define	L2_BUCKET(l1idx)	((l1idx) & (L2_BUCKET_SIZE - 1))

/*
 * Given a virtual address, this macro returns the
 * virtual address required to drop into the next L2 bucket.
 */
#define	L2_NEXT_BUCKET(va)	(((va) & L1_S_FRAME) + L1_S_SIZE)

/*
 * L2 allocation.
 */
#define	pmap_alloc_l2_dtable()		\
		(void*)uma_zalloc(l2table_zone, M_NOWAIT)
#define	pmap_free_l2_dtable(l2)	\
		uma_zfree(l2table_zone, l2)
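
/*
 * Illustrative only (not part of the original file): assuming the usual
 * L2_BUCKET_LOG2 of 4 (so L2_BUCKET_SIZE is 16, i.e. one l2_dtable covers
 * 16 x 1MB = 16MB of VA), then for va = 0x01234567:
 *
 *	L1_IDX(va)            == 0x012
 *	L2_IDX(L1_IDX(va))    == 0x012 >> 4  == 0x1
 *	L2_BUCKET(L1_IDX(va)) == 0x012 & 0xf == 0x2
 *	L2_NEXT_BUCKET(va)    == 0x01300000
 *
 * so the PTE for va is found via pm->pm_l2[0x1]->l2_bucket[0x2].
 */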

/*
 * We try to map the page tables write-through, if possible. However, not
 * all CPUs have a write-through cache mode, so on those we have to sync
 * the cache when we frob page tables.
 *
 * We try to evaluate this at compile time, if possible. However, it's
 * not always possible to do that, hence this run-time var.
 */
int	pmap_needs_pte_sync;

/*
 * Macro to determine if a mapping might be resident in the
 * instruction cache and/or TLB
 */
#define	PV_BEEN_EXECD(f)  (((f) & (PVF_REF | PVF_EXEC)) == (PVF_REF | PVF_EXEC))

/*
 * Macro to determine if a mapping might be resident in the
 * data cache and/or TLB
 */
#define	PV_BEEN_REFD(f)   (((f) & PVF_REF) != 0)

/*
 * Data for the pv entry allocation mechanism
 */
#define	MINPV	2048

#ifndef PMAP_SHPGPERPROC
#define PMAP_SHPGPERPROC 200
#endif

#define pmap_is_current(pm)	((pm) == pmap_kernel() || \
	    curproc->p_vmspace->vm_map.pmap == (pm))
static uma_zone_t pvzone;
uma_zone_t l2zone;
static uma_zone_t l2table_zone;
static vm_offset_t pmap_kernel_l2dtable_kva;
static vm_offset_t pmap_kernel_l2ptp_kva;
static vm_paddr_t pmap_kernel_l2ptp_phys;
static struct vm_object pvzone_obj;
static struct vm_object l2zone_obj;
static int pv_entry_count = 0, pv_entry_max = 0, pv_entry_high_water = 0;
int pmap_pagedaemon_waken = 0;

/*
 * This list exists for the benefit of pmap_map_chunk().  It keeps track
 * of the kernel L2 tables during bootstrap, so that pmap_map_chunk() can
 * find them as necessary.
 *
 * Note that the data on this list MUST remain valid after initarm()
 * returns, as pmap_bootstrap() uses it to construct L2 table metadata.
 */
SLIST_HEAD(, pv_addr) kernel_pt_list = SLIST_HEAD_INITIALIZER(kernel_pt_list);

static void
pmap_init_l1(struct l1_ttable *l1, pd_entry_t *l1pt)
{
	int i;

	l1->l1_kva = l1pt;
	l1->l1_domain_use_count = 0;
	l1->l1_domain_first = 0;

	for (i = 0; i < PMAP_DOMAINS; i++)
		l1->l1_domain_free[i] = i + 1;

	/*
	 * Copy the kernel's L1 entries to each new L1.
	 */
	if (pmap_initialized)
		memcpy(l1pt, pmap_kernel()->pm_l1->l1_kva, L1_TABLE_SIZE);

	if ((l1->l1_physaddr = pmap_extract(pmap_kernel(),
	    (vm_offset_t)l1pt)) == 0)
		panic("pmap_init_l1: can't get PA of L1 at %p", l1pt);
	SLIST_INSERT_HEAD(&l1_list, l1, l1_link);
	TAILQ_INSERT_TAIL(&l1_lru_list, l1, l1_lru);
}

static vm_offset_t
kernel_pt_lookup(vm_paddr_t pa)
{
	struct pv_addr *pv;

	SLIST_FOREACH(pv, &kernel_pt_list, pv_list) {
#ifndef ARM32_NEW_VM_LAYOUT
		if (pv->pv_pa == (pa & ~PAGE_MASK)) {
			return (pv->pv_va | (pa & PAGE_MASK));
		}
#else
		if (pv->pv_pa == pa)
			return (pv->pv_va);
#endif
	}
	return (0);
}
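
/*
 * Illustrative only (not part of the original file): kernel_pt_lookup()
 * recovers the KVA of a bootstrap L2 table from the physical address an
 * L1 descriptor points at, e.g. (as xscale_setup_minidata() does below):
 *
 *	pte = (pt_entry_t *)kernel_pt_lookup(pde[L1_IDX(va)] & L1_C_ADDR_MASK);
 *
 * It returns 0 when the PA does not belong to any table on kernel_pt_list.
 */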

#if (ARM_MMU_GENERIC + ARM_MMU_SA1) != 0
void
pmap_pte_init_generic(void)
{

	pte_l1_s_cache_mode = L1_S_B|L1_S_C;
	pte_l1_s_cache_mask = L1_S_CACHE_MASK_generic;

	pte_l2_l_cache_mode = L2_B|L2_C;
	pte_l2_l_cache_mask = L2_L_CACHE_MASK_generic;

	pte_l2_s_cache_mode = L2_B|L2_C;
	pte_l2_s_cache_mask = L2_S_CACHE_MASK_generic;

	/*
	 * If we have a write-through cache, set B and C.  If
	 * we have a write-back cache, then we assume setting
	 * only C will make those pages write-through.
	 */
	if (cpufuncs.cf_dcache_wb_range == (void *) cpufunc_nullop) {
		pte_l1_s_cache_mode_pt = L1_S_B|L1_S_C;
		pte_l2_l_cache_mode_pt = L2_B|L2_C;
		pte_l2_s_cache_mode_pt = L2_B|L2_C;
	} else {
		pte_l1_s_cache_mode_pt = L1_S_C;
		pte_l2_l_cache_mode_pt = L2_C;
		pte_l2_s_cache_mode_pt = L2_C;
	}

	pte_l2_s_prot_u = L2_S_PROT_U_generic;
	pte_l2_s_prot_w = L2_S_PROT_W_generic;
	pte_l2_s_prot_mask = L2_S_PROT_MASK_generic;

	pte_l1_s_proto = L1_S_PROTO_generic;
	pte_l1_c_proto = L1_C_PROTO_generic;
	pte_l2_s_proto = L2_S_PROTO_generic;

	pmap_copy_page_func = pmap_copy_page_generic;
	pmap_zero_page_func = pmap_zero_page_generic;
}

#if defined(CPU_ARM8)
void
pmap_pte_init_arm8(void)
{

	/*
	 * ARM8 is compatible with generic, but we need to use
	 * the page tables uncached.
	 */
	pmap_pte_init_generic();

	pte_l1_s_cache_mode_pt = 0;
	pte_l2_l_cache_mode_pt = 0;
	pte_l2_s_cache_mode_pt = 0;
}
#endif /* CPU_ARM8 */

#if defined(CPU_ARM9) && defined(ARM9_CACHE_WRITE_THROUGH)
void
pmap_pte_init_arm9(void)
{

	/*
	 * ARM9 is compatible with generic, but we want to use
	 * write-through caching for now.
	 */
	pmap_pte_init_generic();

	pte_l1_s_cache_mode = L1_S_C;
	pte_l2_l_cache_mode = L2_C;
	pte_l2_s_cache_mode = L2_C;

	pte_l1_s_cache_mode_pt = L1_S_C;
	pte_l2_l_cache_mode_pt = L2_C;
	pte_l2_s_cache_mode_pt = L2_C;
}
#endif /* CPU_ARM9 */
#endif /* (ARM_MMU_GENERIC + ARM_MMU_SA1) != 0 */

#if defined(CPU_ARM10)
void
pmap_pte_init_arm10(void)
{

	/*
	 * ARM10 is compatible with generic, but we want to use
	 * write-through caching for now.
	 */
	pmap_pte_init_generic();

	pte_l1_s_cache_mode = L1_S_B | L1_S_C;
	pte_l2_l_cache_mode = L2_B | L2_C;
	pte_l2_s_cache_mode = L2_B | L2_C;

	pte_l1_s_cache_mode_pt = L1_S_C;
	pte_l2_l_cache_mode_pt = L2_C;
	pte_l2_s_cache_mode_pt = L2_C;

}
#endif /* CPU_ARM10 */

#if ARM_MMU_SA1 == 1
void
pmap_pte_init_sa1(void)
{

	/*
	 * The StrongARM SA-1 cache does not have a write-through
	 * mode.  So, do the generic initialization, then reset
	 * the page table cache mode to B=1,C=1, and note that
	 * the PTEs need to be sync'd.
	 */
	pmap_pte_init_generic();

	pte_l1_s_cache_mode_pt = L1_S_B|L1_S_C;
	pte_l2_l_cache_mode_pt = L2_B|L2_C;
	pte_l2_s_cache_mode_pt = L2_B|L2_C;

	pmap_needs_pte_sync = 1;
}
#endif /* ARM_MMU_SA1 == 1 */

#if ARM_MMU_XSCALE == 1
#if (ARM_NMMUS > 1)
static u_int xscale_use_minidata;
#endif

void
pmap_pte_init_xscale(void)
{
	uint32_t auxctl;
	int write_through = 0;

	pte_l1_s_cache_mode = L1_S_B|L1_S_C|L1_S_XSCALE_P;
	pte_l1_s_cache_mask = L1_S_CACHE_MASK_xscale;

	pte_l2_l_cache_mode = L2_B|L2_C;
	pte_l2_l_cache_mask = L2_L_CACHE_MASK_xscale;

	pte_l2_s_cache_mode = L2_B|L2_C;
	pte_l2_s_cache_mask = L2_S_CACHE_MASK_xscale;

	pte_l1_s_cache_mode_pt = L1_S_C;
	pte_l2_l_cache_mode_pt = L2_C;
	pte_l2_s_cache_mode_pt = L2_C;
#ifdef XSCALE_CACHE_READ_WRITE_ALLOCATE
	/*
	 * The XScale core has an enhanced mode where writes that
	 * miss the cache cause a cache line to be allocated.  This
	 * is significantly faster than the traditional, write-through
	 * behavior of this case.
	 */
	pte_l1_s_cache_mode |= L1_S_XSCALE_TEX(TEX_XSCALE_X);
	pte_l2_l_cache_mode |= L2_XSCALE_L_TEX(TEX_XSCALE_X);
	pte_l2_s_cache_mode |= L2_XSCALE_T_TEX(TEX_XSCALE_X);
#endif /* XSCALE_CACHE_READ_WRITE_ALLOCATE */
#ifdef XSCALE_CACHE_WRITE_THROUGH
	/*
	 * Some versions of the XScale core have various bugs in
	 * their cache units, the work-around for which is to run
	 * the cache in write-through mode.  Unfortunately, this
	 * has a major (negative) impact on performance.  So, we
	 * go ahead and run fast-and-loose, in the hopes that we
	 * don't line up the planets in a way that will trip the
	 * bugs.
	 *
	 * However, we give you the option to be slow-but-correct.
	 */
	write_through = 1;
#elif defined(XSCALE_CACHE_WRITE_BACK)
	/* force write back cache mode */
	write_through = 0;
#elif defined(CPU_XSCALE_PXA2X0)
	/*
	 * Intel PXA2[15]0 processors are known to have a bug in
	 * write-back cache on revision 4 and earlier (stepping
	 * A[01] and B[012]).  Fixed for C0 and later.
	 */
	{
		uint32_t id, type;

		id = cpufunc_id();
		type = id & ~(CPU_ID_XSCALE_COREREV_MASK|CPU_ID_REVISION_MASK);

		if (type == CPU_ID_PXA250 || type == CPU_ID_PXA210) {
			if ((id & CPU_ID_REVISION_MASK) < 5) {
				/* write through for stepping A0-1 and B0-2 */
				write_through = 1;
			}
		}
	}
#endif /* XSCALE_CACHE_WRITE_THROUGH */

	if (write_through) {
		pte_l1_s_cache_mode = L1_S_C;
		pte_l2_l_cache_mode = L2_C;
		pte_l2_s_cache_mode = L2_C;
	}

#if (ARM_NMMUS > 1)
	xscale_use_minidata = 1;
#endif

	pte_l2_s_prot_u = L2_S_PROT_U_xscale;
	pte_l2_s_prot_w = L2_S_PROT_W_xscale;
	pte_l2_s_prot_mask = L2_S_PROT_MASK_xscale;

	pte_l1_s_proto = L1_S_PROTO_xscale;
	pte_l1_c_proto = L1_C_PROTO_xscale;
	pte_l2_s_proto = L2_S_PROTO_xscale;

	pmap_copy_page_func = pmap_copy_page_xscale;
	pmap_zero_page_func = pmap_zero_page_xscale;

	/*
	 * Disable ECC protection of page table access, for now.
	 */
	__asm __volatile("mrc p15, 0, %0, c1, c0, 1" : "=r" (auxctl));
	auxctl &= ~XSCALE_AUXCTL_P;
	__asm __volatile("mcr p15, 0, %0, c1, c0, 1" : : "r" (auxctl));
}

/*
 * xscale_setup_minidata:
 *
 *	Set up the mini-data cache clean area.  We require the
 *	caller to allocate the right amount of physically and
 *	virtually contiguous space.
 */
extern vm_offset_t xscale_minidata_clean_addr;
extern vm_size_t xscale_minidata_clean_size; /* already initialized */
void
xscale_setup_minidata(vm_offset_t l1pt, vm_offset_t va, vm_paddr_t pa)
{
	pd_entry_t *pde = (pd_entry_t *) l1pt;
	pt_entry_t *pte;
	vm_size_t size;
	uint32_t auxctl;

	xscale_minidata_clean_addr = va;

	/* Round it to page size. */
	size = (xscale_minidata_clean_size + L2_S_OFFSET) & L2_S_FRAME;

	for (; size != 0;
	     va += L2_S_SIZE, pa += L2_S_SIZE, size -= L2_S_SIZE) {
#ifndef ARM32_NEW_VM_LAYOUT
		pte = (pt_entry_t *)
		    kernel_pt_lookup(pde[va >> L1_S_SHIFT] & L2_S_FRAME);
#else
		pte = (pt_entry_t *) kernel_pt_lookup(
		    pde[L1_IDX(va)] & L1_C_ADDR_MASK);
#endif
		if (pte == NULL)
			panic("xscale_setup_minidata: can't find L2 table for "
			    "VA 0x%08x", (u_int32_t) va);
#ifndef ARM32_NEW_VM_LAYOUT
		pte[(va >> PAGE_SHIFT) & 0x3ff] =
#else
		pte[l2pte_index(va)] =
#endif
		    L2_S_PROTO | pa | L2_S_PROT(PTE_KERNEL, VM_PROT_READ) |
		    L2_C | L2_XSCALE_T_TEX(TEX_XSCALE_X);
	}

	/*
	 * Configure the mini-data cache for write-back with
	 * read/write-allocate.
	 *
	 * NOTE: In order to reconfigure the mini-data cache, we must
	 * make sure it contains no valid data!  In order to do that,
	 * we must issue a global data cache invalidate command!
	 *
	 * WE ASSUME WE ARE RUNNING UN-CACHED WHEN THIS ROUTINE IS CALLED!
	 * THIS IS VERY IMPORTANT!
	 */

	/* Invalidate data and mini-data. */
	__asm __volatile("mcr p15, 0, %0, c7, c6, 0" : : "r" (0));
	__asm __volatile("mrc p15, 0, %0, c1, c0, 1" : "=r" (auxctl));
	auxctl = (auxctl & ~XSCALE_AUXCTL_MD_MASK) | XSCALE_AUXCTL_MD_WB_RWA;
	__asm __volatile("mcr p15, 0, %0, c1, c0, 1" : : "r" (auxctl));
}
#endif

/*
 * Allocate an L1 translation table for the specified pmap.
 * This is called at pmap creation time.
 */
static void
pmap_alloc_l1(pmap_t pm)
{
	struct l1_ttable *l1;
	u_int8_t domain;

	/*
	 * Remove the L1 at the head of the LRU list
	 */
	mtx_lock(&l1_lru_lock);
	l1 = TAILQ_FIRST(&l1_lru_list);
	TAILQ_REMOVE(&l1_lru_list, l1, l1_lru);

	/*
	 * Pick the first available domain number, and update
	 * the link to the next number.
	 */
	domain = l1->l1_domain_first;
	l1->l1_domain_first = l1->l1_domain_free[domain];

	/*
	 * If there are still free domain numbers in this L1,
	 * put it back on the TAIL of the LRU list.
	 */
	if (++l1->l1_domain_use_count < PMAP_DOMAINS)
		TAILQ_INSERT_TAIL(&l1_lru_list, l1, l1_lru);

	mtx_unlock(&l1_lru_lock);

	/*
	 * Fix up the relevant bits in the pmap structure
	 */
	pm->pm_l1 = l1;
	pm->pm_domain = domain;
}
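
/*
 * Illustrative only (not part of the original file): the domain numbers
 * form an intrusive free list threaded through l1_domain_free[].  After
 * pmap_init_l1(), l1_domain_first == 0 and l1_domain_free[i] == i + 1,
 * so successive pmap_alloc_l1() calls hand out domains 0, 1, 2, ...
 * If pmap_free_l1() below then returns domain 1, l1_domain_free[1] is
 * pointed at the old head and l1_domain_first becomes 1, so domain 1 is
 * the next one handed out: LIFO reuse without ffs() or a bitmap.
 */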

/*
 * Free an L1 translation table.
 * This is called at pmap destruction time.
 */
static void
pmap_free_l1(pmap_t pm)
{
	struct l1_ttable *l1 = pm->pm_l1;

	mtx_lock(&l1_lru_lock);

	/*
	 * If this L1 is currently on the LRU list, remove it.
	 */
	if (l1->l1_domain_use_count < PMAP_DOMAINS)
		TAILQ_REMOVE(&l1_lru_list, l1, l1_lru);

	/*
	 * Free up the domain number which was allocated to the pmap
	 */
	l1->l1_domain_free[pm->pm_domain] = l1->l1_domain_first;
	l1->l1_domain_first = pm->pm_domain;
	l1->l1_domain_use_count--;

	/*
	 * The L1 now must have at least 1 free domain, so add
	 * it back to the LRU list. If the use count is zero,
	 * put it at the head of the list, otherwise it goes
	 * to the tail.
	 */
	if (l1->l1_domain_use_count == 0) {
		TAILQ_INSERT_HEAD(&l1_lru_list, l1, l1_lru);
	} else
		TAILQ_INSERT_TAIL(&l1_lru_list, l1, l1_lru);

	mtx_unlock(&l1_lru_lock);
}

static PMAP_INLINE void
pmap_use_l1(pmap_t pm)
{
	struct l1_ttable *l1;

	/*
	 * Do nothing if we're in interrupt context.
	 * Access to an L1 by the kernel pmap must not affect
	 * the LRU list.
	 */
	if (pm == pmap_kernel())
		return;

	l1 = pm->pm_l1;

	/*
	 * If the L1 is not currently on the LRU list, just return
	 */
	if (l1->l1_domain_use_count == PMAP_DOMAINS)
		return;

	mtx_lock(&l1_lru_lock);

	/*
	 * Check the use count again, now that we've acquired the lock
	 */
	if (l1->l1_domain_use_count == PMAP_DOMAINS) {
		mtx_unlock(&l1_lru_lock);
		return;
	}

	/*
	 * Move the L1 to the back of the LRU list
	 */
	TAILQ_REMOVE(&l1_lru_list, l1, l1_lru);
	TAILQ_INSERT_TAIL(&l1_lru_list, l1, l1_lru);

	mtx_unlock(&l1_lru_lock);
}

/*
 * Returns a pointer to the L2 bucket associated with the specified pmap
 * and VA, or NULL if no L2 bucket exists for the address.
 */
static PMAP_INLINE struct l2_bucket *
pmap_get_l2_bucket(pmap_t pm, vm_offset_t va)
{
	struct l2_dtable *l2;
	struct l2_bucket *l2b;
	u_short l1idx;

	l1idx = L1_IDX(va);

	if ((l2 = pm->pm_l2[L2_IDX(l1idx)]) == NULL ||
	    (l2b = &l2->l2_bucket[L2_BUCKET(l1idx)])->l2b_kva == NULL)
		return (NULL);

	return (l2b);
}

/*
 * Returns a pointer to the L2 bucket associated with the specified pmap
 * and VA.
 *
 * If no L2 bucket exists, perform the necessary allocations to put an L2
 * bucket/page table in place.
 *
 * Note that if a new L2 bucket/page was allocated, the caller *must*
 * increment the bucket occupancy counter appropriately *before*
 * releasing the pmap's lock to ensure no other thread or cpu deallocates
 * the bucket/page in the meantime.
 */
static struct l2_bucket *
pmap_alloc_l2_bucket(pmap_t pm, vm_offset_t va)
{
	struct l2_dtable *l2;
	struct l2_bucket *l2b;
	u_short l1idx;

	l1idx = L1_IDX(va);

	if ((l2 = pm->pm_l2[L2_IDX(l1idx)]) == NULL) {
		/*
		 * No mapping at this address, as there is
		 * no entry in the L1 table.
		 * Need to allocate a new l2_dtable.
		 */
		if ((l2 = pmap_alloc_l2_dtable()) == NULL) {
			return (NULL);
		}
		bzero(l2, sizeof(*l2));
		/*
		 * Link it into the parent pmap
		 */
		pm->pm_l2[L2_IDX(l1idx)] = l2;
	}

	l2b = &l2->l2_bucket[L2_BUCKET(l1idx)];

	/*
	 * Fetch pointer to the L2 page table associated with the address.
	 */
	if (l2b->l2b_kva == NULL) {
		pt_entry_t *ptep;

		/*
		 * No L2 page table has been allocated. Chances are, this
		 * is because we just allocated the l2_dtable, above.
		 * Note: check for NULL before taking vtophys() of the
		 * new table.
		 */
		ptep = (void*)uma_zalloc(l2zone, M_NOWAIT);
		if (ptep == NULL) {
			/*
			 * Oops, no more L2 page tables available at this
			 * time. We may need to deallocate the l2_dtable
			 * if we allocated a new one above.
			 */
			if (l2->l2_occupancy == 0) {
				pm->pm_l2[L2_IDX(l1idx)] = NULL;
				pmap_free_l2_dtable(l2);
			}
			return (NULL);
		}
		l2b->l2b_phys = vtophys(ptep);

		l2->l2_occupancy++;
		l2b->l2b_kva = ptep;
		l2b->l2b_l1idx = l1idx;
	}

	return (l2b);
}
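
/*
 * Illustrative only (not part of the original file): the typical caller
 * pattern, sketched under the occupancy contract stated above, is:
 *
 *	struct l2_bucket *l2b = pmap_alloc_l2_bucket(pm, va);
 *	pt_entry_t *ptep = &l2b->l2b_kva[l2pte_index(va)];
 *	if (*ptep == 0)			(new mapping, not a modification)
 *		l2b->l2b_occupancy++;
 *	*ptep = L2_S_PROTO | pa | ...;
 *	PTE_SYNC(ptep);
 *
 * keeping l2b_occupancy in step with the number of valid PTEs so that
 * pmap_free_l2_bucket() below can garbage-collect empty tables.
 */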

static PMAP_INLINE void
#ifndef PMAP_INCLUDE_PTE_SYNC
pmap_free_l2_ptp(pt_entry_t *l2)
#else
pmap_free_l2_ptp(boolean_t need_sync, pt_entry_t *l2)
#endif
{
#ifdef PMAP_INCLUDE_PTE_SYNC
	/*
	 * Note: With a write-back cache, we may need to sync this
	 * L2 table before re-using it.
	 * This is because it may have belonged to a non-current
	 * pmap, in which case the cache syncs would have been
	 * skipped when the pages were being unmapped. If the
	 * L2 table were then to be immediately re-allocated to
	 * the *current* pmap, it may well contain stale mappings
	 * which have not yet been cleared by a cache write-back
	 * and so would still be visible to the mmu.
	 */
	if (need_sync)
		PTE_SYNC_RANGE(l2, L2_TABLE_SIZE_REAL / sizeof(pt_entry_t));
#endif
	uma_zfree(l2zone, l2);
}

/*
 * One or more mappings in the specified L2 descriptor table have just been
 * invalidated.
 *
 * Garbage collect the metadata and descriptor table itself if necessary.
 *
 * The pmap lock must be acquired when this is called (not necessary
 * for the kernel pmap).
 */
static void
pmap_free_l2_bucket(pmap_t pm, struct l2_bucket *l2b, u_int count)
{
	struct l2_dtable *l2;
	pd_entry_t *pl1pd, l1pd;
	pt_entry_t *ptep;
	u_short l1idx;

	/*
	 * Update the bucket's reference count according to how many
	 * PTEs the caller has just invalidated.
	 */
	l2b->l2b_occupancy -= count;

	/*
	 * Note:
	 *
	 * Level 2 page tables allocated to the kernel pmap are never freed
	 * as that would require checking all Level 1 page tables and
	 * removing any references to the Level 2 page table. See also the
	 * comment elsewhere about never freeing bootstrap L2 descriptors.
	 *
	 * We make do with just invalidating the mapping in the L2 table.
	 *
	 * This isn't really a big deal in practice and, in fact, leads
	 * to a performance win over time as we don't need to continually
	 * alloc/free.
	 */
	if (l2b->l2b_occupancy > 0 || pm == pmap_kernel())
		return;

	/*
	 * There are no more valid mappings in this level 2 page table.
	 * Go ahead and NULL-out the pointer in the bucket, then
	 * free the page table.
	 */
	l1idx = l2b->l2b_l1idx;
	ptep = l2b->l2b_kva;
	l2b->l2b_kva = NULL;

	pl1pd = &pm->pm_l1->l1_kva[l1idx];

	/*
	 * If the L1 slot matches the pmap's domain
	 * number, then invalidate it.
	 */
	l1pd = *pl1pd & (L1_TYPE_MASK | L1_C_DOM_MASK);
	if (l1pd == (L1_C_DOM(pm->pm_domain) | L1_TYPE_C)) {
		*pl1pd = 0;
		PTE_SYNC(pl1pd);
	}

	/*
	 * Release the L2 descriptor table back to the pool cache.
	 */
#ifndef PMAP_INCLUDE_PTE_SYNC
	pmap_free_l2_ptp(ptep);
#else
	pmap_free_l2_ptp(!pmap_is_current(pm), ptep);
#endif

	/*
	 * Update the reference count in the associated l2_dtable
	 */
	l2 = pm->pm_l2[L2_IDX(l1idx)];
	if (--l2->l2_occupancy > 0)
		return;

	/*
	 * There are no more valid mappings in any of the Level 1
	 * slots managed by this l2_dtable. Go ahead and NULL-out
	 * the pointer in the parent pmap and free the l2_dtable.
	 */
	pm->pm_l2[L2_IDX(l1idx)] = NULL;
	pmap_free_l2_dtable(l2);
}

/*
 * Pool cache constructors for L2 descriptor tables, metadata and pmap
 * structures.
 */
static int
pmap_l2ptp_ctor(void *mem, int size, void *arg, int flags)
{
#ifndef PMAP_INCLUDE_PTE_SYNC
	struct l2_bucket *l2b;
	pt_entry_t *ptep, pte;
	vm_offset_t va = (vm_offset_t)mem & ~PAGE_MASK;

	/*
	 * The mappings for these page tables were initially made using
	 * pmap_kenter() by the pool subsystem. Therefore, the cache-
	 * mode will not be right for page table mappings. To avoid
	 * polluting the pmap_kenter() code with a special case for
	 * page tables, we simply fix up the cache-mode here if it's not
	 * correct.
	 */
#ifdef ARM_USE_SMALL_ALLOC
	if (flags & UMA_SLAB_KMEM) {
#endif
		l2b = pmap_get_l2_bucket(pmap_kernel(), va);
		ptep = &l2b->l2b_kva[l2pte_index(va)];
		pte = *ptep;

		if ((pte & L2_S_CACHE_MASK) != pte_l2_s_cache_mode_pt) {
			/*
			 * Page tables must have the cache-mode set to
			 * Write-Thru.
			 */
			*ptep = (pte & ~L2_S_CACHE_MASK) |
			    pte_l2_s_cache_mode_pt;
			PTE_SYNC(ptep);
			cpu_tlb_flushD_SE(va);
			cpu_cpwait();
		}

#ifdef ARM_USE_SMALL_ALLOC
	}
#endif
#endif
	memset(mem, 0, L2_TABLE_SIZE_REAL);
	PTE_SYNC_RANGE(mem, L2_TABLE_SIZE_REAL / sizeof(pt_entry_t));
	return (0);
}

/*
 * A bunch of routines to conditionally flush the caches/TLB depending
 * on whether the specified pmap actually needs to be flushed at any
 * given time.
 */
static PMAP_INLINE void
pmap_tlb_flushID_SE(pmap_t pm, vm_offset_t va)
{

	if (pmap_is_current(pm))
		cpu_tlb_flushID_SE(va);
}

static PMAP_INLINE void
pmap_tlb_flushD_SE(pmap_t pm, vm_offset_t va)
{

	if (pmap_is_current(pm))
		cpu_tlb_flushD_SE(va);
}

static PMAP_INLINE void
pmap_tlb_flushID(pmap_t pm)
{

	if (pmap_is_current(pm))
		cpu_tlb_flushID();
}

static PMAP_INLINE void
pmap_tlb_flushD(pmap_t pm)
{

	if (pmap_is_current(pm))
		cpu_tlb_flushD();
}

static PMAP_INLINE void
pmap_idcache_wbinv_range(pmap_t pm, vm_offset_t va, vm_size_t len)
{

	if (pmap_is_current(pm))
		cpu_idcache_wbinv_range(va, len);
}

static PMAP_INLINE void
pmap_dcache_wb_range(pmap_t pm, vm_offset_t va, vm_size_t len,
    boolean_t do_inv, boolean_t rd_only)
{

	if (pmap_is_current(pm)) {
		if (do_inv) {
			if (rd_only)
				cpu_dcache_inv_range(va, len);
			else
				cpu_dcache_wbinv_range(va, len);
		} else
		if (!rd_only)
			cpu_dcache_wb_range(va, len);
	}
}

static PMAP_INLINE void
pmap_idcache_wbinv_all(pmap_t pm)
{

	if (pmap_is_current(pm))
		cpu_idcache_wbinv_all();
}

static PMAP_INLINE void
pmap_dcache_wbinv_all(pmap_t pm)
{

	if (pmap_is_current(pm))
		cpu_dcache_wbinv_all();
}
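
/*
 * Illustrative only (not part of the original file): the (do_inv, rd_only)
 * flags of pmap_dcache_wb_range() above select the D-cache operation:
 *
 *	do_inv	rd_only		operation
 *	FALSE	FALSE		write-back only (cpu_dcache_wb_range)
 *	FALSE	TRUE		nothing (clean read-only data is safe)
 *	TRUE	FALSE		write-back and invalidate
 *	TRUE	TRUE		invalidate only (no dirty data to lose)
 */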

/*
 * this routine defines the region(s) of memory that should
 * not be tested for the modified bit.
 */
static PMAP_INLINE int
pmap_track_modified(vm_offset_t va)
{
	if ((va < kmi.clean_sva) || (va >= kmi.clean_eva))
		return 1;
	else
		return 0;
}

/*
 * PTE_SYNC_CURRENT:
 *
 *	Make sure the pte is written out to RAM.
 *	We need to do this in any of the following cases:
 *	- We're dealing with the kernel pmap
 *	- There is no pmap active in the cache/tlb.
 *	- The specified pmap is 'active' in the cache/tlb.
 */
#ifdef PMAP_INCLUDE_PTE_SYNC
#define	PTE_SYNC_CURRENT(pm, ptep)	\
do {					\
	if (PMAP_NEEDS_PTE_SYNC &&	\
	    pmap_is_current(pm))	\
		PTE_SYNC(ptep);		\
} while (/*CONSTCOND*/0)
#else
#define	PTE_SYNC_CURRENT(pm, ptep)	/* nothing */
#endif

/*
 * Since we have a virtually indexed cache, we may need to inhibit caching if
 * there is more than one mapping and at least one of them is writable.
 * Since we purge the cache on every context switch, we only need to check for
 * other mappings within the same pmap, or kernel_pmap.
 * This function is also called when a page is unmapped, to possibly reenable
 * caching on any remaining mappings.
 *
 * The code implements the following logic, where:
 *
 * KW = # of kernel read/write pages
 * KR = # of kernel read only pages
 * UW = # of user read/write pages
 * UR = # of user read only pages
 *
 * KC = kernel mapping is cacheable
 * UC = user mapping is cacheable
 *
 *              KW=0,KR=0   KW=0,KR>0   KW=1,KR=0   KW>1,KR>=0
 *            +---------------------------------------------
 * UW=0,UR=0  |    ---        KC=1        KC=1        KC=0
 * UW=0,UR>0  |   UC=1      KC=1,UC=1   KC=0,UC=0   KC=0,UC=0
 * UW=1,UR=0  |   UC=1      KC=0,UC=0   KC=0,UC=0   KC=0,UC=0
 * UW>1,UR>=0 |   UC=0      KC=0,UC=0   KC=0,UC=0   KC=0,UC=0
 */

static const int pmap_vac_flags[4][4] = {
	{-1,		0,		0,		PVF_KNC},
	{0,		0,		PVF_NC,		PVF_NC},
	{0,		PVF_NC,		PVF_NC,		PVF_NC},
	{PVF_UNC,	PVF_NC,		PVF_NC,		PVF_NC}
};

static PMAP_INLINE int
pmap_get_vac_flags(const struct vm_page *pg)
{
	int kidx, uidx;

	kidx = 0;
	if (pg->md.kro_mappings || pg->md.krw_mappings > 1)
		kidx |= 1;
	if (pg->md.krw_mappings)
		kidx |= 2;

	uidx = 0;
	if (pg->md.uro_mappings || pg->md.urw_mappings > 1)
		uidx |= 1;
	if (pg->md.urw_mappings)
		uidx |= 2;

	return (pmap_vac_flags[uidx][kidx]);
}
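
/*
 * Illustrative only (not part of the original file): a page with one
 * kernel read/write mapping (krw=1, kro=0) and one user read-only
 * mapping (urw=0, uro=1) yields kidx = 2, uidx = 1, and
 * pmap_vac_flags[1][2] == PVF_NC: the mappings must be made
 * non-cacheable, matching the UW=0,UR>0 / KW=1,KR=0 cell of the
 * table above.
 */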

static __inline void
pmap_vac_me_harder(struct vm_page *pg, pmap_t pm, vm_offset_t va)
{
	int nattr;

	nattr = pmap_get_vac_flags(pg);

	if (nattr < 0) {
		pg->md.pvh_attrs &= ~PVF_NC;
		return;
	}

	if (nattr == 0 && (pg->md.pvh_attrs & PVF_NC) == 0) {
		return;
	}

	if (pm == pmap_kernel())
		pmap_vac_me_kpmap(pg, pm, va);
	else
		pmap_vac_me_user(pg, pm, va);

	pg->md.pvh_attrs = (pg->md.pvh_attrs & ~PVF_NC) | nattr;
}

static void
pmap_vac_me_kpmap(struct vm_page *pg, pmap_t pm, vm_offset_t va)
{
	u_int u_cacheable, u_entries;
	struct pv_entry *pv;
	pmap_t last_pmap = pm;

	/*
	 * Pass one, see if there are both kernel and user pmaps for
	 * this page. Calculate whether there are user-writable or
	 * kernel-writable pages.
	 */
	u_cacheable = 0;
	TAILQ_FOREACH(pv, &pg->md.pv_list, pv_list) {
		if (pv->pv_pmap != pm && (pv->pv_flags & PVF_NC) == 0)
			u_cacheable++;
	}

	u_entries = pg->md.urw_mappings + pg->md.uro_mappings;

	/*
	 * We know we have just been updating a kernel entry, so if
	 * all user pages are already cacheable, then there is nothing
	 * further to do.
	 */
	if (pg->md.k_mappings == 0 && u_cacheable == u_entries)
		return;

	if (u_entries) {
		/*
		 * Scan over the list again, for each entry, if it
		 * might not be set correctly, call pmap_vac_me_user
		 * to recalculate the settings.
		 */
		TAILQ_FOREACH(pv, &pg->md.pv_list, pv_list) {
			/*
			 * We know kernel mappings will get set
			 * correctly in other calls.  We also know
			 * that if the pmap is the same as last_pmap
			 * then we've just handled this entry.
			 */
			if (pv->pv_pmap == pm || pv->pv_pmap == last_pmap)
				continue;

			/*
			 * If there are kernel entries and this page
			 * is writable but non-cacheable, then we can
			 * skip this entry also.
			 */
			if (pg->md.k_mappings &&
			    (pv->pv_flags & (PVF_NC | PVF_WRITE)) ==
			    (PVF_NC | PVF_WRITE))
				continue;

			/*
			 * Similarly if there are no kernel-writable
			 * entries and the page is already
			 * read-only/cacheable.
			 */
1375129198Scognet */ 1376129198Scognet if (pg->md.krw_mappings == 0 && 1377129198Scognet (pv->pv_flags & (PVF_NC | PVF_WRITE)) == 0) 1378129198Scognet continue; 1379129198Scognet 1380129198Scognet /* 1381129198Scognet * For some of the remaining cases, we know 1382129198Scognet * that we must recalculate, but for others we 1383129198Scognet * can't tell if they are correct or not, so 1384129198Scognet * we recalculate anyway. 1385129198Scognet */ 1386129198Scognet pmap_vac_me_user(pg, (last_pmap = pv->pv_pmap), 0); 1387129198Scognet } 1388129198Scognet 1389129198Scognet if (pg->md.k_mappings == 0) 1390129198Scognet return; 1391129198Scognet } 1392129198Scognet 1393129198Scognet pmap_vac_me_user(pg, pm, va); 1394129198Scognet} 1395129198Scognet 1396129198Scognetstatic void 1397129198Scognetpmap_vac_me_user(struct vm_page *pg, pmap_t pm, vm_offset_t va) 1398129198Scognet{ 1399129198Scognet pmap_t kpmap = pmap_kernel(); 1400129198Scognet struct pv_entry *pv, *npv; 1401129198Scognet struct l2_bucket *l2b; 1402129198Scognet pt_entry_t *ptep, pte; 1403129198Scognet u_int entries = 0; 1404129198Scognet u_int writable = 0; 1405129198Scognet u_int cacheable_entries = 0; 1406129198Scognet u_int kern_cacheable = 0; 1407129198Scognet u_int other_writable = 0; 1408129198Scognet 1409129198Scognet /* 1410129198Scognet * Count mappings and writable mappings in this pmap. 1411129198Scognet * Include kernel mappings as part of our own. 1412129198Scognet * Keep a pointer to the first one. 1413129198Scognet */ 1414129198Scognet npv = TAILQ_FIRST(&pg->md.pv_list); 1415129198Scognet TAILQ_FOREACH(pv, &pg->md.pv_list, pv_list) { 1416129198Scognet /* Count mappings in the same pmap */ 1417129198Scognet if (pm == pv->pv_pmap || kpmap == pv->pv_pmap) { 1418129198Scognet if (entries++ == 0) 1419129198Scognet npv = pv; 1420129198Scognet 1421129198Scognet /* Cacheable mappings */ 1422129198Scognet if ((pv->pv_flags & PVF_NC) == 0) { 1423129198Scognet cacheable_entries++; 1424129198Scognet if (kpmap == pv->pv_pmap) 1425129198Scognet kern_cacheable++; 1426129198Scognet } 1427129198Scognet 1428129198Scognet /* Writable mappings */ 1429129198Scognet if (pv->pv_flags & PVF_WRITE) 1430129198Scognet ++writable; 1431129198Scognet } else 1432129198Scognet if (pv->pv_flags & PVF_WRITE) 1433129198Scognet other_writable = 1; 1434129198Scognet } 1435129198Scognet 1436129198Scognet /* 1437129198Scognet * Enable or disable caching as necessary. 1438129198Scognet * Note: the first entry might be part of the kernel pmap, 1439129198Scognet * so we can't assume this is indicative of the state of the 1440129198Scognet * other (maybe non-kpmap) entries. 
1441129198Scognet */ 1442129198Scognet if ((entries > 1 && writable) || 1443129198Scognet (entries > 0 && pm == kpmap && other_writable)) { 1444129198Scognet if (cacheable_entries == 0) 1445129198Scognet return; 1446129198Scognet 1447129198Scognet for (pv = npv; pv; pv = TAILQ_NEXT(pv, pv_list)) { 1448129198Scognet if ((pm != pv->pv_pmap && kpmap != pv->pv_pmap) || 1449129198Scognet (pv->pv_flags & PVF_NC)) 1450129198Scognet continue; 1451129198Scognet 1452129198Scognet pv->pv_flags |= PVF_NC; 1453129198Scognet 1454129198Scognet l2b = pmap_get_l2_bucket(pv->pv_pmap, pv->pv_va); 1455129198Scognet ptep = &l2b->l2b_kva[l2pte_index(pv->pv_va)]; 1456129198Scognet pte = *ptep & ~L2_S_CACHE_MASK; 1457129198Scognet 1458129198Scognet if ((va != pv->pv_va || pm != pv->pv_pmap) && 1459129198Scognet l2pte_valid(pte)) { 1460129198Scognet if (PV_BEEN_EXECD(pv->pv_flags)) { 1461129198Scognet pmap_idcache_wbinv_range(pv->pv_pmap, 1462129198Scognet pv->pv_va, PAGE_SIZE); 1463129198Scognet pmap_tlb_flushID_SE(pv->pv_pmap, 1464129198Scognet pv->pv_va); 1465129198Scognet } else 1466129198Scognet if (PV_BEEN_REFD(pv->pv_flags)) { 1467129198Scognet pmap_dcache_wb_range(pv->pv_pmap, 1468129198Scognet pv->pv_va, PAGE_SIZE, TRUE, 1469129198Scognet (pv->pv_flags & PVF_WRITE) == 0); 1470129198Scognet pmap_tlb_flushD_SE(pv->pv_pmap, 1471129198Scognet pv->pv_va); 1472129198Scognet } 1473129198Scognet } 1474129198Scognet 1475129198Scognet *ptep = pte; 1476129198Scognet PTE_SYNC_CURRENT(pv->pv_pmap, ptep); 1477129198Scognet } 1478129198Scognet cpu_cpwait(); 1479129198Scognet } else 1480129198Scognet if (entries > cacheable_entries) { 1481129198Scognet /* 1482129198Scognet * Turn caching back on for some pages. If it is a kernel 1483129198Scognet * page, only do so if there are no other writable pages. 1484129198Scognet */ 1485129198Scognet for (pv = npv; pv; pv = TAILQ_NEXT(pv, pv_list)) { 1486129198Scognet if (!(pv->pv_flags & PVF_NC) || (pm != pv->pv_pmap && 1487129198Scognet (kpmap != pv->pv_pmap || other_writable))) 1488129198Scognet continue; 1489129198Scognet 1490129198Scognet pv->pv_flags &= ~PVF_NC; 1491129198Scognet 1492129198Scognet l2b = pmap_get_l2_bucket(pv->pv_pmap, pv->pv_va); 1493129198Scognet ptep = &l2b->l2b_kva[l2pte_index(pv->pv_va)]; 1494129198Scognet pte = (*ptep & ~L2_S_CACHE_MASK) | pte_l2_s_cache_mode; 1495129198Scognet 1496129198Scognet if (l2pte_valid(pte)) { 1497129198Scognet if (PV_BEEN_EXECD(pv->pv_flags)) { 1498129198Scognet pmap_tlb_flushID_SE(pv->pv_pmap, 1499129198Scognet pv->pv_va); 1500129198Scognet } else 1501129198Scognet if (PV_BEEN_REFD(pv->pv_flags)) { 1502129198Scognet pmap_tlb_flushD_SE(pv->pv_pmap, 1503129198Scognet pv->pv_va); 1504129198Scognet } 1505129198Scognet } 1506129198Scognet 1507129198Scognet *ptep = pte; 1508129198Scognet PTE_SYNC_CURRENT(pv->pv_pmap, ptep); 1509129198Scognet } 1510129198Scognet } 1511129198Scognet} 1512129198Scognet
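/*
 * Example of the policy pmap_vac_me_user() implements: if a process maps
 * the same physical page read/write at two different virtual addresses in
 * one pmap, entries == 2 and writable == 2, so the first branch above
 * cleans the cache and marks both mappings PVF_NC.  Once one of the two
 * mappings is later removed, entries (1) exceeds cacheable_entries (0)
 * and the second branch re-enables caching on the surviving mapping.
 */
1513129198Scognet/* 1514129198Scognet * Modify pte bits for all ptes corresponding to the given physical address. 1515129198Scognet * We use `maskbits' rather than `clearbits' because we're always passing 1516129198Scognet * constants and the latter would require an extra inversion at run-time. 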
1517129198Scognet */ 1518135641Scognetstatic int 1519129198Scognetpmap_clearbit(struct vm_page *pg, u_int maskbits) 1520129198Scognet{ 1521129198Scognet struct l2_bucket *l2b; 1522129198Scognet struct pv_entry *pv; 1523129198Scognet pt_entry_t *ptep, npte, opte; 1524129198Scognet pmap_t pm; 1525129198Scognet vm_offset_t va; 1526129198Scognet u_int oflags; 1527135641Scognet int count = 0; 1528129198Scognet#if 0 1529129198Scognet PMAP_HEAD_TO_MAP_LOCK(); 1530129198Scognet simple_lock(&pg->mdpage.pvh_slock); 1531129198Scognet#endif 1532129198Scognet 1533129198Scognet /* 1534129198Scognet * Clear saved attributes (modify, reference) 1535129198Scognet */ 1536129198Scognet pg->md.pvh_attrs &= ~(maskbits & (PVF_MOD | PVF_REF)); 1537129198Scognet 1538129198Scognet if (TAILQ_EMPTY(&pg->md.pv_list)) { 1539129198Scognet#if 0 1540129198Scognet simple_unlock(&pg->mdpage.pvh_slock); 1541129198Scognet PMAP_HEAD_TO_MAP_UNLOCK(); 1542129198Scognet#endif 1543135641Scognet return (0); 1544129198Scognet } 1545129198Scognet 1546129198Scognet /* 1547129198Scognet * Loop over all current mappings, setting/clearing as appropriate 1548129198Scognet */ 1549129198Scognet TAILQ_FOREACH(pv, &pg->md.pv_list, pv_list) { 1550129198Scognet va = pv->pv_va; 1551129198Scognet pm = pv->pv_pmap; 1552129198Scognet oflags = pv->pv_flags; 1553129198Scognet pv->pv_flags &= ~maskbits; 1554129198Scognet 1555129198Scognet#if 0 1556129198Scognet pmap_acquire_pmap_lock(pm); 1557129198Scognet#endif 1558129198Scognet 1559129198Scognet l2b = pmap_get_l2_bucket(pm, va); 1560129198Scognet 1561129198Scognet ptep = &l2b->l2b_kva[l2pte_index(va)]; 1562129198Scognet npte = opte = *ptep; 1563129198Scognet 1564144760Scognet if (maskbits & (PVF_WRITE|PVF_MOD) && 1565144760Scognet !pmap_track_modified(pv->pv_va)) { 1566129198Scognet if ((pv->pv_flags & PVF_NC)) { 1567129198Scognet /* 1568129198Scognet * Entry is not cacheable: 1569129198Scognet * 1570129198Scognet * Don't turn caching on again if this is a 1571129198Scognet * modified emulation. This would be 1572129198Scognet * inconsistent with the settings created by 1573129198Scognet * pmap_vac_me_harder(). Otherwise, it's safe 1574129198Scognet * to re-enable caching. 1575129198Scognet * 1576129198Scognet * There's no need to call pmap_vac_me_harder() 1577129198Scognet * here: all pages are losing their write 1578129198Scognet * permission. 1579129198Scognet */ 1580129198Scognet if (maskbits & PVF_WRITE) { 1581129198Scognet npte |= pte_l2_s_cache_mode; 1582129198Scognet pv->pv_flags &= ~PVF_NC; 1583129198Scognet } 1584129198Scognet } else 1585129198Scognet if (opte & L2_S_PROT_W) { 1586144760Scognet vm_page_dirty(pg); 1587129198Scognet /* 1588129198Scognet * Entry is writable/cacheable: check if pmap 1589129198Scognet * is current; if it is, flush it, otherwise it 1590129198Scognet * won't be in the cache 1591129198Scognet */ 1592129198Scognet if (PV_BEEN_EXECD(oflags)) 1593129198Scognet pmap_idcache_wbinv_range(pm, pv->pv_va, 1594129198Scognet PAGE_SIZE); 1595129198Scognet else 1596129198Scognet if (PV_BEEN_REFD(oflags)) 1597129198Scognet pmap_dcache_wb_range(pm, pv->pv_va, 1598129198Scognet PAGE_SIZE, 1599129198Scognet (maskbits & PVF_REF) ?
TRUE : FALSE, 1600129198Scognet FALSE); 1601129198Scognet } 1602129198Scognet 1603129198Scognet /* make the pte read only */ 1604129198Scognet npte &= ~L2_S_PROT_W; 1605129198Scognet 1606129198Scognet if (maskbits & PVF_WRITE) { 1607129198Scognet /* 1608129198Scognet * Keep alias accounting up to date 1609129198Scognet */ 1610129198Scognet if (pv->pv_pmap == pmap_kernel()) { 1611129198Scognet if (oflags & PVF_WRITE) { 1612129198Scognet pg->md.krw_mappings--; 1613129198Scognet pg->md.kro_mappings++; 1614129198Scognet } 1615129198Scognet } else 1616129198Scognet if (oflags & PVF_WRITE) { 1617129198Scognet pg->md.urw_mappings--; 1618129198Scognet pg->md.uro_mappings++; 1619129198Scognet } 1620129198Scognet } 1621129198Scognet } 1622129198Scognet 1623144760Scognet if (maskbits & PVF_REF && !pmap_track_modified(pv->pv_va)) { 1624129198Scognet if ((pv->pv_flags & PVF_NC) == 0 && 1625129198Scognet (maskbits & (PVF_WRITE|PVF_MOD)) == 0) { 1626129198Scognet /* 1627129198Scognet * Check npte here; we may have already 1628129198Scognet * done the wbinv above, and the validity 1629129198Scognet * of the PTE is the same for opte and 1630129198Scognet * npte. 1631129198Scognet */ 1632129198Scognet if (npte & L2_S_PROT_W) { 1633129198Scognet if (PV_BEEN_EXECD(oflags)) 1634129198Scognet pmap_idcache_wbinv_range(pm, 1635129198Scognet pv->pv_va, PAGE_SIZE); 1636129198Scognet else 1637129198Scognet if (PV_BEEN_REFD(oflags)) 1638129198Scognet pmap_dcache_wb_range(pm, 1639129198Scognet pv->pv_va, PAGE_SIZE, 1640129198Scognet TRUE, FALSE); 1641129198Scognet } else 1642129198Scognet if ((npte & L2_TYPE_MASK) != L2_TYPE_INV) { 1643129198Scognet /* XXXJRT need idcache_inv_range */ 1644129198Scognet if (PV_BEEN_EXECD(oflags)) 1645129198Scognet pmap_idcache_wbinv_range(pm, 1646129198Scognet pv->pv_va, PAGE_SIZE); 1647129198Scognet else 1648129198Scognet if (PV_BEEN_REFD(oflags)) 1649129198Scognet pmap_dcache_wb_range(pm, 1650129198Scognet pv->pv_va, PAGE_SIZE, 1651129198Scognet TRUE, TRUE); 1652129198Scognet } 1653129198Scognet } 1654129198Scognet 1655129198Scognet /* 1656129198Scognet * Make the PTE invalid so that we will take a 1657129198Scognet * page fault the next time the mapping is 1658129198Scognet * referenced. 1659129198Scognet */ 1660129198Scognet npte &= ~L2_TYPE_MASK; 1661129198Scognet npte |= L2_TYPE_INV; 1662129198Scognet } 1663129198Scognet 1664129198Scognet if (npte != opte) { 1665135641Scognet count++; 1666129198Scognet *ptep = npte; 1667129198Scognet PTE_SYNC(ptep); 1668129198Scognet /* Flush the TLB entry if a current pmap. 
*/ 1669129198Scognet if (PV_BEEN_EXECD(oflags)) 1670129198Scognet pmap_tlb_flushID_SE(pm, pv->pv_va); 1671129198Scognet else 1672129198Scognet if (PV_BEEN_REFD(oflags)) 1673129198Scognet pmap_tlb_flushD_SE(pm, pv->pv_va); 1674129198Scognet } 1675129198Scognet 1676129198Scognet#if 0 1677129198Scognet pmap_release_pmap_lock(pm); 1678129198Scognet#endif 1679129198Scognet 1680129198Scognet } 1681129198Scognet 1682129198Scognet#if 0 1683129198Scognet simple_unlock(&pg->mdpage.pvh_slock); 1684129198Scognet PMAP_HEAD_TO_MAP_UNLOCK(); 1685129198Scognet#endif 1686137664Scognet if (maskbits & PVF_WRITE) 1687137664Scognet vm_page_flag_clear(pg, PG_WRITEABLE); 1688135641Scognet return (count); 1689129198Scognet} 1690129198Scognet 1691129198Scognet/* 1692129198Scognet * main pv_entry manipulation functions: 1693129198Scognet * pmap_enter_pv: enter a mapping onto a vm_page list 1694129198Scognet * pmap_remove_pv: remove a mapping from a vm_page list 1695129198Scognet * 1696129198Scognet * NOTE: pmap_enter_pv expects to lock the pvh itself 1697129198Scognet * pmap_remove_pv expects the caller to lock the pvh before calling 1698129198Scognet */ 1699129198Scognet 1700129198Scognet/* 1701129198Scognet * pmap_enter_pv: enter a mapping onto a vm_page list 1702129198Scognet * 1703129198Scognet * => caller should hold the proper lock on pmap_main_lock 1704129198Scognet * => caller should have pmap locked 1705129198Scognet * => we will gain the lock on the vm_page and allocate the new pv_entry 1706129198Scognet * => caller should adjust ptp's wire_count before calling 1707129198Scognet * => caller should not adjust pmap's wire_count 1708129198Scognet */ 1709129198Scognetstatic void 1710129198Scognetpmap_enter_pv(struct vm_page *pg, struct pv_entry *pve, pmap_t pm, 1711129198Scognet vm_offset_t va, u_int flags) 1712129198Scognet{ 1713129198Scognet 1714129198Scognet 1715129198Scognet pve->pv_pmap = pm; 1716129198Scognet pve->pv_va = va; 1717129198Scognet pve->pv_flags = flags; 1718129198Scognet 1719129198Scognet#if 0 1720129198Scognet mtx_lock(&pg->md.pvh_mtx); 1721129198Scognet#endif 1722129198Scognet TAILQ_INSERT_HEAD(&pg->md.pv_list, pve, pv_list); 1723144760Scognet TAILQ_INSERT_HEAD(&pm->pm_pvlist, pve, pv_plist); 1724129198Scognet pg->md.pvh_attrs |= flags & (PVF_REF | PVF_MOD); 1725129198Scognet if (pm == pmap_kernel()) { 1726129198Scognet if (flags & PVF_WRITE) 1727129198Scognet pg->md.krw_mappings++; 1728129198Scognet else 1729129198Scognet pg->md.kro_mappings++; 1730129198Scognet } else 1731129198Scognet if (flags & PVF_WRITE) 1732129198Scognet pg->md.urw_mappings++; 1733129198Scognet else 1734129198Scognet pg->md.uro_mappings++; 1735135641Scognet pg->md.pv_list_count++; 1736129198Scognet#if 0 1737129198Scognet mtx_unlock(&pg->md.pvh_mtx); 1738129198Scognet#endif 1739129198Scognet if (pve->pv_flags & PVF_WIRED) 1740129198Scognet ++pm->pm_stats.wired_count; 1741144760Scognet vm_page_flag_set(pg, PG_REFERENCED); 1742129198Scognet} 1743129198Scognet 1744129198Scognet/* 1745129198Scognet * 1746129198Scognet * pmap_find_pv: Find a pv entry 1747129198Scognet * 1748129198Scognet * => caller should hold lock on vm_page 1749129198Scognet */ 1750129198Scognetstatic PMAP_INLINE struct pv_entry * 1751129198Scognetpmap_find_pv(struct vm_page *pg, pmap_t pm, vm_offset_t va) 1752129198Scognet{ 1753129198Scognet struct pv_entry *pv; 1754129198Scognet 1755129198Scognet TAILQ_FOREACH(pv, &pg->md.pv_list, pv_list) 1756129198Scognet if (pm == pv->pv_pmap && va == pv->pv_va) 1757129198Scognet break;
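	/* pv is NULL here if no matching entry exists on the page's pv list. */
1758129198Scognet return (pv); 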
1759129198Scognet} 1760129198Scognet 1761129198Scognet/* 1762129198Scognet * vector_page_setprot: 1763129198Scognet * 1764129198Scognet * Manipulate the protection of the vector page. 1765129198Scognet */ 1766129198Scognetvoid 1767129198Scognetvector_page_setprot(int prot) 1768129198Scognet{ 1769129198Scognet struct l2_bucket *l2b; 1770129198Scognet pt_entry_t *ptep; 1771129198Scognet 1772129198Scognet l2b = pmap_get_l2_bucket(pmap_kernel(), vector_page); 1773129198Scognet 1774129198Scognet ptep = &l2b->l2b_kva[l2pte_index(vector_page)]; 1775129198Scognet 1776129198Scognet *ptep = (*ptep & ~L2_S_PROT_MASK) | L2_S_PROT(PTE_KERNEL, prot); 1777129198Scognet PTE_SYNC(ptep); 1778129198Scognet cpu_tlb_flushD_SE(vector_page); 1779129198Scognet cpu_cpwait(); 1780129198Scognet} 1781129198Scognet 1782129198Scognet/* 1783129198Scognet * pmap_remove_pv: try to remove a mapping from a pv_list 1784129198Scognet * 1785129198Scognet * => caller should hold proper lock on pmap_main_lock 1786129198Scognet * => pmap should be locked 1787129198Scognet * => caller should hold lock on vm_page [so that attrs can be adjusted] 1788129198Scognet * => caller should adjust ptp's wire_count and free PTP if needed 1789129198Scognet * => caller should NOT adjust pmap's wire_count 1790129198Scognet * => we return the removed pve 1791129198Scognet */ 1792135641Scognet 1793135641Scognetstatic void 1794135641Scognetpmap_nuke_pv(struct vm_page *pg, pmap_t pm, struct pv_entry *pve) 1795135641Scognet{ 1796135641Scognet 1797135641Scognet TAILQ_REMOVE(&pg->md.pv_list, pve, pv_list); 1798144760Scognet TAILQ_REMOVE(&pm->pm_pvlist, pve, pv_plist); 1799135641Scognet if (pve->pv_flags & PVF_WIRED) 1800135641Scognet --pm->pm_stats.wired_count; 1801135641Scognet pg->md.pv_list_count--; 1802144760Scognet if (pg->md.pvh_attrs & PVF_MOD) 1803144760Scognet vm_page_dirty(pg); 1804135641Scognet if (pm == pmap_kernel()) { 1805135641Scognet if (pve->pv_flags & PVF_WRITE) 1806135641Scognet pg->md.krw_mappings--; 1807135641Scognet else 1808135641Scognet pg->md.kro_mappings--; 1809135641Scognet } else 1810135641Scognet if (pve->pv_flags & PVF_WRITE) 1811135641Scognet pg->md.urw_mappings--; 1812135641Scognet else 1813135641Scognet pg->md.uro_mappings--; 1814144760Scognet if (TAILQ_FIRST(&pg->md.pv_list) == NULL || 1815144760Scognet (pg->md.krw_mappings == 0 && pg->md.urw_mappings == 0)) { 1816144760Scognet pg->md.pvh_attrs &= ~PVF_MOD; 1817144760Scognet if (TAILQ_FIRST(&pg->md.pv_list) == NULL) 1818144760Scognet pg->md.pvh_attrs &= ~PVF_REF; 1819137664Scognet vm_page_flag_clear(pg, PG_WRITEABLE); 1820146647Scognet } 1821144760Scognet if (TAILQ_FIRST(&pg->md.pv_list)) 1822144760Scognet vm_page_flag_set(pg, PG_REFERENCED); 1823144760Scognet if (pve->pv_flags & PVF_WRITE) 1824144760Scognet pmap_vac_me_harder(pg, pm, 0); 1825135641Scognet} 1826135641Scognet 1827129198Scognetstatic struct pv_entry * 1828129198Scognetpmap_remove_pv(struct vm_page *pg, pmap_t pm, vm_offset_t va) 1829129198Scognet{ 1830135641Scognet struct pv_entry *pve; 1831129198Scognet 1832135641Scognet pve = TAILQ_FIRST(&pg->md.pv_list); 1833129198Scognet
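	/*
	 * Walk the page's pv list; pmap_nuke_pv() does the actual
	 * unlinking and fixes up the mapping counts and page attributes.
	 */
1834129198Scognet while (pve) { 1835129198Scognet if (pve->pv_pmap == pm && pve->pv_va == va) { /* match? 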
*/ 1836135641Scognet pmap_nuke_pv(pg, pm, pve); 1837129198Scognet break; 1838129198Scognet } 1839129198Scognet pve = TAILQ_NEXT(pve, pv_list); 1840129198Scognet } 1841129198Scognet 1842129198Scognet return(pve); /* return removed pve */ 1843129198Scognet} 1844129198Scognet/* 1845129198Scognet * 1846129198Scognet * pmap_modify_pv: Update pv flags 1847129198Scognet * 1848129198Scognet * => caller should hold lock on vm_page [so that attrs can be adjusted] 1849129198Scognet * => caller should NOT adjust pmap's wire_count 1850129198Scognet * => caller must call pmap_vac_me_harder() if writable status of a page 1851129198Scognet * may have changed. 1852129198Scognet * => we return the old flags 1853129198Scognet * 1854129198Scognet * Modify a physical-virtual mapping in the pv table 1855129198Scognet */ 1856129198Scognetstatic u_int 1857129198Scognetpmap_modify_pv(struct vm_page *pg, pmap_t pm, vm_offset_t va, 1858129198Scognet u_int clr_mask, u_int set_mask) 1859129198Scognet{ 1860129198Scognet struct pv_entry *npv; 1861129198Scognet u_int flags, oflags; 1862129198Scognet 1863129198Scognet if ((npv = pmap_find_pv(pg, pm, va)) == NULL) 1864129198Scognet return (0); 1865129198Scognet 1866129198Scognet /* 1867129198Scognet * There is at least one VA mapping this page. 1868129198Scognet */ 1869129198Scognet 1870129198Scognet if (clr_mask & (PVF_REF | PVF_MOD)) 1871129198Scognet pg->md.pvh_attrs |= set_mask & (PVF_REF | PVF_MOD); 1872129198Scognet 1873129198Scognet oflags = npv->pv_flags; 1874129198Scognet npv->pv_flags = flags = (oflags & ~clr_mask) | set_mask; 1875129198Scognet 1876129198Scognet if ((flags ^ oflags) & PVF_WIRED) { 1877129198Scognet if (flags & PVF_WIRED) 1878129198Scognet ++pm->pm_stats.wired_count; 1879129198Scognet else 1880129198Scognet --pm->pm_stats.wired_count; 1881129198Scognet } 1882129198Scognet 1883129198Scognet if ((flags ^ oflags) & PVF_WRITE) { 1884129198Scognet if (pm == pmap_kernel()) { 1885129198Scognet if (flags & PVF_WRITE) { 1886129198Scognet pg->md.krw_mappings++; 1887129198Scognet pg->md.kro_mappings--; 1888129198Scognet } else { 1889129198Scognet pg->md.kro_mappings++; 1890129198Scognet pg->md.krw_mappings--; 1891129198Scognet } 1892129198Scognet } else 1893129198Scognet if (flags & PVF_WRITE) { 1894129198Scognet pg->md.urw_mappings++; 1895129198Scognet pg->md.uro_mappings--; 1896129198Scognet } else { 1897129198Scognet pg->md.uro_mappings++; 1898129198Scognet pg->md.urw_mappings--; 1899129198Scognet } 1900144760Scognet if (pg->md.krw_mappings == 0 && pg->md.urw_mappings == 0) { 1901144760Scognet pg->md.pvh_attrs &= ~PVF_MOD; 1902144760Scognet vm_page_flag_clear(pg, PG_WRITEABLE); 1903144760Scognet } 1904144760Scognet pmap_vac_me_harder(pg, pm, 0); 1905129198Scognet } 1906129198Scognet 1907129198Scognet return (oflags); 1908129198Scognet} 1909129198Scognet 1910129198Scognet/* Function to set the debug level of the pmap code */ 1911129198Scognet#ifdef PMAP_DEBUG 1912129198Scognetvoid 1913129198Scognetpmap_debug(int level) 1914129198Scognet{ 1915129198Scognet pmap_debug_level = level; 1916129198Scognet dprintf("pmap_debug: level=%d\n", pmap_debug_level); 1917129198Scognet} 1918129198Scognet#endif /* PMAP_DEBUG */ 1919129198Scognet 1920129198Scognetvoid 1921129198Scognetpmap_pinit0(struct pmap *pmap) 1922129198Scognet{ 1923129198Scognet PDEBUG(1, printf("pmap_pinit0: pmap = %08x\n", (u_int32_t) pmap)); 1924129198Scognet 1925129198Scognet dprintf("pmap_pinit0: pmap = %08x, pm_pdir = %08x\n", 1926129198Scognet (u_int32_t) pmap, (u_int32_t) pmap->pm_pdir); 
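	/*
	 * pmap0 is the bootstrap pmap; it shares everything with the
	 * kernel pmap, so simply start from a copy of it (the bcopy below).
	 */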
1927135641Scognet bcopy(kernel_pmap, pmap, sizeof(*pmap)); 1928129198Scognet} 1929129198Scognet 1930147217Salc/* 1931147217Salc * Initialize a vm_page's machine-dependent fields. 1932147217Salc */ 1933147217Salcvoid 1934147217Salcpmap_page_init(vm_page_t m) 1935147217Salc{ 1936129198Scognet 1937147217Salc TAILQ_INIT(&m->md.pv_list); 1938147217Salc m->md.pv_list_count = 0; 1939147217Salc} 1940147217Salc 1941129198Scognet/* 1942129198Scognet * Initialize the pmap module. 1943129198Scognet * Called by vm_init, to initialize any structures that the pmap 1944129198Scognet * system needs to map virtual memory. 1945129198Scognet */ 1946129198Scognetvoid 1947129198Scognetpmap_init(void) 1948129198Scognet{ 1949129198Scognet 1950129198Scognet PDEBUG(1, printf("pmap_init:\n")); 1951147114Scognet 1952129198Scognet /* 1953129198Scognet * init the pv free list 1954129198Scognet */ 1955129198Scognet pvzone = uma_zcreate("PV ENTRY", sizeof (struct pv_entry), NULL, NULL, 1956129198Scognet NULL, NULL, UMA_ALIGN_PTR, UMA_ZONE_VM | UMA_ZONE_NOFREE); 1957129198Scognet uma_prealloc(pvzone, MINPV); 1958129198Scognet /* 1959129198Scognet * Now it is safe to enable pv_table recording. 1960129198Scognet */ 1961129198Scognet pmap_initialized = TRUE; 1962129198Scognet PDEBUG(1, printf("pmap_init: done!\n")); 1963147114Scognet 1964129198Scognet} 1965129198Scognet 1966129198Scognetint 1967129198Scognetpmap_fault_fixup(pmap_t pm, vm_offset_t va, vm_prot_t ftype, int user) 1968129198Scognet{ 1969129198Scognet struct l2_dtable *l2; 1970129198Scognet struct l2_bucket *l2b; 1971129198Scognet pd_entry_t *pl1pd, l1pd; 1972129198Scognet pt_entry_t *ptep, pte; 1973129198Scognet vm_paddr_t pa; 1974129198Scognet u_int l1idx; 1975129198Scognet int rv = 0; 1976129198Scognet 1977129198Scognet#if 0 1978129198Scognet PMAP_MAP_TO_HEAD_LOCK(); 1979129198Scognet pmap_acquire_pmap_lock(pm); 1980129198Scognet#endif 1981129198Scognet l1idx = L1_IDX(va); 1982129198Scognet 1983129198Scognet /* 1984129198Scognet * If there is no l2_dtable for this address, then the process 1985129198Scognet * has no business accessing it. 1986129198Scognet * 1987129198Scognet * Note: This will catch userland processes trying to access 1988129198Scognet * kernel addresses. 1989129198Scognet */ 1990129198Scognet l2 = pm->pm_l2[L2_IDX(l1idx)]; 1991129198Scognet if (l2 == NULL) 1992129198Scognet goto out; 1993129198Scognet 1994129198Scognet /* 1995129198Scognet * Likewise if there is no L2 descriptor table 1996129198Scognet */ 1997129198Scognet l2b = &l2->l2_bucket[L2_BUCKET(l1idx)]; 1998129198Scognet if (l2b->l2b_kva == NULL) 1999129198Scognet goto out; 2000129198Scognet 2001129198Scognet /* 2002129198Scognet * Check the PTE itself. 2003129198Scognet */ 2004129198Scognet ptep = &l2b->l2b_kva[l2pte_index(va)]; 2005129198Scognet pte = *ptep; 2006129198Scognet if (pte == 0) 2007129198Scognet goto out; 2008129198Scognet 2009129198Scognet /* 2010129198Scognet * Catch a userland access to the vector page mapped at 0x0 2011129198Scognet */ 2012129198Scognet if (user && (pte & L2_S_PROT_U) == 0) 2013129198Scognet goto out; 2014129198Scognet 2015129198Scognet pa = l2pte_pa(pte); 2016129198Scognet
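	/*
	 * Summary of the fixup logic below: ARM has no hardware
	 * modified/referenced bits, so they are emulated.  Mappings are
	 * installed read-only and/or invalid; the first write (or
	 * reference) then faults into this function, which records
	 * PVF_MOD/PVF_REF, upgrades the PTE (setting L2_S_PROT_W, or a
	 * valid L2 type), flushes the stale TLB entry and lets the
	 * faulting instruction restart.
	 */
2017129198Scognet if ((ftype & VM_PROT_WRITE) && (pte & L2_S_PROT_W) == 0) { 2018129198Scognet /* 2019129198Scognet * This looks like a good candidate for "page modified" 2020129198Scognet * emulation... 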
2021129198Scognet */ 2022129198Scognet struct pv_entry *pv; 2023129198Scognet struct vm_page *pg; 2024129198Scognet 2025129198Scognet /* Extract the physical address of the page */ 2026129198Scognet if ((pg = PHYS_TO_VM_PAGE(pa)) == NULL) { 2027129198Scognet goto out; 2028129198Scognet } 2029129198Scognet /* Get the current flags for this page. */ 2030129198Scognet 2031129198Scognet pv = pmap_find_pv(pg, pm, va); 2032129198Scognet if (pv == NULL) { 2033129198Scognet goto out; 2034129198Scognet } 2035129198Scognet 2036129198Scognet /* 2037129198Scognet * Do the flags say this page is writable? If not then it 2038129198Scognet * is a genuine write fault. If yes then the write fault is 2039129198Scognet * our fault as we did not reflect the write access in the 2040129198Scognet * PTE. Now we know a write has occurred we can correct this 2041129198Scognet * and also set the modified bit 2042129198Scognet */ 2043129198Scognet if ((pv->pv_flags & PVF_WRITE) == 0) { 2044129198Scognet goto out; 2045129198Scognet } 2046129198Scognet 2047144760Scognet if (pmap_track_modified(pv->pv_va)) { 2048144760Scognet pg->md.pvh_attrs |= PVF_REF | PVF_MOD; 2049144760Scognet vm_page_dirty(pg); 2050144760Scognet } 2051129198Scognet pv->pv_flags |= PVF_REF | PVF_MOD; 2052129198Scognet 2053129198Scognet /* 2054129198Scognet * Re-enable write permissions for the page. No need to call 2055129198Scognet * pmap_vac_me_harder(), since this is just a 2056129198Scognet * modified-emulation fault, and the PVF_WRITE bit isn't 2057129198Scognet * changing. We've already set the cacheable bits based on 2058129198Scognet * the assumption that we can write to this page. 2059129198Scognet */ 2060147114Scognet *ptep = (pte & ~L2_TYPE_MASK) | L2_S_PROTO | L2_S_PROT_W; 2061129198Scognet PTE_SYNC(ptep); 2062129198Scognet rv = 1; 2063129198Scognet } else 2064129198Scognet if ((pte & L2_TYPE_MASK) == L2_TYPE_INV) { 2065129198Scognet /* 2066129198Scognet * This looks like a good candidate for "page referenced" 2067129198Scognet * emulation. 2068129198Scognet */ 2069129198Scognet struct pv_entry *pv; 2070129198Scognet struct vm_page *pg; 2071129198Scognet 2072129198Scognet /* Extract the physical address of the page */ 2073144760Scognet vm_page_lock_queues(); 2074144760Scognet if ((pg = PHYS_TO_VM_PAGE(pa)) == NULL) { 2075144760Scognet vm_page_unlock_queues(); 2076129198Scognet goto out; 2077144760Scognet } 2078129198Scognet /* Get the current flags for this page. */ 2079129198Scognet 2080129198Scognet pv = pmap_find_pv(pg, pm, va); 2081129198Scognet if (pv == NULL) { 2082144760Scognet vm_page_unlock_queues(); 2083129198Scognet goto out; 2084129198Scognet } 2085129198Scognet 2086129198Scognet pg->md.pvh_attrs |= PVF_REF; 2087129198Scognet pv->pv_flags |= PVF_REF; 2088129198Scognet 2089129198Scognet 2090129198Scognet *ptep = (pte & ~L2_TYPE_MASK) | L2_S_PROTO; 2091129198Scognet PTE_SYNC(ptep); 2092129198Scognet rv = 1; 2093144760Scognet vm_page_unlock_queues(); 2094129198Scognet } 2095129198Scognet 2096129198Scognet /* 2097129198Scognet * We know there is a valid mapping here, so simply 2098129198Scognet * fix up the L1 if necessary. 
2099129198Scognet */ 2100129198Scognet pl1pd = &pm->pm_l1->l1_kva[l1idx]; 2101129198Scognet l1pd = l2b->l2b_phys | L1_C_DOM(pm->pm_domain) | L1_C_PROTO; 2102129198Scognet if (*pl1pd != l1pd) { 2103129198Scognet *pl1pd = l1pd; 2104129198Scognet PTE_SYNC(pl1pd); 2105129198Scognet rv = 1; 2106129198Scognet } 2107129198Scognet 2108129198Scognet#ifdef CPU_SA110 2109129198Scognet /* 2110129198Scognet * There are bugs in the rev K SA110. This is a check for one 2111129198Scognet * of them. 2112129198Scognet */ 2113129198Scognet if (rv == 0 && curcpu()->ci_arm_cputype == CPU_ID_SA110 && 2114129198Scognet curcpu()->ci_arm_cpurev < 3) { 2115129198Scognet /* Always current pmap */ 2116129198Scognet if (l2pte_valid(pte)) { 2117129198Scognet extern int kernel_debug; 2118129198Scognet if (kernel_debug & 1) { 2119129198Scognet struct proc *p = curlwp->l_proc; 2120129198Scognet printf("prefetch_abort: page is already " 2121129198Scognet "mapped - pte=%p *pte=%08x\n", ptep, pte); 2122129198Scognet printf("prefetch_abort: pc=%08lx proc=%p " 2123129198Scognet "process=%s\n", va, p, p->p_comm); 2124129198Scognet printf("prefetch_abort: far=%08x fs=%x\n", 2125129198Scognet cpu_faultaddress(), cpu_faultstatus()); 2126129198Scognet } 2127129198Scognet#ifdef DDB 2128129198Scognet if (kernel_debug & 2) 2129129198Scognet Debugger(); 2130129198Scognet#endif 2131129198Scognet rv = 1; 2132129198Scognet } 2133129198Scognet } 2134129198Scognet#endif /* CPU_SA110 */ 2135129198Scognet 2136129198Scognet#ifdef DEBUG 2137129198Scognet /* 2138129198Scognet * If 'rv == 0' at this point, it generally indicates that there is a 2139129198Scognet * stale TLB entry for the faulting address. This happens when two or 2140129198Scognet * more processes are sharing an L1. Since we don't flush the TLB on 2141129198Scognet * a context switch between such processes, we can take domain faults 2142129198Scognet * for mappings which exist at the same VA in both processes. EVEN IF 2143129198Scognet * WE'VE RECENTLY FIXED UP THE CORRESPONDING L1 in pmap_enter(), for 2144129198Scognet * example. 2145129198Scognet * 2146129198Scognet * This is extremely likely to happen if pmap_enter() updated the L1 2147129198Scognet * entry for a recently entered mapping. In this case, the TLB is 2148129198Scognet * flushed for the new mapping, but there may still be TLB entries for 2149129198Scognet * other mappings belonging to other processes in the 1MB range 2150129198Scognet * covered by the L1 entry. 2151129198Scognet * 2152129198Scognet * Since 'rv == 0', we know that the L1 already contains the correct 2153129198Scognet * value, so the fault must be due to a stale TLB entry. 2154129198Scognet * 2155129198Scognet * Since we always need to flush the TLB anyway in the case where we 2156129198Scognet * fixed up the L1, or frobbed the L2 PTE, we effectively deal with 2157129198Scognet * stale TLB entries dynamically. 2158129198Scognet * 2159129198Scognet * However, the above condition can ONLY happen if the current L1 is 2160129198Scognet * being shared. If it happens when the L1 is unshared, it indicates 2161129198Scognet * that other parts of the pmap are not doing their job WRT managing 2162129198Scognet * the TLB. 
2163129198Scognet */ 2164129198Scognet if (rv == 0 && pm->pm_l1->l1_domain_use_count == 1) { 2165129198Scognet extern int last_fault_code; 2166129198Scognet printf("fixup: pm %p, va 0x%lx, ftype %d - nothing to do!\n", 2167129198Scognet pm, va, ftype); 2168129198Scognet printf("fixup: l2 %p, l2b %p, ptep %p, pl1pd %p\n", 2169129198Scognet l2, l2b, ptep, pl1pd); 2170129198Scognet printf("fixup: pte 0x%x, l1pd 0x%x, last code 0x%x\n", 2171129198Scognet pte, l1pd, last_fault_code); 2172129198Scognet#ifdef DDB 2173129198Scognet Debugger(); 2174129198Scognet#endif 2175129198Scognet } 2176129198Scognet#endif 2177129198Scognet 2178129198Scognet cpu_tlb_flushID_SE(va); 2179129198Scognet cpu_cpwait(); 2180129198Scognet 2181129198Scognet rv = 1; 2182129198Scognet 2183129198Scognetout: 2184129198Scognet#if 0 2185129198Scognet pmap_release_pmap_lock(pm); 2186129198Scognet PMAP_MAP_TO_HEAD_UNLOCK(); 2187129198Scognet#endif 2188129198Scognet return (rv); 2189129198Scognet} 2190129198Scognet 2191129198Scognet/* 2192129198Scognet * Initialize the address space (zone) for the pv_entries. Set a 2193129198Scognet * high water mark so that the system can recover from excessive 2194129198Scognet * numbers of pv entries. 2195129198Scognet */ 2196129198Scognetvoid 2197129198Scognetpmap_init2() 2198129198Scognet{ 2199129198Scognet int shpgperproc = PMAP_SHPGPERPROC; 2200129198Scognet struct l2_bucket *l2b; 2201129198Scognet struct l1_ttable *l1; 2202129198Scognet pd_entry_t *pl1pt; 2203129198Scognet pt_entry_t *ptep, pte; 2204129198Scognet vm_offset_t va, eva; 2205129198Scognet u_int loop, needed; 2206129198Scognet 2207129198Scognet TUNABLE_INT_FETCH("vm.pmap.shpgperproc", &shpgperproc); 2208129198Scognet 2209129198Scognet pv_entry_max = shpgperproc * maxproc + vm_page_array_size; 2210129198Scognet pv_entry_high_water = 9 * (pv_entry_max / 10); 2211129198Scognet l2zone = uma_zcreate("L2 Table", L2_TABLE_SIZE_REAL, pmap_l2ptp_ctor, 2212129198Scognet NULL, NULL, NULL, UMA_ALIGN_PTR, UMA_ZONE_VM | UMA_ZONE_NOFREE); 2213135641Scognet uma_prealloc(l2zone, 4096); 2214137663Scognet l2table_zone = uma_zcreate("L2 Table", sizeof(struct l2_dtable), 2215137663Scognet NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 2216137663Scognet UMA_ZONE_VM | UMA_ZONE_NOFREE); 2217137663Scognet uma_prealloc(l2table_zone, 1024); 2218137663Scognet 2219129198Scognet uma_zone_set_obj(pvzone, &pvzone_obj, pv_entry_max); 2220129198Scognet uma_zone_set_obj(l2zone, &l2zone_obj, pv_entry_max); 2221129198Scognet 2222129198Scognet needed = (maxproc / PMAP_DOMAINS) + ((maxproc % PMAP_DOMAINS) ? 
1 : 0); 2223129198Scognet needed -= 1; 2224129198Scognet l1 = malloc(sizeof(*l1) * needed, M_VMPMAP, M_WAITOK); 2225129198Scognet 2226129198Scognet for (loop = 0; loop < needed; loop++, l1++) { 2227129198Scognet /* Allocate a L1 page table */ 2228132503Scognet va = (vm_offset_t)contigmalloc(L1_TABLE_SIZE, M_VMPMAP, 0, 0x0, 2229132503Scognet 0xffffffff, L1_TABLE_SIZE, 0); 2230129198Scognet 2231129198Scognet if (va == 0) 2232129198Scognet panic("Cannot allocate L1 KVM"); 2233129198Scognet 2234129198Scognet eva = va + L1_TABLE_SIZE; 2235129198Scognet pl1pt = (pd_entry_t *)va; 2236129198Scognet 2237135641Scognet while (va < eva) { 2238129198Scognet l2b = pmap_get_l2_bucket(pmap_kernel(), va); 2239129198Scognet ptep = &l2b->l2b_kva[l2pte_index(va)]; 2240129198Scognet pte = *ptep; 2241129198Scognet pte = (pte & ~L2_S_CACHE_MASK) | pte_l2_s_cache_mode_pt; 2242129198Scognet *ptep = pte; 2243129198Scognet PTE_SYNC(ptep); 2244129198Scognet cpu_tlb_flushD_SE(va); 2245129198Scognet 2246129198Scognet va += PAGE_SIZE; 2247129198Scognet } 2248129198Scognet pmap_init_l1(l1, pl1pt); 2249129198Scognet } 2250129198Scognet 2251129198Scognet 2252129198Scognet#ifdef DEBUG 2253129198Scognet printf("pmap_postinit: Allocated %d static L1 descriptor tables\n", 2254129198Scognet needed); 2255129198Scognet#endif 2256129198Scognet} 2257129198Scognet 2258129198Scognet/* 2259129198Scognet * This is used to stuff certain critical values into the PCB where they 2260129198Scognet * can be accessed quickly from cpu_switch() et al. 2261129198Scognet */ 2262129198Scognetvoid 2263129198Scognetpmap_set_pcb_pagedir(pmap_t pm, struct pcb *pcb) 2264129198Scognet{ 2265129198Scognet struct l2_bucket *l2b; 2266129198Scognet 2267129198Scognet pcb->pcb_pagedir = pm->pm_l1->l1_physaddr; 2268129198Scognet pcb->pcb_dacr = (DOMAIN_CLIENT << (PMAP_DOMAIN_KERNEL * 2)) | 2269129198Scognet (DOMAIN_CLIENT << (pm->pm_domain * 2)); 2270129198Scognet 2271129198Scognet if (vector_page < KERNBASE) { 2272129198Scognet pcb->pcb_pl1vec = &pm->pm_l1->l1_kva[L1_IDX(vector_page)]; 2273129198Scognet l2b = pmap_get_l2_bucket(pm, vector_page); 2274129198Scognet pcb->pcb_l1vec = l2b->l2b_phys | L1_C_PROTO | 2275145071Scognet L1_C_DOM(pm->pm_domain) | L1_C_DOM(PMAP_DOMAIN_KERNEL); 2276129198Scognet } else 2277129198Scognet pcb->pcb_pl1vec = NULL; 2278129198Scognet} 2279129198Scognet 2280129198Scognetvoid 2281129198Scognetpmap_activate(struct thread *td) 2282129198Scognet{ 2283129198Scognet pmap_t pm; 2284129198Scognet struct pcb *pcb; 2285129198Scognet int s; 2286129198Scognet 2287135641Scognet pm = vmspace_pmap(td->td_proc->p_vmspace); 2288129198Scognet pcb = td->td_pcb; 2289129198Scognet 2290129198Scognet critical_enter(); 2291129198Scognet pmap_set_pcb_pagedir(pm, pcb); 2292129198Scognet 2293129198Scognet if (td == curthread) { 2294129198Scognet u_int cur_dacr, cur_ttb; 2295129198Scognet 2296129198Scognet __asm __volatile("mrc p15, 0, %0, c2, c0, 0" : "=r"(cur_ttb)); 2297129198Scognet __asm __volatile("mrc p15, 0, %0, c3, c0, 0" : "=r"(cur_dacr)); 2298129198Scognet 2299129198Scognet cur_ttb &= ~(L1_TABLE_SIZE - 1); 2300129198Scognet 2301129198Scognet if (cur_ttb == (u_int)pcb->pcb_pagedir && 2302129198Scognet cur_dacr == pcb->pcb_dacr) { 2303129198Scognet /* 2304129198Scognet * No need to switch address spaces. 
2305129198Scognet */ 2306129198Scognet critical_exit(); 2307129198Scognet return; 2308129198Scognet } 2309129198Scognet 2310129198Scognet 2311129198Scognet /* 2312129198Scognet * We MUST, I repeat, MUST fix up the L1 entry corresponding 2313129198Scognet * to 'vector_page' in the incoming L1 table before switching 2314129198Scognet * to it otherwise subsequent interrupts/exceptions (including 2315129198Scognet * domain faults!) will jump into hyperspace. 2316129198Scognet */ 2317129198Scognet if (pcb->pcb_pl1vec) { 2318129198Scognet 2319129198Scognet *pcb->pcb_pl1vec = pcb->pcb_l1vec; 2320129198Scognet /* 2321129198Scognet * Don't need to PTE_SYNC() at this point since 2322129198Scognet * cpu_setttb() is about to flush both the cache 2323129198Scognet * and the TLB. 2324129198Scognet */ 2325129198Scognet } 2326129198Scognet 2327129198Scognet cpu_domains(pcb->pcb_dacr); 2328129198Scognet cpu_setttb(pcb->pcb_pagedir); 2329129198Scognet 2330129198Scognet splx(s); 2331129198Scognet } 2332129198Scognet critical_exit(); 2333129198Scognet} 2334129198Scognet 2335129198Scognetstatic int 2336129198Scognetpmap_set_pt_cache_mode(pd_entry_t *kl1, vm_offset_t va) 2337129198Scognet{ 2338129198Scognet pd_entry_t *pdep, pde; 2339129198Scognet pt_entry_t *ptep, pte; 2340129198Scognet vm_offset_t pa; 2341129198Scognet int rv = 0; 2342129198Scognet 2343129198Scognet /* 2344129198Scognet * Make sure the descriptor itself has the correct cache mode 2345129198Scognet */ 2346129198Scognet pdep = &kl1[L1_IDX(va)]; 2347129198Scognet pde = *pdep; 2348129198Scognet 2349129198Scognet if (l1pte_section_p(pde)) { 2350129198Scognet if ((pde & L1_S_CACHE_MASK) != pte_l1_s_cache_mode_pt) { 2351129198Scognet *pdep = (pde & ~L1_S_CACHE_MASK) | 2352129198Scognet pte_l1_s_cache_mode_pt; 2353129198Scognet PTE_SYNC(pdep); 2354129198Scognet cpu_dcache_wbinv_range((vm_offset_t)pdep, 2355129198Scognet sizeof(*pdep)); 2356129198Scognet rv = 1; 2357129198Scognet } 2358129198Scognet } else { 2359129198Scognet pa = (vm_paddr_t)(pde & L1_C_ADDR_MASK); 2360129198Scognet ptep = (pt_entry_t *)kernel_pt_lookup(pa); 2361129198Scognet if (ptep == NULL) 2362129198Scognet panic("pmap_bootstrap: No L2 for L2 @ va %p\n", ptep); 2363129198Scognet 2364129198Scognet ptep = &ptep[l2pte_index(va)]; 2365129198Scognet pte = *ptep; 2366129198Scognet if ((pte & L2_S_CACHE_MASK) != pte_l2_s_cache_mode_pt) { 2367129198Scognet *ptep = (pte & ~L2_S_CACHE_MASK) | 2368129198Scognet pte_l2_s_cache_mode_pt; 2369129198Scognet PTE_SYNC(ptep); 2370129198Scognet cpu_dcache_wbinv_range((vm_offset_t)ptep, 2371129198Scognet sizeof(*ptep)); 2372129198Scognet rv = 1; 2373129198Scognet } 2374129198Scognet } 2375129198Scognet 2376129198Scognet return (rv); 2377129198Scognet} 2378129198Scognet 2379129198Scognetstatic void 2380129198Scognetpmap_alloc_specials(vm_offset_t *availp, int pages, vm_offset_t *vap, 2381129198Scognet pt_entry_t **ptep) 2382129198Scognet{ 2383129198Scognet vm_offset_t va = *availp; 2384129198Scognet struct l2_bucket *l2b; 2385129198Scognet 2386129198Scognet if (ptep) { 2387129198Scognet l2b = pmap_get_l2_bucket(pmap_kernel(), va); 2388129198Scognet if (l2b == NULL) 2389129198Scognet panic("pmap_alloc_specials: no l2b for 0x%x", va); 2390129198Scognet 2391129198Scognet *ptep = &l2b->l2b_kva[l2pte_index(va)]; 2392129198Scognet } 2393129198Scognet 2394129198Scognet *vap = va; 2395129198Scognet *availp = va + (PAGE_SIZE * pages); 2396129198Scognet} 2397129198Scognet 2398129198Scognet/* 2399129198Scognet * Bootstrap the system enough to run with virtual 
memory. 2400129198Scognet * 2401129198Scognet * On the arm this is called after mapping has already been enabled 2402129198Scognet * and just syncs the pmap module with what has already been done. 2403129198Scognet * [We can't call it easily with mapping off since the kernel is not 2404129198Scognet * mapped with PA == VA, hence we would have to relocate every address 2405129198Scognet * from the linked base (virtual) address "KERNBASE" to the actual 2406129198Scognet * (physical) address starting relative to 0] 2407129198Scognet */ 2408129198Scognet#define PMAP_STATIC_L2_SIZE 16 2409147114Scognet#ifdef ARM_USE_SMALL_ALLOC 2410147114Scognetextern struct mtx smallalloc_mtx; 2411147114Scognetextern vm_offset_t alloc_curaddr; 2412147114Scognet#endif 2413147114Scognet 2414129198Scognetvoid 2415129198Scognetpmap_bootstrap(vm_offset_t firstaddr, vm_offset_t lastaddr, struct pv_addr *l1pt) 2416129198Scognet{ 2417129198Scognet static struct l1_ttable static_l1; 2418129198Scognet static struct l2_dtable static_l2[PMAP_STATIC_L2_SIZE]; 2419129198Scognet struct l1_ttable *l1 = &static_l1; 2420129198Scognet struct l2_dtable *l2; 2421129198Scognet struct l2_bucket *l2b; 2422129198Scognet pd_entry_t pde; 2423129198Scognet pd_entry_t *kernel_l1pt = (pd_entry_t *)l1pt->pv_va; 2424129198Scognet pt_entry_t *ptep; 2425129198Scognet vm_paddr_t pa; 2426129198Scognet vm_offset_t va; 2427135641Scognet vm_size_t size; 2428129198Scognet int l1idx, l2idx, l2next = 0; 2429129198Scognet 2430129198Scognet PDEBUG(1, printf("firstaddr = %08x, lastaddr = %08x\n", 2431129198Scognet firstaddr, lastaddr)); 2432129198Scognet 2433129198Scognet virtual_avail = firstaddr; 2434129198Scognet kernel_pmap = &kernel_pmap_store; 2435129198Scognet kernel_pmap->pm_l1 = l1; 2436143192Scognet 2437143192Scognet /* 2438129198Scognet * Scan the L1 translation table created by initarm() and create 2439129198Scognet * the required metadata for all valid mappings found in it. 2440129198Scognet */ 2441129198Scognet for (l1idx = 0; l1idx < (L1_TABLE_SIZE / sizeof(pd_entry_t)); l1idx++) { 2442129198Scognet pde = kernel_l1pt[l1idx]; 2443129198Scognet 2444129198Scognet /* 2445129198Scognet * We're only interested in Coarse mappings. 2446129198Scognet * pmap_extract() can deal with section mappings without 2447129198Scognet * recourse to checking L2 metadata. 2448129198Scognet */ 2449129198Scognet if ((pde & L1_TYPE_MASK) != L1_TYPE_C) 2450129198Scognet continue; 2451129198Scognet 2452129198Scognet /* 2453129198Scognet * Lookup the KVA of this L2 descriptor table 2454129198Scognet */ 2455129198Scognet pa = (vm_paddr_t)(pde & L1_C_ADDR_MASK); 2456129198Scognet ptep = (pt_entry_t *)kernel_pt_lookup(pa); 2457129198Scognet 2458129198Scognet if (ptep == NULL) { 2459129198Scognet panic("pmap_bootstrap: No L2 for va 0x%x, pa 0x%lx", 2460129198Scognet (u_int)l1idx << L1_S_SHIFT, (long unsigned int)pa); 2461129198Scognet } 2462129198Scognet 2463129198Scognet /* 2464129198Scognet * Fetch the associated L2 metadata structure. 2465129198Scognet * Allocate a new one if necessary. 2466129198Scognet */ 2467129198Scognet if ((l2 = kernel_pmap->pm_l2[L2_IDX(l1idx)]) == NULL) { 2468129198Scognet if (l2next == PMAP_STATIC_L2_SIZE) 2469129198Scognet panic("pmap_bootstrap: out of static L2s"); 2470129198Scognet kernel_pmap->pm_l2[L2_IDX(l1idx)] = l2 = 2471129198Scognet &static_l2[l2next++]; 2472129198Scognet } 2473129198Scognet 2474129198Scognet /* 2475129198Scognet * One more L1 slot tracked...
2476129198Scognet */ 2477129198Scognet l2->l2_occupancy++; 2478129198Scognet 2479129198Scognet /* 2480129198Scognet * Fill in the details of the L2 descriptor in the 2481129198Scognet * appropriate bucket. 2482129198Scognet */ 2483129198Scognet l2b = &l2->l2_bucket[L2_BUCKET(l1idx)]; 2484129198Scognet l2b->l2b_kva = ptep; 2485129198Scognet l2b->l2b_phys = pa; 2486129198Scognet l2b->l2b_l1idx = l1idx; 2487129198Scognet 2488129198Scognet /* 2489129198Scognet * Establish an initial occupancy count for this descriptor 2490129198Scognet */ 2491129198Scognet for (l2idx = 0; 2492129198Scognet l2idx < (L2_TABLE_SIZE_REAL / sizeof(pt_entry_t)); 2493129198Scognet l2idx++) { 2494129198Scognet if ((ptep[l2idx] & L2_TYPE_MASK) != L2_TYPE_INV) { 2495129198Scognet l2b->l2b_occupancy++; 2496129198Scognet } 2497129198Scognet } 2498129198Scognet 2499129198Scognet /* 2500129198Scognet * Make sure the descriptor itself has the correct cache mode. 2501129198Scognet * If not, fix it, but whine about the problem. Port-meisters 2502129198Scognet * should consider this a clue to fix up their initarm() 2503129198Scognet * function. :) 2504129198Scognet */ 2505129198Scognet if (pmap_set_pt_cache_mode(kernel_l1pt, (vm_offset_t)ptep)) { 2506129198Scognet printf("pmap_bootstrap: WARNING! wrong cache mode for " 2507129198Scognet "L2 pte @ %p\n", ptep); 2508129198Scognet } 2509129198Scognet } 2510129198Scognet 2511129198Scognet 2512129198Scognet /* 2513129198Scognet * Ensure the primary (kernel) L1 has the correct cache mode for 2514129198Scognet * a page table. Bitch if it is not correctly set. 2515129198Scognet */ 2516129198Scognet for (va = (vm_offset_t)kernel_l1pt; 2517129198Scognet va < ((vm_offset_t)kernel_l1pt + L1_TABLE_SIZE); va += PAGE_SIZE) { 2518129198Scognet if (pmap_set_pt_cache_mode(kernel_l1pt, va)) 2519129198Scognet printf("pmap_bootstrap: WARNING! wrong cache mode for " 2520129198Scognet "primary L1 @ 0x%x\n", va); 2521129198Scognet } 2522129198Scognet 2523129198Scognet cpu_dcache_wbinv_all(); 2524129198Scognet cpu_tlb_flushID(); 2525129198Scognet cpu_cpwait(); 2526129198Scognet 2527129198Scognet kernel_pmap->pm_active = -1; 2528129198Scognet kernel_pmap->pm_domain = PMAP_DOMAIN_KERNEL; 2529129198Scognet LIST_INIT(&allpmaps); 2530144760Scognet TAILQ_INIT(&kernel_pmap->pm_pvlist); 2531129198Scognet LIST_INSERT_HEAD(&allpmaps, kernel_pmap, pm_list); 2532129198Scognet 2533129198Scognet /* 2534129198Scognet * Reserve some special page table entries/VA space for temporary 2535129198Scognet * mapping of pages. 
2536129198Scognet */ 2537129198Scognet#define SYSMAP(c, p, v, n) \ 2538129198Scognet v = (c)va; va += ((n)*PAGE_SIZE); p = pte; pte += (n); 2539129198Scognet 2540129198Scognet pmap_alloc_specials(&virtual_avail, 1, &csrcp, &csrc_pte); 2541129198Scognet pmap_set_pt_cache_mode(kernel_l1pt, (vm_offset_t)csrc_pte); 2542129198Scognet pmap_alloc_specials(&virtual_avail, 1, &cdstp, &cdst_pte); 2543129198Scognet pmap_set_pt_cache_mode(kernel_l1pt, (vm_offset_t)cdst_pte); 2544135641Scognet size = ((lastaddr - pmap_curmaxkvaddr) + L1_S_OFFSET) / L1_S_SIZE; 2545135641Scognet pmap_alloc_specials(&virtual_avail, 2546135641Scognet round_page(size * L2_TABLE_SIZE_REAL) / PAGE_SIZE, 2547135641Scognet &pmap_kernel_l2ptp_kva, NULL); 2548135641Scognet 2549135641Scognet size = (size + (L2_BUCKET_SIZE - 1)) / L2_BUCKET_SIZE; 2550135641Scognet pmap_alloc_specials(&virtual_avail, 2551135641Scognet round_page(size * sizeof(struct l2_dtable)) / PAGE_SIZE, 2552135641Scognet &pmap_kernel_l2dtable_kva, NULL); 2553135641Scognet 2554137362Scognet pmap_alloc_specials(&virtual_avail, 2555137362Scognet 1, (vm_offset_t*)&_tmppt, NULL); 2556135641Scognet SLIST_INIT(&l1_list); 2557129198Scognet TAILQ_INIT(&l1_lru_list); 2558129198Scognet mtx_init(&l1_lru_lock, "l1 list lock", NULL, MTX_DEF); 2559129198Scognet pmap_init_l1(l1, kernel_l1pt); 2560129198Scognet cpu_dcache_wbinv_all(); 2561129198Scognet 2562129198Scognet virtual_avail = round_page(virtual_avail); 2563129198Scognet virtual_end = lastaddr; 2564135641Scognet kernel_vm_end = pmap_curmaxkvaddr; 2565147114Scognet#ifdef ARM_USE_SMALL_ALLOC 2566147114Scognet mtx_init(&smallalloc_mtx, "Small alloc page list", NULL, MTX_DEF); 2567147114Scognet alloc_curaddr = lastaddr; 2568147114Scognet#endif 2569129198Scognet} 2570129198Scognet 2571129198Scognet/*************************************************** 2572129198Scognet * Pmap allocation/deallocation routines. 2573129198Scognet ***************************************************/ 2574129198Scognet 2575129198Scognet/* 2576129198Scognet * Release any resources held by the given physical map. 2577129198Scognet * Called when a pmap initialized by pmap_pinit is being released. 2578129198Scognet * Should only be called if the map contains no valid mappings. 2579129198Scognet */ 2580129198Scognetvoid 2581129198Scognetpmap_release(pmap_t pmap) 2582129198Scognet{ 2583135641Scognet struct pcb *pcb; 2584135641Scognet 2585135641Scognet pmap_idcache_wbinv_all(pmap); 2586135641Scognet pmap_tlb_flushID(pmap); 2587135641Scognet cpu_cpwait(); 2588135641Scognet LIST_REMOVE(pmap, pm_list); 2589135641Scognet if (vector_page < KERNBASE) { 2590135641Scognet struct pcb *curpcb = PCPU_GET(curpcb); 2591135641Scognet pcb = thread0.td_pcb; 2592135641Scognet if (pmap_is_current(pmap)) { 2593135641Scognet /* 2594135641Scognet * Frob the L1 entry corresponding to the vector 2595135641Scognet * page so that it contains the kernel pmap's domain 2596135641Scognet * number. This will ensure pmap_remove() does not 2597135641Scognet * pull the current vector page out from under us. 2598135641Scognet */ 2599135641Scognet critical_enter(); 2600135641Scognet *pcb->pcb_pl1vec = pcb->pcb_l1vec; 2601135641Scognet cpu_domains(pcb->pcb_dacr); 2602135641Scognet cpu_setttb(pcb->pcb_pagedir); 2603135641Scognet critical_exit(); 2604135641Scognet } 2605135641Scognet pmap_remove(pmap, vector_page, vector_page + PAGE_SIZE); 2606135641Scognet /* 2607135641Scognet * Make sure cpu_switch(), et al, DTRT. 
This is safe to do 2608135641Scognet * since this process has no remaining mappings of its own. 2609135641Scognet */ 2610135641Scognet curpcb->pcb_pl1vec = pcb->pcb_pl1vec; 2611135641Scognet curpcb->pcb_l1vec = pcb->pcb_l1vec; 2612135641Scognet curpcb->pcb_dacr = pcb->pcb_dacr; 2613135641Scognet curpcb->pcb_pagedir = pcb->pcb_pagedir; 2614135641Scognet 2615135641Scognet } 2616129198Scognet pmap_free_l1(pmap); 2617135641Scognet 2618129198Scognet dprintf("pmap_release()\n"); 2619129198Scognet} 2620129198Scognet 2621129198Scognet 2622135641Scognet 2623129198Scognet/* 2624135641Scognet * Helper function for pmap_grow_l2_bucket() 2625135641Scognet */ 2626135641Scognetstatic __inline int 2627135641Scognetpmap_grow_map(vm_offset_t va, pt_entry_t cache_mode, vm_paddr_t *pap) 2628135641Scognet{ 2629135641Scognet struct l2_bucket *l2b; 2630135641Scognet pt_entry_t *ptep; 2631135641Scognet vm_paddr_t pa; 2632135641Scognet struct vm_page *pg; 2633135641Scognet 2634144760Scognet pg = vm_page_alloc(NULL, 0, VM_ALLOC_NOOBJ | VM_ALLOC_ZERO | 2635135641Scognet VM_ALLOC_WIRED); 2636135641Scognet if (pg == NULL) 2637135641Scognet return (1); 2638135641Scognet pa = VM_PAGE_TO_PHYS(pg); 2639135641Scognet 2640135641Scognet if (pap) 2641135641Scognet *pap = pa; 2642135641Scognet 2643135641Scognet l2b = pmap_get_l2_bucket(pmap_kernel(), va); 2644135641Scognet 2645135641Scognet ptep = &l2b->l2b_kva[l2pte_index(va)]; 2646135641Scognet *ptep = L2_S_PROTO | pa | cache_mode | 2647135641Scognet L2_S_PROT(PTE_KERNEL, VM_PROT_READ | VM_PROT_WRITE); 2648135641Scognet PTE_SYNC(ptep); 2649135641Scognet return (0); 2650135641Scognet} 2651135641Scognet 2652135641Scognet/* 2653135641Scognet * This is the same as pmap_alloc_l2_bucket(), except that it is only 2654135641Scognet * used by pmap_growkernel(). 2655135641Scognet */ 2656135641Scognetstatic __inline struct l2_bucket * 2657135641Scognetpmap_grow_l2_bucket(pmap_t pm, vm_offset_t va) 2658135641Scognet{ 2659135641Scognet struct l2_dtable *l2; 2660135641Scognet struct l2_bucket *l2b; 2661135641Scognet struct l1_ttable *l1; 2662135641Scognet pd_entry_t *pl1pd; 2663135641Scognet u_short l1idx; 2664135641Scognet vm_offset_t nva; 2665135641Scognet 2666135641Scognet l1idx = L1_IDX(va); 2667135641Scognet 2668135641Scognet if ((l2 = pm->pm_l2[L2_IDX(l1idx)]) == NULL) { 2669135641Scognet /* 2670135641Scognet * No mapping at this address, as there is 2671135641Scognet * no entry in the L1 table. 2672135641Scognet * Need to allocate a new l2_dtable. 2673135641Scognet */ 2674135641Scognet nva = pmap_kernel_l2dtable_kva; 2675135641Scognet if ((nva & PAGE_MASK) == 0) { 2676135641Scognet /* 2677135641Scognet * Need to allocate a backing page 2678135641Scognet */ 2679135641Scognet if (pmap_grow_map(nva, pte_l2_s_cache_mode, NULL)) 2680135641Scognet return (NULL); 2681135641Scognet } 2682135641Scognet 2683135641Scognet l2 = (struct l2_dtable *)nva; 2684135641Scognet nva += sizeof(struct l2_dtable); 2685135641Scognet 2686135641Scognet if ((nva & PAGE_MASK) < (pmap_kernel_l2dtable_kva & 2687135641Scognet PAGE_MASK)) { 2688135641Scognet /* 2689135641Scognet * The new l2_dtable straddles a page boundary. 2690135641Scognet * Map in another page to cover it. 
2691135641Scognet */ 2692135641Scognet if (pmap_grow_map(nva, pte_l2_s_cache_mode, NULL)) 2693135641Scognet return (NULL); 2694135641Scognet } 2695135641Scognet 2696135641Scognet pmap_kernel_l2dtable_kva = nva; 2697135641Scognet 2698135641Scognet /* 2699135641Scognet * Link it into the parent pmap 2700135641Scognet */ 2701135641Scognet pm->pm_l2[L2_IDX(l1idx)] = l2; 2702135641Scognet } 2703135641Scognet 2704135641Scognet l2b = &l2->l2_bucket[L2_BUCKET(l1idx)]; 2705135641Scognet 2706135641Scognet /* 2707135641Scognet * Fetch pointer to the L2 page table associated with the address. 2708135641Scognet */ 2709135641Scognet if (l2b->l2b_kva == NULL) { 2710135641Scognet pt_entry_t *ptep; 2711135641Scognet 2712135641Scognet /* 2713135641Scognet * No L2 page table has been allocated. Chances are, this 2714135641Scognet * is because we just allocated the l2_dtable, above. 2715135641Scognet */ 2716135641Scognet nva = pmap_kernel_l2ptp_kva; 2717135641Scognet ptep = (pt_entry_t *)nva; 2718135641Scognet if ((nva & PAGE_MASK) == 0) { 2719135641Scognet /* 2720135641Scognet * Need to allocate a backing page 2721135641Scognet */ 2722135641Scognet if (pmap_grow_map(nva, pte_l2_s_cache_mode_pt, 2723135641Scognet &pmap_kernel_l2ptp_phys)) 2724135641Scognet return (NULL); 2725135641Scognet PTE_SYNC_RANGE(ptep, PAGE_SIZE / sizeof(pt_entry_t)); 2726135641Scognet } 2727135641Scognet 2728135641Scognet l2->l2_occupancy++; 2729135641Scognet l2b->l2b_kva = ptep; 2730135641Scognet l2b->l2b_l1idx = l1idx; 2731135641Scognet l2b->l2b_phys = pmap_kernel_l2ptp_phys; 2732135641Scognet 2733135641Scognet pmap_kernel_l2ptp_kva += L2_TABLE_SIZE_REAL; 2734135641Scognet pmap_kernel_l2ptp_phys += L2_TABLE_SIZE_REAL; 2735135641Scognet } 2736135641Scognet 2737135641Scognet /* Distribute new L1 entry to all other L1s */ 2738135641Scognet SLIST_FOREACH(l1, &l1_list, l1_link) { 2739145071Scognet pl1pd = &l1->l1_kva[L1_IDX(va)]; 2740135641Scognet *pl1pd = l2b->l2b_phys | L1_C_DOM(PMAP_DOMAIN_KERNEL) | 2741135641Scognet L1_C_PROTO; 2742135641Scognet PTE_SYNC(pl1pd); 2743135641Scognet } 2744135641Scognet 2745135641Scognet return (l2b); 2746135641Scognet} 2747135641Scognet 2748135641Scognet 2749135641Scognet/* 2750129198Scognet * grow the number of kernel page table entries, if needed 2751129198Scognet */ 2752129198Scognetvoid 2753129198Scognetpmap_growkernel(vm_offset_t addr) 2754129198Scognet{ 2755135641Scognet pmap_t kpm = pmap_kernel(); 2756135641Scognet int s; 2757129198Scognet 2758135641Scognet if (addr <= pmap_curmaxkvaddr) 2759135641Scognet return; /* we are OK */ 2760135641Scognet 2761135641Scognet /* 2762135641Scognet * whoops! we need to add kernel PTPs 2763135641Scognet */ 2764135641Scognet 2765135641Scognet s = splhigh(); /* to be safe */ 2766135641Scognet 2767135641Scognet /* Map 1MB at a time */ 2768135641Scognet for (; pmap_curmaxkvaddr < addr; pmap_curmaxkvaddr += L1_S_SIZE) 2769135641Scognet pmap_grow_l2_bucket(kpm, pmap_curmaxkvaddr); 2770135641Scognet 2771135641Scognet /* 2772135641Scognet * flush out the cache, expensive but growkernel will happen so 2773135641Scognet * rarely 2774135641Scognet */ 2775135641Scognet cpu_dcache_wbinv_all(); 2776135641Scognet cpu_tlb_flushD(); 2777135641Scognet cpu_cpwait(); 2778135641Scognet kernel_vm_end = pmap_curmaxkvaddr; 2779135641Scognet 2780129198Scognet} 2781129198Scognet 2782129198Scognet 2783129198Scognet/* 2784129198Scognet * pmap_page_protect: 2785129198Scognet * 2786129198Scognet * Lower the permission for all mappings to a given page. 
2787129198Scognet */ 2788129198Scognetvoid 2789129198Scognetpmap_page_protect(vm_page_t m, vm_prot_t prot) 2790129198Scognet{ 2791135641Scognet switch(prot) { 2792135641Scognet case VM_PROT_READ|VM_PROT_WRITE|VM_PROT_EXECUTE: 2793135641Scognet case VM_PROT_READ|VM_PROT_WRITE: 2794135641Scognet return; 2795135641Scognet 2796135641Scognet case VM_PROT_READ: 2797135641Scognet case VM_PROT_READ|VM_PROT_EXECUTE: 2798135641Scognet pmap_clearbit(m, PVF_WRITE); 2799135641Scognet break; 2800135641Scognet 2801135641Scognet default: 2802135641Scognet pmap_remove_all(m); 2803135641Scognet break; 2804129198Scognet } 2805135641Scognet 2806129198Scognet} 2807129198Scognet 2808129198Scognet 2809129198Scognet/* 2810129198Scognet * Remove all pages from specified address space 2811129198Scognet * this aids process exit speeds. Also, this code 2812129198Scognet * is special cased for current process only, but 2813129198Scognet * can have the more generic (and slightly slower) 2814129198Scognet * mode enabled. This is much faster than pmap_remove 2815129198Scognet * in the case of running down an entire address space. 2816129198Scognet */ 2817129198Scognetvoid 2818129198Scognetpmap_remove_pages(pmap_t pmap, vm_offset_t sva, vm_offset_t eva) 2819129198Scognet{ 2820144760Scognet struct pv_entry *pv, *npv; 2821144760Scognet struct l2_bucket *l2b = NULL; 2822144760Scognet vm_page_t m; 2823144760Scognet pt_entry_t *pt; 2824144760Scognet 2825144760Scognet vm_page_lock_queues(); 2826144760Scognet for (pv = TAILQ_FIRST(&pmap->pm_pvlist); pv; pv = npv) { 2827144760Scognet if (pv->pv_va >= eva || pv->pv_va < sva) { 2828144760Scognet npv = TAILQ_NEXT(pv, pv_plist); 2829144760Scognet continue; 2830144760Scognet } 2831144760Scognet if (pv->pv_flags & PVF_WIRED) { 2832144760Scognet /* The page is wired, cannot remove it now. */ 2833144760Scognet npv = TAILQ_NEXT(pv, pv_plist); 2834144760Scognet continue; 2835144760Scognet } 2836144760Scognet pmap->pm_stats.resident_count--; 2837144760Scognet l2b = pmap_get_l2_bucket(pmap, pv->pv_va); 2838144760Scognet KASSERT(l2b != NULL, ("No L2 bucket in pmap_remove_pages")); 2839144760Scognet pt = &l2b->l2b_kva[l2pte_index(pv->pv_va)]; 2840144760Scognet m = PHYS_TO_VM_PAGE(*pt & L2_ADDR_MASK); 2841144760Scognet *pt = 0; 2842144760Scognet PTE_SYNC(pt); 2843144760Scognet npv = TAILQ_NEXT(pv, pv_plist); 2844144760Scognet pmap_nuke_pv(m, pmap, pv); 2845144760Scognet pmap_free_pv_entry(pv); 2846144760Scognet } 2847144760Scognet vm_page_unlock_queues(); 2848135641Scognet cpu_idcache_wbinv_all(); 2849135641Scognet cpu_tlb_flushID(); 2850135641Scognet cpu_cpwait(); 2851129198Scognet} 2852129198Scognet 2853129198Scognet 2854129198Scognet/*************************************************** 2855129198Scognet * Low level mapping routines..... 2856129198Scognet ***************************************************/ 2857129198Scognet 2858147114Scognet/* Map a section into the KVA. 
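 * Both 'va' and 'pa' must be 1MB-section aligned (the KASSERT below
 * checks L1_S_OFFSET), and the entry is replicated into every L1 table
 * on l1_list so all pmaps share the mapping.
 */
#if 0	/* hypothetical call (addresses made up): map 1MB of device space uncached */
	pmap_kenter_section(0xd0000000, 0x80000000, 0);
	/* for normal memory, pass SECTION_CACHE instead of 0 */
#endif
/*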
*/ 2859147114Scognet 2860147114Scognetvoid 2861147114Scognetpmap_kenter_section(vm_offset_t va, vm_offset_t pa, int flags) 2862147114Scognet{ 2863147114Scognet pd_entry_t pd = L1_S_PROTO | pa | L1_S_PROT(PTE_KERNEL, 2864147114Scognet VM_PROT_READ|VM_PROT_WRITE) | L1_S_DOM(PMAP_DOMAIN_KERNEL); 2865147114Scognet struct l1_ttable *l1; 2866147114Scognet 2867147114Scognet KASSERT(((va | pa) & L1_S_OFFSET) == 0, 2868147114Scognet ("Not a valid section mapping")); 2869147114Scognet if (flags & SECTION_CACHE) 2870147114Scognet pd |= pte_l1_s_cache_mode; 2871147114Scognet else if (flags & SECTION_PT) 2872147114Scognet pd |= pte_l1_s_cache_mode_pt; 2873147114Scognet SLIST_FOREACH(l1, &l1_list, l1_link) { 2874147114Scognet l1->l1_kva[L1_IDX(va)] = pd; 2875147114Scognet PTE_SYNC(&l1->l1_kva[L1_IDX(va)]); 2876147114Scognet } 2877147114Scognet} 2878147114Scognet 2879129198Scognet/* 2880129198Scognet * add a wired page to the kva 2881129198Scognet * note that in order for the mapping to take effect -- you 2882129198Scognet * should do a invltlb after doing the pmap_kenter... 2883129198Scognet */ 2884135641Scognetstatic PMAP_INLINE void 2885135641Scognetpmap_kenter_internal(vm_offset_t va, vm_offset_t pa, int flags) 2886129198Scognet{ 2887129198Scognet struct l2_bucket *l2b; 2888129198Scognet pt_entry_t *pte; 2889129198Scognet pt_entry_t opte; 2890129198Scognet PDEBUG(1, printf("pmap_kenter: va = %08x, pa = %08x\n", 2891129198Scognet (uint32_t) va, (uint32_t) pa)); 2892129198Scognet 2893129198Scognet 2894129198Scognet l2b = pmap_get_l2_bucket(pmap_kernel(), va); 2895135641Scognet if (l2b == NULL) 2896135641Scognet l2b = pmap_grow_l2_bucket(pmap_kernel(), va); 2897129198Scognet KASSERT(l2b != NULL, ("No L2 Bucket")); 2898129198Scognet pte = &l2b->l2b_kva[l2pte_index(va)]; 2899129198Scognet opte = *pte; 2900129198Scognet PDEBUG(1, printf("pmap_kenter: pte = %08x, opte = %08x, npte = %08x\n", 2901129198Scognet (uint32_t) pte, opte, *pte)); 2902129198Scognet if (l2pte_valid(opte)) { 2903129198Scognet cpu_dcache_wbinv_range(va, PAGE_SIZE); 2904129198Scognet cpu_tlb_flushD_SE(va); 2905129198Scognet cpu_cpwait(); 2906135641Scognet } else { 2907129198Scognet if (opte == 0) 2908129198Scognet l2b->l2b_occupancy++; 2909135641Scognet } 2910129198Scognet *pte = L2_S_PROTO | pa | L2_S_PROT(PTE_KERNEL, 2911135641Scognet VM_PROT_READ | VM_PROT_WRITE); 2912135641Scognet if (flags & KENTER_CACHE) 2913135641Scognet *pte |= pte_l2_s_cache_mode; 2914142570Scognet if (flags & KENTER_USER) 2915142570Scognet *pte |= L2_S_PROT_U; 2916129198Scognet PTE_SYNC(pte); 2917135641Scognet} 2918129198Scognet 2919135641Scognetvoid 2920135641Scognetpmap_kenter(vm_offset_t va, vm_paddr_t pa) 2921135641Scognet{ 2922135641Scognet pmap_kenter_internal(va, pa, KENTER_CACHE); 2923129198Scognet} 2924129198Scognet 2925142570Scognetvoid 2926142570Scognetpmap_kenter_user(vm_offset_t va, vm_paddr_t pa) 2927142570Scognet{ 2928143192Scognet 2929142570Scognet pmap_kenter_internal(va, pa, KENTER_CACHE|KENTER_USER); 2930143192Scognet /* 2931143192Scognet * Call pmap_fault_fixup now, to make sure we'll have no exception 2932143192Scognet * at the first use of the new address, or bad things will happen, 2933143192Scognet * as we use one of these addresses in the exception handlers. 
2934143192Scognet	 */
2935143192Scognet	pmap_fault_fixup(pmap_kernel(), va, VM_PROT_READ|VM_PROT_WRITE, 1);
2936142570Scognet}
2937129198Scognet
2938129198Scognet/*
2939135641Scognet * remove a page from the kernel pagetables
2940129198Scognet */
2941129198ScognetPMAP_INLINE void
2942129198Scognetpmap_kremove(vm_offset_t va)
2943129198Scognet{
2944135641Scognet	struct l2_bucket *l2b;
2945135641Scognet	pt_entry_t *pte, opte;
2946135641Scognet
2947135641Scognet	l2b = pmap_get_l2_bucket(pmap_kernel(), va);
2948145071Scognet	if (!l2b)
2949145071Scognet		return;
2950135641Scognet	KASSERT(l2b != NULL, ("No L2 Bucket"));
2951135641Scognet	pte = &l2b->l2b_kva[l2pte_index(va)];
2952135641Scognet	opte = *pte;
2953135641Scognet	if (l2pte_valid(opte)) {
2954135641Scognet		cpu_dcache_wbinv_range(va, PAGE_SIZE);
2955135641Scognet		cpu_tlb_flushD_SE(va);
2956135641Scognet		cpu_cpwait();
2957144760Scognet		*pte = 0;
2958135641Scognet	}
2959129198Scognet}
2960129198Scognet
2961129198Scognet
2962129198Scognet/*
2963129198Scognet * Used to map a range of physical addresses into kernel
2964129198Scognet * virtual address space.
2965129198Scognet *
2966129198Scognet * The value passed in '*virt' is a suggested virtual address for
2967129198Scognet * the mapping. Architectures which can support a direct-mapped
2968129198Scognet * physical to virtual region can return the appropriate address
2969129198Scognet * within that region, leaving '*virt' unchanged. Other
2970129198Scognet * architectures should map the pages starting at '*virt' and
2971129198Scognet * update '*virt' with the first usable address after the mapped
2972129198Scognet * region.
2973129198Scognet */
2974129198Scognetvm_offset_t
2975129198Scognetpmap_map(vm_offset_t *virt, vm_offset_t start, vm_offset_t end, int prot)
2976129198Scognet{
2977129198Scognet	vm_offset_t sva = *virt;
2978129198Scognet	vm_offset_t va = sva;
2979129198Scognet
2980129198Scognet	PDEBUG(1, printf("pmap_map: virt = %08x, start = %08x, end = %08x, "
2981129198Scognet	    "prot = %d\n", (uint32_t) *virt, (uint32_t) start, (uint32_t) end,
2982129198Scognet	    prot));
2983129198Scognet
2984129198Scognet	while (start < end) {
2985129198Scognet		pmap_kenter(va, start);
2986129198Scognet		va += PAGE_SIZE;
2987129198Scognet		start += PAGE_SIZE;
2988129198Scognet	}
2989129198Scognet	*virt = va;
2990129198Scognet	return (sva);
2991129198Scognet}
2992129198Scognet
2993143724Scognetstatic void
2994146596Scognetpmap_wb_page(vm_page_t m, boolean_t do_inv)
2995143724Scognet{
2996143724Scognet	struct pv_entry *pv;
2997129198Scognet
2998143724Scognet	TAILQ_FOREACH(pv, &m->md.pv_list, pv_list)
2999146596Scognet		pmap_dcache_wb_range(pv->pv_pmap, pv->pv_va, PAGE_SIZE, do_inv,
3000144760Scognet		    (pv->pv_flags & PVF_WRITE) == 0);
3001143724Scognet}
3002143724Scognet
3003129198Scognet/*
3004129198Scognet * Add a list of wired pages to the kva.
3005129198Scognet * This routine is only used for temporary
3006129198Scognet * kernel mappings that do not need to have
3007129198Scognet * page modification or references recorded.
3008129198Scognet * Note that old mappings are simply written
3009129198Scognet * over. The page *must* be wired.
3010129198Scognet */
3011129198Scognetvoid
3012129198Scognetpmap_qenter(vm_offset_t va, vm_page_t *m, int count)
3013129198Scognet{
3014129198Scognet	int i;
3015129198Scognet
3016129198Scognet	for (i = 0; i < count; i++) {
3017146596Scognet		pmap_wb_page(m[i], TRUE);
3018135641Scognet		pmap_kenter_internal(va, VM_PAGE_TO_PHYS(m[i]),
3019135641Scognet		    KENTER_CACHE);
3020129198Scognet		va += PAGE_SIZE;
3021129198Scognet	}
3022129198Scognet}
3023129198Scognet
3024129198Scognet
3025129198Scognet/*
3026129198Scognet * this routine jerks page mappings from the
3027129198Scognet * kernel -- it is meant only for temporary mappings.
3028129198Scognet */
3029129198Scognetvoid
3030129198Scognetpmap_qremove(vm_offset_t va, int count)
3031129198Scognet{
3032146596Scognet	vm_paddr_t pa;
3033129198Scognet	int i;
3034129198Scognet
3035129198Scognet	for (i = 0; i < count; i++) {
3036146596Scognet		pa = vtophys(va);
3037146596Scognet		if (pa) {
3038146596Scognet			pmap_wb_page(PHYS_TO_VM_PAGE(pa), TRUE);
3039146596Scognet			pmap_kremove(va);
3040146596Scognet		}
3041129198Scognet		va += PAGE_SIZE;
3042129198Scognet	}
3043129198Scognet}
3044129198Scognet
3045129198Scognet
3046129198Scognet/*
3047129198Scognet * pmap_object_init_pt preloads the ptes for a given object
3048129198Scognet * into the specified pmap. This eliminates the blast of soft
3049129198Scognet * faults on process startup and immediately after an mmap.
3050129198Scognet */
3051129198Scognetvoid
3052129198Scognetpmap_object_init_pt(pmap_t pmap, vm_offset_t addr, vm_object_t object,
3053129198Scognet    vm_pindex_t pindex, vm_size_t size)
3054129198Scognet{
3055129198Scognet	printf("pmap_object_init_pt()\n");
3056129198Scognet}
3057129198Scognet
3058129198Scognet
3059129198Scognet/*
3060129198Scognet * pmap_is_prefaultable:
3061129198Scognet *
3062129198Scognet *	Return whether or not the specified virtual address is eligible
3063129198Scognet *	for prefault.
3064129198Scognet */
3065129198Scognetboolean_t
3066129198Scognetpmap_is_prefaultable(pmap_t pmap, vm_offset_t addr)
3067129198Scognet{
3068135641Scognet	pd_entry_t *pde;
3069129198Scognet	pt_entry_t *pte;
3070129198Scognet
3071135641Scognet	if (!pmap_get_pde_pte(pmap, addr, &pde, &pte))
3072135641Scognet		return (FALSE);
3073135641Scognet	if (*pte == 0)
3074135641Scognet		return (TRUE);
3075135641Scognet	return (FALSE);
3076129198Scognet}
3077129198Scognet
3078129198Scognet/*
3079129198Scognet * Fetch pointers to the PDE/PTE for the given pmap/VA pair.
3080129198Scognet * Returns TRUE if the mapping exists, else FALSE.
3081129198Scognet *
3082129198Scognet * NOTE: This function is only used by a couple of arm-specific modules.
3083129198Scognet * It is not safe to take any pmap locks here, since we could be right
3084129198Scognet * in the middle of debugging the pmap anyway...
3085129198Scognet *
3086129198Scognet * It is possible for this routine to return FALSE even though a valid
3087129198Scognet * mapping does exist. This is because we don't lock, so the metadata
3088129198Scognet * state may be inconsistent.
3089129198Scognet *
3090129198Scognet * NOTE: We can return a NULL *ptp in the case where the L1 pde is
3091129198Scognet * a "section" mapping.
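 *
 * A caller must therefore check *ptp before dereferencing it:
 */
#if 0	/* illustrative (hypothetical) caller */
	pd_entry_t *pdep;
	pt_entry_t *ptep;

	if (pmap_get_pde_pte(pm, va, &pdep, &ptep)) {
		if (ptep == NULL) {
			/* 1MB section mapping: all the info is in *pdep */
		} else {
			/* page mapping: *ptep is the L2 page table entry */
		}
	}
#endif
/*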
3092129198Scognet */ 3093129198Scognetboolean_t 3094129198Scognetpmap_get_pde_pte(pmap_t pm, vm_offset_t va, pd_entry_t **pdp, pt_entry_t **ptp) 3095129198Scognet{ 3096129198Scognet struct l2_dtable *l2; 3097129198Scognet pd_entry_t *pl1pd, l1pd; 3098129198Scognet pt_entry_t *ptep; 3099129198Scognet u_short l1idx; 3100129198Scognet 3101129198Scognet if (pm->pm_l1 == NULL) 3102129198Scognet return (FALSE); 3103129198Scognet 3104129198Scognet l1idx = L1_IDX(va); 3105129198Scognet *pdp = pl1pd = &pm->pm_l1->l1_kva[l1idx]; 3106129198Scognet l1pd = *pl1pd; 3107129198Scognet 3108129198Scognet if (l1pte_section_p(l1pd)) { 3109129198Scognet *ptp = NULL; 3110129198Scognet return (TRUE); 3111129198Scognet } 3112129198Scognet 3113129198Scognet if (pm->pm_l2 == NULL) 3114129198Scognet return (FALSE); 3115129198Scognet 3116129198Scognet l2 = pm->pm_l2[L2_IDX(l1idx)]; 3117129198Scognet 3118129198Scognet if (l2 == NULL || 3119129198Scognet (ptep = l2->l2_bucket[L2_BUCKET(l1idx)].l2b_kva) == NULL) { 3120129198Scognet return (FALSE); 3121129198Scognet } 3122129198Scognet 3123129198Scognet *ptp = &ptep[l2pte_index(va)]; 3124129198Scognet return (TRUE); 3125129198Scognet} 3126129198Scognet 3127129198Scognet/* 3128129198Scognet * Routine: pmap_remove_all 3129129198Scognet * Function: 3130129198Scognet * Removes this physical page from 3131129198Scognet * all physical maps in which it resides. 3132129198Scognet * Reflects back modify bits to the pager. 3133129198Scognet * 3134129198Scognet * Notes: 3135129198Scognet * Original versions of this routine were very 3136129198Scognet * inefficient because they iteratively called 3137129198Scognet * pmap_remove (slow...) 3138129198Scognet */ 3139129198Scognetvoid 3140129198Scognetpmap_remove_all(vm_page_t m) 3141129198Scognet{ 3142129198Scognet pv_entry_t pv; 3143135641Scognet pt_entry_t *ptep, pte; 3144135641Scognet struct l2_bucket *l2b; 3145135641Scognet boolean_t flush = FALSE; 3146135641Scognet pmap_t curpm; 3147135641Scognet int flags = 0; 3148129198Scognet 3149129198Scognet#if defined(PMAP_DEBUG) 3150129198Scognet /* 3151129198Scognet * XXX this makes pmap_page_protect(NONE) illegal for non-managed 3152129198Scognet * pages! 
3153129198Scognet */ 3154147217Salc if (m->flags & PG_FICTITIOUS) { 3155129198Scognet panic("pmap_page_protect: illegal for unmanaged page, va: 0x%x", VM_PAGE_TO_PHYS(m)); 3156129198Scognet } 3157129198Scognet#endif 3158129198Scognet 3159135641Scognet if (TAILQ_EMPTY(&m->md.pv_list)) 3160135641Scognet return; 3161135641Scognet curpm = vmspace_pmap(curproc->p_vmspace); 3162129198Scognet while ((pv = TAILQ_FIRST(&m->md.pv_list)) != NULL) { 3163135641Scognet if (flush == FALSE && (pv->pv_pmap == curpm || 3164135641Scognet pv->pv_pmap == pmap_kernel())) 3165135641Scognet flush = TRUE; 3166135641Scognet l2b = pmap_get_l2_bucket(pv->pv_pmap, pv->pv_va); 3167135641Scognet KASSERT(l2b != NULL, ("No l2 bucket")); 3168135641Scognet ptep = &l2b->l2b_kva[l2pte_index(pv->pv_va)]; 3169135641Scognet pte = *ptep; 3170135641Scognet *ptep = 0; 3171135641Scognet PTE_SYNC_CURRENT(pv->pv_pmap, ptep); 3172135641Scognet pmap_free_l2_bucket(pv->pv_pmap, l2b, 1); 3173135641Scognet if (pv->pv_flags & PVF_WIRED) 3174135641Scognet pv->pv_pmap->pm_stats.wired_count--; 3175129198Scognet pv->pv_pmap->pm_stats.resident_count--; 3176135641Scognet flags |= pv->pv_flags; 3177135641Scognet pmap_nuke_pv(m, pv->pv_pmap, pv); 3178129198Scognet pmap_free_pv_entry(pv); 3179129198Scognet } 3180129198Scognet 3181135641Scognet if (flush) { 3182135641Scognet if (PV_BEEN_EXECD(flags)) 3183135641Scognet pmap_tlb_flushID(curpm); 3184135641Scognet else 3185135641Scognet pmap_tlb_flushD(curpm); 3186135641Scognet } 3187129198Scognet} 3188129198Scognet 3189129198Scognet 3190129198Scognet/* 3191129198Scognet * Set the physical protection on the 3192129198Scognet * specified range of this map as requested. 3193129198Scognet */ 3194129198Scognetvoid 3195129198Scognetpmap_protect(pmap_t pm, vm_offset_t sva, vm_offset_t eva, vm_prot_t prot) 3196129198Scognet{ 3197129198Scognet struct l2_bucket *l2b; 3198129198Scognet pt_entry_t *ptep, pte; 3199129198Scognet vm_offset_t next_bucket; 3200129198Scognet u_int flags; 3201129198Scognet int flush; 3202129198Scognet 3203129198Scognet if ((prot & VM_PROT_READ) == 0) { 3204132899Salc mtx_lock(&Giant); 3205129198Scognet pmap_remove(pm, sva, eva); 3206132899Salc mtx_unlock(&Giant); 3207129198Scognet return; 3208129198Scognet } 3209129198Scognet 3210129198Scognet if (prot & VM_PROT_WRITE) { 3211129198Scognet /* 3212129198Scognet * If this is a read->write transition, just ignore it and let 3213135641Scognet * vm_fault() take care of it later. 3214129198Scognet */ 3215129198Scognet return; 3216129198Scognet } 3217129198Scognet 3218132899Salc mtx_lock(&Giant); 3219129198Scognet 3220129198Scognet /* 3221129198Scognet * OK, at this point, we know we're doing write-protect operation. 3222129198Scognet * If the pmap is active, write-back the range. 3223129198Scognet */ 3224129198Scognet pmap_dcache_wb_range(pm, sva, eva - sva, FALSE, FALSE); 3225129198Scognet 3226129198Scognet flush = ((eva - sva) >= (PAGE_SIZE * 4)) ? 
0 : -1; 3227129198Scognet flags = 0; 3228129198Scognet 3229144760Scognet vm_page_lock_queues(); 3230129198Scognet while (sva < eva) { 3231129198Scognet next_bucket = L2_NEXT_BUCKET(sva); 3232129198Scognet if (next_bucket > eva) 3233129198Scognet next_bucket = eva; 3234129198Scognet 3235129198Scognet l2b = pmap_get_l2_bucket(pm, sva); 3236129198Scognet if (l2b == NULL) { 3237129198Scognet sva = next_bucket; 3238129198Scognet continue; 3239129198Scognet } 3240129198Scognet 3241129198Scognet ptep = &l2b->l2b_kva[l2pte_index(sva)]; 3242129198Scognet 3243129198Scognet while (sva < next_bucket) { 3244129198Scognet if ((pte = *ptep) != 0 && (pte & L2_S_PROT_W) != 0) { 3245129198Scognet struct vm_page *pg; 3246129198Scognet u_int f; 3247129198Scognet 3248129198Scognet pg = PHYS_TO_VM_PAGE(l2pte_pa(pte)); 3249129198Scognet pte &= ~L2_S_PROT_W; 3250129198Scognet *ptep = pte; 3251129198Scognet PTE_SYNC(ptep); 3252129198Scognet 3253129198Scognet if (pg != NULL) { 3254129198Scognet f = pmap_modify_pv(pg, pm, sva, 3255129198Scognet PVF_WRITE, 0); 3256129198Scognet pmap_vac_me_harder(pg, pm, sva); 3257144760Scognet if (pmap_track_modified(sva)) 3258144760Scognet vm_page_dirty(pg); 3259129198Scognet } else 3260129198Scognet f = PVF_REF | PVF_EXEC; 3261129198Scognet 3262129198Scognet if (flush >= 0) { 3263129198Scognet flush++; 3264129198Scognet flags |= f; 3265129198Scognet } else 3266129198Scognet if (PV_BEEN_EXECD(f)) 3267129198Scognet pmap_tlb_flushID_SE(pm, sva); 3268129198Scognet else 3269129198Scognet if (PV_BEEN_REFD(f)) 3270129198Scognet pmap_tlb_flushD_SE(pm, sva); 3271129198Scognet } 3272129198Scognet 3273129198Scognet sva += PAGE_SIZE; 3274129198Scognet ptep++; 3275129198Scognet } 3276129198Scognet } 3277129198Scognet 3278129198Scognet 3279129198Scognet if (flush) { 3280129198Scognet if (PV_BEEN_EXECD(flags)) 3281129198Scognet pmap_tlb_flushID(pm); 3282129198Scognet else 3283129198Scognet if (PV_BEEN_REFD(flags)) 3284129198Scognet pmap_tlb_flushD(pm); 3285129198Scognet } 3286144760Scognet vm_page_unlock_queues(); 3287129198Scognet 3288132899Salc mtx_unlock(&Giant); 3289129198Scognet} 3290129198Scognet 3291129198Scognet 3292129198Scognet/* 3293129198Scognet * Insert the given physical page (p) at 3294129198Scognet * the specified virtual address (v) in the 3295129198Scognet * target physical map with the protection requested. 3296129198Scognet * 3297129198Scognet * If specified, the page will be wired down, meaning 3298129198Scognet * that the related pte can not be reclaimed. 3299129198Scognet * 3300129198Scognet * NB: This is the only routine which MAY NOT lazy-evaluate 3301129198Scognet * or lose information. That is, this routine must actually 3302129198Scognet * insert this page into the given map NOW. 
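 *
 * A typical call, wiring a page read/write into a pmap (values
 * hypothetical), looks like:
 */
#if 0
	pmap_enter(pmap, va, m, VM_PROT_READ | VM_PROT_WRITE, TRUE);
#endif
/*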
3303129198Scognet */ 3304135641Scognet 3305129198Scognetvoid 3306129198Scognetpmap_enter(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot, 3307129198Scognet boolean_t wired) 3308129198Scognet{ 3309135641Scognet struct l2_bucket *l2b = NULL; 3310129198Scognet struct vm_page *opg; 3311144760Scognet struct pv_entry *pve = NULL; 3312129198Scognet pt_entry_t *ptep, npte, opte; 3313129198Scognet u_int nflags; 3314129198Scognet u_int oflags; 3315129198Scognet vm_paddr_t pa; 3316129198Scognet 3317135641Scognet vm_page_lock_queues(); 3318129198Scognet if (va == vector_page) { 3319129198Scognet pa = systempage.pv_pa; 3320129198Scognet m = NULL; 3321129198Scognet } else 3322129198Scognet pa = VM_PAGE_TO_PHYS(m); 3323129198Scognet nflags = 0; 3324129198Scognet if (prot & VM_PROT_WRITE) 3325129198Scognet nflags |= PVF_WRITE; 3326129198Scognet if (prot & VM_PROT_EXECUTE) 3327129198Scognet nflags |= PVF_EXEC; 3328129198Scognet if (wired) 3329129198Scognet nflags |= PVF_WIRED; 3330129198Scognet PDEBUG(1, printf("pmap_enter: pmap = %08x, va = %08x, m = %08x, prot = %x, " 3331129198Scognet "wired = %x\n", (uint32_t) pmap, va, (uint32_t) m, prot, wired)); 3332129198Scognet 3333135641Scognet if (pmap == pmap_kernel()) { 3334129198Scognet l2b = pmap_get_l2_bucket(pmap, va); 3335135641Scognet if (l2b == NULL) 3336135641Scognet l2b = pmap_grow_l2_bucket(pmap, va); 3337135641Scognet } else 3338129198Scognet l2b = pmap_alloc_l2_bucket(pmap, va); 3339135641Scognet KASSERT(l2b != NULL, 3340135641Scognet ("pmap_enter: failed to allocate l2 bucket")); 3341129198Scognet ptep = &l2b->l2b_kva[l2pte_index(va)]; 3342129198Scognet 3343135641Scognet opte = *ptep; 3344129198Scognet npte = pa; 3345129198Scognet oflags = 0; 3346129198Scognet if (opte) { 3347129198Scognet /* 3348129198Scognet * There is already a mapping at this address. 3349129198Scognet * If the physical address is different, lookup the 3350129198Scognet * vm_page. 3351129198Scognet */ 3352129198Scognet if (l2pte_pa(opte) != pa) 3353129198Scognet opg = PHYS_TO_VM_PAGE(l2pte_pa(opte)); 3354129198Scognet else 3355129198Scognet opg = m; 3356129198Scognet } else 3357129198Scognet opg = NULL; 3358129198Scognet 3359135641Scognet if ((prot & (VM_PROT_ALL)) || 3360135641Scognet (!m || m->md.pvh_attrs & PVF_REF)) { 3361129198Scognet /* 3362135641Scognet * - The access type indicates that we don't need 3363135641Scognet * to do referenced emulation. 3364135641Scognet * OR 3365135641Scognet * - The physical page has already been referenced 3366135641Scognet * so no need to re-do referenced emulation here. 3367129198Scognet */ 3368135641Scognet npte |= L2_S_PROTO; 3369135641Scognet 3370135641Scognet nflags |= PVF_REF; 3371135641Scognet 3372144760Scognet if (m && ((prot & VM_PROT_WRITE) != 0 || 3373144760Scognet (m->md.pvh_attrs & PVF_MOD))) { 3374129198Scognet /* 3375135641Scognet * This is a writable mapping, and the 3376135641Scognet * page's mod state indicates it has 3377135641Scognet * already been modified. Make it 3378135641Scognet * writable from the outset. 3379129198Scognet */ 3380135641Scognet nflags |= PVF_MOD; 3381144760Scognet if (!(m->md.pvh_attrs & PVF_MOD) && 3382144760Scognet pmap_track_modified(va)) 3383144760Scognet vm_page_dirty(m); 3384129198Scognet } 3385144760Scognet if (m && opte) 3386144760Scognet vm_page_flag_set(m, PG_REFERENCED); 3387135641Scognet } else { 3388135641Scognet /* 3389135641Scognet * Need to do page referenced emulation. 
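		 *
		 * The ARM MMU keeps no hardware referenced/modified bits,
		 * so the mapping is entered with an invalid type
		 * (L2_TYPE_INV below); the first access faults, and
		 * pmap_fault_fixup() then records PVF_REF and rewrites
		 * the PTE as valid.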
3390135641Scognet */ 3391135641Scognet npte |= L2_TYPE_INV; 3392135641Scognet } 3393135641Scognet 3394135641Scognet if (prot & VM_PROT_WRITE) 3395135641Scognet npte |= L2_S_PROT_W; 3396135641Scognet npte |= pte_l2_s_cache_mode; 3397135641Scognet if (m && m == opg) { 3398135641Scognet /* 3399135641Scognet * We're changing the attrs of an existing mapping. 3400135641Scognet */ 3401129198Scognet#if 0 3402135641Scognet simple_lock(&pg->mdpage.pvh_slock); 3403129198Scognet#endif 3404135641Scognet oflags = pmap_modify_pv(m, pmap, va, 3405135641Scognet PVF_WRITE | PVF_EXEC | PVF_WIRED | 3406135641Scognet PVF_MOD | PVF_REF, nflags); 3407129198Scognet#if 0 3408135641Scognet simple_unlock(&pg->mdpage.pvh_slock); 3409129198Scognet#endif 3410135641Scognet 3411135641Scognet /* 3412135641Scognet * We may need to flush the cache if we're 3413135641Scognet * doing rw-ro... 3414135641Scognet */ 3415135641Scognet if (pmap_is_current(pmap) && 3416135641Scognet (oflags & PVF_NC) == 0 && 3417129198Scognet (opte & L2_S_PROT_W) != 0 && 3418129198Scognet (prot & VM_PROT_WRITE) == 0) 3419135641Scognet cpu_dcache_wb_range(va, PAGE_SIZE); 3420129198Scognet } else { 3421129198Scognet /* 3422135641Scognet * New mapping, or changing the backing page 3423135641Scognet * of an existing mapping. 3424129198Scognet */ 3425129198Scognet if (opg) { 3426129198Scognet /* 3427135641Scognet * Replacing an existing mapping with a new one. 3428135641Scognet * It is part of our managed memory so we 3429135641Scognet * must remove it from the PV list 3430129198Scognet */ 3431129198Scognet#if 0 3432129198Scognet simple_lock(&opg->mdpage.pvh_slock); 3433129198Scognet#endif 3434129198Scognet pve = pmap_remove_pv(opg, pmap, va); 3435144760Scognet if (m && (m->flags & (PG_UNMANAGED | PG_FICTITIOUS)) && pve) 3436135641Scognet pmap_free_pv_entry(pve); 3437144760Scognet else if (!pve) 3438144760Scognet pve = pmap_get_pv_entry(); 3439135641Scognet KASSERT(pve != NULL, ("No pv")); 3440129198Scognet#if 0 3441129198Scognet simple_unlock(&opg->mdpage.pvh_slock); 3442129198Scognet#endif 3443129198Scognet oflags = pve->pv_flags; 3444135641Scognet 3445135641Scognet /* 3446135641Scognet * If the old mapping was valid (ref/mod 3447135641Scognet * emulation creates 'invalid' mappings 3448135641Scognet * initially) then make sure to frob 3449135641Scognet * the cache. 
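			 *
			 * That is, write back (and, where the old mapping
			 * is going away, invalidate) any data the VIVT
			 * cache may still hold for the old mapping, so
			 * nothing dirty lingers under a virtual address
			 * whose translation is about to change.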
3450135641Scognet */ 3451135641Scognet if ((oflags & PVF_NC) == 0 && 3452135641Scognet l2pte_valid(opte)) { 3453135641Scognet if (PV_BEEN_EXECD(oflags)) { 3454129198Scognet pmap_idcache_wbinv_range(pmap, va, 3455129198Scognet PAGE_SIZE); 3456135641Scognet } else 3457135641Scognet if (PV_BEEN_REFD(oflags)) { 3458135641Scognet pmap_dcache_wb_range(pmap, va, 3459135641Scognet PAGE_SIZE, TRUE, 3460135641Scognet (oflags & PVF_WRITE) == 0); 3461135641Scognet } 3462129198Scognet } 3463135641Scognet } else if (m) 3464135641Scognet if ((pve = pmap_get_pv_entry()) == NULL) { 3465135641Scognet panic("pmap_enter: no pv entries"); 3466135641Scognet } 3467144760Scognet if (m && !(m->flags & (PG_UNMANAGED | PG_FICTITIOUS))) 3468135641Scognet pmap_enter_pv(m, pve, pmap, va, nflags); 3469129198Scognet } 3470129198Scognet /* 3471129198Scognet * Make sure userland mappings get the right permissions 3472129198Scognet */ 3473129198Scognet if (pmap != pmap_kernel() && va != vector_page) { 3474129198Scognet npte |= L2_S_PROT_U; 3475129198Scognet } 3476129198Scognet 3477129198Scognet /* 3478129198Scognet * Keep the stats up to date 3479129198Scognet */ 3480129198Scognet if (opte == 0) { 3481129198Scognet l2b->l2b_occupancy++; 3482129198Scognet pmap->pm_stats.resident_count++; 3483129198Scognet } 3484129198Scognet 3485129198Scognet 3486129198Scognet /* 3487129198Scognet * If this is just a wiring change, the two PTEs will be 3488129198Scognet * identical, so there's no need to update the page table. 3489129198Scognet */ 3490129198Scognet if (npte != opte) { 3491135641Scognet boolean_t is_cached = pmap_is_current(pmap); 3492129198Scognet 3493129198Scognet *ptep = npte; 3494129198Scognet if (is_cached) { 3495129198Scognet /* 3496129198Scognet * We only need to frob the cache/tlb if this pmap 3497129198Scognet * is current 3498129198Scognet */ 3499129198Scognet PTE_SYNC(ptep); 3500129198Scognet if (L1_IDX(va) != L1_IDX(vector_page) && 3501129198Scognet l2pte_valid(npte)) { 3502129198Scognet /* 3503129198Scognet * This mapping is likely to be accessed as 3504129198Scognet * soon as we return to userland. Fix up the 3505129198Scognet * L1 entry to avoid taking another 3506129198Scognet * page/domain fault. 3507129198Scognet */ 3508129198Scognet pd_entry_t *pl1pd, l1pd; 3509129198Scognet 3510129198Scognet pl1pd = &pmap->pm_l1->l1_kva[L1_IDX(va)]; 3511129198Scognet l1pd = l2b->l2b_phys | L1_C_DOM(pmap->pm_domain) | 3512144760Scognet L1_C_PROTO; 3513129198Scognet if (*pl1pd != l1pd) { 3514129198Scognet *pl1pd = l1pd; 3515129198Scognet PTE_SYNC(pl1pd); 3516129198Scognet } 3517129198Scognet } 3518129198Scognet } 3519129198Scognet 3520129198Scognet if (PV_BEEN_EXECD(oflags)) 3521129198Scognet pmap_tlb_flushID_SE(pmap, va); 3522135641Scognet else if (PV_BEEN_REFD(oflags)) 3523129198Scognet pmap_tlb_flushD_SE(pmap, va); 3524129198Scognet 3525129198Scognet 3526135641Scognet pmap_vac_me_harder(m, pmap, va); 3527129198Scognet } 3528135641Scognet vm_page_unlock_queues(); 3529129198Scognet} 3530129198Scognet 3531129198Scognet/* 3532129198Scognet * this code makes some *MAJOR* assumptions: 3533129198Scognet * 1. Current pmap & pmap exists. 3534129198Scognet * 2. Not wired. 3535129198Scognet * 3. Read access. 3536129198Scognet * 4. No page table pages. 3537129198Scognet * 6. Page IS managed. 3538129198Scognet * but is *MUCH* faster than pmap_enter... 
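 *
 * (On arm this implementation just wraps pmap_enter() -- read/execute,
 * unwired -- with the required lock juggling and a full I/D cache clean,
 * so the speed claim inherited from other ports is nominal here.)
 */
#if 0	/* hypothetical prefault-style use */
	mpte = pmap_enter_quick(pmap, va, m, mpte);
#endif
/*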
3539129198Scognet */ 3540129198Scognet 3541129198Scognetvm_page_t 3542129198Scognetpmap_enter_quick(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_page_t mpte) 3543129198Scognet{ 3544138897Salc 3545138897Salc vm_page_busy(m); 3546138897Salc vm_page_unlock_queues(); 3547138897Salc VM_OBJECT_UNLOCK(m->object); 3548138897Salc mtx_lock(&Giant); 3549135641Scognet pmap_enter(pmap, va, m, VM_PROT_READ|VM_PROT_EXECUTE, FALSE); 3550146596Scognet pmap_idcache_wbinv_all(pmap); 3551138897Salc mtx_unlock(&Giant); 3552138897Salc VM_OBJECT_LOCK(m->object); 3553138897Salc vm_page_lock_queues(); 3554138897Salc vm_page_wakeup(m); 3555129198Scognet return (NULL); 3556129198Scognet} 3557129198Scognet 3558129198Scognet/* 3559129198Scognet * Routine: pmap_change_wiring 3560129198Scognet * Function: Change the wiring attribute for a map/virtual-address 3561129198Scognet * pair. 3562129198Scognet * In/out conditions: 3563129198Scognet * The mapping must already exist in the pmap. 3564129198Scognet */ 3565129198Scognetvoid 3566129198Scognetpmap_change_wiring(pmap_t pmap, vm_offset_t va, boolean_t wired) 3567129198Scognet{ 3568129198Scognet struct l2_bucket *l2b; 3569129198Scognet pt_entry_t *ptep, pte; 3570129198Scognet vm_page_t pg; 3571129198Scognet 3572129198Scognet l2b = pmap_get_l2_bucket(pmap, va); 3573129198Scognet KASSERT(l2b, ("No l2b bucket in pmap_change_wiring")); 3574129198Scognet ptep = &l2b->l2b_kva[l2pte_index(va)]; 3575129198Scognet pte = *ptep; 3576129198Scognet pg = PHYS_TO_VM_PAGE(l2pte_pa(pte)); 3577129198Scognet if (pg) 3578129198Scognet pmap_modify_pv(pg, pmap, va, PVF_WIRED, wired); 3579129198Scognet} 3580129198Scognet 3581129198Scognet 3582129198Scognet/* 3583129198Scognet * Copy the range specified by src_addr/len 3584129198Scognet * from the source map to the range dst_addr/len 3585129198Scognet * in the destination map. 3586129198Scognet * 3587129198Scognet * This routine is only advisory and need not do anything. 3588129198Scognet */ 3589129198Scognetvoid 3590129198Scognetpmap_copy(pmap_t dst_pmap, pmap_t src_pmap, vm_offset_t dst_addr, 3591129198Scognet vm_size_t len, vm_offset_t src_addr) 3592129198Scognet{ 3593129198Scognet} 3594129198Scognet 3595129198Scognet 3596129198Scognet/* 3597129198Scognet * Routine: pmap_extract 3598129198Scognet * Function: 3599129198Scognet * Extract the physical page address associated 3600129198Scognet * with the given map/virtual_address pair. 3601129198Scognet */ 3602131658Salcvm_paddr_t 3603129198Scognetpmap_extract(pmap_t pm, vm_offset_t va) 3604129198Scognet{ 3605129198Scognet struct l2_dtable *l2; 3606129198Scognet pd_entry_t *pl1pd, l1pd; 3607129198Scognet pt_entry_t *ptep, pte; 3608129198Scognet vm_paddr_t pa; 3609129198Scognet u_int l1idx; 3610129198Scognet l1idx = L1_IDX(va); 3611129198Scognet pl1pd = &pm->pm_l1->l1_kva[l1idx]; 3612129198Scognet l1pd = *pl1pd; 3613129198Scognet 3614129198Scognet if (l1pte_section_p(l1pd)) { 3615129198Scognet /* 3616129198Scognet * These should only happen for pmap_kernel() 3617129198Scognet */ 3618129198Scognet KASSERT(pm == pmap_kernel(), ("huh")); 3619129198Scognet pa = (l1pd & L1_S_FRAME) | (va & L1_S_OFFSET); 3620129198Scognet } else { 3621129198Scognet /* 3622129198Scognet * Note that we can't rely on the validity of the L1 3623129198Scognet * descriptor as an indication that a mapping exists. 3624129198Scognet * We have to look it up in the L2 dtable. 
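		 *
		 * (Index arithmetic, assuming the usual arm definitions of
		 * L1_IDX(va) == va >> 20, L2_IDX(l1idx) == l1idx >> 4 and
		 * L2_BUCKET(l1idx) == l1idx & 0x0f: for va = 0xc0123456,
		 * l1idx = 0xc01, so the bucket is
		 * pm_l2[0xc0]->l2_bucket[0x01], and
		 * l2pte_index(va) == (va >> 12) & 0xff == 0x23 picks the
		 * PTE within that bucket's page table.)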
3625129198Scognet */ 3626129198Scognet l2 = pm->pm_l2[L2_IDX(l1idx)]; 3627129198Scognet 3628129198Scognet if (l2 == NULL || 3629129198Scognet (ptep = l2->l2_bucket[L2_BUCKET(l1idx)].l2b_kva) == NULL) { 3630129198Scognet return (0); 3631129198Scognet } 3632129198Scognet 3633129198Scognet ptep = &ptep[l2pte_index(va)]; 3634129198Scognet pte = *ptep; 3635129198Scognet 3636129198Scognet if (pte == 0) 3637129198Scognet return (0); 3638129198Scognet 3639129198Scognet switch (pte & L2_TYPE_MASK) { 3640129198Scognet case L2_TYPE_L: 3641129198Scognet pa = (pte & L2_L_FRAME) | (va & L2_L_OFFSET); 3642129198Scognet break; 3643129198Scognet 3644129198Scognet default: 3645129198Scognet pa = (pte & L2_S_FRAME) | (va & L2_S_OFFSET); 3646129198Scognet break; 3647129198Scognet } 3648129198Scognet } 3649129198Scognet 3650129198Scognet return (pa); 3651129198Scognet} 3652129198Scognet 3653133453Salc/* 3654133453Salc * Atomically extract and hold the physical page with the given 3655133453Salc * pmap and virtual address pair if that mapping permits the given 3656133453Salc * protection. 3657133453Salc * 3658133453Salc */ 3659129198Scognetvm_page_t 3660129198Scognetpmap_extract_and_hold(pmap_t pmap, vm_offset_t va, vm_prot_t prot) 3661129198Scognet{ 3662135641Scognet struct l2_dtable *l2; 3663135641Scognet pd_entry_t *pl1pd, l1pd; 3664135641Scognet pt_entry_t *ptep, pte; 3665129198Scognet vm_paddr_t pa; 3666135641Scognet vm_page_t m = NULL; 3667135641Scognet u_int l1idx; 3668135641Scognet l1idx = L1_IDX(va); 3669135641Scognet pl1pd = &pmap->pm_l1->l1_kva[l1idx]; 3670135641Scognet l1pd = *pl1pd; 3671129198Scognet 3672135641Scognet vm_page_lock_queues(); 3673135641Scognet if (l1pte_section_p(l1pd)) { 3674135641Scognet /* 3675135641Scognet * These should only happen for pmap_kernel() 3676135641Scognet */ 3677135641Scognet KASSERT(pmap == pmap_kernel(), ("huh")); 3678135641Scognet pa = (l1pd & L1_S_FRAME) | (va & L1_S_OFFSET); 3679135641Scognet if (l1pd & L1_S_PROT_W || (prot & VM_PROT_WRITE) == 0) { 3680135641Scognet m = PHYS_TO_VM_PAGE(pa); 3681135641Scognet vm_page_hold(m); 3682135641Scognet } 3683135641Scognet 3684135641Scognet } else { 3685135641Scognet /* 3686135641Scognet * Note that we can't rely on the validity of the L1 3687135641Scognet * descriptor as an indication that a mapping exists. 3688135641Scognet * We have to look it up in the L2 dtable. 
3689135641Scognet */ 3690135641Scognet l2 = pmap->pm_l2[L2_IDX(l1idx)]; 3691135641Scognet 3692135641Scognet if (l2 == NULL || 3693135641Scognet (ptep = l2->l2_bucket[L2_BUCKET(l1idx)].l2b_kva) == NULL) { 3694135641Scognet return (NULL); 3695135641Scognet } 3696135641Scognet 3697135641Scognet ptep = &ptep[l2pte_index(va)]; 3698135641Scognet pte = *ptep; 3699135641Scognet 3700135641Scognet if (pte == 0) 3701135641Scognet return (NULL); 3702135641Scognet 3703135641Scognet if (pte & L2_S_PROT_W || (prot & VM_PROT_WRITE) == 0) { 3704135641Scognet switch (pte & L2_TYPE_MASK) { 3705135641Scognet case L2_TYPE_L: 3706135641Scognet pa = (pte & L2_L_FRAME) | (va & L2_L_OFFSET); 3707135641Scognet break; 3708135641Scognet 3709135641Scognet default: 3710135641Scognet pa = (pte & L2_S_FRAME) | (va & L2_S_OFFSET); 3711135641Scognet break; 3712135641Scognet } 3713135641Scognet m = PHYS_TO_VM_PAGE(pa); 3714135641Scognet vm_page_hold(m); 3715135641Scognet } 3716129198Scognet } 3717135641Scognet 3718135641Scognet vm_page_unlock_queues(); 3719129198Scognet return (m); 3720129198Scognet} 3721129198Scognet 3722129198Scognet/* 3723129198Scognet * Initialize a preallocated and zeroed pmap structure, 3724129198Scognet * such as one in a vmspace structure. 3725129198Scognet */ 3726129198Scognet 3727129198Scognetvoid 3728129198Scognetpmap_pinit(pmap_t pmap) 3729129198Scognet{ 3730129198Scognet PDEBUG(1, printf("pmap_pinit: pmap = %08x\n", (uint32_t) pmap)); 3731129198Scognet 3732129198Scognet pmap_alloc_l1(pmap); 3733129198Scognet bzero(pmap->pm_l2, sizeof(pmap->pm_l2)); 3734129198Scognet 3735129198Scognet LIST_INSERT_HEAD(&allpmaps, pmap, pm_list); 3736129198Scognet pmap->pm_count = 1; 3737129198Scognet pmap->pm_active = 0; 3738129198Scognet 3739144760Scognet TAILQ_INIT(&pmap->pm_pvlist); 3740129198Scognet bzero(&pmap->pm_stats, sizeof pmap->pm_stats); 3741129198Scognet pmap->pm_stats.resident_count = 1; 3742129198Scognet if (vector_page < KERNBASE) { 3743129198Scognet pmap_enter(pmap, vector_page, PHYS_TO_VM_PAGE(systempage.pv_pa), 3744129198Scognet VM_PROT_READ, 1); 3745129198Scognet } 3746129198Scognet} 3747129198Scognet 3748129198Scognet 3749129198Scognet/*************************************************** 3750129198Scognet * page management routines. 3751129198Scognet ***************************************************/ 3752129198Scognet 3753129198Scognet 3754135641Scognetstatic void 3755129198Scognetpmap_free_pv_entry(pv_entry_t pv) 3756129198Scognet{ 3757129198Scognet pv_entry_count--; 3758129198Scognet uma_zfree(pvzone, pv); 3759129198Scognet} 3760129198Scognet 3761129198Scognet 3762129198Scognet/* 3763129198Scognet * get a new pv_entry, allocating a block from the system 3764129198Scognet * when needed. 3765129198Scognet * the memory allocation is performed bypassing the malloc code 3766129198Scognet * because of the possibility of allocations at interrupt time. 
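 *
 * Entries come from the dedicated 'pvzone' UMA zone with M_NOWAIT;
 * when pv_entry_count passes pv_entry_high_water, the pagedaemon is
 * woken (via vm_pages_needed) so memory, and with it pv entries,
 * can be reclaimed.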
3767129198Scognet */ 3768129198Scognetstatic pv_entry_t 3769129198Scognetpmap_get_pv_entry(void) 3770129198Scognet{ 3771129198Scognet pv_entry_t ret_value; 3772129198Scognet 3773129198Scognet pv_entry_count++; 3774129198Scognet if (pv_entry_high_water && 3775129198Scognet (pv_entry_count > pv_entry_high_water) && 3776129198Scognet (pmap_pagedaemon_waken == 0)) { 3777129198Scognet pmap_pagedaemon_waken = 1; 3778129198Scognet wakeup (&vm_pages_needed); 3779129198Scognet } 3780129198Scognet ret_value = uma_zalloc(pvzone, M_NOWAIT); 3781129198Scognet return ret_value; 3782129198Scognet} 3783129198Scognet 3784129198Scognet 3785129198Scognet/* 3786129198Scognet * Remove the given range of addresses from the specified map. 3787129198Scognet * 3788129198Scognet * It is assumed that the start and end are properly 3789129198Scognet * rounded to the page size. 3790129198Scognet */ 3791129198Scognet#define PMAP_REMOVE_CLEAN_LIST_SIZE 3 3792129198Scognetvoid 3793129198Scognetpmap_remove(pmap_t pm, vm_offset_t sva, vm_offset_t eva) 3794129198Scognet{ 3795129198Scognet struct l2_bucket *l2b; 3796129198Scognet vm_offset_t next_bucket; 3797129198Scognet pt_entry_t *ptep; 3798129198Scognet u_int cleanlist_idx, total, cnt; 3799129198Scognet struct { 3800129198Scognet vm_offset_t va; 3801129198Scognet pt_entry_t *pte; 3802129198Scognet } cleanlist[PMAP_REMOVE_CLEAN_LIST_SIZE]; 3803129198Scognet u_int mappings, is_exec, is_refd; 3804135641Scognet int flushall = 0; 3805129198Scognet 3806129198Scognet 3807129198Scognet /* 3808129198Scognet * we lock in the pmap => pv_head direction 3809129198Scognet */ 3810129198Scognet#if 0 3811129198Scognet PMAP_MAP_TO_HEAD_LOCK(); 3812129198Scognet pmap_acquire_pmap_lock(pm); 3813129198Scognet#endif 3814129198Scognet 3815137664Scognet vm_page_lock_queues(); 3816135641Scognet if (!pmap_is_current(pm)) { 3817129198Scognet cleanlist_idx = PMAP_REMOVE_CLEAN_LIST_SIZE + 1; 3818129198Scognet } else 3819129198Scognet cleanlist_idx = 0; 3820129198Scognet 3821129198Scognet total = 0; 3822129198Scognet while (sva < eva) { 3823129198Scognet /* 3824129198Scognet * Do one L2 bucket's worth at a time. 3825129198Scognet */ 3826129198Scognet next_bucket = L2_NEXT_BUCKET(sva); 3827129198Scognet if (next_bucket > eva) 3828129198Scognet next_bucket = eva; 3829129198Scognet 3830129198Scognet l2b = pmap_get_l2_bucket(pm, sva); 3831129198Scognet if (l2b == NULL) { 3832129198Scognet sva = next_bucket; 3833129198Scognet continue; 3834129198Scognet } 3835129198Scognet 3836129198Scognet ptep = &l2b->l2b_kva[l2pte_index(sva)]; 3837129198Scognet mappings = 0; 3838129198Scognet 3839129198Scognet while (sva < next_bucket) { 3840129198Scognet struct vm_page *pg; 3841129198Scognet pt_entry_t pte; 3842129198Scognet vm_paddr_t pa; 3843129198Scognet 3844129198Scognet pte = *ptep; 3845129198Scognet 3846129198Scognet if (pte == 0) { 3847129198Scognet /* 3848129198Scognet * Nothing here, move along 3849129198Scognet */ 3850129198Scognet sva += PAGE_SIZE; 3851129198Scognet ptep++; 3852129198Scognet continue; 3853129198Scognet } 3854129198Scognet 3855129198Scognet pm->pm_stats.resident_count--; 3856129198Scognet pa = l2pte_pa(pte); 3857129198Scognet is_exec = 0; 3858129198Scognet is_refd = 1; 3859129198Scognet 3860129198Scognet /* 3861129198Scognet * Update flags. In a number of circumstances, 3862129198Scognet * we could cluster a lot of these and do a 3863129198Scognet * number of sequential pages in one go. 
3864129198Scognet			 */
3865129198Scognet			if ((pg = PHYS_TO_VM_PAGE(pa)) != NULL) {
3866129198Scognet				struct pv_entry *pve;
3867129198Scognet#if 0
3868129198Scognet				simple_lock(&pg->mdpage.pvh_slock);
3869129198Scognet#endif
3870129198Scognet				pve = pmap_remove_pv(pg, pm, sva);
3871135641Scognet				if (pve) {
3872129198Scognet#if 0
3873129198Scognet					simple_unlock(&pg->mdpage.pvh_slock);
3874129198Scognet#endif
3875129198Scognet					is_exec =
3876129198Scognet					    PV_BEEN_EXECD(pve->pv_flags);
3877129198Scognet					is_refd =
3878129198Scognet					    PV_BEEN_REFD(pve->pv_flags);
3879129198Scognet					pmap_free_pv_entry(pve);
3880129198Scognet				}
3881129198Scognet			}
3882129198Scognet
3883129198Scognet			if (!l2pte_valid(pte)) {
3884129198Scognet				*ptep = 0;
3885129198Scognet				PTE_SYNC_CURRENT(pm, ptep);
3886129198Scognet				sva += PAGE_SIZE;
3887129198Scognet				ptep++;
3888129198Scognet				mappings++;
3889129198Scognet				continue;
3890129198Scognet			}
3891129198Scognet
3892129198Scognet			if (cleanlist_idx < PMAP_REMOVE_CLEAN_LIST_SIZE) {
3893129198Scognet				/* Add to the clean list. */
3894129198Scognet				cleanlist[cleanlist_idx].pte = ptep;
3895129198Scognet				cleanlist[cleanlist_idx].va =
3896129198Scognet				    sva | (is_exec & 1);
3897129198Scognet				cleanlist_idx++;
3898129198Scognet			} else
3899129198Scognet			if (cleanlist_idx == PMAP_REMOVE_CLEAN_LIST_SIZE) {
3900129198Scognet				/* Nuke everything if needed. */
3901129198Scognet				pmap_idcache_wbinv_all(pm);
3902129198Scognet				pmap_tlb_flushID(pm);
3903129198Scognet
3904129198Scognet				/*
3905129198Scognet				 * Roll back the previous PTE list,
3906129198Scognet				 * and zero out the current PTE.
3907129198Scognet				 */
3908129198Scognet				for (cnt = 0;
3909129198Scognet				    cnt < PMAP_REMOVE_CLEAN_LIST_SIZE; cnt++) {
3910129198Scognet					*cleanlist[cnt].pte = 0;
3911129198Scognet				}
3912129198Scognet				*ptep = 0;
3913129198Scognet				PTE_SYNC(ptep);
3914129198Scognet				cleanlist_idx++;
3915135641Scognet				flushall = 1;
3916129198Scognet			} else {
3917129198Scognet				*ptep = 0;
3918129198Scognet				PTE_SYNC(ptep);
3919129198Scognet				if (is_exec)
3920129198Scognet					pmap_tlb_flushID_SE(pm, sva);
3921129198Scognet				else
3922129198Scognet				if (is_refd)
3923129198Scognet					pmap_tlb_flushD_SE(pm, sva);
3924129198Scognet			}
3925129198Scognet
3926129198Scognet			sva += PAGE_SIZE;
3927129198Scognet			ptep++;
3928129198Scognet			mappings++;
3929129198Scognet		}
3930129198Scognet
3931129198Scognet		/*
3932129198Scognet		 * Deal with any leftovers
3933129198Scognet		 */
3934129198Scognet		if (cleanlist_idx <= PMAP_REMOVE_CLEAN_LIST_SIZE) {
3935129198Scognet			total += cleanlist_idx;
3936129198Scognet			for (cnt = 0; cnt < cleanlist_idx; cnt++) {
3937135641Scognet				vm_offset_t clva =
3938135641Scognet				    cleanlist[cnt].va & ~1;
3939135641Scognet				if (cleanlist[cnt].va & 1) {
3940135641Scognet					pmap_idcache_wbinv_range(pm,
3941135641Scognet					    clva, PAGE_SIZE);
3942135641Scognet					pmap_tlb_flushID_SE(pm, clva);
3943135641Scognet				} else {
3944135641Scognet					pmap_dcache_wb_range(pm,
3945135641Scognet					    clva, PAGE_SIZE, TRUE,
3946135641Scognet					    FALSE);
3947135641Scognet					pmap_tlb_flushD_SE(pm, clva);
3948129198Scognet				}
3949129198Scognet				*cleanlist[cnt].pte = 0;
3950129198Scognet				PTE_SYNC_CURRENT(pm, cleanlist[cnt].pte);
3951129198Scognet			}
3952129198Scognet
3953129198Scognet			if (total <= PMAP_REMOVE_CLEAN_LIST_SIZE)
3954129198Scognet				cleanlist_idx = 0;
3955129198Scognet			else {
3956144760Scognet				/*
3957144760Scognet				 * We are removing so many entries it's just
3958144760Scognet				 * easier to flush the whole cache.
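				 *
				 * (Strategy recap: up to
				 * PMAP_REMOVE_CLEAN_LIST_SIZE (3) PTEs per
				 * L2 bucket are batched on 'cleanlist' and
				 * cleaned and flushed individually; once a
				 * bucket exceeds that, the code falls back
				 * to a full cache clean and TLB flush and
				 * skips per-page cache maintenance for the
				 * rest of the range.)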
3959144760Scognet */ 3960129198Scognet cleanlist_idx = PMAP_REMOVE_CLEAN_LIST_SIZE + 1; 3961129198Scognet pmap_idcache_wbinv_all(pm); 3962135641Scognet flushall = 1; 3963129198Scognet } 3964129198Scognet } 3965129198Scognet 3966129198Scognet pmap_free_l2_bucket(pm, l2b, mappings); 3967129198Scognet } 3968129198Scognet 3969137664Scognet vm_page_unlock_queues(); 3970135641Scognet if (flushall) 3971135641Scognet cpu_tlb_flushID(); 3972129198Scognet#if 0 3973129198Scognet pmap_release_pmap_lock(pm); 3974129198Scognet PMAP_MAP_TO_HEAD_UNLOCK(); 3975129198Scognet#endif 3976129198Scognet} 3977129198Scognet 3978129198Scognet 3979129198Scognet 3980129198Scognet 3981129198Scognet/* 3982129198Scognet * pmap_zero_page() 3983129198Scognet * 3984129198Scognet * Zero a given physical page by mapping it at a page hook point. 3985129198Scognet * In doing the zero page op, the page we zero is mapped cachable, as with 3986129198Scognet * StrongARM accesses to non-cached pages are non-burst making writing 3987129198Scognet * _any_ bulk data very slow. 3988129198Scognet */ 3989129198Scognet#if (ARM_MMU_GENERIC + ARM_MMU_SA1) != 0 3990129198Scognetvoid 3991129198Scognetpmap_zero_page_generic(vm_paddr_t phys, int off, int size) 3992129198Scognet{ 3993129198Scognet#ifdef DEBUG 3994129198Scognet struct vm_page *pg = PHYS_TO_VM_PAGE(phys); 3995129198Scognet 3996129198Scognet if (pg->md.pvh_list != NULL) 3997129198Scognet panic("pmap_zero_page: page has mappings"); 3998129198Scognet#endif 3999129198Scognet 4000129198Scognet 4001129198Scognet /* 4002129198Scognet * Hook in the page, zero it, and purge the cache for that 4003129198Scognet * zeroed page. Invalidate the TLB as needed. 4004129198Scognet */ 4005129198Scognet *cdst_pte = L2_S_PROTO | phys | 4006129198Scognet L2_S_PROT(PTE_KERNEL, VM_PROT_WRITE) | pte_l2_s_cache_mode; 4007129198Scognet PTE_SYNC(cdst_pte); 4008129198Scognet cpu_tlb_flushD_SE(cdstp); 4009129198Scognet cpu_cpwait(); 4010135641Scognet if (off || size != PAGE_SIZE) 4011129198Scognet bzero((void *)(cdstp + off), size); 4012129198Scognet else 4013129198Scognet bzero_page(cdstp); 4014129198Scognet cpu_dcache_wbinv_range(cdstp, PAGE_SIZE); 4015129198Scognet} 4016129198Scognet#endif /* (ARM_MMU_GENERIC + ARM_MMU_SA1) != 0 */ 4017129198Scognet 4018129198Scognet#if ARM_MMU_XSCALE == 1 4019129198Scognetvoid 4020129198Scognetpmap_zero_page_xscale(vm_paddr_t phys, int off, int size) 4021129198Scognet{ 4022129198Scognet /* 4023129198Scognet * Hook in the page, zero it, and purge the cache for that 4024129198Scognet * zeroed page. Invalidate the TLB as needed. 4025129198Scognet */ 4026129198Scognet *cdst_pte = L2_S_PROTO | phys | 4027129198Scognet L2_S_PROT(PTE_KERNEL, VM_PROT_WRITE) | 4028129198Scognet L2_C | L2_XSCALE_T_TEX(TEX_XSCALE_X); /* mini-data */ 4029129198Scognet PTE_SYNC(cdst_pte); 4030129198Scognet cpu_tlb_flushD_SE(cdstp); 4031129198Scognet cpu_cpwait(); 4032135641Scognet if (off || size != PAGE_SIZE) 4033129198Scognet bzero((void *)(cdstp + off), size); 4034129198Scognet else 4035129198Scognet bzero_page(cdstp); 4036129198Scognet xscale_cache_clean_minidata(); 4037129198Scognet} 4038129198Scognet 4039129198Scognet/* 4040129198Scognet * Change the PTEs for the specified kernel mappings such that they 4041129198Scognet * will use the mini data cache instead of the main data cache. 
4042129198Scognet */ 4043129198Scognetvoid 4044135641Scognetpmap_use_minicache(vm_offset_t va, vm_size_t size) 4045129198Scognet{ 4046129198Scognet struct l2_bucket *l2b; 4047129198Scognet pt_entry_t *ptep, *sptep, pte; 4048129198Scognet vm_offset_t next_bucket, eva; 4049129198Scognet 4050129198Scognet#if (ARM_NMMUS > 1) 4051129198Scognet if (xscale_use_minidata == 0) 4052129198Scognet return; 4053129198Scognet#endif 4054129198Scognet 4055135641Scognet eva = va + size; 4056129198Scognet 4057129198Scognet while (va < eva) { 4058129198Scognet next_bucket = L2_NEXT_BUCKET(va); 4059129198Scognet if (next_bucket > eva) 4060129198Scognet next_bucket = eva; 4061129198Scognet 4062129198Scognet l2b = pmap_get_l2_bucket(pmap_kernel(), va); 4063129198Scognet 4064129198Scognet sptep = ptep = &l2b->l2b_kva[l2pte_index(va)]; 4065129198Scognet 4066129198Scognet while (va < next_bucket) { 4067129198Scognet pte = *ptep; 4068129198Scognet if (!l2pte_minidata(pte)) { 4069129198Scognet cpu_dcache_wbinv_range(va, PAGE_SIZE); 4070129198Scognet cpu_tlb_flushD_SE(va); 4071129198Scognet *ptep = pte & ~L2_B; 4072129198Scognet } 4073129198Scognet ptep++; 4074129198Scognet va += PAGE_SIZE; 4075129198Scognet } 4076129198Scognet PTE_SYNC_RANGE(sptep, (u_int)(ptep - sptep)); 4077129198Scognet } 4078129198Scognet cpu_cpwait(); 4079129198Scognet} 4080129198Scognet#endif /* ARM_MMU_XSCALE == 1 */ 4081129198Scognet 4082129198Scognet/* 4083129198Scognet * pmap_zero_page zeros the specified hardware page by mapping 4084129198Scognet * the page into KVM and using bzero to clear its contents. 4085129198Scognet */ 4086129198Scognetvoid 4087129198Scognetpmap_zero_page(vm_page_t m) 4088129198Scognet{ 4089135641Scognet pmap_zero_page_func(VM_PAGE_TO_PHYS(m), 0, PAGE_SIZE); 4090129198Scognet} 4091129198Scognet 4092129198Scognet 4093129198Scognet/* 4094129198Scognet * pmap_zero_page_area zeros the specified hardware page by mapping 4095129198Scognet * the page into KVM and using bzero to clear its contents. 4096129198Scognet * 4097129198Scognet * off and size may not cover an area beyond a single hardware page. 4098129198Scognet */ 4099129198Scognetvoid 4100129198Scognetpmap_zero_page_area(vm_page_t m, int off, int size) 4101129198Scognet{ 4102129198Scognet 4103129198Scognet pmap_zero_page_func(VM_PAGE_TO_PHYS(m), off, size); 4104129198Scognet} 4105129198Scognet 4106129198Scognet 4107129198Scognet/* 4108129198Scognet * pmap_zero_page_idle zeros the specified hardware page by mapping 4109129198Scognet * the page into KVM and using bzero to clear its contents. This 4110129198Scognet * is intended to be called from the vm_pagezero process only and 4111129198Scognet * outside of Giant. 4112129198Scognet */ 4113129198Scognetvoid 4114129198Scognetpmap_zero_page_idle(vm_page_t m) 4115129198Scognet{ 4116129198Scognet 4117129198Scognet pmap_zero_page(m); 4118129198Scognet} 4119129198Scognet 4120129198Scognet/* 4121129198Scognet * pmap_clean_page() 4122129198Scognet * 4123129198Scognet * This is a local function used to work out the best strategy to clean 4124129198Scognet * a single page referenced by its entry in the PV table. It's used by 4125129198Scognet * pmap_copy_page, pmap_zero page and maybe some others later on. 4126129198Scognet * 4127129198Scognet * Its policy is effectively: 4128129198Scognet * o If there are no mappings, we don't bother doing anything with the cache. 4129129198Scognet * o If there is one mapping, we clean just that page. 4130129198Scognet * o If there are multiple mappings, we clean the entire cache. 
4131129198Scognet * 4132129198Scognet * So that some functions can be further optimised, it returns 0 if it didn't 4133129198Scognet * clean the entire cache, or 1 if it did. 4134129198Scognet * 4135129198Scognet * XXX One bug in this routine is that if the pv_entry has a single page 4136129198Scognet * mapped at 0x00000000 a whole cache clean will be performed rather than 4137129198Scognet * just the 1 page. Since this should not occur in everyday use and if it does 4138129198Scognet * it will just result in not the most efficient clean for the page. 4139129198Scognet */ 4140129198Scognetstatic int 4141129198Scognetpmap_clean_page(struct pv_entry *pv, boolean_t is_src) 4142129198Scognet{ 4143129198Scognet pmap_t pm, pm_to_clean = NULL; 4144129198Scognet struct pv_entry *npv; 4145129198Scognet u_int cache_needs_cleaning = 0; 4146129198Scognet u_int flags = 0; 4147129198Scognet vm_offset_t page_to_clean = 0; 4148129198Scognet 4149129198Scognet if (pv == NULL) { 4150129198Scognet /* nothing mapped in so nothing to flush */ 4151129198Scognet return (0); 4152129198Scognet } 4153129198Scognet 4154129198Scognet /* 4155129198Scognet * Since we flush the cache each time we change to a different 4156129198Scognet * user vmspace, we only need to flush the page if it is in the 4157129198Scognet * current pmap. 4158129198Scognet */ 4159135641Scognet if (curthread) 4160135641Scognet pm = vmspace_pmap(curproc->p_vmspace); 4161129198Scognet else 4162129198Scognet pm = pmap_kernel(); 4163129198Scognet 4164129198Scognet for (npv = pv; npv; npv = TAILQ_NEXT(npv, pv_list)) { 4165129198Scognet if (npv->pv_pmap == pmap_kernel() || npv->pv_pmap == pm) { 4166129198Scognet flags |= npv->pv_flags; 4167129198Scognet /* 4168129198Scognet * The page is mapped non-cacheable in 4169129198Scognet * this map. No need to flush the cache. 4170129198Scognet */ 4171129198Scognet if (npv->pv_flags & PVF_NC) { 4172129198Scognet#ifdef DIAGNOSTIC 4173129198Scognet if (cache_needs_cleaning) 4174129198Scognet panic("pmap_clean_page: " 4175129198Scognet "cache inconsistency"); 4176129198Scognet#endif 4177129198Scognet break; 4178129198Scognet } else if (is_src && (npv->pv_flags & PVF_WRITE) == 0) 4179129198Scognet continue; 4180129198Scognet if (cache_needs_cleaning) { 4181129198Scognet page_to_clean = 0; 4182129198Scognet break; 4183129198Scognet } else { 4184129198Scognet page_to_clean = npv->pv_va; 4185129198Scognet pm_to_clean = npv->pv_pmap; 4186129198Scognet } 4187129198Scognet cache_needs_cleaning = 1; 4188129198Scognet } 4189129198Scognet } 4190129198Scognet if (page_to_clean) { 4191129198Scognet if (PV_BEEN_EXECD(flags)) 4192129198Scognet pmap_idcache_wbinv_range(pm_to_clean, page_to_clean, 4193129198Scognet PAGE_SIZE); 4194129198Scognet else 4195129198Scognet pmap_dcache_wb_range(pm_to_clean, page_to_clean, 4196129198Scognet PAGE_SIZE, !is_src, (flags & PVF_WRITE) == 0); 4197129198Scognet } else if (cache_needs_cleaning) { 4198129198Scognet if (PV_BEEN_EXECD(flags)) 4199129198Scognet pmap_idcache_wbinv_all(pm); 4200129198Scognet else 4201129198Scognet pmap_dcache_wbinv_all(pm); 4202129198Scognet return (1); 4203129198Scognet } 4204129198Scognet return (0); 4205129198Scognet} 4206129198Scognet 4207129198Scognet/* 4208129198Scognet * pmap_copy_page copies the specified (machine independent) 4209129198Scognet * page by mapping the page into virtual memory and using 4210129198Scognet * bcopy to copy the page, one machine dependent page at a 4211129198Scognet * time. 
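 *
 * (The "hook points" mentioned below are two reserved kernel virtual
 * pages, csrcp and cdstp, whose PTEs, csrc_pte and cdst_pte, are
 * rewritten for each copy: the source page is mapped read-only, the
 * destination read/write, stale TLB entries are flushed, and
 * bcopy_page() performs the actual copy.)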
4212129198Scognet */ 4213129198Scognet 4214129198Scognet/* 4215129198Scognet * pmap_copy_page() 4216129198Scognet * 4217129198Scognet * Copy one physical page into another, by mapping the pages into 4218129198Scognet * hook points. The same comment regarding cachability as in 4219129198Scognet * pmap_zero_page also applies here. 4220129198Scognet */ 4221129198Scognet#if (ARM_MMU_GENERIC + ARM_MMU_SA1) != 0 4222129198Scognetvoid 4223129198Scognetpmap_copy_page_generic(vm_paddr_t src, vm_paddr_t dst) 4224129198Scognet{ 4225129198Scognet struct vm_page *src_pg = PHYS_TO_VM_PAGE(src); 4226129198Scognet#ifdef DEBUG 4227129198Scognet struct vm_page *dst_pg = PHYS_TO_VM_PAGE(dst); 4228129198Scognet 4229129198Scognet if (dst_pg->md.pvh_list != NULL) 4230129198Scognet panic("pmap_copy_page: dst page has mappings"); 4231129198Scognet#endif 4232129198Scognet 4233129198Scognet 4234129198Scognet /* 4235129198Scognet * Clean the source page. Hold the source page's lock for 4236129198Scognet * the duration of the copy so that no other mappings can 4237129198Scognet * be created while we have a potentially aliased mapping. 4238129198Scognet */ 4239129198Scognet#if 0 4240129198Scognet mtx_lock(&src_pg->md.pvh_mtx); 4241129198Scognet#endif 4242129198Scognet (void) pmap_clean_page(TAILQ_FIRST(&src_pg->md.pv_list), TRUE); 4243129198Scognet 4244129198Scognet /* 4245129198Scognet * Map the pages into the page hook points, copy them, and purge 4246129198Scognet * the cache for the appropriate page. Invalidate the TLB 4247129198Scognet * as required. 4248129198Scognet */ 4249129198Scognet *csrc_pte = L2_S_PROTO | src | 4250129198Scognet L2_S_PROT(PTE_KERNEL, VM_PROT_READ) | pte_l2_s_cache_mode; 4251129198Scognet PTE_SYNC(csrc_pte); 4252129198Scognet *cdst_pte = L2_S_PROTO | dst | 4253129198Scognet L2_S_PROT(PTE_KERNEL, VM_PROT_WRITE) | pte_l2_s_cache_mode; 4254129198Scognet PTE_SYNC(cdst_pte); 4255129198Scognet cpu_tlb_flushD_SE(csrcp); 4256129198Scognet cpu_tlb_flushD_SE(cdstp); 4257129198Scognet cpu_cpwait(); 4258129198Scognet bcopy_page(csrcp, cdstp); 4259129198Scognet cpu_dcache_inv_range(csrcp, PAGE_SIZE); 4260129198Scognet#if 0 4261129198Scognet mtx_lock(&src_pg->md.pvh_mtx); 4262129198Scognet#endif 4263129198Scognet cpu_dcache_wbinv_range(cdstp, PAGE_SIZE); 4264129198Scognet} 4265129198Scognet#endif /* (ARM_MMU_GENERIC + ARM_MMU_SA1) != 0 */ 4266129198Scognet 4267129198Scognet#if ARM_MMU_XSCALE == 1 4268129198Scognetvoid 4269129198Scognetpmap_copy_page_xscale(vm_paddr_t src, vm_paddr_t dst) 4270129198Scognet{ 4271129198Scognet struct vm_page *src_pg = PHYS_TO_VM_PAGE(src); 4272129198Scognet#ifdef DEBUG 4273129198Scognet struct vm_page *dst_pg = PHYS_TO_VM_PAGE(dst); 4274129198Scognet 4275129198Scognet if (dst_pg->md.pvh_list != NULL) 4276129198Scognet panic("pmap_copy_page: dst page has mappings"); 4277129198Scognet#endif 4278129198Scognet 4279129198Scognet 4280129198Scognet /* 4281129198Scognet * Clean the source page. Hold the source page's lock for 4282129198Scognet * the duration of the copy so that no other mappings can 4283129198Scognet * be created while we have a potentially aliased mapping. 4284129198Scognet */ 4285130745Scognet (void) pmap_clean_page(TAILQ_FIRST(&src_pg->md.pv_list), TRUE); 4286129198Scognet 4287129198Scognet /* 4288129198Scognet * Map the pages into the page hook points, copy them, and purge 4289129198Scognet * the cache for the appropriate page. Invalidate the TLB 4290129198Scognet * as required. 

#if ARM_MMU_XSCALE == 1
void
pmap_copy_page_xscale(vm_paddr_t src, vm_paddr_t dst)
{
	struct vm_page *src_pg = PHYS_TO_VM_PAGE(src);
#ifdef DEBUG
	struct vm_page *dst_pg = PHYS_TO_VM_PAGE(dst);

	if (dst_pg->md.pvh_list != NULL)
		panic("pmap_copy_page: dst page has mappings");
#endif

	/*
	 * Clean the source page.  Hold the source page's lock for
	 * the duration of the copy so that no other mappings can
	 * be created while we have a potentially aliased mapping.
	 */
	(void) pmap_clean_page(TAILQ_FIRST(&src_pg->md.pv_list), TRUE);

	/*
	 * Map the pages into the page hook points, copy them, and purge
	 * the cache for the appropriate page.  Invalidate the TLB
	 * as required.
	 */
	*csrc_pte = L2_S_PROTO | src |
	    L2_S_PROT(PTE_KERNEL, VM_PROT_READ) |
	    L2_C | L2_XSCALE_T_TEX(TEX_XSCALE_X);	/* mini-data */
	PTE_SYNC(csrc_pte);
	*cdst_pte = L2_S_PROTO | dst |
	    L2_S_PROT(PTE_KERNEL, VM_PROT_WRITE) |
	    L2_C | L2_XSCALE_T_TEX(TEX_XSCALE_X);	/* mini-data */
	PTE_SYNC(cdst_pte);
	cpu_tlb_flushD_SE(csrcp);
	cpu_tlb_flushD_SE(cdstp);
	cpu_cpwait();
	bcopy_page(csrcp, cdstp);
	xscale_cache_clean_minidata();
}
#endif /* ARM_MMU_XSCALE == 1 */

void
pmap_copy_page(vm_page_t src, vm_page_t dst)
{
	cpu_dcache_wbinv_all();
	pmap_copy_page_func(VM_PAGE_TO_PHYS(src), VM_PAGE_TO_PHYS(dst));
}

/*
 * This routine returns TRUE if the given physical page has a mapping
 * in the given pmap.
 */
boolean_t
pmap_page_exists_quick(pmap_t pmap, vm_page_t m)
{
	pv_entry_t pv;
	int loops = 0;
	int s;

	if (m->flags & PG_FICTITIOUS)
		return (FALSE);

	s = splvm();

	/*
	 * Check the page's current mappings, returning immediately if
	 * one belongs to the given pmap; give up after 16 entries.
	 */
	for (pv = TAILQ_FIRST(&m->md.pv_list);
	    pv;
	    pv = TAILQ_NEXT(pv, pv_list)) {
		if (pv->pv_pmap == pmap) {
			splx(s);
			return (TRUE);
		}
		loops++;
		if (loops >= 16)
			break;
	}
	splx(s);
	return (FALSE);
}

/*
 * pmap_ts_referenced:
 *
 *	Return the count of reference bits for a page, clearing all of them.
 */
int
pmap_ts_referenced(vm_page_t m)
{
	return (pmap_clearbit(m, PVF_REF));
}


boolean_t
pmap_is_modified(vm_page_t m)
{

	if (m->md.pvh_attrs & PVF_MOD)
		return (TRUE);

	return (FALSE);
}


/*
 * Clear the modify bits on the specified physical page.
 */
void
pmap_clear_modify(vm_page_t m)
{

	if (m->md.pvh_attrs & PVF_MOD)
		pmap_clearbit(m, PVF_MOD);
}
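
/*
 * Illustrative sketch, not part of the original source: how the
 * reference/modify routines above combine in a pageout-style scan.
 * The referenced state is harvested (and cleared) with
 * pmap_ts_referenced(), then the modified state is tested and cleared.
 * example_page_is_dirty() is a hypothetical helper, not a real VM
 * entry point.
 */
#if 0
static boolean_t
example_page_is_dirty(vm_page_t m)
{
	/* Harvest (and clear) the referenced state first... */
	(void) pmap_ts_referenced(m);

	/* ...then test and clear the modified state. */
	if (pmap_is_modified(m)) {
		pmap_clear_modify(m);
		return (TRUE);	/* page must be written before reuse */
	}
	return (FALSE);
}
#endif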

/*
 * pmap_clear_reference:
 *
 *	Clear the reference bit on the specified physical page.
 */
void
pmap_clear_reference(vm_page_t m)
{

	if (m->md.pvh_attrs & PVF_REF)
		pmap_clearbit(m, PVF_REF);
}


/*
 * Perform the pmap work for mincore().  XXX: not yet implemented;
 * always reports 0 for the page.
 */
int
pmap_mincore(pmap_t pmap, vm_offset_t addr)
{
	printf("pmap_mincore()\n");

	return (0);
}


vm_offset_t
pmap_addr_hint(vm_object_t obj, vm_offset_t addr, vm_size_t size)
{

	return (addr);
}


/*
 * Map a set of physical memory pages into the kernel virtual
 * address space.  Return a pointer to where it is mapped.  This
 * routine is intended to be used for mapping device memory,
 * NOT real memory.
 */
void *
pmap_mapdev(vm_offset_t pa, vm_size_t size)
{
	vm_offset_t va, tmpva, offset;

	/*
	 * Handle a physical address that is not page-aligned: map from
	 * the containing page boundary and return a pointer at the
	 * right offset into the mapping.
	 */
	offset = pa & PAGE_MASK;
	pa = trunc_page(pa);
	size = roundup(offset + size, PAGE_SIZE);

	GIANT_REQUIRED;

	va = kmem_alloc_nofault(kernel_map, size);
	if (!va)
		panic("pmap_mapdev: Couldn't alloc kernel virtual memory");
	for (tmpva = va; size > 0;) {
		pmap_kenter_internal(tmpva, pa, 0);
		size -= PAGE_SIZE;
		tmpva += PAGE_SIZE;
		pa += PAGE_SIZE;
	}

	return ((void *)(va + offset));
}

#define BOOTSTRAP_DEBUG

/*
 * pmap_map_section:
 *
 *	Create a single section mapping.
 */
void
pmap_map_section(vm_offset_t l1pt, vm_offset_t va, vm_offset_t pa,
    int prot, int cache)
{
	pd_entry_t *pde = (pd_entry_t *) l1pt;
	pd_entry_t fl;

	KASSERT(((va | pa) & L1_S_OFFSET) == 0,
	    ("pmap_map_section: va/pa not section-aligned"));

	switch (cache) {
	case PTE_NOCACHE:
	default:
		fl = 0;
		break;

	case PTE_CACHE:
		fl = pte_l1_s_cache_mode;
		break;

	case PTE_PAGETABLE:
		fl = pte_l1_s_cache_mode_pt;
		break;
	}

	pde[va >> L1_S_SHIFT] = L1_S_PROTO | pa |
	    L1_S_PROT(PTE_KERNEL, prot) | fl | L1_S_DOM(PMAP_DOMAIN_KERNEL);
	PTE_SYNC(&pde[va >> L1_S_SHIFT]);
}
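
/*
 * Illustrative sketch, not part of the original source: creating a
 * single 1MB section mapping during bootstrap with pmap_map_section().
 * The virtual and physical addresses below are hypothetical
 * placeholders (both must be section-aligned), as is the helper name.
 */
#if 0
static void
example_map_device_section(vm_offset_t l1pt)
{
	/* Map 1MB of hypothetical device space, uncached, read/write. */
	pmap_map_section(l1pt, 0xd0000000 /* va */, 0x40000000 /* pa */,
	    VM_PROT_READ | VM_PROT_WRITE, PTE_NOCACHE);
}
#endif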

/*
 * pmap_link_l2pt:
 *
 *	Link the L2 page table specified by l2pv->pv_pa into the L1
 *	page table at the slot for "va".
 */
void
pmap_link_l2pt(vm_offset_t l1pt, vm_offset_t va, struct pv_addr *l2pv)
{
	pd_entry_t *pde = (pd_entry_t *) l1pt, proto;
	u_int slot = va >> L1_S_SHIFT;

#ifndef ARM32_NEW_VM_LAYOUT
	KASSERT((va & ((L1_S_SIZE * 4) - 1)) == 0,
	    ("pmap_link_l2pt: va not 4MB-aligned"));
	KASSERT((l2pv->pv_pa & PAGE_MASK) == 0,
	    ("pmap_link_l2pt: L2 table not page-aligned"));
#endif

	proto = L1_S_DOM(PMAP_DOMAIN_KERNEL) | L1_C_PROTO;

	pde[slot + 0] = proto | (l2pv->pv_pa + 0x000);
#ifdef ARM32_NEW_VM_LAYOUT
	PTE_SYNC(&pde[slot]);
#else
	pde[slot + 1] = proto | (l2pv->pv_pa + 0x400);
	pde[slot + 2] = proto | (l2pv->pv_pa + 0x800);
	pde[slot + 3] = proto | (l2pv->pv_pa + 0xc00);
	PTE_SYNC_RANGE(&pde[slot + 0], 4);
#endif

	SLIST_INSERT_HEAD(&kernel_pt_list, l2pv, pv_list);
}

/*
 * pmap_map_entry
 *
 *	Create a single page mapping.
 */
void
pmap_map_entry(vm_offset_t l1pt, vm_offset_t va, vm_offset_t pa, int prot,
    int cache)
{
	pd_entry_t *pde = (pd_entry_t *) l1pt;
	pt_entry_t fl;
	pt_entry_t *pte;

	KASSERT(((va | pa) & PAGE_MASK) == 0,
	    ("pmap_map_entry: va/pa not page-aligned"));

	switch (cache) {
	case PTE_NOCACHE:
	default:
		fl = 0;
		break;

	case PTE_CACHE:
		fl = pte_l2_s_cache_mode;
		break;

	case PTE_PAGETABLE:
		fl = pte_l2_s_cache_mode_pt;
		break;
	}

	if ((pde[va >> L1_S_SHIFT] & L1_TYPE_MASK) != L1_TYPE_C)
		panic("pmap_map_entry: no L2 table for VA 0x%08x", va);

#ifndef ARM32_NEW_VM_LAYOUT
	pte = (pt_entry_t *)
	    kernel_pt_lookup(pde[va >> L1_S_SHIFT] & L2_S_FRAME);
#else
	pte = (pt_entry_t *) kernel_pt_lookup(pde[L1_IDX(va)] & L1_C_ADDR_MASK);
#endif

	if (pte == NULL)
		panic("pmap_map_entry: can't find L2 table for VA 0x%08x", va);

#ifndef ARM32_NEW_VM_LAYOUT
	pte[(va >> PAGE_SHIFT) & 0x3ff] =
	    L2_S_PROTO | pa | L2_S_PROT(PTE_KERNEL, prot) | fl;
	PTE_SYNC(&pte[(va >> PAGE_SHIFT) & 0x3ff]);
#else
	pte[l2pte_index(va)] =
	    L2_S_PROTO | pa | L2_S_PROT(PTE_KERNEL, prot) | fl;
	PTE_SYNC(&pte[l2pte_index(va)]);
#endif
}
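
/*
 * Illustrative sketch, not part of the original source: a single-page
 * bootstrap mapping.  pmap_map_entry() panics unless an L2 table is
 * already linked into the L1 slot covering the VA, so pmap_link_l2pt()
 * must run first (it also puts the table on kernel_pt_list, which is
 * how kernel_pt_lookup() later finds it).  The addresses and the
 * helper name are hypothetical.
 */
#if 0
static void
example_map_one_page(vm_offset_t l1pt, struct pv_addr *l2pv)
{
	/* Link the page-aligned L2 table under the VA's L1 slot(s). */
	pmap_link_l2pt(l1pt, 0xd0000000, l2pv);

	/* Now a single 4KB page can be entered through that L2 table. */
	pmap_map_entry(l1pt, 0xd0000000 /* va */, 0x40000000 /* pa */,
	    VM_PROT_READ | VM_PROT_WRITE, PTE_CACHE);
}
#endif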

/*
 * pmap_map_chunk:
 *
 *	Map a chunk of memory using the most efficient mappings
 *	possible (section, large page, small page) into the
 *	provided L1 and L2 tables at the specified virtual address.
 */
vm_size_t
pmap_map_chunk(vm_offset_t l1pt, vm_offset_t va, vm_offset_t pa,
    vm_size_t size, int prot, int cache)
{
	pd_entry_t *pde = (pd_entry_t *) l1pt;
	pt_entry_t *pte, f1, f2s, f2l;
	vm_size_t resid;
	int i;

	resid = (size + (PAGE_SIZE - 1)) & ~(PAGE_SIZE - 1);

	if (l1pt == 0)
		panic("pmap_map_chunk: no L1 table provided");

#ifdef VERBOSE_INIT_ARM
	printf("pmap_map_chunk: pa=0x%lx va=0x%lx size=0x%lx resid=0x%lx "
	    "prot=0x%x cache=%d\n", pa, va, size, resid, prot, cache);
#endif

	switch (cache) {
	case PTE_NOCACHE:
	default:
		f1 = 0;
		f2l = 0;
		f2s = 0;
		break;

	case PTE_CACHE:
		f1 = pte_l1_s_cache_mode;
		f2l = pte_l2_l_cache_mode;
		f2s = pte_l2_s_cache_mode;
		break;

	case PTE_PAGETABLE:
		f1 = pte_l1_s_cache_mode_pt;
		f2l = pte_l2_l_cache_mode_pt;
		f2s = pte_l2_s_cache_mode_pt;
		break;
	}

	size = resid;

	while (resid > 0) {
		/* See if we can use a section mapping. */
		if (L1_S_MAPPABLE_P(va, pa, resid)) {
#ifdef VERBOSE_INIT_ARM
			printf("S");
#endif
			pde[va >> L1_S_SHIFT] = L1_S_PROTO | pa |
			    L1_S_PROT(PTE_KERNEL, prot) | f1 |
			    L1_S_DOM(PMAP_DOMAIN_KERNEL);
			PTE_SYNC(&pde[va >> L1_S_SHIFT]);
			va += L1_S_SIZE;
			pa += L1_S_SIZE;
			resid -= L1_S_SIZE;
			continue;
		}

		/*
		 * Ok, we're going to use an L2 table.  Make sure
		 * one is actually in the corresponding L1 slot
		 * for the current VA.
		 */
		if ((pde[va >> L1_S_SHIFT] & L1_TYPE_MASK) != L1_TYPE_C)
			panic("pmap_map_chunk: no L2 table for VA 0x%08x", va);

#ifndef ARM32_NEW_VM_LAYOUT
		pte = (pt_entry_t *)
		    kernel_pt_lookup(pde[va >> L1_S_SHIFT] & L2_S_FRAME);
#else
		pte = (pt_entry_t *) kernel_pt_lookup(
		    pde[L1_IDX(va)] & L1_C_ADDR_MASK);
#endif
		if (pte == NULL)
			panic("pmap_map_chunk: can't find L2 table for VA "
			    "0x%08x", va);

		/* See if we can use an L2 large page mapping. */
		if (L2_L_MAPPABLE_P(va, pa, resid)) {
#ifdef VERBOSE_INIT_ARM
			printf("L");
#endif
			for (i = 0; i < 16; i++) {
#ifndef ARM32_NEW_VM_LAYOUT
				pte[((va >> PAGE_SHIFT) & 0x3f0) + i] =
				    L2_L_PROTO | pa |
				    L2_L_PROT(PTE_KERNEL, prot) | f2l;
				PTE_SYNC(&pte[((va >> PAGE_SHIFT) & 0x3f0) + i]);
#else
				pte[l2pte_index(va) + i] =
				    L2_L_PROTO | pa |
				    L2_L_PROT(PTE_KERNEL, prot) | f2l;
				PTE_SYNC(&pte[l2pte_index(va) + i]);
#endif
			}
			va += L2_L_SIZE;
			pa += L2_L_SIZE;
			resid -= L2_L_SIZE;
			continue;
		}

		/* Use a small page mapping. */
#ifdef VERBOSE_INIT_ARM
		printf("P");
#endif
#ifndef ARM32_NEW_VM_LAYOUT
		pte[(va >> PAGE_SHIFT) & 0x3ff] =
		    L2_S_PROTO | pa | L2_S_PROT(PTE_KERNEL, prot) | f2s;
		PTE_SYNC(&pte[(va >> PAGE_SHIFT) & 0x3ff]);
#else
		pte[l2pte_index(va)] =
		    L2_S_PROTO | pa | L2_S_PROT(PTE_KERNEL, prot) | f2s;
		PTE_SYNC(&pte[l2pte_index(va)]);
#endif
		va += PAGE_SIZE;
		pa += PAGE_SIZE;
		resid -= PAGE_SIZE;
	}
#ifdef VERBOSE_INIT_ARM
	printf("\n");
#endif
	return (size);
}

/********************** Static device map routines ***************************/

static const struct pmap_devmap *pmap_devmap_table;

/*
 * Register the devmap table.  This is provided in case early console
 * initialization needs to register mappings created by bootstrap code
 * before pmap_devmap_bootstrap() is called.
 */
void
pmap_devmap_register(const struct pmap_devmap *table)
{

	pmap_devmap_table = table;
}
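
/*
 * Illustrative sketch, not part of the original source: what a static
 * devmap table might look like.  The addresses and sizes here are
 * hypothetical; real tables live in the per-board initialization code.
 * The array is terminated by an entry with pd_size == 0, which is what
 * pmap_devmap_bootstrap() below scans for.
 */
#if 0
static const struct pmap_devmap example_devmap[] = {
	{
		.pd_va = 0xd0000000,	/* hypothetical kernel VA */
		.pd_pa = 0x40000000,	/* hypothetical device PA */
		.pd_size = 0x00100000,	/* 1MB */
		.pd_prot = VM_PROT_READ | VM_PROT_WRITE,
		.pd_cache = PTE_NOCACHE,
	},
	{ .pd_size = 0 },		/* terminator */
};
/* Registered early via pmap_devmap_register(example_devmap), or mapped
 * and registered in one step by pmap_devmap_bootstrap(). */
#endif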

/*
 * Map all of the static regions in the devmap table, and remember
 * the devmap table so other parts of the kernel can look up entries
 * later.
 */
void
pmap_devmap_bootstrap(vm_offset_t l1pt, const struct pmap_devmap *table)
{
	int i;

	pmap_devmap_table = table;

	for (i = 0; pmap_devmap_table[i].pd_size != 0; i++) {
#ifdef VERBOSE_INIT_ARM
		printf("devmap: %08lx -> %08lx @ %08lx\n",
		    pmap_devmap_table[i].pd_pa,
		    pmap_devmap_table[i].pd_pa +
			pmap_devmap_table[i].pd_size - 1,
		    pmap_devmap_table[i].pd_va);
#endif
		pmap_map_chunk(l1pt, pmap_devmap_table[i].pd_va,
		    pmap_devmap_table[i].pd_pa,
		    pmap_devmap_table[i].pd_size,
		    pmap_devmap_table[i].pd_prot,
		    pmap_devmap_table[i].pd_cache);
	}
}

const struct pmap_devmap *
pmap_devmap_find_pa(vm_paddr_t pa, vm_size_t size)
{
	int i;

	if (pmap_devmap_table == NULL)
		return (NULL);

	for (i = 0; pmap_devmap_table[i].pd_size != 0; i++) {
		if (pa >= pmap_devmap_table[i].pd_pa &&
		    pa + size <= pmap_devmap_table[i].pd_pa +
		    pmap_devmap_table[i].pd_size)
			return (&pmap_devmap_table[i]);
	}

	return (NULL);
}

const struct pmap_devmap *
pmap_devmap_find_va(vm_offset_t va, vm_size_t size)
{
	int i;

	if (pmap_devmap_table == NULL)
		return (NULL);

	for (i = 0; pmap_devmap_table[i].pd_size != 0; i++) {
		if (va >= pmap_devmap_table[i].pd_va &&
		    va + size <= pmap_devmap_table[i].pd_va +
		    pmap_devmap_table[i].pd_size)
			return (&pmap_devmap_table[i]);
	}

	return (NULL);
}
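
/*
 * Illustrative sketch, not part of the original source: using the
 * lookup routines above.  Early device code can ask whether a
 * register block was already mapped by the static devmap table and
 * reuse that VA, falling back to pmap_mapdev() only when it was not.
 * The helper name example_early_regs() is hypothetical.
 */
#if 0
static void *
example_early_regs(vm_offset_t pa, vm_size_t size)
{
	const struct pmap_devmap *pd;

	pd = pmap_devmap_find_pa(pa, size);
	if (pd != NULL)
		return ((void *)(pd->pd_va + (pa - pd->pd_pa)));

	/* Not covered by the static table; create a fresh mapping. */
	return (pmap_mapdev(pa, size));
}
#endif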