pmap-v4.c revision 147249
/* From: $NetBSD: pmap.c,v 1.148 2004/04/03 04:35:48 bsh Exp $ */
/*-
 * Copyright 2004 Olivier Houchard.
 * Copyright 2003 Wasabi Systems, Inc.
 * All rights reserved.
 *
 * Written by Steve C. Woodford for Wasabi Systems, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *      This product includes software developed for the NetBSD Project by
 *      Wasabi Systems, Inc.
 * 4. The name of Wasabi Systems, Inc. may not be used to endorse
 *    or promote products derived from this software without specific prior
 *    written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL WASABI SYSTEMS, INC
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*-
 * Copyright (c) 2002-2003 Wasabi Systems, Inc.
 * Copyright (c) 2001 Richard Earnshaw
 * Copyright (c) 2001-2002 Christopher Gilbert
 * All rights reserved.
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. The name of the company nor the name of the author may be used to
 *    endorse or promote products derived from this software without specific
 *    prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
 * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
/*-
 * Copyright (c) 1999 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Charles M. Hannum.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *        This product includes software developed by the NetBSD
 *        Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*-
 * Copyright (c) 1994-1998 Mark Brinicombe.
 * Copyright (c) 1994 Brini.
 * All rights reserved.
 *
 * This code is derived from software written for Brini by Mark Brinicombe
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *      This product includes software developed by Mark Brinicombe.
 * 4. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * RiscBSD kernel project
 *
 * pmap.c
 *
 * Machine dependent vm stuff
 *
 * Created      : 20/09/94
 */

/*
 * Special compilation symbols
 * PMAP_DEBUG           - Build in pmap_debug_level code
 */
/* Include header files */

#include "opt_vm.h"

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: head/sys/arm/arm/pmap.c 147249 2005-06-10 13:31:30Z cognet $");
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/proc.h>
#include <sys/malloc.h>
#include <sys/msgbuf.h>
#include <sys/vmmeter.h>
#include <sys/mman.h>
#include <sys/smp.h>
#include <sys/sx.h>
#include <sys/sched.h>

#include <vm/vm.h>
#include <vm/uma.h>
#include <vm/pmap.h>
#include <vm/vm_kern.h>
#include <vm/vm_object.h>
#include <vm/vm_map.h>
#include <vm/vm_page.h>
#include <vm/vm_pageout.h>
#include <vm/vm_extern.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <machine/md_var.h>
#include <machine/vmparam.h>
#include <machine/cpu.h>
#include <machine/cpufunc.h>
#include <machine/pcb.h>

#ifdef PMAP_DEBUG
#define PDEBUG(_lev_,_stat_) \
        if (pmap_debug_level >= (_lev_)) \
                ((_stat_))
#define dprintf printf

int pmap_debug_level = 0;
#define PMAP_INLINE
#else   /* PMAP_DEBUG */
#define PDEBUG(_lev_,_stat_) /* Nothing */
#define dprintf(x, arg...)
#define PMAP_INLINE __inline
#endif  /* PMAP_DEBUG */
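/*
 * Usage sketch (illustrative only, not part of the original file): with
 * PMAP_DEBUG defined, a call site such as
 *
 *      PDEBUG(1, printf("pmap_enter: va = %08x\n", va));
 *
 * expands to the printf guarded by "pmap_debug_level >= 1"; in non-debug
 * builds the statement disappears entirely.
 */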
extern struct pv_addr systempage;
/*
 * Internal function prototypes
 */
static void             pmap_free_pv_entry (pv_entry_t);
static pv_entry_t       pmap_get_pv_entry(void);

static void             pmap_vac_me_harder(struct vm_page *, pmap_t,
                            vm_offset_t);
static void             pmap_vac_me_kpmap(struct vm_page *, pmap_t,
                            vm_offset_t);
static void             pmap_vac_me_user(struct vm_page *, pmap_t, vm_offset_t);
static void             pmap_alloc_l1(pmap_t);
static void             pmap_free_l1(pmap_t);
static void             pmap_use_l1(pmap_t);

static int              pmap_clearbit(struct vm_page *, u_int);

static struct l2_bucket *pmap_get_l2_bucket(pmap_t, vm_offset_t);
static struct l2_bucket *pmap_alloc_l2_bucket(pmap_t, vm_offset_t);
static void             pmap_free_l2_bucket(pmap_t, struct l2_bucket *, u_int);
static vm_offset_t      kernel_pt_lookup(vm_paddr_t);

static MALLOC_DEFINE(M_VMPMAP, "pmap", "PMAP L1");

vm_offset_t avail_end;          /* PA of last available physical page */
vm_offset_t virtual_avail;      /* VA of first avail page (after kernel bss) */
vm_offset_t virtual_end;        /* VA of last avail page (end of kernel AS) */
vm_offset_t pmap_curmaxkvaddr;

extern void *end;
vm_offset_t kernel_vm_end = 0;

struct pmap kernel_pmap_store;
pmap_t kernel_pmap;

static pt_entry_t *csrc_pte, *cdst_pte;
static vm_offset_t csrcp, cdstp;
static void             pmap_init_l1(struct l1_ttable *, pd_entry_t *);
/*
 * These routines are called when the CPU type is identified to set up
 * the PTE prototypes, cache modes, etc.
 *
 * The variables are always here, just in case LKMs need to reference
 * them (though, they shouldn't).
 */

pt_entry_t      pte_l1_s_cache_mode;
pt_entry_t      pte_l1_s_cache_mode_pt;
pt_entry_t      pte_l1_s_cache_mask;

pt_entry_t      pte_l2_l_cache_mode;
pt_entry_t      pte_l2_l_cache_mode_pt;
pt_entry_t      pte_l2_l_cache_mask;

pt_entry_t      pte_l2_s_cache_mode;
pt_entry_t      pte_l2_s_cache_mode_pt;
pt_entry_t      pte_l2_s_cache_mask;

pt_entry_t      pte_l2_s_prot_u;
pt_entry_t      pte_l2_s_prot_w;
pt_entry_t      pte_l2_s_prot_mask;

pt_entry_t      pte_l1_s_proto;
pt_entry_t      pte_l1_c_proto;
pt_entry_t      pte_l2_s_proto;

void            (*pmap_copy_page_func)(vm_paddr_t, vm_paddr_t);
void            (*pmap_zero_page_func)(vm_paddr_t, int, int);
/*
 * Which pmap is currently 'live' in the cache
 *
 * XXXSCW: Fix for SMP ...
 */
union pmap_cache_state *pmap_cache_state;

LIST_HEAD(pmaplist, pmap);
struct pmaplist allpmaps;

/* static pt_entry_t *msgbufmap;*/
struct msgbuf *msgbufp = 0;

extern void bcopy_page(vm_offset_t, vm_offset_t);
extern void bzero_page(vm_offset_t);

char *_tmppt;

/*
 * Metadata for L1 translation tables.
 */
struct l1_ttable {
        /* Entry on the L1 Table list */
        SLIST_ENTRY(l1_ttable) l1_link;

        /* Entry on the L1 Least Recently Used list */
        TAILQ_ENTRY(l1_ttable) l1_lru;

        /* Track how many domains are allocated from this L1 */
        volatile u_int l1_domain_use_count;

        /*
         * A free-list of domain numbers for this L1.
         * We avoid using ffs() and a bitmap to track domains since ffs()
         * is slow on ARM.
         */
        u_int8_t l1_domain_first;
        u_int8_t l1_domain_free[PMAP_DOMAINS];

        /* Physical address of this L1 page table */
        vm_paddr_t l1_physaddr;

        /* KVA of this L1 page table */
        pd_entry_t *l1_kva;
};

/*
 * Convert a virtual address into its L1 table index. That is, the
 * index used to locate the L2 descriptor table pointer in an L1 table.
 * This is basically used to index l1->l1_kva[].
 *
 * Each L2 descriptor table represents 1MB of VA space.
 */
#define L1_IDX(va)              (((vm_offset_t)(va)) >> L1_S_SHIFT)
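/*
 * Worked example (illustrative, assuming the usual 1MB ARM sections,
 * i.e. L1_S_SHIFT == 20): for va == 0xc0102000,
 *
 *      L1_IDX(0xc0102000) == 0xc0102000 >> 20 == 0xc01
 *
 * so the L2 descriptor table pointer for that address lives in
 * l1->l1_kva[0xc01], and addresses 0xc0100000-0xc01fffff all share it.
 */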
/*
 * L1 Page Tables are tracked using a Least Recently Used list.
 *  - New L1s are allocated from the HEAD.
 *  - Freed L1s are added to the TAIL.
 *  - Recently accessed L1s (where an 'access' is some change to one of
 *    the userland pmaps which owns this L1) are moved to the TAIL.
 */
static TAILQ_HEAD(, l1_ttable) l1_lru_list;
/*
 * A list of all L1 tables
 */
static SLIST_HEAD(, l1_ttable) l1_list;
static struct mtx l1_lru_lock;

/*
 * The l2_dtable tracks L2_BUCKET_SIZE worth of L1 slots.
 *
 * This is normally 16MB worth of L2 page descriptors for any given pmap.
 * Reference counts are maintained for L2 descriptors so they can be
 * freed when empty.
 */
struct l2_dtable {
        /* The number of L2 page descriptors allocated to this l2_dtable */
        u_int l2_occupancy;

        /* List of L2 page descriptors */
        struct l2_bucket {
                pt_entry_t *l2b_kva;    /* KVA of L2 Descriptor Table */
                vm_paddr_t l2b_phys;    /* Physical address of same */
                u_short l2b_l1idx;      /* This L2 table's L1 index */
                u_short l2b_occupancy;  /* How many active descriptors */
        } l2_bucket[L2_BUCKET_SIZE];
};

/* pmap_kenter_internal flags */
#define KENTER_CACHE 0x1
#define KENTER_USER  0x2

/*
 * Given an L1 table index, calculate the corresponding l2_dtable index
 * and bucket index within the l2_dtable.
 */
#define L2_IDX(l1idx)           (((l1idx) >> L2_BUCKET_LOG2) & \
                                 (L2_SIZE - 1))
#define L2_BUCKET(l1idx)        ((l1idx) & (L2_BUCKET_SIZE - 1))

/*
 * Given a virtual address, this macro returns the
 * virtual address required to drop into the next L2 bucket.
 */
#define L2_NEXT_BUCKET(va)      (((va) & L1_S_FRAME) + L1_S_SIZE)
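/*
 * Worked example (illustrative, assuming L2_BUCKET_LOG2 == 4, so
 * L2_BUCKET_SIZE == 16 and each l2_dtable spans 16 x 1MB == 16MB):
 * for va == 0xc0102000, l1idx == L1_IDX(va) == 0xc01, hence
 *
 *      L2_IDX(0xc01)    == (0xc01 >> 4) & (L2_SIZE - 1)  -> which l2_dtable
 *      L2_BUCKET(0xc01) == 0xc01 & 0xf == 0x1            -> bucket within it
 *      L2_NEXT_BUCKET(0xc0102000) == 0xc0200000          -> next 1MB boundary
 *
 * i.e. pm->pm_l2[L2_IDX(l1idx)]->l2_bucket[L2_BUCKET(l1idx)] holds the
 * page table covering this address.
 */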
/*
 * L2 allocation.
 */
#define pmap_alloc_l2_dtable()          \
                (void*)uma_zalloc(l2table_zone, M_NOWAIT)
#define pmap_free_l2_dtable(l2)         \
                uma_zfree(l2table_zone, l2)

/*
 * We try to map the page tables write-through, if possible. However, not
 * all CPUs have a write-through cache mode, so on those we have to sync
 * the cache when we frob page tables.
 *
 * We try to evaluate this at compile time, if possible. However, it's
 * not always possible to do that, hence this run-time var.
 */
int     pmap_needs_pte_sync;

/*
 * Macro to determine if a mapping might be resident in the
 * instruction cache and/or TLB
 */
#define PV_BEEN_EXECD(f)  (((f) & (PVF_REF | PVF_EXEC)) == (PVF_REF | PVF_EXEC))

/*
 * Macro to determine if a mapping might be resident in the
 * data cache and/or TLB
 */
#define PV_BEEN_REFD(f)   (((f) & PVF_REF) != 0)
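/*
 * Example (illustrative): a pv entry whose flags are
 * (PVF_REF | PVF_EXEC | PVF_WRITE) satisfies both PV_BEEN_EXECD() and
 * PV_BEEN_REFD(), so unmapping it must clean/invalidate both the I-cache
 * and D-cache ranges; flags of just PVF_EXEC (mapped executable but never
 * referenced) satisfy neither, and the flushes can be skipped.
 */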
/*
 * Data for the pv entry allocation mechanism
 */
#define MINPV 2048

#ifndef PMAP_SHPGPERPROC
#define PMAP_SHPGPERPROC 200
#endif

#define pmap_is_current(pm)     ((pm) == pmap_kernel() || \
                curproc->p_vmspace->vm_map.pmap == (pm))
static uma_zone_t pvzone;
uma_zone_t l2zone;
static uma_zone_t l2table_zone;
static vm_offset_t pmap_kernel_l2dtable_kva;
static vm_offset_t pmap_kernel_l2ptp_kva;
static vm_paddr_t pmap_kernel_l2ptp_phys;
static struct vm_object pvzone_obj;
static struct vm_object l2zone_obj;
static int pv_entry_count=0, pv_entry_max=0, pv_entry_high_water=0;
int pmap_pagedaemon_waken = 0;

/*
 * This list exists for the benefit of pmap_map_chunk().  It keeps track
 * of the kernel L2 tables during bootstrap, so that pmap_map_chunk() can
 * find them as necessary.
 *
 * Note that the data on this list MUST remain valid after initarm() returns,
 * as pmap_bootstrap() uses it to construct L2 table metadata.
 */
SLIST_HEAD(, pv_addr) kernel_pt_list = SLIST_HEAD_INITIALIZER(kernel_pt_list);

static void
pmap_init_l1(struct l1_ttable *l1, pd_entry_t *l1pt)
{
        int i;

        l1->l1_kva = l1pt;
        l1->l1_domain_use_count = 0;
        l1->l1_domain_first = 0;

        for (i = 0; i < PMAP_DOMAINS; i++)
                l1->l1_domain_free[i] = i + 1;

        /*
         * Copy the kernel's L1 entries to each new L1.
         */
        if (l1pt != pmap_kernel()->pm_l1->l1_kva)
                memcpy(l1pt, pmap_kernel()->pm_l1->l1_kva, L1_TABLE_SIZE);

        if ((l1->l1_physaddr = pmap_extract(pmap_kernel(),
            (vm_offset_t)l1pt)) == 0)
                panic("pmap_init_l1: can't get PA of L1 at %p", l1pt);
        SLIST_INSERT_HEAD(&l1_list, l1, l1_link);
        TAILQ_INSERT_TAIL(&l1_lru_list, l1, l1_lru);
}

static vm_offset_t
kernel_pt_lookup(vm_paddr_t pa)
{
        struct pv_addr *pv;

        SLIST_FOREACH(pv, &kernel_pt_list, pv_list) {
#ifndef ARM32_NEW_VM_LAYOUT
                if (pv->pv_pa == (pa & ~PAGE_MASK)) {
                        return (pv->pv_va | (pa & PAGE_MASK));
                }
#else
                if (pv->pv_pa == pa)
                        return (pv->pv_va);
#endif
        }
        return (0);
}

#if (ARM_MMU_GENERIC + ARM_MMU_SA1) != 0
void
pmap_pte_init_generic(void)
{

        pte_l1_s_cache_mode = L1_S_B|L1_S_C;
        pte_l1_s_cache_mask = L1_S_CACHE_MASK_generic;

        pte_l2_l_cache_mode = L2_B|L2_C;
        pte_l2_l_cache_mask = L2_L_CACHE_MASK_generic;

        pte_l2_s_cache_mode = L2_B|L2_C;
        pte_l2_s_cache_mask = L2_S_CACHE_MASK_generic;

        /*
         * If we have a write-through cache, set B and C.  If
         * we have a write-back cache, then we assume setting
         * only C will make those pages write-through.
         */
        if (cpufuncs.cf_dcache_wb_range == (void *) cpufunc_nullop) {
                pte_l1_s_cache_mode_pt = L1_S_B|L1_S_C;
                pte_l2_l_cache_mode_pt = L2_B|L2_C;
                pte_l2_s_cache_mode_pt = L2_B|L2_C;
        } else {
                pte_l1_s_cache_mode_pt = L1_S_C;
                pte_l2_l_cache_mode_pt = L2_C;
                pte_l2_s_cache_mode_pt = L2_C;
        }

        pte_l2_s_prot_u = L2_S_PROT_U_generic;
        pte_l2_s_prot_w = L2_S_PROT_W_generic;
        pte_l2_s_prot_mask = L2_S_PROT_MASK_generic;

        pte_l1_s_proto = L1_S_PROTO_generic;
        pte_l1_c_proto = L1_C_PROTO_generic;
        pte_l2_s_proto = L2_S_PROTO_generic;

        pmap_copy_page_func = pmap_copy_page_generic;
        pmap_zero_page_func = pmap_zero_page_generic;
}
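/*
 * Background note (illustrative, standard ARMv4/v5 behaviour): the B
 * (bufferable) and C (cacheable) PTE bits combine roughly as
 *
 *      C=0 B=0  uncached, unbuffered
 *      C=0 B=1  uncached, write-buffered
 *      C=1 B=0  write-through cached
 *      C=1 B=1  write-back cached
 *
 * which is why the code above picks B|C for normal mappings and assumes
 * that C alone yields write-through for page tables on write-back CPUs.
 */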
#if defined(CPU_ARM8)
void
pmap_pte_init_arm8(void)
{

        /*
         * ARM8 is compatible with generic, but we need to use
         * the page tables uncached.
         */
        pmap_pte_init_generic();

        pte_l1_s_cache_mode_pt = 0;
        pte_l2_l_cache_mode_pt = 0;
        pte_l2_s_cache_mode_pt = 0;
}
#endif /* CPU_ARM8 */

#if defined(CPU_ARM9) && defined(ARM9_CACHE_WRITE_THROUGH)
void
pmap_pte_init_arm9(void)
{

        /*
         * ARM9 is compatible with generic, but we want to use
         * write-through caching for now.
         */
        pmap_pte_init_generic();

        pte_l1_s_cache_mode = L1_S_C;
        pte_l2_l_cache_mode = L2_C;
        pte_l2_s_cache_mode = L2_C;

        pte_l1_s_cache_mode_pt = L1_S_C;
        pte_l2_l_cache_mode_pt = L2_C;
        pte_l2_s_cache_mode_pt = L2_C;
}
#endif /* CPU_ARM9 */
#endif /* (ARM_MMU_GENERIC + ARM_MMU_SA1) != 0 */

#if defined(CPU_ARM10)
void
pmap_pte_init_arm10(void)
{

        /*
         * ARM10 is compatible with generic, but we want to use
         * write-through caching for now.
         */
        pmap_pte_init_generic();

        pte_l1_s_cache_mode = L1_S_B | L1_S_C;
        pte_l2_l_cache_mode = L2_B | L2_C;
        pte_l2_s_cache_mode = L2_B | L2_C;

        pte_l1_s_cache_mode_pt = L1_S_C;
        pte_l2_l_cache_mode_pt = L2_C;
        pte_l2_s_cache_mode_pt = L2_C;

}
#endif /* CPU_ARM10 */
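/*
 * Note (illustrative): when a CPU lacks a write-through mode for page
 * tables (the SA-1 case below), a PTE store can sit in the write-back
 * D-cache while the MMU walks stale memory.  Setting pmap_needs_pte_sync
 * makes the PTE_SYNC()/PTE_SYNC_RANGE() macros clean the affected cache
 * lines after every PTE update, e.g.
 *
 *      *ptep = npte;
 *      PTE_SYNC(ptep);
 */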
#if ARM_MMU_SA1 == 1
void
pmap_pte_init_sa1(void)
{

        /*
         * The StrongARM SA-1 cache does not have a write-through
         * mode.  So, do the generic initialization, then reset
         * the page table cache mode to B=1,C=1, and note that
         * the PTEs need to be sync'd.
         */
        pmap_pte_init_generic();

        pte_l1_s_cache_mode_pt = L1_S_B|L1_S_C;
        pte_l2_l_cache_mode_pt = L2_B|L2_C;
        pte_l2_s_cache_mode_pt = L2_B|L2_C;

        pmap_needs_pte_sync = 1;
}
#endif /* ARM_MMU_SA1 == 1*/

#if ARM_MMU_XSCALE == 1
#if (ARM_NMMUS > 1)
static u_int xscale_use_minidata;
#endif

void
pmap_pte_init_xscale(void)
{
        uint32_t auxctl;
        int write_through = 0;

        pte_l1_s_cache_mode = L1_S_B|L1_S_C|L1_S_XSCALE_P;
        pte_l1_s_cache_mask = L1_S_CACHE_MASK_xscale;

        pte_l2_l_cache_mode = L2_B|L2_C;
        pte_l2_l_cache_mask = L2_L_CACHE_MASK_xscale;

        pte_l2_s_cache_mode = L2_B|L2_C;
        pte_l2_s_cache_mask = L2_S_CACHE_MASK_xscale;

        pte_l1_s_cache_mode_pt = L1_S_C;
        pte_l2_l_cache_mode_pt = L2_C;
        pte_l2_s_cache_mode_pt = L2_C;
#ifdef XSCALE_CACHE_READ_WRITE_ALLOCATE
        /*
         * The XScale core has an enhanced mode where writes that
         * miss the cache cause a cache line to be allocated.  This
         * is significantly faster than the traditional, write-through
         * behavior of this case.
         */
        pte_l1_s_cache_mode |= L1_S_XSCALE_TEX(TEX_XSCALE_X);
        pte_l2_l_cache_mode |= L2_XSCALE_L_TEX(TEX_XSCALE_X);
        pte_l2_s_cache_mode |= L2_XSCALE_T_TEX(TEX_XSCALE_X);
#endif /* XSCALE_CACHE_READ_WRITE_ALLOCATE */
#ifdef XSCALE_CACHE_WRITE_THROUGH
        /*
         * Some versions of the XScale core have various bugs in
         * their cache units, the work-around for which is to run
         * the cache in write-through mode.  Unfortunately, this
         * has a major (negative) impact on performance.  So, we
         * go ahead and run fast-and-loose, in the hopes that we
         * don't line up the planets in a way that will trip the
         * bugs.
         *
         * However, we give you the option to be slow-but-correct.
         */
        write_through = 1;
#elif defined(XSCALE_CACHE_WRITE_BACK)
        /* force write back cache mode */
        write_through = 0;
#elif defined(CPU_XSCALE_PXA2X0)
        /*
         * Intel PXA2[15]0 processors are known to have a bug in
         * write-back cache on revision 4 and earlier (stepping
         * A[01] and B[012]).  Fixed for C0 and later.
         */
        {
                uint32_t id, type;

                id = cpufunc_id();
                type = id & ~(CPU_ID_XSCALE_COREREV_MASK|CPU_ID_REVISION_MASK);

                if (type == CPU_ID_PXA250 || type == CPU_ID_PXA210) {
                        if ((id & CPU_ID_REVISION_MASK) < 5) {
                                /* write through for stepping A0-1 and B0-2 */
                                write_through = 1;
                        }
                }
        }
#endif /* XSCALE_CACHE_WRITE_THROUGH */

        if (write_through) {
                pte_l1_s_cache_mode = L1_S_C;
                pte_l2_l_cache_mode = L2_C;
                pte_l2_s_cache_mode = L2_C;
        }

#if (ARM_NMMUS > 1)
        xscale_use_minidata = 1;
#endif

        pte_l2_s_prot_u = L2_S_PROT_U_xscale;
        pte_l2_s_prot_w = L2_S_PROT_W_xscale;
        pte_l2_s_prot_mask = L2_S_PROT_MASK_xscale;

        pte_l1_s_proto = L1_S_PROTO_xscale;
        pte_l1_c_proto = L1_C_PROTO_xscale;
        pte_l2_s_proto = L2_S_PROTO_xscale;

        pmap_copy_page_func = pmap_copy_page_xscale;
        pmap_zero_page_func = pmap_zero_page_xscale;

        /*
         * Disable ECC protection of page table access, for now.
         */
        __asm __volatile("mrc p15, 0, %0, c1, c0, 1" : "=r" (auxctl));
        auxctl &= ~XSCALE_AUXCTL_P;
        __asm __volatile("mcr p15, 0, %0, c1, c0, 1" : : "r" (auxctl));
}

/*
 * xscale_setup_minidata:
 *
 *      Set up the mini-data cache clean area.  We require the
 *      caller to allocate the right amount of physically and
 *      virtually contiguous space.
 */
extern vm_offset_t xscale_minidata_clean_addr;
extern vm_size_t xscale_minidata_clean_size; /* already initialized */
void
xscale_setup_minidata(vm_offset_t l1pt, vm_offset_t va, vm_paddr_t pa)
{
        pd_entry_t *pde = (pd_entry_t *) l1pt;
        pt_entry_t *pte;
        vm_size_t size;
        uint32_t auxctl;

        xscale_minidata_clean_addr = va;
        /* Round it to page size. */
        size = (xscale_minidata_clean_size + L2_S_OFFSET) & L2_S_FRAME;

        for (; size != 0;
             va += L2_S_SIZE, pa += L2_S_SIZE, size -= L2_S_SIZE) {
#ifndef ARM32_NEW_VM_LAYOUT
                pte = (pt_entry_t *)
                    kernel_pt_lookup(pde[va >> L1_S_SHIFT] & L2_S_FRAME);
#else
                pte = (pt_entry_t *) kernel_pt_lookup(
                    pde[L1_IDX(va)] & L1_C_ADDR_MASK);
#endif
                if (pte == NULL)
                        panic("xscale_setup_minidata: can't find L2 table for "
                            "VA 0x%08x", (u_int32_t) va);
#ifndef ARM32_NEW_VM_LAYOUT
                pte[(va >> PAGE_SHIFT) & 0x3ff] =
#else
                pte[l2pte_index(va)] =
#endif
                    L2_S_PROTO | pa | L2_S_PROT(PTE_KERNEL, VM_PROT_READ) |
                    L2_C | L2_XSCALE_T_TEX(TEX_XSCALE_X);
        }

        /*
         * Configure the mini-data cache for write-back with
         * read/write-allocate.
         *
         * NOTE: In order to reconfigure the mini-data cache, we must
         * make sure it contains no valid data!  In order to do that,
         * we must issue a global data cache invalidate command!
         *
         * WE ASSUME WE ARE RUNNING UN-CACHED WHEN THIS ROUTINE IS CALLED!
         * THIS IS VERY IMPORTANT!
         */

        /* Invalidate data and mini-data. */
        __asm __volatile("mcr p15, 0, %0, c7, c6, 0" : : "r" (0));
        __asm __volatile("mrc p15, 0, %0, c1, c0, 1" : "=r" (auxctl));
        auxctl = (auxctl & ~XSCALE_AUXCTL_MD_MASK) | XSCALE_AUXCTL_MD_WB_RWA;
        __asm __volatile("mcr p15, 0, %0, c1, c0, 1" : : "r" (auxctl));
}
#endif

/*
 * Allocate an L1 translation table for the specified pmap.
 * This is called at pmap creation time.
 */
static void
pmap_alloc_l1(pmap_t pm)
{
        struct l1_ttable *l1;
        u_int8_t domain;

        /*
         * Remove the L1 at the head of the LRU list
         */
        mtx_lock(&l1_lru_lock);
        l1 = TAILQ_FIRST(&l1_lru_list);
        TAILQ_REMOVE(&l1_lru_list, l1, l1_lru);

        /*
         * Pick the first available domain number, and update
         * the link to the next number.
         */
        domain = l1->l1_domain_first;
        l1->l1_domain_first = l1->l1_domain_free[domain];

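        /*
         * Illustrative note: l1_domain_free[] is an implicit linked list
         * threaded through the array, seeded in pmap_init_l1() so that
         * entry i links to i + 1.  The two lines above are a list "pop":
         * e.g. with l1_domain_first == 3 and l1_domain_free[3] == 7,
         * domain 3 is handed out and domain 7 becomes the next candidate.
         */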
        /*
         * If there are still free domain numbers in this L1,
         * put it back on the TAIL of the LRU list.
         */
        if (++l1->l1_domain_use_count < PMAP_DOMAINS)
                TAILQ_INSERT_TAIL(&l1_lru_list, l1, l1_lru);

        mtx_unlock(&l1_lru_lock);

        /*
         * Fix up the relevant bits in the pmap structure
         */
        pm->pm_l1 = l1;
        pm->pm_domain = domain;
}

/*
 * Free an L1 translation table.
 * This is called at pmap destruction time.
 */
static void
pmap_free_l1(pmap_t pm)
{
        struct l1_ttable *l1 = pm->pm_l1;

        mtx_lock(&l1_lru_lock);

        /*
         * If this L1 is currently on the LRU list, remove it.
         */
        if (l1->l1_domain_use_count < PMAP_DOMAINS)
                TAILQ_REMOVE(&l1_lru_list, l1, l1_lru);

        /*
         * Free up the domain number which was allocated to the pmap
         */
        l1->l1_domain_free[pm->pm_domain] = l1->l1_domain_first;
        l1->l1_domain_first = pm->pm_domain;
        l1->l1_domain_use_count--;

        /*
         * The L1 now must have at least 1 free domain, so add
         * it back to the LRU list.  If the use count is zero,
         * put it at the head of the list, otherwise it goes
         * to the tail.
         */
        if (l1->l1_domain_use_count == 0) {
                TAILQ_INSERT_HEAD(&l1_lru_list, l1, l1_lru);
        } else
                TAILQ_INSERT_TAIL(&l1_lru_list, l1, l1_lru);

        mtx_unlock(&l1_lru_lock);
}

static PMAP_INLINE void
pmap_use_l1(pmap_t pm)
{
        struct l1_ttable *l1;

        /*
         * Do nothing if we're in interrupt context.
         * Access to an L1 by the kernel pmap must not affect
         * the LRU list.
         */
        if (pm == pmap_kernel())
                return;

        l1 = pm->pm_l1;

        /*
         * If the L1 is not currently on the LRU list, just return
         */
        if (l1->l1_domain_use_count == PMAP_DOMAINS)
                return;

        mtx_lock(&l1_lru_lock);

        /*
         * Check the use count again, now that we've acquired the lock
         */
        if (l1->l1_domain_use_count == PMAP_DOMAINS) {
                mtx_unlock(&l1_lru_lock);
                return;
        }

        /*
         * Move the L1 to the back of the LRU list
         */
        TAILQ_REMOVE(&l1_lru_list, l1, l1_lru);
        TAILQ_INSERT_TAIL(&l1_lru_list, l1, l1_lru);

        mtx_unlock(&l1_lru_lock);
}

/*
 * Returns a pointer to the L2 bucket associated with the specified pmap
 * and VA, or NULL if no L2 bucket exists for the address.
 */
static PMAP_INLINE struct l2_bucket *
pmap_get_l2_bucket(pmap_t pm, vm_offset_t va)
{
        struct l2_dtable *l2;
        struct l2_bucket *l2b;
        u_short l1idx;

        l1idx = L1_IDX(va);

        if ((l2 = pm->pm_l2[L2_IDX(l1idx)]) == NULL ||
            (l2b = &l2->l2_bucket[L2_BUCKET(l1idx)])->l2b_kva == NULL)
                return (NULL);

        return (l2b);
}

/*
 * Returns a pointer to the L2 bucket associated with the specified pmap
 * and VA.
 *
 * If no L2 bucket exists, perform the necessary allocations to put an L2
 * bucket/page table in place.
 *
 * Note that if a new L2 bucket/page was allocated, the caller *must*
 * increment the bucket occupancy counter appropriately *before*
 * releasing the pmap's lock to ensure no other thread or cpu deallocates
 * the bucket/page in the meantime.
 */
static struct l2_bucket *
pmap_alloc_l2_bucket(pmap_t pm, vm_offset_t va)
{
        struct l2_dtable *l2;
        struct l2_bucket *l2b;
        u_short l1idx;

        l1idx = L1_IDX(va);

        if ((l2 = pm->pm_l2[L2_IDX(l1idx)]) == NULL) {
                /*
                 * No mapping at this address, as there is
                 * no entry in the L1 table.
                 * Need to allocate a new l2_dtable.
                 */
                if ((l2 = pmap_alloc_l2_dtable()) == NULL) {
                        return (NULL);
                }
                bzero(l2, sizeof(*l2));
                /*
                 * Link it into the parent pmap
                 */
                pm->pm_l2[L2_IDX(l1idx)] = l2;
        }

        l2b = &l2->l2_bucket[L2_BUCKET(l1idx)];

        /*
         * Fetch pointer to the L2 page table associated with the address.
         */
        if (l2b->l2b_kva == NULL) {
                pt_entry_t *ptep;

                /*
                 * No L2 page table has been allocated. Chances are, this
                 * is because we just allocated the l2_dtable, above.
                 */
                ptep = (void*)uma_zalloc(l2zone, M_NOWAIT);
                if (ptep == NULL) {
                        /*
                         * Oops, no more L2 page tables available at this
                         * time. We may need to deallocate the l2_dtable
                         * if we allocated a new one above.
                         */
                        if (l2->l2_occupancy == 0) {
                                pm->pm_l2[L2_IDX(l1idx)] = NULL;
                                pmap_free_l2_dtable(l2);
                        }
                        return (NULL);
                }

                l2->l2_occupancy++;
                l2b->l2b_kva = ptep;
                l2b->l2b_phys = vtophys(ptep);
                l2b->l2b_l1idx = l1idx;
        }

        return (l2b);
}

static PMAP_INLINE void
#ifndef PMAP_INCLUDE_PTE_SYNC
pmap_free_l2_ptp(pt_entry_t *l2)
#else
pmap_free_l2_ptp(boolean_t need_sync, pt_entry_t *l2)
#endif
{
#ifdef PMAP_INCLUDE_PTE_SYNC
        /*
         * Note: With a write-back cache, we may need to sync this
         * L2 table before re-using it.
         * This is because it may have belonged to a non-current
         * pmap, in which case the cache syncs would have been
         * skipped when the pages were being unmapped. If the
         * L2 table were then to be immediately re-allocated to
         * the *current* pmap, it may well contain stale mappings
         * which have not yet been cleared by a cache write-back
         * and so would still be visible to the mmu.
         */
        if (need_sync)
                PTE_SYNC_RANGE(l2, L2_TABLE_SIZE_REAL / sizeof(pt_entry_t));
#endif
        uma_zfree(l2zone, l2);
}

/*
 * One or more mappings in the specified L2 descriptor table have just been
 * invalidated.
 *
 * Garbage collect the metadata and descriptor table itself if necessary.
 *
 * The pmap lock must be acquired when this is called (not necessary
 * for the kernel pmap).
 */
static void
pmap_free_l2_bucket(pmap_t pm, struct l2_bucket *l2b, u_int count)
{
        struct l2_dtable *l2;
        pd_entry_t *pl1pd, l1pd;
        pt_entry_t *ptep;
        u_short l1idx;

        /*
         * Update the bucket's reference count according to how many
         * PTEs the caller has just invalidated.
         */
        l2b->l2b_occupancy -= count;

        /*
         * Note:
         *
         * Level 2 page tables allocated to the kernel pmap are never freed
         * as that would require checking all Level 1 page tables and
         * removing any references to the Level 2 page table. See also the
         * comment elsewhere about never freeing bootstrap L2 descriptors.
         *
         * We make do with just invalidating the mapping in the L2 table.
         *
         * This isn't really a big deal in practice and, in fact, leads
         * to a performance win over time as we don't need to continually
         * alloc/free.
         */
        if (l2b->l2b_occupancy > 0 || pm == pmap_kernel())
                return;

        /*
         * There are no more valid mappings in this level 2 page table.
         * Go ahead and NULL-out the pointer in the bucket, then
         * free the page table.
         */
        l1idx = l2b->l2b_l1idx;
        ptep = l2b->l2b_kva;
        l2b->l2b_kva = NULL;

        pl1pd = &pm->pm_l1->l1_kva[l1idx];

        /*
         * If the L1 slot matches the pmap's domain
         * number, then invalidate it.
         */
        l1pd = *pl1pd & (L1_TYPE_MASK | L1_C_DOM_MASK);
        if (l1pd == (L1_C_DOM(pm->pm_domain) | L1_TYPE_C)) {
                *pl1pd = 0;
                PTE_SYNC(pl1pd);
        }

        /*
         * Release the L2 descriptor table back to the pool cache.
         */
#ifndef PMAP_INCLUDE_PTE_SYNC
        pmap_free_l2_ptp(ptep);
#else
        pmap_free_l2_ptp(!pmap_is_current(pm), ptep);
#endif

        /*
         * Update the reference count in the associated l2_dtable
         */
        l2 = pm->pm_l2[L2_IDX(l1idx)];
        if (--l2->l2_occupancy > 0)
                return;

        /*
         * There are no more valid mappings in any of the Level 1
         * slots managed by this l2_dtable. Go ahead and NULL-out
         * the pointer in the parent pmap and free the l2_dtable.
         */
        pm->pm_l2[L2_IDX(l1idx)] = NULL;
        pmap_free_l2_dtable(l2);
}

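/*
 * Caller-side sketch (illustrative): a routine that has just zapped n
 * PTEs in a bucket is expected to do something like
 *
 *      l2b = pmap_get_l2_bucket(pm, va);
 *      ... invalidate n PTEs in l2b->l2b_kva ...
 *      pmap_free_l2_bucket(pm, l2b, n);
 *
 * so the occupancy count stays in step and the table is reclaimed once
 * it reaches zero (userland pmaps only; kernel L2 tables are never freed).
 */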
/*
 * Pool cache constructors for L2 descriptor tables, metadata and pmap
 * structures.
 */
static int
pmap_l2ptp_ctor(void *mem, int size, void *arg, int flags)
{
#ifndef PMAP_INCLUDE_PTE_SYNC
        struct l2_bucket *l2b;
        pt_entry_t *ptep, pte;
        vm_offset_t va = (vm_offset_t)mem & ~PAGE_MASK;

        /*
         * The mappings for these page tables were initially made using
         * pmap_kenter() by the pool subsystem. Therefore, the cache-
         * mode will not be right for page table mappings. To avoid
         * polluting the pmap_kenter() code with a special case for
         * page tables, we simply fix up the cache-mode here if it's not
         * correct.
         */
#ifdef ARM_USE_SMALL_ALLOC
        if (flags & UMA_SLAB_KMEM) {
#endif
                l2b = pmap_get_l2_bucket(pmap_kernel(), va);
                ptep = &l2b->l2b_kva[l2pte_index(va)];
                pte = *ptep;

                if ((pte & L2_S_CACHE_MASK) != pte_l2_s_cache_mode_pt) {
                        /*
                         * Page tables must have the cache-mode set to
                         * Write-Thru.
                         */
                        *ptep = (pte & ~L2_S_CACHE_MASK) |
                            pte_l2_s_cache_mode_pt;
                        PTE_SYNC(ptep);
                        cpu_tlb_flushD_SE(va);
                        cpu_cpwait();
                }

#ifdef ARM_USE_SMALL_ALLOC
        }
#endif
#endif
        memset(mem, 0, L2_TABLE_SIZE_REAL);
        PTE_SYNC_RANGE(mem, L2_TABLE_SIZE_REAL / sizeof(pt_entry_t));
        return (0);
}

/*
 * A bunch of routines to conditionally flush the caches/TLB depending
 * on whether the specified pmap actually needs to be flushed at any
 * given time.
 */
static PMAP_INLINE void
pmap_tlb_flushID_SE(pmap_t pm, vm_offset_t va)
{

        if (pmap_is_current(pm))
                cpu_tlb_flushID_SE(va);
}

static PMAP_INLINE void
pmap_tlb_flushD_SE(pmap_t pm, vm_offset_t va)
{

        if (pmap_is_current(pm))
                cpu_tlb_flushD_SE(va);
}

static PMAP_INLINE void
pmap_tlb_flushID(pmap_t pm)
{

        if (pmap_is_current(pm))
                cpu_tlb_flushID();
}

static PMAP_INLINE void
pmap_tlb_flushD(pmap_t pm)
{

        if (pmap_is_current(pm))
                cpu_tlb_flushD();
}

static PMAP_INLINE void
pmap_idcache_wbinv_range(pmap_t pm, vm_offset_t va, vm_size_t len)
{

        if (pmap_is_current(pm))
                cpu_idcache_wbinv_range(va, len);
}

static PMAP_INLINE void
pmap_dcache_wb_range(pmap_t pm, vm_offset_t va, vm_size_t len,
    boolean_t do_inv, boolean_t rd_only)
{

        if (pmap_is_current(pm)) {
                if (do_inv) {
                        if (rd_only)
                                cpu_dcache_inv_range(va, len);
                        else
                                cpu_dcache_wbinv_range(va, len);
                } else
                if (!rd_only)
                        cpu_dcache_wb_range(va, len);
        }
}

static PMAP_INLINE void
pmap_idcache_wbinv_all(pmap_t pm)
{

        if (pmap_is_current(pm))
                cpu_idcache_wbinv_all();
}

static PMAP_INLINE void
pmap_dcache_wbinv_all(pmap_t pm)
{

        if (pmap_is_current(pm))
                cpu_dcache_wbinv_all();
}

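/*
 * Illustrative call (mirrors the use in pmap_vac_me_user() below):
 *
 *      pmap_dcache_wb_range(pv->pv_pmap, pv->pv_va, PAGE_SIZE,
 *          TRUE, (pv->pv_flags & PVF_WRITE) == 0);
 *
 * do_inv == TRUE asks for invalidation as well as write-back, and
 * rd_only == TRUE says the mapping was never writable, so a plain
 * invalidate (no write-back) suffices.  All of these helpers are no-ops
 * when the pmap is not the current one, since the VIVT cache is purged
 * on context switch anyway.
 */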
/*
 * this routine defines the region(s) of memory that should
 * not be tested for the modified bit.
 */
static PMAP_INLINE int
pmap_track_modified(vm_offset_t va)
{
        if ((va < kmi.clean_sva) || (va >= kmi.clean_eva))
                return 1;
        else
                return 0;
}

/*
 * PTE_SYNC_CURRENT:
 *
 *      Make sure the pte is written out to RAM.
 *      We need to do this in one of the following cases:
 *       - We're dealing with the kernel pmap
 *       - There is no pmap active in the cache/tlb.
 *       - The specified pmap is 'active' in the cache/tlb.
 */
#ifdef PMAP_INCLUDE_PTE_SYNC
#define PTE_SYNC_CURRENT(pm, ptep)      \
do {                                    \
        if (PMAP_NEEDS_PTE_SYNC &&      \
            pmap_is_current(pm))        \
                PTE_SYNC(ptep);         \
} while (/*CONSTCOND*/0)
#else
#define PTE_SYNC_CURRENT(pm, ptep)      /* nothing */
#endif

/*
 * Since we have a virtually indexed cache, we may need to inhibit caching if
 * there is more than one mapping and at least one of them is writable.
 * Since we purge the cache on every context switch, we only need to check for
 * other mappings within the same pmap, or kernel_pmap.
 * This function is also called when a page is unmapped, to possibly reenable
 * caching on any remaining mappings.
 *
 * The code implements the following logic, where:
 *
 * KW = # of kernel read/write pages
 * KR = # of kernel read only pages
 * UW = # of user read/write pages
 * UR = # of user read only pages
 *
 * KC = kernel mapping is cacheable
 * UC = user mapping is cacheable
 *
 *               KW=0,KR=0  KW=0,KR>0  KW=1,KR=0  KW>1,KR>=0
 *             +---------------------------------------------
 * UW=0,UR=0   |    ---        KC=1       KC=1       KC=0
 * UW=0,UR>0   |    UC=1       KC=1,UC=1  KC=0,UC=0  KC=0,UC=0
 * UW=1,UR=0   |    UC=1       KC=0,UC=0  KC=0,UC=0  KC=0,UC=0
 * UW>1,UR>=0  |    UC=0       KC=0,UC=0  KC=0,UC=0  KC=0,UC=0
 */

static const int pmap_vac_flags[4][4] = {
        {-1,            0,              0,              PVF_KNC},
        {0,             0,              PVF_NC,         PVF_NC},
        {0,             PVF_NC,         PVF_NC,         PVF_NC},
        {PVF_UNC,       PVF_NC,         PVF_NC,         PVF_NC}
};

static PMAP_INLINE int
pmap_get_vac_flags(const struct vm_page *pg)
{
        int kidx, uidx;

        kidx = 0;
        if (pg->md.kro_mappings || pg->md.krw_mappings > 1)
                kidx |= 1;
        if (pg->md.krw_mappings)
                kidx |= 2;

        uidx = 0;
        if (pg->md.uro_mappings || pg->md.urw_mappings > 1)
                uidx |= 1;
        if (pg->md.urw_mappings)
                uidx |= 2;

        return (pmap_vac_flags[uidx][kidx]);
}

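/*
 * Worked example (illustrative): a page with one kernel read-only
 * mapping (kro == 1, krw == 0) gives kidx == 1; two user read/write
 * mappings (urw == 2) give uidx == 1|2 == 3; pmap_vac_flags[3][1] is
 * PVF_NC, i.e. every mapping of the page must be made non-cacheable,
 * matching the UW>1 row of the table above.
 */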
static __inline void
pmap_vac_me_harder(struct vm_page *pg, pmap_t pm, vm_offset_t va)
{
        int nattr;

        nattr = pmap_get_vac_flags(pg);

        if (nattr < 0) {
                pg->md.pvh_attrs &= ~PVF_NC;
                return;
        }

        if (nattr == 0 && (pg->md.pvh_attrs & PVF_NC) == 0) {
                return;
        }

        if (pm == pmap_kernel())
                pmap_vac_me_kpmap(pg, pm, va);
        else
                pmap_vac_me_user(pg, pm, va);

        pg->md.pvh_attrs = (pg->md.pvh_attrs & ~PVF_NC) | nattr;
}

static void
pmap_vac_me_kpmap(struct vm_page *pg, pmap_t pm, vm_offset_t va)
{
        u_int u_cacheable, u_entries;
        struct pv_entry *pv;
        pmap_t last_pmap = pm;

        /*
         * Pass one, see if there are both kernel and user pmaps for
         * this page.  Calculate whether there are user-writable or
         * kernel-writable pages.
         */
        u_cacheable = 0;
        TAILQ_FOREACH(pv, &pg->md.pv_list, pv_list) {
                if (pv->pv_pmap != pm && (pv->pv_flags & PVF_NC) == 0)
                        u_cacheable++;
        }

        u_entries = pg->md.urw_mappings + pg->md.uro_mappings;

        /*
         * We know we have just been updating a kernel entry, so if
         * all user pages are already cacheable, then there is nothing
         * further to do.
         */
        if (pg->md.k_mappings == 0 && u_cacheable == u_entries)
                return;

        if (u_entries) {
                /*
                 * Scan over the list again, for each entry, if it
                 * might not be set correctly, call pmap_vac_me_user
                 * to recalculate the settings.
                 */
                TAILQ_FOREACH(pv, &pg->md.pv_list, pv_list) {
                        /*
                         * We know kernel mappings will get set
                         * correctly in other calls.  We also know
                         * that if the pmap is the same as last_pmap
                         * then we've just handled this entry.
                         */
                        if (pv->pv_pmap == pm || pv->pv_pmap == last_pmap)
                                continue;

                        /*
                         * If there are kernel entries and this page
                         * is writable but non-cacheable, then we can
                         * skip this entry also.
                         */
                        if (pg->md.k_mappings &&
                            (pv->pv_flags & (PVF_NC | PVF_WRITE)) ==
                            (PVF_NC | PVF_WRITE))
                                continue;

                        /*
                         * Similarly if there are no kernel-writable
                         * entries and the page is already
                         * read-only/cacheable.
                         */
1373129198Scognet */ 1374129198Scognet if (pg->md.krw_mappings == 0 && 1375129198Scognet (pv->pv_flags & (PVF_NC | PVF_WRITE)) == 0) 1376129198Scognet continue; 1377129198Scognet 1378129198Scognet /* 1379129198Scognet * For some of the remaining cases, we know 1380129198Scognet * that we must recalculate, but for others we 1381129198Scognet * can't tell if they are correct or not, so 1382129198Scognet * we recalculate anyway. 1383129198Scognet */ 1384129198Scognet pmap_vac_me_user(pg, (last_pmap = pv->pv_pmap), 0); 1385129198Scognet } 1386129198Scognet 1387129198Scognet if (pg->md.k_mappings == 0) 1388129198Scognet return; 1389129198Scognet } 1390129198Scognet 1391129198Scognet pmap_vac_me_user(pg, pm, va); 1392129198Scognet} 1393129198Scognet 1394129198Scognetstatic void 1395129198Scognetpmap_vac_me_user(struct vm_page *pg, pmap_t pm, vm_offset_t va) 1396129198Scognet{ 1397129198Scognet pmap_t kpmap = pmap_kernel(); 1398129198Scognet struct pv_entry *pv, *npv; 1399129198Scognet struct l2_bucket *l2b; 1400129198Scognet pt_entry_t *ptep, pte; 1401129198Scognet u_int entries = 0; 1402129198Scognet u_int writable = 0; 1403129198Scognet u_int cacheable_entries = 0; 1404129198Scognet u_int kern_cacheable = 0; 1405129198Scognet u_int other_writable = 0; 1406129198Scognet 1407129198Scognet /* 1408129198Scognet * Count mappings and writable mappings in this pmap. 1409129198Scognet * Include kernel mappings as part of our own. 1410129198Scognet * Keep a pointer to the first one. 1411129198Scognet */ 1412129198Scognet npv = TAILQ_FIRST(&pg->md.pv_list); 1413129198Scognet TAILQ_FOREACH(pv, &pg->md.pv_list, pv_list) { 1414129198Scognet /* Count mappings in the same pmap */ 1415129198Scognet if (pm == pv->pv_pmap || kpmap == pv->pv_pmap) { 1416129198Scognet if (entries++ == 0) 1417129198Scognet npv = pv; 1418129198Scognet 1419129198Scognet /* Cacheable mappings */ 1420129198Scognet if ((pv->pv_flags & PVF_NC) == 0) { 1421129198Scognet cacheable_entries++; 1422129198Scognet if (kpmap == pv->pv_pmap) 1423129198Scognet kern_cacheable++; 1424129198Scognet } 1425129198Scognet 1426129198Scognet /* Writable mappings */ 1427129198Scognet if (pv->pv_flags & PVF_WRITE) 1428129198Scognet ++writable; 1429129198Scognet } else 1430129198Scognet if (pv->pv_flags & PVF_WRITE) 1431129198Scognet other_writable = 1; 1432129198Scognet } 1433129198Scognet 1434129198Scognet /* 1435129198Scognet * Enable or disable caching as necessary. 1436129198Scognet * Note: the first entry might be part of the kernel pmap, 1437129198Scognet * so we can't assume this is indicative of the state of the 1438129198Scognet * other (maybe non-kpmap) entries. 
1439129198Scognet */ 1440129198Scognet if ((entries > 1 && writable) || 1441129198Scognet (entries > 0 && pm == kpmap && other_writable)) { 1442129198Scognet if (cacheable_entries == 0) 1443129198Scognet return; 1444129198Scognet 1445129198Scognet for (pv = npv; pv; pv = TAILQ_NEXT(pv, pv_list)) { 1446129198Scognet if ((pm != pv->pv_pmap && kpmap != pv->pv_pmap) || 1447129198Scognet (pv->pv_flags & PVF_NC)) 1448129198Scognet continue; 1449129198Scognet 1450129198Scognet pv->pv_flags |= PVF_NC; 1451129198Scognet 1452129198Scognet l2b = pmap_get_l2_bucket(pv->pv_pmap, pv->pv_va); 1453129198Scognet ptep = &l2b->l2b_kva[l2pte_index(pv->pv_va)]; 1454129198Scognet pte = *ptep & ~L2_S_CACHE_MASK; 1455129198Scognet 1456129198Scognet if ((va != pv->pv_va || pm != pv->pv_pmap) && 1457129198Scognet l2pte_valid(pte)) { 1458129198Scognet if (PV_BEEN_EXECD(pv->pv_flags)) { 1459129198Scognet pmap_idcache_wbinv_range(pv->pv_pmap, 1460129198Scognet pv->pv_va, PAGE_SIZE); 1461129198Scognet pmap_tlb_flushID_SE(pv->pv_pmap, 1462129198Scognet pv->pv_va); 1463129198Scognet } else 1464129198Scognet if (PV_BEEN_REFD(pv->pv_flags)) { 1465129198Scognet pmap_dcache_wb_range(pv->pv_pmap, 1466129198Scognet pv->pv_va, PAGE_SIZE, TRUE, 1467129198Scognet (pv->pv_flags & PVF_WRITE) == 0); 1468129198Scognet pmap_tlb_flushD_SE(pv->pv_pmap, 1469129198Scognet pv->pv_va); 1470129198Scognet } 1471129198Scognet } 1472129198Scognet 1473129198Scognet *ptep = pte; 1474129198Scognet PTE_SYNC_CURRENT(pv->pv_pmap, ptep); 1475129198Scognet } 1476129198Scognet cpu_cpwait(); 1477129198Scognet } else 1478129198Scognet if (entries > cacheable_entries) { 1479129198Scognet /* 1480129198Scognet * Turn cacheing back on for some pages. If it is a kernel 1481129198Scognet * page, only do so if there are no other writable pages. 1482129198Scognet */ 1483129198Scognet for (pv = npv; pv; pv = TAILQ_NEXT(pv, pv_list)) { 1484129198Scognet if (!(pv->pv_flags & PVF_NC) || (pm != pv->pv_pmap && 1485129198Scognet (kpmap != pv->pv_pmap || other_writable))) 1486129198Scognet continue; 1487129198Scognet 1488129198Scognet pv->pv_flags &= ~PVF_NC; 1489129198Scognet 1490129198Scognet l2b = pmap_get_l2_bucket(pv->pv_pmap, pv->pv_va); 1491129198Scognet ptep = &l2b->l2b_kva[l2pte_index(pv->pv_va)]; 1492129198Scognet pte = (*ptep & ~L2_S_CACHE_MASK) | pte_l2_s_cache_mode; 1493129198Scognet 1494129198Scognet if (l2pte_valid(pte)) { 1495129198Scognet if (PV_BEEN_EXECD(pv->pv_flags)) { 1496129198Scognet pmap_tlb_flushID_SE(pv->pv_pmap, 1497129198Scognet pv->pv_va); 1498129198Scognet } else 1499129198Scognet if (PV_BEEN_REFD(pv->pv_flags)) { 1500129198Scognet pmap_tlb_flushD_SE(pv->pv_pmap, 1501129198Scognet pv->pv_va); 1502129198Scognet } 1503129198Scognet } 1504129198Scognet 1505129198Scognet *ptep = pte; 1506129198Scognet PTE_SYNC_CURRENT(pv->pv_pmap, ptep); 1507129198Scognet } 1508129198Scognet } 1509129198Scognet} 1510129198Scognet 1511129198Scognet/* 1512129198Scognet * Modify pte bits for all ptes corresponding to the given physical address. 1513129198Scognet * We use `maskbits' rather than `clearbits' because we're always passing 1514129198Scognet * constants and the latter would require an extra inversion at run-time. 
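
/*
 * A concrete scenario for the two branches above: map one page read/write
 * at two VAs in the same user pmap and pmap_vac_me_user() sees
 * entries == 2 with writable != 0, so every cacheable mapping is marked
 * PVF_NC (after writing back and invalidating any valid, current ones).
 * Remove one of the mappings and the recalculation finds
 * entries == 1 > cacheable_entries == 0, so the second branch restores
 * pte_l2_s_cache_mode on the surviving mapping.
 */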

/*
 * Modify pte bits for all ptes corresponding to the given physical address.
 * We use `maskbits' rather than `clearbits' because we're always passing
 * constants and the latter would require an extra inversion at run-time.
 */
static int
pmap_clearbit(struct vm_page *pg, u_int maskbits)
{
	struct l2_bucket *l2b;
	struct pv_entry *pv;
	pt_entry_t *ptep, npte, opte;
	pmap_t pm;
	vm_offset_t va;
	u_int oflags;
	int count = 0;
#if 0
	PMAP_HEAD_TO_MAP_LOCK();
	simple_lock(&pg->mdpage.pvh_slock);
#endif

	/*
	 * Clear saved attributes (modify, reference)
	 */
	pg->md.pvh_attrs &= ~(maskbits & (PVF_MOD | PVF_REF));

	if (TAILQ_EMPTY(&pg->md.pv_list)) {
#if 0
		simple_unlock(&pg->mdpage.pvh_slock);
		PMAP_HEAD_TO_MAP_UNLOCK();
#endif
		return (0);
	}

	/*
	 * Loop over all current mappings setting/clearing as appropriate
	 */
	TAILQ_FOREACH(pv, &pg->md.pv_list, pv_list) {
		va = pv->pv_va;
		pm = pv->pv_pmap;
		oflags = pv->pv_flags;
		pv->pv_flags &= ~maskbits;

#if 0
		pmap_acquire_pmap_lock(pm);
#endif

		l2b = pmap_get_l2_bucket(pm, va);

		ptep = &l2b->l2b_kva[l2pte_index(va)];
		npte = opte = *ptep;

		if (maskbits & (PVF_WRITE|PVF_MOD) &&
		    !pmap_track_modified(pv->pv_va)) {
			if ((pv->pv_flags & PVF_NC)) {
				/*
				 * Entry is not cacheable:
				 *
				 * Don't turn caching on again if this is a
				 * modified emulation.  This would be
				 * inconsistent with the settings created by
				 * pmap_vac_me_harder().  Otherwise, it's safe
				 * to re-enable caching.
				 *
				 * There's no need to call pmap_vac_me_harder()
				 * here: all pages are losing their write
				 * permission.
				 */
				if (maskbits & PVF_WRITE) {
					npte |= pte_l2_s_cache_mode;
					pv->pv_flags &= ~PVF_NC;
				}
			} else
			if (opte & L2_S_PROT_W) {
				vm_page_dirty(pg);
				/*
				 * Entry is writable/cacheable: check if the
				 * pmap is current; if it is, flush the page
				 * from the cache, otherwise it won't be in
				 * the cache.
				 */
				if (PV_BEEN_EXECD(oflags))
					pmap_idcache_wbinv_range(pm, pv->pv_va,
					    PAGE_SIZE);
				else
				if (PV_BEEN_REFD(oflags))
					pmap_dcache_wb_range(pm, pv->pv_va,
					    PAGE_SIZE,
					    (maskbits & PVF_REF) ? TRUE : FALSE,
					    FALSE);
			}

			/* make the pte read only */
			npte &= ~L2_S_PROT_W;

			if (maskbits & PVF_WRITE) {
				/*
				 * Keep alias accounting up to date
				 */
				if (pv->pv_pmap == pmap_kernel()) {
					if (oflags & PVF_WRITE) {
						pg->md.krw_mappings--;
						pg->md.kro_mappings++;
					}
				} else
				if (oflags & PVF_WRITE) {
					pg->md.urw_mappings--;
					pg->md.uro_mappings++;
				}
			}
		}

		if (maskbits & PVF_REF && !pmap_track_modified(pv->pv_va)) {
			if ((pv->pv_flags & PVF_NC) == 0 &&
			    (maskbits & (PVF_WRITE|PVF_MOD)) == 0) {
				/*
				 * Check npte here; we may have already
				 * done the wbinv above, and the validity
				 * of the PTE is the same for opte and
				 * npte.
				 */
				if (npte & L2_S_PROT_W) {
					if (PV_BEEN_EXECD(oflags))
						pmap_idcache_wbinv_range(pm,
						    pv->pv_va, PAGE_SIZE);
					else
					if (PV_BEEN_REFD(oflags))
						pmap_dcache_wb_range(pm,
						    pv->pv_va, PAGE_SIZE,
						    TRUE, FALSE);
				} else
				if ((npte & L2_TYPE_MASK) != L2_TYPE_INV) {
					/* XXXJRT need idcache_inv_range */
					if (PV_BEEN_EXECD(oflags))
						pmap_idcache_wbinv_range(pm,
						    pv->pv_va, PAGE_SIZE);
					else
					if (PV_BEEN_REFD(oflags))
						pmap_dcache_wb_range(pm,
						    pv->pv_va, PAGE_SIZE,
						    TRUE, TRUE);
				}
			}

			/*
			 * Make the PTE invalid so that we will take a
			 * page fault the next time the mapping is
			 * referenced.
			 */
			npte &= ~L2_TYPE_MASK;
			npte |= L2_TYPE_INV;
		}

		if (npte != opte) {
			count++;
			*ptep = npte;
			PTE_SYNC(ptep);
			/* Flush the TLB entry if a current pmap. */
			if (PV_BEEN_EXECD(oflags))
				pmap_tlb_flushID_SE(pm, pv->pv_va);
			else
			if (PV_BEEN_REFD(oflags))
				pmap_tlb_flushD_SE(pm, pv->pv_va);
		}

#if 0
		pmap_release_pmap_lock(pm);
#endif

	}

#if 0
	simple_unlock(&pg->mdpage.pvh_slock);
	PMAP_HEAD_TO_MAP_UNLOCK();
#endif
	if (maskbits & PVF_WRITE)
		vm_page_flag_clear(pg, PG_WRITEABLE);
	return (count);
}
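
/*
 * A minimal sketch of how pmap_clearbit() is typically driven (not
 * compiled; the wrapper below is hypothetical, but pmap_page_protect()
 * further down uses exactly the PVF_WRITE case):
 */
#if 0
static void
example_clearbit_usage(struct vm_page *pg)
{
	/* Revoke write permission from every mapping of the page. */
	(void)pmap_clearbit(pg, PVF_WRITE);
	/* Clear the modified attribute; the PTEs go read-only as well. */
	(void)pmap_clearbit(pg, PVF_MOD);
	/*
	 * Clear the referenced attribute; the PTEs are invalidated so the
	 * next access faults into pmap_fault_fixup() and re-sets the bit.
	 */
	(void)pmap_clearbit(pg, PVF_REF);
}
#endif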

/*
 * main pv_entry manipulation functions:
 *   pmap_enter_pv: enter a mapping onto a vm_page list
 *   pmap_remove_pv: remove a mapping from a vm_page list
 *
 * NOTE: pmap_enter_pv expects to lock the pvh itself
 *       pmap_remove_pv expects the caller to lock the pvh before calling
 */

/*
 * pmap_enter_pv: enter a mapping onto a vm_page list
 *
 * => caller should hold the proper lock on pmap_main_lock
 * => caller should have pmap locked
 * => we will gain the lock on the vm_page and allocate the new pv_entry
 * => caller should adjust ptp's wire_count before calling
 * => caller should not adjust pmap's wire_count
 */
static void
pmap_enter_pv(struct vm_page *pg, struct pv_entry *pve, pmap_t pm,
    vm_offset_t va, u_int flags)
{

	pve->pv_pmap = pm;
	pve->pv_va = va;
	pve->pv_flags = flags;

#if 0
	mtx_lock(&pg->md.pvh_mtx);
#endif
	TAILQ_INSERT_HEAD(&pg->md.pv_list, pve, pv_list);
	TAILQ_INSERT_HEAD(&pm->pm_pvlist, pve, pv_plist);
	pg->md.pvh_attrs |= flags & (PVF_REF | PVF_MOD);
	if (pm == pmap_kernel()) {
		if (flags & PVF_WRITE)
			pg->md.krw_mappings++;
		else
			pg->md.kro_mappings++;
	} else
	if (flags & PVF_WRITE)
		pg->md.urw_mappings++;
	else
		pg->md.uro_mappings++;
	pg->md.pv_list_count++;
#if 0
	mtx_unlock(&pg->md.pvh_mtx);
#endif
	if (pve->pv_flags & PVF_WIRED)
		++pm->pm_stats.wired_count;
	vm_page_flag_set(pg, PG_REFERENCED);
}

/*
 * pmap_find_pv: Find a pv entry
 *
 * => caller should hold lock on vm_page
 */
static PMAP_INLINE struct pv_entry *
pmap_find_pv(struct vm_page *pg, pmap_t pm, vm_offset_t va)
{
	struct pv_entry *pv;

	TAILQ_FOREACH(pv, &pg->md.pv_list, pv_list)
	    if (pm == pv->pv_pmap && va == pv->pv_va)
		    break;
	return (pv);
}
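
/*
 * A sketch of the usual enter path built from the two helpers above (not
 * compiled; the wrapper is hypothetical and assumes the pvzone created in
 * pmap_init() below):
 */
#if 0
static void
example_enter_path(struct vm_page *pg, pmap_t pm, vm_offset_t va)
{
	struct pv_entry *pve;

	if (pmap_find_pv(pg, pm, va) != NULL)
		return;			/* already tracked */
	pve = uma_zalloc(pvzone, M_NOWAIT);
	if (pve != NULL)
		pmap_enter_pv(pg, pve, pm, va, PVF_REF);
}
#endif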

/*
 * vector_page_setprot:
 *
 *	Manipulate the protection of the vector page.
 */
void
vector_page_setprot(int prot)
{
	struct l2_bucket *l2b;
	pt_entry_t *ptep;

	l2b = pmap_get_l2_bucket(pmap_kernel(), vector_page);

	ptep = &l2b->l2b_kva[l2pte_index(vector_page)];

	*ptep = (*ptep & ~L1_S_PROT_MASK) | L2_S_PROT(PTE_KERNEL, prot);
	PTE_SYNC(ptep);
	cpu_tlb_flushD_SE(vector_page);
	cpu_cpwait();
}

/*
 * pmap_remove_pv: try to remove a mapping from a pv_list
 *
 * => caller should hold proper lock on pmap_main_lock
 * => pmap should be locked
 * => caller should hold lock on vm_page [so that attrs can be adjusted]
 * => caller should adjust ptp's wire_count and free PTP if needed
 * => caller should NOT adjust pmap's wire_count
 * => we return the removed pve
 */

static void
pmap_nuke_pv(struct vm_page *pg, pmap_t pm, struct pv_entry *pve)
{

	TAILQ_REMOVE(&pg->md.pv_list, pve, pv_list);
	TAILQ_REMOVE(&pm->pm_pvlist, pve, pv_plist);
	if (pve->pv_flags & PVF_WIRED)
		--pm->pm_stats.wired_count;
	pg->md.pv_list_count--;
	if (pg->md.pvh_attrs & PVF_MOD)
		vm_page_dirty(pg);
	if (pm == pmap_kernel()) {
		if (pve->pv_flags & PVF_WRITE)
			pg->md.krw_mappings--;
		else
			pg->md.kro_mappings--;
	} else
	if (pve->pv_flags & PVF_WRITE)
		pg->md.urw_mappings--;
	else
		pg->md.uro_mappings--;
	if (TAILQ_FIRST(&pg->md.pv_list) == NULL ||
	    (pg->md.krw_mappings == 0 && pg->md.urw_mappings == 0)) {
		pg->md.pvh_attrs &= ~PVF_MOD;
		if (TAILQ_FIRST(&pg->md.pv_list) == NULL)
			pg->md.pvh_attrs &= ~PVF_REF;
		vm_page_flag_clear(pg, PG_WRITEABLE);
	}
	if (TAILQ_FIRST(&pg->md.pv_list))
		vm_page_flag_set(pg, PG_REFERENCED);
	if (pve->pv_flags & PVF_WRITE)
		pmap_vac_me_harder(pg, pm, 0);
}
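
/*
 * pmap_nuke_pv() above does the accounting once the pv_entry is already
 * known; pmap_remove_pv() below is the search-then-nuke wrapper most
 * callers want, returning the entry so the caller can free it.
 */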

static struct pv_entry *
pmap_remove_pv(struct vm_page *pg, pmap_t pm, vm_offset_t va)
{
	struct pv_entry *pve;

	pve = TAILQ_FIRST(&pg->md.pv_list);

	while (pve) {
		if (pve->pv_pmap == pm && pve->pv_va == va) {	/* match? */
			pmap_nuke_pv(pg, pm, pve);
			break;
		}
		pve = TAILQ_NEXT(pve, pv_list);
	}

	return (pve);				/* return removed pve */
}

/*
 * pmap_modify_pv: Update pv flags
 *
 * => caller should hold lock on vm_page [so that attrs can be adjusted]
 * => caller should NOT adjust pmap's wire_count
 * => caller must call pmap_vac_me_harder() if writable status of a page
 *    may have changed.
 * => we return the old flags
 *
 * Modify a physical-virtual mapping in the pv table
 */
static u_int
pmap_modify_pv(struct vm_page *pg, pmap_t pm, vm_offset_t va,
    u_int clr_mask, u_int set_mask)
{
	struct pv_entry *npv;
	u_int flags, oflags;

	if ((npv = pmap_find_pv(pg, pm, va)) == NULL)
		return (0);

	/*
	 * There is at least one VA mapping this page.
	 */

	if (clr_mask & (PVF_REF | PVF_MOD))
		pg->md.pvh_attrs |= set_mask & (PVF_REF | PVF_MOD);

	oflags = npv->pv_flags;
	npv->pv_flags = flags = (oflags & ~clr_mask) | set_mask;

	if ((flags ^ oflags) & PVF_WIRED) {
		if (flags & PVF_WIRED)
			++pm->pm_stats.wired_count;
		else
			--pm->pm_stats.wired_count;
	}

	if ((flags ^ oflags) & PVF_WRITE) {
		if (pm == pmap_kernel()) {
			if (flags & PVF_WRITE) {
				pg->md.krw_mappings++;
				pg->md.kro_mappings--;
			} else {
				pg->md.kro_mappings++;
				pg->md.krw_mappings--;
			}
		} else
		if (flags & PVF_WRITE) {
			pg->md.urw_mappings++;
			pg->md.uro_mappings--;
		} else {
			pg->md.uro_mappings++;
			pg->md.urw_mappings--;
		}
		if (pg->md.krw_mappings == 0 && pg->md.urw_mappings == 0) {
			pg->md.pvh_attrs &= ~PVF_MOD;
			vm_page_flag_clear(pg, PG_WRITEABLE);
		}
		pmap_vac_me_harder(pg, pm, 0);
	}

	return (oflags);
}
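
/*
 * A sketch of a typical pmap_modify_pv() caller, flipping the wired
 * status of a mapping (not compiled; the wrapper is hypothetical --
 * wired_count itself is adjusted inside pmap_modify_pv()):
 */
#if 0
static void
example_change_wiring(struct vm_page *pg, pmap_t pm, vm_offset_t va,
    boolean_t wired)
{
	(void)pmap_modify_pv(pg, pm, va, PVF_WIRED,
	    wired ? PVF_WIRED : 0);
}
#endif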

/* Function to set the debug level of the pmap code */
#ifdef PMAP_DEBUG
void
pmap_debug(int level)
{
	pmap_debug_level = level;
	dprintf("pmap_debug: level=%d\n", pmap_debug_level);
}
#endif	/* PMAP_DEBUG */

void
pmap_pinit0(struct pmap *pmap)
{
	PDEBUG(1, printf("pmap_pinit0: pmap = %08x\n", (u_int32_t) pmap));

	dprintf("pmap_pinit0: pmap = %08x, pm_pdir = %08x\n",
	    (u_int32_t) pmap, (u_int32_t) pmap->pm_pdir);
	bcopy(kernel_pmap, pmap, sizeof(*pmap));
}

/*
 * Initialize a vm_page's machine-dependent fields.
 */
void
pmap_page_init(vm_page_t m)
{

	TAILQ_INIT(&m->md.pv_list);
	m->md.pv_list_count = 0;
}

/*
 *	Initialize the pmap module.
 *	Called by vm_init, to initialize any structures that the pmap
 *	system needs to map virtual memory.
 */
void
pmap_init(void)
{

	PDEBUG(1, printf("pmap_init:\n"));

	/*
	 * init the pv free list
	 */
	pvzone = uma_zcreate("PV ENTRY", sizeof (struct pv_entry), NULL, NULL,
	    NULL, NULL, UMA_ALIGN_PTR, UMA_ZONE_VM | UMA_ZONE_NOFREE);
	uma_prealloc(pvzone, MINPV);
	/*
	 * Now it is safe to enable pv_table recording.
	 */
	PDEBUG(1, printf("pmap_init: done!\n"));
}
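
/*
 * pmap_fault_fixup() below implements referenced/modified emulation:
 * PTEs for clean or unreferenced pages are deliberately left read-only
 * or invalid, so the first write (or first access) traps.  It returns 1
 * when the fault was ours to fix, meaning the trap handler can simply
 * retry the instruction, and 0 for a genuine fault that must be passed
 * on to vm_fault().
 */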

int
pmap_fault_fixup(pmap_t pm, vm_offset_t va, vm_prot_t ftype, int user)
{
	struct l2_dtable *l2;
	struct l2_bucket *l2b;
	pd_entry_t *pl1pd, l1pd;
	pt_entry_t *ptep, pte;
	vm_paddr_t pa;
	u_int l1idx;
	int rv = 0;

#if 0
	PMAP_MAP_TO_HEAD_LOCK();
	pmap_acquire_pmap_lock(pm);
#endif
	l1idx = L1_IDX(va);

	/*
	 * If there is no l2_dtable for this address, then the process
	 * has no business accessing it.
	 *
	 * Note: This will catch userland processes trying to access
	 * kernel addresses.
	 */
	l2 = pm->pm_l2[L2_IDX(l1idx)];
	if (l2 == NULL)
		goto out;

	/*
	 * Likewise if there is no L2 descriptor table
	 */
	l2b = &l2->l2_bucket[L2_BUCKET(l1idx)];
	if (l2b->l2b_kva == NULL)
		goto out;

	/*
	 * Check the PTE itself.
	 */
	ptep = &l2b->l2b_kva[l2pte_index(va)];
	pte = *ptep;
	if (pte == 0)
		goto out;

	/*
	 * Catch a userland access to the vector page mapped at 0x0
	 */
	if (user && (pte & L2_S_PROT_U) == 0)
		goto out;

	pa = l2pte_pa(pte);

	if ((ftype & VM_PROT_WRITE) && (pte & L2_S_PROT_W) == 0) {
		/*
		 * This looks like a good candidate for "page modified"
		 * emulation...
		 */
		struct pv_entry *pv;
		struct vm_page *pg;

		/* Extract the physical address of the page */
		if ((pg = PHYS_TO_VM_PAGE(pa)) == NULL) {
			goto out;
		}
		/* Get the current flags for this page. */

		pv = pmap_find_pv(pg, pm, va);
		if (pv == NULL) {
			goto out;
		}

		/*
		 * Do the flags say this page is writable? If not then it
		 * is a genuine write fault. If yes then the write fault is
		 * our fault as we did not reflect the write access in the
		 * PTE. Now we know a write has occurred we can correct this
		 * and also set the modified bit
		 */
		if ((pv->pv_flags & PVF_WRITE) == 0) {
			goto out;
		}

		if (pmap_track_modified(pv->pv_va)) {
			pg->md.pvh_attrs |= PVF_REF | PVF_MOD;
			vm_page_dirty(pg);
		}
		pv->pv_flags |= PVF_REF | PVF_MOD;

		/*
		 * Re-enable write permissions for the page.  No need to call
		 * pmap_vac_me_harder(), since this is just a
		 * modified-emulation fault, and the PVF_WRITE bit isn't
		 * changing. We've already set the cacheable bits based on
		 * the assumption that we can write to this page.
		 */
		*ptep = (pte & ~L2_TYPE_MASK) | L2_S_PROTO | L2_S_PROT_W;
		PTE_SYNC(ptep);
		rv = 1;
	} else
	if ((pte & L2_TYPE_MASK) == L2_TYPE_INV) {
		/*
		 * This looks like a good candidate for "page referenced"
		 * emulation.
		 */
		struct pv_entry *pv;
		struct vm_page *pg;

		/* Extract the physical address of the page */
		vm_page_lock_queues();
		if ((pg = PHYS_TO_VM_PAGE(pa)) == NULL) {
			vm_page_unlock_queues();
			goto out;
		}
		/* Get the current flags for this page. */

		pv = pmap_find_pv(pg, pm, va);
		if (pv == NULL) {
			vm_page_unlock_queues();
			goto out;
		}

		pg->md.pvh_attrs |= PVF_REF;
		pv->pv_flags |= PVF_REF;

		*ptep = (pte & ~L2_TYPE_MASK) | L2_S_PROTO;
		PTE_SYNC(ptep);
		rv = 1;
		vm_page_unlock_queues();
	}

	/*
	 * We know there is a valid mapping here, so simply
	 * fix up the L1 if necessary.
	 */
	pl1pd = &pm->pm_l1->l1_kva[l1idx];
	l1pd = l2b->l2b_phys | L1_C_DOM(pm->pm_domain) | L1_C_PROTO;
	if (*pl1pd != l1pd) {
		*pl1pd = l1pd;
		PTE_SYNC(pl1pd);
		rv = 1;
	}

#ifdef CPU_SA110
	/*
	 * There are bugs in the rev K SA110.  This is a check for one
	 * of them.
	 */
	if (rv == 0 && curcpu()->ci_arm_cputype == CPU_ID_SA110 &&
	    curcpu()->ci_arm_cpurev < 3) {
		/* Always current pmap */
		if (l2pte_valid(pte)) {
			extern int kernel_debug;
			if (kernel_debug & 1) {
				struct proc *p = curlwp->l_proc;
				printf("prefetch_abort: page is already "
				    "mapped - pte=%p *pte=%08x\n", ptep, pte);
				printf("prefetch_abort: pc=%08lx proc=%p "
				    "process=%s\n", va, p, p->p_comm);
				printf("prefetch_abort: far=%08x fs=%x\n",
				    cpu_faultaddress(), cpu_faultstatus());
			}
#ifdef DDB
			if (kernel_debug & 2)
				Debugger();
#endif
			rv = 1;
		}
	}
#endif /* CPU_SA110 */

#ifdef DEBUG
	/*
	 * If 'rv == 0' at this point, it generally indicates that there is a
	 * stale TLB entry for the faulting address. This happens when two or
	 * more processes are sharing an L1. Since we don't flush the TLB on
	 * a context switch between such processes, we can take domain faults
	 * for mappings which exist at the same VA in both processes. EVEN IF
	 * WE'VE RECENTLY FIXED UP THE CORRESPONDING L1 in pmap_enter(), for
	 * example.
	 *
	 * This is extremely likely to happen if pmap_enter() updated the L1
	 * entry for a recently entered mapping. In this case, the TLB is
	 * flushed for the new mapping, but there may still be TLB entries for
	 * other mappings belonging to other processes in the 1MB range
	 * covered by the L1 entry.
	 *
	 * Since 'rv == 0', we know that the L1 already contains the correct
	 * value, so the fault must be due to a stale TLB entry.
	 *
	 * Since we always need to flush the TLB anyway in the case where we
	 * fixed up the L1, or frobbed the L2 PTE, we effectively deal with
	 * stale TLB entries dynamically.
	 *
	 * However, the above condition can ONLY happen if the current L1 is
	 * being shared. If it happens when the L1 is unshared, it indicates
	 * that other parts of the pmap are not doing their job WRT managing
	 * the TLB.
	 */
	if (rv == 0 && pm->pm_l1->l1_domain_use_count == 1) {
		extern int last_fault_code;
		printf("fixup: pm %p, va 0x%lx, ftype %d - nothing to do!\n",
		    pm, va, ftype);
		printf("fixup: l2 %p, l2b %p, ptep %p, pl1pd %p\n",
		    l2, l2b, ptep, pl1pd);
		printf("fixup: pte 0x%x, l1pd 0x%x, last code 0x%x\n",
		    pte, l1pd, last_fault_code);
#ifdef DDB
		Debugger();
#endif
	}
#endif

	cpu_tlb_flushID_SE(va);
	cpu_cpwait();

	rv = 1;

out:
#if 0
	pmap_release_pmap_lock(pm);
	PMAP_MAP_TO_HEAD_UNLOCK();
#endif
	return (rv);
}
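
/*
 * Worked example of the modified-emulation path above: a page is entered
 * writable but clean, so its PTE lacks L2_S_PROT_W.  The first store
 * faults; pmap_fault_fixup() finds PVF_WRITE set in the pv_entry, sets
 * PVF_REF|PVF_MOD, dirties the vm_page, rewrites the PTE as
 * (pte & ~L2_TYPE_MASK) | L2_S_PROTO | L2_S_PROT_W, syncs it and flushes
 * the TLB entry.  The cost is one data abort per first write to a clean
 * page.
 */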

/*
 * Initialize the address space (zone) for the pv_entries.  Set a
 * high water mark so that the system can recover from excessive
 * numbers of pv entries.
 */
void
pmap_init2()
{
	int shpgperproc = PMAP_SHPGPERPROC;
	struct l2_bucket *l2b;
	struct l1_ttable *l1;
	pd_entry_t *pl1pt;
	pt_entry_t *ptep, pte;
	vm_offset_t va, eva;
	u_int loop, needed;

	TUNABLE_INT_FETCH("vm.pmap.shpgperproc", &shpgperproc);

	pv_entry_max = shpgperproc * maxproc + vm_page_array_size;
	pv_entry_high_water = 9 * (pv_entry_max / 10);
	l2zone = uma_zcreate("L2 Table", L2_TABLE_SIZE_REAL, pmap_l2ptp_ctor,
	    NULL, NULL, NULL, UMA_ALIGN_PTR, UMA_ZONE_VM | UMA_ZONE_NOFREE);
	uma_prealloc(l2zone, 4096);
	l2table_zone = uma_zcreate("L2 Table", sizeof(struct l2_dtable),
	    NULL, NULL, NULL, NULL, UMA_ALIGN_PTR,
	    UMA_ZONE_VM | UMA_ZONE_NOFREE);
	uma_prealloc(l2table_zone, 1024);

	uma_zone_set_obj(pvzone, &pvzone_obj, pv_entry_max);
	uma_zone_set_obj(l2zone, &l2zone_obj, pv_entry_max);

	needed = (maxproc / PMAP_DOMAINS) + ((maxproc % PMAP_DOMAINS) ? 1 : 0);
	needed -= 1;
	l1 = malloc(sizeof(*l1) * needed, M_VMPMAP, M_WAITOK);

	for (loop = 0; loop < needed; loop++, l1++) {
		/* Allocate a L1 page table */
		va = (vm_offset_t)contigmalloc(L1_TABLE_SIZE, M_VMPMAP, 0, 0x0,
		    0xffffffff, L1_TABLE_SIZE, 0);

		if (va == 0)
			panic("Cannot allocate L1 KVM");

		eva = va + L1_TABLE_SIZE;
		pl1pt = (pd_entry_t *)va;

		while (va < eva) {
			l2b = pmap_get_l2_bucket(pmap_kernel(), va);
			ptep = &l2b->l2b_kva[l2pte_index(va)];
			pte = *ptep;
			pte = (pte & ~L2_S_CACHE_MASK) | pte_l2_s_cache_mode_pt;
			*ptep = pte;
			PTE_SYNC(ptep);
			cpu_tlb_flushD_SE(va);

			va += PAGE_SIZE;
		}
		pmap_init_l1(l1, pl1pt);
	}

#ifdef DEBUG
	printf("pmap_init2: Allocated %d static L1 descriptor tables\n",
	    needed);
#endif
}

/*
 * This is used to stuff certain critical values into the PCB where they
 * can be accessed quickly from cpu_switch() et al.
 */
void
pmap_set_pcb_pagedir(pmap_t pm, struct pcb *pcb)
{
	struct l2_bucket *l2b;

	pcb->pcb_pagedir = pm->pm_l1->l1_physaddr;
	pcb->pcb_dacr = (DOMAIN_CLIENT << (PMAP_DOMAIN_KERNEL * 2)) |
	    (DOMAIN_CLIENT << (pm->pm_domain * 2));

	if (vector_page < KERNBASE) {
		pcb->pcb_pl1vec = &pm->pm_l1->l1_kva[L1_IDX(vector_page)];
		l2b = pmap_get_l2_bucket(pm, vector_page);
		pcb->pcb_l1vec = l2b->l2b_phys | L1_C_PROTO |
		    L1_C_DOM(pm->pm_domain) | L1_C_DOM(PMAP_DOMAIN_KERNEL);
	} else
		pcb->pcb_pl1vec = NULL;
}
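
/*
 * Example of the DACR value built above, assuming DOMAIN_CLIENT == 1 and
 * PMAP_DOMAIN_KERNEL == 0: a pmap holding domain 5 gets
 * (1 << 0) | (1 << 10) == 0x401, i.e. client access for the kernel
 * domain and for its own domain, with every other domain set to fault.
 */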

void
pmap_activate(struct thread *td)
{
	pmap_t pm;
	struct pcb *pcb;

	pm = vmspace_pmap(td->td_proc->p_vmspace);
	pcb = td->td_pcb;

	critical_enter();
	pmap_set_pcb_pagedir(pm, pcb);

	if (td == curthread) {
		u_int cur_dacr, cur_ttb;

		__asm __volatile("mrc p15, 0, %0, c2, c0, 0" : "=r"(cur_ttb));
		__asm __volatile("mrc p15, 0, %0, c3, c0, 0" : "=r"(cur_dacr));

		cur_ttb &= ~(L1_TABLE_SIZE - 1);

		if (cur_ttb == (u_int)pcb->pcb_pagedir &&
		    cur_dacr == pcb->pcb_dacr) {
			/*
			 * No need to switch address spaces.
			 */
			critical_exit();
			return;
		}

		/*
		 * We MUST, I repeat, MUST fix up the L1 entry corresponding
		 * to 'vector_page' in the incoming L1 table before switching
		 * to it otherwise subsequent interrupts/exceptions (including
		 * domain faults!) will jump into hyperspace.
		 */
		if (pcb->pcb_pl1vec) {
			*pcb->pcb_pl1vec = pcb->pcb_l1vec;
			/*
			 * Don't need to PTE_SYNC() at this point since
			 * cpu_setttb() is about to flush both the cache
			 * and the TLB.
			 */
		}

		cpu_domains(pcb->pcb_dacr);
		cpu_setttb(pcb->pcb_pagedir);
	}
	critical_exit();
}

static int
pmap_set_pt_cache_mode(pd_entry_t *kl1, vm_offset_t va)
{
	pd_entry_t *pdep, pde;
	pt_entry_t *ptep, pte;
	vm_offset_t pa;
	int rv = 0;

	/*
	 * Make sure the descriptor itself has the correct cache mode
	 */
	pdep = &kl1[L1_IDX(va)];
	pde = *pdep;

	if (l1pte_section_p(pde)) {
		if ((pde & L1_S_CACHE_MASK) != pte_l1_s_cache_mode_pt) {
			*pdep = (pde & ~L1_S_CACHE_MASK) |
			    pte_l1_s_cache_mode_pt;
			PTE_SYNC(pdep);
			cpu_dcache_wbinv_range((vm_offset_t)pdep,
			    sizeof(*pdep));
			rv = 1;
		}
	} else {
		pa = (vm_paddr_t)(pde & L1_C_ADDR_MASK);
		ptep = (pt_entry_t *)kernel_pt_lookup(pa);
		if (ptep == NULL)
			panic("pmap_bootstrap: No L2 for L2 @ va %p\n", ptep);

		ptep = &ptep[l2pte_index(va)];
		pte = *ptep;
		if ((pte & L2_S_CACHE_MASK) != pte_l2_s_cache_mode_pt) {
			*ptep = (pte & ~L2_S_CACHE_MASK) |
			    pte_l2_s_cache_mode_pt;
			PTE_SYNC(ptep);
			cpu_dcache_wbinv_range((vm_offset_t)ptep,
			    sizeof(*ptep));
			rv = 1;
		}
	}

	return (rv);
}

static void
pmap_alloc_specials(vm_offset_t *availp, int pages, vm_offset_t *vap,
    pt_entry_t **ptep)
{
	vm_offset_t va = *availp;
	struct l2_bucket *l2b;

	if (ptep) {
		l2b = pmap_get_l2_bucket(pmap_kernel(), va);
		if (l2b == NULL)
			panic("pmap_alloc_specials: no l2b for 0x%x", va);

		*ptep = &l2b->l2b_kva[l2pte_index(va)];
	}

	*vap = va;
	*availp = va + (PAGE_SIZE * pages);
}
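
/*
 * pmap_alloc_specials() above simply carves PAGE_SIZE * pages out of the
 * *availp cursor and optionally hands back a pointer to the kernel PTE
 * for the first page; pmap_bootstrap() below uses it for the csrcp/cdstp
 * copy windows and for the kernel L2 table/metadata KVA.
 */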

/*
 * Bootstrap the system enough to run with virtual memory.
 *
 * On the arm this is called after mapping has already been enabled
 * and just syncs the pmap module with what has already been done.
 * [We can't call it easily with mapping off since the kernel is not
 * mapped with PA == VA, hence we would have to relocate every address
 * from the linked base (virtual) address "KERNBASE" to the actual
 * (physical) address starting relative to 0]
 */
#define PMAP_STATIC_L2_SIZE 16
#ifdef ARM_USE_SMALL_ALLOC
extern struct mtx smallalloc_mtx;
extern vm_offset_t alloc_curaddr;
#endif

void
pmap_bootstrap(vm_offset_t firstaddr, vm_offset_t lastaddr, struct pv_addr *l1pt)
{
	static struct l1_ttable static_l1;
	static struct l2_dtable static_l2[PMAP_STATIC_L2_SIZE];
	struct l1_ttable *l1 = &static_l1;
	struct l2_dtable *l2;
	struct l2_bucket *l2b;
	pd_entry_t pde;
	pd_entry_t *kernel_l1pt = (pd_entry_t *)l1pt->pv_va;
	pt_entry_t *ptep;
	vm_paddr_t pa;
	vm_offset_t va;
	vm_size_t size;
	int l1idx, l2idx, l2next = 0;

	PDEBUG(1, printf("firstaddr = %08x, lastaddr = %08x\n",
	    firstaddr, lastaddr));

	virtual_avail = firstaddr;
	kernel_pmap = &kernel_pmap_store;
	kernel_pmap->pm_l1 = l1;

	/*
	 * Scan the L1 translation table created by initarm() and create
	 * the required metadata for all valid mappings found in it.
	 */
	for (l1idx = 0; l1idx < (L1_TABLE_SIZE / sizeof(pd_entry_t)); l1idx++) {
		pde = kernel_l1pt[l1idx];

		/*
		 * We're only interested in Coarse mappings.
		 * pmap_extract() can deal with section mappings without
		 * recourse to checking L2 metadata.
		 */
		if ((pde & L1_TYPE_MASK) != L1_TYPE_C)
			continue;

		/*
		 * Lookup the KVA of this L2 descriptor table
		 */
		pa = (vm_paddr_t)(pde & L1_C_ADDR_MASK);
		ptep = (pt_entry_t *)kernel_pt_lookup(pa);

		if (ptep == NULL) {
			panic("pmap_bootstrap: No L2 for va 0x%x, pa 0x%lx",
			    (u_int)l1idx << L1_S_SHIFT, (long unsigned int)pa);
		}

		/*
		 * Fetch the associated L2 metadata structure.
		 * Allocate a new one if necessary.
		 */
		if ((l2 = kernel_pmap->pm_l2[L2_IDX(l1idx)]) == NULL) {
			if (l2next == PMAP_STATIC_L2_SIZE)
				panic("pmap_bootstrap: out of static L2s");
			kernel_pmap->pm_l2[L2_IDX(l1idx)] = l2 =
			    &static_l2[l2next++];
		}

		/*
		 * One more L1 slot tracked...
		 */
		l2->l2_occupancy++;

		/*
		 * Fill in the details of the L2 descriptor in the
		 * appropriate bucket.
		 */
		l2b = &l2->l2_bucket[L2_BUCKET(l1idx)];
		l2b->l2b_kva = ptep;
		l2b->l2b_phys = pa;
		l2b->l2b_l1idx = l1idx;

		/*
		 * Establish an initial occupancy count for this descriptor
		 */
		for (l2idx = 0;
		    l2idx < (L2_TABLE_SIZE_REAL / sizeof(pt_entry_t));
		    l2idx++) {
			if ((ptep[l2idx] & L2_TYPE_MASK) != L2_TYPE_INV) {
				l2b->l2b_occupancy++;
			}
		}

		/*
		 * Make sure the descriptor itself has the correct cache mode.
		 * If not, fix it, but whine about the problem. Port-meisters
		 * should consider this a clue to fix up their initarm()
		 * function. :)
		 */
		if (pmap_set_pt_cache_mode(kernel_l1pt, (vm_offset_t)ptep)) {
			printf("pmap_bootstrap: WARNING! wrong cache mode for "
			    "L2 pte @ %p\n", ptep);
		}
	}

	/*
	 * Ensure the primary (kernel) L1 has the correct cache mode for
	 * a page table. Bitch if it is not correctly set.
	 */
	for (va = (vm_offset_t)kernel_l1pt;
	    va < ((vm_offset_t)kernel_l1pt + L1_TABLE_SIZE); va += PAGE_SIZE) {
		if (pmap_set_pt_cache_mode(kernel_l1pt, va))
			printf("pmap_bootstrap: WARNING! wrong cache mode for "
			    "primary L1 @ 0x%x\n", va);
	}

	cpu_dcache_wbinv_all();
	cpu_tlb_flushID();
	cpu_cpwait();

	kernel_pmap->pm_active = -1;
	kernel_pmap->pm_domain = PMAP_DOMAIN_KERNEL;
	LIST_INIT(&allpmaps);
	TAILQ_INIT(&kernel_pmap->pm_pvlist);
	LIST_INSERT_HEAD(&allpmaps, kernel_pmap, pm_list);

	/*
	 * Reserve some special page table entries/VA space for temporary
	 * mapping of pages.
	 */
#define SYSMAP(c, p, v, n)						\
    v = (c)va; va += ((n)*PAGE_SIZE); p = pte; pte += (n);

	pmap_alloc_specials(&virtual_avail, 1, &csrcp, &csrc_pte);
	pmap_set_pt_cache_mode(kernel_l1pt, (vm_offset_t)csrc_pte);
	pmap_alloc_specials(&virtual_avail, 1, &cdstp, &cdst_pte);
	pmap_set_pt_cache_mode(kernel_l1pt, (vm_offset_t)cdst_pte);
	size = ((lastaddr - pmap_curmaxkvaddr) + L1_S_OFFSET) / L1_S_SIZE;
	pmap_alloc_specials(&virtual_avail,
	    round_page(size * L2_TABLE_SIZE_REAL) / PAGE_SIZE,
	    &pmap_kernel_l2ptp_kva, NULL);

	size = (size + (L2_BUCKET_SIZE - 1)) / L2_BUCKET_SIZE;
	pmap_alloc_specials(&virtual_avail,
	    round_page(size * sizeof(struct l2_dtable)) / PAGE_SIZE,
	    &pmap_kernel_l2dtable_kva, NULL);

	pmap_alloc_specials(&virtual_avail,
	    1, (vm_offset_t*)&_tmppt, NULL);
	SLIST_INIT(&l1_list);
	TAILQ_INIT(&l1_lru_list);
	mtx_init(&l1_lru_lock, "l1 list lock", NULL, MTX_DEF);
	pmap_init_l1(l1, kernel_l1pt);
	cpu_dcache_wbinv_all();

	virtual_avail = round_page(virtual_avail);
	virtual_end = lastaddr;
	kernel_vm_end = pmap_curmaxkvaddr;
#ifdef ARM_USE_SMALL_ALLOC
	mtx_init(&smallalloc_mtx, "Small alloc page list", NULL, MTX_DEF);
	alloc_curaddr = lastaddr;
#endif
}

/***************************************************
 * Pmap allocation/deallocation routines.
 ***************************************************/

/*
 * Release any resources held by the given physical map.
 * Called when a pmap initialized by pmap_pinit is being released.
 * Should only be called if the map contains no valid mappings.
 */
void
pmap_release(pmap_t pmap)
{
	struct pcb *pcb;

	pmap_idcache_wbinv_all(pmap);
	pmap_tlb_flushID(pmap);
	cpu_cpwait();
	LIST_REMOVE(pmap, pm_list);
	if (vector_page < KERNBASE) {
		struct pcb *curpcb = PCPU_GET(curpcb);
		pcb = thread0.td_pcb;
		if (pmap_is_current(pmap)) {
			/*
			 * Frob the L1 entry corresponding to the vector
			 * page so that it contains the kernel pmap's domain
			 * number. This will ensure pmap_remove() does not
			 * pull the current vector page out from under us.
			 */
			critical_enter();
			*pcb->pcb_pl1vec = pcb->pcb_l1vec;
			cpu_domains(pcb->pcb_dacr);
			cpu_setttb(pcb->pcb_pagedir);
			critical_exit();
		}
		pmap_remove(pmap, vector_page, vector_page + PAGE_SIZE);
		/*
		 * Make sure cpu_switch(), et al, DTRT. This is safe to do
		 * since this process has no remaining mappings of its own.
		 */
		curpcb->pcb_pl1vec = pcb->pcb_pl1vec;
		curpcb->pcb_l1vec = pcb->pcb_l1vec;
		curpcb->pcb_dacr = pcb->pcb_dacr;
		curpcb->pcb_pagedir = pcb->pcb_pagedir;
	}
	pmap_free_l1(pmap);

	dprintf("pmap_release()\n");
}

/*
 * Helper function for pmap_grow_l2_bucket()
 */
static __inline int
pmap_grow_map(vm_offset_t va, pt_entry_t cache_mode, vm_paddr_t *pap)
{
	struct l2_bucket *l2b;
	pt_entry_t *ptep;
	vm_paddr_t pa;
	struct vm_page *pg;

	pg = vm_page_alloc(NULL, 0, VM_ALLOC_NOOBJ | VM_ALLOC_ZERO |
	    VM_ALLOC_WIRED);
	if (pg == NULL)
		return (1);
	pa = VM_PAGE_TO_PHYS(pg);

	if (pap)
		*pap = pa;

	l2b = pmap_get_l2_bucket(pmap_kernel(), va);

	ptep = &l2b->l2b_kva[l2pte_index(va)];
	*ptep = L2_S_PROTO | pa | cache_mode |
	    L2_S_PROT(PTE_KERNEL, VM_PROT_READ | VM_PROT_WRITE);
	PTE_SYNC(ptep);
	return (0);
}
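
/*
 * pmap_grow_l2_bucket() below differs from the normal allocation path in
 * that it must work before the VM system is fully initialized and must
 * not sleep: new L2 page tables and l2_dtables are carved out of the
 * dedicated KVA reserved in pmap_bootstrap() (pmap_kernel_l2ptp_kva and
 * pmap_kernel_l2dtable_kva), backed one page at a time by pmap_grow_map().
 */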

/*
 * This is the same as pmap_alloc_l2_bucket(), except that it is only
 * used by pmap_growkernel().
 */
static __inline struct l2_bucket *
pmap_grow_l2_bucket(pmap_t pm, vm_offset_t va)
{
	struct l2_dtable *l2;
	struct l2_bucket *l2b;
	struct l1_ttable *l1;
	pd_entry_t *pl1pd;
	u_short l1idx;
	vm_offset_t nva;

	l1idx = L1_IDX(va);

	if ((l2 = pm->pm_l2[L2_IDX(l1idx)]) == NULL) {
		/*
		 * No mapping at this address, as there is
		 * no entry in the L1 table.
		 * Need to allocate a new l2_dtable.
		 */
		nva = pmap_kernel_l2dtable_kva;
		if ((nva & PAGE_MASK) == 0) {
			/*
			 * Need to allocate a backing page
			 */
			if (pmap_grow_map(nva, pte_l2_s_cache_mode, NULL))
				return (NULL);
		}

		l2 = (struct l2_dtable *)nva;
		nva += sizeof(struct l2_dtable);

		if ((nva & PAGE_MASK) < (pmap_kernel_l2dtable_kva &
		    PAGE_MASK)) {
			/*
			 * The new l2_dtable straddles a page boundary.
			 * Map in another page to cover it.
			 */
			if (pmap_grow_map(nva, pte_l2_s_cache_mode, NULL))
				return (NULL);
		}

		pmap_kernel_l2dtable_kva = nva;

		/*
		 * Link it into the parent pmap
		 */
		pm->pm_l2[L2_IDX(l1idx)] = l2;
	}

	l2b = &l2->l2_bucket[L2_BUCKET(l1idx)];

	/*
	 * Fetch pointer to the L2 page table associated with the address.
	 */
	if (l2b->l2b_kva == NULL) {
		pt_entry_t *ptep;

		/*
		 * No L2 page table has been allocated. Chances are, this
		 * is because we just allocated the l2_dtable, above.
		 */
		nva = pmap_kernel_l2ptp_kva;
		ptep = (pt_entry_t *)nva;
		if ((nva & PAGE_MASK) == 0) {
			/*
			 * Need to allocate a backing page
			 */
			if (pmap_grow_map(nva, pte_l2_s_cache_mode_pt,
			    &pmap_kernel_l2ptp_phys))
				return (NULL);
			PTE_SYNC_RANGE(ptep, PAGE_SIZE / sizeof(pt_entry_t));
		}

		l2->l2_occupancy++;
		l2b->l2b_kva = ptep;
		l2b->l2b_l1idx = l1idx;
		l2b->l2b_phys = pmap_kernel_l2ptp_phys;

		pmap_kernel_l2ptp_kva += L2_TABLE_SIZE_REAL;
		pmap_kernel_l2ptp_phys += L2_TABLE_SIZE_REAL;
	}

	/* Distribute new L1 entry to all other L1s */
	SLIST_FOREACH(l1, &l1_list, l1_link) {
		pl1pd = &l1->l1_kva[L1_IDX(va)];
		*pl1pd = l2b->l2b_phys | L1_C_DOM(PMAP_DOMAIN_KERNEL) |
		    L1_C_PROTO;
		PTE_SYNC(pl1pd);
	}

	return (l2b);
}

/*
 * grow the number of kernel page table entries, if needed
 */
void
pmap_growkernel(vm_offset_t addr)
{
	pmap_t kpm = pmap_kernel();
	int s;

	if (addr <= pmap_curmaxkvaddr)
		return;		/* we are OK */

	/*
	 * whoops!   we need to add kernel PTPs
	 */

	s = splhigh();	/* to be safe */

	/* Map 1MB at a time */
	for (; pmap_curmaxkvaddr < addr; pmap_curmaxkvaddr += L1_S_SIZE)
		pmap_grow_l2_bucket(kpm, pmap_curmaxkvaddr);

	/*
	 * flush out the cache, expensive but growkernel will happen so
	 * rarely
	 */
	cpu_dcache_wbinv_all();
	cpu_tlb_flushD();
	cpu_cpwait();
	kernel_vm_end = pmap_curmaxkvaddr;
	splx(s);
}
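
/*
 * Example: pmap_growkernel(kernel_vm_end + 4 * 1024 * 1024) takes four
 * trips through the loop above, growing the kernel page tables one 1MB
 * section (L1_S_SIZE) at a time and leaving kernel_vm_end equal to
 * pmap_curmaxkvaddr at the new high-water mark.
 */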
2784129198Scognet */ 2785129198Scognetvoid 2786129198Scognetpmap_page_protect(vm_page_t m, vm_prot_t prot) 2787129198Scognet{ 2788135641Scognet switch(prot) { 2789135641Scognet case VM_PROT_READ|VM_PROT_WRITE|VM_PROT_EXECUTE: 2790135641Scognet case VM_PROT_READ|VM_PROT_WRITE: 2791135641Scognet return; 2792135641Scognet 2793135641Scognet case VM_PROT_READ: 2794135641Scognet case VM_PROT_READ|VM_PROT_EXECUTE: 2795135641Scognet pmap_clearbit(m, PVF_WRITE); 2796135641Scognet break; 2797135641Scognet 2798135641Scognet default: 2799135641Scognet pmap_remove_all(m); 2800135641Scognet break; 2801129198Scognet } 2802135641Scognet 2803129198Scognet} 2804129198Scognet 2805129198Scognet 2806129198Scognet/* 2807129198Scognet * Remove all pages from the specified address space; 2808129198Scognet * this aids process exit speed. Also, this code 2809129198Scognet * is special-cased for the current process only, but 2810129198Scognet * can have the more generic (and slightly slower) 2811129198Scognet * mode enabled. This is much faster than pmap_remove 2812129198Scognet * in the case of running down an entire address space. 2813129198Scognet */ 2814129198Scognetvoid 2815129198Scognetpmap_remove_pages(pmap_t pmap, vm_offset_t sva, vm_offset_t eva) 2816129198Scognet{ 2817144760Scognet struct pv_entry *pv, *npv; 2818144760Scognet struct l2_bucket *l2b = NULL; 2819144760Scognet vm_page_t m; 2820144760Scognet pt_entry_t *pt; 2821144760Scognet 2822144760Scognet vm_page_lock_queues(); 2823144760Scognet for (pv = TAILQ_FIRST(&pmap->pm_pvlist); pv; pv = npv) { 2824144760Scognet if (pv->pv_va >= eva || pv->pv_va < sva) { 2825144760Scognet npv = TAILQ_NEXT(pv, pv_plist); 2826144760Scognet continue; 2827144760Scognet } 2828144760Scognet if (pv->pv_flags & PVF_WIRED) { 2829144760Scognet /* The page is wired, cannot remove it now. */ 2830144760Scognet npv = TAILQ_NEXT(pv, pv_plist); 2831144760Scognet continue; 2832144760Scognet } 2833144760Scognet pmap->pm_stats.resident_count--; 2834144760Scognet l2b = pmap_get_l2_bucket(pmap, pv->pv_va); 2835144760Scognet KASSERT(l2b != NULL, ("No L2 bucket in pmap_remove_pages")); 2836144760Scognet pt = &l2b->l2b_kva[l2pte_index(pv->pv_va)]; 2837144760Scognet m = PHYS_TO_VM_PAGE(*pt & L2_ADDR_MASK); 2838144760Scognet *pt = 0; 2839144760Scognet PTE_SYNC(pt); 2840144760Scognet npv = TAILQ_NEXT(pv, pv_plist); 2841144760Scognet pmap_nuke_pv(m, pmap, pv); 2842144760Scognet pmap_free_pv_entry(pv); 2843144760Scognet } 2844144760Scognet vm_page_unlock_queues(); 2845135641Scognet cpu_idcache_wbinv_all(); 2846135641Scognet cpu_tlb_flushID(); 2847135641Scognet cpu_cpwait(); 2848129198Scognet} 2849129198Scognet 2850129198Scognet 2851129198Scognet/*************************************************** 2852129198Scognet * Low level mapping routines..... 2853129198Scognet ***************************************************/ 2854129198Scognet 2855147114Scognet/* Map a section into the KVA.
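 *
 * [Illustrative usage -- not in the original source] Both va and pa
 * must be section-aligned (L1_S_SIZE, 1MB) or the KASSERT fires.
 * A sketch with hypothetical addresses, mapping device registers
 * uncached by passing neither SECTION_CACHE nor SECTION_PT:
 *
 *	pmap_kenter_section(0xd0000000, 0x40000000, 0);
 *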
*/ 2856147114Scognet 2857147114Scognetvoid 2858147114Scognetpmap_kenter_section(vm_offset_t va, vm_offset_t pa, int flags) 2859147114Scognet{ 2860147114Scognet pd_entry_t pd = L1_S_PROTO | pa | L1_S_PROT(PTE_KERNEL, 2861147114Scognet VM_PROT_READ|VM_PROT_WRITE) | L1_S_DOM(PMAP_DOMAIN_KERNEL); 2862147114Scognet struct l1_ttable *l1; 2863147114Scognet 2864147114Scognet KASSERT(((va | pa) & L1_S_OFFSET) == 0, 2865147114Scognet ("Not a valid section mapping")); 2866147114Scognet if (flags & SECTION_CACHE) 2867147114Scognet pd |= pte_l1_s_cache_mode; 2868147114Scognet else if (flags & SECTION_PT) 2869147114Scognet pd |= pte_l1_s_cache_mode_pt; 2870147114Scognet SLIST_FOREACH(l1, &l1_list, l1_link) { 2871147114Scognet l1->l1_kva[L1_IDX(va)] = pd; 2872147114Scognet PTE_SYNC(&l1->l1_kva[L1_IDX(va)]); 2873147114Scognet } 2874147114Scognet} 2875147114Scognet 2876129198Scognet/* 2877129198Scognet * add a wired page to the kva 2878129198Scognet * note that in order for the mapping to take effect -- you 2879129198Scognet * should do an invltlb after doing the pmap_kenter... 2880129198Scognet */ 2881135641Scognetstatic PMAP_INLINE void 2882135641Scognetpmap_kenter_internal(vm_offset_t va, vm_offset_t pa, int flags) 2883129198Scognet{ 2884129198Scognet struct l2_bucket *l2b; 2885129198Scognet pt_entry_t *pte; 2886129198Scognet pt_entry_t opte; 2887129198Scognet PDEBUG(1, printf("pmap_kenter: va = %08x, pa = %08x\n", 2888129198Scognet (uint32_t) va, (uint32_t) pa)); 2889129198Scognet 2890129198Scognet 2891129198Scognet l2b = pmap_get_l2_bucket(pmap_kernel(), va); 2892135641Scognet if (l2b == NULL) 2893135641Scognet l2b = pmap_grow_l2_bucket(pmap_kernel(), va); 2894129198Scognet KASSERT(l2b != NULL, ("No L2 Bucket")); 2895129198Scognet pte = &l2b->l2b_kva[l2pte_index(va)]; 2896129198Scognet opte = *pte; 2897129198Scognet PDEBUG(1, printf("pmap_kenter: pte = %08x, opte = %08x, npte = %08x\n", 2898129198Scognet (uint32_t) pte, opte, *pte)); 2899129198Scognet if (l2pte_valid(opte)) { 2900129198Scognet cpu_dcache_wbinv_range(va, PAGE_SIZE); 2901129198Scognet cpu_tlb_flushD_SE(va); 2902129198Scognet cpu_cpwait(); 2903135641Scognet } else { 2904129198Scognet if (opte == 0) 2905129198Scognet l2b->l2b_occupancy++; 2906135641Scognet } 2907129198Scognet *pte = L2_S_PROTO | pa | L2_S_PROT(PTE_KERNEL, 2908135641Scognet VM_PROT_READ | VM_PROT_WRITE); 2909135641Scognet if (flags & KENTER_CACHE) 2910135641Scognet *pte |= pte_l2_s_cache_mode; 2911142570Scognet if (flags & KENTER_USER) 2912142570Scognet *pte |= L2_S_PROT_U; 2913129198Scognet PTE_SYNC(pte); 2914135641Scognet} 2915129198Scognet 2916135641Scognetvoid 2917135641Scognetpmap_kenter(vm_offset_t va, vm_paddr_t pa) 2918135641Scognet{ 2919135641Scognet pmap_kenter_internal(va, pa, KENTER_CACHE); 2920129198Scognet} 2921129198Scognet 2922142570Scognetvoid 2923142570Scognetpmap_kenter_user(vm_offset_t va, vm_paddr_t pa) 2924142570Scognet{ 2925143192Scognet 2926142570Scognet pmap_kenter_internal(va, pa, KENTER_CACHE|KENTER_USER); 2927143192Scognet /* 2928143192Scognet * Call pmap_fault_fixup now, to make sure we'll have no exception 2929143192Scognet * at the first use of the new address, or bad things will happen, 2930143192Scognet * as we use one of these addresses in the exception handlers.
2931143192Scognet */ 2932143192Scognet pmap_fault_fixup(pmap_kernel(), va, VM_PROT_READ|VM_PROT_WRITE, 1); 2933142570Scognet} 2934129198Scognet 2935129198Scognet/* 2936135641Scognet * remove a page from the kernel pagetables 2937129198Scognet */ 2938129198ScognetPMAP_INLINE void 2939129198Scognetpmap_kremove(vm_offset_t va) 2940129198Scognet{ 2941135641Scognet struct l2_bucket *l2b; 2942135641Scognet pt_entry_t *pte, opte; 2943135641Scognet 2944135641Scognet l2b = pmap_get_l2_bucket(pmap_kernel(), va); 2945145071Scognet if (!l2b) 2946145071Scognet return; 2947135641Scognet KASSERT(l2b != NULL, ("No L2 Bucket")); 2948135641Scognet pte = &l2b->l2b_kva[l2pte_index(va)]; 2949135641Scognet opte = *pte; 2950135641Scognet if (l2pte_valid(opte)) { 2951135641Scognet cpu_dcache_wbinv_range(va, PAGE_SIZE); 2952135641Scognet cpu_tlb_flushD_SE(va); 2953135641Scognet cpu_cpwait(); 2954144760Scognet *pte = 0; 2955135641Scognet } 2956129198Scognet} 2957129198Scognet 2958129198Scognet 2959129198Scognet/* 2960129198Scognet * Used to map a range of physical addresses into kernel 2961129198Scognet * virtual address space. 2962129198Scognet * 2963129198Scognet * The value passed in '*virt' is a suggested virtual address for 2964129198Scognet * the mapping. Architectures which can support a direct-mapped 2965129198Scognet * physical to virtual region can return the appropriate address 2966129198Scognet * within that region, leaving '*virt' unchanged. Other 2967129198Scognet * architectures should map the pages starting at '*virt' and 2968129198Scognet * update '*virt' with the first usable address after the mapped 2969129198Scognet * region. 2970129198Scognet */ 2971129198Scognetvm_offset_t 2972129198Scognetpmap_map(vm_offset_t *virt, vm_offset_t start, vm_offset_t end, int prot) 2973129198Scognet{ 2974129198Scognet vm_offset_t sva = *virt; 2975129198Scognet vm_offset_t va = sva; 2976129198Scognet 2977129198Scognet PDEBUG(1, printf("pmap_map: virt = %08x, start = %08x, end = %08x, " 2978129198Scognet "prot = %d\n", (uint32_t) *virt, (uint32_t) start, (uint32_t) end, 2979129198Scognet prot)); 2980129198Scognet 2981129198Scognet while (start < end) { 2982129198Scognet pmap_kenter(va, start); 2983129198Scognet va += PAGE_SIZE; 2984129198Scognet start += PAGE_SIZE; 2985129198Scognet } 2986129198Scognet *virt = va; 2987129198Scognet return (sva); 2988129198Scognet} 2989129198Scognet 2990143724Scognetstatic void 2991146596Scognetpmap_wb_page(vm_page_t m, boolean_t do_inv) 2992143724Scognet{ 2993143724Scognet struct pv_entry *pv; 2994129198Scognet 2995143724Scognet TAILQ_FOREACH(pv, &m->md.pv_list, pv_list) 2996146596Scognet pmap_dcache_wb_range(pv->pv_pmap, pv->pv_va, PAGE_SIZE, do_inv, 2997144760Scognet (pv->pv_flags & PVF_WRITE) == 0); 2998143724Scognet} 2999143724Scognet 3000129198Scognet/* 3001129198Scognet * Add a list of wired pages to the kva. 3002129198Scognet * This routine is only used for temporary 3003129198Scognet * kernel mappings that do not need to have 3004129198Scognet * page modification or references recorded. 3005129198Scognet * Note that old mappings are simply written 3006129198Scognet * over. The page *must* be wired.
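 *
 * [Illustrative sketch -- not in the original source] A typical
 * temporary-mapping cycle; pages and npages are hypothetical:
 *
 *	vm_offset_t va;
 *
 *	va = kmem_alloc_nofault(kernel_map, npages * PAGE_SIZE);
 *	pmap_qenter(va, pages, npages);
 *	... access the pages through va ...
 *	pmap_qremove(va, npages);
 *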
3007129198Scognet */ 3008129198Scognetvoid 3009129198Scognetpmap_qenter(vm_offset_t va, vm_page_t *m, int count) 3010129198Scognet{ 3011129198Scognet int i; 3012129198Scognet 3013129198Scognet for (i = 0; i < count; i++) { 3014146596Scognet pmap_wb_page(m[i], TRUE); 3015135641Scognet pmap_kenter_internal(va, VM_PAGE_TO_PHYS(m[i]), 3016135641Scognet KENTER_CACHE); 3017129198Scognet va += PAGE_SIZE; 3018129198Scognet } 3019129198Scognet} 3020129198Scognet 3021129198Scognet 3022129198Scognet/* 3023129198Scognet * this routine jerks page mappings from the 3024129198Scognet * kernel -- it is meant only for temporary mappings. 3025129198Scognet */ 3026129198Scognetvoid 3027129198Scognetpmap_qremove(vm_offset_t va, int count) 3028129198Scognet{ 3029146596Scognet vm_paddr_t pa; 3030129198Scognet int i; 3031129198Scognet 3032129198Scognet for (i = 0; i < count; i++) { 3033146596Scognet pa = vtophys(va); 3034146596Scognet if (pa) { 3035146596Scognet pmap_wb_page(PHYS_TO_VM_PAGE(pa), TRUE); 3036146596Scognet pmap_kremove(va); 3037146596Scognet } 3038129198Scognet va += PAGE_SIZE; 3039129198Scognet } 3040129198Scognet} 3041129198Scognet 3042129198Scognet 3043129198Scognet/* 3044129198Scognet * pmap_object_init_pt preloads the ptes for a given object 3045129198Scognet * into the specified pmap. This eliminates the blast of soft 3046129198Scognet * faults on process startup and immediately after an mmap. 3047129198Scognet */ 3048129198Scognetvoid 3049129198Scognetpmap_object_init_pt(pmap_t pmap, vm_offset_t addr, vm_object_t object, 3050129198Scognet vm_pindex_t pindex, vm_size_t size) 3051129198Scognet{ 3052129198Scognet printf("pmap_object_init_pt()\n"); 3053129198Scognet} 3054129198Scognet 3055129198Scognet 3056129198Scognet/* 3057129198Scognet * pmap_is_prefaultable: 3058129198Scognet * 3059129198Scognet * Return whether or not the specified virtual address is eligible 3060129198Scognet * for prefault. 3061129198Scognet */ 3062129198Scognetboolean_t 3063129198Scognetpmap_is_prefaultable(pmap_t pmap, vm_offset_t addr) 3064129198Scognet{ 3065135641Scognet pd_entry_t *pde; 3066129198Scognet pt_entry_t *pte; 3067129198Scognet 3068135641Scognet if (!pmap_get_pde_pte(pmap, addr, &pde, &pte)) 3069135641Scognet return (FALSE); 3070135641Scognet if (*pte == 0) 3071135641Scognet return (TRUE); 3072135641Scognet return (FALSE); 3073129198Scognet} 3074129198Scognet 3075129198Scognet/* 3076129198Scognet * Fetch pointers to the PDE/PTE for the given pmap/VA pair. 3077129198Scognet * Returns TRUE if the mapping exists, else FALSE. 3078129198Scognet * 3079129198Scognet * NOTE: This function is only used by a couple of arm-specific modules. 3080129198Scognet * It is not safe to take any pmap locks here, since we could be right 3081129198Scognet * in the middle of debugging the pmap anyway... 3082129198Scognet * 3083129198Scognet * It is possible for this routine to return FALSE even though a valid 3084129198Scognet * mapping does exist. This is because we don't lock, so the metadata 3085129198Scognet * state may be inconsistent. 3086129198Scognet * 3087129198Scognet * NOTE: We can return a NULL *ptp in the case where the L1 pde is 3088129198Scognet * a "section" mapping.
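 *
 * [Illustrative sketch -- not in the original source] A caller must
 * therefore check both the return value and *ptp:
 *
 *	pd_entry_t *pdp;
 *	pt_entry_t *ptp;
 *
 *	if (!pmap_get_pde_pte(pm, va, &pdp, &ptp))
 *		return;		(no metadata for this VA at all)
 *	if (ptp == NULL)
 *		...		(1MB section; only *pdp is valid)
 *	else
 *		...		(*ptp is the L2 PTE covering va)
 *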
3089129198Scognet */ 3090129198Scognetboolean_t 3091129198Scognetpmap_get_pde_pte(pmap_t pm, vm_offset_t va, pd_entry_t **pdp, pt_entry_t **ptp) 3092129198Scognet{ 3093129198Scognet struct l2_dtable *l2; 3094129198Scognet pd_entry_t *pl1pd, l1pd; 3095129198Scognet pt_entry_t *ptep; 3096129198Scognet u_short l1idx; 3097129198Scognet 3098129198Scognet if (pm->pm_l1 == NULL) 3099129198Scognet return (FALSE); 3100129198Scognet 3101129198Scognet l1idx = L1_IDX(va); 3102129198Scognet *pdp = pl1pd = &pm->pm_l1->l1_kva[l1idx]; 3103129198Scognet l1pd = *pl1pd; 3104129198Scognet 3105129198Scognet if (l1pte_section_p(l1pd)) { 3106129198Scognet *ptp = NULL; 3107129198Scognet return (TRUE); 3108129198Scognet } 3109129198Scognet 3110129198Scognet if (pm->pm_l2 == NULL) 3111129198Scognet return (FALSE); 3112129198Scognet 3113129198Scognet l2 = pm->pm_l2[L2_IDX(l1idx)]; 3114129198Scognet 3115129198Scognet if (l2 == NULL || 3116129198Scognet (ptep = l2->l2_bucket[L2_BUCKET(l1idx)].l2b_kva) == NULL) { 3117129198Scognet return (FALSE); 3118129198Scognet } 3119129198Scognet 3120129198Scognet *ptp = &ptep[l2pte_index(va)]; 3121129198Scognet return (TRUE); 3122129198Scognet} 3123129198Scognet 3124129198Scognet/* 3125129198Scognet * Routine: pmap_remove_all 3126129198Scognet * Function: 3127129198Scognet * Removes this physical page from 3128129198Scognet * all physical maps in which it resides. 3129129198Scognet * Reflects back modify bits to the pager. 3130129198Scognet * 3131129198Scognet * Notes: 3132129198Scognet * Original versions of this routine were very 3133129198Scognet * inefficient because they iteratively called 3134129198Scognet * pmap_remove (slow...) 3135129198Scognet */ 3136129198Scognetvoid 3137129198Scognetpmap_remove_all(vm_page_t m) 3138129198Scognet{ 3139129198Scognet pv_entry_t pv; 3140135641Scognet pt_entry_t *ptep, pte; 3141135641Scognet struct l2_bucket *l2b; 3142135641Scognet boolean_t flush = FALSE; 3143135641Scognet pmap_t curpm; 3144135641Scognet int flags = 0; 3145129198Scognet 3146129198Scognet#if defined(PMAP_DEBUG) 3147129198Scognet /* 3148129198Scognet * XXX this makes pmap_page_protect(NONE) illegal for non-managed 3149129198Scognet * pages! 
3150129198Scognet */ 3151147217Salc if (m->flags & PG_FICTITIOUS) { 3152129198Scognet panic("pmap_page_protect: illegal for unmanaged page, va: 0x%x", VM_PAGE_TO_PHYS(m)); 3153129198Scognet } 3154129198Scognet#endif 3155129198Scognet 3156135641Scognet if (TAILQ_EMPTY(&m->md.pv_list)) 3157135641Scognet return; 3158135641Scognet curpm = vmspace_pmap(curproc->p_vmspace); 3159129198Scognet while ((pv = TAILQ_FIRST(&m->md.pv_list)) != NULL) { 3160135641Scognet if (flush == FALSE && (pv->pv_pmap == curpm || 3161135641Scognet pv->pv_pmap == pmap_kernel())) 3162135641Scognet flush = TRUE; 3163135641Scognet l2b = pmap_get_l2_bucket(pv->pv_pmap, pv->pv_va); 3164135641Scognet KASSERT(l2b != NULL, ("No l2 bucket")); 3165135641Scognet ptep = &l2b->l2b_kva[l2pte_index(pv->pv_va)]; 3166135641Scognet pte = *ptep; 3167135641Scognet *ptep = 0; 3168135641Scognet PTE_SYNC_CURRENT(pv->pv_pmap, ptep); 3169135641Scognet pmap_free_l2_bucket(pv->pv_pmap, l2b, 1); 3170135641Scognet if (pv->pv_flags & PVF_WIRED) 3171135641Scognet pv->pv_pmap->pm_stats.wired_count--; 3172129198Scognet pv->pv_pmap->pm_stats.resident_count--; 3173135641Scognet flags |= pv->pv_flags; 3174135641Scognet pmap_nuke_pv(m, pv->pv_pmap, pv); 3175129198Scognet pmap_free_pv_entry(pv); 3176129198Scognet } 3177129198Scognet 3178135641Scognet if (flush) { 3179135641Scognet if (PV_BEEN_EXECD(flags)) 3180135641Scognet pmap_tlb_flushID(curpm); 3181135641Scognet else 3182135641Scognet pmap_tlb_flushD(curpm); 3183135641Scognet } 3184129198Scognet} 3185129198Scognet 3186129198Scognet 3187129198Scognet/* 3188129198Scognet * Set the physical protection on the 3189129198Scognet * specified range of this map as requested. 3190129198Scognet */ 3191129198Scognetvoid 3192129198Scognetpmap_protect(pmap_t pm, vm_offset_t sva, vm_offset_t eva, vm_prot_t prot) 3193129198Scognet{ 3194129198Scognet struct l2_bucket *l2b; 3195129198Scognet pt_entry_t *ptep, pte; 3196129198Scognet vm_offset_t next_bucket; 3197129198Scognet u_int flags; 3198129198Scognet int flush; 3199129198Scognet 3200129198Scognet if ((prot & VM_PROT_READ) == 0) { 3201132899Salc mtx_lock(&Giant); 3202129198Scognet pmap_remove(pm, sva, eva); 3203132899Salc mtx_unlock(&Giant); 3204129198Scognet return; 3205129198Scognet } 3206129198Scognet 3207129198Scognet if (prot & VM_PROT_WRITE) { 3208129198Scognet /* 3209129198Scognet * If this is a read->write transition, just ignore it and let 3210135641Scognet * vm_fault() take care of it later. 3211129198Scognet */ 3212129198Scognet return; 3213129198Scognet } 3214129198Scognet 3215132899Salc mtx_lock(&Giant); 3216129198Scognet 3217129198Scognet /* 3218129198Scognet * OK, at this point, we know we're doing write-protect operation. 3219129198Scognet * If the pmap is active, write-back the range. 3220129198Scognet */ 3221129198Scognet pmap_dcache_wb_range(pm, sva, eva - sva, FALSE, FALSE); 3222129198Scognet 3223129198Scognet flush = ((eva - sva) >= (PAGE_SIZE * 4)) ? 
0 : -1; 3224129198Scognet flags = 0; 3225129198Scognet 3226144760Scognet vm_page_lock_queues(); 3227129198Scognet while (sva < eva) { 3228129198Scognet next_bucket = L2_NEXT_BUCKET(sva); 3229129198Scognet if (next_bucket > eva) 3230129198Scognet next_bucket = eva; 3231129198Scognet 3232129198Scognet l2b = pmap_get_l2_bucket(pm, sva); 3233129198Scognet if (l2b == NULL) { 3234129198Scognet sva = next_bucket; 3235129198Scognet continue; 3236129198Scognet } 3237129198Scognet 3238129198Scognet ptep = &l2b->l2b_kva[l2pte_index(sva)]; 3239129198Scognet 3240129198Scognet while (sva < next_bucket) { 3241129198Scognet if ((pte = *ptep) != 0 && (pte & L2_S_PROT_W) != 0) { 3242129198Scognet struct vm_page *pg; 3243129198Scognet u_int f; 3244129198Scognet 3245129198Scognet pg = PHYS_TO_VM_PAGE(l2pte_pa(pte)); 3246129198Scognet pte &= ~L2_S_PROT_W; 3247129198Scognet *ptep = pte; 3248129198Scognet PTE_SYNC(ptep); 3249129198Scognet 3250129198Scognet if (pg != NULL) { 3251129198Scognet f = pmap_modify_pv(pg, pm, sva, 3252129198Scognet PVF_WRITE, 0); 3253129198Scognet pmap_vac_me_harder(pg, pm, sva); 3254144760Scognet if (pmap_track_modified(sva)) 3255144760Scognet vm_page_dirty(pg); 3256129198Scognet } else 3257129198Scognet f = PVF_REF | PVF_EXEC; 3258129198Scognet 3259129198Scognet if (flush >= 0) { 3260129198Scognet flush++; 3261129198Scognet flags |= f; 3262129198Scognet } else 3263129198Scognet if (PV_BEEN_EXECD(f)) 3264129198Scognet pmap_tlb_flushID_SE(pm, sva); 3265129198Scognet else 3266129198Scognet if (PV_BEEN_REFD(f)) 3267129198Scognet pmap_tlb_flushD_SE(pm, sva); 3268129198Scognet } 3269129198Scognet 3270129198Scognet sva += PAGE_SIZE; 3271129198Scognet ptep++; 3272129198Scognet } 3273129198Scognet } 3274129198Scognet 3275129198Scognet 3276129198Scognet if (flush) { 3277129198Scognet if (PV_BEEN_EXECD(flags)) 3278129198Scognet pmap_tlb_flushID(pm); 3279129198Scognet else 3280129198Scognet if (PV_BEEN_REFD(flags)) 3281129198Scognet pmap_tlb_flushD(pm); 3282129198Scognet } 3283144760Scognet vm_page_unlock_queues(); 3284129198Scognet 3285132899Salc mtx_unlock(&Giant); 3286129198Scognet} 3287129198Scognet 3288129198Scognet 3289129198Scognet/* 3290129198Scognet * Insert the given physical page (p) at 3291129198Scognet * the specified virtual address (v) in the 3292129198Scognet * target physical map with the protection requested. 3293129198Scognet * 3294129198Scognet * If specified, the page will be wired down, meaning 3295129198Scognet * that the related pte can not be reclaimed. 3296129198Scognet * 3297129198Scognet * NB: This is the only routine which MAY NOT lazy-evaluate 3298129198Scognet * or lose information. That is, this routine must actually 3299129198Scognet * insert this page into the given map NOW. 
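 *
 * [Illustrative usage -- not in the original source] A wired,
 * writable insertion, with m a managed page held by the caller:
 *
 *	pmap_enter(pmap, va, m, VM_PROT_READ | VM_PROT_WRITE, TRUE);
 *
 * With wired == TRUE the pv entry is flagged PVF_WIRED, so the
 * mapping is not reclaimed until it is unwired or removed.
 *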
3300129198Scognet */ 3301135641Scognet 3302129198Scognetvoid 3303129198Scognetpmap_enter(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot, 3304129198Scognet boolean_t wired) 3305129198Scognet{ 3306135641Scognet struct l2_bucket *l2b = NULL; 3307129198Scognet struct vm_page *opg; 3308144760Scognet struct pv_entry *pve = NULL; 3309129198Scognet pt_entry_t *ptep, npte, opte; 3310129198Scognet u_int nflags; 3311129198Scognet u_int oflags; 3312129198Scognet vm_paddr_t pa; 3313129198Scognet 3314135641Scognet vm_page_lock_queues(); 3315129198Scognet if (va == vector_page) { 3316129198Scognet pa = systempage.pv_pa; 3317129198Scognet m = NULL; 3318129198Scognet } else 3319129198Scognet pa = VM_PAGE_TO_PHYS(m); 3320129198Scognet nflags = 0; 3321129198Scognet if (prot & VM_PROT_WRITE) 3322129198Scognet nflags |= PVF_WRITE; 3323129198Scognet if (prot & VM_PROT_EXECUTE) 3324129198Scognet nflags |= PVF_EXEC; 3325129198Scognet if (wired) 3326129198Scognet nflags |= PVF_WIRED; 3327129198Scognet PDEBUG(1, printf("pmap_enter: pmap = %08x, va = %08x, m = %08x, prot = %x, " 3328129198Scognet "wired = %x\n", (uint32_t) pmap, va, (uint32_t) m, prot, wired)); 3329129198Scognet 3330135641Scognet if (pmap == pmap_kernel()) { 3331129198Scognet l2b = pmap_get_l2_bucket(pmap, va); 3332135641Scognet if (l2b == NULL) 3333135641Scognet l2b = pmap_grow_l2_bucket(pmap, va); 3334135641Scognet } else 3335129198Scognet l2b = pmap_alloc_l2_bucket(pmap, va); 3336135641Scognet KASSERT(l2b != NULL, 3337135641Scognet ("pmap_enter: failed to allocate l2 bucket")); 3338129198Scognet ptep = &l2b->l2b_kva[l2pte_index(va)]; 3339129198Scognet 3340135641Scognet opte = *ptep; 3341129198Scognet npte = pa; 3342129198Scognet oflags = 0; 3343129198Scognet if (opte) { 3344129198Scognet /* 3345129198Scognet * There is already a mapping at this address. 3346129198Scognet * If the physical address is different, lookup the 3347129198Scognet * vm_page. 3348129198Scognet */ 3349129198Scognet if (l2pte_pa(opte) != pa) 3350129198Scognet opg = PHYS_TO_VM_PAGE(l2pte_pa(opte)); 3351129198Scognet else 3352129198Scognet opg = m; 3353129198Scognet } else 3354129198Scognet opg = NULL; 3355129198Scognet 3356135641Scognet if ((prot & (VM_PROT_ALL)) || 3357135641Scognet (!m || m->md.pvh_attrs & PVF_REF)) { 3358129198Scognet /* 3359135641Scognet * - The access type indicates that we don't need 3360135641Scognet * to do referenced emulation. 3361135641Scognet * OR 3362135641Scognet * - The physical page has already been referenced 3363135641Scognet * so no need to re-do referenced emulation here. 3364129198Scognet */ 3365135641Scognet npte |= L2_S_PROTO; 3366135641Scognet 3367135641Scognet nflags |= PVF_REF; 3368135641Scognet 3369144760Scognet if (m && ((prot & VM_PROT_WRITE) != 0 || 3370144760Scognet (m->md.pvh_attrs & PVF_MOD))) { 3371129198Scognet /* 3372135641Scognet * This is a writable mapping, and the 3373135641Scognet * page's mod state indicates it has 3374135641Scognet * already been modified. Make it 3375135641Scognet * writable from the outset. 3376129198Scognet */ 3377135641Scognet nflags |= PVF_MOD; 3378144760Scognet if (!(m->md.pvh_attrs & PVF_MOD) && 3379144760Scognet pmap_track_modified(va)) 3380144760Scognet vm_page_dirty(m); 3381129198Scognet } 3382144760Scognet if (m && opte) 3383144760Scognet vm_page_flag_set(m, PG_REFERENCED); 3384135641Scognet } else { 3385135641Scognet /* 3386135641Scognet * Need to do page referenced emulation. 
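 *
 * [Editorial sketch -- not in the original source] Referenced
 * emulation means installing the PTE without L2_S_PROTO, leaving
 * it with the invalid type L2_TYPE_INV, roughly:
 *
 *	npte = pa | L2_TYPE_INV | pte_l2_s_cache_mode;
 *
 * The first access then aborts, and pmap_fault_fixup() validates
 * the PTE and records PVF_REF: software-managed referenced bits.
 *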
3387135641Scognet */ 3388135641Scognet npte |= L2_TYPE_INV; 3389135641Scognet } 3390135641Scognet 3391135641Scognet if (prot & VM_PROT_WRITE) 3392135641Scognet npte |= L2_S_PROT_W; 3393135641Scognet npte |= pte_l2_s_cache_mode; 3394135641Scognet if (m && m == opg) { 3395135641Scognet /* 3396135641Scognet * We're changing the attrs of an existing mapping. 3397135641Scognet */ 3398129198Scognet#if 0 3399135641Scognet simple_lock(&pg->mdpage.pvh_slock); 3400129198Scognet#endif 3401135641Scognet oflags = pmap_modify_pv(m, pmap, va, 3402135641Scognet PVF_WRITE | PVF_EXEC | PVF_WIRED | 3403135641Scognet PVF_MOD | PVF_REF, nflags); 3404129198Scognet#if 0 3405135641Scognet simple_unlock(&pg->mdpage.pvh_slock); 3406129198Scognet#endif 3407135641Scognet 3408135641Scognet /* 3409135641Scognet * We may need to flush the cache if we're 3410135641Scognet * doing rw-ro... 3411135641Scognet */ 3412135641Scognet if (pmap_is_current(pmap) && 3413135641Scognet (oflags & PVF_NC) == 0 && 3414129198Scognet (opte & L2_S_PROT_W) != 0 && 3415129198Scognet (prot & VM_PROT_WRITE) == 0) 3416135641Scognet cpu_dcache_wb_range(va, PAGE_SIZE); 3417129198Scognet } else { 3418129198Scognet /* 3419135641Scognet * New mapping, or changing the backing page 3420135641Scognet * of an existing mapping. 3421129198Scognet */ 3422129198Scognet if (opg) { 3423129198Scognet /* 3424135641Scognet * Replacing an existing mapping with a new one. 3425135641Scognet * It is part of our managed memory so we 3426135641Scognet * must remove it from the PV list 3427129198Scognet */ 3428129198Scognet#if 0 3429129198Scognet simple_lock(&opg->mdpage.pvh_slock); 3430129198Scognet#endif 3431129198Scognet pve = pmap_remove_pv(opg, pmap, va); 3432144760Scognet if (m && (m->flags & (PG_UNMANAGED | PG_FICTITIOUS)) && pve) 3433135641Scognet pmap_free_pv_entry(pve); 3434144760Scognet else if (!pve) 3435144760Scognet pve = pmap_get_pv_entry(); 3436135641Scognet KASSERT(pve != NULL, ("No pv")); 3437129198Scognet#if 0 3438129198Scognet simple_unlock(&opg->mdpage.pvh_slock); 3439129198Scognet#endif 3440129198Scognet oflags = pve->pv_flags; 3441135641Scognet 3442135641Scognet /* 3443135641Scognet * If the old mapping was valid (ref/mod 3444135641Scognet * emulation creates 'invalid' mappings 3445135641Scognet * initially) then make sure to frob 3446135641Scognet * the cache. 
3447135641Scognet */ 3448135641Scognet if ((oflags & PVF_NC) == 0 && 3449135641Scognet l2pte_valid(opte)) { 3450135641Scognet if (PV_BEEN_EXECD(oflags)) { 3451129198Scognet pmap_idcache_wbinv_range(pmap, va, 3452129198Scognet PAGE_SIZE); 3453135641Scognet } else 3454135641Scognet if (PV_BEEN_REFD(oflags)) { 3455135641Scognet pmap_dcache_wb_range(pmap, va, 3456135641Scognet PAGE_SIZE, TRUE, 3457135641Scognet (oflags & PVF_WRITE) == 0); 3458135641Scognet } 3459129198Scognet } 3460135641Scognet } else if (m) 3461135641Scognet if ((pve = pmap_get_pv_entry()) == NULL) { 3462135641Scognet panic("pmap_enter: no pv entries"); 3463135641Scognet } 3464144760Scognet if (m && !(m->flags & (PG_UNMANAGED | PG_FICTITIOUS))) 3465135641Scognet pmap_enter_pv(m, pve, pmap, va, nflags); 3466129198Scognet } 3467129198Scognet /* 3468129198Scognet * Make sure userland mappings get the right permissions 3469129198Scognet */ 3470129198Scognet if (pmap != pmap_kernel() && va != vector_page) { 3471129198Scognet npte |= L2_S_PROT_U; 3472129198Scognet } 3473129198Scognet 3474129198Scognet /* 3475129198Scognet * Keep the stats up to date 3476129198Scognet */ 3477129198Scognet if (opte == 0) { 3478129198Scognet l2b->l2b_occupancy++; 3479129198Scognet pmap->pm_stats.resident_count++; 3480129198Scognet } 3481129198Scognet 3482129198Scognet 3483129198Scognet /* 3484129198Scognet * If this is just a wiring change, the two PTEs will be 3485129198Scognet * identical, so there's no need to update the page table. 3486129198Scognet */ 3487129198Scognet if (npte != opte) { 3488135641Scognet boolean_t is_cached = pmap_is_current(pmap); 3489129198Scognet 3490129198Scognet *ptep = npte; 3491129198Scognet if (is_cached) { 3492129198Scognet /* 3493129198Scognet * We only need to frob the cache/tlb if this pmap 3494129198Scognet * is current 3495129198Scognet */ 3496129198Scognet PTE_SYNC(ptep); 3497129198Scognet if (L1_IDX(va) != L1_IDX(vector_page) && 3498129198Scognet l2pte_valid(npte)) { 3499129198Scognet /* 3500129198Scognet * This mapping is likely to be accessed as 3501129198Scognet * soon as we return to userland. Fix up the 3502129198Scognet * L1 entry to avoid taking another 3503129198Scognet * page/domain fault. 3504129198Scognet */ 3505129198Scognet pd_entry_t *pl1pd, l1pd; 3506129198Scognet 3507129198Scognet pl1pd = &pmap->pm_l1->l1_kva[L1_IDX(va)]; 3508129198Scognet l1pd = l2b->l2b_phys | L1_C_DOM(pmap->pm_domain) | 3509144760Scognet L1_C_PROTO; 3510129198Scognet if (*pl1pd != l1pd) { 3511129198Scognet *pl1pd = l1pd; 3512129198Scognet PTE_SYNC(pl1pd); 3513129198Scognet } 3514129198Scognet } 3515129198Scognet } 3516129198Scognet 3517129198Scognet if (PV_BEEN_EXECD(oflags)) 3518129198Scognet pmap_tlb_flushID_SE(pmap, va); 3519135641Scognet else if (PV_BEEN_REFD(oflags)) 3520129198Scognet pmap_tlb_flushD_SE(pmap, va); 3521129198Scognet 3522129198Scognet 3523135641Scognet pmap_vac_me_harder(m, pmap, va); 3524129198Scognet } 3525135641Scognet vm_page_unlock_queues(); 3526129198Scognet} 3527129198Scognet 3528129198Scognet/* 3529129198Scognet * this code makes some *MAJOR* assumptions: 3530129198Scognet * 1. Current pmap & pmap exists. 3531129198Scognet * 2. Not wired. 3532129198Scognet * 3. Read access. 3533129198Scognet * 4. No page table pages. 3534129198Scognet * 6. Page IS managed. 3535129198Scognet * but is *MUCH* faster than pmap_enter... 
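 *
 * [Editorial note -- not in the original source] On this port the
 * fast path is not really taken: as the body below shows, the
 * routine just wraps
 *
 *	pmap_enter(pmap, va, m, VM_PROT_READ | VM_PROT_EXECUTE, FALSE);
 *
 * under Giant, flushes the pmap's I/D caches, and always returns
 * NULL (no page-table page is carried between calls).
 *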
3536129198Scognet */ 3537129198Scognet 3538129198Scognetvm_page_t 3539129198Scognetpmap_enter_quick(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_page_t mpte) 3540129198Scognet{ 3541138897Salc 3542138897Salc vm_page_busy(m); 3543138897Salc vm_page_unlock_queues(); 3544138897Salc VM_OBJECT_UNLOCK(m->object); 3545138897Salc mtx_lock(&Giant); 3546135641Scognet pmap_enter(pmap, va, m, VM_PROT_READ|VM_PROT_EXECUTE, FALSE); 3547146596Scognet pmap_idcache_wbinv_all(pmap); 3548138897Salc mtx_unlock(&Giant); 3549138897Salc VM_OBJECT_LOCK(m->object); 3550138897Salc vm_page_lock_queues(); 3551138897Salc vm_page_wakeup(m); 3552129198Scognet return (NULL); 3553129198Scognet} 3554129198Scognet 3555129198Scognet/* 3556129198Scognet * Routine: pmap_change_wiring 3557129198Scognet * Function: Change the wiring attribute for a map/virtual-address 3558129198Scognet * pair. 3559129198Scognet * In/out conditions: 3560129198Scognet * The mapping must already exist in the pmap. 3561129198Scognet */ 3562129198Scognetvoid 3563129198Scognetpmap_change_wiring(pmap_t pmap, vm_offset_t va, boolean_t wired) 3564129198Scognet{ 3565129198Scognet struct l2_bucket *l2b; 3566129198Scognet pt_entry_t *ptep, pte; 3567129198Scognet vm_page_t pg; 3568129198Scognet 3569129198Scognet l2b = pmap_get_l2_bucket(pmap, va); 3570129198Scognet KASSERT(l2b, ("No l2b bucket in pmap_change_wiring")); 3571129198Scognet ptep = &l2b->l2b_kva[l2pte_index(va)]; 3572129198Scognet pte = *ptep; 3573129198Scognet pg = PHYS_TO_VM_PAGE(l2pte_pa(pte)); 3574129198Scognet if (pg) 3575129198Scognet pmap_modify_pv(pg, pmap, va, PVF_WIRED, wired); 3576129198Scognet} 3577129198Scognet 3578129198Scognet 3579129198Scognet/* 3580129198Scognet * Copy the range specified by src_addr/len 3581129198Scognet * from the source map to the range dst_addr/len 3582129198Scognet * in the destination map. 3583129198Scognet * 3584129198Scognet * This routine is only advisory and need not do anything. 3585129198Scognet */ 3586129198Scognetvoid 3587129198Scognetpmap_copy(pmap_t dst_pmap, pmap_t src_pmap, vm_offset_t dst_addr, 3588129198Scognet vm_size_t len, vm_offset_t src_addr) 3589129198Scognet{ 3590129198Scognet} 3591129198Scognet 3592129198Scognet 3593129198Scognet/* 3594129198Scognet * Routine: pmap_extract 3595129198Scognet * Function: 3596129198Scognet * Extract the physical page address associated 3597129198Scognet * with the given map/virtual_address pair. 3598129198Scognet */ 3599131658Salcvm_paddr_t 3600129198Scognetpmap_extract(pmap_t pm, vm_offset_t va) 3601129198Scognet{ 3602129198Scognet struct l2_dtable *l2; 3603129198Scognet pd_entry_t *pl1pd, l1pd; 3604129198Scognet pt_entry_t *ptep, pte; 3605129198Scognet vm_paddr_t pa; 3606129198Scognet u_int l1idx; 3607129198Scognet l1idx = L1_IDX(va); 3608129198Scognet pl1pd = &pm->pm_l1->l1_kva[l1idx]; 3609129198Scognet l1pd = *pl1pd; 3610129198Scognet 3611129198Scognet if (l1pte_section_p(l1pd)) { 3612129198Scognet /* 3613129198Scognet * These should only happen for pmap_kernel() 3614129198Scognet */ 3615129198Scognet KASSERT(pm == pmap_kernel(), ("huh")); 3616129198Scognet pa = (l1pd & L1_S_FRAME) | (va & L1_S_OFFSET); 3617129198Scognet } else { 3618129198Scognet /* 3619129198Scognet * Note that we can't rely on the validity of the L1 3620129198Scognet * descriptor as an indication that a mapping exists. 3621129198Scognet * We have to look it up in the L2 dtable. 
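 *
 * [Editorial worked example -- not in the original source] The
 * lookup decomposes the VA in stages:
 *
 *	l1idx = L1_IDX(va);			1MB slot in the L1
 *	l2 = pm->pm_l2[L2_IDX(l1idx)];		l2_dtable covering it
 *	ptep = l2->l2_bucket[L2_BUCKET(l1idx)].l2b_kva;
 *	pte = ptep[l2pte_index(va)];		one of the 256 small
 *						PTEs in that bucket
 *
 * A NULL at any stage means this pmap has no mapping there.
 *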
3622129198Scognet */ 3623129198Scognet l2 = pm->pm_l2[L2_IDX(l1idx)]; 3624129198Scognet 3625129198Scognet if (l2 == NULL || 3626129198Scognet (ptep = l2->l2_bucket[L2_BUCKET(l1idx)].l2b_kva) == NULL) { 3627129198Scognet return (0); 3628129198Scognet } 3629129198Scognet 3630129198Scognet ptep = &ptep[l2pte_index(va)]; 3631129198Scognet pte = *ptep; 3632129198Scognet 3633129198Scognet if (pte == 0) 3634129198Scognet return (0); 3635129198Scognet 3636129198Scognet switch (pte & L2_TYPE_MASK) { 3637129198Scognet case L2_TYPE_L: 3638129198Scognet pa = (pte & L2_L_FRAME) | (va & L2_L_OFFSET); 3639129198Scognet break; 3640129198Scognet 3641129198Scognet default: 3642129198Scognet pa = (pte & L2_S_FRAME) | (va & L2_S_OFFSET); 3643129198Scognet break; 3644129198Scognet } 3645129198Scognet } 3646129198Scognet 3647129198Scognet return (pa); 3648129198Scognet} 3649129198Scognet 3650133453Salc/* 3651133453Salc * Atomically extract and hold the physical page with the given 3652133453Salc * pmap and virtual address pair if that mapping permits the given 3653133453Salc * protection. 3654133453Salc * 3655133453Salc */ 3656129198Scognetvm_page_t 3657129198Scognetpmap_extract_and_hold(pmap_t pmap, vm_offset_t va, vm_prot_t prot) 3658129198Scognet{ 3659135641Scognet struct l2_dtable *l2; 3660135641Scognet pd_entry_t *pl1pd, l1pd; 3661135641Scognet pt_entry_t *ptep, pte; 3662129198Scognet vm_paddr_t pa; 3663135641Scognet vm_page_t m = NULL; 3664135641Scognet u_int l1idx; 3665135641Scognet l1idx = L1_IDX(va); 3666135641Scognet pl1pd = &pmap->pm_l1->l1_kva[l1idx]; 3667135641Scognet l1pd = *pl1pd; 3668129198Scognet 3669135641Scognet vm_page_lock_queues(); 3670135641Scognet if (l1pte_section_p(l1pd)) { 3671135641Scognet /* 3672135641Scognet * These should only happen for pmap_kernel() 3673135641Scognet */ 3674135641Scognet KASSERT(pmap == pmap_kernel(), ("huh")); 3675135641Scognet pa = (l1pd & L1_S_FRAME) | (va & L1_S_OFFSET); 3676135641Scognet if (l1pd & L1_S_PROT_W || (prot & VM_PROT_WRITE) == 0) { 3677135641Scognet m = PHYS_TO_VM_PAGE(pa); 3678135641Scognet vm_page_hold(m); 3679135641Scognet } 3680135641Scognet 3681135641Scognet } else { 3682135641Scognet /* 3683135641Scognet * Note that we can't rely on the validity of the L1 3684135641Scognet * descriptor as an indication that a mapping exists. 3685135641Scognet * We have to look it up in the L2 dtable. 
3686135641Scognet */ 3687135641Scognet l2 = pmap->pm_l2[L2_IDX(l1idx)]; 3688135641Scognet 3689135641Scognet if (l2 == NULL || 3690135641Scognet (ptep = l2->l2_bucket[L2_BUCKET(l1idx)].l2b_kva) == NULL) { 3691135641Scognet return (NULL); 3692135641Scognet } 3693135641Scognet 3694135641Scognet ptep = &ptep[l2pte_index(va)]; 3695135641Scognet pte = *ptep; 3696135641Scognet 3697135641Scognet if (pte == 0) 3698135641Scognet return (NULL); 3699135641Scognet 3700135641Scognet if (pte & L2_S_PROT_W || (prot & VM_PROT_WRITE) == 0) { 3701135641Scognet switch (pte & L2_TYPE_MASK) { 3702135641Scognet case L2_TYPE_L: 3703135641Scognet pa = (pte & L2_L_FRAME) | (va & L2_L_OFFSET); 3704135641Scognet break; 3705135641Scognet 3706135641Scognet default: 3707135641Scognet pa = (pte & L2_S_FRAME) | (va & L2_S_OFFSET); 3708135641Scognet break; 3709135641Scognet } 3710135641Scognet m = PHYS_TO_VM_PAGE(pa); 3711135641Scognet vm_page_hold(m); 3712135641Scognet } 3713129198Scognet } 3714135641Scognet 3715135641Scognet vm_page_unlock_queues(); 3716129198Scognet return (m); 3717129198Scognet} 3718129198Scognet 3719129198Scognet/* 3720129198Scognet * Initialize a preallocated and zeroed pmap structure, 3721129198Scognet * such as one in a vmspace structure. 3722129198Scognet */ 3723129198Scognet 3724129198Scognetvoid 3725129198Scognetpmap_pinit(pmap_t pmap) 3726129198Scognet{ 3727129198Scognet PDEBUG(1, printf("pmap_pinit: pmap = %08x\n", (uint32_t) pmap)); 3728129198Scognet 3729129198Scognet pmap_alloc_l1(pmap); 3730129198Scognet bzero(pmap->pm_l2, sizeof(pmap->pm_l2)); 3731129198Scognet 3732129198Scognet LIST_INSERT_HEAD(&allpmaps, pmap, pm_list); 3733129198Scognet pmap->pm_count = 1; 3734129198Scognet pmap->pm_active = 0; 3735129198Scognet 3736144760Scognet TAILQ_INIT(&pmap->pm_pvlist); 3737129198Scognet bzero(&pmap->pm_stats, sizeof pmap->pm_stats); 3738129198Scognet pmap->pm_stats.resident_count = 1; 3739129198Scognet if (vector_page < KERNBASE) { 3740129198Scognet pmap_enter(pmap, vector_page, PHYS_TO_VM_PAGE(systempage.pv_pa), 3741129198Scognet VM_PROT_READ, 1); 3742129198Scognet } 3743129198Scognet} 3744129198Scognet 3745129198Scognet 3746129198Scognet/*************************************************** 3747129198Scognet * page management routines. 3748129198Scognet ***************************************************/ 3749129198Scognet 3750129198Scognet 3751135641Scognetstatic void 3752129198Scognetpmap_free_pv_entry(pv_entry_t pv) 3753129198Scognet{ 3754129198Scognet pv_entry_count--; 3755129198Scognet uma_zfree(pvzone, pv); 3756129198Scognet} 3757129198Scognet 3758129198Scognet 3759129198Scognet/* 3760129198Scognet * get a new pv_entry, allocating a block from the system 3761129198Scognet * when needed. 3762129198Scognet * the memory allocation is performed bypassing the malloc code 3763129198Scognet * because of the possibility of allocations at interrupt time. 
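 *
 * [Illustrative sketch -- not in the original source] The zone is
 * dug into with M_NOWAIT, so callers must tolerate NULL; the
 * pattern pmap_enter() uses above is:
 *
 *	pv_entry_t pve;
 *
 *	if ((pve = pmap_get_pv_entry()) == NULL)
 *		panic("pmap_enter: no pv entries");
 *	pmap_enter_pv(m, pve, pmap, va, nflags);
 *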
3764129198Scognet */ 3765129198Scognetstatic pv_entry_t 3766129198Scognetpmap_get_pv_entry(void) 3767129198Scognet{ 3768129198Scognet pv_entry_t ret_value; 3769129198Scognet 3770129198Scognet pv_entry_count++; 3771129198Scognet if (pv_entry_high_water && 3772129198Scognet (pv_entry_count > pv_entry_high_water) && 3773129198Scognet (pmap_pagedaemon_waken == 0)) { 3774129198Scognet pmap_pagedaemon_waken = 1; 3775129198Scognet wakeup (&vm_pages_needed); 3776129198Scognet } 3777129198Scognet ret_value = uma_zalloc(pvzone, M_NOWAIT); 3778129198Scognet return ret_value; 3779129198Scognet} 3780129198Scognet 3781129198Scognet 3782129198Scognet/* 3783129198Scognet * Remove the given range of addresses from the specified map. 3784129198Scognet * 3785129198Scognet * It is assumed that the start and end are properly 3786129198Scognet * rounded to the page size. 3787129198Scognet */ 3788129198Scognet#define PMAP_REMOVE_CLEAN_LIST_SIZE 3 3789129198Scognetvoid 3790129198Scognetpmap_remove(pmap_t pm, vm_offset_t sva, vm_offset_t eva) 3791129198Scognet{ 3792129198Scognet struct l2_bucket *l2b; 3793129198Scognet vm_offset_t next_bucket; 3794129198Scognet pt_entry_t *ptep; 3795129198Scognet u_int cleanlist_idx, total, cnt; 3796129198Scognet struct { 3797129198Scognet vm_offset_t va; 3798129198Scognet pt_entry_t *pte; 3799129198Scognet } cleanlist[PMAP_REMOVE_CLEAN_LIST_SIZE]; 3800129198Scognet u_int mappings, is_exec, is_refd; 3801135641Scognet int flushall = 0; 3802129198Scognet 3803129198Scognet 3804129198Scognet /* 3805129198Scognet * we lock in the pmap => pv_head direction 3806129198Scognet */ 3807129198Scognet#if 0 3808129198Scognet PMAP_MAP_TO_HEAD_LOCK(); 3809129198Scognet pmap_acquire_pmap_lock(pm); 3810129198Scognet#endif 3811129198Scognet 3812137664Scognet vm_page_lock_queues(); 3813135641Scognet if (!pmap_is_current(pm)) { 3814129198Scognet cleanlist_idx = PMAP_REMOVE_CLEAN_LIST_SIZE + 1; 3815129198Scognet } else 3816129198Scognet cleanlist_idx = 0; 3817129198Scognet 3818129198Scognet total = 0; 3819129198Scognet while (sva < eva) { 3820129198Scognet /* 3821129198Scognet * Do one L2 bucket's worth at a time. 3822129198Scognet */ 3823129198Scognet next_bucket = L2_NEXT_BUCKET(sva); 3824129198Scognet if (next_bucket > eva) 3825129198Scognet next_bucket = eva; 3826129198Scognet 3827129198Scognet l2b = pmap_get_l2_bucket(pm, sva); 3828129198Scognet if (l2b == NULL) { 3829129198Scognet sva = next_bucket; 3830129198Scognet continue; 3831129198Scognet } 3832129198Scognet 3833129198Scognet ptep = &l2b->l2b_kva[l2pte_index(sva)]; 3834129198Scognet mappings = 0; 3835129198Scognet 3836129198Scognet while (sva < next_bucket) { 3837129198Scognet struct vm_page *pg; 3838129198Scognet pt_entry_t pte; 3839129198Scognet vm_paddr_t pa; 3840129198Scognet 3841129198Scognet pte = *ptep; 3842129198Scognet 3843129198Scognet if (pte == 0) { 3844129198Scognet /* 3845129198Scognet * Nothing here, move along 3846129198Scognet */ 3847129198Scognet sva += PAGE_SIZE; 3848129198Scognet ptep++; 3849129198Scognet continue; 3850129198Scognet } 3851129198Scognet 3852129198Scognet pm->pm_stats.resident_count--; 3853129198Scognet pa = l2pte_pa(pte); 3854129198Scognet is_exec = 0; 3855129198Scognet is_refd = 1; 3856129198Scognet 3857129198Scognet /* 3858129198Scognet * Update flags. In a number of circumstances, 3859129198Scognet * we could cluster a lot of these and do a 3860129198Scognet * number of sequential pages in one go. 
3861129198Scognet */ 3862129198Scognet if ((pg = PHYS_TO_VM_PAGE(pa)) != NULL) { 3863129198Scognet struct pv_entry *pve; 3864129198Scognet#if 0 3865129198Scognet simple_lock(&pg->mdpage.pvh_slock); 3866129198Scognet#endif 3867129198Scognet pve = pmap_remove_pv(pg, pm, sva); 3868135641Scognet if (pve) { 3869129198Scognet#if 0 3870129198Scognet simple_unlock(&pg->mdpage.pvh_slock); 3871129198Scognet#endif 3872129198Scognet is_exec = 3873129198Scognet PV_BEEN_EXECD(pve->pv_flags); 3874129198Scognet is_refd = 3875129198Scognet PV_BEEN_REFD(pve->pv_flags); 3876129198Scognet pmap_free_pv_entry(pve); 3877129198Scognet } 3878129198Scognet } 3879129198Scognet 3880129198Scognet if (!l2pte_valid(pte)) { 3881129198Scognet *ptep = 0; 3882129198Scognet PTE_SYNC_CURRENT(pm, ptep); 3883129198Scognet sva += PAGE_SIZE; 3884129198Scognet ptep++; 3885129198Scognet mappings++; 3886129198Scognet continue; 3887129198Scognet } 3888129198Scognet 3889129198Scognet if (cleanlist_idx < PMAP_REMOVE_CLEAN_LIST_SIZE) { 3890129198Scognet /* Add to the clean list. */ 3891129198Scognet cleanlist[cleanlist_idx].pte = ptep; 3892129198Scognet cleanlist[cleanlist_idx].va = 3893129198Scognet sva | (is_exec & 1); 3894129198Scognet cleanlist_idx++; 3895129198Scognet } else 3896129198Scognet if (cleanlist_idx == PMAP_REMOVE_CLEAN_LIST_SIZE) { 3897129198Scognet /* Nuke everything if needed. */ 3898129198Scognet pmap_idcache_wbinv_all(pm); 3899129198Scognet pmap_tlb_flushID(pm); 3900129198Scognet 3901129198Scognet /* 3902129198Scognet * Roll back the previous PTE list, 3903129198Scognet * and zero out the current PTE. 3904129198Scognet */ 3905129198Scognet for (cnt = 0; 3906129198Scognet cnt < PMAP_REMOVE_CLEAN_LIST_SIZE; cnt++) { 3907129198Scognet *cleanlist[cnt].pte = 0; 3908129198Scognet } 3909129198Scognet *ptep = 0; 3910129198Scognet PTE_SYNC(ptep); 3911129198Scognet cleanlist_idx++; 3912135641Scognet flushall = 1; 3913129198Scognet } else { 3914129198Scognet *ptep = 0; 3915129198Scognet PTE_SYNC(ptep); 3916129198Scognet if (is_exec) 3917129198Scognet pmap_tlb_flushID_SE(pm, sva); 3918129198Scognet else 3919129198Scognet if (is_refd) 3920129198Scognet pmap_tlb_flushD_SE(pm, sva); 3921129198Scognet } 3922129198Scognet 3923129198Scognet sva += PAGE_SIZE; 3924129198Scognet ptep++; 3925129198Scognet mappings++; 3926129198Scognet } 3927129198Scognet 3928129198Scognet /* 3929129198Scognet * Deal with any leftovers 3930129198Scognet */ 3931129198Scognet if (cleanlist_idx <= PMAP_REMOVE_CLEAN_LIST_SIZE) { 3932129198Scognet total += cleanlist_idx; 3933129198Scognet for (cnt = 0; cnt < cleanlist_idx; cnt++) { 3934135641Scognet vm_offset_t clva = 3935135641Scognet cleanlist[cnt].va & ~1; 3936135641Scognet if (cleanlist[cnt].va & 1) { 3937135641Scognet pmap_idcache_wbinv_range(pm, 3938135641Scognet clva, PAGE_SIZE); 3939135641Scognet pmap_tlb_flushID_SE(pm, clva); 3940135641Scognet } else { 3941135641Scognet pmap_dcache_wb_range(pm, 3942135641Scognet clva, PAGE_SIZE, TRUE, 3943135641Scognet FALSE); 3944135641Scognet pmap_tlb_flushD_SE(pm, clva); 3945129198Scognet } 3946129198Scognet *cleanlist[cnt].pte = 0; 3947129198Scognet PTE_SYNC_CURRENT(pm, cleanlist[cnt].pte); 3948129198Scognet } 3949129198Scognet 3950129198Scognet if (total <= PMAP_REMOVE_CLEAN_LIST_SIZE) 3951129198Scognet cleanlist_idx = 0; 3952129198Scognet else { 3953144760Scognet /* 3954144760Scognet * We are removing so many entries that it's just 3955144760Scognet * easier to flush the whole cache.
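 *
 * [Editorial summary -- not in the original source] With
 * PMAP_REMOVE_CLEAN_LIST_SIZE == 3 the strategy works out to:
 *
 *	1st-3rd valid PTE: queued on the cleanlist and later
 *	    cleaned and TLB-flushed one page at a time
 *	4th PTE: one whole-cache write-back/invalidate plus a
 *	    full TLB flush; the queued entries are simply zeroed
 *	total > 3 across buckets: stay in whole-cache mode for
 *	    the remainder of the range, as done here
 *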
3956144760Scognet */ 3957129198Scognet cleanlist_idx = PMAP_REMOVE_CLEAN_LIST_SIZE + 1; 3958129198Scognet pmap_idcache_wbinv_all(pm); 3959135641Scognet flushall = 1; 3960129198Scognet } 3961129198Scognet } 3962129198Scognet 3963129198Scognet pmap_free_l2_bucket(pm, l2b, mappings); 3964129198Scognet } 3965129198Scognet 3966137664Scognet vm_page_unlock_queues(); 3967135641Scognet if (flushall) 3968135641Scognet cpu_tlb_flushID(); 3969129198Scognet#if 0 3970129198Scognet pmap_release_pmap_lock(pm); 3971129198Scognet PMAP_MAP_TO_HEAD_UNLOCK(); 3972129198Scognet#endif 3973129198Scognet} 3974129198Scognet 3975129198Scognet 3976129198Scognet 3977129198Scognet 3978129198Scognet/* 3979129198Scognet * pmap_zero_page() 3980129198Scognet * 3981129198Scognet * Zero a given physical page by mapping it at a page hook point. 3982129198Scognet * In doing the zero page op, the page we zero is mapped cachable, as with 3983129198Scognet * StrongARM accesses to non-cached pages are non-burst making writing 3984129198Scognet * _any_ bulk data very slow. 3985129198Scognet */ 3986129198Scognet#if (ARM_MMU_GENERIC + ARM_MMU_SA1) != 0 3987129198Scognetvoid 3988129198Scognetpmap_zero_page_generic(vm_paddr_t phys, int off, int size) 3989129198Scognet{ 3990129198Scognet#ifdef DEBUG 3991129198Scognet struct vm_page *pg = PHYS_TO_VM_PAGE(phys); 3992129198Scognet 3993129198Scognet if (pg->md.pvh_list != NULL) 3994129198Scognet panic("pmap_zero_page: page has mappings"); 3995129198Scognet#endif 3996129198Scognet 3997129198Scognet 3998129198Scognet /* 3999129198Scognet * Hook in the page, zero it, and purge the cache for that 4000129198Scognet * zeroed page. Invalidate the TLB as needed. 4001129198Scognet */ 4002129198Scognet *cdst_pte = L2_S_PROTO | phys | 4003129198Scognet L2_S_PROT(PTE_KERNEL, VM_PROT_WRITE) | pte_l2_s_cache_mode; 4004129198Scognet PTE_SYNC(cdst_pte); 4005129198Scognet cpu_tlb_flushD_SE(cdstp); 4006129198Scognet cpu_cpwait(); 4007135641Scognet if (off || size != PAGE_SIZE) 4008129198Scognet bzero((void *)(cdstp + off), size); 4009129198Scognet else 4010129198Scognet bzero_page(cdstp); 4011129198Scognet cpu_dcache_wbinv_range(cdstp, PAGE_SIZE); 4012129198Scognet} 4013129198Scognet#endif /* (ARM_MMU_GENERIC + ARM_MMU_SA1) != 0 */ 4014129198Scognet 4015129198Scognet#if ARM_MMU_XSCALE == 1 4016129198Scognetvoid 4017129198Scognetpmap_zero_page_xscale(vm_paddr_t phys, int off, int size) 4018129198Scognet{ 4019129198Scognet /* 4020129198Scognet * Hook in the page, zero it, and purge the cache for that 4021129198Scognet * zeroed page. Invalidate the TLB as needed. 4022129198Scognet */ 4023129198Scognet *cdst_pte = L2_S_PROTO | phys | 4024129198Scognet L2_S_PROT(PTE_KERNEL, VM_PROT_WRITE) | 4025129198Scognet L2_C | L2_XSCALE_T_TEX(TEX_XSCALE_X); /* mini-data */ 4026129198Scognet PTE_SYNC(cdst_pte); 4027129198Scognet cpu_tlb_flushD_SE(cdstp); 4028129198Scognet cpu_cpwait(); 4029135641Scognet if (off || size != PAGE_SIZE) 4030129198Scognet bzero((void *)(cdstp + off), size); 4031129198Scognet else 4032129198Scognet bzero_page(cdstp); 4033129198Scognet xscale_cache_clean_minidata(); 4034129198Scognet} 4035129198Scognet 4036129198Scognet/* 4037129198Scognet * Change the PTEs for the specified kernel mappings such that they 4038129198Scognet * will use the mini data cache instead of the main data cache. 
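 *
 * [Illustrative usage -- not in the original source] For instance,
 * redirecting an already mapped scratch range (hypothetical start
 * and length) into the mini d-cache:
 *
 *	pmap_use_minicache(minidata_va, L2_TABLE_SIZE);
 *
 * Only existing kernel PTEs in [va, va + size) are rewritten, and
 * on multi-MMU kernels the call is a no-op unless
 * xscale_use_minidata is set.
 *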
4039129198Scognet */ 4040129198Scognetvoid 4041135641Scognetpmap_use_minicache(vm_offset_t va, vm_size_t size) 4042129198Scognet{ 4043129198Scognet struct l2_bucket *l2b; 4044129198Scognet pt_entry_t *ptep, *sptep, pte; 4045129198Scognet vm_offset_t next_bucket, eva; 4046129198Scognet 4047129198Scognet#if (ARM_NMMUS > 1) 4048129198Scognet if (xscale_use_minidata == 0) 4049129198Scognet return; 4050129198Scognet#endif 4051129198Scognet 4052135641Scognet eva = va + size; 4053129198Scognet 4054129198Scognet while (va < eva) { 4055129198Scognet next_bucket = L2_NEXT_BUCKET(va); 4056129198Scognet if (next_bucket > eva) 4057129198Scognet next_bucket = eva; 4058129198Scognet 4059129198Scognet l2b = pmap_get_l2_bucket(pmap_kernel(), va); 4060129198Scognet 4061129198Scognet sptep = ptep = &l2b->l2b_kva[l2pte_index(va)]; 4062129198Scognet 4063129198Scognet while (va < next_bucket) { 4064129198Scognet pte = *ptep; 4065129198Scognet if (!l2pte_minidata(pte)) { 4066129198Scognet cpu_dcache_wbinv_range(va, PAGE_SIZE); 4067129198Scognet cpu_tlb_flushD_SE(va); 4068129198Scognet *ptep = pte & ~L2_B; 4069129198Scognet } 4070129198Scognet ptep++; 4071129198Scognet va += PAGE_SIZE; 4072129198Scognet } 4073129198Scognet PTE_SYNC_RANGE(sptep, (u_int)(ptep - sptep)); 4074129198Scognet } 4075129198Scognet cpu_cpwait(); 4076129198Scognet} 4077129198Scognet#endif /* ARM_MMU_XSCALE == 1 */ 4078129198Scognet 4079129198Scognet/* 4080129198Scognet * pmap_zero_page zeros the specified hardware page by mapping 4081129198Scognet * the page into KVM and using bzero to clear its contents. 4082129198Scognet */ 4083129198Scognetvoid 4084129198Scognetpmap_zero_page(vm_page_t m) 4085129198Scognet{ 4086135641Scognet pmap_zero_page_func(VM_PAGE_TO_PHYS(m), 0, PAGE_SIZE); 4087129198Scognet} 4088129198Scognet 4089129198Scognet 4090129198Scognet/* 4091129198Scognet * pmap_zero_page_area zeros the specified hardware page by mapping 4092129198Scognet * the page into KVM and using bzero to clear its contents. 4093129198Scognet * 4094129198Scognet * off and size may not cover an area beyond a single hardware page. 4095129198Scognet */ 4096129198Scognetvoid 4097129198Scognetpmap_zero_page_area(vm_page_t m, int off, int size) 4098129198Scognet{ 4099129198Scognet 4100129198Scognet pmap_zero_page_func(VM_PAGE_TO_PHYS(m), off, size); 4101129198Scognet} 4102129198Scognet 4103129198Scognet 4104129198Scognet/* 4105129198Scognet * pmap_zero_page_idle zeros the specified hardware page by mapping 4106129198Scognet * the page into KVM and using bzero to clear its contents. This 4107129198Scognet * is intended to be called from the vm_pagezero process only and 4108129198Scognet * outside of Giant. 4109129198Scognet */ 4110129198Scognetvoid 4111129198Scognetpmap_zero_page_idle(vm_page_t m) 4112129198Scognet{ 4113129198Scognet 4114129198Scognet pmap_zero_page(m); 4115129198Scognet} 4116129198Scognet 4117129198Scognet/* 4118129198Scognet * pmap_clean_page() 4119129198Scognet * 4120129198Scognet * This is a local function used to work out the best strategy to clean 4121129198Scognet * a single page referenced by its entry in the PV table. It's used by 4122129198Scognet * pmap_copy_page, pmap_zero_page and maybe some others later on. 4123129198Scognet * 4124129198Scognet * Its policy is effectively: 4125129198Scognet * o If there are no mappings, we don't bother doing anything with the cache. 4126129198Scognet * o If there is one mapping, we clean just that page. 4127129198Scognet * o If there are multiple mappings, we clean the entire cache.
4128129198Scognet * 4129129198Scognet * So that some functions can be further optimised, it returns 0 if it didn't 4130129198Scognet * clean the entire cache, or 1 if it did. 4131129198Scognet * 4132129198Scognet * XXX One bug in this routine is that if the pv_entry has a single page 4133129198Scognet * mapped at 0x00000000 a whole cache clean will be performed rather than 4134129198Scognet * just the 1 page. Since this should not occur in everyday use and if it does 4135129198Scognet * it will just result in not the most efficient clean for the page. 4136129198Scognet */ 4137129198Scognetstatic int 4138129198Scognetpmap_clean_page(struct pv_entry *pv, boolean_t is_src) 4139129198Scognet{ 4140129198Scognet pmap_t pm, pm_to_clean = NULL; 4141129198Scognet struct pv_entry *npv; 4142129198Scognet u_int cache_needs_cleaning = 0; 4143129198Scognet u_int flags = 0; 4144129198Scognet vm_offset_t page_to_clean = 0; 4145129198Scognet 4146129198Scognet if (pv == NULL) { 4147129198Scognet /* nothing mapped in so nothing to flush */ 4148129198Scognet return (0); 4149129198Scognet } 4150129198Scognet 4151129198Scognet /* 4152129198Scognet * Since we flush the cache each time we change to a different 4153129198Scognet * user vmspace, we only need to flush the page if it is in the 4154129198Scognet * current pmap. 4155129198Scognet */ 4156135641Scognet if (curthread) 4157135641Scognet pm = vmspace_pmap(curproc->p_vmspace); 4158129198Scognet else 4159129198Scognet pm = pmap_kernel(); 4160129198Scognet 4161129198Scognet for (npv = pv; npv; npv = TAILQ_NEXT(npv, pv_list)) { 4162129198Scognet if (npv->pv_pmap == pmap_kernel() || npv->pv_pmap == pm) { 4163129198Scognet flags |= npv->pv_flags; 4164129198Scognet /* 4165129198Scognet * The page is mapped non-cacheable in 4166129198Scognet * this map. No need to flush the cache. 4167129198Scognet */ 4168129198Scognet if (npv->pv_flags & PVF_NC) { 4169129198Scognet#ifdef DIAGNOSTIC 4170129198Scognet if (cache_needs_cleaning) 4171129198Scognet panic("pmap_clean_page: " 4172129198Scognet "cache inconsistency"); 4173129198Scognet#endif 4174129198Scognet break; 4175129198Scognet } else if (is_src && (npv->pv_flags & PVF_WRITE) == 0) 4176129198Scognet continue; 4177129198Scognet if (cache_needs_cleaning) { 4178129198Scognet page_to_clean = 0; 4179129198Scognet break; 4180129198Scognet } else { 4181129198Scognet page_to_clean = npv->pv_va; 4182129198Scognet pm_to_clean = npv->pv_pmap; 4183129198Scognet } 4184129198Scognet cache_needs_cleaning = 1; 4185129198Scognet } 4186129198Scognet } 4187129198Scognet if (page_to_clean) { 4188129198Scognet if (PV_BEEN_EXECD(flags)) 4189129198Scognet pmap_idcache_wbinv_range(pm_to_clean, page_to_clean, 4190129198Scognet PAGE_SIZE); 4191129198Scognet else 4192129198Scognet pmap_dcache_wb_range(pm_to_clean, page_to_clean, 4193129198Scognet PAGE_SIZE, !is_src, (flags & PVF_WRITE) == 0); 4194129198Scognet } else if (cache_needs_cleaning) { 4195129198Scognet if (PV_BEEN_EXECD(flags)) 4196129198Scognet pmap_idcache_wbinv_all(pm); 4197129198Scognet else 4198129198Scognet pmap_dcache_wbinv_all(pm); 4199129198Scognet return (1); 4200129198Scognet } 4201129198Scognet return (0); 4202129198Scognet} 4203129198Scognet 4204129198Scognet/* 4205129198Scognet * pmap_copy_page copies the specified (machine independent) 4206129198Scognet * page by mapping the page into virtual memory and using 4207129198Scognet * bcopy to copy the page, one machine dependent page at a 4208129198Scognet * time. 
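 *
 * [Editorial note -- not in the original source] The MI entry point
 * is simply
 *
 *	pmap_copy_page(src_m, dst_m);	(vm_page_t arguments)
 *
 * which dispatches through pmap_copy_page_func to the generic or
 * XScale variant below; both map the two physical pages at the
 * csrcp/cdstp hook addresses and bcopy_page() between them.
 *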
4209129198Scognet */ 4210129198Scognet 4211129198Scognet/* 4212129198Scognet * pmap_copy_page() 4213129198Scognet * 4214129198Scognet * Copy one physical page into another, by mapping the pages into 4215129198Scognet * hook points. The same comment regarding cachability as in 4216129198Scognet * pmap_zero_page also applies here. 4217129198Scognet */ 4218129198Scognet#if (ARM_MMU_GENERIC + ARM_MMU_SA1) != 0 4219129198Scognetvoid 4220129198Scognetpmap_copy_page_generic(vm_paddr_t src, vm_paddr_t dst) 4221129198Scognet{ 4222129198Scognet struct vm_page *src_pg = PHYS_TO_VM_PAGE(src); 4223129198Scognet#ifdef DEBUG 4224129198Scognet struct vm_page *dst_pg = PHYS_TO_VM_PAGE(dst); 4225129198Scognet 4226129198Scognet if (dst_pg->md.pvh_list != NULL) 4227129198Scognet panic("pmap_copy_page: dst page has mappings"); 4228129198Scognet#endif 4229129198Scognet 4230129198Scognet 4231129198Scognet /* 4232129198Scognet * Clean the source page. Hold the source page's lock for 4233129198Scognet * the duration of the copy so that no other mappings can 4234129198Scognet * be created while we have a potentially aliased mapping. 4235129198Scognet */ 4236129198Scognet#if 0 4237129198Scognet mtx_lock(&src_pg->md.pvh_mtx); 4238129198Scognet#endif 4239129198Scognet (void) pmap_clean_page(TAILQ_FIRST(&src_pg->md.pv_list), TRUE); 4240129198Scognet 4241129198Scognet /* 4242129198Scognet * Map the pages into the page hook points, copy them, and purge 4243129198Scognet * the cache for the appropriate page. Invalidate the TLB 4244129198Scognet * as required. 4245129198Scognet */ 4246129198Scognet *csrc_pte = L2_S_PROTO | src | 4247129198Scognet L2_S_PROT(PTE_KERNEL, VM_PROT_READ) | pte_l2_s_cache_mode; 4248129198Scognet PTE_SYNC(csrc_pte); 4249129198Scognet *cdst_pte = L2_S_PROTO | dst | 4250129198Scognet L2_S_PROT(PTE_KERNEL, VM_PROT_WRITE) | pte_l2_s_cache_mode; 4251129198Scognet PTE_SYNC(cdst_pte); 4252129198Scognet cpu_tlb_flushD_SE(csrcp); 4253129198Scognet cpu_tlb_flushD_SE(cdstp); 4254129198Scognet cpu_cpwait(); 4255129198Scognet bcopy_page(csrcp, cdstp); 4256129198Scognet cpu_dcache_inv_range(csrcp, PAGE_SIZE); 4257129198Scognet#if 0 4258129198Scognet mtx_lock(&src_pg->md.pvh_mtx); 4259129198Scognet#endif 4260129198Scognet cpu_dcache_wbinv_range(cdstp, PAGE_SIZE); 4261129198Scognet} 4262129198Scognet#endif /* (ARM_MMU_GENERIC + ARM_MMU_SA1) != 0 */ 4263129198Scognet 4264129198Scognet#if ARM_MMU_XSCALE == 1 4265129198Scognetvoid 4266129198Scognetpmap_copy_page_xscale(vm_paddr_t src, vm_paddr_t dst) 4267129198Scognet{ 4268129198Scognet struct vm_page *src_pg = PHYS_TO_VM_PAGE(src); 4269129198Scognet#ifdef DEBUG 4270129198Scognet struct vm_page *dst_pg = PHYS_TO_VM_PAGE(dst); 4271129198Scognet 4272129198Scognet if (dst_pg->md.pvh_list != NULL) 4273129198Scognet panic("pmap_copy_page: dst page has mappings"); 4274129198Scognet#endif 4275129198Scognet 4276129198Scognet 4277129198Scognet /* 4278129198Scognet * Clean the source page. Hold the source page's lock for 4279129198Scognet * the duration of the copy so that no other mappings can 4280129198Scognet * be created while we have a potentially aliased mapping. 4281129198Scognet */ 4282130745Scognet (void) pmap_clean_page(TAILQ_FIRST(&src_pg->md.pv_list), TRUE); 4283129198Scognet 4284129198Scognet /* 4285129198Scognet * Map the pages into the page hook points, copy them, and purge 4286129198Scognet * the cache for the appropriate page. Invalidate the TLB 4287129198Scognet * as required. 

#if ARM_MMU_XSCALE == 1
void
pmap_copy_page_xscale(vm_paddr_t src, vm_paddr_t dst)
{
	struct vm_page *src_pg = PHYS_TO_VM_PAGE(src);
#ifdef DEBUG
	struct vm_page *dst_pg = PHYS_TO_VM_PAGE(dst);

	if (dst_pg->md.pvh_list != NULL)
		panic("pmap_copy_page: dst page has mappings");
#endif

	/*
	 * Clean the source page.  Hold the source page's lock for
	 * the duration of the copy so that no other mappings can
	 * be created while we have a potentially aliased mapping.
	 */
	(void) pmap_clean_page(TAILQ_FIRST(&src_pg->md.pv_list), TRUE);

	/*
	 * Map the pages into the page hook points, copy them, and purge
	 * the cache for the appropriate page. Invalidate the TLB
	 * as required.
	 */
	*csrc_pte = L2_S_PROTO | src |
	    L2_S_PROT(PTE_KERNEL, VM_PROT_READ) |
	    L2_C | L2_XSCALE_T_TEX(TEX_XSCALE_X);	/* mini-data */
	PTE_SYNC(csrc_pte);
	*cdst_pte = L2_S_PROTO | dst |
	    L2_S_PROT(PTE_KERNEL, VM_PROT_WRITE) |
	    L2_C | L2_XSCALE_T_TEX(TEX_XSCALE_X);	/* mini-data */
	PTE_SYNC(cdst_pte);
	cpu_tlb_flushD_SE(csrcp);
	cpu_tlb_flushD_SE(cdstp);
	cpu_cpwait();
	bcopy_page(csrcp, cdstp);
	xscale_cache_clean_minidata();
}
#endif /* ARM_MMU_XSCALE == 1 */

void
pmap_copy_page(vm_page_t src, vm_page_t dst)
{
	cpu_dcache_wbinv_all();
	pmap_copy_page_func(VM_PAGE_TO_PHYS(src), VM_PAGE_TO_PHYS(dst));
}

/*
 * this routine returns true if a physical page resides
 * in the given pmap.
 */
boolean_t
pmap_page_exists_quick(pmap_t pmap, vm_page_t m)
{
	pv_entry_t pv;
	int loops = 0;
	int s;

	if (m->flags & PG_FICTITIOUS)
		return (FALSE);

	s = splvm();

	/*
	 * Check the current mappings, returning immediately on a
	 * match; give up after examining 16 entries.
	 */
	for (pv = TAILQ_FIRST(&m->md.pv_list);
	    pv;
	    pv = TAILQ_NEXT(pv, pv_list)) {
		if (pv->pv_pmap == pmap) {
			splx(s);
			return (TRUE);
		}
		loops++;
		if (loops >= 16)
			break;
	}
	splx(s);
	return (FALSE);
}

/*
 * pmap_ts_referenced:
 *
 *	Return the count of reference bits for a page, clearing all of them.
 */
int
pmap_ts_referenced(vm_page_t m)
{
	return (pmap_clearbit(m, PVF_REF));
}


boolean_t
pmap_is_modified(vm_page_t m)
{

	if (m->md.pvh_attrs & PVF_MOD)
		return (TRUE);

	return (FALSE);
}


/*
 * Clear the modify bits on the specified physical page.
 */
void
pmap_clear_modify(vm_page_t m)
{

	if (m->md.pvh_attrs & PVF_MOD)
		pmap_clearbit(m, PVF_MOD);
}
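
/*
 * Sketch of how the referenced/modified interface above composes
 * (illustrative only; pmap_page_activity_example is hypothetical --
 * the real consumer is the machine-independent VM pageout code):
 */
#if 0
static int
pmap_page_activity_example(vm_page_t m)
{
	int refs;

	/* Harvest and clear PVF_REF across all of the page's mappings. */
	refs = pmap_ts_referenced(m);
	/*
	 * Test PVF_MOD, then clear it; pmap_clearbit() write-protects
	 * the mappings so the next store faults and re-sets the bit.
	 */
	if (pmap_is_modified(m))
		pmap_clear_modify(m);
	return (refs);
}
#endif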

/*
 * pmap_clear_reference:
 *
 *	Clear the reference bit on the specified physical page.
 */
void
pmap_clear_reference(vm_page_t m)
{

	if (m->md.pvh_attrs & PVF_REF)
		pmap_clearbit(m, PVF_REF);
}


/*
 * perform the pmap work for mincore
 */
int
pmap_mincore(pmap_t pmap, vm_offset_t addr)
{

	printf("pmap_mincore()\n");	/* XXX: not yet implemented */
	return (0);
}


vm_offset_t
pmap_addr_hint(vm_object_t obj, vm_offset_t addr, vm_size_t size)
{

	return (addr);
}


/*
 * Map a set of physical memory pages into the kernel virtual
 * address space. Return a pointer to where it is mapped. This
 * routine is intended to be used for mapping device memory,
 * NOT real memory.
 */
void *
pmap_mapdev(vm_offset_t pa, vm_size_t size)
{
	vm_offset_t va, tmpva, offset;

	/* "pa" may not be page-aligned; carry the offset through. */
	offset = pa & PAGE_MASK;
	pa = trunc_page(pa);
	size = roundup(size + offset, PAGE_SIZE);

	GIANT_REQUIRED;

	va = kmem_alloc_nofault(kernel_map, size);
	if (!va)
		panic("pmap_mapdev: Couldn't alloc kernel virtual memory");
	for (tmpva = va; size > 0;) {
		pmap_kenter_internal(tmpva, pa, 0);
		size -= PAGE_SIZE;
		tmpva += PAGE_SIZE;
		pa += PAGE_SIZE;
	}

	return ((void *)(va + offset));
}

#define BOOTSTRAP_DEBUG

/*
 * pmap_map_section:
 *
 *	Create a single section mapping.
 */
void
pmap_map_section(vm_offset_t l1pt, vm_offset_t va, vm_offset_t pa,
    int prot, int cache)
{
	pd_entry_t *pde = (pd_entry_t *) l1pt;
	pd_entry_t fl;

	KASSERT(((va | pa) & L1_S_OFFSET) == 0,
	    ("pmap_map_section: va/pa not section-aligned"));

	switch (cache) {
	case PTE_NOCACHE:
	default:
		fl = 0;
		break;

	case PTE_CACHE:
		fl = pte_l1_s_cache_mode;
		break;

	case PTE_PAGETABLE:
		fl = pte_l1_s_cache_mode_pt;
		break;
	}

	pde[va >> L1_S_SHIFT] = L1_S_PROTO | pa |
	    L1_S_PROT(PTE_KERNEL, prot) | fl | L1_S_DOM(PMAP_DOMAIN_KERNEL);
	PTE_SYNC(&pde[va >> L1_S_SHIFT]);
}
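
/*
 * Bootstrap usage sketch for pmap_map_section() (the addresses and the
 * "l1pagetable" variable are made up; real calls live in the platform
 * startup code): map one 1MB, section-aligned window of device
 * registers, uncached.
 */
#if 0
	pmap_map_section(l1pagetable, 0xd0000000 /* va */,
	    0x40000000 /* pa */, VM_PROT_READ | VM_PROT_WRITE, PTE_NOCACHE);
#endif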

/*
 * pmap_link_l2pt:
 *
 *	Link the L2 page table specified by l2pv->pv_pa into the L1
 *	page table at the slot for "va".
 */
void
pmap_link_l2pt(vm_offset_t l1pt, vm_offset_t va, struct pv_addr *l2pv)
{
	pd_entry_t *pde = (pd_entry_t *) l1pt, proto;
	u_int slot = va >> L1_S_SHIFT;

#ifndef ARM32_NEW_VM_LAYOUT
	KASSERT((va & ((L1_S_SIZE * 4) - 1)) == 0,
	    ("pmap_link_l2pt: va not aligned to a 4-slot L1 group"));
	KASSERT((l2pv->pv_pa & PAGE_MASK) == 0,
	    ("pmap_link_l2pt: l2pv->pv_pa not page-aligned"));
#endif

	proto = L1_S_DOM(PMAP_DOMAIN_KERNEL) | L1_C_PROTO;

	pde[slot + 0] = proto | (l2pv->pv_pa + 0x000);
#ifdef ARM32_NEW_VM_LAYOUT
	PTE_SYNC(&pde[slot]);
#else
	pde[slot + 1] = proto | (l2pv->pv_pa + 0x400);
	pde[slot + 2] = proto | (l2pv->pv_pa + 0x800);
	pde[slot + 3] = proto | (l2pv->pv_pa + 0xc00);
	PTE_SYNC_RANGE(&pde[slot + 0], 4);
#endif

	SLIST_INSERT_HEAD(&kernel_pt_list, l2pv, pv_list);
}

/*
 * pmap_map_entry:
 *
 *	Create a single page mapping.
 */
void
pmap_map_entry(vm_offset_t l1pt, vm_offset_t va, vm_offset_t pa, int prot,
    int cache)
{
	pd_entry_t *pde = (pd_entry_t *) l1pt;
	pt_entry_t fl;
	pt_entry_t *pte;

	KASSERT(((va | pa) & PAGE_MASK) == 0,
	    ("pmap_map_entry: va/pa not page-aligned"));

	switch (cache) {
	case PTE_NOCACHE:
	default:
		fl = 0;
		break;

	case PTE_CACHE:
		fl = pte_l2_s_cache_mode;
		break;

	case PTE_PAGETABLE:
		fl = pte_l2_s_cache_mode_pt;
		break;
	}

	if ((pde[va >> L1_S_SHIFT] & L1_TYPE_MASK) != L1_TYPE_C)
		panic("pmap_map_entry: no L2 table for VA 0x%08x", va);

#ifndef ARM32_NEW_VM_LAYOUT
	pte = (pt_entry_t *)
	    kernel_pt_lookup(pde[va >> L1_S_SHIFT] & L2_S_FRAME);
#else
	pte = (pt_entry_t *) kernel_pt_lookup(pde[L1_IDX(va)] & L1_C_ADDR_MASK);
#endif

	if (pte == NULL)
		panic("pmap_map_entry: can't find L2 table for VA 0x%08x", va);

#ifndef ARM32_NEW_VM_LAYOUT
	pte[(va >> PAGE_SHIFT) & 0x3ff] =
	    L2_S_PROTO | pa | L2_S_PROT(PTE_KERNEL, prot) | fl;
	PTE_SYNC(&pte[(va >> PAGE_SHIFT) & 0x3ff]);
#else
	pte[l2pte_index(va)] =
	    L2_S_PROTO | pa | L2_S_PROT(PTE_KERNEL, prot) | fl;
	PTE_SYNC(&pte[l2pte_index(va)]);
#endif
}
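
/*
 * Bootstrap sketch combining the two helpers above (the "kernel_l1pt"
 * pv_addr, the L2 table "kptpv", and the addresses are assumptions for
 * illustration).  The L2 table must be linked into the L1 before
 * pmap_map_entry() can find it via kernel_pt_lookup():
 */
#if 0
	/* va must cover a full 4-slot L1 group (4MB) in the old VM layout. */
	pmap_link_l2pt(kernel_l1pt.pv_va, 0xd0000000, &kptpv);
	pmap_map_entry(kernel_l1pt.pv_va, 0xd0000000 /* va */,
	    0x40000000 /* pa */, VM_PROT_READ | VM_PROT_WRITE, PTE_NOCACHE);
#endif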

/*
 * pmap_map_chunk:
 *
 *	Map a chunk of memory using the most efficient mappings
 *	possible (section, large page, small page) into the
 *	provided L1 and L2 tables at the specified virtual address.
 */
vm_size_t
pmap_map_chunk(vm_offset_t l1pt, vm_offset_t va, vm_offset_t pa,
    vm_size_t size, int prot, int cache)
{
	pd_entry_t *pde = (pd_entry_t *) l1pt;
	pt_entry_t *pte, f1, f2s, f2l;
	vm_size_t resid;
	int i;

	resid = (size + (PAGE_SIZE - 1)) & ~(PAGE_SIZE - 1);

	if (l1pt == 0)
		panic("pmap_map_chunk: no L1 table provided");

#ifdef VERBOSE_INIT_ARM
	printf("pmap_map_chunk: pa=0x%lx va=0x%lx size=0x%lx resid=0x%lx "
	    "prot=0x%x cache=%d\n", pa, va, size, resid, prot, cache);
#endif

	switch (cache) {
	case PTE_NOCACHE:
	default:
		f1 = 0;
		f2l = 0;
		f2s = 0;
		break;

	case PTE_CACHE:
		f1 = pte_l1_s_cache_mode;
		f2l = pte_l2_l_cache_mode;
		f2s = pte_l2_s_cache_mode;
		break;

	case PTE_PAGETABLE:
		f1 = pte_l1_s_cache_mode_pt;
		f2l = pte_l2_l_cache_mode_pt;
		f2s = pte_l2_s_cache_mode_pt;
		break;
	}

	size = resid;

	while (resid > 0) {
		/* See if we can use a section mapping. */
		if (L1_S_MAPPABLE_P(va, pa, resid)) {
#ifdef VERBOSE_INIT_ARM
			printf("S");
#endif
			pde[va >> L1_S_SHIFT] = L1_S_PROTO | pa |
			    L1_S_PROT(PTE_KERNEL, prot) | f1 |
			    L1_S_DOM(PMAP_DOMAIN_KERNEL);
			PTE_SYNC(&pde[va >> L1_S_SHIFT]);
			va += L1_S_SIZE;
			pa += L1_S_SIZE;
			resid -= L1_S_SIZE;
			continue;
		}

		/*
		 * Ok, we're going to use an L2 table.  Make sure
		 * one is actually in the corresponding L1 slot
		 * for the current VA.
		 */
		if ((pde[va >> L1_S_SHIFT] & L1_TYPE_MASK) != L1_TYPE_C)
			panic("pmap_map_chunk: no L2 table for VA 0x%08x", va);

#ifndef ARM32_NEW_VM_LAYOUT
		pte = (pt_entry_t *)
		    kernel_pt_lookup(pde[va >> L1_S_SHIFT] & L2_S_FRAME);
#else
		pte = (pt_entry_t *) kernel_pt_lookup(
		    pde[L1_IDX(va)] & L1_C_ADDR_MASK);
#endif
		if (pte == NULL)
			panic("pmap_map_chunk: can't find L2 table for VA "
			    "0x%08x", va);

		/* See if we can use an L2 large page mapping. */
		if (L2_L_MAPPABLE_P(va, pa, resid)) {
#ifdef VERBOSE_INIT_ARM
			printf("L");
#endif
			for (i = 0; i < 16; i++) {
#ifndef ARM32_NEW_VM_LAYOUT
				pte[((va >> PAGE_SHIFT) & 0x3f0) + i] =
				    L2_L_PROTO | pa |
				    L2_L_PROT(PTE_KERNEL, prot) | f2l;
				PTE_SYNC(&pte[((va >> PAGE_SHIFT) & 0x3f0) + i]);
#else
				pte[l2pte_index(va) + i] =
				    L2_L_PROTO | pa |
				    L2_L_PROT(PTE_KERNEL, prot) | f2l;
				PTE_SYNC(&pte[l2pte_index(va) + i]);
#endif
			}
			va += L2_L_SIZE;
			pa += L2_L_SIZE;
			resid -= L2_L_SIZE;
			continue;
		}

		/* Use a small page mapping. */
#ifdef VERBOSE_INIT_ARM
		printf("P");
#endif
#ifndef ARM32_NEW_VM_LAYOUT
		pte[(va >> PAGE_SHIFT) & 0x3ff] =
		    L2_S_PROTO | pa | L2_S_PROT(PTE_KERNEL, prot) | f2s;
		PTE_SYNC(&pte[(va >> PAGE_SHIFT) & 0x3ff]);
#else
		pte[l2pte_index(va)] =
		    L2_S_PROTO | pa | L2_S_PROT(PTE_KERNEL, prot) | f2s;
		PTE_SYNC(&pte[l2pte_index(va)]);
#endif
		va += PAGE_SIZE;
		pa += PAGE_SIZE;
		resid -= PAGE_SIZE;
	}
#ifdef VERBOSE_INIT_ARM
	printf("\n");
#endif
	return (size);
}

/********************** Static device map routines ***************************/

static const struct pmap_devmap *pmap_devmap_table;

/*
 * Register the devmap table.  This is provided in case early console
 * initialization needs to register mappings created by bootstrap code
 * before pmap_devmap_bootstrap() is called.
 */
void
pmap_devmap_register(const struct pmap_devmap *table)
{

	pmap_devmap_table = table;
}
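
/*
 * Shape of a devmap table (a sketch; the addresses are made up and the
 * initializers are shown positionally as pd_va, pd_pa, pd_size, pd_prot,
 * pd_cache).  pmap_devmap_bootstrap() and the find routines scan until
 * pd_size == 0, so the array must end with an all-zero terminator entry:
 */
#if 0
static const struct pmap_devmap example_devmap[] = {
	{
		0xfe000000,			/* pd_va */
		0x80000000,			/* pd_pa */
		0x00100000,			/* pd_size: 1MB */
		VM_PROT_READ | VM_PROT_WRITE,	/* pd_prot */
		PTE_NOCACHE,			/* pd_cache */
	},
	{ 0, 0, 0, 0, 0 }
};
#endif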

/*
 * Map all of the static regions in the devmap table, and remember
 * the devmap table so other parts of the kernel can look up entries
 * later.
 */
void
pmap_devmap_bootstrap(vm_offset_t l1pt, const struct pmap_devmap *table)
{
	int i;

	pmap_devmap_table = table;

	for (i = 0; pmap_devmap_table[i].pd_size != 0; i++) {
#ifdef VERBOSE_INIT_ARM
		printf("devmap: %08lx -> %08lx @ %08lx\n",
		    pmap_devmap_table[i].pd_pa,
		    pmap_devmap_table[i].pd_pa +
			pmap_devmap_table[i].pd_size - 1,
		    pmap_devmap_table[i].pd_va);
#endif
		pmap_map_chunk(l1pt, pmap_devmap_table[i].pd_va,
		    pmap_devmap_table[i].pd_pa,
		    pmap_devmap_table[i].pd_size,
		    pmap_devmap_table[i].pd_prot,
		    pmap_devmap_table[i].pd_cache);
	}
}

const struct pmap_devmap *
pmap_devmap_find_pa(vm_paddr_t pa, vm_size_t size)
{
	int i;

	if (pmap_devmap_table == NULL)
		return (NULL);

	for (i = 0; pmap_devmap_table[i].pd_size != 0; i++) {
		if (pa >= pmap_devmap_table[i].pd_pa &&
		    pa + size <= pmap_devmap_table[i].pd_pa +
		    pmap_devmap_table[i].pd_size)
			return (&pmap_devmap_table[i]);
	}

	return (NULL);
}

const struct pmap_devmap *
pmap_devmap_find_va(vm_offset_t va, vm_size_t size)
{
	int i;

	if (pmap_devmap_table == NULL)
		return (NULL);

	for (i = 0; pmap_devmap_table[i].pd_size != 0; i++) {
		if (va >= pmap_devmap_table[i].pd_va &&
		    va + size <= pmap_devmap_table[i].pd_va +
		    pmap_devmap_table[i].pd_size)
			return (&pmap_devmap_table[i]);
	}

	return (NULL);
}
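
/*
 * Lookup sketch (pmap_devmap_reuse_example is hypothetical): a
 * mapdev-style caller can check whether a static devmap entry already
 * covers pa..pa+size and reuse that window instead of allocating fresh
 * KVA, keeping the offset of "pa" within the entry:
 */
#if 0
static void *
pmap_devmap_reuse_example(vm_paddr_t pa, vm_size_t size)
{
	const struct pmap_devmap *pd;

	pd = pmap_devmap_find_pa(pa, size);
	if (pd != NULL)
		return ((void *)(pd->pd_va + (pa - pd->pd_pa)));
	return (NULL);		/* caller falls back to pmap_mapdev() */
}
#endif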