pmap-v4.c revision 221844
/* From: $NetBSD: pmap.c,v 1.148 2004/04/03 04:35:48 bsh Exp $ */
/*-
 * Copyright 2004 Olivier Houchard.
 * Copyright 2003 Wasabi Systems, Inc.
 * All rights reserved.
 *
 * Written by Steve C. Woodford for Wasabi Systems, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed for the NetBSD Project by
 *	Wasabi Systems, Inc.
 * 4. The name of Wasabi Systems, Inc. may not be used to endorse
 *    or promote products derived from this software without specific prior
 *    written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL WASABI SYSTEMS, INC
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*-
 * Copyright (c) 2002-2003 Wasabi Systems, Inc.
 * Copyright (c) 2001 Richard Earnshaw
 * Copyright (c) 2001-2002 Christopher Gilbert
 * All rights reserved.
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. The name of the company nor the name of the author may be used to
 *    endorse or promote products derived from this software without specific
 *    prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
 * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
/*-
 * Copyright (c) 1999 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Charles M. Hannum.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*-
 * Copyright (c) 1994-1998 Mark Brinicombe.
 * Copyright (c) 1994 Brini.
 * All rights reserved.
 *
 * This code is derived from software written for Brini by Mark Brinicombe
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by Mark Brinicombe.
 * 4. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * RiscBSD kernel project
 *
 * pmap.c
 *
 * Machine dependent vm stuff
 *
 * Created      : 20/09/94
 */

/*
 * Special compilation symbols
 * PMAP_DEBUG - Build in pmap_debug_level code
 */
/* Include header files */

#include "opt_vm.h"

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: head/sys/arm/arm/pmap.c 221844 2011-05-13 15:54:12Z cognet $");
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/proc.h>
#include <sys/malloc.h>
#include <sys/msgbuf.h>
#include <sys/vmmeter.h>
#include <sys/mman.h>
#include <sys/smp.h>
#include <sys/sched.h>

#include <vm/vm.h>
#include <vm/uma.h>
#include <vm/pmap.h>
#include <vm/vm_kern.h>
#include <vm/vm_object.h>
#include <vm/vm_map.h>
#include <vm/vm_page.h>
#include <vm/vm_pageout.h>
#include <vm/vm_extern.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <machine/md_var.h>
#include <machine/vmparam.h>
#include <machine/cpu.h>
#include <machine/cpufunc.h>
#include <machine/pcb.h>

#ifdef PMAP_DEBUG
#define PDEBUG(_lev_,_stat_) \
	if (pmap_debug_level >= (_lev_)) \
		((_stat_))
#define dprintf printf

int pmap_debug_level = 0;
#define PMAP_INLINE
#else   /* PMAP_DEBUG */
#define PDEBUG(_lev_,_stat_) /* Nothing */
#define dprintf(x, arg...)
#define PMAP_INLINE __inline
#endif  /* PMAP_DEBUG */
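
/*
 * Usage sketch (illustrative, not part of the original file): with
 * PMAP_DEBUG defined, a call such as
 *
 *	PDEBUG(1, printf("pmap_enter: va = %08x\n", va));
 *
 * expands to the printf() guarded by pmap_debug_level >= 1; without
 * PMAP_DEBUG it compiles to nothing.
 */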
extern struct pv_addr systempage;
/*
 * Internal function prototypes
 */
static void pmap_free_pv_entry (pv_entry_t);
static pv_entry_t pmap_get_pv_entry(void);

static void		pmap_enter_locked(pmap_t, vm_offset_t, vm_page_t,
    vm_prot_t, boolean_t, int);
static void		pmap_fix_cache(struct vm_page *, pmap_t, vm_offset_t);
static void		pmap_alloc_l1(pmap_t);
static void		pmap_free_l1(pmap_t);

static int		pmap_clearbit(struct vm_page *, u_int);

static struct l2_bucket *pmap_get_l2_bucket(pmap_t, vm_offset_t);
static struct l2_bucket *pmap_alloc_l2_bucket(pmap_t, vm_offset_t);
static void		pmap_free_l2_bucket(pmap_t, struct l2_bucket *, u_int);
static vm_offset_t	kernel_pt_lookup(vm_paddr_t);

static MALLOC_DEFINE(M_VMPMAP, "pmap", "PMAP L1");

vm_offset_t virtual_avail;	/* VA of first avail page (after kernel bss) */
vm_offset_t virtual_end;	/* VA of last avail page (end of kernel AS) */
vm_offset_t pmap_curmaxkvaddr;
vm_paddr_t kernel_l1pa;

extern void *end;
vm_offset_t kernel_vm_end = 0;

struct pmap kernel_pmap_store;

static pt_entry_t *csrc_pte, *cdst_pte;
static vm_offset_t csrcp, cdstp;
static struct mtx cmtx;

static void		pmap_init_l1(struct l1_ttable *, pd_entry_t *);
/*
 * These routines are called when the CPU type is identified to set up
 * the PTE prototypes, cache modes, etc.
 *
 * The variables are always here, just in case LKMs need to reference
 * them (though, they shouldn't).
 */

pt_entry_t	pte_l1_s_cache_mode;
pt_entry_t	pte_l1_s_cache_mode_pt;
pt_entry_t	pte_l1_s_cache_mask;

pt_entry_t	pte_l2_l_cache_mode;
pt_entry_t	pte_l2_l_cache_mode_pt;
pt_entry_t	pte_l2_l_cache_mask;

pt_entry_t	pte_l2_s_cache_mode;
pt_entry_t	pte_l2_s_cache_mode_pt;
pt_entry_t	pte_l2_s_cache_mask;

pt_entry_t	pte_l2_s_prot_u;
pt_entry_t	pte_l2_s_prot_w;
pt_entry_t	pte_l2_s_prot_mask;

pt_entry_t	pte_l1_s_proto;
pt_entry_t	pte_l1_c_proto;
pt_entry_t	pte_l2_s_proto;

void		(*pmap_copy_page_func)(vm_paddr_t, vm_paddr_t);
void		(*pmap_zero_page_func)(vm_paddr_t, int, int);
/*
 * Which pmap is currently 'live' in the cache
 *
 * XXXSCW: Fix for SMP ...
 */
union pmap_cache_state *pmap_cache_state;

struct msgbuf *msgbufp = 0;

/*
 * Crashdump maps.
 */
static caddr_t crashdumpmap;

extern void bcopy_page(vm_offset_t, vm_offset_t);
extern void bzero_page(vm_offset_t);

extern vm_offset_t alloc_firstaddr;

char *_tmppt;

/*
 * Metadata for L1 translation tables.
 */
struct l1_ttable {
	/* Entry on the L1 Table list */
	SLIST_ENTRY(l1_ttable) l1_link;

	/* Entry on the L1 Least Recently Used list */
	TAILQ_ENTRY(l1_ttable) l1_lru;

	/* Track how many domains are allocated from this L1 */
	volatile u_int l1_domain_use_count;

	/*
	 * A free-list of domain numbers for this L1.
	 * We avoid using ffs() and a bitmap to track domains since ffs()
	 * is slow on ARM.
	 */
	u_int8_t l1_domain_first;
	u_int8_t l1_domain_free[PMAP_DOMAINS];

	/* Physical address of this L1 page table */
	vm_paddr_t l1_physaddr;

	/* KVA of this L1 page table */
	pd_entry_t *l1_kva;
};
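
/*
 * Illustrative sketch (not in the original source) of the domain
 * free-list above: l1_domain_free[] threads a singly-linked stack of
 * free domain numbers through the array, with l1_domain_first as its
 * head, making allocation and release O(1):
 *
 *	alloc:	dom = l1->l1_domain_first;
 *		l1->l1_domain_first = l1->l1_domain_free[dom];
 *	free:	l1->l1_domain_free[dom] = l1->l1_domain_first;
 *		l1->l1_domain_first = dom;
 *
 * This is exactly what pmap_alloc_l1() and pmap_free_l1() below do,
 * with no ffs()-over-bitmap scan.
 */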

/*
 * Convert a virtual address into its L1 table index. That is, the
 * index used to locate the L2 descriptor table pointer in an L1 table.
 * This is basically used to index l1->l1_kva[].
 *
 * Each L2 descriptor table represents 1MB of VA space.
 */
#define	L1_IDX(va)		(((vm_offset_t)(va)) >> L1_S_SHIFT)
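
/*
 * Worked example (illustrative, not in the original source): with
 * L1_S_SHIFT == 20 (1MB sections), a virtual address such as
 * 0xc0100000 gives L1_IDX(0xc0100000) == 0xc01, so the L2 descriptor
 * table pointer for that section lives in l1->l1_kva[0xc01].
 */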

/*
 * L1 Page Tables are tracked using a Least Recently Used list.
 *  - New L1s are allocated from the HEAD.
 *  - Freed L1s are added to the TAIL.
 *  - Recently accessed L1s (where an 'access' is some change to one of
 *    the userland pmaps which owns this L1) are moved to the TAIL.
 */
static TAILQ_HEAD(, l1_ttable) l1_lru_list;
/*
 * A list of all L1 tables
 */
static SLIST_HEAD(, l1_ttable) l1_list;
static struct mtx l1_lru_lock;

/*
 * The l2_dtable tracks L2_BUCKET_SIZE worth of L1 slots.
 *
 * This is normally 16MB worth of L2 page descriptors for any given pmap.
 * Reference counts are maintained for L2 descriptors so they can be
 * freed when empty.
 */
struct l2_dtable {
	/* The number of L2 page descriptors allocated to this l2_dtable */
	u_int l2_occupancy;

	/* List of L2 page descriptors */
	struct l2_bucket {
		pt_entry_t *l2b_kva;	/* KVA of L2 Descriptor Table */
		vm_paddr_t l2b_phys;	/* Physical address of same */
		u_short l2b_l1idx;	/* This L2 table's L1 index */
		u_short l2b_occupancy;	/* How many active descriptors */
	} l2_bucket[L2_BUCKET_SIZE];
};

/* pmap_kenter_internal flags */
#define KENTER_CACHE	0x1
#define KENTER_USER	0x2

/*
 * Given an L1 table index, calculate the corresponding l2_dtable index
 * and bucket index within the l2_dtable.
 */
#define	L2_IDX(l1idx)		(((l1idx) >> L2_BUCKET_LOG2) & \
				 (L2_SIZE - 1))
#define	L2_BUCKET(l1idx)	((l1idx) & (L2_BUCKET_SIZE - 1))
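
/*
 * Worked example (illustrative, assuming L2_BUCKET_LOG2 == 4 and thus
 * L2_BUCKET_SIZE == 16): for l1idx == 0xc01, L2_BUCKET(0xc01) == 0x1
 * and L2_IDX(0xc01) == (0xc0 & (L2_SIZE - 1)), i.e. that section is
 * described by l2_bucket[1] of the l2_dtable at pm_l2[L2_IDX(0xc01)].
 */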

/*
 * Given a virtual address, this macro returns the
 * virtual address required to drop into the next L2 bucket.
 */
#define	L2_NEXT_BUCKET(va)	(((va) & L1_S_FRAME) + L1_S_SIZE)

/*
 * L2 allocation.
 */
#define	pmap_alloc_l2_dtable()		\
		(void*)uma_zalloc(l2table_zone, M_NOWAIT|M_USE_RESERVE)
#define	pmap_free_l2_dtable(l2)		\
		uma_zfree(l2table_zone, l2)

/*
 * We try to map the page tables write-through, if possible. However, not
 * all CPUs have a write-through cache mode, so on those we have to sync
 * the cache when we frob page tables.
 *
 * We try to evaluate this at compile time, if possible. However, it's
 * not always possible to do that, hence this run-time var.
 */
int	pmap_needs_pte_sync;

/*
 * Macro to determine if a mapping might be resident in the
 * instruction cache and/or TLB
 */
#define	PV_BEEN_EXECD(f)  (((f) & (PVF_REF | PVF_EXEC)) == (PVF_REF | PVF_EXEC))

/*
 * Macro to determine if a mapping might be resident in the
 * data cache and/or TLB
 */
#define	PV_BEEN_REFD(f)   (((f) & PVF_REF) != 0)

#ifndef PMAP_SHPGPERPROC
#define PMAP_SHPGPERPROC 200
#endif

#define pmap_is_current(pm)	((pm) == pmap_kernel() || \
            curproc->p_vmspace->vm_map.pmap == (pm))
static uma_zone_t pvzone = NULL;
uma_zone_t l2zone;
static uma_zone_t l2table_zone;
static vm_offset_t pmap_kernel_l2dtable_kva;
static vm_offset_t pmap_kernel_l2ptp_kva;
static vm_paddr_t pmap_kernel_l2ptp_phys;
static struct vm_object pvzone_obj;
static int pv_entry_count=0, pv_entry_max=0, pv_entry_high_water=0;

/*
 * This list exists for the benefit of pmap_map_chunk().  It keeps track
 * of the kernel L2 tables during bootstrap, so that pmap_map_chunk() can
 * find them as necessary.
 *
 * Note that the data on this list MUST remain valid after initarm() returns,
 * as pmap_bootstrap() uses it to construct L2 table metadata.
 */
SLIST_HEAD(, pv_addr) kernel_pt_list = SLIST_HEAD_INITIALIZER(kernel_pt_list);

static void
pmap_init_l1(struct l1_ttable *l1, pd_entry_t *l1pt)
{
	int i;

	l1->l1_kva = l1pt;
	l1->l1_domain_use_count = 0;
	l1->l1_domain_first = 0;

	for (i = 0; i < PMAP_DOMAINS; i++)
		l1->l1_domain_free[i] = i + 1;

	/*
	 * Copy the kernel's L1 entries to each new L1.
	 */
	if (l1pt != pmap_kernel()->pm_l1->l1_kva)
		memcpy(l1pt, pmap_kernel()->pm_l1->l1_kva, L1_TABLE_SIZE);

	if ((l1->l1_physaddr = pmap_extract(pmap_kernel(), (vm_offset_t)l1pt)) == 0)
		panic("pmap_init_l1: can't get PA of L1 at %p", l1pt);
	SLIST_INSERT_HEAD(&l1_list, l1, l1_link);
	TAILQ_INSERT_TAIL(&l1_lru_list, l1, l1_lru);
}

static vm_offset_t
kernel_pt_lookup(vm_paddr_t pa)
{
	struct pv_addr *pv;

	SLIST_FOREACH(pv, &kernel_pt_list, pv_list) {
		if (pv->pv_pa == pa)
			return (pv->pv_va);
	}
	return (0);
}

#if (ARM_MMU_GENERIC + ARM_MMU_SA1) != 0
void
pmap_pte_init_generic(void)
{

	pte_l1_s_cache_mode = L1_S_B|L1_S_C;
	pte_l1_s_cache_mask = L1_S_CACHE_MASK_generic;

	pte_l2_l_cache_mode = L2_B|L2_C;
	pte_l2_l_cache_mask = L2_L_CACHE_MASK_generic;

	pte_l2_s_cache_mode = L2_B|L2_C;
	pte_l2_s_cache_mask = L2_S_CACHE_MASK_generic;

	/*
	 * If we have a write-through cache, set B and C.  If
	 * we have a write-back cache, then we assume setting
	 * only C will make those pages write-through.
	 */
	if (cpufuncs.cf_dcache_wb_range == (void *) cpufunc_nullop) {
		pte_l1_s_cache_mode_pt = L1_S_B|L1_S_C;
		pte_l2_l_cache_mode_pt = L2_B|L2_C;
		pte_l2_s_cache_mode_pt = L2_B|L2_C;
	} else {
		pte_l1_s_cache_mode_pt = L1_S_C;
		pte_l2_l_cache_mode_pt = L2_C;
		pte_l2_s_cache_mode_pt = L2_C;
	}

	pte_l2_s_prot_u = L2_S_PROT_U_generic;
	pte_l2_s_prot_w = L2_S_PROT_W_generic;
	pte_l2_s_prot_mask = L2_S_PROT_MASK_generic;

	pte_l1_s_proto = L1_S_PROTO_generic;
	pte_l1_c_proto = L1_C_PROTO_generic;
	pte_l2_s_proto = L2_S_PROTO_generic;

	pmap_copy_page_func = pmap_copy_page_generic;
	pmap_zero_page_func = pmap_zero_page_generic;
}

#if defined(CPU_ARM8)
void
pmap_pte_init_arm8(void)
{

	/*
	 * ARM8 is compatible with generic, but we need to use
	 * the page tables uncached.
	 */
	pmap_pte_init_generic();

	pte_l1_s_cache_mode_pt = 0;
	pte_l2_l_cache_mode_pt = 0;
	pte_l2_s_cache_mode_pt = 0;
}
#endif /* CPU_ARM8 */

#if defined(CPU_ARM9) && defined(ARM9_CACHE_WRITE_THROUGH)
void
pmap_pte_init_arm9(void)
{

	/*
	 * ARM9 is compatible with generic, but we want to use
	 * write-through caching for now.
	 */
	pmap_pte_init_generic();

	pte_l1_s_cache_mode = L1_S_C;
	pte_l2_l_cache_mode = L2_C;
	pte_l2_s_cache_mode = L2_C;

	pte_l1_s_cache_mode_pt = L1_S_C;
	pte_l2_l_cache_mode_pt = L2_C;
	pte_l2_s_cache_mode_pt = L2_C;
}
#endif /* CPU_ARM9 */
#endif /* (ARM_MMU_GENERIC + ARM_MMU_SA1) != 0 */

#if defined(CPU_ARM10)
void
pmap_pte_init_arm10(void)
{

	/*
	 * ARM10 is compatible with generic, but we want to use
	 * write-through caching for now.
	 */
	pmap_pte_init_generic();

	pte_l1_s_cache_mode = L1_S_B | L1_S_C;
	pte_l2_l_cache_mode = L2_B | L2_C;
	pte_l2_s_cache_mode = L2_B | L2_C;

	pte_l1_s_cache_mode_pt = L1_S_C;
	pte_l2_l_cache_mode_pt = L2_C;
	pte_l2_s_cache_mode_pt = L2_C;

}
#endif /* CPU_ARM10 */

#if ARM_MMU_SA1 == 1
void
pmap_pte_init_sa1(void)
{

	/*
	 * The StrongARM SA-1 cache does not have a write-through
	 * mode.  So, do the generic initialization, then reset
	 * the page table cache mode to B=1,C=1, and note that
	 * the PTEs need to be sync'd.
	 */
	pmap_pte_init_generic();

	pte_l1_s_cache_mode_pt = L1_S_B|L1_S_C;
	pte_l2_l_cache_mode_pt = L2_B|L2_C;
	pte_l2_s_cache_mode_pt = L2_B|L2_C;

	pmap_needs_pte_sync = 1;
}
#endif /* ARM_MMU_SA1 == 1*/

#if ARM_MMU_XSCALE == 1
#if (ARM_NMMUS > 1) || defined (CPU_XSCALE_CORE3)
static u_int xscale_use_minidata;
#endif

void
pmap_pte_init_xscale(void)
{
	uint32_t auxctl;
	int write_through = 0;

	pte_l1_s_cache_mode = L1_S_B|L1_S_C|L1_S_XSCALE_P;
	pte_l1_s_cache_mask = L1_S_CACHE_MASK_xscale;

	pte_l2_l_cache_mode = L2_B|L2_C;
	pte_l2_l_cache_mask = L2_L_CACHE_MASK_xscale;

	pte_l2_s_cache_mode = L2_B|L2_C;
	pte_l2_s_cache_mask = L2_S_CACHE_MASK_xscale;

	pte_l1_s_cache_mode_pt = L1_S_C;
	pte_l2_l_cache_mode_pt = L2_C;
	pte_l2_s_cache_mode_pt = L2_C;
#ifdef XSCALE_CACHE_READ_WRITE_ALLOCATE
	/*
	 * The XScale core has an enhanced mode where writes that
	 * miss the cache cause a cache line to be allocated.  This
	 * is significantly faster than the traditional, write-through
	 * behavior of this case.
	 */
	pte_l1_s_cache_mode |= L1_S_XSCALE_TEX(TEX_XSCALE_X);
	pte_l2_l_cache_mode |= L2_XSCALE_L_TEX(TEX_XSCALE_X);
	pte_l2_s_cache_mode |= L2_XSCALE_T_TEX(TEX_XSCALE_X);
#endif /* XSCALE_CACHE_READ_WRITE_ALLOCATE */
#ifdef XSCALE_CACHE_WRITE_THROUGH
	/*
	 * Some versions of the XScale core have various bugs in
	 * their cache units, the work-around for which is to run
	 * the cache in write-through mode.  Unfortunately, this
	 * has a major (negative) impact on performance.  So, we
	 * go ahead and run fast-and-loose, in the hopes that we
	 * don't line up the planets in a way that will trip the
	 * bugs.
	 *
	 * However, we give you the option to be slow-but-correct.
	 */
	write_through = 1;
#elif defined(XSCALE_CACHE_WRITE_BACK)
	/* force write back cache mode */
	write_through = 0;
#elif defined(CPU_XSCALE_PXA2X0)
	/*
	 * Intel PXA2[15]0 processors are known to have a bug in
	 * write-back cache on revision 4 and earlier (stepping
	 * A[01] and B[012]).  Fixed for C0 and later.
	 */
	{
		uint32_t id, type;

		id = cpufunc_id();
		type = id & ~(CPU_ID_XSCALE_COREREV_MASK|CPU_ID_REVISION_MASK);

		if (type == CPU_ID_PXA250 || type == CPU_ID_PXA210) {
			if ((id & CPU_ID_REVISION_MASK) < 5) {
				/* write through for stepping A0-1 and B0-2 */
				write_through = 1;
			}
		}
	}
#endif /* XSCALE_CACHE_WRITE_THROUGH */

	if (write_through) {
		pte_l1_s_cache_mode = L1_S_C;
		pte_l2_l_cache_mode = L2_C;
		pte_l2_s_cache_mode = L2_C;
	}

#if (ARM_NMMUS > 1)
	xscale_use_minidata = 1;
#endif

	pte_l2_s_prot_u = L2_S_PROT_U_xscale;
	pte_l2_s_prot_w = L2_S_PROT_W_xscale;
	pte_l2_s_prot_mask = L2_S_PROT_MASK_xscale;

	pte_l1_s_proto = L1_S_PROTO_xscale;
	pte_l1_c_proto = L1_C_PROTO_xscale;
	pte_l2_s_proto = L2_S_PROTO_xscale;

#ifdef CPU_XSCALE_CORE3
	pmap_copy_page_func = pmap_copy_page_generic;
	pmap_zero_page_func = pmap_zero_page_generic;
	xscale_use_minidata = 0;
	/* Make sure it is L2-cacheable */
	pte_l1_s_cache_mode |= L1_S_XSCALE_TEX(TEX_XSCALE_T);
	pte_l1_s_cache_mode_pt = pte_l1_s_cache_mode &~ L1_S_XSCALE_P;
	pte_l2_l_cache_mode |= L2_XSCALE_L_TEX(TEX_XSCALE_T);
	pte_l2_l_cache_mode_pt = pte_l1_s_cache_mode;
	pte_l2_s_cache_mode |= L2_XSCALE_T_TEX(TEX_XSCALE_T);
	pte_l2_s_cache_mode_pt = pte_l2_s_cache_mode;

#else
	pmap_copy_page_func = pmap_copy_page_xscale;
	pmap_zero_page_func = pmap_zero_page_xscale;
#endif

	/*
	 * Disable ECC protection of page table access, for now.
	 */
	__asm __volatile("mrc p15, 0, %0, c1, c0, 1" : "=r" (auxctl));
	auxctl &= ~XSCALE_AUXCTL_P;
	__asm __volatile("mcr p15, 0, %0, c1, c0, 1" : : "r" (auxctl));
}

/*
 * xscale_setup_minidata:
 *
 *	Set up the mini-data cache clean area.  We require the
 *	caller to allocate the right amount of physically and
 *	virtually contiguous space.
 */
extern vm_offset_t xscale_minidata_clean_addr;
extern vm_size_t xscale_minidata_clean_size; /* already initialized */
void
xscale_setup_minidata(vm_offset_t l1pt, vm_offset_t va, vm_paddr_t pa)
{
	pd_entry_t *pde = (pd_entry_t *) l1pt;
	pt_entry_t *pte;
	vm_size_t size;
	uint32_t auxctl;

	xscale_minidata_clean_addr = va;

	/* Round it to page size. */
	size = (xscale_minidata_clean_size + L2_S_OFFSET) & L2_S_FRAME;

	for (; size != 0;
	     va += L2_S_SIZE, pa += L2_S_SIZE, size -= L2_S_SIZE) {
		pte = (pt_entry_t *) kernel_pt_lookup(
		    pde[L1_IDX(va)] & L1_C_ADDR_MASK);
		if (pte == NULL)
			panic("xscale_setup_minidata: can't find L2 table for "
			    "VA 0x%08x", (u_int32_t) va);
		pte[l2pte_index(va)] =
		    L2_S_PROTO | pa | L2_S_PROT(PTE_KERNEL, VM_PROT_READ) |
		    L2_C | L2_XSCALE_T_TEX(TEX_XSCALE_X);
	}

	/*
	 * Configure the mini-data cache for write-back with
	 * read/write-allocate.
	 *
	 * NOTE: In order to reconfigure the mini-data cache, we must
	 * make sure it contains no valid data!  In order to do that,
	 * we must issue a global data cache invalidate command!
	 *
	 * WE ASSUME WE ARE RUNNING UN-CACHED WHEN THIS ROUTINE IS CALLED!
	 * THIS IS VERY IMPORTANT!
	 */

	/* Invalidate data and mini-data. */
	__asm __volatile("mcr p15, 0, %0, c7, c6, 0" : : "r" (0));
	__asm __volatile("mrc p15, 0, %0, c1, c0, 1" : "=r" (auxctl));
	auxctl = (auxctl & ~XSCALE_AUXCTL_MD_MASK) | XSCALE_AUXCTL_MD_WB_RWA;
	__asm __volatile("mcr p15, 0, %0, c1, c0, 1" : : "r" (auxctl));
}
#endif

/*
 * Allocate an L1 translation table for the specified pmap.
 * This is called at pmap creation time.
 */
static void
pmap_alloc_l1(pmap_t pm)
{
	struct l1_ttable *l1;
	u_int8_t domain;

	/*
	 * Remove the L1 at the head of the LRU list
	 */
	mtx_lock(&l1_lru_lock);
	l1 = TAILQ_FIRST(&l1_lru_list);
	TAILQ_REMOVE(&l1_lru_list, l1, l1_lru);

	/*
	 * Pick the first available domain number, and update
	 * the link to the next number.
	 */
	domain = l1->l1_domain_first;
	l1->l1_domain_first = l1->l1_domain_free[domain];

	/*
	 * If there are still free domain numbers in this L1,
	 * put it back on the TAIL of the LRU list.
	 */
	if (++l1->l1_domain_use_count < PMAP_DOMAINS)
		TAILQ_INSERT_TAIL(&l1_lru_list, l1, l1_lru);

	mtx_unlock(&l1_lru_lock);

	/*
	 * Fix up the relevant bits in the pmap structure
	 */
	pm->pm_l1 = l1;
	pm->pm_domain = domain + 1;
}

/*
 * Free an L1 translation table.
 * This is called at pmap destruction time.
 */
static void
pmap_free_l1(pmap_t pm)
{
	struct l1_ttable *l1 = pm->pm_l1;

	mtx_lock(&l1_lru_lock);

	/*
	 * If this L1 is currently on the LRU list, remove it.
	 */
	if (l1->l1_domain_use_count < PMAP_DOMAINS)
		TAILQ_REMOVE(&l1_lru_list, l1, l1_lru);

	/*
	 * Free up the domain number which was allocated to the pmap
	 */
	l1->l1_domain_free[pm->pm_domain - 1] = l1->l1_domain_first;
	l1->l1_domain_first = pm->pm_domain - 1;
	l1->l1_domain_use_count--;

	/*
	 * The L1 now must have at least 1 free domain, so add
	 * it back to the LRU list. If the use count is zero,
	 * put it at the head of the list, otherwise it goes
	 * to the tail.
	 */
	if (l1->l1_domain_use_count == 0) {
		TAILQ_INSERT_HEAD(&l1_lru_list, l1, l1_lru);
	} else
		TAILQ_INSERT_TAIL(&l1_lru_list, l1, l1_lru);

	mtx_unlock(&l1_lru_lock);
}

/*
 * Returns a pointer to the L2 bucket associated with the specified pmap
 * and VA, or NULL if no L2 bucket exists for the address.
 */
static PMAP_INLINE struct l2_bucket *
pmap_get_l2_bucket(pmap_t pm, vm_offset_t va)
{
	struct l2_dtable *l2;
	struct l2_bucket *l2b;
	u_short l1idx;

	l1idx = L1_IDX(va);

	if ((l2 = pm->pm_l2[L2_IDX(l1idx)]) == NULL ||
	    (l2b = &l2->l2_bucket[L2_BUCKET(l1idx)])->l2b_kva == NULL)
		return (NULL);

	return (l2b);
}

/*
 * Returns a pointer to the L2 bucket associated with the specified pmap
 * and VA.
 *
 * If no L2 bucket exists, perform the necessary allocations to put an L2
 * bucket/page table in place.
 *
 * Note that if a new L2 bucket/page was allocated, the caller *must*
 * increment the bucket occupancy counter appropriately *before*
 * releasing the pmap's lock to ensure no other thread or cpu deallocates
 * the bucket/page in the meantime.
 */
static struct l2_bucket *
pmap_alloc_l2_bucket(pmap_t pm, vm_offset_t va)
{
	struct l2_dtable *l2;
	struct l2_bucket *l2b;
	u_short l1idx;

	l1idx = L1_IDX(va);

	PMAP_ASSERT_LOCKED(pm);
	mtx_assert(&vm_page_queue_mtx, MA_OWNED);
	if ((l2 = pm->pm_l2[L2_IDX(l1idx)]) == NULL) {
		/*
		 * No mapping at this address, as there is
		 * no entry in the L1 table.
		 * Need to allocate a new l2_dtable.
		 */
again_l2table:
		PMAP_UNLOCK(pm);
		vm_page_unlock_queues();
		if ((l2 = pmap_alloc_l2_dtable()) == NULL) {
			vm_page_lock_queues();
			PMAP_LOCK(pm);
			return (NULL);
		}
		vm_page_lock_queues();
		PMAP_LOCK(pm);
		if (pm->pm_l2[L2_IDX(l1idx)] != NULL) {
			PMAP_UNLOCK(pm);
			vm_page_unlock_queues();
			uma_zfree(l2table_zone, l2);
			vm_page_lock_queues();
			PMAP_LOCK(pm);
			l2 = pm->pm_l2[L2_IDX(l1idx)];
			if (l2 == NULL)
				goto again_l2table;
			/*
			 * Someone already allocated the l2_dtable while
			 * we were doing the same.
			 */
		} else {
			bzero(l2, sizeof(*l2));
			/*
			 * Link it into the parent pmap
			 */
			pm->pm_l2[L2_IDX(l1idx)] = l2;
		}
	}

	l2b = &l2->l2_bucket[L2_BUCKET(l1idx)];

	/*
	 * Fetch pointer to the L2 page table associated with the address.
	 */
	if (l2b->l2b_kva == NULL) {
		pt_entry_t *ptep;

		/*
		 * No L2 page table has been allocated. Chances are, this
		 * is because we just allocated the l2_dtable, above.
		 */
again_ptep:
		PMAP_UNLOCK(pm);
		vm_page_unlock_queues();
		ptep = (void*)uma_zalloc(l2zone, M_NOWAIT|M_USE_RESERVE);
		vm_page_lock_queues();
		PMAP_LOCK(pm);
		if (l2b->l2b_kva != 0) {
			/* We lost the race. */
			PMAP_UNLOCK(pm);
			vm_page_unlock_queues();
			uma_zfree(l2zone, ptep);
			vm_page_lock_queues();
			PMAP_LOCK(pm);
			if (l2b->l2b_kva == 0)
				goto again_ptep;
			return (l2b);
		}
		if (ptep == NULL) {
			/*
			 * Oops, no more L2 page tables available at this
			 * time. We may need to deallocate the l2_dtable
			 * if we allocated a new one above.
			 */
			if (l2->l2_occupancy == 0) {
				pm->pm_l2[L2_IDX(l1idx)] = NULL;
				pmap_free_l2_dtable(l2);
			}
			return (NULL);
		}
		l2b->l2b_phys = vtophys(ptep);

		l2->l2_occupancy++;
		l2b->l2b_kva = ptep;
		l2b->l2b_l1idx = l1idx;
	}

	return (l2b);
}

static PMAP_INLINE void
#ifndef PMAP_INCLUDE_PTE_SYNC
pmap_free_l2_ptp(pt_entry_t *l2)
#else
pmap_free_l2_ptp(boolean_t need_sync, pt_entry_t *l2)
#endif
{
#ifdef PMAP_INCLUDE_PTE_SYNC
	/*
	 * Note: With a write-back cache, we may need to sync this
	 * L2 table before re-using it.
	 * This is because it may have belonged to a non-current
	 * pmap, in which case the cache syncs would have been
	 * skipped when the pages were being unmapped. If the
	 * L2 table were then to be immediately re-allocated to
	 * the *current* pmap, it may well contain stale mappings
	 * which have not yet been cleared by a cache write-back
	 * and so would still be visible to the mmu.
	 */
	if (need_sync)
		PTE_SYNC_RANGE(l2, L2_TABLE_SIZE_REAL / sizeof(pt_entry_t));
#endif
	uma_zfree(l2zone, l2);
}
/*
 * One or more mappings in the specified L2 descriptor table have just been
 * invalidated.
 *
 * Garbage collect the metadata and descriptor table itself if necessary.
 *
 * The pmap lock must be acquired when this is called (not necessary
 * for the kernel pmap).
 */
static void
pmap_free_l2_bucket(pmap_t pm, struct l2_bucket *l2b, u_int count)
{
	struct l2_dtable *l2;
	pd_entry_t *pl1pd, l1pd;
	pt_entry_t *ptep;
	u_short l1idx;


	/*
	 * Update the bucket's reference count according to how many
	 * PTEs the caller has just invalidated.
	 */
	l2b->l2b_occupancy -= count;

	/*
	 * Note:
	 *
	 * Level 2 page tables allocated to the kernel pmap are never freed
	 * as that would require checking all Level 1 page tables and
	 * removing any references to the Level 2 page table. See also the
	 * comment elsewhere about never freeing bootstrap L2 descriptors.
	 *
	 * We make do with just invalidating the mapping in the L2 table.
	 *
	 * This isn't really a big deal in practice and, in fact, leads
	 * to a performance win over time as we don't need to continually
	 * alloc/free.
	 */
	if (l2b->l2b_occupancy > 0 || pm == pmap_kernel())
		return;

	/*
	 * There are no more valid mappings in this level 2 page table.
	 * Go ahead and NULL-out the pointer in the bucket, then
	 * free the page table.
	 */
	l1idx = l2b->l2b_l1idx;
	ptep = l2b->l2b_kva;
	l2b->l2b_kva = NULL;

	pl1pd = &pm->pm_l1->l1_kva[l1idx];

	/*
	 * If the L1 slot matches the pmap's domain
	 * number, then invalidate it.
	 */
	l1pd = *pl1pd & (L1_TYPE_MASK | L1_C_DOM_MASK);
	if (l1pd == (L1_C_DOM(pm->pm_domain) | L1_TYPE_C)) {
		*pl1pd = 0;
		PTE_SYNC(pl1pd);
	}

	/*
	 * Release the L2 descriptor table back to the pool cache.
	 */
#ifndef PMAP_INCLUDE_PTE_SYNC
	pmap_free_l2_ptp(ptep);
#else
	pmap_free_l2_ptp(!pmap_is_current(pm), ptep);
#endif

	/*
	 * Update the reference count in the associated l2_dtable
	 */
	l2 = pm->pm_l2[L2_IDX(l1idx)];
	if (--l2->l2_occupancy > 0)
		return;

	/*
	 * There are no more valid mappings in any of the Level 1
	 * slots managed by this l2_dtable. Go ahead and NULL-out
	 * the pointer in the parent pmap and free the l2_dtable.
	 */
	pm->pm_l2[L2_IDX(l1idx)] = NULL;
	pmap_free_l2_dtable(l2);
}

/*
 * Pool cache constructors for L2 descriptor tables, metadata and pmap
 * structures.
 */
static int
pmap_l2ptp_ctor(void *mem, int size, void *arg, int flags)
{
#ifndef PMAP_INCLUDE_PTE_SYNC
	struct l2_bucket *l2b;
	pt_entry_t *ptep, pte;
#ifdef ARM_USE_SMALL_ALLOC
	pd_entry_t *pde;
#endif
	vm_offset_t va = (vm_offset_t)mem & ~PAGE_MASK;

	/*
	 * The mappings for these page tables were initially made using
	 * pmap_kenter() by the pool subsystem.  Therefore, the cache-
	 * mode will not be right for page table mappings.  To avoid
	 * polluting the pmap_kenter() code with a special case for
	 * page tables, we simply fix up the cache-mode here if it's not
	 * correct.
	 */
#ifdef ARM_USE_SMALL_ALLOC
	pde = &kernel_pmap->pm_l1->l1_kva[L1_IDX(va)];
	if (!l1pte_section_p(*pde)) {
#endif
		l2b = pmap_get_l2_bucket(pmap_kernel(), va);
		ptep = &l2b->l2b_kva[l2pte_index(va)];
		pte = *ptep;

		if ((pte & L2_S_CACHE_MASK) != pte_l2_s_cache_mode_pt) {
			/*
			 * Page tables must have the cache-mode set to
			 * Write-Thru.
			 */
			*ptep = (pte & ~L2_S_CACHE_MASK) | pte_l2_s_cache_mode_pt;
			PTE_SYNC(ptep);
			cpu_tlb_flushD_SE(va);
			cpu_cpwait();
		}
#ifdef ARM_USE_SMALL_ALLOC
	}
#endif
#endif
	memset(mem, 0, L2_TABLE_SIZE_REAL);
	PTE_SYNC_RANGE(mem, L2_TABLE_SIZE_REAL / sizeof(pt_entry_t));
	return (0);
}

/*
 * A bunch of routines to conditionally flush the caches/TLB depending
 * on whether the specified pmap actually needs to be flushed at any
 * given time.
 */
static PMAP_INLINE void
pmap_tlb_flushID_SE(pmap_t pm, vm_offset_t va)
{

	if (pmap_is_current(pm))
		cpu_tlb_flushID_SE(va);
}

static PMAP_INLINE void
pmap_tlb_flushD_SE(pmap_t pm, vm_offset_t va)
{

	if (pmap_is_current(pm))
		cpu_tlb_flushD_SE(va);
}

static PMAP_INLINE void
pmap_tlb_flushID(pmap_t pm)
{

	if (pmap_is_current(pm))
		cpu_tlb_flushID();
}
static PMAP_INLINE void
pmap_tlb_flushD(pmap_t pm)
{

	if (pmap_is_current(pm))
		cpu_tlb_flushD();
}

static int
pmap_has_valid_mapping(pmap_t pm, vm_offset_t va)
{
	pd_entry_t *pde;
	pt_entry_t *ptep;

	if (pmap_get_pde_pte(pm, va, &pde, &ptep) &&
	    ptep && ((*ptep & L2_TYPE_MASK) != L2_TYPE_INV))
		return (1);

	return (0);
}

static PMAP_INLINE void
pmap_idcache_wbinv_range(pmap_t pm, vm_offset_t va, vm_size_t len)
{
	vm_size_t rest;

	CTR4(KTR_PMAP, "pmap_idcache_wbinv_range: pmap %p is_kernel %d va 0x%08x"
	    " len 0x%x ", pm, pm == pmap_kernel(), va, len);

	if (pmap_is_current(pm) || pm == pmap_kernel()) {
		rest = MIN(PAGE_SIZE - (va & PAGE_MASK), len);
		while (len > 0) {
			if (pmap_has_valid_mapping(pm, va)) {
				cpu_idcache_wbinv_range(va, rest);
				cpu_l2cache_wbinv_range(va, rest);
			}
			len -= rest;
			va += rest;
			rest = MIN(PAGE_SIZE, len);
		}
	}
}
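
/*
 * Worked example of the per-page chunking above (illustrative, with
 * PAGE_SIZE == 0x1000): for va == 0x1f80 and len == 0x2000, the loop
 * issues three ranges - 0x80 bytes up to the first page boundary, then
 * a full 0x1000 page, then the remaining 0xf80 - skipping any page for
 * which pmap_has_valid_mapping() is false.
 */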
static PMAP_INLINE void
pmap_dcache_wb_range(pmap_t pm, vm_offset_t va, vm_size_t len, boolean_t do_inv,
    boolean_t rd_only)
{
	vm_size_t rest;

	CTR4(KTR_PMAP, "pmap_dcache_wb_range: pmap %p is_kernel %d va 0x%08x "
	    "len 0x%x ", pm, pm == pmap_kernel(), va, len);
	CTR2(KTR_PMAP, " do_inv %d rd_only %d", do_inv, rd_only);

	if (pmap_is_current(pm)) {
		rest = MIN(PAGE_SIZE - (va & PAGE_MASK), len);
		while (len > 0) {
			if (pmap_has_valid_mapping(pm, va)) {
				if (do_inv && rd_only) {
					cpu_dcache_inv_range(va, rest);
					cpu_l2cache_inv_range(va, rest);
				} else if (do_inv) {
					cpu_dcache_wbinv_range(va, rest);
					cpu_l2cache_wbinv_range(va, rest);
				} else if (!rd_only) {
					cpu_dcache_wb_range(va, rest);
					cpu_l2cache_wb_range(va, rest);
				}
			}
			len -= rest;
			va += rest;

			rest = MIN(PAGE_SIZE, len);
		}
	}
}

static PMAP_INLINE void
pmap_idcache_wbinv_all(pmap_t pm)
{

	if (pmap_is_current(pm)) {
		cpu_idcache_wbinv_all();
		cpu_l2cache_wbinv_all();
	}
}

#ifdef notyet
static PMAP_INLINE void
pmap_dcache_wbinv_all(pmap_t pm)
{

	if (pmap_is_current(pm)) {
		cpu_dcache_wbinv_all();
		cpu_l2cache_wbinv_all();
	}
}
#endif

/*
 * PTE_SYNC_CURRENT:
 *
 *	Make sure the pte is written out to RAM.
 *	We need to do this in one of the following cases:
 *	  - We're dealing with the kernel pmap
 *	  - There is no pmap active in the cache/tlb.
 *	  - The specified pmap is 'active' in the cache/tlb.
 */
#ifdef PMAP_INCLUDE_PTE_SYNC
#define	PTE_SYNC_CURRENT(pm, ptep)	\
do {					\
	if (PMAP_NEEDS_PTE_SYNC &&	\
	    pmap_is_current(pm))	\
		PTE_SYNC(ptep);		\
} while (/*CONSTCOND*/0)
#else
#define	PTE_SYNC_CURRENT(pm, ptep)	/* nothing */
#endif

/*
 * cacheable == -1 means we must make the entry uncacheable, 1 means
 * cacheable.
 */
static __inline void
pmap_set_cache_entry(pv_entry_t pv, pmap_t pm, vm_offset_t va, int cacheable)
{
	struct l2_bucket *l2b;
	pt_entry_t *ptep, pte;

	l2b = pmap_get_l2_bucket(pv->pv_pmap, pv->pv_va);
	ptep = &l2b->l2b_kva[l2pte_index(pv->pv_va)];

	if (cacheable == 1) {
		pte = (*ptep & ~L2_S_CACHE_MASK) | pte_l2_s_cache_mode;
		if (l2pte_valid(pte)) {
			if (PV_BEEN_EXECD(pv->pv_flags)) {
				pmap_tlb_flushID_SE(pv->pv_pmap, pv->pv_va);
			} else if (PV_BEEN_REFD(pv->pv_flags)) {
				pmap_tlb_flushD_SE(pv->pv_pmap, pv->pv_va);
			}
		}
	} else {
		pte = *ptep &~ L2_S_CACHE_MASK;
		if ((va != pv->pv_va || pm != pv->pv_pmap) &&
		    l2pte_valid(pte)) {
			if (PV_BEEN_EXECD(pv->pv_flags)) {
				pmap_idcache_wbinv_range(pv->pv_pmap,
				    pv->pv_va, PAGE_SIZE);
				pmap_tlb_flushID_SE(pv->pv_pmap, pv->pv_va);
			} else if (PV_BEEN_REFD(pv->pv_flags)) {
				pmap_dcache_wb_range(pv->pv_pmap,
				    pv->pv_va, PAGE_SIZE, TRUE,
				    (pv->pv_flags & PVF_WRITE) == 0);
				pmap_tlb_flushD_SE(pv->pv_pmap,
				    pv->pv_va);
			}
		}
	}
	*ptep = pte;
	PTE_SYNC_CURRENT(pv->pv_pmap, ptep);
}

static void
pmap_fix_cache(struct vm_page *pg, pmap_t pm, vm_offset_t va)
{
	int pmwc = 0;
	int writable = 0, kwritable = 0, uwritable = 0;
	int entries = 0, kentries = 0, uentries = 0;
	struct pv_entry *pv;

	mtx_assert(&vm_page_queue_mtx, MA_OWNED);

	/*
	 * The cache gets written back/invalidated on context switch.
	 * Therefore, if a user page shares an entry in the same page or
	 * with the kernel map and at least one is writable, then the
	 * cache entry must be set write-through.
static void
pmap_fix_cache(struct vm_page *pg, pmap_t pm, vm_offset_t va)
{
	int pmwc = 0;
	int writable = 0, kwritable = 0, uwritable = 0;
	int entries = 0, kentries = 0, uentries = 0;
	struct pv_entry *pv;

	mtx_assert(&vm_page_queue_mtx, MA_OWNED);

	/*
	 * The cache gets written back/invalidated on context switch.
	 * Therefore, if a user page shares an entry in the same page or
	 * with the kernel map and at least one is writable, then the
	 * cache entry must be set write-through.
	 */

	TAILQ_FOREACH(pv, &pg->md.pv_list, pv_list) {
		/* generate a count of the pv_entry uses */
		if (pv->pv_flags & PVF_WRITE) {
			if (pv->pv_pmap == pmap_kernel())
				kwritable++;
			else if (pv->pv_pmap == pm)
				uwritable++;
			writable++;
		}
		if (pv->pv_pmap == pmap_kernel())
			kentries++;
		else {
			if (pv->pv_pmap == pm)
				uentries++;
			entries++;
		}
	}
	/*
	 * check if the user duplicate mapping has
	 * been removed.
	 */
	if ((pm != pmap_kernel()) && (((uentries > 1) && uwritable) ||
	    (uwritable > 1)))
		pmwc = 1;

	TAILQ_FOREACH(pv, &pg->md.pv_list, pv_list) {
		/* check for user uncacheable conditions - order is important */
		if (pm != pmap_kernel() &&
		    (pv->pv_pmap == pm || pv->pv_pmap == pmap_kernel())) {

			if ((uentries > 1 && uwritable) || uwritable > 1) {

				/* user duplicate mapping */
				if (pv->pv_pmap != pmap_kernel())
					pv->pv_flags |= PVF_MWC;

				if (!(pv->pv_flags & PVF_NC)) {
					pv->pv_flags |= PVF_NC;
					pmap_set_cache_entry(pv, pm, va, -1);
				}
				continue;
			} else	/* no longer a duplicate user */
				pv->pv_flags &= ~PVF_MWC;
		}

		/*
		 * check for kernel uncacheable conditions:
		 * kernel writable, or kernel readable with writable user entry
		 */
		if ((kwritable && (entries || kentries > 1)) ||
		    (kwritable > 1) ||
		    ((kwritable != writable) && kentries &&
		     (pv->pv_pmap == pmap_kernel() ||
		      (pv->pv_flags & PVF_WRITE) ||
		      (pv->pv_flags & PVF_MWC)))) {

			if (!(pv->pv_flags & PVF_NC)) {
				pv->pv_flags |= PVF_NC;
				pmap_set_cache_entry(pv, pm, va, -1);
			}
			continue;
		}

		/* kernel and user are cacheable */
		if ((pm == pmap_kernel()) && !(pv->pv_flags & PVF_MWC) &&
		    (pv->pv_flags & PVF_NC)) {

			pv->pv_flags &= ~PVF_NC;
			pmap_set_cache_entry(pv, pm, va, 1);
			continue;
		}
		/* user is no longer shareable and writable */
		if (pm != pmap_kernel() &&
		    (pv->pv_pmap == pm || pv->pv_pmap == pmap_kernel()) &&
		    !pmwc && (pv->pv_flags & PVF_NC)) {

			pv->pv_flags &= ~(PVF_NC | PVF_MWC);
			pmap_set_cache_entry(pv, pm, va, 1);
		}
	}

	if ((kwritable == 0) && (writable == 0)) {
		pg->md.pvh_attrs &= ~PVF_MOD;
		vm_page_flag_clear(pg, PG_WRITEABLE);
		return;
	}
}
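/*
 * A worked example of the rules above (hypothetical scenario): suppose a
 * page is mapped once by the kernel pmap read/write and once by a user
 * pmap read-only.  Then kwritable == 1 and entries == 1, so the second
 * loop marks both mappings PVF_NC and calls pmap_set_cache_entry(.., -1).
 * If the kernel mapping is later removed, a subsequent pmap_fix_cache()
 * finds kwritable == 0 and re-enables caching on the surviving user
 * mapping via pmap_set_cache_entry(.., 1).
 */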
/*
 * Modify pte bits for all ptes corresponding to the given physical address.
 * We use `maskbits' rather than `clearbits' because we're always passing
 * constants and the latter would require an extra inversion at run-time.
 */
static int
pmap_clearbit(struct vm_page *pg, u_int maskbits)
{
	struct l2_bucket *l2b;
	struct pv_entry *pv;
	pt_entry_t *ptep, npte, opte;
	pmap_t pm;
	vm_offset_t va;
	u_int oflags;
	int count = 0;

	vm_page_lock_queues();

	if (maskbits & PVF_WRITE)
		maskbits |= PVF_MOD;
	/*
	 * Clear saved attributes (modify, reference)
	 */
	pg->md.pvh_attrs &= ~(maskbits & (PVF_MOD | PVF_REF));

	if (TAILQ_EMPTY(&pg->md.pv_list)) {
		vm_page_unlock_queues();
		return (0);
	}

	/*
	 * Loop over all current mappings, setting/clearing as appropriate
	 */
	TAILQ_FOREACH(pv, &pg->md.pv_list, pv_list) {
		va = pv->pv_va;
		pm = pv->pv_pmap;
		oflags = pv->pv_flags;

		if (!(oflags & maskbits)) {
			if ((maskbits & PVF_WRITE) && (pv->pv_flags & PVF_NC)) {
				/* It is safe to re-enable caching here. */
				PMAP_LOCK(pm);
				l2b = pmap_get_l2_bucket(pm, va);
				ptep = &l2b->l2b_kva[l2pte_index(va)];
				*ptep |= pte_l2_s_cache_mode;
				PTE_SYNC(ptep);
				PMAP_UNLOCK(pm);
				pv->pv_flags &= ~(PVF_NC | PVF_MWC);

			}
			continue;
		}
		pv->pv_flags &= ~maskbits;

		PMAP_LOCK(pm);

		l2b = pmap_get_l2_bucket(pm, va);

		ptep = &l2b->l2b_kva[l2pte_index(va)];
		npte = opte = *ptep;

		if (maskbits & (PVF_WRITE|PVF_MOD)) {
			if ((pv->pv_flags & PVF_NC)) {
				/*
				 * Entry is not cacheable:
				 *
				 * Don't turn caching on again if this is a
				 * modified emulation. This would be
				 * inconsistent with the settings created by
				 * pmap_fix_cache(). Otherwise, it's safe
				 * to re-enable caching.
				 *
				 * There's no need to call pmap_fix_cache()
				 * here: all pages are losing their write
				 * permission.
				 */
				if (maskbits & PVF_WRITE) {
					npte |= pte_l2_s_cache_mode;
					pv->pv_flags &= ~(PVF_NC | PVF_MWC);
				}
			} else
			if (opte & L2_S_PROT_W) {
				vm_page_dirty(pg);
				/*
				 * Entry is writable/cacheable: check if the
				 * pmap is current; if so, flush the entry,
				 * otherwise it won't be in the cache.
				 */
				if (PV_BEEN_EXECD(oflags))
					pmap_idcache_wbinv_range(pm, pv->pv_va,
					    PAGE_SIZE);
				else
				if (PV_BEEN_REFD(oflags))
					pmap_dcache_wb_range(pm, pv->pv_va,
					    PAGE_SIZE,
					    (maskbits & PVF_REF) ? TRUE : FALSE,
					    FALSE);
			}

			/* make the pte read only */
			npte &= ~L2_S_PROT_W;
		}

		if (maskbits & PVF_REF) {
			if ((pv->pv_flags & PVF_NC) == 0 &&
			    (maskbits & (PVF_WRITE|PVF_MOD)) == 0) {
				/*
				 * Check npte here; we may have already
				 * done the wbinv above, and the validity
				 * of the PTE is the same for opte and
				 * npte.
				 */
				if (npte & L2_S_PROT_W) {
					if (PV_BEEN_EXECD(oflags))
						pmap_idcache_wbinv_range(pm,
						    pv->pv_va, PAGE_SIZE);
					else
					if (PV_BEEN_REFD(oflags))
						pmap_dcache_wb_range(pm,
						    pv->pv_va, PAGE_SIZE,
						    TRUE, FALSE);
				} else
				if ((npte & L2_TYPE_MASK) != L2_TYPE_INV) {
					/* XXXJRT need idcache_inv_range */
					if (PV_BEEN_EXECD(oflags))
						pmap_idcache_wbinv_range(pm,
						    pv->pv_va, PAGE_SIZE);
					else
					if (PV_BEEN_REFD(oflags))
						pmap_dcache_wb_range(pm,
						    pv->pv_va, PAGE_SIZE,
						    TRUE, TRUE);
				}
			}

			/*
			 * Make the PTE invalid so that we will take a
			 * page fault the next time the mapping is
			 * referenced.
			 */
			npte &= ~L2_TYPE_MASK;
			npte |= L2_TYPE_INV;
		}

		if (npte != opte) {
			count++;
			*ptep = npte;
			PTE_SYNC(ptep);
			/* Flush the TLB entry if a current pmap. */
			if (PV_BEEN_EXECD(oflags))
				pmap_tlb_flushID_SE(pm, pv->pv_va);
			else
			if (PV_BEEN_REFD(oflags))
				pmap_tlb_flushD_SE(pm, pv->pv_va);
		}

		PMAP_UNLOCK(pm);

	}

	if (maskbits & PVF_WRITE)
		vm_page_flag_clear(pg, PG_WRITEABLE);
	vm_page_unlock_queues();
	return (count);
}

/*
 * main pv_entry manipulation functions:
 *   pmap_enter_pv: enter a mapping onto a vm_page list
 *   pmap_remove_pv: remove a mapping from a vm_page list
 *
 * NOTE: pmap_enter_pv expects to lock the pvh itself
 *       pmap_remove_pv expects the caller to lock the pvh before calling
 */

/*
 * pmap_enter_pv: enter a mapping onto a vm_page list
 *
 * => caller should hold the proper lock on pmap_main_lock
 * => caller should have pmap locked
 * => we will gain the lock on the vm_page and allocate the new pv_entry
 * => caller should adjust ptp's wire_count before calling
 * => caller should not adjust pmap's wire_count
 */
static void
pmap_enter_pv(struct vm_page *pg, struct pv_entry *pve, pmap_t pm,
    vm_offset_t va, u_int flags)
{

	int km;

	mtx_assert(&vm_page_queue_mtx, MA_OWNED);

	if (pg->md.pv_kva) {
		/* PMAP_ASSERT_LOCKED(pmap_kernel()); */
		pve->pv_pmap = pmap_kernel();
		pve->pv_va = pg->md.pv_kva;
		pve->pv_flags = PVF_WRITE | PVF_UNMAN;
		pg->md.pv_kva = 0;

		if (!(km = PMAP_OWNED(pmap_kernel())))
			PMAP_LOCK(pmap_kernel());
		TAILQ_INSERT_HEAD(&pg->md.pv_list, pve, pv_list);
		TAILQ_INSERT_HEAD(&pve->pv_pmap->pm_pvlist, pve, pv_plist);
		PMAP_UNLOCK(pmap_kernel());
		vm_page_unlock_queues();
		if ((pve = pmap_get_pv_entry()) == NULL)
			panic("pmap_kenter_internal: no pv entries");
		vm_page_lock_queues();
		if (km)
			PMAP_LOCK(pmap_kernel());
	}

	PMAP_ASSERT_LOCKED(pm);
	pve->pv_pmap = pm;
	pve->pv_va = va;
	pve->pv_flags = flags;

	TAILQ_INSERT_HEAD(&pg->md.pv_list, pve, pv_list);
	TAILQ_INSERT_HEAD(&pm->pm_pvlist, pve, pv_plist);
	pg->md.pvh_attrs |= flags & (PVF_REF | PVF_MOD);
	if (pve->pv_flags & PVF_WIRED)
		++pm->pm_stats.wired_count;
	vm_page_flag_set(pg, PG_REFERENCED);
}
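/*
 * Illustrative sketch (not part of the pmap proper): every managed page
 * carries a list of its mappings, so "who maps this page?" is a simple
 * walk.  Assuming a hypothetical helper, counting the writable mappings
 * of a page would look like:
 *
 *	static int
 *	count_writable_mappings(struct vm_page *pg)
 *	{
 *		struct pv_entry *pv;
 *		int n = 0;
 *
 *		mtx_assert(&vm_page_queue_mtx, MA_OWNED);
 *		TAILQ_FOREACH(pv, &pg->md.pv_list, pv_list)
 *			if (pv->pv_flags & PVF_WRITE)
 *				n++;
 *		return (n);
 *	}
 */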
/*
 *
 * pmap_find_pv: Find a pv entry
 *
 * => caller should hold lock on vm_page
 */
static PMAP_INLINE struct pv_entry *
pmap_find_pv(struct vm_page *pg, pmap_t pm, vm_offset_t va)
{
	struct pv_entry *pv;

	mtx_assert(&vm_page_queue_mtx, MA_OWNED);
	TAILQ_FOREACH(pv, &pg->md.pv_list, pv_list)
	    if (pm == pv->pv_pmap && va == pv->pv_va)
		    break;
	return (pv);
}

/*
 * vector_page_setprot:
 *
 *	Manipulate the protection of the vector page.
 */
void
vector_page_setprot(int prot)
{
	struct l2_bucket *l2b;
	pt_entry_t *ptep;

	l2b = pmap_get_l2_bucket(pmap_kernel(), vector_page);

	ptep = &l2b->l2b_kva[l2pte_index(vector_page)];

	*ptep = (*ptep & ~L1_S_PROT_MASK) | L2_S_PROT(PTE_KERNEL, prot);
	PTE_SYNC(ptep);
	cpu_tlb_flushD_SE(vector_page);
	cpu_cpwait();
}
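/*
 * Hypothetical usage sketch (not taken from the callers in this file):
 * trap-setup code could toggle the vector page around an update, e.g.
 *
 *	vector_page_setprot(VM_PROT_READ | VM_PROT_WRITE);
 *	... patch exception vectors ...
 *	vector_page_setprot(VM_PROT_READ);
 *
 * The PTE rewrite plus the single-entry TLB flush above make the new
 * protection visible before the function returns.
 */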
/*
 * pmap_remove_pv: try to remove a mapping from a pv_list
 *
 * => caller should hold proper lock on pmap_main_lock
 * => pmap should be locked
 * => caller should hold lock on vm_page [so that attrs can be adjusted]
 * => caller should adjust ptp's wire_count and free PTP if needed
 * => caller should NOT adjust pmap's wire_count
 * => we return the removed pve
 */

static void
pmap_nuke_pv(struct vm_page *pg, pmap_t pm, struct pv_entry *pve)
{

	struct pv_entry *pv;
	mtx_assert(&vm_page_queue_mtx, MA_OWNED);
	PMAP_ASSERT_LOCKED(pm);
	TAILQ_REMOVE(&pg->md.pv_list, pve, pv_list);
	TAILQ_REMOVE(&pm->pm_pvlist, pve, pv_plist);
	if (pve->pv_flags & PVF_WIRED)
		--pm->pm_stats.wired_count;
	if (pg->md.pvh_attrs & PVF_MOD)
		vm_page_dirty(pg);
	if (TAILQ_FIRST(&pg->md.pv_list) == NULL)
		pg->md.pvh_attrs &= ~PVF_REF;
	else
		vm_page_flag_set(pg, PG_REFERENCED);
	if ((pve->pv_flags & PVF_NC) && ((pm == pmap_kernel()) ||
	    (pve->pv_flags & PVF_WRITE) || !(pve->pv_flags & PVF_MWC)))
		pmap_fix_cache(pg, pm, 0);
	else if (pve->pv_flags & PVF_WRITE) {
		TAILQ_FOREACH(pve, &pg->md.pv_list, pv_list)
		    if (pve->pv_flags & PVF_WRITE)
			    break;
		if (!pve) {
			pg->md.pvh_attrs &= ~PVF_MOD;
			vm_page_flag_clear(pg, PG_WRITEABLE);
		}
	}
	pv = TAILQ_FIRST(&pg->md.pv_list);
	if (pv != NULL && (pv->pv_flags & PVF_UNMAN) &&
	    TAILQ_NEXT(pv, pv_list) == NULL) {
		pm = kernel_pmap;
		pg->md.pv_kva = pv->pv_va;
		/* a recursive pmap_nuke_pv */
		TAILQ_REMOVE(&pg->md.pv_list, pv, pv_list);
		TAILQ_REMOVE(&pm->pm_pvlist, pv, pv_plist);
		if (pv->pv_flags & PVF_WIRED)
			--pm->pm_stats.wired_count;
		pg->md.pvh_attrs &= ~PVF_REF;
		pg->md.pvh_attrs &= ~PVF_MOD;
		vm_page_flag_clear(pg, PG_WRITEABLE);
		pmap_free_pv_entry(pv);
	}
}

static struct pv_entry *
pmap_remove_pv(struct vm_page *pg, pmap_t pm, vm_offset_t va)
{
	struct pv_entry *pve;

	mtx_assert(&vm_page_queue_mtx, MA_OWNED);
	pve = TAILQ_FIRST(&pg->md.pv_list);

	while (pve) {
		if (pve->pv_pmap == pm && pve->pv_va == va) {	/* match? */
			pmap_nuke_pv(pg, pm, pve);
			break;
		}
		pve = TAILQ_NEXT(pve, pv_list);
	}

	if (pve == NULL && pg->md.pv_kva == va)
		pg->md.pv_kva = 0;

	return(pve);				/* return removed pve */
}
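/*
 * Hedged usage sketch: callers tearing down a single mapping look the
 * page up first and then let pmap_remove_pv() unhook the pv entry, e.g.
 *
 *	pg = PHYS_TO_VM_PAGE(pa);
 *	if (pg != NULL && (pve = pmap_remove_pv(pg, pm, va)) != NULL)
 *		pmap_free_pv_entry(pve);
 *
 * A NULL return means the va was not on the page's pv list (possibly it
 * was only recorded in pg->md.pv_kva, which the function then clears).
 */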
/*
 *
 * pmap_modify_pv: Update pv flags
 *
 * => caller should hold lock on vm_page [so that attrs can be adjusted]
 * => caller should NOT adjust pmap's wire_count
 * => we return the old flags
 *
 * Modify a physical-virtual mapping in the pv table
 */
static u_int
pmap_modify_pv(struct vm_page *pg, pmap_t pm, vm_offset_t va,
    u_int clr_mask, u_int set_mask)
{
	struct pv_entry *npv;
	u_int flags, oflags;

	PMAP_ASSERT_LOCKED(pm);
	mtx_assert(&vm_page_queue_mtx, MA_OWNED);
	if ((npv = pmap_find_pv(pg, pm, va)) == NULL)
		return (0);

	/*
	 * There is at least one VA mapping this page.
	 */

	if (clr_mask & (PVF_REF | PVF_MOD))
		pg->md.pvh_attrs |= set_mask & (PVF_REF | PVF_MOD);

	oflags = npv->pv_flags;
	npv->pv_flags = flags = (oflags & ~clr_mask) | set_mask;

	if ((flags ^ oflags) & PVF_WIRED) {
		if (flags & PVF_WIRED)
			++pm->pm_stats.wired_count;
		else
			--pm->pm_stats.wired_count;
	}

	if ((flags ^ oflags) & PVF_WRITE)
		pmap_fix_cache(pg, pm, 0);

	return (oflags);
}

/* Function to set the debug level of the pmap code */
#ifdef PMAP_DEBUG
void
pmap_debug(int level)
{
	pmap_debug_level = level;
	dprintf("pmap_debug: level=%d\n", pmap_debug_level);
}
#endif /* PMAP_DEBUG */

void
pmap_pinit0(struct pmap *pmap)
{
	PDEBUG(1, printf("pmap_pinit0: pmap = %08x\n", (u_int32_t) pmap));

	dprintf("pmap_pinit0: pmap = %08x, pm_pdir = %08x\n",
	    (u_int32_t) pmap, (u_int32_t) pmap->pm_pdir);
	bcopy(kernel_pmap, pmap, sizeof(*pmap));
	bzero(&pmap->pm_mtx, sizeof(pmap->pm_mtx));
	PMAP_LOCK_INIT(pmap);
}

/*
 * Initialize a vm_page's machine-dependent fields.
 */
void
pmap_page_init(vm_page_t m)
{

	TAILQ_INIT(&m->md.pv_list);
}
/*
 * Initialize the pmap module.
 * Called by vm_init, to initialize any structures that the pmap
 * system needs to map virtual memory.
 */
void
pmap_init(void)
{
	int shpgperproc = PMAP_SHPGPERPROC;

	PDEBUG(1, printf("pmap_init: phys_start = %08x\n", PHYSADDR));

	/*
	 * init the pv free list
	 */
	pvzone = uma_zcreate("PV ENTRY", sizeof (struct pv_entry), NULL, NULL,
	    NULL, NULL, UMA_ALIGN_PTR, UMA_ZONE_VM | UMA_ZONE_NOFREE);
	/*
	 * Now it is safe to enable pv_table recording.
	 */
	PDEBUG(1, printf("pmap_init: done!\n"));

	TUNABLE_INT_FETCH("vm.pmap.shpgperproc", &shpgperproc);

	pv_entry_max = shpgperproc * maxproc + cnt.v_page_count;
	pv_entry_high_water = 9 * (pv_entry_max / 10);
	l2zone = uma_zcreate("L2 Table", L2_TABLE_SIZE_REAL, pmap_l2ptp_ctor,
	    NULL, NULL, NULL, UMA_ALIGN_PTR, UMA_ZONE_VM | UMA_ZONE_NOFREE);
	l2table_zone = uma_zcreate("L2 Table", sizeof(struct l2_dtable),
	    NULL, NULL, NULL, NULL, UMA_ALIGN_PTR,
	    UMA_ZONE_VM | UMA_ZONE_NOFREE);

	uma_zone_set_obj(pvzone, &pvzone_obj, pv_entry_max);
}

int
pmap_fault_fixup(pmap_t pm, vm_offset_t va, vm_prot_t ftype, int user)
{
	struct l2_dtable *l2;
	struct l2_bucket *l2b;
	pd_entry_t *pl1pd, l1pd;
	pt_entry_t *ptep, pte;
	vm_paddr_t pa;
	u_int l1idx;
	int rv = 0;

	l1idx = L1_IDX(va);
	vm_page_lock_queues();
	PMAP_LOCK(pm);

	/*
	 * If there is no l2_dtable for this address, then the process
	 * has no business accessing it.
	 *
	 * Note: This will catch userland processes trying to access
	 * kernel addresses.
	 */
	l2 = pm->pm_l2[L2_IDX(l1idx)];
	if (l2 == NULL)
		goto out;

	/*
	 * Likewise if there is no L2 descriptor table
	 */
	l2b = &l2->l2_bucket[L2_BUCKET(l1idx)];
	if (l2b->l2b_kva == NULL)
		goto out;

	/*
	 * Check the PTE itself.
	 */
	ptep = &l2b->l2b_kva[l2pte_index(va)];
	pte = *ptep;
	if (pte == 0)
		goto out;

	/*
	 * Catch a userland access to the vector page mapped at 0x0
	 */
	if (user && (pte & L2_S_PROT_U) == 0)
		goto out;
	if (va == vector_page)
		goto out;

	pa = l2pte_pa(pte);

	if ((ftype & VM_PROT_WRITE) && (pte & L2_S_PROT_W) == 0) {
		/*
		 * This looks like a good candidate for "page modified"
		 * emulation...
		 */
		struct pv_entry *pv;
		struct vm_page *pg;

		/* Extract the physical address of the page */
		if ((pg = PHYS_TO_VM_PAGE(pa)) == NULL) {
			goto out;
		}
		/* Get the current flags for this page. */

		pv = pmap_find_pv(pg, pm, va);
		if (pv == NULL) {
			goto out;
		}

		/*
		 * Do the flags say this page is writable? If not then it
		 * is a genuine write fault. If yes then the write fault is
		 * our fault as we did not reflect the write access in the
		 * PTE. Now that we know a write has occurred, we can correct
		 * this and also set the modified bit.
		 */
		if ((pv->pv_flags & PVF_WRITE) == 0) {
			goto out;
		}

		pg->md.pvh_attrs |= PVF_REF | PVF_MOD;
		vm_page_dirty(pg);
		pv->pv_flags |= PVF_REF | PVF_MOD;

		/*
		 * Re-enable write permissions for the page. No need to call
		 * pmap_fix_cache(), since this is just a
		 * modified-emulation fault, and the PVF_WRITE bit isn't
		 * changing. We've already set the cacheable bits based on
		 * the assumption that we can write to this page.
		 */
		*ptep = (pte & ~L2_TYPE_MASK) | L2_S_PROTO | L2_S_PROT_W;
		PTE_SYNC(ptep);
		rv = 1;
	} else
	if ((pte & L2_TYPE_MASK) == L2_TYPE_INV) {
		/*
		 * This looks like a good candidate for "page referenced"
		 * emulation.
		 */
		struct pv_entry *pv;
		struct vm_page *pg;

		/* Extract the physical address of the page */
		if ((pg = PHYS_TO_VM_PAGE(pa)) == NULL)
			goto out;
		/* Get the current flags for this page. */

		pv = pmap_find_pv(pg, pm, va);
		if (pv == NULL)
			goto out;

		pg->md.pvh_attrs |= PVF_REF;
		pv->pv_flags |= PVF_REF;


		*ptep = (pte & ~L2_TYPE_MASK) | L2_S_PROTO;
		PTE_SYNC(ptep);
		rv = 1;
	}

	/*
	 * We know there is a valid mapping here, so simply
	 * fix up the L1 if necessary.
	 */
	pl1pd = &pm->pm_l1->l1_kva[l1idx];
	l1pd = l2b->l2b_phys | L1_C_DOM(pm->pm_domain) | L1_C_PROTO;
	if (*pl1pd != l1pd) {
		*pl1pd = l1pd;
		PTE_SYNC(pl1pd);
		rv = 1;
	}

#ifdef CPU_SA110
	/*
	 * There are bugs in the rev K SA110. This is a check for one
	 * of them.
	 */
	if (rv == 0 && curcpu()->ci_arm_cputype == CPU_ID_SA110 &&
	    curcpu()->ci_arm_cpurev < 3) {
		/* Always current pmap */
		if (l2pte_valid(pte)) {
			extern int kernel_debug;
			if (kernel_debug & 1) {
				struct proc *p = curlwp->l_proc;
				printf("prefetch_abort: page is already "
				    "mapped - pte=%p *pte=%08x\n", ptep, pte);
				printf("prefetch_abort: pc=%08lx proc=%p "
				    "process=%s\n", va, p, p->p_comm);
				printf("prefetch_abort: far=%08x fs=%x\n",
				    cpu_faultaddress(), cpu_faultstatus());
			}
#ifdef DDB
			if (kernel_debug & 2)
				Debugger();
#endif
			rv = 1;
		}
	}
#endif /* CPU_SA110 */

#ifdef DEBUG
	/*
	 * If 'rv == 0' at this point, it generally indicates that there is a
	 * stale TLB entry for the faulting address. This happens when two or
	 * more processes are sharing an L1. Since we don't flush the TLB on
	 * a context switch between such processes, we can take domain faults
	 * for mappings which exist at the same VA in both processes. EVEN IF
	 * WE'VE RECENTLY FIXED UP THE CORRESPONDING L1 in pmap_enter(), for
	 * example.
	 *
	 * This is extremely likely to happen if pmap_enter() updated the L1
	 * entry for a recently entered mapping. In this case, the TLB is
	 * flushed for the new mapping, but there may still be TLB entries for
	 * other mappings belonging to other processes in the 1MB range
	 * covered by the L1 entry.
	 *
	 * Since 'rv == 0', we know that the L1 already contains the correct
	 * value, so the fault must be due to a stale TLB entry.
	 *
	 * Since we always need to flush the TLB anyway in the case where we
	 * fixed up the L1, or frobbed the L2 PTE, we effectively deal with
	 * stale TLB entries dynamically.
	 *
	 * However, the above condition can ONLY happen if the current L1 is
	 * being shared. If it happens when the L1 is unshared, it indicates
	 * that other parts of the pmap are not doing their job WRT managing
	 * the TLB.
	 */
	if (rv == 0 && pm->pm_l1->l1_domain_use_count == 1) {
		extern int last_fault_code;
		printf("fixup: pm %p, va 0x%lx, ftype %d - nothing to do!\n",
		    pm, va, ftype);
		printf("fixup: l2 %p, l2b %p, ptep %p, pl1pd %p\n",
		    l2, l2b, ptep, pl1pd);
		printf("fixup: pte 0x%x, l1pd 0x%x, last code 0x%x\n",
		    pte, l1pd, last_fault_code);
#ifdef DDB
		Debugger();
#endif
	}
#endif

	cpu_tlb_flushID_SE(va);
	cpu_cpwait();

	rv = 1;

out:
	vm_page_unlock_queues();
	PMAP_UNLOCK(pm);
	return (rv);
}

void
pmap_postinit(void)
{
	struct l2_bucket *l2b;
	struct l1_ttable *l1;
	pd_entry_t *pl1pt;
	pt_entry_t *ptep, pte;
	vm_offset_t va, eva;
	u_int loop, needed;

	needed = (maxproc / PMAP_DOMAINS) + ((maxproc % PMAP_DOMAINS) ? 1 : 0);
	needed -= 1;
	l1 = malloc(sizeof(*l1) * needed, M_VMPMAP, M_WAITOK);

	for (loop = 0; loop < needed; loop++, l1++) {
		/* Allocate a L1 page table */
		va = (vm_offset_t)contigmalloc(L1_TABLE_SIZE, M_VMPMAP, 0, 0x0,
		    0xffffffff, L1_TABLE_SIZE, 0);

		if (va == 0)
			panic("Cannot allocate L1 KVM");

		eva = va + L1_TABLE_SIZE;
		pl1pt = (pd_entry_t *)va;

		while (va < eva) {
			l2b = pmap_get_l2_bucket(pmap_kernel(), va);
			ptep = &l2b->l2b_kva[l2pte_index(va)];
			pte = *ptep;
			pte = (pte & ~L2_S_CACHE_MASK) | pte_l2_s_cache_mode_pt;
			*ptep = pte;
			PTE_SYNC(ptep);
			cpu_tlb_flushD_SE(va);

			va += PAGE_SIZE;
		}
		pmap_init_l1(l1, pl1pt);
	}


#ifdef DEBUG
	printf("pmap_postinit: Allocated %d static L1 descriptor tables\n",
	    needed);
#endif
}
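/*
 * A worked instance of the sizing arithmetic above (illustrative; the
 * real numbers depend on the kernel configuration): with maxproc == 100
 * and, say, PMAP_DOMAINS == 15, pmap_postinit() computes
 *
 *	needed = 100 / 15 + ((100 % 15) ? 1 : 0) - 1 = 6 + 1 - 1 = 6
 *
 * i.e. six additional L1 tables beyond the bootstrap one, since up to
 * PMAP_DOMAINS pmaps can share a single L1 by using distinct domains.
 */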
/*
 * This is used to stuff certain critical values into the PCB where they
 * can be accessed quickly from cpu_switch() et al.
 */
void
pmap_set_pcb_pagedir(pmap_t pm, struct pcb *pcb)
{
	struct l2_bucket *l2b;

	pcb->pcb_pagedir = pm->pm_l1->l1_physaddr;
	pcb->pcb_dacr = (DOMAIN_CLIENT << (PMAP_DOMAIN_KERNEL * 2)) |
	    (DOMAIN_CLIENT << (pm->pm_domain * 2));

	if (vector_page < KERNBASE) {
		pcb->pcb_pl1vec = &pm->pm_l1->l1_kva[L1_IDX(vector_page)];
		l2b = pmap_get_l2_bucket(pm, vector_page);
		pcb->pcb_l1vec = l2b->l2b_phys | L1_C_PROTO |
		    L1_C_DOM(pm->pm_domain) | L1_C_DOM(PMAP_DOMAIN_KERNEL);
	} else
		pcb->pcb_pl1vec = NULL;
}
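/*
 * Illustrative DACR sketch (assuming DOMAIN_CLIENT == 1, as on ARM, and
 * PMAP_DOMAIN_KERNEL == 0): each domain occupies two bits of the Domain
 * Access Control Register, so for a pmap using, say, domain 5 the value
 * computed above is
 *
 *	(1 << (0 * 2)) | (1 << (5 * 2)) == 0x00000401
 *
 * i.e. only the kernel's domain and the pmap's own domain are accessible
 * (with permission checks); accesses through any other domain fault.
 */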
void
pmap_activate(struct thread *td)
{
	pmap_t pm;
	struct pcb *pcb;

	pm = vmspace_pmap(td->td_proc->p_vmspace);
	pcb = td->td_pcb;

	critical_enter();
	pmap_set_pcb_pagedir(pm, pcb);

	if (td == curthread) {
		u_int cur_dacr, cur_ttb;

		__asm __volatile("mrc p15, 0, %0, c2, c0, 0" : "=r"(cur_ttb));
		__asm __volatile("mrc p15, 0, %0, c3, c0, 0" : "=r"(cur_dacr));

		cur_ttb &= ~(L1_TABLE_SIZE - 1);

		if (cur_ttb == (u_int)pcb->pcb_pagedir &&
		    cur_dacr == pcb->pcb_dacr) {
			/*
			 * No need to switch address spaces.
			 */
			critical_exit();
			return;
		}


		/*
		 * We MUST, I repeat, MUST fix up the L1 entry corresponding
		 * to 'vector_page' in the incoming L1 table before switching
		 * to it otherwise subsequent interrupts/exceptions (including
		 * domain faults!) will jump into hyperspace.
		 */
		if (pcb->pcb_pl1vec) {

			*pcb->pcb_pl1vec = pcb->pcb_l1vec;
			/*
			 * Don't need to PTE_SYNC() at this point since
			 * cpu_setttb() is about to flush both the cache
			 * and the TLB.
			 */
		}

		cpu_domains(pcb->pcb_dacr);
		cpu_setttb(pcb->pcb_pagedir);
	}
	critical_exit();
}

static int
pmap_set_pt_cache_mode(pd_entry_t *kl1, vm_offset_t va)
{
	pd_entry_t *pdep, pde;
	pt_entry_t *ptep, pte;
	vm_offset_t pa;
	int rv = 0;

	/*
	 * Make sure the descriptor itself has the correct cache mode
	 */
	pdep = &kl1[L1_IDX(va)];
	pde = *pdep;

	if (l1pte_section_p(pde)) {
		if ((pde & L1_S_CACHE_MASK) != pte_l1_s_cache_mode_pt) {
			*pdep = (pde & ~L1_S_CACHE_MASK) |
			    pte_l1_s_cache_mode_pt;
			PTE_SYNC(pdep);
			cpu_dcache_wbinv_range((vm_offset_t)pdep,
			    sizeof(*pdep));
			cpu_l2cache_wbinv_range((vm_offset_t)pdep,
			    sizeof(*pdep));
			rv = 1;
		}
	} else {
		pa = (vm_paddr_t)(pde & L1_C_ADDR_MASK);
		ptep = (pt_entry_t *)kernel_pt_lookup(pa);
		if (ptep == NULL)
			panic("pmap_bootstrap: No L2 for L2 @ va %p\n", ptep);

		ptep = &ptep[l2pte_index(va)];
		pte = *ptep;
		if ((pte & L2_S_CACHE_MASK) != pte_l2_s_cache_mode_pt) {
			*ptep = (pte & ~L2_S_CACHE_MASK) |
			    pte_l2_s_cache_mode_pt;
			PTE_SYNC(ptep);
			cpu_dcache_wbinv_range((vm_offset_t)ptep,
			    sizeof(*ptep));
			cpu_l2cache_wbinv_range((vm_offset_t)ptep,
			    sizeof(*ptep));
			rv = 1;
		}
	}

	return (rv);
}

static void
pmap_alloc_specials(vm_offset_t *availp, int pages, vm_offset_t *vap,
    pt_entry_t **ptep)
{
	vm_offset_t va = *availp;
	struct l2_bucket *l2b;

	if (ptep) {
		l2b = pmap_get_l2_bucket(pmap_kernel(), va);
		if (l2b == NULL)
			panic("pmap_alloc_specials: no l2b for 0x%x", va);

		*ptep = &l2b->l2b_kva[l2pte_index(va)];
	}

	*vap = va;
	*availp = va + (PAGE_SIZE * pages);
}
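/*
 * Hedged usage sketch: pmap_alloc_specials() simply carves "pages" pages
 * of KVA out of *availp and optionally hands back the PTE slot of the
 * first page, e.g. (hypothetical variables):
 *
 *	vm_offset_t scratch_va;
 *	pt_entry_t *scratch_pte;
 *
 *	pmap_alloc_specials(&virtual_avail, 1, &scratch_va, &scratch_pte);
 *
 * Afterwards virtual_avail has advanced by PAGE_SIZE and scratch_pte
 * points at the (still unset) PTE that will map scratch_va.
 */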
/*
 * Bootstrap the system enough to run with virtual memory.
 *
 * On the arm this is called after mapping has already been enabled
 * and just syncs the pmap module with what has already been done.
 * [We can't call it easily with mapping off since the kernel is not
 * mapped with PA == VA, hence we would have to relocate every address
 * from the linked base (virtual) address "KERNBASE" to the actual
 * (physical) address starting relative to 0]
 */
#define PMAP_STATIC_L2_SIZE 16
#ifdef ARM_USE_SMALL_ALLOC
extern struct mtx smallalloc_mtx;
#endif

void
pmap_bootstrap(vm_offset_t firstaddr, vm_offset_t lastaddr, struct pv_addr *l1pt)
{
	static struct l1_ttable static_l1;
	static struct l2_dtable static_l2[PMAP_STATIC_L2_SIZE];
	struct l1_ttable *l1 = &static_l1;
	struct l2_dtable *l2;
	struct l2_bucket *l2b;
	pd_entry_t pde;
	pd_entry_t *kernel_l1pt = (pd_entry_t *)l1pt->pv_va;
	pt_entry_t *ptep;
	vm_paddr_t pa;
	vm_offset_t va;
	vm_size_t size;
	int l1idx, l2idx, l2next = 0;

	PDEBUG(1, printf("firstaddr = %08x, lastaddr = %08x\n",
	    firstaddr, lastaddr));

	virtual_avail = firstaddr;
	kernel_pmap->pm_l1 = l1;
	kernel_l1pa = l1pt->pv_pa;

	/*
	 * Scan the L1 translation table created by initarm() and create
	 * the required metadata for all valid mappings found in it.
	 */
	for (l1idx = 0; l1idx < (L1_TABLE_SIZE / sizeof(pd_entry_t)); l1idx++) {
		pde = kernel_l1pt[l1idx];

		/*
		 * We're only interested in Coarse mappings.
		 * pmap_extract() can deal with section mappings without
		 * recourse to checking L2 metadata.
		 */
		if ((pde & L1_TYPE_MASK) != L1_TYPE_C)
			continue;

		/*
		 * Lookup the KVA of this L2 descriptor table
		 */
		pa = (vm_paddr_t)(pde & L1_C_ADDR_MASK);
		ptep = (pt_entry_t *)kernel_pt_lookup(pa);

		if (ptep == NULL) {
			panic("pmap_bootstrap: No L2 for va 0x%x, pa 0x%lx",
			    (u_int)l1idx << L1_S_SHIFT, (long unsigned int)pa);
		}

		/*
		 * Fetch the associated L2 metadata structure.
		 * Allocate a new one if necessary.
		 */
		if ((l2 = kernel_pmap->pm_l2[L2_IDX(l1idx)]) == NULL) {
			if (l2next == PMAP_STATIC_L2_SIZE)
				panic("pmap_bootstrap: out of static L2s");
			kernel_pmap->pm_l2[L2_IDX(l1idx)] = l2 =
			    &static_l2[l2next++];
		}

		/*
		 * One more L1 slot tracked...
		 */
		l2->l2_occupancy++;

		/*
		 * Fill in the details of the L2 descriptor in the
		 * appropriate bucket.
		 */
		l2b = &l2->l2_bucket[L2_BUCKET(l1idx)];
		l2b->l2b_kva = ptep;
		l2b->l2b_phys = pa;
		l2b->l2b_l1idx = l1idx;

		/*
		 * Establish an initial occupancy count for this descriptor
		 */
		for (l2idx = 0;
		    l2idx < (L2_TABLE_SIZE_REAL / sizeof(pt_entry_t));
		    l2idx++) {
			if ((ptep[l2idx] & L2_TYPE_MASK) != L2_TYPE_INV) {
				l2b->l2b_occupancy++;
			}
		}

		/*
		 * Make sure the descriptor itself has the correct cache mode.
		 * If not, fix it, but whine about the problem. Port-meisters
		 * should consider this a clue to fix up their initarm()
		 * function. :)
		 */
		if (pmap_set_pt_cache_mode(kernel_l1pt, (vm_offset_t)ptep)) {
			printf("pmap_bootstrap: WARNING! wrong cache mode for "
			    "L2 pte @ %p\n", ptep);
		}
	}


	/*
	 * Ensure the primary (kernel) L1 has the correct cache mode for
	 * a page table. Bitch if it is not correctly set.
	 */
	for (va = (vm_offset_t)kernel_l1pt;
	    va < ((vm_offset_t)kernel_l1pt + L1_TABLE_SIZE); va += PAGE_SIZE) {
		if (pmap_set_pt_cache_mode(kernel_l1pt, va))
			printf("pmap_bootstrap: WARNING! wrong cache mode for "
			    "primary L1 @ 0x%x\n", va);
	}

	cpu_dcache_wbinv_all();
	cpu_l2cache_wbinv_all();
	cpu_tlb_flushID();
	cpu_cpwait();

	PMAP_LOCK_INIT(kernel_pmap);
	kernel_pmap->pm_active = -1;
	kernel_pmap->pm_domain = PMAP_DOMAIN_KERNEL;
	TAILQ_INIT(&kernel_pmap->pm_pvlist);
	/*
	 * Reserve some special page table entries/VA space for temporary
	 * mapping of pages.
	 */
#define SYSMAP(c, p, v, n)						\
    v = (c)va; va += ((n)*PAGE_SIZE); p = pte; pte += (n);

	pmap_alloc_specials(&virtual_avail, 1, &csrcp, &csrc_pte);
	pmap_set_pt_cache_mode(kernel_l1pt, (vm_offset_t)csrc_pte);
	pmap_alloc_specials(&virtual_avail, 1, &cdstp, &cdst_pte);
	pmap_set_pt_cache_mode(kernel_l1pt, (vm_offset_t)cdst_pte);
	size = ((lastaddr - pmap_curmaxkvaddr) + L1_S_OFFSET) / L1_S_SIZE;
	pmap_alloc_specials(&virtual_avail,
	    round_page(size * L2_TABLE_SIZE_REAL) / PAGE_SIZE,
	    &pmap_kernel_l2ptp_kva, NULL);

	size = (size + (L2_BUCKET_SIZE - 1)) / L2_BUCKET_SIZE;
	pmap_alloc_specials(&virtual_avail,
	    round_page(size * sizeof(struct l2_dtable)) / PAGE_SIZE,
	    &pmap_kernel_l2dtable_kva, NULL);

	pmap_alloc_specials(&virtual_avail,
	    1, (vm_offset_t*)&_tmppt, NULL);
	pmap_alloc_specials(&virtual_avail,
	    MAXDUMPPGS, (vm_offset_t *)&crashdumpmap, NULL);
	SLIST_INIT(&l1_list);
	TAILQ_INIT(&l1_lru_list);
	mtx_init(&l1_lru_lock, "l1 list lock", NULL, MTX_DEF);
	pmap_init_l1(l1, kernel_l1pt);
	cpu_dcache_wbinv_all();
	cpu_l2cache_wbinv_all();

	virtual_avail = round_page(virtual_avail);
	virtual_end = lastaddr;
	kernel_vm_end = pmap_curmaxkvaddr;
	arm_nocache_startaddr = lastaddr;
	mtx_init(&cmtx, "TMP mappings mtx", NULL, MTX_DEF);

#ifdef ARM_USE_SMALL_ALLOC
	mtx_init(&smallalloc_mtx, "Small alloc page list", NULL, MTX_DEF);
	arm_init_smallalloc();
#endif
	pmap_set_pcb_pagedir(kernel_pmap, thread0.td_pcb);
}
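/*
 * A worked instance of the sizing computation above (illustrative; the
 * real values depend on the platform's memory map): if lastaddr exceeds
 * pmap_curmaxkvaddr by 256 MB, then with L1_S_SIZE == 1 MB
 *
 *	size = 256 sections
 *
 * so enough KVA is reserved for 256 L2 page tables (L2_TABLE_SIZE_REAL
 * bytes each), and then for size / L2_BUCKET_SIZE l2_dtable structures,
 * both consumed later by pmap_growkernel().
 */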
/***************************************************
 * Pmap allocation/deallocation routines.
 ***************************************************/

/*
 * Release any resources held by the given physical map.
 * Called when a pmap initialized by pmap_pinit is being released.
 * Should only be called if the map contains no valid mappings.
 */
void
pmap_release(pmap_t pmap)
{
	struct pcb *pcb;

	pmap_idcache_wbinv_all(pmap);
	cpu_l2cache_wbinv_all();
	pmap_tlb_flushID(pmap);
	cpu_cpwait();
	if (vector_page < KERNBASE) {
		struct pcb *curpcb = PCPU_GET(curpcb);
		pcb = thread0.td_pcb;
		if (pmap_is_current(pmap)) {
			/*
			 * Frob the L1 entry corresponding to the vector
			 * page so that it contains the kernel pmap's domain
			 * number. This will ensure pmap_remove() does not
			 * pull the current vector page out from under us.
			 */
			critical_enter();
			*pcb->pcb_pl1vec = pcb->pcb_l1vec;
			cpu_domains(pcb->pcb_dacr);
			cpu_setttb(pcb->pcb_pagedir);
			critical_exit();
		}
		pmap_remove(pmap, vector_page, vector_page + PAGE_SIZE);
		/*
		 * Make sure cpu_switch(), et al, DTRT. This is safe to do
		 * since this process has no remaining mappings of its own.
		 */
		curpcb->pcb_pl1vec = pcb->pcb_pl1vec;
		curpcb->pcb_l1vec = pcb->pcb_l1vec;
		curpcb->pcb_dacr = pcb->pcb_dacr;
		curpcb->pcb_pagedir = pcb->pcb_pagedir;

	}
	pmap_free_l1(pmap);
	PMAP_LOCK_DESTROY(pmap);

	dprintf("pmap_release()\n");
}



/*
 * Helper function for pmap_grow_l2_bucket()
 */
static __inline int
pmap_grow_map(vm_offset_t va, pt_entry_t cache_mode, vm_paddr_t *pap)
{
	struct l2_bucket *l2b;
	pt_entry_t *ptep;
	vm_paddr_t pa;
	struct vm_page *pg;

	pg = vm_page_alloc(NULL, 0, VM_ALLOC_NOOBJ | VM_ALLOC_WIRED);
	if (pg == NULL)
		return (1);
	pa = VM_PAGE_TO_PHYS(pg);

	if (pap)
		*pap = pa;

	l2b = pmap_get_l2_bucket(pmap_kernel(), va);

	ptep = &l2b->l2b_kva[l2pte_index(va)];
	*ptep = L2_S_PROTO | pa | cache_mode |
	    L2_S_PROT(PTE_KERNEL, VM_PROT_READ | VM_PROT_WRITE);
	PTE_SYNC(ptep);
	return (0);
}

/*
 * This is the same as pmap_alloc_l2_bucket(), except that it is only
 * used by pmap_growkernel().
 */
static __inline struct l2_bucket *
pmap_grow_l2_bucket(pmap_t pm, vm_offset_t va)
{
	struct l2_dtable *l2;
	struct l2_bucket *l2b;
	struct l1_ttable *l1;
	pd_entry_t *pl1pd;
	u_short l1idx;
	vm_offset_t nva;

	l1idx = L1_IDX(va);

	if ((l2 = pm->pm_l2[L2_IDX(l1idx)]) == NULL) {
		/*
		 * No mapping at this address, as there is
		 * no entry in the L1 table.
		 * Need to allocate a new l2_dtable.
		 */
		nva = pmap_kernel_l2dtable_kva;
		if ((nva & PAGE_MASK) == 0) {
			/*
			 * Need to allocate a backing page
			 */
			if (pmap_grow_map(nva, pte_l2_s_cache_mode, NULL))
				return (NULL);
		}

		l2 = (struct l2_dtable *)nva;
		nva += sizeof(struct l2_dtable);

		if ((nva & PAGE_MASK) < (pmap_kernel_l2dtable_kva &
		    PAGE_MASK)) {
			/*
			 * The new l2_dtable straddles a page boundary.
			 * Map in another page to cover it.
			 */
			if (pmap_grow_map(nva, pte_l2_s_cache_mode, NULL))
				return (NULL);
		}

		pmap_kernel_l2dtable_kva = nva;

		/*
		 * Link it into the parent pmap
		 */
		pm->pm_l2[L2_IDX(l1idx)] = l2;
		memset(l2, 0, sizeof(*l2));
	}

	l2b = &l2->l2_bucket[L2_BUCKET(l1idx)];

	/*
	 * Fetch pointer to the L2 page table associated with the address.
	 */
	if (l2b->l2b_kva == NULL) {
		pt_entry_t *ptep;

		/*
		 * No L2 page table has been allocated. Chances are, this
		 * is because we just allocated the l2_dtable, above.
		 */
		nva = pmap_kernel_l2ptp_kva;
		ptep = (pt_entry_t *)nva;
		if ((nva & PAGE_MASK) == 0) {
			/*
			 * Need to allocate a backing page
			 */
			if (pmap_grow_map(nva, pte_l2_s_cache_mode_pt,
			    &pmap_kernel_l2ptp_phys))
				return (NULL);
			PTE_SYNC_RANGE(ptep, PAGE_SIZE / sizeof(pt_entry_t));
		}
		memset(ptep, 0, L2_TABLE_SIZE_REAL);
		l2->l2_occupancy++;
		l2b->l2b_kva = ptep;
		l2b->l2b_l1idx = l1idx;
		l2b->l2b_phys = pmap_kernel_l2ptp_phys;

		pmap_kernel_l2ptp_kva += L2_TABLE_SIZE_REAL;
		pmap_kernel_l2ptp_phys += L2_TABLE_SIZE_REAL;
	}

	/* Distribute new L1 entry to all other L1s */
	SLIST_FOREACH(l1, &l1_list, l1_link) {
		pl1pd = &l1->l1_kva[L1_IDX(va)];
		*pl1pd = l2b->l2b_phys | L1_C_DOM(PMAP_DOMAIN_KERNEL) |
		    L1_C_PROTO;
		PTE_SYNC(pl1pd);
	}

	return (l2b);
}
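/*
 * Hedged sketch of the straddle test above: with a 4 KB page and a
 * sizeof(struct l2_dtable) that does not divide PAGE_SIZE evenly, a
 * dtable carved out at, say, page offset 0xf80 ends past the page
 * boundary; after nva += sizeof(*l2) the new page offset is smaller
 * than the old one, which is exactly the condition
 *
 *	(nva & PAGE_MASK) < (pmap_kernel_l2dtable_kva & PAGE_MASK)
 *
 * and so triggers mapping one more backing page.
 */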
/*
 * grow the number of kernel page table entries, if needed
 */
void
pmap_growkernel(vm_offset_t addr)
{
	pmap_t kpm = pmap_kernel();

	if (addr <= pmap_curmaxkvaddr)
		return;		/* we are OK */

	/*
	 * whoops!   we need to add kernel PTPs
	 */

	/* Map 1MB at a time */
	for (; pmap_curmaxkvaddr < addr; pmap_curmaxkvaddr += L1_S_SIZE)
		pmap_grow_l2_bucket(kpm, pmap_curmaxkvaddr);

	/*
	 * flush out the cache; expensive, but growkernel will happen so
	 * rarely
	 */
	cpu_dcache_wbinv_all();
	cpu_l2cache_wbinv_all();
	cpu_tlb_flushD();
	cpu_cpwait();
	kernel_vm_end = pmap_curmaxkvaddr;
}


/*
 * Remove all pages from the specified address space; this aids process
 * exit speeds. Also, this code is special cased for the current process
 * only, but can have the more generic (and slightly slower) mode enabled.
 * This is much faster than pmap_remove in the case of running down an
 * entire address space.
 */
void
pmap_remove_pages(pmap_t pmap)
{
	struct pv_entry *pv, *npv;
	struct l2_bucket *l2b = NULL;
	vm_page_t m;
	pt_entry_t *pt;

	vm_page_lock_queues();
	PMAP_LOCK(pmap);
	cpu_idcache_wbinv_all();
	cpu_l2cache_wbinv_all();
	for (pv = TAILQ_FIRST(&pmap->pm_pvlist); pv; pv = npv) {
		if (pv->pv_flags & PVF_WIRED || pv->pv_flags & PVF_UNMAN) {
			/* Cannot remove wired or unmanaged pages now. */
			npv = TAILQ_NEXT(pv, pv_plist);
			continue;
		}
		pmap->pm_stats.resident_count--;
		l2b = pmap_get_l2_bucket(pmap, pv->pv_va);
		KASSERT(l2b != NULL, ("No L2 bucket in pmap_remove_pages"));
		pt = &l2b->l2b_kva[l2pte_index(pv->pv_va)];
		m = PHYS_TO_VM_PAGE(*pt & L2_ADDR_MASK);
#ifdef ARM_USE_SMALL_ALLOC
		KASSERT((vm_offset_t)m >= alloc_firstaddr,
		    ("Trying to access non-existent page va %x pte %x",
		    pv->pv_va, *pt));
#else
		KASSERT((vm_offset_t)m >= KERNBASE,
		    ("Trying to access non-existent page va %x pte %x",
		    pv->pv_va, *pt));
#endif
		*pt = 0;
		PTE_SYNC(pt);
		npv = TAILQ_NEXT(pv, pv_plist);
		pmap_nuke_pv(m, pmap, pv);
		if (TAILQ_EMPTY(&m->md.pv_list))
			vm_page_flag_clear(m, PG_WRITEABLE);
		pmap_free_pv_entry(pv);
		pmap_free_l2_bucket(pmap, l2b, 1);
	}
	vm_page_unlock_queues();
	cpu_tlb_flushID();
	cpu_cpwait();
	PMAP_UNLOCK(pmap);
}
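/*
 * Hedged usage sketch: pmap_growkernel() is driven by the VM layer when
 * kernel_vm_end has to move, e.g. (hypothetical values)
 *
 *	if (kernel_vm_end < new_end)
 *		pmap_growkernel(new_end);
 *
 * Each loop iteration above adds one 1 MB section worth of kernel page
 * table, so growing by 16 MB performs sixteen pmap_grow_l2_bucket()
 * calls followed by a single full cache/TLB flush.
 */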

/***************************************************
 * Low level mapping routines.....
 ***************************************************/

#ifdef ARM_HAVE_SUPERSECTIONS
/* Map a super section into the KVA. */

void
pmap_kenter_supersection(vm_offset_t va, uint64_t pa, int flags)
{
	pd_entry_t pd = L1_S_PROTO | L1_S_SUPERSEC | (pa & L1_SUP_FRAME) |
	    (((pa >> 32) & 0xf) << 20) | L1_S_PROT(PTE_KERNEL,
	    VM_PROT_READ|VM_PROT_WRITE) | L1_S_DOM(PMAP_DOMAIN_KERNEL);
	struct l1_ttable *l1;
	vm_offset_t va0, va_end;

	KASSERT(((va | pa) & L1_SUP_OFFSET) == 0,
	    ("Not a valid super section mapping"));
	if (flags & SECTION_CACHE)
		pd |= pte_l1_s_cache_mode;
	else if (flags & SECTION_PT)
		pd |= pte_l1_s_cache_mode_pt;
	va0 = va & L1_SUP_FRAME;
	va_end = va + L1_SUP_SIZE;
	SLIST_FOREACH(l1, &l1_list, l1_link) {
		va = va0;
		for (; va < va_end; va += L1_S_SIZE) {
			l1->l1_kva[L1_IDX(va)] = pd;
			PTE_SYNC(&l1->l1_kva[L1_IDX(va)]);
		}
	}
}
#endif

/* Map a section into the KVA. */

void
pmap_kenter_section(vm_offset_t va, vm_offset_t pa, int flags)
{
	pd_entry_t pd = L1_S_PROTO | pa | L1_S_PROT(PTE_KERNEL,
	    VM_PROT_READ|VM_PROT_WRITE) | L1_S_DOM(PMAP_DOMAIN_KERNEL);
	struct l1_ttable *l1;

	KASSERT(((va | pa) & L1_S_OFFSET) == 0,
	    ("Not a valid section mapping"));
	if (flags & SECTION_CACHE)
		pd |= pte_l1_s_cache_mode;
	else if (flags & SECTION_PT)
		pd |= pte_l1_s_cache_mode_pt;
	SLIST_FOREACH(l1, &l1_list, l1_link) {
		l1->l1_kva[L1_IDX(va)] = pd;
		PTE_SYNC(&l1->l1_kva[L1_IDX(va)]);
	}
}
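
/*
 * Illustrative sketch (not compiled): a supersection descriptor carries
 * bits 35:32 of a 36-bit physical address in descriptor bits 23:20, which
 * is what the (((pa >> 32) & 0xf) << 20) term above encodes.  For example,
 * with a hypothetical pa just above the 4GB boundary:
 */
#if 0
	uint64_t pa = 0x120000000ULL;		/* bit 32 set */
	pd_entry_t pd;

	pd = L1_S_PROTO | L1_S_SUPERSEC | (pa & L1_SUP_FRAME) |
	    (((pa >> 32) & 0xf) << 20);		/* bits 23:20 get 0x1 */
#endif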
2782129198Scognet */ 2783135641Scognetstatic PMAP_INLINE void 2784135641Scognetpmap_kenter_internal(vm_offset_t va, vm_offset_t pa, int flags) 2785129198Scognet{ 2786129198Scognet struct l2_bucket *l2b; 2787129198Scognet pt_entry_t *pte; 2788129198Scognet pt_entry_t opte; 2789194459Sthompsa struct pv_entry *pve; 2790194459Sthompsa vm_page_t m; 2791194459Sthompsa 2792129198Scognet PDEBUG(1, printf("pmap_kenter: va = %08x, pa = %08x\n", 2793129198Scognet (uint32_t) va, (uint32_t) pa)); 2794129198Scognet 2795129198Scognet 2796129198Scognet l2b = pmap_get_l2_bucket(pmap_kernel(), va); 2797135641Scognet if (l2b == NULL) 2798135641Scognet l2b = pmap_grow_l2_bucket(pmap_kernel(), va); 2799129198Scognet KASSERT(l2b != NULL, ("No L2 Bucket")); 2800129198Scognet pte = &l2b->l2b_kva[l2pte_index(va)]; 2801129198Scognet opte = *pte; 2802129198Scognet PDEBUG(1, printf("pmap_kenter: pte = %08x, opte = %08x, npte = %08x\n", 2803129198Scognet (uint32_t) pte, opte, *pte)); 2804129198Scognet if (l2pte_valid(opte)) { 2805194459Sthompsa pmap_kremove(va); 2806135641Scognet } else { 2807129198Scognet if (opte == 0) 2808129198Scognet l2b->l2b_occupancy++; 2809135641Scognet } 2810129198Scognet *pte = L2_S_PROTO | pa | L2_S_PROT(PTE_KERNEL, 2811135641Scognet VM_PROT_READ | VM_PROT_WRITE); 2812135641Scognet if (flags & KENTER_CACHE) 2813135641Scognet *pte |= pte_l2_s_cache_mode; 2814142570Scognet if (flags & KENTER_USER) 2815142570Scognet *pte |= L2_S_PROT_U; 2816129198Scognet PTE_SYNC(pte); 2817194459Sthompsa 2818194459Sthompsa /* kernel direct mappings can be shared, so use a pv_entry 2819194459Sthompsa * to ensure proper caching. 2820194459Sthompsa * 2821194459Sthompsa * The pvzone is used to delay the recording of kernel 2822194459Sthompsa * mappings until the VM is running. 2823194459Sthompsa * 2824194459Sthompsa * This expects the physical memory to have vm_page_array entry. 
2825194459Sthompsa */ 2826194459Sthompsa if (pvzone != NULL && (m = vm_phys_paddr_to_vm_page(pa))) { 2827194459Sthompsa vm_page_lock_queues(); 2828194459Sthompsa if (!TAILQ_EMPTY(&m->md.pv_list) || m->md.pv_kva) { 2829198341Smarcel /* release vm_page lock for pv_entry UMA */ 2830194459Sthompsa vm_page_unlock_queues(); 2831194459Sthompsa if ((pve = pmap_get_pv_entry()) == NULL) 2832194459Sthompsa panic("pmap_kenter_internal: no pv entries"); 2833194459Sthompsa vm_page_lock_queues(); 2834194459Sthompsa PMAP_LOCK(pmap_kernel()); 2835194459Sthompsa pmap_enter_pv(m, pve, pmap_kernel(), va, 2836198341Smarcel PVF_WRITE | PVF_UNMAN); 2837194459Sthompsa pmap_fix_cache(m, pmap_kernel(), va); 2838194459Sthompsa PMAP_UNLOCK(pmap_kernel()); 2839194459Sthompsa } else { 2840194459Sthompsa m->md.pv_kva = va; 2841194459Sthompsa } 2842194459Sthompsa vm_page_unlock_queues(); 2843194459Sthompsa } 2844135641Scognet} 2845129198Scognet 2846135641Scognetvoid 2847135641Scognetpmap_kenter(vm_offset_t va, vm_paddr_t pa) 2848135641Scognet{ 2849135641Scognet pmap_kenter_internal(va, pa, KENTER_CACHE); 2850129198Scognet} 2851129198Scognet 2852142570Scognetvoid 2853156191Scognetpmap_kenter_nocache(vm_offset_t va, vm_paddr_t pa) 2854156191Scognet{ 2855156191Scognet 2856156191Scognet pmap_kenter_internal(va, pa, 0); 2857156191Scognet} 2858156191Scognet 2859156191Scognetvoid 2860142570Scognetpmap_kenter_user(vm_offset_t va, vm_paddr_t pa) 2861142570Scognet{ 2862143192Scognet 2863142570Scognet pmap_kenter_internal(va, pa, KENTER_CACHE|KENTER_USER); 2864143192Scognet /* 2865143192Scognet * Call pmap_fault_fixup now, to make sure we'll have no exception 2866143192Scognet * at the first use of the new address, or bad things will happen, 2867143192Scognet * as we use one of these addresses in the exception handlers. 2868143192Scognet */ 2869143192Scognet pmap_fault_fixup(pmap_kernel(), va, VM_PROT_READ|VM_PROT_WRITE, 1); 2870142570Scognet} 2871129198Scognet 2872129198Scognet/* 2873194908Scognet * remove a page from the kernel pagetables 2874129198Scognet */ 2875169763Scognetvoid 2876129198Scognetpmap_kremove(vm_offset_t va) 2877129198Scognet{ 2878135641Scognet struct l2_bucket *l2b; 2879135641Scognet pt_entry_t *pte, opte; 2880194459Sthompsa struct pv_entry *pve; 2881194459Sthompsa vm_page_t m; 2882194459Sthompsa vm_offset_t pa; 2883135641Scognet 2884135641Scognet l2b = pmap_get_l2_bucket(pmap_kernel(), va); 2885145071Scognet if (!l2b) 2886145071Scognet return; 2887135641Scognet KASSERT(l2b != NULL, ("No L2 Bucket")); 2888135641Scognet pte = &l2b->l2b_kva[l2pte_index(va)]; 2889135641Scognet opte = *pte; 2890135641Scognet if (l2pte_valid(opte)) { 2891194459Sthompsa /* pa = vtophs(va) taken from pmap_extract() */ 2892194459Sthompsa switch (opte & L2_TYPE_MASK) { 2893194459Sthompsa case L2_TYPE_L: 2894194459Sthompsa pa = (opte & L2_L_FRAME) | (va & L2_L_OFFSET); 2895194459Sthompsa break; 2896194459Sthompsa default: 2897194459Sthompsa pa = (opte & L2_S_FRAME) | (va & L2_S_OFFSET); 2898194459Sthompsa break; 2899194459Sthompsa } 2900194459Sthompsa /* note: should never have to remove an allocation 2901194459Sthompsa * before the pvzone is initialized. 
2902194459Sthompsa */ 2903194459Sthompsa vm_page_lock_queues(); 2904194459Sthompsa PMAP_LOCK(pmap_kernel()); 2905194459Sthompsa if (pvzone != NULL && (m = vm_phys_paddr_to_vm_page(pa)) && 2906194459Sthompsa (pve = pmap_remove_pv(m, pmap_kernel(), va))) 2907194459Sthompsa pmap_free_pv_entry(pve); 2908194459Sthompsa PMAP_UNLOCK(pmap_kernel()); 2909194459Sthompsa vm_page_unlock_queues(); 2910195779Sraj va = va & ~PAGE_MASK; 2911135641Scognet cpu_dcache_wbinv_range(va, PAGE_SIZE); 2912183838Sraj cpu_l2cache_wbinv_range(va, PAGE_SIZE); 2913135641Scognet cpu_tlb_flushD_SE(va); 2914135641Scognet cpu_cpwait(); 2915144760Scognet *pte = 0; 2916135641Scognet } 2917129198Scognet} 2918129198Scognet 2919129198Scognet 2920129198Scognet/* 2921129198Scognet * Used to map a range of physical addresses into kernel 2922129198Scognet * virtual address space. 2923129198Scognet * 2924129198Scognet * The value passed in '*virt' is a suggested virtual address for 2925129198Scognet * the mapping. Architectures which can support a direct-mapped 2926129198Scognet * physical to virtual region can return the appropriate address 2927129198Scognet * within that region, leaving '*virt' unchanged. Other 2928129198Scognet * architectures should map the pages starting at '*virt' and 2929129198Scognet * update '*virt' with the first usable address after the mapped 2930129198Scognet * region. 2931129198Scognet */ 2932129198Scognetvm_offset_t 2933129198Scognetpmap_map(vm_offset_t *virt, vm_offset_t start, vm_offset_t end, int prot) 2934129198Scognet{ 2935161105Scognet#ifdef ARM_USE_SMALL_ALLOC 2936161105Scognet return (arm_ptovirt(start)); 2937161105Scognet#else 2938129198Scognet vm_offset_t sva = *virt; 2939129198Scognet vm_offset_t va = sva; 2940129198Scognet 2941129198Scognet PDEBUG(1, printf("pmap_map: virt = %08x, start = %08x, end = %08x, " 2942129198Scognet "prot = %d\n", (uint32_t) *virt, (uint32_t) start, (uint32_t) end, 2943129198Scognet prot)); 2944129198Scognet 2945129198Scognet while (start < end) { 2946129198Scognet pmap_kenter(va, start); 2947129198Scognet va += PAGE_SIZE; 2948129198Scognet start += PAGE_SIZE; 2949129198Scognet } 2950129198Scognet *virt = va; 2951129198Scognet return (sva); 2952161105Scognet#endif 2953129198Scognet} 2954129198Scognet 2955143724Scognetstatic void 2956150865Scognetpmap_wb_page(vm_page_t m) 2957143724Scognet{ 2958143724Scognet struct pv_entry *pv; 2959129198Scognet 2960143724Scognet TAILQ_FOREACH(pv, &m->md.pv_list, pv_list) 2961150865Scognet pmap_dcache_wb_range(pv->pv_pmap, pv->pv_va, PAGE_SIZE, FALSE, 2962144760Scognet (pv->pv_flags & PVF_WRITE) == 0); 2963143724Scognet} 2964143724Scognet 2965150865Scognetstatic void 2966150865Scognetpmap_inv_page(vm_page_t m) 2967150865Scognet{ 2968150865Scognet struct pv_entry *pv; 2969150865Scognet 2970150865Scognet TAILQ_FOREACH(pv, &m->md.pv_list, pv_list) 2971150865Scognet pmap_dcache_wb_range(pv->pv_pmap, pv->pv_va, PAGE_SIZE, TRUE, TRUE); 2972150865Scognet} 2973129198Scognet/* 2974129198Scognet * Add a list of wired pages to the kva 2975129198Scognet * this routine is only used for temporary 2976129198Scognet * kernel mappings that do not need to have 2977129198Scognet * page modification or references recorded. 2978129198Scognet * Note that old mappings are simply written 2979129198Scognet * over. The page *must* be wired. 
2980129198Scognet */ 2981129198Scognetvoid 2982129198Scognetpmap_qenter(vm_offset_t va, vm_page_t *m, int count) 2983129198Scognet{ 2984129198Scognet int i; 2985129198Scognet 2986129198Scognet for (i = 0; i < count; i++) { 2987150865Scognet pmap_wb_page(m[i]); 2988135641Scognet pmap_kenter_internal(va, VM_PAGE_TO_PHYS(m[i]), 2989135641Scognet KENTER_CACHE); 2990129198Scognet va += PAGE_SIZE; 2991129198Scognet } 2992129198Scognet} 2993129198Scognet 2994129198Scognet 2995129198Scognet/* 2996129198Scognet * this routine jerks page mappings from the 2997129198Scognet * kernel -- it is meant only for temporary mappings. 2998129198Scognet */ 2999129198Scognetvoid 3000129198Scognetpmap_qremove(vm_offset_t va, int count) 3001129198Scognet{ 3002146596Scognet vm_paddr_t pa; 3003129198Scognet int i; 3004129198Scognet 3005129198Scognet for (i = 0; i < count; i++) { 3006146596Scognet pa = vtophys(va); 3007146596Scognet if (pa) { 3008150865Scognet pmap_inv_page(PHYS_TO_VM_PAGE(pa)); 3009146596Scognet pmap_kremove(va); 3010146596Scognet } 3011129198Scognet va += PAGE_SIZE; 3012129198Scognet } 3013129198Scognet} 3014129198Scognet 3015129198Scognet 3016129198Scognet/* 3017129198Scognet * pmap_object_init_pt preloads the ptes for a given object 3018129198Scognet * into the specified pmap. This eliminates the blast of soft 3019129198Scognet * faults on process startup and immediately after an mmap. 3020129198Scognet */ 3021129198Scognetvoid 3022129198Scognetpmap_object_init_pt(pmap_t pmap, vm_offset_t addr, vm_object_t object, 3023129198Scognet vm_pindex_t pindex, vm_size_t size) 3024129198Scognet{ 3025157156Scognet 3026157156Scognet VM_OBJECT_LOCK_ASSERT(object, MA_OWNED); 3027195840Sjhb KASSERT(object->type == OBJT_DEVICE || object->type == OBJT_SG, 3028157156Scognet ("pmap_object_init_pt: non-device object")); 3029129198Scognet} 3030129198Scognet 3031129198Scognet 3032129198Scognet/* 3033129198Scognet * pmap_is_prefaultable: 3034129198Scognet * 3035129198Scognet * Return whether or not the specified virtual address is elgible 3036129198Scognet * for prefault. 3037129198Scognet */ 3038129198Scognetboolean_t 3039129198Scognetpmap_is_prefaultable(pmap_t pmap, vm_offset_t addr) 3040129198Scognet{ 3041135641Scognet pd_entry_t *pde; 3042129198Scognet pt_entry_t *pte; 3043129198Scognet 3044135641Scognet if (!pmap_get_pde_pte(pmap, addr, &pde, &pte)) 3045135641Scognet return (FALSE); 3046159073Scognet KASSERT(pte != NULL, ("Valid mapping but no pte ?")); 3047135641Scognet if (*pte == 0) 3048135641Scognet return (TRUE); 3049135641Scognet return (FALSE); 3050129198Scognet} 3051129198Scognet 3052129198Scognet/* 3053129198Scognet * Fetch pointers to the PDE/PTE for the given pmap/VA pair. 3054129198Scognet * Returns TRUE if the mapping exists, else FALSE. 3055129198Scognet * 3056129198Scognet * NOTE: This function is only used by a couple of arm-specific modules. 3057129198Scognet * It is not safe to take any pmap locks here, since we could be right 3058129198Scognet * in the middle of debugging the pmap anyway... 3059129198Scognet * 3060129198Scognet * It is possible for this routine to return FALSE even though a valid 3061129198Scognet * mapping does exist. This is because we don't lock, so the metadata 3062129198Scognet * state may be inconsistent. 3063129198Scognet * 3064129198Scognet * NOTE: We can return a NULL *ptp in the case where the L1 pde is 3065129198Scognet * a "section" mapping. 
3066129198Scognet */ 3067129198Scognetboolean_t 3068129198Scognetpmap_get_pde_pte(pmap_t pm, vm_offset_t va, pd_entry_t **pdp, pt_entry_t **ptp) 3069129198Scognet{ 3070129198Scognet struct l2_dtable *l2; 3071129198Scognet pd_entry_t *pl1pd, l1pd; 3072129198Scognet pt_entry_t *ptep; 3073129198Scognet u_short l1idx; 3074129198Scognet 3075129198Scognet if (pm->pm_l1 == NULL) 3076129198Scognet return (FALSE); 3077129198Scognet 3078129198Scognet l1idx = L1_IDX(va); 3079129198Scognet *pdp = pl1pd = &pm->pm_l1->l1_kva[l1idx]; 3080129198Scognet l1pd = *pl1pd; 3081129198Scognet 3082129198Scognet if (l1pte_section_p(l1pd)) { 3083129198Scognet *ptp = NULL; 3084129198Scognet return (TRUE); 3085129198Scognet } 3086129198Scognet 3087129198Scognet if (pm->pm_l2 == NULL) 3088129198Scognet return (FALSE); 3089129198Scognet 3090129198Scognet l2 = pm->pm_l2[L2_IDX(l1idx)]; 3091129198Scognet 3092129198Scognet if (l2 == NULL || 3093129198Scognet (ptep = l2->l2_bucket[L2_BUCKET(l1idx)].l2b_kva) == NULL) { 3094129198Scognet return (FALSE); 3095129198Scognet } 3096129198Scognet 3097129198Scognet *ptp = &ptep[l2pte_index(va)]; 3098129198Scognet return (TRUE); 3099129198Scognet} 3100129198Scognet 3101129198Scognet/* 3102129198Scognet * Routine: pmap_remove_all 3103129198Scognet * Function: 3104129198Scognet * Removes this physical page from 3105129198Scognet * all physical maps in which it resides. 3106129198Scognet * Reflects back modify bits to the pager. 3107129198Scognet * 3108129198Scognet * Notes: 3109129198Scognet * Original versions of this routine were very 3110129198Scognet * inefficient because they iteratively called 3111129198Scognet * pmap_remove (slow...) 3112129198Scognet */ 3113129198Scognetvoid 3114129198Scognetpmap_remove_all(vm_page_t m) 3115129198Scognet{ 3116129198Scognet pv_entry_t pv; 3117188019Scognet pt_entry_t *ptep; 3118135641Scognet struct l2_bucket *l2b; 3119135641Scognet boolean_t flush = FALSE; 3120135641Scognet pmap_t curpm; 3121135641Scognet int flags = 0; 3122129198Scognet 3123207796Salc KASSERT((m->flags & PG_FICTITIOUS) == 0, 3124207796Salc ("pmap_remove_all: page %p is fictitious", m)); 3125135641Scognet if (TAILQ_EMPTY(&m->md.pv_list)) 3126135641Scognet return; 3127207796Salc vm_page_lock_queues(); 3128175840Scognet pmap_remove_write(m); 3129135641Scognet curpm = vmspace_pmap(curproc->p_vmspace); 3130129198Scognet while ((pv = TAILQ_FIRST(&m->md.pv_list)) != NULL) { 3131135641Scognet if (flush == FALSE && (pv->pv_pmap == curpm || 3132135641Scognet pv->pv_pmap == pmap_kernel())) 3133135641Scognet flush = TRUE; 3134193712Sraj 3135159352Salc PMAP_LOCK(pv->pv_pmap); 3136193712Sraj /* 3137193712Sraj * Cached contents were written-back in pmap_remove_write(), 3138193712Sraj * but we still have to invalidate the cache entry to make 3139193712Sraj * sure stale data are not retrieved when another page will be 3140193712Sraj * mapped under this virtual address. 
3141193712Sraj */ 3142193712Sraj if (pmap_is_current(pv->pv_pmap)) { 3143193712Sraj cpu_dcache_inv_range(pv->pv_va, PAGE_SIZE); 3144203637Sraj if (pmap_has_valid_mapping(pv->pv_pmap, pv->pv_va)) 3145203637Sraj cpu_l2cache_inv_range(pv->pv_va, PAGE_SIZE); 3146193712Sraj } 3147193712Sraj 3148194459Sthompsa if (pv->pv_flags & PVF_UNMAN) { 3149194459Sthompsa /* remove the pv entry, but do not remove the mapping 3150194459Sthompsa * and remember this is a kernel mapped page 3151194459Sthompsa */ 3152194459Sthompsa m->md.pv_kva = pv->pv_va; 3153194459Sthompsa } else { 3154194459Sthompsa /* remove the mapping and pv entry */ 3155194459Sthompsa l2b = pmap_get_l2_bucket(pv->pv_pmap, pv->pv_va); 3156194459Sthompsa KASSERT(l2b != NULL, ("No l2 bucket")); 3157194459Sthompsa ptep = &l2b->l2b_kva[l2pte_index(pv->pv_va)]; 3158194459Sthompsa *ptep = 0; 3159194459Sthompsa PTE_SYNC_CURRENT(pv->pv_pmap, ptep); 3160194459Sthompsa pmap_free_l2_bucket(pv->pv_pmap, l2b, 1); 3161194459Sthompsa pv->pv_pmap->pm_stats.resident_count--; 3162194459Sthompsa flags |= pv->pv_flags; 3163194459Sthompsa } 3164135641Scognet pmap_nuke_pv(m, pv->pv_pmap, pv); 3165159352Salc PMAP_UNLOCK(pv->pv_pmap); 3166129198Scognet pmap_free_pv_entry(pv); 3167129198Scognet } 3168129198Scognet 3169135641Scognet if (flush) { 3170135641Scognet if (PV_BEEN_EXECD(flags)) 3171135641Scognet pmap_tlb_flushID(curpm); 3172135641Scognet else 3173135641Scognet pmap_tlb_flushD(curpm); 3174135641Scognet } 3175150865Scognet vm_page_flag_clear(m, PG_WRITEABLE); 3176207796Salc vm_page_unlock_queues(); 3177129198Scognet} 3178129198Scognet 3179129198Scognet 3180129198Scognet/* 3181129198Scognet * Set the physical protection on the 3182129198Scognet * specified range of this map as requested. 3183129198Scognet */ 3184129198Scognetvoid 3185129198Scognetpmap_protect(pmap_t pm, vm_offset_t sva, vm_offset_t eva, vm_prot_t prot) 3186129198Scognet{ 3187129198Scognet struct l2_bucket *l2b; 3188129198Scognet pt_entry_t *ptep, pte; 3189129198Scognet vm_offset_t next_bucket; 3190129198Scognet u_int flags; 3191129198Scognet int flush; 3192129198Scognet 3193183838Sraj CTR4(KTR_PMAP, "pmap_protect: pmap %p sva 0x%08x eva 0x%08x prot %x", 3194183838Sraj pm, sva, eva, prot); 3195183838Sraj 3196129198Scognet if ((prot & VM_PROT_READ) == 0) { 3197129198Scognet pmap_remove(pm, sva, eva); 3198129198Scognet return; 3199129198Scognet } 3200129198Scognet 3201129198Scognet if (prot & VM_PROT_WRITE) { 3202129198Scognet /* 3203129198Scognet * If this is a read->write transition, just ignore it and let 3204135641Scognet * vm_fault() take care of it later. 3205129198Scognet */ 3206129198Scognet return; 3207129198Scognet } 3208129198Scognet 3209159352Salc vm_page_lock_queues(); 3210159352Salc PMAP_LOCK(pm); 3211129198Scognet 3212129198Scognet /* 3213129198Scognet * OK, at this point, we know we're doing write-protect operation. 3214129198Scognet * If the pmap is active, write-back the range. 3215129198Scognet */ 3216129198Scognet pmap_dcache_wb_range(pm, sva, eva - sva, FALSE, FALSE); 3217129198Scognet 3218129198Scognet flush = ((eva - sva) >= (PAGE_SIZE * 4)) ? 

/*
 * Set the physical protection on the
 * specified range of this map as requested.
 */
void
pmap_protect(pmap_t pm, vm_offset_t sva, vm_offset_t eva, vm_prot_t prot)
{
	struct l2_bucket *l2b;
	pt_entry_t *ptep, pte;
	vm_offset_t next_bucket;
	u_int flags;
	int flush;

	CTR4(KTR_PMAP, "pmap_protect: pmap %p sva 0x%08x eva 0x%08x prot %x",
	    pm, sva, eva, prot);

	if ((prot & VM_PROT_READ) == 0) {
		pmap_remove(pm, sva, eva);
		return;
	}

	if (prot & VM_PROT_WRITE) {
		/*
		 * If this is a read->write transition, just ignore it and let
		 * vm_fault() take care of it later.
		 */
		return;
	}

	vm_page_lock_queues();
	PMAP_LOCK(pm);

	/*
	 * OK, at this point, we know we're doing a write-protect operation.
	 * If the pmap is active, write-back the range.
	 */
	pmap_dcache_wb_range(pm, sva, eva - sva, FALSE, FALSE);

	flush = ((eva - sva) >= (PAGE_SIZE * 4)) ? 0 : -1;
	flags = 0;

	while (sva < eva) {
		next_bucket = L2_NEXT_BUCKET(sva);
		if (next_bucket > eva)
			next_bucket = eva;

		l2b = pmap_get_l2_bucket(pm, sva);
		if (l2b == NULL) {
			sva = next_bucket;
			continue;
		}

		ptep = &l2b->l2b_kva[l2pte_index(sva)];

		while (sva < next_bucket) {
			if ((pte = *ptep) != 0 && (pte & L2_S_PROT_W) != 0) {
				struct vm_page *pg;
				u_int f;

				pg = PHYS_TO_VM_PAGE(l2pte_pa(pte));
				pte &= ~L2_S_PROT_W;
				*ptep = pte;
				PTE_SYNC(ptep);

				if (pg != NULL) {
					f = pmap_modify_pv(pg, pm, sva,
					    PVF_WRITE, 0);
					vm_page_dirty(pg);
				} else
					f = PVF_REF | PVF_EXEC;

				if (flush >= 0) {
					flush++;
					flags |= f;
				} else if (PV_BEEN_EXECD(f))
					pmap_tlb_flushID_SE(pm, sva);
				else if (PV_BEEN_REFD(f))
					pmap_tlb_flushD_SE(pm, sva);
			}

			sva += PAGE_SIZE;
			ptep++;
		}
	}

	if (flush) {
		if (PV_BEEN_EXECD(flags))
			pmap_tlb_flushID(pm);
		else if (PV_BEEN_REFD(flags))
			pmap_tlb_flushD(pm);
	}
	vm_page_unlock_queues();

	PMAP_UNLOCK(pm);
}
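
/*
 * Illustrative sketch (not compiled): per the code above, a prot value
 * without VM_PROT_READ tears the range down entirely, adding
 * VM_PROT_WRITE is deferred to vm_fault(), and only the read-only
 * transition walks the PTEs.  A caller revoking write access over a
 * page-rounded range might therefore do:
 */
#if 0
	pmap_protect(pm, trunc_page(sva), round_page(eva),
	    VM_PROT_READ | VM_PROT_EXECUTE);
#endif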
3308159127Salc */ 3309159127Salcstatic void 3310159127Salcpmap_enter_locked(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot, 3311160260Scognet boolean_t wired, int flags) 3312159127Salc{ 3313135641Scognet struct l2_bucket *l2b = NULL; 3314129198Scognet struct vm_page *opg; 3315144760Scognet struct pv_entry *pve = NULL; 3316129198Scognet pt_entry_t *ptep, npte, opte; 3317129198Scognet u_int nflags; 3318129198Scognet u_int oflags; 3319129198Scognet vm_paddr_t pa; 3320129198Scognet 3321159325Salc PMAP_ASSERT_LOCKED(pmap); 3322159127Salc mtx_assert(&vm_page_queue_mtx, MA_OWNED); 3323129198Scognet if (va == vector_page) { 3324129198Scognet pa = systempage.pv_pa; 3325129198Scognet m = NULL; 3326208688Salc } else { 3327209048Salc KASSERT((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) != 0 || 3328209048Salc (m->oflags & VPO_BUSY) != 0 || (flags & M_NOWAIT) != 0, 3329208688Salc ("pmap_enter_locked: page %p is not busy", m)); 3330129198Scognet pa = VM_PAGE_TO_PHYS(m); 3331208688Salc } 3332129198Scognet nflags = 0; 3333129198Scognet if (prot & VM_PROT_WRITE) 3334129198Scognet nflags |= PVF_WRITE; 3335129198Scognet if (prot & VM_PROT_EXECUTE) 3336129198Scognet nflags |= PVF_EXEC; 3337129198Scognet if (wired) 3338129198Scognet nflags |= PVF_WIRED; 3339129198Scognet PDEBUG(1, printf("pmap_enter: pmap = %08x, va = %08x, m = %08x, prot = %x, " 3340129198Scognet "wired = %x\n", (uint32_t) pmap, va, (uint32_t) m, prot, wired)); 3341129198Scognet 3342135641Scognet if (pmap == pmap_kernel()) { 3343129198Scognet l2b = pmap_get_l2_bucket(pmap, va); 3344135641Scognet if (l2b == NULL) 3345135641Scognet l2b = pmap_grow_l2_bucket(pmap, va); 3346160260Scognet } else { 3347160260Scognetdo_l2b_alloc: 3348129198Scognet l2b = pmap_alloc_l2_bucket(pmap, va); 3349160260Scognet if (l2b == NULL) { 3350160260Scognet if (flags & M_WAITOK) { 3351160260Scognet PMAP_UNLOCK(pmap); 3352160260Scognet vm_page_unlock_queues(); 3353160260Scognet VM_WAIT; 3354160260Scognet vm_page_lock_queues(); 3355160260Scognet PMAP_LOCK(pmap); 3356160260Scognet goto do_l2b_alloc; 3357160260Scognet } 3358160260Scognet return; 3359160260Scognet } 3360160260Scognet } 3361160260Scognet 3362129198Scognet ptep = &l2b->l2b_kva[l2pte_index(va)]; 3363129198Scognet 3364135641Scognet opte = *ptep; 3365129198Scognet npte = pa; 3366129198Scognet oflags = 0; 3367129198Scognet if (opte) { 3368129198Scognet /* 3369129198Scognet * There is already a mapping at this address. 3370129198Scognet * If the physical address is different, lookup the 3371129198Scognet * vm_page. 3372129198Scognet */ 3373129198Scognet if (l2pte_pa(opte) != pa) 3374129198Scognet opg = PHYS_TO_VM_PAGE(l2pte_pa(opte)); 3375129198Scognet else 3376129198Scognet opg = m; 3377129198Scognet } else 3378129198Scognet opg = NULL; 3379129198Scognet 3380135641Scognet if ((prot & (VM_PROT_ALL)) || 3381135641Scognet (!m || m->md.pvh_attrs & PVF_REF)) { 3382129198Scognet /* 3383135641Scognet * - The access type indicates that we don't need 3384135641Scognet * to do referenced emulation. 3385135641Scognet * OR 3386135641Scognet * - The physical page has already been referenced 3387135641Scognet * so no need to re-do referenced emulation here. 
3388129198Scognet */ 3389135641Scognet npte |= L2_S_PROTO; 3390135641Scognet 3391135641Scognet nflags |= PVF_REF; 3392135641Scognet 3393144760Scognet if (m && ((prot & VM_PROT_WRITE) != 0 || 3394144760Scognet (m->md.pvh_attrs & PVF_MOD))) { 3395129198Scognet /* 3396135641Scognet * This is a writable mapping, and the 3397135641Scognet * page's mod state indicates it has 3398135641Scognet * already been modified. Make it 3399135641Scognet * writable from the outset. 3400129198Scognet */ 3401135641Scognet nflags |= PVF_MOD; 3402157970Scognet if (!(m->md.pvh_attrs & PVF_MOD)) 3403144760Scognet vm_page_dirty(m); 3404129198Scognet } 3405144760Scognet if (m && opte) 3406144760Scognet vm_page_flag_set(m, PG_REFERENCED); 3407135641Scognet } else { 3408135641Scognet /* 3409135641Scognet * Need to do page referenced emulation. 3410135641Scognet */ 3411135641Scognet npte |= L2_TYPE_INV; 3412135641Scognet } 3413135641Scognet 3414164229Salc if (prot & VM_PROT_WRITE) { 3415135641Scognet npte |= L2_S_PROT_W; 3416208846Salc if (m != NULL && 3417208846Salc (m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) == 0) 3418164229Salc vm_page_flag_set(m, PG_WRITEABLE); 3419164229Salc } 3420135641Scognet npte |= pte_l2_s_cache_mode; 3421135641Scognet if (m && m == opg) { 3422135641Scognet /* 3423135641Scognet * We're changing the attrs of an existing mapping. 3424135641Scognet */ 3425135641Scognet oflags = pmap_modify_pv(m, pmap, va, 3426135641Scognet PVF_WRITE | PVF_EXEC | PVF_WIRED | 3427135641Scognet PVF_MOD | PVF_REF, nflags); 3428135641Scognet 3429135641Scognet /* 3430135641Scognet * We may need to flush the cache if we're 3431135641Scognet * doing rw-ro... 3432135641Scognet */ 3433135641Scognet if (pmap_is_current(pmap) && 3434135641Scognet (oflags & PVF_NC) == 0 && 3435183838Sraj (opte & L2_S_PROT_W) != 0 && 3436203637Sraj (prot & VM_PROT_WRITE) == 0 && 3437203637Sraj (opte & L2_TYPE_MASK) != L2_TYPE_INV) { 3438135641Scognet cpu_dcache_wb_range(va, PAGE_SIZE); 3439203637Sraj cpu_l2cache_wb_range(va, PAGE_SIZE); 3440183838Sraj } 3441129198Scognet } else { 3442129198Scognet /* 3443135641Scognet * New mapping, or changing the backing page 3444135641Scognet * of an existing mapping. 3445129198Scognet */ 3446129198Scognet if (opg) { 3447129198Scognet /* 3448135641Scognet * Replacing an existing mapping with a new one. 3449135641Scognet * It is part of our managed memory so we 3450135641Scognet * must remove it from the PV list 3451129198Scognet */ 3452194459Sthompsa if ((pve = pmap_remove_pv(opg, pmap, va))) { 3453194459Sthompsa 3454194459Sthompsa /* note for patch: the oflags/invalidation was moved 3455194459Sthompsa * because PG_FICTITIOUS pages could free the pve 3456194459Sthompsa */ 3457194459Sthompsa oflags = pve->pv_flags; 3458135641Scognet /* 3459135641Scognet * If the old mapping was valid (ref/mod 3460135641Scognet * emulation creates 'invalid' mappings 3461135641Scognet * initially) then make sure to frob 3462135641Scognet * the cache. 
3463135641Scognet */ 3464194459Sthompsa if ((oflags & PVF_NC) == 0 && l2pte_valid(opte)) { 3465135641Scognet if (PV_BEEN_EXECD(oflags)) { 3466129198Scognet pmap_idcache_wbinv_range(pmap, va, 3467129198Scognet PAGE_SIZE); 3468135641Scognet } else 3469135641Scognet if (PV_BEEN_REFD(oflags)) { 3470135641Scognet pmap_dcache_wb_range(pmap, va, 3471135641Scognet PAGE_SIZE, TRUE, 3472135641Scognet (oflags & PVF_WRITE) == 0); 3473135641Scognet } 3474194459Sthompsa } 3475194459Sthompsa 3476194459Sthompsa /* free/allocate a pv_entry for UNMANAGED pages if 3477194459Sthompsa * this physical page is not/is already mapped. 3478194459Sthompsa */ 3479194459Sthompsa 3480194459Sthompsa if (m && ((m->flags & PG_FICTITIOUS) || 3481194459Sthompsa ((m->flags & PG_UNMANAGED) && 3482194459Sthompsa !m->md.pv_kva && 3483194459Sthompsa TAILQ_EMPTY(&m->md.pv_list)))) { 3484194459Sthompsa pmap_free_pv_entry(pve); 3485194459Sthompsa pve = NULL; 3486194459Sthompsa } 3487194459Sthompsa } else if (m && !(m->flags & PG_FICTITIOUS) && 3488194459Sthompsa (!(m->flags & PG_UNMANAGED) || m->md.pv_kva || 3489194459Sthompsa !TAILQ_EMPTY(&m->md.pv_list))) 3490194459Sthompsa pve = pmap_get_pv_entry(); 3491194459Sthompsa } else if (m && !(m->flags & PG_FICTITIOUS) && 3492194459Sthompsa (!(m->flags & PG_UNMANAGED) || m->md.pv_kva || 3493194459Sthompsa !TAILQ_EMPTY(&m->md.pv_list))) 3494194459Sthompsa pve = pmap_get_pv_entry(); 3495194459Sthompsa 3496194459Sthompsa if (m && !(m->flags & PG_FICTITIOUS)) { 3497194459Sthompsa KASSERT(va < kmi.clean_sva || va >= kmi.clean_eva, 3498194459Sthompsa ("pmap_enter: managed mapping within the clean submap")); 3499194459Sthompsa if (m->flags & PG_UNMANAGED) { 3500194459Sthompsa if (!TAILQ_EMPTY(&m->md.pv_list) || 3501194459Sthompsa m->md.pv_kva) { 3502194459Sthompsa KASSERT(pve != NULL, ("No pv")); 3503194459Sthompsa nflags |= PVF_UNMAN; 3504194459Sthompsa pmap_enter_pv(m, pve, pmap, va, nflags); 3505194459Sthompsa } else 3506194459Sthompsa m->md.pv_kva = va; 3507194459Sthompsa } else { 3508194459Sthompsa KASSERT(pve != NULL, ("No pv")); 3509194459Sthompsa pmap_enter_pv(m, pve, pmap, va, nflags); 3510129198Scognet } 3511157970Scognet } 3512129198Scognet } 3513129198Scognet /* 3514129198Scognet * Make sure userland mappings get the right permissions 3515129198Scognet */ 3516129198Scognet if (pmap != pmap_kernel() && va != vector_page) { 3517129198Scognet npte |= L2_S_PROT_U; 3518129198Scognet } 3519129198Scognet 3520129198Scognet /* 3521129198Scognet * Keep the stats up to date 3522129198Scognet */ 3523129198Scognet if (opte == 0) { 3524129198Scognet l2b->l2b_occupancy++; 3525129198Scognet pmap->pm_stats.resident_count++; 3526129198Scognet } 3527129198Scognet 3528129198Scognet 3529129198Scognet /* 3530129198Scognet * If this is just a wiring change, the two PTEs will be 3531129198Scognet * identical, so there's no need to update the page table. 3532129198Scognet */ 3533129198Scognet if (npte != opte) { 3534135641Scognet boolean_t is_cached = pmap_is_current(pmap); 3535129198Scognet 3536129198Scognet *ptep = npte; 3537129198Scognet if (is_cached) { 3538129198Scognet /* 3539129198Scognet * We only need to frob the cache/tlb if this pmap 3540129198Scognet * is current 3541129198Scognet */ 3542129198Scognet PTE_SYNC(ptep); 3543161105Scognet if (L1_IDX(va) != L1_IDX(vector_page) && 3544129198Scognet l2pte_valid(npte)) { 3545129198Scognet /* 3546129198Scognet * This mapping is likely to be accessed as 3547129198Scognet * soon as we return to userland. 
				 * Fix up the L1 entry to avoid taking
				 * another page/domain fault.
				 */
				pd_entry_t *pl1pd, l1pd;

				pl1pd = &pmap->pm_l1->l1_kva[L1_IDX(va)];
				l1pd = l2b->l2b_phys |
				    L1_C_DOM(pmap->pm_domain) | L1_C_PROTO;
				if (*pl1pd != l1pd) {
					*pl1pd = l1pd;
					PTE_SYNC(pl1pd);
				}
			}
		}

		if (PV_BEEN_EXECD(oflags))
			pmap_tlb_flushID_SE(pmap, va);
		else if (PV_BEEN_REFD(oflags))
			pmap_tlb_flushD_SE(pmap, va);

		if (m)
			pmap_fix_cache(m, pmap, va);
	}
}

/*
 * Maps a sequence of resident pages belonging to the same object.
 * The sequence begins with the given page m_start. This page is
 * mapped at the given virtual address start. Each subsequent page is
 * mapped at a virtual address that is offset from start by the same
 * amount as the page is offset from m_start within the object. The
 * last page in the sequence is the page with the largest offset from
 * m_start that can be mapped at a virtual address less than the given
 * virtual address end. Not every virtual page between start and end
 * is mapped; only those for which a resident page exists with the
 * corresponding offset from m_start are mapped.
 */
void
pmap_enter_object(pmap_t pmap, vm_offset_t start, vm_offset_t end,
    vm_page_t m_start, vm_prot_t prot)
{
	vm_page_t m;
	vm_pindex_t diff, psize;

	psize = atop(end - start);
	m = m_start;
	vm_page_lock_queues();
	PMAP_LOCK(pmap);
	while (m != NULL && (diff = m->pindex - m_start->pindex) < psize) {
		pmap_enter_locked(pmap, start + ptoa(diff), m, prot &
		    (VM_PROT_READ | VM_PROT_EXECUTE), FALSE, M_NOWAIT);
		m = TAILQ_NEXT(m, listq);
	}
	vm_page_unlock_queues();
	PMAP_UNLOCK(pmap);
}

/*
 * this code makes some *MAJOR* assumptions:
 * 1. Current pmap & pmap exists.
 * 2. Not wired.
 * 3. Read access.
 * 4. No page table pages.
 * but is *MUCH* faster than pmap_enter...
 */

void
pmap_enter_quick(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot)
{

	vm_page_lock_queues();
	PMAP_LOCK(pmap);
	pmap_enter_locked(pmap, va, m, prot & (VM_PROT_READ | VM_PROT_EXECUTE),
	    FALSE, M_NOWAIT);
	vm_page_unlock_queues();
	PMAP_UNLOCK(pmap);
}
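
/*
 * Illustrative sketch (not compiled): both helpers above pass M_NOWAIT,
 * so a failed L2 bucket allocation silently skips the page and vm_fault()
 * creates the mapping later.  A hypothetical prefault of a resident,
 * read-only range could therefore be a single call:
 */
#if 0
	pmap_enter_object(vmspace_pmap(p->p_vmspace), start, end, m_start,
	    VM_PROT_READ | VM_PROT_EXECUTE);
#endif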

/*
 *	Routine:	pmap_change_wiring
 *	Function:	Change the wiring attribute for a map/virtual-address
 *			pair.
 *	In/out conditions:
 *		The mapping must already exist in the pmap.
 */
void
pmap_change_wiring(pmap_t pmap, vm_offset_t va, boolean_t wired)
{
	struct l2_bucket *l2b;
	pt_entry_t *ptep, pte;
	vm_page_t pg;

	vm_page_lock_queues();
	PMAP_LOCK(pmap);
	l2b = pmap_get_l2_bucket(pmap, va);
	KASSERT(l2b, ("No l2b bucket in pmap_change_wiring"));
	ptep = &l2b->l2b_kva[l2pte_index(va)];
	pte = *ptep;
	pg = PHYS_TO_VM_PAGE(l2pte_pa(pte));
	if (pg)
		pmap_modify_pv(pg, pmap, va, PVF_WIRED, wired ? PVF_WIRED : 0);
	vm_page_unlock_queues();
	PMAP_UNLOCK(pmap);
}


/*
 *	Copy the range specified by src_addr/len
 *	from the source map to the range dst_addr/len
 *	in the destination map.
 *
 *	This routine is only advisory and need not do anything.
 */
void
pmap_copy(pmap_t dst_pmap, pmap_t src_pmap, vm_offset_t dst_addr,
    vm_size_t len, vm_offset_t src_addr)
{
}


/*
 *	Routine:	pmap_extract
 *	Function:
 *		Extract the physical page address associated
 *		with the given map/virtual_address pair.
 */
vm_paddr_t
pmap_extract(pmap_t pm, vm_offset_t va)
{
	struct l2_dtable *l2;
	pd_entry_t l1pd;
	pt_entry_t *ptep, pte;
	vm_paddr_t pa;
	u_int l1idx;

	l1idx = L1_IDX(va);

	PMAP_LOCK(pm);
	l1pd = pm->pm_l1->l1_kva[l1idx];
	if (l1pte_section_p(l1pd)) {
		/*
		 * These should only happen for pmap_kernel()
		 */
		KASSERT(pm == pmap_kernel(), ("huh"));
		/* XXX: what to do about the bits > 32 ? */
		if (l1pd & L1_S_SUPERSEC)
			pa = (l1pd & L1_SUP_FRAME) | (va & L1_SUP_OFFSET);
		else
			pa = (l1pd & L1_S_FRAME) | (va & L1_S_OFFSET);
	} else {
		/*
		 * Note that we can't rely on the validity of the L1
		 * descriptor as an indication that a mapping exists.
		 * We have to look it up in the L2 dtable.
		 */
		l2 = pm->pm_l2[L2_IDX(l1idx)];

		if (l2 == NULL ||
		    (ptep = l2->l2_bucket[L2_BUCKET(l1idx)].l2b_kva) == NULL) {
			PMAP_UNLOCK(pm);
			return (0);
		}

		ptep = &ptep[l2pte_index(va)];
		pte = *ptep;

		if (pte == 0) {
			PMAP_UNLOCK(pm);
			return (0);
		}

		switch (pte & L2_TYPE_MASK) {
		case L2_TYPE_L:
			pa = (pte & L2_L_FRAME) | (va & L2_L_OFFSET);
			break;
		default:
			pa = (pte & L2_S_FRAME) | (va & L2_S_OFFSET);
			break;
		}
	}

	PMAP_UNLOCK(pm);
	return (pa);
}
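
/*
 * Illustrative sketch (not compiled): pmap_extract() returns 0 for an
 * unmapped address, which is exactly how pmap_qremove() above decides
 * whether a page needs to be invalidated and removed:
 */
#if 0
	vm_paddr_t pa;

	pa = pmap_extract(pmap_kernel(), va);
	if (pa != 0)
		printf("va %08x -> pa %08x\n", (uint32_t)va, (uint32_t)pa);
#endif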
3702129198Scognet */ 3703129198Scognet l2 = pm->pm_l2[L2_IDX(l1idx)]; 3704129198Scognet 3705129198Scognet if (l2 == NULL || 3706129198Scognet (ptep = l2->l2_bucket[L2_BUCKET(l1idx)].l2b_kva) == NULL) { 3707159450Salc PMAP_UNLOCK(pm); 3708129198Scognet return (0); 3709129198Scognet } 3710129198Scognet 3711129198Scognet ptep = &ptep[l2pte_index(va)]; 3712129198Scognet pte = *ptep; 3713129198Scognet 3714159450Salc if (pte == 0) { 3715159450Salc PMAP_UNLOCK(pm); 3716129198Scognet return (0); 3717159450Salc } 3718129198Scognet 3719129198Scognet switch (pte & L2_TYPE_MASK) { 3720129198Scognet case L2_TYPE_L: 3721129198Scognet pa = (pte & L2_L_FRAME) | (va & L2_L_OFFSET); 3722129198Scognet break; 3723129198Scognet 3724129198Scognet default: 3725129198Scognet pa = (pte & L2_S_FRAME) | (va & L2_S_OFFSET); 3726129198Scognet break; 3727129198Scognet } 3728129198Scognet } 3729129198Scognet 3730159450Salc PMAP_UNLOCK(pm); 3731129198Scognet return (pa); 3732129198Scognet} 3733129198Scognet 3734133453Salc/* 3735133453Salc * Atomically extract and hold the physical page with the given 3736133453Salc * pmap and virtual address pair if that mapping permits the given 3737133453Salc * protection. 3738133453Salc * 3739133453Salc */ 3740129198Scognetvm_page_t 3741129198Scognetpmap_extract_and_hold(pmap_t pmap, vm_offset_t va, vm_prot_t prot) 3742129198Scognet{ 3743135641Scognet struct l2_dtable *l2; 3744159378Salc pd_entry_t l1pd; 3745135641Scognet pt_entry_t *ptep, pte; 3746207410Skmacy vm_paddr_t pa, paddr; 3747135641Scognet vm_page_t m = NULL; 3748135641Scognet u_int l1idx; 3749135641Scognet l1idx = L1_IDX(va); 3750207410Skmacy paddr = 0; 3751129198Scognet 3752159325Salc PMAP_LOCK(pmap); 3753207410Skmacyretry: 3754159378Salc l1pd = pmap->pm_l1->l1_kva[l1idx]; 3755135641Scognet if (l1pte_section_p(l1pd)) { 3756135641Scognet /* 3757135641Scognet * These should only happen for pmap_kernel() 3758135641Scognet */ 3759135641Scognet KASSERT(pmap == pmap_kernel(), ("huh")); 3760171620Scognet /* XXX: what to do about the bits > 32 ? */ 3761171620Scognet if (l1pd & L1_S_SUPERSEC) 3762171620Scognet pa = (l1pd & L1_SUP_FRAME) | (va & L1_SUP_OFFSET); 3763171620Scognet else 3764171620Scognet pa = (l1pd & L1_S_FRAME) | (va & L1_S_OFFSET); 3765207410Skmacy if (vm_page_pa_tryrelock(pmap, pa & PG_FRAME, &paddr)) 3766207410Skmacy goto retry; 3767135641Scognet if (l1pd & L1_S_PROT_W || (prot & VM_PROT_WRITE) == 0) { 3768135641Scognet m = PHYS_TO_VM_PAGE(pa); 3769135641Scognet vm_page_hold(m); 3770135641Scognet } 3771135641Scognet 3772135641Scognet } else { 3773135641Scognet /* 3774135641Scognet * Note that we can't rely on the validity of the L1 3775135641Scognet * descriptor as an indication that a mapping exists. 3776135641Scognet * We have to look it up in the L2 dtable. 
3777135641Scognet */ 3778135641Scognet l2 = pmap->pm_l2[L2_IDX(l1idx)]; 3779135641Scognet 3780135641Scognet if (l2 == NULL || 3781135641Scognet (ptep = l2->l2_bucket[L2_BUCKET(l1idx)].l2b_kva) == NULL) { 3782159325Salc PMAP_UNLOCK(pmap); 3783135641Scognet return (NULL); 3784135641Scognet } 3785135641Scognet 3786135641Scognet ptep = &ptep[l2pte_index(va)]; 3787135641Scognet pte = *ptep; 3788135641Scognet 3789150865Scognet if (pte == 0) { 3790159325Salc PMAP_UNLOCK(pmap); 3791135641Scognet return (NULL); 3792150865Scognet } 3793135641Scognet if (pte & L2_S_PROT_W || (prot & VM_PROT_WRITE) == 0) { 3794135641Scognet switch (pte & L2_TYPE_MASK) { 3795135641Scognet case L2_TYPE_L: 3796135641Scognet pa = (pte & L2_L_FRAME) | (va & L2_L_OFFSET); 3797135641Scognet break; 3798135641Scognet 3799135641Scognet default: 3800135641Scognet pa = (pte & L2_S_FRAME) | (va & L2_S_OFFSET); 3801135641Scognet break; 3802135641Scognet } 3803207410Skmacy if (vm_page_pa_tryrelock(pmap, pa & PG_FRAME, &paddr)) 3804207410Skmacy goto retry; 3805135641Scognet m = PHYS_TO_VM_PAGE(pa); 3806135641Scognet vm_page_hold(m); 3807135641Scognet } 3808129198Scognet } 3809135641Scognet 3810159325Salc PMAP_UNLOCK(pmap); 3811207410Skmacy PA_UNLOCK_COND(paddr); 3812129198Scognet return (m); 3813129198Scognet} 3814129198Scognet 3815129198Scognet/* 3816129198Scognet * Initialize a preallocated and zeroed pmap structure, 3817129198Scognet * such as one in a vmspace structure. 3818129198Scognet */ 3819129198Scognet 3820173361Skibint 3821129198Scognetpmap_pinit(pmap_t pmap) 3822129198Scognet{ 3823129198Scognet PDEBUG(1, printf("pmap_pinit: pmap = %08x\n", (uint32_t) pmap)); 3824129198Scognet 3825159325Salc PMAP_LOCK_INIT(pmap); 3826129198Scognet pmap_alloc_l1(pmap); 3827129198Scognet bzero(pmap->pm_l2, sizeof(pmap->pm_l2)); 3828129198Scognet 3829129198Scognet pmap->pm_active = 0; 3830129198Scognet 3831144760Scognet TAILQ_INIT(&pmap->pm_pvlist); 3832129198Scognet bzero(&pmap->pm_stats, sizeof pmap->pm_stats); 3833129198Scognet pmap->pm_stats.resident_count = 1; 3834129198Scognet if (vector_page < KERNBASE) { 3835175840Scognet pmap_enter(pmap, vector_page, 3836175397Scognet VM_PROT_READ, PHYS_TO_VM_PAGE(systempage.pv_pa), 3837129198Scognet VM_PROT_READ, 1); 3838129198Scognet } 3839173361Skib return (1); 3840129198Scognet} 3841129198Scognet 3842129198Scognet 3843129198Scognet/*************************************************** 3844129198Scognet * page management routines. 3845129198Scognet ***************************************************/ 3846129198Scognet 3847129198Scognet 3848135641Scognetstatic void 3849129198Scognetpmap_free_pv_entry(pv_entry_t pv) 3850129198Scognet{ 3851129198Scognet pv_entry_count--; 3852129198Scognet uma_zfree(pvzone, pv); 3853129198Scognet} 3854129198Scognet 3855129198Scognet 3856129198Scognet/* 3857129198Scognet * get a new pv_entry, allocating a block from the system 3858129198Scognet * when needed. 3859129198Scognet * the memory allocation is performed bypassing the malloc code 3860129198Scognet * because of the possibility of allocations at interrupt time. 
3861129198Scognet */ 3862129198Scognetstatic pv_entry_t 3863129198Scognetpmap_get_pv_entry(void) 3864129198Scognet{ 3865129198Scognet pv_entry_t ret_value; 3866129198Scognet 3867129198Scognet pv_entry_count++; 3868159500Salc if (pv_entry_count > pv_entry_high_water) 3869159500Salc pagedaemon_wakeup(); 3870129198Scognet ret_value = uma_zalloc(pvzone, M_NOWAIT); 3871129198Scognet return ret_value; 3872129198Scognet} 3873129198Scognet 3874129198Scognet/* 3875129198Scognet * Remove the given range of addresses from the specified map. 3876129198Scognet * 3877129198Scognet * It is assumed that the start and end are properly 3878129198Scognet * rounded to the page size. 3879129198Scognet */ 3880175840Scognet#define PMAP_REMOVE_CLEAN_LIST_SIZE 3 3881129198Scognetvoid 3882129198Scognetpmap_remove(pmap_t pm, vm_offset_t sva, vm_offset_t eva) 3883129198Scognet{ 3884129198Scognet struct l2_bucket *l2b; 3885129198Scognet vm_offset_t next_bucket; 3886129198Scognet pt_entry_t *ptep; 3887175840Scognet u_int total; 3888129198Scognet u_int mappings, is_exec, is_refd; 3889135641Scognet int flushall = 0; 3890129198Scognet 3891129198Scognet 3892129198Scognet /* 3893129198Scognet * we lock in the pmap => pv_head direction 3894129198Scognet */ 3895129198Scognet 3896137664Scognet vm_page_lock_queues(); 3897159352Salc PMAP_LOCK(pm); 3898129198Scognet total = 0; 3899129198Scognet while (sva < eva) { 3900129198Scognet /* 3901129198Scognet * Do one L2 bucket's worth at a time. 3902129198Scognet */ 3903129198Scognet next_bucket = L2_NEXT_BUCKET(sva); 3904129198Scognet if (next_bucket > eva) 3905129198Scognet next_bucket = eva; 3906129198Scognet 3907129198Scognet l2b = pmap_get_l2_bucket(pm, sva); 3908129198Scognet if (l2b == NULL) { 3909129198Scognet sva = next_bucket; 3910129198Scognet continue; 3911129198Scognet } 3912129198Scognet 3913129198Scognet ptep = &l2b->l2b_kva[l2pte_index(sva)]; 3914129198Scognet mappings = 0; 3915129198Scognet 3916129198Scognet while (sva < next_bucket) { 3917129198Scognet struct vm_page *pg; 3918129198Scognet pt_entry_t pte; 3919129198Scognet vm_paddr_t pa; 3920129198Scognet 3921129198Scognet pte = *ptep; 3922129198Scognet 3923129198Scognet if (pte == 0) { 3924129198Scognet /* 3925129198Scognet * Nothing here, move along 3926129198Scognet */ 3927129198Scognet sva += PAGE_SIZE; 3928129198Scognet ptep++; 3929129198Scognet continue; 3930129198Scognet } 3931129198Scognet 3932129198Scognet pm->pm_stats.resident_count--; 3933129198Scognet pa = l2pte_pa(pte); 3934129198Scognet is_exec = 0; 3935129198Scognet is_refd = 1; 3936129198Scognet 3937129198Scognet /* 3938129198Scognet * Update flags. In a number of circumstances, 3939129198Scognet * we could cluster a lot of these and do a 3940129198Scognet * number of sequential pages in one go. 
3941129198Scognet */ 3942129198Scognet if ((pg = PHYS_TO_VM_PAGE(pa)) != NULL) { 3943129198Scognet struct pv_entry *pve; 3944159474Salc 3945129198Scognet pve = pmap_remove_pv(pg, pm, sva); 3946135641Scognet if (pve) { 3947159474Salc is_exec = PV_BEEN_EXECD(pve->pv_flags); 3948159474Salc is_refd = PV_BEEN_REFD(pve->pv_flags); 3949129198Scognet pmap_free_pv_entry(pve); 3950129198Scognet } 3951129198Scognet } 3952129198Scognet 3953175840Scognet if (l2pte_valid(pte) && pmap_is_current(pm)) { 3954175840Scognet if (total < PMAP_REMOVE_CLEAN_LIST_SIZE) { 3955175840Scognet total++; 3956175840Scognet if (is_exec) { 3957175840Scognet cpu_idcache_wbinv_range(sva, 3958183838Sraj PAGE_SIZE); 3959183838Sraj cpu_l2cache_wbinv_range(sva, 3960183838Sraj PAGE_SIZE); 3961175840Scognet cpu_tlb_flushID_SE(sva); 3962175840Scognet } else if (is_refd) { 3963175840Scognet cpu_dcache_wbinv_range(sva, 3964183838Sraj PAGE_SIZE); 3965183838Sraj cpu_l2cache_wbinv_range(sva, 3966183838Sraj PAGE_SIZE); 3967175840Scognet cpu_tlb_flushD_SE(sva); 3968175840Scognet } 3969175840Scognet } else if (total == PMAP_REMOVE_CLEAN_LIST_SIZE) { 3970175840Scognet /* flushall will also only get set for 3971175840Scognet * for a current pmap 3972175840Scognet */ 3973175840Scognet cpu_idcache_wbinv_all(); 3974183838Sraj cpu_l2cache_wbinv_all(); 3975175840Scognet flushall = 1; 3976175840Scognet total++; 3977129198Scognet } 3978129198Scognet } 3979175840Scognet *ptep = 0; 3980175840Scognet PTE_SYNC(ptep); 3981129198Scognet 3982129198Scognet sva += PAGE_SIZE; 3983129198Scognet ptep++; 3984129198Scognet mappings++; 3985129198Scognet } 3986129198Scognet 3987129198Scognet pmap_free_l2_bucket(pm, l2b, mappings); 3988129198Scognet } 3989129198Scognet 3990137664Scognet vm_page_unlock_queues(); 3991135641Scognet if (flushall) 3992135641Scognet cpu_tlb_flushID(); 3993159352Salc PMAP_UNLOCK(pm); 3994129198Scognet} 3995129198Scognet 3996129198Scognet/* 3997129198Scognet * pmap_zero_page() 3998129198Scognet * 3999129198Scognet * Zero a given physical page by mapping it at a page hook point. 4000129198Scognet * In doing the zero page op, the page we zero is mapped cachable, as with 4001129198Scognet * StrongARM accesses to non-cached pages are non-burst making writing 4002129198Scognet * _any_ bulk data very slow. 
4003129198Scognet */ 4004164778Scognet#if (ARM_MMU_GENERIC + ARM_MMU_SA1) != 0 || defined(CPU_XSCALE_CORE3) 4005129198Scognetvoid 4006129198Scognetpmap_zero_page_generic(vm_paddr_t phys, int off, int size) 4007129198Scognet{ 4008161105Scognet#ifdef ARM_USE_SMALL_ALLOC 4009161105Scognet char *dstpg; 4010161105Scognet#endif 4011161105Scognet 4012129198Scognet#ifdef DEBUG 4013129198Scognet struct vm_page *pg = PHYS_TO_VM_PAGE(phys); 4014129198Scognet 4015129198Scognet if (pg->md.pvh_list != NULL) 4016129198Scognet panic("pmap_zero_page: page has mappings"); 4017129198Scognet#endif 4018129198Scognet 4019172300Scognet if (_arm_bzero && size >= _min_bzero_size && 4020150865Scognet _arm_bzero((void *)(phys + off), size, IS_PHYSICAL) == 0) 4021150865Scognet return; 4022129198Scognet 4023161105Scognet#ifdef ARM_USE_SMALL_ALLOC 4024161105Scognet dstpg = (char *)arm_ptovirt(phys); 4025161105Scognet if (off || size != PAGE_SIZE) { 4026161105Scognet bzero(dstpg + off, size); 4027161105Scognet cpu_dcache_wbinv_range((vm_offset_t)(dstpg + off), size); 4028183838Sraj cpu_l2cache_wbinv_range((vm_offset_t)(dstpg + off), size); 4029161105Scognet } else { 4030161105Scognet bzero_page((vm_offset_t)dstpg); 4031161105Scognet cpu_dcache_wbinv_range((vm_offset_t)dstpg, PAGE_SIZE); 4032183838Sraj cpu_l2cache_wbinv_range((vm_offset_t)dstpg, PAGE_SIZE); 4033161105Scognet } 4034161105Scognet#else 4035150865Scognet 4036159088Scognet mtx_lock(&cmtx); 4037129198Scognet /* 4038183836Sraj * Hook in the page, zero it, invalidate the TLB as needed. 4039183836Sraj * 4040183836Sraj * Note the temporary zero-page mapping must be a non-cached page in 4041184730Sraj * order to work without corruption when write-allocate is enabled. 4042129198Scognet */ 4043183836Sraj *cdst_pte = L2_S_PROTO | phys | L2_S_PROT(PTE_KERNEL, VM_PROT_WRITE); 4044129198Scognet cpu_tlb_flushD_SE(cdstp); 4045129198Scognet cpu_cpwait(); 4046183836Sraj if (off || size != PAGE_SIZE) 4047129198Scognet bzero((void *)(cdstp + off), size); 4048183836Sraj else 4049129198Scognet bzero_page(cdstp); 4050183836Sraj 4051159088Scognet mtx_unlock(&cmtx); 4052161105Scognet#endif 4053129198Scognet} 4054129198Scognet#endif /* (ARM_MMU_GENERIC + ARM_MMU_SA1) != 0 */ 4055129198Scognet 4056129198Scognet#if ARM_MMU_XSCALE == 1 4057129198Scognetvoid 4058129198Scognetpmap_zero_page_xscale(vm_paddr_t phys, int off, int size) 4059129198Scognet{ 4060172713Scognet#ifdef ARM_USE_SMALL_ALLOC 4061172713Scognet char *dstpg; 4062172713Scognet#endif 4063172713Scognet 4064172300Scognet if (_arm_bzero && size >= _min_bzero_size && 4065150865Scognet _arm_bzero((void *)(phys + off), size, IS_PHYSICAL) == 0) 4066150865Scognet return; 4067172713Scognet#ifdef ARM_USE_SMALL_ALLOC 4068172713Scognet dstpg = (char *)arm_ptovirt(phys); 4069172713Scognet if (off || size != PAGE_SIZE) { 4070172713Scognet bzero(dstpg + off, size); 4071172713Scognet cpu_dcache_wbinv_range((vm_offset_t)(dstpg + off), size); 4072172713Scognet } else { 4073172713Scognet bzero_page((vm_offset_t)dstpg); 4074172713Scognet cpu_dcache_wbinv_range((vm_offset_t)dstpg, PAGE_SIZE); 4075172713Scognet } 4076172713Scognet#else 4077159088Scognet mtx_lock(&cmtx); 4078129198Scognet /* 4079129198Scognet * Hook in the page, zero it, and purge the cache for that 4080129198Scognet * zeroed page. Invalidate the TLB as needed. 
4081129198Scognet */ 4082129198Scognet *cdst_pte = L2_S_PROTO | phys | 4083129198Scognet L2_S_PROT(PTE_KERNEL, VM_PROT_WRITE) | 4084129198Scognet L2_C | L2_XSCALE_T_TEX(TEX_XSCALE_X); /* mini-data */ 4085129198Scognet PTE_SYNC(cdst_pte); 4086129198Scognet cpu_tlb_flushD_SE(cdstp); 4087129198Scognet cpu_cpwait(); 4088135641Scognet if (off || size != PAGE_SIZE) 4089129198Scognet bzero((void *)(cdstp + off), size); 4090129198Scognet else 4091129198Scognet bzero_page(cdstp); 4092159088Scognet mtx_unlock(&cmtx); 4093129198Scognet xscale_cache_clean_minidata(); 4094172713Scognet#endif 4095129198Scognet} 4096129198Scognet 4097129198Scognet/* 4098129198Scognet * Change the PTEs for the specified kernel mappings such that they 4099129198Scognet * will use the mini data cache instead of the main data cache. 4100129198Scognet */ 4101129198Scognetvoid 4102135641Scognetpmap_use_minicache(vm_offset_t va, vm_size_t size) 4103129198Scognet{ 4104129198Scognet struct l2_bucket *l2b; 4105129198Scognet pt_entry_t *ptep, *sptep, pte; 4106129198Scognet vm_offset_t next_bucket, eva; 4107129198Scognet 4108164778Scognet#if (ARM_NMMUS > 1) || defined(CPU_XSCALE_CORE3) 4109129198Scognet if (xscale_use_minidata == 0) 4110129198Scognet return; 4111129198Scognet#endif 4112129198Scognet 4113135641Scognet eva = va + size; 4114129198Scognet 4115129198Scognet while (va < eva) { 4116129198Scognet next_bucket = L2_NEXT_BUCKET(va); 4117129198Scognet if (next_bucket > eva) 4118129198Scognet next_bucket = eva; 4119129198Scognet 4120129198Scognet l2b = pmap_get_l2_bucket(pmap_kernel(), va); 4121129198Scognet 4122129198Scognet sptep = ptep = &l2b->l2b_kva[l2pte_index(va)]; 4123129198Scognet 4124129198Scognet while (va < next_bucket) { 4125129198Scognet pte = *ptep; 4126129198Scognet if (!l2pte_minidata(pte)) { 4127129198Scognet cpu_dcache_wbinv_range(va, PAGE_SIZE); 4128129198Scognet cpu_tlb_flushD_SE(va); 4129129198Scognet *ptep = pte & ~L2_B; 4130129198Scognet } 4131129198Scognet ptep++; 4132129198Scognet va += PAGE_SIZE; 4133129198Scognet } 4134129198Scognet PTE_SYNC_RANGE(sptep, (u_int)(ptep - sptep)); 4135129198Scognet } 4136129198Scognet cpu_cpwait(); 4137129198Scognet} 4138129198Scognet#endif /* ARM_MMU_XSCALE == 1 */ 4139129198Scognet 4140129198Scognet/* 4141129198Scognet * pmap_zero_page zeros the specified hardware page by mapping 4142129198Scognet * the page into KVM and using bzero to clear its contents. 4143129198Scognet */ 4144129198Scognetvoid 4145129198Scognetpmap_zero_page(vm_page_t m) 4146129198Scognet{ 4147135641Scognet pmap_zero_page_func(VM_PAGE_TO_PHYS(m), 0, PAGE_SIZE); 4148129198Scognet} 4149129198Scognet 4150129198Scognet 4151129198Scognet/* 4152129198Scognet * pmap_zero_page_area zeros the specified hardware page by mapping 4153129198Scognet * the page into KVM and using bzero to clear its contents. 4154129198Scognet * 4155129198Scognet * off and size may not cover an area beyond a single hardware page. 4156129198Scognet */ 4157129198Scognetvoid 4158129198Scognetpmap_zero_page_area(vm_page_t m, int off, int size) 4159129198Scognet{ 4160129198Scognet 4161129198Scognet pmap_zero_page_func(VM_PAGE_TO_PHYS(m), off, size); 4162129198Scognet} 4163129198Scognet 4164129198Scognet 4165129198Scognet/* 4166129198Scognet * pmap_zero_page_idle zeros the specified hardware page by mapping 4167129198Scognet * the page into KVM and using bzero to clear its contents. This 4168129198Scognet * is intended to be called from the vm_pagezero process only and 4169129198Scognet * outside of Giant. 
4170129198Scognet */ 4171129198Scognetvoid 4172129198Scognetpmap_zero_page_idle(vm_page_t m) 4173129198Scognet{ 4174129198Scognet 4175129198Scognet pmap_zero_page(m); 4176129198Scognet} 4177129198Scognet 4178150865Scognet#if 0 4179129198Scognet/* 4180129198Scognet * pmap_clean_page() 4181129198Scognet * 4182129198Scognet * This is a local function used to work out the best strategy to clean 4183197770Sstas * a single page referenced by its entry in the PV table. It should be used by 4184129198Scognet * pmap_copy_page, pmap_zero_page and maybe some others later on. 4185129198Scognet * 4186129198Scognet * Its policy is effectively: 4187129198Scognet * o If there are no mappings, we don't bother doing anything with the cache. 4188129198Scognet * o If there is one mapping, we clean just that page. 4189129198Scognet * o If there are multiple mappings, we clean the entire cache. 4190129198Scognet * 4191129198Scognet * So that some functions can be further optimised, it returns 0 if it didn't 4192129198Scognet * clean the entire cache, or 1 if it did. 4193129198Scognet * 4194129198Scognet * XXX One bug in this routine is that if the pv_entry has a single page 4195129198Scognet * mapped at 0x00000000 a whole cache clean will be performed rather than 4196129198Scognet * just the 1 page. This should not occur in everyday use, and if it does 4197129198Scognet * the worst outcome is a less efficient clean for the page. 4198197770Sstas * 4199197770Sstas * We don't yet use this function but may want to. 4200129198Scognet */ 4201129198Scognetstatic int 4202129198Scognetpmap_clean_page(struct pv_entry *pv, boolean_t is_src) 4203129198Scognet{ 4204129198Scognet pmap_t pm, pm_to_clean = NULL; 4205129198Scognet struct pv_entry *npv; 4206129198Scognet u_int cache_needs_cleaning = 0; 4207129198Scognet u_int flags = 0; 4208129198Scognet vm_offset_t page_to_clean = 0; 4209129198Scognet 4210129198Scognet if (pv == NULL) { 4211129198Scognet /* nothing mapped in so nothing to flush */ 4212129198Scognet return (0); 4213129198Scognet } 4214129198Scognet 4215129198Scognet /* 4216129198Scognet * Since we flush the cache each time we change to a different 4217129198Scognet * user vmspace, we only need to flush the page if it is in the 4218129198Scognet * current pmap. 4219129198Scognet */ 4220135641Scognet if (curthread) 4221135641Scognet pm = vmspace_pmap(curproc->p_vmspace); 4222129198Scognet else 4223129198Scognet pm = pmap_kernel(); 4224129198Scognet 4225129198Scognet for (npv = pv; npv; npv = TAILQ_NEXT(npv, pv_list)) { 4226129198Scognet if (npv->pv_pmap == pmap_kernel() || npv->pv_pmap == pm) { 4227129198Scognet flags |= npv->pv_flags; 4228129198Scognet /* 4229129198Scognet * The page is mapped non-cacheable in 4230129198Scognet * this map. No need to flush the cache. 
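			 * A page is expected to be either cacheable in
			 * all of its mappings or non-cacheable in all of
			 * them, which is why finding a PVF_NC entry after
			 * a clean has already been deemed necessary is
			 * treated as an inconsistency under DIAGNOSTIC
			 * below.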
4231129198Scognet */ 4232129198Scognet if (npv->pv_flags & PVF_NC) { 4233129198Scognet#ifdef DIAGNOSTIC 4234129198Scognet if (cache_needs_cleaning) 4235129198Scognet panic("pmap_clean_page: " 4236129198Scognet "cache inconsistency"); 4237129198Scognet#endif 4238129198Scognet break; 4239129198Scognet } else if (is_src && (npv->pv_flags & PVF_WRITE) == 0) 4240129198Scognet continue; 4241129198Scognet if (cache_needs_cleaning) { 4242129198Scognet page_to_clean = 0; 4243129198Scognet break; 4244129198Scognet } else { 4245129198Scognet page_to_clean = npv->pv_va; 4246129198Scognet pm_to_clean = npv->pv_pmap; 4247129198Scognet } 4248129198Scognet cache_needs_cleaning = 1; 4249129198Scognet } 4250129198Scognet } 4251129198Scognet if (page_to_clean) { 4252129198Scognet if (PV_BEEN_EXECD(flags)) 4253129198Scognet pmap_idcache_wbinv_range(pm_to_clean, page_to_clean, 4254129198Scognet PAGE_SIZE); 4255129198Scognet else 4256129198Scognet pmap_dcache_wb_range(pm_to_clean, page_to_clean, 4257129198Scognet PAGE_SIZE, !is_src, (flags & PVF_WRITE) == 0); 4258129198Scognet } else if (cache_needs_cleaning) { 4259129198Scognet if (PV_BEEN_EXECD(flags)) 4260129198Scognet pmap_idcache_wbinv_all(pm); 4261129198Scognet else 4262129198Scognet pmap_dcache_wbinv_all(pm); 4263129198Scognet return (1); 4264129198Scognet } 4265129198Scognet return (0); 4266129198Scognet} 4267150865Scognet#endif 4268129198Scognet 4269129198Scognet/* 4270129198Scognet * pmap_copy_page copies the specified (machine independent) 4271129198Scognet * page by mapping the page into virtual memory and using 4272129198Scognet * bcopy to copy the page, one machine dependent page at a 4273129198Scognet * time. 4274129198Scognet */ 4275129198Scognet 4276129198Scognet/* 4277129198Scognet * pmap_copy_page() 4278129198Scognet * 4279129198Scognet * Copy one physical page into another, by mapping the pages into 4280129198Scognet * hook points. The same comment regarding cacheability as in 4281129198Scognet * pmap_zero_page also applies here. 4282129198Scognet */ 4283164778Scognet#if (ARM_MMU_GENERIC + ARM_MMU_SA1) != 0 || defined(CPU_XSCALE_CORE3) 4284129198Scognetvoid 4285129198Scognetpmap_copy_page_generic(vm_paddr_t src, vm_paddr_t dst) 4286129198Scognet{ 4287151596Scognet#if 0 4288129198Scognet struct vm_page *src_pg = PHYS_TO_VM_PAGE(src); 4289151596Scognet#endif 4290129198Scognet#ifdef DEBUG 4291129198Scognet struct vm_page *dst_pg = PHYS_TO_VM_PAGE(dst); 4292129198Scognet 4293129198Scognet if (dst_pg->md.pvh_list != NULL) 4294129198Scognet panic("pmap_copy_page: dst page has mappings"); 4295129198Scognet#endif 4296129198Scognet 4297129198Scognet 4298129198Scognet /* 4299129198Scognet * Clean the source page. Hold the source page's lock for 4300129198Scognet * the duration of the copy so that no other mappings can 4301129198Scognet * be created while we have a potentially aliased mapping. 4302129198Scognet */ 4303129198Scognet#if 0 4304150865Scognet /* 4305150865Scognet * XXX: Not needed while we call cpu_dcache_wbinv_all() in 4306150865Scognet * pmap_copy_page(). 4307150865Scognet */ 4308129198Scognet (void) pmap_clean_page(TAILQ_FIRST(&src_pg->md.pv_list), TRUE); 4309150865Scognet#endif 4310129198Scognet /* 4311129198Scognet * Map the pages into the page hook points, copy them, and purge 4312129198Scognet * the cache for the appropriate page. Invalidate the TLB 4313129198Scognet * as required. 
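	 * The source hook (csrc_pte) is mapped read-only and the
	 * destination hook (cdst_pte) read/write, both cacheable;
	 * afterwards the source range only needs to be invalidated,
	 * while the destination must be written back and invalidated
	 * in both the L1 and L2 caches.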
4314129198Scognet */ 4315159088Scognet mtx_lock(&cmtx); 4316129198Scognet *csrc_pte = L2_S_PROTO | src | 4317129198Scognet L2_S_PROT(PTE_KERNEL, VM_PROT_READ) | pte_l2_s_cache_mode; 4318129198Scognet PTE_SYNC(csrc_pte); 4319129198Scognet *cdst_pte = L2_S_PROTO | dst | 4320129198Scognet L2_S_PROT(PTE_KERNEL, VM_PROT_WRITE) | pte_l2_s_cache_mode; 4321129198Scognet PTE_SYNC(cdst_pte); 4322129198Scognet cpu_tlb_flushD_SE(csrcp); 4323129198Scognet cpu_tlb_flushD_SE(cdstp); 4324129198Scognet cpu_cpwait(); 4325129198Scognet bcopy_page(csrcp, cdstp); 4326159088Scognet mtx_unlock(&cmtx); 4327129198Scognet cpu_dcache_inv_range(csrcp, PAGE_SIZE); 4328129198Scognet cpu_dcache_wbinv_range(cdstp, PAGE_SIZE); 4329183838Sraj cpu_l2cache_inv_range(csrcp, PAGE_SIZE); 4330183838Sraj cpu_l2cache_wbinv_range(cdstp, PAGE_SIZE); 4331129198Scognet} 4332129198Scognet#endif /* (ARM_MMU_GENERIC + ARM_MMU_SA1) != 0 || CPU_XSCALE_CORE3 */ 4333129198Scognet 4334129198Scognet#if ARM_MMU_XSCALE == 1 4335129198Scognetvoid 4336129198Scognetpmap_copy_page_xscale(vm_paddr_t src, vm_paddr_t dst) 4337129198Scognet{ 4338150865Scognet#if 0 4339150865Scognet /* XXX: Only needed for pmap_clean_page(), which is commented out. */ 4340129198Scognet struct vm_page *src_pg = PHYS_TO_VM_PAGE(src); 4341150865Scognet#endif 4342129198Scognet#ifdef DEBUG 4343129198Scognet struct vm_page *dst_pg = PHYS_TO_VM_PAGE(dst); 4344129198Scognet 4345129198Scognet if (dst_pg->md.pvh_list != NULL) 4346129198Scognet panic("pmap_copy_page: dst page has mappings"); 4347129198Scognet#endif 4348129198Scognet 4349129198Scognet 4350129198Scognet /* 4351129198Scognet * Clean the source page. Hold the source page's lock for 4352129198Scognet * the duration of the copy so that no other mappings can 4353129198Scognet * be created while we have a potentially aliased mapping. 4354129198Scognet */ 4355150865Scognet#if 0 4356150865Scognet /* 4357150865Scognet * XXX: Not needed while we call cpu_dcache_wbinv_all() in 4358150865Scognet * pmap_copy_page(). 4359150865Scognet */ 4360130745Scognet (void) pmap_clean_page(TAILQ_FIRST(&src_pg->md.pv_list), TRUE); 4361150865Scognet#endif 4362129198Scognet /* 4363129198Scognet * Map the pages into the page hook points, copy them, and purge 4364129198Scognet * the cache for the appropriate page. Invalidate the TLB 4365129198Scognet * as required. 
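	 * As in pmap_zero_page_xscale(), both hook mappings use the
	 * mini-data cache, so a single xscale_cache_clean_minidata()
	 * call after the copy replaces the full data-cache clean.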
4366129198Scognet */ 4367159088Scognet mtx_lock(&cmtx); 4368129198Scognet *csrc_pte = L2_S_PROTO | src | 4369129198Scognet L2_S_PROT(PTE_KERNEL, VM_PROT_READ) | 4370129198Scognet L2_C | L2_XSCALE_T_TEX(TEX_XSCALE_X); /* mini-data */ 4371129198Scognet PTE_SYNC(csrc_pte); 4372129198Scognet *cdst_pte = L2_S_PROTO | dst | 4373129198Scognet L2_S_PROT(PTE_KERNEL, VM_PROT_WRITE) | 4374129198Scognet L2_C | L2_XSCALE_T_TEX(TEX_XSCALE_X); /* mini-data */ 4375129198Scognet PTE_SYNC(cdst_pte); 4376129198Scognet cpu_tlb_flushD_SE(csrcp); 4377129198Scognet cpu_tlb_flushD_SE(cdstp); 4378129198Scognet cpu_cpwait(); 4379129198Scognet bcopy_page(csrcp, cdstp); 4380159088Scognet mtx_unlock(&cmtx); 4381129198Scognet xscale_cache_clean_minidata(); 4382129198Scognet} 4383129198Scognet#endif /* ARM_MMU_XSCALE == 1 */ 4384129198Scognet 4385129198Scognetvoid 4386129198Scognetpmap_copy_page(vm_page_t src, vm_page_t dst) 4387129198Scognet{ 4388161105Scognet#ifdef ARM_USE_SMALL_ALLOC 4389161105Scognet vm_offset_t srcpg, dstpg; 4390161105Scognet#endif 4391161105Scognet 4392146596Scognet cpu_dcache_wbinv_all(); 4393183838Sraj cpu_l2cache_wbinv_all(); 4394172300Scognet if (_arm_memcpy && PAGE_SIZE >= _min_memcpy_size && 4395150865Scognet _arm_memcpy((void *)VM_PAGE_TO_PHYS(dst), 4396150865Scognet (void *)VM_PAGE_TO_PHYS(src), PAGE_SIZE, IS_PHYSICAL) == 0) 4397150865Scognet return; 4398161105Scognet#ifdef ARM_USE_SMALL_ALLOC 4399161105Scognet srcpg = arm_ptovirt(VM_PAGE_TO_PHYS(src)); 4400161105Scognet dstpg = arm_ptovirt(VM_PAGE_TO_PHYS(dst)); 4401161105Scognet bcopy_page(srcpg, dstpg); 4402161105Scognet cpu_dcache_wbinv_range(dstpg, PAGE_SIZE); 4403183838Sraj cpu_l2cache_wbinv_range(dstpg, PAGE_SIZE); 4404161105Scognet#else 4405129198Scognet pmap_copy_page_func(VM_PAGE_TO_PHYS(src), VM_PAGE_TO_PHYS(dst)); 4406161105Scognet#endif 4407129198Scognet} 4408129198Scognet 4409129198Scognet 4410129198Scognet 4411129198Scognet 4412129198Scognet/* 4413129198Scognet * this routine returns true if a physical page resides 4414129198Scognet * in the given pmap. 4415129198Scognet */ 4416129198Scognetboolean_t 4417129198Scognetpmap_page_exists_quick(pmap_t pmap, vm_page_t m) 4418129198Scognet{ 4419129198Scognet pv_entry_t pv; 4420129198Scognet int loops = 0; 4421208990Salc boolean_t rv; 4422129198Scognet 4423208990Salc KASSERT((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) == 0, 4424208990Salc ("pmap_page_exists_quick: page %p is not managed", m)); 4425208990Salc rv = FALSE; 4426208990Salc vm_page_lock_queues(); 4427208990Salc TAILQ_FOREACH(pv, &m->md.pv_list, pv_list) { 4428129198Scognet if (pv->pv_pmap == pmap) { 4429208990Salc rv = TRUE; 4430208990Salc break; 4431129198Scognet } 4432129198Scognet loops++; 4433129198Scognet if (loops >= 16) 4434129198Scognet break; 4435129198Scognet } 4436208990Salc vm_page_unlock_queues(); 4437208990Salc return (rv); 4438129198Scognet} 4439129198Scognet 4440173708Salc/* 4441173708Salc * pmap_page_wired_mappings: 4442173708Salc * 4443173708Salc * Return the number of managed mappings to the given physical page 4444173708Salc * that are wired. 
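 * Fictitious pages carry no pv list in this pmap, so they always
 * report zero wired mappings.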
4445173708Salc */ 4446173708Salcint 4447173708Salcpmap_page_wired_mappings(vm_page_t m) 4448173708Salc{ 4449173708Salc pv_entry_t pv; 4450173708Salc int count; 4451129198Scognet 4452173708Salc count = 0; 4453173708Salc if ((m->flags & PG_FICTITIOUS) != 0) 4454173708Salc return (count); 4455207796Salc vm_page_lock_queues(); 4456173708Salc TAILQ_FOREACH(pv, &m->md.pv_list, pv_list) 4457173708Salc if ((pv->pv_flags & PVF_WIRED) != 0) 4458173708Salc count++; 4459207796Salc vm_page_unlock_queues(); 4460173708Salc return (count); 4461173708Salc} 4462173708Salc 4463129198Scognet/* 4464129198Scognet * pmap_ts_referenced: 4465129198Scognet * 4466129198Scognet * Return the count of reference bits for a page, clearing all of them. 4467129198Scognet */ 4468129198Scognetint 4469129198Scognetpmap_ts_referenced(vm_page_t m) 4470129198Scognet{ 4471164778Scognet 4472208990Salc KASSERT((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) == 0, 4473208990Salc ("pmap_ts_referenced: page %p is not managed", m)); 4474135641Scognet return (pmap_clearbit(m, PVF_REF)); 4475129198Scognet} 4476129198Scognet 4477129198Scognet 4478129198Scognetboolean_t 4479129198Scognetpmap_is_modified(vm_page_t m) 4480129198Scognet{ 4481135641Scognet 4482208504Salc KASSERT((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) == 0, 4483208504Salc ("pmap_is_modified: page %p is not managed", m)); 4484135641Scognet if (m->md.pvh_attrs & PVF_MOD) 4485135641Scognet return (TRUE); 4486129198Scognet 4487129198Scognet return(FALSE); 4488129198Scognet} 4489129198Scognet 4490129198Scognet 4491129198Scognet/* 4492129198Scognet * Clear the modify bits on the specified physical page. 4493129198Scognet */ 4494129198Scognetvoid 4495129198Scognetpmap_clear_modify(vm_page_t m) 4496129198Scognet{ 4497129198Scognet 4498208504Salc KASSERT((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) == 0, 4499208504Salc ("pmap_clear_modify: page %p is not managed", m)); 4500208504Salc VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED); 4501208504Salc KASSERT((m->oflags & VPO_BUSY) == 0, 4502208504Salc ("pmap_clear_modify: page %p is busy", m)); 4503208504Salc 4504208504Salc /* 4505208504Salc * If the page is not PG_WRITEABLE, then no mappings can be modified. 4506208504Salc * If the object containing the page is locked and the page is not 4507208504Salc * VPO_BUSY, then PG_WRITEABLE cannot be concurrently set. 4508208504Salc */ 4509208504Salc if ((m->flags & PG_WRITEABLE) == 0) 4510208504Salc return; 4511129198Scognet if (m->md.pvh_attrs & PVF_MOD) 4512129198Scognet pmap_clearbit(m, PVF_MOD); 4513129198Scognet} 4514129198Scognet 4515129198Scognet 4516129198Scognet/* 4517207155Salc * pmap_is_referenced: 4518207155Salc * 4519207155Salc * Return whether or not the specified physical page was referenced 4520207155Salc * in any physical maps. 4521207155Salc */ 4522207155Salcboolean_t 4523207155Salcpmap_is_referenced(vm_page_t m) 4524207155Salc{ 4525207155Salc 4526208574Salc KASSERT((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) == 0, 4527208574Salc ("pmap_is_referenced: page %p is not managed", m)); 4528208574Salc return ((m->md.pvh_attrs & PVF_REF) != 0); 4529207155Salc} 4530207155Salc 4531207155Salc/* 4532129198Scognet * pmap_clear_reference: 4533129198Scognet * 4534129198Scognet * Clear the reference bit on the specified physical page. 
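 * The reference attribute lives in md.pvh_attrs; the actual work of
 * clearing it across the page's mappings is delegated to
 * pmap_clearbit(m, PVF_REF).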
4535129198Scognet */ 4536129198Scognetvoid 4537129198Scognetpmap_clear_reference(vm_page_t m) 4538129198Scognet{ 4539129198Scognet 4540208504Salc KASSERT((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) == 0, 4541208504Salc ("pmap_clear_reference: page %p is not managed", m)); 4542129198Scognet if (m->md.pvh_attrs & PVF_REF) 4543129198Scognet pmap_clearbit(m, PVF_REF); 4544129198Scognet} 4545129198Scognet 4546129198Scognet 4547129198Scognet/* 4548160537Salc * Clear the write and modified bits in each of the given page's mappings. 4549160537Salc */ 4550160537Salcvoid 4551160889Salcpmap_remove_write(vm_page_t m) 4552160537Salc{ 4553160537Salc 4554208175Salc KASSERT((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) == 0, 4555208175Salc ("pmap_remove_write: page %p is not managed", m)); 4556208175Salc 4557208175Salc /* 4558208175Salc * If the page is not VPO_BUSY, then PG_WRITEABLE cannot be set by 4559208175Salc * another thread while the object is locked. Thus, if PG_WRITEABLE 4560208175Salc * is clear, no page table entries need updating. 4561208175Salc */ 4562208175Salc VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED); 4563208175Salc if ((m->oflags & VPO_BUSY) != 0 || 4564208990Salc (m->flags & PG_WRITEABLE) != 0) 4565160537Salc pmap_clearbit(m, PVF_WRITE); 4566160537Salc} 4567160537Salc 4568160537Salc 4569160537Salc/* 4570129198Scognet * perform the pmap work for mincore 4571129198Scognet */ 4572129198Scognetint 4573208504Salcpmap_mincore(pmap_t pmap, vm_offset_t addr, vm_paddr_t *locked_pa) 4574129198Scognet{ 4575129198Scognet printf("pmap_mincore()\n"); 4576129198Scognet 4577129198Scognet return (0); 4578129198Scognet} 4579129198Scognet 4580129198Scognet 4581198341Smarcelvoid 4582198341Smarcelpmap_sync_icache(pmap_t pm, vm_offset_t va, vm_size_t sz) 4583198341Smarcel{ 4584198341Smarcel} 4585198341Smarcel 4586198341Smarcel 4587178893Salc/* 4588178893Salc * Increase the starting virtual address of the given mapping if a 4589178893Salc * different alignment might result in more superpage mappings. 4590178893Salc */ 4591178893Salcvoid 4592178893Salcpmap_align_superpage(vm_object_t object, vm_ooffset_t offset, 4593178893Salc vm_offset_t *addr, vm_size_t size) 4594178893Salc{ 4595178893Salc} 4596129198Scognet 4597178893Salc 4598129198Scognet/* 4599129198Scognet * Map a set of physical memory pages into the kernel virtual 4600129198Scognet * address space. Return a pointer to where it is mapped. This 4601129198Scognet * routine is intended to be used for mapping device memory, 4602129198Scognet * NOT real memory. 
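 * Illustrative use only (the register address and size below are
 * hypothetical):
 *
 *	regs = pmap_mapdev(0x101f1000, 0x1000);
 *
 * The returned pointer includes the sub-page offset of "pa", so
 * device addresses that are not page-aligned work as expected.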
4603129198Scognet */ 4604129198Scognetvoid * 4605129198Scognetpmap_mapdev(vm_offset_t pa, vm_size_t size) 4606129198Scognet{ 4607129198Scognet vm_offset_t va, tmpva, offset; 4608129198Scognet 4609129198Scognet offset = pa & PAGE_MASK; 4610135641Scognet size = roundup(size, PAGE_SIZE); 4611129198Scognet 4612129198Scognet GIANT_REQUIRED; 4613129198Scognet 4614132560Salc va = kmem_alloc_nofault(kernel_map, size); 4615129198Scognet if (!va) 4616129198Scognet panic("pmap_mapdev: Couldn't alloc kernel virtual memory"); 4617129198Scognet for (tmpva = va; size > 0;) { 4618135641Scognet pmap_kenter_internal(tmpva, pa, 0); 4619129198Scognet size -= PAGE_SIZE; 4620129198Scognet tmpva += PAGE_SIZE; 4621129198Scognet pa += PAGE_SIZE; 4622129198Scognet } 4623129198Scognet 4624159068Sbenno return ((void *)(va + offset)); 4625129198Scognet} 4626129198Scognet 4627129198Scognet#define BOOTSTRAP_DEBUG 4628129198Scognet 4629129198Scognet/* 4630129198Scognet * pmap_map_section: 4631129198Scognet * 4632129198Scognet * Create a single section mapping. 4633129198Scognet */ 4634129198Scognetvoid 4635129198Scognetpmap_map_section(vm_offset_t l1pt, vm_offset_t va, vm_offset_t pa, 4636129198Scognet int prot, int cache) 4637129198Scognet{ 4638129198Scognet pd_entry_t *pde = (pd_entry_t *) l1pt; 4639129198Scognet pd_entry_t fl; 4640129198Scognet 4641129198Scognet KASSERT(((va | pa) & L1_S_OFFSET) == 0, ("ouin2")); 4642129198Scognet 4643129198Scognet switch (cache) { 4644129198Scognet case PTE_NOCACHE: 4645129198Scognet default: 4646129198Scognet fl = 0; 4647129198Scognet break; 4648129198Scognet 4649129198Scognet case PTE_CACHE: 4650129198Scognet fl = pte_l1_s_cache_mode; 4651129198Scognet break; 4652129198Scognet 4653129198Scognet case PTE_PAGETABLE: 4654129198Scognet fl = pte_l1_s_cache_mode_pt; 4655129198Scognet break; 4656129198Scognet } 4657129198Scognet 4658129198Scognet pde[va >> L1_S_SHIFT] = L1_S_PROTO | pa | 4659129198Scognet L1_S_PROT(PTE_KERNEL, prot) | fl | L1_S_DOM(PMAP_DOMAIN_KERNEL); 4660129198Scognet PTE_SYNC(&pde[va >> L1_S_SHIFT]); 4661129198Scognet 4662129198Scognet} 4663129198Scognet 4664129198Scognet/* 4665129198Scognet * pmap_link_l2pt: 4666129198Scognet * 4667164079Scognet * Link the L2 page table specified by l2pv.pv_pa into the L1 4668129198Scognet * page table at the slot for "va". 4669129198Scognet */ 4670129198Scognetvoid 4671129198Scognetpmap_link_l2pt(vm_offset_t l1pt, vm_offset_t va, struct pv_addr *l2pv) 4672129198Scognet{ 4673129198Scognet pd_entry_t *pde = (pd_entry_t *) l1pt, proto; 4674129198Scognet u_int slot = va >> L1_S_SHIFT; 4675129198Scognet 4676129198Scognet proto = L1_S_DOM(PMAP_DOMAIN_KERNEL) | L1_C_PROTO; 4677129198Scognet 4678164079Scognet#ifdef VERBOSE_INIT_ARM 4679164079Scognet printf("pmap_link_l2pt: pa=0x%x va=0x%x\n", l2pv->pv_pa, l2pv->pv_va); 4680164079Scognet#endif 4681164079Scognet 4682129198Scognet pde[slot + 0] = proto | (l2pv->pv_pa + 0x000); 4683164079Scognet 4684129198Scognet PTE_SYNC(&pde[slot]); 4685129198Scognet 4686129198Scognet SLIST_INSERT_HEAD(&kernel_pt_list, l2pv, pv_list); 4687129198Scognet 4688129198Scognet 4689129198Scognet} 4690129198Scognet 4691129198Scognet/* 4692129198Scognet * pmap_map_entry 4693129198Scognet * 4694129198Scognet * Create a single page mapping. 
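 * This is a bootstrap-time helper: rather than allocating, it panics
 * if no L2 table is already present in the L1 slot covering "va".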
4695129198Scognet */ 4696129198Scognetvoid 4697129198Scognetpmap_map_entry(vm_offset_t l1pt, vm_offset_t va, vm_offset_t pa, int prot, 4698129198Scognet int cache) 4699129198Scognet{ 4700129198Scognet pd_entry_t *pde = (pd_entry_t *) l1pt; 4701129198Scognet pt_entry_t fl; 4702129198Scognet pt_entry_t *pte; 4703129198Scognet 4704129198Scognet KASSERT(((va | pa) & PAGE_MASK) == 0, ("ouin")); 4705129198Scognet 4706129198Scognet switch (cache) { 4707129198Scognet case PTE_NOCACHE: 4708129198Scognet default: 4709129198Scognet fl = 0; 4710129198Scognet break; 4711129198Scognet 4712129198Scognet case PTE_CACHE: 4713129198Scognet fl = pte_l2_s_cache_mode; 4714129198Scognet break; 4715129198Scognet 4716129198Scognet case PTE_PAGETABLE: 4717129198Scognet fl = pte_l2_s_cache_mode_pt; 4718129198Scognet break; 4719129198Scognet } 4720129198Scognet 4721129198Scognet if ((pde[va >> L1_S_SHIFT] & L1_TYPE_MASK) != L1_TYPE_C) 4722129198Scognet panic("pmap_map_entry: no L2 table for VA 0x%08x", va); 4723129198Scognet 4724129198Scognet pte = (pt_entry_t *) kernel_pt_lookup(pde[L1_IDX(va)] & L1_C_ADDR_MASK); 4725129198Scognet 4726129198Scognet if (pte == NULL) 4727129198Scognet panic("pmap_map_entry: can't find L2 table for VA 0x%08x", va); 4728129198Scognet 4729129198Scognet pte[l2pte_index(va)] = 4730129198Scognet L2_S_PROTO | pa | L2_S_PROT(PTE_KERNEL, prot) | fl; 4731129198Scognet PTE_SYNC(&pte[l2pte_index(va)]); 4732129198Scognet} 4733129198Scognet 4734129198Scognet/* 4735129198Scognet * pmap_map_chunk: 4736129198Scognet * 4737129198Scognet * Map a chunk of memory using the most efficient mappings 4738129198Scognet * possible (section, large page, small page) into the 4739129198Scognet * provided L1 and L2 tables at the specified virtual address. 4740129198Scognet */ 4741129198Scognetvm_size_t 4742129198Scognetpmap_map_chunk(vm_offset_t l1pt, vm_offset_t va, vm_offset_t pa, 4743129198Scognet vm_size_t size, int prot, int cache) 4744129198Scognet{ 4745129198Scognet pd_entry_t *pde = (pd_entry_t *) l1pt; 4746129198Scognet pt_entry_t *pte, f1, f2s, f2l; 4747129198Scognet vm_size_t resid; 4748129198Scognet int i; 4749129198Scognet 4750129198Scognet resid = (size + (PAGE_SIZE - 1)) & ~(PAGE_SIZE - 1); 4751129198Scognet 4752129198Scognet if (l1pt == 0) 4753129198Scognet panic("pmap_map_chunk: no L1 table provided"); 4754129198Scognet 4755129198Scognet#ifdef VERBOSE_INIT_ARM 4756159322Scognet printf("pmap_map_chunk: pa=0x%x va=0x%x size=0x%x resid=0x%x " 4757129198Scognet "prot=0x%x cache=%d\n", pa, va, size, resid, prot, cache); 4758129198Scognet#endif 4759129198Scognet 4760129198Scognet switch (cache) { 4761129198Scognet case PTE_NOCACHE: 4762129198Scognet default: 4763129198Scognet f1 = 0; 4764129198Scognet f2l = 0; 4765129198Scognet f2s = 0; 4766129198Scognet break; 4767129198Scognet 4768129198Scognet case PTE_CACHE: 4769129198Scognet f1 = pte_l1_s_cache_mode; 4770129198Scognet f2l = pte_l2_l_cache_mode; 4771129198Scognet f2s = pte_l2_s_cache_mode; 4772129198Scognet break; 4773129198Scognet 4774129198Scognet case PTE_PAGETABLE: 4775129198Scognet f1 = pte_l1_s_cache_mode_pt; 4776129198Scognet f2l = pte_l2_l_cache_mode_pt; 4777129198Scognet f2s = pte_l2_s_cache_mode_pt; 4778129198Scognet break; 4779129198Scognet } 4780129198Scognet 4781129198Scognet size = resid; 4782129198Scognet 4783129198Scognet while (resid > 0) { 4784129198Scognet /* See if we can use a section mapping. 
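		 * A section mapping covers 1MB at a time and is only
		 * used when va, pa and the remaining size all permit
		 * it, which is what L1_S_MAPPABLE_P() checks.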
*/ 4785129198Scognet if (L1_S_MAPPABLE_P(va, pa, resid)) { 4786129198Scognet#ifdef VERBOSE_INIT_ARM 4787129198Scognet printf("S"); 4788129198Scognet#endif 4789129198Scognet pde[va >> L1_S_SHIFT] = L1_S_PROTO | pa | 4790129198Scognet L1_S_PROT(PTE_KERNEL, prot) | f1 | 4791129198Scognet L1_S_DOM(PMAP_DOMAIN_KERNEL); 4792129198Scognet PTE_SYNC(&pde[va >> L1_S_SHIFT]); 4793129198Scognet va += L1_S_SIZE; 4794129198Scognet pa += L1_S_SIZE; 4795129198Scognet resid -= L1_S_SIZE; 4796129198Scognet continue; 4797129198Scognet } 4798129198Scognet 4799129198Scognet /* 4800129198Scognet * Ok, we're going to use an L2 table. Make sure 4801129198Scognet * one is actually in the corresponding L1 slot 4802129198Scognet * for the current VA. 4803129198Scognet */ 4804129198Scognet if ((pde[va >> L1_S_SHIFT] & L1_TYPE_MASK) != L1_TYPE_C) 4805129198Scognet panic("pmap_map_chunk: no L2 table for VA 0x%08x", va); 4806129198Scognet 4807129198Scognet pte = (pt_entry_t *) kernel_pt_lookup( 4808129198Scognet pde[L1_IDX(va)] & L1_C_ADDR_MASK); 4809129198Scognet if (pte == NULL) 4810129198Scognet panic("pmap_map_chunk: can't find L2 table for VA " 4811129198Scognet "0x%08x", va); 4812129198Scognet /* See if we can use an L2 large page mapping. */ 4813129198Scognet if (L2_L_MAPPABLE_P(va, pa, resid)) { 4814129198Scognet#ifdef VERBOSE_INIT_ARM 4815129198Scognet printf("L"); 4816129198Scognet#endif 4817129198Scognet for (i = 0; i < 16; i++) { 4818129198Scognet pte[l2pte_index(va) + i] = 4819129198Scognet L2_L_PROTO | pa | 4820129198Scognet L2_L_PROT(PTE_KERNEL, prot) | f2l; 4821129198Scognet PTE_SYNC(&pte[l2pte_index(va) + i]); 4822129198Scognet } 4823129198Scognet va += L2_L_SIZE; 4824129198Scognet pa += L2_L_SIZE; 4825129198Scognet resid -= L2_L_SIZE; 4826129198Scognet continue; 4827129198Scognet } 4828129198Scognet 4829129198Scognet /* Use a small page mapping. */ 4830129198Scognet#ifdef VERBOSE_INIT_ARM 4831129198Scognet printf("P"); 4832129198Scognet#endif 4833129198Scognet pte[l2pte_index(va)] = 4834129198Scognet L2_S_PROTO | pa | L2_S_PROT(PTE_KERNEL, prot) | f2s; 4835129198Scognet PTE_SYNC(&pte[l2pte_index(va)]); 4836129198Scognet va += PAGE_SIZE; 4837129198Scognet pa += PAGE_SIZE; 4838129198Scognet resid -= PAGE_SIZE; 4839129198Scognet } 4840129198Scognet#ifdef VERBOSE_INIT_ARM 4841129198Scognet printf("\n"); 4842129198Scognet#endif 4843129198Scognet return (size); 4844129198Scognet 4845129198Scognet} 4846129198Scognet 4847135641Scognet/********************** Static device map routines ***************************/ 4848135641Scognet 4849135641Scognetstatic const struct pmap_devmap *pmap_devmap_table; 4850135641Scognet 4851135641Scognet/* 4852135641Scognet * Register the devmap table. This is provided in case early console 4853135641Scognet * initialization needs to register mappings created by bootstrap code 4854135641Scognet * before pmap_devmap_bootstrap() is called. 4855135641Scognet */ 4856135641Scognetvoid 4857135641Scognetpmap_devmap_register(const struct pmap_devmap *table) 4858135641Scognet{ 4859135641Scognet 4860135641Scognet pmap_devmap_table = table; 4861135641Scognet} 4862135641Scognet 4863135641Scognet/* 4864135641Scognet * Map all of the static regions in the devmap table, and remember 4865135641Scognet * the devmap table so other parts of the kernel can look up entries 4866135641Scognet * later. 
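 * The table must be terminated by an entry whose pd_size is zero;
 * that sentinel is what the loop below and the find routines stop on.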
4867135641Scognet */ 4868135641Scognetvoid 4869135641Scognetpmap_devmap_bootstrap(vm_offset_t l1pt, const struct pmap_devmap *table) 4870135641Scognet{ 4871135641Scognet int i; 4872135641Scognet 4873135641Scognet pmap_devmap_table = table; 4874135641Scognet 4875135641Scognet for (i = 0; pmap_devmap_table[i].pd_size != 0; i++) { 4876135641Scognet#ifdef VERBOSE_INIT_ARM 4877159322Scognet printf("devmap: %08x -> %08x @ %08x\n", 4878135641Scognet pmap_devmap_table[i].pd_pa, 4879135641Scognet pmap_devmap_table[i].pd_pa + 4880135641Scognet pmap_devmap_table[i].pd_size - 1, 4881135641Scognet pmap_devmap_table[i].pd_va); 4882135641Scognet#endif 4883135641Scognet pmap_map_chunk(l1pt, pmap_devmap_table[i].pd_va, 4884135641Scognet pmap_devmap_table[i].pd_pa, 4885135641Scognet pmap_devmap_table[i].pd_size, 4886135641Scognet pmap_devmap_table[i].pd_prot, 4887135641Scognet pmap_devmap_table[i].pd_cache); 4888135641Scognet } 4889135641Scognet} 4890135641Scognet 4891135641Scognetconst struct pmap_devmap * 4892135641Scognetpmap_devmap_find_pa(vm_paddr_t pa, vm_size_t size) 4893135641Scognet{ 4894135641Scognet int i; 4895135641Scognet 4896135641Scognet if (pmap_devmap_table == NULL) 4897135641Scognet return (NULL); 4898135641Scognet 4899135641Scognet for (i = 0; pmap_devmap_table[i].pd_size != 0; i++) { 4900135641Scognet if (pa >= pmap_devmap_table[i].pd_pa && 4901135641Scognet pa + size <= pmap_devmap_table[i].pd_pa + 4902135641Scognet pmap_devmap_table[i].pd_size) 4903135641Scognet return (&pmap_devmap_table[i]); 4904135641Scognet } 4905135641Scognet 4906135641Scognet return (NULL); 4907135641Scognet} 4908135641Scognet 4909135641Scognetconst struct pmap_devmap * 4910135641Scognetpmap_devmap_find_va(vm_offset_t va, vm_size_t size) 4911135641Scognet{ 4912135641Scognet int i; 4913135641Scognet 4914135641Scognet if (pmap_devmap_table == NULL) 4915135641Scognet return (NULL); 4916135641Scognet 4917135641Scognet for (i = 0; pmap_devmap_table[i].pd_size != 0; i++) { 4918135641Scognet if (va >= pmap_devmap_table[i].pd_va && 4919135641Scognet va + size <= pmap_devmap_table[i].pd_va + 4920135641Scognet pmap_devmap_table[i].pd_size) 4921135641Scognet return (&pmap_devmap_table[i]); 4922135641Scognet } 4923135641Scognet 4924135641Scognet return (NULL); 4925135641Scognet} 4926135641Scognet 4927
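#if 0
/*
 * Illustrative sketch only, not compiled: how early platform code
 * might describe its static device regions and hand them to the
 * devmap routines above.  The addresses, sizes and the "example_"
 * name are hypothetical; the initializers follow the pd_va, pd_pa,
 * pd_size, pd_prot, pd_cache layout of struct pmap_devmap.
 */
static const struct pmap_devmap example_devmap[] = {
	{
		0xd0000000,			/* pd_va (hypothetical) */
		0x10000000,			/* pd_pa (hypothetical) */
		0x00100000,			/* pd_size: 1MB, section-mappable */
		VM_PROT_READ | VM_PROT_WRITE,	/* pd_prot */
		PTE_NOCACHE,			/* pd_cache: device memory */
	},
	{ 0, 0, 0, 0, 0 }			/* pd_size == 0 terminates */
};

/*
 * From initarm()-style startup code, once the kernel L1 table is set
 * up (l1pt being its virtual address):
 *
 *	pmap_devmap_bootstrap(l1pt, example_devmap);
 *
 * Each entry is then mapped with pmap_map_chunk() and the table is
 * remembered for later pmap_devmap_find_pa()/pmap_devmap_find_va()
 * lookups.
 */
#endif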