1/* From: $NetBSD: pmap.c,v 1.148 2004/04/03 04:35:48 bsh Exp $ */ 2/*- 3 * Copyright 2004 Olivier Houchard. 4 * Copyright 2003 Wasabi Systems, Inc. 5 * All rights reserved. 6 * 7 * Written by Steve C. Woodford for Wasabi Systems, Inc. 8 * 9 * Redistribution and use in source and binary forms, with or without 10 * modification, are permitted provided that the following conditions 11 * are met: 12 * 1. Redistributions of source code must retain the above copyright 13 * notice, this list of conditions and the following disclaimer. 14 * 2. Redistributions in binary form must reproduce the above copyright 15 * notice, this list of conditions and the following disclaimer in the 16 * documentation and/or other materials provided with the distribution. 17 * 3. All advertising materials mentioning features or use of this software 18 * must display the following acknowledgement: 19 * This product includes software developed for the NetBSD Project by 20 * Wasabi Systems, Inc. 21 * 4. The name of Wasabi Systems, Inc. may not be used to endorse 22 * or promote products derived from this software without specific prior 23 * written permission. 24 * 25 * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND 26 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED 27 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR 28 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL WASABI SYSTEMS, INC 29 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 30 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 31 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 32 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 33 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 34 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 35 * POSSIBILITY OF SUCH DAMAGE. 36 */ 37 38/*- 39 * Copyright (c) 2002-2003 Wasabi Systems, Inc. 40 * Copyright (c) 2001 Richard Earnshaw 41 * Copyright (c) 2001-2002 Christopher Gilbert 42 * All rights reserved. 43 * 44 * 1. Redistributions of source code must retain the above copyright 45 * notice, this list of conditions and the following disclaimer. 46 * 2. Redistributions in binary form must reproduce the above copyright 47 * notice, this list of conditions and the following disclaimer in the 48 * documentation and/or other materials provided with the distribution. 49 * 3. The name of the company nor the name of the author may be used to 50 * endorse or promote products derived from this software without specific 51 * prior written permission. 52 * 53 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED 54 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF 55 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 56 * IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, 57 * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES 58 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR 59 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 60 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 61 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 62 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 63 * SUCH DAMAGE. 64 */ 65/*- 66 * Copyright (c) 1999 The NetBSD Foundation, Inc. 67 * All rights reserved. 
68 * 69 * This code is derived from software contributed to The NetBSD Foundation 70 * by Charles M. Hannum. 71 * 72 * Redistribution and use in source and binary forms, with or without 73 * modification, are permitted provided that the following conditions 74 * are met: 75 * 1. Redistributions of source code must retain the above copyright 76 * notice, this list of conditions and the following disclaimer. 77 * 2. Redistributions in binary form must reproduce the above copyright 78 * notice, this list of conditions and the following disclaimer in the 79 * documentation and/or other materials provided with the distribution. 80 * 81 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS 82 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED 83 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR 84 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS 85 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 86 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 87 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 88 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 89 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 90 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 91 * POSSIBILITY OF SUCH DAMAGE. 92 */ 93 94/*- 95 * Copyright (c) 1994-1998 Mark Brinicombe. 96 * Copyright (c) 1994 Brini. 97 * All rights reserved. 98 * 99 * This code is derived from software written for Brini by Mark Brinicombe 100 * 101 * Redistribution and use in source and binary forms, with or without 102 * modification, are permitted provided that the following conditions 103 * are met: 104 * 1. Redistributions of source code must retain the above copyright 105 * notice, this list of conditions and the following disclaimer. 106 * 2. Redistributions in binary form must reproduce the above copyright 107 * notice, this list of conditions and the following disclaimer in the 108 * documentation and/or other materials provided with the distribution. 109 * 3. All advertising materials mentioning features or use of this software 110 * must display the following acknowledgement: 111 * This product includes software developed by Mark Brinicombe. 112 * 4. The name of the author may not be used to endorse or promote products 113 * derived from this software without specific prior written permission. 114 * 115 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR 116 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES 117 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
118 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, 119 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT 120 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 121 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 122 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 123 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF 124 * 125 * RiscBSD kernel project 126 * 127 * pmap.c 128 * 129 * Machine dependent vm stuff 130 * 131 * Created : 20/09/94 132 */ 133 134/* 135 * Special compilation symbols 136 * PMAP_DEBUG - Build in pmap_debug_level code 137 * 138 * Note that pmap_mapdev() and pmap_unmapdev() are implemented in arm/devmap.c 139 */ 140/* Include header files */ 141 142#include "opt_vm.h" 143 144#include <sys/cdefs.h> 145__FBSDID("$FreeBSD: stable/11/sys/arm/arm/pmap-v4.c 331520 2018-03-25 01:47:57Z ian $"); 146#include <sys/param.h> 147#include <sys/systm.h> 148#include <sys/kernel.h> 149#include <sys/ktr.h> 150#include <sys/lock.h> 151#include <sys/proc.h> 152#include <sys/malloc.h> 153#include <sys/msgbuf.h> 154#include <sys/mutex.h> 155#include <sys/vmmeter.h> 156#include <sys/mman.h> 157#include <sys/rwlock.h> 158#include <sys/smp.h> 159#include <sys/sched.h> 160 161#include <vm/vm.h> 162#include <vm/vm_param.h> 163#include <vm/uma.h> 164#include <vm/pmap.h> 165#include <vm/vm_kern.h> 166#include <vm/vm_object.h> 167#include <vm/vm_map.h> 168#include <vm/vm_page.h> 169#include <vm/vm_pageout.h> 170#include <vm/vm_phys.h> 171#include <vm/vm_extern.h> 172 173#include <machine/md_var.h> 174#include <machine/cpu.h> 175#include <machine/cpufunc.h> 176#include <machine/pcb.h> 177 178#ifdef PMAP_DEBUG 179#define PDEBUG(_lev_,_stat_) \ 180 if (pmap_debug_level >= (_lev_)) \ 181 ((_stat_)) 182#define dprintf printf 183 184int pmap_debug_level = 0; 185#define PMAP_INLINE 186#else /* PMAP_DEBUG */ 187#define PDEBUG(_lev_,_stat_) /* Nothing */ 188#define dprintf(x, arg...) 
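/*
 * Illustrative note (not from the original sources): callers wrap their
 * diagnostics in PDEBUG() so the output costs nothing unless PMAP_DEBUG
 * is compiled in, e.g.
 *
 *	PDEBUG(1, printf("pmap_pinit0: pmap = %08x\n", (u_int32_t)pmap));
 *
 * With PMAP_DEBUG defined the statement runs whenever pmap_debug_level
 * is at least the given level; otherwise the macro expands to nothing.
 */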
189#define PMAP_INLINE __inline 190#endif /* PMAP_DEBUG */ 191 192extern struct pv_addr systempage; 193 194extern int last_fault_code; 195 196/* 197 * Internal function prototypes 198 */ 199static void pmap_free_pv_entry (pv_entry_t); 200static pv_entry_t pmap_get_pv_entry(void); 201 202static int pmap_enter_locked(pmap_t, vm_offset_t, vm_page_t, 203 vm_prot_t, u_int); 204static vm_paddr_t pmap_extract_locked(pmap_t pmap, vm_offset_t va); 205static void pmap_fix_cache(struct vm_page *, pmap_t, vm_offset_t); 206static void pmap_alloc_l1(pmap_t); 207static void pmap_free_l1(pmap_t); 208 209static int pmap_clearbit(struct vm_page *, u_int); 210 211static struct l2_bucket *pmap_get_l2_bucket(pmap_t, vm_offset_t); 212static struct l2_bucket *pmap_alloc_l2_bucket(pmap_t, vm_offset_t); 213static void pmap_free_l2_bucket(pmap_t, struct l2_bucket *, u_int); 214static vm_offset_t kernel_pt_lookup(vm_paddr_t); 215 216static MALLOC_DEFINE(M_VMPMAP, "pmap", "PMAP L1"); 217 218vm_offset_t virtual_avail; /* VA of first avail page (after kernel bss) */ 219vm_offset_t virtual_end; /* VA of last avail page (end of kernel AS) */ 220vm_offset_t pmap_curmaxkvaddr; 221vm_paddr_t kernel_l1pa; 222 223vm_offset_t kernel_vm_end = 0; 224 225vm_offset_t vm_max_kernel_address; 226 227struct pmap kernel_pmap_store; 228 229static pt_entry_t *csrc_pte, *cdst_pte; 230static vm_offset_t csrcp, cdstp, qmap_addr; 231static struct mtx cmtx, qmap_mtx; 232 233static void pmap_init_l1(struct l1_ttable *, pd_entry_t *); 234/* 235 * These routines are called when the CPU type is identified to set up 236 * the PTE prototypes, cache modes, etc. 237 * 238 * The variables are always here, just in case LKMs need to reference 239 * them (though, they shouldn't). 240 */ 241 242pt_entry_t pte_l1_s_cache_mode; 243pt_entry_t pte_l1_s_cache_mode_pt; 244pt_entry_t pte_l1_s_cache_mask; 245 246pt_entry_t pte_l2_l_cache_mode; 247pt_entry_t pte_l2_l_cache_mode_pt; 248pt_entry_t pte_l2_l_cache_mask; 249 250pt_entry_t pte_l2_s_cache_mode; 251pt_entry_t pte_l2_s_cache_mode_pt; 252pt_entry_t pte_l2_s_cache_mask; 253 254pt_entry_t pte_l2_s_prot_u; 255pt_entry_t pte_l2_s_prot_w; 256pt_entry_t pte_l2_s_prot_mask; 257 258pt_entry_t pte_l1_s_proto; 259pt_entry_t pte_l1_c_proto; 260pt_entry_t pte_l2_s_proto; 261 262void (*pmap_copy_page_func)(vm_paddr_t, vm_paddr_t); 263void (*pmap_copy_page_offs_func)(vm_paddr_t a_phys, 264 vm_offset_t a_offs, vm_paddr_t b_phys, vm_offset_t b_offs, 265 int cnt); 266void (*pmap_zero_page_func)(vm_paddr_t, int, int); 267 268struct msgbuf *msgbufp = NULL; 269 270/* 271 * Crashdump maps. 272 */ 273static caddr_t crashdumpmap; 274 275extern void bcopy_page(vm_offset_t, vm_offset_t); 276extern void bzero_page(vm_offset_t); 277 278extern vm_offset_t alloc_firstaddr; 279 280char *_tmppt; 281 282/* 283 * Metadata for L1 translation tables. 284 */ 285struct l1_ttable { 286 /* Entry on the L1 Table list */ 287 SLIST_ENTRY(l1_ttable) l1_link; 288 289 /* Entry on the L1 Least Recently Used list */ 290 TAILQ_ENTRY(l1_ttable) l1_lru; 291 292 /* Track how many domains are allocated from this L1 */ 293 volatile u_int l1_domain_use_count; 294 295 /* 296 * A free-list of domain numbers for this L1. 297 * We avoid using ffs() and a bitmap to track domains since ffs() 298 * is slow on ARM. 
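	 *
	 * l1_domain_first holds the next free domain number and each entry
	 * of l1_domain_free[] holds the number that follows it, forming a
	 * simple singly-linked free list (see pmap_alloc_l1() and
	 * pmap_free_l1() below).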
	 */
	u_int8_t l1_domain_first;
	u_int8_t l1_domain_free[PMAP_DOMAINS];

	/* Physical address of this L1 page table */
	vm_paddr_t l1_physaddr;

	/* KVA of this L1 page table */
	pd_entry_t *l1_kva;
};

/*
 * Convert a virtual address into its L1 table index. That is, the
 * index used to locate the L2 descriptor table pointer in an L1 table.
 * This is basically used to index l1->l1_kva[].
 *
 * Each L2 descriptor table represents 1MB of VA space.
 */
#define	L1_IDX(va)		(((vm_offset_t)(va)) >> L1_S_SHIFT)

/*
 * L1 Page Tables are tracked using a Least Recently Used list.
 * - New L1s are allocated from the HEAD.
 * - Freed L1s are added to the TAIL.
 * - Recently accessed L1s (where an 'access' is some change to one of
 *   the userland pmaps which own this L1) are moved to the TAIL.
 */
static TAILQ_HEAD(, l1_ttable) l1_lru_list;
/*
 * A list of all L1 tables
 */
static SLIST_HEAD(, l1_ttable) l1_list;
static struct mtx l1_lru_lock;

/*
 * The l2_dtable tracks L2_BUCKET_SIZE worth of L1 slots.
 *
 * This is normally 16MB worth of L2 page descriptors for any given pmap.
 * Reference counts are maintained for L2 descriptors so they can be
 * freed when empty.
 */
struct l2_dtable {
	/* The number of L2 page descriptors allocated to this l2_dtable */
	u_int l2_occupancy;

	/* List of L2 page descriptors */
	struct l2_bucket {
		pt_entry_t *l2b_kva;	/* KVA of L2 Descriptor Table */
		vm_paddr_t l2b_phys;	/* Physical address of same */
		u_short l2b_l1idx;	/* This L2 table's L1 index */
		u_short l2b_occupancy;	/* How many active descriptors */
	} l2_bucket[L2_BUCKET_SIZE];
};

/* pmap_kenter_internal flags */
#define KENTER_CACHE	0x1
#define KENTER_USER	0x2

/*
 * Given an L1 table index, calculate the corresponding l2_dtable index
 * and bucket index within the l2_dtable.
 */
#define	L2_IDX(l1idx)		(((l1idx) >> L2_BUCKET_LOG2) & \
				 (L2_SIZE - 1))
#define	L2_BUCKET(l1idx)	((l1idx) & (L2_BUCKET_SIZE - 1))

/*
 * Given a virtual address, this macro returns the
 * virtual address required to drop into the next L2 bucket.
 */
#define	L2_NEXT_BUCKET(va)	(((va) & L1_S_FRAME) + L1_S_SIZE)

/*
 * We try to map the page tables write-through, if possible. However, not
 * all CPUs have a write-through cache mode, so on those we have to sync
 * the cache when we frob page tables.
 *
 * We try to evaluate this at compile time, if possible. However, it's
 * not always possible to do that, hence this run-time var.
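 *
 * (Assumption, for orientation only: the PTE_SYNC()/PTE_SYNC_RANGE()
 * macros used throughout this file consult PMAP_NEEDS_PTE_SYNC, which is
 * expected to resolve either to a compile-time constant or to this
 * variable.)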
 */
int pmap_needs_pte_sync;

/*
 * Macro to determine if a mapping might be resident in the
 * instruction cache and/or TLB
 */
#define	PV_BEEN_EXECD(f)  (((f) & (PVF_REF | PVF_EXEC)) == (PVF_REF | PVF_EXEC))

/*
 * Macro to determine if a mapping might be resident in the
 * data cache and/or TLB
 */
#define	PV_BEEN_REFD(f)   (((f) & PVF_REF) != 0)

#ifndef PMAP_SHPGPERPROC
#define PMAP_SHPGPERPROC 200
#endif

#define	pmap_is_current(pm)	((pm) == kernel_pmap || \
    curproc->p_vmspace->vm_map.pmap == (pm))
static uma_zone_t pvzone = NULL;
uma_zone_t l2zone;
static uma_zone_t l2table_zone;
static vm_offset_t pmap_kernel_l2dtable_kva;
static vm_offset_t pmap_kernel_l2ptp_kva;
static vm_paddr_t pmap_kernel_l2ptp_phys;
static int pv_entry_count=0, pv_entry_max=0, pv_entry_high_water=0;
static struct rwlock pvh_global_lock;

void pmap_copy_page_offs_generic(vm_paddr_t a_phys, vm_offset_t a_offs,
    vm_paddr_t b_phys, vm_offset_t b_offs, int cnt);
#if ARM_MMU_XSCALE == 1
void pmap_copy_page_offs_xscale(vm_paddr_t a_phys, vm_offset_t a_offs,
    vm_paddr_t b_phys, vm_offset_t b_offs, int cnt);
#endif

/*
 * This list exists for the benefit of pmap_map_chunk(). It keeps track
 * of the kernel L2 tables during bootstrap, so that pmap_map_chunk() can
 * find them as necessary.
 *
 * Note that the data on this list MUST remain valid after initarm() returns,
 * as pmap_bootstrap() uses it to construct L2 table metadata.
 */
SLIST_HEAD(, pv_addr) kernel_pt_list = SLIST_HEAD_INITIALIZER(kernel_pt_list);

static void
pmap_init_l1(struct l1_ttable *l1, pd_entry_t *l1pt)
{
	int i;

	l1->l1_kva = l1pt;
	l1->l1_domain_use_count = 0;
	l1->l1_domain_first = 0;

	for (i = 0; i < PMAP_DOMAINS; i++)
		l1->l1_domain_free[i] = i + 1;

	/*
	 * Copy the kernel's L1 entries to each new L1.
	 */
	if (l1pt != kernel_pmap->pm_l1->l1_kva)
		memcpy(l1pt, kernel_pmap->pm_l1->l1_kva, L1_TABLE_SIZE);

	if ((l1->l1_physaddr = pmap_extract(kernel_pmap, (vm_offset_t)l1pt)) == 0)
		panic("pmap_init_l1: can't get PA of L1 at %p", l1pt);
	SLIST_INSERT_HEAD(&l1_list, l1, l1_link);
	TAILQ_INSERT_TAIL(&l1_lru_list, l1, l1_lru);
}

static vm_offset_t
kernel_pt_lookup(vm_paddr_t pa)
{
	struct pv_addr *pv;

	SLIST_FOREACH(pv, &kernel_pt_list, pv_list) {
		if (pv->pv_pa == pa)
			return (pv->pv_va);
	}
	return (0);
}

#if ARM_MMU_GENERIC != 0
void
pmap_pte_init_generic(void)
{

	pte_l1_s_cache_mode = L1_S_B|L1_S_C;
	pte_l1_s_cache_mask = L1_S_CACHE_MASK_generic;

	pte_l2_l_cache_mode = L2_B|L2_C;
	pte_l2_l_cache_mask = L2_L_CACHE_MASK_generic;

	pte_l2_s_cache_mode = L2_B|L2_C;
	pte_l2_s_cache_mask = L2_S_CACHE_MASK_generic;

	/*
	 * If we have a write-through cache, set B and C. If
	 * we have a write-back cache, then we assume setting
	 * only C will make those pages write-through.
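	 *
	 * A write-through data cache is recognized below by the fact that
	 * the CPU's dcache write-back routine is the no-op cpufunc_nullop:
	 * if there is nothing to write back, the cache must already be
	 * write-through.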
479 */ 480 if (cpufuncs.cf_dcache_wb_range == (void *) cpufunc_nullop) { 481 pte_l1_s_cache_mode_pt = L1_S_B|L1_S_C; 482 pte_l2_l_cache_mode_pt = L2_B|L2_C; 483 pte_l2_s_cache_mode_pt = L2_B|L2_C; 484 } else { 485 pte_l1_s_cache_mode_pt = L1_S_C; 486 pte_l2_l_cache_mode_pt = L2_C; 487 pte_l2_s_cache_mode_pt = L2_C; 488 } 489 490 pte_l2_s_prot_u = L2_S_PROT_U_generic; 491 pte_l2_s_prot_w = L2_S_PROT_W_generic; 492 pte_l2_s_prot_mask = L2_S_PROT_MASK_generic; 493 494 pte_l1_s_proto = L1_S_PROTO_generic; 495 pte_l1_c_proto = L1_C_PROTO_generic; 496 pte_l2_s_proto = L2_S_PROTO_generic; 497 498 pmap_copy_page_func = pmap_copy_page_generic; 499 pmap_copy_page_offs_func = pmap_copy_page_offs_generic; 500 pmap_zero_page_func = pmap_zero_page_generic; 501} 502 503#endif /* ARM_MMU_GENERIC != 0 */ 504 505#if ARM_MMU_XSCALE == 1 506#if (ARM_NMMUS > 1) || defined (CPU_XSCALE_CORE3) 507static u_int xscale_use_minidata; 508#endif 509 510void 511pmap_pte_init_xscale(void) 512{ 513 uint32_t auxctl; 514 int write_through = 0; 515 516 pte_l1_s_cache_mode = L1_S_B|L1_S_C|L1_S_XSCALE_P; 517 pte_l1_s_cache_mask = L1_S_CACHE_MASK_xscale; 518 519 pte_l2_l_cache_mode = L2_B|L2_C; 520 pte_l2_l_cache_mask = L2_L_CACHE_MASK_xscale; 521 522 pte_l2_s_cache_mode = L2_B|L2_C; 523 pte_l2_s_cache_mask = L2_S_CACHE_MASK_xscale; 524 525 pte_l1_s_cache_mode_pt = L1_S_C; 526 pte_l2_l_cache_mode_pt = L2_C; 527 pte_l2_s_cache_mode_pt = L2_C; 528#ifdef XSCALE_CACHE_READ_WRITE_ALLOCATE 529 /* 530 * The XScale core has an enhanced mode where writes that 531 * miss the cache cause a cache line to be allocated. This 532 * is significantly faster than the traditional, write-through 533 * behavior of this case. 534 */ 535 pte_l1_s_cache_mode |= L1_S_XSCALE_TEX(TEX_XSCALE_X); 536 pte_l2_l_cache_mode |= L2_XSCALE_L_TEX(TEX_XSCALE_X); 537 pte_l2_s_cache_mode |= L2_XSCALE_T_TEX(TEX_XSCALE_X); 538#endif /* XSCALE_CACHE_READ_WRITE_ALLOCATE */ 539#ifdef XSCALE_CACHE_WRITE_THROUGH 540 /* 541 * Some versions of the XScale core have various bugs in 542 * their cache units, the work-around for which is to run 543 * the cache in write-through mode. Unfortunately, this 544 * has a major (negative) impact on performance. So, we 545 * go ahead and run fast-and-loose, in the hopes that we 546 * don't line up the planets in a way that will trip the 547 * bugs. 548 * 549 * However, we give you the option to be slow-but-correct. 550 */ 551 write_through = 1; 552#elif defined(XSCALE_CACHE_WRITE_BACK) 553 /* force write back cache mode */ 554 write_through = 0; 555#elif defined(CPU_XSCALE_PXA2X0) 556 /* 557 * Intel PXA2[15]0 processors are known to have a bug in 558 * write-back cache on revision 4 and earlier (stepping 559 * A[01] and B[012]). Fixed for C0 and later. 
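	 *
	 * The block below reads the CPU ID register and forces write-through
	 * operation only for PXA250/PXA210 parts whose revision field is
	 * below 5, i.e. the affected A and B steppings.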
560 */ 561 { 562 uint32_t id, type; 563 564 id = cpu_ident(); 565 type = id & ~(CPU_ID_XSCALE_COREREV_MASK|CPU_ID_REVISION_MASK); 566 567 if (type == CPU_ID_PXA250 || type == CPU_ID_PXA210) { 568 if ((id & CPU_ID_REVISION_MASK) < 5) { 569 /* write through for stepping A0-1 and B0-2 */ 570 write_through = 1; 571 } 572 } 573 } 574#endif /* XSCALE_CACHE_WRITE_THROUGH */ 575 576 if (write_through) { 577 pte_l1_s_cache_mode = L1_S_C; 578 pte_l2_l_cache_mode = L2_C; 579 pte_l2_s_cache_mode = L2_C; 580 } 581 582#if (ARM_NMMUS > 1) 583 xscale_use_minidata = 1; 584#endif 585 586 pte_l2_s_prot_u = L2_S_PROT_U_xscale; 587 pte_l2_s_prot_w = L2_S_PROT_W_xscale; 588 pte_l2_s_prot_mask = L2_S_PROT_MASK_xscale; 589 590 pte_l1_s_proto = L1_S_PROTO_xscale; 591 pte_l1_c_proto = L1_C_PROTO_xscale; 592 pte_l2_s_proto = L2_S_PROTO_xscale; 593 594#ifdef CPU_XSCALE_CORE3 595 pmap_copy_page_func = pmap_copy_page_generic; 596 pmap_copy_page_offs_func = pmap_copy_page_offs_generic; 597 pmap_zero_page_func = pmap_zero_page_generic; 598 xscale_use_minidata = 0; 599 /* Make sure it is L2-cachable */ 600 pte_l1_s_cache_mode |= L1_S_XSCALE_TEX(TEX_XSCALE_T); 601 pte_l1_s_cache_mode_pt = pte_l1_s_cache_mode &~ L1_S_XSCALE_P; 602 pte_l2_l_cache_mode |= L2_XSCALE_L_TEX(TEX_XSCALE_T) ; 603 pte_l2_l_cache_mode_pt = pte_l1_s_cache_mode; 604 pte_l2_s_cache_mode |= L2_XSCALE_T_TEX(TEX_XSCALE_T); 605 pte_l2_s_cache_mode_pt = pte_l2_s_cache_mode; 606 607#else 608 pmap_copy_page_func = pmap_copy_page_xscale; 609 pmap_copy_page_offs_func = pmap_copy_page_offs_xscale; 610 pmap_zero_page_func = pmap_zero_page_xscale; 611#endif 612 613 /* 614 * Disable ECC protection of page table access, for now. 615 */ 616 __asm __volatile("mrc p15, 0, %0, c1, c0, 1" : "=r" (auxctl)); 617 auxctl &= ~XSCALE_AUXCTL_P; 618 __asm __volatile("mcr p15, 0, %0, c1, c0, 1" : : "r" (auxctl)); 619} 620 621/* 622 * xscale_setup_minidata: 623 * 624 * Set up the mini-data cache clean area. We require the 625 * caller to allocate the right amount of physically and 626 * virtually contiguous space. 627 */ 628extern vm_offset_t xscale_minidata_clean_addr; 629extern vm_size_t xscale_minidata_clean_size; /* already initialized */ 630void 631xscale_setup_minidata(vm_offset_t l1pt, vm_offset_t va, vm_paddr_t pa) 632{ 633 pd_entry_t *pde = (pd_entry_t *) l1pt; 634 pt_entry_t *pte; 635 vm_size_t size; 636 uint32_t auxctl; 637 638 xscale_minidata_clean_addr = va; 639 640 /* Round it to page size. */ 641 size = (xscale_minidata_clean_size + L2_S_OFFSET) & L2_S_FRAME; 642 643 for (; size != 0; 644 va += L2_S_SIZE, pa += L2_S_SIZE, size -= L2_S_SIZE) { 645 pte = (pt_entry_t *) kernel_pt_lookup( 646 pde[L1_IDX(va)] & L1_C_ADDR_MASK); 647 if (pte == NULL) 648 panic("xscale_setup_minidata: can't find L2 table for " 649 "VA 0x%08x", (u_int32_t) va); 650 pte[l2pte_index(va)] = 651 L2_S_PROTO | pa | L2_S_PROT(PTE_KERNEL, VM_PROT_READ) | 652 L2_C | L2_XSCALE_T_TEX(TEX_XSCALE_X); 653 } 654 655 /* 656 * Configure the mini-data cache for write-back with 657 * read/write-allocate. 658 * 659 * NOTE: In order to reconfigure the mini-data cache, we must 660 * make sure it contains no valid data! In order to do that, 661 * we must issue a global data cache invalidate command! 662 * 663 * WE ASSUME WE ARE RUNNING UN-CACHED WHEN THIS ROUTINE IS CALLED! 664 * THIS IS VERY IMPORTANT! 665 */ 666 667 /* Invalidate data and mini-data. 
*/ 668 __asm __volatile("mcr p15, 0, %0, c7, c6, 0" : : "r" (0)); 669 __asm __volatile("mrc p15, 0, %0, c1, c0, 1" : "=r" (auxctl)); 670 auxctl = (auxctl & ~XSCALE_AUXCTL_MD_MASK) | XSCALE_AUXCTL_MD_WB_RWA; 671 __asm __volatile("mcr p15, 0, %0, c1, c0, 1" : : "r" (auxctl)); 672} 673#endif 674 675/* 676 * Allocate an L1 translation table for the specified pmap. 677 * This is called at pmap creation time. 678 */ 679static void 680pmap_alloc_l1(pmap_t pm) 681{ 682 struct l1_ttable *l1; 683 u_int8_t domain; 684 685 /* 686 * Remove the L1 at the head of the LRU list 687 */ 688 mtx_lock(&l1_lru_lock); 689 l1 = TAILQ_FIRST(&l1_lru_list); 690 TAILQ_REMOVE(&l1_lru_list, l1, l1_lru); 691 692 /* 693 * Pick the first available domain number, and update 694 * the link to the next number. 695 */ 696 domain = l1->l1_domain_first; 697 l1->l1_domain_first = l1->l1_domain_free[domain]; 698 699 /* 700 * If there are still free domain numbers in this L1, 701 * put it back on the TAIL of the LRU list. 702 */ 703 if (++l1->l1_domain_use_count < PMAP_DOMAINS) 704 TAILQ_INSERT_TAIL(&l1_lru_list, l1, l1_lru); 705 706 mtx_unlock(&l1_lru_lock); 707 708 /* 709 * Fix up the relevant bits in the pmap structure 710 */ 711 pm->pm_l1 = l1; 712 pm->pm_domain = domain + 1; 713} 714 715/* 716 * Free an L1 translation table. 717 * This is called at pmap destruction time. 718 */ 719static void 720pmap_free_l1(pmap_t pm) 721{ 722 struct l1_ttable *l1 = pm->pm_l1; 723 724 mtx_lock(&l1_lru_lock); 725 726 /* 727 * If this L1 is currently on the LRU list, remove it. 728 */ 729 if (l1->l1_domain_use_count < PMAP_DOMAINS) 730 TAILQ_REMOVE(&l1_lru_list, l1, l1_lru); 731 732 /* 733 * Free up the domain number which was allocated to the pmap 734 */ 735 l1->l1_domain_free[pm->pm_domain - 1] = l1->l1_domain_first; 736 l1->l1_domain_first = pm->pm_domain - 1; 737 l1->l1_domain_use_count--; 738 739 /* 740 * The L1 now must have at least 1 free domain, so add 741 * it back to the LRU list. If the use count is zero, 742 * put it at the head of the list, otherwise it goes 743 * to the tail. 744 */ 745 if (l1->l1_domain_use_count == 0) { 746 TAILQ_INSERT_HEAD(&l1_lru_list, l1, l1_lru); 747 } else 748 TAILQ_INSERT_TAIL(&l1_lru_list, l1, l1_lru); 749 750 mtx_unlock(&l1_lru_lock); 751} 752 753/* 754 * Returns a pointer to the L2 bucket associated with the specified pmap 755 * and VA, or NULL if no L2 bucket exists for the address. 756 */ 757static PMAP_INLINE struct l2_bucket * 758pmap_get_l2_bucket(pmap_t pm, vm_offset_t va) 759{ 760 struct l2_dtable *l2; 761 struct l2_bucket *l2b; 762 u_short l1idx; 763 764 l1idx = L1_IDX(va); 765 766 if ((l2 = pm->pm_l2[L2_IDX(l1idx)]) == NULL || 767 (l2b = &l2->l2_bucket[L2_BUCKET(l1idx)])->l2b_kva == NULL) 768 return (NULL); 769 770 return (l2b); 771} 772 773/* 774 * Returns a pointer to the L2 bucket associated with the specified pmap 775 * and VA. 776 * 777 * If no L2 bucket exists, perform the necessary allocations to put an L2 778 * bucket/page table in place. 779 * 780 * Note that if a new L2 bucket/page was allocated, the caller *must* 781 * increment the bucket occupancy counter appropriately *before* 782 * releasing the pmap's lock to ensure no other thread or cpu deallocates 783 * the bucket/page in the meantime. 
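 *
 * Internally this routine may drop and re-acquire both the pmap lock and
 * pvh_global_lock around its M_NOWAIT zone allocations; after re-locking
 * it re-checks for a racing allocation and, if one happened, frees its own
 * allocation and reuses the existing structure.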
784 */ 785static struct l2_bucket * 786pmap_alloc_l2_bucket(pmap_t pm, vm_offset_t va) 787{ 788 struct l2_dtable *l2; 789 struct l2_bucket *l2b; 790 u_short l1idx; 791 792 l1idx = L1_IDX(va); 793 794 PMAP_ASSERT_LOCKED(pm); 795 rw_assert(&pvh_global_lock, RA_WLOCKED); 796 if ((l2 = pm->pm_l2[L2_IDX(l1idx)]) == NULL) { 797 /* 798 * No mapping at this address, as there is 799 * no entry in the L1 table. 800 * Need to allocate a new l2_dtable. 801 */ 802 PMAP_UNLOCK(pm); 803 rw_wunlock(&pvh_global_lock); 804 if ((l2 = uma_zalloc(l2table_zone, M_NOWAIT)) == NULL) { 805 rw_wlock(&pvh_global_lock); 806 PMAP_LOCK(pm); 807 return (NULL); 808 } 809 rw_wlock(&pvh_global_lock); 810 PMAP_LOCK(pm); 811 if (pm->pm_l2[L2_IDX(l1idx)] != NULL) { 812 /* 813 * Someone already allocated the l2_dtable while 814 * we were doing the same. 815 */ 816 uma_zfree(l2table_zone, l2); 817 l2 = pm->pm_l2[L2_IDX(l1idx)]; 818 } else { 819 bzero(l2, sizeof(*l2)); 820 /* 821 * Link it into the parent pmap 822 */ 823 pm->pm_l2[L2_IDX(l1idx)] = l2; 824 } 825 } 826 827 l2b = &l2->l2_bucket[L2_BUCKET(l1idx)]; 828 829 /* 830 * Fetch pointer to the L2 page table associated with the address. 831 */ 832 if (l2b->l2b_kva == NULL) { 833 pt_entry_t *ptep; 834 835 /* 836 * No L2 page table has been allocated. Chances are, this 837 * is because we just allocated the l2_dtable, above. 838 */ 839 l2->l2_occupancy++; 840 PMAP_UNLOCK(pm); 841 rw_wunlock(&pvh_global_lock); 842 ptep = uma_zalloc(l2zone, M_NOWAIT); 843 rw_wlock(&pvh_global_lock); 844 PMAP_LOCK(pm); 845 if (l2b->l2b_kva != NULL) { 846 /* We lost the race. */ 847 l2->l2_occupancy--; 848 uma_zfree(l2zone, ptep); 849 return (l2b); 850 } 851 l2b->l2b_phys = vtophys(ptep); 852 if (ptep == NULL) { 853 /* 854 * Oops, no more L2 page tables available at this 855 * time. We may need to deallocate the l2_dtable 856 * if we allocated a new one above. 857 */ 858 l2->l2_occupancy--; 859 if (l2->l2_occupancy == 0) { 860 pm->pm_l2[L2_IDX(l1idx)] = NULL; 861 uma_zfree(l2table_zone, l2); 862 } 863 return (NULL); 864 } 865 866 l2b->l2b_kva = ptep; 867 l2b->l2b_l1idx = l1idx; 868 } 869 870 return (l2b); 871} 872 873static PMAP_INLINE void 874#ifndef PMAP_INCLUDE_PTE_SYNC 875pmap_free_l2_ptp(pt_entry_t *l2) 876#else 877pmap_free_l2_ptp(boolean_t need_sync, pt_entry_t *l2) 878#endif 879{ 880#ifdef PMAP_INCLUDE_PTE_SYNC 881 /* 882 * Note: With a write-back cache, we may need to sync this 883 * L2 table before re-using it. 884 * This is because it may have belonged to a non-current 885 * pmap, in which case the cache syncs would have been 886 * skipped when the pages were being unmapped. If the 887 * L2 table were then to be immediately re-allocated to 888 * the *current* pmap, it may well contain stale mappings 889 * which have not yet been cleared by a cache write-back 890 * and so would still be visible to the mmu. 891 */ 892 if (need_sync) 893 PTE_SYNC_RANGE(l2, L2_TABLE_SIZE_REAL / sizeof(pt_entry_t)); 894#endif 895 uma_zfree(l2zone, l2); 896} 897/* 898 * One or more mappings in the specified L2 descriptor table have just been 899 * invalidated. 900 * 901 * Garbage collect the metadata and descriptor table itself if necessary. 902 * 903 * The pmap lock must be acquired when this is called (not necessary 904 * for the kernel pmap). 
905 */ 906static void 907pmap_free_l2_bucket(pmap_t pm, struct l2_bucket *l2b, u_int count) 908{ 909 struct l2_dtable *l2; 910 pd_entry_t *pl1pd, l1pd; 911 pt_entry_t *ptep; 912 u_short l1idx; 913 914 915 /* 916 * Update the bucket's reference count according to how many 917 * PTEs the caller has just invalidated. 918 */ 919 l2b->l2b_occupancy -= count; 920 921 /* 922 * Note: 923 * 924 * Level 2 page tables allocated to the kernel pmap are never freed 925 * as that would require checking all Level 1 page tables and 926 * removing any references to the Level 2 page table. See also the 927 * comment elsewhere about never freeing bootstrap L2 descriptors. 928 * 929 * We make do with just invalidating the mapping in the L2 table. 930 * 931 * This isn't really a big deal in practice and, in fact, leads 932 * to a performance win over time as we don't need to continually 933 * alloc/free. 934 */ 935 if (l2b->l2b_occupancy > 0 || pm == kernel_pmap) 936 return; 937 938 /* 939 * There are no more valid mappings in this level 2 page table. 940 * Go ahead and NULL-out the pointer in the bucket, then 941 * free the page table. 942 */ 943 l1idx = l2b->l2b_l1idx; 944 ptep = l2b->l2b_kva; 945 l2b->l2b_kva = NULL; 946 947 pl1pd = &pm->pm_l1->l1_kva[l1idx]; 948 949 /* 950 * If the L1 slot matches the pmap's domain 951 * number, then invalidate it. 952 */ 953 l1pd = *pl1pd & (L1_TYPE_MASK | L1_C_DOM_MASK); 954 if (l1pd == (L1_C_DOM(pm->pm_domain) | L1_TYPE_C)) { 955 *pl1pd = 0; 956 PTE_SYNC(pl1pd); 957 } 958 959 /* 960 * Release the L2 descriptor table back to the pool cache. 961 */ 962#ifndef PMAP_INCLUDE_PTE_SYNC 963 pmap_free_l2_ptp(ptep); 964#else 965 pmap_free_l2_ptp(!pmap_is_current(pm), ptep); 966#endif 967 968 /* 969 * Update the reference count in the associated l2_dtable 970 */ 971 l2 = pm->pm_l2[L2_IDX(l1idx)]; 972 if (--l2->l2_occupancy > 0) 973 return; 974 975 /* 976 * There are no more valid mappings in any of the Level 1 977 * slots managed by this l2_dtable. Go ahead and NULL-out 978 * the pointer in the parent pmap and free the l2_dtable. 979 */ 980 pm->pm_l2[L2_IDX(l1idx)] = NULL; 981 uma_zfree(l2table_zone, l2); 982} 983 984/* 985 * Pool cache constructors for L2 descriptor tables, metadata and pmap 986 * structures. 987 */ 988static int 989pmap_l2ptp_ctor(void *mem, int size, void *arg, int flags) 990{ 991#ifndef PMAP_INCLUDE_PTE_SYNC 992 struct l2_bucket *l2b; 993 pt_entry_t *ptep, pte; 994 995 vm_offset_t va = (vm_offset_t)mem & ~PAGE_MASK; 996 997 /* 998 * The mappings for these page tables were initially made using 999 * pmap_kenter() by the pool subsystem. Therefore, the cache- 1000 * mode will not be right for page table mappings. To avoid 1001 * polluting the pmap_kenter() code with a special case for 1002 * page tables, we simply fix up the cache-mode here if it's not 1003 * correct. 1004 */ 1005 l2b = pmap_get_l2_bucket(kernel_pmap, va); 1006 ptep = &l2b->l2b_kva[l2pte_index(va)]; 1007 pte = *ptep; 1008 1009 if ((pte & L2_S_CACHE_MASK) != pte_l2_s_cache_mode_pt) { 1010 /* 1011 * Page tables must have the cache-mode set to 1012 * Write-Thru. 
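		 * Otherwise, PTE updates made through this mapping could sit
		 * in the data cache and remain invisible to the MMU's table
		 * walker until the cache line happened to be written back.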
1013 */ 1014 *ptep = (pte & ~L2_S_CACHE_MASK) | pte_l2_s_cache_mode_pt; 1015 PTE_SYNC(ptep); 1016 cpu_tlb_flushD_SE(va); 1017 cpu_cpwait(); 1018 } 1019#endif 1020 memset(mem, 0, L2_TABLE_SIZE_REAL); 1021 PTE_SYNC_RANGE(mem, L2_TABLE_SIZE_REAL / sizeof(pt_entry_t)); 1022 return (0); 1023} 1024 1025/* 1026 * A bunch of routines to conditionally flush the caches/TLB depending 1027 * on whether the specified pmap actually needs to be flushed at any 1028 * given time. 1029 */ 1030static PMAP_INLINE void 1031pmap_tlb_flushID_SE(pmap_t pm, vm_offset_t va) 1032{ 1033 1034 if (pmap_is_current(pm)) 1035 cpu_tlb_flushID_SE(va); 1036} 1037 1038static PMAP_INLINE void 1039pmap_tlb_flushD_SE(pmap_t pm, vm_offset_t va) 1040{ 1041 1042 if (pmap_is_current(pm)) 1043 cpu_tlb_flushD_SE(va); 1044} 1045 1046static PMAP_INLINE void 1047pmap_tlb_flushID(pmap_t pm) 1048{ 1049 1050 if (pmap_is_current(pm)) 1051 cpu_tlb_flushID(); 1052} 1053static PMAP_INLINE void 1054pmap_tlb_flushD(pmap_t pm) 1055{ 1056 1057 if (pmap_is_current(pm)) 1058 cpu_tlb_flushD(); 1059} 1060 1061static int 1062pmap_has_valid_mapping(pmap_t pm, vm_offset_t va) 1063{ 1064 pd_entry_t *pde; 1065 pt_entry_t *ptep; 1066 1067 if (pmap_get_pde_pte(pm, va, &pde, &ptep) && 1068 ptep && ((*ptep & L2_TYPE_MASK) != L2_TYPE_INV)) 1069 return (1); 1070 1071 return (0); 1072} 1073 1074static PMAP_INLINE void 1075pmap_idcache_wbinv_range(pmap_t pm, vm_offset_t va, vm_size_t len) 1076{ 1077 vm_size_t rest; 1078 1079 CTR4(KTR_PMAP, "pmap_dcache_wbinv_range: pmap %p is_kernel %d va 0x%08x" 1080 " len 0x%x ", pm, pm == kernel_pmap, va, len); 1081 1082 if (pmap_is_current(pm) || pm == kernel_pmap) { 1083 rest = MIN(PAGE_SIZE - (va & PAGE_MASK), len); 1084 while (len > 0) { 1085 if (pmap_has_valid_mapping(pm, va)) { 1086 cpu_idcache_wbinv_range(va, rest); 1087 cpu_l2cache_wbinv_range(va, rest); 1088 } 1089 len -= rest; 1090 va += rest; 1091 rest = MIN(PAGE_SIZE, len); 1092 } 1093 } 1094} 1095 1096static PMAP_INLINE void 1097pmap_dcache_wb_range(pmap_t pm, vm_offset_t va, vm_size_t len, boolean_t do_inv, 1098 boolean_t rd_only) 1099{ 1100 vm_size_t rest; 1101 1102 CTR4(KTR_PMAP, "pmap_dcache_wb_range: pmap %p is_kernel %d va 0x%08x " 1103 "len 0x%x ", pm, pm == kernel_pmap, va, len); 1104 CTR2(KTR_PMAP, " do_inv %d rd_only %d", do_inv, rd_only); 1105 1106 if (pmap_is_current(pm)) { 1107 rest = MIN(PAGE_SIZE - (va & PAGE_MASK), len); 1108 while (len > 0) { 1109 if (pmap_has_valid_mapping(pm, va)) { 1110 if (do_inv && rd_only) { 1111 cpu_dcache_inv_range(va, rest); 1112 cpu_l2cache_inv_range(va, rest); 1113 } else if (do_inv) { 1114 cpu_dcache_wbinv_range(va, rest); 1115 cpu_l2cache_wbinv_range(va, rest); 1116 } else if (!rd_only) { 1117 cpu_dcache_wb_range(va, rest); 1118 cpu_l2cache_wb_range(va, rest); 1119 } 1120 } 1121 len -= rest; 1122 va += rest; 1123 1124 rest = MIN(PAGE_SIZE, len); 1125 } 1126 } 1127} 1128 1129static PMAP_INLINE void 1130pmap_idcache_wbinv_all(pmap_t pm) 1131{ 1132 1133 if (pmap_is_current(pm)) { 1134 cpu_idcache_wbinv_all(); 1135 cpu_l2cache_wbinv_all(); 1136 } 1137} 1138 1139#ifdef notyet 1140static PMAP_INLINE void 1141pmap_dcache_wbinv_all(pmap_t pm) 1142{ 1143 1144 if (pmap_is_current(pm)) { 1145 cpu_dcache_wbinv_all(); 1146 cpu_l2cache_wbinv_all(); 1147 } 1148} 1149#endif 1150 1151/* 1152 * PTE_SYNC_CURRENT: 1153 * 1154 * Make sure the pte is written out to RAM. 1155 * We need to do this for one of two cases: 1156 * - We're dealing with the kernel pmap 1157 * - There is no pmap active in the cache/tlb. 
1158 * - The specified pmap is 'active' in the cache/tlb. 1159 */ 1160#ifdef PMAP_INCLUDE_PTE_SYNC 1161#define PTE_SYNC_CURRENT(pm, ptep) \ 1162do { \ 1163 if (PMAP_NEEDS_PTE_SYNC && \ 1164 pmap_is_current(pm)) \ 1165 PTE_SYNC(ptep); \ 1166} while (/*CONSTCOND*/0) 1167#else 1168#define PTE_SYNC_CURRENT(pm, ptep) /* nothing */ 1169#endif 1170 1171/* 1172 * cacheable == -1 means we must make the entry uncacheable, 1 means 1173 * cacheable; 1174 */ 1175static __inline void 1176pmap_set_cache_entry(pv_entry_t pv, pmap_t pm, vm_offset_t va, int cacheable) 1177{ 1178 struct l2_bucket *l2b; 1179 pt_entry_t *ptep, pte; 1180 1181 l2b = pmap_get_l2_bucket(pv->pv_pmap, pv->pv_va); 1182 ptep = &l2b->l2b_kva[l2pte_index(pv->pv_va)]; 1183 1184 if (cacheable == 1) { 1185 pte = (*ptep & ~L2_S_CACHE_MASK) | pte_l2_s_cache_mode; 1186 if (l2pte_valid(pte)) { 1187 if (PV_BEEN_EXECD(pv->pv_flags)) { 1188 pmap_tlb_flushID_SE(pv->pv_pmap, pv->pv_va); 1189 } else if (PV_BEEN_REFD(pv->pv_flags)) { 1190 pmap_tlb_flushD_SE(pv->pv_pmap, pv->pv_va); 1191 } 1192 } 1193 } else { 1194 pte = *ptep &~ L2_S_CACHE_MASK; 1195 if ((va != pv->pv_va || pm != pv->pv_pmap) && 1196 l2pte_valid(pte)) { 1197 if (PV_BEEN_EXECD(pv->pv_flags)) { 1198 pmap_idcache_wbinv_range(pv->pv_pmap, 1199 pv->pv_va, PAGE_SIZE); 1200 pmap_tlb_flushID_SE(pv->pv_pmap, pv->pv_va); 1201 } else if (PV_BEEN_REFD(pv->pv_flags)) { 1202 pmap_dcache_wb_range(pv->pv_pmap, 1203 pv->pv_va, PAGE_SIZE, TRUE, 1204 (pv->pv_flags & PVF_WRITE) == 0); 1205 pmap_tlb_flushD_SE(pv->pv_pmap, 1206 pv->pv_va); 1207 } 1208 } 1209 } 1210 *ptep = pte; 1211 PTE_SYNC_CURRENT(pv->pv_pmap, ptep); 1212} 1213 1214static void 1215pmap_fix_cache(struct vm_page *pg, pmap_t pm, vm_offset_t va) 1216{ 1217 int pmwc = 0; 1218 int writable = 0, kwritable = 0, uwritable = 0; 1219 int entries = 0, kentries = 0, uentries = 0; 1220 struct pv_entry *pv; 1221 1222 rw_assert(&pvh_global_lock, RA_WLOCKED); 1223 1224 /* the cache gets written back/invalidated on context switch. 1225 * therefore, if a user page shares an entry in the same page or 1226 * with the kernel map and at least one is writable, then the 1227 * cache entry must be set write-through. 1228 */ 1229 1230 TAILQ_FOREACH(pv, &pg->md.pv_list, pv_list) { 1231 /* generate a count of the pv_entry uses */ 1232 if (pv->pv_flags & PVF_WRITE) { 1233 if (pv->pv_pmap == kernel_pmap) 1234 kwritable++; 1235 else if (pv->pv_pmap == pm) 1236 uwritable++; 1237 writable++; 1238 } 1239 if (pv->pv_pmap == kernel_pmap) 1240 kentries++; 1241 else { 1242 if (pv->pv_pmap == pm) 1243 uentries++; 1244 entries++; 1245 } 1246 } 1247 /* 1248 * check if the user duplicate mapping has 1249 * been removed. 
1250 */ 1251 if ((pm != kernel_pmap) && (((uentries > 1) && uwritable) || 1252 (uwritable > 1))) 1253 pmwc = 1; 1254 1255 TAILQ_FOREACH(pv, &pg->md.pv_list, pv_list) { 1256 /* check for user uncachable conditions - order is important */ 1257 if (pm != kernel_pmap && 1258 (pv->pv_pmap == pm || pv->pv_pmap == kernel_pmap)) { 1259 1260 if ((uentries > 1 && uwritable) || uwritable > 1) { 1261 1262 /* user duplicate mapping */ 1263 if (pv->pv_pmap != kernel_pmap) 1264 pv->pv_flags |= PVF_MWC; 1265 1266 if (!(pv->pv_flags & PVF_NC)) { 1267 pv->pv_flags |= PVF_NC; 1268 pmap_set_cache_entry(pv, pm, va, -1); 1269 } 1270 continue; 1271 } else /* no longer a duplicate user */ 1272 pv->pv_flags &= ~PVF_MWC; 1273 } 1274 1275 /* 1276 * check for kernel uncachable conditions 1277 * kernel writable or kernel readable with writable user entry 1278 */ 1279 if ((kwritable && (entries || kentries > 1)) || 1280 (kwritable > 1) || 1281 ((kwritable != writable) && kentries && 1282 (pv->pv_pmap == kernel_pmap || 1283 (pv->pv_flags & PVF_WRITE) || 1284 (pv->pv_flags & PVF_MWC)))) { 1285 1286 if (!(pv->pv_flags & PVF_NC)) { 1287 pv->pv_flags |= PVF_NC; 1288 pmap_set_cache_entry(pv, pm, va, -1); 1289 } 1290 continue; 1291 } 1292 1293 /* kernel and user are cachable */ 1294 if ((pm == kernel_pmap) && !(pv->pv_flags & PVF_MWC) && 1295 (pv->pv_flags & PVF_NC)) { 1296 1297 pv->pv_flags &= ~PVF_NC; 1298 if (pg->md.pv_memattr != VM_MEMATTR_UNCACHEABLE) 1299 pmap_set_cache_entry(pv, pm, va, 1); 1300 continue; 1301 } 1302 /* user is no longer sharable and writable */ 1303 if (pm != kernel_pmap && 1304 (pv->pv_pmap == pm || pv->pv_pmap == kernel_pmap) && 1305 !pmwc && (pv->pv_flags & PVF_NC)) { 1306 1307 pv->pv_flags &= ~(PVF_NC | PVF_MWC); 1308 if (pg->md.pv_memattr != VM_MEMATTR_UNCACHEABLE) 1309 pmap_set_cache_entry(pv, pm, va, 1); 1310 } 1311 } 1312 1313 if ((kwritable == 0) && (writable == 0)) { 1314 pg->md.pvh_attrs &= ~PVF_MOD; 1315 vm_page_aflag_clear(pg, PGA_WRITEABLE); 1316 return; 1317 } 1318} 1319 1320/* 1321 * Modify pte bits for all ptes corresponding to the given physical address. 1322 * We use `maskbits' rather than `clearbits' because we're always passing 1323 * constants and the latter would require an extra inversion at run-time. 
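 *
 * Illustrative example: to strip write permission from every mapping of a
 * page, a caller (elsewhere in this file) passes PVF_WRITE as `maskbits';
 * each matching pv_entry then has PVF_WRITE cleared from its flags and
 * L2_S_PROT_W cleared from its PTE.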
1324 */ 1325static int 1326pmap_clearbit(struct vm_page *pg, u_int maskbits) 1327{ 1328 struct l2_bucket *l2b; 1329 struct pv_entry *pv; 1330 pt_entry_t *ptep, npte, opte; 1331 pmap_t pm; 1332 vm_offset_t va; 1333 u_int oflags; 1334 int count = 0; 1335 1336 rw_wlock(&pvh_global_lock); 1337 1338 if (maskbits & PVF_WRITE) 1339 maskbits |= PVF_MOD; 1340 /* 1341 * Clear saved attributes (modify, reference) 1342 */ 1343 pg->md.pvh_attrs &= ~(maskbits & (PVF_MOD | PVF_REF)); 1344 1345 if (TAILQ_EMPTY(&pg->md.pv_list)) { 1346 rw_wunlock(&pvh_global_lock); 1347 return (0); 1348 } 1349 1350 /* 1351 * Loop over all current mappings setting/clearing as appropos 1352 */ 1353 TAILQ_FOREACH(pv, &pg->md.pv_list, pv_list) { 1354 va = pv->pv_va; 1355 pm = pv->pv_pmap; 1356 oflags = pv->pv_flags; 1357 1358 if (!(oflags & maskbits)) { 1359 if ((maskbits & PVF_WRITE) && (pv->pv_flags & PVF_NC)) { 1360 if (pg->md.pv_memattr != 1361 VM_MEMATTR_UNCACHEABLE) { 1362 PMAP_LOCK(pm); 1363 l2b = pmap_get_l2_bucket(pm, va); 1364 ptep = &l2b->l2b_kva[l2pte_index(va)]; 1365 *ptep |= pte_l2_s_cache_mode; 1366 PTE_SYNC(ptep); 1367 PMAP_UNLOCK(pm); 1368 } 1369 pv->pv_flags &= ~(PVF_NC | PVF_MWC); 1370 } 1371 continue; 1372 } 1373 pv->pv_flags &= ~maskbits; 1374 1375 PMAP_LOCK(pm); 1376 1377 l2b = pmap_get_l2_bucket(pm, va); 1378 1379 ptep = &l2b->l2b_kva[l2pte_index(va)]; 1380 npte = opte = *ptep; 1381 1382 if (maskbits & (PVF_WRITE|PVF_MOD)) { 1383 if ((pv->pv_flags & PVF_NC)) { 1384 /* 1385 * Entry is not cacheable: 1386 * 1387 * Don't turn caching on again if this is a 1388 * modified emulation. This would be 1389 * inconsistent with the settings created by 1390 * pmap_fix_cache(). Otherwise, it's safe 1391 * to re-enable caching. 1392 * 1393 * There's no need to call pmap_fix_cache() 1394 * here: all pages are losing their write 1395 * permission. 1396 */ 1397 if (maskbits & PVF_WRITE) { 1398 if (pg->md.pv_memattr != 1399 VM_MEMATTR_UNCACHEABLE) 1400 npte |= pte_l2_s_cache_mode; 1401 pv->pv_flags &= ~(PVF_NC | PVF_MWC); 1402 } 1403 } else 1404 if (opte & L2_S_PROT_W) { 1405 vm_page_dirty(pg); 1406 /* 1407 * Entry is writable/cacheable: check if pmap 1408 * is current if it is flush it, otherwise it 1409 * won't be in the cache 1410 */ 1411 if (PV_BEEN_EXECD(oflags)) 1412 pmap_idcache_wbinv_range(pm, pv->pv_va, 1413 PAGE_SIZE); 1414 else 1415 if (PV_BEEN_REFD(oflags)) 1416 pmap_dcache_wb_range(pm, pv->pv_va, 1417 PAGE_SIZE, 1418 (maskbits & PVF_REF) ? TRUE : FALSE, 1419 FALSE); 1420 } 1421 1422 /* make the pte read only */ 1423 npte &= ~L2_S_PROT_W; 1424 } 1425 1426 if (maskbits & PVF_REF) { 1427 if ((pv->pv_flags & PVF_NC) == 0 && 1428 (maskbits & (PVF_WRITE|PVF_MOD)) == 0) { 1429 /* 1430 * Check npte here; we may have already 1431 * done the wbinv above, and the validity 1432 * of the PTE is the same for opte and 1433 * npte. 1434 */ 1435 if (npte & L2_S_PROT_W) { 1436 if (PV_BEEN_EXECD(oflags)) 1437 pmap_idcache_wbinv_range(pm, 1438 pv->pv_va, PAGE_SIZE); 1439 else 1440 if (PV_BEEN_REFD(oflags)) 1441 pmap_dcache_wb_range(pm, 1442 pv->pv_va, PAGE_SIZE, 1443 TRUE, FALSE); 1444 } else 1445 if ((npte & L2_TYPE_MASK) != L2_TYPE_INV) { 1446 /* XXXJRT need idcache_inv_range */ 1447 if (PV_BEEN_EXECD(oflags)) 1448 pmap_idcache_wbinv_range(pm, 1449 pv->pv_va, PAGE_SIZE); 1450 else 1451 if (PV_BEEN_REFD(oflags)) 1452 pmap_dcache_wb_range(pm, 1453 pv->pv_va, PAGE_SIZE, 1454 TRUE, TRUE); 1455 } 1456 } 1457 1458 /* 1459 * Make the PTE invalid so that we will take a 1460 * page fault the next time the mapping is 1461 * referenced. 
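			 * That fault is handled by pmap_fault_fixup(), which
			 * performs the "page referenced" emulation and
			 * re-validates the PTE.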
			 */
			npte &= ~L2_TYPE_MASK;
			npte |= L2_TYPE_INV;
		}

		if (npte != opte) {
			count++;
			*ptep = npte;
			PTE_SYNC(ptep);
			/* Flush the TLB entry if a current pmap. */
			if (PV_BEEN_EXECD(oflags))
				pmap_tlb_flushID_SE(pm, pv->pv_va);
			else
			if (PV_BEEN_REFD(oflags))
				pmap_tlb_flushD_SE(pm, pv->pv_va);
		}

		PMAP_UNLOCK(pm);

	}

	if (maskbits & PVF_WRITE)
		vm_page_aflag_clear(pg, PGA_WRITEABLE);
	rw_wunlock(&pvh_global_lock);
	return (count);
}

/*
 * main pv_entry manipulation functions:
 *   pmap_enter_pv: enter a mapping onto a vm_page list
 *   pmap_remove_pv: remove a mapping from a vm_page list
 *
 * NOTE: pmap_enter_pv expects to lock the pvh itself
 *       pmap_remove_pv expects the caller to lock the pvh before calling
 */

/*
 * pmap_enter_pv: enter a mapping onto a vm_page's PV list
 *
 * => caller should hold the proper lock on pvh_global_lock
 * => caller should have pmap locked
 * => we will (someday) gain the lock on the vm_page's PV list
 * => caller should adjust ptp's wire_count before calling
 * => caller should not adjust pmap's wire_count
 */
static void
pmap_enter_pv(struct vm_page *pg, struct pv_entry *pve, pmap_t pm,
    vm_offset_t va, u_int flags)
{

	rw_assert(&pvh_global_lock, RA_WLOCKED);
	PMAP_ASSERT_LOCKED(pm);
	if (pg->md.pv_kva != 0) {
		pve->pv_pmap = kernel_pmap;
		pve->pv_va = pg->md.pv_kva;
		pve->pv_flags = PVF_WRITE | PVF_UNMAN;
		if (pm != kernel_pmap)
			PMAP_LOCK(kernel_pmap);
		TAILQ_INSERT_HEAD(&pg->md.pv_list, pve, pv_list);
		TAILQ_INSERT_HEAD(&kernel_pmap->pm_pvlist, pve, pv_plist);
		if (pm != kernel_pmap)
			PMAP_UNLOCK(kernel_pmap);
		pg->md.pv_kva = 0;
		if ((pve = pmap_get_pv_entry()) == NULL)
			panic("pmap_kenter_pv: no pv entries");
	}
	pve->pv_pmap = pm;
	pve->pv_va = va;
	pve->pv_flags = flags;
	TAILQ_INSERT_HEAD(&pg->md.pv_list, pve, pv_list);
	TAILQ_INSERT_HEAD(&pm->pm_pvlist, pve, pv_plist);
	pg->md.pvh_attrs |= flags & (PVF_REF | PVF_MOD);
	if (pve->pv_flags & PVF_WIRED)
		++pm->pm_stats.wired_count;
	vm_page_aflag_set(pg, PGA_REFERENCED);
}

/*
 *
 * pmap_find_pv: Find a pv entry
 *
 * => caller should hold lock on vm_page
 */
static PMAP_INLINE struct pv_entry *
pmap_find_pv(struct vm_page *pg, pmap_t pm, vm_offset_t va)
{
	struct pv_entry *pv;

	rw_assert(&pvh_global_lock, RA_WLOCKED);
	TAILQ_FOREACH(pv, &pg->md.pv_list, pv_list)
		if (pm == pv->pv_pmap && va == pv->pv_va)
			break;
	return (pv);
}

/*
 * vector_page_setprot:
 *
 *	Manipulate the protection of the vector page.
1561 */ 1562void 1563vector_page_setprot(int prot) 1564{ 1565 struct l2_bucket *l2b; 1566 pt_entry_t *ptep; 1567 1568 l2b = pmap_get_l2_bucket(kernel_pmap, vector_page); 1569 1570 ptep = &l2b->l2b_kva[l2pte_index(vector_page)]; 1571 1572 *ptep = (*ptep & ~L1_S_PROT_MASK) | L2_S_PROT(PTE_KERNEL, prot); 1573 PTE_SYNC(ptep); 1574 cpu_tlb_flushD_SE(vector_page); 1575 cpu_cpwait(); 1576} 1577 1578/* 1579 * pmap_remove_pv: try to remove a mapping from a pv_list 1580 * 1581 * => caller should hold proper lock on pmap_main_lock 1582 * => pmap should be locked 1583 * => caller should hold lock on vm_page [so that attrs can be adjusted] 1584 * => caller should adjust ptp's wire_count and free PTP if needed 1585 * => caller should NOT adjust pmap's wire_count 1586 * => we return the removed pve 1587 */ 1588 1589static void 1590pmap_nuke_pv(struct vm_page *pg, pmap_t pm, struct pv_entry *pve) 1591{ 1592 1593 struct pv_entry *pv; 1594 rw_assert(&pvh_global_lock, RA_WLOCKED); 1595 PMAP_ASSERT_LOCKED(pm); 1596 TAILQ_REMOVE(&pg->md.pv_list, pve, pv_list); 1597 TAILQ_REMOVE(&pm->pm_pvlist, pve, pv_plist); 1598 if (pve->pv_flags & PVF_WIRED) 1599 --pm->pm_stats.wired_count; 1600 if (pg->md.pvh_attrs & PVF_MOD) 1601 vm_page_dirty(pg); 1602 if (TAILQ_FIRST(&pg->md.pv_list) == NULL) 1603 pg->md.pvh_attrs &= ~PVF_REF; 1604 else 1605 vm_page_aflag_set(pg, PGA_REFERENCED); 1606 if ((pve->pv_flags & PVF_NC) && ((pm == kernel_pmap) || 1607 (pve->pv_flags & PVF_WRITE) || !(pve->pv_flags & PVF_MWC))) 1608 pmap_fix_cache(pg, pm, 0); 1609 else if (pve->pv_flags & PVF_WRITE) { 1610 TAILQ_FOREACH(pve, &pg->md.pv_list, pv_list) 1611 if (pve->pv_flags & PVF_WRITE) 1612 break; 1613 if (!pve) { 1614 pg->md.pvh_attrs &= ~PVF_MOD; 1615 vm_page_aflag_clear(pg, PGA_WRITEABLE); 1616 } 1617 } 1618 pv = TAILQ_FIRST(&pg->md.pv_list); 1619 if (pv != NULL && (pv->pv_flags & PVF_UNMAN) && 1620 TAILQ_NEXT(pv, pv_list) == NULL) { 1621 pm = kernel_pmap; 1622 pg->md.pv_kva = pv->pv_va; 1623 /* a recursive pmap_nuke_pv */ 1624 TAILQ_REMOVE(&pg->md.pv_list, pv, pv_list); 1625 TAILQ_REMOVE(&pm->pm_pvlist, pv, pv_plist); 1626 if (pv->pv_flags & PVF_WIRED) 1627 --pm->pm_stats.wired_count; 1628 pg->md.pvh_attrs &= ~PVF_REF; 1629 pg->md.pvh_attrs &= ~PVF_MOD; 1630 vm_page_aflag_clear(pg, PGA_WRITEABLE); 1631 pmap_free_pv_entry(pv); 1632 } 1633} 1634 1635static struct pv_entry * 1636pmap_remove_pv(struct vm_page *pg, pmap_t pm, vm_offset_t va) 1637{ 1638 struct pv_entry *pve; 1639 1640 rw_assert(&pvh_global_lock, RA_WLOCKED); 1641 pve = TAILQ_FIRST(&pg->md.pv_list); 1642 1643 while (pve) { 1644 if (pve->pv_pmap == pm && pve->pv_va == va) { /* match? */ 1645 pmap_nuke_pv(pg, pm, pve); 1646 break; 1647 } 1648 pve = TAILQ_NEXT(pve, pv_list); 1649 } 1650 1651 if (pve == NULL && pg->md.pv_kva == va) 1652 pg->md.pv_kva = 0; 1653 1654 return(pve); /* return removed pve */ 1655} 1656/* 1657 * 1658 * pmap_modify_pv: Update pv flags 1659 * 1660 * => caller should hold lock on vm_page [so that attrs can be adjusted] 1661 * => caller should NOT adjust pmap's wire_count 1662 * => we return the old flags 1663 * 1664 * Modify a physical-virtual mapping in the pv table 1665 */ 1666static u_int 1667pmap_modify_pv(struct vm_page *pg, pmap_t pm, vm_offset_t va, 1668 u_int clr_mask, u_int set_mask) 1669{ 1670 struct pv_entry *npv; 1671 u_int flags, oflags; 1672 1673 PMAP_ASSERT_LOCKED(pm); 1674 rw_assert(&pvh_global_lock, RA_WLOCKED); 1675 if ((npv = pmap_find_pv(pg, pm, va)) == NULL) 1676 return (0); 1677 1678 /* 1679 * There is at least one VA mapping this page. 
1680 */ 1681 1682 if (clr_mask & (PVF_REF | PVF_MOD)) 1683 pg->md.pvh_attrs |= set_mask & (PVF_REF | PVF_MOD); 1684 1685 oflags = npv->pv_flags; 1686 npv->pv_flags = flags = (oflags & ~clr_mask) | set_mask; 1687 1688 if ((flags ^ oflags) & PVF_WIRED) { 1689 if (flags & PVF_WIRED) 1690 ++pm->pm_stats.wired_count; 1691 else 1692 --pm->pm_stats.wired_count; 1693 } 1694 1695 if ((flags ^ oflags) & PVF_WRITE) 1696 pmap_fix_cache(pg, pm, 0); 1697 1698 return (oflags); 1699} 1700 1701/* Function to set the debug level of the pmap code */ 1702#ifdef PMAP_DEBUG 1703void 1704pmap_debug(int level) 1705{ 1706 pmap_debug_level = level; 1707 dprintf("pmap_debug: level=%d\n", pmap_debug_level); 1708} 1709#endif /* PMAP_DEBUG */ 1710 1711void 1712pmap_pinit0(struct pmap *pmap) 1713{ 1714 PDEBUG(1, printf("pmap_pinit0: pmap = %08x\n", (u_int32_t) pmap)); 1715 1716 bcopy(kernel_pmap, pmap, sizeof(*pmap)); 1717 bzero(&pmap->pm_mtx, sizeof(pmap->pm_mtx)); 1718 PMAP_LOCK_INIT(pmap); 1719} 1720 1721/* 1722 * Initialize a vm_page's machine-dependent fields. 1723 */ 1724void 1725pmap_page_init(vm_page_t m) 1726{ 1727 1728 TAILQ_INIT(&m->md.pv_list); 1729 m->md.pv_memattr = VM_MEMATTR_DEFAULT; 1730 m->md.pvh_attrs = 0; 1731 m->md.pv_kva = 0; 1732} 1733 1734/* 1735 * Initialize the pmap module. 1736 * Called by vm_init, to initialize any structures that the pmap 1737 * system needs to map virtual memory. 1738 */ 1739void 1740pmap_init(void) 1741{ 1742 int shpgperproc = PMAP_SHPGPERPROC; 1743 1744 l2zone = uma_zcreate("L2 Table", L2_TABLE_SIZE_REAL, pmap_l2ptp_ctor, 1745 NULL, NULL, NULL, UMA_ALIGN_PTR, UMA_ZONE_VM | UMA_ZONE_NOFREE); 1746 l2table_zone = uma_zcreate("L2 Table", sizeof(struct l2_dtable), NULL, 1747 NULL, NULL, NULL, UMA_ALIGN_PTR, UMA_ZONE_VM | UMA_ZONE_NOFREE); 1748 1749 /* 1750 * Initialize the PV entry allocator. 1751 */ 1752 pvzone = uma_zcreate("PV ENTRY", sizeof (struct pv_entry), NULL, NULL, 1753 NULL, NULL, UMA_ALIGN_PTR, UMA_ZONE_VM | UMA_ZONE_NOFREE); 1754 TUNABLE_INT_FETCH("vm.pmap.shpgperproc", &shpgperproc); 1755 pv_entry_max = shpgperproc * maxproc + vm_cnt.v_page_count; 1756 uma_zone_reserve_kva(pvzone, pv_entry_max); 1757 pv_entry_high_water = 9 * (pv_entry_max / 10); 1758 1759 /* 1760 * Now it is safe to enable pv_table recording. 1761 */ 1762 PDEBUG(1, printf("pmap_init: done!\n")); 1763} 1764 1765int 1766pmap_fault_fixup(pmap_t pm, vm_offset_t va, vm_prot_t ftype, int user) 1767{ 1768 struct l2_dtable *l2; 1769 struct l2_bucket *l2b; 1770 pd_entry_t *pl1pd, l1pd; 1771 pt_entry_t *ptep, pte; 1772 vm_paddr_t pa; 1773 u_int l1idx; 1774 int rv = 0; 1775 1776 l1idx = L1_IDX(va); 1777 rw_wlock(&pvh_global_lock); 1778 PMAP_LOCK(pm); 1779 1780 /* 1781 * If there is no l2_dtable for this address, then the process 1782 * has no business accessing it. 1783 * 1784 * Note: This will catch userland processes trying to access 1785 * kernel addresses. 1786 */ 1787 l2 = pm->pm_l2[L2_IDX(l1idx)]; 1788 if (l2 == NULL) 1789 goto out; 1790 1791 /* 1792 * Likewise if there is no L2 descriptor table 1793 */ 1794 l2b = &l2->l2_bucket[L2_BUCKET(l1idx)]; 1795 if (l2b->l2b_kva == NULL) 1796 goto out; 1797 1798 /* 1799 * Check the PTE itself. 
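	 * A PTE of zero means there is no mapping at this address, so there
	 * is nothing for the fixup code to repair.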
1800 */ 1801 ptep = &l2b->l2b_kva[l2pte_index(va)]; 1802 pte = *ptep; 1803 if (pte == 0) 1804 goto out; 1805 1806 /* 1807 * Catch a userland access to the vector page mapped at 0x0 1808 */ 1809 if (user && (pte & L2_S_PROT_U) == 0) 1810 goto out; 1811 if (va == vector_page) 1812 goto out; 1813 1814 pa = l2pte_pa(pte); 1815 1816 if ((ftype & VM_PROT_WRITE) && (pte & L2_S_PROT_W) == 0) { 1817 /* 1818 * This looks like a good candidate for "page modified" 1819 * emulation... 1820 */ 1821 struct pv_entry *pv; 1822 struct vm_page *pg; 1823 1824 /* Extract the physical address of the page */ 1825 if ((pg = PHYS_TO_VM_PAGE(pa)) == NULL) { 1826 goto out; 1827 } 1828 /* Get the current flags for this page. */ 1829 1830 pv = pmap_find_pv(pg, pm, va); 1831 if (pv == NULL) { 1832 goto out; 1833 } 1834 1835 /* 1836 * Do the flags say this page is writable? If not then it 1837 * is a genuine write fault. If yes then the write fault is 1838 * our fault as we did not reflect the write access in the 1839 * PTE. Now we know a write has occurred we can correct this 1840 * and also set the modified bit 1841 */ 1842 if ((pv->pv_flags & PVF_WRITE) == 0) { 1843 goto out; 1844 } 1845 1846 pg->md.pvh_attrs |= PVF_REF | PVF_MOD; 1847 vm_page_dirty(pg); 1848 pv->pv_flags |= PVF_REF | PVF_MOD; 1849 1850 /* 1851 * Re-enable write permissions for the page. No need to call 1852 * pmap_fix_cache(), since this is just a 1853 * modified-emulation fault, and the PVF_WRITE bit isn't 1854 * changing. We've already set the cacheable bits based on 1855 * the assumption that we can write to this page. 1856 */ 1857 *ptep = (pte & ~L2_TYPE_MASK) | L2_S_PROTO | L2_S_PROT_W; 1858 PTE_SYNC(ptep); 1859 rv = 1; 1860 } else 1861 if ((pte & L2_TYPE_MASK) == L2_TYPE_INV) { 1862 /* 1863 * This looks like a good candidate for "page referenced" 1864 * emulation. 1865 */ 1866 struct pv_entry *pv; 1867 struct vm_page *pg; 1868 1869 /* Extract the physical address of the page */ 1870 if ((pg = PHYS_TO_VM_PAGE(pa)) == NULL) 1871 goto out; 1872 /* Get the current flags for this page. */ 1873 1874 pv = pmap_find_pv(pg, pm, va); 1875 if (pv == NULL) 1876 goto out; 1877 1878 pg->md.pvh_attrs |= PVF_REF; 1879 pv->pv_flags |= PVF_REF; 1880 1881 1882 *ptep = (pte & ~L2_TYPE_MASK) | L2_S_PROTO; 1883 PTE_SYNC(ptep); 1884 rv = 1; 1885 } 1886 1887 /* 1888 * We know there is a valid mapping here, so simply 1889 * fix up the L1 if necessary. 1890 */ 1891 pl1pd = &pm->pm_l1->l1_kva[l1idx]; 1892 l1pd = l2b->l2b_phys | L1_C_DOM(pm->pm_domain) | L1_C_PROTO; 1893 if (*pl1pd != l1pd) { 1894 *pl1pd = l1pd; 1895 PTE_SYNC(pl1pd); 1896 rv = 1; 1897 } 1898 1899#ifdef DEBUG 1900 /* 1901 * If 'rv == 0' at this point, it generally indicates that there is a 1902 * stale TLB entry for the faulting address. This happens when two or 1903 * more processes are sharing an L1. Since we don't flush the TLB on 1904 * a context switch between such processes, we can take domain faults 1905 * for mappings which exist at the same VA in both processes. EVEN IF 1906 * WE'VE RECENTLY FIXED UP THE CORRESPONDING L1 in pmap_enter(), for 1907 * example. 1908 * 1909 * This is extremely likely to happen if pmap_enter() updated the L1 1910 * entry for a recently entered mapping. In this case, the TLB is 1911 * flushed for the new mapping, but there may still be TLB entries for 1912 * other mappings belonging to other processes in the 1MB range 1913 * covered by the L1 entry. 
1914 * 1915 * Since 'rv == 0', we know that the L1 already contains the correct 1916 * value, so the fault must be due to a stale TLB entry. 1917 * 1918 * Since we always need to flush the TLB anyway in the case where we 1919 * fixed up the L1, or frobbed the L2 PTE, we effectively deal with 1920 * stale TLB entries dynamically. 1921 * 1922 * However, the above condition can ONLY happen if the current L1 is 1923 * being shared. If it happens when the L1 is unshared, it indicates 1924 * that other parts of the pmap are not doing their job WRT managing 1925 * the TLB. 1926 */ 1927 if (rv == 0 && pm->pm_l1->l1_domain_use_count == 1) { 1928 printf("fixup: pm %p, va 0x%lx, ftype %d - nothing to do!\n", 1929 pm, (u_long)va, ftype); 1930 printf("fixup: l2 %p, l2b %p, ptep %p, pl1pd %p\n", 1931 l2, l2b, ptep, pl1pd); 1932 printf("fixup: pte 0x%x, l1pd 0x%x, last code 0x%x\n", 1933 pte, l1pd, last_fault_code); 1934#ifdef DDB 1935 Debugger(); 1936#endif 1937 } 1938#endif 1939 1940 cpu_tlb_flushID_SE(va); 1941 cpu_cpwait(); 1942 1943 rv = 1; 1944 1945out: 1946 rw_wunlock(&pvh_global_lock); 1947 PMAP_UNLOCK(pm); 1948 return (rv); 1949} 1950 1951void 1952pmap_postinit(void) 1953{ 1954 struct l2_bucket *l2b; 1955 struct l1_ttable *l1; 1956 pd_entry_t *pl1pt; 1957 pt_entry_t *ptep, pte; 1958 vm_offset_t va, eva; 1959 u_int loop, needed; 1960 1961 needed = (maxproc / PMAP_DOMAINS) + ((maxproc % PMAP_DOMAINS) ? 1 : 0); 1962 needed -= 1; 1963 l1 = malloc(sizeof(*l1) * needed, M_VMPMAP, M_WAITOK); 1964 1965 for (loop = 0; loop < needed; loop++, l1++) { 1966 /* Allocate a L1 page table */ 1967 va = (vm_offset_t)contigmalloc(L1_TABLE_SIZE, M_VMPMAP, 0, 0x0, 1968 0xffffffff, L1_TABLE_SIZE, 0); 1969 1970 if (va == 0) 1971 panic("Cannot allocate L1 KVM"); 1972 1973 eva = va + L1_TABLE_SIZE; 1974 pl1pt = (pd_entry_t *)va; 1975 1976 while (va < eva) { 1977 l2b = pmap_get_l2_bucket(kernel_pmap, va); 1978 ptep = &l2b->l2b_kva[l2pte_index(va)]; 1979 pte = *ptep; 1980 pte = (pte & ~L2_S_CACHE_MASK) | pte_l2_s_cache_mode_pt; 1981 *ptep = pte; 1982 PTE_SYNC(ptep); 1983 cpu_tlb_flushD_SE(va); 1984 1985 va += PAGE_SIZE; 1986 } 1987 pmap_init_l1(l1, pl1pt); 1988 } 1989 1990 1991#ifdef DEBUG 1992 printf("pmap_postinit: Allocated %d static L1 descriptor tables\n", 1993 needed); 1994#endif 1995} 1996 1997/* 1998 * This is used to stuff certain critical values into the PCB where they 1999 * can be accessed quickly from cpu_switch() et al. 
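 * The cached values are the physical address of the pmap's L1 table
 * (pcb_pagedir), a pre-computed DACR word granting client access to both
 * the kernel domain and the pmap's own domain (pcb_dacr), and, when the
 * vector page lives below KERNBASE, the address and value of the L1 slot
 * that must map it (pcb_pl1vec/pcb_l1vec).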
2000 */ 2001void 2002pmap_set_pcb_pagedir(pmap_t pm, struct pcb *pcb) 2003{ 2004 struct l2_bucket *l2b; 2005 2006 pcb->pcb_pagedir = pm->pm_l1->l1_physaddr; 2007 pcb->pcb_dacr = (DOMAIN_CLIENT << (PMAP_DOMAIN_KERNEL * 2)) | 2008 (DOMAIN_CLIENT << (pm->pm_domain * 2)); 2009 2010 if (vector_page < KERNBASE) { 2011 pcb->pcb_pl1vec = &pm->pm_l1->l1_kva[L1_IDX(vector_page)]; 2012 l2b = pmap_get_l2_bucket(pm, vector_page); 2013 pcb->pcb_l1vec = l2b->l2b_phys | L1_C_PROTO | 2014 L1_C_DOM(pm->pm_domain) | L1_C_DOM(PMAP_DOMAIN_KERNEL); 2015 } else 2016 pcb->pcb_pl1vec = NULL; 2017} 2018 2019void 2020pmap_activate(struct thread *td) 2021{ 2022 pmap_t pm; 2023 struct pcb *pcb; 2024 2025 pm = vmspace_pmap(td->td_proc->p_vmspace); 2026 pcb = td->td_pcb; 2027 2028 critical_enter(); 2029 pmap_set_pcb_pagedir(pm, pcb); 2030 2031 if (td == curthread) { 2032 u_int cur_dacr, cur_ttb; 2033 2034 __asm __volatile("mrc p15, 0, %0, c2, c0, 0" : "=r"(cur_ttb)); 2035 __asm __volatile("mrc p15, 0, %0, c3, c0, 0" : "=r"(cur_dacr)); 2036 2037 cur_ttb &= ~(L1_TABLE_SIZE - 1); 2038 2039 if (cur_ttb == (u_int)pcb->pcb_pagedir && 2040 cur_dacr == pcb->pcb_dacr) { 2041 /* 2042 * No need to switch address spaces. 2043 */ 2044 critical_exit(); 2045 return; 2046 } 2047 2048 2049 /* 2050 * We MUST, I repeat, MUST fix up the L1 entry corresponding 2051 * to 'vector_page' in the incoming L1 table before switching 2052 * to it otherwise subsequent interrupts/exceptions (including 2053 * domain faults!) will jump into hyperspace. 2054 */ 2055 if (pcb->pcb_pl1vec) { 2056 2057 *pcb->pcb_pl1vec = pcb->pcb_l1vec; 2058 /* 2059 * Don't need to PTE_SYNC() at this point since 2060 * cpu_setttb() is about to flush both the cache 2061 * and the TLB. 2062 */ 2063 } 2064 2065 cpu_domains(pcb->pcb_dacr); 2066 cpu_setttb(pcb->pcb_pagedir); 2067 } 2068 critical_exit(); 2069} 2070 2071static int 2072pmap_set_pt_cache_mode(pd_entry_t *kl1, vm_offset_t va) 2073{ 2074 pd_entry_t *pdep, pde; 2075 pt_entry_t *ptep, pte; 2076 vm_offset_t pa; 2077 int rv = 0; 2078 2079 /* 2080 * Make sure the descriptor itself has the correct cache mode 2081 */ 2082 pdep = &kl1[L1_IDX(va)]; 2083 pde = *pdep; 2084 2085 if (l1pte_section_p(pde)) { 2086 if ((pde & L1_S_CACHE_MASK) != pte_l1_s_cache_mode_pt) { 2087 *pdep = (pde & ~L1_S_CACHE_MASK) | 2088 pte_l1_s_cache_mode_pt; 2089 PTE_SYNC(pdep); 2090 cpu_dcache_wbinv_range((vm_offset_t)pdep, 2091 sizeof(*pdep)); 2092 cpu_l2cache_wbinv_range((vm_offset_t)pdep, 2093 sizeof(*pdep)); 2094 rv = 1; 2095 } 2096 } else { 2097 pa = (vm_paddr_t)(pde & L1_C_ADDR_MASK); 2098 ptep = (pt_entry_t *)kernel_pt_lookup(pa); 2099 if (ptep == NULL) 2100 panic("pmap_bootstrap: No L2 for L2 @ va %p\n", ptep); 2101 2102 ptep = &ptep[l2pte_index(va)]; 2103 pte = *ptep; 2104 if ((pte & L2_S_CACHE_MASK) != pte_l2_s_cache_mode_pt) { 2105 *ptep = (pte & ~L2_S_CACHE_MASK) | 2106 pte_l2_s_cache_mode_pt; 2107 PTE_SYNC(ptep); 2108 cpu_dcache_wbinv_range((vm_offset_t)ptep, 2109 sizeof(*ptep)); 2110 cpu_l2cache_wbinv_range((vm_offset_t)ptep, 2111 sizeof(*ptep)); 2112 rv = 1; 2113 } 2114 } 2115 2116 return (rv); 2117} 2118 2119static void 2120pmap_alloc_specials(vm_offset_t *availp, int pages, vm_offset_t *vap, 2121 pt_entry_t **ptep) 2122{ 2123 vm_offset_t va = *availp; 2124 struct l2_bucket *l2b; 2125 2126 if (ptep) { 2127 l2b = pmap_get_l2_bucket(kernel_pmap, va); 2128 if (l2b == NULL) 2129 panic("pmap_alloc_specials: no l2b for 0x%x", va); 2130 2131 *ptep = &l2b->l2b_kva[l2pte_index(va)]; 2132 } 2133 2134 *vap = va; 2135 *availp = va + (PAGE_SIZE * 
pages); 2136} 2137 2138/* 2139 * Bootstrap the system enough to run with virtual memory. 2140 * 2141 * On the arm this is called after mapping has already been enabled 2142 * and just syncs the pmap module with what has already been done. 2143 * [We can't call it easily with mapping off since the kernel is not 2144 * mapped with PA == VA, hence we would have to relocate every address 2145 * from the linked base (virtual) address "KERNBASE" to the actual 2146 * (physical) address starting relative to 0] 2147 */ 2148#define PMAP_STATIC_L2_SIZE 16 2149void 2150pmap_bootstrap(vm_offset_t firstaddr, struct pv_addr *l1pt) 2151{ 2152 static struct l1_ttable static_l1; 2153 static struct l2_dtable static_l2[PMAP_STATIC_L2_SIZE]; 2154 struct l1_ttable *l1 = &static_l1; 2155 struct l2_dtable *l2; 2156 struct l2_bucket *l2b; 2157 pd_entry_t pde; 2158 pd_entry_t *kernel_l1pt = (pd_entry_t *)l1pt->pv_va; 2159 pt_entry_t *ptep; 2160 pt_entry_t *qmap_pte; 2161 vm_paddr_t pa; 2162 vm_offset_t va; 2163 vm_size_t size; 2164 int l1idx, l2idx, l2next = 0; 2165 2166 PDEBUG(1, printf("firstaddr = %08x, lastaddr = %08x\n", 2167 firstaddr, vm_max_kernel_address)); 2168 2169 virtual_avail = firstaddr; 2170 kernel_pmap->pm_l1 = l1; 2171 kernel_l1pa = l1pt->pv_pa; 2172 2173 /* 2174 * Scan the L1 translation table created by initarm() and create 2175 * the required metadata for all valid mappings found in it. 2176 */ 2177 for (l1idx = 0; l1idx < (L1_TABLE_SIZE / sizeof(pd_entry_t)); l1idx++) { 2178 pde = kernel_l1pt[l1idx]; 2179 2180 /* 2181 * We're only interested in Coarse mappings. 2182 * pmap_extract() can deal with section mappings without 2183 * recourse to checking L2 metadata. 2184 */ 2185 if ((pde & L1_TYPE_MASK) != L1_TYPE_C) 2186 continue; 2187 2188 /* 2189 * Lookup the KVA of this L2 descriptor table 2190 */ 2191 pa = (vm_paddr_t)(pde & L1_C_ADDR_MASK); 2192 ptep = (pt_entry_t *)kernel_pt_lookup(pa); 2193 2194 if (ptep == NULL) { 2195 panic("pmap_bootstrap: No L2 for va 0x%x, pa 0x%lx", 2196 (u_int)l1idx << L1_S_SHIFT, (long unsigned int)pa); 2197 } 2198 2199 /* 2200 * Fetch the associated L2 metadata structure. 2201 * Allocate a new one if necessary. 2202 */ 2203 if ((l2 = kernel_pmap->pm_l2[L2_IDX(l1idx)]) == NULL) { 2204 if (l2next == PMAP_STATIC_L2_SIZE) 2205 panic("pmap_bootstrap: out of static L2s"); 2206 kernel_pmap->pm_l2[L2_IDX(l1idx)] = l2 = 2207 &static_l2[l2next++]; 2208 } 2209 2210 /* 2211 * One more L1 slot tracked... 2212 */ 2213 l2->l2_occupancy++; 2214 2215 /* 2216 * Fill in the details of the L2 descriptor in the 2217 * appropriate bucket. 2218 */ 2219 l2b = &l2->l2_bucket[L2_BUCKET(l1idx)]; 2220 l2b->l2b_kva = ptep; 2221 l2b->l2b_phys = pa; 2222 l2b->l2b_l1idx = l1idx; 2223 2224 /* 2225 * Establish an initial occupancy count for this descriptor 2226 */ 2227 for (l2idx = 0; 2228 l2idx < (L2_TABLE_SIZE_REAL / sizeof(pt_entry_t)); 2229 l2idx++) { 2230 if ((ptep[l2idx] & L2_TYPE_MASK) != L2_TYPE_INV) { 2231 l2b->l2b_occupancy++; 2232 } 2233 } 2234 2235 /* 2236 * Make sure the descriptor itself has the correct cache mode. 2237 * If not, fix it, but whine about the problem. Port-meisters 2238 * should consider this a clue to fix up their initarm() 2239 * function. :) 2240 */ 2241 if (pmap_set_pt_cache_mode(kernel_l1pt, (vm_offset_t)ptep)) { 2242 printf("pmap_bootstrap: WARNING! wrong cache mode for " 2243 "L2 pte @ %p\n", ptep); 2244 } 2245 } 2246 2247 2248 /* 2249 * Ensure the primary (kernel) L1 has the correct cache mode for 2250 * a page table. Bitch if it is not correctly set. 
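 * (On these CPUs the table walker fetches descriptors straight from
 * memory rather than from the data cache, so page tables are expected
 * to be mapped with pte_l1_s_cache_mode_pt/pte_l2_s_cache_mode_pt;
 * otherwise descriptor updates could linger in the cache where the MMU
 * never sees them.)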
2251 */ 2252 for (va = (vm_offset_t)kernel_l1pt; 2253 va < ((vm_offset_t)kernel_l1pt + L1_TABLE_SIZE); va += PAGE_SIZE) { 2254 if (pmap_set_pt_cache_mode(kernel_l1pt, va)) 2255 printf("pmap_bootstrap: WARNING! wrong cache mode for " 2256 "primary L1 @ 0x%x\n", va); 2257 } 2258 2259 cpu_dcache_wbinv_all(); 2260 cpu_l2cache_wbinv_all(); 2261 cpu_tlb_flushID(); 2262 cpu_cpwait(); 2263 2264 PMAP_LOCK_INIT(kernel_pmap); 2265 CPU_FILL(&kernel_pmap->pm_active); 2266 kernel_pmap->pm_domain = PMAP_DOMAIN_KERNEL; 2267 TAILQ_INIT(&kernel_pmap->pm_pvlist); 2268 2269 /* 2270 * Initialize the global pv list lock. 2271 */ 2272 rw_init_flags(&pvh_global_lock, "pmap pv global", RW_RECURSE); 2273 2274 /* 2275 * Reserve some special page table entries/VA space for temporary 2276 * mapping of pages. 2277 */ 2278 pmap_alloc_specials(&virtual_avail, 1, &csrcp, &csrc_pte); 2279 pmap_set_pt_cache_mode(kernel_l1pt, (vm_offset_t)csrc_pte); 2280 pmap_alloc_specials(&virtual_avail, 1, &cdstp, &cdst_pte); 2281 pmap_set_pt_cache_mode(kernel_l1pt, (vm_offset_t)cdst_pte); 2282 pmap_alloc_specials(&virtual_avail, 1, &qmap_addr, &qmap_pte); 2283 pmap_set_pt_cache_mode(kernel_l1pt, (vm_offset_t)qmap_pte); 2284 size = ((vm_max_kernel_address - pmap_curmaxkvaddr) + L1_S_OFFSET) / 2285 L1_S_SIZE; 2286 pmap_alloc_specials(&virtual_avail, 2287 round_page(size * L2_TABLE_SIZE_REAL) / PAGE_SIZE, 2288 &pmap_kernel_l2ptp_kva, NULL); 2289 2290 size = howmany(size, L2_BUCKET_SIZE); 2291 pmap_alloc_specials(&virtual_avail, 2292 round_page(size * sizeof(struct l2_dtable)) / PAGE_SIZE, 2293 &pmap_kernel_l2dtable_kva, NULL); 2294 2295 pmap_alloc_specials(&virtual_avail, 2296 1, (vm_offset_t*)&_tmppt, NULL); 2297 pmap_alloc_specials(&virtual_avail, 2298 MAXDUMPPGS, (vm_offset_t *)&crashdumpmap, NULL); 2299 SLIST_INIT(&l1_list); 2300 TAILQ_INIT(&l1_lru_list); 2301 mtx_init(&l1_lru_lock, "l1 list lock", NULL, MTX_DEF); 2302 pmap_init_l1(l1, kernel_l1pt); 2303 cpu_dcache_wbinv_all(); 2304 cpu_l2cache_wbinv_all(); 2305 2306 virtual_avail = round_page(virtual_avail); 2307 virtual_end = vm_max_kernel_address; 2308 kernel_vm_end = pmap_curmaxkvaddr; 2309 mtx_init(&cmtx, "TMP mappings mtx", NULL, MTX_DEF); 2310 mtx_init(&qmap_mtx, "quick mapping mtx", NULL, MTX_DEF); 2311 2312 pmap_set_pcb_pagedir(kernel_pmap, thread0.td_pcb); 2313} 2314 2315/*************************************************** 2316 * Pmap allocation/deallocation routines. 2317 ***************************************************/ 2318 2319/* 2320 * Release any resources held by the given physical map. 2321 * Called when a pmap initialized by pmap_pinit is being released. 2322 * Should only be called if the map contains no valid mappings. 2323 */ 2324void 2325pmap_release(pmap_t pmap) 2326{ 2327 struct pcb *pcb; 2328 2329 pmap_idcache_wbinv_all(pmap); 2330 cpu_l2cache_wbinv_all(); 2331 pmap_tlb_flushID(pmap); 2332 cpu_cpwait(); 2333 if (vector_page < KERNBASE) { 2334 struct pcb *curpcb = PCPU_GET(curpcb); 2335 pcb = thread0.td_pcb; 2336 if (pmap_is_current(pmap)) { 2337 /* 2338 * Frob the L1 entry corresponding to the vector 2339 * page so that it contains the kernel pmap's domain 2340 * number. This will ensure pmap_remove() does not 2341 * pull the current vector page out from under us. 2342 */ 2343 critical_enter(); 2344 *pcb->pcb_pl1vec = pcb->pcb_l1vec; 2345 cpu_domains(pcb->pcb_dacr); 2346 cpu_setttb(pcb->pcb_pagedir); 2347 critical_exit(); 2348 } 2349 pmap_remove(pmap, vector_page, vector_page + PAGE_SIZE); 2350 /* 2351 * Make sure cpu_switch(), et al, DTRT. 
This is safe to do 2352 * since this process has no remaining mappings of its own. 2353 */ 2354 curpcb->pcb_pl1vec = pcb->pcb_pl1vec; 2355 curpcb->pcb_l1vec = pcb->pcb_l1vec; 2356 curpcb->pcb_dacr = pcb->pcb_dacr; 2357 curpcb->pcb_pagedir = pcb->pcb_pagedir; 2358 2359 } 2360 pmap_free_l1(pmap); 2361 2362 dprintf("pmap_release()\n"); 2363} 2364 2365 2366 2367/* 2368 * Helper function for pmap_grow_l2_bucket() 2369 */ 2370static __inline int 2371pmap_grow_map(vm_offset_t va, pt_entry_t cache_mode, vm_paddr_t *pap) 2372{ 2373 struct l2_bucket *l2b; 2374 pt_entry_t *ptep; 2375 vm_paddr_t pa; 2376 struct vm_page *pg; 2377 2378 pg = vm_page_alloc(NULL, 0, VM_ALLOC_NOOBJ | VM_ALLOC_WIRED); 2379 if (pg == NULL) 2380 return (1); 2381 pa = VM_PAGE_TO_PHYS(pg); 2382 2383 if (pap) 2384 *pap = pa; 2385 2386 l2b = pmap_get_l2_bucket(kernel_pmap, va); 2387 2388 ptep = &l2b->l2b_kva[l2pte_index(va)]; 2389 *ptep = L2_S_PROTO | pa | cache_mode | 2390 L2_S_PROT(PTE_KERNEL, VM_PROT_READ | VM_PROT_WRITE); 2391 PTE_SYNC(ptep); 2392 return (0); 2393} 2394 2395/* 2396 * This is the same as pmap_alloc_l2_bucket(), except that it is only 2397 * used by pmap_growkernel(). 2398 */ 2399static __inline struct l2_bucket * 2400pmap_grow_l2_bucket(pmap_t pm, vm_offset_t va) 2401{ 2402 struct l2_dtable *l2; 2403 struct l2_bucket *l2b; 2404 struct l1_ttable *l1; 2405 pd_entry_t *pl1pd; 2406 u_short l1idx; 2407 vm_offset_t nva; 2408 2409 l1idx = L1_IDX(va); 2410 2411 if ((l2 = pm->pm_l2[L2_IDX(l1idx)]) == NULL) { 2412 /* 2413 * No mapping at this address, as there is 2414 * no entry in the L1 table. 2415 * Need to allocate a new l2_dtable. 2416 */ 2417 nva = pmap_kernel_l2dtable_kva; 2418 if ((nva & PAGE_MASK) == 0) { 2419 /* 2420 * Need to allocate a backing page 2421 */ 2422 if (pmap_grow_map(nva, pte_l2_s_cache_mode, NULL)) 2423 return (NULL); 2424 } 2425 2426 l2 = (struct l2_dtable *)nva; 2427 nva += sizeof(struct l2_dtable); 2428 2429 if ((nva & PAGE_MASK) < (pmap_kernel_l2dtable_kva & 2430 PAGE_MASK)) { 2431 /* 2432 * The new l2_dtable straddles a page boundary. 2433 * Map in another page to cover it. 2434 */ 2435 if (pmap_grow_map(nva, pte_l2_s_cache_mode, NULL)) 2436 return (NULL); 2437 } 2438 2439 pmap_kernel_l2dtable_kva = nva; 2440 2441 /* 2442 * Link it into the parent pmap 2443 */ 2444 pm->pm_l2[L2_IDX(l1idx)] = l2; 2445 memset(l2, 0, sizeof(*l2)); 2446 } 2447 2448 l2b = &l2->l2_bucket[L2_BUCKET(l1idx)]; 2449 2450 /* 2451 * Fetch pointer to the L2 page table associated with the address. 2452 */ 2453 if (l2b->l2b_kva == NULL) { 2454 pt_entry_t *ptep; 2455 2456 /* 2457 * No L2 page table has been allocated. Chances are, this 2458 * is because we just allocated the l2_dtable, above. 
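 * Kernel L2 page tables are carved out of the KVA window reserved for
 * pmap_kernel_l2ptp_kva in pmap_bootstrap(), in L2_TABLE_SIZE_REAL
 * sized chunks; a fresh backing page is only allocated via
 * pmap_grow_map() when the previous page has been used up.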
2459 */ 2460 nva = pmap_kernel_l2ptp_kva; 2461 ptep = (pt_entry_t *)nva; 2462 if ((nva & PAGE_MASK) == 0) { 2463 /* 2464 * Need to allocate a backing page 2465 */ 2466 if (pmap_grow_map(nva, pte_l2_s_cache_mode_pt, 2467 &pmap_kernel_l2ptp_phys)) 2468 return (NULL); 2469 PTE_SYNC_RANGE(ptep, PAGE_SIZE / sizeof(pt_entry_t)); 2470 } 2471 memset(ptep, 0, L2_TABLE_SIZE_REAL); 2472 l2->l2_occupancy++; 2473 l2b->l2b_kva = ptep; 2474 l2b->l2b_l1idx = l1idx; 2475 l2b->l2b_phys = pmap_kernel_l2ptp_phys; 2476 2477 pmap_kernel_l2ptp_kva += L2_TABLE_SIZE_REAL; 2478 pmap_kernel_l2ptp_phys += L2_TABLE_SIZE_REAL; 2479 } 2480 2481 /* Distribute new L1 entry to all other L1s */ 2482 SLIST_FOREACH(l1, &l1_list, l1_link) { 2483 pl1pd = &l1->l1_kva[L1_IDX(va)]; 2484 *pl1pd = l2b->l2b_phys | L1_C_DOM(PMAP_DOMAIN_KERNEL) | 2485 L1_C_PROTO; 2486 PTE_SYNC(pl1pd); 2487 } 2488 2489 return (l2b); 2490} 2491 2492 2493/* 2494 * grow the number of kernel page table entries, if needed 2495 */ 2496void 2497pmap_growkernel(vm_offset_t addr) 2498{ 2499 pmap_t kpm = kernel_pmap; 2500 2501 if (addr <= pmap_curmaxkvaddr) 2502 return; /* we are OK */ 2503 2504 /* 2505 * whoops! we need to add kernel PTPs 2506 */ 2507 2508 /* Map 1MB at a time */ 2509 for (; pmap_curmaxkvaddr < addr; pmap_curmaxkvaddr += L1_S_SIZE) 2510 pmap_grow_l2_bucket(kpm, pmap_curmaxkvaddr); 2511 2512 /* 2513 * flush out the cache, expensive but growkernel will happen so 2514 * rarely 2515 */ 2516 cpu_dcache_wbinv_all(); 2517 cpu_l2cache_wbinv_all(); 2518 cpu_tlb_flushD(); 2519 cpu_cpwait(); 2520 kernel_vm_end = pmap_curmaxkvaddr; 2521} 2522 2523 2524/* 2525 * Remove all pages from specified address space 2526 * this aids process exit speeds. Also, this code 2527 * is special cased for current process only, but 2528 * can have the more generic (and slightly slower) 2529 * mode enabled. This is much faster than pmap_remove 2530 * in the case of running down an entire address space. 2531 */ 2532void 2533pmap_remove_pages(pmap_t pmap) 2534{ 2535 struct pv_entry *pv, *npv; 2536 struct l2_bucket *l2b = NULL; 2537 vm_page_t m; 2538 pt_entry_t *pt; 2539 2540 rw_wlock(&pvh_global_lock); 2541 PMAP_LOCK(pmap); 2542 cpu_idcache_wbinv_all(); 2543 cpu_l2cache_wbinv_all(); 2544 for (pv = TAILQ_FIRST(&pmap->pm_pvlist); pv; pv = npv) { 2545 if (pv->pv_flags & PVF_WIRED || pv->pv_flags & PVF_UNMAN) { 2546 /* Cannot remove wired or unmanaged pages now. */ 2547 npv = TAILQ_NEXT(pv, pv_plist); 2548 continue; 2549 } 2550 pmap->pm_stats.resident_count--; 2551 l2b = pmap_get_l2_bucket(pmap, pv->pv_va); 2552 KASSERT(l2b != NULL, ("No L2 bucket in pmap_remove_pages")); 2553 pt = &l2b->l2b_kva[l2pte_index(pv->pv_va)]; 2554 m = PHYS_TO_VM_PAGE(*pt & L2_S_FRAME); 2555 KASSERT((vm_offset_t)m >= KERNBASE, ("Trying to access non-existent page va %x pte %x", pv->pv_va, *pt)); 2556 *pt = 0; 2557 PTE_SYNC(pt); 2558 npv = TAILQ_NEXT(pv, pv_plist); 2559 pmap_nuke_pv(m, pmap, pv); 2560 if (TAILQ_EMPTY(&m->md.pv_list)) 2561 vm_page_aflag_clear(m, PGA_WRITEABLE); 2562 pmap_free_pv_entry(pv); 2563 pmap_free_l2_bucket(pmap, l2b, 1); 2564 } 2565 rw_wunlock(&pvh_global_lock); 2566 cpu_tlb_flushID(); 2567 cpu_cpwait(); 2568 PMAP_UNLOCK(pmap); 2569} 2570 2571 2572/*************************************************** 2573 * Low level mapping routines..... 2574 ***************************************************/ 2575 2576#ifdef ARM_HAVE_SUPERSECTIONS 2577/* Map a super section into the KVA. 
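 * A supersection covers 16MB, so it spans sixteen consecutive 1MB L1
 * slots.  The same descriptor is written into each slot, and into every
 * L1 table on the system, since kernel mappings must be visible in all
 * address spaces.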
*/ 2578 2579void 2580pmap_kenter_supersection(vm_offset_t va, uint64_t pa, int flags) 2581{ 2582 pd_entry_t pd = L1_S_PROTO | L1_S_SUPERSEC | (pa & L1_SUP_FRAME) | 2583 (((pa >> 32) & 0xf) << 20) | L1_S_PROT(PTE_KERNEL, 2584 VM_PROT_READ|VM_PROT_WRITE) | L1_S_DOM(PMAP_DOMAIN_KERNEL); 2585 struct l1_ttable *l1; 2586 vm_offset_t va0, va_end; 2587 2588 KASSERT(((va | pa) & L1_SUP_OFFSET) == 0, 2589 ("Not a valid super section mapping")); 2590 if (flags & SECTION_CACHE) 2591 pd |= pte_l1_s_cache_mode; 2592 else if (flags & SECTION_PT) 2593 pd |= pte_l1_s_cache_mode_pt; 2594 va0 = va & L1_SUP_FRAME; 2595 va_end = va + L1_SUP_SIZE; 2596 SLIST_FOREACH(l1, &l1_list, l1_link) { 2597 va = va0; 2598 for (; va < va_end; va += L1_S_SIZE) { 2599 l1->l1_kva[L1_IDX(va)] = pd; 2600 PTE_SYNC(&l1->l1_kva[L1_IDX(va)]); 2601 } 2602 } 2603} 2604#endif 2605 2606/* Map a section into the KVA. */ 2607 2608void 2609pmap_kenter_section(vm_offset_t va, vm_offset_t pa, int flags) 2610{ 2611 pd_entry_t pd = L1_S_PROTO | pa | L1_S_PROT(PTE_KERNEL, 2612 VM_PROT_READ|VM_PROT_WRITE) | L1_S_DOM(PMAP_DOMAIN_KERNEL); 2613 struct l1_ttable *l1; 2614 2615 KASSERT(((va | pa) & L1_S_OFFSET) == 0, 2616 ("Not a valid section mapping")); 2617 if (flags & SECTION_CACHE) 2618 pd |= pte_l1_s_cache_mode; 2619 else if (flags & SECTION_PT) 2620 pd |= pte_l1_s_cache_mode_pt; 2621 SLIST_FOREACH(l1, &l1_list, l1_link) { 2622 l1->l1_kva[L1_IDX(va)] = pd; 2623 PTE_SYNC(&l1->l1_kva[L1_IDX(va)]); 2624 } 2625} 2626 2627/* 2628 * Make a temporary mapping for a physical address. This is only intended 2629 * to be used for panic dumps. 2630 */ 2631void * 2632pmap_kenter_temporary(vm_paddr_t pa, int i) 2633{ 2634 vm_offset_t va; 2635 2636 va = (vm_offset_t)crashdumpmap + (i * PAGE_SIZE); 2637 pmap_kenter(va, pa); 2638 return ((void *)crashdumpmap); 2639} 2640 2641/* 2642 * add a wired page to the kva 2643 * note that in order for the mapping to take effect -- you 2644 * should do a invltlb after doing the pmap_kenter... 2645 */ 2646static PMAP_INLINE void 2647pmap_kenter_internal(vm_offset_t va, vm_offset_t pa, int flags) 2648{ 2649 struct l2_bucket *l2b; 2650 pt_entry_t *pte; 2651 pt_entry_t opte; 2652 struct pv_entry *pve; 2653 vm_page_t m; 2654 2655 PDEBUG(1, printf("pmap_kenter: va = %08x, pa = %08x\n", 2656 (uint32_t) va, (uint32_t) pa)); 2657 2658 2659 l2b = pmap_get_l2_bucket(kernel_pmap, va); 2660 if (l2b == NULL) 2661 l2b = pmap_grow_l2_bucket(kernel_pmap, va); 2662 KASSERT(l2b != NULL, ("No L2 Bucket")); 2663 pte = &l2b->l2b_kva[l2pte_index(va)]; 2664 opte = *pte; 2665 PDEBUG(1, printf("pmap_kenter: pte = %08x, opte = %08x, npte = %08x\n", 2666 (uint32_t) pte, opte, *pte)); 2667 if (l2pte_valid(opte)) { 2668 pmap_kremove(va); 2669 } else { 2670 if (opte == 0) 2671 l2b->l2b_occupancy++; 2672 } 2673 *pte = L2_S_PROTO | pa | L2_S_PROT(PTE_KERNEL, 2674 VM_PROT_READ | VM_PROT_WRITE); 2675 if (flags & KENTER_CACHE) 2676 *pte |= pte_l2_s_cache_mode; 2677 if (flags & KENTER_USER) 2678 *pte |= L2_S_PROT_U; 2679 PTE_SYNC(pte); 2680 2681 /* 2682 * A kernel mapping may not be the page's only mapping, so create a PV 2683 * entry to ensure proper caching. 2684 * 2685 * The existence test for the pvzone is used to delay the recording of 2686 * kernel mappings until the VM system is fully initialized. 2687 * 2688 * This expects the physical memory to have a vm_page_array entry. 
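 * Entries created here carry PVF_UNMAN, marking the mapping as a kernel
 * alias: it participates in cache management like any other mapping but
 * is never torn down on behalf of the VM system (see pmap_remove_all()
 * and pmap_remove_pages()).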
2689 */ 2690 if (pvzone != NULL && (m = vm_phys_paddr_to_vm_page(pa)) != NULL) { 2691 rw_wlock(&pvh_global_lock); 2692 if (!TAILQ_EMPTY(&m->md.pv_list) || m->md.pv_kva != 0) { 2693 if ((pve = pmap_get_pv_entry()) == NULL) 2694 panic("pmap_kenter_internal: no pv entries"); 2695 PMAP_LOCK(kernel_pmap); 2696 pmap_enter_pv(m, pve, kernel_pmap, va, 2697 PVF_WRITE | PVF_UNMAN); 2698 pmap_fix_cache(m, kernel_pmap, va); 2699 PMAP_UNLOCK(kernel_pmap); 2700 } else { 2701 m->md.pv_kva = va; 2702 } 2703 rw_wunlock(&pvh_global_lock); 2704 } 2705} 2706 2707void 2708pmap_kenter(vm_offset_t va, vm_paddr_t pa) 2709{ 2710 pmap_kenter_internal(va, pa, KENTER_CACHE); 2711} 2712 2713void 2714pmap_kenter_nocache(vm_offset_t va, vm_paddr_t pa) 2715{ 2716 2717 pmap_kenter_internal(va, pa, 0); 2718} 2719 2720void 2721pmap_kenter_device(vm_offset_t va, vm_size_t size, vm_paddr_t pa) 2722{ 2723 vm_offset_t sva; 2724 2725 KASSERT((size & PAGE_MASK) == 0, 2726 ("%s: device mapping not page-sized", __func__)); 2727 2728 sva = va; 2729 while (size != 0) { 2730 pmap_kenter_internal(va, pa, 0); 2731 va += PAGE_SIZE; 2732 pa += PAGE_SIZE; 2733 size -= PAGE_SIZE; 2734 } 2735} 2736 2737void 2738pmap_kremove_device(vm_offset_t va, vm_size_t size) 2739{ 2740 vm_offset_t sva; 2741 2742 KASSERT((size & PAGE_MASK) == 0, 2743 ("%s: device mapping not page-sized", __func__)); 2744 2745 sva = va; 2746 while (size != 0) { 2747 pmap_kremove(va); 2748 va += PAGE_SIZE; 2749 size -= PAGE_SIZE; 2750 } 2751} 2752 2753void 2754pmap_kenter_user(vm_offset_t va, vm_paddr_t pa) 2755{ 2756 2757 pmap_kenter_internal(va, pa, KENTER_CACHE|KENTER_USER); 2758 /* 2759 * Call pmap_fault_fixup now, to make sure we'll have no exception 2760 * at the first use of the new address, or bad things will happen, 2761 * as we use one of these addresses in the exception handlers. 2762 */ 2763 pmap_fault_fixup(kernel_pmap, va, VM_PROT_READ|VM_PROT_WRITE, 1); 2764} 2765 2766vm_paddr_t 2767pmap_kextract(vm_offset_t va) 2768{ 2769 2770 return (pmap_extract_locked(kernel_pmap, va)); 2771} 2772 2773/* 2774 * remove a page from the kernel pagetables 2775 */ 2776void 2777pmap_kremove(vm_offset_t va) 2778{ 2779 struct l2_bucket *l2b; 2780 pt_entry_t *pte, opte; 2781 struct pv_entry *pve; 2782 vm_page_t m; 2783 vm_offset_t pa; 2784 2785 l2b = pmap_get_l2_bucket(kernel_pmap, va); 2786 if (!l2b) 2787 return; 2788 KASSERT(l2b != NULL, ("No L2 Bucket")); 2789 pte = &l2b->l2b_kva[l2pte_index(va)]; 2790 opte = *pte; 2791 if (l2pte_valid(opte)) { 2792 /* pa = vtophs(va) taken from pmap_extract() */ 2793 if ((opte & L2_TYPE_MASK) == L2_TYPE_L) 2794 pa = (opte & L2_L_FRAME) | (va & L2_L_OFFSET); 2795 else 2796 pa = (opte & L2_S_FRAME) | (va & L2_S_OFFSET); 2797 /* note: should never have to remove an allocation 2798 * before the pvzone is initialized. 2799 */ 2800 rw_wlock(&pvh_global_lock); 2801 PMAP_LOCK(kernel_pmap); 2802 if (pvzone != NULL && (m = vm_phys_paddr_to_vm_page(pa)) && 2803 (pve = pmap_remove_pv(m, kernel_pmap, va))) 2804 pmap_free_pv_entry(pve); 2805 PMAP_UNLOCK(kernel_pmap); 2806 rw_wunlock(&pvh_global_lock); 2807 va = va & ~PAGE_MASK; 2808 cpu_dcache_wbinv_range(va, PAGE_SIZE); 2809 cpu_l2cache_wbinv_range(va, PAGE_SIZE); 2810 cpu_tlb_flushD_SE(va); 2811 cpu_cpwait(); 2812 *pte = 0; 2813 } 2814} 2815 2816 2817/* 2818 * Used to map a range of physical addresses into kernel 2819 * virtual address space. 2820 * 2821 * The value passed in '*virt' is a suggested virtual address for 2822 * the mapping. 
Architectures which can support a direct-mapped 2823 * physical to virtual region can return the appropriate address 2824 * within that region, leaving '*virt' unchanged. Other 2825 * architectures should map the pages starting at '*virt' and 2826 * update '*virt' with the first usable address after the mapped 2827 * region. 2828 */ 2829vm_offset_t 2830pmap_map(vm_offset_t *virt, vm_offset_t start, vm_offset_t end, int prot) 2831{ 2832 vm_offset_t sva = *virt; 2833 vm_offset_t va = sva; 2834 2835 PDEBUG(1, printf("pmap_map: virt = %08x, start = %08x, end = %08x, " 2836 "prot = %d\n", (uint32_t) *virt, (uint32_t) start, (uint32_t) end, 2837 prot)); 2838 2839 while (start < end) { 2840 pmap_kenter(va, start); 2841 va += PAGE_SIZE; 2842 start += PAGE_SIZE; 2843 } 2844 *virt = va; 2845 return (sva); 2846} 2847 2848static void 2849pmap_wb_page(vm_page_t m) 2850{ 2851 struct pv_entry *pv; 2852 2853 TAILQ_FOREACH(pv, &m->md.pv_list, pv_list) 2854 pmap_dcache_wb_range(pv->pv_pmap, pv->pv_va, PAGE_SIZE, FALSE, 2855 (pv->pv_flags & PVF_WRITE) == 0); 2856} 2857 2858static void 2859pmap_inv_page(vm_page_t m) 2860{ 2861 struct pv_entry *pv; 2862 2863 TAILQ_FOREACH(pv, &m->md.pv_list, pv_list) 2864 pmap_dcache_wb_range(pv->pv_pmap, pv->pv_va, PAGE_SIZE, TRUE, TRUE); 2865} 2866/* 2867 * Add a list of wired pages to the kva 2868 * this routine is only used for temporary 2869 * kernel mappings that do not need to have 2870 * page modification or references recorded. 2871 * Note that old mappings are simply written 2872 * over. The page *must* be wired. 2873 */ 2874void 2875pmap_qenter(vm_offset_t va, vm_page_t *m, int count) 2876{ 2877 int i; 2878 2879 for (i = 0; i < count; i++) { 2880 pmap_wb_page(m[i]); 2881 pmap_kenter_internal(va, VM_PAGE_TO_PHYS(m[i]), 2882 KENTER_CACHE); 2883 va += PAGE_SIZE; 2884 } 2885} 2886 2887 2888/* 2889 * this routine jerks page mappings from the 2890 * kernel -- it is meant only for temporary mappings. 2891 */ 2892void 2893pmap_qremove(vm_offset_t va, int count) 2894{ 2895 vm_paddr_t pa; 2896 int i; 2897 2898 for (i = 0; i < count; i++) { 2899 pa = vtophys(va); 2900 if (pa) { 2901 pmap_inv_page(PHYS_TO_VM_PAGE(pa)); 2902 pmap_kremove(va); 2903 } 2904 va += PAGE_SIZE; 2905 } 2906} 2907 2908 2909/* 2910 * pmap_object_init_pt preloads the ptes for a given object 2911 * into the specified pmap. This eliminates the blast of soft 2912 * faults on process startup and immediately after an mmap. 2913 */ 2914void 2915pmap_object_init_pt(pmap_t pmap, vm_offset_t addr, vm_object_t object, 2916 vm_pindex_t pindex, vm_size_t size) 2917{ 2918 2919 VM_OBJECT_ASSERT_WLOCKED(object); 2920 KASSERT(object->type == OBJT_DEVICE || object->type == OBJT_SG, 2921 ("pmap_object_init_pt: non-device object")); 2922} 2923 2924 2925/* 2926 * pmap_is_prefaultable: 2927 * 2928 * Return whether or not the specified virtual address is elgible 2929 * for prefault. 2930 */ 2931boolean_t 2932pmap_is_prefaultable(pmap_t pmap, vm_offset_t addr) 2933{ 2934 pd_entry_t *pde; 2935 pt_entry_t *pte; 2936 2937 if (!pmap_get_pde_pte(pmap, addr, &pde, &pte)) 2938 return (FALSE); 2939 KASSERT(pte != NULL, ("Valid mapping but no pte ?")); 2940 if (*pte == 0) 2941 return (TRUE); 2942 return (FALSE); 2943} 2944 2945/* 2946 * Fetch pointers to the PDE/PTE for the given pmap/VA pair. 2947 * Returns TRUE if the mapping exists, else FALSE. 2948 * 2949 * NOTE: This function is only used by a couple of arm-specific modules. 
2950 * It is not safe to take any pmap locks here, since we could be right 2951 * in the middle of debugging the pmap anyway... 2952 * 2953 * It is possible for this routine to return FALSE even though a valid 2954 * mapping does exist. This is because we don't lock, so the metadata 2955 * state may be inconsistent. 2956 * 2957 * NOTE: We can return a NULL *ptp in the case where the L1 pde is 2958 * a "section" mapping. 2959 */ 2960boolean_t 2961pmap_get_pde_pte(pmap_t pm, vm_offset_t va, pd_entry_t **pdp, pt_entry_t **ptp) 2962{ 2963 struct l2_dtable *l2; 2964 pd_entry_t *pl1pd, l1pd; 2965 pt_entry_t *ptep; 2966 u_short l1idx; 2967 2968 if (pm->pm_l1 == NULL) 2969 return (FALSE); 2970 2971 l1idx = L1_IDX(va); 2972 *pdp = pl1pd = &pm->pm_l1->l1_kva[l1idx]; 2973 l1pd = *pl1pd; 2974 2975 if (l1pte_section_p(l1pd)) { 2976 *ptp = NULL; 2977 return (TRUE); 2978 } 2979 2980 if (pm->pm_l2 == NULL) 2981 return (FALSE); 2982 2983 l2 = pm->pm_l2[L2_IDX(l1idx)]; 2984 2985 if (l2 == NULL || 2986 (ptep = l2->l2_bucket[L2_BUCKET(l1idx)].l2b_kva) == NULL) { 2987 return (FALSE); 2988 } 2989 2990 *ptp = &ptep[l2pte_index(va)]; 2991 return (TRUE); 2992} 2993 2994/* 2995 * Routine: pmap_remove_all 2996 * Function: 2997 * Removes this physical page from 2998 * all physical maps in which it resides. 2999 * Reflects back modify bits to the pager. 3000 * 3001 * Notes: 3002 * Original versions of this routine were very 3003 * inefficient because they iteratively called 3004 * pmap_remove (slow...) 3005 */ 3006void 3007pmap_remove_all(vm_page_t m) 3008{ 3009 pv_entry_t pv; 3010 pt_entry_t *ptep; 3011 struct l2_bucket *l2b; 3012 boolean_t flush = FALSE; 3013 pmap_t curpm; 3014 int flags = 0; 3015 3016 KASSERT((m->oflags & VPO_UNMANAGED) == 0, 3017 ("pmap_remove_all: page %p is not managed", m)); 3018 if (TAILQ_EMPTY(&m->md.pv_list)) 3019 return; 3020 rw_wlock(&pvh_global_lock); 3021 3022 /* 3023 * XXX This call shouldn't exist. Iterating over the PV list twice, 3024 * once in pmap_clearbit() and again below, is both unnecessary and 3025 * inefficient. The below code should itself write back the cache 3026 * entry before it destroys the mapping. 3027 */ 3028 pmap_clearbit(m, PVF_WRITE); 3029 curpm = vmspace_pmap(curproc->p_vmspace); 3030 while ((pv = TAILQ_FIRST(&m->md.pv_list)) != NULL) { 3031 if (flush == FALSE && (pv->pv_pmap == curpm || 3032 pv->pv_pmap == kernel_pmap)) 3033 flush = TRUE; 3034 3035 PMAP_LOCK(pv->pv_pmap); 3036 /* 3037 * Cached contents were written-back in pmap_clearbit(), 3038 * but we still have to invalidate the cache entry to make 3039 * sure stale data are not retrieved when another page will be 3040 * mapped under this virtual address. 
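 * (The data caches on these CPUs are virtually indexed and tagged, so
 * this clean-up has to be done by virtual address while the mapping is
 * still present; once the PTE is cleared there is no way left to reach
 * the stale lines.)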
3041 */ 3042 if (pmap_is_current(pv->pv_pmap)) { 3043 cpu_dcache_inv_range(pv->pv_va, PAGE_SIZE); 3044 if (pmap_has_valid_mapping(pv->pv_pmap, pv->pv_va)) 3045 cpu_l2cache_inv_range(pv->pv_va, PAGE_SIZE); 3046 } 3047 3048 if (pv->pv_flags & PVF_UNMAN) { 3049 /* remove the pv entry, but do not remove the mapping 3050 * and remember this is a kernel mapped page 3051 */ 3052 m->md.pv_kva = pv->pv_va; 3053 } else { 3054 /* remove the mapping and pv entry */ 3055 l2b = pmap_get_l2_bucket(pv->pv_pmap, pv->pv_va); 3056 KASSERT(l2b != NULL, ("No l2 bucket")); 3057 ptep = &l2b->l2b_kva[l2pte_index(pv->pv_va)]; 3058 *ptep = 0; 3059 PTE_SYNC_CURRENT(pv->pv_pmap, ptep); 3060 pmap_free_l2_bucket(pv->pv_pmap, l2b, 1); 3061 pv->pv_pmap->pm_stats.resident_count--; 3062 flags |= pv->pv_flags; 3063 } 3064 pmap_nuke_pv(m, pv->pv_pmap, pv); 3065 PMAP_UNLOCK(pv->pv_pmap); 3066 pmap_free_pv_entry(pv); 3067 } 3068 3069 if (flush) { 3070 if (PV_BEEN_EXECD(flags)) 3071 pmap_tlb_flushID(curpm); 3072 else 3073 pmap_tlb_flushD(curpm); 3074 } 3075 vm_page_aflag_clear(m, PGA_WRITEABLE); 3076 rw_wunlock(&pvh_global_lock); 3077} 3078 3079 3080/* 3081 * Set the physical protection on the 3082 * specified range of this map as requested. 3083 */ 3084void 3085pmap_protect(pmap_t pm, vm_offset_t sva, vm_offset_t eva, vm_prot_t prot) 3086{ 3087 struct l2_bucket *l2b; 3088 pt_entry_t *ptep, pte; 3089 vm_offset_t next_bucket; 3090 u_int flags; 3091 int flush; 3092 3093 CTR4(KTR_PMAP, "pmap_protect: pmap %p sva 0x%08x eva 0x%08x prot %x", 3094 pm, sva, eva, prot); 3095 3096 if ((prot & VM_PROT_READ) == 0) { 3097 pmap_remove(pm, sva, eva); 3098 return; 3099 } 3100 3101 if (prot & VM_PROT_WRITE) { 3102 /* 3103 * If this is a read->write transition, just ignore it and let 3104 * vm_fault() take care of it later. 3105 */ 3106 return; 3107 } 3108 3109 rw_wlock(&pvh_global_lock); 3110 PMAP_LOCK(pm); 3111 3112 /* 3113 * OK, at this point, we know we're doing write-protect operation. 3114 * If the pmap is active, write-back the range. 3115 */ 3116 pmap_dcache_wb_range(pm, sva, eva - sva, FALSE, FALSE); 3117 3118 flush = ((eva - sva) >= (PAGE_SIZE * 4)) ? 0 : -1; 3119 flags = 0; 3120 3121 while (sva < eva) { 3122 next_bucket = L2_NEXT_BUCKET(sva); 3123 if (next_bucket > eva) 3124 next_bucket = eva; 3125 3126 l2b = pmap_get_l2_bucket(pm, sva); 3127 if (l2b == NULL) { 3128 sva = next_bucket; 3129 continue; 3130 } 3131 3132 ptep = &l2b->l2b_kva[l2pte_index(sva)]; 3133 3134 while (sva < next_bucket) { 3135 if ((pte = *ptep) != 0 && (pte & L2_S_PROT_W) != 0) { 3136 struct vm_page *pg; 3137 u_int f; 3138 3139 pg = PHYS_TO_VM_PAGE(l2pte_pa(pte)); 3140 pte &= ~L2_S_PROT_W; 3141 *ptep = pte; 3142 PTE_SYNC(ptep); 3143 3144 if (!(pg->oflags & VPO_UNMANAGED)) { 3145 f = pmap_modify_pv(pg, pm, sva, 3146 PVF_WRITE, 0); 3147 if (f & PVF_WRITE) 3148 vm_page_dirty(pg); 3149 } else 3150 f = 0; 3151 3152 if (flush >= 0) { 3153 flush++; 3154 flags |= f; 3155 } else 3156 if (PV_BEEN_EXECD(f)) 3157 pmap_tlb_flushID_SE(pm, sva); 3158 else 3159 if (PV_BEEN_REFD(f)) 3160 pmap_tlb_flushD_SE(pm, sva); 3161 } 3162 3163 sva += PAGE_SIZE; 3164 ptep++; 3165 } 3166 } 3167 3168 3169 if (flush) { 3170 if (PV_BEEN_EXECD(flags)) 3171 pmap_tlb_flushID(pm); 3172 else 3173 if (PV_BEEN_REFD(flags)) 3174 pmap_tlb_flushD(pm); 3175 } 3176 rw_wunlock(&pvh_global_lock); 3177 3178 PMAP_UNLOCK(pm); 3179} 3180 3181 3182/* 3183 * Insert the given physical page (p) at 3184 * the specified virtual address (v) in the 3185 * target physical map with the protection requested. 
3186 * 3187 * If specified, the page will be wired down, meaning 3188 * that the related pte can not be reclaimed. 3189 * 3190 * NB: This is the only routine which MAY NOT lazy-evaluate 3191 * or lose information. That is, this routine must actually 3192 * insert this page into the given map NOW. 3193 */ 3194 3195int 3196pmap_enter(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot, 3197 u_int flags, int8_t psind __unused) 3198{ 3199 int rv; 3200 3201 rw_wlock(&pvh_global_lock); 3202 PMAP_LOCK(pmap); 3203 rv = pmap_enter_locked(pmap, va, m, prot, flags); 3204 rw_wunlock(&pvh_global_lock); 3205 PMAP_UNLOCK(pmap); 3206 return (rv); 3207} 3208 3209/* 3210 * The pvh global and pmap locks must be held. 3211 */ 3212static int 3213pmap_enter_locked(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot, 3214 u_int flags) 3215{ 3216 struct l2_bucket *l2b = NULL; 3217 struct vm_page *opg; 3218 struct pv_entry *pve = NULL; 3219 pt_entry_t *ptep, npte, opte; 3220 u_int nflags; 3221 u_int oflags; 3222 vm_paddr_t pa; 3223 3224 PMAP_ASSERT_LOCKED(pmap); 3225 rw_assert(&pvh_global_lock, RA_WLOCKED); 3226 if (va == vector_page) { 3227 pa = systempage.pv_pa; 3228 m = NULL; 3229 } else { 3230 if ((m->oflags & VPO_UNMANAGED) == 0 && !vm_page_xbusied(m)) 3231 VM_OBJECT_ASSERT_LOCKED(m->object); 3232 pa = VM_PAGE_TO_PHYS(m); 3233 } 3234 nflags = 0; 3235 if (prot & VM_PROT_WRITE) 3236 nflags |= PVF_WRITE; 3237 if (prot & VM_PROT_EXECUTE) 3238 nflags |= PVF_EXEC; 3239 if ((flags & PMAP_ENTER_WIRED) != 0) 3240 nflags |= PVF_WIRED; 3241 PDEBUG(1, printf("pmap_enter: pmap = %08x, va = %08x, m = %08x, prot = %x, " 3242 "flags = %x\n", (uint32_t) pmap, va, (uint32_t) m, prot, flags)); 3243 3244 if (pmap == kernel_pmap) { 3245 l2b = pmap_get_l2_bucket(pmap, va); 3246 if (l2b == NULL) 3247 l2b = pmap_grow_l2_bucket(pmap, va); 3248 } else { 3249do_l2b_alloc: 3250 l2b = pmap_alloc_l2_bucket(pmap, va); 3251 if (l2b == NULL) { 3252 if ((flags & PMAP_ENTER_NOSLEEP) == 0) { 3253 PMAP_UNLOCK(pmap); 3254 rw_wunlock(&pvh_global_lock); 3255 VM_WAIT; 3256 rw_wlock(&pvh_global_lock); 3257 PMAP_LOCK(pmap); 3258 goto do_l2b_alloc; 3259 } 3260 return (KERN_RESOURCE_SHORTAGE); 3261 } 3262 } 3263 3264 ptep = &l2b->l2b_kva[l2pte_index(va)]; 3265 3266 opte = *ptep; 3267 npte = pa; 3268 oflags = 0; 3269 if (opte) { 3270 /* 3271 * There is already a mapping at this address. 3272 * If the physical address is different, lookup the 3273 * vm_page. 3274 */ 3275 if (l2pte_pa(opte) != pa) 3276 opg = PHYS_TO_VM_PAGE(l2pte_pa(opte)); 3277 else 3278 opg = m; 3279 } else 3280 opg = NULL; 3281 3282 if ((prot & (VM_PROT_ALL)) || 3283 (!m || m->md.pvh_attrs & PVF_REF)) { 3284 /* 3285 * - The access type indicates that we don't need 3286 * to do referenced emulation. 3287 * OR 3288 * - The physical page has already been referenced 3289 * so no need to re-do referenced emulation here. 3290 */ 3291 npte |= L2_S_PROTO; 3292 3293 nflags |= PVF_REF; 3294 3295 if (m && ((prot & VM_PROT_WRITE) != 0 || 3296 (m->md.pvh_attrs & PVF_MOD))) { 3297 /* 3298 * This is a writable mapping, and the 3299 * page's mod state indicates it has 3300 * already been modified. Make it 3301 * writable from the outset. 3302 */ 3303 nflags |= PVF_MOD; 3304 if (!(m->md.pvh_attrs & PVF_MOD)) 3305 vm_page_dirty(m); 3306 } 3307 if (m && opte) 3308 vm_page_aflag_set(m, PGA_REFERENCED); 3309 } else { 3310 /* 3311 * Need to do page referenced emulation. 
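 * The PTE is entered with L2_TYPE_INV so that the first access to the
 * page faults; pmap_fault_fixup() then records PVF_REF and rewrites the
 * PTE as a valid small page.  This is how the referenced bit is
 * simulated on an MMU with no hardware referenced/modified tracking.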
3312 */ 3313 npte |= L2_TYPE_INV; 3314 } 3315 3316 if (prot & VM_PROT_WRITE) { 3317 npte |= L2_S_PROT_W; 3318 if (m != NULL && 3319 (m->oflags & VPO_UNMANAGED) == 0) 3320 vm_page_aflag_set(m, PGA_WRITEABLE); 3321 } 3322 if (m->md.pv_memattr != VM_MEMATTR_UNCACHEABLE) 3323 npte |= pte_l2_s_cache_mode; 3324 if (m && m == opg) { 3325 /* 3326 * We're changing the attrs of an existing mapping. 3327 */ 3328 oflags = pmap_modify_pv(m, pmap, va, 3329 PVF_WRITE | PVF_EXEC | PVF_WIRED | 3330 PVF_MOD | PVF_REF, nflags); 3331 3332 /* 3333 * We may need to flush the cache if we're 3334 * doing rw-ro... 3335 */ 3336 if (pmap_is_current(pmap) && 3337 (oflags & PVF_NC) == 0 && 3338 (opte & L2_S_PROT_W) != 0 && 3339 (prot & VM_PROT_WRITE) == 0 && 3340 (opte & L2_TYPE_MASK) != L2_TYPE_INV) { 3341 cpu_dcache_wb_range(va, PAGE_SIZE); 3342 cpu_l2cache_wb_range(va, PAGE_SIZE); 3343 } 3344 } else { 3345 /* 3346 * New mapping, or changing the backing page 3347 * of an existing mapping. 3348 */ 3349 if (opg) { 3350 /* 3351 * Replacing an existing mapping with a new one. 3352 * It is part of our managed memory so we 3353 * must remove it from the PV list 3354 */ 3355 if ((pve = pmap_remove_pv(opg, pmap, va))) { 3356 3357 /* note for patch: the oflags/invalidation was moved 3358 * because PG_FICTITIOUS pages could free the pve 3359 */ 3360 oflags = pve->pv_flags; 3361 /* 3362 * If the old mapping was valid (ref/mod 3363 * emulation creates 'invalid' mappings 3364 * initially) then make sure to frob 3365 * the cache. 3366 */ 3367 if ((oflags & PVF_NC) == 0 && l2pte_valid(opte)) { 3368 if (PV_BEEN_EXECD(oflags)) { 3369 pmap_idcache_wbinv_range(pmap, va, 3370 PAGE_SIZE); 3371 } else 3372 if (PV_BEEN_REFD(oflags)) { 3373 pmap_dcache_wb_range(pmap, va, 3374 PAGE_SIZE, TRUE, 3375 (oflags & PVF_WRITE) == 0); 3376 } 3377 } 3378 3379 /* free/allocate a pv_entry for UNMANAGED pages if 3380 * this physical page is not/is already mapped. 3381 */ 3382 3383 if (m && (m->oflags & VPO_UNMANAGED) && 3384 !m->md.pv_kva && 3385 TAILQ_EMPTY(&m->md.pv_list)) { 3386 pmap_free_pv_entry(pve); 3387 pve = NULL; 3388 } 3389 } else if (m && 3390 (!(m->oflags & VPO_UNMANAGED) || m->md.pv_kva || 3391 !TAILQ_EMPTY(&m->md.pv_list))) 3392 pve = pmap_get_pv_entry(); 3393 } else if (m && 3394 (!(m->oflags & VPO_UNMANAGED) || m->md.pv_kva || 3395 !TAILQ_EMPTY(&m->md.pv_list))) 3396 pve = pmap_get_pv_entry(); 3397 3398 if (m) { 3399 if ((m->oflags & VPO_UNMANAGED)) { 3400 if (!TAILQ_EMPTY(&m->md.pv_list) || 3401 m->md.pv_kva) { 3402 KASSERT(pve != NULL, ("No pv")); 3403 nflags |= PVF_UNMAN; 3404 pmap_enter_pv(m, pve, pmap, va, nflags); 3405 } else 3406 m->md.pv_kva = va; 3407 } else { 3408 KASSERT(va < kmi.clean_sva || 3409 va >= kmi.clean_eva, 3410 ("pmap_enter: managed mapping within the clean submap")); 3411 KASSERT(pve != NULL, ("No pv")); 3412 pmap_enter_pv(m, pve, pmap, va, nflags); 3413 } 3414 } 3415 } 3416 /* 3417 * Make sure userland mappings get the right permissions 3418 */ 3419 if (pmap != kernel_pmap && va != vector_page) { 3420 npte |= L2_S_PROT_U; 3421 } 3422 3423 /* 3424 * Keep the stats up to date 3425 */ 3426 if (opte == 0) { 3427 l2b->l2b_occupancy++; 3428 pmap->pm_stats.resident_count++; 3429 } 3430 3431 /* 3432 * If this is just a wiring change, the two PTEs will be 3433 * identical, so there's no need to update the page table. 
3434 */ 3435 if (npte != opte) { 3436 boolean_t is_cached = pmap_is_current(pmap); 3437 3438 *ptep = npte; 3439 if (is_cached) { 3440 /* 3441 * We only need to frob the cache/tlb if this pmap 3442 * is current 3443 */ 3444 PTE_SYNC(ptep); 3445 if (L1_IDX(va) != L1_IDX(vector_page) && 3446 l2pte_valid(npte)) { 3447 /* 3448 * This mapping is likely to be accessed as 3449 * soon as we return to userland. Fix up the 3450 * L1 entry to avoid taking another 3451 * page/domain fault. 3452 */ 3453 pd_entry_t *pl1pd, l1pd; 3454 3455 pl1pd = &pmap->pm_l1->l1_kva[L1_IDX(va)]; 3456 l1pd = l2b->l2b_phys | L1_C_DOM(pmap->pm_domain) | 3457 L1_C_PROTO; 3458 if (*pl1pd != l1pd) { 3459 *pl1pd = l1pd; 3460 PTE_SYNC(pl1pd); 3461 } 3462 } 3463 } 3464 3465 if (PV_BEEN_EXECD(oflags)) 3466 pmap_tlb_flushID_SE(pmap, va); 3467 else if (PV_BEEN_REFD(oflags)) 3468 pmap_tlb_flushD_SE(pmap, va); 3469 3470 3471 if (m) 3472 pmap_fix_cache(m, pmap, va); 3473 } 3474 return (KERN_SUCCESS); 3475} 3476 3477/* 3478 * Maps a sequence of resident pages belonging to the same object. 3479 * The sequence begins with the given page m_start. This page is 3480 * mapped at the given virtual address start. Each subsequent page is 3481 * mapped at a virtual address that is offset from start by the same 3482 * amount as the page is offset from m_start within the object. The 3483 * last page in the sequence is the page with the largest offset from 3484 * m_start that can be mapped at a virtual address less than the given 3485 * virtual address end. Not every virtual page between start and end 3486 * is mapped; only those for which a resident page exists with the 3487 * corresponding offset from m_start are mapped. 3488 */ 3489void 3490pmap_enter_object(pmap_t pmap, vm_offset_t start, vm_offset_t end, 3491 vm_page_t m_start, vm_prot_t prot) 3492{ 3493 vm_page_t m; 3494 vm_pindex_t diff, psize; 3495 3496 VM_OBJECT_ASSERT_LOCKED(m_start->object); 3497 3498 psize = atop(end - start); 3499 m = m_start; 3500 rw_wlock(&pvh_global_lock); 3501 PMAP_LOCK(pmap); 3502 while (m != NULL && (diff = m->pindex - m_start->pindex) < psize) { 3503 pmap_enter_locked(pmap, start + ptoa(diff), m, prot & 3504 (VM_PROT_READ | VM_PROT_EXECUTE), PMAP_ENTER_NOSLEEP); 3505 m = TAILQ_NEXT(m, listq); 3506 } 3507 rw_wunlock(&pvh_global_lock); 3508 PMAP_UNLOCK(pmap); 3509} 3510 3511/* 3512 * this code makes some *MAJOR* assumptions: 3513 * 1. Current pmap & pmap exists. 3514 * 2. Not wired. 3515 * 3. Read access. 3516 * 4. No page table pages. 3517 * but is *MUCH* faster than pmap_enter... 3518 */ 3519 3520void 3521pmap_enter_quick(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot) 3522{ 3523 3524 rw_wlock(&pvh_global_lock); 3525 PMAP_LOCK(pmap); 3526 pmap_enter_locked(pmap, va, m, prot & (VM_PROT_READ | VM_PROT_EXECUTE), 3527 PMAP_ENTER_NOSLEEP); 3528 rw_wunlock(&pvh_global_lock); 3529 PMAP_UNLOCK(pmap); 3530} 3531 3532/* 3533 * Clear the wired attribute from the mappings for the specified range of 3534 * addresses in the given pmap. Every valid mapping within that range 3535 * must have the wired attribute set. In contrast, invalid mappings 3536 * cannot have the wired attribute set, so they are ignored. 3537 * 3538 * XXX Wired mappings of unmanaged pages cannot be counted by this pmap 3539 * implementation. 
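 * (Only managed pages have pv entries, and the wired attribute lives in
 * pv_flags, so a wired mapping of an unmanaged page leaves nothing here
 * to find or clear.)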
3540 */ 3541void 3542pmap_unwire(pmap_t pmap, vm_offset_t sva, vm_offset_t eva) 3543{ 3544 struct l2_bucket *l2b; 3545 pt_entry_t *ptep, pte; 3546 pv_entry_t pv; 3547 vm_offset_t next_bucket; 3548 vm_page_t m; 3549 3550 rw_wlock(&pvh_global_lock); 3551 PMAP_LOCK(pmap); 3552 while (sva < eva) { 3553 next_bucket = L2_NEXT_BUCKET(sva); 3554 if (next_bucket > eva) 3555 next_bucket = eva; 3556 l2b = pmap_get_l2_bucket(pmap, sva); 3557 if (l2b == NULL) { 3558 sva = next_bucket; 3559 continue; 3560 } 3561 for (ptep = &l2b->l2b_kva[l2pte_index(sva)]; sva < next_bucket; 3562 sva += PAGE_SIZE, ptep++) { 3563 if ((pte = *ptep) == 0 || 3564 (m = PHYS_TO_VM_PAGE(l2pte_pa(pte))) == NULL || 3565 (m->oflags & VPO_UNMANAGED) != 0) 3566 continue; 3567 pv = pmap_find_pv(m, pmap, sva); 3568 if ((pv->pv_flags & PVF_WIRED) == 0) 3569 panic("pmap_unwire: pv %p isn't wired", pv); 3570 pv->pv_flags &= ~PVF_WIRED; 3571 pmap->pm_stats.wired_count--; 3572 } 3573 } 3574 rw_wunlock(&pvh_global_lock); 3575 PMAP_UNLOCK(pmap); 3576} 3577 3578 3579/* 3580 * Copy the range specified by src_addr/len 3581 * from the source map to the range dst_addr/len 3582 * in the destination map. 3583 * 3584 * This routine is only advisory and need not do anything. 3585 */ 3586void 3587pmap_copy(pmap_t dst_pmap, pmap_t src_pmap, vm_offset_t dst_addr, 3588 vm_size_t len, vm_offset_t src_addr) 3589{ 3590} 3591 3592 3593/* 3594 * Routine: pmap_extract 3595 * Function: 3596 * Extract the physical page address associated 3597 * with the given map/virtual_address pair. 3598 */ 3599vm_paddr_t 3600pmap_extract(pmap_t pmap, vm_offset_t va) 3601{ 3602 vm_paddr_t pa; 3603 3604 PMAP_LOCK(pmap); 3605 pa = pmap_extract_locked(pmap, va); 3606 PMAP_UNLOCK(pmap); 3607 return (pa); 3608} 3609 3610static vm_paddr_t 3611pmap_extract_locked(pmap_t pmap, vm_offset_t va) 3612{ 3613 struct l2_dtable *l2; 3614 pd_entry_t l1pd; 3615 pt_entry_t *ptep, pte; 3616 vm_paddr_t pa; 3617 u_int l1idx; 3618 3619 if (pmap != kernel_pmap) 3620 PMAP_ASSERT_LOCKED(pmap); 3621 l1idx = L1_IDX(va); 3622 l1pd = pmap->pm_l1->l1_kva[l1idx]; 3623 if (l1pte_section_p(l1pd)) { 3624 /* 3625 * These should only happen for the kernel pmap. 3626 */ 3627 KASSERT(pmap == kernel_pmap, ("unexpected section")); 3628 /* XXX: what to do about the bits > 32 ? */ 3629 if (l1pd & L1_S_SUPERSEC) 3630 pa = (l1pd & L1_SUP_FRAME) | (va & L1_SUP_OFFSET); 3631 else 3632 pa = (l1pd & L1_S_FRAME) | (va & L1_S_OFFSET); 3633 } else { 3634 /* 3635 * Note that we can't rely on the validity of the L1 3636 * descriptor as an indication that a mapping exists. 3637 * We have to look it up in the L2 dtable. 3638 */ 3639 l2 = pmap->pm_l2[L2_IDX(l1idx)]; 3640 if (l2 == NULL || 3641 (ptep = l2->l2_bucket[L2_BUCKET(l1idx)].l2b_kva) == NULL) 3642 return (0); 3643 pte = ptep[l2pte_index(va)]; 3644 if (pte == 0) 3645 return (0); 3646 if ((pte & L2_TYPE_MASK) == L2_TYPE_L) 3647 pa = (pte & L2_L_FRAME) | (va & L2_L_OFFSET); 3648 else 3649 pa = (pte & L2_S_FRAME) | (va & L2_S_OFFSET); 3650 } 3651 return (pa); 3652} 3653 3654/* 3655 * Atomically extract and hold the physical page with the given 3656 * pmap and virtual address pair if that mapping permits the given 3657 * protection. 
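 * The page is returned held (vm_page_hold()), and vm_page_pa_tryrelock()
 * is used to revalidate the lookup whenever the page lock had to be
 * dropped, which is why the body below may retry.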
3658 * 3659 */ 3660vm_page_t 3661pmap_extract_and_hold(pmap_t pmap, vm_offset_t va, vm_prot_t prot) 3662{ 3663 struct l2_dtable *l2; 3664 pd_entry_t l1pd; 3665 pt_entry_t *ptep, pte; 3666 vm_paddr_t pa, paddr; 3667 vm_page_t m = NULL; 3668 u_int l1idx; 3669 l1idx = L1_IDX(va); 3670 paddr = 0; 3671 3672 PMAP_LOCK(pmap); 3673retry: 3674 l1pd = pmap->pm_l1->l1_kva[l1idx]; 3675 if (l1pte_section_p(l1pd)) { 3676 /* 3677 * These should only happen for kernel_pmap 3678 */ 3679 KASSERT(pmap == kernel_pmap, ("huh")); 3680 /* XXX: what to do about the bits > 32 ? */ 3681 if (l1pd & L1_S_SUPERSEC) 3682 pa = (l1pd & L1_SUP_FRAME) | (va & L1_SUP_OFFSET); 3683 else 3684 pa = (l1pd & L1_S_FRAME) | (va & L1_S_OFFSET); 3685 if (vm_page_pa_tryrelock(pmap, pa & PG_FRAME, &paddr)) 3686 goto retry; 3687 if (l1pd & L1_S_PROT_W || (prot & VM_PROT_WRITE) == 0) { 3688 m = PHYS_TO_VM_PAGE(pa); 3689 vm_page_hold(m); 3690 } 3691 3692 } else { 3693 /* 3694 * Note that we can't rely on the validity of the L1 3695 * descriptor as an indication that a mapping exists. 3696 * We have to look it up in the L2 dtable. 3697 */ 3698 l2 = pmap->pm_l2[L2_IDX(l1idx)]; 3699 3700 if (l2 == NULL || 3701 (ptep = l2->l2_bucket[L2_BUCKET(l1idx)].l2b_kva) == NULL) { 3702 PMAP_UNLOCK(pmap); 3703 return (NULL); 3704 } 3705 3706 ptep = &ptep[l2pte_index(va)]; 3707 pte = *ptep; 3708 3709 if (pte == 0) { 3710 PMAP_UNLOCK(pmap); 3711 return (NULL); 3712 } 3713 if (pte & L2_S_PROT_W || (prot & VM_PROT_WRITE) == 0) { 3714 if ((pte & L2_TYPE_MASK) == L2_TYPE_L) 3715 pa = (pte & L2_L_FRAME) | (va & L2_L_OFFSET); 3716 else 3717 pa = (pte & L2_S_FRAME) | (va & L2_S_OFFSET); 3718 if (vm_page_pa_tryrelock(pmap, pa & PG_FRAME, &paddr)) 3719 goto retry; 3720 m = PHYS_TO_VM_PAGE(pa); 3721 vm_page_hold(m); 3722 } 3723 } 3724 3725 PMAP_UNLOCK(pmap); 3726 PA_UNLOCK_COND(paddr); 3727 return (m); 3728} 3729 3730vm_paddr_t 3731pmap_dump_kextract(vm_offset_t va, pt2_entry_t *pte2p) 3732{ 3733 struct l2_dtable *l2; 3734 pd_entry_t l1pd; 3735 pt_entry_t *ptep, pte; 3736 vm_paddr_t pa; 3737 u_int l1idx; 3738 3739 l1idx = L1_IDX(va); 3740 l1pd = kernel_pmap->pm_l1->l1_kva[l1idx]; 3741 if (l1pte_section_p(l1pd)) { 3742 if (l1pd & L1_S_SUPERSEC) 3743 pa = (l1pd & L1_SUP_FRAME) | (va & L1_SUP_OFFSET); 3744 else 3745 pa = (l1pd & L1_S_FRAME) | (va & L1_S_OFFSET); 3746 pte = L2_S_PROTO | pa | 3747 L2_S_PROT(PTE_KERNEL, VM_PROT_READ | VM_PROT_WRITE); 3748 } else { 3749 l2 = kernel_pmap->pm_l2[L2_IDX(l1idx)]; 3750 if (l2 == NULL || 3751 (ptep = l2->l2_bucket[L2_BUCKET(l1idx)].l2b_kva) == NULL) { 3752 pte = 0; 3753 pa = 0; 3754 goto out; 3755 } 3756 pte = ptep[l2pte_index(va)]; 3757 if (pte == 0) { 3758 pa = 0; 3759 goto out; 3760 } 3761 if ((pte & L2_TYPE_MASK) == L2_TYPE_L) 3762 pa = (pte & L2_L_FRAME) | (va & L2_L_OFFSET); 3763 else 3764 pa = (pte & L2_S_FRAME) | (va & L2_S_OFFSET); 3765 } 3766out: 3767 if (pte2p != NULL) 3768 *pte2p = pte; 3769 return (pa); 3770} 3771 3772/* 3773 * Initialize a preallocated and zeroed pmap structure, 3774 * such as one in a vmspace structure. 
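 * This allocates (or shares) an L1 translation table and a protection
 * domain for the new pmap via pmap_alloc_l1() and, when the system uses
 * low vectors, immediately wires a read-only mapping of the vector page
 * so exception entry keeps working in the new address space.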
3775 */ 3776 3777int 3778pmap_pinit(pmap_t pmap) 3779{ 3780 PDEBUG(1, printf("pmap_pinit: pmap = %08x\n", (uint32_t) pmap)); 3781 3782 pmap_alloc_l1(pmap); 3783 bzero(pmap->pm_l2, sizeof(pmap->pm_l2)); 3784 3785 CPU_ZERO(&pmap->pm_active); 3786 3787 TAILQ_INIT(&pmap->pm_pvlist); 3788 bzero(&pmap->pm_stats, sizeof pmap->pm_stats); 3789 pmap->pm_stats.resident_count = 1; 3790 if (vector_page < KERNBASE) { 3791 pmap_enter(pmap, vector_page, PHYS_TO_VM_PAGE(systempage.pv_pa), 3792 VM_PROT_READ, PMAP_ENTER_WIRED | VM_PROT_READ, 0); 3793 } 3794 return (1); 3795} 3796 3797 3798/*************************************************** 3799 * page management routines. 3800 ***************************************************/ 3801 3802 3803static void 3804pmap_free_pv_entry(pv_entry_t pv) 3805{ 3806 pv_entry_count--; 3807 uma_zfree(pvzone, pv); 3808} 3809 3810 3811/* 3812 * get a new pv_entry, allocating a block from the system 3813 * when needed. 3814 * the memory allocation is performed bypassing the malloc code 3815 * because of the possibility of allocations at interrupt time. 3816 */ 3817static pv_entry_t 3818pmap_get_pv_entry(void) 3819{ 3820 pv_entry_t ret_value; 3821 3822 pv_entry_count++; 3823 if (pv_entry_count > pv_entry_high_water) 3824 pagedaemon_wakeup(); 3825 ret_value = uma_zalloc(pvzone, M_NOWAIT); 3826 return ret_value; 3827} 3828 3829/* 3830 * Remove the given range of addresses from the specified map. 3831 * 3832 * It is assumed that the start and end are properly 3833 * rounded to the page size. 3834 */ 3835#define PMAP_REMOVE_CLEAN_LIST_SIZE 3 3836void 3837pmap_remove(pmap_t pm, vm_offset_t sva, vm_offset_t eva) 3838{ 3839 struct l2_bucket *l2b; 3840 vm_offset_t next_bucket; 3841 pt_entry_t *ptep; 3842 u_int total; 3843 u_int mappings, is_exec, is_refd; 3844 int flushall = 0; 3845 3846 3847 /* 3848 * we lock in the pmap => pv_head direction 3849 */ 3850 3851 rw_wlock(&pvh_global_lock); 3852 PMAP_LOCK(pm); 3853 total = 0; 3854 while (sva < eva) { 3855 /* 3856 * Do one L2 bucket's worth at a time. 3857 */ 3858 next_bucket = L2_NEXT_BUCKET(sva); 3859 if (next_bucket > eva) 3860 next_bucket = eva; 3861 3862 l2b = pmap_get_l2_bucket(pm, sva); 3863 if (l2b == NULL) { 3864 sva = next_bucket; 3865 continue; 3866 } 3867 3868 ptep = &l2b->l2b_kva[l2pte_index(sva)]; 3869 mappings = 0; 3870 3871 while (sva < next_bucket) { 3872 struct vm_page *pg; 3873 pt_entry_t pte; 3874 vm_paddr_t pa; 3875 3876 pte = *ptep; 3877 3878 if (pte == 0) { 3879 /* 3880 * Nothing here, move along 3881 */ 3882 sva += PAGE_SIZE; 3883 ptep++; 3884 continue; 3885 } 3886 3887 pm->pm_stats.resident_count--; 3888 pa = l2pte_pa(pte); 3889 is_exec = 0; 3890 is_refd = 1; 3891 3892 /* 3893 * Update flags. In a number of circumstances, 3894 * we could cluster a lot of these and do a 3895 * number of sequential pages in one go. 
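 * For now each page is handled individually: the pv entry (if any) is
 * removed so we learn whether the mapping was executable or merely
 * referenced, which in turn decides how much cache and TLB flushing the
 * code below must do.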
3896 */ 3897 if ((pg = PHYS_TO_VM_PAGE(pa)) != NULL) { 3898 struct pv_entry *pve; 3899 3900 pve = pmap_remove_pv(pg, pm, sva); 3901 if (pve) { 3902 is_exec = PV_BEEN_EXECD(pve->pv_flags); 3903 is_refd = PV_BEEN_REFD(pve->pv_flags); 3904 pmap_free_pv_entry(pve); 3905 } 3906 } 3907 3908 if (l2pte_valid(pte) && pmap_is_current(pm)) { 3909 if (total < PMAP_REMOVE_CLEAN_LIST_SIZE) { 3910 total++; 3911 if (is_exec) { 3912 cpu_idcache_wbinv_range(sva, 3913 PAGE_SIZE); 3914 cpu_l2cache_wbinv_range(sva, 3915 PAGE_SIZE); 3916 cpu_tlb_flushID_SE(sva); 3917 } else if (is_refd) { 3918 cpu_dcache_wbinv_range(sva, 3919 PAGE_SIZE); 3920 cpu_l2cache_wbinv_range(sva, 3921 PAGE_SIZE); 3922 cpu_tlb_flushD_SE(sva); 3923 } 3924 } else if (total == PMAP_REMOVE_CLEAN_LIST_SIZE) { 3925 /* flushall will also only get set for 3926 * for a current pmap 3927 */ 3928 cpu_idcache_wbinv_all(); 3929 cpu_l2cache_wbinv_all(); 3930 flushall = 1; 3931 total++; 3932 } 3933 } 3934 *ptep = 0; 3935 PTE_SYNC(ptep); 3936 3937 sva += PAGE_SIZE; 3938 ptep++; 3939 mappings++; 3940 } 3941 3942 pmap_free_l2_bucket(pm, l2b, mappings); 3943 } 3944 3945 rw_wunlock(&pvh_global_lock); 3946 if (flushall) 3947 cpu_tlb_flushID(); 3948 PMAP_UNLOCK(pm); 3949} 3950 3951/* 3952 * pmap_zero_page() 3953 * 3954 * Zero a given physical page by mapping it at a page hook point. 3955 * In doing the zero page op, the page we zero is mapped cachable, as with 3956 * StrongARM accesses to non-cached pages are non-burst making writing 3957 * _any_ bulk data very slow. 3958 */ 3959#if ARM_MMU_GENERIC != 0 || defined(CPU_XSCALE_CORE3) 3960void 3961pmap_zero_page_generic(vm_paddr_t phys, int off, int size) 3962{ 3963 3964 if (_arm_bzero && size >= _min_bzero_size && 3965 _arm_bzero((void *)(phys + off), size, IS_PHYSICAL) == 0) 3966 return; 3967 3968 mtx_lock(&cmtx); 3969 /* 3970 * Hook in the page, zero it, invalidate the TLB as needed. 3971 * 3972 * Note the temporary zero-page mapping must be a non-cached page in 3973 * order to work without corruption when write-allocate is enabled. 3974 */ 3975 *cdst_pte = L2_S_PROTO | phys | L2_S_PROT(PTE_KERNEL, VM_PROT_WRITE); 3976 PTE_SYNC(cdst_pte); 3977 cpu_tlb_flushD_SE(cdstp); 3978 cpu_cpwait(); 3979 if (off || size != PAGE_SIZE) 3980 bzero((void *)(cdstp + off), size); 3981 else 3982 bzero_page(cdstp); 3983 3984 mtx_unlock(&cmtx); 3985} 3986#endif /* ARM_MMU_GENERIC != 0 */ 3987 3988#if ARM_MMU_XSCALE == 1 3989void 3990pmap_zero_page_xscale(vm_paddr_t phys, int off, int size) 3991{ 3992 3993 if (_arm_bzero && size >= _min_bzero_size && 3994 _arm_bzero((void *)(phys + off), size, IS_PHYSICAL) == 0) 3995 return; 3996 3997 mtx_lock(&cmtx); 3998 /* 3999 * Hook in the page, zero it, and purge the cache for that 4000 * zeroed page. Invalidate the TLB as needed. 4001 */ 4002 *cdst_pte = L2_S_PROTO | phys | 4003 L2_S_PROT(PTE_KERNEL, VM_PROT_WRITE) | 4004 L2_C | L2_XSCALE_T_TEX(TEX_XSCALE_X); /* mini-data */ 4005 PTE_SYNC(cdst_pte); 4006 cpu_tlb_flushD_SE(cdstp); 4007 cpu_cpwait(); 4008 if (off || size != PAGE_SIZE) 4009 bzero((void *)(cdstp + off), size); 4010 else 4011 bzero_page(cdstp); 4012 mtx_unlock(&cmtx); 4013 xscale_cache_clean_minidata(); 4014} 4015 4016/* 4017 * Change the PTEs for the specified kernel mappings such that they 4018 * will use the mini data cache instead of the main data cache. 
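 * (On XScale the mini-data cache is a small separate data cache selected
 * by the PTE attribute encoding -- here C set, B cleared, plus the
 * X/TEX bit -- so routing suitable buffers through it keeps them from
 * evicting hot lines in the main data cache.)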
 */
void
pmap_use_minicache(vm_offset_t va, vm_size_t size)
{
	struct l2_bucket *l2b;
	pt_entry_t *ptep, *sptep, pte;
	vm_offset_t next_bucket, eva;

#if (ARM_NMMUS > 1) || defined(CPU_XSCALE_CORE3)
	if (xscale_use_minidata == 0)
		return;
#endif

	eva = va + size;

	while (va < eva) {
		next_bucket = L2_NEXT_BUCKET(va);
		if (next_bucket > eva)
			next_bucket = eva;

		l2b = pmap_get_l2_bucket(kernel_pmap, va);

		sptep = ptep = &l2b->l2b_kva[l2pte_index(va)];

		while (va < next_bucket) {
			pte = *ptep;
			if (!l2pte_minidata(pte)) {
				cpu_dcache_wbinv_range(va, PAGE_SIZE);
				cpu_tlb_flushD_SE(va);
				*ptep = pte & ~L2_B;
			}
			ptep++;
			va += PAGE_SIZE;
		}
		PTE_SYNC_RANGE(sptep, (u_int)(ptep - sptep));
	}
	cpu_cpwait();
}
#endif /* ARM_MMU_XSCALE == 1 */

/*
 * pmap_zero_page zeros the specified hardware page by mapping
 * the page into KVM and using bzero to clear its contents.
 */
void
pmap_zero_page(vm_page_t m)
{
	pmap_zero_page_func(VM_PAGE_TO_PHYS(m), 0, PAGE_SIZE);
}


/*
 * pmap_zero_page_area zeros the specified hardware page by mapping
 * the page into KVM and using bzero to clear its contents.
 *
 * off and size may not cover an area beyond a single hardware page.
 */
void
pmap_zero_page_area(vm_page_t m, int off, int size)
{

	pmap_zero_page_func(VM_PAGE_TO_PHYS(m), off, size);
}


/*
 * pmap_zero_page_idle zeros the specified hardware page by mapping
 * the page into KVM and using bzero to clear its contents. This
 * is intended to be called from the vm_pagezero process only and
 * outside of Giant.
 */
void
pmap_zero_page_idle(vm_page_t m)
{

	pmap_zero_page(m);
}

#if 0
/*
 * pmap_clean_page()
 *
 * This is a local function used to work out the best strategy to clean
 * a single page referenced by its entry in the PV table. It should be used by
 * pmap_copy_page, pmap_zero_page and maybe some others later on.
 *
 * Its policy is effectively:
 * o If there are no mappings, we don't bother doing anything with the cache.
 * o If there is one mapping, we clean just that page.
 * o If there are multiple mappings, we clean the entire cache.
 *
 * So that some functions can be further optimised, it returns 0 if it didn't
 * clean the entire cache, or 1 if it did.
 *
 * XXX One bug in this routine is that if the pv_entry has a single page
 * mapped at 0x00000000 a whole cache clean will be performed rather than
 * just the 1 page. Since this should not occur in everyday use, if it does
 * happen it will just result in a less efficient clean for the page.
 *
 * We don't yet use this function but may want to.
 */
static int
pmap_clean_page(struct pv_entry *pv, boolean_t is_src)
{
	pmap_t pm, pm_to_clean = NULL;
	struct pv_entry *npv;
	u_int cache_needs_cleaning = 0;
	u_int flags = 0;
	vm_offset_t page_to_clean = 0;

	if (pv == NULL) {
		/* nothing mapped in so nothing to flush */
		return (0);
	}

	/*
	 * Since we flush the cache each time we change to a different
	 * user vmspace, we only need to flush the page if it is in the
	 * current pmap.
	 */
	if (curthread)
		pm = vmspace_pmap(curproc->p_vmspace);
	else
		pm = kernel_pmap;

	for (npv = pv; npv; npv = TAILQ_NEXT(npv, pv_list)) {
		if (npv->pv_pmap == kernel_pmap || npv->pv_pmap == pm) {
			flags |= npv->pv_flags;
			/*
			 * The page is mapped non-cacheable in
			 * this map. No need to flush the cache.
			 */
			if (npv->pv_flags & PVF_NC) {
#ifdef DIAGNOSTIC
				if (cache_needs_cleaning)
					panic("pmap_clean_page: "
					    "cache inconsistency");
#endif
				break;
			} else if (is_src && (npv->pv_flags & PVF_WRITE) == 0)
				continue;
			if (cache_needs_cleaning) {
				page_to_clean = 0;
				break;
			} else {
				page_to_clean = npv->pv_va;
				pm_to_clean = npv->pv_pmap;
			}
			cache_needs_cleaning = 1;
		}
	}
	if (page_to_clean) {
		if (PV_BEEN_EXECD(flags))
			pmap_idcache_wbinv_range(pm_to_clean, page_to_clean,
			    PAGE_SIZE);
		else
			pmap_dcache_wb_range(pm_to_clean, page_to_clean,
			    PAGE_SIZE, !is_src, (flags & PVF_WRITE) == 0);
	} else if (cache_needs_cleaning) {
		if (PV_BEEN_EXECD(flags))
			pmap_idcache_wbinv_all(pm);
		else
			pmap_dcache_wbinv_all(pm);
		return (1);
	}
	return (0);
}
#endif

/*
 * pmap_copy_page copies the specified (machine independent)
 * page by mapping the page into virtual memory and using
 * bcopy to copy the page, one machine dependent page at a
 * time.
 */

/*
 * pmap_copy_page()
 *
 * Copy one physical page into another, by mapping the pages into
 * hook points. The same comment regarding cacheability as in
 * pmap_zero_page also applies here.
 */
#if ARM_MMU_GENERIC != 0 || defined(CPU_XSCALE_CORE3)
void
pmap_copy_page_generic(vm_paddr_t src, vm_paddr_t dst)
{
#if 0
	struct vm_page *src_pg = PHYS_TO_VM_PAGE(src);
#endif

	/*
	 * Clean the source page. Hold the source page's lock for
	 * the duration of the copy so that no other mappings can
	 * be created while we have a potentially aliased mapping.
	 */
#if 0
	/*
	 * XXX: Not needed while we call cpu_dcache_wbinv_all() in
	 * pmap_copy_page().
	 */
	(void) pmap_clean_page(TAILQ_FIRST(&src_pg->md.pv_list), TRUE);
#endif
	/*
	 * Map the pages into the page hook points, copy them, and purge
	 * the cache for the appropriate page. Invalidate the TLB
	 * as required.
	 */
	mtx_lock(&cmtx);
	*csrc_pte = L2_S_PROTO | src |
	    L2_S_PROT(PTE_KERNEL, VM_PROT_READ) | pte_l2_s_cache_mode;
	PTE_SYNC(csrc_pte);
	*cdst_pte = L2_S_PROTO | dst |
	    L2_S_PROT(PTE_KERNEL, VM_PROT_WRITE) | pte_l2_s_cache_mode;
	PTE_SYNC(cdst_pte);
	cpu_tlb_flushD_SE(csrcp);
	cpu_tlb_flushD_SE(cdstp);
	cpu_cpwait();
	bcopy_page(csrcp, cdstp);
	mtx_unlock(&cmtx);
	cpu_dcache_inv_range(csrcp, PAGE_SIZE);
	cpu_dcache_wbinv_range(cdstp, PAGE_SIZE);
	cpu_l2cache_inv_range(csrcp, PAGE_SIZE);
	cpu_l2cache_wbinv_range(cdstp, PAGE_SIZE);
}

void
pmap_copy_page_offs_generic(vm_paddr_t a_phys, vm_offset_t a_offs,
    vm_paddr_t b_phys, vm_offset_t b_offs, int cnt)
{

	mtx_lock(&cmtx);
	*csrc_pte = L2_S_PROTO | a_phys |
	    L2_S_PROT(PTE_KERNEL, VM_PROT_READ) | pte_l2_s_cache_mode;
	PTE_SYNC(csrc_pte);
	*cdst_pte = L2_S_PROTO | b_phys |
	    L2_S_PROT(PTE_KERNEL, VM_PROT_WRITE) | pte_l2_s_cache_mode;
	PTE_SYNC(cdst_pte);
	cpu_tlb_flushD_SE(csrcp);
	cpu_tlb_flushD_SE(cdstp);
	cpu_cpwait();
	bcopy((char *)csrcp + a_offs, (char *)cdstp + b_offs, cnt);
	mtx_unlock(&cmtx);
	cpu_dcache_inv_range(csrcp + a_offs, cnt);
	cpu_dcache_wbinv_range(cdstp + b_offs, cnt);
	cpu_l2cache_inv_range(csrcp + a_offs, cnt);
	cpu_l2cache_wbinv_range(cdstp + b_offs, cnt);
}
#endif /* ARM_MMU_GENERIC != 0 || CPU_XSCALE_CORE3 */

#if ARM_MMU_XSCALE == 1
void
pmap_copy_page_xscale(vm_paddr_t src, vm_paddr_t dst)
{
#if 0
	/* XXX: Only needed for pmap_clean_page(), which is commented out. */
	struct vm_page *src_pg = PHYS_TO_VM_PAGE(src);
#endif

	/*
	 * Clean the source page. Hold the source page's lock for
	 * the duration of the copy so that no other mappings can
	 * be created while we have a potentially aliased mapping.
	 */
#if 0
	/*
	 * XXX: Not needed while we call cpu_dcache_wbinv_all() in
	 * pmap_copy_page().
	 */
	(void) pmap_clean_page(TAILQ_FIRST(&src_pg->md.pv_list), TRUE);
#endif
	/*
	 * Map the pages into the page hook points, copy them, and purge
	 * the cache for the appropriate page. Invalidate the TLB
	 * as required.
	 */
	mtx_lock(&cmtx);
	*csrc_pte = L2_S_PROTO | src |
	    L2_S_PROT(PTE_KERNEL, VM_PROT_READ) |
	    L2_C | L2_XSCALE_T_TEX(TEX_XSCALE_X);	/* mini-data */
	PTE_SYNC(csrc_pte);
	*cdst_pte = L2_S_PROTO | dst |
	    L2_S_PROT(PTE_KERNEL, VM_PROT_WRITE) |
	    L2_C | L2_XSCALE_T_TEX(TEX_XSCALE_X);	/* mini-data */
	PTE_SYNC(cdst_pte);
	cpu_tlb_flushD_SE(csrcp);
	cpu_tlb_flushD_SE(cdstp);
	cpu_cpwait();
	bcopy_page(csrcp, cdstp);
	mtx_unlock(&cmtx);
	xscale_cache_clean_minidata();
}

void
pmap_copy_page_offs_xscale(vm_paddr_t a_phys, vm_offset_t a_offs,
    vm_paddr_t b_phys, vm_offset_t b_offs, int cnt)
{

	mtx_lock(&cmtx);
	*csrc_pte = L2_S_PROTO | a_phys |
	    L2_S_PROT(PTE_KERNEL, VM_PROT_READ) |
	    L2_C | L2_XSCALE_T_TEX(TEX_XSCALE_X);
	PTE_SYNC(csrc_pte);
	*cdst_pte = L2_S_PROTO | b_phys |
	    L2_S_PROT(PTE_KERNEL, VM_PROT_WRITE) |
	    L2_C | L2_XSCALE_T_TEX(TEX_XSCALE_X);
	PTE_SYNC(cdst_pte);
	cpu_tlb_flushD_SE(csrcp);
	cpu_tlb_flushD_SE(cdstp);
	cpu_cpwait();
	bcopy((char *)csrcp + a_offs, (char *)cdstp + b_offs, cnt);
	mtx_unlock(&cmtx);
	xscale_cache_clean_minidata();
}
#endif /* ARM_MMU_XSCALE == 1 */

void
pmap_copy_page(vm_page_t src, vm_page_t dst)
{

	cpu_dcache_wbinv_all();
	cpu_l2cache_wbinv_all();
	if (_arm_memcpy && PAGE_SIZE >= _min_memcpy_size &&
	    _arm_memcpy((void *)VM_PAGE_TO_PHYS(dst),
	    (void *)VM_PAGE_TO_PHYS(src), PAGE_SIZE, IS_PHYSICAL) == 0)
		return;
	pmap_copy_page_func(VM_PAGE_TO_PHYS(src), VM_PAGE_TO_PHYS(dst));
}

/*
 * We have code to do unmapped I/O. However, it isn't quite right and
 * causes un-page-aligned I/O to devices to fail (most notably newfs
 * or fsck). We give up a little performance and disallow unmapped I/O
 * in order to gain stability.
 */
int unmapped_buf_allowed = 0;

void
pmap_copy_pages(vm_page_t ma[], vm_offset_t a_offset, vm_page_t mb[],
    vm_offset_t b_offset, int xfersize)
{
	vm_page_t a_pg, b_pg;
	vm_offset_t a_pg_offset, b_pg_offset;
	int cnt;

	cpu_dcache_wbinv_all();
	cpu_l2cache_wbinv_all();
	while (xfersize > 0) {
		a_pg = ma[a_offset >> PAGE_SHIFT];
		a_pg_offset = a_offset & PAGE_MASK;
		cnt = min(xfersize, PAGE_SIZE - a_pg_offset);
		b_pg = mb[b_offset >> PAGE_SHIFT];
		b_pg_offset = b_offset & PAGE_MASK;
		cnt = min(cnt, PAGE_SIZE - b_pg_offset);
		pmap_copy_page_offs_func(VM_PAGE_TO_PHYS(a_pg), a_pg_offset,
		    VM_PAGE_TO_PHYS(b_pg), b_pg_offset, cnt);
		xfersize -= cnt;
		a_offset += cnt;
		b_offset += cnt;
	}
}

vm_offset_t
pmap_quick_enter_page(vm_page_t m)
{
	/*
	 * Don't bother with a PCPU pageframe, since we don't support
	 * SMP for anything pre-armv7. Use pmap_kenter() to ensure
	 * caching is handled correctly for multiple mappings of the
	 * same physical page.
	 */

	mtx_assert(&qmap_mtx, MA_NOTOWNED);
	mtx_lock(&qmap_mtx);

	pmap_kenter(qmap_addr, VM_PAGE_TO_PHYS(m));

	return (qmap_addr);
}

void
pmap_quick_remove_page(vm_offset_t addr)
{
	KASSERT(addr == qmap_addr,
	    ("pmap_quick_remove_page: invalid address"));
	mtx_assert(&qmap_mtx, MA_OWNED);
	pmap_kremove(addr);
	mtx_unlock(&qmap_mtx);
}

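/*
 * Usage sketch (illustrative only, not part of this file's interface):
 * a typical caller pattern for the quick-mapping pair above.  The helper
 * name "pmap_example_fill_page" is hypothetical; the point is that the
 * page is mapped only for the duration of the access and that the enter
 * and remove calls are strictly paired, since there is a single
 * qmap_addr mapping protected by qmap_mtx.
 *
 *	static void
 *	pmap_example_fill_page(vm_page_t m, uint8_t pattern)
 *	{
 *		vm_offset_t va;
 *
 *		va = pmap_quick_enter_page(m);
 *		memset((void *)va, pattern, PAGE_SIZE);
 *		pmap_quick_remove_page(va);
 *	}
 */
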
/*
 * This routine returns TRUE if a physical page resides
 * in the given pmap.
 */
boolean_t
pmap_page_exists_quick(pmap_t pmap, vm_page_t m)
{
	pv_entry_t pv;
	int loops = 0;
	boolean_t rv;

	KASSERT((m->oflags & VPO_UNMANAGED) == 0,
	    ("pmap_page_exists_quick: page %p is not managed", m));
	rv = FALSE;
	rw_wlock(&pvh_global_lock);
	TAILQ_FOREACH(pv, &m->md.pv_list, pv_list) {
		if (pv->pv_pmap == pmap) {
			rv = TRUE;
			break;
		}
		loops++;
		if (loops >= 16)
			break;
	}
	rw_wunlock(&pvh_global_lock);
	return (rv);
}

/*
 * pmap_page_wired_mappings:
 *
 * Return the number of managed mappings to the given physical page
 * that are wired.
 */
int
pmap_page_wired_mappings(vm_page_t m)
{
	pv_entry_t pv;
	int count;

	count = 0;
	if ((m->oflags & VPO_UNMANAGED) != 0)
		return (count);
	rw_wlock(&pvh_global_lock);
	TAILQ_FOREACH(pv, &m->md.pv_list, pv_list)
		if ((pv->pv_flags & PVF_WIRED) != 0)
			count++;
	rw_wunlock(&pvh_global_lock);
	return (count);
}

/*
 * This function is advisory.
 */
void
pmap_advise(pmap_t pmap, vm_offset_t sva, vm_offset_t eva, int advice)
{
}

/*
 * pmap_ts_referenced:
 *
 * Return the count of reference bits for a page, clearing all of them.
 */
int
pmap_ts_referenced(vm_page_t m)
{

	KASSERT((m->oflags & VPO_UNMANAGED) == 0,
	    ("pmap_ts_referenced: page %p is not managed", m));
	return (pmap_clearbit(m, PVF_REF));
}


boolean_t
pmap_is_modified(vm_page_t m)
{

	KASSERT((m->oflags & VPO_UNMANAGED) == 0,
	    ("pmap_is_modified: page %p is not managed", m));
	if (m->md.pvh_attrs & PVF_MOD)
		return (TRUE);

	return (FALSE);
}


/*
 * Clear the modify bits on the specified physical page.
 */
void
pmap_clear_modify(vm_page_t m)
{

	KASSERT((m->oflags & VPO_UNMANAGED) == 0,
	    ("pmap_clear_modify: page %p is not managed", m));
	VM_OBJECT_ASSERT_WLOCKED(m->object);
	KASSERT(!vm_page_xbusied(m),
	    ("pmap_clear_modify: page %p is exclusive busied", m));

	/*
	 * If the page is not PGA_WRITEABLE, then no mappings can be modified.
	 * If the object containing the page is locked and the page is not
	 * exclusive busied, then PGA_WRITEABLE cannot be concurrently set.
	 */
	if ((m->aflags & PGA_WRITEABLE) == 0)
		return;
	if (m->md.pvh_attrs & PVF_MOD)
		pmap_clearbit(m, PVF_MOD);
}


/*
 * pmap_is_referenced:
 *
 * Return whether or not the specified physical page was referenced
 * in any physical maps.
 */
boolean_t
pmap_is_referenced(vm_page_t m)
{

	KASSERT((m->oflags & VPO_UNMANAGED) == 0,
	    ("pmap_is_referenced: page %p is not managed", m));
	return ((m->md.pvh_attrs & PVF_REF) != 0);
}


/*
 * Clear the write and modified bits in each of the given page's mappings.
 */
void
pmap_remove_write(vm_page_t m)
{

	KASSERT((m->oflags & VPO_UNMANAGED) == 0,
	    ("pmap_remove_write: page %p is not managed", m));

	/*
	 * If the page is not exclusive busied, then PGA_WRITEABLE cannot be
	 * set by another thread while the object is locked. Thus,
	 * if PGA_WRITEABLE is clear, no page table entries need updating.
	 */
	VM_OBJECT_ASSERT_WLOCKED(m->object);
	if (vm_page_xbusied(m) || (m->aflags & PGA_WRITEABLE) != 0)
		pmap_clearbit(m, PVF_WRITE);
}


/*
 * Perform the pmap work for mincore.
 */
int
pmap_mincore(pmap_t pmap, vm_offset_t addr, vm_paddr_t *locked_pa)
{
	struct l2_bucket *l2b;
	pt_entry_t *ptep, pte;
	vm_paddr_t pa;
	vm_page_t m;
	int val;
	boolean_t managed;

	PMAP_LOCK(pmap);
retry:
	l2b = pmap_get_l2_bucket(pmap, addr);
	if (l2b == NULL) {
		val = 0;
		goto out;
	}
	ptep = &l2b->l2b_kva[l2pte_index(addr)];
	pte = *ptep;
	if (!l2pte_valid(pte)) {
		val = 0;
		goto out;
	}
	val = MINCORE_INCORE;
	if (pte & L2_S_PROT_W)
		val |= MINCORE_MODIFIED | MINCORE_MODIFIED_OTHER;
	managed = false;
	pa = l2pte_pa(pte);
	m = PHYS_TO_VM_PAGE(pa);
	if (m != NULL && !(m->oflags & VPO_UNMANAGED))
		managed = true;
	if (managed) {
		/*
		 * The ARM pmap tries to maintain a per-mapping
		 * reference bit. The trouble is that it's kept in
		 * the PV entry, not the PTE, so it's costly to access
		 * here. You would need to acquire the pvh global
		 * lock, call pmap_find_pv(), and introduce a custom
		 * version of vm_page_pa_tryrelock() that releases and
		 * reacquires the pvh global lock. In the end, I
		 * doubt it's worthwhile. This may falsely report
		 * the given address as referenced.
		 */
		if ((m->md.pvh_attrs & PVF_REF) != 0)
			val |= MINCORE_REFERENCED | MINCORE_REFERENCED_OTHER;
	}
	if ((val & (MINCORE_MODIFIED_OTHER | MINCORE_REFERENCED_OTHER)) !=
	    (MINCORE_MODIFIED_OTHER | MINCORE_REFERENCED_OTHER) && managed) {
		/* Ensure that "PHYS_TO_VM_PAGE(pa)->object" doesn't change. */
		if (vm_page_pa_tryrelock(pmap, pa, locked_pa))
			goto retry;
	} else
out:
		PA_UNLOCK_COND(*locked_pa);
	PMAP_UNLOCK(pmap);
	return (val);
}


void
pmap_sync_icache(pmap_t pm, vm_offset_t va, vm_size_t sz)
{
}


/*
 * Increase the starting virtual address of the given mapping if a
 * different alignment might result in more superpage mappings.
 */
void
pmap_align_superpage(vm_object_t object, vm_ooffset_t offset,
    vm_offset_t *addr, vm_size_t size)
{
}

#define BOOTSTRAP_DEBUG

/*
 * pmap_map_section:
 *
 * Create a single section mapping.
 */
void
pmap_map_section(vm_offset_t l1pt, vm_offset_t va, vm_offset_t pa,
    int prot, int cache)
{
	pd_entry_t *pde = (pd_entry_t *) l1pt;
	pd_entry_t fl;

	KASSERT(((va | pa) & L1_S_OFFSET) == 0, ("ouin2"));

	switch (cache) {
	case PTE_NOCACHE:
	default:
		fl = 0;
		break;

	case PTE_CACHE:
		fl = pte_l1_s_cache_mode;
		break;

	case PTE_PAGETABLE:
		fl = pte_l1_s_cache_mode_pt;
		break;
	}

	pde[va >> L1_S_SHIFT] = L1_S_PROTO | pa |
	    L1_S_PROT(PTE_KERNEL, prot) | fl | L1_S_DOM(PMAP_DOMAIN_KERNEL);
	PTE_SYNC(&pde[va >> L1_S_SHIFT]);

}

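/*
 * Worked example (hypothetical addresses, for illustration only):
 * pmap_map_section(l1pt, 0xc0100000, 0x80100000,
 *     VM_PROT_READ | VM_PROT_WRITE, PTE_CACHE) installs a single 1 MiB
 * section descriptor in L1 slot 0xc0100000 >> L1_S_SHIFT = 0xc01,
 * mapping VA 0xc0100000-0xc01fffff to PA 0x80100000-0x801fffff in the
 * kernel domain with the L1 section cache mode bits.  Both VA and PA
 * must be section (1 MiB) aligned, as the KASSERT above enforces.
 */
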
/*
 * pmap_link_l2pt:
 *
 * Link the L2 page table specified by l2pv.pv_pa into the L1
 * page table at the slot for "va".
 */
void
pmap_link_l2pt(vm_offset_t l1pt, vm_offset_t va, struct pv_addr *l2pv)
{
	pd_entry_t *pde = (pd_entry_t *) l1pt, proto;
	u_int slot = va >> L1_S_SHIFT;

	proto = L1_S_DOM(PMAP_DOMAIN_KERNEL) | L1_C_PROTO;

#ifdef VERBOSE_INIT_ARM
	printf("pmap_link_l2pt: pa=0x%x va=0x%x\n", l2pv->pv_pa, l2pv->pv_va);
#endif

	pde[slot + 0] = proto | (l2pv->pv_pa + 0x000);

	PTE_SYNC(&pde[slot]);

	SLIST_INSERT_HEAD(&kernel_pt_list, l2pv, pv_list);


}

/*
 * pmap_map_entry
 *
 * Create a single page mapping.
 */
void
pmap_map_entry(vm_offset_t l1pt, vm_offset_t va, vm_offset_t pa, int prot,
    int cache)
{
	pd_entry_t *pde = (pd_entry_t *) l1pt;
	pt_entry_t fl;
	pt_entry_t *pte;

	KASSERT(((va | pa) & PAGE_MASK) == 0, ("ouin"));

	switch (cache) {
	case PTE_NOCACHE:
	default:
		fl = 0;
		break;

	case PTE_CACHE:
		fl = pte_l2_s_cache_mode;
		break;

	case PTE_PAGETABLE:
		fl = pte_l2_s_cache_mode_pt;
		break;
	}

	if ((pde[va >> L1_S_SHIFT] & L1_TYPE_MASK) != L1_TYPE_C)
		panic("pmap_map_entry: no L2 table for VA 0x%08x", va);

	pte = (pt_entry_t *) kernel_pt_lookup(pde[L1_IDX(va)] & L1_C_ADDR_MASK);

	if (pte == NULL)
		panic("pmap_map_entry: can't find L2 table for VA 0x%08x", va);

	pte[l2pte_index(va)] =
	    L2_S_PROTO | pa | L2_S_PROT(PTE_KERNEL, prot) | fl;
	PTE_SYNC(&pte[l2pte_index(va)]);
}

/*
 * pmap_map_chunk:
 *
 * Map a chunk of memory using the most efficient mappings
 * possible (section, large page, small page) into the
 * provided L1 and L2 tables at the specified virtual address.
 */
vm_size_t
pmap_map_chunk(vm_offset_t l1pt, vm_offset_t va, vm_offset_t pa,
    vm_size_t size, int prot, int cache)
{
	pd_entry_t *pde = (pd_entry_t *) l1pt;
	pt_entry_t *pte, f1, f2s, f2l;
	vm_size_t resid;
	int i;

	resid = roundup2(size, PAGE_SIZE);

	if (l1pt == 0)
		panic("pmap_map_chunk: no L1 table provided");

#ifdef VERBOSE_INIT_ARM
	printf("pmap_map_chunk: pa=0x%x va=0x%x size=0x%x resid=0x%x "
	    "prot=0x%x cache=%d\n", pa, va, size, resid, prot, cache);
#endif

	switch (cache) {
	case PTE_NOCACHE:
	default:
		f1 = 0;
		f2l = 0;
		f2s = 0;
		break;

	case PTE_CACHE:
		f1 = pte_l1_s_cache_mode;
		f2l = pte_l2_l_cache_mode;
		f2s = pte_l2_s_cache_mode;
		break;

	case PTE_PAGETABLE:
		f1 = pte_l1_s_cache_mode_pt;
		f2l = pte_l2_l_cache_mode_pt;
		f2s = pte_l2_s_cache_mode_pt;
		break;
	}

	size = resid;

	while (resid > 0) {
		/* See if we can use a section mapping. */
		if (L1_S_MAPPABLE_P(va, pa, resid)) {
#ifdef VERBOSE_INIT_ARM
			printf("S");
#endif
			pde[va >> L1_S_SHIFT] = L1_S_PROTO | pa |
			    L1_S_PROT(PTE_KERNEL, prot) | f1 |
			    L1_S_DOM(PMAP_DOMAIN_KERNEL);
			PTE_SYNC(&pde[va >> L1_S_SHIFT]);
			va += L1_S_SIZE;
			pa += L1_S_SIZE;
			resid -= L1_S_SIZE;
			continue;
		}

		/*
		 * Ok, we're going to use an L2 table. Make sure
		 * one is actually in the corresponding L1 slot
		 * for the current VA.
		 */
		if ((pde[va >> L1_S_SHIFT] & L1_TYPE_MASK) != L1_TYPE_C)
			panic("pmap_map_chunk: no L2 table for VA 0x%08x", va);

		pte = (pt_entry_t *) kernel_pt_lookup(
		    pde[L1_IDX(va)] & L1_C_ADDR_MASK);
		if (pte == NULL)
			panic("pmap_map_chunk: can't find L2 table for VA "
			    "0x%08x", va);
		/* See if we can use an L2 large page mapping. */
		if (L2_L_MAPPABLE_P(va, pa, resid)) {
#ifdef VERBOSE_INIT_ARM
			printf("L");
#endif
			for (i = 0; i < 16; i++) {
				pte[l2pte_index(va) + i] =
				    L2_L_PROTO | pa |
				    L2_L_PROT(PTE_KERNEL, prot) | f2l;
				PTE_SYNC(&pte[l2pte_index(va) + i]);
			}
			va += L2_L_SIZE;
			pa += L2_L_SIZE;
			resid -= L2_L_SIZE;
			continue;
		}

		/* Use a small page mapping. */
#ifdef VERBOSE_INIT_ARM
		printf("P");
#endif
		pte[l2pte_index(va)] =
		    L2_S_PROTO | pa | L2_S_PROT(PTE_KERNEL, prot) | f2s;
		PTE_SYNC(&pte[l2pte_index(va)]);
		va += PAGE_SIZE;
		pa += PAGE_SIZE;
		resid -= PAGE_SIZE;
	}
#ifdef VERBOSE_INIT_ARM
	printf("\n");
#endif
	return (size);

}

void
pmap_page_set_memattr(vm_page_t m, vm_memattr_t ma)
{
	/*
	 * Remember the memattr in a field that gets used to set the
	 * appropriate bits in the PTEs as mappings are established.
	 */
	m->md.pv_memattr = ma;

	/*
	 * It appears that this function can only be called before any mappings
	 * for the page are established on ARM. If this ever changes, this code
	 * will need to walk the pv_list and make each of the existing mappings
	 * uncacheable, being careful to sync caches and PTEs (and maybe
	 * invalidate TLB?) for any current mapping it modifies.
	 */
	if (m->md.pv_kva != 0 || TAILQ_FIRST(&m->md.pv_list) != NULL)
		panic("Can't change memattr on page with existing mappings");
}

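/*
 * Worked example for pmap_map_chunk() above (hypothetical sizes, for
 * illustration only): mapping a 0x111000-byte chunk whose VA and PA are
 * both 1 MiB aligned, with the required L2 tables already linked into
 * the L1 table via pmap_link_l2pt(), is carved up as
 *
 *	0x111000 = 0x100000 + 0x10000 + 0x1000
 *
 * i.e. one 1 MiB section descriptor in the L1 table, one run of 16
 * identical L2 large-page entries covering 64 KiB, and a single 4 KiB
 * small-page entry.  The function returns the page-rounded size (here
 * 0x111000) regardless of which granularities were used.
 */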