pmap-v4.c revision 295042
/* From: $NetBSD: pmap.c,v 1.148 2004/04/03 04:35:48 bsh Exp $ */
/*-
 * Copyright 2004 Olivier Houchard.
 * Copyright 2003 Wasabi Systems, Inc.
 * All rights reserved.
 *
 * Written by Steve C. Woodford for Wasabi Systems, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed for the NetBSD Project by
 *	Wasabi Systems, Inc.
 * 4. The name of Wasabi Systems, Inc. may not be used to endorse
 *    or promote products derived from this software without specific prior
 *    written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL WASABI SYSTEMS, INC
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*-
 * Copyright (c) 2002-2003 Wasabi Systems, Inc.
 * Copyright (c) 2001 Richard Earnshaw
 * Copyright (c) 2001-2002 Christopher Gilbert
 * All rights reserved.
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. The name of the company nor the name of the author may be used to
 *    endorse or promote products derived from this software without specific
 *    prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
 * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
/*-
 * Copyright (c) 1999 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Charles M. Hannum.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*-
 * Copyright (c) 1994-1998 Mark Brinicombe.
 * Copyright (c) 1994 Brini.
 * All rights reserved.
 *
 * This code is derived from software written for Brini by Mark Brinicombe
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by Mark Brinicombe.
 * 4. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * RiscBSD kernel project
 *
 * pmap.c
 *
 * Machine dependent vm stuff
 *
 * Created      : 20/09/94
 */

/*
 * Special compilation symbols
 * PMAP_DEBUG - Build in pmap_debug_level code
 *
 * Note that pmap_mapdev() and pmap_unmapdev() are implemented in arm/devmap.c
 */
/* Include header files */

#include "opt_vm.h"

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: head/sys/arm/arm/pmap.c 295042 2016-01-29 16:01:37Z skra $");
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/lock.h>
#include <sys/proc.h>
#include <sys/malloc.h>
#include <sys/msgbuf.h>
#include <sys/mutex.h>
#include <sys/vmmeter.h>
#include <sys/mman.h>
#include <sys/rwlock.h>
#include <sys/smp.h>
#include <sys/sched.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/uma.h>
#include <vm/pmap.h>
#include <vm/vm_kern.h>
#include <vm/vm_object.h>
#include <vm/vm_map.h>
#include <vm/vm_page.h>
#include <vm/vm_pageout.h>
#include <vm/vm_phys.h>
#include <vm/vm_extern.h>

#include <machine/md_var.h>
#include <machine/cpu.h>
#include <machine/cpufunc.h>
#include <machine/pcb.h>

#ifdef PMAP_DEBUG
#define PDEBUG(_lev_,_stat_) \
	if (pmap_debug_level >= (_lev_)) \
		((_stat_))
#define dprintf printf

int pmap_debug_level = 0;
#define PMAP_INLINE
#else   /* PMAP_DEBUG */
#define PDEBUG(_lev_,_stat_) /* Nothing */
#define dprintf(x, arg...)
#define PMAP_INLINE __inline
#endif  /* PMAP_DEBUG */
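
/*
 * A typical call site for the debug macros above is the one in
 * pmap_pinit0() later in this file; the statement argument is only
 * evaluated when pmap_debug_level is at or above the requested level:
 *
 *	PDEBUG(1, printf("pmap_pinit0: pmap = %08x\n", (u_int32_t) pmap));
 */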

extern struct pv_addr systempage;

extern int last_fault_code;

/*
 * Internal function prototypes
 */
static void		pmap_free_pv_entry (pv_entry_t);
static pv_entry_t	pmap_get_pv_entry(void);

static int		pmap_enter_locked(pmap_t, vm_offset_t, vm_page_t,
    vm_prot_t, u_int);
static vm_paddr_t	pmap_extract_locked(pmap_t pmap, vm_offset_t va);
static void		pmap_fix_cache(struct vm_page *, pmap_t, vm_offset_t);
static void		pmap_alloc_l1(pmap_t);
static void		pmap_free_l1(pmap_t);

static int		pmap_clearbit(struct vm_page *, u_int);

static struct l2_bucket *pmap_get_l2_bucket(pmap_t, vm_offset_t);
static struct l2_bucket *pmap_alloc_l2_bucket(pmap_t, vm_offset_t);
static void		pmap_free_l2_bucket(pmap_t, struct l2_bucket *, u_int);
static vm_offset_t	kernel_pt_lookup(vm_paddr_t);

static MALLOC_DEFINE(M_VMPMAP, "pmap", "PMAP L1");

vm_offset_t virtual_avail;	/* VA of first avail page (after kernel bss) */
vm_offset_t virtual_end;	/* VA of last avail page (end of kernel AS) */
vm_offset_t pmap_curmaxkvaddr;
vm_paddr_t kernel_l1pa;

vm_offset_t kernel_vm_end = 0;

vm_offset_t vm_max_kernel_address;

struct pmap kernel_pmap_store;

static pt_entry_t *csrc_pte, *cdst_pte;
static vm_offset_t csrcp, cdstp, qmap_addr;
static struct mtx cmtx, qmap_mtx;

static void		pmap_init_l1(struct l1_ttable *, pd_entry_t *);
/*
 * These routines are called when the CPU type is identified to set up
 * the PTE prototypes, cache modes, etc.
 *
 * The variables are always here, just in case LKMs need to reference
 * them (though, they shouldn't).
 */

pt_entry_t	pte_l1_s_cache_mode;
pt_entry_t	pte_l1_s_cache_mode_pt;
pt_entry_t	pte_l1_s_cache_mask;

pt_entry_t	pte_l2_l_cache_mode;
pt_entry_t	pte_l2_l_cache_mode_pt;
pt_entry_t	pte_l2_l_cache_mask;

pt_entry_t	pte_l2_s_cache_mode;
pt_entry_t	pte_l2_s_cache_mode_pt;
pt_entry_t	pte_l2_s_cache_mask;

pt_entry_t	pte_l2_s_prot_u;
pt_entry_t	pte_l2_s_prot_w;
pt_entry_t	pte_l2_s_prot_mask;

pt_entry_t	pte_l1_s_proto;
pt_entry_t	pte_l1_c_proto;
pt_entry_t	pte_l2_s_proto;

void		(*pmap_copy_page_func)(vm_paddr_t, vm_paddr_t);
void		(*pmap_copy_page_offs_func)(vm_paddr_t a_phys,
		    vm_offset_t a_offs, vm_paddr_t b_phys, vm_offset_t b_offs,
		    int cnt);
void		(*pmap_zero_page_func)(vm_paddr_t, int, int);

struct msgbuf *msgbufp = 0;

/*
 * Crashdump maps.
 */
static caddr_t crashdumpmap;

extern void bcopy_page(vm_offset_t, vm_offset_t);
extern void bzero_page(vm_offset_t);

extern vm_offset_t alloc_firstaddr;

char *_tmppt;

/*
 * Metadata for L1 translation tables.
 */
struct l1_ttable {
	/* Entry on the L1 Table list */
	SLIST_ENTRY(l1_ttable) l1_link;

	/* Entry on the L1 Least Recently Used list */
	TAILQ_ENTRY(l1_ttable) l1_lru;

	/* Track how many domains are allocated from this L1 */
	volatile u_int l1_domain_use_count;

	/*
	 * A free-list of domain numbers for this L1.
	 * We avoid using ffs() and a bitmap to track domains since ffs()
	 * is slow on ARM.
299 */ 300 u_int8_t l1_domain_first; 301 u_int8_t l1_domain_free[PMAP_DOMAINS]; 302 303 /* Physical address of this L1 page table */ 304 vm_paddr_t l1_physaddr; 305 306 /* KVA of this L1 page table */ 307 pd_entry_t *l1_kva; 308}; 309 310/* 311 * Convert a virtual address into its L1 table index. That is, the 312 * index used to locate the L2 descriptor table pointer in an L1 table. 313 * This is basically used to index l1->l1_kva[]. 314 * 315 * Each L2 descriptor table represents 1MB of VA space. 316 */ 317#define L1_IDX(va) (((vm_offset_t)(va)) >> L1_S_SHIFT) 318 319/* 320 * L1 Page Tables are tracked using a Least Recently Used list. 321 * - New L1s are allocated from the HEAD. 322 * - Freed L1s are added to the TAIl. 323 * - Recently accessed L1s (where an 'access' is some change to one of 324 * the userland pmaps which owns this L1) are moved to the TAIL. 325 */ 326static TAILQ_HEAD(, l1_ttable) l1_lru_list; 327/* 328 * A list of all L1 tables 329 */ 330static SLIST_HEAD(, l1_ttable) l1_list; 331static struct mtx l1_lru_lock; 332 333/* 334 * The l2_dtable tracks L2_BUCKET_SIZE worth of L1 slots. 335 * 336 * This is normally 16MB worth L2 page descriptors for any given pmap. 337 * Reference counts are maintained for L2 descriptors so they can be 338 * freed when empty. 339 */ 340struct l2_dtable { 341 /* The number of L2 page descriptors allocated to this l2_dtable */ 342 u_int l2_occupancy; 343 344 /* List of L2 page descriptors */ 345 struct l2_bucket { 346 pt_entry_t *l2b_kva; /* KVA of L2 Descriptor Table */ 347 vm_paddr_t l2b_phys; /* Physical address of same */ 348 u_short l2b_l1idx; /* This L2 table's L1 index */ 349 u_short l2b_occupancy; /* How many active descriptors */ 350 } l2_bucket[L2_BUCKET_SIZE]; 351}; 352 353/* pmap_kenter_internal flags */ 354#define KENTER_CACHE 0x1 355#define KENTER_USER 0x2 356 357/* 358 * Given an L1 table index, calculate the corresponding l2_dtable index 359 * and bucket index within the l2_dtable. 360 */ 361#define L2_IDX(l1idx) (((l1idx) >> L2_BUCKET_LOG2) & \ 362 (L2_SIZE - 1)) 363#define L2_BUCKET(l1idx) ((l1idx) & (L2_BUCKET_SIZE - 1)) 364 365/* 366 * Given a virtual address, this macro returns the 367 * virtual address required to drop into the next L2 bucket. 368 */ 369#define L2_NEXT_BUCKET(va) (((va) & L1_S_FRAME) + L1_S_SIZE) 370 371/* 372 * We try to map the page tables write-through, if possible. However, not 373 * all CPUs have a write-through cache mode, so on those we have to sync 374 * the cache when we frob page tables. 375 * 376 * We try to evaluate this at compile time, if possible. However, it's 377 * not always possible to do that, hence this run-time var. 

/*
 * We try to map the page tables write-through, if possible. However, not
 * all CPUs have a write-through cache mode, so on those we have to sync
 * the cache when we frob page tables.
 *
 * We try to evaluate this at compile time, if possible. However, it's
 * not always possible to do that, hence this run-time var.
 */
int	pmap_needs_pte_sync;

/*
 * Macro to determine if a mapping might be resident in the
 * instruction cache and/or TLB
 */
#define	PV_BEEN_EXECD(f)  (((f) & (PVF_REF | PVF_EXEC)) == (PVF_REF | PVF_EXEC))

/*
 * Macro to determine if a mapping might be resident in the
 * data cache and/or TLB
 */
#define	PV_BEEN_REFD(f)   (((f) & PVF_REF) != 0)

#ifndef PMAP_SHPGPERPROC
#define PMAP_SHPGPERPROC 200
#endif

#define pmap_is_current(pm)	((pm) == kernel_pmap || \
    curproc->p_vmspace->vm_map.pmap == (pm))
static uma_zone_t pvzone = NULL;
uma_zone_t l2zone;
static uma_zone_t l2table_zone;
static vm_offset_t pmap_kernel_l2dtable_kva;
static vm_offset_t pmap_kernel_l2ptp_kva;
static vm_paddr_t pmap_kernel_l2ptp_phys;
static int pv_entry_count=0, pv_entry_max=0, pv_entry_high_water=0;
static struct rwlock pvh_global_lock;

void pmap_copy_page_offs_generic(vm_paddr_t a_phys, vm_offset_t a_offs,
    vm_paddr_t b_phys, vm_offset_t b_offs, int cnt);
#if ARM_MMU_XSCALE == 1
void pmap_copy_page_offs_xscale(vm_paddr_t a_phys, vm_offset_t a_offs,
    vm_paddr_t b_phys, vm_offset_t b_offs, int cnt);
#endif

/*
 * This list exists for the benefit of pmap_map_chunk(). It keeps track
 * of the kernel L2 tables during bootstrap, so that pmap_map_chunk() can
 * find them as necessary.
 *
 * Note that the data on this list MUST remain valid after initarm() returns,
 * as pmap_bootstrap() uses it to construct L2 table metadata.
 */
SLIST_HEAD(, pv_addr) kernel_pt_list = SLIST_HEAD_INITIALIZER(kernel_pt_list);

static void
pmap_init_l1(struct l1_ttable *l1, pd_entry_t *l1pt)
{
	int i;

	l1->l1_kva = l1pt;
	l1->l1_domain_use_count = 0;
	l1->l1_domain_first = 0;

	for (i = 0; i < PMAP_DOMAINS; i++)
		l1->l1_domain_free[i] = i + 1;

	/*
	 * Copy the kernel's L1 entries to each new L1.
	 */
	if (l1pt != kernel_pmap->pm_l1->l1_kva)
		memcpy(l1pt, kernel_pmap->pm_l1->l1_kva, L1_TABLE_SIZE);

	if ((l1->l1_physaddr = pmap_extract(kernel_pmap, (vm_offset_t)l1pt)) == 0)
		panic("pmap_init_l1: can't get PA of L1 at %p", l1pt);
	SLIST_INSERT_HEAD(&l1_list, l1, l1_link);
	TAILQ_INSERT_TAIL(&l1_lru_list, l1, l1_lru);
}

static vm_offset_t
kernel_pt_lookup(vm_paddr_t pa)
{
	struct pv_addr *pv;

	SLIST_FOREACH(pv, &kernel_pt_list, pv_list) {
		if (pv->pv_pa == pa)
			return (pv->pv_va);
	}
	return (0);
}

#if ARM_MMU_GENERIC != 0
void
pmap_pte_init_generic(void)
{

	pte_l1_s_cache_mode = L1_S_B|L1_S_C;
	pte_l1_s_cache_mask = L1_S_CACHE_MASK_generic;

	pte_l2_l_cache_mode = L2_B|L2_C;
	pte_l2_l_cache_mask = L2_L_CACHE_MASK_generic;

	pte_l2_s_cache_mode = L2_B|L2_C;
	pte_l2_s_cache_mask = L2_S_CACHE_MASK_generic;

	/*
	 * If we have a write-through cache, set B and C.  If
	 * we have a write-back cache, then we assume setting
	 * only C will make those pages write-through.
479 */ 480 if (cpufuncs.cf_dcache_wb_range == (void *) cpufunc_nullop) { 481 pte_l1_s_cache_mode_pt = L1_S_B|L1_S_C; 482 pte_l2_l_cache_mode_pt = L2_B|L2_C; 483 pte_l2_s_cache_mode_pt = L2_B|L2_C; 484 } else { 485 pte_l1_s_cache_mode_pt = L1_S_C; 486 pte_l2_l_cache_mode_pt = L2_C; 487 pte_l2_s_cache_mode_pt = L2_C; 488 } 489 490 pte_l2_s_prot_u = L2_S_PROT_U_generic; 491 pte_l2_s_prot_w = L2_S_PROT_W_generic; 492 pte_l2_s_prot_mask = L2_S_PROT_MASK_generic; 493 494 pte_l1_s_proto = L1_S_PROTO_generic; 495 pte_l1_c_proto = L1_C_PROTO_generic; 496 pte_l2_s_proto = L2_S_PROTO_generic; 497 498 pmap_copy_page_func = pmap_copy_page_generic; 499 pmap_copy_page_offs_func = pmap_copy_page_offs_generic; 500 pmap_zero_page_func = pmap_zero_page_generic; 501} 502 503#endif /* ARM_MMU_GENERIC != 0 */ 504 505#if ARM_MMU_XSCALE == 1 506#if (ARM_NMMUS > 1) || defined (CPU_XSCALE_CORE3) 507static u_int xscale_use_minidata; 508#endif 509 510void 511pmap_pte_init_xscale(void) 512{ 513 uint32_t auxctl; 514 int write_through = 0; 515 516 pte_l1_s_cache_mode = L1_S_B|L1_S_C|L1_S_XSCALE_P; 517 pte_l1_s_cache_mask = L1_S_CACHE_MASK_xscale; 518 519 pte_l2_l_cache_mode = L2_B|L2_C; 520 pte_l2_l_cache_mask = L2_L_CACHE_MASK_xscale; 521 522 pte_l2_s_cache_mode = L2_B|L2_C; 523 pte_l2_s_cache_mask = L2_S_CACHE_MASK_xscale; 524 525 pte_l1_s_cache_mode_pt = L1_S_C; 526 pte_l2_l_cache_mode_pt = L2_C; 527 pte_l2_s_cache_mode_pt = L2_C; 528#ifdef XSCALE_CACHE_READ_WRITE_ALLOCATE 529 /* 530 * The XScale core has an enhanced mode where writes that 531 * miss the cache cause a cache line to be allocated. This 532 * is significantly faster than the traditional, write-through 533 * behavior of this case. 534 */ 535 pte_l1_s_cache_mode |= L1_S_XSCALE_TEX(TEX_XSCALE_X); 536 pte_l2_l_cache_mode |= L2_XSCALE_L_TEX(TEX_XSCALE_X); 537 pte_l2_s_cache_mode |= L2_XSCALE_T_TEX(TEX_XSCALE_X); 538#endif /* XSCALE_CACHE_READ_WRITE_ALLOCATE */ 539#ifdef XSCALE_CACHE_WRITE_THROUGH 540 /* 541 * Some versions of the XScale core have various bugs in 542 * their cache units, the work-around for which is to run 543 * the cache in write-through mode. Unfortunately, this 544 * has a major (negative) impact on performance. So, we 545 * go ahead and run fast-and-loose, in the hopes that we 546 * don't line up the planets in a way that will trip the 547 * bugs. 548 * 549 * However, we give you the option to be slow-but-correct. 550 */ 551 write_through = 1; 552#elif defined(XSCALE_CACHE_WRITE_BACK) 553 /* force write back cache mode */ 554 write_through = 0; 555#elif defined(CPU_XSCALE_PXA2X0) 556 /* 557 * Intel PXA2[15]0 processors are known to have a bug in 558 * write-back cache on revision 4 and earlier (stepping 559 * A[01] and B[012]). Fixed for C0 and later. 
	 */
	{
		uint32_t id, type;

		id = cpufunc_id();
		type = id & ~(CPU_ID_XSCALE_COREREV_MASK|CPU_ID_REVISION_MASK);

		if (type == CPU_ID_PXA250 || type == CPU_ID_PXA210) {
			if ((id & CPU_ID_REVISION_MASK) < 5) {
				/* write through for stepping A0-1 and B0-2 */
				write_through = 1;
			}
		}
	}
#endif /* XSCALE_CACHE_WRITE_THROUGH */

	if (write_through) {
		pte_l1_s_cache_mode = L1_S_C;
		pte_l2_l_cache_mode = L2_C;
		pte_l2_s_cache_mode = L2_C;
	}

#if (ARM_NMMUS > 1)
	xscale_use_minidata = 1;
#endif

	pte_l2_s_prot_u = L2_S_PROT_U_xscale;
	pte_l2_s_prot_w = L2_S_PROT_W_xscale;
	pte_l2_s_prot_mask = L2_S_PROT_MASK_xscale;

	pte_l1_s_proto = L1_S_PROTO_xscale;
	pte_l1_c_proto = L1_C_PROTO_xscale;
	pte_l2_s_proto = L2_S_PROTO_xscale;

#ifdef CPU_XSCALE_CORE3
	pmap_copy_page_func = pmap_copy_page_generic;
	pmap_copy_page_offs_func = pmap_copy_page_offs_generic;
	pmap_zero_page_func = pmap_zero_page_generic;
	xscale_use_minidata = 0;
	/* Make sure it is L2-cacheable */
	pte_l1_s_cache_mode |= L1_S_XSCALE_TEX(TEX_XSCALE_T);
	pte_l1_s_cache_mode_pt = pte_l1_s_cache_mode &~ L1_S_XSCALE_P;
	pte_l2_l_cache_mode |= L2_XSCALE_L_TEX(TEX_XSCALE_T);
	pte_l2_l_cache_mode_pt = pte_l1_s_cache_mode;
	pte_l2_s_cache_mode |= L2_XSCALE_T_TEX(TEX_XSCALE_T);
	pte_l2_s_cache_mode_pt = pte_l2_s_cache_mode;

#else
	pmap_copy_page_func = pmap_copy_page_xscale;
	pmap_copy_page_offs_func = pmap_copy_page_offs_xscale;
	pmap_zero_page_func = pmap_zero_page_xscale;
#endif

	/*
	 * Disable ECC protection of page table access, for now.
	 */
	__asm __volatile("mrc p15, 0, %0, c1, c0, 1" : "=r" (auxctl));
	auxctl &= ~XSCALE_AUXCTL_P;
	__asm __volatile("mcr p15, 0, %0, c1, c0, 1" : : "r" (auxctl));
}

/*
 * xscale_setup_minidata:
 *
 *	Set up the mini-data cache clean area.  We require the
 *	caller to allocate the right amount of physically and
 *	virtually contiguous space.
 */
extern vm_offset_t xscale_minidata_clean_addr;
extern vm_size_t xscale_minidata_clean_size; /* already initialized */
void
xscale_setup_minidata(vm_offset_t l1pt, vm_offset_t va, vm_paddr_t pa)
{
	pd_entry_t *pde = (pd_entry_t *) l1pt;
	pt_entry_t *pte;
	vm_size_t size;
	uint32_t auxctl;

	xscale_minidata_clean_addr = va;

	/* Round it to page size. */
	size = (xscale_minidata_clean_size + L2_S_OFFSET) & L2_S_FRAME;

	for (; size != 0;
	    va += L2_S_SIZE, pa += L2_S_SIZE, size -= L2_S_SIZE) {
		pte = (pt_entry_t *) kernel_pt_lookup(
		    pde[L1_IDX(va)] & L1_C_ADDR_MASK);
		if (pte == NULL)
			panic("xscale_setup_minidata: can't find L2 table for "
			    "VA 0x%08x", (u_int32_t) va);
		pte[l2pte_index(va)] =
		    L2_S_PROTO | pa | L2_S_PROT(PTE_KERNEL, VM_PROT_READ) |
		    L2_C | L2_XSCALE_T_TEX(TEX_XSCALE_X);
	}

	/*
	 * Configure the mini-data cache for write-back with
	 * read/write-allocate.
	 *
	 * NOTE: In order to reconfigure the mini-data cache, we must
	 * make sure it contains no valid data!  In order to do that,
	 * we must issue a global data cache invalidate command!
	 *
	 * WE ASSUME WE ARE RUNNING UN-CACHED WHEN THIS ROUTINE IS CALLED!
	 * THIS IS VERY IMPORTANT!
	 */

	/* Invalidate data and mini-data.
	 */
	__asm __volatile("mcr p15, 0, %0, c7, c6, 0" : : "r" (0));
	__asm __volatile("mrc p15, 0, %0, c1, c0, 1" : "=r" (auxctl));
	auxctl = (auxctl & ~XSCALE_AUXCTL_MD_MASK) | XSCALE_AUXCTL_MD_WB_RWA;
	__asm __volatile("mcr p15, 0, %0, c1, c0, 1" : : "r" (auxctl));
}
#endif

/*
 * Allocate an L1 translation table for the specified pmap.
 * This is called at pmap creation time.
 */
static void
pmap_alloc_l1(pmap_t pm)
{
	struct l1_ttable *l1;
	u_int8_t domain;

	/*
	 * Remove the L1 at the head of the LRU list
	 */
	mtx_lock(&l1_lru_lock);
	l1 = TAILQ_FIRST(&l1_lru_list);
	TAILQ_REMOVE(&l1_lru_list, l1, l1_lru);

	/*
	 * Pick the first available domain number, and update
	 * the link to the next number.
	 */
	domain = l1->l1_domain_first;
	l1->l1_domain_first = l1->l1_domain_free[domain];

	/*
	 * If there are still free domain numbers in this L1,
	 * put it back on the TAIL of the LRU list.
	 */
	if (++l1->l1_domain_use_count < PMAP_DOMAINS)
		TAILQ_INSERT_TAIL(&l1_lru_list, l1, l1_lru);

	mtx_unlock(&l1_lru_lock);

	/*
	 * Fix up the relevant bits in the pmap structure
	 */
	pm->pm_l1 = l1;
	pm->pm_domain = domain + 1;
}

/*
 * Free an L1 translation table.
 * This is called at pmap destruction time.
 */
static void
pmap_free_l1(pmap_t pm)
{
	struct l1_ttable *l1 = pm->pm_l1;

	mtx_lock(&l1_lru_lock);

	/*
	 * If this L1 is currently on the LRU list, remove it.
	 */
	if (l1->l1_domain_use_count < PMAP_DOMAINS)
		TAILQ_REMOVE(&l1_lru_list, l1, l1_lru);

	/*
	 * Free up the domain number which was allocated to the pmap
	 */
	l1->l1_domain_free[pm->pm_domain - 1] = l1->l1_domain_first;
	l1->l1_domain_first = pm->pm_domain - 1;
	l1->l1_domain_use_count--;

	/*
	 * The L1 now must have at least 1 free domain, so add
	 * it back to the LRU list. If the use count is zero,
	 * put it at the head of the list, otherwise it goes
	 * to the tail.
	 */
	if (l1->l1_domain_use_count == 0) {
		TAILQ_INSERT_HEAD(&l1_lru_list, l1, l1_lru);
	} else
		TAILQ_INSERT_TAIL(&l1_lru_list, l1, l1_lru);

	mtx_unlock(&l1_lru_lock);
}

/*
 * Returns a pointer to the L2 bucket associated with the specified pmap
 * and VA, or NULL if no L2 bucket exists for the address.
 */
static PMAP_INLINE struct l2_bucket *
pmap_get_l2_bucket(pmap_t pm, vm_offset_t va)
{
	struct l2_dtable *l2;
	struct l2_bucket *l2b;
	u_short l1idx;

	l1idx = L1_IDX(va);

	if ((l2 = pm->pm_l2[L2_IDX(l1idx)]) == NULL ||
	    (l2b = &l2->l2_bucket[L2_BUCKET(l1idx)])->l2b_kva == NULL)
		return (NULL);

	return (l2b);
}
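
/*
 * pmap_get_l2_bucket() above is the read-only half of the lookup used
 * throughout this file.  Spelled out step by step (no locking shown),
 * the walk from a VA to its page-level PTE is:
 *
 *	u_short l1idx = L1_IDX(va);
 *	struct l2_dtable *l2 = pm->pm_l2[L2_IDX(l1idx)];	  // 16MB chunk
 *	struct l2_bucket *l2b = &l2->l2_bucket[L2_BUCKET(l1idx)]; // 1MB bucket
 *	pt_entry_t pte = l2b->l2b_kva[l2pte_index(va)];		  // page entry
 *
 * with NULL checks at each step, exactly as the function performs them.
 */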

/*
 * Returns a pointer to the L2 bucket associated with the specified pmap
 * and VA.
 *
 * If no L2 bucket exists, perform the necessary allocations to put an L2
 * bucket/page table in place.
 *
 * Note that if a new L2 bucket/page was allocated, the caller *must*
 * increment the bucket occupancy counter appropriately *before*
 * releasing the pmap's lock to ensure no other thread or cpu deallocates
 * the bucket/page in the meantime.
 */
static struct l2_bucket *
pmap_alloc_l2_bucket(pmap_t pm, vm_offset_t va)
{
	struct l2_dtable *l2;
	struct l2_bucket *l2b;
	u_short l1idx;

	l1idx = L1_IDX(va);

	PMAP_ASSERT_LOCKED(pm);
	rw_assert(&pvh_global_lock, RA_WLOCKED);
	if ((l2 = pm->pm_l2[L2_IDX(l1idx)]) == NULL) {
		/*
		 * No mapping at this address, as there is
		 * no entry in the L1 table.
		 * Need to allocate a new l2_dtable.
		 */
		PMAP_UNLOCK(pm);
		rw_wunlock(&pvh_global_lock);
		if ((l2 = uma_zalloc(l2table_zone, M_NOWAIT)) == NULL) {
			rw_wlock(&pvh_global_lock);
			PMAP_LOCK(pm);
			return (NULL);
		}
		rw_wlock(&pvh_global_lock);
		PMAP_LOCK(pm);
		if (pm->pm_l2[L2_IDX(l1idx)] != NULL) {
			/*
			 * Someone already allocated the l2_dtable while
			 * we were doing the same.
			 */
			uma_zfree(l2table_zone, l2);
			l2 = pm->pm_l2[L2_IDX(l1idx)];
		} else {
			bzero(l2, sizeof(*l2));
			/*
			 * Link it into the parent pmap
			 */
			pm->pm_l2[L2_IDX(l1idx)] = l2;
		}
	}

	l2b = &l2->l2_bucket[L2_BUCKET(l1idx)];

	/*
	 * Fetch pointer to the L2 page table associated with the address.
	 */
	if (l2b->l2b_kva == NULL) {
		pt_entry_t *ptep;

		/*
		 * No L2 page table has been allocated. Chances are, this
		 * is because we just allocated the l2_dtable, above.
		 */
		l2->l2_occupancy++;
		PMAP_UNLOCK(pm);
		rw_wunlock(&pvh_global_lock);
		ptep = uma_zalloc(l2zone, M_NOWAIT);
		rw_wlock(&pvh_global_lock);
		PMAP_LOCK(pm);
		if (l2b->l2b_kva != 0) {
			/* We lost the race. */
			l2->l2_occupancy--;
			uma_zfree(l2zone, ptep);
			return (l2b);
		}
		if (ptep == NULL) {
			/*
			 * Oops, no more L2 page tables available at this
			 * time. We may need to deallocate the l2_dtable
			 * if we allocated a new one above.
			 */
			l2->l2_occupancy--;
			if (l2->l2_occupancy == 0) {
				pm->pm_l2[L2_IDX(l1idx)] = NULL;
				uma_zfree(l2table_zone, l2);
			}
			return (NULL);
		}

		l2b->l2b_kva = ptep;
		l2b->l2b_phys = vtophys(ptep);
		l2b->l2b_l1idx = l1idx;
	}

	return (l2b);
}

static PMAP_INLINE void
#ifndef PMAP_INCLUDE_PTE_SYNC
pmap_free_l2_ptp(pt_entry_t *l2)
#else
pmap_free_l2_ptp(boolean_t need_sync, pt_entry_t *l2)
#endif
{
#ifdef PMAP_INCLUDE_PTE_SYNC
	/*
	 * Note: With a write-back cache, we may need to sync this
	 * L2 table before re-using it.
	 * This is because it may have belonged to a non-current
	 * pmap, in which case the cache syncs would have been
	 * skipped when the pages were being unmapped. If the
	 * L2 table were then to be immediately re-allocated to
	 * the *current* pmap, it may well contain stale mappings
	 * which have not yet been cleared by a cache write-back
	 * and so would still be visible to the mmu.
	 */
	if (need_sync)
		PTE_SYNC_RANGE(l2, L2_TABLE_SIZE_REAL / sizeof(pt_entry_t));
#endif
	uma_zfree(l2zone, l2);
}
/*
 * One or more mappings in the specified L2 descriptor table have just been
 * invalidated.
 *
 * Garbage collect the metadata and descriptor table itself if necessary.
 *
 * The pmap lock must be acquired when this is called (not necessary
 * for the kernel pmap).
 */
static void
pmap_free_l2_bucket(pmap_t pm, struct l2_bucket *l2b, u_int count)
{
	struct l2_dtable *l2;
	pd_entry_t *pl1pd, l1pd;
	pt_entry_t *ptep;
	u_short l1idx;

	/*
	 * Update the bucket's reference count according to how many
	 * PTEs the caller has just invalidated.
	 */
	l2b->l2b_occupancy -= count;

	/*
	 * Note:
	 *
	 * Level 2 page tables allocated to the kernel pmap are never freed
	 * as that would require checking all Level 1 page tables and
	 * removing any references to the Level 2 page table. See also the
	 * comment elsewhere about never freeing bootstrap L2 descriptors.
	 *
	 * We make do with just invalidating the mapping in the L2 table.
	 *
	 * This isn't really a big deal in practice and, in fact, leads
	 * to a performance win over time as we don't need to continually
	 * alloc/free.
	 */
	if (l2b->l2b_occupancy > 0 || pm == kernel_pmap)
		return;

	/*
	 * There are no more valid mappings in this level 2 page table.
	 * Go ahead and NULL-out the pointer in the bucket, then
	 * free the page table.
	 */
	l1idx = l2b->l2b_l1idx;
	ptep = l2b->l2b_kva;
	l2b->l2b_kva = NULL;

	pl1pd = &pm->pm_l1->l1_kva[l1idx];

	/*
	 * If the L1 slot matches the pmap's domain
	 * number, then invalidate it.
	 */
	l1pd = *pl1pd & (L1_TYPE_MASK | L1_C_DOM_MASK);
	if (l1pd == (L1_C_DOM(pm->pm_domain) | L1_TYPE_C)) {
		*pl1pd = 0;
		PTE_SYNC(pl1pd);
	}

	/*
	 * Release the L2 descriptor table back to the pool cache.
	 */
#ifndef PMAP_INCLUDE_PTE_SYNC
	pmap_free_l2_ptp(ptep);
#else
	pmap_free_l2_ptp(!pmap_is_current(pm), ptep);
#endif

	/*
	 * Update the reference count in the associated l2_dtable
	 */
	l2 = pm->pm_l2[L2_IDX(l1idx)];
	if (--l2->l2_occupancy > 0)
		return;

	/*
	 * There are no more valid mappings in any of the Level 1
	 * slots managed by this l2_dtable. Go ahead and NULL-out
	 * the pointer in the parent pmap and free the l2_dtable.
	 */
	pm->pm_l2[L2_IDX(l1idx)] = NULL;
	uma_zfree(l2table_zone, l2);
}

/*
 * Pool cache constructors for L2 descriptor tables, metadata and pmap
 * structures.
 */
static int
pmap_l2ptp_ctor(void *mem, int size, void *arg, int flags)
{
#ifndef PMAP_INCLUDE_PTE_SYNC
	struct l2_bucket *l2b;
	pt_entry_t *ptep, pte;

	vm_offset_t va = (vm_offset_t)mem & ~PAGE_MASK;

	/*
	 * The mappings for these page tables were initially made using
	 * pmap_kenter() by the pool subsystem. Therefore, the cache-
	 * mode will not be right for page table mappings. To avoid
	 * polluting the pmap_kenter() code with a special case for
	 * page tables, we simply fix up the cache-mode here if it's not
	 * correct.
	 */
	l2b = pmap_get_l2_bucket(kernel_pmap, va);
	ptep = &l2b->l2b_kva[l2pte_index(va)];
	pte = *ptep;

	if ((pte & L2_S_CACHE_MASK) != pte_l2_s_cache_mode_pt) {
		/*
		 * Page tables must have the cache-mode set to
		 * Write-Thru.
		 */
		*ptep = (pte & ~L2_S_CACHE_MASK) | pte_l2_s_cache_mode_pt;
		PTE_SYNC(ptep);
		cpu_tlb_flushD_SE(va);
		cpu_cpwait();
	}
#endif
	memset(mem, 0, L2_TABLE_SIZE_REAL);
	PTE_SYNC_RANGE(mem, L2_TABLE_SIZE_REAL / sizeof(pt_entry_t));
	return (0);
}
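
/*
 * pmap_l2ptp_ctor() runs on every allocation from l2zone: it is
 * registered as the zone's constructor by the uma_zcreate() call in
 * pmap_init() below, so each new L2 page table comes back zeroed and
 * with its kernel mapping already switched to the page-table cache mode.
 */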

/*
 * A bunch of routines to conditionally flush the caches/TLB depending
 * on whether the specified pmap actually needs to be flushed at any
 * given time.
 */
static PMAP_INLINE void
pmap_tlb_flushID_SE(pmap_t pm, vm_offset_t va)
{

	if (pmap_is_current(pm))
		cpu_tlb_flushID_SE(va);
}

static PMAP_INLINE void
pmap_tlb_flushD_SE(pmap_t pm, vm_offset_t va)
{

	if (pmap_is_current(pm))
		cpu_tlb_flushD_SE(va);
}

static PMAP_INLINE void
pmap_tlb_flushID(pmap_t pm)
{

	if (pmap_is_current(pm))
		cpu_tlb_flushID();
}
static PMAP_INLINE void
pmap_tlb_flushD(pmap_t pm)
{

	if (pmap_is_current(pm))
		cpu_tlb_flushD();
}

static int
pmap_has_valid_mapping(pmap_t pm, vm_offset_t va)
{
	pd_entry_t *pde;
	pt_entry_t *ptep;

	if (pmap_get_pde_pte(pm, va, &pde, &ptep) &&
	    ptep && ((*ptep & L2_TYPE_MASK) != L2_TYPE_INV))
		return (1);

	return (0);
}

static PMAP_INLINE void
pmap_idcache_wbinv_range(pmap_t pm, vm_offset_t va, vm_size_t len)
{
	vm_size_t rest;

	CTR4(KTR_PMAP, "pmap_idcache_wbinv_range: pmap %p is_kernel %d va 0x%08x"
	    " len 0x%x ", pm, pm == kernel_pmap, va, len);

	if (pmap_is_current(pm) || pm == kernel_pmap) {
		rest = MIN(PAGE_SIZE - (va & PAGE_MASK), len);
		while (len > 0) {
			if (pmap_has_valid_mapping(pm, va)) {
				cpu_idcache_wbinv_range(va, rest);
				cpu_l2cache_wbinv_range(va, rest);
			}
			len -= rest;
			va += rest;
			rest = MIN(PAGE_SIZE, len);
		}
	}
}

static PMAP_INLINE void
pmap_dcache_wb_range(pmap_t pm, vm_offset_t va, vm_size_t len, boolean_t do_inv,
    boolean_t rd_only)
{
	vm_size_t rest;

	CTR4(KTR_PMAP, "pmap_dcache_wb_range: pmap %p is_kernel %d va 0x%08x "
	    "len 0x%x ", pm, pm == kernel_pmap, va, len);
	CTR2(KTR_PMAP, " do_inv %d rd_only %d", do_inv, rd_only);

	if (pmap_is_current(pm)) {
		rest = MIN(PAGE_SIZE - (va & PAGE_MASK), len);
		while (len > 0) {
			if (pmap_has_valid_mapping(pm, va)) {
				if (do_inv && rd_only) {
					cpu_dcache_inv_range(va, rest);
					cpu_l2cache_inv_range(va, rest);
				} else if (do_inv) {
					cpu_dcache_wbinv_range(va, rest);
					cpu_l2cache_wbinv_range(va, rest);
				} else if (!rd_only) {
					cpu_dcache_wb_range(va, rest);
					cpu_l2cache_wb_range(va, rest);
				}
			}
			len -= rest;
			va += rest;

			rest = MIN(PAGE_SIZE, len);
		}
	}
}

static PMAP_INLINE void
pmap_idcache_wbinv_all(pmap_t pm)
{

	if (pmap_is_current(pm)) {
		cpu_idcache_wbinv_all();
		cpu_l2cache_wbinv_all();
	}
}

#ifdef notyet
static PMAP_INLINE void
pmap_dcache_wbinv_all(pmap_t pm)
{

	if (pmap_is_current(pm)) {
		cpu_dcache_wbinv_all();
		cpu_l2cache_wbinv_all();
	}
}
#endif

/*
 * PTE_SYNC_CURRENT:
 *
 *	Make sure the pte is written out to RAM.
 *	We need to do this if any of the following cases holds:
 *	 - We're dealing with the kernel pmap
 *	 - There is no pmap active in the cache/tlb.
 *	 - The specified pmap is 'active' in the cache/tlb.
 */
#ifdef PMAP_INCLUDE_PTE_SYNC
#define	PTE_SYNC_CURRENT(pm, ptep)	\
do {					\
	if (PMAP_NEEDS_PTE_SYNC &&	\
	    pmap_is_current(pm))	\
		PTE_SYNC(ptep);		\
} while (/*CONSTCOND*/0)
#else
#define	PTE_SYNC_CURRENT(pm, ptep)	/* nothing */
#endif

/*
 * cacheable == -1 means we must make the entry uncacheable, 1 means
 * cacheable;
 */
static __inline void
pmap_set_cache_entry(pv_entry_t pv, pmap_t pm, vm_offset_t va, int cacheable)
{
	struct l2_bucket *l2b;
	pt_entry_t *ptep, pte;

	l2b = pmap_get_l2_bucket(pv->pv_pmap, pv->pv_va);
	ptep = &l2b->l2b_kva[l2pte_index(pv->pv_va)];

	if (cacheable == 1) {
		pte = (*ptep & ~L2_S_CACHE_MASK) | pte_l2_s_cache_mode;
		if (l2pte_valid(pte)) {
			if (PV_BEEN_EXECD(pv->pv_flags)) {
				pmap_tlb_flushID_SE(pv->pv_pmap, pv->pv_va);
			} else if (PV_BEEN_REFD(pv->pv_flags)) {
				pmap_tlb_flushD_SE(pv->pv_pmap, pv->pv_va);
			}
		}
	} else {
		pte = *ptep &~ L2_S_CACHE_MASK;
		if ((va != pv->pv_va || pm != pv->pv_pmap) &&
		    l2pte_valid(pte)) {
			if (PV_BEEN_EXECD(pv->pv_flags)) {
				pmap_idcache_wbinv_range(pv->pv_pmap,
				    pv->pv_va, PAGE_SIZE);
				pmap_tlb_flushID_SE(pv->pv_pmap, pv->pv_va);
			} else if (PV_BEEN_REFD(pv->pv_flags)) {
				pmap_dcache_wb_range(pv->pv_pmap,
				    pv->pv_va, PAGE_SIZE, TRUE,
				    (pv->pv_flags & PVF_WRITE) == 0);
				pmap_tlb_flushD_SE(pv->pv_pmap,
				    pv->pv_va);
			}
		}
	}
	*ptep = pte;
	PTE_SYNC_CURRENT(pv->pv_pmap, ptep);
}
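
/*
 * pmap_fix_cache() below drives pmap_set_cache_entry(): it passes -1 to
 * force a conflicting mapping uncacheable and 1 to restore the normal
 * cache mode once the conflict has gone away.
 */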
static void
pmap_fix_cache(struct vm_page *pg, pmap_t pm, vm_offset_t va)
{
	int pmwc = 0;
	int writable = 0, kwritable = 0, uwritable = 0;
	int entries = 0, kentries = 0, uentries = 0;
	struct pv_entry *pv;

	rw_assert(&pvh_global_lock, RA_WLOCKED);

	/* the cache gets written back/invalidated on context switch.
	 * therefore, if a user page shares an entry in the same page or
	 * with the kernel map and at least one is writable, then the
	 * cache entry must be set write-through.
	 */

	TAILQ_FOREACH(pv, &pg->md.pv_list, pv_list) {
		/* generate a count of the pv_entry uses */
		if (pv->pv_flags & PVF_WRITE) {
			if (pv->pv_pmap == kernel_pmap)
				kwritable++;
			else if (pv->pv_pmap == pm)
				uwritable++;
			writable++;
		}
		if (pv->pv_pmap == kernel_pmap)
			kentries++;
		else {
			if (pv->pv_pmap == pm)
				uentries++;
			entries++;
		}
	}
	/*
	 * check if the user duplicate mapping has
	 * been removed.
	 */
	if ((pm != kernel_pmap) && (((uentries > 1) && uwritable) ||
	    (uwritable > 1)))
		pmwc = 1;

	TAILQ_FOREACH(pv, &pg->md.pv_list, pv_list) {
		/* check for user uncacheable conditions - order is important */
		if (pm != kernel_pmap &&
		    (pv->pv_pmap == pm || pv->pv_pmap == kernel_pmap)) {

			if ((uentries > 1 && uwritable) || uwritable > 1) {

				/* user duplicate mapping */
				if (pv->pv_pmap != kernel_pmap)
					pv->pv_flags |= PVF_MWC;

				if (!(pv->pv_flags & PVF_NC)) {
					pv->pv_flags |= PVF_NC;
					pmap_set_cache_entry(pv, pm, va, -1);
				}
				continue;
			} else	/* no longer a duplicate user */
				pv->pv_flags &= ~PVF_MWC;
		}

		/*
		 * check for kernel uncacheable conditions
		 * kernel writable or kernel readable with writable user entry
		 */
		if ((kwritable && (entries || kentries > 1)) ||
		    (kwritable > 1) ||
		    ((kwritable != writable) && kentries &&
		     (pv->pv_pmap == kernel_pmap ||
		      (pv->pv_flags & PVF_WRITE) ||
		      (pv->pv_flags & PVF_MWC)))) {

			if (!(pv->pv_flags & PVF_NC)) {
				pv->pv_flags |= PVF_NC;
				pmap_set_cache_entry(pv, pm, va, -1);
			}
			continue;
		}

		/* kernel and user are cacheable */
		if ((pm == kernel_pmap) && !(pv->pv_flags & PVF_MWC) &&
		    (pv->pv_flags & PVF_NC)) {

			pv->pv_flags &= ~PVF_NC;
			if (pg->md.pv_memattr != VM_MEMATTR_UNCACHEABLE)
				pmap_set_cache_entry(pv, pm, va, 1);
			continue;
		}
		/* user is no longer sharable and writable */
		if (pm != kernel_pmap &&
		    (pv->pv_pmap == pm || pv->pv_pmap == kernel_pmap) &&
		    !pmwc && (pv->pv_flags & PVF_NC)) {

			pv->pv_flags &= ~(PVF_NC | PVF_MWC);
			if (pg->md.pv_memattr != VM_MEMATTR_UNCACHEABLE)
				pmap_set_cache_entry(pv, pm, va, 1);
		}
	}

	if ((kwritable == 0) && (writable == 0)) {
		pg->md.pvh_attrs &= ~PVF_MOD;
		vm_page_aflag_clear(pg, PGA_WRITEABLE);
		return;
	}
}
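
/*
 * In short, the policy implemented above: a page becomes uncacheable
 * when it is mapped writable at more than one VA within the same user
 * address space, or when a writable kernel mapping coexists with other
 * mappings of the page; once the conflicting mappings disappear, the
 * surviving entries are flipped back to cacheable.
 */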

/*
 * Modify pte bits for all ptes corresponding to the given physical address.
 * We use `maskbits' rather than `clearbits' because we're always passing
 * constants and the latter would require an extra inversion at run-time.
 */
static int
pmap_clearbit(struct vm_page *pg, u_int maskbits)
{
	struct l2_bucket *l2b;
	struct pv_entry *pv;
	pt_entry_t *ptep, npte, opte;
	pmap_t pm;
	vm_offset_t va;
	u_int oflags;
	int count = 0;

	rw_wlock(&pvh_global_lock);

	if (maskbits & PVF_WRITE)
		maskbits |= PVF_MOD;
	/*
	 * Clear saved attributes (modify, reference)
	 */
	pg->md.pvh_attrs &= ~(maskbits & (PVF_MOD | PVF_REF));

	if (TAILQ_EMPTY(&pg->md.pv_list)) {
		rw_wunlock(&pvh_global_lock);
		return (0);
	}

	/*
	 * Loop over all current mappings setting/clearing as appropriate
	 */
	TAILQ_FOREACH(pv, &pg->md.pv_list, pv_list) {
		va = pv->pv_va;
		pm = pv->pv_pmap;
		oflags = pv->pv_flags;

		if (!(oflags & maskbits)) {
			if ((maskbits & PVF_WRITE) && (pv->pv_flags & PVF_NC)) {
				if (pg->md.pv_memattr !=
				    VM_MEMATTR_UNCACHEABLE) {
					PMAP_LOCK(pm);
					l2b = pmap_get_l2_bucket(pm, va);
					ptep = &l2b->l2b_kva[l2pte_index(va)];
					*ptep |= pte_l2_s_cache_mode;
					PTE_SYNC(ptep);
					PMAP_UNLOCK(pm);
				}
				pv->pv_flags &= ~(PVF_NC | PVF_MWC);
			}
			continue;
		}
		pv->pv_flags &= ~maskbits;

		PMAP_LOCK(pm);

		l2b = pmap_get_l2_bucket(pm, va);

		ptep = &l2b->l2b_kva[l2pte_index(va)];
		npte = opte = *ptep;

		if (maskbits & (PVF_WRITE|PVF_MOD)) {
			if ((pv->pv_flags & PVF_NC)) {
				/*
				 * Entry is not cacheable:
				 *
				 * Don't turn caching on again if this is a
				 * modified emulation. This would be
				 * inconsistent with the settings created by
				 * pmap_fix_cache(). Otherwise, it's safe
				 * to re-enable caching.
				 *
				 * There's no need to call pmap_fix_cache()
				 * here: all pages are losing their write
				 * permission.
				 */
				if (maskbits & PVF_WRITE) {
					if (pg->md.pv_memattr !=
					    VM_MEMATTR_UNCACHEABLE)
						npte |= pte_l2_s_cache_mode;
					pv->pv_flags &= ~(PVF_NC | PVF_MWC);
				}
			} else
			if (opte & L2_S_PROT_W) {
				vm_page_dirty(pg);
				/*
				 * Entry is writable/cacheable: check if pmap
				 * is current if it is flush it, otherwise it
				 * won't be in the cache
				 */
				if (PV_BEEN_EXECD(oflags))
					pmap_idcache_wbinv_range(pm, pv->pv_va,
					    PAGE_SIZE);
				else
				if (PV_BEEN_REFD(oflags))
					pmap_dcache_wb_range(pm, pv->pv_va,
					    PAGE_SIZE,
					    (maskbits & PVF_REF) ? TRUE : FALSE,
					    FALSE);
			}

			/* make the pte read only */
			npte &= ~L2_S_PROT_W;
		}

		if (maskbits & PVF_REF) {
			if ((pv->pv_flags & PVF_NC) == 0 &&
			    (maskbits & (PVF_WRITE|PVF_MOD)) == 0) {
				/*
				 * Check npte here; we may have already
				 * done the wbinv above, and the validity
				 * of the PTE is the same for opte and
				 * npte.
				 */
				if (npte & L2_S_PROT_W) {
					if (PV_BEEN_EXECD(oflags))
						pmap_idcache_wbinv_range(pm,
						    pv->pv_va, PAGE_SIZE);
					else
					if (PV_BEEN_REFD(oflags))
						pmap_dcache_wb_range(pm,
						    pv->pv_va, PAGE_SIZE,
						    TRUE, FALSE);
				} else
				if ((npte & L2_TYPE_MASK) != L2_TYPE_INV) {
					/* XXXJRT need idcache_inv_range */
					if (PV_BEEN_EXECD(oflags))
						pmap_idcache_wbinv_range(pm,
						    pv->pv_va, PAGE_SIZE);
					else
					if (PV_BEEN_REFD(oflags))
						pmap_dcache_wb_range(pm,
						    pv->pv_va, PAGE_SIZE,
						    TRUE, TRUE);
				}
			}

			/*
			 * Make the PTE invalid so that we will take a
			 * page fault the next time the mapping is
			 * referenced.
			 */
			npte &= ~L2_TYPE_MASK;
			npte |= L2_TYPE_INV;
		}

		if (npte != opte) {
			count++;
			*ptep = npte;
			PTE_SYNC(ptep);
			/* Flush the TLB entry if a current pmap. */
			if (PV_BEEN_EXECD(oflags))
				pmap_tlb_flushID_SE(pm, pv->pv_va);
			else
			if (PV_BEEN_REFD(oflags))
				pmap_tlb_flushD_SE(pm, pv->pv_va);
		}

		PMAP_UNLOCK(pm);

	}

	if (maskbits & PVF_WRITE)
		vm_page_aflag_clear(pg, PGA_WRITEABLE);
	rw_wunlock(&pvh_global_lock);
	return (count);
}
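
/*
 * Callers later in this file use pmap_clearbit() for the usual MI pmap
 * operations; a sketch of the typical calls:
 *
 *	pmap_clearbit(pg, PVF_WRITE);	// write-protect all mappings
 *	pmap_clearbit(pg, PVF_REF);	// invalidate, forcing referenced
 *					// emulation on the next access
 */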

/*
 * main pv_entry manipulation functions:
 *   pmap_enter_pv: enter a mapping onto a vm_page list
 *   pmap_remove_pv: remove a mapping from a vm_page list
 *
 * NOTE: pmap_enter_pv expects to lock the pvh itself
 *       pmap_remove_pv expects the caller to lock the pvh before calling
 */

/*
 * pmap_enter_pv: enter a mapping onto a vm_page's PV list
 *
 * => caller should hold the proper lock on pvh_global_lock
 * => caller should have pmap locked
 * => we will (someday) gain the lock on the vm_page's PV list
 * => caller should adjust ptp's wire_count before calling
 * => caller should not adjust pmap's wire_count
 */
static void
pmap_enter_pv(struct vm_page *pg, struct pv_entry *pve, pmap_t pm,
    vm_offset_t va, u_int flags)
{

	rw_assert(&pvh_global_lock, RA_WLOCKED);
	PMAP_ASSERT_LOCKED(pm);
	if (pg->md.pv_kva != 0) {
		pve->pv_pmap = kernel_pmap;
		pve->pv_va = pg->md.pv_kva;
		pve->pv_flags = PVF_WRITE | PVF_UNMAN;
		if (pm != kernel_pmap)
			PMAP_LOCK(kernel_pmap);
		TAILQ_INSERT_HEAD(&pg->md.pv_list, pve, pv_list);
		TAILQ_INSERT_HEAD(&kernel_pmap->pm_pvlist, pve, pv_plist);
		if (pm != kernel_pmap)
			PMAP_UNLOCK(kernel_pmap);
		pg->md.pv_kva = 0;
		if ((pve = pmap_get_pv_entry()) == NULL)
			panic("pmap_kenter_pv: no pv entries");
	}
	pve->pv_pmap = pm;
	pve->pv_va = va;
	pve->pv_flags = flags;
	TAILQ_INSERT_HEAD(&pg->md.pv_list, pve, pv_list);
	TAILQ_INSERT_HEAD(&pm->pm_pvlist, pve, pv_plist);
	pg->md.pvh_attrs |= flags & (PVF_REF | PVF_MOD);
	if (pve->pv_flags & PVF_WIRED)
		++pm->pm_stats.wired_count;
	vm_page_aflag_set(pg, PGA_REFERENCED);
}

/*
 *
 * pmap_find_pv: Find a pv entry
 *
 * => caller should hold lock on vm_page
 */
static PMAP_INLINE struct pv_entry *
pmap_find_pv(struct vm_page *pg, pmap_t pm, vm_offset_t va)
{
	struct pv_entry *pv;

	rw_assert(&pvh_global_lock, RA_WLOCKED);
	TAILQ_FOREACH(pv, &pg->md.pv_list, pv_list)
	    if (pm == pv->pv_pmap && va == pv->pv_va)
		    break;
	return (pv);
}

/*
 * vector_page_setprot:
 *
 *	Manipulate the protection of the vector page.
 */
void
vector_page_setprot(int prot)
{
	struct l2_bucket *l2b;
	pt_entry_t *ptep;

	l2b = pmap_get_l2_bucket(kernel_pmap, vector_page);

	ptep = &l2b->l2b_kva[l2pte_index(vector_page)];

	*ptep = (*ptep & ~L1_S_PROT_MASK) | L2_S_PROT(PTE_KERNEL, prot);
	PTE_SYNC(ptep);
	cpu_tlb_flushD_SE(vector_page);
	cpu_cpwait();
}

/*
 * pmap_remove_pv: try to remove a mapping from a pv_list
 *
 * => caller should hold proper lock on pmap_main_lock
 * => pmap should be locked
 * => caller should hold lock on vm_page [so that attrs can be adjusted]
 * => caller should adjust ptp's wire_count and free PTP if needed
 * => caller should NOT adjust pmap's wire_count
 * => we return the removed pve
 */

static void
pmap_nuke_pv(struct vm_page *pg, pmap_t pm, struct pv_entry *pve)
{

	struct pv_entry *pv;
	rw_assert(&pvh_global_lock, RA_WLOCKED);
	PMAP_ASSERT_LOCKED(pm);
	TAILQ_REMOVE(&pg->md.pv_list, pve, pv_list);
	TAILQ_REMOVE(&pm->pm_pvlist, pve, pv_plist);
	if (pve->pv_flags & PVF_WIRED)
		--pm->pm_stats.wired_count;
	if (pg->md.pvh_attrs & PVF_MOD)
		vm_page_dirty(pg);
	if (TAILQ_FIRST(&pg->md.pv_list) == NULL)
		pg->md.pvh_attrs &= ~PVF_REF;
	else
		vm_page_aflag_set(pg, PGA_REFERENCED);
	if ((pve->pv_flags & PVF_NC) && ((pm == kernel_pmap) ||
	     (pve->pv_flags & PVF_WRITE) || !(pve->pv_flags & PVF_MWC)))
		pmap_fix_cache(pg, pm, 0);
	else if (pve->pv_flags & PVF_WRITE) {
		TAILQ_FOREACH(pve, &pg->md.pv_list, pv_list)
		    if (pve->pv_flags & PVF_WRITE)
			    break;
		if (!pve) {
			pg->md.pvh_attrs &= ~PVF_MOD;
			vm_page_aflag_clear(pg, PGA_WRITEABLE);
		}
	}
	pv = TAILQ_FIRST(&pg->md.pv_list);
	if (pv != NULL && (pv->pv_flags & PVF_UNMAN) &&
	    TAILQ_NEXT(pv, pv_list) == NULL) {
		pm = kernel_pmap;
		pg->md.pv_kva = pv->pv_va;
		/* a recursive pmap_nuke_pv */
		TAILQ_REMOVE(&pg->md.pv_list, pv, pv_list);
		TAILQ_REMOVE(&pm->pm_pvlist, pv, pv_plist);
		if (pv->pv_flags & PVF_WIRED)
			--pm->pm_stats.wired_count;
		pg->md.pvh_attrs &= ~PVF_REF;
		pg->md.pvh_attrs &= ~PVF_MOD;
		vm_page_aflag_clear(pg, PGA_WRITEABLE);
		pmap_free_pv_entry(pv);
	}
}

static struct pv_entry *
pmap_remove_pv(struct vm_page *pg, pmap_t pm, vm_offset_t va)
{
	struct pv_entry *pve;

	rw_assert(&pvh_global_lock, RA_WLOCKED);
	pve = TAILQ_FIRST(&pg->md.pv_list);

	while (pve) {
		if (pve->pv_pmap == pm && pve->pv_va == va) {	/* match? */
			pmap_nuke_pv(pg, pm, pve);
			break;
		}
		pve = TAILQ_NEXT(pve, pv_list);
	}

	if (pve == NULL && pg->md.pv_kva == va)
		pg->md.pv_kva = 0;

	return (pve);				/* return removed pve */
}
/*
 *
 * pmap_modify_pv: Update pv flags
 *
 * => caller should hold lock on vm_page [so that attrs can be adjusted]
 * => caller should NOT adjust pmap's wire_count
 * => we return the old flags
 *
 * Modify a physical-virtual mapping in the pv table
 */
static u_int
pmap_modify_pv(struct vm_page *pg, pmap_t pm, vm_offset_t va,
    u_int clr_mask, u_int set_mask)
{
	struct pv_entry *npv;
	u_int flags, oflags;

	PMAP_ASSERT_LOCKED(pm);
	rw_assert(&pvh_global_lock, RA_WLOCKED);
	if ((npv = pmap_find_pv(pg, pm, va)) == NULL)
		return (0);

	/*
	 * There is at least one VA mapping this page.
	 */

	if (clr_mask & (PVF_REF | PVF_MOD))
		pg->md.pvh_attrs |= set_mask & (PVF_REF | PVF_MOD);

	oflags = npv->pv_flags;
	npv->pv_flags = flags = (oflags & ~clr_mask) | set_mask;

	if ((flags ^ oflags) & PVF_WIRED) {
		if (flags & PVF_WIRED)
			++pm->pm_stats.wired_count;
		else
			--pm->pm_stats.wired_count;
	}

	if ((flags ^ oflags) & PVF_WRITE)
		pmap_fix_cache(pg, pm, 0);

	return (oflags);
}

/* Function to set the debug level of the pmap code */
#ifdef PMAP_DEBUG
void
pmap_debug(int level)
{
	pmap_debug_level = level;
	dprintf("pmap_debug: level=%d\n", pmap_debug_level);
}
#endif  /* PMAP_DEBUG */

void
pmap_pinit0(struct pmap *pmap)
{
	PDEBUG(1, printf("pmap_pinit0: pmap = %08x\n", (u_int32_t) pmap));

	bcopy(kernel_pmap, pmap, sizeof(*pmap));
	bzero(&pmap->pm_mtx, sizeof(pmap->pm_mtx));
	PMAP_LOCK_INIT(pmap);
}

/*
 * Initialize a vm_page's machine-dependent fields.
 */
void
pmap_page_init(vm_page_t m)
{

	TAILQ_INIT(&m->md.pv_list);
	m->md.pv_memattr = VM_MEMATTR_DEFAULT;
}

/*
 * Initialize the pmap module.
 * Called by vm_init, to initialize any structures that the pmap
 * system needs to map virtual memory.
 */
void
pmap_init(void)
{
	int shpgperproc = PMAP_SHPGPERPROC;

	l2zone = uma_zcreate("L2 Table", L2_TABLE_SIZE_REAL, pmap_l2ptp_ctor,
	    NULL, NULL, NULL, UMA_ALIGN_PTR, UMA_ZONE_VM | UMA_ZONE_NOFREE);
	l2table_zone = uma_zcreate("L2 Table", sizeof(struct l2_dtable), NULL,
	    NULL, NULL, NULL, UMA_ALIGN_PTR, UMA_ZONE_VM | UMA_ZONE_NOFREE);

	/*
	 * Initialize the PV entry allocator.
	 */
	pvzone = uma_zcreate("PV ENTRY", sizeof (struct pv_entry), NULL, NULL,
	    NULL, NULL, UMA_ALIGN_PTR, UMA_ZONE_VM | UMA_ZONE_NOFREE);
	TUNABLE_INT_FETCH("vm.pmap.shpgperproc", &shpgperproc);
	pv_entry_max = shpgperproc * maxproc + vm_cnt.v_page_count;
	uma_zone_reserve_kva(pvzone, pv_entry_max);
	pv_entry_high_water = 9 * (pv_entry_max / 10);

	/*
	 * Now it is safe to enable pv_table recording.
	 */
	PDEBUG(1, printf("pmap_init: done!\n"));
}
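
/*
 * Example sizing for the limits computed above: with the default
 * PMAP_SHPGPERPROC of 200 and, say, maxproc == 1000 on a board with
 * 131072 pages (512MB of RAM), pv_entry_max works out to
 * 200 * 1000 + 131072 = 331072 entries, and pv_entry_high_water to
 * 90% of that.
 */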

int
pmap_fault_fixup(pmap_t pm, vm_offset_t va, vm_prot_t ftype, int user)
{
	struct l2_dtable *l2;
	struct l2_bucket *l2b;
	pd_entry_t *pl1pd, l1pd;
	pt_entry_t *ptep, pte;
	vm_paddr_t pa;
	u_int l1idx;
	int rv = 0;

	l1idx = L1_IDX(va);
	rw_wlock(&pvh_global_lock);
	PMAP_LOCK(pm);

	/*
	 * If there is no l2_dtable for this address, then the process
	 * has no business accessing it.
	 *
	 * Note: This will catch userland processes trying to access
	 * kernel addresses.
	 */
	l2 = pm->pm_l2[L2_IDX(l1idx)];
	if (l2 == NULL)
		goto out;

	/*
	 * Likewise if there is no L2 descriptor table
	 */
	l2b = &l2->l2_bucket[L2_BUCKET(l1idx)];
	if (l2b->l2b_kva == NULL)
		goto out;

	/*
	 * Check the PTE itself.
	 */
	ptep = &l2b->l2b_kva[l2pte_index(va)];
	pte = *ptep;
	if (pte == 0)
		goto out;

	/*
	 * Catch a userland access to the vector page mapped at 0x0
	 */
	if (user && (pte & L2_S_PROT_U) == 0)
		goto out;
	if (va == vector_page)
		goto out;

	pa = l2pte_pa(pte);

	if ((ftype & VM_PROT_WRITE) && (pte & L2_S_PROT_W) == 0) {
		/*
		 * This looks like a good candidate for "page modified"
		 * emulation...
		 */
		struct pv_entry *pv;
		struct vm_page *pg;

		/* Extract the physical address of the page */
		if ((pg = PHYS_TO_VM_PAGE(pa)) == NULL) {
			goto out;
		}
		/* Get the current flags for this page. */

		pv = pmap_find_pv(pg, pm, va);
		if (pv == NULL) {
			goto out;
		}

		/*
		 * Do the flags say this page is writable? If not then it
		 * is a genuine write fault. If yes then the write fault is
		 * our fault as we did not reflect the write access in the
		 * PTE. Now we know a write has occurred we can correct this
		 * and also set the modified bit
		 */
		if ((pv->pv_flags & PVF_WRITE) == 0) {
			goto out;
		}

		pg->md.pvh_attrs |= PVF_REF | PVF_MOD;
		vm_page_dirty(pg);
		pv->pv_flags |= PVF_REF | PVF_MOD;

		/*
		 * Re-enable write permissions for the page.  No need to call
		 * pmap_fix_cache(), since this is just a
		 * modified-emulation fault, and the PVF_WRITE bit isn't
		 * changing. We've already set the cacheable bits based on
		 * the assumption that we can write to this page.
		 */
		*ptep = (pte & ~L2_TYPE_MASK) | L2_S_PROTO | L2_S_PROT_W;
		PTE_SYNC(ptep);
		rv = 1;
	} else
	if ((pte & L2_TYPE_MASK) == L2_TYPE_INV) {
		/*
		 * This looks like a good candidate for "page referenced"
		 * emulation.
		 */
		struct pv_entry *pv;
		struct vm_page *pg;

		/* Extract the physical address of the page */
		if ((pg = PHYS_TO_VM_PAGE(pa)) == NULL)
			goto out;
		/* Get the current flags for this page. */

		pv = pmap_find_pv(pg, pm, va);
		if (pv == NULL)
			goto out;

		pg->md.pvh_attrs |= PVF_REF;
		pv->pv_flags |= PVF_REF;


		*ptep = (pte & ~L2_TYPE_MASK) | L2_S_PROTO;
		PTE_SYNC(ptep);
		rv = 1;
	}

	/*
	 * We know there is a valid mapping here, so simply
	 * fix up the L1 if necessary.
	 */
	pl1pd = &pm->pm_l1->l1_kva[l1idx];
	l1pd = l2b->l2b_phys | L1_C_DOM(pm->pm_domain) | L1_C_PROTO;
	if (*pl1pd != l1pd) {
		*pl1pd = l1pd;
		PTE_SYNC(pl1pd);
		rv = 1;
	}

#ifdef DEBUG
	/*
	 * If 'rv == 0' at this point, it generally indicates that there is a
	 * stale TLB entry for the faulting address. This happens when two or
	 * more processes are sharing an L1. Since we don't flush the TLB on
	 * a context switch between such processes, we can take domain faults
	 * for mappings which exist at the same VA in both processes. EVEN IF
	 * WE'VE RECENTLY FIXED UP THE CORRESPONDING L1 in pmap_enter(), for
	 * example.
	 *
	 * This is extremely likely to happen if pmap_enter() updated the L1
	 * entry for a recently entered mapping. In this case, the TLB is
	 * flushed for the new mapping, but there may still be TLB entries for
	 * other mappings belonging to other processes in the 1MB range
	 * covered by the L1 entry.
1912 * 1913 * Since 'rv == 0', we know that the L1 already contains the correct 1914 * value, so the fault must be due to a stale TLB entry. 1915 * 1916 * Since we always need to flush the TLB anyway in the case where we 1917 * fixed up the L1, or frobbed the L2 PTE, we effectively deal with 1918 * stale TLB entries dynamically. 1919 * 1920 * However, the above condition can ONLY happen if the current L1 is 1921 * being shared. If it happens when the L1 is unshared, it indicates 1922 * that other parts of the pmap are not doing their job WRT managing 1923 * the TLB. 1924 */ 1925 if (rv == 0 && pm->pm_l1->l1_domain_use_count == 1) { 1926 printf("fixup: pm %p, va 0x%lx, ftype %d - nothing to do!\n", 1927 pm, (u_long)va, ftype); 1928 printf("fixup: l2 %p, l2b %p, ptep %p, pl1pd %p\n", 1929 l2, l2b, ptep, pl1pd); 1930 printf("fixup: pte 0x%x, l1pd 0x%x, last code 0x%x\n", 1931 pte, l1pd, last_fault_code); 1932#ifdef DDB 1933 Debugger(); 1934#endif 1935 } 1936#endif 1937 1938 cpu_tlb_flushID_SE(va); 1939 cpu_cpwait(); 1940 1941 rv = 1; 1942 1943out: 1944 rw_wunlock(&pvh_global_lock); 1945 PMAP_UNLOCK(pm); 1946 return (rv); 1947} 1948 1949void 1950pmap_postinit(void) 1951{ 1952 struct l2_bucket *l2b; 1953 struct l1_ttable *l1; 1954 pd_entry_t *pl1pt; 1955 pt_entry_t *ptep, pte; 1956 vm_offset_t va, eva; 1957 u_int loop, needed; 1958 1959 needed = (maxproc / PMAP_DOMAINS) + ((maxproc % PMAP_DOMAINS) ? 1 : 0); 1960 needed -= 1; 1961 l1 = malloc(sizeof(*l1) * needed, M_VMPMAP, M_WAITOK); 1962 1963 for (loop = 0; loop < needed; loop++, l1++) { 1964 /* Allocate a L1 page table */ 1965 va = (vm_offset_t)contigmalloc(L1_TABLE_SIZE, M_VMPMAP, 0, 0x0, 1966 0xffffffff, L1_TABLE_SIZE, 0); 1967 1968 if (va == 0) 1969 panic("Cannot allocate L1 KVM"); 1970 1971 eva = va + L1_TABLE_SIZE; 1972 pl1pt = (pd_entry_t *)va; 1973 1974 while (va < eva) { 1975 l2b = pmap_get_l2_bucket(kernel_pmap, va); 1976 ptep = &l2b->l2b_kva[l2pte_index(va)]; 1977 pte = *ptep; 1978 pte = (pte & ~L2_S_CACHE_MASK) | pte_l2_s_cache_mode_pt; 1979 *ptep = pte; 1980 PTE_SYNC(ptep); 1981 cpu_tlb_flushD_SE(va); 1982 1983 va += PAGE_SIZE; 1984 } 1985 pmap_init_l1(l1, pl1pt); 1986 } 1987 1988 1989#ifdef DEBUG 1990 printf("pmap_postinit: Allocated %d static L1 descriptor tables\n", 1991 needed); 1992#endif 1993} 1994 1995/* 1996 * This is used to stuff certain critical values into the PCB where they 1997 * can be accessed quickly from cpu_switch() et al. 
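 *
 * As a worked example (assuming the usual ARM encoding in which
 * DOMAIN_CLIENT == 1 and PMAP_DOMAIN_KERNEL == 0), a pmap assigned
 * domain 5 gets:
 *
 *	pcb_dacr = (1 << (0 * 2)) | (1 << (5 * 2)) = 0x00000401
 *
 * i.e. client access for the kernel's domain and for the pmap's own
 * domain, and no access for every other domain.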
1998 */ 1999void 2000pmap_set_pcb_pagedir(pmap_t pm, struct pcb *pcb) 2001{ 2002 struct l2_bucket *l2b; 2003 2004 pcb->pcb_pagedir = pm->pm_l1->l1_physaddr; 2005 pcb->pcb_dacr = (DOMAIN_CLIENT << (PMAP_DOMAIN_KERNEL * 2)) | 2006 (DOMAIN_CLIENT << (pm->pm_domain * 2)); 2007 2008 if (vector_page < KERNBASE) { 2009 pcb->pcb_pl1vec = &pm->pm_l1->l1_kva[L1_IDX(vector_page)]; 2010 l2b = pmap_get_l2_bucket(pm, vector_page); 2011 pcb->pcb_l1vec = l2b->l2b_phys | L1_C_PROTO | 2012 L1_C_DOM(pm->pm_domain) | L1_C_DOM(PMAP_DOMAIN_KERNEL); 2013 } else 2014 pcb->pcb_pl1vec = NULL; 2015} 2016 2017void 2018pmap_activate(struct thread *td) 2019{ 2020 pmap_t pm; 2021 struct pcb *pcb; 2022 2023 pm = vmspace_pmap(td->td_proc->p_vmspace); 2024 pcb = td->td_pcb; 2025 2026 critical_enter(); 2027 pmap_set_pcb_pagedir(pm, pcb); 2028 2029 if (td == curthread) { 2030 u_int cur_dacr, cur_ttb; 2031 2032 __asm __volatile("mrc p15, 0, %0, c2, c0, 0" : "=r"(cur_ttb)); 2033 __asm __volatile("mrc p15, 0, %0, c3, c0, 0" : "=r"(cur_dacr)); 2034 2035 cur_ttb &= ~(L1_TABLE_SIZE - 1); 2036 2037 if (cur_ttb == (u_int)pcb->pcb_pagedir && 2038 cur_dacr == pcb->pcb_dacr) { 2039 /* 2040 * No need to switch address spaces. 2041 */ 2042 critical_exit(); 2043 return; 2044 } 2045 2046 2047 /* 2048 * We MUST, I repeat, MUST fix up the L1 entry corresponding 2049 * to 'vector_page' in the incoming L1 table before switching 2050 * to it otherwise subsequent interrupts/exceptions (including 2051 * domain faults!) will jump into hyperspace. 2052 */ 2053 if (pcb->pcb_pl1vec) { 2054 2055 *pcb->pcb_pl1vec = pcb->pcb_l1vec; 2056 /* 2057 * Don't need to PTE_SYNC() at this point since 2058 * cpu_setttb() is about to flush both the cache 2059 * and the TLB. 2060 */ 2061 } 2062 2063 cpu_domains(pcb->pcb_dacr); 2064 cpu_setttb(pcb->pcb_pagedir); 2065 } 2066 critical_exit(); 2067} 2068 2069static int 2070pmap_set_pt_cache_mode(pd_entry_t *kl1, vm_offset_t va) 2071{ 2072 pd_entry_t *pdep, pde; 2073 pt_entry_t *ptep, pte; 2074 vm_offset_t pa; 2075 int rv = 0; 2076 2077 /* 2078 * Make sure the descriptor itself has the correct cache mode 2079 */ 2080 pdep = &kl1[L1_IDX(va)]; 2081 pde = *pdep; 2082 2083 if (l1pte_section_p(pde)) { 2084 if ((pde & L1_S_CACHE_MASK) != pte_l1_s_cache_mode_pt) { 2085 *pdep = (pde & ~L1_S_CACHE_MASK) | 2086 pte_l1_s_cache_mode_pt; 2087 PTE_SYNC(pdep); 2088 cpu_dcache_wbinv_range((vm_offset_t)pdep, 2089 sizeof(*pdep)); 2090 cpu_l2cache_wbinv_range((vm_offset_t)pdep, 2091 sizeof(*pdep)); 2092 rv = 1; 2093 } 2094 } else { 2095 pa = (vm_paddr_t)(pde & L1_C_ADDR_MASK); 2096 ptep = (pt_entry_t *)kernel_pt_lookup(pa); 2097 if (ptep == NULL) 2098 panic("pmap_bootstrap: No L2 for L2 @ va %p\n", ptep); 2099 2100 ptep = &ptep[l2pte_index(va)]; 2101 pte = *ptep; 2102 if ((pte & L2_S_CACHE_MASK) != pte_l2_s_cache_mode_pt) { 2103 *ptep = (pte & ~L2_S_CACHE_MASK) | 2104 pte_l2_s_cache_mode_pt; 2105 PTE_SYNC(ptep); 2106 cpu_dcache_wbinv_range((vm_offset_t)ptep, 2107 sizeof(*ptep)); 2108 cpu_l2cache_wbinv_range((vm_offset_t)ptep, 2109 sizeof(*ptep)); 2110 rv = 1; 2111 } 2112 } 2113 2114 return (rv); 2115} 2116 2117static void 2118pmap_alloc_specials(vm_offset_t *availp, int pages, vm_offset_t *vap, 2119 pt_entry_t **ptep) 2120{ 2121 vm_offset_t va = *availp; 2122 struct l2_bucket *l2b; 2123 2124 if (ptep) { 2125 l2b = pmap_get_l2_bucket(kernel_pmap, va); 2126 if (l2b == NULL) 2127 panic("pmap_alloc_specials: no l2b for 0x%x", va); 2128 2129 *ptep = &l2b->l2b_kva[l2pte_index(va)]; 2130 } 2131 2132 *vap = va; 2133 *availp = va + (PAGE_SIZE * 
pages); 2134} 2135 2136/* 2137 * Bootstrap the system enough to run with virtual memory. 2138 * 2139 * On the arm this is called after mapping has already been enabled 2140 * and just syncs the pmap module with what has already been done. 2141 * [We can't call it easily with mapping off since the kernel is not 2142 * mapped with PA == VA, hence we would have to relocate every address 2143 * from the linked base (virtual) address "KERNBASE" to the actual 2144 * (physical) address starting relative to 0] 2145 */ 2146#define PMAP_STATIC_L2_SIZE 16 2147void 2148pmap_bootstrap(vm_offset_t firstaddr, struct pv_addr *l1pt) 2149{ 2150 static struct l1_ttable static_l1; 2151 static struct l2_dtable static_l2[PMAP_STATIC_L2_SIZE]; 2152 struct l1_ttable *l1 = &static_l1; 2153 struct l2_dtable *l2; 2154 struct l2_bucket *l2b; 2155 pd_entry_t pde; 2156 pd_entry_t *kernel_l1pt = (pd_entry_t *)l1pt->pv_va; 2157 pt_entry_t *ptep; 2158 pt_entry_t *qmap_pte; 2159 vm_paddr_t pa; 2160 vm_offset_t va; 2161 vm_size_t size; 2162 int l1idx, l2idx, l2next = 0; 2163 2164 PDEBUG(1, printf("firstaddr = %08x, lastaddr = %08x\n", 2165 firstaddr, vm_max_kernel_address)); 2166 2167 virtual_avail = firstaddr; 2168 kernel_pmap->pm_l1 = l1; 2169 kernel_l1pa = l1pt->pv_pa; 2170 2171 /* 2172 * Scan the L1 translation table created by initarm() and create 2173 * the required metadata for all valid mappings found in it. 2174 */ 2175 for (l1idx = 0; l1idx < (L1_TABLE_SIZE / sizeof(pd_entry_t)); l1idx++) { 2176 pde = kernel_l1pt[l1idx]; 2177 2178 /* 2179 * We're only interested in Coarse mappings. 2180 * pmap_extract() can deal with section mappings without 2181 * recourse to checking L2 metadata. 2182 */ 2183 if ((pde & L1_TYPE_MASK) != L1_TYPE_C) 2184 continue; 2185 2186 /* 2187 * Lookup the KVA of this L2 descriptor table 2188 */ 2189 pa = (vm_paddr_t)(pde & L1_C_ADDR_MASK); 2190 ptep = (pt_entry_t *)kernel_pt_lookup(pa); 2191 2192 if (ptep == NULL) { 2193 panic("pmap_bootstrap: No L2 for va 0x%x, pa 0x%lx", 2194 (u_int)l1idx << L1_S_SHIFT, (long unsigned int)pa); 2195 } 2196 2197 /* 2198 * Fetch the associated L2 metadata structure. 2199 * Allocate a new one if necessary. 2200 */ 2201 if ((l2 = kernel_pmap->pm_l2[L2_IDX(l1idx)]) == NULL) { 2202 if (l2next == PMAP_STATIC_L2_SIZE) 2203 panic("pmap_bootstrap: out of static L2s"); 2204 kernel_pmap->pm_l2[L2_IDX(l1idx)] = l2 = 2205 &static_l2[l2next++]; 2206 } 2207 2208 /* 2209 * One more L1 slot tracked... 2210 */ 2211 l2->l2_occupancy++; 2212 2213 /* 2214 * Fill in the details of the L2 descriptor in the 2215 * appropriate bucket. 2216 */ 2217 l2b = &l2->l2_bucket[L2_BUCKET(l1idx)]; 2218 l2b->l2b_kva = ptep; 2219 l2b->l2b_phys = pa; 2220 l2b->l2b_l1idx = l1idx; 2221 2222 /* 2223 * Establish an initial occupancy count for this descriptor 2224 */ 2225 for (l2idx = 0; 2226 l2idx < (L2_TABLE_SIZE_REAL / sizeof(pt_entry_t)); 2227 l2idx++) { 2228 if ((ptep[l2idx] & L2_TYPE_MASK) != L2_TYPE_INV) { 2229 l2b->l2b_occupancy++; 2230 } 2231 } 2232 2233 /* 2234 * Make sure the descriptor itself has the correct cache mode. 2235 * If not, fix it, but whine about the problem. Port-meisters 2236 * should consider this a clue to fix up their initarm() 2237 * function. :) 2238 */ 2239 if (pmap_set_pt_cache_mode(kernel_l1pt, (vm_offset_t)ptep)) { 2240 printf("pmap_bootstrap: WARNING! wrong cache mode for " 2241 "L2 pte @ %p\n", ptep); 2242 } 2243 } 2244 2245 2246 /* 2247 * Ensure the primary (kernel) L1 has the correct cache mode for 2248 * a page table. Bitch if it is not correctly set. 
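 *
 * (With the usual 16KB ARM L1 table and 4KB pages, the loop below is
 * L1_TABLE_SIZE / PAGE_SIZE == 0x4000 / 0x1000 == 4 iterations, one
 * pmap_set_pt_cache_mode() call per page of the table.)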
2249 */ 2250 for (va = (vm_offset_t)kernel_l1pt; 2251 va < ((vm_offset_t)kernel_l1pt + L1_TABLE_SIZE); va += PAGE_SIZE) { 2252 if (pmap_set_pt_cache_mode(kernel_l1pt, va)) 2253 printf("pmap_bootstrap: WARNING! wrong cache mode for " 2254 "primary L1 @ 0x%x\n", va); 2255 } 2256 2257 cpu_dcache_wbinv_all(); 2258 cpu_l2cache_wbinv_all(); 2259 cpu_tlb_flushID(); 2260 cpu_cpwait(); 2261 2262 PMAP_LOCK_INIT(kernel_pmap); 2263 CPU_FILL(&kernel_pmap->pm_active); 2264 kernel_pmap->pm_domain = PMAP_DOMAIN_KERNEL; 2265 TAILQ_INIT(&kernel_pmap->pm_pvlist); 2266 2267 /* 2268 * Initialize the global pv list lock. 2269 */ 2270 rw_init_flags(&pvh_global_lock, "pmap pv global", RW_RECURSE); 2271 2272 /* 2273 * Reserve some special page table entries/VA space for temporary 2274 * mapping of pages. 2275 */ 2276 pmap_alloc_specials(&virtual_avail, 1, &csrcp, &csrc_pte); 2277 pmap_set_pt_cache_mode(kernel_l1pt, (vm_offset_t)csrc_pte); 2278 pmap_alloc_specials(&virtual_avail, 1, &cdstp, &cdst_pte); 2279 pmap_set_pt_cache_mode(kernel_l1pt, (vm_offset_t)cdst_pte); 2280 pmap_alloc_specials(&virtual_avail, 1, &qmap_addr, &qmap_pte); 2281 pmap_set_pt_cache_mode(kernel_l1pt, (vm_offset_t)qmap_pte); 2282 size = ((vm_max_kernel_address - pmap_curmaxkvaddr) + L1_S_OFFSET) / 2283 L1_S_SIZE; 2284 pmap_alloc_specials(&virtual_avail, 2285 round_page(size * L2_TABLE_SIZE_REAL) / PAGE_SIZE, 2286 &pmap_kernel_l2ptp_kva, NULL); 2287 2288 size = (size + (L2_BUCKET_SIZE - 1)) / L2_BUCKET_SIZE; 2289 pmap_alloc_specials(&virtual_avail, 2290 round_page(size * sizeof(struct l2_dtable)) / PAGE_SIZE, 2291 &pmap_kernel_l2dtable_kva, NULL); 2292 2293 pmap_alloc_specials(&virtual_avail, 2294 1, (vm_offset_t*)&_tmppt, NULL); 2295 pmap_alloc_specials(&virtual_avail, 2296 MAXDUMPPGS, (vm_offset_t *)&crashdumpmap, NULL); 2297 SLIST_INIT(&l1_list); 2298 TAILQ_INIT(&l1_lru_list); 2299 mtx_init(&l1_lru_lock, "l1 list lock", NULL, MTX_DEF); 2300 pmap_init_l1(l1, kernel_l1pt); 2301 cpu_dcache_wbinv_all(); 2302 cpu_l2cache_wbinv_all(); 2303 2304 virtual_avail = round_page(virtual_avail); 2305 virtual_end = vm_max_kernel_address; 2306 kernel_vm_end = pmap_curmaxkvaddr; 2307 mtx_init(&cmtx, "TMP mappings mtx", NULL, MTX_DEF); 2308 mtx_init(&qmap_mtx, "quick mapping mtx", NULL, MTX_DEF); 2309 2310 pmap_set_pcb_pagedir(kernel_pmap, thread0.td_pcb); 2311} 2312 2313/*************************************************** 2314 * Pmap allocation/deallocation routines. 2315 ***************************************************/ 2316 2317/* 2318 * Release any resources held by the given physical map. 2319 * Called when a pmap initialized by pmap_pinit is being released. 2320 * Should only be called if the map contains no valid mappings. 2321 */ 2322void 2323pmap_release(pmap_t pmap) 2324{ 2325 struct pcb *pcb; 2326 2327 pmap_idcache_wbinv_all(pmap); 2328 cpu_l2cache_wbinv_all(); 2329 pmap_tlb_flushID(pmap); 2330 cpu_cpwait(); 2331 if (vector_page < KERNBASE) { 2332 struct pcb *curpcb = PCPU_GET(curpcb); 2333 pcb = thread0.td_pcb; 2334 if (pmap_is_current(pmap)) { 2335 /* 2336 * Frob the L1 entry corresponding to the vector 2337 * page so that it contains the kernel pmap's domain 2338 * number. This will ensure pmap_remove() does not 2339 * pull the current vector page out from under us. 2340 */ 2341 critical_enter(); 2342 *pcb->pcb_pl1vec = pcb->pcb_l1vec; 2343 cpu_domains(pcb->pcb_dacr); 2344 cpu_setttb(pcb->pcb_pagedir); 2345 critical_exit(); 2346 } 2347 pmap_remove(pmap, vector_page, vector_page + PAGE_SIZE); 2348 /* 2349 * Make sure cpu_switch(), et al, DTRT. 
This is safe to do 2350 * since this process has no remaining mappings of its own. 2351 */ 2352 curpcb->pcb_pl1vec = pcb->pcb_pl1vec; 2353 curpcb->pcb_l1vec = pcb->pcb_l1vec; 2354 curpcb->pcb_dacr = pcb->pcb_dacr; 2355 curpcb->pcb_pagedir = pcb->pcb_pagedir; 2356 2357 } 2358 pmap_free_l1(pmap); 2359 2360 dprintf("pmap_release()\n"); 2361} 2362 2363 2364 2365/* 2366 * Helper function for pmap_grow_l2_bucket() 2367 */ 2368static __inline int 2369pmap_grow_map(vm_offset_t va, pt_entry_t cache_mode, vm_paddr_t *pap) 2370{ 2371 struct l2_bucket *l2b; 2372 pt_entry_t *ptep; 2373 vm_paddr_t pa; 2374 struct vm_page *pg; 2375 2376 pg = vm_page_alloc(NULL, 0, VM_ALLOC_NOOBJ | VM_ALLOC_WIRED); 2377 if (pg == NULL) 2378 return (1); 2379 pa = VM_PAGE_TO_PHYS(pg); 2380 2381 if (pap) 2382 *pap = pa; 2383 2384 l2b = pmap_get_l2_bucket(kernel_pmap, va); 2385 2386 ptep = &l2b->l2b_kva[l2pte_index(va)]; 2387 *ptep = L2_S_PROTO | pa | cache_mode | 2388 L2_S_PROT(PTE_KERNEL, VM_PROT_READ | VM_PROT_WRITE); 2389 PTE_SYNC(ptep); 2390 return (0); 2391} 2392 2393/* 2394 * This is the same as pmap_alloc_l2_bucket(), except that it is only 2395 * used by pmap_growkernel(). 2396 */ 2397static __inline struct l2_bucket * 2398pmap_grow_l2_bucket(pmap_t pm, vm_offset_t va) 2399{ 2400 struct l2_dtable *l2; 2401 struct l2_bucket *l2b; 2402 struct l1_ttable *l1; 2403 pd_entry_t *pl1pd; 2404 u_short l1idx; 2405 vm_offset_t nva; 2406 2407 l1idx = L1_IDX(va); 2408 2409 if ((l2 = pm->pm_l2[L2_IDX(l1idx)]) == NULL) { 2410 /* 2411 * No mapping at this address, as there is 2412 * no entry in the L1 table. 2413 * Need to allocate a new l2_dtable. 2414 */ 2415 nva = pmap_kernel_l2dtable_kva; 2416 if ((nva & PAGE_MASK) == 0) { 2417 /* 2418 * Need to allocate a backing page 2419 */ 2420 if (pmap_grow_map(nva, pte_l2_s_cache_mode, NULL)) 2421 return (NULL); 2422 } 2423 2424 l2 = (struct l2_dtable *)nva; 2425 nva += sizeof(struct l2_dtable); 2426 2427 if ((nva & PAGE_MASK) < (pmap_kernel_l2dtable_kva & 2428 PAGE_MASK)) { 2429 /* 2430 * The new l2_dtable straddles a page boundary. 2431 * Map in another page to cover it. 2432 */ 2433 if (pmap_grow_map(nva, pte_l2_s_cache_mode, NULL)) 2434 return (NULL); 2435 } 2436 2437 pmap_kernel_l2dtable_kva = nva; 2438 2439 /* 2440 * Link it into the parent pmap 2441 */ 2442 pm->pm_l2[L2_IDX(l1idx)] = l2; 2443 memset(l2, 0, sizeof(*l2)); 2444 } 2445 2446 l2b = &l2->l2_bucket[L2_BUCKET(l1idx)]; 2447 2448 /* 2449 * Fetch pointer to the L2 page table associated with the address. 2450 */ 2451 if (l2b->l2b_kva == NULL) { 2452 pt_entry_t *ptep; 2453 2454 /* 2455 * No L2 page table has been allocated. Chances are, this 2456 * is because we just allocated the l2_dtable, above. 
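 *
 * A sketch of the KVA carving done below (assuming the usual 1KB
 * L2_TABLE_SIZE_REAL and 4KB pages): pmap_kernel_l2ptp_kva advances
 * by one L2 table per call, so the "(nva & PAGE_MASK) == 0" test
 * fires on every fourth allocation, and only then does
 * pmap_grow_map() have to supply a fresh backing page.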
2457 */ 2458 nva = pmap_kernel_l2ptp_kva; 2459 ptep = (pt_entry_t *)nva; 2460 if ((nva & PAGE_MASK) == 0) { 2461 /* 2462 * Need to allocate a backing page 2463 */ 2464 if (pmap_grow_map(nva, pte_l2_s_cache_mode_pt, 2465 &pmap_kernel_l2ptp_phys)) 2466 return (NULL); 2467 PTE_SYNC_RANGE(ptep, PAGE_SIZE / sizeof(pt_entry_t)); 2468 } 2469 memset(ptep, 0, L2_TABLE_SIZE_REAL); 2470 l2->l2_occupancy++; 2471 l2b->l2b_kva = ptep; 2472 l2b->l2b_l1idx = l1idx; 2473 l2b->l2b_phys = pmap_kernel_l2ptp_phys; 2474 2475 pmap_kernel_l2ptp_kva += L2_TABLE_SIZE_REAL; 2476 pmap_kernel_l2ptp_phys += L2_TABLE_SIZE_REAL; 2477 } 2478 2479 /* Distribute new L1 entry to all other L1s */ 2480 SLIST_FOREACH(l1, &l1_list, l1_link) { 2481 pl1pd = &l1->l1_kva[L1_IDX(va)]; 2482 *pl1pd = l2b->l2b_phys | L1_C_DOM(PMAP_DOMAIN_KERNEL) | 2483 L1_C_PROTO; 2484 PTE_SYNC(pl1pd); 2485 } 2486 2487 return (l2b); 2488} 2489 2490 2491/* 2492 * grow the number of kernel page table entries, if needed 2493 */ 2494void 2495pmap_growkernel(vm_offset_t addr) 2496{ 2497 pmap_t kpm = kernel_pmap; 2498 2499 if (addr <= pmap_curmaxkvaddr) 2500 return; /* we are OK */ 2501 2502 /* 2503 * whoops! we need to add kernel PTPs 2504 */ 2505 2506 /* Map 1MB at a time */ 2507 for (; pmap_curmaxkvaddr < addr; pmap_curmaxkvaddr += L1_S_SIZE) 2508 pmap_grow_l2_bucket(kpm, pmap_curmaxkvaddr); 2509 2510 /* 2511 * flush out the cache, expensive but growkernel will happen so 2512 * rarely 2513 */ 2514 cpu_dcache_wbinv_all(); 2515 cpu_l2cache_wbinv_all(); 2516 cpu_tlb_flushD(); 2517 cpu_cpwait(); 2518 kernel_vm_end = pmap_curmaxkvaddr; 2519} 2520 2521 2522/* 2523 * Remove all pages from specified address space 2524 * this aids process exit speeds. Also, this code 2525 * is special cased for current process only, but 2526 * can have the more generic (and slightly slower) 2527 * mode enabled. This is much faster than pmap_remove 2528 * in the case of running down an entire address space. 2529 */ 2530void 2531pmap_remove_pages(pmap_t pmap) 2532{ 2533 struct pv_entry *pv, *npv; 2534 struct l2_bucket *l2b = NULL; 2535 vm_page_t m; 2536 pt_entry_t *pt; 2537 2538 rw_wlock(&pvh_global_lock); 2539 PMAP_LOCK(pmap); 2540 cpu_idcache_wbinv_all(); 2541 cpu_l2cache_wbinv_all(); 2542 for (pv = TAILQ_FIRST(&pmap->pm_pvlist); pv; pv = npv) { 2543 if (pv->pv_flags & PVF_WIRED || pv->pv_flags & PVF_UNMAN) { 2544 /* Cannot remove wired or unmanaged pages now. */ 2545 npv = TAILQ_NEXT(pv, pv_plist); 2546 continue; 2547 } 2548 pmap->pm_stats.resident_count--; 2549 l2b = pmap_get_l2_bucket(pmap, pv->pv_va); 2550 KASSERT(l2b != NULL, ("No L2 bucket in pmap_remove_pages")); 2551 pt = &l2b->l2b_kva[l2pte_index(pv->pv_va)]; 2552 m = PHYS_TO_VM_PAGE(*pt & L2_ADDR_MASK); 2553 KASSERT((vm_offset_t)m >= KERNBASE, ("Trying to access non-existent page va %x pte %x", pv->pv_va, *pt)); 2554 *pt = 0; 2555 PTE_SYNC(pt); 2556 npv = TAILQ_NEXT(pv, pv_plist); 2557 pmap_nuke_pv(m, pmap, pv); 2558 if (TAILQ_EMPTY(&m->md.pv_list)) 2559 vm_page_aflag_clear(m, PGA_WRITEABLE); 2560 pmap_free_pv_entry(pv); 2561 pmap_free_l2_bucket(pmap, l2b, 1); 2562 } 2563 rw_wunlock(&pvh_global_lock); 2564 cpu_tlb_flushID(); 2565 cpu_cpwait(); 2566 PMAP_UNLOCK(pmap); 2567} 2568 2569 2570/*************************************************** 2571 * Low level mapping routines..... 2572 ***************************************************/ 2573 2574#ifdef ARM_HAVE_SUPERSECTIONS 2575/* Map a super section into the KVA. 
*/ 2576 2577void 2578pmap_kenter_supersection(vm_offset_t va, uint64_t pa, int flags) 2579{ 2580 pd_entry_t pd = L1_S_PROTO | L1_S_SUPERSEC | (pa & L1_SUP_FRAME) | 2581 (((pa >> 32) & 0xf) << 20) | L1_S_PROT(PTE_KERNEL, 2582 VM_PROT_READ|VM_PROT_WRITE) | L1_S_DOM(PMAP_DOMAIN_KERNEL); 2583 struct l1_ttable *l1; 2584 vm_offset_t va0, va_end; 2585 2586 KASSERT(((va | pa) & L1_SUP_OFFSET) == 0, 2587 ("Not a valid super section mapping")); 2588 if (flags & SECTION_CACHE) 2589 pd |= pte_l1_s_cache_mode; 2590 else if (flags & SECTION_PT) 2591 pd |= pte_l1_s_cache_mode_pt; 2592 va0 = va & L1_SUP_FRAME; 2593 va_end = va + L1_SUP_SIZE; 2594 SLIST_FOREACH(l1, &l1_list, l1_link) { 2595 va = va0; 2596 for (; va < va_end; va += L1_S_SIZE) { 2597 l1->l1_kva[L1_IDX(va)] = pd; 2598 PTE_SYNC(&l1->l1_kva[L1_IDX(va)]); 2599 } 2600 } 2601} 2602#endif 2603 2604/* Map a section into the KVA. */ 2605 2606void 2607pmap_kenter_section(vm_offset_t va, vm_offset_t pa, int flags) 2608{ 2609 pd_entry_t pd = L1_S_PROTO | pa | L1_S_PROT(PTE_KERNEL, 2610 VM_PROT_READ|VM_PROT_WRITE) | L1_S_DOM(PMAP_DOMAIN_KERNEL); 2611 struct l1_ttable *l1; 2612 2613 KASSERT(((va | pa) & L1_S_OFFSET) == 0, 2614 ("Not a valid section mapping")); 2615 if (flags & SECTION_CACHE) 2616 pd |= pte_l1_s_cache_mode; 2617 else if (flags & SECTION_PT) 2618 pd |= pte_l1_s_cache_mode_pt; 2619 SLIST_FOREACH(l1, &l1_list, l1_link) { 2620 l1->l1_kva[L1_IDX(va)] = pd; 2621 PTE_SYNC(&l1->l1_kva[L1_IDX(va)]); 2622 } 2623} 2624 2625/* 2626 * Make a temporary mapping for a physical address. This is only intended 2627 * to be used for panic dumps. 2628 */ 2629void * 2630pmap_kenter_temporary(vm_paddr_t pa, int i) 2631{ 2632 vm_offset_t va; 2633 2634 va = (vm_offset_t)crashdumpmap + (i * PAGE_SIZE); 2635 pmap_kenter(va, pa); 2636 return ((void *)crashdumpmap); 2637} 2638 2639/* 2640 * add a wired page to the kva 2641 * note that in order for the mapping to take effect -- you 2642 * should do a invltlb after doing the pmap_kenter... 2643 */ 2644static PMAP_INLINE void 2645pmap_kenter_internal(vm_offset_t va, vm_offset_t pa, int flags) 2646{ 2647 struct l2_bucket *l2b; 2648 pt_entry_t *pte; 2649 pt_entry_t opte; 2650 struct pv_entry *pve; 2651 vm_page_t m; 2652 2653 PDEBUG(1, printf("pmap_kenter: va = %08x, pa = %08x\n", 2654 (uint32_t) va, (uint32_t) pa)); 2655 2656 2657 l2b = pmap_get_l2_bucket(kernel_pmap, va); 2658 if (l2b == NULL) 2659 l2b = pmap_grow_l2_bucket(kernel_pmap, va); 2660 KASSERT(l2b != NULL, ("No L2 Bucket")); 2661 pte = &l2b->l2b_kva[l2pte_index(va)]; 2662 opte = *pte; 2663 PDEBUG(1, printf("pmap_kenter: pte = %08x, opte = %08x, npte = %08x\n", 2664 (uint32_t) pte, opte, *pte)); 2665 if (l2pte_valid(opte)) { 2666 pmap_kremove(va); 2667 } else { 2668 if (opte == 0) 2669 l2b->l2b_occupancy++; 2670 } 2671 *pte = L2_S_PROTO | pa | L2_S_PROT(PTE_KERNEL, 2672 VM_PROT_READ | VM_PROT_WRITE); 2673 if (flags & KENTER_CACHE) 2674 *pte |= pte_l2_s_cache_mode; 2675 if (flags & KENTER_USER) 2676 *pte |= L2_S_PROT_U; 2677 PTE_SYNC(pte); 2678 2679 /* 2680 * A kernel mapping may not be the page's only mapping, so create a PV 2681 * entry to ensure proper caching. 2682 * 2683 * The existence test for the pvzone is used to delay the recording of 2684 * kernel mappings until the VM system is fully initialized. 2685 * 2686 * This expects the physical memory to have a vm_page_array entry. 
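 *
 * For illustration, a hypothetical caller establishing and tearing
 * down a temporary wired kernel mapping would do:
 *
 *	pmap_kenter(va, VM_PAGE_TO_PHYS(m));
 *	... use the mapping ...
 *	pmap_kremove(va);
 *
 * and the pv bookkeeping below is what keeps such an alias visible
 * to pmap_fix_cache().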
2687 */ 2688 if (pvzone != NULL && (m = vm_phys_paddr_to_vm_page(pa)) != NULL) { 2689 rw_wlock(&pvh_global_lock); 2690 if (!TAILQ_EMPTY(&m->md.pv_list) || m->md.pv_kva != 0) { 2691 if ((pve = pmap_get_pv_entry()) == NULL) 2692 panic("pmap_kenter_internal: no pv entries"); 2693 PMAP_LOCK(kernel_pmap); 2694 pmap_enter_pv(m, pve, kernel_pmap, va, 2695 PVF_WRITE | PVF_UNMAN); 2696 pmap_fix_cache(m, kernel_pmap, va); 2697 PMAP_UNLOCK(kernel_pmap); 2698 } else { 2699 m->md.pv_kva = va; 2700 } 2701 rw_wunlock(&pvh_global_lock); 2702 } 2703} 2704 2705void 2706pmap_kenter(vm_offset_t va, vm_paddr_t pa) 2707{ 2708 pmap_kenter_internal(va, pa, KENTER_CACHE); 2709} 2710 2711void 2712pmap_kenter_nocache(vm_offset_t va, vm_paddr_t pa) 2713{ 2714 2715 pmap_kenter_internal(va, pa, 0); 2716} 2717 2718void 2719pmap_kenter_device(vm_offset_t va, vm_size_t size, vm_paddr_t pa) 2720{ 2721 vm_offset_t sva; 2722 2723 KASSERT((size & PAGE_MASK) == 0, 2724 ("%s: device mapping not page-sized", __func__)); 2725 2726 sva = va; 2727 while (size != 0) { 2728 pmap_kenter_internal(va, pa, 0); 2729 va += PAGE_SIZE; 2730 pa += PAGE_SIZE; 2731 size -= PAGE_SIZE; 2732 } 2733} 2734 2735void 2736pmap_kremove_device(vm_offset_t va, vm_size_t size) 2737{ 2738 vm_offset_t sva; 2739 2740 KASSERT((size & PAGE_MASK) == 0, 2741 ("%s: device mapping not page-sized", __func__)); 2742 2743 sva = va; 2744 while (size != 0) { 2745 pmap_kremove(va); 2746 va += PAGE_SIZE; 2747 size -= PAGE_SIZE; 2748 } 2749} 2750 2751void 2752pmap_kenter_user(vm_offset_t va, vm_paddr_t pa) 2753{ 2754 2755 pmap_kenter_internal(va, pa, KENTER_CACHE|KENTER_USER); 2756 /* 2757 * Call pmap_fault_fixup now, to make sure we'll have no exception 2758 * at the first use of the new address, or bad things will happen, 2759 * as we use one of these addresses in the exception handlers. 2760 */ 2761 pmap_fault_fixup(kernel_pmap, va, VM_PROT_READ|VM_PROT_WRITE, 1); 2762} 2763 2764vm_paddr_t 2765pmap_kextract(vm_offset_t va) 2766{ 2767 2768 return (pmap_extract_locked(kernel_pmap, va)); 2769} 2770 2771/* 2772 * remove a page from the kernel pagetables 2773 */ 2774void 2775pmap_kremove(vm_offset_t va) 2776{ 2777 struct l2_bucket *l2b; 2778 pt_entry_t *pte, opte; 2779 struct pv_entry *pve; 2780 vm_page_t m; 2781 vm_offset_t pa; 2782 2783 l2b = pmap_get_l2_bucket(kernel_pmap, va); 2784 if (!l2b) 2785 return; 2786 KASSERT(l2b != NULL, ("No L2 Bucket")); 2787 pte = &l2b->l2b_kva[l2pte_index(va)]; 2788 opte = *pte; 2789 if (l2pte_valid(opte)) { 2790 /* pa = vtophs(va) taken from pmap_extract() */ 2791 if ((opte & L2_TYPE_MASK) == L2_TYPE_L) 2792 pa = (opte & L2_L_FRAME) | (va & L2_L_OFFSET); 2793 else 2794 pa = (opte & L2_S_FRAME) | (va & L2_S_OFFSET); 2795 /* note: should never have to remove an allocation 2796 * before the pvzone is initialized. 2797 */ 2798 rw_wlock(&pvh_global_lock); 2799 PMAP_LOCK(kernel_pmap); 2800 if (pvzone != NULL && (m = vm_phys_paddr_to_vm_page(pa)) && 2801 (pve = pmap_remove_pv(m, kernel_pmap, va))) 2802 pmap_free_pv_entry(pve); 2803 PMAP_UNLOCK(kernel_pmap); 2804 rw_wunlock(&pvh_global_lock); 2805 va = va & ~PAGE_MASK; 2806 cpu_dcache_wbinv_range(va, PAGE_SIZE); 2807 cpu_l2cache_wbinv_range(va, PAGE_SIZE); 2808 cpu_tlb_flushD_SE(va); 2809 cpu_cpwait(); 2810 *pte = 0; 2811 } 2812} 2813 2814 2815/* 2816 * Used to map a range of physical addresses into kernel 2817 * virtual address space. 2818 * 2819 * The value passed in '*virt' is a suggested virtual address for 2820 * the mapping. 
Architectures which can support a direct-mapped
2821	 * physical to virtual region can return the appropriate address
2822	 * within that region, leaving '*virt' unchanged.  Other
2823	 * architectures should map the pages starting at '*virt' and
2824	 * update '*virt' with the first usable address after the mapped
2825	 * region.
2826	 */
2827	vm_offset_t
2828	pmap_map(vm_offset_t *virt, vm_offset_t start, vm_offset_t end, int prot)
2829	{
2830		vm_offset_t sva = *virt;
2831		vm_offset_t va = sva;
2832	
2833		PDEBUG(1, printf("pmap_map: virt = %08x, start = %08x, end = %08x, "
2834		    "prot = %d\n", (uint32_t) *virt, (uint32_t) start, (uint32_t) end,
2835		    prot));
2836	
2837		while (start < end) {
2838			pmap_kenter(va, start);
2839			va += PAGE_SIZE;
2840			start += PAGE_SIZE;
2841		}
2842		*virt = va;
2843		return (sva);
2844	}
2845	
2846	static void
2847	pmap_wb_page(vm_page_t m)
2848	{
2849		struct pv_entry *pv;
2850	
2851		TAILQ_FOREACH(pv, &m->md.pv_list, pv_list)
2852			pmap_dcache_wb_range(pv->pv_pmap, pv->pv_va, PAGE_SIZE, FALSE,
2853			    (pv->pv_flags & PVF_WRITE) == 0);
2854	}
2855	
2856	static void
2857	pmap_inv_page(vm_page_t m)
2858	{
2859		struct pv_entry *pv;
2860	
2861		TAILQ_FOREACH(pv, &m->md.pv_list, pv_list)
2862			pmap_dcache_wb_range(pv->pv_pmap, pv->pv_va, PAGE_SIZE, TRUE, TRUE);
2863	}
2864	/*
2865	 * Add a list of wired pages to the kva.
2866	 * This routine is only used for temporary
2867	 * kernel mappings that do not need to have
2868	 * page modification or references recorded.
2869	 * Note that old mappings are simply written
2870	 * over.  The page *must* be wired.
2871	 */
2872	void
2873	pmap_qenter(vm_offset_t va, vm_page_t *m, int count)
2874	{
2875		int i;
2876	
2877		for (i = 0; i < count; i++) {
2878			pmap_wb_page(m[i]);
2879			pmap_kenter_internal(va, VM_PAGE_TO_PHYS(m[i]),
2880			    KENTER_CACHE);
2881			va += PAGE_SIZE;
2882		}
2883	}
2884	
2885	
2886	/*
2887	 * this routine jerks page mappings from the
2888	 * kernel -- it is meant only for temporary mappings.
2889	 */
2890	void
2891	pmap_qremove(vm_offset_t va, int count)
2892	{
2893		vm_paddr_t pa;
2894		int i;
2895	
2896		for (i = 0; i < count; i++) {
2897			pa = vtophys(va);
2898			if (pa) {
2899				pmap_inv_page(PHYS_TO_VM_PAGE(pa));
2900				pmap_kremove(va);
2901			}
2902			va += PAGE_SIZE;
2903		}
2904	}
2905	
2906	
2907	/*
2908	 * pmap_object_init_pt preloads the ptes for a given object
2909	 * into the specified pmap.  This eliminates the blast of soft
2910	 * faults on process startup and immediately after an mmap.
2911	 */
2912	void
2913	pmap_object_init_pt(pmap_t pmap, vm_offset_t addr, vm_object_t object,
2914	    vm_pindex_t pindex, vm_size_t size)
2915	{
2916	
2917		VM_OBJECT_ASSERT_WLOCKED(object);
2918		KASSERT(object->type == OBJT_DEVICE || object->type == OBJT_SG,
2919		    ("pmap_object_init_pt: non-device object"));
2920	}
2921	
2922	
2923	/*
2924	 * pmap_is_prefaultable:
2925	 *
2926	 * Return whether or not the specified virtual address is eligible
2927	 * for prefault.
2928	 */
2929	boolean_t
2930	pmap_is_prefaultable(pmap_t pmap, vm_offset_t addr)
2931	{
2932		pd_entry_t *pde;
2933		pt_entry_t *pte;
2934	
2935		if (!pmap_get_pde_pte(pmap, addr, &pde, &pte))
2936			return (FALSE);
2937		KASSERT(pte != NULL, ("Valid mapping but no pte ?"));
2938		if (*pte == 0)
2939			return (TRUE);
2940		return (FALSE);
2941	}
2942	
2943	/*
2944	 * Fetch pointers to the PDE/PTE for the given pmap/VA pair.
2945	 * Returns TRUE if the mapping exists, else FALSE.
2946	 *
2947	 * NOTE: This function is only used by a couple of arm-specific modules.
2948 * It is not safe to take any pmap locks here, since we could be right 2949 * in the middle of debugging the pmap anyway... 2950 * 2951 * It is possible for this routine to return FALSE even though a valid 2952 * mapping does exist. This is because we don't lock, so the metadata 2953 * state may be inconsistent. 2954 * 2955 * NOTE: We can return a NULL *ptp in the case where the L1 pde is 2956 * a "section" mapping. 2957 */ 2958boolean_t 2959pmap_get_pde_pte(pmap_t pm, vm_offset_t va, pd_entry_t **pdp, pt_entry_t **ptp) 2960{ 2961 struct l2_dtable *l2; 2962 pd_entry_t *pl1pd, l1pd; 2963 pt_entry_t *ptep; 2964 u_short l1idx; 2965 2966 if (pm->pm_l1 == NULL) 2967 return (FALSE); 2968 2969 l1idx = L1_IDX(va); 2970 *pdp = pl1pd = &pm->pm_l1->l1_kva[l1idx]; 2971 l1pd = *pl1pd; 2972 2973 if (l1pte_section_p(l1pd)) { 2974 *ptp = NULL; 2975 return (TRUE); 2976 } 2977 2978 if (pm->pm_l2 == NULL) 2979 return (FALSE); 2980 2981 l2 = pm->pm_l2[L2_IDX(l1idx)]; 2982 2983 if (l2 == NULL || 2984 (ptep = l2->l2_bucket[L2_BUCKET(l1idx)].l2b_kva) == NULL) { 2985 return (FALSE); 2986 } 2987 2988 *ptp = &ptep[l2pte_index(va)]; 2989 return (TRUE); 2990} 2991 2992/* 2993 * Routine: pmap_remove_all 2994 * Function: 2995 * Removes this physical page from 2996 * all physical maps in which it resides. 2997 * Reflects back modify bits to the pager. 2998 * 2999 * Notes: 3000 * Original versions of this routine were very 3001 * inefficient because they iteratively called 3002 * pmap_remove (slow...) 3003 */ 3004void 3005pmap_remove_all(vm_page_t m) 3006{ 3007 pv_entry_t pv; 3008 pt_entry_t *ptep; 3009 struct l2_bucket *l2b; 3010 boolean_t flush = FALSE; 3011 pmap_t curpm; 3012 int flags = 0; 3013 3014 KASSERT((m->oflags & VPO_UNMANAGED) == 0, 3015 ("pmap_remove_all: page %p is not managed", m)); 3016 if (TAILQ_EMPTY(&m->md.pv_list)) 3017 return; 3018 rw_wlock(&pvh_global_lock); 3019 3020 /* 3021 * XXX This call shouldn't exist. Iterating over the PV list twice, 3022 * once in pmap_clearbit() and again below, is both unnecessary and 3023 * inefficient. The below code should itself write back the cache 3024 * entry before it destroys the mapping. 3025 */ 3026 pmap_clearbit(m, PVF_WRITE); 3027 curpm = vmspace_pmap(curproc->p_vmspace); 3028 while ((pv = TAILQ_FIRST(&m->md.pv_list)) != NULL) { 3029 if (flush == FALSE && (pv->pv_pmap == curpm || 3030 pv->pv_pmap == kernel_pmap)) 3031 flush = TRUE; 3032 3033 PMAP_LOCK(pv->pv_pmap); 3034 /* 3035 * Cached contents were written-back in pmap_clearbit(), 3036 * but we still have to invalidate the cache entry to make 3037 * sure stale data are not retrieved when another page will be 3038 * mapped under this virtual address. 
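 *
 * As a sketch, the per-mapping sequence performed below is:
 *
 *	cpu_dcache_inv_range(va, PAGE_SIZE);	(drop stale lines)
 *	*ptep = 0; PTE_SYNC_CURRENT(...);	(then kill the PTE)
 *	pmap_free_l2_bucket(...);		(release PTP bookkeeping)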
3039 */ 3040 if (pmap_is_current(pv->pv_pmap)) { 3041 cpu_dcache_inv_range(pv->pv_va, PAGE_SIZE); 3042 if (pmap_has_valid_mapping(pv->pv_pmap, pv->pv_va)) 3043 cpu_l2cache_inv_range(pv->pv_va, PAGE_SIZE); 3044 } 3045 3046 if (pv->pv_flags & PVF_UNMAN) { 3047 /* remove the pv entry, but do not remove the mapping 3048 * and remember this is a kernel mapped page 3049 */ 3050 m->md.pv_kva = pv->pv_va; 3051 } else { 3052 /* remove the mapping and pv entry */ 3053 l2b = pmap_get_l2_bucket(pv->pv_pmap, pv->pv_va); 3054 KASSERT(l2b != NULL, ("No l2 bucket")); 3055 ptep = &l2b->l2b_kva[l2pte_index(pv->pv_va)]; 3056 *ptep = 0; 3057 PTE_SYNC_CURRENT(pv->pv_pmap, ptep); 3058 pmap_free_l2_bucket(pv->pv_pmap, l2b, 1); 3059 pv->pv_pmap->pm_stats.resident_count--; 3060 flags |= pv->pv_flags; 3061 } 3062 pmap_nuke_pv(m, pv->pv_pmap, pv); 3063 PMAP_UNLOCK(pv->pv_pmap); 3064 pmap_free_pv_entry(pv); 3065 } 3066 3067 if (flush) { 3068 if (PV_BEEN_EXECD(flags)) 3069 pmap_tlb_flushID(curpm); 3070 else 3071 pmap_tlb_flushD(curpm); 3072 } 3073 vm_page_aflag_clear(m, PGA_WRITEABLE); 3074 rw_wunlock(&pvh_global_lock); 3075} 3076 3077 3078/* 3079 * Set the physical protection on the 3080 * specified range of this map as requested. 3081 */ 3082void 3083pmap_protect(pmap_t pm, vm_offset_t sva, vm_offset_t eva, vm_prot_t prot) 3084{ 3085 struct l2_bucket *l2b; 3086 pt_entry_t *ptep, pte; 3087 vm_offset_t next_bucket; 3088 u_int flags; 3089 int flush; 3090 3091 CTR4(KTR_PMAP, "pmap_protect: pmap %p sva 0x%08x eva 0x%08x prot %x", 3092 pm, sva, eva, prot); 3093 3094 if ((prot & VM_PROT_READ) == 0) { 3095 pmap_remove(pm, sva, eva); 3096 return; 3097 } 3098 3099 if (prot & VM_PROT_WRITE) { 3100 /* 3101 * If this is a read->write transition, just ignore it and let 3102 * vm_fault() take care of it later. 3103 */ 3104 return; 3105 } 3106 3107 rw_wlock(&pvh_global_lock); 3108 PMAP_LOCK(pm); 3109 3110 /* 3111 * OK, at this point, we know we're doing write-protect operation. 3112 * If the pmap is active, write-back the range. 3113 */ 3114 pmap_dcache_wb_range(pm, sva, eva - sva, FALSE, FALSE); 3115 3116 flush = ((eva - sva) >= (PAGE_SIZE * 4)) ? 0 : -1; 3117 flags = 0; 3118 3119 while (sva < eva) { 3120 next_bucket = L2_NEXT_BUCKET(sva); 3121 if (next_bucket > eva) 3122 next_bucket = eva; 3123 3124 l2b = pmap_get_l2_bucket(pm, sva); 3125 if (l2b == NULL) { 3126 sva = next_bucket; 3127 continue; 3128 } 3129 3130 ptep = &l2b->l2b_kva[l2pte_index(sva)]; 3131 3132 while (sva < next_bucket) { 3133 if ((pte = *ptep) != 0 && (pte & L2_S_PROT_W) != 0) { 3134 struct vm_page *pg; 3135 u_int f; 3136 3137 pg = PHYS_TO_VM_PAGE(l2pte_pa(pte)); 3138 pte &= ~L2_S_PROT_W; 3139 *ptep = pte; 3140 PTE_SYNC(ptep); 3141 3142 if (!(pg->oflags & VPO_UNMANAGED)) { 3143 f = pmap_modify_pv(pg, pm, sva, 3144 PVF_WRITE, 0); 3145 if (f & PVF_WRITE) 3146 vm_page_dirty(pg); 3147 } else 3148 f = 0; 3149 3150 if (flush >= 0) { 3151 flush++; 3152 flags |= f; 3153 } else 3154 if (PV_BEEN_EXECD(f)) 3155 pmap_tlb_flushID_SE(pm, sva); 3156 else 3157 if (PV_BEEN_REFD(f)) 3158 pmap_tlb_flushD_SE(pm, sva); 3159 } 3160 3161 sva += PAGE_SIZE; 3162 ptep++; 3163 } 3164 } 3165 3166 3167 if (flush) { 3168 if (PV_BEEN_EXECD(flags)) 3169 pmap_tlb_flushID(pm); 3170 else 3171 if (PV_BEEN_REFD(flags)) 3172 pmap_tlb_flushD(pm); 3173 } 3174 rw_wunlock(&pvh_global_lock); 3175 3176 PMAP_UNLOCK(pm); 3177} 3178 3179 3180/* 3181 * Insert the given physical page (p) at 3182 * the specified virtual address (v) in the 3183 * target physical map with the protection requested. 
3184 * 3185 * If specified, the page will be wired down, meaning 3186 * that the related pte can not be reclaimed. 3187 * 3188 * NB: This is the only routine which MAY NOT lazy-evaluate 3189 * or lose information. That is, this routine must actually 3190 * insert this page into the given map NOW. 3191 */ 3192 3193int 3194pmap_enter(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot, 3195 u_int flags, int8_t psind __unused) 3196{ 3197 int rv; 3198 3199 rw_wlock(&pvh_global_lock); 3200 PMAP_LOCK(pmap); 3201 rv = pmap_enter_locked(pmap, va, m, prot, flags); 3202 rw_wunlock(&pvh_global_lock); 3203 PMAP_UNLOCK(pmap); 3204 return (rv); 3205} 3206 3207/* 3208 * The pvh global and pmap locks must be held. 3209 */ 3210static int 3211pmap_enter_locked(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot, 3212 u_int flags) 3213{ 3214 struct l2_bucket *l2b = NULL; 3215 struct vm_page *opg; 3216 struct pv_entry *pve = NULL; 3217 pt_entry_t *ptep, npte, opte; 3218 u_int nflags; 3219 u_int oflags; 3220 vm_paddr_t pa; 3221 3222 PMAP_ASSERT_LOCKED(pmap); 3223 rw_assert(&pvh_global_lock, RA_WLOCKED); 3224 if (va == vector_page) { 3225 pa = systempage.pv_pa; 3226 m = NULL; 3227 } else { 3228 if ((m->oflags & VPO_UNMANAGED) == 0 && !vm_page_xbusied(m)) 3229 VM_OBJECT_ASSERT_LOCKED(m->object); 3230 pa = VM_PAGE_TO_PHYS(m); 3231 } 3232 nflags = 0; 3233 if (prot & VM_PROT_WRITE) 3234 nflags |= PVF_WRITE; 3235 if (prot & VM_PROT_EXECUTE) 3236 nflags |= PVF_EXEC; 3237 if ((flags & PMAP_ENTER_WIRED) != 0) 3238 nflags |= PVF_WIRED; 3239 PDEBUG(1, printf("pmap_enter: pmap = %08x, va = %08x, m = %08x, prot = %x, " 3240 "flags = %x\n", (uint32_t) pmap, va, (uint32_t) m, prot, flags)); 3241 3242 if (pmap == kernel_pmap) { 3243 l2b = pmap_get_l2_bucket(pmap, va); 3244 if (l2b == NULL) 3245 l2b = pmap_grow_l2_bucket(pmap, va); 3246 } else { 3247do_l2b_alloc: 3248 l2b = pmap_alloc_l2_bucket(pmap, va); 3249 if (l2b == NULL) { 3250 if ((flags & PMAP_ENTER_NOSLEEP) == 0) { 3251 PMAP_UNLOCK(pmap); 3252 rw_wunlock(&pvh_global_lock); 3253 VM_WAIT; 3254 rw_wlock(&pvh_global_lock); 3255 PMAP_LOCK(pmap); 3256 goto do_l2b_alloc; 3257 } 3258 return (KERN_RESOURCE_SHORTAGE); 3259 } 3260 } 3261 3262 ptep = &l2b->l2b_kva[l2pte_index(va)]; 3263 3264 opte = *ptep; 3265 npte = pa; 3266 oflags = 0; 3267 if (opte) { 3268 /* 3269 * There is already a mapping at this address. 3270 * If the physical address is different, lookup the 3271 * vm_page. 3272 */ 3273 if (l2pte_pa(opte) != pa) 3274 opg = PHYS_TO_VM_PAGE(l2pte_pa(opte)); 3275 else 3276 opg = m; 3277 } else 3278 opg = NULL; 3279 3280 if ((prot & (VM_PROT_ALL)) || 3281 (!m || m->md.pvh_attrs & PVF_REF)) { 3282 /* 3283 * - The access type indicates that we don't need 3284 * to do referenced emulation. 3285 * OR 3286 * - The physical page has already been referenced 3287 * so no need to re-do referenced emulation here. 3288 */ 3289 npte |= L2_S_PROTO; 3290 3291 nflags |= PVF_REF; 3292 3293 if (m && ((prot & VM_PROT_WRITE) != 0 || 3294 (m->md.pvh_attrs & PVF_MOD))) { 3295 /* 3296 * This is a writable mapping, and the 3297 * page's mod state indicates it has 3298 * already been modified. Make it 3299 * writable from the outset. 3300 */ 3301 nflags |= PVF_MOD; 3302 if (!(m->md.pvh_attrs & PVF_MOD)) 3303 vm_page_dirty(m); 3304 } 3305 if (m && opte) 3306 vm_page_aflag_set(m, PGA_REFERENCED); 3307 } else { 3308 /* 3309 * Need to do page referenced emulation. 
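 *
 * A sketch of the round trip: the PTE is installed with L2_TYPE_INV,
 * the first access traps, and pmap_fault_fixup() above records the
 * reference and revalidates the entry with
 *
 *	*ptep = (pte & ~L2_TYPE_MASK) | L2_S_PROTO;
 *
 * after which the mapping behaves normally.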
3310 */ 3311 npte |= L2_TYPE_INV; 3312 } 3313 3314 if (prot & VM_PROT_WRITE) { 3315 npte |= L2_S_PROT_W; 3316 if (m != NULL && 3317 (m->oflags & VPO_UNMANAGED) == 0) 3318 vm_page_aflag_set(m, PGA_WRITEABLE); 3319 } 3320 if (m->md.pv_memattr != VM_MEMATTR_UNCACHEABLE) 3321 npte |= pte_l2_s_cache_mode; 3322 if (m && m == opg) { 3323 /* 3324 * We're changing the attrs of an existing mapping. 3325 */ 3326 oflags = pmap_modify_pv(m, pmap, va, 3327 PVF_WRITE | PVF_EXEC | PVF_WIRED | 3328 PVF_MOD | PVF_REF, nflags); 3329 3330 /* 3331 * We may need to flush the cache if we're 3332 * doing rw-ro... 3333 */ 3334 if (pmap_is_current(pmap) && 3335 (oflags & PVF_NC) == 0 && 3336 (opte & L2_S_PROT_W) != 0 && 3337 (prot & VM_PROT_WRITE) == 0 && 3338 (opte & L2_TYPE_MASK) != L2_TYPE_INV) { 3339 cpu_dcache_wb_range(va, PAGE_SIZE); 3340 cpu_l2cache_wb_range(va, PAGE_SIZE); 3341 } 3342 } else { 3343 /* 3344 * New mapping, or changing the backing page 3345 * of an existing mapping. 3346 */ 3347 if (opg) { 3348 /* 3349 * Replacing an existing mapping with a new one. 3350 * It is part of our managed memory so we 3351 * must remove it from the PV list 3352 */ 3353 if ((pve = pmap_remove_pv(opg, pmap, va))) { 3354 3355 /* note for patch: the oflags/invalidation was moved 3356 * because PG_FICTITIOUS pages could free the pve 3357 */ 3358 oflags = pve->pv_flags; 3359 /* 3360 * If the old mapping was valid (ref/mod 3361 * emulation creates 'invalid' mappings 3362 * initially) then make sure to frob 3363 * the cache. 3364 */ 3365 if ((oflags & PVF_NC) == 0 && l2pte_valid(opte)) { 3366 if (PV_BEEN_EXECD(oflags)) { 3367 pmap_idcache_wbinv_range(pmap, va, 3368 PAGE_SIZE); 3369 } else 3370 if (PV_BEEN_REFD(oflags)) { 3371 pmap_dcache_wb_range(pmap, va, 3372 PAGE_SIZE, TRUE, 3373 (oflags & PVF_WRITE) == 0); 3374 } 3375 } 3376 3377 /* free/allocate a pv_entry for UNMANAGED pages if 3378 * this physical page is not/is already mapped. 3379 */ 3380 3381 if (m && (m->oflags & VPO_UNMANAGED) && 3382 !m->md.pv_kva && 3383 TAILQ_EMPTY(&m->md.pv_list)) { 3384 pmap_free_pv_entry(pve); 3385 pve = NULL; 3386 } 3387 } else if (m && 3388 (!(m->oflags & VPO_UNMANAGED) || m->md.pv_kva || 3389 !TAILQ_EMPTY(&m->md.pv_list))) 3390 pve = pmap_get_pv_entry(); 3391 } else if (m && 3392 (!(m->oflags & VPO_UNMANAGED) || m->md.pv_kva || 3393 !TAILQ_EMPTY(&m->md.pv_list))) 3394 pve = pmap_get_pv_entry(); 3395 3396 if (m) { 3397 if ((m->oflags & VPO_UNMANAGED)) { 3398 if (!TAILQ_EMPTY(&m->md.pv_list) || 3399 m->md.pv_kva) { 3400 KASSERT(pve != NULL, ("No pv")); 3401 nflags |= PVF_UNMAN; 3402 pmap_enter_pv(m, pve, pmap, va, nflags); 3403 } else 3404 m->md.pv_kva = va; 3405 } else { 3406 KASSERT(va < kmi.clean_sva || 3407 va >= kmi.clean_eva, 3408 ("pmap_enter: managed mapping within the clean submap")); 3409 KASSERT(pve != NULL, ("No pv")); 3410 pmap_enter_pv(m, pve, pmap, va, nflags); 3411 } 3412 } 3413 } 3414 /* 3415 * Make sure userland mappings get the right permissions 3416 */ 3417 if (pmap != kernel_pmap && va != vector_page) { 3418 npte |= L2_S_PROT_U; 3419 } 3420 3421 /* 3422 * Keep the stats up to date 3423 */ 3424 if (opte == 0) { 3425 l2b->l2b_occupancy++; 3426 pmap->pm_stats.resident_count++; 3427 } 3428 3429 /* 3430 * If this is just a wiring change, the two PTEs will be 3431 * identical, so there's no need to update the page table. 
3432 */ 3433 if (npte != opte) { 3434 boolean_t is_cached = pmap_is_current(pmap); 3435 3436 *ptep = npte; 3437 if (is_cached) { 3438 /* 3439 * We only need to frob the cache/tlb if this pmap 3440 * is current 3441 */ 3442 PTE_SYNC(ptep); 3443 if (L1_IDX(va) != L1_IDX(vector_page) && 3444 l2pte_valid(npte)) { 3445 /* 3446 * This mapping is likely to be accessed as 3447 * soon as we return to userland. Fix up the 3448 * L1 entry to avoid taking another 3449 * page/domain fault. 3450 */ 3451 pd_entry_t *pl1pd, l1pd; 3452 3453 pl1pd = &pmap->pm_l1->l1_kva[L1_IDX(va)]; 3454 l1pd = l2b->l2b_phys | L1_C_DOM(pmap->pm_domain) | 3455 L1_C_PROTO; 3456 if (*pl1pd != l1pd) { 3457 *pl1pd = l1pd; 3458 PTE_SYNC(pl1pd); 3459 } 3460 } 3461 } 3462 3463 if (PV_BEEN_EXECD(oflags)) 3464 pmap_tlb_flushID_SE(pmap, va); 3465 else if (PV_BEEN_REFD(oflags)) 3466 pmap_tlb_flushD_SE(pmap, va); 3467 3468 3469 if (m) 3470 pmap_fix_cache(m, pmap, va); 3471 } 3472 return (KERN_SUCCESS); 3473} 3474 3475/* 3476 * Maps a sequence of resident pages belonging to the same object. 3477 * The sequence begins with the given page m_start. This page is 3478 * mapped at the given virtual address start. Each subsequent page is 3479 * mapped at a virtual address that is offset from start by the same 3480 * amount as the page is offset from m_start within the object. The 3481 * last page in the sequence is the page with the largest offset from 3482 * m_start that can be mapped at a virtual address less than the given 3483 * virtual address end. Not every virtual page between start and end 3484 * is mapped; only those for which a resident page exists with the 3485 * corresponding offset from m_start are mapped. 3486 */ 3487void 3488pmap_enter_object(pmap_t pmap, vm_offset_t start, vm_offset_t end, 3489 vm_page_t m_start, vm_prot_t prot) 3490{ 3491 vm_page_t m; 3492 vm_pindex_t diff, psize; 3493 3494 VM_OBJECT_ASSERT_LOCKED(m_start->object); 3495 3496 psize = atop(end - start); 3497 m = m_start; 3498 rw_wlock(&pvh_global_lock); 3499 PMAP_LOCK(pmap); 3500 while (m != NULL && (diff = m->pindex - m_start->pindex) < psize) { 3501 pmap_enter_locked(pmap, start + ptoa(diff), m, prot & 3502 (VM_PROT_READ | VM_PROT_EXECUTE), PMAP_ENTER_NOSLEEP); 3503 m = TAILQ_NEXT(m, listq); 3504 } 3505 rw_wunlock(&pvh_global_lock); 3506 PMAP_UNLOCK(pmap); 3507} 3508 3509/* 3510 * this code makes some *MAJOR* assumptions: 3511 * 1. Current pmap & pmap exists. 3512 * 2. Not wired. 3513 * 3. Read access. 3514 * 4. No page table pages. 3515 * but is *MUCH* faster than pmap_enter... 3516 */ 3517 3518void 3519pmap_enter_quick(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot) 3520{ 3521 3522 rw_wlock(&pvh_global_lock); 3523 PMAP_LOCK(pmap); 3524 pmap_enter_locked(pmap, va, m, prot & (VM_PROT_READ | VM_PROT_EXECUTE), 3525 PMAP_ENTER_NOSLEEP); 3526 rw_wunlock(&pvh_global_lock); 3527 PMAP_UNLOCK(pmap); 3528} 3529 3530/* 3531 * Clear the wired attribute from the mappings for the specified range of 3532 * addresses in the given pmap. Every valid mapping within that range 3533 * must have the wired attribute set. In contrast, invalid mappings 3534 * cannot have the wired attribute set, so they are ignored. 3535 * 3536 * XXX Wired mappings of unmanaged pages cannot be counted by this pmap 3537 * implementation. 
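 *
 * For illustration, a hypothetical caller unwiring a single page,
 *
 *	pmap_unwire(pmap, va, va + PAGE_SIZE);
 *
 * clears PVF_WIRED on the pv entry and decrements
 * pm_stats.wired_count; the PTE itself is left untouched.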
3538 */ 3539void 3540pmap_unwire(pmap_t pmap, vm_offset_t sva, vm_offset_t eva) 3541{ 3542 struct l2_bucket *l2b; 3543 pt_entry_t *ptep, pte; 3544 pv_entry_t pv; 3545 vm_offset_t next_bucket; 3546 vm_page_t m; 3547 3548 rw_wlock(&pvh_global_lock); 3549 PMAP_LOCK(pmap); 3550 while (sva < eva) { 3551 next_bucket = L2_NEXT_BUCKET(sva); 3552 if (next_bucket > eva) 3553 next_bucket = eva; 3554 l2b = pmap_get_l2_bucket(pmap, sva); 3555 if (l2b == NULL) { 3556 sva = next_bucket; 3557 continue; 3558 } 3559 for (ptep = &l2b->l2b_kva[l2pte_index(sva)]; sva < next_bucket; 3560 sva += PAGE_SIZE, ptep++) { 3561 if ((pte = *ptep) == 0 || 3562 (m = PHYS_TO_VM_PAGE(l2pte_pa(pte))) == NULL || 3563 (m->oflags & VPO_UNMANAGED) != 0) 3564 continue; 3565 pv = pmap_find_pv(m, pmap, sva); 3566 if ((pv->pv_flags & PVF_WIRED) == 0) 3567 panic("pmap_unwire: pv %p isn't wired", pv); 3568 pv->pv_flags &= ~PVF_WIRED; 3569 pmap->pm_stats.wired_count--; 3570 } 3571 } 3572 rw_wunlock(&pvh_global_lock); 3573 PMAP_UNLOCK(pmap); 3574} 3575 3576 3577/* 3578 * Copy the range specified by src_addr/len 3579 * from the source map to the range dst_addr/len 3580 * in the destination map. 3581 * 3582 * This routine is only advisory and need not do anything. 3583 */ 3584void 3585pmap_copy(pmap_t dst_pmap, pmap_t src_pmap, vm_offset_t dst_addr, 3586 vm_size_t len, vm_offset_t src_addr) 3587{ 3588} 3589 3590 3591/* 3592 * Routine: pmap_extract 3593 * Function: 3594 * Extract the physical page address associated 3595 * with the given map/virtual_address pair. 3596 */ 3597vm_paddr_t 3598pmap_extract(pmap_t pmap, vm_offset_t va) 3599{ 3600 vm_paddr_t pa; 3601 3602 PMAP_LOCK(pmap); 3603 pa = pmap_extract_locked(pmap, va); 3604 PMAP_UNLOCK(pmap); 3605 return (pa); 3606} 3607 3608static vm_paddr_t 3609pmap_extract_locked(pmap_t pmap, vm_offset_t va) 3610{ 3611 struct l2_dtable *l2; 3612 pd_entry_t l1pd; 3613 pt_entry_t *ptep, pte; 3614 vm_paddr_t pa; 3615 u_int l1idx; 3616 3617 if (pmap != kernel_pmap) 3618 PMAP_ASSERT_LOCKED(pmap); 3619 l1idx = L1_IDX(va); 3620 l1pd = pmap->pm_l1->l1_kva[l1idx]; 3621 if (l1pte_section_p(l1pd)) { 3622 /* 3623 * These should only happen for the kernel pmap. 3624 */ 3625 KASSERT(pmap == kernel_pmap, ("unexpected section")); 3626 /* XXX: what to do about the bits > 32 ? */ 3627 if (l1pd & L1_S_SUPERSEC) 3628 pa = (l1pd & L1_SUP_FRAME) | (va & L1_SUP_OFFSET); 3629 else 3630 pa = (l1pd & L1_S_FRAME) | (va & L1_S_OFFSET); 3631 } else { 3632 /* 3633 * Note that we can't rely on the validity of the L1 3634 * descriptor as an indication that a mapping exists. 3635 * We have to look it up in the L2 dtable. 3636 */ 3637 l2 = pmap->pm_l2[L2_IDX(l1idx)]; 3638 if (l2 == NULL || 3639 (ptep = l2->l2_bucket[L2_BUCKET(l1idx)].l2b_kva) == NULL) 3640 return (0); 3641 pte = ptep[l2pte_index(va)]; 3642 if (pte == 0) 3643 return (0); 3644 if ((pte & L2_TYPE_MASK) == L2_TYPE_L) 3645 pa = (pte & L2_L_FRAME) | (va & L2_L_OFFSET); 3646 else 3647 pa = (pte & L2_S_FRAME) | (va & L2_S_OFFSET); 3648 } 3649 return (pa); 3650} 3651 3652/* 3653 * Atomically extract and hold the physical page with the given 3654 * pmap and virtual address pair if that mapping permits the given 3655 * protection. 
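 *
 * A usage sketch (hypothetical caller; vm_page_unhold() releases
 * the hold taken here):
 *
 *	if ((m = pmap_extract_and_hold(pmap, va, VM_PROT_READ)) != NULL) {
 *		... inspect the page ...
 *		vm_page_unhold(m);
 *	}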
3656 * 3657 */ 3658vm_page_t 3659pmap_extract_and_hold(pmap_t pmap, vm_offset_t va, vm_prot_t prot) 3660{ 3661 struct l2_dtable *l2; 3662 pd_entry_t l1pd; 3663 pt_entry_t *ptep, pte; 3664 vm_paddr_t pa, paddr; 3665 vm_page_t m = NULL; 3666 u_int l1idx; 3667 l1idx = L1_IDX(va); 3668 paddr = 0; 3669 3670 PMAP_LOCK(pmap); 3671retry: 3672 l1pd = pmap->pm_l1->l1_kva[l1idx]; 3673 if (l1pte_section_p(l1pd)) { 3674 /* 3675 * These should only happen for kernel_pmap 3676 */ 3677 KASSERT(pmap == kernel_pmap, ("huh")); 3678 /* XXX: what to do about the bits > 32 ? */ 3679 if (l1pd & L1_S_SUPERSEC) 3680 pa = (l1pd & L1_SUP_FRAME) | (va & L1_SUP_OFFSET); 3681 else 3682 pa = (l1pd & L1_S_FRAME) | (va & L1_S_OFFSET); 3683 if (vm_page_pa_tryrelock(pmap, pa & PG_FRAME, &paddr)) 3684 goto retry; 3685 if (l1pd & L1_S_PROT_W || (prot & VM_PROT_WRITE) == 0) { 3686 m = PHYS_TO_VM_PAGE(pa); 3687 vm_page_hold(m); 3688 } 3689 3690 } else { 3691 /* 3692 * Note that we can't rely on the validity of the L1 3693 * descriptor as an indication that a mapping exists. 3694 * We have to look it up in the L2 dtable. 3695 */ 3696 l2 = pmap->pm_l2[L2_IDX(l1idx)]; 3697 3698 if (l2 == NULL || 3699 (ptep = l2->l2_bucket[L2_BUCKET(l1idx)].l2b_kva) == NULL) { 3700 PMAP_UNLOCK(pmap); 3701 return (NULL); 3702 } 3703 3704 ptep = &ptep[l2pte_index(va)]; 3705 pte = *ptep; 3706 3707 if (pte == 0) { 3708 PMAP_UNLOCK(pmap); 3709 return (NULL); 3710 } 3711 if (pte & L2_S_PROT_W || (prot & VM_PROT_WRITE) == 0) { 3712 if ((pte & L2_TYPE_MASK) == L2_TYPE_L) 3713 pa = (pte & L2_L_FRAME) | (va & L2_L_OFFSET); 3714 else 3715 pa = (pte & L2_S_FRAME) | (va & L2_S_OFFSET); 3716 if (vm_page_pa_tryrelock(pmap, pa & PG_FRAME, &paddr)) 3717 goto retry; 3718 m = PHYS_TO_VM_PAGE(pa); 3719 vm_page_hold(m); 3720 } 3721 } 3722 3723 PMAP_UNLOCK(pmap); 3724 PA_UNLOCK_COND(paddr); 3725 return (m); 3726} 3727 3728vm_paddr_t 3729pmap_dump_kextract(vm_offset_t va, pt2_entry_t *pte2p) 3730{ 3731 struct l2_dtable *l2; 3732 pd_entry_t l1pd; 3733 pt_entry_t *ptep, pte; 3734 vm_paddr_t pa; 3735 u_int l1idx; 3736 3737 l1idx = L1_IDX(va); 3738 l1pd = kernel_pmap->pm_l1->l1_kva[l1idx]; 3739 if (l1pte_section_p(l1pd)) { 3740 if (l1pd & L1_S_SUPERSEC) 3741 pa = (l1pd & L1_SUP_FRAME) | (va & L1_SUP_OFFSET); 3742 else 3743 pa = (l1pd & L1_S_FRAME) | (va & L1_S_OFFSET); 3744 pte = L2_S_PROTO | pa | 3745 L2_S_PROT(PTE_KERNEL, VM_PROT_READ | VM_PROT_WRITE); 3746 } else { 3747 l2 = kernel_pmap->pm_l2[L2_IDX(l1idx)]; 3748 if (l2 == NULL || 3749 (ptep = l2->l2_bucket[L2_BUCKET(l1idx)].l2b_kva) == NULL) { 3750 pte = 0; 3751 pa = 0; 3752 goto out; 3753 } 3754 pte = ptep[l2pte_index(va)]; 3755 if (pte == 0) { 3756 pa = 0; 3757 goto out; 3758 } 3759 if ((pte & L2_TYPE_MASK) == L2_TYPE_L) 3760 pa = (pte & L2_L_FRAME) | (va & L2_L_OFFSET); 3761 else 3762 pa = (pte & L2_S_FRAME) | (va & L2_S_OFFSET); 3763 } 3764out: 3765 if (pte2p != NULL) 3766 *pte2p = pte; 3767 return (pa); 3768} 3769 3770/* 3771 * Initialize a preallocated and zeroed pmap structure, 3772 * such as one in a vmspace structure. 
3773 */ 3774 3775int 3776pmap_pinit(pmap_t pmap) 3777{ 3778 PDEBUG(1, printf("pmap_pinit: pmap = %08x\n", (uint32_t) pmap)); 3779 3780 pmap_alloc_l1(pmap); 3781 bzero(pmap->pm_l2, sizeof(pmap->pm_l2)); 3782 3783 CPU_ZERO(&pmap->pm_active); 3784 3785 TAILQ_INIT(&pmap->pm_pvlist); 3786 bzero(&pmap->pm_stats, sizeof pmap->pm_stats); 3787 pmap->pm_stats.resident_count = 1; 3788 if (vector_page < KERNBASE) { 3789 pmap_enter(pmap, vector_page, PHYS_TO_VM_PAGE(systempage.pv_pa), 3790 VM_PROT_READ, PMAP_ENTER_WIRED | VM_PROT_READ, 0); 3791 } 3792 return (1); 3793} 3794 3795 3796/*************************************************** 3797 * page management routines. 3798 ***************************************************/ 3799 3800 3801static void 3802pmap_free_pv_entry(pv_entry_t pv) 3803{ 3804 pv_entry_count--; 3805 uma_zfree(pvzone, pv); 3806} 3807 3808 3809/* 3810 * get a new pv_entry, allocating a block from the system 3811 * when needed. 3812 * the memory allocation is performed bypassing the malloc code 3813 * because of the possibility of allocations at interrupt time. 3814 */ 3815static pv_entry_t 3816pmap_get_pv_entry(void) 3817{ 3818 pv_entry_t ret_value; 3819 3820 pv_entry_count++; 3821 if (pv_entry_count > pv_entry_high_water) 3822 pagedaemon_wakeup(); 3823 ret_value = uma_zalloc(pvzone, M_NOWAIT); 3824 return ret_value; 3825} 3826 3827/* 3828 * Remove the given range of addresses from the specified map. 3829 * 3830 * It is assumed that the start and end are properly 3831 * rounded to the page size. 3832 */ 3833#define PMAP_REMOVE_CLEAN_LIST_SIZE 3 3834void 3835pmap_remove(pmap_t pm, vm_offset_t sva, vm_offset_t eva) 3836{ 3837 struct l2_bucket *l2b; 3838 vm_offset_t next_bucket; 3839 pt_entry_t *ptep; 3840 u_int total; 3841 u_int mappings, is_exec, is_refd; 3842 int flushall = 0; 3843 3844 3845 /* 3846 * we lock in the pmap => pv_head direction 3847 */ 3848 3849 rw_wlock(&pvh_global_lock); 3850 PMAP_LOCK(pm); 3851 total = 0; 3852 while (sva < eva) { 3853 /* 3854 * Do one L2 bucket's worth at a time. 3855 */ 3856 next_bucket = L2_NEXT_BUCKET(sva); 3857 if (next_bucket > eva) 3858 next_bucket = eva; 3859 3860 l2b = pmap_get_l2_bucket(pm, sva); 3861 if (l2b == NULL) { 3862 sva = next_bucket; 3863 continue; 3864 } 3865 3866 ptep = &l2b->l2b_kva[l2pte_index(sva)]; 3867 mappings = 0; 3868 3869 while (sva < next_bucket) { 3870 struct vm_page *pg; 3871 pt_entry_t pte; 3872 vm_paddr_t pa; 3873 3874 pte = *ptep; 3875 3876 if (pte == 0) { 3877 /* 3878 * Nothing here, move along 3879 */ 3880 sva += PAGE_SIZE; 3881 ptep++; 3882 continue; 3883 } 3884 3885 pm->pm_stats.resident_count--; 3886 pa = l2pte_pa(pte); 3887 is_exec = 0; 3888 is_refd = 1; 3889 3890 /* 3891 * Update flags. In a number of circumstances, 3892 * we could cluster a lot of these and do a 3893 * number of sequential pages in one go. 
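 *
 * A sketch of the cleaning heuristic applied below: the first
 * PMAP_REMOVE_CLEAN_LIST_SIZE (3) live mappings are written back and
 * invalidated one page at a time, each with its own TLB shootdown;
 * on the fourth, the whole I/D-cache is flushed once and "flushall"
 * defers a full TLB flush to the end of pmap_remove().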
3894	 */
3895				if ((pg = PHYS_TO_VM_PAGE(pa)) != NULL) {
3896					struct pv_entry *pve;
3897	
3898					pve = pmap_remove_pv(pg, pm, sva);
3899					if (pve) {
3900						is_exec = PV_BEEN_EXECD(pve->pv_flags);
3901						is_refd = PV_BEEN_REFD(pve->pv_flags);
3902						pmap_free_pv_entry(pve);
3903					}
3904				}
3905	
3906				if (l2pte_valid(pte) && pmap_is_current(pm)) {
3907					if (total < PMAP_REMOVE_CLEAN_LIST_SIZE) {
3908						total++;
3909						if (is_exec) {
3910							cpu_idcache_wbinv_range(sva,
3911							    PAGE_SIZE);
3912							cpu_l2cache_wbinv_range(sva,
3913							    PAGE_SIZE);
3914							cpu_tlb_flushID_SE(sva);
3915						} else if (is_refd) {
3916							cpu_dcache_wbinv_range(sva,
3917							    PAGE_SIZE);
3918							cpu_l2cache_wbinv_range(sva,
3919							    PAGE_SIZE);
3920							cpu_tlb_flushD_SE(sva);
3921						}
3922					} else if (total == PMAP_REMOVE_CLEAN_LIST_SIZE) {
3923						/* flushall will also only get set
3924						 * for a current pmap
3925						 */
3926						cpu_idcache_wbinv_all();
3927						cpu_l2cache_wbinv_all();
3928						flushall = 1;
3929						total++;
3930					}
3931				}
3932				*ptep = 0;
3933				PTE_SYNC(ptep);
3934	
3935				sva += PAGE_SIZE;
3936				ptep++;
3937				mappings++;
3938			}
3939	
3940			pmap_free_l2_bucket(pm, l2b, mappings);
3941		}
3942	
3943		rw_wunlock(&pvh_global_lock);
3944		if (flushall)
3945			cpu_tlb_flushID();
3946		PMAP_UNLOCK(pm);
3947	}
3948	
3949	/*
3950	 * pmap_zero_page()
3951	 *
3952	 * Zero a given physical page by mapping it at a page hook point.
3953	 * In doing the zero page op, the page we zero is mapped cacheable; on
3954	 * StrongARM, accesses to non-cached pages are non-burst, which makes
3955	 * writing _any_ bulk data very slow.
3956	 */
3957	#if ARM_MMU_GENERIC != 0 || defined(CPU_XSCALE_CORE3)
3958	void
3959	pmap_zero_page_generic(vm_paddr_t phys, int off, int size)
3960	{
3961	
3962		if (_arm_bzero && size >= _min_bzero_size &&
3963		    _arm_bzero((void *)(phys + off), size, IS_PHYSICAL) == 0)
3964			return;
3965	
3966		mtx_lock(&cmtx);
3967		/*
3968		 * Hook in the page, zero it, invalidate the TLB as needed.
3969		 *
3970		 * Note the temporary zero-page mapping must be a non-cached page in
3971		 * order to work without corruption when write-allocate is enabled.
3972		 */
3973		*cdst_pte = L2_S_PROTO | phys | L2_S_PROT(PTE_KERNEL, VM_PROT_WRITE);
3974		PTE_SYNC(cdst_pte);
3975		cpu_tlb_flushD_SE(cdstp);
3976		cpu_cpwait();
3977		if (off || size != PAGE_SIZE)
3978			bzero((void *)(cdstp + off), size);
3979		else
3980			bzero_page(cdstp);
3981	
3982		mtx_unlock(&cmtx);
3983	}
3984	#endif /* ARM_MMU_GENERIC != 0 || CPU_XSCALE_CORE3 */
3985	
3986	#if ARM_MMU_XSCALE == 1
3987	void
3988	pmap_zero_page_xscale(vm_paddr_t phys, int off, int size)
3989	{
3990	
3991		if (_arm_bzero && size >= _min_bzero_size &&
3992		    _arm_bzero((void *)(phys + off), size, IS_PHYSICAL) == 0)
3993			return;
3994	
3995		mtx_lock(&cmtx);
3996		/*
3997		 * Hook in the page, zero it, and purge the cache for that
3998		 * zeroed page.  Invalidate the TLB as needed.
3999		 */
4000		*cdst_pte = L2_S_PROTO | phys |
4001		    L2_S_PROT(PTE_KERNEL, VM_PROT_WRITE) |
4002		    L2_C | L2_XSCALE_T_TEX(TEX_XSCALE_X);	/* mini-data */
4003		PTE_SYNC(cdst_pte);
4004		cpu_tlb_flushD_SE(cdstp);
4005		cpu_cpwait();
4006		if (off || size != PAGE_SIZE)
4007			bzero((void *)(cdstp + off), size);
4008		else
4009			bzero_page(cdstp);
4010		mtx_unlock(&cmtx);
4011		xscale_cache_clean_minidata();
4012	}
4013	
4014	/*
4015	 * Change the PTEs for the specified kernel mappings such that they
4016	 * will use the mini data cache instead of the main data cache.
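 *
 * For illustration, a hypothetical XScale caller with a small, hot
 * buffer could route it through the mini-data cache with:
 *
 *	pmap_use_minicache(bufva, bufsize);
 *
 * The loop below simply clears L2_B in each PTE after writing back
 * and invalidating the previously cached contents.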
 */
void
pmap_use_minicache(vm_offset_t va, vm_size_t size)
{
	struct l2_bucket *l2b;
	pt_entry_t *ptep, *sptep, pte;
	vm_offset_t next_bucket, eva;

#if (ARM_NMMUS > 1) || defined(CPU_XSCALE_CORE3)
	if (xscale_use_minidata == 0)
		return;
#endif

	eva = va + size;

	while (va < eva) {
		next_bucket = L2_NEXT_BUCKET(va);
		if (next_bucket > eva)
			next_bucket = eva;

		l2b = pmap_get_l2_bucket(kernel_pmap, va);

		sptep = ptep = &l2b->l2b_kva[l2pte_index(va)];

		while (va < next_bucket) {
			pte = *ptep;
			if (!l2pte_minidata(pte)) {
				cpu_dcache_wbinv_range(va, PAGE_SIZE);
				cpu_tlb_flushD_SE(va);
				*ptep = pte & ~L2_B;
			}
			ptep++;
			va += PAGE_SIZE;
		}
		PTE_SYNC_RANGE(sptep, (u_int)(ptep - sptep));
	}
	cpu_cpwait();
}
#endif /* ARM_MMU_XSCALE == 1 */

/*
 * pmap_zero_page zeros the specified hardware page by mapping
 * the page into KVM and using bzero to clear its contents.
 */
void
pmap_zero_page(vm_page_t m)
{
	pmap_zero_page_func(VM_PAGE_TO_PHYS(m), 0, PAGE_SIZE);
}


/*
 * pmap_zero_page_area zeros the specified hardware page by mapping
 * the page into KVM and using bzero to clear its contents.
 *
 * off and size may not cover an area beyond a single hardware page.
 */
void
pmap_zero_page_area(vm_page_t m, int off, int size)
{

	pmap_zero_page_func(VM_PAGE_TO_PHYS(m), off, size);
}


/*
 * pmap_zero_page_idle zeros the specified hardware page by mapping
 * the page into KVM and using bzero to clear its contents. This
 * is intended to be called from the vm_pagezero process only and
 * outside of Giant.
 */
void
pmap_zero_page_idle(vm_page_t m)
{

	pmap_zero_page(m);
}

#if 0
/*
 * pmap_clean_page()
 *
 * This is a local function used to work out the best strategy to clean
 * a single page referenced by its entry in the PV table. It should be used
 * by pmap_copy_page, pmap_zero_page and maybe some others later on.
 *
 * Its policy is effectively:
 *  o If there are no mappings, we don't bother doing anything with the cache.
 *  o If there is one mapping, we clean just that page.
 *  o If there are multiple mappings, we clean the entire cache.
 *
 * So that some functions can be further optimised, it returns 0 if it didn't
 * clean the entire cache, or 1 if it did.
 *
 * XXX One bug in this routine is that if the pv_entry has a single page
 * mapped at 0x00000000, a whole cache clean will be performed rather than
 * just the one page. Since this should not occur in everyday use, the worst
 * outcome is merely a less efficient clean for that page.
 *
 * We don't yet use this function but may want to.
 */
static int
pmap_clean_page(struct pv_entry *pv, boolean_t is_src)
{
	pmap_t pm, pm_to_clean = NULL;
	struct pv_entry *npv;
	u_int cache_needs_cleaning = 0;
	u_int flags = 0;
	vm_offset_t page_to_clean = 0;

	if (pv == NULL) {
		/* nothing mapped in so nothing to flush */
		return (0);
	}

	/*
	 * Since we flush the cache each time we change to a different
	 * user vmspace, we only need to flush the page if it is in the
	 * current pmap.
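	 *
	 * (Added note: a page mapped only by some other process's pmap
	 * cannot be dirty in this CPU's cache precisely because of that
	 * flush-on-switch policy, which is why the loop below considers
	 * only mappings belonging to pm or to kernel_pmap.)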
	 */
	if (curthread)
		pm = vmspace_pmap(curproc->p_vmspace);
	else
		pm = kernel_pmap;

	for (npv = pv; npv; npv = TAILQ_NEXT(npv, pv_list)) {
		if (npv->pv_pmap == kernel_pmap || npv->pv_pmap == pm) {
			flags |= npv->pv_flags;
			/*
			 * The page is mapped non-cacheable in
			 * this map. No need to flush the cache.
			 */
			if (npv->pv_flags & PVF_NC) {
#ifdef DIAGNOSTIC
				if (cache_needs_cleaning)
					panic("pmap_clean_page: "
					    "cache inconsistency");
#endif
				break;
			} else if (is_src && (npv->pv_flags & PVF_WRITE) == 0)
				continue;
			if (cache_needs_cleaning) {
				page_to_clean = 0;
				break;
			} else {
				page_to_clean = npv->pv_va;
				pm_to_clean = npv->pv_pmap;
			}
			cache_needs_cleaning = 1;
		}
	}
	if (page_to_clean) {
		if (PV_BEEN_EXECD(flags))
			pmap_idcache_wbinv_range(pm_to_clean, page_to_clean,
			    PAGE_SIZE);
		else
			pmap_dcache_wb_range(pm_to_clean, page_to_clean,
			    PAGE_SIZE, !is_src, (flags & PVF_WRITE) == 0);
	} else if (cache_needs_cleaning) {
		if (PV_BEEN_EXECD(flags))
			pmap_idcache_wbinv_all(pm);
		else
			pmap_dcache_wbinv_all(pm);
		return (1);
	}
	return (0);
}
#endif

/*
 * pmap_copy_page copies the specified (machine independent)
 * page by mapping the page into virtual memory and using
 * bcopy to copy the page, one machine dependent page at a
 * time.
 */

/*
 * pmap_copy_page()
 *
 * Copy one physical page into another, by mapping the pages into
 * hook points. The same comment regarding cacheability as in
 * pmap_zero_page also applies here.
 */
#if ARM_MMU_GENERIC != 0 || defined(CPU_XSCALE_CORE3)
void
pmap_copy_page_generic(vm_paddr_t src, vm_paddr_t dst)
{
#if 0
	struct vm_page *src_pg = PHYS_TO_VM_PAGE(src);
#endif

	/*
	 * Clean the source page. Hold the source page's lock for
	 * the duration of the copy so that no other mappings can
	 * be created while we have a potentially aliased mapping.
	 */
#if 0
	/*
	 * XXX: Not needed while we call cpu_dcache_wbinv_all() in
	 * pmap_copy_page().
	 */
	(void) pmap_clean_page(TAILQ_FIRST(&src_pg->md.pv_list), TRUE);
#endif
	/*
	 * Map the pages into the page hook points, copy them, and purge
	 * the cache for the appropriate page. Invalidate the TLB
	 * as required.
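	 *
	 * (Added note on the mechanism: csrcp and cdstp are two reserved
	 * kernel virtual pages. Their PTEs are pointed at the physical
	 * source and destination, the stale TLB entries are flushed, the
	 * copy is done with bcopy_page(), and finally the source range is
	 * invalidated while the destination range is written back and
	 * invalidated in both cache levels so the data reaches RAM.)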
	 */
	mtx_lock(&cmtx);
	*csrc_pte = L2_S_PROTO | src |
	    L2_S_PROT(PTE_KERNEL, VM_PROT_READ) | pte_l2_s_cache_mode;
	PTE_SYNC(csrc_pte);
	*cdst_pte = L2_S_PROTO | dst |
	    L2_S_PROT(PTE_KERNEL, VM_PROT_WRITE) | pte_l2_s_cache_mode;
	PTE_SYNC(cdst_pte);
	cpu_tlb_flushD_SE(csrcp);
	cpu_tlb_flushD_SE(cdstp);
	cpu_cpwait();
	bcopy_page(csrcp, cdstp);
	mtx_unlock(&cmtx);
	cpu_dcache_inv_range(csrcp, PAGE_SIZE);
	cpu_dcache_wbinv_range(cdstp, PAGE_SIZE);
	cpu_l2cache_inv_range(csrcp, PAGE_SIZE);
	cpu_l2cache_wbinv_range(cdstp, PAGE_SIZE);
}

void
pmap_copy_page_offs_generic(vm_paddr_t a_phys, vm_offset_t a_offs,
    vm_paddr_t b_phys, vm_offset_t b_offs, int cnt)
{

	mtx_lock(&cmtx);
	*csrc_pte = L2_S_PROTO | a_phys |
	    L2_S_PROT(PTE_KERNEL, VM_PROT_READ) | pte_l2_s_cache_mode;
	PTE_SYNC(csrc_pte);
	*cdst_pte = L2_S_PROTO | b_phys |
	    L2_S_PROT(PTE_KERNEL, VM_PROT_WRITE) | pte_l2_s_cache_mode;
	PTE_SYNC(cdst_pte);
	cpu_tlb_flushD_SE(csrcp);
	cpu_tlb_flushD_SE(cdstp);
	cpu_cpwait();
	bcopy((char *)csrcp + a_offs, (char *)cdstp + b_offs, cnt);
	mtx_unlock(&cmtx);
	cpu_dcache_inv_range(csrcp + a_offs, cnt);
	cpu_dcache_wbinv_range(cdstp + b_offs, cnt);
	cpu_l2cache_inv_range(csrcp + a_offs, cnt);
	cpu_l2cache_wbinv_range(cdstp + b_offs, cnt);
}
#endif /* ARM_MMU_GENERIC != 0 || defined(CPU_XSCALE_CORE3) */

#if ARM_MMU_XSCALE == 1
void
pmap_copy_page_xscale(vm_paddr_t src, vm_paddr_t dst)
{
#if 0
	/* XXX: Only needed for pmap_clean_page(), which is commented out. */
	struct vm_page *src_pg = PHYS_TO_VM_PAGE(src);
#endif

	/*
	 * Clean the source page. Hold the source page's lock for
	 * the duration of the copy so that no other mappings can
	 * be created while we have a potentially aliased mapping.
	 */
#if 0
	/*
	 * XXX: Not needed while we call cpu_dcache_wbinv_all() in
	 * pmap_copy_page().
	 */
	(void) pmap_clean_page(TAILQ_FIRST(&src_pg->md.pv_list), TRUE);
#endif
	/*
	 * Map the pages into the page hook points, copy them, and purge
	 * the cache for the appropriate page. Invalidate the TLB
	 * as required.
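	 *
	 * (Added note: unlike the generic variant, these mappings use the
	 * XScale mini-data cache, so the copy avoids displacing the main
	 * data cache; xscale_cache_clean_minidata() below then pushes the
	 * copied data out to memory.)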
	 */
	mtx_lock(&cmtx);
	*csrc_pte = L2_S_PROTO | src |
	    L2_S_PROT(PTE_KERNEL, VM_PROT_READ) |
	    L2_C | L2_XSCALE_T_TEX(TEX_XSCALE_X);	/* mini-data */
	PTE_SYNC(csrc_pte);
	*cdst_pte = L2_S_PROTO | dst |
	    L2_S_PROT(PTE_KERNEL, VM_PROT_WRITE) |
	    L2_C | L2_XSCALE_T_TEX(TEX_XSCALE_X);	/* mini-data */
	PTE_SYNC(cdst_pte);
	cpu_tlb_flushD_SE(csrcp);
	cpu_tlb_flushD_SE(cdstp);
	cpu_cpwait();
	bcopy_page(csrcp, cdstp);
	mtx_unlock(&cmtx);
	xscale_cache_clean_minidata();
}

void
pmap_copy_page_offs_xscale(vm_paddr_t a_phys, vm_offset_t a_offs,
    vm_paddr_t b_phys, vm_offset_t b_offs, int cnt)
{

	mtx_lock(&cmtx);
	*csrc_pte = L2_S_PROTO | a_phys |
	    L2_S_PROT(PTE_KERNEL, VM_PROT_READ) |
	    L2_C | L2_XSCALE_T_TEX(TEX_XSCALE_X);
	PTE_SYNC(csrc_pte);
	*cdst_pte = L2_S_PROTO | b_phys |
	    L2_S_PROT(PTE_KERNEL, VM_PROT_WRITE) |
	    L2_C | L2_XSCALE_T_TEX(TEX_XSCALE_X);
	PTE_SYNC(cdst_pte);
	cpu_tlb_flushD_SE(csrcp);
	cpu_tlb_flushD_SE(cdstp);
	cpu_cpwait();
	bcopy((char *)csrcp + a_offs, (char *)cdstp + b_offs, cnt);
	mtx_unlock(&cmtx);
	xscale_cache_clean_minidata();
}
#endif /* ARM_MMU_XSCALE == 1 */

void
pmap_copy_page(vm_page_t src, vm_page_t dst)
{

	cpu_dcache_wbinv_all();
	cpu_l2cache_wbinv_all();
	if (_arm_memcpy && PAGE_SIZE >= _min_memcpy_size &&
	    _arm_memcpy((void *)VM_PAGE_TO_PHYS(dst),
	    (void *)VM_PAGE_TO_PHYS(src), PAGE_SIZE, IS_PHYSICAL) == 0)
		return;
	pmap_copy_page_func(VM_PAGE_TO_PHYS(src), VM_PAGE_TO_PHYS(dst));
}

/*
 * We have code to do unmapped I/O. However, it isn't quite right and
 * causes un-page-aligned I/O to devices to fail (most notably newfs
 * or fsck). We trade a little performance for stability by disallowing
 * unmapped I/O here.
 */
int unmapped_buf_allowed = 0;

void
pmap_copy_pages(vm_page_t ma[], vm_offset_t a_offset, vm_page_t mb[],
    vm_offset_t b_offset, int xfersize)
{
	vm_page_t a_pg, b_pg;
	vm_offset_t a_pg_offset, b_pg_offset;
	int cnt;

	cpu_dcache_wbinv_all();
	cpu_l2cache_wbinv_all();
	while (xfersize > 0) {
		a_pg = ma[a_offset >> PAGE_SHIFT];
		a_pg_offset = a_offset & PAGE_MASK;
		cnt = min(xfersize, PAGE_SIZE - a_pg_offset);
		b_pg = mb[b_offset >> PAGE_SHIFT];
		b_pg_offset = b_offset & PAGE_MASK;
		cnt = min(cnt, PAGE_SIZE - b_pg_offset);
		pmap_copy_page_offs_func(VM_PAGE_TO_PHYS(a_pg), a_pg_offset,
		    VM_PAGE_TO_PHYS(b_pg), b_pg_offset, cnt);
		xfersize -= cnt;
		a_offset += cnt;
		b_offset += cnt;
	}
}

vm_offset_t
pmap_quick_enter_page(vm_page_t m)
{
	/*
	 * Don't bother with a PCPU pageframe, since we don't support
	 * SMP for anything pre-armv7. Use pmap_kenter() to ensure
	 * caching is handled correctly for multiple mappings of the
	 * same physical page.
	 */

	mtx_assert(&qmap_mtx, MA_NOTOWNED);
	mtx_lock(&qmap_mtx);

	pmap_kenter(qmap_addr, VM_PAGE_TO_PHYS(m));

	return (qmap_addr);
}

void
pmap_quick_remove_page(vm_offset_t addr)
{
	KASSERT(addr == qmap_addr,
	    ("pmap_quick_remove_page: invalid address"));
	mtx_assert(&qmap_mtx, MA_OWNED);
	pmap_kremove(addr);
	mtx_unlock(&qmap_mtx);
}

/*
 * This routine returns TRUE if the given physical page has a mapping
 * in the given pmap.
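 *
 * Note (added): only the first 16 PV entries are examined, so for a page
 * shared by many pmaps this is a quick, conservative check that may
 * return FALSE even though a mapping exists further down the list.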
 */
boolean_t
pmap_page_exists_quick(pmap_t pmap, vm_page_t m)
{
	pv_entry_t pv;
	int loops = 0;
	boolean_t rv;

	KASSERT((m->oflags & VPO_UNMANAGED) == 0,
	    ("pmap_page_exists_quick: page %p is not managed", m));
	rv = FALSE;
	rw_wlock(&pvh_global_lock);
	TAILQ_FOREACH(pv, &m->md.pv_list, pv_list) {
		if (pv->pv_pmap == pmap) {
			rv = TRUE;
			break;
		}
		loops++;
		if (loops >= 16)
			break;
	}
	rw_wunlock(&pvh_global_lock);
	return (rv);
}

/*
 * pmap_page_wired_mappings:
 *
 * Return the number of managed mappings to the given physical page
 * that are wired.
 */
int
pmap_page_wired_mappings(vm_page_t m)
{
	pv_entry_t pv;
	int count;

	count = 0;
	if ((m->oflags & VPO_UNMANAGED) != 0)
		return (count);
	rw_wlock(&pvh_global_lock);
	TAILQ_FOREACH(pv, &m->md.pv_list, pv_list)
		if ((pv->pv_flags & PVF_WIRED) != 0)
			count++;
	rw_wunlock(&pvh_global_lock);
	return (count);
}

/*
 * This function is advisory.
 */
void
pmap_advise(pmap_t pmap, vm_offset_t sva, vm_offset_t eva, int advice)
{
}

/*
 * pmap_ts_referenced:
 *
 * Return the count of reference bits for a page, clearing all of them.
 */
int
pmap_ts_referenced(vm_page_t m)
{

	KASSERT((m->oflags & VPO_UNMANAGED) == 0,
	    ("pmap_ts_referenced: page %p is not managed", m));
	return (pmap_clearbit(m, PVF_REF));
}


boolean_t
pmap_is_modified(vm_page_t m)
{

	KASSERT((m->oflags & VPO_UNMANAGED) == 0,
	    ("pmap_is_modified: page %p is not managed", m));
	if (m->md.pvh_attrs & PVF_MOD)
		return (TRUE);

	return (FALSE);
}


/*
 * Clear the modify bits on the specified physical page.
 */
void
pmap_clear_modify(vm_page_t m)
{

	KASSERT((m->oflags & VPO_UNMANAGED) == 0,
	    ("pmap_clear_modify: page %p is not managed", m));
	VM_OBJECT_ASSERT_WLOCKED(m->object);
	KASSERT(!vm_page_xbusied(m),
	    ("pmap_clear_modify: page %p is exclusive busied", m));

	/*
	 * If the page is not PGA_WRITEABLE, then no mappings can be modified.
	 * If the object containing the page is locked and the page is not
	 * exclusive busied, then PGA_WRITEABLE cannot be concurrently set.
	 */
	if ((m->aflags & PGA_WRITEABLE) == 0)
		return;
	if (m->md.pvh_attrs & PVF_MOD)
		pmap_clearbit(m, PVF_MOD);
}


/*
 * pmap_is_referenced:
 *
 * Return whether or not the specified physical page was referenced
 * in any physical maps.
 */
boolean_t
pmap_is_referenced(vm_page_t m)
{

	KASSERT((m->oflags & VPO_UNMANAGED) == 0,
	    ("pmap_is_referenced: page %p is not managed", m));
	return ((m->md.pvh_attrs & PVF_REF) != 0);
}


/*
 * Clear the write and modified bits in each of the given page's mappings.
 */
void
pmap_remove_write(vm_page_t m)
{

	KASSERT((m->oflags & VPO_UNMANAGED) == 0,
	    ("pmap_remove_write: page %p is not managed", m));

	/*
	 * If the page is not exclusive busied, then PGA_WRITEABLE cannot be
	 * set by another thread while the object is locked. Thus,
	 * if PGA_WRITEABLE is clear, no page table entries need updating.
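	 *
	 * (Added note: when the bit is set, the pmap_clearbit(m, PVF_WRITE)
	 * call below downgrades every mapping of the page to read-only and
	 * performs the associated cache and TLB maintenance.)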
	 */
	VM_OBJECT_ASSERT_WLOCKED(m->object);
	if (vm_page_xbusied(m) || (m->aflags & PGA_WRITEABLE) != 0)
		pmap_clearbit(m, PVF_WRITE);
}


/*
 * Perform the pmap work for mincore.
 */
int
pmap_mincore(pmap_t pmap, vm_offset_t addr, vm_paddr_t *locked_pa)
{
	struct l2_bucket *l2b;
	pt_entry_t *ptep, pte;
	vm_paddr_t pa;
	vm_page_t m;
	int val;
	boolean_t managed;

	PMAP_LOCK(pmap);
retry:
	l2b = pmap_get_l2_bucket(pmap, addr);
	if (l2b == NULL) {
		val = 0;
		goto out;
	}
	ptep = &l2b->l2b_kva[l2pte_index(addr)];
	pte = *ptep;
	if (!l2pte_valid(pte)) {
		val = 0;
		goto out;
	}
	val = MINCORE_INCORE;
	if (pte & L2_S_PROT_W)
		val |= MINCORE_MODIFIED | MINCORE_MODIFIED_OTHER;
	managed = false;
	pa = l2pte_pa(pte);
	m = PHYS_TO_VM_PAGE(pa);
	if (m != NULL && !(m->oflags & VPO_UNMANAGED))
		managed = true;
	if (managed) {
		/*
		 * The ARM pmap tries to maintain a per-mapping
		 * reference bit. The trouble is that it's kept in
		 * the PV entry, not the PTE, so it's costly to access
		 * here. You would need to acquire the pvh global
		 * lock, call pmap_find_pv(), and introduce a custom
		 * version of vm_page_pa_tryrelock() that releases and
		 * reacquires the pvh global lock. In the end, I
		 * doubt it's worthwhile. This may falsely report
		 * the given address as referenced.
		 */
		if ((m->md.pvh_attrs & PVF_REF) != 0)
			val |= MINCORE_REFERENCED | MINCORE_REFERENCED_OTHER;
	}
	if ((val & (MINCORE_MODIFIED_OTHER | MINCORE_REFERENCED_OTHER)) !=
	    (MINCORE_MODIFIED_OTHER | MINCORE_REFERENCED_OTHER) && managed) {
		/* Ensure that "PHYS_TO_VM_PAGE(pa)->object" doesn't change. */
		if (vm_page_pa_tryrelock(pmap, pa, locked_pa))
			goto retry;
	} else
out:
		PA_UNLOCK_COND(*locked_pa);
	PMAP_UNLOCK(pmap);
	return (val);
}


void
pmap_sync_icache(pmap_t pm, vm_offset_t va, vm_size_t sz)
{
}


/*
 * Increase the starting virtual address of the given mapping if a
 * different alignment might result in more superpage mappings.
 */
void
pmap_align_superpage(vm_object_t object, vm_ooffset_t offset,
    vm_offset_t *addr, vm_size_t size)
{
}

#define BOOTSTRAP_DEBUG

/*
 * pmap_map_section:
 *
 * Create a single section mapping.
 */
void
pmap_map_section(vm_offset_t l1pt, vm_offset_t va, vm_offset_t pa,
    int prot, int cache)
{
	pd_entry_t *pde = (pd_entry_t *) l1pt;
	pd_entry_t fl;

	KASSERT(((va | pa) & L1_S_OFFSET) == 0,
	    ("pmap_map_section: va/pa not section-aligned"));

	switch (cache) {
	case PTE_NOCACHE:
	default:
		fl = 0;
		break;

	case PTE_CACHE:
		fl = pte_l1_s_cache_mode;
		break;

	case PTE_PAGETABLE:
		fl = pte_l1_s_cache_mode_pt;
		break;
	}

	pde[va >> L1_S_SHIFT] = L1_S_PROTO | pa |
	    L1_S_PROT(PTE_KERNEL, prot) | fl | L1_S_DOM(PMAP_DOMAIN_KERNEL);
	PTE_SYNC(&pde[va >> L1_S_SHIFT]);
}

/*
 * pmap_link_l2pt:
 *
 * Link the L2 page table specified by l2pv->pv_pa into the L1
 * page table at the slot for "va".
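 *
 * Illustrative sketch (hypothetical bootstrap caller; the names
 * l1pagetable and kernel_pt_table are assumptions, not from this file):
 * each coarse L2 table is linked under the 1MiB L1 slot it serves, one
 * call per slot, e.g.
 *
 *	pmap_link_l2pt(l1pagetable, KERNBASE + i * L1_S_SIZE,
 *	    &kernel_pt_table[i]);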
 */
void
pmap_link_l2pt(vm_offset_t l1pt, vm_offset_t va, struct pv_addr *l2pv)
{
	pd_entry_t *pde = (pd_entry_t *) l1pt, proto;
	u_int slot = va >> L1_S_SHIFT;

	proto = L1_S_DOM(PMAP_DOMAIN_KERNEL) | L1_C_PROTO;

#ifdef VERBOSE_INIT_ARM
	printf("pmap_link_l2pt: pa=0x%x va=0x%x\n", l2pv->pv_pa, l2pv->pv_va);
#endif

	pde[slot + 0] = proto | (l2pv->pv_pa + 0x000);

	PTE_SYNC(&pde[slot]);

	SLIST_INSERT_HEAD(&kernel_pt_list, l2pv, pv_list);
}

/*
 * pmap_map_entry:
 *
 * Create a single page mapping.
 */
void
pmap_map_entry(vm_offset_t l1pt, vm_offset_t va, vm_offset_t pa, int prot,
    int cache)
{
	pd_entry_t *pde = (pd_entry_t *) l1pt;
	pt_entry_t fl;
	pt_entry_t *pte;

	KASSERT(((va | pa) & PAGE_MASK) == 0,
	    ("pmap_map_entry: va/pa not page-aligned"));

	switch (cache) {
	case PTE_NOCACHE:
	default:
		fl = 0;
		break;

	case PTE_CACHE:
		fl = pte_l2_s_cache_mode;
		break;

	case PTE_PAGETABLE:
		fl = pte_l2_s_cache_mode_pt;
		break;
	}

	if ((pde[va >> L1_S_SHIFT] & L1_TYPE_MASK) != L1_TYPE_C)
		panic("pmap_map_entry: no L2 table for VA 0x%08x", va);

	pte = (pt_entry_t *) kernel_pt_lookup(pde[L1_IDX(va)] & L1_C_ADDR_MASK);

	if (pte == NULL)
		panic("pmap_map_entry: can't find L2 table for VA 0x%08x", va);

	pte[l2pte_index(va)] =
	    L2_S_PROTO | pa | L2_S_PROT(PTE_KERNEL, prot) | fl;
	PTE_SYNC(&pte[l2pte_index(va)]);
}

/*
 * pmap_map_chunk:
 *
 * Map a chunk of memory using the most efficient mappings
 * possible (section, large page, small page) into the
 * provided L1 and L2 tables at the specified virtual address.
 */
vm_size_t
pmap_map_chunk(vm_offset_t l1pt, vm_offset_t va, vm_offset_t pa,
    vm_size_t size, int prot, int cache)
{
	pd_entry_t *pde = (pd_entry_t *) l1pt;
	pt_entry_t *pte, f1, f2s, f2l;
	vm_size_t resid;
	int i;

	resid = (size + (PAGE_SIZE - 1)) & ~(PAGE_SIZE - 1);

	if (l1pt == 0)
		panic("pmap_map_chunk: no L1 table provided");

#ifdef VERBOSE_INIT_ARM
	printf("pmap_map_chunk: pa=0x%x va=0x%x size=0x%x resid=0x%x "
	    "prot=0x%x cache=%d\n", pa, va, size, resid, prot, cache);
#endif

	switch (cache) {
	case PTE_NOCACHE:
	default:
		f1 = 0;
		f2l = 0;
		f2s = 0;
		break;

	case PTE_CACHE:
		f1 = pte_l1_s_cache_mode;
		f2l = pte_l2_l_cache_mode;
		f2s = pte_l2_s_cache_mode;
		break;

	case PTE_PAGETABLE:
		f1 = pte_l1_s_cache_mode_pt;
		f2l = pte_l2_l_cache_mode_pt;
		f2s = pte_l2_s_cache_mode_pt;
		break;
	}

	size = resid;

	while (resid > 0) {
		/* See if we can use a section mapping. */
		if (L1_S_MAPPABLE_P(va, pa, resid)) {
#ifdef VERBOSE_INIT_ARM
			printf("S");
#endif
			pde[va >> L1_S_SHIFT] = L1_S_PROTO | pa |
			    L1_S_PROT(PTE_KERNEL, prot) | f1 |
			    L1_S_DOM(PMAP_DOMAIN_KERNEL);
			PTE_SYNC(&pde[va >> L1_S_SHIFT]);
			va += L1_S_SIZE;
			pa += L1_S_SIZE;
			resid -= L1_S_SIZE;
			continue;
		}

		/*
		 * Ok, we're going to use an L2 table. Make sure
		 * one is actually in the corresponding L1 slot
		 * for the current VA.
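		 *
		 * (Added worked example: a 0x140000-byte chunk whose va
		 * and pa are both 1MiB-aligned would be emitted as one
		 * 1MiB section followed by four 64KiB large pages; only
		 * the large and small page cases below need the L2 table.)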
		 */
		if ((pde[va >> L1_S_SHIFT] & L1_TYPE_MASK) != L1_TYPE_C)
			panic("pmap_map_chunk: no L2 table for VA 0x%08x", va);

		pte = (pt_entry_t *) kernel_pt_lookup(
		    pde[L1_IDX(va)] & L1_C_ADDR_MASK);
		if (pte == NULL)
			panic("pmap_map_chunk: can't find L2 table for VA "
			    "0x%08x", va);

		/* See if we can use an L2 large page mapping. */
		if (L2_L_MAPPABLE_P(va, pa, resid)) {
#ifdef VERBOSE_INIT_ARM
			printf("L");
#endif
			for (i = 0; i < 16; i++) {
				pte[l2pte_index(va) + i] =
				    L2_L_PROTO | pa |
				    L2_L_PROT(PTE_KERNEL, prot) | f2l;
				PTE_SYNC(&pte[l2pte_index(va) + i]);
			}
			va += L2_L_SIZE;
			pa += L2_L_SIZE;
			resid -= L2_L_SIZE;
			continue;
		}

		/* Use a small page mapping. */
#ifdef VERBOSE_INIT_ARM
		printf("P");
#endif
		pte[l2pte_index(va)] =
		    L2_S_PROTO | pa | L2_S_PROT(PTE_KERNEL, prot) | f2s;
		PTE_SYNC(&pte[l2pte_index(va)]);
		va += PAGE_SIZE;
		pa += PAGE_SIZE;
		resid -= PAGE_SIZE;
	}
#ifdef VERBOSE_INIT_ARM
	printf("\n");
#endif
	return (size);
}

void
pmap_page_set_memattr(vm_page_t m, vm_memattr_t ma)
{
	/*
	 * Remember the memattr in a field that gets used to set the
	 * appropriate bits in the PTEs as mappings are established.
	 */
	m->md.pv_memattr = ma;

	/*
	 * It appears that this function can only be called before any
	 * mappings for the page are established on ARM. If this ever
	 * changes, this code will need to walk the pv_list and make each
	 * of the existing mappings uncacheable, being careful to sync
	 * caches and PTEs (and maybe invalidate the TLB?) for any current
	 * mapping it modifies.
	 */
	if (m->md.pv_kva != 0 || TAILQ_FIRST(&m->md.pv_list) != NULL)
		panic("Can't change memattr on page with existing mappings");
}
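
/*
 * Illustrative sketch (hypothetical driver code; the attribute name and
 * allocation flags are assumptions, not taken from this file): a page
 * destined for non-coherent DMA would have its attribute changed before
 * any mapping is created, for example:
 *
 *	m = vm_page_alloc(NULL, 0, VM_ALLOC_NOOBJ | VM_ALLOC_WIRED);
 *	pmap_page_set_memattr(m, VM_MEMATTR_UNCACHEABLE);
 *	pmap_kenter(va, VM_PAGE_TO_PHYS(m));
 *
 * Calling pmap_page_set_memattr() after the mapping is created would
 * trip the panic above.
 */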