pmap-v6.c revision 327656
1/*- 2 * Copyright (c) 1991 Regents of the University of California. 3 * Copyright (c) 1994 John S. Dyson 4 * Copyright (c) 1994 David Greenman 5 * Copyright (c) 2005-2010 Alan L. Cox <alc@cs.rice.edu> 6 * Copyright (c) 2014-2016 Svatopluk Kraus <skra@FreeBSD.org> 7 * Copyright (c) 2014-2016 Michal Meloun <mmel@FreeBSD.org> 8 * All rights reserved. 9 * 10 * This code is derived from software contributed to Berkeley by 11 * the Systems Programming Group of the University of Utah Computer 12 * Science Department and William Jolitz of UUNET Technologies Inc. 13 * 14 * Redistribution and use in source and binary forms, with or without 15 * modification, are permitted provided that the following conditions 16 * are met: 17 * 1. Redistributions of source code must retain the above copyright 18 * notice, this list of conditions and the following disclaimer. 19 * 2. Redistributions in binary form must reproduce the above copyright 20 * notice, this list of conditions and the following disclaimer in the 21 * documentation and/or other materials provided with the distribution. 22 * 3. Neither the name of the University nor the names of its contributors 23 * may be used to endorse or promote products derived from this software 24 * without specific prior written permission. 25 * 26 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND 27 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 28 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 29 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE 30 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 31 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 32 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 33 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 34 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 35 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 36 * SUCH DAMAGE. 37 * 38 * from: @(#)pmap.c 7.7 (Berkeley) 5/12/91 39 */ 40/*- 41 * Copyright (c) 2003 Networks Associates Technology, Inc. 42 * All rights reserved. 43 * 44 * This software was developed for the FreeBSD Project by Jake Burkholder, 45 * Safeport Network Services, and Network Associates Laboratories, the 46 * Security Research Division of Network Associates, Inc. under 47 * DARPA/SPAWAR contract N66001-01-C-8035 ("CBOSS"), as part of the DARPA 48 * CHATS research program. 49 * 50 * Redistribution and use in source and binary forms, with or without 51 * modification, are permitted provided that the following conditions 52 * are met: 53 * 1. Redistributions of source code must retain the above copyright 54 * notice, this list of conditions and the following disclaimer. 55 * 2. Redistributions in binary form must reproduce the above copyright 56 * notice, this list of conditions and the following disclaimer in the 57 * documentation and/or other materials provided with the distribution. 58 * 59 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND 60 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 61 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 62 * ARE DISCLAIMED. 
IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE 63 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 64 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 65 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 66 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 67 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 68 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 69 * SUCH DAMAGE. 70 */ 71 72#include <sys/cdefs.h> 73__FBSDID("$FreeBSD: stable/11/sys/arm/arm/pmap-v6.c 327656 2018-01-06 23:24:52Z ian $"); 74 75/* 76 * Manages physical address maps. 77 * 78 * Since the information managed by this module is 79 * also stored by the logical address mapping module, 80 * this module may throw away valid virtual-to-physical 81 * mappings at almost any time. However, invalidations 82 * of virtual-to-physical mappings must be done as 83 * requested. 84 * 85 * In order to cope with hardware architectures which 86 * make virtual-to-physical map invalidates expensive, 87 * this module may delay invalidate or reduced protection 88 * operations until such time as they are actually 89 * necessary. This module is given full information as 90 * to which processors are currently using which maps, 91 * and to when physical maps must be made correct. 92 */ 93 94#include "opt_vm.h" 95#include "opt_pmap.h" 96#include "opt_ddb.h" 97 98#include <sys/param.h> 99#include <sys/systm.h> 100#include <sys/kernel.h> 101#include <sys/ktr.h> 102#include <sys/lock.h> 103#include <sys/proc.h> 104#include <sys/rwlock.h> 105#include <sys/malloc.h> 106#include <sys/vmmeter.h> 107#include <sys/malloc.h> 108#include <sys/mman.h> 109#include <sys/sf_buf.h> 110#include <sys/smp.h> 111#include <sys/sched.h> 112#include <sys/sysctl.h> 113 114#ifdef DDB 115#include <ddb/ddb.h> 116#endif 117 118#include <machine/physmem.h> 119 120#include <vm/vm.h> 121#include <vm/uma.h> 122#include <vm/pmap.h> 123#include <vm/vm_param.h> 124#include <vm/vm_kern.h> 125#include <vm/vm_object.h> 126#include <vm/vm_map.h> 127#include <vm/vm_page.h> 128#include <vm/vm_pageout.h> 129#include <vm/vm_phys.h> 130#include <vm/vm_extern.h> 131#include <vm/vm_reserv.h> 132#include <sys/lock.h> 133#include <sys/mutex.h> 134 135#include <machine/md_var.h> 136#include <machine/pmap_var.h> 137#include <machine/cpu.h> 138#include <machine/pcb.h> 139#include <machine/sf_buf.h> 140#ifdef SMP 141#include <machine/smp.h> 142#endif 143 144#ifndef PMAP_SHPGPERPROC 145#define PMAP_SHPGPERPROC 200 146#endif 147 148#ifndef DIAGNOSTIC 149#define PMAP_INLINE __inline 150#else 151#define PMAP_INLINE 152#endif 153 154#ifdef PMAP_DEBUG 155static void pmap_zero_page_check(vm_page_t m); 156void pmap_debug(int level); 157int pmap_pid_dump(int pid); 158 159#define PDEBUG(_lev_,_stat_) \ 160 if (pmap_debug_level >= (_lev_)) \ 161 ((_stat_)) 162#define dprintf printf 163int pmap_debug_level = 1; 164#else /* PMAP_DEBUG */ 165#define PDEBUG(_lev_,_stat_) /* Nothing */ 166#define dprintf(x, arg...) 167#endif /* PMAP_DEBUG */ 168 169/* 170 * Level 2 page tables map definion ('max' is excluded). 
 */

#define PT2V_MIN_ADDRESS	((vm_offset_t)PT2MAP)
#define PT2V_MAX_ADDRESS	((vm_offset_t)PT2MAP + PT2MAP_SIZE)

#define UPT2V_MIN_ADDRESS	((vm_offset_t)PT2MAP)
#define UPT2V_MAX_ADDRESS \
    ((vm_offset_t)(PT2MAP + (KERNBASE >> PT2MAP_SHIFT)))

/*
 * Promotion to a 1MB (PTE1) page mapping requires that the corresponding
 * 4KB (PTE2) page mappings have identical settings for the following fields:
 */
#define PTE2_PROMOTE	(PTE2_V | PTE2_A | PTE2_NM | PTE2_S | PTE2_NG | \
			 PTE2_NX | PTE2_RO | PTE2_U | PTE2_W | \
			 PTE2_ATTR_MASK)

#define PTE1_PROMOTE	(PTE1_V | PTE1_A | PTE1_NM | PTE1_S | PTE1_NG | \
			 PTE1_NX | PTE1_RO | PTE1_U | PTE1_W | \
			 PTE1_ATTR_MASK)

#define ATTR_TO_L1(l2_attr)	((((l2_attr) & L2_TEX0) ? L1_S_TEX0 : 0) | \
				 (((l2_attr) & L2_C) ? L1_S_C : 0) | \
				 (((l2_attr) & L2_B) ? L1_S_B : 0) | \
				 (((l2_attr) & PTE2_A) ? PTE1_A : 0) | \
				 (((l2_attr) & PTE2_NM) ? PTE1_NM : 0) | \
				 (((l2_attr) & PTE2_S) ? PTE1_S : 0) | \
				 (((l2_attr) & PTE2_NG) ? PTE1_NG : 0) | \
				 (((l2_attr) & PTE2_NX) ? PTE1_NX : 0) | \
				 (((l2_attr) & PTE2_RO) ? PTE1_RO : 0) | \
				 (((l2_attr) & PTE2_U) ? PTE1_U : 0) | \
				 (((l2_attr) & PTE2_W) ? PTE1_W : 0))

#define ATTR_TO_L2(l1_attr)	((((l1_attr) & L1_S_TEX0) ? L2_TEX0 : 0) | \
				 (((l1_attr) & L1_S_C) ? L2_C : 0) | \
				 (((l1_attr) & L1_S_B) ? L2_B : 0) | \
				 (((l1_attr) & PTE1_A) ? PTE2_A : 0) | \
				 (((l1_attr) & PTE1_NM) ? PTE2_NM : 0) | \
				 (((l1_attr) & PTE1_S) ? PTE2_S : 0) | \
				 (((l1_attr) & PTE1_NG) ? PTE2_NG : 0) | \
				 (((l1_attr) & PTE1_NX) ? PTE2_NX : 0) | \
				 (((l1_attr) & PTE1_RO) ? PTE2_RO : 0) | \
				 (((l1_attr) & PTE1_U) ? PTE2_U : 0) | \
				 (((l1_attr) & PTE1_W) ? PTE2_W : 0))

/*
 * PTE2 descriptors creation macros.
 */
#define PTE2_ATTR_DEFAULT	vm_memattr_to_pte2(VM_MEMATTR_DEFAULT)
#define PTE2_ATTR_PT		vm_memattr_to_pte2(pt_memattr)

#define PTE2_KPT(pa)	PTE2_KERN(pa, PTE2_AP_KRW, PTE2_ATTR_PT)
#define PTE2_KPT_NG(pa)	PTE2_KERN_NG(pa, PTE2_AP_KRW, PTE2_ATTR_PT)

#define PTE2_KRW(pa)	PTE2_KERN(pa, PTE2_AP_KRW, PTE2_ATTR_DEFAULT)
#define PTE2_KRO(pa)	PTE2_KERN(pa, PTE2_AP_KR, PTE2_ATTR_DEFAULT)

#define PV_STATS
#ifdef PV_STATS
#define PV_STAT(x)	do { x ; } while (0)
#else
#define PV_STAT(x)	do { } while (0)
#endif

/*
 * The boot_pt1 is used temporarily in the very early boot stage as the L1
 * page table. We can initialize many things with no memory allocation thanks
 * to its static allocation, and this brings two main advantages:
 * (1) other cores can be started very simply,
 * (2) various boot loaders can be supported as their arguments can be
 *     processed in virtual address space and can be moved to a safe location
 *     before the first allocation happens.
 * The only disadvantage is that boot_pt1 is used only in this very early
 * boot stage. However, the table is uninitialized and so lies in bss, so the
 * kernel image size is not influenced.
 *
 * QQQ: In the future, maybe, boot_pt1 can be used for the soft reset and
 *      CPU suspend/resume game.
 */
extern pt1_entry_t boot_pt1[];

vm_paddr_t base_pt1;
pt1_entry_t *kern_pt1;
pt2_entry_t *kern_pt2tab;
pt2_entry_t *PT2MAP;

static uint32_t ttb_flags;
static vm_memattr_t pt_memattr;
ttb_entry_t pmap_kern_ttb;

struct pmap kernel_pmap_store;
LIST_HEAD(pmaplist, pmap);
static struct pmaplist allpmaps;
static struct mtx allpmaps_lock;

vm_offset_t virtual_avail;	/* VA of first avail page (after kernel bss) */
vm_offset_t virtual_end;	/* VA of last avail page (end of kernel AS) */

static vm_offset_t kernel_vm_end_new;
vm_offset_t kernel_vm_end = KERNBASE + NKPT2PG * NPT2_IN_PG * PTE1_SIZE;
vm_offset_t vm_max_kernel_address;
vm_paddr_t kernel_l1pa;

static struct rwlock __aligned(CACHE_LINE_SIZE) pvh_global_lock;

/*
 * Data for the pv entry allocation mechanism
 */
static TAILQ_HEAD(pch, pv_chunk) pv_chunks = TAILQ_HEAD_INITIALIZER(pv_chunks);
static int pv_entry_count = 0, pv_entry_max = 0, pv_entry_high_water = 0;
static struct md_page *pv_table; /* XXX: Is it used only the list in md_page? */
static int shpgperproc = PMAP_SHPGPERPROC;

struct pv_chunk *pv_chunkbase;		/* KVA block for pv_chunks */
int pv_maxchunks;			/* How many chunks we have KVA for */
vm_offset_t pv_vafree;			/* freelist stored in the PTE */

vm_paddr_t first_managed_pa;
#define	pa_to_pvh(pa)	(&pv_table[pte1_index(pa - first_managed_pa)])

/*
 * All those kernel PT submaps that BSD is so fond of
 */
static pt2_entry_t *CMAP3;
static caddr_t CADDR3;
caddr_t _tmppt = 0;

struct msgbuf *msgbufp = NULL; /* XXX move it to machdep.c */

/*
 * Crashdump maps.
 */
static caddr_t crashdumpmap;

static pt2_entry_t *PMAP1 = NULL, *PMAP2;
static pt2_entry_t *PADDR1 = NULL, *PADDR2;
#ifdef DDB
static pt2_entry_t *PMAP3;
static pt2_entry_t *PADDR3;
static int PMAP3cpu __unused; /* for SMP only */
#endif
#ifdef SMP
static int PMAP1cpu;
static int PMAP1changedcpu;
SYSCTL_INT(_debug, OID_AUTO, PMAP1changedcpu, CTLFLAG_RD,
    &PMAP1changedcpu, 0,
    "Number of times pmap_pte2_quick changed CPU with same PMAP1");
#endif
static int PMAP1changed;
SYSCTL_INT(_debug, OID_AUTO, PMAP1changed, CTLFLAG_RD,
    &PMAP1changed, 0,
    "Number of times pmap_pte2_quick changed PMAP1");
static int PMAP1unchanged;
SYSCTL_INT(_debug, OID_AUTO, PMAP1unchanged, CTLFLAG_RD,
    &PMAP1unchanged, 0,
    "Number of times pmap_pte2_quick didn't change PMAP1");
static struct mtx PMAP2mutex;

static __inline void pt2_wirecount_init(vm_page_t m);
static boolean_t pmap_demote_pte1(pmap_t pmap, pt1_entry_t *pte1p,
    vm_offset_t va);
void cache_icache_sync_fresh(vm_offset_t va, vm_paddr_t pa, vm_size_t size);

/*
 * Function to set the debug level of the pmap code.
 */
#ifdef PMAP_DEBUG
void
pmap_debug(int level)
{

	pmap_debug_level = level;
	dprintf("pmap_debug: level=%d\n", pmap_debug_level);
}
#endif /* PMAP_DEBUG */

/*
 * This table must correspond with the memory attribute configuration in vm.h.
 * The first entry is used for normal system mapping.
 *
 * Device memory is always marked as shared.
 * Normal memory is shared only in the SMP case.
 * Not-outer-shareable (NOS) bits are not used yet.
 * Class 6 cannot be used on ARM11.
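 *
 * Background note (general ARMv6/v7 TEX remap behavior, stated here only for
 * orientation): with TEX remapping enabled, the TEX[0], C and B bits of a
 * page table entry form a 3-bit index (0-7) into this table, and the memory
 * type and inner/outer cacheability of each class are then taken from the
 * PRRR/NMRR registers that pmap_set_tex() programs from tex_class[] below.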
355 */ 356#define TEXDEF_TYPE_SHIFT 0 357#define TEXDEF_TYPE_MASK 0x3 358#define TEXDEF_INNER_SHIFT 2 359#define TEXDEF_INNER_MASK 0x3 360#define TEXDEF_OUTER_SHIFT 4 361#define TEXDEF_OUTER_MASK 0x3 362#define TEXDEF_NOS_SHIFT 6 363#define TEXDEF_NOS_MASK 0x1 364 365#define TEX(t, i, o, s) \ 366 ((t) << TEXDEF_TYPE_SHIFT) | \ 367 ((i) << TEXDEF_INNER_SHIFT) | \ 368 ((o) << TEXDEF_OUTER_SHIFT | \ 369 ((s) << TEXDEF_NOS_SHIFT)) 370 371static uint32_t tex_class[8] = { 372/* type inner cache outer cache */ 373 TEX(PRRR_MEM, NMRR_WB_WA, NMRR_WB_WA, 0), /* 0 - ATTR_WB_WA */ 374 TEX(PRRR_MEM, NMRR_NC, NMRR_NC, 0), /* 1 - ATTR_NOCACHE */ 375 TEX(PRRR_DEV, NMRR_NC, NMRR_NC, 0), /* 2 - ATTR_DEVICE */ 376 TEX(PRRR_SO, NMRR_NC, NMRR_NC, 0), /* 3 - ATTR_SO */ 377 TEX(PRRR_MEM, NMRR_WT, NMRR_WT, 0), /* 4 - ATTR_WT */ 378 TEX(PRRR_MEM, NMRR_NC, NMRR_NC, 0), /* 5 - NOT USED YET */ 379 TEX(PRRR_MEM, NMRR_NC, NMRR_NC, 0), /* 6 - NOT USED YET */ 380 TEX(PRRR_MEM, NMRR_NC, NMRR_NC, 0), /* 7 - NOT USED YET */ 381}; 382#undef TEX 383 384static uint32_t pte2_attr_tab[8] = { 385 PTE2_ATTR_WB_WA, /* 0 - VM_MEMATTR_WB_WA */ 386 PTE2_ATTR_NOCACHE, /* 1 - VM_MEMATTR_NOCACHE */ 387 PTE2_ATTR_DEVICE, /* 2 - VM_MEMATTR_DEVICE */ 388 PTE2_ATTR_SO, /* 3 - VM_MEMATTR_SO */ 389 PTE2_ATTR_WT, /* 4 - VM_MEMATTR_WRITE_THROUGH */ 390 0, /* 5 - NOT USED YET */ 391 0, /* 6 - NOT USED YET */ 392 0 /* 7 - NOT USED YET */ 393}; 394CTASSERT(VM_MEMATTR_WB_WA == 0); 395CTASSERT(VM_MEMATTR_NOCACHE == 1); 396CTASSERT(VM_MEMATTR_DEVICE == 2); 397CTASSERT(VM_MEMATTR_SO == 3); 398CTASSERT(VM_MEMATTR_WRITE_THROUGH == 4); 399 400static inline uint32_t 401vm_memattr_to_pte2(vm_memattr_t ma) 402{ 403 404 KASSERT((u_int)ma < 5, ("%s: bad vm_memattr_t %d", __func__, ma)); 405 return (pte2_attr_tab[(u_int)ma]); 406} 407 408static inline uint32_t 409vm_page_pte2_attr(vm_page_t m) 410{ 411 412 return (vm_memattr_to_pte2(m->md.pat_mode)); 413} 414 415/* 416 * Convert TEX definition entry to TTB flags. 417 */ 418static uint32_t 419encode_ttb_flags(int idx) 420{ 421 uint32_t inner, outer, nos, reg; 422 423 inner = (tex_class[idx] >> TEXDEF_INNER_SHIFT) & 424 TEXDEF_INNER_MASK; 425 outer = (tex_class[idx] >> TEXDEF_OUTER_SHIFT) & 426 TEXDEF_OUTER_MASK; 427 nos = (tex_class[idx] >> TEXDEF_NOS_SHIFT) & 428 TEXDEF_NOS_MASK; 429 430 reg = nos << 5; 431 reg |= outer << 3; 432 if (cpuinfo.coherent_walk) 433 reg |= (inner & 0x1) << 6; 434 reg |= (inner & 0x2) >> 1; 435#ifdef SMP 436 reg |= 1 << 1; 437#endif 438 return reg; 439} 440 441/* 442 * Set TEX remapping registers in current CPU. 443 */ 444void 445pmap_set_tex(void) 446{ 447 uint32_t prrr, nmrr; 448 uint32_t type, inner, outer, nos; 449 int i; 450 451#ifdef PMAP_PTE_NOCACHE 452 /* XXX fixme */ 453 if (cpuinfo.coherent_walk) { 454 pt_memattr = VM_MEMATTR_WB_WA; 455 ttb_flags = encode_ttb_flags(0); 456 } 457 else { 458 pt_memattr = VM_MEMATTR_NOCACHE; 459 ttb_flags = encode_ttb_flags(1); 460 } 461#else 462 pt_memattr = VM_MEMATTR_WB_WA; 463 ttb_flags = encode_ttb_flags(0); 464#endif 465 466 prrr = 0; 467 nmrr = 0; 468 469 /* Build remapping register from TEX classes. 
 */
	for (i = 0; i < 8; i++) {
		type = (tex_class[i] >> TEXDEF_TYPE_SHIFT) &
			TEXDEF_TYPE_MASK;
		inner = (tex_class[i] >> TEXDEF_INNER_SHIFT) &
			TEXDEF_INNER_MASK;
		outer = (tex_class[i] >> TEXDEF_OUTER_SHIFT) &
			TEXDEF_OUTER_MASK;
		nos = (tex_class[i] >> TEXDEF_NOS_SHIFT) &
			TEXDEF_NOS_MASK;

		prrr |= type << (i * 2);
		prrr |= nos << (i + 24);
		nmrr |= inner << (i * 2);
		nmrr |= outer << (i * 2 + 16);
	}
	/* Add shareable bits for device memory. */
	prrr |= PRRR_DS0 | PRRR_DS1;

	/* Add shareable bits for normal memory in SMP case. */
#ifdef SMP
	prrr |= PRRR_NS1;
#endif
	cp15_prrr_set(prrr);
	cp15_nmrr_set(nmrr);

	/* Caches are disabled, so full TLB flush should be enough. */
	tlb_flush_all_local();
}

/*
 * Remap one vm_memattr class to another one. This can be useful as a
 * workaround for SOC errata, e.g. if devices must be accessed using the
 * SO memory class.
 *
 * !!! Please note that this function is an absolute last resort.
 * It should not be used under normal circumstances. !!!
 *
 * Usage rules:
 * - it shall be called after pmap_bootstrap_prepare() and before
 *   cpu_mp_start() (thus only on the boot CPU). In practice, it's expected
 *   to be called from platform_attach() or platform_late_init().
 *
 * - if the remapping doesn't change the caching mode, or an uncached class
 *   is remapped to any kind of cached one, then no other restriction exists.
 *
 * - if pmap_remap_vm_attr() changes the caching mode, but both (the original
 *   and the remapped) classes remain cached, then the caller is responsible
 *   for calling dcache_wbinv_poc_all().
 *
 * - remapping of any kind of cached class to an uncached one is not permitted.
 */
void
pmap_remap_vm_attr(vm_memattr_t old_attr, vm_memattr_t new_attr)
{
	int old_idx, new_idx;

	/* Map VM memattrs to indexes into the tex_class table. */
	old_idx = pte2_attr_tab[(int)old_attr];
	new_idx = pte2_attr_tab[(int)new_attr];

	/* Replace TEX attribute and apply it. */
	tex_class[old_idx] = tex_class[new_idx];
	pmap_set_tex();
}

/*
 * KERNBASE must be a multiple of NPT2_IN_PG * PTE1_SIZE. In other words,
 * KERNBASE is mapped by the first L2 page table in an L2 page table page.
 * PT2MAP meets the same constraint, as it is placed just under KERNBASE.
 */
CTASSERT((KERNBASE & (NPT2_IN_PG * PTE1_SIZE - 1)) == 0);
CTASSERT((KERNBASE - VM_MAXUSER_ADDRESS) >= PT2MAP_SIZE);

/*
 * In crazy dreams, PAGE_SIZE could be a multiple of PTE2_SIZE in general.
 * For now, anyhow, the following check must be fulfilled.
 */
CTASSERT(PAGE_SIZE == PTE2_SIZE);
/*
 * We don't want to mess up MI code with all MMU and PMAP definitions,
 * so some things, which depend on other ones, are defined independently.
 * Now, it is time to check that we don't screw something up.
 */
CTASSERT(PDRSHIFT == PTE1_SHIFT);
/*
 * Check L1 and L2 page table entries definitions consistency.
 */
CTASSERT(NB_IN_PT1 == (sizeof(pt1_entry_t) * NPTE1_IN_PT1));
CTASSERT(NB_IN_PT2 == (sizeof(pt2_entry_t) * NPTE2_IN_PT2));
/*
 * Check L2 page tables page consistency.
 */
CTASSERT(PAGE_SIZE == (NPT2_IN_PG * NB_IN_PT2));
CTASSERT((1 << PT2PG_SHIFT) == NPT2_IN_PG);
/*
 * Check PT2TAB consistency.
 * PT2TAB_ENTRIES is defined as a division of NPTE1_IN_PT1 by NPT2_IN_PG.
 * This should be done without remainder.
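 *
 * Worked illustration (assuming the usual ARMv6/v7 short-descriptor sizes:
 * 4 KB pages, a 16 KB L1 table with 4096 entries, and 1 KB L2 tables):
 * NPT2_IN_PG = PAGE_SIZE / NB_IN_PT2 = 4, so
 * PT2TAB_ENTRIES = NPTE1_IN_PT1 / NPT2_IN_PG = 4096 / 4 = 1024, with no
 * remainder. Then NB_IN_PT2TAB = 1024 * sizeof(pt2_entry_t) = 4 KB, i.e.
 * exactly one page, which is what the NPG_IN_PT2TAB assertion further
 * below expects.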
 */
CTASSERT(NPTE1_IN_PT1 == (PT2TAB_ENTRIES * NPT2_IN_PG));

/*
 * A PT2MAP magic.
 *
 * All level 2 page tables (PT2s) are mapped continuously and accordingly
 * into PT2MAP address space. As PT2 size is less than PAGE_SIZE, this can
 * be done only if PAGE_SIZE is a multiple of PT2 size. All PT2s in one page
 * must be used together, but not necessarily at once. The first PT2 in a
 * page must map things on a correctly aligned address and the others must
 * follow in the right order.
 */
#define NB_IN_PT2TAB	(PT2TAB_ENTRIES * sizeof(pt2_entry_t))
#define NPT2_IN_PT2TAB	(NB_IN_PT2TAB / NB_IN_PT2)
#define NPG_IN_PT2TAB	(NB_IN_PT2TAB / PAGE_SIZE)

/*
 * Check PT2TAB consistency.
 * NPT2_IN_PT2TAB is defined as a division of NB_IN_PT2TAB by NB_IN_PT2.
 * NPG_IN_PT2TAB is defined as a division of NB_IN_PT2TAB by PAGE_SIZE.
 * Both should be done without remainder.
 */
CTASSERT(NB_IN_PT2TAB == (NPT2_IN_PT2TAB * NB_IN_PT2));
CTASSERT(NB_IN_PT2TAB == (NPG_IN_PT2TAB * PAGE_SIZE));
/*
 * The implementation was made general, however, with the assumption
 * below in mind. In case of another value of NPG_IN_PT2TAB,
 * the code should be rechecked once more.
 */
CTASSERT(NPG_IN_PT2TAB == 1);

/*
 * Get offset of PT2 in a page
 * associated with given PT1 index.
 */
static __inline u_int
page_pt2off(u_int pt1_idx)
{

	return ((pt1_idx & PT2PG_MASK) * NB_IN_PT2);
}

/*
 * Get physical address of PT2
 * associated with given PT2s page and PT1 index.
 */
static __inline vm_paddr_t
page_pt2pa(vm_paddr_t pgpa, u_int pt1_idx)
{

	return (pgpa + page_pt2off(pt1_idx));
}

/*
 * Get first entry of PT2
 * associated with given PT2s page and PT1 index.
 */
static __inline pt2_entry_t *
page_pt2(vm_offset_t pgva, u_int pt1_idx)
{

	return ((pt2_entry_t *)(pgva + page_pt2off(pt1_idx)));
}

/*
 * Get the virtual address of the PT2s page (mapped in PT2MAP)
 * that holds the PT2 that holds the entry mapping the given virtual address.
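 *
 * Illustrative note (assuming NPT2_IN_PG * PTE1_SIZE = 4 * 1 MB = 4 MB as
 * sketched above): the function rounds "va" down to a 4 MB boundary and
 * returns the address of the corresponding pt2map_entry(), i.e. the start
 * of the page in PT2MAP that holds all four PT2s covering that 4 MB region.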
636 */ 637static __inline vm_offset_t 638pt2map_pt2pg(vm_offset_t va) 639{ 640 641 va &= ~(NPT2_IN_PG * PTE1_SIZE - 1); 642 return ((vm_offset_t)pt2map_entry(va)); 643} 644 645/***************************************************************************** 646 * 647 * THREE pmap initialization milestones exist: 648 * 649 * locore.S 650 * -> fundamental init (including MMU) in ASM 651 * 652 * initarm() 653 * -> fundamental init continues in C 654 * -> first available physical address is known 655 * 656 * pmap_bootstrap_prepare() -> FIRST PMAP MILESTONE (first epoch begins) 657 * -> basic (safe) interface for physical address allocation is made 658 * -> basic (safe) interface for virtual mapping is made 659 * -> limited not SMP coherent work is possible 660 * 661 * -> more fundamental init continues in C 662 * -> locks and some more things are available 663 * -> all fundamental allocations and mappings are done 664 * 665 * pmap_bootstrap() -> SECOND PMAP MILESTONE (second epoch begins) 666 * -> phys_avail[] and virtual_avail is set 667 * -> control is passed to vm subsystem 668 * -> physical and virtual address allocation are off limit 669 * -> low level mapping functions, some SMP coherent, 670 * are available, which cannot be used before vm subsystem 671 * is being inited 672 * 673 * mi_startup() 674 * -> vm subsystem is being inited 675 * 676 * pmap_init() -> THIRD PMAP MILESTONE (third epoch begins) 677 * -> pmap is fully inited 678 * 679 *****************************************************************************/ 680 681/***************************************************************************** 682 * 683 * PMAP first stage initialization and utility functions 684 * for pre-bootstrap epoch. 685 * 686 * After pmap_bootstrap_prepare() is called, the following functions 687 * can be used: 688 * 689 * (1) strictly only for this stage functions for physical page allocations, 690 * virtual space allocations, and mappings: 691 * 692 * vm_paddr_t pmap_preboot_get_pages(u_int num); 693 * void pmap_preboot_map_pages(vm_paddr_t pa, vm_offset_t va, u_int num); 694 * vm_offset_t pmap_preboot_reserve_pages(u_int num); 695 * vm_offset_t pmap_preboot_get_vpages(u_int num); 696 * void pmap_preboot_map_attr(vm_paddr_t pa, vm_offset_t va, vm_size_t size, 697 * vm_prot_t prot, vm_memattr_t attr); 698 * 699 * (2) for all stages: 700 * 701 * vm_paddr_t pmap_kextract(vm_offset_t va); 702 * 703 * NOTE: This is not SMP coherent stage. 704 * 705 *****************************************************************************/ 706 707#define KERNEL_P2V(pa) \ 708 ((vm_offset_t)((pa) - arm_physmem_kernaddr + KERNVIRTADDR)) 709#define KERNEL_V2P(va) \ 710 ((vm_paddr_t)((va) - KERNVIRTADDR + arm_physmem_kernaddr)) 711 712static vm_paddr_t last_paddr; 713 714/* 715 * Pre-bootstrap epoch page allocator. 716 */ 717vm_paddr_t 718pmap_preboot_get_pages(u_int num) 719{ 720 vm_paddr_t ret; 721 722 ret = last_paddr; 723 last_paddr += num * PAGE_SIZE; 724 725 return (ret); 726} 727 728/* 729 * The fundamental initialization of PMAP stuff. 730 * 731 * Some things already happened in locore.S and some things could happen 732 * before pmap_bootstrap_prepare() is called, so let's recall what is done: 733 * 1. Caches are disabled. 734 * 2. We are running on virtual addresses already with 'boot_pt1' 735 * as L1 page table. 736 * 3. So far, all virtual addresses can be converted to physical ones and 737 * vice versa by the following macros: 738 * KERNEL_P2V(pa) .... physical to virtual ones, 739 * KERNEL_V2P(va) .... 
virtual to physical ones. 740 * 741 * What is done herein: 742 * 1. The 'boot_pt1' is replaced by real kernel L1 page table 'kern_pt1'. 743 * 2. PT2MAP magic is brought to live. 744 * 3. Basic preboot functions for page allocations and mappings can be used. 745 * 4. Everything is prepared for L1 cache enabling. 746 * 747 * Variations: 748 * 1. To use second TTB register, so kernel and users page tables will be 749 * separated. This way process forking - pmap_pinit() - could be faster, 750 * it saves physical pages and KVA per a process, and it's simple change. 751 * However, it will lead, due to hardware matter, to the following: 752 * (a) 2G space for kernel and 2G space for users. 753 * (b) 1G space for kernel in low addresses and 3G for users above it. 754 * A question is: Is the case (b) really an option? Note that case (b) 755 * does save neither physical memory and KVA. 756 */ 757void 758pmap_bootstrap_prepare(vm_paddr_t last) 759{ 760 vm_paddr_t pt2pg_pa, pt2tab_pa, pa, size; 761 vm_offset_t pt2pg_va; 762 pt1_entry_t *pte1p; 763 pt2_entry_t *pte2p; 764 u_int i; 765 uint32_t actlr_mask, actlr_set, l1_attr; 766 767 /* 768 * Now, we are going to make real kernel mapping. Note that we are 769 * already running on some mapping made in locore.S and we expect 770 * that it's large enough to ensure nofault access to physical memory 771 * allocated herein before switch. 772 * 773 * As kernel image and everything needed before are and will be mapped 774 * by section mappings, we align last physical address to PTE1_SIZE. 775 */ 776 last_paddr = pte1_roundup(last); 777 778 /* 779 * Allocate and zero page(s) for kernel L1 page table. 780 * 781 * Note that it's first allocation on space which was PTE1_SIZE 782 * aligned and as such base_pt1 is aligned to NB_IN_PT1 too. 783 */ 784 base_pt1 = pmap_preboot_get_pages(NPG_IN_PT1); 785 kern_pt1 = (pt1_entry_t *)KERNEL_P2V(base_pt1); 786 bzero((void*)kern_pt1, NB_IN_PT1); 787 pte1_sync_range(kern_pt1, NB_IN_PT1); 788 789 /* Allocate and zero page(s) for kernel PT2TAB. */ 790 pt2tab_pa = pmap_preboot_get_pages(NPG_IN_PT2TAB); 791 kern_pt2tab = (pt2_entry_t *)KERNEL_P2V(pt2tab_pa); 792 bzero(kern_pt2tab, NB_IN_PT2TAB); 793 pte2_sync_range(kern_pt2tab, NB_IN_PT2TAB); 794 795 /* Allocate and zero page(s) for kernel L2 page tables. */ 796 pt2pg_pa = pmap_preboot_get_pages(NKPT2PG); 797 pt2pg_va = KERNEL_P2V(pt2pg_pa); 798 size = NKPT2PG * PAGE_SIZE; 799 bzero((void*)pt2pg_va, size); 800 pte2_sync_range((pt2_entry_t *)pt2pg_va, size); 801 802 /* 803 * Add a physical memory segment (vm_phys_seg) corresponding to the 804 * preallocated pages for kernel L2 page tables so that vm_page 805 * structures representing these pages will be created. The vm_page 806 * structures are required for promotion of the corresponding kernel 807 * virtual addresses to section mappings. 808 */ 809 vm_phys_add_seg(pt2tab_pa, pmap_preboot_get_pages(0)); 810 811 /* 812 * Insert allocated L2 page table pages to PT2TAB and make 813 * link to all PT2s in L1 page table. See how kernel_vm_end 814 * is initialized. 815 * 816 * We play simple and safe. So every KVA will have underlaying 817 * L2 page table, even kernel image mapped by sections. 818 */ 819 pte2p = kern_pt2tab_entry(KERNBASE); 820 for (pa = pt2pg_pa; pa < pt2pg_pa + size; pa += PTE2_SIZE) 821 pt2tab_store(pte2p++, PTE2_KPT(pa)); 822 823 pte1p = kern_pte1(KERNBASE); 824 for (pa = pt2pg_pa; pa < pt2pg_pa + size; pa += NB_IN_PT2) 825 pte1_store(pte1p++, PTE1_LINK(pa)); 826 827 /* Make section mappings for kernel. 
*/ 828 l1_attr = ATTR_TO_L1(PTE2_ATTR_DEFAULT); 829 pte1p = kern_pte1(KERNBASE); 830 for (pa = KERNEL_V2P(KERNBASE); pa < last; pa += PTE1_SIZE) 831 pte1_store(pte1p++, PTE1_KERN(pa, PTE1_AP_KRW, l1_attr)); 832 833 /* 834 * Get free and aligned space for PT2MAP and make L1 page table links 835 * to L2 page tables held in PT2TAB. 836 * 837 * Note that pages holding PT2s are stored in PT2TAB as pt2_entry_t 838 * descriptors and PT2TAB page(s) itself is(are) used as PT2s. Thus 839 * each entry in PT2TAB maps all PT2s in a page. This implies that 840 * virtual address of PT2MAP must be aligned to NPT2_IN_PG * PTE1_SIZE. 841 */ 842 PT2MAP = (pt2_entry_t *)(KERNBASE - PT2MAP_SIZE); 843 pte1p = kern_pte1((vm_offset_t)PT2MAP); 844 for (pa = pt2tab_pa, i = 0; i < NPT2_IN_PT2TAB; i++, pa += NB_IN_PT2) { 845 pte1_store(pte1p++, PTE1_LINK(pa)); 846 } 847 848 /* 849 * Store PT2TAB in PT2TAB itself, i.e. self reference mapping. 850 * Each pmap will hold own PT2TAB, so the mapping should be not global. 851 */ 852 pte2p = kern_pt2tab_entry((vm_offset_t)PT2MAP); 853 for (pa = pt2tab_pa, i = 0; i < NPG_IN_PT2TAB; i++, pa += PTE2_SIZE) { 854 pt2tab_store(pte2p++, PTE2_KPT_NG(pa)); 855 } 856 857 /* 858 * Choose correct L2 page table and make mappings for allocations 859 * made herein which replaces temporary locore.S mappings after a while. 860 * Note that PT2MAP cannot be used until we switch to kern_pt1. 861 * 862 * Note, that these allocations started aligned on 1M section and 863 * kernel PT1 was allocated first. Making of mappings must follow 864 * order of physical allocations as we've used KERNEL_P2V() macro 865 * for virtual addresses resolution. 866 */ 867 pte2p = kern_pt2tab_entry((vm_offset_t)kern_pt1); 868 pt2pg_va = KERNEL_P2V(pte2_pa(pte2_load(pte2p))); 869 870 pte2p = page_pt2(pt2pg_va, pte1_index((vm_offset_t)kern_pt1)); 871 872 /* Make mapping for kernel L1 page table. */ 873 for (pa = base_pt1, i = 0; i < NPG_IN_PT1; i++, pa += PTE2_SIZE) 874 pte2_store(pte2p++, PTE2_KPT(pa)); 875 876 /* Make mapping for kernel PT2TAB. */ 877 for (pa = pt2tab_pa, i = 0; i < NPG_IN_PT2TAB; i++, pa += PTE2_SIZE) 878 pte2_store(pte2p++, PTE2_KPT(pa)); 879 880 /* Finally, switch from 'boot_pt1' to 'kern_pt1'. */ 881 pmap_kern_ttb = base_pt1 | ttb_flags; 882 cpuinfo_get_actlr_modifier(&actlr_mask, &actlr_set); 883 reinit_mmu(pmap_kern_ttb, actlr_mask, actlr_set); 884 /* 885 * Initialize the first available KVA. As kernel image is mapped by 886 * sections, we are leaving some gap behind. 887 */ 888 virtual_avail = (vm_offset_t)kern_pt2tab + NPG_IN_PT2TAB * PAGE_SIZE; 889} 890 891/* 892 * Setup L2 page table page for given KVA. 893 * Used in pre-bootstrap epoch. 894 * 895 * Note that we have allocated NKPT2PG pages for L2 page tables in advance 896 * and used them for mapping KVA starting from KERNBASE. However, this is not 897 * enough. Vectors and devices need L2 page tables too. Note that they are 898 * even above VM_MAX_KERNEL_ADDRESS. 899 */ 900static __inline vm_paddr_t 901pmap_preboot_pt2pg_setup(vm_offset_t va) 902{ 903 pt2_entry_t *pte2p, pte2; 904 vm_paddr_t pt2pg_pa; 905 906 /* Get associated entry in PT2TAB. */ 907 pte2p = kern_pt2tab_entry(va); 908 909 /* Just return, if PT2s page exists already. */ 910 pte2 = pt2tab_load(pte2p); 911 if (pte2_is_valid(pte2)) 912 return (pte2_pa(pte2)); 913 914 KASSERT(va >= VM_MAX_KERNEL_ADDRESS, 915 ("%s: NKPT2PG too small", __func__)); 916 917 /* 918 * Allocate page for PT2s and insert it to PT2TAB. 919 * In other words, map it into PT2MAP space. 
920 */ 921 pt2pg_pa = pmap_preboot_get_pages(1); 922 pt2tab_store(pte2p, PTE2_KPT(pt2pg_pa)); 923 924 /* Zero all PT2s in allocated page. */ 925 bzero((void*)pt2map_pt2pg(va), PAGE_SIZE); 926 pte2_sync_range((pt2_entry_t *)pt2map_pt2pg(va), PAGE_SIZE); 927 928 return (pt2pg_pa); 929} 930 931/* 932 * Setup L2 page table for given KVA. 933 * Used in pre-bootstrap epoch. 934 */ 935static void 936pmap_preboot_pt2_setup(vm_offset_t va) 937{ 938 pt1_entry_t *pte1p; 939 vm_paddr_t pt2pg_pa, pt2_pa; 940 941 /* Setup PT2's page. */ 942 pt2pg_pa = pmap_preboot_pt2pg_setup(va); 943 pt2_pa = page_pt2pa(pt2pg_pa, pte1_index(va)); 944 945 /* Insert PT2 to PT1. */ 946 pte1p = kern_pte1(va); 947 pte1_store(pte1p, PTE1_LINK(pt2_pa)); 948} 949 950/* 951 * Get L2 page entry associated with given KVA. 952 * Used in pre-bootstrap epoch. 953 */ 954static __inline pt2_entry_t* 955pmap_preboot_vtopte2(vm_offset_t va) 956{ 957 pt1_entry_t *pte1p; 958 959 /* Setup PT2 if needed. */ 960 pte1p = kern_pte1(va); 961 if (!pte1_is_valid(pte1_load(pte1p))) /* XXX - sections ?! */ 962 pmap_preboot_pt2_setup(va); 963 964 return (pt2map_entry(va)); 965} 966 967/* 968 * Pre-bootstrap epoch page(s) mapping(s). 969 */ 970void 971pmap_preboot_map_pages(vm_paddr_t pa, vm_offset_t va, u_int num) 972{ 973 u_int i; 974 pt2_entry_t *pte2p; 975 976 /* Map all the pages. */ 977 for (i = 0; i < num; i++) { 978 pte2p = pmap_preboot_vtopte2(va); 979 pte2_store(pte2p, PTE2_KRW(pa)); 980 va += PAGE_SIZE; 981 pa += PAGE_SIZE; 982 } 983} 984 985/* 986 * Pre-bootstrap epoch virtual space alocator. 987 */ 988vm_offset_t 989pmap_preboot_reserve_pages(u_int num) 990{ 991 u_int i; 992 vm_offset_t start, va; 993 pt2_entry_t *pte2p; 994 995 /* Allocate virtual space. */ 996 start = va = virtual_avail; 997 virtual_avail += num * PAGE_SIZE; 998 999 /* Zero the mapping. */ 1000 for (i = 0; i < num; i++) { 1001 pte2p = pmap_preboot_vtopte2(va); 1002 pte2_store(pte2p, 0); 1003 va += PAGE_SIZE; 1004 } 1005 1006 return (start); 1007} 1008 1009/* 1010 * Pre-bootstrap epoch page(s) allocation and mapping(s). 1011 */ 1012vm_offset_t 1013pmap_preboot_get_vpages(u_int num) 1014{ 1015 vm_paddr_t pa; 1016 vm_offset_t va; 1017 1018 /* Allocate physical page(s). */ 1019 pa = pmap_preboot_get_pages(num); 1020 1021 /* Allocate virtual space. */ 1022 va = virtual_avail; 1023 virtual_avail += num * PAGE_SIZE; 1024 1025 /* Map and zero all. */ 1026 pmap_preboot_map_pages(pa, va, num); 1027 bzero((void *)va, num * PAGE_SIZE); 1028 1029 return (va); 1030} 1031 1032/* 1033 * Pre-bootstrap epoch page mapping(s) with attributes. 1034 */ 1035void 1036pmap_preboot_map_attr(vm_paddr_t pa, vm_offset_t va, vm_size_t size, 1037 vm_prot_t prot, vm_memattr_t attr) 1038{ 1039 u_int num; 1040 u_int l1_attr, l1_prot, l2_prot, l2_attr; 1041 pt1_entry_t *pte1p; 1042 pt2_entry_t *pte2p; 1043 1044 l2_prot = prot & VM_PROT_WRITE ? PTE2_AP_KRW : PTE2_AP_KR; 1045 l2_prot |= (prot & VM_PROT_EXECUTE) ? PTE2_X : PTE2_NX; 1046 l2_attr = vm_memattr_to_pte2(attr); 1047 l1_prot = ATTR_TO_L1(l2_prot); 1048 l1_attr = ATTR_TO_L1(l2_attr); 1049 1050 /* Map all the pages. 
*/ 1051 num = round_page(size); 1052 while (num > 0) { 1053 if ((((va | pa) & PTE1_OFFSET) == 0) && (num >= PTE1_SIZE)) { 1054 pte1p = kern_pte1(va); 1055 pte1_store(pte1p, PTE1_KERN(pa, l1_prot, l1_attr)); 1056 va += PTE1_SIZE; 1057 pa += PTE1_SIZE; 1058 num -= PTE1_SIZE; 1059 } else { 1060 pte2p = pmap_preboot_vtopte2(va); 1061 pte2_store(pte2p, PTE2_KERN(pa, l2_prot, l2_attr)); 1062 va += PAGE_SIZE; 1063 pa += PAGE_SIZE; 1064 num -= PAGE_SIZE; 1065 } 1066 } 1067} 1068 1069/* 1070 * Extract from the kernel page table the physical address 1071 * that is mapped by the given virtual address "va". 1072 */ 1073vm_paddr_t 1074pmap_kextract(vm_offset_t va) 1075{ 1076 vm_paddr_t pa; 1077 pt1_entry_t pte1; 1078 pt2_entry_t pte2; 1079 1080 pte1 = pte1_load(kern_pte1(va)); 1081 if (pte1_is_section(pte1)) { 1082 pa = pte1_pa(pte1) | (va & PTE1_OFFSET); 1083 } else if (pte1_is_link(pte1)) { 1084 /* 1085 * We should beware of concurrent promotion that changes 1086 * pte1 at this point. However, it's not a problem as PT2 1087 * page is preserved by promotion in PT2TAB. So even if 1088 * it happens, using of PT2MAP is still safe. 1089 * 1090 * QQQ: However, concurrent removing is a problem which 1091 * ends in abort on PT2MAP space. Locking must be used 1092 * to deal with this. 1093 */ 1094 pte2 = pte2_load(pt2map_entry(va)); 1095 pa = pte2_pa(pte2) | (va & PTE2_OFFSET); 1096 } 1097 else { 1098 panic("%s: va %#x pte1 %#x", __func__, va, pte1); 1099 } 1100 return (pa); 1101} 1102 1103/* 1104 * Extract from the kernel page table the physical address 1105 * that is mapped by the given virtual address "va". Also 1106 * return L2 page table entry which maps the address. 1107 * 1108 * This is only intended to be used for panic dumps. 1109 */ 1110vm_paddr_t 1111pmap_dump_kextract(vm_offset_t va, pt2_entry_t *pte2p) 1112{ 1113 vm_paddr_t pa; 1114 pt1_entry_t pte1; 1115 pt2_entry_t pte2; 1116 1117 pte1 = pte1_load(kern_pte1(va)); 1118 if (pte1_is_section(pte1)) { 1119 pa = pte1_pa(pte1) | (va & PTE1_OFFSET); 1120 pte2 = pa | ATTR_TO_L2(pte1) | PTE2_V; 1121 } else if (pte1_is_link(pte1)) { 1122 pte2 = pte2_load(pt2map_entry(va)); 1123 pa = pte2_pa(pte2); 1124 } else { 1125 pte2 = 0; 1126 pa = 0; 1127 } 1128 if (pte2p != NULL) 1129 *pte2p = pte2; 1130 return (pa); 1131} 1132 1133/***************************************************************************** 1134 * 1135 * PMAP second stage initialization and utility functions 1136 * for bootstrap epoch. 1137 * 1138 * After pmap_bootstrap() is called, the following functions for 1139 * mappings can be used: 1140 * 1141 * void pmap_kenter(vm_offset_t va, vm_paddr_t pa); 1142 * void pmap_kremove(vm_offset_t va); 1143 * vm_offset_t pmap_map(vm_offset_t *virt, vm_paddr_t start, vm_paddr_t end, 1144 * int prot); 1145 * 1146 * NOTE: This is not SMP coherent stage. And physical page allocation is not 1147 * allowed during this stage. 1148 * 1149 *****************************************************************************/ 1150 1151/* 1152 * Initialize kernel PMAP locks and lists, kernel_pmap itself, and 1153 * reserve various virtual spaces for temporary mappings. 1154 */ 1155void 1156pmap_bootstrap(vm_offset_t firstaddr) 1157{ 1158 pt2_entry_t *unused __unused; 1159 struct pcpu *pc; 1160 1161 /* 1162 * Initialize the kernel pmap (which is statically allocated). 
1163 */ 1164 PMAP_LOCK_INIT(kernel_pmap); 1165 kernel_l1pa = (vm_paddr_t)kern_pt1; /* for libkvm */ 1166 kernel_pmap->pm_pt1 = kern_pt1; 1167 kernel_pmap->pm_pt2tab = kern_pt2tab; 1168 CPU_FILL(&kernel_pmap->pm_active); /* don't allow deactivation */ 1169 TAILQ_INIT(&kernel_pmap->pm_pvchunk); 1170 1171 /* 1172 * Initialize the global pv list lock. 1173 */ 1174 rw_init(&pvh_global_lock, "pmap pv global"); 1175 1176 LIST_INIT(&allpmaps); 1177 1178 /* 1179 * Request a spin mutex so that changes to allpmaps cannot be 1180 * preempted by smp_rendezvous_cpus(). 1181 */ 1182 mtx_init(&allpmaps_lock, "allpmaps", NULL, MTX_SPIN); 1183 mtx_lock_spin(&allpmaps_lock); 1184 LIST_INSERT_HEAD(&allpmaps, kernel_pmap, pm_list); 1185 mtx_unlock_spin(&allpmaps_lock); 1186 1187 /* 1188 * Reserve some special page table entries/VA space for temporary 1189 * mapping of pages. 1190 */ 1191#define SYSMAP(c, p, v, n) do { \ 1192 v = (c)pmap_preboot_reserve_pages(n); \ 1193 p = pt2map_entry((vm_offset_t)v); \ 1194 } while (0) 1195 1196 /* 1197 * Local CMAP1/CMAP2 are used for zeroing and copying pages. 1198 * Local CMAP2 is also used for data cache cleaning. 1199 * Global CMAP3 is used for the idle process page zeroing. 1200 */ 1201 pc = get_pcpu(); 1202 mtx_init(&pc->pc_cmap_lock, "SYSMAPS", NULL, MTX_DEF); 1203 SYSMAP(caddr_t, pc->pc_cmap1_pte2p, pc->pc_cmap1_addr, 1); 1204 SYSMAP(caddr_t, pc->pc_cmap2_pte2p, pc->pc_cmap2_addr, 1); 1205 SYSMAP(vm_offset_t, pc->pc_qmap_pte2p, pc->pc_qmap_addr, 1); 1206 SYSMAP(caddr_t, CMAP3, CADDR3, 1); 1207 1208 /* 1209 * Crashdump maps. 1210 */ 1211 SYSMAP(caddr_t, unused, crashdumpmap, MAXDUMPPGS); 1212 1213 /* 1214 * _tmppt is used for reading arbitrary physical pages via /dev/mem. 1215 */ 1216 SYSMAP(caddr_t, unused, _tmppt, 1); 1217 1218 /* 1219 * PADDR1 and PADDR2 are used by pmap_pte2_quick() and pmap_pte2(), 1220 * respectively. PADDR3 is used by pmap_pte2_ddb(). 1221 */ 1222 SYSMAP(pt2_entry_t *, PMAP1, PADDR1, 1); 1223 SYSMAP(pt2_entry_t *, PMAP2, PADDR2, 1); 1224#ifdef DDB 1225 SYSMAP(pt2_entry_t *, PMAP3, PADDR3, 1); 1226#endif 1227 mtx_init(&PMAP2mutex, "PMAP2", NULL, MTX_DEF); 1228 1229 /* 1230 * Note that in very short time in initarm(), we are going to 1231 * initialize phys_avail[] array and no further page allocation 1232 * can happen after that until vm subsystem will be initialized. 1233 */ 1234 kernel_vm_end_new = kernel_vm_end; 1235 virtual_end = vm_max_kernel_address; 1236} 1237 1238static void 1239pmap_init_reserved_pages(void) 1240{ 1241 struct pcpu *pc; 1242 vm_offset_t pages; 1243 int i; 1244 1245 CPU_FOREACH(i) { 1246 pc = pcpu_find(i); 1247 /* 1248 * Skip if the mapping has already been initialized, 1249 * i.e. this is the BSP. 1250 */ 1251 if (pc->pc_cmap1_addr != 0) 1252 continue; 1253 mtx_init(&pc->pc_cmap_lock, "SYSMAPS", NULL, MTX_DEF); 1254 pages = kva_alloc(PAGE_SIZE * 3); 1255 if (pages == 0) 1256 panic("%s: unable to allocate KVA", __func__); 1257 pc->pc_cmap1_pte2p = pt2map_entry(pages); 1258 pc->pc_cmap2_pte2p = pt2map_entry(pages + PAGE_SIZE); 1259 pc->pc_qmap_pte2p = pt2map_entry(pages + (PAGE_SIZE * 2)); 1260 pc->pc_cmap1_addr = (caddr_t)pages; 1261 pc->pc_cmap2_addr = (caddr_t)(pages + PAGE_SIZE); 1262 pc->pc_qmap_addr = pages + (PAGE_SIZE * 2); 1263 } 1264} 1265SYSINIT(rpages_init, SI_SUB_CPU, SI_ORDER_ANY, pmap_init_reserved_pages, NULL); 1266 1267/* 1268 * The function can already be use in second initialization stage. 1269 * As such, the function DOES NOT call pmap_growkernel() where PT2 1270 * allocation can happen. 
So if used, be sure that PT2 for given 1271 * virtual address is allocated already! 1272 * 1273 * Add a wired page to the kva. 1274 * Note: not SMP coherent. 1275 */ 1276static __inline void 1277pmap_kenter_prot_attr(vm_offset_t va, vm_paddr_t pa, uint32_t prot, 1278 uint32_t attr) 1279{ 1280 pt1_entry_t *pte1p; 1281 pt2_entry_t *pte2p; 1282 1283 pte1p = kern_pte1(va); 1284 if (!pte1_is_valid(pte1_load(pte1p))) { /* XXX - sections ?! */ 1285 /* 1286 * This is a very low level function, so PT2 and particularly 1287 * PT2PG associated with given virtual address must be already 1288 * allocated. It's a pain mainly during pmap initialization 1289 * stage. However, called after pmap initialization with 1290 * virtual address not under kernel_vm_end will lead to 1291 * the same misery. 1292 */ 1293 if (!pte2_is_valid(pte2_load(kern_pt2tab_entry(va)))) 1294 panic("%s: kernel PT2 not allocated!", __func__); 1295 } 1296 1297 pte2p = pt2map_entry(va); 1298 pte2_store(pte2p, PTE2_KERN(pa, prot, attr)); 1299} 1300 1301PMAP_INLINE void 1302pmap_kenter(vm_offset_t va, vm_paddr_t pa) 1303{ 1304 1305 pmap_kenter_prot_attr(va, pa, PTE2_AP_KRW, PTE2_ATTR_DEFAULT); 1306} 1307 1308/* 1309 * Remove a page from the kernel pagetables. 1310 * Note: not SMP coherent. 1311 */ 1312PMAP_INLINE void 1313pmap_kremove(vm_offset_t va) 1314{ 1315 pt1_entry_t *pte1p; 1316 pt2_entry_t *pte2p; 1317 1318 pte1p = kern_pte1(va); 1319 if (pte1_is_section(pte1_load(pte1p))) { 1320 pte1_clear(pte1p); 1321 } else { 1322 pte2p = pt2map_entry(va); 1323 pte2_clear(pte2p); 1324 } 1325} 1326 1327/* 1328 * Share new kernel PT2PG with all pmaps. 1329 * The caller is responsible for maintaining TLB consistency. 1330 */ 1331static void 1332pmap_kenter_pt2tab(vm_offset_t va, pt2_entry_t npte2) 1333{ 1334 pmap_t pmap; 1335 pt2_entry_t *pte2p; 1336 1337 mtx_lock_spin(&allpmaps_lock); 1338 LIST_FOREACH(pmap, &allpmaps, pm_list) { 1339 pte2p = pmap_pt2tab_entry(pmap, va); 1340 pt2tab_store(pte2p, npte2); 1341 } 1342 mtx_unlock_spin(&allpmaps_lock); 1343} 1344 1345/* 1346 * Share new kernel PTE1 with all pmaps. 1347 * The caller is responsible for maintaining TLB consistency. 1348 */ 1349static void 1350pmap_kenter_pte1(vm_offset_t va, pt1_entry_t npte1) 1351{ 1352 pmap_t pmap; 1353 pt1_entry_t *pte1p; 1354 1355 mtx_lock_spin(&allpmaps_lock); 1356 LIST_FOREACH(pmap, &allpmaps, pm_list) { 1357 pte1p = pmap_pte1(pmap, va); 1358 pte1_store(pte1p, npte1); 1359 } 1360 mtx_unlock_spin(&allpmaps_lock); 1361} 1362 1363/* 1364 * Used to map a range of physical addresses into kernel 1365 * virtual address space. 1366 * 1367 * The value passed in '*virt' is a suggested virtual address for 1368 * the mapping. Architectures which can support a direct-mapped 1369 * physical to virtual region can return the appropriate address 1370 * within that region, leaving '*virt' unchanged. Other 1371 * architectures should map the pages starting at '*virt' and 1372 * update '*virt' with the first usable address after the mapped 1373 * region. 1374 * 1375 * NOTE: Read the comments above pmap_kenter_prot_attr() as 1376 * the function is used herein! 
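 *
 * Illustrative use (a sketch; the names below are made up and not taken
 * from this file): a boot-time caller maps a physical range and advances
 * its virtual allocation cursor:
 *
 *	va = kva_cursor;
 *	sva = pmap_map(&va, start_pa, end_pa, VM_PROT_READ | VM_PROT_WRITE);
 *	kva_cursor = va;	(first usable VA after the mapped region)
 *
 * and then uses "sva" as the KVA where [start_pa, end_pa) is now mapped.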
1377 */ 1378vm_offset_t 1379pmap_map(vm_offset_t *virt, vm_paddr_t start, vm_paddr_t end, int prot) 1380{ 1381 vm_offset_t va, sva; 1382 vm_paddr_t pte1_offset; 1383 pt1_entry_t npte1; 1384 uint32_t l1prot, l2prot; 1385 uint32_t l1attr, l2attr; 1386 1387 PDEBUG(1, printf("%s: virt = %#x, start = %#x, end = %#x (size = %#x)," 1388 " prot = %d\n", __func__, *virt, start, end, end - start, prot)); 1389 1390 l2prot = (prot & VM_PROT_WRITE) ? PTE2_AP_KRW : PTE2_AP_KR; 1391 l2prot |= (prot & VM_PROT_EXECUTE) ? PTE2_X : PTE2_NX; 1392 l1prot = ATTR_TO_L1(l2prot); 1393 1394 l2attr = PTE2_ATTR_DEFAULT; 1395 l1attr = ATTR_TO_L1(l2attr); 1396 1397 va = *virt; 1398 /* 1399 * Does the physical address range's size and alignment permit at 1400 * least one section mapping to be created? 1401 */ 1402 pte1_offset = start & PTE1_OFFSET; 1403 if ((end - start) - ((PTE1_SIZE - pte1_offset) & PTE1_OFFSET) >= 1404 PTE1_SIZE) { 1405 /* 1406 * Increase the starting virtual address so that its alignment 1407 * does not preclude the use of section mappings. 1408 */ 1409 if ((va & PTE1_OFFSET) < pte1_offset) 1410 va = pte1_trunc(va) + pte1_offset; 1411 else if ((va & PTE1_OFFSET) > pte1_offset) 1412 va = pte1_roundup(va) + pte1_offset; 1413 } 1414 sva = va; 1415 while (start < end) { 1416 if ((start & PTE1_OFFSET) == 0 && end - start >= PTE1_SIZE) { 1417 KASSERT((va & PTE1_OFFSET) == 0, 1418 ("%s: misaligned va %#x", __func__, va)); 1419 npte1 = PTE1_KERN(start, l1prot, l1attr); 1420 pmap_kenter_pte1(va, npte1); 1421 va += PTE1_SIZE; 1422 start += PTE1_SIZE; 1423 } else { 1424 pmap_kenter_prot_attr(va, start, l2prot, l2attr); 1425 va += PAGE_SIZE; 1426 start += PAGE_SIZE; 1427 } 1428 } 1429 tlb_flush_range(sva, va - sva); 1430 *virt = va; 1431 return (sva); 1432} 1433 1434/* 1435 * Make a temporary mapping for a physical address. 1436 * This is only intended to be used for panic dumps. 1437 */ 1438void * 1439pmap_kenter_temporary(vm_paddr_t pa, int i) 1440{ 1441 vm_offset_t va; 1442 1443 /* QQQ: 'i' should be less or equal to MAXDUMPPGS. */ 1444 1445 va = (vm_offset_t)crashdumpmap + (i * PAGE_SIZE); 1446 pmap_kenter(va, pa); 1447 tlb_flush_local(va); 1448 return ((void *)crashdumpmap); 1449} 1450 1451 1452/************************************* 1453 * 1454 * TLB & cache maintenance routines. 1455 * 1456 *************************************/ 1457 1458/* 1459 * We inline these within pmap.c for speed. 1460 */ 1461PMAP_INLINE void 1462pmap_tlb_flush(pmap_t pmap, vm_offset_t va) 1463{ 1464 1465 if (pmap == kernel_pmap || !CPU_EMPTY(&pmap->pm_active)) 1466 tlb_flush(va); 1467} 1468 1469PMAP_INLINE void 1470pmap_tlb_flush_range(pmap_t pmap, vm_offset_t sva, vm_size_t size) 1471{ 1472 1473 if (pmap == kernel_pmap || !CPU_EMPTY(&pmap->pm_active)) 1474 tlb_flush_range(sva, size); 1475} 1476 1477/* 1478 * Abuse the pte2 nodes for unmapped kva to thread a kva freelist through. 1479 * Requirements: 1480 * - Must deal with pages in order to ensure that none of the PTE2_* bits 1481 * are ever set, PTE2_V in particular. 1482 * - Assumes we can write to pte2s without pte2_store() atomic ops. 1483 * - Assumes nothing will ever test these addresses for 0 to indicate 1484 * no mapping instead of correctly checking PTE2_V. 1485 * - Assumes a vm_offset_t will fit in a pte2 (true for arm). 1486 * Because PTE2_V is never set, there can be no mappings to invalidate. 
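 *
 * Illustrative example (assuming three free pages of KVA starting at
 * "base"): after pmap_pte2list_init(&head, base, 3), head == base and the
 * pte2 slot of each free VA stores the next free VA, so the chain reads
 * base -> base + PAGE_SIZE -> base + 2 * PAGE_SIZE -> 0 (end of list).
 * pmap_pte2list_alloc() then pops "base" from the head and zeroes its slot,
 * while pmap_pte2list_free() pushes a VA back onto the head.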
1487 */ 1488static vm_offset_t 1489pmap_pte2list_alloc(vm_offset_t *head) 1490{ 1491 pt2_entry_t *pte2p; 1492 vm_offset_t va; 1493 1494 va = *head; 1495 if (va == 0) 1496 panic("pmap_ptelist_alloc: exhausted ptelist KVA"); 1497 pte2p = pt2map_entry(va); 1498 *head = *pte2p; 1499 if (*head & PTE2_V) 1500 panic("%s: va with PTE2_V set!", __func__); 1501 *pte2p = 0; 1502 return (va); 1503} 1504 1505static void 1506pmap_pte2list_free(vm_offset_t *head, vm_offset_t va) 1507{ 1508 pt2_entry_t *pte2p; 1509 1510 if (va & PTE2_V) 1511 panic("%s: freeing va with PTE2_V set!", __func__); 1512 pte2p = pt2map_entry(va); 1513 *pte2p = *head; /* virtual! PTE2_V is 0 though */ 1514 *head = va; 1515} 1516 1517static void 1518pmap_pte2list_init(vm_offset_t *head, void *base, int npages) 1519{ 1520 int i; 1521 vm_offset_t va; 1522 1523 *head = 0; 1524 for (i = npages - 1; i >= 0; i--) { 1525 va = (vm_offset_t)base + i * PAGE_SIZE; 1526 pmap_pte2list_free(head, va); 1527 } 1528} 1529 1530/***************************************************************************** 1531 * 1532 * PMAP third and final stage initialization. 1533 * 1534 * After pmap_init() is called, PMAP subsystem is fully initialized. 1535 * 1536 *****************************************************************************/ 1537 1538SYSCTL_NODE(_vm, OID_AUTO, pmap, CTLFLAG_RD, 0, "VM/pmap parameters"); 1539 1540SYSCTL_INT(_vm_pmap, OID_AUTO, pv_entry_max, CTLFLAG_RD, &pv_entry_max, 0, 1541 "Max number of PV entries"); 1542SYSCTL_INT(_vm_pmap, OID_AUTO, shpgperproc, CTLFLAG_RD, &shpgperproc, 0, 1543 "Page share factor per proc"); 1544 1545static u_long nkpt2pg = NKPT2PG; 1546SYSCTL_ULONG(_vm_pmap, OID_AUTO, nkpt2pg, CTLFLAG_RD, 1547 &nkpt2pg, 0, "Pre-allocated pages for kernel PT2s"); 1548 1549static int sp_enabled = 1; 1550SYSCTL_INT(_vm_pmap, OID_AUTO, sp_enabled, CTLFLAG_RDTUN | CTLFLAG_NOFETCH, 1551 &sp_enabled, 0, "Are large page mappings enabled?"); 1552 1553static SYSCTL_NODE(_vm_pmap, OID_AUTO, pte1, CTLFLAG_RD, 0, 1554 "1MB page mapping counters"); 1555 1556static u_long pmap_pte1_demotions; 1557SYSCTL_ULONG(_vm_pmap_pte1, OID_AUTO, demotions, CTLFLAG_RD, 1558 &pmap_pte1_demotions, 0, "1MB page demotions"); 1559 1560static u_long pmap_pte1_mappings; 1561SYSCTL_ULONG(_vm_pmap_pte1, OID_AUTO, mappings, CTLFLAG_RD, 1562 &pmap_pte1_mappings, 0, "1MB page mappings"); 1563 1564static u_long pmap_pte1_p_failures; 1565SYSCTL_ULONG(_vm_pmap_pte1, OID_AUTO, p_failures, CTLFLAG_RD, 1566 &pmap_pte1_p_failures, 0, "1MB page promotion failures"); 1567 1568static u_long pmap_pte1_promotions; 1569SYSCTL_ULONG(_vm_pmap_pte1, OID_AUTO, promotions, CTLFLAG_RD, 1570 &pmap_pte1_promotions, 0, "1MB page promotions"); 1571 1572static u_long pmap_pte1_kern_demotions; 1573SYSCTL_ULONG(_vm_pmap_pte1, OID_AUTO, kern_demotions, CTLFLAG_RD, 1574 &pmap_pte1_kern_demotions, 0, "1MB page kernel demotions"); 1575 1576static u_long pmap_pte1_kern_promotions; 1577SYSCTL_ULONG(_vm_pmap_pte1, OID_AUTO, kern_promotions, CTLFLAG_RD, 1578 &pmap_pte1_kern_promotions, 0, "1MB page kernel promotions"); 1579 1580static __inline ttb_entry_t 1581pmap_ttb_get(pmap_t pmap) 1582{ 1583 1584 return (vtophys(pmap->pm_pt1) | ttb_flags); 1585} 1586 1587/* 1588 * Initialize a vm_page's machine-dependent fields. 1589 * 1590 * Variations: 1591 * 1. Pages for L2 page tables are always not managed. So, pv_list and 1592 * pt2_wirecount can share same physical space. However, proper 1593 * initialization on a page alloc for page tables and reinitialization 1594 * on the page free must be ensured. 
1595 */ 1596void 1597pmap_page_init(vm_page_t m) 1598{ 1599 1600 TAILQ_INIT(&m->md.pv_list); 1601 pt2_wirecount_init(m); 1602 m->md.pat_mode = VM_MEMATTR_DEFAULT; 1603} 1604 1605/* 1606 * Virtualization for faster way how to zero whole page. 1607 */ 1608static __inline void 1609pagezero(void *page) 1610{ 1611 1612 bzero(page, PAGE_SIZE); 1613} 1614 1615/* 1616 * Zero L2 page table page. 1617 * Use same KVA as in pmap_zero_page(). 1618 */ 1619static __inline vm_paddr_t 1620pmap_pt2pg_zero(vm_page_t m) 1621{ 1622 pt2_entry_t *cmap2_pte2p; 1623 vm_paddr_t pa; 1624 struct pcpu *pc; 1625 1626 pa = VM_PAGE_TO_PHYS(m); 1627 1628 /* 1629 * XXX: For now, we map whole page even if it's already zero, 1630 * to sync it even if the sync is only DSB. 1631 */ 1632 sched_pin(); 1633 pc = get_pcpu(); 1634 cmap2_pte2p = pc->pc_cmap2_pte2p; 1635 mtx_lock(&pc->pc_cmap_lock); 1636 if (pte2_load(cmap2_pte2p) != 0) 1637 panic("%s: CMAP2 busy", __func__); 1638 pte2_store(cmap2_pte2p, PTE2_KERN_NG(pa, PTE2_AP_KRW, 1639 vm_page_pte2_attr(m))); 1640 /* Even VM_ALLOC_ZERO request is only advisory. */ 1641 if ((m->flags & PG_ZERO) == 0) 1642 pagezero(pc->pc_cmap2_addr); 1643 pte2_sync_range((pt2_entry_t *)pc->pc_cmap2_addr, PAGE_SIZE); 1644 pte2_clear(cmap2_pte2p); 1645 tlb_flush((vm_offset_t)pc->pc_cmap2_addr); 1646 1647 /* 1648 * Unpin the thread before releasing the lock. Otherwise the thread 1649 * could be rescheduled while still bound to the current CPU, only 1650 * to unpin itself immediately upon resuming execution. 1651 */ 1652 sched_unpin(); 1653 mtx_unlock(&pc->pc_cmap_lock); 1654 1655 return (pa); 1656} 1657 1658/* 1659 * Init just allocated page as L2 page table(s) holder 1660 * and return its physical address. 1661 */ 1662static __inline vm_paddr_t 1663pmap_pt2pg_init(pmap_t pmap, vm_offset_t va, vm_page_t m) 1664{ 1665 vm_paddr_t pa; 1666 pt2_entry_t *pte2p; 1667 1668 /* Check page attributes. */ 1669 if (m->md.pat_mode != pt_memattr) 1670 pmap_page_set_memattr(m, pt_memattr); 1671 1672 /* Zero page and init wire counts. */ 1673 pa = pmap_pt2pg_zero(m); 1674 pt2_wirecount_init(m); 1675 1676 /* 1677 * Map page to PT2MAP address space for given pmap. 1678 * Note that PT2MAP space is shared with all pmaps. 1679 */ 1680 if (pmap == kernel_pmap) 1681 pmap_kenter_pt2tab(va, PTE2_KPT(pa)); 1682 else { 1683 pte2p = pmap_pt2tab_entry(pmap, va); 1684 pt2tab_store(pte2p, PTE2_KPT_NG(pa)); 1685 } 1686 1687 return (pa); 1688} 1689 1690/* 1691 * Initialize the pmap module. 1692 * Called by vm_init, to initialize any structures that the pmap 1693 * system needs to map virtual memory. 1694 */ 1695void 1696pmap_init(void) 1697{ 1698 vm_size_t s; 1699 pt2_entry_t *pte2p, pte2; 1700 u_int i, pte1_idx, pv_npg; 1701 1702 PDEBUG(1, printf("%s: phys_start = %#x\n", __func__, PHYSADDR)); 1703 1704 /* 1705 * Initialize the vm page array entries for kernel pmap's 1706 * L2 page table pages allocated in advance. 
1707 */ 1708 pte1_idx = pte1_index(KERNBASE - PT2MAP_SIZE); 1709 pte2p = kern_pt2tab_entry(KERNBASE - PT2MAP_SIZE); 1710 for (i = 0; i < nkpt2pg + NPG_IN_PT2TAB; i++, pte2p++) { 1711 vm_paddr_t pa; 1712 vm_page_t m; 1713 1714 pte2 = pte2_load(pte2p); 1715 KASSERT(pte2_is_valid(pte2), ("%s: no valid entry", __func__)); 1716 1717 pa = pte2_pa(pte2); 1718 m = PHYS_TO_VM_PAGE(pa); 1719 KASSERT(m >= vm_page_array && 1720 m < &vm_page_array[vm_page_array_size], 1721 ("%s: L2 page table page is out of range", __func__)); 1722 1723 m->pindex = pte1_idx; 1724 m->phys_addr = pa; 1725 pte1_idx += NPT2_IN_PG; 1726 } 1727 1728 /* 1729 * Initialize the address space (zone) for the pv entries. Set a 1730 * high water mark so that the system can recover from excessive 1731 * numbers of pv entries. 1732 */ 1733 TUNABLE_INT_FETCH("vm.pmap.shpgperproc", &shpgperproc); 1734 pv_entry_max = shpgperproc * maxproc + vm_cnt.v_page_count; 1735 TUNABLE_INT_FETCH("vm.pmap.pv_entries", &pv_entry_max); 1736 pv_entry_max = roundup(pv_entry_max, _NPCPV); 1737 pv_entry_high_water = 9 * (pv_entry_max / 10); 1738 1739 /* 1740 * Are large page mappings enabled? 1741 */ 1742 TUNABLE_INT_FETCH("vm.pmap.sp_enabled", &sp_enabled); 1743 if (sp_enabled) { 1744 KASSERT(MAXPAGESIZES > 1 && pagesizes[1] == 0, 1745 ("%s: can't assign to pagesizes[1]", __func__)); 1746 pagesizes[1] = PTE1_SIZE; 1747 } 1748 1749 /* 1750 * Calculate the size of the pv head table for sections. 1751 * Handle the possibility that "vm_phys_segs[...].end" is zero. 1752 * Note that the table is only for sections which could be promoted. 1753 */ 1754 first_managed_pa = pte1_trunc(vm_phys_segs[0].start); 1755 pv_npg = (pte1_trunc(vm_phys_segs[vm_phys_nsegs - 1].end - PAGE_SIZE) 1756 - first_managed_pa) / PTE1_SIZE + 1; 1757 1758 /* 1759 * Allocate memory for the pv head table for sections. 1760 */ 1761 s = (vm_size_t)(pv_npg * sizeof(struct md_page)); 1762 s = round_page(s); 1763 pv_table = (struct md_page *)kmem_malloc(kernel_arena, s, 1764 M_WAITOK | M_ZERO); 1765 for (i = 0; i < pv_npg; i++) 1766 TAILQ_INIT(&pv_table[i].pv_list); 1767 1768 pv_maxchunks = MAX(pv_entry_max / _NPCPV, maxproc); 1769 pv_chunkbase = (struct pv_chunk *)kva_alloc(PAGE_SIZE * pv_maxchunks); 1770 if (pv_chunkbase == NULL) 1771 panic("%s: not enough kvm for pv chunks", __func__); 1772 pmap_pte2list_init(&pv_vafree, pv_chunkbase, pv_maxchunks); 1773} 1774 1775/* 1776 * Add a list of wired pages to the kva 1777 * this routine is only used for temporary 1778 * kernel mappings that do not need to have 1779 * page modification or references recorded. 1780 * Note that old mappings are simply written 1781 * over. The page *must* be wired. 1782 * Note: SMP coherent. Uses a ranged shootdown IPI. 1783 */ 1784void 1785pmap_qenter(vm_offset_t sva, vm_page_t *ma, int count) 1786{ 1787 u_int anychanged; 1788 pt2_entry_t *epte2p, *pte2p, pte2; 1789 vm_page_t m; 1790 vm_paddr_t pa; 1791 1792 anychanged = 0; 1793 pte2p = pt2map_entry(sva); 1794 epte2p = pte2p + count; 1795 while (pte2p < epte2p) { 1796 m = *ma++; 1797 pa = VM_PAGE_TO_PHYS(m); 1798 pte2 = pte2_load(pte2p); 1799 if ((pte2_pa(pte2) != pa) || 1800 (pte2_attr(pte2) != vm_page_pte2_attr(m))) { 1801 anychanged++; 1802 pte2_store(pte2p, PTE2_KERN(pa, PTE2_AP_KRW, 1803 vm_page_pte2_attr(m))); 1804 } 1805 pte2p++; 1806 } 1807 if (__predict_false(anychanged)) 1808 tlb_flush_range(sva, count * PAGE_SIZE); 1809} 1810 1811/* 1812 * This routine tears out page mappings from the 1813 * kernel -- it is meant only for temporary mappings. 
1814 * Note: SMP coherent. Uses a ranged shootdown IPI. 1815 */ 1816void 1817pmap_qremove(vm_offset_t sva, int count) 1818{ 1819 vm_offset_t va; 1820 1821 va = sva; 1822 while (count-- > 0) { 1823 pmap_kremove(va); 1824 va += PAGE_SIZE; 1825 } 1826 tlb_flush_range(sva, va - sva); 1827} 1828 1829/* 1830 * Are we current address space or kernel? 1831 */ 1832static __inline int 1833pmap_is_current(pmap_t pmap) 1834{ 1835 1836 return (pmap == kernel_pmap || 1837 (pmap == vmspace_pmap(curthread->td_proc->p_vmspace))); 1838} 1839 1840/* 1841 * If the given pmap is not the current or kernel pmap, the returned 1842 * pte2 must be released by passing it to pmap_pte2_release(). 1843 */ 1844static pt2_entry_t * 1845pmap_pte2(pmap_t pmap, vm_offset_t va) 1846{ 1847 pt1_entry_t pte1; 1848 vm_paddr_t pt2pg_pa; 1849 1850 pte1 = pte1_load(pmap_pte1(pmap, va)); 1851 if (pte1_is_section(pte1)) 1852 panic("%s: attempt to map PTE1", __func__); 1853 if (pte1_is_link(pte1)) { 1854 /* Are we current address space or kernel? */ 1855 if (pmap_is_current(pmap)) 1856 return (pt2map_entry(va)); 1857 /* Note that L2 page table size is not equal to PAGE_SIZE. */ 1858 pt2pg_pa = trunc_page(pte1_link_pa(pte1)); 1859 mtx_lock(&PMAP2mutex); 1860 if (pte2_pa(pte2_load(PMAP2)) != pt2pg_pa) { 1861 pte2_store(PMAP2, PTE2_KPT(pt2pg_pa)); 1862 tlb_flush((vm_offset_t)PADDR2); 1863 } 1864 return (PADDR2 + (arm32_btop(va) & (NPTE2_IN_PG - 1))); 1865 } 1866 return (NULL); 1867} 1868 1869/* 1870 * Releases a pte2 that was obtained from pmap_pte2(). 1871 * Be prepared for the pte2p being NULL. 1872 */ 1873static __inline void 1874pmap_pte2_release(pt2_entry_t *pte2p) 1875{ 1876 1877 if ((pt2_entry_t *)(trunc_page((vm_offset_t)pte2p)) == PADDR2) { 1878 mtx_unlock(&PMAP2mutex); 1879 } 1880} 1881 1882/* 1883 * Super fast pmap_pte2 routine best used when scanning 1884 * the pv lists. This eliminates many coarse-grained 1885 * invltlb calls. Note that many of the pv list 1886 * scans are across different pmaps. It is very wasteful 1887 * to do an entire tlb flush for checking a single mapping. 1888 * 1889 * If the given pmap is not the current pmap, pvh_global_lock 1890 * must be held and curthread pinned to a CPU. 1891 */ 1892static pt2_entry_t * 1893pmap_pte2_quick(pmap_t pmap, vm_offset_t va) 1894{ 1895 pt1_entry_t pte1; 1896 vm_paddr_t pt2pg_pa; 1897 1898 pte1 = pte1_load(pmap_pte1(pmap, va)); 1899 if (pte1_is_section(pte1)) 1900 panic("%s: attempt to map PTE1", __func__); 1901 if (pte1_is_link(pte1)) { 1902 /* Are we current address space or kernel? */ 1903 if (pmap_is_current(pmap)) 1904 return (pt2map_entry(va)); 1905 rw_assert(&pvh_global_lock, RA_WLOCKED); 1906 KASSERT(curthread->td_pinned > 0, 1907 ("%s: curthread not pinned", __func__)); 1908 /* Note that L2 page table size is not equal to PAGE_SIZE. */ 1909 pt2pg_pa = trunc_page(pte1_link_pa(pte1)); 1910 if (pte2_pa(pte2_load(PMAP1)) != pt2pg_pa) { 1911 pte2_store(PMAP1, PTE2_KPT(pt2pg_pa)); 1912#ifdef SMP 1913 PMAP1cpu = PCPU_GET(cpuid); 1914#endif 1915 tlb_flush_local((vm_offset_t)PADDR1); 1916 PMAP1changed++; 1917 } else 1918#ifdef SMP 1919 if (PMAP1cpu != PCPU_GET(cpuid)) { 1920 PMAP1cpu = PCPU_GET(cpuid); 1921 tlb_flush_local((vm_offset_t)PADDR1); 1922 PMAP1changedcpu++; 1923 } else 1924#endif 1925 PMAP1unchanged++; 1926 return (PADDR1 + (arm32_btop(va) & (NPTE2_IN_PG - 1))); 1927 } 1928 return (NULL); 1929} 1930 1931/* 1932 * Routine: pmap_extract 1933 * Function: 1934 * Extract the physical page address associated 1935 * with the given map/virtual_address pair. 
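 *
 * For instance (a sketch only; "va" stands for any virtual address in
 * the given pmap):
 *
 *	vm_paddr_t pa;
 *
 *	pa = pmap_extract(kernel_pmap, va);
 *	if (pa == 0)
 *		... no mapping was found for va ...
 *
 * The routine takes the pmap lock itself, so the caller only supplies
 * the pmap and the address.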
1936 */ 1937vm_paddr_t 1938pmap_extract(pmap_t pmap, vm_offset_t va) 1939{ 1940 vm_paddr_t pa; 1941 pt1_entry_t pte1; 1942 pt2_entry_t *pte2p; 1943 1944 PMAP_LOCK(pmap); 1945 pte1 = pte1_load(pmap_pte1(pmap, va)); 1946 if (pte1_is_section(pte1)) 1947 pa = pte1_pa(pte1) | (va & PTE1_OFFSET); 1948 else if (pte1_is_link(pte1)) { 1949 pte2p = pmap_pte2(pmap, va); 1950 pa = pte2_pa(pte2_load(pte2p)) | (va & PTE2_OFFSET); 1951 pmap_pte2_release(pte2p); 1952 } else 1953 pa = 0; 1954 PMAP_UNLOCK(pmap); 1955 return (pa); 1956} 1957 1958/* 1959 * Routine: pmap_extract_and_hold 1960 * Function: 1961 * Atomically extract and hold the physical page 1962 * with the given pmap and virtual address pair 1963 * if that mapping permits the given protection. 1964 */ 1965vm_page_t 1966pmap_extract_and_hold(pmap_t pmap, vm_offset_t va, vm_prot_t prot) 1967{ 1968 vm_paddr_t pa, lockpa; 1969 pt1_entry_t pte1; 1970 pt2_entry_t pte2, *pte2p; 1971 vm_page_t m; 1972 1973 lockpa = 0; 1974 m = NULL; 1975 PMAP_LOCK(pmap); 1976retry: 1977 pte1 = pte1_load(pmap_pte1(pmap, va)); 1978 if (pte1_is_section(pte1)) { 1979 if (!(pte1 & PTE1_RO) || !(prot & VM_PROT_WRITE)) { 1980 pa = pte1_pa(pte1) | (va & PTE1_OFFSET); 1981 if (vm_page_pa_tryrelock(pmap, pa, &lockpa)) 1982 goto retry; 1983 m = PHYS_TO_VM_PAGE(pa); 1984 vm_page_hold(m); 1985 } 1986 } else if (pte1_is_link(pte1)) { 1987 pte2p = pmap_pte2(pmap, va); 1988 pte2 = pte2_load(pte2p); 1989 pmap_pte2_release(pte2p); 1990 if (pte2_is_valid(pte2) && 1991 (!(pte2 & PTE2_RO) || !(prot & VM_PROT_WRITE))) { 1992 pa = pte2_pa(pte2); 1993 if (vm_page_pa_tryrelock(pmap, pa, &lockpa)) 1994 goto retry; 1995 m = PHYS_TO_VM_PAGE(pa); 1996 vm_page_hold(m); 1997 } 1998 } 1999 PA_UNLOCK_COND(lockpa); 2000 PMAP_UNLOCK(pmap); 2001 return (m); 2002} 2003 2004/* 2005 * Grow the number of kernel L2 page table entries, if needed. 2006 */ 2007void 2008pmap_growkernel(vm_offset_t addr) 2009{ 2010 vm_page_t m; 2011 vm_paddr_t pt2pg_pa, pt2_pa; 2012 pt1_entry_t pte1; 2013 pt2_entry_t pte2; 2014 2015 PDEBUG(1, printf("%s: addr = %#x\n", __func__, addr)); 2016 /* 2017 * All the time kernel_vm_end is first KVA for which underlying 2018 * L2 page table is either not allocated or linked from L1 page table 2019 * (not considering sections). Except for two possible cases: 2020 * 2021 * (1) in the very beginning as long as pmap_growkernel() was 2022 * not called, it could be first unused KVA (which is not 2023 * rounded up to PTE1_SIZE), 2024 * 2025 * (2) when all KVA space is mapped and kernel_map->max_offset 2026 * address is not rounded up to PTE1_SIZE. (For example, 2027 * it could be 0xFFFFFFFF.) 2028 */ 2029 kernel_vm_end = pte1_roundup(kernel_vm_end); 2030 mtx_assert(&kernel_map->system_mtx, MA_OWNED); 2031 addr = roundup2(addr, PTE1_SIZE); 2032 if (addr - 1 >= kernel_map->max_offset) 2033 addr = kernel_map->max_offset; 2034 while (kernel_vm_end < addr) { 2035 pte1 = pte1_load(kern_pte1(kernel_vm_end)); 2036 if (pte1_is_valid(pte1)) { 2037 kernel_vm_end += PTE1_SIZE; 2038 if (kernel_vm_end - 1 >= kernel_map->max_offset) { 2039 kernel_vm_end = kernel_map->max_offset; 2040 break; 2041 } 2042 continue; 2043 } 2044 2045 /* 2046 * kernel_vm_end_new is used in pmap_pinit() when kernel 2047 * mappings are entered to new pmap all at once to avoid race 2048 * between pmap_kenter_pte1() and kernel_vm_end increase. 2049 * The same aplies to pmap_kenter_pt2tab(). 
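 *
 * To make the PTE1_SIZE granularity used throughout this loop concrete
 * (1 MB sections with the usual configuration; an assumption, the code
 * only uses the macro): a request to grow the KVA up to 0xC1234567 is
 * first rounded,
 *
 *	addr = roundup2(0xC1234567, PTE1_SIZE) == 0xC1300000,
 *
 * and the loop then links (or allocates and links) one L2 page table per
 * PTE1_SIZE step until kernel_vm_end reaches that address.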
2050 */ 2051 kernel_vm_end_new = kernel_vm_end + PTE1_SIZE; 2052 2053 pte2 = pt2tab_load(kern_pt2tab_entry(kernel_vm_end)); 2054 if (!pte2_is_valid(pte2)) { 2055 /* 2056 * Install new PT2s page into kernel PT2TAB. 2057 */ 2058 m = vm_page_alloc(NULL, 2059 pte1_index(kernel_vm_end) & ~PT2PG_MASK, 2060 VM_ALLOC_INTERRUPT | VM_ALLOC_NOOBJ | 2061 VM_ALLOC_WIRED | VM_ALLOC_ZERO); 2062 if (m == NULL) 2063 panic("%s: no memory to grow kernel", __func__); 2064 /* 2065 * QQQ: To link all new L2 page tables from L1 page 2066 * table now and so pmap_kenter_pte1() them 2067 * at once together with pmap_kenter_pt2tab() 2068 * could be nice speed up. However, 2069 * pmap_growkernel() does not happen so often... 2070 * QQQ: The other TTBR is another option. 2071 */ 2072 pt2pg_pa = pmap_pt2pg_init(kernel_pmap, kernel_vm_end, 2073 m); 2074 } else 2075 pt2pg_pa = pte2_pa(pte2); 2076 2077 pt2_pa = page_pt2pa(pt2pg_pa, pte1_index(kernel_vm_end)); 2078 pmap_kenter_pte1(kernel_vm_end, PTE1_LINK(pt2_pa)); 2079 2080 kernel_vm_end = kernel_vm_end_new; 2081 if (kernel_vm_end - 1 >= kernel_map->max_offset) { 2082 kernel_vm_end = kernel_map->max_offset; 2083 break; 2084 } 2085 } 2086} 2087 2088static int 2089kvm_size(SYSCTL_HANDLER_ARGS) 2090{ 2091 unsigned long ksize = vm_max_kernel_address - KERNBASE; 2092 2093 return (sysctl_handle_long(oidp, &ksize, 0, req)); 2094} 2095SYSCTL_PROC(_vm, OID_AUTO, kvm_size, CTLTYPE_LONG|CTLFLAG_RD, 2096 0, 0, kvm_size, "IU", "Size of KVM"); 2097 2098static int 2099kvm_free(SYSCTL_HANDLER_ARGS) 2100{ 2101 unsigned long kfree = vm_max_kernel_address - kernel_vm_end; 2102 2103 return (sysctl_handle_long(oidp, &kfree, 0, req)); 2104} 2105SYSCTL_PROC(_vm, OID_AUTO, kvm_free, CTLTYPE_LONG|CTLFLAG_RD, 2106 0, 0, kvm_free, "IU", "Amount of KVM free"); 2107 2108/*********************************************** 2109 * 2110 * Pmap allocation/deallocation routines. 2111 * 2112 ***********************************************/ 2113 2114/* 2115 * Initialize the pmap for the swapper process. 2116 */ 2117void 2118pmap_pinit0(pmap_t pmap) 2119{ 2120 PDEBUG(1, printf("%s: pmap = %p\n", __func__, pmap)); 2121 2122 PMAP_LOCK_INIT(pmap); 2123 2124 /* 2125 * Kernel page table directory and pmap stuff around is already 2126 * initialized, we are using it right now and here. So, finish 2127 * only PMAP structures initialization for process0 ... 2128 * 2129 * Since the L1 page table and PT2TAB is shared with the kernel pmap, 2130 * which is already included in the list "allpmaps", this pmap does 2131 * not need to be inserted into that list. 
2132 */ 2133 pmap->pm_pt1 = kern_pt1; 2134 pmap->pm_pt2tab = kern_pt2tab; 2135 CPU_ZERO(&pmap->pm_active); 2136 PCPU_SET(curpmap, pmap); 2137 TAILQ_INIT(&pmap->pm_pvchunk); 2138 bzero(&pmap->pm_stats, sizeof pmap->pm_stats); 2139 CPU_SET(0, &pmap->pm_active); 2140} 2141 2142static __inline void 2143pte1_copy_nosync(pt1_entry_t *spte1p, pt1_entry_t *dpte1p, vm_offset_t sva, 2144 vm_offset_t eva) 2145{ 2146 u_int idx, count; 2147 2148 idx = pte1_index(sva); 2149 count = (pte1_index(eva) - idx + 1) * sizeof(pt1_entry_t); 2150 bcopy(spte1p + idx, dpte1p + idx, count); 2151} 2152 2153static __inline void 2154pt2tab_copy_nosync(pt2_entry_t *spte2p, pt2_entry_t *dpte2p, vm_offset_t sva, 2155 vm_offset_t eva) 2156{ 2157 u_int idx, count; 2158 2159 idx = pt2tab_index(sva); 2160 count = (pt2tab_index(eva) - idx + 1) * sizeof(pt2_entry_t); 2161 bcopy(spte2p + idx, dpte2p + idx, count); 2162} 2163 2164/* 2165 * Initialize a preallocated and zeroed pmap structure, 2166 * such as one in a vmspace structure. 2167 */ 2168int 2169pmap_pinit(pmap_t pmap) 2170{ 2171 pt1_entry_t *pte1p; 2172 pt2_entry_t *pte2p; 2173 vm_paddr_t pa, pt2tab_pa; 2174 u_int i; 2175 2176 PDEBUG(6, printf("%s: pmap = %p, pm_pt1 = %p\n", __func__, pmap, 2177 pmap->pm_pt1)); 2178 2179 /* 2180 * No need to allocate L2 page table space yet but we do need 2181 * a valid L1 page table and PT2TAB table. 2182 * 2183 * Install shared kernel mappings to these tables. It's a little 2184 * tricky as some parts of KVA are reserved for vectors, devices, 2185 * and whatever else. These parts are supposed to be above 2186 * vm_max_kernel_address. Thus two regions should be installed: 2187 * 2188 * (1) <KERNBASE, kernel_vm_end), 2189 * (2) <vm_max_kernel_address, 0xFFFFFFFF>. 2190 * 2191 * QQQ: The second region should be stable enough to be installed 2192 * only once in time when the tables are allocated. 2193 * QQQ: Maybe copy of both regions at once could be faster ... 2194 * QQQ: Maybe the other TTBR is an option. 2195 * 2196 * Finally, install own PT2TAB table to these tables. 2197 */ 2198 2199 if (pmap->pm_pt1 == NULL) { 2200 pmap->pm_pt1 = (pt1_entry_t *)kmem_alloc_contig(kernel_arena, 2201 NB_IN_PT1, M_NOWAIT | M_ZERO, 0, -1UL, NB_IN_PT1, 0, 2202 pt_memattr); 2203 if (pmap->pm_pt1 == NULL) 2204 return (0); 2205 } 2206 if (pmap->pm_pt2tab == NULL) { 2207 /* 2208 * QQQ: (1) PT2TAB must be contiguous. If PT2TAB is one page 2209 * only, what should be the only size for 32 bit systems, 2210 * then we could allocate it with vm_page_alloc() and all 2211 * the stuff needed as other L2 page table pages. 2212 * (2) Note that a process PT2TAB is special L2 page table 2213 * page. Its mapping in kernel_arena is permanent and can 2214 * be used no matter which process is current. Its mapping 2215 * in PT2MAP can be used only for current process. 2216 */ 2217 pmap->pm_pt2tab = (pt2_entry_t *)kmem_alloc_attr(kernel_arena, 2218 NB_IN_PT2TAB, M_NOWAIT | M_ZERO, 0, -1UL, pt_memattr); 2219 if (pmap->pm_pt2tab == NULL) { 2220 /* 2221 * QQQ: As struct pmap is allocated from UMA with 2222 * UMA_ZONE_NOFREE flag, it's important to leave 2223 * no allocation in pmap if initialization failed. 2224 */ 2225 kmem_free(kernel_arena, (vm_offset_t)pmap->pm_pt1, 2226 NB_IN_PT1); 2227 pmap->pm_pt1 = NULL; 2228 return (0); 2229 } 2230 /* 2231 * QQQ: Each L2 page table page vm_page_t has pindex set to 2232 * pte1 index of virtual address mapped by this page. 2233 * It's not valid for non kernel PT2TABs themselves. 
2234 * The pindex of these pages can not be altered because 2235 * of the way how they are allocated now. However, it 2236 * should not be a problem. 2237 */ 2238 } 2239 2240 mtx_lock_spin(&allpmaps_lock); 2241 /* 2242 * To avoid race with pmap_kenter_pte1() and pmap_kenter_pt2tab(), 2243 * kernel_vm_end_new is used here instead of kernel_vm_end. 2244 */ 2245 pte1_copy_nosync(kern_pt1, pmap->pm_pt1, KERNBASE, 2246 kernel_vm_end_new - 1); 2247 pte1_copy_nosync(kern_pt1, pmap->pm_pt1, vm_max_kernel_address, 2248 0xFFFFFFFF); 2249 pt2tab_copy_nosync(kern_pt2tab, pmap->pm_pt2tab, KERNBASE, 2250 kernel_vm_end_new - 1); 2251 pt2tab_copy_nosync(kern_pt2tab, pmap->pm_pt2tab, vm_max_kernel_address, 2252 0xFFFFFFFF); 2253 LIST_INSERT_HEAD(&allpmaps, pmap, pm_list); 2254 mtx_unlock_spin(&allpmaps_lock); 2255 2256 /* 2257 * Store PT2MAP PT2 pages (a.k.a. PT2TAB) in PT2TAB itself. 2258 * I.e. self reference mapping. The PT2TAB is private, however mapped 2259 * into shared PT2MAP space, so the mapping should be not global. 2260 */ 2261 pt2tab_pa = vtophys(pmap->pm_pt2tab); 2262 pte2p = pmap_pt2tab_entry(pmap, (vm_offset_t)PT2MAP); 2263 for (pa = pt2tab_pa, i = 0; i < NPG_IN_PT2TAB; i++, pa += PTE2_SIZE) { 2264 pt2tab_store(pte2p++, PTE2_KPT_NG(pa)); 2265 } 2266 2267 /* Insert PT2MAP PT2s into pmap PT1. */ 2268 pte1p = pmap_pte1(pmap, (vm_offset_t)PT2MAP); 2269 for (pa = pt2tab_pa, i = 0; i < NPT2_IN_PT2TAB; i++, pa += NB_IN_PT2) { 2270 pte1_store(pte1p++, PTE1_LINK(pa)); 2271 } 2272 2273 /* 2274 * Now synchronize new mapping which was made above. 2275 */ 2276 pte1_sync_range(pmap->pm_pt1, NB_IN_PT1); 2277 pte2_sync_range(pmap->pm_pt2tab, NB_IN_PT2TAB); 2278 2279 CPU_ZERO(&pmap->pm_active); 2280 TAILQ_INIT(&pmap->pm_pvchunk); 2281 bzero(&pmap->pm_stats, sizeof pmap->pm_stats); 2282 2283 return (1); 2284} 2285 2286#ifdef INVARIANTS 2287static boolean_t 2288pt2tab_user_is_empty(pt2_entry_t *tab) 2289{ 2290 u_int i, end; 2291 2292 end = pt2tab_index(VM_MAXUSER_ADDRESS); 2293 for (i = 0; i < end; i++) 2294 if (tab[i] != 0) return (FALSE); 2295 return (TRUE); 2296} 2297#endif 2298/* 2299 * Release any resources held by the given physical map. 2300 * Called when a pmap initialized by pmap_pinit is being released. 2301 * Should only be called if the map contains no valid mappings. 2302 */ 2303void 2304pmap_release(pmap_t pmap) 2305{ 2306#ifdef INVARIANTS 2307 vm_offset_t start, end; 2308#endif 2309 KASSERT(pmap->pm_stats.resident_count == 0, 2310 ("%s: pmap resident count %ld != 0", __func__, 2311 pmap->pm_stats.resident_count)); 2312 KASSERT(pt2tab_user_is_empty(pmap->pm_pt2tab), 2313 ("%s: has allocated user PT2(s)", __func__)); 2314 KASSERT(CPU_EMPTY(&pmap->pm_active), 2315 ("%s: pmap %p is active on some CPU(s)", __func__, pmap)); 2316 2317 mtx_lock_spin(&allpmaps_lock); 2318 LIST_REMOVE(pmap, pm_list); 2319 mtx_unlock_spin(&allpmaps_lock); 2320 2321#ifdef INVARIANTS 2322 start = pte1_index(KERNBASE) * sizeof(pt1_entry_t); 2323 end = (pte1_index(0xFFFFFFFF) + 1) * sizeof(pt1_entry_t); 2324 bzero((char *)pmap->pm_pt1 + start, end - start); 2325 2326 start = pt2tab_index(KERNBASE) * sizeof(pt2_entry_t); 2327 end = (pt2tab_index(0xFFFFFFFF) + 1) * sizeof(pt2_entry_t); 2328 bzero((char *)pmap->pm_pt2tab + start, end - start); 2329#endif 2330 /* 2331 * We are leaving PT1 and PT2TAB allocated on released pmap, 2332 * so hopefully UMA vmspace_zone will always be inited with 2333 * UMA_ZONE_NOFREE flag. 
2334	 */
2335	}
2336
2337	/*********************************************************
2338	 *
2339	 *  L2 page table pages and their management routines.
2340	 *
2341	 *********************************************************/
2342
2343	/*
2344	 *  Virtual interface for L2 page table wire counting.
2345	 *
2346	 *  Each L2 page table in a page has its own counter, which counts the number
2347	 *  of valid mappings in the table. The global page counter counts mappings in
2348	 *  all tables in the page, plus one mapping of the page itself in PT2TAB.
2349	 *
2350	 *  During a promotion we leave the associated L2 page table counter
2351	 *  untouched, so the table (strictly speaking a page which holds it)
2352	 *  is never freed if promoted.
2353	 *
2354	 *  If a page m->wire_count == 1 then no valid mappings exist in any L2 page
2355	 *  table in the page and the page itself is only mapped in PT2TAB.
2356	 */
2357
2358	static __inline void
2359	pt2_wirecount_init(vm_page_t m)
2360	{
2361		u_int i;
2362
2363		/*
2364		 * Note: A page m is allocated with VM_ALLOC_WIRED flag and
2365		 *       m->wire_count should be already set correctly.
2366		 *       So, there is no need to set it again herein.
2367		 */
2368		for (i = 0; i < NPT2_IN_PG; i++)
2369			m->md.pt2_wirecount[i] = 0;
2370	}
2371
2372	static __inline void
2373	pt2_wirecount_inc(vm_page_t m, uint32_t pte1_idx)
2374	{
2375
2376		/*
2377		 * Note: A just modified pte2 (i.e. already allocated)
2378		 *       is acquiring one extra reference which must be
2379		 *       explicitly cleared. It influences the KASSERTs herein.
2380		 *       All L2 page tables in a page always belong to the same
2381		 *       pmap, so we allow only one extra reference for the page.
2382		 */
2383		KASSERT(m->md.pt2_wirecount[pte1_idx & PT2PG_MASK] < (NPTE2_IN_PT2 + 1),
2384		    ("%s: PT2 is overflowing ...", __func__));
2385		KASSERT(m->wire_count <= (NPTE2_IN_PG + 1),
2386		    ("%s: PT2PG is overflowing ...", __func__));
2387
2388		m->wire_count++;
2389		m->md.pt2_wirecount[pte1_idx & PT2PG_MASK]++;
2390	}
2391
2392	static __inline void
2393	pt2_wirecount_dec(vm_page_t m, uint32_t pte1_idx)
2394	{
2395
2396		KASSERT(m->md.pt2_wirecount[pte1_idx & PT2PG_MASK] != 0,
2397		    ("%s: PT2 is underflowing ...", __func__));
2398		KASSERT(m->wire_count > 1,
2399		    ("%s: PT2PG is underflowing ...", __func__));
2400
2401		m->wire_count--;
2402		m->md.pt2_wirecount[pte1_idx & PT2PG_MASK]--;
2403	}
2404
2405	static __inline void
2406	pt2_wirecount_set(vm_page_t m, uint32_t pte1_idx, uint16_t count)
2407	{
2408
2409		KASSERT(count <= NPTE2_IN_PT2,
2410		    ("%s: invalid count %u", __func__, count));
2411		KASSERT(m->wire_count > m->md.pt2_wirecount[pte1_idx & PT2PG_MASK],
2412		    ("%s: PT2PG corrupting (%u, %u) ...", __func__, m->wire_count,
2413		    m->md.pt2_wirecount[pte1_idx & PT2PG_MASK]));
2414
2415		m->wire_count -= m->md.pt2_wirecount[pte1_idx & PT2PG_MASK];
2416		m->wire_count += count;
2417		m->md.pt2_wirecount[pte1_idx & PT2PG_MASK] = count;
2418
2419		KASSERT(m->wire_count <= (NPTE2_IN_PG + 1),
2420		    ("%s: PT2PG is overflowed (%u) ...", __func__, m->wire_count));
2421	}
2422
2423	static __inline uint32_t
2424	pt2_wirecount_get(vm_page_t m, uint32_t pte1_idx)
2425	{
2426
2427		return (m->md.pt2_wirecount[pte1_idx & PT2PG_MASK]);
2428	}
2429
2430	static __inline boolean_t
2431	pt2_is_empty(vm_page_t m, vm_offset_t va)
2432	{
2433
2434		return (m->md.pt2_wirecount[pte1_index(va) & PT2PG_MASK] == 0);
2435	}
2436
2437	static __inline boolean_t
2438	pt2_is_full(vm_page_t m, vm_offset_t va)
2439	{
2440
2441		return (m->md.pt2_wirecount[pte1_index(va) & PT2PG_MASK] ==
2442		    NPTE2_IN_PT2);
2443	}
2444
2445	static __inline boolean_t
2446pt2pg_is_empty(vm_page_t m) 2447{ 2448 2449 return (m->wire_count == 1); 2450} 2451 2452/* 2453 * This routine is called if the L2 page table 2454 * is not mapped correctly. 2455 */ 2456static vm_page_t 2457_pmap_allocpte2(pmap_t pmap, vm_offset_t va, u_int flags) 2458{ 2459 uint32_t pte1_idx; 2460 pt1_entry_t *pte1p; 2461 pt2_entry_t pte2; 2462 vm_page_t m; 2463 vm_paddr_t pt2pg_pa, pt2_pa; 2464 2465 pte1_idx = pte1_index(va); 2466 pte1p = pmap->pm_pt1 + pte1_idx; 2467 2468 KASSERT(pte1_load(pte1p) == 0, 2469 ("%s: pm_pt1[%#x] is not zero: %#x", __func__, pte1_idx, 2470 pte1_load(pte1p))); 2471 2472 pte2 = pt2tab_load(pmap_pt2tab_entry(pmap, va)); 2473 if (!pte2_is_valid(pte2)) { 2474 /* 2475 * Install new PT2s page into pmap PT2TAB. 2476 */ 2477 m = vm_page_alloc(NULL, pte1_idx & ~PT2PG_MASK, 2478 VM_ALLOC_NOOBJ | VM_ALLOC_WIRED | VM_ALLOC_ZERO); 2479 if (m == NULL) { 2480 if ((flags & PMAP_ENTER_NOSLEEP) == 0) { 2481 PMAP_UNLOCK(pmap); 2482 rw_wunlock(&pvh_global_lock); 2483 VM_WAIT; 2484 rw_wlock(&pvh_global_lock); 2485 PMAP_LOCK(pmap); 2486 } 2487 2488 /* 2489 * Indicate the need to retry. While waiting, 2490 * the L2 page table page may have been allocated. 2491 */ 2492 return (NULL); 2493 } 2494 pmap->pm_stats.resident_count++; 2495 pt2pg_pa = pmap_pt2pg_init(pmap, va, m); 2496 } else { 2497 pt2pg_pa = pte2_pa(pte2); 2498 m = PHYS_TO_VM_PAGE(pt2pg_pa); 2499 } 2500 2501 pt2_wirecount_inc(m, pte1_idx); 2502 pt2_pa = page_pt2pa(pt2pg_pa, pte1_idx); 2503 pte1_store(pte1p, PTE1_LINK(pt2_pa)); 2504 2505 return (m); 2506} 2507 2508static vm_page_t 2509pmap_allocpte2(pmap_t pmap, vm_offset_t va, u_int flags) 2510{ 2511 u_int pte1_idx; 2512 pt1_entry_t *pte1p, pte1; 2513 vm_page_t m; 2514 2515 pte1_idx = pte1_index(va); 2516retry: 2517 pte1p = pmap->pm_pt1 + pte1_idx; 2518 pte1 = pte1_load(pte1p); 2519 2520 /* 2521 * This supports switching from a 1MB page to a 2522 * normal 4K page. 2523 */ 2524 if (pte1_is_section(pte1)) { 2525 (void)pmap_demote_pte1(pmap, pte1p, va); 2526 /* 2527 * Reload pte1 after demotion. 2528 * 2529 * Note: Demotion can even fail as either PT2 is not find for 2530 * the virtual address or PT2PG can not be allocated. 2531 */ 2532 pte1 = pte1_load(pte1p); 2533 } 2534 2535 /* 2536 * If the L2 page table page is mapped, we just increment the 2537 * hold count, and activate it. 2538 */ 2539 if (pte1_is_link(pte1)) { 2540 m = PHYS_TO_VM_PAGE(pte1_link_pa(pte1)); 2541 pt2_wirecount_inc(m, pte1_idx); 2542 } else { 2543 /* 2544 * Here if the PT2 isn't mapped, or if it has 2545 * been deallocated. 2546 */ 2547 m = _pmap_allocpte2(pmap, va, flags); 2548 if (m == NULL && (flags & PMAP_ENTER_NOSLEEP) == 0) 2549 goto retry; 2550 } 2551 2552 return (m); 2553} 2554 2555static __inline void 2556pmap_free_zero_pages(struct spglist *free) 2557{ 2558 vm_page_t m; 2559 2560 while ((m = SLIST_FIRST(free)) != NULL) { 2561 SLIST_REMOVE_HEAD(free, plinks.s.ss); 2562 /* Preserve the page's PG_ZERO setting. */ 2563 vm_page_free_toq(m); 2564 } 2565} 2566 2567/* 2568 * Schedule the specified unused L2 page table page to be freed. Specifically, 2569 * add the page to the specified list of pages that will be released to the 2570 * physical memory manager after the TLB has been updated. 
2571 */ 2572static __inline void 2573pmap_add_delayed_free_list(vm_page_t m, struct spglist *free) 2574{ 2575 2576 /* 2577 * Put page on a list so that it is released after 2578 * *ALL* TLB shootdown is done 2579 */ 2580#ifdef PMAP_DEBUG 2581 pmap_zero_page_check(m); 2582#endif 2583 m->flags |= PG_ZERO; 2584 SLIST_INSERT_HEAD(free, m, plinks.s.ss); 2585} 2586 2587/* 2588 * Unwire L2 page tables page. 2589 */ 2590static void 2591pmap_unwire_pt2pg(pmap_t pmap, vm_offset_t va, vm_page_t m) 2592{ 2593 pt1_entry_t *pte1p, opte1 __unused; 2594 pt2_entry_t *pte2p; 2595 uint32_t i; 2596 2597 KASSERT(pt2pg_is_empty(m), 2598 ("%s: pmap %p PT2PG %p wired", __func__, pmap, m)); 2599 2600 /* 2601 * Unmap all L2 page tables in the page from L1 page table. 2602 * 2603 * QQQ: Individual L2 page tables (except the last one) can be unmapped 2604 * earlier. However, we are doing that this way. 2605 */ 2606 KASSERT(m->pindex == (pte1_index(va) & ~PT2PG_MASK), 2607 ("%s: pmap %p va %#x PT2PG %p bad index", __func__, pmap, va, m)); 2608 pte1p = pmap->pm_pt1 + m->pindex; 2609 for (i = 0; i < NPT2_IN_PG; i++, pte1p++) { 2610 KASSERT(m->md.pt2_wirecount[i] == 0, 2611 ("%s: pmap %p PT2 %u (PG %p) wired", __func__, pmap, i, m)); 2612 opte1 = pte1_load(pte1p); 2613 if (pte1_is_link(opte1)) { 2614 pte1_clear(pte1p); 2615 /* 2616 * Flush intermediate TLB cache. 2617 */ 2618 pmap_tlb_flush(pmap, (m->pindex + i) << PTE1_SHIFT); 2619 } 2620#ifdef INVARIANTS 2621 else 2622 KASSERT((opte1 == 0) || pte1_is_section(opte1), 2623 ("%s: pmap %p va %#x bad pte1 %x at %u", __func__, 2624 pmap, va, opte1, i)); 2625#endif 2626 } 2627 2628 /* 2629 * Unmap the page from PT2TAB. 2630 */ 2631 pte2p = pmap_pt2tab_entry(pmap, va); 2632 (void)pt2tab_load_clear(pte2p); 2633 pmap_tlb_flush(pmap, pt2map_pt2pg(va)); 2634 2635 m->wire_count = 0; 2636 pmap->pm_stats.resident_count--; 2637 2638 /* 2639 * This is a release store so that the ordinary store unmapping 2640 * the L2 page table page is globally performed before TLB shoot- 2641 * down is begun. 2642 */ 2643 atomic_subtract_rel_int(&vm_cnt.v_wire_count, 1); 2644} 2645 2646/* 2647 * Decrements a L2 page table page's wire count, which is used to record the 2648 * number of valid page table entries within the page. If the wire count 2649 * drops to zero, then the page table page is unmapped. Returns TRUE if the 2650 * page table page was unmapped and FALSE otherwise. 2651 */ 2652static __inline boolean_t 2653pmap_unwire_pt2(pmap_t pmap, vm_offset_t va, vm_page_t m, struct spglist *free) 2654{ 2655 pt2_wirecount_dec(m, pte1_index(va)); 2656 if (pt2pg_is_empty(m)) { 2657 /* 2658 * QQQ: Wire count is zero, so whole page should be zero and 2659 * we can set PG_ZERO flag to it. 2660 * Note that when promotion is enabled, it takes some 2661 * more efforts. See pmap_unwire_pt2_all() below. 2662 */ 2663 pmap_unwire_pt2pg(pmap, va, m); 2664 pmap_add_delayed_free_list(m, free); 2665 return (TRUE); 2666 } else 2667 return (FALSE); 2668} 2669 2670/* 2671 * Drop a L2 page table page's wire count at once, which is used to record 2672 * the number of valid L2 page table entries within the page. If the wire 2673 * count drops to zero, then the L2 page table page is unmapped. 
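 *
 * A worked example of the counting scheme (using the usual geometry of
 * four 1 KB L2 page tables per 4 KB page, i.e. NPT2_IN_PG == 4 and
 * NPTE2_IN_PT2 == 256; illustrative values only): a PT2 page whose four
 * tables hold 256, 10, 0 and 3 valid PTE2s has
 *
 *	m->md.pt2_wirecount[] = { 256, 10, 0, 3 }
 *	m->wire_count         = 1 + 256 + 10 + 0 + 3 = 270
 *
 * where the extra 1 stands for the page's own mapping in PT2TAB.
 * Dropping the full table here thus takes wire_count from 270 to 14, and
 * only once it falls back to 1 (pt2pg_is_empty()) is the page freed.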
2674	 */
2675	static __inline void
2676	pmap_unwire_pt2_all(pmap_t pmap, vm_offset_t va, vm_page_t m,
2677	    struct spglist *free)
2678	{
2679		u_int pte1_idx = pte1_index(va);
2680
2681		KASSERT(m->pindex == (pte1_idx & ~PT2PG_MASK),
2682		    ("%s: PT2 page's pindex is wrong", __func__));
2683		KASSERT(m->wire_count > pt2_wirecount_get(m, pte1_idx),
2684		    ("%s: bad pt2 wire count %u > %u", __func__, m->wire_count,
2685		    pt2_wirecount_get(m, pte1_idx)));
2686
2687		/*
2688		 * It's possible that the L2 page table was never used.
2689		 * This can happen when a section was created without promotion.
2690		 */
2691		if (pt2_is_full(m, va)) {
2692			pt2_wirecount_set(m, pte1_idx, 0);
2693
2694			/*
2695			 * QQQ: We clear the L2 page table now, so when the L2 page table page
2696			 *      is going to be freed, we can set the PG_ZERO flag on it ...
2697			 *      This function is called only on section mappings, so
2698			 *      hopefully it's not too big an overhead.
2699			 *
2700			 * XXX: If pmap is current, existing PT2MAP mapping could be
2701			 *      used for zeroing.
2702			 */
2703			pmap_zero_page_area(m, page_pt2off(pte1_idx), NB_IN_PT2);
2704		}
2705	#ifdef INVARIANTS
2706		else
2707			KASSERT(pt2_is_empty(m, va), ("%s: PT2 is not empty (%u)",
2708			    __func__, pt2_wirecount_get(m, pte1_idx)));
2709	#endif
2710		if (pt2pg_is_empty(m)) {
2711			pmap_unwire_pt2pg(pmap, va, m);
2712			pmap_add_delayed_free_list(m, free);
2713		}
2714	}
2715
2716	/*
2717	 * After removing an L2 page table entry, this routine is used to
2718	 * conditionally free the page, and manage the hold/wire counts.
2719	 */
2720	static boolean_t
2721	pmap_unuse_pt2(pmap_t pmap, vm_offset_t va, struct spglist *free)
2722	{
2723		pt1_entry_t pte1;
2724		vm_page_t mpte;
2725
2726		if (va >= VM_MAXUSER_ADDRESS)
2727			return (FALSE);
2728		pte1 = pte1_load(pmap_pte1(pmap, va));
2729		mpte = PHYS_TO_VM_PAGE(pte1_link_pa(pte1));
2730		return (pmap_unwire_pt2(pmap, va, mpte, free));
2731	}
2732
2733	/*************************************
2734	 *
2735	 *  Page management routines.
2736 * 2737 *************************************/ 2738 2739CTASSERT(sizeof(struct pv_chunk) == PAGE_SIZE); 2740CTASSERT(_NPCM == 11); 2741CTASSERT(_NPCPV == 336); 2742 2743static __inline struct pv_chunk * 2744pv_to_chunk(pv_entry_t pv) 2745{ 2746 2747 return ((struct pv_chunk *)((uintptr_t)pv & ~(uintptr_t)PAGE_MASK)); 2748} 2749 2750#define PV_PMAP(pv) (pv_to_chunk(pv)->pc_pmap) 2751 2752#define PC_FREE0_9 0xfffffffful /* Free values for index 0 through 9 */ 2753#define PC_FREE10 0x0000fffful /* Free values for index 10 */ 2754 2755static const uint32_t pc_freemask[_NPCM] = { 2756 PC_FREE0_9, PC_FREE0_9, PC_FREE0_9, 2757 PC_FREE0_9, PC_FREE0_9, PC_FREE0_9, 2758 PC_FREE0_9, PC_FREE0_9, PC_FREE0_9, 2759 PC_FREE0_9, PC_FREE10 2760}; 2761 2762SYSCTL_INT(_vm_pmap, OID_AUTO, pv_entry_count, CTLFLAG_RD, &pv_entry_count, 0, 2763 "Current number of pv entries"); 2764 2765#ifdef PV_STATS 2766static int pc_chunk_count, pc_chunk_allocs, pc_chunk_frees, pc_chunk_tryfail; 2767 2768SYSCTL_INT(_vm_pmap, OID_AUTO, pc_chunk_count, CTLFLAG_RD, &pc_chunk_count, 0, 2769 "Current number of pv entry chunks"); 2770SYSCTL_INT(_vm_pmap, OID_AUTO, pc_chunk_allocs, CTLFLAG_RD, &pc_chunk_allocs, 0, 2771 "Current number of pv entry chunks allocated"); 2772SYSCTL_INT(_vm_pmap, OID_AUTO, pc_chunk_frees, CTLFLAG_RD, &pc_chunk_frees, 0, 2773 "Current number of pv entry chunks frees"); 2774SYSCTL_INT(_vm_pmap, OID_AUTO, pc_chunk_tryfail, CTLFLAG_RD, &pc_chunk_tryfail, 2775 0, "Number of times tried to get a chunk page but failed."); 2776 2777static long pv_entry_frees, pv_entry_allocs; 2778static int pv_entry_spare; 2779 2780SYSCTL_LONG(_vm_pmap, OID_AUTO, pv_entry_frees, CTLFLAG_RD, &pv_entry_frees, 0, 2781 "Current number of pv entry frees"); 2782SYSCTL_LONG(_vm_pmap, OID_AUTO, pv_entry_allocs, CTLFLAG_RD, &pv_entry_allocs, 2783 0, "Current number of pv entry allocs"); 2784SYSCTL_INT(_vm_pmap, OID_AUTO, pv_entry_spare, CTLFLAG_RD, &pv_entry_spare, 0, 2785 "Current number of spare pv entries"); 2786#endif 2787 2788/* 2789 * Is given page managed? 2790 */ 2791static __inline bool 2792is_managed(vm_paddr_t pa) 2793{ 2794 vm_page_t m; 2795 2796 m = PHYS_TO_VM_PAGE(pa); 2797 if (m == NULL) 2798 return (false); 2799 return ((m->oflags & VPO_UNMANAGED) == 0); 2800} 2801 2802static __inline bool 2803pte1_is_managed(pt1_entry_t pte1) 2804{ 2805 2806 return (is_managed(pte1_pa(pte1))); 2807} 2808 2809static __inline bool 2810pte2_is_managed(pt2_entry_t pte2) 2811{ 2812 2813 return (is_managed(pte2_pa(pte2))); 2814} 2815 2816/* 2817 * We are in a serious low memory condition. Resort to 2818 * drastic measures to free some pages so we can allocate 2819 * another pv entry chunk. 2820 */ 2821static vm_page_t 2822pmap_pv_reclaim(pmap_t locked_pmap) 2823{ 2824 struct pch newtail; 2825 struct pv_chunk *pc; 2826 struct md_page *pvh; 2827 pt1_entry_t *pte1p; 2828 pmap_t pmap; 2829 pt2_entry_t *pte2p, tpte2; 2830 pv_entry_t pv; 2831 vm_offset_t va; 2832 vm_page_t m, m_pc; 2833 struct spglist free; 2834 uint32_t inuse; 2835 int bit, field, freed; 2836 2837 PMAP_LOCK_ASSERT(locked_pmap, MA_OWNED); 2838 pmap = NULL; 2839 m_pc = NULL; 2840 SLIST_INIT(&free); 2841 TAILQ_INIT(&newtail); 2842 while ((pc = TAILQ_FIRST(&pv_chunks)) != NULL && (pv_vafree == 0 || 2843 SLIST_EMPTY(&free))) { 2844 TAILQ_REMOVE(&pv_chunks, pc, pc_lru); 2845 if (pmap != pc->pc_pmap) { 2846 if (pmap != NULL) { 2847 if (pmap != locked_pmap) 2848 PMAP_UNLOCK(pmap); 2849 } 2850 pmap = pc->pc_pmap; 2851 /* Avoid deadlock and lock recursion. 
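 *
 * The rule being applied here, sketched out: pmap locks are taken in
 * ascending pmap address order, so with locked_pmap already held it is
 * safe to sleep only on a higher-addressed pmap:
 *
 *	pmap >  locked_pmap	PMAP_LOCK(pmap)		(blocking is safe)
 *	pmap <  locked_pmap	PMAP_TRYLOCK(pmap)	(skip the chunk on failure)
 *	pmap == locked_pmap	already held, nothing to do
 *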
*/ 2852 if (pmap > locked_pmap) 2853 PMAP_LOCK(pmap); 2854 else if (pmap != locked_pmap && !PMAP_TRYLOCK(pmap)) { 2855 pmap = NULL; 2856 TAILQ_INSERT_TAIL(&newtail, pc, pc_lru); 2857 continue; 2858 } 2859 } 2860 2861 /* 2862 * Destroy every non-wired, 4 KB page mapping in the chunk. 2863 */ 2864 freed = 0; 2865 for (field = 0; field < _NPCM; field++) { 2866 for (inuse = ~pc->pc_map[field] & pc_freemask[field]; 2867 inuse != 0; inuse &= ~(1UL << bit)) { 2868 bit = ffs(inuse) - 1; 2869 pv = &pc->pc_pventry[field * 32 + bit]; 2870 va = pv->pv_va; 2871 pte1p = pmap_pte1(pmap, va); 2872 if (pte1_is_section(pte1_load(pte1p))) 2873 continue; 2874 pte2p = pmap_pte2(pmap, va); 2875 tpte2 = pte2_load(pte2p); 2876 if ((tpte2 & PTE2_W) == 0) 2877 tpte2 = pte2_load_clear(pte2p); 2878 pmap_pte2_release(pte2p); 2879 if ((tpte2 & PTE2_W) != 0) 2880 continue; 2881 KASSERT(tpte2 != 0, 2882 ("pmap_pv_reclaim: pmap %p va %#x zero pte", 2883 pmap, va)); 2884 pmap_tlb_flush(pmap, va); 2885 m = PHYS_TO_VM_PAGE(pte2_pa(tpte2)); 2886 if (pte2_is_dirty(tpte2)) 2887 vm_page_dirty(m); 2888 if ((tpte2 & PTE2_A) != 0) 2889 vm_page_aflag_set(m, PGA_REFERENCED); 2890 TAILQ_REMOVE(&m->md.pv_list, pv, pv_next); 2891 if (TAILQ_EMPTY(&m->md.pv_list) && 2892 (m->flags & PG_FICTITIOUS) == 0) { 2893 pvh = pa_to_pvh(VM_PAGE_TO_PHYS(m)); 2894 if (TAILQ_EMPTY(&pvh->pv_list)) { 2895 vm_page_aflag_clear(m, 2896 PGA_WRITEABLE); 2897 } 2898 } 2899 pc->pc_map[field] |= 1UL << bit; 2900 pmap_unuse_pt2(pmap, va, &free); 2901 freed++; 2902 } 2903 } 2904 if (freed == 0) { 2905 TAILQ_INSERT_TAIL(&newtail, pc, pc_lru); 2906 continue; 2907 } 2908 /* Every freed mapping is for a 4 KB page. */ 2909 pmap->pm_stats.resident_count -= freed; 2910 PV_STAT(pv_entry_frees += freed); 2911 PV_STAT(pv_entry_spare += freed); 2912 pv_entry_count -= freed; 2913 TAILQ_REMOVE(&pmap->pm_pvchunk, pc, pc_list); 2914 for (field = 0; field < _NPCM; field++) 2915 if (pc->pc_map[field] != pc_freemask[field]) { 2916 TAILQ_INSERT_HEAD(&pmap->pm_pvchunk, pc, 2917 pc_list); 2918 TAILQ_INSERT_TAIL(&newtail, pc, pc_lru); 2919 2920 /* 2921 * One freed pv entry in locked_pmap is 2922 * sufficient. 2923 */ 2924 if (pmap == locked_pmap) 2925 goto out; 2926 break; 2927 } 2928 if (field == _NPCM) { 2929 PV_STAT(pv_entry_spare -= _NPCPV); 2930 PV_STAT(pc_chunk_count--); 2931 PV_STAT(pc_chunk_frees++); 2932 /* Entire chunk is free; return it. */ 2933 m_pc = PHYS_TO_VM_PAGE(pmap_kextract((vm_offset_t)pc)); 2934 pmap_qremove((vm_offset_t)pc, 1); 2935 pmap_pte2list_free(&pv_vafree, (vm_offset_t)pc); 2936 break; 2937 } 2938 } 2939out: 2940 TAILQ_CONCAT(&pv_chunks, &newtail, pc_lru); 2941 if (pmap != NULL) { 2942 if (pmap != locked_pmap) 2943 PMAP_UNLOCK(pmap); 2944 } 2945 if (m_pc == NULL && pv_vafree != 0 && SLIST_EMPTY(&free)) { 2946 m_pc = SLIST_FIRST(&free); 2947 SLIST_REMOVE_HEAD(&free, plinks.s.ss); 2948 /* Recycle a freed page table page. 
*/ 2949 m_pc->wire_count = 1; 2950 atomic_add_int(&vm_cnt.v_wire_count, 1); 2951 } 2952 pmap_free_zero_pages(&free); 2953 return (m_pc); 2954} 2955 2956static void 2957free_pv_chunk(struct pv_chunk *pc) 2958{ 2959 vm_page_t m; 2960 2961 TAILQ_REMOVE(&pv_chunks, pc, pc_lru); 2962 PV_STAT(pv_entry_spare -= _NPCPV); 2963 PV_STAT(pc_chunk_count--); 2964 PV_STAT(pc_chunk_frees++); 2965 /* entire chunk is free, return it */ 2966 m = PHYS_TO_VM_PAGE(pmap_kextract((vm_offset_t)pc)); 2967 pmap_qremove((vm_offset_t)pc, 1); 2968 vm_page_unwire(m, PQ_NONE); 2969 vm_page_free(m); 2970 pmap_pte2list_free(&pv_vafree, (vm_offset_t)pc); 2971} 2972 2973/* 2974 * Free the pv_entry back to the free list. 2975 */ 2976static void 2977free_pv_entry(pmap_t pmap, pv_entry_t pv) 2978{ 2979 struct pv_chunk *pc; 2980 int idx, field, bit; 2981 2982 rw_assert(&pvh_global_lock, RA_WLOCKED); 2983 PMAP_LOCK_ASSERT(pmap, MA_OWNED); 2984 PV_STAT(pv_entry_frees++); 2985 PV_STAT(pv_entry_spare++); 2986 pv_entry_count--; 2987 pc = pv_to_chunk(pv); 2988 idx = pv - &pc->pc_pventry[0]; 2989 field = idx / 32; 2990 bit = idx % 32; 2991 pc->pc_map[field] |= 1ul << bit; 2992 for (idx = 0; idx < _NPCM; idx++) 2993 if (pc->pc_map[idx] != pc_freemask[idx]) { 2994 /* 2995 * 98% of the time, pc is already at the head of the 2996 * list. If it isn't already, move it to the head. 2997 */ 2998 if (__predict_false(TAILQ_FIRST(&pmap->pm_pvchunk) != 2999 pc)) { 3000 TAILQ_REMOVE(&pmap->pm_pvchunk, pc, pc_list); 3001 TAILQ_INSERT_HEAD(&pmap->pm_pvchunk, pc, 3002 pc_list); 3003 } 3004 return; 3005 } 3006 TAILQ_REMOVE(&pmap->pm_pvchunk, pc, pc_list); 3007 free_pv_chunk(pc); 3008} 3009 3010/* 3011 * Get a new pv_entry, allocating a block from the system 3012 * when needed. 3013 */ 3014static pv_entry_t 3015get_pv_entry(pmap_t pmap, boolean_t try) 3016{ 3017 static const struct timeval printinterval = { 60, 0 }; 3018 static struct timeval lastprint; 3019 int bit, field; 3020 pv_entry_t pv; 3021 struct pv_chunk *pc; 3022 vm_page_t m; 3023 3024 rw_assert(&pvh_global_lock, RA_WLOCKED); 3025 PMAP_LOCK_ASSERT(pmap, MA_OWNED); 3026 PV_STAT(pv_entry_allocs++); 3027 pv_entry_count++; 3028 if (pv_entry_count > pv_entry_high_water) 3029 if (ratecheck(&lastprint, &printinterval)) 3030 printf("Approaching the limit on PV entries, consider " 3031 "increasing either the vm.pmap.shpgperproc or the " 3032 "vm.pmap.pv_entry_max tunable.\n"); 3033retry: 3034 pc = TAILQ_FIRST(&pmap->pm_pvchunk); 3035 if (pc != NULL) { 3036 for (field = 0; field < _NPCM; field++) { 3037 if (pc->pc_map[field]) { 3038 bit = ffs(pc->pc_map[field]) - 1; 3039 break; 3040 } 3041 } 3042 if (field < _NPCM) { 3043 pv = &pc->pc_pventry[field * 32 + bit]; 3044 pc->pc_map[field] &= ~(1ul << bit); 3045 /* If this was the last item, move it to tail */ 3046 for (field = 0; field < _NPCM; field++) 3047 if (pc->pc_map[field] != 0) { 3048 PV_STAT(pv_entry_spare--); 3049 return (pv); /* not full, return */ 3050 } 3051 TAILQ_REMOVE(&pmap->pm_pvchunk, pc, pc_list); 3052 TAILQ_INSERT_TAIL(&pmap->pm_pvchunk, pc, pc_list); 3053 PV_STAT(pv_entry_spare--); 3054 return (pv); 3055 } 3056 } 3057 /* 3058 * Access to the pte2list "pv_vafree" is synchronized by the pvh 3059 * global lock. If "pv_vafree" is currently non-empty, it will 3060 * remain non-empty until pmap_pte2list_alloc() completes. 
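 *
 * The chunk bookkeeping used here and in free_pv_entry() is plain bitmap
 * arithmetic over _NPCPV == 336 entries kept in _NPCM == 11 32-bit words:
 * ten full words plus 16 bits of the last one, which is why PC_FREE10 is
 * 0x0000ffff.  For example, pv entry number 75 of a chunk lives at
 *
 *	field = 75 / 32 = 2,	bit = 75 % 32 = 11,
 *
 * so freeing it sets bit 11 of pc->pc_map[2] and allocating it clears
 * that same bit again.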
3061 */ 3062 if (pv_vafree == 0 || (m = vm_page_alloc(NULL, 0, VM_ALLOC_NORMAL | 3063 VM_ALLOC_NOOBJ | VM_ALLOC_WIRED)) == NULL) { 3064 if (try) { 3065 pv_entry_count--; 3066 PV_STAT(pc_chunk_tryfail++); 3067 return (NULL); 3068 } 3069 m = pmap_pv_reclaim(pmap); 3070 if (m == NULL) 3071 goto retry; 3072 } 3073 PV_STAT(pc_chunk_count++); 3074 PV_STAT(pc_chunk_allocs++); 3075 pc = (struct pv_chunk *)pmap_pte2list_alloc(&pv_vafree); 3076 pmap_qenter((vm_offset_t)pc, &m, 1); 3077 pc->pc_pmap = pmap; 3078 pc->pc_map[0] = pc_freemask[0] & ~1ul; /* preallocated bit 0 */ 3079 for (field = 1; field < _NPCM; field++) 3080 pc->pc_map[field] = pc_freemask[field]; 3081 TAILQ_INSERT_TAIL(&pv_chunks, pc, pc_lru); 3082 pv = &pc->pc_pventry[0]; 3083 TAILQ_INSERT_HEAD(&pmap->pm_pvchunk, pc, pc_list); 3084 PV_STAT(pv_entry_spare += _NPCPV - 1); 3085 return (pv); 3086} 3087 3088/* 3089 * Create a pv entry for page at pa for 3090 * (pmap, va). 3091 */ 3092static void 3093pmap_insert_entry(pmap_t pmap, vm_offset_t va, vm_page_t m) 3094{ 3095 pv_entry_t pv; 3096 3097 rw_assert(&pvh_global_lock, RA_WLOCKED); 3098 PMAP_LOCK_ASSERT(pmap, MA_OWNED); 3099 pv = get_pv_entry(pmap, FALSE); 3100 pv->pv_va = va; 3101 TAILQ_INSERT_TAIL(&m->md.pv_list, pv, pv_next); 3102} 3103 3104static __inline pv_entry_t 3105pmap_pvh_remove(struct md_page *pvh, pmap_t pmap, vm_offset_t va) 3106{ 3107 pv_entry_t pv; 3108 3109 rw_assert(&pvh_global_lock, RA_WLOCKED); 3110 TAILQ_FOREACH(pv, &pvh->pv_list, pv_next) { 3111 if (pmap == PV_PMAP(pv) && va == pv->pv_va) { 3112 TAILQ_REMOVE(&pvh->pv_list, pv, pv_next); 3113 break; 3114 } 3115 } 3116 return (pv); 3117} 3118 3119static void 3120pmap_pvh_free(struct md_page *pvh, pmap_t pmap, vm_offset_t va) 3121{ 3122 pv_entry_t pv; 3123 3124 pv = pmap_pvh_remove(pvh, pmap, va); 3125 KASSERT(pv != NULL, ("pmap_pvh_free: pv not found")); 3126 free_pv_entry(pmap, pv); 3127} 3128 3129static void 3130pmap_remove_entry(pmap_t pmap, vm_page_t m, vm_offset_t va) 3131{ 3132 struct md_page *pvh; 3133 3134 rw_assert(&pvh_global_lock, RA_WLOCKED); 3135 pmap_pvh_free(&m->md, pmap, va); 3136 if (TAILQ_EMPTY(&m->md.pv_list) && (m->flags & PG_FICTITIOUS) == 0) { 3137 pvh = pa_to_pvh(VM_PAGE_TO_PHYS(m)); 3138 if (TAILQ_EMPTY(&pvh->pv_list)) 3139 vm_page_aflag_clear(m, PGA_WRITEABLE); 3140 } 3141} 3142 3143static void 3144pmap_pv_demote_pte1(pmap_t pmap, vm_offset_t va, vm_paddr_t pa) 3145{ 3146 struct md_page *pvh; 3147 pv_entry_t pv; 3148 vm_offset_t va_last; 3149 vm_page_t m; 3150 3151 rw_assert(&pvh_global_lock, RA_WLOCKED); 3152 KASSERT((pa & PTE1_OFFSET) == 0, 3153 ("pmap_pv_demote_pte1: pa is not 1mpage aligned")); 3154 3155 /* 3156 * Transfer the 1mpage's pv entry for this mapping to the first 3157 * page's pv list. 3158 */ 3159 pvh = pa_to_pvh(pa); 3160 va = pte1_trunc(va); 3161 pv = pmap_pvh_remove(pvh, pmap, va); 3162 KASSERT(pv != NULL, ("pmap_pv_demote_pte1: pv not found")); 3163 m = PHYS_TO_VM_PAGE(pa); 3164 TAILQ_INSERT_TAIL(&m->md.pv_list, pv, pv_next); 3165 /* Instantiate the remaining NPTE2_IN_PT2 - 1 pv entries. 
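 *
 * In other words (assuming the usual 1 MB sections of 4 KB pages, i.e.
 * NPTE2_IN_PT2 == 256): one pv entry is moved from the pvh list to the
 * first small page above, and the loop below creates the other 255, one
 * per remaining 4 KB page:
 *
 *	va:	base + 4 KB, base + 8 KB, ..., base + 1 MB - 4 KB
 *	page:	m + 1,       m + 2,       ..., m + 255
 *
 * so that after demotion every small page of the former section has its
 * own pv entry for this pmap.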
*/ 3166 va_last = va + PTE1_SIZE - PAGE_SIZE; 3167 do { 3168 m++; 3169 KASSERT((m->oflags & VPO_UNMANAGED) == 0, 3170 ("pmap_pv_demote_pte1: page %p is not managed", m)); 3171 va += PAGE_SIZE; 3172 pmap_insert_entry(pmap, va, m); 3173 } while (va < va_last); 3174} 3175 3176#if VM_NRESERVLEVEL > 0 3177static void 3178pmap_pv_promote_pte1(pmap_t pmap, vm_offset_t va, vm_paddr_t pa) 3179{ 3180 struct md_page *pvh; 3181 pv_entry_t pv; 3182 vm_offset_t va_last; 3183 vm_page_t m; 3184 3185 rw_assert(&pvh_global_lock, RA_WLOCKED); 3186 KASSERT((pa & PTE1_OFFSET) == 0, 3187 ("pmap_pv_promote_pte1: pa is not 1mpage aligned")); 3188 3189 /* 3190 * Transfer the first page's pv entry for this mapping to the 3191 * 1mpage's pv list. Aside from avoiding the cost of a call 3192 * to get_pv_entry(), a transfer avoids the possibility that 3193 * get_pv_entry() calls pmap_pv_reclaim() and that pmap_pv_reclaim() 3194 * removes one of the mappings that is being promoted. 3195 */ 3196 m = PHYS_TO_VM_PAGE(pa); 3197 va = pte1_trunc(va); 3198 pv = pmap_pvh_remove(&m->md, pmap, va); 3199 KASSERT(pv != NULL, ("pmap_pv_promote_pte1: pv not found")); 3200 pvh = pa_to_pvh(pa); 3201 TAILQ_INSERT_TAIL(&pvh->pv_list, pv, pv_next); 3202 /* Free the remaining NPTE2_IN_PT2 - 1 pv entries. */ 3203 va_last = va + PTE1_SIZE - PAGE_SIZE; 3204 do { 3205 m++; 3206 va += PAGE_SIZE; 3207 pmap_pvh_free(&m->md, pmap, va); 3208 } while (va < va_last); 3209} 3210#endif 3211 3212/* 3213 * Conditionally create a pv entry. 3214 */ 3215static boolean_t 3216pmap_try_insert_pv_entry(pmap_t pmap, vm_offset_t va, vm_page_t m) 3217{ 3218 pv_entry_t pv; 3219 3220 rw_assert(&pvh_global_lock, RA_WLOCKED); 3221 PMAP_LOCK_ASSERT(pmap, MA_OWNED); 3222 if (pv_entry_count < pv_entry_high_water && 3223 (pv = get_pv_entry(pmap, TRUE)) != NULL) { 3224 pv->pv_va = va; 3225 TAILQ_INSERT_TAIL(&m->md.pv_list, pv, pv_next); 3226 return (TRUE); 3227 } else 3228 return (FALSE); 3229} 3230 3231/* 3232 * Create the pv entries for each of the pages within a section. 3233 */ 3234static boolean_t 3235pmap_pv_insert_pte1(pmap_t pmap, vm_offset_t va, vm_paddr_t pa) 3236{ 3237 struct md_page *pvh; 3238 pv_entry_t pv; 3239 3240 rw_assert(&pvh_global_lock, RA_WLOCKED); 3241 if (pv_entry_count < pv_entry_high_water && 3242 (pv = get_pv_entry(pmap, TRUE)) != NULL) { 3243 pv->pv_va = va; 3244 pvh = pa_to_pvh(pa); 3245 TAILQ_INSERT_TAIL(&pvh->pv_list, pv, pv_next); 3246 return (TRUE); 3247 } else 3248 return (FALSE); 3249} 3250 3251static inline void 3252pmap_tlb_flush_pte1(pmap_t pmap, vm_offset_t va, pt1_entry_t npte1) 3253{ 3254 3255 /* Kill all the small mappings or the big one only. */ 3256 if (pte1_is_section(npte1)) 3257 pmap_tlb_flush_range(pmap, pte1_trunc(va), PTE1_SIZE); 3258 else 3259 pmap_tlb_flush(pmap, pte1_trunc(va)); 3260} 3261 3262/* 3263 * Update kernel pte1 on all pmaps. 3264 * 3265 * The following function is called only on one cpu with disabled interrupts. 3266 * In SMP case, smp_rendezvous_cpus() is used to stop other cpus. This way 3267 * nobody can invoke explicit hardware table walk during the update of pte1. 3268 * Unsolicited hardware table walk can still happen, invoked by speculative 3269 * data or instruction prefetch or even by speculative hardware table walk. 3270 * 3271 * The break-before-make approach should be implemented here. However, it's 3272 * not so easy to do that for kernel mappings as it would be unhappy to unmap 3273 * itself unexpectedly but voluntarily. 
3274 */ 3275static void 3276pmap_update_pte1_kernel(vm_offset_t va, pt1_entry_t npte1) 3277{ 3278 pmap_t pmap; 3279 pt1_entry_t *pte1p; 3280 3281 /* 3282 * Get current pmap. Interrupts should be disabled here 3283 * so PCPU_GET() is done atomically. 3284 */ 3285 pmap = PCPU_GET(curpmap); 3286 if (pmap == NULL) 3287 pmap = kernel_pmap; 3288 3289 /* 3290 * (1) Change pte1 on current pmap. 3291 * (2) Flush all obsolete TLB entries on current CPU. 3292 * (3) Change pte1 on all pmaps. 3293 * (4) Flush all obsolete TLB entries on all CPUs in SMP case. 3294 */ 3295 3296 pte1p = pmap_pte1(pmap, va); 3297 pte1_store(pte1p, npte1); 3298 3299 /* Kill all the small mappings or the big one only. */ 3300 if (pte1_is_section(npte1)) { 3301 pmap_pte1_kern_promotions++; 3302 tlb_flush_range_local(pte1_trunc(va), PTE1_SIZE); 3303 } else { 3304 pmap_pte1_kern_demotions++; 3305 tlb_flush_local(pte1_trunc(va)); 3306 } 3307 3308 /* 3309 * In SMP case, this function is called when all cpus are at smp 3310 * rendezvous, so there is no need to use 'allpmaps_lock' lock here. 3311 * In UP case, the function is called with this lock locked. 3312 */ 3313 LIST_FOREACH(pmap, &allpmaps, pm_list) { 3314 pte1p = pmap_pte1(pmap, va); 3315 pte1_store(pte1p, npte1); 3316 } 3317 3318#ifdef SMP 3319 /* Kill all the small mappings or the big one only. */ 3320 if (pte1_is_section(npte1)) 3321 tlb_flush_range(pte1_trunc(va), PTE1_SIZE); 3322 else 3323 tlb_flush(pte1_trunc(va)); 3324#endif 3325} 3326 3327#ifdef SMP 3328struct pte1_action { 3329 vm_offset_t va; 3330 pt1_entry_t npte1; 3331 u_int update; /* CPU that updates the PTE1 */ 3332}; 3333 3334static void 3335pmap_update_pte1_action(void *arg) 3336{ 3337 struct pte1_action *act = arg; 3338 3339 if (act->update == PCPU_GET(cpuid)) 3340 pmap_update_pte1_kernel(act->va, act->npte1); 3341} 3342 3343/* 3344 * Change pte1 on current pmap. 3345 * Note that kernel pte1 must be changed on all pmaps. 3346 * 3347 * According to the architecture reference manual published by ARM, 3348 * the behaviour is UNPREDICTABLE when two or more TLB entries map the same VA. 3349 * According to this manual, UNPREDICTABLE behaviours must never happen in 3350 * a viable system. In contrast, on x86 processors, it is not specified which 3351 * TLB entry mapping the virtual address will be used, but the MMU doesn't 3352 * generate a bogus translation the way it does on Cortex-A8 rev 2 (Beaglebone 3353 * Black). 3354 * 3355 * It's a problem when either promotion or demotion is being done. The pte1 3356 * update and appropriate TLB flush must be done atomically in general. 3357 */ 3358static void 3359pmap_change_pte1(pmap_t pmap, pt1_entry_t *pte1p, vm_offset_t va, 3360 pt1_entry_t npte1) 3361{ 3362 3363 if (pmap == kernel_pmap) { 3364 struct pte1_action act; 3365 3366 sched_pin(); 3367 act.va = va; 3368 act.npte1 = npte1; 3369 act.update = PCPU_GET(cpuid); 3370 smp_rendezvous_cpus(all_cpus, smp_no_rendevous_barrier, 3371 pmap_update_pte1_action, NULL, &act); 3372 sched_unpin(); 3373 } else { 3374 register_t cspr; 3375 3376 /* 3377 * Use break-before-make approach for changing userland 3378 * mappings. It can cause L1 translation aborts on other 3379 * cores in SMP case. So, special treatment is implemented 3380 * in pmap_fault(). To reduce the likelihood that another core 3381 * will be affected by the broken mapping, disable interrupts 3382 * until the mapping change is completed. 
3383 */ 3384 cspr = disable_interrupts(PSR_I | PSR_F); 3385 pte1_clear(pte1p); 3386 pmap_tlb_flush_pte1(pmap, va, npte1); 3387 pte1_store(pte1p, npte1); 3388 restore_interrupts(cspr); 3389 } 3390} 3391#else 3392static void 3393pmap_change_pte1(pmap_t pmap, pt1_entry_t *pte1p, vm_offset_t va, 3394 pt1_entry_t npte1) 3395{ 3396 3397 if (pmap == kernel_pmap) { 3398 mtx_lock_spin(&allpmaps_lock); 3399 pmap_update_pte1_kernel(va, npte1); 3400 mtx_unlock_spin(&allpmaps_lock); 3401 } else { 3402 register_t cspr; 3403 3404 /* 3405 * Use break-before-make approach for changing userland 3406 * mappings. It's absolutely safe in UP case when interrupts 3407 * are disabled. 3408 */ 3409 cspr = disable_interrupts(PSR_I | PSR_F); 3410 pte1_clear(pte1p); 3411 pmap_tlb_flush_pte1(pmap, va, npte1); 3412 pte1_store(pte1p, npte1); 3413 restore_interrupts(cspr); 3414 } 3415} 3416#endif 3417 3418#if VM_NRESERVLEVEL > 0 3419/* 3420 * Tries to promote the NPTE2_IN_PT2, contiguous 4KB page mappings that are 3421 * within a single page table page (PT2) to a single 1MB page mapping. 3422 * For promotion to occur, two conditions must be met: (1) the 4KB page 3423 * mappings must map aligned, contiguous physical memory and (2) the 4KB page 3424 * mappings must have identical characteristics. 3425 * 3426 * Managed (PG_MANAGED) mappings within the kernel address space are not 3427 * promoted. The reason is that kernel PTE1s are replicated in each pmap but 3428 * pmap_remove_write(), pmap_clear_modify(), and pmap_clear_reference() only 3429 * read the PTE1 from the kernel pmap. 3430 */ 3431static void 3432pmap_promote_pte1(pmap_t pmap, pt1_entry_t *pte1p, vm_offset_t va) 3433{ 3434 pt1_entry_t npte1; 3435 pt2_entry_t *fpte2p, fpte2, fpte2_fav; 3436 pt2_entry_t *pte2p, pte2; 3437 vm_offset_t pteva __unused; 3438 vm_page_t m __unused; 3439 3440 PDEBUG(6, printf("%s(%p): try for va %#x pte1 %#x at %p\n", __func__, 3441 pmap, va, pte1_load(pte1p), pte1p)); 3442 3443 PMAP_LOCK_ASSERT(pmap, MA_OWNED); 3444 3445 /* 3446 * Examine the first PTE2 in the specified PT2. Abort if this PTE2 is 3447 * either invalid, unused, or does not map the first 4KB physical page 3448 * within a 1MB page. 3449 */ 3450 fpte2p = pmap_pte2_quick(pmap, pte1_trunc(va)); 3451 fpte2 = pte2_load(fpte2p); 3452 if ((fpte2 & ((PTE2_FRAME & PTE1_OFFSET) | PTE2_A | PTE2_V)) != 3453 (PTE2_A | PTE2_V)) { 3454 pmap_pte1_p_failures++; 3455 CTR3(KTR_PMAP, "%s: failure(1) for va %#x in pmap %p", 3456 __func__, va, pmap); 3457 return; 3458 } 3459 if (pte2_is_managed(fpte2) && pmap == kernel_pmap) { 3460 pmap_pte1_p_failures++; 3461 CTR3(KTR_PMAP, "%s: failure(2) for va %#x in pmap %p", 3462 __func__, va, pmap); 3463 return; 3464 } 3465 if ((fpte2 & (PTE2_NM | PTE2_RO)) == PTE2_NM) { 3466 /* 3467 * When page is not modified, PTE2_RO can be set without 3468 * a TLB invalidation. 3469 */ 3470 fpte2 |= PTE2_RO; 3471 pte2_store(fpte2p, fpte2); 3472 } 3473 3474 /* 3475 * Examine each of the other PTE2s in the specified PT2. Abort if this 3476 * PTE2 maps an unexpected 4KB physical page or does not have identical 3477 * characteristics to the first PTE2. 
3478 */ 3479 fpte2_fav = (fpte2 & (PTE2_FRAME | PTE2_A | PTE2_V)); 3480 fpte2_fav += PTE1_SIZE - PTE2_SIZE; /* examine from the end */ 3481 for (pte2p = fpte2p + NPTE2_IN_PT2 - 1; pte2p > fpte2p; pte2p--) { 3482 pte2 = pte2_load(pte2p); 3483 if ((pte2 & (PTE2_FRAME | PTE2_A | PTE2_V)) != fpte2_fav) { 3484 pmap_pte1_p_failures++; 3485 CTR3(KTR_PMAP, "%s: failure(3) for va %#x in pmap %p", 3486 __func__, va, pmap); 3487 return; 3488 } 3489 if ((pte2 & (PTE2_NM | PTE2_RO)) == PTE2_NM) { 3490 /* 3491 * When page is not modified, PTE2_RO can be set 3492 * without a TLB invalidation. See note above. 3493 */ 3494 pte2 |= PTE2_RO; 3495 pte2_store(pte2p, pte2); 3496 pteva = pte1_trunc(va) | (pte2 & PTE1_OFFSET & 3497 PTE2_FRAME); 3498 CTR3(KTR_PMAP, "%s: protect for va %#x in pmap %p", 3499 __func__, pteva, pmap); 3500 } 3501 if ((pte2 & PTE2_PROMOTE) != (fpte2 & PTE2_PROMOTE)) { 3502 pmap_pte1_p_failures++; 3503 CTR3(KTR_PMAP, "%s: failure(4) for va %#x in pmap %p", 3504 __func__, va, pmap); 3505 return; 3506 } 3507 3508 fpte2_fav -= PTE2_SIZE; 3509 } 3510 /* 3511 * The page table page in its current state will stay in PT2TAB 3512 * until the PTE1 mapping the section is demoted by pmap_demote_pte1() 3513 * or destroyed by pmap_remove_pte1(). 3514 * 3515 * Note that L2 page table size is not equal to PAGE_SIZE. 3516 */ 3517 m = PHYS_TO_VM_PAGE(trunc_page(pte1_link_pa(pte1_load(pte1p)))); 3518 KASSERT(m >= vm_page_array && m < &vm_page_array[vm_page_array_size], 3519 ("%s: PT2 page is out of range", __func__)); 3520 KASSERT(m->pindex == (pte1_index(va) & ~PT2PG_MASK), 3521 ("%s: PT2 page's pindex is wrong", __func__)); 3522 3523 /* 3524 * Get pte1 from pte2 format. 3525 */ 3526 npte1 = (fpte2 & PTE1_FRAME) | ATTR_TO_L1(fpte2) | PTE1_V; 3527 3528 /* 3529 * Promote the pv entries. 3530 */ 3531 if (pte2_is_managed(fpte2)) 3532 pmap_pv_promote_pte1(pmap, va, pte1_pa(npte1)); 3533 3534 /* 3535 * Promote the mappings. 3536 */ 3537 pmap_change_pte1(pmap, pte1p, va, npte1); 3538 3539 pmap_pte1_promotions++; 3540 CTR3(KTR_PMAP, "%s: success for va %#x in pmap %p", 3541 __func__, va, pmap); 3542 3543 PDEBUG(6, printf("%s(%p): success for va %#x pte1 %#x(%#x) at %p\n", 3544 __func__, pmap, va, npte1, pte1_load(pte1p), pte1p)); 3545} 3546#endif /* VM_NRESERVLEVEL > 0 */ 3547 3548/* 3549 * Zero L2 page table page. 3550 */ 3551static __inline void 3552pmap_clear_pt2(pt2_entry_t *fpte2p) 3553{ 3554 pt2_entry_t *pte2p; 3555 3556 for (pte2p = fpte2p; pte2p < fpte2p + NPTE2_IN_PT2; pte2p++) 3557 pte2_clear(pte2p); 3558 3559} 3560 3561/* 3562 * Removes a 1MB page mapping from the kernel pmap. 3563 */ 3564static void 3565pmap_remove_kernel_pte1(pmap_t pmap, pt1_entry_t *pte1p, vm_offset_t va) 3566{ 3567 vm_page_t m; 3568 uint32_t pte1_idx; 3569 pt2_entry_t *fpte2p; 3570 vm_paddr_t pt2_pa; 3571 3572 PMAP_LOCK_ASSERT(pmap, MA_OWNED); 3573 m = pmap_pt2_page(pmap, va); 3574 if (m == NULL) 3575 /* 3576 * QQQ: Is this function called only on promoted pte1? 3577 * We certainly do section mappings directly 3578 * (without promotion) in kernel !!! 3579 */ 3580 panic("%s: missing pt2 page", __func__); 3581 3582 pte1_idx = pte1_index(va); 3583 3584 /* 3585 * Initialize the L2 page table. 3586 */ 3587 fpte2p = page_pt2(pt2map_pt2pg(va), pte1_idx); 3588 pmap_clear_pt2(fpte2p); 3589 3590 /* 3591 * Remove the mapping. 3592 */ 3593 pt2_pa = page_pt2pa(VM_PAGE_TO_PHYS(m), pte1_idx); 3594 pmap_kenter_pte1(va, PTE1_LINK(pt2_pa)); 3595 3596 /* 3597 * QQQ: We do not need to invalidate PT2MAP mapping 3598 * as we did not change it. I.e. 
the L2 page table page 3599 * was and still is mapped the same way. 3600 */ 3601} 3602 3603/* 3604 * Do the things to unmap a section in a process 3605 */ 3606static void 3607pmap_remove_pte1(pmap_t pmap, pt1_entry_t *pte1p, vm_offset_t sva, 3608 struct spglist *free) 3609{ 3610 pt1_entry_t opte1; 3611 struct md_page *pvh; 3612 vm_offset_t eva, va; 3613 vm_page_t m; 3614 3615 PDEBUG(6, printf("%s(%p): va %#x pte1 %#x at %p\n", __func__, pmap, sva, 3616 pte1_load(pte1p), pte1p)); 3617 3618 PMAP_LOCK_ASSERT(pmap, MA_OWNED); 3619 KASSERT((sva & PTE1_OFFSET) == 0, 3620 ("%s: sva is not 1mpage aligned", __func__)); 3621 3622 /* 3623 * Clear and invalidate the mapping. It should occupy one and only TLB 3624 * entry. So, pmap_tlb_flush() called with aligned address should be 3625 * sufficient. 3626 */ 3627 opte1 = pte1_load_clear(pte1p); 3628 pmap_tlb_flush(pmap, sva); 3629 3630 if (pte1_is_wired(opte1)) 3631 pmap->pm_stats.wired_count -= PTE1_SIZE / PAGE_SIZE; 3632 pmap->pm_stats.resident_count -= PTE1_SIZE / PAGE_SIZE; 3633 if (pte1_is_managed(opte1)) { 3634 pvh = pa_to_pvh(pte1_pa(opte1)); 3635 pmap_pvh_free(pvh, pmap, sva); 3636 eva = sva + PTE1_SIZE; 3637 for (va = sva, m = PHYS_TO_VM_PAGE(pte1_pa(opte1)); 3638 va < eva; va += PAGE_SIZE, m++) { 3639 if (pte1_is_dirty(opte1)) 3640 vm_page_dirty(m); 3641 if (opte1 & PTE1_A) 3642 vm_page_aflag_set(m, PGA_REFERENCED); 3643 if (TAILQ_EMPTY(&m->md.pv_list) && 3644 TAILQ_EMPTY(&pvh->pv_list)) 3645 vm_page_aflag_clear(m, PGA_WRITEABLE); 3646 } 3647 } 3648 if (pmap == kernel_pmap) { 3649 /* 3650 * L2 page table(s) can't be removed from kernel map as 3651 * kernel counts on it (stuff around pmap_growkernel()). 3652 */ 3653 pmap_remove_kernel_pte1(pmap, pte1p, sva); 3654 } else { 3655 /* 3656 * Get associated L2 page table page. 3657 * It's possible that the page was never allocated. 3658 */ 3659 m = pmap_pt2_page(pmap, sva); 3660 if (m != NULL) 3661 pmap_unwire_pt2_all(pmap, sva, m, free); 3662 } 3663} 3664 3665/* 3666 * Fills L2 page table page with mappings to consecutive physical pages. 3667 */ 3668static __inline void 3669pmap_fill_pt2(pt2_entry_t *fpte2p, pt2_entry_t npte2) 3670{ 3671 pt2_entry_t *pte2p; 3672 3673 for (pte2p = fpte2p; pte2p < fpte2p + NPTE2_IN_PT2; pte2p++) { 3674 pte2_store(pte2p, npte2); 3675 npte2 += PTE2_SIZE; 3676 } 3677} 3678 3679/* 3680 * Tries to demote a 1MB page mapping. If demotion fails, the 3681 * 1MB page mapping is invalidated. 3682 */ 3683static boolean_t 3684pmap_demote_pte1(pmap_t pmap, pt1_entry_t *pte1p, vm_offset_t va) 3685{ 3686 pt1_entry_t opte1, npte1; 3687 pt2_entry_t *fpte2p, npte2; 3688 vm_paddr_t pt2pg_pa, pt2_pa; 3689 vm_page_t m; 3690 struct spglist free; 3691 uint32_t pte1_idx, isnew = 0; 3692 3693 PDEBUG(6, printf("%s(%p): try for va %#x pte1 %#x at %p\n", __func__, 3694 pmap, va, pte1_load(pte1p), pte1p)); 3695 3696 PMAP_LOCK_ASSERT(pmap, MA_OWNED); 3697 3698 opte1 = pte1_load(pte1p); 3699 KASSERT(pte1_is_section(opte1), ("%s: opte1 not a section", __func__)); 3700 3701 if ((opte1 & PTE1_A) == 0 || (m = pmap_pt2_page(pmap, va)) == NULL) { 3702 KASSERT(!pte1_is_wired(opte1), 3703 ("%s: PT2 page for a wired mapping is missing", __func__)); 3704 3705 /* 3706 * Invalidate the 1MB page mapping and return 3707 * "failure" if the mapping was never accessed or the 3708 * allocation of the new page table page fails. 
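 *
 * When the demotion does go ahead, the replacement L2 page table is
 * filled by pmap_fill_pt2() with NPTE2_IN_PT2 consecutive small mappings
 * derived from the old section, stepping the physical address by
 * PTE2_SIZE each time.  With illustrative 1 MB / 4 KB geometry, a section
 * mapping physical address 0x40100000 becomes 256 PTE2s covering
 *
 *	0x40100000, 0x40101000, ..., 0x401FF000,
 *
 * all carrying the attributes produced by ATTR_TO_L2() from the old
 * section entry.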
3709 */ 3710 if ((opte1 & PTE1_A) == 0 || (m = vm_page_alloc(NULL, 3711 pte1_index(va) & ~PT2PG_MASK, VM_ALLOC_NOOBJ | 3712 VM_ALLOC_NORMAL | VM_ALLOC_WIRED)) == NULL) { 3713 SLIST_INIT(&free); 3714 pmap_remove_pte1(pmap, pte1p, pte1_trunc(va), &free); 3715 pmap_free_zero_pages(&free); 3716 CTR3(KTR_PMAP, "%s: failure for va %#x in pmap %p", 3717 __func__, va, pmap); 3718 return (FALSE); 3719 } 3720 if (va < VM_MAXUSER_ADDRESS) 3721 pmap->pm_stats.resident_count++; 3722 3723 isnew = 1; 3724 3725 /* 3726 * We init all L2 page tables in the page even if 3727 * we are going to change everything for one L2 page 3728 * table in a while. 3729 */ 3730 pt2pg_pa = pmap_pt2pg_init(pmap, va, m); 3731 } else { 3732 if (va < VM_MAXUSER_ADDRESS) { 3733 if (pt2_is_empty(m, va)) 3734 isnew = 1; /* Demoting section w/o promotion. */ 3735#ifdef INVARIANTS 3736 else 3737 KASSERT(pt2_is_full(m, va), ("%s: bad PT2 wire" 3738 " count %u", __func__, 3739 pt2_wirecount_get(m, pte1_index(va)))); 3740#endif 3741 } 3742 } 3743 3744 pt2pg_pa = VM_PAGE_TO_PHYS(m); 3745 pte1_idx = pte1_index(va); 3746 /* 3747 * If the pmap is current, then the PT2MAP can provide access to 3748 * the page table page (promoted L2 page tables are not unmapped). 3749 * Otherwise, temporarily map the L2 page table page (m) into 3750 * the kernel's address space at either PADDR1 or PADDR2. 3751 * 3752 * Note that L2 page table size is not equal to PAGE_SIZE. 3753 */ 3754 if (pmap_is_current(pmap)) 3755 fpte2p = page_pt2(pt2map_pt2pg(va), pte1_idx); 3756 else if (curthread->td_pinned > 0 && rw_wowned(&pvh_global_lock)) { 3757 if (pte2_pa(pte2_load(PMAP1)) != pt2pg_pa) { 3758 pte2_store(PMAP1, PTE2_KPT(pt2pg_pa)); 3759#ifdef SMP 3760 PMAP1cpu = PCPU_GET(cpuid); 3761#endif 3762 tlb_flush_local((vm_offset_t)PADDR1); 3763 PMAP1changed++; 3764 } else 3765#ifdef SMP 3766 if (PMAP1cpu != PCPU_GET(cpuid)) { 3767 PMAP1cpu = PCPU_GET(cpuid); 3768 tlb_flush_local((vm_offset_t)PADDR1); 3769 PMAP1changedcpu++; 3770 } else 3771#endif 3772 PMAP1unchanged++; 3773 fpte2p = page_pt2((vm_offset_t)PADDR1, pte1_idx); 3774 } else { 3775 mtx_lock(&PMAP2mutex); 3776 if (pte2_pa(pte2_load(PMAP2)) != pt2pg_pa) { 3777 pte2_store(PMAP2, PTE2_KPT(pt2pg_pa)); 3778 tlb_flush((vm_offset_t)PADDR2); 3779 } 3780 fpte2p = page_pt2((vm_offset_t)PADDR2, pte1_idx); 3781 } 3782 pt2_pa = page_pt2pa(pt2pg_pa, pte1_idx); 3783 npte1 = PTE1_LINK(pt2_pa); 3784 3785 KASSERT((opte1 & PTE1_A) != 0, 3786 ("%s: opte1 is missing PTE1_A", __func__)); 3787 KASSERT((opte1 & (PTE1_NM | PTE1_RO)) != PTE1_NM, 3788 ("%s: opte1 has PTE1_NM", __func__)); 3789 3790 /* 3791 * Get pte2 from pte1 format. 3792 */ 3793 npte2 = pte1_pa(opte1) | ATTR_TO_L2(opte1) | PTE2_V; 3794 3795 /* 3796 * If the L2 page table page is new, initialize it. If the mapping 3797 * has changed attributes, update the page table entries. 3798 */ 3799 if (isnew != 0) { 3800 pt2_wirecount_set(m, pte1_idx, NPTE2_IN_PT2); 3801 pmap_fill_pt2(fpte2p, npte2); 3802 } else if ((pte2_load(fpte2p) & PTE2_PROMOTE) != 3803 (npte2 & PTE2_PROMOTE)) 3804 pmap_fill_pt2(fpte2p, npte2); 3805 3806 KASSERT(pte2_pa(pte2_load(fpte2p)) == pte2_pa(npte2), 3807 ("%s: fpte2p and npte2 map different physical addresses", 3808 __func__)); 3809 3810 if (fpte2p == PADDR2) 3811 mtx_unlock(&PMAP2mutex); 3812 3813 /* 3814 * Demote the mapping. This pmap is locked. The old PTE1 has 3815 * PTE1_A set. If the old PTE1 has not PTE1_RO set, it also 3816 * has not PTE1_NM set. 
Thus, there is no danger of a race with 3817 * another processor changing the setting of PTE1_A and/or PTE1_NM 3818 * between the read above and the store below. 3819 */ 3820 pmap_change_pte1(pmap, pte1p, va, npte1); 3821 3822 /* 3823 * Demote the pv entry. This depends on the earlier demotion 3824 * of the mapping. Specifically, the (re)creation of a per- 3825 * page pv entry might trigger the execution of pmap_pv_reclaim(), 3826 * which might reclaim a newly (re)created per-page pv entry 3827 * and destroy the associated mapping. In order to destroy 3828 * the mapping, the PTE1 must have already changed from mapping 3829 * the 1mpage to referencing the page table page. 3830 */ 3831 if (pte1_is_managed(opte1)) 3832 pmap_pv_demote_pte1(pmap, va, pte1_pa(opte1)); 3833 3834 pmap_pte1_demotions++; 3835 CTR3(KTR_PMAP, "%s: success for va %#x in pmap %p", 3836 __func__, va, pmap); 3837 3838 PDEBUG(6, printf("%s(%p): success for va %#x pte1 %#x(%#x) at %p\n", 3839 __func__, pmap, va, npte1, pte1_load(pte1p), pte1p)); 3840 return (TRUE); 3841} 3842 3843/* 3844 * Insert the given physical page (p) at 3845 * the specified virtual address (v) in the 3846 * target physical map with the protection requested. 3847 * 3848 * If specified, the page will be wired down, meaning 3849 * that the related pte can not be reclaimed. 3850 * 3851 * NB: This is the only routine which MAY NOT lazy-evaluate 3852 * or lose information. That is, this routine must actually 3853 * insert this page into the given map NOW. 3854 */ 3855int 3856pmap_enter(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot, 3857 u_int flags, int8_t psind) 3858{ 3859 pt1_entry_t *pte1p; 3860 pt2_entry_t *pte2p; 3861 pt2_entry_t npte2, opte2; 3862 pv_entry_t pv; 3863 vm_paddr_t opa, pa; 3864 vm_page_t mpte2, om; 3865 boolean_t wired; 3866 3867 va = trunc_page(va); 3868 mpte2 = NULL; 3869 wired = (flags & PMAP_ENTER_WIRED) != 0; 3870 3871 KASSERT(va <= vm_max_kernel_address, ("%s: toobig", __func__)); 3872 KASSERT(va < UPT2V_MIN_ADDRESS || va >= UPT2V_MAX_ADDRESS, 3873 ("%s: invalid to pmap_enter page table pages (va: 0x%x)", __func__, 3874 va)); 3875 if ((m->oflags & VPO_UNMANAGED) == 0 && !vm_page_xbusied(m)) 3876 VM_OBJECT_ASSERT_LOCKED(m->object); 3877 3878 rw_wlock(&pvh_global_lock); 3879 PMAP_LOCK(pmap); 3880 sched_pin(); 3881 3882 /* 3883 * In the case that a page table page is not 3884 * resident, we are creating it here. 3885 */ 3886 if (va < VM_MAXUSER_ADDRESS) { 3887 mpte2 = pmap_allocpte2(pmap, va, flags); 3888 if (mpte2 == NULL) { 3889 KASSERT((flags & PMAP_ENTER_NOSLEEP) != 0, 3890 ("pmap_allocpte2 failed with sleep allowed")); 3891 sched_unpin(); 3892 rw_wunlock(&pvh_global_lock); 3893 PMAP_UNLOCK(pmap); 3894 return (KERN_RESOURCE_SHORTAGE); 3895 } 3896 } 3897 pte1p = pmap_pte1(pmap, va); 3898 if (pte1_is_section(pte1_load(pte1p))) 3899 panic("%s: attempted on 1MB page", __func__); 3900 pte2p = pmap_pte2_quick(pmap, va); 3901 if (pte2p == NULL) 3902 panic("%s: invalid L1 page table entry va=%#x", __func__, va); 3903 3904 om = NULL; 3905 pa = VM_PAGE_TO_PHYS(m); 3906 opte2 = pte2_load(pte2p); 3907 opa = pte2_pa(opte2); 3908 /* 3909 * Mapping has not changed, must be protection or wiring change. 3910 */ 3911 if (pte2_is_valid(opte2) && (opa == pa)) { 3912 /* 3913 * Wiring change, just update stats. We don't worry about 3914 * wiring PT2 pages as they remain resident as long as there 3915 * are valid mappings in them. Hence, if a user page is wired, 3916 * the PT2 page will be also. 
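	 *
	 * Only pm_stats.wired_count is adjusted here; PTE2_W itself is
	 * set or cleared when the new PTE2 is built in the validate
	 * path below.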
3917 */ 3918 if (wired && !pte2_is_wired(opte2)) 3919 pmap->pm_stats.wired_count++; 3920 else if (!wired && pte2_is_wired(opte2)) 3921 pmap->pm_stats.wired_count--; 3922 3923 /* 3924 * Remove extra pte2 reference 3925 */ 3926 if (mpte2) 3927 pt2_wirecount_dec(mpte2, pte1_index(va)); 3928 if (pte2_is_managed(opte2)) 3929 om = m; 3930 goto validate; 3931 } 3932 3933 /* 3934 * QQQ: We think that changing physical address on writeable mapping 3935 * is not safe. Well, maybe on kernel address space with correct 3936 * locking, it can make a sense. However, we have no idea why 3937 * anyone should do that on user address space. Are we wrong? 3938 */ 3939 KASSERT((opa == 0) || (opa == pa) || 3940 !pte2_is_valid(opte2) || ((opte2 & PTE2_RO) != 0), 3941 ("%s: pmap %p va %#x(%#x) opa %#x pa %#x - gotcha %#x %#x!", 3942 __func__, pmap, va, opte2, opa, pa, flags, prot)); 3943 3944 pv = NULL; 3945 3946 /* 3947 * Mapping has changed, invalidate old range and fall through to 3948 * handle validating new mapping. 3949 */ 3950 if (opa) { 3951 if (pte2_is_wired(opte2)) 3952 pmap->pm_stats.wired_count--; 3953 if (pte2_is_managed(opte2)) { 3954 om = PHYS_TO_VM_PAGE(opa); 3955 pv = pmap_pvh_remove(&om->md, pmap, va); 3956 } 3957 /* 3958 * Remove extra pte2 reference 3959 */ 3960 if (mpte2 != NULL) 3961 pt2_wirecount_dec(mpte2, va >> PTE1_SHIFT); 3962 } else 3963 pmap->pm_stats.resident_count++; 3964 3965 /* 3966 * Enter on the PV list if part of our managed memory. 3967 */ 3968 if ((m->oflags & VPO_UNMANAGED) == 0) { 3969 KASSERT(va < kmi.clean_sva || va >= kmi.clean_eva, 3970 ("%s: managed mapping within the clean submap", __func__)); 3971 if (pv == NULL) 3972 pv = get_pv_entry(pmap, FALSE); 3973 pv->pv_va = va; 3974 TAILQ_INSERT_TAIL(&m->md.pv_list, pv, pv_next); 3975 } else if (pv != NULL) 3976 free_pv_entry(pmap, pv); 3977 3978 /* 3979 * Increment counters 3980 */ 3981 if (wired) 3982 pmap->pm_stats.wired_count++; 3983 3984validate: 3985 /* 3986 * Now validate mapping with desired protection/wiring. 3987 */ 3988 npte2 = PTE2(pa, PTE2_NM, vm_page_pte2_attr(m)); 3989 if (prot & VM_PROT_WRITE) { 3990 if (pte2_is_managed(npte2)) 3991 vm_page_aflag_set(m, PGA_WRITEABLE); 3992 } 3993 else 3994 npte2 |= PTE2_RO; 3995 if ((prot & VM_PROT_EXECUTE) == 0) 3996 npte2 |= PTE2_NX; 3997 if (wired) 3998 npte2 |= PTE2_W; 3999 if (va < VM_MAXUSER_ADDRESS) 4000 npte2 |= PTE2_U; 4001 if (pmap != kernel_pmap) 4002 npte2 |= PTE2_NG; 4003 4004 /* 4005 * If the mapping or permission bits are different, we need 4006 * to update the pte2. 4007 * 4008 * QQQ: Think again and again what to do 4009 * if the mapping is going to be changed! 4010 */ 4011 if ((opte2 & ~(PTE2_NM | PTE2_A)) != (npte2 & ~(PTE2_NM | PTE2_A))) { 4012 /* 4013 * Sync icache if exec permission and attribute VM_MEMATTR_WB_WA 4014 * is set. Do it now, before the mapping is stored and made 4015 * valid for hardware table walk. If done later, there is a race 4016 * for other threads of current process in lazy loading case. 4017 * Don't do it for kernel memory which is mapped with exec 4018 * permission even if the memory isn't going to hold executable 4019 * code. The only time when icache sync is needed is after 4020 * kernel module is loaded and the relocation info is processed. 4021 * And it's done in elf_cpu_load_file(). 4022 * 4023 * QQQ: (1) Does it exist any better way where 4024 * or how to sync icache? 4025 * (2) Now, we do it on a page basis. 
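		 *
		 * In the code below the sync is done only for executable,
		 * non-kernel, write-back mappings, and only when the physical
		 * page changes or the old mapping was non-executable.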
4026 */ 4027 if ((prot & VM_PROT_EXECUTE) && pmap != kernel_pmap && 4028 m->md.pat_mode == VM_MEMATTR_WB_WA && 4029 (opa != pa || (opte2 & PTE2_NX))) 4030 cache_icache_sync_fresh(va, pa, PAGE_SIZE); 4031 4032 npte2 |= PTE2_A; 4033 if (flags & VM_PROT_WRITE) 4034 npte2 &= ~PTE2_NM; 4035 if (opte2 & PTE2_V) { 4036 /* Change mapping with break-before-make approach. */ 4037 opte2 = pte2_load_clear(pte2p); 4038 pmap_tlb_flush(pmap, va); 4039 pte2_store(pte2p, npte2); 4040 if (opte2 & PTE2_A) { 4041 if (pte2_is_managed(opte2)) 4042 vm_page_aflag_set(om, PGA_REFERENCED); 4043 } 4044 if (pte2_is_dirty(opte2)) { 4045 if (pte2_is_managed(opte2)) 4046 vm_page_dirty(om); 4047 } 4048 if (pte2_is_managed(opte2) && 4049 TAILQ_EMPTY(&om->md.pv_list) && 4050 ((om->flags & PG_FICTITIOUS) != 0 || 4051 TAILQ_EMPTY(&pa_to_pvh(opa)->pv_list))) 4052 vm_page_aflag_clear(om, PGA_WRITEABLE); 4053 } else 4054 pte2_store(pte2p, npte2); 4055 } 4056#if 0 4057 else { 4058 /* 4059 * QQQ: In time when both access and not mofified bits are 4060 * emulated by software, this should not happen. Some 4061 * analysis is need, if this really happen. Missing 4062 * tlb flush somewhere could be the reason. 4063 */ 4064 panic("%s: pmap %p va %#x opte2 %x npte2 %x !!", __func__, pmap, 4065 va, opte2, npte2); 4066 } 4067#endif 4068 4069#if VM_NRESERVLEVEL > 0 4070 /* 4071 * If both the L2 page table page and the reservation are fully 4072 * populated, then attempt promotion. 4073 */ 4074 if ((mpte2 == NULL || pt2_is_full(mpte2, va)) && 4075 sp_enabled && (m->flags & PG_FICTITIOUS) == 0 && 4076 vm_reserv_level_iffullpop(m) == 0) 4077 pmap_promote_pte1(pmap, pte1p, va); 4078#endif 4079 sched_unpin(); 4080 rw_wunlock(&pvh_global_lock); 4081 PMAP_UNLOCK(pmap); 4082 return (KERN_SUCCESS); 4083} 4084 4085/* 4086 * Do the things to unmap a page in a process. 4087 */ 4088static int 4089pmap_remove_pte2(pmap_t pmap, pt2_entry_t *pte2p, vm_offset_t va, 4090 struct spglist *free) 4091{ 4092 pt2_entry_t opte2; 4093 vm_page_t m; 4094 4095 rw_assert(&pvh_global_lock, RA_WLOCKED); 4096 PMAP_LOCK_ASSERT(pmap, MA_OWNED); 4097 4098 /* Clear and invalidate the mapping. */ 4099 opte2 = pte2_load_clear(pte2p); 4100 pmap_tlb_flush(pmap, va); 4101 4102 KASSERT(pte2_is_valid(opte2), ("%s: pmap %p va %#x not link pte2 %#x", 4103 __func__, pmap, va, opte2)); 4104 4105 if (opte2 & PTE2_W) 4106 pmap->pm_stats.wired_count -= 1; 4107 pmap->pm_stats.resident_count -= 1; 4108 if (pte2_is_managed(opte2)) { 4109 m = PHYS_TO_VM_PAGE(pte2_pa(opte2)); 4110 if (pte2_is_dirty(opte2)) 4111 vm_page_dirty(m); 4112 if (opte2 & PTE2_A) 4113 vm_page_aflag_set(m, PGA_REFERENCED); 4114 pmap_remove_entry(pmap, m, va); 4115 } 4116 return (pmap_unuse_pt2(pmap, va, free)); 4117} 4118 4119/* 4120 * Remove a single page from a process address space. 4121 */ 4122static void 4123pmap_remove_page(pmap_t pmap, vm_offset_t va, struct spglist *free) 4124{ 4125 pt2_entry_t *pte2p; 4126 4127 rw_assert(&pvh_global_lock, RA_WLOCKED); 4128 KASSERT(curthread->td_pinned > 0, 4129 ("%s: curthread not pinned", __func__)); 4130 PMAP_LOCK_ASSERT(pmap, MA_OWNED); 4131 if ((pte2p = pmap_pte2_quick(pmap, va)) == NULL || 4132 !pte2_is_valid(pte2_load(pte2p))) 4133 return; 4134 pmap_remove_pte2(pmap, pte2p, va, free); 4135} 4136 4137/* 4138 * Remove the given range of addresses from the specified map. 4139 * 4140 * It is assumed that the start and end are properly 4141 * rounded to the page size. 
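 *
 * A 1MB section that is only partially covered by the range is
 * demoted first and then removed one 4KB page at a time. A
 * single-page call such as pmap_remove(pmap, va, va + PAGE_SIZE)
 * is short-circuited below when va is mapped by an L2 page table.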
4142 */ 4143void 4144pmap_remove(pmap_t pmap, vm_offset_t sva, vm_offset_t eva) 4145{ 4146 vm_offset_t nextva; 4147 pt1_entry_t *pte1p, pte1; 4148 pt2_entry_t *pte2p, pte2; 4149 struct spglist free; 4150 4151 /* 4152 * Perform an unsynchronized read. This is, however, safe. 4153 */ 4154 if (pmap->pm_stats.resident_count == 0) 4155 return; 4156 4157 SLIST_INIT(&free); 4158 4159 rw_wlock(&pvh_global_lock); 4160 sched_pin(); 4161 PMAP_LOCK(pmap); 4162 4163 /* 4164 * Special handling of removing one page. A very common 4165 * operation and easy to short circuit some code. 4166 */ 4167 if (sva + PAGE_SIZE == eva) { 4168 pte1 = pte1_load(pmap_pte1(pmap, sva)); 4169 if (pte1_is_link(pte1)) { 4170 pmap_remove_page(pmap, sva, &free); 4171 goto out; 4172 } 4173 } 4174 4175 for (; sva < eva; sva = nextva) { 4176 /* 4177 * Calculate address for next L2 page table. 4178 */ 4179 nextva = pte1_trunc(sva + PTE1_SIZE); 4180 if (nextva < sva) 4181 nextva = eva; 4182 if (pmap->pm_stats.resident_count == 0) 4183 break; 4184 4185 pte1p = pmap_pte1(pmap, sva); 4186 pte1 = pte1_load(pte1p); 4187 4188 /* 4189 * Weed out invalid mappings. Note: we assume that the L1 page 4190 * table is always allocated, and in kernel virtual. 4191 */ 4192 if (pte1 == 0) 4193 continue; 4194 4195 if (pte1_is_section(pte1)) { 4196 /* 4197 * Are we removing the entire large page? If not, 4198 * demote the mapping and fall through. 4199 */ 4200 if (sva + PTE1_SIZE == nextva && eva >= nextva) { 4201 pmap_remove_pte1(pmap, pte1p, sva, &free); 4202 continue; 4203 } else if (!pmap_demote_pte1(pmap, pte1p, sva)) { 4204 /* The large page mapping was destroyed. */ 4205 continue; 4206 } 4207#ifdef INVARIANTS 4208 else { 4209 /* Update pte1 after demotion. */ 4210 pte1 = pte1_load(pte1p); 4211 } 4212#endif 4213 } 4214 4215 KASSERT(pte1_is_link(pte1), ("%s: pmap %p va %#x pte1 %#x at %p" 4216 " is not link", __func__, pmap, sva, pte1, pte1p)); 4217 4218 /* 4219 * Limit our scan to either the end of the va represented 4220 * by the current L2 page table page, or to the end of the 4221 * range being removed. 4222 */ 4223 if (nextva > eva) 4224 nextva = eva; 4225 4226 for (pte2p = pmap_pte2_quick(pmap, sva); sva != nextva; 4227 pte2p++, sva += PAGE_SIZE) { 4228 pte2 = pte2_load(pte2p); 4229 if (!pte2_is_valid(pte2)) 4230 continue; 4231 if (pmap_remove_pte2(pmap, pte2p, sva, &free)) 4232 break; 4233 } 4234 } 4235out: 4236 sched_unpin(); 4237 rw_wunlock(&pvh_global_lock); 4238 PMAP_UNLOCK(pmap); 4239 pmap_free_zero_pages(&free); 4240} 4241 4242/* 4243 * Routine: pmap_remove_all 4244 * Function: 4245 * Removes this physical page from 4246 * all physical maps in which it resides. 4247 * Reflects back modify bits to the pager. 4248 * 4249 * Notes: 4250 * Original versions of this routine were very 4251 * inefficient because they iteratively called 4252 * pmap_remove (slow...) 
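 *
 * Any 1MB mappings of the page are demoted first; the remaining
 * 4KB mappings are then removed and PGA_WRITEABLE is cleared.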
4253 */ 4254 4255void 4256pmap_remove_all(vm_page_t m) 4257{ 4258 struct md_page *pvh; 4259 pv_entry_t pv; 4260 pmap_t pmap; 4261 pt2_entry_t *pte2p, opte2; 4262 pt1_entry_t *pte1p; 4263 vm_offset_t va; 4264 struct spglist free; 4265 4266 KASSERT((m->oflags & VPO_UNMANAGED) == 0, 4267 ("%s: page %p is not managed", __func__, m)); 4268 SLIST_INIT(&free); 4269 rw_wlock(&pvh_global_lock); 4270 sched_pin(); 4271 if ((m->flags & PG_FICTITIOUS) != 0) 4272 goto small_mappings; 4273 pvh = pa_to_pvh(VM_PAGE_TO_PHYS(m)); 4274 while ((pv = TAILQ_FIRST(&pvh->pv_list)) != NULL) { 4275 va = pv->pv_va; 4276 pmap = PV_PMAP(pv); 4277 PMAP_LOCK(pmap); 4278 pte1p = pmap_pte1(pmap, va); 4279 (void)pmap_demote_pte1(pmap, pte1p, va); 4280 PMAP_UNLOCK(pmap); 4281 } 4282small_mappings: 4283 while ((pv = TAILQ_FIRST(&m->md.pv_list)) != NULL) { 4284 pmap = PV_PMAP(pv); 4285 PMAP_LOCK(pmap); 4286 pmap->pm_stats.resident_count--; 4287 pte1p = pmap_pte1(pmap, pv->pv_va); 4288 KASSERT(!pte1_is_section(pte1_load(pte1p)), ("%s: found " 4289 "a 1mpage in page %p's pv list", __func__, m)); 4290 pte2p = pmap_pte2_quick(pmap, pv->pv_va); 4291 opte2 = pte2_load_clear(pte2p); 4292 pmap_tlb_flush(pmap, pv->pv_va); 4293 KASSERT(pte2_is_valid(opte2), ("%s: pmap %p va %x zero pte2", 4294 __func__, pmap, pv->pv_va)); 4295 if (pte2_is_wired(opte2)) 4296 pmap->pm_stats.wired_count--; 4297 if (opte2 & PTE2_A) 4298 vm_page_aflag_set(m, PGA_REFERENCED); 4299 4300 /* 4301 * Update the vm_page_t clean and reference bits. 4302 */ 4303 if (pte2_is_dirty(opte2)) 4304 vm_page_dirty(m); 4305 pmap_unuse_pt2(pmap, pv->pv_va, &free); 4306 TAILQ_REMOVE(&m->md.pv_list, pv, pv_next); 4307 free_pv_entry(pmap, pv); 4308 PMAP_UNLOCK(pmap); 4309 } 4310 vm_page_aflag_clear(m, PGA_WRITEABLE); 4311 sched_unpin(); 4312 rw_wunlock(&pvh_global_lock); 4313 pmap_free_zero_pages(&free); 4314} 4315 4316/* 4317 * Just subroutine for pmap_remove_pages() to reasonably satisfy 4318 * good coding style, a.k.a. 80 character line width limit hell. 4319 */ 4320static __inline void 4321pmap_remove_pte1_quick(pmap_t pmap, pt1_entry_t pte1, pv_entry_t pv, 4322 struct spglist *free) 4323{ 4324 vm_paddr_t pa; 4325 vm_page_t m, mt, mpt2pg; 4326 struct md_page *pvh; 4327 4328 pa = pte1_pa(pte1); 4329 m = PHYS_TO_VM_PAGE(pa); 4330 4331 KASSERT(m->phys_addr == pa, ("%s: vm_page_t %p addr mismatch %#x %#x", 4332 __func__, m, m->phys_addr, pa)); 4333 KASSERT((m->flags & PG_FICTITIOUS) != 0 || 4334 m < &vm_page_array[vm_page_array_size], 4335 ("%s: bad pte1 %#x", __func__, pte1)); 4336 4337 if (pte1_is_dirty(pte1)) { 4338 for (mt = m; mt < &m[PTE1_SIZE / PAGE_SIZE]; mt++) 4339 vm_page_dirty(mt); 4340 } 4341 4342 pmap->pm_stats.resident_count -= PTE1_SIZE / PAGE_SIZE; 4343 pvh = pa_to_pvh(pa); 4344 TAILQ_REMOVE(&pvh->pv_list, pv, pv_next); 4345 if (TAILQ_EMPTY(&pvh->pv_list)) { 4346 for (mt = m; mt < &m[PTE1_SIZE / PAGE_SIZE]; mt++) 4347 if (TAILQ_EMPTY(&mt->md.pv_list)) 4348 vm_page_aflag_clear(mt, PGA_WRITEABLE); 4349 } 4350 mpt2pg = pmap_pt2_page(pmap, pv->pv_va); 4351 if (mpt2pg != NULL) 4352 pmap_unwire_pt2_all(pmap, pv->pv_va, mpt2pg, free); 4353} 4354 4355/* 4356 * Just subroutine for pmap_remove_pages() to reasonably satisfy 4357 * good coding style, a.k.a. 80 character line width limit hell. 
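 *
 * This is the 4KB (pte2) counterpart of pmap_remove_pte1_quick()
 * above.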
4358 */ 4359static __inline void 4360pmap_remove_pte2_quick(pmap_t pmap, pt2_entry_t pte2, pv_entry_t pv, 4361 struct spglist *free) 4362{ 4363 vm_paddr_t pa; 4364 vm_page_t m; 4365 struct md_page *pvh; 4366 4367 pa = pte2_pa(pte2); 4368 m = PHYS_TO_VM_PAGE(pa); 4369 4370 KASSERT(m->phys_addr == pa, ("%s: vm_page_t %p addr mismatch %#x %#x", 4371 __func__, m, m->phys_addr, pa)); 4372 KASSERT((m->flags & PG_FICTITIOUS) != 0 || 4373 m < &vm_page_array[vm_page_array_size], 4374 ("%s: bad pte2 %#x", __func__, pte2)); 4375 4376 if (pte2_is_dirty(pte2)) 4377 vm_page_dirty(m); 4378 4379 pmap->pm_stats.resident_count--; 4380 TAILQ_REMOVE(&m->md.pv_list, pv, pv_next); 4381 if (TAILQ_EMPTY(&m->md.pv_list) && (m->flags & PG_FICTITIOUS) == 0) { 4382 pvh = pa_to_pvh(pa); 4383 if (TAILQ_EMPTY(&pvh->pv_list)) 4384 vm_page_aflag_clear(m, PGA_WRITEABLE); 4385 } 4386 pmap_unuse_pt2(pmap, pv->pv_va, free); 4387} 4388 4389/* 4390 * Remove all pages from specified address space this aids process 4391 * exit speeds. Also, this code is special cased for current process 4392 * only, but can have the more generic (and slightly slower) mode enabled. 4393 * This is much faster than pmap_remove in the case of running down 4394 * an entire address space. 4395 */ 4396void 4397pmap_remove_pages(pmap_t pmap) 4398{ 4399 pt1_entry_t *pte1p, pte1; 4400 pt2_entry_t *pte2p, pte2; 4401 pv_entry_t pv; 4402 struct pv_chunk *pc, *npc; 4403 struct spglist free; 4404 int field, idx; 4405 int32_t bit; 4406 uint32_t inuse, bitmask; 4407 boolean_t allfree; 4408 4409 /* 4410 * Assert that the given pmap is only active on the current 4411 * CPU. Unfortunately, we cannot block another CPU from 4412 * activating the pmap while this function is executing. 4413 */ 4414 KASSERT(pmap == vmspace_pmap(curthread->td_proc->p_vmspace), 4415 ("%s: non-current pmap %p", __func__, pmap)); 4416#if defined(SMP) && defined(INVARIANTS) 4417 { 4418 cpuset_t other_cpus; 4419 4420 sched_pin(); 4421 other_cpus = pmap->pm_active; 4422 CPU_CLR(PCPU_GET(cpuid), &other_cpus); 4423 sched_unpin(); 4424 KASSERT(CPU_EMPTY(&other_cpus), 4425 ("%s: pmap %p active on other cpus", __func__, pmap)); 4426 } 4427#endif 4428 SLIST_INIT(&free); 4429 rw_wlock(&pvh_global_lock); 4430 PMAP_LOCK(pmap); 4431 sched_pin(); 4432 TAILQ_FOREACH_SAFE(pc, &pmap->pm_pvchunk, pc_list, npc) { 4433 KASSERT(pc->pc_pmap == pmap, ("%s: wrong pmap %p %p", 4434 __func__, pmap, pc->pc_pmap)); 4435 allfree = TRUE; 4436 for (field = 0; field < _NPCM; field++) { 4437 inuse = (~(pc->pc_map[field])) & pc_freemask[field]; 4438 while (inuse != 0) { 4439 bit = ffs(inuse) - 1; 4440 bitmask = 1UL << bit; 4441 idx = field * 32 + bit; 4442 pv = &pc->pc_pventry[idx]; 4443 inuse &= ~bitmask; 4444 4445 /* 4446 * Note that we cannot remove wired pages 4447 * from a process' mapping at this time 4448 */ 4449 pte1p = pmap_pte1(pmap, pv->pv_va); 4450 pte1 = pte1_load(pte1p); 4451 if (pte1_is_section(pte1)) { 4452 if (pte1_is_wired(pte1)) { 4453 allfree = FALSE; 4454 continue; 4455 } 4456 pte1_clear(pte1p); 4457 pmap_remove_pte1_quick(pmap, pte1, pv, 4458 &free); 4459 } 4460 else if (pte1_is_link(pte1)) { 4461 pte2p = pt2map_entry(pv->pv_va); 4462 pte2 = pte2_load(pte2p); 4463 4464 if (!pte2_is_valid(pte2)) { 4465 printf("%s: pmap %p va %#x " 4466 "pte2 %#x\n", __func__, 4467 pmap, pv->pv_va, pte2); 4468 panic("bad pte2"); 4469 } 4470 4471 if (pte2_is_wired(pte2)) { 4472 allfree = FALSE; 4473 continue; 4474 } 4475 pte2_clear(pte2p); 4476 pmap_remove_pte2_quick(pmap, pte2, pv, 4477 &free); 4478 } else { 4479 
printf("%s: pmap %p va %#x pte1 %#x\n", 4480 __func__, pmap, pv->pv_va, pte1); 4481 panic("bad pte1"); 4482 } 4483 4484 /* Mark free */ 4485 PV_STAT(pv_entry_frees++); 4486 PV_STAT(pv_entry_spare++); 4487 pv_entry_count--; 4488 pc->pc_map[field] |= bitmask; 4489 } 4490 } 4491 if (allfree) { 4492 TAILQ_REMOVE(&pmap->pm_pvchunk, pc, pc_list); 4493 free_pv_chunk(pc); 4494 } 4495 } 4496 tlb_flush_all_ng_local(); 4497 sched_unpin(); 4498 rw_wunlock(&pvh_global_lock); 4499 PMAP_UNLOCK(pmap); 4500 pmap_free_zero_pages(&free); 4501} 4502 4503/* 4504 * This code makes some *MAJOR* assumptions: 4505 * 1. Current pmap & pmap exists. 4506 * 2. Not wired. 4507 * 3. Read access. 4508 * 4. No L2 page table pages. 4509 * but is *MUCH* faster than pmap_enter... 4510 */ 4511static vm_page_t 4512pmap_enter_quick_locked(pmap_t pmap, vm_offset_t va, vm_page_t m, 4513 vm_prot_t prot, vm_page_t mpt2pg) 4514{ 4515 pt2_entry_t *pte2p, pte2; 4516 vm_paddr_t pa; 4517 struct spglist free; 4518 uint32_t l2prot; 4519 4520 KASSERT(va < kmi.clean_sva || va >= kmi.clean_eva || 4521 (m->oflags & VPO_UNMANAGED) != 0, 4522 ("%s: managed mapping within the clean submap", __func__)); 4523 rw_assert(&pvh_global_lock, RA_WLOCKED); 4524 PMAP_LOCK_ASSERT(pmap, MA_OWNED); 4525 4526 /* 4527 * In the case that a L2 page table page is not 4528 * resident, we are creating it here. 4529 */ 4530 if (va < VM_MAXUSER_ADDRESS) { 4531 u_int pte1_idx; 4532 pt1_entry_t pte1, *pte1p; 4533 vm_paddr_t pt2_pa; 4534 4535 /* 4536 * Get L1 page table things. 4537 */ 4538 pte1_idx = pte1_index(va); 4539 pte1p = pmap_pte1(pmap, va); 4540 pte1 = pte1_load(pte1p); 4541 4542 if (mpt2pg && (mpt2pg->pindex == (pte1_idx & ~PT2PG_MASK))) { 4543 /* 4544 * Each of NPT2_IN_PG L2 page tables on the page can 4545 * come here. Make sure that associated L1 page table 4546 * link is established. 4547 * 4548 * QQQ: It comes that we don't establish all links to 4549 * L2 page tables for newly allocated L2 page 4550 * tables page. 4551 */ 4552 KASSERT(!pte1_is_section(pte1), 4553 ("%s: pte1 %#x is section", __func__, pte1)); 4554 if (!pte1_is_link(pte1)) { 4555 pt2_pa = page_pt2pa(VM_PAGE_TO_PHYS(mpt2pg), 4556 pte1_idx); 4557 pte1_store(pte1p, PTE1_LINK(pt2_pa)); 4558 } 4559 pt2_wirecount_inc(mpt2pg, pte1_idx); 4560 } else { 4561 /* 4562 * If the L2 page table page is mapped, we just 4563 * increment the hold count, and activate it. 4564 */ 4565 if (pte1_is_section(pte1)) { 4566 return (NULL); 4567 } else if (pte1_is_link(pte1)) { 4568 mpt2pg = PHYS_TO_VM_PAGE(pte1_link_pa(pte1)); 4569 pt2_wirecount_inc(mpt2pg, pte1_idx); 4570 } else { 4571 mpt2pg = _pmap_allocpte2(pmap, va, 4572 PMAP_ENTER_NOSLEEP); 4573 if (mpt2pg == NULL) 4574 return (NULL); 4575 } 4576 } 4577 } else { 4578 mpt2pg = NULL; 4579 } 4580 4581 /* 4582 * This call to pt2map_entry() makes the assumption that we are 4583 * entering the page into the current pmap. In order to support 4584 * quick entry into any pmap, one would likely use pmap_pte2_quick(). 4585 * But that isn't as quick as pt2map_entry(). 4586 */ 4587 pte2p = pt2map_entry(va); 4588 pte2 = pte2_load(pte2p); 4589 if (pte2_is_valid(pte2)) { 4590 if (mpt2pg != NULL) { 4591 /* 4592 * Remove extra pte2 reference 4593 */ 4594 pt2_wirecount_dec(mpt2pg, pte1_index(va)); 4595 mpt2pg = NULL; 4596 } 4597 return (NULL); 4598 } 4599 4600 /* 4601 * Enter on the PV list if part of our managed memory. 
4602 */ 4603 if ((m->oflags & VPO_UNMANAGED) == 0 && 4604 !pmap_try_insert_pv_entry(pmap, va, m)) { 4605 if (mpt2pg != NULL) { 4606 SLIST_INIT(&free); 4607 if (pmap_unwire_pt2(pmap, va, mpt2pg, &free)) { 4608 pmap_tlb_flush(pmap, va); 4609 pmap_free_zero_pages(&free); 4610 } 4611 4612 mpt2pg = NULL; 4613 } 4614 return (NULL); 4615 } 4616 4617 /* 4618 * Increment counters 4619 */ 4620 pmap->pm_stats.resident_count++; 4621 4622 /* 4623 * Now validate mapping with RO protection 4624 */ 4625 pa = VM_PAGE_TO_PHYS(m); 4626 l2prot = PTE2_RO | PTE2_NM; 4627 if (va < VM_MAXUSER_ADDRESS) 4628 l2prot |= PTE2_U | PTE2_NG; 4629 if ((prot & VM_PROT_EXECUTE) == 0) 4630 l2prot |= PTE2_NX; 4631 else if (m->md.pat_mode == VM_MEMATTR_WB_WA && pmap != kernel_pmap) { 4632 /* 4633 * Sync icache if exec permission and attribute VM_MEMATTR_WB_WA 4634 * is set. QQQ: For more info, see comments in pmap_enter(). 4635 */ 4636 cache_icache_sync_fresh(va, pa, PAGE_SIZE); 4637 } 4638 pte2_store(pte2p, PTE2(pa, l2prot, vm_page_pte2_attr(m))); 4639 4640 return (mpt2pg); 4641} 4642 4643void 4644pmap_enter_quick(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot) 4645{ 4646 4647 rw_wlock(&pvh_global_lock); 4648 PMAP_LOCK(pmap); 4649 (void)pmap_enter_quick_locked(pmap, va, m, prot, NULL); 4650 rw_wunlock(&pvh_global_lock); 4651 PMAP_UNLOCK(pmap); 4652} 4653 4654/* 4655 * Tries to create 1MB page mapping. Returns TRUE if successful and 4656 * FALSE otherwise. Fails if (1) a page table page cannot be allocated without 4657 * blocking, (2) a mapping already exists at the specified virtual address, or 4658 * (3) a pv entry cannot be allocated without reclaiming another pv entry. 4659 */ 4660static boolean_t 4661pmap_enter_pte1(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot) 4662{ 4663 pt1_entry_t *pte1p; 4664 vm_paddr_t pa; 4665 uint32_t l1prot; 4666 4667 rw_assert(&pvh_global_lock, RA_WLOCKED); 4668 PMAP_LOCK_ASSERT(pmap, MA_OWNED); 4669 pte1p = pmap_pte1(pmap, va); 4670 if (pte1_is_valid(pte1_load(pte1p))) { 4671 CTR3(KTR_PMAP, "%s: failure for va %#lx in pmap %p", __func__, 4672 va, pmap); 4673 return (FALSE); 4674 } 4675 if ((m->oflags & VPO_UNMANAGED) == 0) { 4676 /* 4677 * Abort this mapping if its PV entry could not be created. 4678 */ 4679 if (!pmap_pv_insert_pte1(pmap, va, VM_PAGE_TO_PHYS(m))) { 4680 CTR3(KTR_PMAP, "%s: failure for va %#lx in pmap %p", 4681 __func__, va, pmap); 4682 return (FALSE); 4683 } 4684 } 4685 /* 4686 * Increment counters. 4687 */ 4688 pmap->pm_stats.resident_count += PTE1_SIZE / PAGE_SIZE; 4689 4690 /* 4691 * Map the section. 4692 * 4693 * QQQ: Why VM_PROT_WRITE is not evaluated and the mapping is 4694 * made readonly? 4695 */ 4696 pa = VM_PAGE_TO_PHYS(m); 4697 l1prot = PTE1_RO | PTE1_NM; 4698 if (va < VM_MAXUSER_ADDRESS) 4699 l1prot |= PTE1_U | PTE1_NG; 4700 if ((prot & VM_PROT_EXECUTE) == 0) 4701 l1prot |= PTE1_NX; 4702 else if (m->md.pat_mode == VM_MEMATTR_WB_WA && pmap != kernel_pmap) { 4703 /* 4704 * Sync icache if exec permission and attribute VM_MEMATTR_WB_WA 4705 * is set. QQQ: For more info, see comments in pmap_enter(). 4706 */ 4707 cache_icache_sync_fresh(va, pa, PTE1_SIZE); 4708 } 4709 pte1_store(pte1p, PTE1(pa, l1prot, ATTR_TO_L1(vm_page_pte2_attr(m)))); 4710 4711 pmap_pte1_mappings++; 4712 CTR3(KTR_PMAP, "%s: success for va %#lx in pmap %p", __func__, va, 4713 pmap); 4714 return (TRUE); 4715} 4716 4717/* 4718 * Maps a sequence of resident pages belonging to the same object. 4719 * The sequence begins with the given page m_start. 
This page is 4720 * mapped at the given virtual address start. Each subsequent page is 4721 * mapped at a virtual address that is offset from start by the same 4722 * amount as the page is offset from m_start within the object. The 4723 * last page in the sequence is the page with the largest offset from 4724 * m_start that can be mapped at a virtual address less than the given 4725 * virtual address end. Not every virtual page between start and end 4726 * is mapped; only those for which a resident page exists with the 4727 * corresponding offset from m_start are mapped. 4728 */ 4729void 4730pmap_enter_object(pmap_t pmap, vm_offset_t start, vm_offset_t end, 4731 vm_page_t m_start, vm_prot_t prot) 4732{ 4733 vm_offset_t va; 4734 vm_page_t m, mpt2pg; 4735 vm_pindex_t diff, psize; 4736 4737 PDEBUG(6, printf("%s: pmap %p start %#x end %#x m %p prot %#x\n", 4738 __func__, pmap, start, end, m_start, prot)); 4739 4740 VM_OBJECT_ASSERT_LOCKED(m_start->object); 4741 psize = atop(end - start); 4742 mpt2pg = NULL; 4743 m = m_start; 4744 rw_wlock(&pvh_global_lock); 4745 PMAP_LOCK(pmap); 4746 while (m != NULL && (diff = m->pindex - m_start->pindex) < psize) { 4747 va = start + ptoa(diff); 4748 if ((va & PTE1_OFFSET) == 0 && va + PTE1_SIZE <= end && 4749 m->psind == 1 && sp_enabled && 4750 pmap_enter_pte1(pmap, va, m, prot)) 4751 m = &m[PTE1_SIZE / PAGE_SIZE - 1]; 4752 else 4753 mpt2pg = pmap_enter_quick_locked(pmap, va, m, prot, 4754 mpt2pg); 4755 m = TAILQ_NEXT(m, listq); 4756 } 4757 rw_wunlock(&pvh_global_lock); 4758 PMAP_UNLOCK(pmap); 4759} 4760 4761/* 4762 * This code maps large physical mmap regions into the 4763 * processor address space. Note that some shortcuts 4764 * are taken, but the code works. 4765 */ 4766void 4767pmap_object_init_pt(pmap_t pmap, vm_offset_t addr, vm_object_t object, 4768 vm_pindex_t pindex, vm_size_t size) 4769{ 4770 pt1_entry_t *pte1p; 4771 vm_paddr_t pa, pte2_pa; 4772 vm_page_t p; 4773 vm_memattr_t pat_mode; 4774 u_int l1attr, l1prot; 4775 4776 VM_OBJECT_ASSERT_WLOCKED(object); 4777 KASSERT(object->type == OBJT_DEVICE || object->type == OBJT_SG, 4778 ("%s: non-device object", __func__)); 4779 if ((addr & PTE1_OFFSET) == 0 && (size & PTE1_OFFSET) == 0) { 4780 if (!vm_object_populate(object, pindex, pindex + atop(size))) 4781 return; 4782 p = vm_page_lookup(object, pindex); 4783 KASSERT(p->valid == VM_PAGE_BITS_ALL, 4784 ("%s: invalid page %p", __func__, p)); 4785 pat_mode = p->md.pat_mode; 4786 4787 /* 4788 * Abort the mapping if the first page is not physically 4789 * aligned to a 1MB page boundary. 4790 */ 4791 pte2_pa = VM_PAGE_TO_PHYS(p); 4792 if (pte2_pa & PTE1_OFFSET) 4793 return; 4794 4795 /* 4796 * Skip the first page. Abort the mapping if the rest of 4797 * the pages are not physically contiguous or have differing 4798 * memory attributes. 4799 */ 4800 p = TAILQ_NEXT(p, listq); 4801 for (pa = pte2_pa + PAGE_SIZE; pa < pte2_pa + size; 4802 pa += PAGE_SIZE) { 4803 KASSERT(p->valid == VM_PAGE_BITS_ALL, 4804 ("%s: invalid page %p", __func__, p)); 4805 if (pa != VM_PAGE_TO_PHYS(p) || 4806 pat_mode != p->md.pat_mode) 4807 return; 4808 p = TAILQ_NEXT(p, listq); 4809 } 4810 4811 /* 4812 * Map using 1MB pages. 4813 * 4814 * QQQ: Well, we are mapping a section, so same condition must 4815 * be hold like during promotion. It looks that only RW mapping 4816 * is done here, so readonly mapping must be done elsewhere. 
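		 *
		 * Note that the referenced and modified bits are preset
		 * below, presumably so that no emulation aborts are taken
		 * on these unmanaged device mappings.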
4817 */ 4818 l1prot = PTE1_U | PTE1_NG | PTE1_RW | PTE1_M | PTE1_A; 4819 l1attr = ATTR_TO_L1(vm_memattr_to_pte2(pat_mode)); 4820 PMAP_LOCK(pmap); 4821 for (pa = pte2_pa; pa < pte2_pa + size; pa += PTE1_SIZE) { 4822 pte1p = pmap_pte1(pmap, addr); 4823 if (!pte1_is_valid(pte1_load(pte1p))) { 4824 pte1_store(pte1p, PTE1(pa, l1prot, l1attr)); 4825 pmap->pm_stats.resident_count += PTE1_SIZE / 4826 PAGE_SIZE; 4827 pmap_pte1_mappings++; 4828 } 4829 /* Else continue on if the PTE1 is already valid. */ 4830 addr += PTE1_SIZE; 4831 } 4832 PMAP_UNLOCK(pmap); 4833 } 4834} 4835 4836/* 4837 * Do the things to protect a 1mpage in a process. 4838 */ 4839static void 4840pmap_protect_pte1(pmap_t pmap, pt1_entry_t *pte1p, vm_offset_t sva, 4841 vm_prot_t prot) 4842{ 4843 pt1_entry_t npte1, opte1; 4844 vm_offset_t eva, va; 4845 vm_page_t m; 4846 4847 PMAP_LOCK_ASSERT(pmap, MA_OWNED); 4848 KASSERT((sva & PTE1_OFFSET) == 0, 4849 ("%s: sva is not 1mpage aligned", __func__)); 4850 4851 opte1 = npte1 = pte1_load(pte1p); 4852 if (pte1_is_managed(opte1) && pte1_is_dirty(opte1)) { 4853 eva = sva + PTE1_SIZE; 4854 for (va = sva, m = PHYS_TO_VM_PAGE(pte1_pa(opte1)); 4855 va < eva; va += PAGE_SIZE, m++) 4856 vm_page_dirty(m); 4857 } 4858 if ((prot & VM_PROT_WRITE) == 0) 4859 npte1 |= PTE1_RO | PTE1_NM; 4860 if ((prot & VM_PROT_EXECUTE) == 0) 4861 npte1 |= PTE1_NX; 4862 4863 /* 4864 * QQQ: Herein, execute permission is never set. 4865 * It only can be cleared. So, no icache 4866 * syncing is needed. 4867 */ 4868 4869 if (npte1 != opte1) { 4870 pte1_store(pte1p, npte1); 4871 pmap_tlb_flush(pmap, sva); 4872 } 4873} 4874 4875/* 4876 * Set the physical protection on the 4877 * specified range of this map as requested. 4878 */ 4879void 4880pmap_protect(pmap_t pmap, vm_offset_t sva, vm_offset_t eva, vm_prot_t prot) 4881{ 4882 boolean_t pv_lists_locked; 4883 vm_offset_t nextva; 4884 pt1_entry_t *pte1p, pte1; 4885 pt2_entry_t *pte2p, opte2, npte2; 4886 4887 KASSERT((prot & ~VM_PROT_ALL) == 0, ("invalid prot %x", prot)); 4888 if (prot == VM_PROT_NONE) { 4889 pmap_remove(pmap, sva, eva); 4890 return; 4891 } 4892 4893 if ((prot & (VM_PROT_WRITE | VM_PROT_EXECUTE)) == 4894 (VM_PROT_WRITE | VM_PROT_EXECUTE)) 4895 return; 4896 4897 if (pmap_is_current(pmap)) 4898 pv_lists_locked = FALSE; 4899 else { 4900 pv_lists_locked = TRUE; 4901resume: 4902 rw_wlock(&pvh_global_lock); 4903 sched_pin(); 4904 } 4905 4906 PMAP_LOCK(pmap); 4907 for (; sva < eva; sva = nextva) { 4908 /* 4909 * Calculate address for next L2 page table. 4910 */ 4911 nextva = pte1_trunc(sva + PTE1_SIZE); 4912 if (nextva < sva) 4913 nextva = eva; 4914 4915 pte1p = pmap_pte1(pmap, sva); 4916 pte1 = pte1_load(pte1p); 4917 4918 /* 4919 * Weed out invalid mappings. Note: we assume that L1 page 4920 * page table is always allocated, and in kernel virtual. 4921 */ 4922 if (pte1 == 0) 4923 continue; 4924 4925 if (pte1_is_section(pte1)) { 4926 /* 4927 * Are we protecting the entire large page? If not, 4928 * demote the mapping and fall through. 4929 */ 4930 if (sva + PTE1_SIZE == nextva && eva >= nextva) { 4931 pmap_protect_pte1(pmap, pte1p, sva, prot); 4932 continue; 4933 } else { 4934 if (!pv_lists_locked) { 4935 pv_lists_locked = TRUE; 4936 if (!rw_try_wlock(&pvh_global_lock)) { 4937 PMAP_UNLOCK(pmap); 4938 goto resume; 4939 } 4940 sched_pin(); 4941 } 4942 if (!pmap_demote_pte1(pmap, pte1p, sva)) { 4943 /* 4944 * The large page mapping 4945 * was destroyed. 
4946 */ 4947 continue; 4948 } 4949#ifdef INVARIANTS 4950 else { 4951 /* Update pte1 after demotion */ 4952 pte1 = pte1_load(pte1p); 4953 } 4954#endif 4955 } 4956 } 4957 4958 KASSERT(pte1_is_link(pte1), ("%s: pmap %p va %#x pte1 %#x at %p" 4959 " is not link", __func__, pmap, sva, pte1, pte1p)); 4960 4961 /* 4962 * Limit our scan to either the end of the va represented 4963 * by the current L2 page table page, or to the end of the 4964 * range being protected. 4965 */ 4966 if (nextva > eva) 4967 nextva = eva; 4968 4969 for (pte2p = pmap_pte2_quick(pmap, sva); sva != nextva; pte2p++, 4970 sva += PAGE_SIZE) { 4971 vm_page_t m; 4972 4973 opte2 = npte2 = pte2_load(pte2p); 4974 if (!pte2_is_valid(opte2)) 4975 continue; 4976 4977 if ((prot & VM_PROT_WRITE) == 0) { 4978 if (pte2_is_managed(opte2) && 4979 pte2_is_dirty(opte2)) { 4980 m = PHYS_TO_VM_PAGE(pte2_pa(opte2)); 4981 vm_page_dirty(m); 4982 } 4983 npte2 |= PTE2_RO | PTE2_NM; 4984 } 4985 4986 if ((prot & VM_PROT_EXECUTE) == 0) 4987 npte2 |= PTE2_NX; 4988 4989 /* 4990 * QQQ: Herein, execute permission is never set. 4991 * It only can be cleared. So, no icache 4992 * syncing is needed. 4993 */ 4994 4995 if (npte2 != opte2) { 4996 pte2_store(pte2p, npte2); 4997 pmap_tlb_flush(pmap, sva); 4998 } 4999 } 5000 } 5001 if (pv_lists_locked) { 5002 sched_unpin(); 5003 rw_wunlock(&pvh_global_lock); 5004 } 5005 PMAP_UNLOCK(pmap); 5006} 5007 5008/* 5009 * pmap_pvh_wired_mappings: 5010 * 5011 * Return the updated number "count" of managed mappings that are wired. 5012 */ 5013static int 5014pmap_pvh_wired_mappings(struct md_page *pvh, int count) 5015{ 5016 pmap_t pmap; 5017 pt1_entry_t pte1; 5018 pt2_entry_t pte2; 5019 pv_entry_t pv; 5020 5021 rw_assert(&pvh_global_lock, RA_WLOCKED); 5022 sched_pin(); 5023 TAILQ_FOREACH(pv, &pvh->pv_list, pv_next) { 5024 pmap = PV_PMAP(pv); 5025 PMAP_LOCK(pmap); 5026 pte1 = pte1_load(pmap_pte1(pmap, pv->pv_va)); 5027 if (pte1_is_section(pte1)) { 5028 if (pte1_is_wired(pte1)) 5029 count++; 5030 } else { 5031 KASSERT(pte1_is_link(pte1), 5032 ("%s: pte1 %#x is not link", __func__, pte1)); 5033 pte2 = pte2_load(pmap_pte2_quick(pmap, pv->pv_va)); 5034 if (pte2_is_wired(pte2)) 5035 count++; 5036 } 5037 PMAP_UNLOCK(pmap); 5038 } 5039 sched_unpin(); 5040 return (count); 5041} 5042 5043/* 5044 * pmap_page_wired_mappings: 5045 * 5046 * Return the number of managed mappings to the given physical page 5047 * that are wired. 5048 */ 5049int 5050pmap_page_wired_mappings(vm_page_t m) 5051{ 5052 int count; 5053 5054 count = 0; 5055 if ((m->oflags & VPO_UNMANAGED) != 0) 5056 return (count); 5057 rw_wlock(&pvh_global_lock); 5058 count = pmap_pvh_wired_mappings(&m->md, count); 5059 if ((m->flags & PG_FICTITIOUS) == 0) { 5060 count = pmap_pvh_wired_mappings(pa_to_pvh(VM_PAGE_TO_PHYS(m)), 5061 count); 5062 } 5063 rw_wunlock(&pvh_global_lock); 5064 return (count); 5065} 5066 5067/* 5068 * Returns TRUE if any of the given mappings were used to modify 5069 * physical memory. Otherwise, returns FALSE. Both page and 1mpage 5070 * mappings are supported. 
5071 */ 5072static boolean_t 5073pmap_is_modified_pvh(struct md_page *pvh) 5074{ 5075 pv_entry_t pv; 5076 pt1_entry_t pte1; 5077 pt2_entry_t pte2; 5078 pmap_t pmap; 5079 boolean_t rv; 5080 5081 rw_assert(&pvh_global_lock, RA_WLOCKED); 5082 rv = FALSE; 5083 sched_pin(); 5084 TAILQ_FOREACH(pv, &pvh->pv_list, pv_next) { 5085 pmap = PV_PMAP(pv); 5086 PMAP_LOCK(pmap); 5087 pte1 = pte1_load(pmap_pte1(pmap, pv->pv_va)); 5088 if (pte1_is_section(pte1)) { 5089 rv = pte1_is_dirty(pte1); 5090 } else { 5091 KASSERT(pte1_is_link(pte1), 5092 ("%s: pte1 %#x is not link", __func__, pte1)); 5093 pte2 = pte2_load(pmap_pte2_quick(pmap, pv->pv_va)); 5094 rv = pte2_is_dirty(pte2); 5095 } 5096 PMAP_UNLOCK(pmap); 5097 if (rv) 5098 break; 5099 } 5100 sched_unpin(); 5101 return (rv); 5102} 5103 5104/* 5105 * pmap_is_modified: 5106 * 5107 * Return whether or not the specified physical page was modified 5108 * in any physical maps. 5109 */ 5110boolean_t 5111pmap_is_modified(vm_page_t m) 5112{ 5113 boolean_t rv; 5114 5115 KASSERT((m->oflags & VPO_UNMANAGED) == 0, 5116 ("%s: page %p is not managed", __func__, m)); 5117 5118 /* 5119 * If the page is not exclusive busied, then PGA_WRITEABLE cannot be 5120 * concurrently set while the object is locked. Thus, if PGA_WRITEABLE 5121 * is clear, no PTE2s can have PG_M set. 5122 */ 5123 VM_OBJECT_ASSERT_WLOCKED(m->object); 5124 if (!vm_page_xbusied(m) && (m->aflags & PGA_WRITEABLE) == 0) 5125 return (FALSE); 5126 rw_wlock(&pvh_global_lock); 5127 rv = pmap_is_modified_pvh(&m->md) || 5128 ((m->flags & PG_FICTITIOUS) == 0 && 5129 pmap_is_modified_pvh(pa_to_pvh(VM_PAGE_TO_PHYS(m)))); 5130 rw_wunlock(&pvh_global_lock); 5131 return (rv); 5132} 5133 5134/* 5135 * pmap_is_prefaultable: 5136 * 5137 * Return whether or not the specified virtual address is eligible 5138 * for prefault. 5139 */ 5140boolean_t 5141pmap_is_prefaultable(pmap_t pmap, vm_offset_t addr) 5142{ 5143 pt1_entry_t pte1; 5144 pt2_entry_t pte2; 5145 boolean_t rv; 5146 5147 rv = FALSE; 5148 PMAP_LOCK(pmap); 5149 pte1 = pte1_load(pmap_pte1(pmap, addr)); 5150 if (pte1_is_link(pte1)) { 5151 pte2 = pte2_load(pt2map_entry(addr)); 5152 rv = !pte2_is_valid(pte2) ; 5153 } 5154 PMAP_UNLOCK(pmap); 5155 return (rv); 5156} 5157 5158/* 5159 * Returns TRUE if any of the given mappings were referenced and FALSE 5160 * otherwise. Both page and 1mpage mappings are supported. 5161 */ 5162static boolean_t 5163pmap_is_referenced_pvh(struct md_page *pvh) 5164{ 5165 5166 pv_entry_t pv; 5167 pt1_entry_t pte1; 5168 pt2_entry_t pte2; 5169 pmap_t pmap; 5170 boolean_t rv; 5171 5172 rw_assert(&pvh_global_lock, RA_WLOCKED); 5173 rv = FALSE; 5174 sched_pin(); 5175 TAILQ_FOREACH(pv, &pvh->pv_list, pv_next) { 5176 pmap = PV_PMAP(pv); 5177 PMAP_LOCK(pmap); 5178 pte1 = pte1_load(pmap_pte1(pmap, pv->pv_va)); 5179 if (pte1_is_section(pte1)) { 5180 rv = (pte1 & (PTE1_A | PTE1_V)) == (PTE1_A | PTE1_V); 5181 } else { 5182 pte2 = pte2_load(pmap_pte2_quick(pmap, pv->pv_va)); 5183 rv = (pte2 & (PTE2_A | PTE2_V)) == (PTE2_A | PTE2_V); 5184 } 5185 PMAP_UNLOCK(pmap); 5186 if (rv) 5187 break; 5188 } 5189 sched_unpin(); 5190 return (rv); 5191} 5192 5193/* 5194 * pmap_is_referenced: 5195 * 5196 * Return whether or not the specified physical page was referenced 5197 * in any physical maps. 
5198 */ 5199boolean_t 5200pmap_is_referenced(vm_page_t m) 5201{ 5202 boolean_t rv; 5203 5204 KASSERT((m->oflags & VPO_UNMANAGED) == 0, 5205 ("%s: page %p is not managed", __func__, m)); 5206 rw_wlock(&pvh_global_lock); 5207 rv = pmap_is_referenced_pvh(&m->md) || 5208 ((m->flags & PG_FICTITIOUS) == 0 && 5209 pmap_is_referenced_pvh(pa_to_pvh(VM_PAGE_TO_PHYS(m)))); 5210 rw_wunlock(&pvh_global_lock); 5211 return (rv); 5212} 5213 5214/* 5215 * pmap_ts_referenced: 5216 * 5217 * Return a count of reference bits for a page, clearing those bits. 5218 * It is not necessary for every reference bit to be cleared, but it 5219 * is necessary that 0 only be returned when there are truly no 5220 * reference bits set. 5221 * 5222 * As an optimization, update the page's dirty field if a modified bit is 5223 * found while counting reference bits. This opportunistic update can be 5224 * performed at low cost and can eliminate the need for some future calls 5225 * to pmap_is_modified(). However, since this function stops after 5226 * finding PMAP_TS_REFERENCED_MAX reference bits, it may not detect some 5227 * dirty pages. Those dirty pages will only be detected by a future call 5228 * to pmap_is_modified(). 5229 */ 5230int 5231pmap_ts_referenced(vm_page_t m) 5232{ 5233 struct md_page *pvh; 5234 pv_entry_t pv, pvf; 5235 pmap_t pmap; 5236 pt1_entry_t *pte1p, opte1; 5237 pt2_entry_t *pte2p, opte2; 5238 vm_paddr_t pa; 5239 int rtval = 0; 5240 5241 KASSERT((m->oflags & VPO_UNMANAGED) == 0, 5242 ("%s: page %p is not managed", __func__, m)); 5243 pa = VM_PAGE_TO_PHYS(m); 5244 pvh = pa_to_pvh(pa); 5245 rw_wlock(&pvh_global_lock); 5246 sched_pin(); 5247 if ((m->flags & PG_FICTITIOUS) != 0 || 5248 (pvf = TAILQ_FIRST(&pvh->pv_list)) == NULL) 5249 goto small_mappings; 5250 pv = pvf; 5251 do { 5252 pmap = PV_PMAP(pv); 5253 PMAP_LOCK(pmap); 5254 pte1p = pmap_pte1(pmap, pv->pv_va); 5255 opte1 = pte1_load(pte1p); 5256 if (pte1_is_dirty(opte1)) { 5257 /* 5258 * Although "opte1" is mapping a 1MB page, because 5259 * this function is called at a 4KB page granularity, 5260 * we only update the 4KB page under test. 5261 */ 5262 vm_page_dirty(m); 5263 } 5264 if ((opte1 & PTE1_A) != 0) { 5265 /* 5266 * Since this reference bit is shared by 256 4KB pages, 5267 * it should not be cleared every time it is tested. 5268 * Apply a simple "hash" function on the physical page 5269 * number, the virtual section number, and the pmap 5270 * address to select one 4KB page out of the 256 5271 * on which testing the reference bit will result 5272 * in clearing that bit. This function is designed 5273 * to avoid the selection of the same 4KB page 5274 * for every 1MB page mapping. 5275 * 5276 * On demotion, a mapping that hasn't been referenced 5277 * is simply destroyed. To avoid the possibility of a 5278 * subsequent page fault on a demoted wired mapping, 5279 * always leave its reference bit set. Moreover, 5280 * since the section is wired, the current state of 5281 * its reference bit won't affect page replacement. 5282 */ 5283 if ((((pa >> PAGE_SHIFT) ^ (pv->pv_va >> PTE1_SHIFT) ^ 5284 (uintptr_t)pmap) & (NPTE2_IN_PG - 1)) == 0 && 5285 !pte1_is_wired(opte1)) { 5286 pte1_clear_bit(pte1p, PTE1_A); 5287 pmap_tlb_flush(pmap, pv->pv_va); 5288 } 5289 rtval++; 5290 } 5291 PMAP_UNLOCK(pmap); 5292 /* Rotate the PV list if it has more than one entry. 
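		 * Rotating ensures that later calls do not keep examining
		 * the same mappings first when the scan stops early at
		 * PMAP_TS_REFERENCED_MAX.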
*/ 5293 if (TAILQ_NEXT(pv, pv_next) != NULL) { 5294 TAILQ_REMOVE(&pvh->pv_list, pv, pv_next); 5295 TAILQ_INSERT_TAIL(&pvh->pv_list, pv, pv_next); 5296 } 5297 if (rtval >= PMAP_TS_REFERENCED_MAX) 5298 goto out; 5299 } while ((pv = TAILQ_FIRST(&pvh->pv_list)) != pvf); 5300small_mappings: 5301 if ((pvf = TAILQ_FIRST(&m->md.pv_list)) == NULL) 5302 goto out; 5303 pv = pvf; 5304 do { 5305 pmap = PV_PMAP(pv); 5306 PMAP_LOCK(pmap); 5307 pte1p = pmap_pte1(pmap, pv->pv_va); 5308 KASSERT(pte1_is_link(pte1_load(pte1p)), 5309 ("%s: not found a link in page %p's pv list", __func__, m)); 5310 5311 pte2p = pmap_pte2_quick(pmap, pv->pv_va); 5312 opte2 = pte2_load(pte2p); 5313 if (pte2_is_dirty(opte2)) 5314 vm_page_dirty(m); 5315 if ((opte2 & PTE2_A) != 0) { 5316 pte2_clear_bit(pte2p, PTE2_A); 5317 pmap_tlb_flush(pmap, pv->pv_va); 5318 rtval++; 5319 } 5320 PMAP_UNLOCK(pmap); 5321 /* Rotate the PV list if it has more than one entry. */ 5322 if (TAILQ_NEXT(pv, pv_next) != NULL) { 5323 TAILQ_REMOVE(&m->md.pv_list, pv, pv_next); 5324 TAILQ_INSERT_TAIL(&m->md.pv_list, pv, pv_next); 5325 } 5326 } while ((pv = TAILQ_FIRST(&m->md.pv_list)) != pvf && rtval < 5327 PMAP_TS_REFERENCED_MAX); 5328out: 5329 sched_unpin(); 5330 rw_wunlock(&pvh_global_lock); 5331 return (rtval); 5332} 5333 5334/* 5335 * Clear the wired attribute from the mappings for the specified range of 5336 * addresses in the given pmap. Every valid mapping within that range 5337 * must have the wired attribute set. In contrast, invalid mappings 5338 * cannot have the wired attribute set, so they are ignored. 5339 * 5340 * The wired attribute of the page table entry is not a hardware feature, 5341 * so there is no need to invalidate any TLB entries. 5342 */ 5343void 5344pmap_unwire(pmap_t pmap, vm_offset_t sva, vm_offset_t eva) 5345{ 5346 vm_offset_t nextva; 5347 pt1_entry_t *pte1p, pte1; 5348 pt2_entry_t *pte2p, pte2; 5349 boolean_t pv_lists_locked; 5350 5351 if (pmap_is_current(pmap)) 5352 pv_lists_locked = FALSE; 5353 else { 5354 pv_lists_locked = TRUE; 5355resume: 5356 rw_wlock(&pvh_global_lock); 5357 sched_pin(); 5358 } 5359 PMAP_LOCK(pmap); 5360 for (; sva < eva; sva = nextva) { 5361 nextva = pte1_trunc(sva + PTE1_SIZE); 5362 if (nextva < sva) 5363 nextva = eva; 5364 5365 pte1p = pmap_pte1(pmap, sva); 5366 pte1 = pte1_load(pte1p); 5367 5368 /* 5369 * Weed out invalid mappings. Note: we assume that L1 page 5370 * page table is always allocated, and in kernel virtual. 5371 */ 5372 if (pte1 == 0) 5373 continue; 5374 5375 if (pte1_is_section(pte1)) { 5376 if (!pte1_is_wired(pte1)) 5377 panic("%s: pte1 %#x not wired", __func__, pte1); 5378 5379 /* 5380 * Are we unwiring the entire large page? If not, 5381 * demote the mapping and fall through. 5382 */ 5383 if (sva + PTE1_SIZE == nextva && eva >= nextva) { 5384 pte1_clear_bit(pte1p, PTE1_W); 5385 pmap->pm_stats.wired_count -= PTE1_SIZE / 5386 PAGE_SIZE; 5387 continue; 5388 } else { 5389 if (!pv_lists_locked) { 5390 pv_lists_locked = TRUE; 5391 if (!rw_try_wlock(&pvh_global_lock)) { 5392 PMAP_UNLOCK(pmap); 5393 /* Repeat sva. 
*/ 5394 goto resume; 5395 } 5396 sched_pin(); 5397 } 5398 if (!pmap_demote_pte1(pmap, pte1p, sva)) 5399 panic("%s: demotion failed", __func__); 5400#ifdef INVARIANTS 5401 else { 5402 /* Update pte1 after demotion */ 5403 pte1 = pte1_load(pte1p); 5404 } 5405#endif 5406 } 5407 } 5408 5409 KASSERT(pte1_is_link(pte1), ("%s: pmap %p va %#x pte1 %#x at %p" 5410 " is not link", __func__, pmap, sva, pte1, pte1p)); 5411 5412 /* 5413 * Limit our scan to either the end of the va represented 5414 * by the current L2 page table page, or to the end of the 5415 * range being protected. 5416 */ 5417 if (nextva > eva) 5418 nextva = eva; 5419 5420 for (pte2p = pmap_pte2_quick(pmap, sva); sva != nextva; pte2p++, 5421 sva += PAGE_SIZE) { 5422 pte2 = pte2_load(pte2p); 5423 if (!pte2_is_valid(pte2)) 5424 continue; 5425 if (!pte2_is_wired(pte2)) 5426 panic("%s: pte2 %#x is missing PTE2_W", 5427 __func__, pte2); 5428 5429 /* 5430 * PTE2_W must be cleared atomically. Although the pmap 5431 * lock synchronizes access to PTE2_W, another processor 5432 * could be changing PTE2_NM and/or PTE2_A concurrently. 5433 */ 5434 pte2_clear_bit(pte2p, PTE2_W); 5435 pmap->pm_stats.wired_count--; 5436 } 5437 } 5438 if (pv_lists_locked) { 5439 sched_unpin(); 5440 rw_wunlock(&pvh_global_lock); 5441 } 5442 PMAP_UNLOCK(pmap); 5443} 5444 5445/* 5446 * Clear the write and modified bits in each of the given page's mappings. 5447 */ 5448void 5449pmap_remove_write(vm_page_t m) 5450{ 5451 struct md_page *pvh; 5452 pv_entry_t next_pv, pv; 5453 pmap_t pmap; 5454 pt1_entry_t *pte1p; 5455 pt2_entry_t *pte2p, opte2; 5456 vm_offset_t va; 5457 5458 KASSERT((m->oflags & VPO_UNMANAGED) == 0, 5459 ("%s: page %p is not managed", __func__, m)); 5460 5461 /* 5462 * If the page is not exclusive busied, then PGA_WRITEABLE cannot be 5463 * set by another thread while the object is locked. Thus, 5464 * if PGA_WRITEABLE is clear, no page table entries need updating. 5465 */ 5466 VM_OBJECT_ASSERT_WLOCKED(m->object); 5467 if (!vm_page_xbusied(m) && (m->aflags & PGA_WRITEABLE) == 0) 5468 return; 5469 rw_wlock(&pvh_global_lock); 5470 sched_pin(); 5471 if ((m->flags & PG_FICTITIOUS) != 0) 5472 goto small_mappings; 5473 pvh = pa_to_pvh(VM_PAGE_TO_PHYS(m)); 5474 TAILQ_FOREACH_SAFE(pv, &pvh->pv_list, pv_next, next_pv) { 5475 va = pv->pv_va; 5476 pmap = PV_PMAP(pv); 5477 PMAP_LOCK(pmap); 5478 pte1p = pmap_pte1(pmap, va); 5479 if (!(pte1_load(pte1p) & PTE1_RO)) 5480 (void)pmap_demote_pte1(pmap, pte1p, va); 5481 PMAP_UNLOCK(pmap); 5482 } 5483small_mappings: 5484 TAILQ_FOREACH(pv, &m->md.pv_list, pv_next) { 5485 pmap = PV_PMAP(pv); 5486 PMAP_LOCK(pmap); 5487 pte1p = pmap_pte1(pmap, pv->pv_va); 5488 KASSERT(!pte1_is_section(pte1_load(pte1p)), ("%s: found" 5489 " a section in page %p's pv list", __func__, m)); 5490 pte2p = pmap_pte2_quick(pmap, pv->pv_va); 5491 opte2 = pte2_load(pte2p); 5492 if (!(opte2 & PTE2_RO)) { 5493 pte2_store(pte2p, opte2 | PTE2_RO | PTE2_NM); 5494 if (pte2_is_dirty(opte2)) 5495 vm_page_dirty(m); 5496 pmap_tlb_flush(pmap, pv->pv_va); 5497 } 5498 PMAP_UNLOCK(pmap); 5499 } 5500 vm_page_aflag_clear(m, PGA_WRITEABLE); 5501 sched_unpin(); 5502 rw_wunlock(&pvh_global_lock); 5503} 5504 5505/* 5506 * Apply the given advice to the specified range of addresses within the 5507 * given pmap. Depending on the advice, clear the referenced and/or 5508 * modified flags in each mapping and set the mapped page's dirty field. 
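 *
 * Only MADV_DONTNEED and MADV_FREE are handled; 1MB sections in
 * the range are demoted first so that the advice can be applied
 * to individual 4KB pages.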
5509 */ 5510void 5511pmap_advise(pmap_t pmap, vm_offset_t sva, vm_offset_t eva, int advice) 5512{ 5513 pt1_entry_t *pte1p, opte1; 5514 pt2_entry_t *pte2p, pte2; 5515 vm_offset_t pdnxt; 5516 vm_page_t m; 5517 boolean_t pv_lists_locked; 5518 5519 if (advice != MADV_DONTNEED && advice != MADV_FREE) 5520 return; 5521 if (pmap_is_current(pmap)) 5522 pv_lists_locked = FALSE; 5523 else { 5524 pv_lists_locked = TRUE; 5525resume: 5526 rw_wlock(&pvh_global_lock); 5527 sched_pin(); 5528 } 5529 PMAP_LOCK(pmap); 5530 for (; sva < eva; sva = pdnxt) { 5531 pdnxt = pte1_trunc(sva + PTE1_SIZE); 5532 if (pdnxt < sva) 5533 pdnxt = eva; 5534 pte1p = pmap_pte1(pmap, sva); 5535 opte1 = pte1_load(pte1p); 5536 if (!pte1_is_valid(opte1)) /* XXX */ 5537 continue; 5538 else if (pte1_is_section(opte1)) { 5539 if (!pte1_is_managed(opte1)) 5540 continue; 5541 if (!pv_lists_locked) { 5542 pv_lists_locked = TRUE; 5543 if (!rw_try_wlock(&pvh_global_lock)) { 5544 PMAP_UNLOCK(pmap); 5545 goto resume; 5546 } 5547 sched_pin(); 5548 } 5549 if (!pmap_demote_pte1(pmap, pte1p, sva)) { 5550 /* 5551 * The large page mapping was destroyed. 5552 */ 5553 continue; 5554 } 5555 5556 /* 5557 * Unless the page mappings are wired, remove the 5558 * mapping to a single page so that a subsequent 5559 * access may repromote. Since the underlying L2 page 5560 * table is fully populated, this removal never 5561 * frees a L2 page table page. 5562 */ 5563 if (!pte1_is_wired(opte1)) { 5564 pte2p = pmap_pte2_quick(pmap, sva); 5565 KASSERT(pte2_is_valid(pte2_load(pte2p)), 5566 ("%s: invalid PTE2", __func__)); 5567 pmap_remove_pte2(pmap, pte2p, sva, NULL); 5568 } 5569 } 5570 if (pdnxt > eva) 5571 pdnxt = eva; 5572 for (pte2p = pmap_pte2_quick(pmap, sva); sva != pdnxt; pte2p++, 5573 sva += PAGE_SIZE) { 5574 pte2 = pte2_load(pte2p); 5575 if (!pte2_is_valid(pte2) || !pte2_is_managed(pte2)) 5576 continue; 5577 else if (pte2_is_dirty(pte2)) { 5578 if (advice == MADV_DONTNEED) { 5579 /* 5580 * Future calls to pmap_is_modified() 5581 * can be avoided by making the page 5582 * dirty now. 5583 */ 5584 m = PHYS_TO_VM_PAGE(pte2_pa(pte2)); 5585 vm_page_dirty(m); 5586 } 5587 pte2_set_bit(pte2p, PTE2_NM); 5588 pte2_clear_bit(pte2p, PTE2_A); 5589 } else if ((pte2 & PTE2_A) != 0) 5590 pte2_clear_bit(pte2p, PTE2_A); 5591 else 5592 continue; 5593 pmap_tlb_flush(pmap, sva); 5594 } 5595 } 5596 if (pv_lists_locked) { 5597 sched_unpin(); 5598 rw_wunlock(&pvh_global_lock); 5599 } 5600 PMAP_UNLOCK(pmap); 5601} 5602 5603/* 5604 * Clear the modify bits on the specified physical page. 5605 */ 5606void 5607pmap_clear_modify(vm_page_t m) 5608{ 5609 struct md_page *pvh; 5610 pv_entry_t next_pv, pv; 5611 pmap_t pmap; 5612 pt1_entry_t *pte1p, opte1; 5613 pt2_entry_t *pte2p, opte2; 5614 vm_offset_t va; 5615 5616 KASSERT((m->oflags & VPO_UNMANAGED) == 0, 5617 ("%s: page %p is not managed", __func__, m)); 5618 VM_OBJECT_ASSERT_WLOCKED(m->object); 5619 KASSERT(!vm_page_xbusied(m), 5620 ("%s: page %p is exclusive busy", __func__, m)); 5621 5622 /* 5623 * If the page is not PGA_WRITEABLE, then no PTE2s can have PTE2_NM 5624 * cleared. If the object containing the page is locked and the page 5625 * is not exclusive busied, then PGA_WRITEABLE cannot be concurrently 5626 * set. 
5627 */ 5628 if ((m->flags & PGA_WRITEABLE) == 0) 5629 return; 5630 rw_wlock(&pvh_global_lock); 5631 sched_pin(); 5632 if ((m->flags & PG_FICTITIOUS) != 0) 5633 goto small_mappings; 5634 pvh = pa_to_pvh(VM_PAGE_TO_PHYS(m)); 5635 TAILQ_FOREACH_SAFE(pv, &pvh->pv_list, pv_next, next_pv) { 5636 va = pv->pv_va; 5637 pmap = PV_PMAP(pv); 5638 PMAP_LOCK(pmap); 5639 pte1p = pmap_pte1(pmap, va); 5640 opte1 = pte1_load(pte1p); 5641 if (!(opte1 & PTE1_RO)) { 5642 if (pmap_demote_pte1(pmap, pte1p, va) && 5643 !pte1_is_wired(opte1)) { 5644 /* 5645 * Write protect the mapping to a 5646 * single page so that a subsequent 5647 * write access may repromote. 5648 */ 5649 va += VM_PAGE_TO_PHYS(m) - pte1_pa(opte1); 5650 pte2p = pmap_pte2_quick(pmap, va); 5651 opte2 = pte2_load(pte2p); 5652 if ((opte2 & PTE2_V)) { 5653 pte2_set_bit(pte2p, PTE2_NM | PTE2_RO); 5654 vm_page_dirty(m); 5655 pmap_tlb_flush(pmap, va); 5656 } 5657 } 5658 } 5659 PMAP_UNLOCK(pmap); 5660 } 5661small_mappings: 5662 TAILQ_FOREACH(pv, &m->md.pv_list, pv_next) { 5663 pmap = PV_PMAP(pv); 5664 PMAP_LOCK(pmap); 5665 pte1p = pmap_pte1(pmap, pv->pv_va); 5666 KASSERT(!pte1_is_section(pte1_load(pte1p)), ("%s: found" 5667 " a section in page %p's pv list", __func__, m)); 5668 pte2p = pmap_pte2_quick(pmap, pv->pv_va); 5669 if (pte2_is_dirty(pte2_load(pte2p))) { 5670 pte2_set_bit(pte2p, PTE2_NM); 5671 pmap_tlb_flush(pmap, pv->pv_va); 5672 } 5673 PMAP_UNLOCK(pmap); 5674 } 5675 sched_unpin(); 5676 rw_wunlock(&pvh_global_lock); 5677} 5678 5679 5680/* 5681 * Sets the memory attribute for the specified page. 5682 */ 5683void 5684pmap_page_set_memattr(vm_page_t m, vm_memattr_t ma) 5685{ 5686 pt2_entry_t *cmap2_pte2p; 5687 vm_memattr_t oma; 5688 vm_paddr_t pa; 5689 struct pcpu *pc; 5690 5691 oma = m->md.pat_mode; 5692 m->md.pat_mode = ma; 5693 5694 CTR5(KTR_PMAP, "%s: page %p - 0x%08X oma: %d, ma: %d", __func__, m, 5695 VM_PAGE_TO_PHYS(m), oma, ma); 5696 if ((m->flags & PG_FICTITIOUS) != 0) 5697 return; 5698#if 0 5699 /* 5700 * If "m" is a normal page, flush it from the cache. 5701 * 5702 * First, try to find an existing mapping of the page by sf 5703 * buffer. sf_buf_invalidate_cache() modifies mapping and 5704 * flushes the cache. 5705 */ 5706 if (sf_buf_invalidate_cache(m, oma)) 5707 return; 5708#endif 5709 /* 5710 * If page is not mapped by sf buffer, map the page 5711 * transient and do invalidation. 5712 */ 5713 if (ma != oma) { 5714 pa = VM_PAGE_TO_PHYS(m); 5715 sched_pin(); 5716 pc = get_pcpu(); 5717 cmap2_pte2p = pc->pc_cmap2_pte2p; 5718 mtx_lock(&pc->pc_cmap_lock); 5719 if (pte2_load(cmap2_pte2p) != 0) 5720 panic("%s: CMAP2 busy", __func__); 5721 pte2_store(cmap2_pte2p, PTE2_KERN_NG(pa, PTE2_AP_KRW, 5722 vm_memattr_to_pte2(ma))); 5723 dcache_wbinv_poc((vm_offset_t)pc->pc_cmap2_addr, pa, PAGE_SIZE); 5724 pte2_clear(cmap2_pte2p); 5725 tlb_flush((vm_offset_t)pc->pc_cmap2_addr); 5726 sched_unpin(); 5727 mtx_unlock(&pc->pc_cmap_lock); 5728 } 5729} 5730 5731/* 5732 * Miscellaneous support routines follow 5733 */ 5734 5735/* 5736 * Returns TRUE if the given page is mapped individually or as part of 5737 * a 1mpage. Otherwise, returns FALSE. 
5738 */ 5739boolean_t 5740pmap_page_is_mapped(vm_page_t m) 5741{ 5742 boolean_t rv; 5743 5744 if ((m->oflags & VPO_UNMANAGED) != 0) 5745 return (FALSE); 5746 rw_wlock(&pvh_global_lock); 5747 rv = !TAILQ_EMPTY(&m->md.pv_list) || 5748 ((m->flags & PG_FICTITIOUS) == 0 && 5749 !TAILQ_EMPTY(&pa_to_pvh(VM_PAGE_TO_PHYS(m))->pv_list)); 5750 rw_wunlock(&pvh_global_lock); 5751 return (rv); 5752} 5753 5754/* 5755 * Returns true if the pmap's pv is one of the first 5756 * 16 pvs linked to from this page. This count may 5757 * be changed upwards or downwards in the future; it 5758 * is only necessary that true be returned for a small 5759 * subset of pmaps for proper page aging. 5760 */ 5761boolean_t 5762pmap_page_exists_quick(pmap_t pmap, vm_page_t m) 5763{ 5764 struct md_page *pvh; 5765 pv_entry_t pv; 5766 int loops = 0; 5767 boolean_t rv; 5768 5769 KASSERT((m->oflags & VPO_UNMANAGED) == 0, 5770 ("%s: page %p is not managed", __func__, m)); 5771 rv = FALSE; 5772 rw_wlock(&pvh_global_lock); 5773 TAILQ_FOREACH(pv, &m->md.pv_list, pv_next) { 5774 if (PV_PMAP(pv) == pmap) { 5775 rv = TRUE; 5776 break; 5777 } 5778 loops++; 5779 if (loops >= 16) 5780 break; 5781 } 5782 if (!rv && loops < 16 && (m->flags & PG_FICTITIOUS) == 0) { 5783 pvh = pa_to_pvh(VM_PAGE_TO_PHYS(m)); 5784 TAILQ_FOREACH(pv, &pvh->pv_list, pv_next) { 5785 if (PV_PMAP(pv) == pmap) { 5786 rv = TRUE; 5787 break; 5788 } 5789 loops++; 5790 if (loops >= 16) 5791 break; 5792 } 5793 } 5794 rw_wunlock(&pvh_global_lock); 5795 return (rv); 5796} 5797 5798/* 5799 * pmap_zero_page zeros the specified hardware page by mapping 5800 * the page into KVM and using bzero to clear its contents. 5801 */ 5802void 5803pmap_zero_page(vm_page_t m) 5804{ 5805 pt2_entry_t *cmap2_pte2p; 5806 struct pcpu *pc; 5807 5808 sched_pin(); 5809 pc = get_pcpu(); 5810 cmap2_pte2p = pc->pc_cmap2_pte2p; 5811 mtx_lock(&pc->pc_cmap_lock); 5812 if (pte2_load(cmap2_pte2p) != 0) 5813 panic("%s: CMAP2 busy", __func__); 5814 pte2_store(cmap2_pte2p, PTE2_KERN_NG(VM_PAGE_TO_PHYS(m), PTE2_AP_KRW, 5815 vm_page_pte2_attr(m))); 5816 pagezero(pc->pc_cmap2_addr); 5817 pte2_clear(cmap2_pte2p); 5818 tlb_flush((vm_offset_t)pc->pc_cmap2_addr); 5819 sched_unpin(); 5820 mtx_unlock(&pc->pc_cmap_lock); 5821} 5822 5823/* 5824 * pmap_zero_page_area zeros the specified hardware page by mapping 5825 * the page into KVM and using bzero to clear its contents. 5826 * 5827 * off and size may not cover an area beyond a single hardware page. 5828 */ 5829void 5830pmap_zero_page_area(vm_page_t m, int off, int size) 5831{ 5832 pt2_entry_t *cmap2_pte2p; 5833 struct pcpu *pc; 5834 5835 sched_pin(); 5836 pc = get_pcpu(); 5837 cmap2_pte2p = pc->pc_cmap2_pte2p; 5838 mtx_lock(&pc->pc_cmap_lock); 5839 if (pte2_load(cmap2_pte2p) != 0) 5840 panic("%s: CMAP2 busy", __func__); 5841 pte2_store(cmap2_pte2p, PTE2_KERN_NG(VM_PAGE_TO_PHYS(m), PTE2_AP_KRW, 5842 vm_page_pte2_attr(m))); 5843 if (off == 0 && size == PAGE_SIZE) 5844 pagezero(pc->pc_cmap2_addr); 5845 else 5846 bzero(pc->pc_cmap2_addr + off, size); 5847 pte2_clear(cmap2_pte2p); 5848 tlb_flush((vm_offset_t)pc->pc_cmap2_addr); 5849 sched_unpin(); 5850 mtx_unlock(&pc->pc_cmap_lock); 5851} 5852 5853/* 5854 * pmap_zero_page_idle zeros the specified hardware page by mapping 5855 * the page into KVM and using bzero to clear its contents. This 5856 * is intended to be called from the vm_pagezero process only and 5857 * outside of Giant. 
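 *
 * Unlike pmap_zero_page(), which uses the per-CPU CMAP2 mapping under
 * pc_cmap_lock, this variant uses the dedicated CMAP3/CADDR3 mapping
 * reserved for the page zeroing thread, so it does not take the
 * per-CPU cmap mutex.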
5858 */ 5859void 5860pmap_zero_page_idle(vm_page_t m) 5861{ 5862 5863 if (pte2_load(CMAP3) != 0) 5864 panic("%s: CMAP3 busy", __func__); 5865 sched_pin(); 5866 pte2_store(CMAP3, PTE2_KERN_NG(VM_PAGE_TO_PHYS(m), PTE2_AP_KRW, 5867 vm_page_pte2_attr(m))); 5868 pagezero(CADDR3); 5869 pte2_clear(CMAP3); 5870 tlb_flush((vm_offset_t)CADDR3); 5871 sched_unpin(); 5872} 5873 5874/* 5875 * pmap_copy_page copies the specified (machine independent) 5876 * page by mapping the page into virtual memory and using 5877 * bcopy to copy the page, one machine dependent page at a 5878 * time. 5879 */ 5880void 5881pmap_copy_page(vm_page_t src, vm_page_t dst) 5882{ 5883 pt2_entry_t *cmap1_pte2p, *cmap2_pte2p; 5884 struct pcpu *pc; 5885 5886 sched_pin(); 5887 pc = get_pcpu(); 5888 cmap1_pte2p = pc->pc_cmap1_pte2p; 5889 cmap2_pte2p = pc->pc_cmap2_pte2p; 5890 mtx_lock(&pc->pc_cmap_lock); 5891 if (pte2_load(cmap1_pte2p) != 0) 5892 panic("%s: CMAP1 busy", __func__); 5893 if (pte2_load(cmap2_pte2p) != 0) 5894 panic("%s: CMAP2 busy", __func__); 5895 pte2_store(cmap1_pte2p, PTE2_KERN_NG(VM_PAGE_TO_PHYS(src), 5896 PTE2_AP_KR | PTE2_NM, vm_page_pte2_attr(src))); 5897 pte2_store(cmap2_pte2p, PTE2_KERN_NG(VM_PAGE_TO_PHYS(dst), 5898 PTE2_AP_KRW, vm_page_pte2_attr(dst))); 5899 bcopy(pc->pc_cmap1_addr, pc->pc_cmap2_addr, PAGE_SIZE); 5900 pte2_clear(cmap1_pte2p); 5901 tlb_flush((vm_offset_t)pc->pc_cmap1_addr); 5902 pte2_clear(cmap2_pte2p); 5903 tlb_flush((vm_offset_t)pc->pc_cmap2_addr); 5904 sched_unpin(); 5905 mtx_unlock(&pc->pc_cmap_lock); 5906} 5907 5908int unmapped_buf_allowed = 1; 5909 5910void 5911pmap_copy_pages(vm_page_t ma[], vm_offset_t a_offset, vm_page_t mb[], 5912 vm_offset_t b_offset, int xfersize) 5913{ 5914 pt2_entry_t *cmap1_pte2p, *cmap2_pte2p; 5915 vm_page_t a_pg, b_pg; 5916 char *a_cp, *b_cp; 5917 vm_offset_t a_pg_offset, b_pg_offset; 5918 struct pcpu *pc; 5919 int cnt; 5920 5921 sched_pin(); 5922 pc = get_pcpu(); 5923 cmap1_pte2p = pc->pc_cmap1_pte2p; 5924 cmap2_pte2p = pc->pc_cmap2_pte2p; 5925 mtx_lock(&pc->pc_cmap_lock); 5926 if (pte2_load(cmap1_pte2p) != 0) 5927 panic("pmap_copy_pages: CMAP1 busy"); 5928 if (pte2_load(cmap2_pte2p) != 0) 5929 panic("pmap_copy_pages: CMAP2 busy"); 5930 while (xfersize > 0) { 5931 a_pg = ma[a_offset >> PAGE_SHIFT]; 5932 a_pg_offset = a_offset & PAGE_MASK; 5933 cnt = min(xfersize, PAGE_SIZE - a_pg_offset); 5934 b_pg = mb[b_offset >> PAGE_SHIFT]; 5935 b_pg_offset = b_offset & PAGE_MASK; 5936 cnt = min(cnt, PAGE_SIZE - b_pg_offset); 5937 pte2_store(cmap1_pte2p, PTE2_KERN_NG(VM_PAGE_TO_PHYS(a_pg), 5938 PTE2_AP_KR | PTE2_NM, vm_page_pte2_attr(a_pg))); 5939 tlb_flush_local((vm_offset_t)pc->pc_cmap1_addr); 5940 pte2_store(cmap2_pte2p, PTE2_KERN_NG(VM_PAGE_TO_PHYS(b_pg), 5941 PTE2_AP_KRW, vm_page_pte2_attr(b_pg))); 5942 tlb_flush_local((vm_offset_t)pc->pc_cmap2_addr); 5943 a_cp = pc->pc_cmap1_addr + a_pg_offset; 5944 b_cp = pc->pc_cmap2_addr + b_pg_offset; 5945 bcopy(a_cp, b_cp, cnt); 5946 a_offset += cnt; 5947 b_offset += cnt; 5948 xfersize -= cnt; 5949 } 5950 pte2_clear(cmap1_pte2p); 5951 tlb_flush((vm_offset_t)pc->pc_cmap1_addr); 5952 pte2_clear(cmap2_pte2p); 5953 tlb_flush((vm_offset_t)pc->pc_cmap2_addr); 5954 sched_unpin(); 5955 mtx_unlock(&pc->pc_cmap_lock); 5956} 5957 5958vm_offset_t 5959pmap_quick_enter_page(vm_page_t m) 5960{ 5961 struct pcpu *pc; 5962 pt2_entry_t *pte2p; 5963 5964 critical_enter(); 5965 pc = get_pcpu(); 5966 pte2p = pc->pc_qmap_pte2p; 5967 5968 KASSERT(pte2_load(pte2p) == 0, ("%s: PTE2 busy", __func__)); 5969 5970 pte2_store(pte2p, 
PTE2_KERN_NG(VM_PAGE_TO_PHYS(m), PTE2_AP_KRW, 5971 vm_page_pte2_attr(m))); 5972 return (pc->pc_qmap_addr); 5973} 5974 5975void 5976pmap_quick_remove_page(vm_offset_t addr) 5977{ 5978 struct pcpu *pc; 5979 pt2_entry_t *pte2p; 5980 5981 pc = get_pcpu(); 5982 pte2p = pc->pc_qmap_pte2p; 5983 5984 KASSERT(addr == pc->pc_qmap_addr, ("%s: invalid address", __func__)); 5985 KASSERT(pte2_load(pte2p) != 0, ("%s: PTE2 not in use", __func__)); 5986 5987 pte2_clear(pte2p); 5988 tlb_flush(pc->pc_qmap_addr); 5989 critical_exit(); 5990} 5991 5992/* 5993 * Copy the range specified by src_addr/len 5994 * from the source map to the range dst_addr/len 5995 * in the destination map. 5996 * 5997 * This routine is only advisory and need not do anything. 5998 */ 5999void 6000pmap_copy(pmap_t dst_pmap, pmap_t src_pmap, vm_offset_t dst_addr, vm_size_t len, 6001 vm_offset_t src_addr) 6002{ 6003 struct spglist free; 6004 vm_offset_t addr; 6005 vm_offset_t end_addr = src_addr + len; 6006 vm_offset_t nextva; 6007 6008 if (dst_addr != src_addr) 6009 return; 6010 6011 if (!pmap_is_current(src_pmap)) 6012 return; 6013 6014 rw_wlock(&pvh_global_lock); 6015 if (dst_pmap < src_pmap) { 6016 PMAP_LOCK(dst_pmap); 6017 PMAP_LOCK(src_pmap); 6018 } else { 6019 PMAP_LOCK(src_pmap); 6020 PMAP_LOCK(dst_pmap); 6021 } 6022 sched_pin(); 6023 for (addr = src_addr; addr < end_addr; addr = nextva) { 6024 pt2_entry_t *src_pte2p, *dst_pte2p; 6025 vm_page_t dst_mpt2pg, src_mpt2pg; 6026 pt1_entry_t src_pte1; 6027 u_int pte1_idx; 6028 6029 KASSERT(addr < VM_MAXUSER_ADDRESS, 6030 ("%s: invalid to pmap_copy page tables", __func__)); 6031 6032 nextva = pte1_trunc(addr + PTE1_SIZE); 6033 if (nextva < addr) 6034 nextva = end_addr; 6035 6036 pte1_idx = pte1_index(addr); 6037 src_pte1 = src_pmap->pm_pt1[pte1_idx]; 6038 if (pte1_is_section(src_pte1)) { 6039 if ((addr & PTE1_OFFSET) != 0 || 6040 (addr + PTE1_SIZE) > end_addr) 6041 continue; 6042 if (dst_pmap->pm_pt1[pte1_idx] == 0 && 6043 (!pte1_is_managed(src_pte1) || 6044 pmap_pv_insert_pte1(dst_pmap, addr, 6045 pte1_pa(src_pte1)))) { 6046 dst_pmap->pm_pt1[pte1_idx] = src_pte1 & 6047 ~PTE1_W; 6048 dst_pmap->pm_stats.resident_count += 6049 PTE1_SIZE / PAGE_SIZE; 6050 pmap_pte1_mappings++; 6051 } 6052 continue; 6053 } else if (!pte1_is_link(src_pte1)) 6054 continue; 6055 6056 src_mpt2pg = PHYS_TO_VM_PAGE(pte1_link_pa(src_pte1)); 6057 6058 /* 6059 * We leave PT2s to be linked from PT1 even if they are not 6060 * referenced until all PT2s in a page are without reference. 6061 * 6062 * QQQ: It could be changed ... 6063 */ 6064#if 0 /* single_pt2_link_is_cleared */ 6065 KASSERT(pt2_wirecount_get(src_mpt2pg, pte1_idx) > 0, 6066 ("%s: source page table page is unused", __func__)); 6067#else 6068 if (pt2_wirecount_get(src_mpt2pg, pte1_idx) == 0) 6069 continue; 6070#endif 6071 if (nextva > end_addr) 6072 nextva = end_addr; 6073 6074 src_pte2p = pt2map_entry(addr); 6075 while (addr < nextva) { 6076 pt2_entry_t temp_pte2; 6077 temp_pte2 = pte2_load(src_pte2p); 6078 /* 6079 * we only virtual copy managed pages 6080 */ 6081 if (pte2_is_managed(temp_pte2)) { 6082 dst_mpt2pg = pmap_allocpte2(dst_pmap, addr, 6083 PMAP_ENTER_NOSLEEP); 6084 if (dst_mpt2pg == NULL) 6085 goto out; 6086 dst_pte2p = pmap_pte2_quick(dst_pmap, addr); 6087 if (!pte2_is_valid(pte2_load(dst_pte2p)) && 6088 pmap_try_insert_pv_entry(dst_pmap, addr, 6089 PHYS_TO_VM_PAGE(pte2_pa(temp_pte2)))) { 6090 /* 6091 * Clear the wired, modified, and 6092 * accessed (referenced) bits 6093 * during the copy. 
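 *
 * Concretely, PTE2_W and PTE2_A are cleared and PTE2_NM is set, so the
 * copied mapping starts out unwired and "not referenced", and it stays
 * read-only to the hardware until the first write fault re-emulates
 * the modified bit.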
6094 */ 6095 temp_pte2 &= ~(PTE2_W | PTE2_A); 6096 temp_pte2 |= PTE2_NM; 6097 pte2_store(dst_pte2p, temp_pte2); 6098 dst_pmap->pm_stats.resident_count++; 6099 } else { 6100 SLIST_INIT(&free); 6101 if (pmap_unwire_pt2(dst_pmap, addr, 6102 dst_mpt2pg, &free)) { 6103 pmap_tlb_flush(dst_pmap, addr); 6104 pmap_free_zero_pages(&free); 6105 } 6106 goto out; 6107 } 6108 if (pt2_wirecount_get(dst_mpt2pg, pte1_idx) >= 6109 pt2_wirecount_get(src_mpt2pg, pte1_idx)) 6110 break; 6111 } 6112 addr += PAGE_SIZE; 6113 src_pte2p++; 6114 } 6115 } 6116out: 6117 sched_unpin(); 6118 rw_wunlock(&pvh_global_lock); 6119 PMAP_UNLOCK(src_pmap); 6120 PMAP_UNLOCK(dst_pmap); 6121} 6122 6123/* 6124 * Increase the starting virtual address of the given mapping if a 6125 * different alignment might result in more section mappings. 6126 */ 6127void 6128pmap_align_superpage(vm_object_t object, vm_ooffset_t offset, 6129 vm_offset_t *addr, vm_size_t size) 6130{ 6131 vm_offset_t pte1_offset; 6132 6133 if (size < PTE1_SIZE) 6134 return; 6135 if (object != NULL && (object->flags & OBJ_COLORED) != 0) 6136 offset += ptoa(object->pg_color); 6137 pte1_offset = offset & PTE1_OFFSET; 6138 if (size - ((PTE1_SIZE - pte1_offset) & PTE1_OFFSET) < PTE1_SIZE || 6139 (*addr & PTE1_OFFSET) == pte1_offset) 6140 return; 6141 if ((*addr & PTE1_OFFSET) < pte1_offset) 6142 *addr = pte1_trunc(*addr) + pte1_offset; 6143 else 6144 *addr = pte1_roundup(*addr) + pte1_offset; 6145} 6146 6147void 6148pmap_activate(struct thread *td) 6149{ 6150 pmap_t pmap, oldpmap; 6151 u_int cpuid, ttb; 6152 6153 PDEBUG(9, printf("%s: td = %08x\n", __func__, (uint32_t)td)); 6154 6155 critical_enter(); 6156 pmap = vmspace_pmap(td->td_proc->p_vmspace); 6157 oldpmap = PCPU_GET(curpmap); 6158 cpuid = PCPU_GET(cpuid); 6159 6160#if defined(SMP) 6161 CPU_CLR_ATOMIC(cpuid, &oldpmap->pm_active); 6162 CPU_SET_ATOMIC(cpuid, &pmap->pm_active); 6163#else 6164 CPU_CLR(cpuid, &oldpmap->pm_active); 6165 CPU_SET(cpuid, &pmap->pm_active); 6166#endif 6167 6168 ttb = pmap_ttb_get(pmap); 6169 6170 /* 6171 * pmap_activate is for the current thread on the current cpu 6172 */ 6173 td->td_pcb->pcb_pagedir = ttb; 6174 cp15_ttbr_set(ttb); 6175 PCPU_SET(curpmap, pmap); 6176 critical_exit(); 6177} 6178 6179/* 6180 * Perform the pmap work for mincore. 
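 *
 * A valid 1 MB section mapping is reported with MINCORE_SUPER in
 * addition to MINCORE_INCORE; the modified and referenced state is
 * derived from the software emulated dirty bit and the PTE1_A/PTE2_A
 * access bit of whichever mapping covers the given address.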
6181 */ 6182int 6183pmap_mincore(pmap_t pmap, vm_offset_t addr, vm_paddr_t *locked_pa) 6184{ 6185 pt1_entry_t *pte1p, pte1; 6186 pt2_entry_t *pte2p, pte2; 6187 vm_paddr_t pa; 6188 bool managed; 6189 int val; 6190 6191 PMAP_LOCK(pmap); 6192retry: 6193 pte1p = pmap_pte1(pmap, addr); 6194 pte1 = pte1_load(pte1p); 6195 if (pte1_is_section(pte1)) { 6196 pa = trunc_page(pte1_pa(pte1) | (addr & PTE1_OFFSET)); 6197 managed = pte1_is_managed(pte1); 6198 val = MINCORE_SUPER | MINCORE_INCORE; 6199 if (pte1_is_dirty(pte1)) 6200 val |= MINCORE_MODIFIED | MINCORE_MODIFIED_OTHER; 6201 if (pte1 & PTE1_A) 6202 val |= MINCORE_REFERENCED | MINCORE_REFERENCED_OTHER; 6203 } else if (pte1_is_link(pte1)) { 6204 pte2p = pmap_pte2(pmap, addr); 6205 pte2 = pte2_load(pte2p); 6206 pmap_pte2_release(pte2p); 6207 pa = pte2_pa(pte2); 6208 managed = pte2_is_managed(pte2); 6209 val = MINCORE_INCORE; 6210 if (pte2_is_dirty(pte2)) 6211 val |= MINCORE_MODIFIED | MINCORE_MODIFIED_OTHER; 6212 if (pte2 & PTE2_A) 6213 val |= MINCORE_REFERENCED | MINCORE_REFERENCED_OTHER; 6214 } else { 6215 managed = false; 6216 val = 0; 6217 } 6218 if ((val & (MINCORE_MODIFIED_OTHER | MINCORE_REFERENCED_OTHER)) != 6219 (MINCORE_MODIFIED_OTHER | MINCORE_REFERENCED_OTHER) && managed) { 6220 /* Ensure that "PHYS_TO_VM_PAGE(pa)->object" doesn't change. */ 6221 if (vm_page_pa_tryrelock(pmap, pa, locked_pa)) 6222 goto retry; 6223 } else 6224 PA_UNLOCK_COND(*locked_pa); 6225 PMAP_UNLOCK(pmap); 6226 return (val); 6227} 6228 6229void 6230pmap_kenter_device(vm_offset_t va, vm_size_t size, vm_paddr_t pa) 6231{ 6232 vm_offset_t sva; 6233 uint32_t l2attr; 6234 6235 KASSERT((size & PAGE_MASK) == 0, 6236 ("%s: device mapping not page-sized", __func__)); 6237 6238 sva = va; 6239 l2attr = vm_memattr_to_pte2(VM_MEMATTR_DEVICE); 6240 while (size != 0) { 6241 pmap_kenter_prot_attr(va, pa, PTE2_AP_KRW, l2attr); 6242 va += PAGE_SIZE; 6243 pa += PAGE_SIZE; 6244 size -= PAGE_SIZE; 6245 } 6246 tlb_flush_range(sva, va - sva); 6247} 6248 6249void 6250pmap_kremove_device(vm_offset_t va, vm_size_t size) 6251{ 6252 vm_offset_t sva; 6253 6254 KASSERT((size & PAGE_MASK) == 0, 6255 ("%s: device mapping not page-sized", __func__)); 6256 6257 sva = va; 6258 while (size != 0) { 6259 pmap_kremove(va); 6260 va += PAGE_SIZE; 6261 size -= PAGE_SIZE; 6262 } 6263 tlb_flush_range(sva, va - sva); 6264} 6265 6266void 6267pmap_set_pcb_pagedir(pmap_t pmap, struct pcb *pcb) 6268{ 6269 6270 pcb->pcb_pagedir = pmap_ttb_get(pmap); 6271} 6272 6273 6274/* 6275 * Clean L1 data cache range by physical address. 6276 * The range must be within a single page. 6277 */ 6278static void 6279pmap_dcache_wb_pou(vm_paddr_t pa, vm_size_t size, uint32_t attr) 6280{ 6281 pt2_entry_t *cmap2_pte2p; 6282 struct pcpu *pc; 6283 6284 KASSERT(((pa & PAGE_MASK) + size) <= PAGE_SIZE, 6285 ("%s: not on single page", __func__)); 6286 6287 sched_pin(); 6288 pc = get_pcpu(); 6289 cmap2_pte2p = pc->pc_cmap2_pte2p; 6290 mtx_lock(&pc->pc_cmap_lock); 6291 if (pte2_load(cmap2_pte2p) != 0) 6292 panic("%s: CMAP2 busy", __func__); 6293 pte2_store(cmap2_pte2p, PTE2_KERN_NG(pa, PTE2_AP_KRW, attr)); 6294 dcache_wb_pou((vm_offset_t)pc->pc_cmap2_addr + (pa & PAGE_MASK), size); 6295 pte2_clear(cmap2_pte2p); 6296 tlb_flush((vm_offset_t)pc->pc_cmap2_addr); 6297 sched_unpin(); 6298 mtx_unlock(&pc->pc_cmap_lock); 6299} 6300 6301/* 6302 * Sync instruction cache range which is not mapped yet. 
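 *
 * The data cache is written back page by page through a temporary
 * CMAP2 kernel mapping (pmap_dcache_wb_pou()) and the whole VIPT
 * instruction cache is then invalidated, since there is no way to
 * target only the aliases of the given physical range.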
6303 */ 6304void 6305cache_icache_sync_fresh(vm_offset_t va, vm_paddr_t pa, vm_size_t size) 6306{ 6307 uint32_t len, offset; 6308 vm_page_t m; 6309 6310 /* Write back the d-cache over the given address range. */ 6311 offset = pa & PAGE_MASK; 6312 for ( ; size != 0; size -= len, pa += len, offset = 0) { 6313 len = min(PAGE_SIZE - offset, size); 6314 m = PHYS_TO_VM_PAGE(pa); 6315 KASSERT(m != NULL, ("%s: vm_page_t is null for %#x", 6316 __func__, pa)); 6317 pmap_dcache_wb_pou(pa, len, vm_page_pte2_attr(m)); 6318 } 6319 /* 6320 * The i-cache is VIPT. The only way to flush all virtual mappings 6321 * of a given physical address is to invalidate the entire i-cache. 6322 */ 6323 icache_inv_all(); 6324} 6325 6326void 6327pmap_sync_icache(pmap_t pmap, vm_offset_t va, vm_size_t size) 6328{ 6329 6330 /* Write back the d-cache over the given address range. */ 6331 if (va >= VM_MIN_KERNEL_ADDRESS) { 6332 dcache_wb_pou(va, size); 6333 } else { 6334 uint32_t len, offset; 6335 vm_paddr_t pa; 6336 vm_page_t m; 6337 6338 offset = va & PAGE_MASK; 6339 for ( ; size != 0; size -= len, va += len, offset = 0) { 6340 pa = pmap_extract(pmap, va); /* offset is preserved */ 6341 len = min(PAGE_SIZE - offset, size); 6342 m = PHYS_TO_VM_PAGE(pa); 6343 KASSERT(m != NULL, ("%s: vm_page_t is null for %#x", 6344 __func__, pa)); 6345 pmap_dcache_wb_pou(pa, len, vm_page_pte2_attr(m)); 6346 } 6347 } 6348 /* 6349 * The i-cache is VIPT. The only way to flush all virtual mappings 6350 * of a given physical address is to invalidate the entire i-cache. 6351 */ 6352 icache_inv_all(); 6353} 6354 6355/* 6356 * The implementation of pmap_fault() uses the IN_RANGE2() macro, which 6357 * depends on the given range size being a power of 2. 6358 */ 6359CTASSERT(powerof2(NB_IN_PT1)); 6360CTASSERT(powerof2(PT2MAP_SIZE)); 6361 6362#define IN_RANGE2(addr, start, size) \ 6363 ((vm_offset_t)(start) == ((vm_offset_t)(addr) & ~((size) - 1))) 6364 6365/* 6366 * Handle access and R/W emulation faults. 6367 */ 6368int 6369pmap_fault(pmap_t pmap, vm_offset_t far, uint32_t fsr, int idx, bool usermode) 6370{ 6371 pt1_entry_t *pte1p, pte1; 6372 pt2_entry_t *pte2p, pte2; 6373 6374 if (pmap == NULL) 6375 pmap = kernel_pmap; 6376 6377 /* 6378 * In the kernel, we should never take an abort with a FAR that falls 6379 * within the pmap->pm_pt1 or PT2MAP address ranges. If it happens, 6380 * stop here, print a useful abort message, and get to the debugger 6381 * if possible; otherwise it likely ends in a never-ending loop of aborts. 6382 */ 6383 if (__predict_false(IN_RANGE2(far, pmap->pm_pt1, NB_IN_PT1))) { 6384 /* 6385 * All L1 tables should always be mapped and present. However, 6386 * only the current one is checked here. For user mode, only a 6387 * permission abort from a malicious user is not fatal, and 6388 * neither is an alignment abort, as it may have higher priority. 6389 */ 6390 if (!usermode || (idx != FAULT_ALIGN && idx != FAULT_PERM_L2)) { 6391 CTR4(KTR_PMAP, "%s: pmap %#x pm_pt1 %#x far %#x", 6392 __func__, pmap, pmap->pm_pt1, far); 6393 panic("%s: pm_pt1 abort", __func__); 6394 } 6395 return (KERN_INVALID_ADDRESS); 6396 } 6397 if (__predict_false(IN_RANGE2(far, PT2MAP, PT2MAP_SIZE))) { 6398 /* 6399 * PT2MAP should always be mapped and present in the current L1 6400 * table. However, only existing L2 tables are mapped in PT2MAP. 6401 * For user mode, only an L2 translation abort and a permission 6402 * abort from a malicious user are not fatal, and neither is an 6403 * alignment abort, as it may have higher priority.
6404 */ 6405 if (!usermode || (idx != FAULT_ALIGN && 6406 idx != FAULT_TRAN_L2 && idx != FAULT_PERM_L2)) { 6407 CTR4(KTR_PMAP, "%s: pmap %#x PT2MAP %#x far %#x", 6408 __func__, pmap, PT2MAP, far); 6409 panic("%s: PT2MAP abort", __func__); 6410 } 6411 return (KERN_INVALID_ADDRESS); 6412 } 6413 6414 /* 6415 * A pmap lock is used below for handling of access and R/W emulation 6416 * aborts. They were handled by atomic operations before so some 6417 * analysis of new situation is needed to answer the following question: 6418 * Is it safe to use the lock even for these aborts? 6419 * 6420 * There may happen two cases in general: 6421 * 6422 * (1) Aborts while the pmap lock is locked already - this should not 6423 * happen as pmap lock is not recursive. However, under pmap lock only 6424 * internal kernel data should be accessed and such data should be 6425 * mapped with A bit set and NM bit cleared. If double abort happens, 6426 * then a mapping of data which has caused it must be fixed. Further, 6427 * all new mappings are always made with A bit set and the bit can be 6428 * cleared only on managed mappings. 6429 * 6430 * (2) Aborts while another lock(s) is/are locked - this already can 6431 * happen. However, there is no difference here if it's either access or 6432 * R/W emulation abort, or if it's some other abort. 6433 */ 6434 6435 PMAP_LOCK(pmap); 6436#ifdef SMP 6437 /* 6438 * Special treatment is due to break-before-make approach done when 6439 * pte1 is updated for userland mapping during section promotion or 6440 * demotion. If not caught here, pmap_enter() can find a section 6441 * mapping on faulting address. That is not allowed. 6442 */ 6443 if (idx == FAULT_TRAN_L1 && usermode && cp15_ats1cur_check(far) == 0) { 6444 PMAP_UNLOCK(pmap); 6445 return (KERN_SUCCESS); 6446 } 6447#endif 6448 /* 6449 * Accesss bits for page and section. Note that the entry 6450 * is not in TLB yet, so TLB flush is not necessary. 6451 * 6452 * QQQ: This is hardware emulation, we do not call userret() 6453 * for aborts from user mode. 6454 */ 6455 if (idx == FAULT_ACCESS_L2) { 6456 pte2p = pt2map_entry(far); 6457 pte2 = pte2_load(pte2p); 6458 if (pte2_is_valid(pte2)) { 6459 pte2_store(pte2p, pte2 | PTE2_A); 6460 PMAP_UNLOCK(pmap); 6461 return (KERN_SUCCESS); 6462 } 6463 } 6464 if (idx == FAULT_ACCESS_L1) { 6465 pte1p = pmap_pte1(pmap, far); 6466 pte1 = pte1_load(pte1p); 6467 if (pte1_is_section(pte1)) { 6468 pte1_store(pte1p, pte1 | PTE1_A); 6469 PMAP_UNLOCK(pmap); 6470 return (KERN_SUCCESS); 6471 } 6472 } 6473 6474 /* 6475 * Handle modify bits for page and section. Note that the modify 6476 * bit is emulated by software. So PTEx_RO is software read only 6477 * bit and PTEx_NM flag is real hardware read only bit. 6478 * 6479 * QQQ: This is hardware emulation, we do not call userret() 6480 * for aborts from user mode. 
6481 */ 6482 if ((fsr & FSR_WNR) && (idx == FAULT_PERM_L2)) { 6483 pte2p = pt2map_entry(far); 6484 pte2 = pte2_load(pte2p); 6485 if (pte2_is_valid(pte2) && !(pte2 & PTE2_RO) && 6486 (pte2 & PTE2_NM)) { 6487 pte2_store(pte2p, pte2 & ~PTE2_NM); 6488 tlb_flush(trunc_page(far)); 6489 PMAP_UNLOCK(pmap); 6490 return (KERN_SUCCESS); 6491 } 6492 } 6493 if ((fsr & FSR_WNR) && (idx == FAULT_PERM_L1)) { 6494 pte1p = pmap_pte1(pmap, far); 6495 pte1 = pte1_load(pte1p); 6496 if (pte1_is_section(pte1) && !(pte1 & PTE1_RO) && 6497 (pte1 & PTE1_NM)) { 6498 pte1_store(pte1p, pte1 & ~PTE1_NM); 6499 tlb_flush(pte1_trunc(far)); 6500 PMAP_UNLOCK(pmap); 6501 return (KERN_SUCCESS); 6502 } 6503 } 6504 6505 /* 6506 * QQQ: The previous code, mainly fast handling of access and 6507 * modify bits aborts, could be moved to ASM. Now we are 6508 * starting to deal with not fast aborts. 6509 */ 6510 6511#ifdef INVARIANTS 6512 /* 6513 * Read an entry in PT2TAB associated with both pmap and far. 6514 * It's safe because PT2TAB is always mapped. 6515 */ 6516 pte2 = pt2tab_load(pmap_pt2tab_entry(pmap, far)); 6517 if (pte2_is_valid(pte2)) { 6518 /* 6519 * Now, when we know that L2 page table is allocated, 6520 * we can use PT2MAP to get L2 page table entry. 6521 */ 6522 pte2 = pte2_load(pt2map_entry(far)); 6523 if (pte2_is_valid(pte2)) { 6524 /* 6525 * If L2 page table entry is valid, make sure that 6526 * L1 page table entry is valid too. Note that we 6527 * leave L2 page entries untouched when promoted. 6528 */ 6529 pte1 = pte1_load(pmap_pte1(pmap, far)); 6530 if (!pte1_is_valid(pte1)) { 6531 panic("%s: missing L1 page entry (%p, %#x)", 6532 __func__, pmap, far); 6533 } 6534 } 6535 } 6536#endif 6537 PMAP_UNLOCK(pmap); 6538 return (KERN_FAILURE); 6539} 6540 6541#if defined(PMAP_DEBUG) 6542/* 6543 * Reusing of KVA used in pmap_zero_page function !!! 6544 */ 6545static void 6546pmap_zero_page_check(vm_page_t m) 6547{ 6548 pt2_entry_t *cmap2_pte2p; 6549 uint32_t *p, *end; 6550 struct pcpu *pc; 6551 6552 sched_pin(); 6553 pc = get_pcpu(); 6554 cmap2_pte2p = pc->pc_cmap2_pte2p; 6555 mtx_lock(&pc->pc_cmap_lock); 6556 if (pte2_load(cmap2_pte2p) != 0) 6557 panic("%s: CMAP2 busy", __func__); 6558 pte2_store(cmap2_pte2p, PTE2_KERN_NG(VM_PAGE_TO_PHYS(m), PTE2_AP_KRW, 6559 vm_page_pte2_attr(m))); 6560 end = (uint32_t*)(pc->pc_cmap2_addr + PAGE_SIZE); 6561 for (p = (uint32_t*)pc->pc_cmap2_addr; p < end; p++) 6562 if (*p != 0) 6563 panic("%s: page %p not zero, va: %p", __func__, m, 6564 pc->pc_cmap2_addr); 6565 pte2_clear(cmap2_pte2p); 6566 tlb_flush((vm_offset_t)pc->pc_cmap2_addr); 6567 sched_unpin(); 6568 mtx_unlock(&pc->pc_cmap_lock); 6569} 6570 6571int 6572pmap_pid_dump(int pid) 6573{ 6574 pmap_t pmap; 6575 struct proc *p; 6576 int npte2 = 0; 6577 int i, j, index; 6578 6579 sx_slock(&allproc_lock); 6580 FOREACH_PROC_IN_SYSTEM(p) { 6581 if (p->p_pid != pid || p->p_vmspace == NULL) 6582 continue; 6583 index = 0; 6584 pmap = vmspace_pmap(p->p_vmspace); 6585 for (i = 0; i < NPTE1_IN_PT1; i++) { 6586 pt1_entry_t pte1; 6587 pt2_entry_t *pte2p, pte2; 6588 vm_offset_t base, va; 6589 vm_paddr_t pa; 6590 vm_page_t m; 6591 6592 base = i << PTE1_SHIFT; 6593 pte1 = pte1_load(&pmap->pm_pt1[i]); 6594 6595 if (pte1_is_section(pte1)) { 6596 /* 6597 * QQQ: Do something here! 
6598 */ 6599 } else if (pte1_is_link(pte1)) { 6600 for (j = 0; j < NPTE2_IN_PT2; j++) { 6601 va = base + (j << PAGE_SHIFT); 6602 if (va >= VM_MIN_KERNEL_ADDRESS) { 6603 if (index) { 6604 index = 0; 6605 printf("\n"); 6606 } 6607 sx_sunlock(&allproc_lock); 6608 return (npte2); 6609 } 6610 pte2p = pmap_pte2(pmap, va); 6611 pte2 = pte2_load(pte2p); 6612 pmap_pte2_release(pte2p); 6613 if (!pte2_is_valid(pte2)) 6614 continue; 6615 6616 pa = pte2_pa(pte2); 6617 m = PHYS_TO_VM_PAGE(pa); 6618 printf("va: 0x%x, pa: 0x%x, h: %d, w:" 6619 " %d, f: 0x%x", va, pa, 6620 m->hold_count, m->wire_count, 6621 m->flags); 6622 npte2++; 6623 index++; 6624 if (index >= 2) { 6625 index = 0; 6626 printf("\n"); 6627 } else { 6628 printf(" "); 6629 } 6630 } 6631 } 6632 } 6633 } 6634 sx_sunlock(&allproc_lock); 6635 return (npte2); 6636} 6637 6638#endif 6639 6640#ifdef DDB 6641static pt2_entry_t * 6642pmap_pte2_ddb(pmap_t pmap, vm_offset_t va) 6643{ 6644 pt1_entry_t pte1; 6645 vm_paddr_t pt2pg_pa; 6646 6647 pte1 = pte1_load(pmap_pte1(pmap, va)); 6648 if (!pte1_is_link(pte1)) 6649 return (NULL); 6650 6651 if (pmap_is_current(pmap)) 6652 return (pt2map_entry(va)); 6653 6654 /* Note that L2 page table size is not equal to PAGE_SIZE. */ 6655 pt2pg_pa = trunc_page(pte1_link_pa(pte1)); 6656 if (pte2_pa(pte2_load(PMAP3)) != pt2pg_pa) { 6657 pte2_store(PMAP3, PTE2_KPT(pt2pg_pa)); 6658#ifdef SMP 6659 PMAP3cpu = PCPU_GET(cpuid); 6660#endif 6661 tlb_flush_local((vm_offset_t)PADDR3); 6662 } 6663#ifdef SMP 6664 else if (PMAP3cpu != PCPU_GET(cpuid)) { 6665 PMAP3cpu = PCPU_GET(cpuid); 6666 tlb_flush_local((vm_offset_t)PADDR3); 6667 } 6668#endif 6669 return (PADDR3 + (arm32_btop(va) & (NPTE2_IN_PG - 1))); 6670} 6671 6672static void 6673dump_pmap(pmap_t pmap) 6674{ 6675 6676 printf("pmap %p\n", pmap); 6677 printf(" pm_pt1: %p\n", pmap->pm_pt1); 6678 printf(" pm_pt2tab: %p\n", pmap->pm_pt2tab); 6679 printf(" pm_active: 0x%08lX\n", pmap->pm_active.__bits[0]); 6680} 6681 6682DB_SHOW_COMMAND(pmaps, pmap_list_pmaps) 6683{ 6684 6685 pmap_t pmap; 6686 LIST_FOREACH(pmap, &allpmaps, pm_list) { 6687 dump_pmap(pmap); 6688 } 6689} 6690 6691static int 6692pte2_class(pt2_entry_t pte2) 6693{ 6694 int cls; 6695 6696 cls = (pte2 >> 2) & 0x03; 6697 cls |= (pte2 >> 4) & 0x04; 6698 return (cls); 6699} 6700 6701static void 6702dump_section(pmap_t pmap, uint32_t pte1_idx) 6703{ 6704} 6705 6706static void 6707dump_link(pmap_t pmap, uint32_t pte1_idx, boolean_t invalid_ok) 6708{ 6709 uint32_t i; 6710 vm_offset_t va; 6711 pt2_entry_t *pte2p, pte2; 6712 vm_page_t m; 6713 6714 va = pte1_idx << PTE1_SHIFT; 6715 pte2p = pmap_pte2_ddb(pmap, va); 6716 for (i = 0; i < NPTE2_IN_PT2; i++, pte2p++, va += PAGE_SIZE) { 6717 pte2 = pte2_load(pte2p); 6718 if (pte2 == 0) 6719 continue; 6720 if (!pte2_is_valid(pte2)) { 6721 printf(" 0x%08X: 0x%08X", va, pte2); 6722 if (!invalid_ok) 6723 printf(" - not valid !!!"); 6724 printf("\n"); 6725 continue; 6726 } 6727 m = PHYS_TO_VM_PAGE(pte2_pa(pte2)); 6728 printf(" 0x%08X: 0x%08X, TEX%d, s:%d, g:%d, m:%p", va , pte2, 6729 pte2_class(pte2), !!(pte2 & PTE2_S), !(pte2 & PTE2_NG), m); 6730 if (m != NULL) { 6731 printf(" v:%d h:%d w:%d f:0x%04X\n", m->valid, 6732 m->hold_count, m->wire_count, m->flags); 6733 } else { 6734 printf("\n"); 6735 } 6736 } 6737} 6738 6739static __inline boolean_t 6740is_pv_chunk_space(vm_offset_t va) 6741{ 6742 6743 if ((((vm_offset_t)pv_chunkbase) <= va) && 6744 (va < ((vm_offset_t)pv_chunkbase + PAGE_SIZE * pv_maxchunks))) 6745 return (TRUE); 6746 return (FALSE); 6747} 6748 6749DB_SHOW_COMMAND(pmap, 
pmap_pmap_print) 6750{ 6751 /* XXX convert args. */ 6752 pmap_t pmap = (pmap_t)addr; 6753 pt1_entry_t pte1; 6754 pt2_entry_t pte2; 6755 vm_offset_t va, eva; 6756 vm_page_t m; 6757 uint32_t i; 6758 boolean_t invalid_ok, dump_link_ok, dump_pv_chunk; 6759 6760 if (have_addr) { 6761 pmap_t pm; 6762 6763 LIST_FOREACH(pm, &allpmaps, pm_list) 6764 if (pm == pmap) break; 6765 if (pm == NULL) { 6766 printf("given pmap %p is not in allpmaps list\n", pmap); 6767 return; 6768 } 6769 } else 6770 pmap = PCPU_GET(curpmap); 6771 6772 eva = (modif[0] == 'u') ? VM_MAXUSER_ADDRESS : 0xFFFFFFFF; 6773 dump_pv_chunk = FALSE; /* XXX evaluate from modif[] */ 6774 6775 printf("pmap: 0x%08X\n", (uint32_t)pmap); 6776 printf("PT2MAP: 0x%08X\n", (uint32_t)PT2MAP); 6777 printf("pt2tab: 0x%08X\n", (uint32_t)pmap->pm_pt2tab); 6778 6779 for(i = 0; i < NPTE1_IN_PT1; i++) { 6780 pte1 = pte1_load(&pmap->pm_pt1[i]); 6781 if (pte1 == 0) 6782 continue; 6783 va = i << PTE1_SHIFT; 6784 if (va >= eva) 6785 break; 6786 6787 if (pte1_is_section(pte1)) { 6788 printf("0x%08X: Section 0x%08X, s:%d g:%d\n", va, pte1, 6789 !!(pte1 & PTE1_S), !(pte1 & PTE1_NG)); 6790 dump_section(pmap, i); 6791 } else if (pte1_is_link(pte1)) { 6792 dump_link_ok = TRUE; 6793 invalid_ok = FALSE; 6794 pte2 = pte2_load(pmap_pt2tab_entry(pmap, va)); 6795 m = PHYS_TO_VM_PAGE(pte1_link_pa(pte1)); 6796 printf("0x%08X: Link 0x%08X, pt2tab: 0x%08X m: %p", 6797 va, pte1, pte2, m); 6798 if (is_pv_chunk_space(va)) { 6799 printf(" - pv_chunk space"); 6800 if (dump_pv_chunk) 6801 invalid_ok = TRUE; 6802 else 6803 dump_link_ok = FALSE; 6804 } 6805 else if (m != NULL) 6806 printf(" w:%d w2:%u", m->wire_count, 6807 pt2_wirecount_get(m, pte1_index(va))); 6808 if (pte2 == 0) 6809 printf(" !!! pt2tab entry is ZERO"); 6810 else if (pte2_pa(pte1) != pte2_pa(pte2)) 6811 printf(" !!! pt2tab entry is DIFFERENT - m: %p", 6812 PHYS_TO_VM_PAGE(pte2_pa(pte2))); 6813 printf("\n"); 6814 if (dump_link_ok) 6815 dump_link(pmap, i, invalid_ok); 6816 } else 6817 printf("0x%08X: Invalid entry 0x%08X\n", va, pte1); 6818 } 6819} 6820 6821static void 6822dump_pt2tab(pmap_t pmap) 6823{ 6824 uint32_t i; 6825 pt2_entry_t pte2; 6826 vm_offset_t va; 6827 vm_paddr_t pa; 6828 vm_page_t m; 6829 6830 printf("PT2TAB:\n"); 6831 for (i = 0; i < PT2TAB_ENTRIES; i++) { 6832 pte2 = pte2_load(&pmap->pm_pt2tab[i]); 6833 if (!pte2_is_valid(pte2)) 6834 continue; 6835 va = i << PT2TAB_SHIFT; 6836 pa = pte2_pa(pte2); 6837 m = PHYS_TO_VM_PAGE(pa); 6838 printf(" 0x%08X: 0x%08X, TEX%d, s:%d, m:%p", va, pte2, 6839 pte2_class(pte2), !!(pte2 & PTE2_S), m); 6840 if (m != NULL) 6841 printf(" , h: %d, w: %d, f: 0x%04X pidx: %lld", 6842 m->hold_count, m->wire_count, m->flags, m->pindex); 6843 printf("\n"); 6844 } 6845} 6846 6847DB_SHOW_COMMAND(pmap_pt2tab, pmap_pt2tab_print) 6848{ 6849 /* XXX convert args. 
*/ 6850 pmap_t pmap = (pmap_t)addr; 6851 pt1_entry_t pte1; 6852 pt2_entry_t pte2; 6853 vm_offset_t va; 6854 uint32_t i, start; 6855 6856 if (have_addr) { 6857 printf("supported only on current pmap\n"); 6858 return; 6859 } 6860 6861 pmap = PCPU_GET(curpmap); 6862 printf("curpmap: 0x%08X\n", (uint32_t)pmap); 6863 printf("PT2MAP: 0x%08X\n", (uint32_t)PT2MAP); 6864 printf("pt2tab: 0x%08X\n", (uint32_t)pmap->pm_pt2tab); 6865 6866 start = pte1_index((vm_offset_t)PT2MAP); 6867 for (i = start; i < (start + NPT2_IN_PT2TAB); i++) { 6868 pte1 = pte1_load(&pmap->pm_pt1[i]); 6869 if (pte1 == 0) 6870 continue; 6871 va = i << PTE1_SHIFT; 6872 if (pte1_is_section(pte1)) { 6873 printf("0x%08X: Section 0x%08X, s:%d\n", va, pte1, 6874 !!(pte1 & PTE1_S)); 6875 dump_section(pmap, i); 6876 } else if (pte1_is_link(pte1)) { 6877 pte2 = pte2_load(pmap_pt2tab_entry(pmap, va)); 6878 printf("0x%08X: Link 0x%08X, pt2tab: 0x%08X\n", va, 6879 pte1, pte2); 6880 if (pte2 == 0) 6881 printf(" !!! pt2tab entry is ZERO\n"); 6882 } else 6883 printf("0x%08X: Invalid entry 0x%08X\n", va, pte1); 6884 } 6885 dump_pt2tab(pmap); 6886} 6887#endif 6888
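/*
 * Example usage of the DDB commands defined above, as a sketch assuming
 * the standard DDB "show" command and modifier syntax (the only modifier
 * the handlers interpret is 'u'; the pmap address below is hypothetical):
 *
 *	db> show pmaps			- dump all pmaps on the allpmaps list
 *	db> show pmap			- dump the current pmap
 *	db> show pmap/u 0xc0e12340	- dump the given pmap, user part of
 *					  the address space only
 *	db> show pmap_pt2tab		- dump the current pmap's PT2TAB
 */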