/*-
 * Copyright (c) 1991 Regents of the University of California.
 * Copyright (c) 1994 John S. Dyson
 * Copyright (c) 1994 David Greenman
 * Copyright (c) 2005-2010 Alan L. Cox <alc@cs.rice.edu>
 * Copyright (c) 2014-2016 Svatopluk Kraus <skra@FreeBSD.org>
 * Copyright (c) 2014-2016 Michal Meloun <mmel@FreeBSD.org>
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * the Systems Programming Group of the University of Utah Computer
 * Science Department and William Jolitz of UUNET Technologies Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * from: @(#)pmap.c 7.7 (Berkeley) 5/12/91
 */
/*-
 * Copyright (c) 2003 Networks Associates Technology, Inc.
 * All rights reserved.
 *
 * This software was developed for the FreeBSD Project by Jake Burkholder,
 * Safeport Network Services, and Network Associates Laboratories, the
 * Security Research Division of Network Associates, Inc. under
 * DARPA/SPAWAR contract N66001-01-C-8035 ("CBOSS"), as part of the DARPA
 * CHATS research program.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: stable/11/sys/arm/arm/pmap-v6.c 314296 2017-02-26 10:53:02Z kib $");

/*
 * Manages physical address maps.
 *
 * Since the information managed by this module is
 * also stored by the logical address mapping module,
 * this module may throw away valid virtual-to-physical
 * mappings at almost any time.  However, invalidations
 * of virtual-to-physical mappings must be done as
 * requested.
 *
 * In order to cope with hardware architectures which
 * make virtual-to-physical map invalidates expensive,
 * this module may delay invalidate or reduced protection
 * operations until such time as they are actually
 * necessary.  This module is given full information as
 * to which processors are currently using which maps,
 * and to when physical maps must be made correct.
 */

#include "opt_vm.h"
#include "opt_pmap.h"
#include "opt_ddb.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/lock.h>
#include <sys/proc.h>
#include <sys/rwlock.h>
#include <sys/malloc.h>
#include <sys/vmmeter.h>
#include <sys/mman.h>
#include <sys/sf_buf.h>
#include <sys/smp.h>
#include <sys/sched.h>
#include <sys/sysctl.h>

#ifdef DDB
#include <ddb/ddb.h>
#endif

#include <machine/physmem.h>

#include <vm/vm.h>
#include <vm/uma.h>
#include <vm/pmap.h>
#include <vm/vm_param.h>
#include <vm/vm_kern.h>
#include <vm/vm_object.h>
#include <vm/vm_map.h>
#include <vm/vm_page.h>
#include <vm/vm_pageout.h>
#include <vm/vm_phys.h>
#include <vm/vm_extern.h>
#include <vm/vm_reserv.h>
#include <sys/mutex.h>

#include <machine/md_var.h>
#include <machine/pmap_var.h>
#include <machine/cpu.h>
#include <machine/pcb.h>
#include <machine/sf_buf.h>
#ifdef SMP
#include <machine/smp.h>
#endif

#ifndef PMAP_SHPGPERPROC
#define PMAP_SHPGPERPROC 200
#endif

#ifndef DIAGNOSTIC
#define PMAP_INLINE	__inline
#else
#define PMAP_INLINE
#endif

#ifdef PMAP_DEBUG
static void pmap_zero_page_check(vm_page_t m);
void pmap_debug(int level);
int pmap_pid_dump(int pid);

#define PDEBUG(_lev_,_stat_)		\
	if (pmap_debug_level >= (_lev_))	\
		((_stat_))
#define dprintf printf
int pmap_debug_level = 1;
#else	/* PMAP_DEBUG */
#define PDEBUG(_lev_,_stat_)	/* Nothing */
#define dprintf(x, arg...)
#endif	/* PMAP_DEBUG */
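
/*
 * Illustrative sketch (hypothetical, compiled out): how the PDEBUG() and
 * dprintf() hooks above are meant to be used.  With PMAP_DEBUG defined,
 * PDEBUG() is gated on pmap_debug_level; otherwise both compile to nothing.
 */
#if 0
static void
pmap_debug_usage_sketch(vm_offset_t va, vm_paddr_t pa)
{

	/* Printed only when pmap_debug_level >= 1. */
	PDEBUG(1, printf("%s: va = %#x, pa = %#x\n", __func__, va, pa));

	/* Unconditional trace, available only under PMAP_DEBUG. */
	dprintf("%s: enter\n", __func__);
}
#endif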

/*
 * Level 2 page tables map definition ('max' is excluded).
 */

#define PT2V_MIN_ADDRESS	((vm_offset_t)PT2MAP)
#define PT2V_MAX_ADDRESS	((vm_offset_t)PT2MAP + PT2MAP_SIZE)

#define UPT2V_MIN_ADDRESS	((vm_offset_t)PT2MAP)
#define UPT2V_MAX_ADDRESS \
    ((vm_offset_t)(PT2MAP + (KERNBASE >> PT2MAP_SHIFT)))

/*
 * Promotion to a 1MB (PTE1) page mapping requires that the corresponding
 * 4KB (PTE2) page mappings have identical settings for the following fields:
 */
#define PTE2_PROMOTE	(PTE2_V | PTE2_A | PTE2_NM | PTE2_S | PTE2_NG | \
			 PTE2_NX | PTE2_RO | PTE2_U | PTE2_W | \
			 PTE2_ATTR_MASK)

#define PTE1_PROMOTE	(PTE1_V | PTE1_A | PTE1_NM | PTE1_S | PTE1_NG | \
			 PTE1_NX | PTE1_RO | PTE1_U | PTE1_W | \
			 PTE1_ATTR_MASK)

#define ATTR_TO_L1(l2_attr)	((((l2_attr) & L2_TEX0) ? L1_S_TEX0 : 0) | \
				 (((l2_attr) & L2_C) ? L1_S_C : 0) | \
				 (((l2_attr) & L2_B) ? L1_S_B : 0) | \
				 (((l2_attr) & PTE2_A) ? PTE1_A : 0) | \
				 (((l2_attr) & PTE2_NM) ? PTE1_NM : 0) | \
				 (((l2_attr) & PTE2_S) ? PTE1_S : 0) | \
				 (((l2_attr) & PTE2_NG) ? PTE1_NG : 0) | \
				 (((l2_attr) & PTE2_NX) ? PTE1_NX : 0) | \
				 (((l2_attr) & PTE2_RO) ? PTE1_RO : 0) | \
				 (((l2_attr) & PTE2_U) ? PTE1_U : 0) | \
				 (((l2_attr) & PTE2_W) ? PTE1_W : 0))

#define ATTR_TO_L2(l1_attr)	((((l1_attr) & L1_S_TEX0) ? L2_TEX0 : 0) | \
				 (((l1_attr) & L1_S_C) ? L2_C : 0) | \
				 (((l1_attr) & L1_S_B) ? L2_B : 0) | \
				 (((l1_attr) & PTE1_A) ? PTE2_A : 0) | \
				 (((l1_attr) & PTE1_NM) ? PTE2_NM : 0) | \
				 (((l1_attr) & PTE1_S) ? PTE2_S : 0) | \
				 (((l1_attr) & PTE1_NG) ? PTE2_NG : 0) | \
				 (((l1_attr) & PTE1_NX) ? PTE2_NX : 0) | \
				 (((l1_attr) & PTE1_RO) ? PTE2_RO : 0) | \
				 (((l1_attr) & PTE1_U) ? PTE2_U : 0) | \
				 (((l1_attr) & PTE1_W) ? PTE2_W : 0))

/*
 * PTE2 descriptors creation macros.
 */
#define PTE2_ATTR_DEFAULT	vm_memattr_to_pte2(VM_MEMATTR_DEFAULT)
#define PTE2_ATTR_PT		vm_memattr_to_pte2(pt_memattr)

#define PTE2_KPT(pa)	PTE2_KERN(pa, PTE2_AP_KRW, PTE2_ATTR_PT)
#define PTE2_KPT_NG(pa)	PTE2_KERN_NG(pa, PTE2_AP_KRW, PTE2_ATTR_PT)

#define PTE2_KRW(pa)	PTE2_KERN(pa, PTE2_AP_KRW, PTE2_ATTR_DEFAULT)
#define PTE2_KRO(pa)	PTE2_KERN(pa, PTE2_AP_KR, PTE2_ATTR_DEFAULT)

#define PV_STATS
#ifdef PV_STATS
#define PV_STAT(x)	do { x ; } while (0)
#else
#define PV_STAT(x)	do { } while (0)
#endif
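
/*
 * Illustrative sketch (hypothetical, compiled out): ATTR_TO_L1() and
 * ATTR_TO_L2() translate attribute/protection bits between small page
 * (PTE2) and section (PTE1) descriptors.  This mirrors their real uses
 * in pmap_bootstrap_prepare() and pmap_preboot_map_attr() below.
 */
#if 0
static void
attr_conversion_sketch(void)
{
	uint32_t l2_prot, l1_prot, l1_attr;

	/* Kernel read/write, no execute. */
	l2_prot = PTE2_AP_KRW | PTE2_NX;
	l1_prot = ATTR_TO_L1(l2_prot);		/* PTE1_NX carried over */

	/* Memory attributes (TEX0/C/B) translate the same way. */
	l1_attr = ATTR_TO_L1(PTE2_ATTR_DEFAULT);
}
#endif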

/*
 * The boot_pt1 is used temporarily in the very early boot stage as the L1
 * page table.  We can initialize many things with no memory allocation
 * thanks to its static allocation, and this brings two main advantages:
 * (1) other cores can be started very simply,
 * (2) various boot loaders can be supported, as their arguments can be
 *     processed in virtual address space and can be moved to a safe
 *     location before the first allocation happens.
 * The only disadvantage is that boot_pt1 is used only in the very early
 * boot stage.  However, the table is uninitialized and so lies in bss.
 * Therefore the kernel image size is not influenced.
 *
 * QQQ: In the future, maybe, boot_pt1 can be used for the soft reset and
 *      CPU suspend/resume game.
 */
extern pt1_entry_t boot_pt1[];

vm_paddr_t base_pt1;
pt1_entry_t *kern_pt1;
pt2_entry_t *kern_pt2tab;
pt2_entry_t *PT2MAP;

static uint32_t ttb_flags;
static vm_memattr_t pt_memattr;
ttb_entry_t pmap_kern_ttb;

struct pmap kernel_pmap_store;
LIST_HEAD(pmaplist, pmap);
static struct pmaplist allpmaps;
static struct mtx allpmaps_lock;

vm_offset_t virtual_avail;	/* VA of first avail page (after kernel bss) */
vm_offset_t virtual_end;	/* VA of last avail page (end of kernel AS) */

static vm_offset_t kernel_vm_end_new;
vm_offset_t kernel_vm_end = KERNBASE + NKPT2PG * NPT2_IN_PG * PTE1_SIZE;
vm_offset_t vm_max_kernel_address;
vm_paddr_t kernel_l1pa;

static struct rwlock __aligned(CACHE_LINE_SIZE) pvh_global_lock;

/*
 * Data for the pv entry allocation mechanism.
 */
static TAILQ_HEAD(pch, pv_chunk) pv_chunks = TAILQ_HEAD_INITIALIZER(pv_chunks);
static int pv_entry_count = 0, pv_entry_max = 0, pv_entry_high_water = 0;
static struct md_page *pv_table; /* XXX: Is it used only for the list in md_page? */
static int shpgperproc = PMAP_SHPGPERPROC;

struct pv_chunk *pv_chunkbase;	/* KVA block for pv_chunks */
int pv_maxchunks;		/* How many chunks we have KVA for */
vm_offset_t pv_vafree;		/* freelist stored in the PTE */

vm_paddr_t first_managed_pa;
#define	pa_to_pvh(pa)	(&pv_table[pte1_index(pa - first_managed_pa)])

/*
 * All those kernel PT submaps that BSD is so fond of.
 */
static pt2_entry_t *CMAP3;
static caddr_t CADDR3;
caddr_t _tmppt = 0;

struct msgbuf *msgbufp = NULL; /* XXX move it to machdep.c */

/*
 * Crashdump maps.
 */
static caddr_t crashdumpmap;

static pt2_entry_t *PMAP1 = NULL, *PMAP2;
static pt2_entry_t *PADDR1 = NULL, *PADDR2;
#ifdef DDB
static pt2_entry_t *PMAP3;
static pt2_entry_t *PADDR3;
static int PMAP3cpu __unused; /* for SMP only */
#endif
#ifdef SMP
static int PMAP1cpu;
static int PMAP1changedcpu;
SYSCTL_INT(_debug, OID_AUTO, PMAP1changedcpu, CTLFLAG_RD,
    &PMAP1changedcpu, 0,
    "Number of times pmap_pte2_quick changed CPU with same PMAP1");
#endif
static int PMAP1changed;
SYSCTL_INT(_debug, OID_AUTO, PMAP1changed, CTLFLAG_RD,
    &PMAP1changed, 0,
    "Number of times pmap_pte2_quick changed PMAP1");
static int PMAP1unchanged;
SYSCTL_INT(_debug, OID_AUTO, PMAP1unchanged, CTLFLAG_RD,
    &PMAP1unchanged, 0,
    "Number of times pmap_pte2_quick didn't change PMAP1");
static struct mtx PMAP2mutex;

static __inline void pt2_wirecount_init(vm_page_t m);
static boolean_t pmap_demote_pte1(pmap_t pmap, pt1_entry_t *pte1p,
    vm_offset_t va);
void cache_icache_sync_fresh(vm_offset_t va, vm_paddr_t pa, vm_size_t size);

/*
 * Function to set the debug level of the pmap code.
 */
#ifdef PMAP_DEBUG
void
pmap_debug(int level)
{

	pmap_debug_level = level;
	dprintf("pmap_debug: level=%d\n", pmap_debug_level);
}
#endif	/* PMAP_DEBUG */

/*
 * This table must correspond with the memory attribute configuration in vm.h.
 * The first entry is used for normal system mapping.
 *
 * Device memory is always marked as shared.
 * Normal memory is shared only in the SMP case.
 * The NOS (not outer shareable) bits are not used yet.
 * Class 6 cannot be used on ARM11.
 */
#define TEXDEF_TYPE_SHIFT	0
#define TEXDEF_TYPE_MASK	0x3
#define TEXDEF_INNER_SHIFT	2
#define TEXDEF_INNER_MASK	0x3
#define TEXDEF_OUTER_SHIFT	4
#define TEXDEF_OUTER_MASK	0x3
#define TEXDEF_NOS_SHIFT	6
#define TEXDEF_NOS_MASK		0x1

#define TEX(t, i, o, s)				\
		(((t) << TEXDEF_TYPE_SHIFT) |	\
		((i) << TEXDEF_INNER_SHIFT) |	\
		((o) << TEXDEF_OUTER_SHIFT) |	\
		((s) << TEXDEF_NOS_SHIFT))

static uint32_t tex_class[8] = {
/*	    type      inner cache outer cache */
	TEX(PRRR_MEM, NMRR_WB_WA, NMRR_WB_WA, 0),  /* 0 - ATTR_WB_WA	*/
	TEX(PRRR_MEM, NMRR_NC,	  NMRR_NC,    0),  /* 1 - ATTR_NOCACHE	*/
	TEX(PRRR_DEV, NMRR_NC,	  NMRR_NC,    0),  /* 2 - ATTR_DEVICE	*/
	TEX(PRRR_SO,  NMRR_NC,	  NMRR_NC,    0),  /* 3 - ATTR_SO	*/
	TEX(PRRR_MEM, NMRR_WT,	  NMRR_WT,    0),  /* 4 - ATTR_WT	*/
	TEX(PRRR_MEM, NMRR_NC,	  NMRR_NC,    0),  /* 5 - NOT USED YET	*/
	TEX(PRRR_MEM, NMRR_NC,	  NMRR_NC,    0),  /* 6 - NOT USED YET	*/
	TEX(PRRR_MEM, NMRR_NC,	  NMRR_NC,    0),  /* 7 - NOT USED YET	*/
};
#undef TEX

static uint32_t pte2_attr_tab[8] = {
	PTE2_ATTR_WB_WA,	/* 0 - VM_MEMATTR_WB_WA */
	PTE2_ATTR_NOCACHE,	/* 1 - VM_MEMATTR_NOCACHE */
	PTE2_ATTR_DEVICE,	/* 2 - VM_MEMATTR_DEVICE */
	PTE2_ATTR_SO,		/* 3 - VM_MEMATTR_SO */
	PTE2_ATTR_WT,		/* 4 - VM_MEMATTR_WRITE_THROUGH */
	0,			/* 5 - NOT USED YET */
	0,			/* 6 - NOT USED YET */
	0			/* 7 - NOT USED YET */
};
CTASSERT(VM_MEMATTR_WB_WA == 0);
CTASSERT(VM_MEMATTR_NOCACHE == 1);
CTASSERT(VM_MEMATTR_DEVICE == 2);
CTASSERT(VM_MEMATTR_SO == 3);
CTASSERT(VM_MEMATTR_WRITE_THROUGH == 4);

static inline uint32_t
vm_memattr_to_pte2(vm_memattr_t ma)
{

	KASSERT((u_int)ma < 5, ("%s: bad vm_memattr_t %d", __func__, ma));
	return (pte2_attr_tab[(u_int)ma]);
}

static inline uint32_t
vm_page_pte2_attr(vm_page_t m)
{

	return (vm_memattr_to_pte2(m->md.pat_mode));
}

/*
 * Convert TEX definition entry to TTB flags.
 */
static uint32_t
encode_ttb_flags(int idx)
{
	uint32_t inner, outer, nos, reg;

	inner = (tex_class[idx] >> TEXDEF_INNER_SHIFT) &
	    TEXDEF_INNER_MASK;
	outer = (tex_class[idx] >> TEXDEF_OUTER_SHIFT) &
	    TEXDEF_OUTER_MASK;
	nos = (tex_class[idx] >> TEXDEF_NOS_SHIFT) &
	    TEXDEF_NOS_MASK;

	reg = nos << 5;
	reg |= outer << 3;
	if (cpuinfo.coherent_walk)
		reg |= (inner & 0x1) << 6;
	reg |= (inner & 0x2) >> 1;
#ifdef SMP
	reg |= 1 << 1;
#endif
	return (reg);
}
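
/*
 * Illustrative sketch (hypothetical, compiled out): vm_memattr_to_pte2()
 * is a plain table lookup; the CTASSERTs above pin the table order to the
 * VM_MEMATTR_* values, so the following holds by construction.
 */
#if 0
static void
memattr_sketch(void)
{
	uint32_t attr;

	attr = vm_memattr_to_pte2(VM_MEMATTR_DEVICE);
	KASSERT(attr == PTE2_ATTR_DEVICE,
	    ("%s: bad attr %#x", __func__, attr));
}
#endif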

/*
 * Set TEX remapping registers in current CPU.
 */
void
pmap_set_tex(void)
{
	uint32_t prrr, nmrr;
	uint32_t type, inner, outer, nos;
	int i;

#ifdef PMAP_PTE_NOCACHE
	/* XXX fixme */
	if (cpuinfo.coherent_walk) {
		pt_memattr = VM_MEMATTR_WB_WA;
		ttb_flags = encode_ttb_flags(0);
	} else {
		pt_memattr = VM_MEMATTR_NOCACHE;
		ttb_flags = encode_ttb_flags(1);
	}
#else
	pt_memattr = VM_MEMATTR_WB_WA;
	ttb_flags = encode_ttb_flags(0);
#endif

	prrr = 0;
	nmrr = 0;

	/* Build remapping register from TEX classes. */
	for (i = 0; i < 8; i++) {
		type = (tex_class[i] >> TEXDEF_TYPE_SHIFT) &
		    TEXDEF_TYPE_MASK;
		inner = (tex_class[i] >> TEXDEF_INNER_SHIFT) &
		    TEXDEF_INNER_MASK;
		outer = (tex_class[i] >> TEXDEF_OUTER_SHIFT) &
		    TEXDEF_OUTER_MASK;
		nos = (tex_class[i] >> TEXDEF_NOS_SHIFT) &
		    TEXDEF_NOS_MASK;

		prrr |= type << (i * 2);
		prrr |= nos << (i + 24);
		nmrr |= inner << (i * 2);
		nmrr |= outer << (i * 2 + 16);
	}
	/* Add shareable bits for device memory. */
	prrr |= PRRR_DS0 | PRRR_DS1;

	/* Add shareable bits for normal memory in SMP case. */
#ifdef SMP
	prrr |= PRRR_NS1;
#endif
	cp15_prrr_set(prrr);
	cp15_nmrr_set(nmrr);

	/* Caches are disabled, so full TLB flush should be enough. */
	tlb_flush_all_local();
}

/*
 * KERNBASE must be a multiple of NPT2_IN_PG * PTE1_SIZE.  In other words,
 * KERNBASE is mapped by the first L2 page table in an L2 page table page.
 * It meets the same constraint due to PT2MAP being placed just under
 * KERNBASE.
 */
CTASSERT((KERNBASE & (NPT2_IN_PG * PTE1_SIZE - 1)) == 0);
CTASSERT((KERNBASE - VM_MAXUSER_ADDRESS) >= PT2MAP_SIZE);

/*
 * In crazy dreams, PAGE_SIZE could be a multiple of PTE2_SIZE in general.
 * For now, anyhow, the following check must be fulfilled.
 */
CTASSERT(PAGE_SIZE == PTE2_SIZE);
/*
 * We don't want to mess up MI code with all MMU and PMAP definitions,
 * so some things, which depend on other ones, are defined independently.
 * Now, it is time to check that we don't screw up something.
 */
CTASSERT(PDRSHIFT == PTE1_SHIFT);
/*
 * Check L1 and L2 page table entries definitions consistency.
 */
CTASSERT(NB_IN_PT1 == (sizeof(pt1_entry_t) * NPTE1_IN_PT1));
CTASSERT(NB_IN_PT2 == (sizeof(pt2_entry_t) * NPTE2_IN_PT2));
/*
 * Check L2 page tables page consistency.
 */
CTASSERT(PAGE_SIZE == (NPT2_IN_PG * NB_IN_PT2));
CTASSERT((1 << PT2PG_SHIFT) == NPT2_IN_PG);
/*
 * Check PT2TAB consistency.
 * PT2TAB_ENTRIES is defined as a division of NPTE1_IN_PT1 by NPT2_IN_PG.
 * The division should leave no remainder.
 */
CTASSERT(NPTE1_IN_PT1 == (PT2TAB_ENTRIES * NPT2_IN_PG));

/*
 * The PT2MAP magic.
 *
 * All level 2 page tables (PT2s) are mapped contiguously and accordingly
 * into the PT2MAP address space.  As PT2 size is less than PAGE_SIZE, this
 * can be done only if PAGE_SIZE is a multiple of the PT2 size.  All PT2s
 * in one page must be used together, but not necessarily at once.  The
 * first PT2 in a page must map things on a correctly aligned address and
 * the others must follow in the right order.
 */
#define NB_IN_PT2TAB	(PT2TAB_ENTRIES * sizeof(pt2_entry_t))
#define NPT2_IN_PT2TAB	(NB_IN_PT2TAB / NB_IN_PT2)
#define NPG_IN_PT2TAB	(NB_IN_PT2TAB / PAGE_SIZE)

/*
 * Check PT2TAB consistency.
 * NPT2_IN_PT2TAB is defined as a division of NB_IN_PT2TAB by NB_IN_PT2.
 * NPG_IN_PT2TAB is defined as a division of NB_IN_PT2TAB by PAGE_SIZE.
 * Both divisions should leave no remainder.
 */
CTASSERT(NB_IN_PT2TAB == (NPT2_IN_PT2TAB * NB_IN_PT2));
CTASSERT(NB_IN_PT2TAB == (NPG_IN_PT2TAB * PAGE_SIZE));
/*
 * The implementation was made general, however, with the assumption
 * below in mind.  In case of another value of NPG_IN_PT2TAB,
 * the code should be rechecked once more.
 */
CTASSERT(NPG_IN_PT2TAB == 1);
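
/*
 * Illustrative sketch (hypothetical, compiled out): as the PT2MAP comment
 * above says, PAGE_SIZE == NPT2_IN_PG * NB_IN_PT2, so the PT2 serving L1
 * index 'pt1_idx' lives at byte offset (pt1_idx & PT2PG_MASK) * NB_IN_PT2
 * within its PT2s page.  This is exactly what page_pt2off() below computes.
 */
#if 0
static u_int
pt2map_offset_sketch(vm_offset_t va)
{
	u_int pt1_idx;

	pt1_idx = pte1_index(va);
	return ((pt1_idx & PT2PG_MASK) * NB_IN_PT2);
}
#endif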

/*
 * Get offset of PT2 in a page
 * associated with given PT1 index.
 */
static __inline u_int
page_pt2off(u_int pt1_idx)
{

	return ((pt1_idx & PT2PG_MASK) * NB_IN_PT2);
}

/*
 * Get physical address of PT2
 * associated with given PT2s page and PT1 index.
 */
static __inline vm_paddr_t
page_pt2pa(vm_paddr_t pgpa, u_int pt1_idx)
{

	return (pgpa + page_pt2off(pt1_idx));
}

/*
 * Get first entry of PT2
 * associated with given PT2s page and PT1 index.
 */
static __inline pt2_entry_t *
page_pt2(vm_offset_t pgva, u_int pt1_idx)
{

	return ((pt2_entry_t *)(pgva + page_pt2off(pt1_idx)));
}

/*
 * Get virtual address of PT2s page (mapped in PT2MAP)
 * which holds the PT2 which holds the entry which maps the given
 * virtual address.
 */
static __inline vm_offset_t
pt2map_pt2pg(vm_offset_t va)
{

	va &= ~(NPT2_IN_PG * PTE1_SIZE - 1);
	return ((vm_offset_t)pt2map_entry(va));
}

/*****************************************************************************
 *
 * THREE pmap initialization milestones exist:
 *
 *  locore.S
 *    -> fundamental init (including MMU) in ASM
 *
 *  initarm()
 *    -> fundamental init continues in C
 *    -> first available physical address is known
 *
 *    pmap_bootstrap_prepare() -> FIRST PMAP MILESTONE (first epoch begins)
 *      -> basic (safe) interface for physical address allocation is made
 *      -> basic (safe) interface for virtual mapping is made
 *      -> limited, not SMP coherent, work is possible
 *
 *    -> more fundamental init continues in C
 *      -> locks and some more things are available
 *      -> all fundamental allocations and mappings are done
 *
 *    pmap_bootstrap() -> SECOND PMAP MILESTONE (second epoch begins)
 *      -> phys_avail[] and virtual_avail are set
 *      -> control is passed to the vm subsystem
 *      -> physical and virtual address allocation are off limits
 *      -> low level mapping functions, some SMP coherent,
 *         are available, which cannot be used until the vm subsystem
 *         is initialized
 *
 *  mi_startup()
 *    -> vm subsystem is being initialized
 *
 *    pmap_init() -> THIRD PMAP MILESTONE (third epoch begins)
 *      -> pmap is fully initialized
 *
 *****************************************************************************/

/*****************************************************************************
 *
 * PMAP first stage initialization and utility functions
 * for pre-bootstrap epoch.
 *
 * After pmap_bootstrap_prepare() is called, the following functions
 * can be used:
 *
 * (1) strictly only for this stage, functions for physical page allocations,
 *     virtual space allocations, and mappings:
 *
 * vm_paddr_t pmap_preboot_get_pages(u_int num);
 * void pmap_preboot_map_pages(vm_paddr_t pa, vm_offset_t va, u_int num);
 * vm_offset_t pmap_preboot_reserve_pages(u_int num);
 * vm_offset_t pmap_preboot_get_vpages(u_int num);
 * void pmap_preboot_map_attr(vm_paddr_t pa, vm_offset_t va, vm_size_t size,
 *     vm_prot_t prot, vm_memattr_t attr);
 *
 * (2) for all stages:
 *
 * vm_paddr_t pmap_kextract(vm_offset_t va);
 *
 * NOTE: This stage is not SMP coherent.
 *
 *****************************************************************************/
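
/*
 * Illustrative sketch (hypothetical, compiled out): a typical pre-bootstrap
 * sequence built from the interface listed above -- take physical pages
 * from the bump allocator, reserve KVA, and wire the two together.
 */
#if 0
static vm_offset_t
preboot_usage_sketch(void)
{
	vm_paddr_t pa;
	vm_offset_t va;

	pa = pmap_preboot_get_pages(2);		/* two physical pages */
	va = pmap_preboot_reserve_pages(2);	/* two pages of KVA */
	pmap_preboot_map_pages(pa, va, 2);	/* kernel r/w mapping */
	return (va);
}
#endif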
668 * 669 *****************************************************************************/ 670 671#define KERNEL_P2V(pa) \ 672 ((vm_offset_t)((pa) - arm_physmem_kernaddr + KERNVIRTADDR)) 673#define KERNEL_V2P(va) \ 674 ((vm_paddr_t)((va) - KERNVIRTADDR + arm_physmem_kernaddr)) 675 676static vm_paddr_t last_paddr; 677 678/* 679 * Pre-bootstrap epoch page allocator. 680 */ 681vm_paddr_t 682pmap_preboot_get_pages(u_int num) 683{ 684 vm_paddr_t ret; 685 686 ret = last_paddr; 687 last_paddr += num * PAGE_SIZE; 688 689 return (ret); 690} 691 692/* 693 * The fundamental initialization of PMAP stuff. 694 * 695 * Some things already happened in locore.S and some things could happen 696 * before pmap_bootstrap_prepare() is called, so let's recall what is done: 697 * 1. Caches are disabled. 698 * 2. We are running on virtual addresses already with 'boot_pt1' 699 * as L1 page table. 700 * 3. So far, all virtual addresses can be converted to physical ones and 701 * vice versa by the following macros: 702 * KERNEL_P2V(pa) .... physical to virtual ones, 703 * KERNEL_V2P(va) .... virtual to physical ones. 704 * 705 * What is done herein: 706 * 1. The 'boot_pt1' is replaced by real kernel L1 page table 'kern_pt1'. 707 * 2. PT2MAP magic is brought to live. 708 * 3. Basic preboot functions for page allocations and mappings can be used. 709 * 4. Everything is prepared for L1 cache enabling. 710 * 711 * Variations: 712 * 1. To use second TTB register, so kernel and users page tables will be 713 * separated. This way process forking - pmap_pinit() - could be faster, 714 * it saves physical pages and KVA per a process, and it's simple change. 715 * However, it will lead, due to hardware matter, to the following: 716 * (a) 2G space for kernel and 2G space for users. 717 * (b) 1G space for kernel in low addresses and 3G for users above it. 718 * A question is: Is the case (b) really an option? Note that case (b) 719 * does save neither physical memory and KVA. 720 */ 721void 722pmap_bootstrap_prepare(vm_paddr_t last) 723{ 724 vm_paddr_t pt2pg_pa, pt2tab_pa, pa, size; 725 vm_offset_t pt2pg_va; 726 pt1_entry_t *pte1p; 727 pt2_entry_t *pte2p; 728 u_int i; 729 uint32_t actlr_mask, actlr_set, l1_attr; 730 731 /* 732 * Now, we are going to make real kernel mapping. Note that we are 733 * already running on some mapping made in locore.S and we expect 734 * that it's large enough to ensure nofault access to physical memory 735 * allocated herein before switch. 736 * 737 * As kernel image and everything needed before are and will be mapped 738 * by section mappings, we align last physical address to PTE1_SIZE. 739 */ 740 last_paddr = pte1_roundup(last); 741 742 /* 743 * Allocate and zero page(s) for kernel L1 page table. 744 * 745 * Note that it's first allocation on space which was PTE1_SIZE 746 * aligned and as such base_pt1 is aligned to NB_IN_PT1 too. 747 */ 748 base_pt1 = pmap_preboot_get_pages(NPG_IN_PT1); 749 kern_pt1 = (pt1_entry_t *)KERNEL_P2V(base_pt1); 750 bzero((void*)kern_pt1, NB_IN_PT1); 751 pte1_sync_range(kern_pt1, NB_IN_PT1); 752 753 /* Allocate and zero page(s) for kernel PT2TAB. */ 754 pt2tab_pa = pmap_preboot_get_pages(NPG_IN_PT2TAB); 755 kern_pt2tab = (pt2_entry_t *)KERNEL_P2V(pt2tab_pa); 756 bzero(kern_pt2tab, NB_IN_PT2TAB); 757 pte2_sync_range(kern_pt2tab, NB_IN_PT2TAB); 758 759 /* Allocate and zero page(s) for kernel L2 page tables. 
	pt2pg_pa = pmap_preboot_get_pages(NKPT2PG);
	pt2pg_va = KERNEL_P2V(pt2pg_pa);
	size = NKPT2PG * PAGE_SIZE;
	bzero((void*)pt2pg_va, size);
	pte2_sync_range((pt2_entry_t *)pt2pg_va, size);

	/*
	 * Add a physical memory segment (vm_phys_seg) corresponding to the
	 * preallocated pages for kernel L2 page tables so that vm_page
	 * structures representing these pages will be created.  The vm_page
	 * structures are required for promotion of the corresponding kernel
	 * virtual addresses to section mappings.
	 */
	vm_phys_add_seg(pt2tab_pa, pmap_preboot_get_pages(0));

	/*
	 * Insert allocated L2 page table pages to PT2TAB and make
	 * links to all PT2s in the L1 page table.  See how kernel_vm_end
	 * is initialized.
	 *
	 * We play it simple and safe.  So every KVA will have an underlying
	 * L2 page table, even the kernel image mapped by sections.
	 */
	pte2p = kern_pt2tab_entry(KERNBASE);
	for (pa = pt2pg_pa; pa < pt2pg_pa + size; pa += PTE2_SIZE)
		pt2tab_store(pte2p++, PTE2_KPT(pa));

	pte1p = kern_pte1(KERNBASE);
	for (pa = pt2pg_pa; pa < pt2pg_pa + size; pa += NB_IN_PT2)
		pte1_store(pte1p++, PTE1_LINK(pa));

	/* Make section mappings for kernel. */
	l1_attr = ATTR_TO_L1(PTE2_ATTR_DEFAULT);
	pte1p = kern_pte1(KERNBASE);
	for (pa = KERNEL_V2P(KERNBASE); pa < last; pa += PTE1_SIZE)
		pte1_store(pte1p++, PTE1_KERN(pa, PTE1_AP_KRW, l1_attr));

	/*
	 * Get free and aligned space for PT2MAP and make L1 page table links
	 * to L2 page tables held in PT2TAB.
	 *
	 * Note that pages holding PT2s are stored in PT2TAB as pt2_entry_t
	 * descriptors and PT2TAB page(s) itself is(are) used as PT2s.  Thus
	 * each entry in PT2TAB maps all PT2s in a page.  This implies that
	 * the virtual address of PT2MAP must be aligned to
	 * NPT2_IN_PG * PTE1_SIZE.
	 */
	PT2MAP = (pt2_entry_t *)(KERNBASE - PT2MAP_SIZE);
	pte1p = kern_pte1((vm_offset_t)PT2MAP);
	for (pa = pt2tab_pa, i = 0; i < NPT2_IN_PT2TAB; i++, pa += NB_IN_PT2) {
		pte1_store(pte1p++, PTE1_LINK(pa));
	}

	/*
	 * Store PT2TAB in PT2TAB itself, i.e. self reference mapping.
	 * Each pmap will hold its own PT2TAB, so the mapping should not
	 * be global.
	 */
	pte2p = kern_pt2tab_entry((vm_offset_t)PT2MAP);
	for (pa = pt2tab_pa, i = 0; i < NPG_IN_PT2TAB; i++, pa += PTE2_SIZE) {
		pt2tab_store(pte2p++, PTE2_KPT_NG(pa));
	}

	/*
	 * Choose correct L2 page table and make mappings for allocations
	 * made herein, which will replace the temporary locore.S mappings
	 * after a while.  Note that PT2MAP cannot be used until we switch
	 * to kern_pt1.
	 *
	 * Note that these allocations started aligned on a 1M section and
	 * that the kernel PT1 was allocated first.  The making of mappings
	 * must follow the order of the physical allocations, as we've used
	 * the KERNEL_P2V() macro for virtual address resolution.
	 */
	pte2p = kern_pt2tab_entry((vm_offset_t)kern_pt1);
	pt2pg_va = KERNEL_P2V(pte2_pa(pte2_load(pte2p)));

	pte2p = page_pt2(pt2pg_va, pte1_index((vm_offset_t)kern_pt1));

	/* Make mapping for kernel L1 page table. */
	for (pa = base_pt1, i = 0; i < NPG_IN_PT1; i++, pa += PTE2_SIZE)
		pte2_store(pte2p++, PTE2_KPT(pa));

	/* Make mapping for kernel PT2TAB. */
	for (pa = pt2tab_pa, i = 0; i < NPG_IN_PT2TAB; i++, pa += PTE2_SIZE)
		pte2_store(pte2p++, PTE2_KPT(pa));

	/* Finally, switch from 'boot_pt1' to 'kern_pt1'. */
	pmap_kern_ttb = base_pt1 | ttb_flags;
	cpuinfo_get_actlr_modifier(&actlr_mask, &actlr_set);
	reinit_mmu(pmap_kern_ttb, actlr_mask, actlr_set);
	/*
	 * Initialize the first available KVA.  As the kernel image is
	 * mapped by sections, we are leaving some gap behind.
	 */
	virtual_avail = (vm_offset_t)kern_pt2tab + NPG_IN_PT2TAB * PAGE_SIZE;
}

/*
 * Setup L2 page table page for given KVA.
 * Used in pre-bootstrap epoch.
 *
 * Note that we have allocated NKPT2PG pages for L2 page tables in advance
 * and used them for mapping KVA starting from KERNBASE.  However, this is
 * not enough.  Vectors and devices need L2 page tables too.  Note that
 * they are even above VM_MAX_KERNEL_ADDRESS.
 */
static __inline vm_paddr_t
pmap_preboot_pt2pg_setup(vm_offset_t va)
{
	pt2_entry_t *pte2p, pte2;
	vm_paddr_t pt2pg_pa;

	/* Get associated entry in PT2TAB. */
	pte2p = kern_pt2tab_entry(va);

	/* Just return, if PT2s page exists already. */
	pte2 = pt2tab_load(pte2p);
	if (pte2_is_valid(pte2))
		return (pte2_pa(pte2));

	KASSERT(va >= VM_MAX_KERNEL_ADDRESS,
	    ("%s: NKPT2PG too small", __func__));

	/*
	 * Allocate page for PT2s and insert it to PT2TAB.
	 * In other words, map it into PT2MAP space.
	 */
	pt2pg_pa = pmap_preboot_get_pages(1);
	pt2tab_store(pte2p, PTE2_KPT(pt2pg_pa));

	/* Zero all PT2s in allocated page. */
	bzero((void*)pt2map_pt2pg(va), PAGE_SIZE);
	pte2_sync_range((pt2_entry_t *)pt2map_pt2pg(va), PAGE_SIZE);

	return (pt2pg_pa);
}

/*
 * Setup L2 page table for given KVA.
 * Used in pre-bootstrap epoch.
 */
static void
pmap_preboot_pt2_setup(vm_offset_t va)
{
	pt1_entry_t *pte1p;
	vm_paddr_t pt2pg_pa, pt2_pa;

	/* Setup PT2's page. */
	pt2pg_pa = pmap_preboot_pt2pg_setup(va);
	pt2_pa = page_pt2pa(pt2pg_pa, pte1_index(va));

	/* Insert PT2 to PT1. */
	pte1p = kern_pte1(va);
	pte1_store(pte1p, PTE1_LINK(pt2_pa));
}

/*
 * Get L2 page entry associated with given KVA.
 * Used in pre-bootstrap epoch.
 */
static __inline pt2_entry_t*
pmap_preboot_vtopte2(vm_offset_t va)
{
	pt1_entry_t *pte1p;

	/* Setup PT2 if needed. */
	pte1p = kern_pte1(va);
	if (!pte1_is_valid(pte1_load(pte1p))) /* XXX - sections ?! */
		pmap_preboot_pt2_setup(va);

	return (pt2map_entry(va));
}

/*
 * Pre-bootstrap epoch page(s) mapping(s).
 */
void
pmap_preboot_map_pages(vm_paddr_t pa, vm_offset_t va, u_int num)
{
	u_int i;
	pt2_entry_t *pte2p;

	/* Map all the pages. */
	for (i = 0; i < num; i++) {
		pte2p = pmap_preboot_vtopte2(va);
		pte2_store(pte2p, PTE2_KRW(pa));
		va += PAGE_SIZE;
		pa += PAGE_SIZE;
	}
}

/*
 * Pre-bootstrap epoch virtual space allocator.
 */
vm_offset_t
pmap_preboot_reserve_pages(u_int num)
{
	u_int i;
	vm_offset_t start, va;
	pt2_entry_t *pte2p;

	/* Allocate virtual space. */
	start = va = virtual_avail;
	virtual_avail += num * PAGE_SIZE;

	/* Zero the mapping. */
	for (i = 0; i < num; i++) {
		pte2p = pmap_preboot_vtopte2(va);
		pte2_store(pte2p, 0);
		va += PAGE_SIZE;
	}

	return (start);
}

/*
 * Pre-bootstrap epoch page(s) allocation and mapping(s).
 */
vm_offset_t
pmap_preboot_get_vpages(u_int num)
{
	vm_paddr_t pa;
	vm_offset_t va;

	/* Allocate physical page(s). */
	pa = pmap_preboot_get_pages(num);

	/* Allocate virtual space. */
	va = virtual_avail;
	virtual_avail += num * PAGE_SIZE;

	/* Map and zero all. */
	pmap_preboot_map_pages(pa, va, num);
	bzero((void *)va, num * PAGE_SIZE);

	return (va);
}

/*
 * Pre-bootstrap epoch page mapping(s) with attributes.
 */
void
pmap_preboot_map_attr(vm_paddr_t pa, vm_offset_t va, vm_size_t size,
    vm_prot_t prot, vm_memattr_t attr)
{
	u_int num;
	u_int l1_attr, l1_prot, l2_prot, l2_attr;
	pt1_entry_t *pte1p;
	pt2_entry_t *pte2p;

	l2_prot = prot & VM_PROT_WRITE ? PTE2_AP_KRW : PTE2_AP_KR;
	l2_prot |= (prot & VM_PROT_EXECUTE) ? PTE2_X : PTE2_NX;
	l2_attr = vm_memattr_to_pte2(attr);
	l1_prot = ATTR_TO_L1(l2_prot);
	l1_attr = ATTR_TO_L1(l2_attr);

	/* Map all the pages. */
	num = round_page(size);
	while (num > 0) {
		if ((((va | pa) & PTE1_OFFSET) == 0) && (num >= PTE1_SIZE)) {
			pte1p = kern_pte1(va);
			pte1_store(pte1p, PTE1_KERN(pa, l1_prot, l1_attr));
			va += PTE1_SIZE;
			pa += PTE1_SIZE;
			num -= PTE1_SIZE;
		} else {
			pte2p = pmap_preboot_vtopte2(va);
			pte2_store(pte2p, PTE2_KERN(pa, l2_prot, l2_attr));
			va += PAGE_SIZE;
			pa += PAGE_SIZE;
			num -= PAGE_SIZE;
		}
	}
}

/*
 * Extract from the kernel page table the physical address
 * that is mapped by the given virtual address "va".
 */
vm_paddr_t
pmap_kextract(vm_offset_t va)
{
	vm_paddr_t pa;
	pt1_entry_t pte1;
	pt2_entry_t pte2;

	pte1 = pte1_load(kern_pte1(va));
	if (pte1_is_section(pte1)) {
		pa = pte1_pa(pte1) | (va & PTE1_OFFSET);
	} else if (pte1_is_link(pte1)) {
		/*
		 * We should beware of concurrent promotion that changes
		 * pte1 at this point.  However, it's not a problem as the
		 * PT2 page is preserved by promotion in PT2TAB.  So even
		 * if it happens, using PT2MAP is still safe.
		 *
		 * QQQ: However, concurrent removal is a problem which ends
		 *      in an abort on the PT2MAP space.  Locking must be
		 *      used to deal with this.
		 */
		pte2 = pte2_load(pt2map_entry(va));
		pa = pte2_pa(pte2) | (va & PTE2_OFFSET);
	} else {
		panic("%s: va %#x pte1 %#x", __func__, va, pte1);
	}
	return (pa);
}

/*
 * Extract from the kernel page table the physical address
 * that is mapped by the given virtual address "va".  Also
 * return the L2 page table entry which maps the address.
 *
 * This is only intended to be used for panic dumps.
 */
vm_paddr_t
pmap_dump_kextract(vm_offset_t va, pt2_entry_t *pte2p)
{
	vm_paddr_t pa;
	pt1_entry_t pte1;
	pt2_entry_t pte2;

	pte1 = pte1_load(kern_pte1(va));
	if (pte1_is_section(pte1)) {
		pa = pte1_pa(pte1) | (va & PTE1_OFFSET);
		pte2 = pa | ATTR_TO_L2(pte1) | PTE2_V;
	} else if (pte1_is_link(pte1)) {
		pte2 = pte2_load(pt2map_entry(va));
		pa = pte2_pa(pte2);
	} else {
		pte2 = 0;
		pa = 0;
	}
	if (pte2p != NULL)
		*pte2p = pte2;
	return (pa);
}
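
/*
 * Illustrative sketch (hypothetical, compiled out): for an address inside
 * the linearly mapped kernel image, pmap_kextract() and the pre-bootstrap
 * KERNEL_V2P() macro should agree, whether the address is covered by a
 * section or by a small page.
 */
#if 0
static void
kextract_sketch(vm_offset_t va)
{
	vm_paddr_t pa;

	pa = pmap_kextract(va);
	KASSERT(pa == KERNEL_V2P(va),
	    ("%s: mismatch for va %#x", __func__, va));
}
#endif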

/*****************************************************************************
 *
 * PMAP second stage initialization and utility functions
 * for bootstrap epoch.
 *
 * After pmap_bootstrap() is called, the following functions for
 * mappings can be used:
 *
 * void pmap_kenter(vm_offset_t va, vm_paddr_t pa);
 * void pmap_kremove(vm_offset_t va);
 * vm_offset_t pmap_map(vm_offset_t *virt, vm_paddr_t start, vm_paddr_t end,
 *     int prot);
 *
 * NOTE: This stage is not SMP coherent.  Physical page allocation is
 * not allowed during this stage.
 *
 *****************************************************************************/

/*
 * Initialize kernel PMAP locks and lists, kernel_pmap itself, and
 * reserve various virtual spaces for temporary mappings.
 */
void
pmap_bootstrap(vm_offset_t firstaddr)
{
	pt2_entry_t *unused __unused;
	struct pcpu *pc;

	/*
	 * Initialize the kernel pmap (which is statically allocated).
	 */
	PMAP_LOCK_INIT(kernel_pmap);
	kernel_l1pa = (vm_paddr_t)kern_pt1;  /* for libkvm */
	kernel_pmap->pm_pt1 = kern_pt1;
	kernel_pmap->pm_pt2tab = kern_pt2tab;
	CPU_FILL(&kernel_pmap->pm_active); /* don't allow deactivation */
	TAILQ_INIT(&kernel_pmap->pm_pvchunk);

	/*
	 * Initialize the global pv list lock.
	 */
	rw_init(&pvh_global_lock, "pmap pv global");

	LIST_INIT(&allpmaps);

	/*
	 * Request a spin mutex so that changes to allpmaps cannot be
	 * preempted by smp_rendezvous_cpus().
	 */
	mtx_init(&allpmaps_lock, "allpmaps", NULL, MTX_SPIN);
	mtx_lock_spin(&allpmaps_lock);
	LIST_INSERT_HEAD(&allpmaps, kernel_pmap, pm_list);
	mtx_unlock_spin(&allpmaps_lock);

	/*
	 * Reserve some special page table entries/VA space for temporary
	 * mapping of pages.
	 */
#define SYSMAP(c, p, v, n)  do {		\
	v = (c)pmap_preboot_reserve_pages(n);	\
	p = pt2map_entry((vm_offset_t)v);	\
	} while (0)

	/*
	 * Local CMAP1/CMAP2 are used for zeroing and copying pages.
	 * Local CMAP2 is also used for data cache cleaning.
	 * Global CMAP3 is used for the idle process page zeroing.
	 */
	pc = get_pcpu();
	mtx_init(&pc->pc_cmap_lock, "SYSMAPS", NULL, MTX_DEF);
	SYSMAP(caddr_t, pc->pc_cmap1_pte2p, pc->pc_cmap1_addr, 1);
	SYSMAP(caddr_t, pc->pc_cmap2_pte2p, pc->pc_cmap2_addr, 1);
	SYSMAP(vm_offset_t, pc->pc_qmap_pte2p, pc->pc_qmap_addr, 1);
	SYSMAP(caddr_t, CMAP3, CADDR3, 1);

	/*
	 * Crashdump maps.
	 */
	SYSMAP(caddr_t, unused, crashdumpmap, MAXDUMPPGS);

	/*
	 * _tmppt is used for reading arbitrary physical pages via /dev/mem.
	 */
	SYSMAP(caddr_t, unused, _tmppt, 1);

	/*
	 * PADDR1 and PADDR2 are used by pmap_pte2_quick() and pmap_pte2(),
	 * respectively.  PADDR3 is used by pmap_pte2_ddb().
	 */
	SYSMAP(pt2_entry_t *, PMAP1, PADDR1, 1);
	SYSMAP(pt2_entry_t *, PMAP2, PADDR2, 1);
#ifdef DDB
	SYSMAP(pt2_entry_t *, PMAP3, PADDR3, 1);
#endif
	mtx_init(&PMAP2mutex, "PMAP2", NULL, MTX_DEF);

	/*
	 * Note that shortly, in initarm(), we are going to initialize the
	 * phys_avail[] array, and no further page allocation can happen
	 * after that until the vm subsystem is initialized.
	 */
	kernel_vm_end_new = kernel_vm_end;
	virtual_end = vm_max_kernel_address;
}
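
/*
 * Illustrative sketch (hypothetical, compiled out): after pmap_bootstrap(),
 * wired kernel mappings can be entered and torn down with pmap_kenter()
 * and pmap_kremove() defined below.  Neither is SMP coherent, so the
 * caller is responsible for the TLB flushes.
 */
#if 0
static void
kenter_usage_sketch(vm_offset_t va, vm_paddr_t pa)
{

	pmap_kenter(va, pa);	/* PT2 for 'va' must already exist */
	tlb_flush(va);
	/* ... use the mapping ... */
	pmap_kremove(va);
	tlb_flush(va);
}
#endif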

static void
pmap_init_reserved_pages(void)
{
	struct pcpu *pc;
	vm_offset_t pages;
	int i;

	CPU_FOREACH(i) {
		pc = pcpu_find(i);
		/*
		 * Skip if the mapping has already been initialized,
		 * i.e. this is the BSP.
		 */
		if (pc->pc_cmap1_addr != 0)
			continue;
		mtx_init(&pc->pc_cmap_lock, "SYSMAPS", NULL, MTX_DEF);
		pages = kva_alloc(PAGE_SIZE * 3);
		if (pages == 0)
			panic("%s: unable to allocate KVA", __func__);
		pc->pc_cmap1_pte2p = pt2map_entry(pages);
		pc->pc_cmap2_pte2p = pt2map_entry(pages + PAGE_SIZE);
		pc->pc_qmap_pte2p = pt2map_entry(pages + (PAGE_SIZE * 2));
		pc->pc_cmap1_addr = (caddr_t)pages;
		pc->pc_cmap2_addr = (caddr_t)(pages + PAGE_SIZE);
		pc->pc_qmap_addr = pages + (PAGE_SIZE * 2);
	}
}
SYSINIT(rpages_init, SI_SUB_CPU, SI_ORDER_ANY, pmap_init_reserved_pages, NULL);

/*
 * The function can already be used in the second initialization stage.
 * As such, the function DOES NOT call pmap_growkernel() where PT2
 * allocation can happen.  So if used, be sure that the PT2 for the given
 * virtual address is already allocated!
 *
 * Add a wired page to the kva.
 * Note: not SMP coherent.
 */
static __inline void
pmap_kenter_prot_attr(vm_offset_t va, vm_paddr_t pa, uint32_t prot,
    uint32_t attr)
{
	pt1_entry_t *pte1p;
	pt2_entry_t *pte2p;

	pte1p = kern_pte1(va);
	if (!pte1_is_valid(pte1_load(pte1p))) { /* XXX - sections ?! */
		/*
		 * This is a very low level function, so the PT2 and
		 * particularly the PT2PG associated with the given virtual
		 * address must already be allocated.  It's a pain mainly
		 * during the pmap initialization stage.  However, calling
		 * it after pmap initialization with a virtual address not
		 * under kernel_vm_end will lead to the same misery.
		 */
		if (!pte2_is_valid(pte2_load(kern_pt2tab_entry(va))))
			panic("%s: kernel PT2 not allocated!", __func__);
	}

	pte2p = pt2map_entry(va);
	pte2_store(pte2p, PTE2_KERN(pa, prot, attr));
}

PMAP_INLINE void
pmap_kenter(vm_offset_t va, vm_paddr_t pa)
{

	pmap_kenter_prot_attr(va, pa, PTE2_AP_KRW, PTE2_ATTR_DEFAULT);
}

/*
 * Remove a page from the kernel pagetables.
 * Note: not SMP coherent.
 */
PMAP_INLINE void
pmap_kremove(vm_offset_t va)
{
	pt2_entry_t *pte2p;

	pte2p = pt2map_entry(va);
	pte2_clear(pte2p);
}

/*
 * Share new kernel PT2PG with all pmaps.
 * The caller is responsible for maintaining TLB consistency.
 */
static void
pmap_kenter_pt2tab(vm_offset_t va, pt2_entry_t npte2)
{
	pmap_t pmap;
	pt2_entry_t *pte2p;

	mtx_lock_spin(&allpmaps_lock);
	LIST_FOREACH(pmap, &allpmaps, pm_list) {
		pte2p = pmap_pt2tab_entry(pmap, va);
		pt2tab_store(pte2p, npte2);
	}
	mtx_unlock_spin(&allpmaps_lock);
}

/*
 * Share new kernel PTE1 with all pmaps.
 * The caller is responsible for maintaining TLB consistency.
 */
1306 */ 1307static void 1308pmap_kenter_pte1(vm_offset_t va, pt1_entry_t npte1) 1309{ 1310 pmap_t pmap; 1311 pt1_entry_t *pte1p; 1312 1313 mtx_lock_spin(&allpmaps_lock); 1314 LIST_FOREACH(pmap, &allpmaps, pm_list) { 1315 pte1p = pmap_pte1(pmap, va); 1316 pte1_store(pte1p, npte1); 1317 } 1318 mtx_unlock_spin(&allpmaps_lock); 1319} 1320 1321/* 1322 * Used to map a range of physical addresses into kernel 1323 * virtual address space. 1324 * 1325 * The value passed in '*virt' is a suggested virtual address for 1326 * the mapping. Architectures which can support a direct-mapped 1327 * physical to virtual region can return the appropriate address 1328 * within that region, leaving '*virt' unchanged. Other 1329 * architectures should map the pages starting at '*virt' and 1330 * update '*virt' with the first usable address after the mapped 1331 * region. 1332 * 1333 * NOTE: Read the comments above pmap_kenter_prot_attr() as 1334 * the function is used herein! 1335 */ 1336vm_offset_t 1337pmap_map(vm_offset_t *virt, vm_paddr_t start, vm_paddr_t end, int prot) 1338{ 1339 vm_offset_t va, sva; 1340 vm_paddr_t pte1_offset; 1341 pt1_entry_t npte1; 1342 uint32_t l1prot, l2prot; 1343 uint32_t l1attr, l2attr; 1344 1345 PDEBUG(1, printf("%s: virt = %#x, start = %#x, end = %#x (size = %#x)," 1346 " prot = %d\n", __func__, *virt, start, end, end - start, prot)); 1347 1348 l2prot = (prot & VM_PROT_WRITE) ? PTE2_AP_KRW : PTE2_AP_KR; 1349 l2prot |= (prot & VM_PROT_EXECUTE) ? PTE2_X : PTE2_NX; 1350 l1prot = ATTR_TO_L1(l2prot); 1351 1352 l2attr = PTE2_ATTR_DEFAULT; 1353 l1attr = ATTR_TO_L1(l2attr); 1354 1355 va = *virt; 1356 /* 1357 * Does the physical address range's size and alignment permit at 1358 * least one section mapping to be created? 1359 */ 1360 pte1_offset = start & PTE1_OFFSET; 1361 if ((end - start) - ((PTE1_SIZE - pte1_offset) & PTE1_OFFSET) >= 1362 PTE1_SIZE) { 1363 /* 1364 * Increase the starting virtual address so that its alignment 1365 * does not preclude the use of section mappings. 1366 */ 1367 if ((va & PTE1_OFFSET) < pte1_offset) 1368 va = pte1_trunc(va) + pte1_offset; 1369 else if ((va & PTE1_OFFSET) > pte1_offset) 1370 va = pte1_roundup(va) + pte1_offset; 1371 } 1372 sva = va; 1373 while (start < end) { 1374 if ((start & PTE1_OFFSET) == 0 && end - start >= PTE1_SIZE) { 1375 KASSERT((va & PTE1_OFFSET) == 0, 1376 ("%s: misaligned va %#x", __func__, va)); 1377 npte1 = PTE1_KERN(start, l1prot, l1attr); 1378 pmap_kenter_pte1(va, npte1); 1379 va += PTE1_SIZE; 1380 start += PTE1_SIZE; 1381 } else { 1382 pmap_kenter_prot_attr(va, start, l2prot, l2attr); 1383 va += PAGE_SIZE; 1384 start += PAGE_SIZE; 1385 } 1386 } 1387 tlb_flush_range(sva, va - sva); 1388 *virt = va; 1389 return (sva); 1390} 1391 1392/* 1393 * Make a temporary mapping for a physical address. 1394 * This is only intended to be used for panic dumps. 1395 */ 1396void * 1397pmap_kenter_temporary(vm_paddr_t pa, int i) 1398{ 1399 vm_offset_t va; 1400 1401 /* QQQ: 'i' should be less or equal to MAXDUMPPGS. */ 1402 1403 va = (vm_offset_t)crashdumpmap + (i * PAGE_SIZE); 1404 pmap_kenter(va, pa); 1405 tlb_flush_local(va); 1406 return ((void *)crashdumpmap); 1407} 1408 1409 1410/************************************* 1411 * 1412 * TLB & cache maintenance routines. 1413 * 1414 *************************************/ 1415 1416/* 1417 * We inline these within pmap.c for speed. 
1418 */ 1419PMAP_INLINE void 1420pmap_tlb_flush(pmap_t pmap, vm_offset_t va) 1421{ 1422 1423 if (pmap == kernel_pmap || !CPU_EMPTY(&pmap->pm_active)) 1424 tlb_flush(va); 1425} 1426 1427PMAP_INLINE void 1428pmap_tlb_flush_range(pmap_t pmap, vm_offset_t sva, vm_size_t size) 1429{ 1430 1431 if (pmap == kernel_pmap || !CPU_EMPTY(&pmap->pm_active)) 1432 tlb_flush_range(sva, size); 1433} 1434 1435/* 1436 * Abuse the pte2 nodes for unmapped kva to thread a kva freelist through. 1437 * Requirements: 1438 * - Must deal with pages in order to ensure that none of the PTE2_* bits 1439 * are ever set, PTE2_V in particular. 1440 * - Assumes we can write to pte2s without pte2_store() atomic ops. 1441 * - Assumes nothing will ever test these addresses for 0 to indicate 1442 * no mapping instead of correctly checking PTE2_V. 1443 * - Assumes a vm_offset_t will fit in a pte2 (true for arm). 1444 * Because PTE2_V is never set, there can be no mappings to invalidate. 1445 */ 1446static vm_offset_t 1447pmap_pte2list_alloc(vm_offset_t *head) 1448{ 1449 pt2_entry_t *pte2p; 1450 vm_offset_t va; 1451 1452 va = *head; 1453 if (va == 0) 1454 panic("pmap_ptelist_alloc: exhausted ptelist KVA"); 1455 pte2p = pt2map_entry(va); 1456 *head = *pte2p; 1457 if (*head & PTE2_V) 1458 panic("%s: va with PTE2_V set!", __func__); 1459 *pte2p = 0; 1460 return (va); 1461} 1462 1463static void 1464pmap_pte2list_free(vm_offset_t *head, vm_offset_t va) 1465{ 1466 pt2_entry_t *pte2p; 1467 1468 if (va & PTE2_V) 1469 panic("%s: freeing va with PTE2_V set!", __func__); 1470 pte2p = pt2map_entry(va); 1471 *pte2p = *head; /* virtual! PTE2_V is 0 though */ 1472 *head = va; 1473} 1474 1475static void 1476pmap_pte2list_init(vm_offset_t *head, void *base, int npages) 1477{ 1478 int i; 1479 vm_offset_t va; 1480 1481 *head = 0; 1482 for (i = npages - 1; i >= 0; i--) { 1483 va = (vm_offset_t)base + i * PAGE_SIZE; 1484 pmap_pte2list_free(head, va); 1485 } 1486} 1487 1488/***************************************************************************** 1489 * 1490 * PMAP third and final stage initialization. 1491 * 1492 * After pmap_init() is called, PMAP subsystem is fully initialized. 

/*****************************************************************************
 *
 * PMAP third and final stage initialization.
 *
 * After pmap_init() is called, the PMAP subsystem is fully initialized.
 *
 *****************************************************************************/

SYSCTL_NODE(_vm, OID_AUTO, pmap, CTLFLAG_RD, 0, "VM/pmap parameters");

SYSCTL_INT(_vm_pmap, OID_AUTO, pv_entry_max, CTLFLAG_RD, &pv_entry_max, 0,
    "Max number of PV entries");
SYSCTL_INT(_vm_pmap, OID_AUTO, shpgperproc, CTLFLAG_RD, &shpgperproc, 0,
    "Page share factor per proc");

static u_long nkpt2pg = NKPT2PG;
SYSCTL_ULONG(_vm_pmap, OID_AUTO, nkpt2pg, CTLFLAG_RD,
    &nkpt2pg, 0, "Pre-allocated pages for kernel PT2s");

static int sp_enabled = 1;
SYSCTL_INT(_vm_pmap, OID_AUTO, sp_enabled, CTLFLAG_RDTUN | CTLFLAG_NOFETCH,
    &sp_enabled, 0, "Are large page mappings enabled?");

static SYSCTL_NODE(_vm_pmap, OID_AUTO, pte1, CTLFLAG_RD, 0,
    "1MB page mapping counters");

static u_long pmap_pte1_demotions;
SYSCTL_ULONG(_vm_pmap_pte1, OID_AUTO, demotions, CTLFLAG_RD,
    &pmap_pte1_demotions, 0, "1MB page demotions");

static u_long pmap_pte1_mappings;
SYSCTL_ULONG(_vm_pmap_pte1, OID_AUTO, mappings, CTLFLAG_RD,
    &pmap_pte1_mappings, 0, "1MB page mappings");

static u_long pmap_pte1_p_failures;
SYSCTL_ULONG(_vm_pmap_pte1, OID_AUTO, p_failures, CTLFLAG_RD,
    &pmap_pte1_p_failures, 0, "1MB page promotion failures");

static u_long pmap_pte1_promotions;
SYSCTL_ULONG(_vm_pmap_pte1, OID_AUTO, promotions, CTLFLAG_RD,
    &pmap_pte1_promotions, 0, "1MB page promotions");

static u_long pmap_pte1_kern_demotions;
SYSCTL_ULONG(_vm_pmap_pte1, OID_AUTO, kern_demotions, CTLFLAG_RD,
    &pmap_pte1_kern_demotions, 0, "1MB page kernel demotions");

static u_long pmap_pte1_kern_promotions;
SYSCTL_ULONG(_vm_pmap_pte1, OID_AUTO, kern_promotions, CTLFLAG_RD,
    &pmap_pte1_kern_promotions, 0, "1MB page kernel promotions");

static __inline ttb_entry_t
pmap_ttb_get(pmap_t pmap)
{

	return (vtophys(pmap->pm_pt1) | ttb_flags);
}

/*
 * Initialize a vm_page's machine-dependent fields.
 *
 * Variations:
 * 1. Pages for L2 page tables are never managed.  So, pv_list and
 *    pt2_wirecount can share the same physical space.  However, proper
 *    initialization on page alloc for page tables and reinitialization
 *    on page free must be ensured.
 */
void
pmap_page_init(vm_page_t m)
{

	TAILQ_INIT(&m->md.pv_list);
	pt2_wirecount_init(m);
	m->md.pat_mode = VM_MEMATTR_DEFAULT;
}

/*
 * Virtualization for a faster way to zero a whole page.
 */
static __inline void
pagezero(void *page)
{

	bzero(page, PAGE_SIZE);
}

/*
 * Zero L2 page table page.
 * Use the same KVA as in pmap_zero_page().
 */
static __inline vm_paddr_t
pmap_pt2pg_zero(vm_page_t m)
{
	pt2_entry_t *cmap2_pte2p;
	vm_paddr_t pa;
	struct pcpu *pc;

	pa = VM_PAGE_TO_PHYS(m);

	/*
	 * XXX: For now, we map the whole page even if it's already zero,
	 *      to sync it even if the sync is only a DSB.
	 */
	sched_pin();
	pc = get_pcpu();
	cmap2_pte2p = pc->pc_cmap2_pte2p;
	mtx_lock(&pc->pc_cmap_lock);
	if (pte2_load(cmap2_pte2p) != 0)
		panic("%s: CMAP2 busy", __func__);
	pte2_store(cmap2_pte2p, PTE2_KERN_NG(pa, PTE2_AP_KRW,
	    vm_page_pte2_attr(m)));
	/* Even a VM_ALLOC_ZERO request is only advisory. */
	if ((m->flags & PG_ZERO) == 0)
		pagezero(pc->pc_cmap2_addr);
	pte2_sync_range((pt2_entry_t *)pc->pc_cmap2_addr, PAGE_SIZE);
	pte2_clear(cmap2_pte2p);
	tlb_flush((vm_offset_t)pc->pc_cmap2_addr);

	/*
	 * Unpin the thread before releasing the lock.  Otherwise the thread
	 * could be rescheduled while still bound to the current CPU, only
	 * to unpin itself immediately upon resuming execution.
	 */
	sched_unpin();
	mtx_unlock(&pc->pc_cmap_lock);

	return (pa);
}

/*
 * Initialize a just allocated page as an L2 page table(s) holder
 * and return its physical address.
 */
static __inline vm_paddr_t
pmap_pt2pg_init(pmap_t pmap, vm_offset_t va, vm_page_t m)
{
	vm_paddr_t pa;
	pt2_entry_t *pte2p;

	/* Check page attributes. */
	if (m->md.pat_mode != pt_memattr)
		pmap_page_set_memattr(m, pt_memattr);

	/* Zero page and init wire counts. */
	pa = pmap_pt2pg_zero(m);
	pt2_wirecount_init(m);

	/*
	 * Map page to PT2MAP address space for given pmap.
	 * Note that PT2MAP space is shared with all pmaps.
	 */
	if (pmap == kernel_pmap)
		pmap_kenter_pt2tab(va, PTE2_KPT(pa));
	else {
		pte2p = pmap_pt2tab_entry(pmap, va);
		pt2tab_store(pte2p, PTE2_KPT_NG(pa));
	}

	return (pa);
}
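
/*
 * Illustrative sketch (hypothetical, compiled out): how a caller might
 * obtain and initialize a new L2 page table page.  The allocation flags
 * mirror the real ones used in pmap_growkernel() below; the caller itself
 * is made up.
 */
#if 0
static vm_paddr_t
pt2pg_alloc_sketch(pmap_t pmap, vm_offset_t va)
{
	vm_page_t m;

	m = vm_page_alloc(NULL, pte1_index(va) & ~PT2PG_MASK,
	    VM_ALLOC_NOOBJ | VM_ALLOC_WIRED | VM_ALLOC_ZERO);
	if (m == NULL)
		return (0);

	/* Zeroes (if needed), resets wire counts, enters it into PT2TAB. */
	return (pmap_pt2pg_init(pmap, va, m));
}
#endif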

/*
 * Initialize the pmap module.
 * Called by vm_init, to initialize any structures that the pmap
 * system needs to map virtual memory.
 */
void
pmap_init(void)
{
	vm_size_t s;
	pt2_entry_t *pte2p, pte2;
	u_int i, pte1_idx, pv_npg;

	PDEBUG(1, printf("%s: phys_start = %#x\n", __func__, PHYSADDR));

	/*
	 * Initialize the vm page array entries for kernel pmap's
	 * L2 page table pages allocated in advance.
	 */
	pte1_idx = pte1_index(KERNBASE - PT2MAP_SIZE);
	pte2p = kern_pt2tab_entry(KERNBASE - PT2MAP_SIZE);
	for (i = 0; i < nkpt2pg + NPG_IN_PT2TAB; i++, pte2p++) {
		vm_paddr_t pa;
		vm_page_t m;

		pte2 = pte2_load(pte2p);
		KASSERT(pte2_is_valid(pte2), ("%s: no valid entry", __func__));

		pa = pte2_pa(pte2);
		m = PHYS_TO_VM_PAGE(pa);
		KASSERT(m >= vm_page_array &&
		    m < &vm_page_array[vm_page_array_size],
		    ("%s: L2 page table page is out of range", __func__));

		m->pindex = pte1_idx;
		m->phys_addr = pa;
		pte1_idx += NPT2_IN_PG;
	}

	/*
	 * Initialize the address space (zone) for the pv entries.  Set a
	 * high water mark so that the system can recover from excessive
	 * numbers of pv entries.
	 */
	TUNABLE_INT_FETCH("vm.pmap.shpgperproc", &shpgperproc);
	pv_entry_max = shpgperproc * maxproc + vm_cnt.v_page_count;
	TUNABLE_INT_FETCH("vm.pmap.pv_entries", &pv_entry_max);
	pv_entry_max = roundup(pv_entry_max, _NPCPV);
	pv_entry_high_water = 9 * (pv_entry_max / 10);

	/*
	 * Are large page mappings enabled?
	 */
	TUNABLE_INT_FETCH("vm.pmap.sp_enabled", &sp_enabled);
	if (sp_enabled) {
		KASSERT(MAXPAGESIZES > 1 && pagesizes[1] == 0,
		    ("%s: can't assign to pagesizes[1]", __func__));
		pagesizes[1] = PTE1_SIZE;
	}

	/*
	 * Calculate the size of the pv head table for sections.
	 * Handle the possibility that "vm_phys_segs[...].end" is zero.
	 * Note that the table is only for sections which could be promoted.
	 */
	first_managed_pa = pte1_trunc(vm_phys_segs[0].start);
	pv_npg = (pte1_trunc(vm_phys_segs[vm_phys_nsegs - 1].end - PAGE_SIZE)
	    - first_managed_pa) / PTE1_SIZE + 1;

	/*
	 * Allocate memory for the pv head table for sections.
	 */
	s = (vm_size_t)(pv_npg * sizeof(struct md_page));
	s = round_page(s);
	pv_table = (struct md_page *)kmem_malloc(kernel_arena, s,
	    M_WAITOK | M_ZERO);
	for (i = 0; i < pv_npg; i++)
		TAILQ_INIT(&pv_table[i].pv_list);

	pv_maxchunks = MAX(pv_entry_max / _NPCPV, maxproc);
	pv_chunkbase = (struct pv_chunk *)kva_alloc(PAGE_SIZE * pv_maxchunks);
	if (pv_chunkbase == NULL)
		panic("%s: not enough kvm for pv chunks", __func__);
	pmap_pte2list_init(&pv_vafree, pv_chunkbase, pv_maxchunks);
}

/*
 * Add a list of wired pages to the kva.  This routine is only used for
 * temporary kernel mappings that do not need to have page modification
 * or references recorded.  Note that old mappings are simply written
 * over.  The page *must* be wired.
 * Note: SMP coherent.  Uses a ranged shootdown IPI.
 */
void
pmap_qenter(vm_offset_t sva, vm_page_t *ma, int count)
{
	u_int anychanged;
	pt2_entry_t *epte2p, *pte2p, pte2;
	vm_page_t m;
	vm_paddr_t pa;

	anychanged = 0;
	pte2p = pt2map_entry(sva);
	epte2p = pte2p + count;
	while (pte2p < epte2p) {
		m = *ma++;
		pa = VM_PAGE_TO_PHYS(m);
		pte2 = pte2_load(pte2p);
		if ((pte2_pa(pte2) != pa) ||
		    (pte2_attr(pte2) != vm_page_pte2_attr(m))) {
			anychanged++;
			pte2_store(pte2p, PTE2_KERN(pa, PTE2_AP_KRW,
			    vm_page_pte2_attr(m)));
		}
		pte2p++;
	}
	if (__predict_false(anychanged))
		tlb_flush_range(sva, count * PAGE_SIZE);
}

/*
 * This routine tears out page mappings from the
 * kernel -- it is meant only for temporary mappings.
 * Note: SMP coherent.  Uses a ranged shootdown IPI.
 */
void
pmap_qremove(vm_offset_t sva, int count)
{
	vm_offset_t va;

	va = sva;
	while (count-- > 0) {
		pmap_kremove(va);
		va += PAGE_SIZE;
	}
	tlb_flush_range(sva, va - sva);
}

/*
 * Are we current address space or kernel?
 */
static __inline int
pmap_is_current(pmap_t pmap)
{

	return (pmap == kernel_pmap ||
	    (pmap == vmspace_pmap(curthread->td_proc->p_vmspace)));
}

/*
 * If the given pmap is not the current or kernel pmap, the returned
 * pte2 must be released by passing it to pmap_pte2_release().
 */
static pt2_entry_t *
pmap_pte2(pmap_t pmap, vm_offset_t va)
{
	pt1_entry_t pte1;
	vm_paddr_t pt2pg_pa;

	pte1 = pte1_load(pmap_pte1(pmap, va));
	if (pte1_is_section(pte1))
		panic("%s: attempt to map PTE1", __func__);
	if (pte1_is_link(pte1)) {
		/* Are we current address space or kernel? */
		if (pmap_is_current(pmap))
			return (pt2map_entry(va));
		/* Note that L2 page table size is not equal to PAGE_SIZE. */
		pt2pg_pa = trunc_page(pte1_link_pa(pte1));
		mtx_lock(&PMAP2mutex);
		if (pte2_pa(pte2_load(PMAP2)) != pt2pg_pa) {
			pte2_store(PMAP2, PTE2_KPT(pt2pg_pa));
			tlb_flush((vm_offset_t)PADDR2);
		}
		return (PADDR2 + (arm32_btop(va) & (NPTE2_IN_PG - 1)));
	}
	return (NULL);
}
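
/*
 * Illustrative sketch (hypothetical, compiled out): the lookup/release
 * pairing required by pmap_pte2().  pmap_extract() below is a real user
 * of this pattern.
 */
#if 0
static pt2_entry_t
pte2_lookup_sketch(pmap_t pmap, vm_offset_t va)
{
	pt2_entry_t *pte2p, pte2;

	PMAP_LOCK(pmap);
	pte2p = pmap_pte2(pmap, va);	/* may take PMAP2mutex */
	pte2 = (pte2p != NULL) ? pte2_load(pte2p) : 0;
	pmap_pte2_release(pte2p);	/* NULL-safe */
	PMAP_UNLOCK(pmap);
	return (pte2);
}
#endif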
1830 */ 1831static __inline void 1832pmap_pte2_release(pt2_entry_t *pte2p) 1833{ 1834 1835 if ((pt2_entry_t *)(trunc_page((vm_offset_t)pte2p)) == PADDR2) { 1836 mtx_unlock(&PMAP2mutex); 1837 } 1838} 1839 1840/* 1841 * Super fast pmap_pte2 routine best used when scanning 1842 * the pv lists. This eliminates many coarse-grained 1843 * invltlb calls. Note that many of the pv list 1844 * scans are across different pmaps. It is very wasteful 1845 * to do an entire tlb flush for checking a single mapping. 1846 * 1847 * If the given pmap is not the current pmap, pvh_global_lock 1848 * must be held and curthread pinned to a CPU. 1849 */ 1850static pt2_entry_t * 1851pmap_pte2_quick(pmap_t pmap, vm_offset_t va) 1852{ 1853 pt1_entry_t pte1; 1854 vm_paddr_t pt2pg_pa; 1855 1856 pte1 = pte1_load(pmap_pte1(pmap, va)); 1857 if (pte1_is_section(pte1)) 1858 panic("%s: attempt to map PTE1", __func__); 1859 if (pte1_is_link(pte1)) { 1860 /* Are we current address space or kernel? */ 1861 if (pmap_is_current(pmap)) 1862 return (pt2map_entry(va)); 1863 rw_assert(&pvh_global_lock, RA_WLOCKED); 1864 KASSERT(curthread->td_pinned > 0, 1865 ("%s: curthread not pinned", __func__)); 1866 /* Note that L2 page table size is not equal to PAGE_SIZE. */ 1867 pt2pg_pa = trunc_page(pte1_link_pa(pte1)); 1868 if (pte2_pa(pte2_load(PMAP1)) != pt2pg_pa) { 1869 pte2_store(PMAP1, PTE2_KPT(pt2pg_pa)); 1870#ifdef SMP 1871 PMAP1cpu = PCPU_GET(cpuid); 1872#endif 1873 tlb_flush_local((vm_offset_t)PADDR1); 1874 PMAP1changed++; 1875 } else 1876#ifdef SMP 1877 if (PMAP1cpu != PCPU_GET(cpuid)) { 1878 PMAP1cpu = PCPU_GET(cpuid); 1879 tlb_flush_local((vm_offset_t)PADDR1); 1880 PMAP1changedcpu++; 1881 } else 1882#endif 1883 PMAP1unchanged++; 1884 return (PADDR1 + (arm32_btop(va) & (NPTE2_IN_PG - 1))); 1885 } 1886 return (NULL); 1887} 1888 1889/* 1890 * Routine: pmap_extract 1891 * Function: 1892 * Extract the physical page address associated 1893 * with the given map/virtual_address pair. 1894 */ 1895vm_paddr_t 1896pmap_extract(pmap_t pmap, vm_offset_t va) 1897{ 1898 vm_paddr_t pa; 1899 pt1_entry_t pte1; 1900 pt2_entry_t *pte2p; 1901 1902 PMAP_LOCK(pmap); 1903 pte1 = pte1_load(pmap_pte1(pmap, va)); 1904 if (pte1_is_section(pte1)) 1905 pa = pte1_pa(pte1) | (va & PTE1_OFFSET); 1906 else if (pte1_is_link(pte1)) { 1907 pte2p = pmap_pte2(pmap, va); 1908 pa = pte2_pa(pte2_load(pte2p)) | (va & PTE2_OFFSET); 1909 pmap_pte2_release(pte2p); 1910 } else 1911 pa = 0; 1912 PMAP_UNLOCK(pmap); 1913 return (pa); 1914} 1915 1916/* 1917 * Routine: pmap_extract_and_hold 1918 * Function: 1919 * Atomically extract and hold the physical page 1920 * with the given pmap and virtual address pair 1921 * if that mapping permits the given protection. 
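 *
 * A hedged usage sketch (not from the original source): the returned
 * page, if any, is held and the caller must drop the hold:
 *
 *	vm_page_t m;
 *
 *	m = pmap_extract_and_hold(pmap, va, VM_PROT_WRITE);
 *	if (m != NULL) {
 *		... use the page ...
 *		vm_page_unhold(m);
 *	}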
1922 */ 1923vm_page_t 1924pmap_extract_and_hold(pmap_t pmap, vm_offset_t va, vm_prot_t prot) 1925{ 1926 vm_paddr_t pa, lockpa; 1927 pt1_entry_t pte1; 1928 pt2_entry_t pte2, *pte2p; 1929 vm_page_t m; 1930 1931 lockpa = 0; 1932 m = NULL; 1933 PMAP_LOCK(pmap); 1934retry: 1935 pte1 = pte1_load(pmap_pte1(pmap, va)); 1936 if (pte1_is_section(pte1)) { 1937 if (!(pte1 & PTE1_RO) || !(prot & VM_PROT_WRITE)) { 1938 pa = pte1_pa(pte1) | (va & PTE1_OFFSET); 1939 if (vm_page_pa_tryrelock(pmap, pa, &lockpa)) 1940 goto retry; 1941 m = PHYS_TO_VM_PAGE(pa); 1942 vm_page_hold(m); 1943 } 1944 } else if (pte1_is_link(pte1)) { 1945 pte2p = pmap_pte2(pmap, va); 1946 pte2 = pte2_load(pte2p); 1947 pmap_pte2_release(pte2p); 1948 if (pte2_is_valid(pte2) && 1949 (!(pte2 & PTE2_RO) || !(prot & VM_PROT_WRITE))) { 1950 pa = pte2_pa(pte2); 1951 if (vm_page_pa_tryrelock(pmap, pa, &lockpa)) 1952 goto retry; 1953 m = PHYS_TO_VM_PAGE(pa); 1954 vm_page_hold(m); 1955 } 1956 } 1957 PA_UNLOCK_COND(lockpa); 1958 PMAP_UNLOCK(pmap); 1959 return (m); 1960} 1961 1962/* 1963 * Grow the number of kernel L2 page table entries, if needed. 1964 */ 1965void 1966pmap_growkernel(vm_offset_t addr) 1967{ 1968 vm_page_t m; 1969 vm_paddr_t pt2pg_pa, pt2_pa; 1970 pt1_entry_t pte1; 1971 pt2_entry_t pte2; 1972 1973 PDEBUG(1, printf("%s: addr = %#x\n", __func__, addr)); 1974 /* 1975 * At all times, kernel_vm_end is the first KVA for which the 1976 * underlying L2 page table is either not allocated or not linked from 1977 * the L1 page table (not considering sections). Except for two possible cases: 1978 * 1979 * (1) in the very beginning, as long as pmap_growkernel() has 1980 * not been called, it could be the first unused KVA (which is not 1981 * rounded up to PTE1_SIZE), 1982 * 1983 * (2) when all KVA space is mapped and kernel_map->max_offset 1984 * address is not rounded up to PTE1_SIZE. (For example, 1985 * it could be 0xFFFFFFFF.) 1986 */ 1987 kernel_vm_end = pte1_roundup(kernel_vm_end); 1988 mtx_assert(&kernel_map->system_mtx, MA_OWNED); 1989 addr = roundup2(addr, PTE1_SIZE); 1990 if (addr - 1 >= kernel_map->max_offset) 1991 addr = kernel_map->max_offset; 1992 while (kernel_vm_end < addr) { 1993 pte1 = pte1_load(kern_pte1(kernel_vm_end)); 1994 if (pte1_is_valid(pte1)) { 1995 kernel_vm_end += PTE1_SIZE; 1996 if (kernel_vm_end - 1 >= kernel_map->max_offset) { 1997 kernel_vm_end = kernel_map->max_offset; 1998 break; 1999 } 2000 continue; 2001 } 2002 2003 /* 2004 * kernel_vm_end_new is used in pmap_pinit() when kernel 2005 * mappings are entered into a new pmap all at once to avoid 2006 * a race between pmap_kenter_pte1() and the kernel_vm_end increase. 2007 * The same applies to pmap_kenter_pt2tab(). 2008 */ 2009 kernel_vm_end_new = kernel_vm_end + PTE1_SIZE; 2010 2011 pte2 = pt2tab_load(kern_pt2tab_entry(kernel_vm_end)); 2012 if (!pte2_is_valid(pte2)) { 2013 /* 2014 * Install new PT2s page into kernel PT2TAB. 2015 */ 2016 m = vm_page_alloc(NULL, 2017 pte1_index(kernel_vm_end) & ~PT2PG_MASK, 2018 VM_ALLOC_INTERRUPT | VM_ALLOC_NOOBJ | 2019 VM_ALLOC_WIRED | VM_ALLOC_ZERO); 2020 if (m == NULL) 2021 panic("%s: no memory to grow kernel", __func__); 2022 /* 2023 * QQQ: Linking all new L2 page tables from the L1 page 2024 * table now, and so pmap_kenter_pte1()ing them 2025 * at once together with pmap_kenter_pt2tab(), 2026 * could be a nice speedup. However, 2027 * pmap_growkernel() does not happen so often... 2028 * QQQ: The other TTBR is another option.
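 * Illustrative arithmetic (assumed request address, not from this
 * file): with PTE1_SIZE == 0x100000, a request to grow the KVA up
 * to addr == 0x12345678 is rounded by roundup2() to 0x12400000,
 * and the surrounding loop then advances kernel_vm_end in 1 MB
 * steps, installing one L2 page table link per step.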
2029 */ 2030 pt2pg_pa = pmap_pt2pg_init(kernel_pmap, kernel_vm_end, 2031 m); 2032 } else 2033 pt2pg_pa = pte2_pa(pte2); 2034 2035 pt2_pa = page_pt2pa(pt2pg_pa, pte1_index(kernel_vm_end)); 2036 pmap_kenter_pte1(kernel_vm_end, PTE1_LINK(pt2_pa)); 2037 2038 kernel_vm_end = kernel_vm_end_new; 2039 if (kernel_vm_end - 1 >= kernel_map->max_offset) { 2040 kernel_vm_end = kernel_map->max_offset; 2041 break; 2042 } 2043 } 2044} 2045 2046static int 2047kvm_size(SYSCTL_HANDLER_ARGS) 2048{ 2049 unsigned long ksize = vm_max_kernel_address - KERNBASE; 2050 2051 return (sysctl_handle_long(oidp, &ksize, 0, req)); 2052} 2053SYSCTL_PROC(_vm, OID_AUTO, kvm_size, CTLTYPE_LONG|CTLFLAG_RD, 2054 0, 0, kvm_size, "IU", "Size of KVM"); 2055 2056static int 2057kvm_free(SYSCTL_HANDLER_ARGS) 2058{ 2059 unsigned long kfree = vm_max_kernel_address - kernel_vm_end; 2060 2061 return (sysctl_handle_long(oidp, &kfree, 0, req)); 2062} 2063SYSCTL_PROC(_vm, OID_AUTO, kvm_free, CTLTYPE_LONG|CTLFLAG_RD, 2064 0, 0, kvm_free, "IU", "Amount of KVM free"); 2065 2066/*********************************************** 2067 * 2068 * Pmap allocation/deallocation routines. 2069 * 2070 ***********************************************/ 2071 2072/* 2073 * Initialize the pmap for the swapper process. 2074 */ 2075void 2076pmap_pinit0(pmap_t pmap) 2077{ 2078 PDEBUG(1, printf("%s: pmap = %p\n", __func__, pmap)); 2079 2080 PMAP_LOCK_INIT(pmap); 2081 2082 /* 2083 * Kernel page table directory and pmap stuff around is already 2084 * initialized, we are using it right now and here. So, finish 2085 * only PMAP structures initialization for process0 ... 2086 * 2087 * Since the L1 page table and PT2TAB is shared with the kernel pmap, 2088 * which is already included in the list "allpmaps", this pmap does 2089 * not need to be inserted into that list. 2090 */ 2091 pmap->pm_pt1 = kern_pt1; 2092 pmap->pm_pt2tab = kern_pt2tab; 2093 CPU_ZERO(&pmap->pm_active); 2094 PCPU_SET(curpmap, pmap); 2095 TAILQ_INIT(&pmap->pm_pvchunk); 2096 bzero(&pmap->pm_stats, sizeof pmap->pm_stats); 2097 CPU_SET(0, &pmap->pm_active); 2098} 2099 2100static __inline void 2101pte1_copy_nosync(pt1_entry_t *spte1p, pt1_entry_t *dpte1p, vm_offset_t sva, 2102 vm_offset_t eva) 2103{ 2104 u_int idx, count; 2105 2106 idx = pte1_index(sva); 2107 count = (pte1_index(eva) - idx + 1) * sizeof(pt1_entry_t); 2108 bcopy(spte1p + idx, dpte1p + idx, count); 2109} 2110 2111static __inline void 2112pt2tab_copy_nosync(pt2_entry_t *spte2p, pt2_entry_t *dpte2p, vm_offset_t sva, 2113 vm_offset_t eva) 2114{ 2115 u_int idx, count; 2116 2117 idx = pt2tab_index(sva); 2118 count = (pt2tab_index(eva) - idx + 1) * sizeof(pt2_entry_t); 2119 bcopy(spte2p + idx, dpte2p + idx, count); 2120} 2121 2122/* 2123 * Initialize a preallocated and zeroed pmap structure, 2124 * such as one in a vmspace structure. 2125 */ 2126int 2127pmap_pinit(pmap_t pmap) 2128{ 2129 pt1_entry_t *pte1p; 2130 pt2_entry_t *pte2p; 2131 vm_paddr_t pa, pt2tab_pa; 2132 u_int i; 2133 2134 PDEBUG(6, printf("%s: pmap = %p, pm_pt1 = %p\n", __func__, pmap, 2135 pmap->pm_pt1)); 2136 2137 /* 2138 * No need to allocate L2 page table space yet but we do need 2139 * a valid L1 page table and PT2TAB table. 2140 * 2141 * Install shared kernel mappings to these tables. It's a little 2142 * tricky as some parts of KVA are reserved for vectors, devices, 2143 * and whatever else. These parts are supposed to be above 2144 * vm_max_kernel_address. 
Thus two regions should be installed: 2145 * 2146 * (1) <KERNBASE, kernel_vm_end), 2147 * (2) <vm_max_kernel_address, 0xFFFFFFFF>. 2148 * 2149 * QQQ: The second region should be stable enough to be installed 2150 * only once, at the time the tables are allocated. 2151 * QQQ: Maybe copying both regions at once could be faster ... 2152 * QQQ: Maybe the other TTBR is an option. 2153 * 2154 * Finally, install the pmap's own PT2TAB table into these tables. 2155 */ 2156 2157 if (pmap->pm_pt1 == NULL) { 2158 pmap->pm_pt1 = (pt1_entry_t *)kmem_alloc_contig(kernel_arena, 2159 NB_IN_PT1, M_NOWAIT | M_ZERO, 0, -1UL, NB_IN_PT1, 0, 2160 pt_memattr); 2161 if (pmap->pm_pt1 == NULL) 2162 return (0); 2163 } 2164 if (pmap->pm_pt2tab == NULL) { 2165 /* 2166 * QQQ: (1) PT2TAB must be contiguous. If PT2TAB is one page 2167 * only, which should be the only size for 32-bit systems, 2168 * then we could allocate it with vm_page_alloc() and handle 2169 * all the needed stuff as for other L2 page table pages. 2170 * (2) Note that a process PT2TAB is a special L2 page table 2171 * page. Its mapping in kernel_arena is permanent and can 2172 * be used no matter which process is current. Its mapping 2173 * in PT2MAP can be used only for the current process. 2174 */ 2175 pmap->pm_pt2tab = (pt2_entry_t *)kmem_alloc_attr(kernel_arena, 2176 NB_IN_PT2TAB, M_NOWAIT | M_ZERO, 0, -1UL, pt_memattr); 2177 if (pmap->pm_pt2tab == NULL) { 2178 /* 2179 * QQQ: As struct pmap is allocated from UMA with 2180 * UMA_ZONE_NOFREE flag, it's important to leave 2181 * no allocation in pmap if initialization failed. 2182 */ 2183 kmem_free(kernel_arena, (vm_offset_t)pmap->pm_pt1, 2184 NB_IN_PT1); 2185 pmap->pm_pt1 = NULL; 2186 return (0); 2187 } 2188 /* 2189 * QQQ: Each L2 page table page vm_page_t has pindex set to 2190 * the pte1 index of the virtual address mapped by this page. 2191 * It's not valid for non-kernel PT2TABs themselves. 2192 * The pindex of these pages cannot be altered because 2193 * of the way they are allocated now. However, it 2194 * should not be a problem. 2195 */ 2196 } 2197 2198 mtx_lock_spin(&allpmaps_lock); 2199 /* 2200 * To avoid a race with pmap_kenter_pte1() and pmap_kenter_pt2tab(), 2201 * kernel_vm_end_new is used here instead of kernel_vm_end. 2202 */ 2203 pte1_copy_nosync(kern_pt1, pmap->pm_pt1, KERNBASE, 2204 kernel_vm_end_new - 1); 2205 pte1_copy_nosync(kern_pt1, pmap->pm_pt1, vm_max_kernel_address, 2206 0xFFFFFFFF); 2207 pt2tab_copy_nosync(kern_pt2tab, pmap->pm_pt2tab, KERNBASE, 2208 kernel_vm_end_new - 1); 2209 pt2tab_copy_nosync(kern_pt2tab, pmap->pm_pt2tab, vm_max_kernel_address, 2210 0xFFFFFFFF); 2211 LIST_INSERT_HEAD(&allpmaps, pmap, pm_list); 2212 mtx_unlock_spin(&allpmaps_lock); 2213 2214 /* 2215 * Store PT2MAP PT2 pages (a.k.a. PT2TAB) in PT2TAB itself, 2216 * i.e. a self-reference mapping. The PT2TAB is private, however it is 2217 * mapped into the shared PT2MAP space, so the mapping should not be global. 2218 */ 2219 pt2tab_pa = vtophys(pmap->pm_pt2tab); 2220 pte2p = pmap_pt2tab_entry(pmap, (vm_offset_t)PT2MAP); 2221 for (pa = pt2tab_pa, i = 0; i < NPG_IN_PT2TAB; i++, pa += PTE2_SIZE) { 2222 pt2tab_store(pte2p++, PTE2_KPT_NG(pa)); 2223 } 2224 2225 /* Insert PT2MAP PT2s into pmap PT1. */ 2226 pte1p = pmap_pte1(pmap, (vm_offset_t)PT2MAP); 2227 for (pa = pt2tab_pa, i = 0; i < NPT2_IN_PT2TAB; i++, pa += NB_IN_PT2) { 2228 pte1_store(pte1p++, PTE1_LINK(pa)); 2229 } 2230 2231 /* 2232 * Now synchronize the new mappings which were made above.
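 * (A note on the sync below, stated as an assumption about the
 * ARMv6/v7 platform: the hardware table walk may not snoop the
 * D-cache, so pte1_sync_range() and pte2_sync_range() are expected
 * to clean the cache lines holding the new table entries before the
 * tables can be handed to the MMU.)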
2233 */ 2234 pte1_sync_range(pmap->pm_pt1, NB_IN_PT1); 2235 pte2_sync_range(pmap->pm_pt2tab, NB_IN_PT2TAB); 2236 2237 CPU_ZERO(&pmap->pm_active); 2238 TAILQ_INIT(&pmap->pm_pvchunk); 2239 bzero(&pmap->pm_stats, sizeof pmap->pm_stats); 2240 2241 return (1); 2242} 2243 2244#ifdef INVARIANTS 2245static boolean_t 2246pt2tab_user_is_empty(pt2_entry_t *tab) 2247{ 2248 u_int i, end; 2249 2250 end = pt2tab_index(VM_MAXUSER_ADDRESS); 2251 for (i = 0; i < end; i++) 2252 if (tab[i] != 0) return (FALSE); 2253 return (TRUE); 2254} 2255#endif 2256/* 2257 * Release any resources held by the given physical map. 2258 * Called when a pmap initialized by pmap_pinit is being released. 2259 * Should only be called if the map contains no valid mappings. 2260 */ 2261void 2262pmap_release(pmap_t pmap) 2263{ 2264#ifdef INVARIANTS 2265 vm_offset_t start, end; 2266#endif 2267 KASSERT(pmap->pm_stats.resident_count == 0, 2268 ("%s: pmap resident count %ld != 0", __func__, 2269 pmap->pm_stats.resident_count)); 2270 KASSERT(pt2tab_user_is_empty(pmap->pm_pt2tab), 2271 ("%s: has allocated user PT2(s)", __func__)); 2272 KASSERT(CPU_EMPTY(&pmap->pm_active), 2273 ("%s: pmap %p is active on some CPU(s)", __func__, pmap)); 2274 2275 mtx_lock_spin(&allpmaps_lock); 2276 LIST_REMOVE(pmap, pm_list); 2277 mtx_unlock_spin(&allpmaps_lock); 2278 2279#ifdef INVARIANTS 2280 start = pte1_index(KERNBASE) * sizeof(pt1_entry_t); 2281 end = (pte1_index(0xFFFFFFFF) + 1) * sizeof(pt1_entry_t); 2282 bzero((char *)pmap->pm_pt1 + start, end - start); 2283 2284 start = pt2tab_index(KERNBASE) * sizeof(pt2_entry_t); 2285 end = (pt2tab_index(0xFFFFFFFF) + 1) * sizeof(pt2_entry_t); 2286 bzero((char *)pmap->pm_pt2tab + start, end - start); 2287#endif 2288 /* 2289 * We are leaving PT1 and PT2TAB allocated on the released pmap, 2290 * so hopefully UMA vmspace_zone will always be initialized with 2291 * the UMA_ZONE_NOFREE flag. 2292 */ 2293} 2294 2295/********************************************************* 2296 * 2297 * L2 table pages and their page management routines. 2298 * 2299 *********************************************************/ 2300 2301/* 2302 * Virtual interface for L2 page table wire counting. 2303 * 2304 * Each L2 page table in a page has its own counter which counts the 2305 * number of valid mappings in the table. The global page counter counts 2306 * mappings in all tables in the page plus a single self mapping in PT2TAB. 2307 * 2308 * During a promotion we leave the associated L2 page table counter 2309 * untouched, so the table (strictly speaking a page which holds it) 2310 * is never freed if promoted. 2311 * 2312 * If a page's m->wire_count == 1, then no valid mappings exist in any L2 page 2313 * table in the page and the page itself is only mapped in PT2TAB. 2314 */ 2315 2316static __inline void 2317pt2_wirecount_init(vm_page_t m) 2318{ 2319 u_int i; 2320 2321 /* 2322 * Note: A page m is allocated with the VM_ALLOC_WIRED flag and 2323 * m->wire_count should already be set correctly. 2324 * So, there is no need to set it again herein. 2325 */ 2326 for (i = 0; i < NPT2_IN_PG; i++) 2327 m->md.pt2_wirecount[i] = 0; 2328} 2329 2330static __inline void 2331pt2_wirecount_inc(vm_page_t m, uint32_t pte1_idx) 2332{ 2333 2334 /* 2335 * Note: A just modified pte2 (i.e. already allocated) 2336 * acquires one extra reference which must be 2337 * explicitly cleared. It influences the KASSERTs herein. 2338 * All L2 page tables in a page always belong to the same 2339 * pmap, so we allow only one extra reference for the page.
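 *
 * The intended steady-state invariant can be sketched as (an
 * illustration of the rules above, not an assertion from the
 * original source):
 *
 *	m->wire_count == 1 + m->md.pt2_wirecount[0] + ...
 *	    + m->md.pt2_wirecount[NPT2_IN_PG - 1]
 *
 * so a page whose L2 page tables hold no valid mappings satisfies
 * pt2pg_is_empty(), modulo the one transient extra reference noted
 * above.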
2340 */ 2341 KASSERT(m->md.pt2_wirecount[pte1_idx & PT2PG_MASK] < (NPTE2_IN_PT2 + 1), 2342 ("%s: PT2 is overflowing ...", __func__)); 2343 KASSERT(m->wire_count <= (NPTE2_IN_PG + 1), 2344 ("%s: PT2PG is overflowing ...", __func__)); 2345 2346 m->wire_count++; 2347 m->md.pt2_wirecount[pte1_idx & PT2PG_MASK]++; 2348} 2349 2350static __inline void 2351pt2_wirecount_dec(vm_page_t m, uint32_t pte1_idx) 2352{ 2353 2354 KASSERT(m->md.pt2_wirecount[pte1_idx & PT2PG_MASK] != 0, 2355 ("%s: PT2 is underflowing ...", __func__)); 2356 KASSERT(m->wire_count > 1, 2357 ("%s: PT2PG is underflowing ...", __func__)); 2358 2359 m->wire_count--; 2360 m->md.pt2_wirecount[pte1_idx & PT2PG_MASK]--; 2361} 2362 2363static __inline void 2364pt2_wirecount_set(vm_page_t m, uint32_t pte1_idx, uint16_t count) 2365{ 2366 2367 KASSERT(count <= NPTE2_IN_PT2, 2368 ("%s: invalid count %u", __func__, count)); 2369 KASSERT(m->wire_count > m->md.pt2_wirecount[pte1_idx & PT2PG_MASK], 2370 ("%s: PT2PG corrupting (%u, %u) ...", __func__, m->wire_count, 2371 m->md.pt2_wirecount[pte1_idx & PT2PG_MASK])); 2372 2373 m->wire_count -= m->md.pt2_wirecount[pte1_idx & PT2PG_MASK]; 2374 m->wire_count += count; 2375 m->md.pt2_wirecount[pte1_idx & PT2PG_MASK] = count; 2376 2377 KASSERT(m->wire_count <= (NPTE2_IN_PG + 1), 2378 ("%s: PT2PG is overflowed (%u) ...", __func__, m->wire_count)); 2379} 2380 2381static __inline uint32_t 2382pt2_wirecount_get(vm_page_t m, uint32_t pte1_idx) 2383{ 2384 2385 return (m->md.pt2_wirecount[pte1_idx & PT2PG_MASK]); 2386} 2387 2388static __inline boolean_t 2389pt2_is_empty(vm_page_t m, vm_offset_t va) 2390{ 2391 2392 return (m->md.pt2_wirecount[pte1_index(va) & PT2PG_MASK] == 0); 2393} 2394 2395static __inline boolean_t 2396pt2_is_full(vm_page_t m, vm_offset_t va) 2397{ 2398 2399 return (m->md.pt2_wirecount[pte1_index(va) & PT2PG_MASK] == 2400 NPTE2_IN_PT2); 2401} 2402 2403static __inline boolean_t 2404pt2pg_is_empty(vm_page_t m) 2405{ 2406 2407 return (m->wire_count == 1); 2408} 2409 2410/* 2411 * This routine is called if the L2 page table 2412 * is not mapped correctly. 2413 */ 2414static vm_page_t 2415_pmap_allocpte2(pmap_t pmap, vm_offset_t va, u_int flags) 2416{ 2417 uint32_t pte1_idx; 2418 pt1_entry_t *pte1p; 2419 pt2_entry_t pte2; 2420 vm_page_t m; 2421 vm_paddr_t pt2pg_pa, pt2_pa; 2422 2423 pte1_idx = pte1_index(va); 2424 pte1p = pmap->pm_pt1 + pte1_idx; 2425 2426 KASSERT(pte1_load(pte1p) == 0, 2427 ("%s: pm_pt1[%#x] is not zero: %#x", __func__, pte1_idx, 2428 pte1_load(pte1p))); 2429 2430 pte2 = pt2tab_load(pmap_pt2tab_entry(pmap, va)); 2431 if (!pte2_is_valid(pte2)) { 2432 /* 2433 * Install new PT2s page into pmap PT2TAB. 2434 */ 2435 m = vm_page_alloc(NULL, pte1_idx & ~PT2PG_MASK, 2436 VM_ALLOC_NOOBJ | VM_ALLOC_WIRED | VM_ALLOC_ZERO); 2437 if (m == NULL) { 2438 if ((flags & PMAP_ENTER_NOSLEEP) == 0) { 2439 PMAP_UNLOCK(pmap); 2440 rw_wunlock(&pvh_global_lock); 2441 VM_WAIT; 2442 rw_wlock(&pvh_global_lock); 2443 PMAP_LOCK(pmap); 2444 } 2445 2446 /* 2447 * Indicate the need to retry. While waiting, 2448 * the L2 page table page may have been allocated. 
2449 */ 2450 return (NULL); 2451 } 2452 pmap->pm_stats.resident_count++; 2453 pt2pg_pa = pmap_pt2pg_init(pmap, va, m); 2454 } else { 2455 pt2pg_pa = pte2_pa(pte2); 2456 m = PHYS_TO_VM_PAGE(pt2pg_pa); 2457 } 2458 2459 pt2_wirecount_inc(m, pte1_idx); 2460 pt2_pa = page_pt2pa(pt2pg_pa, pte1_idx); 2461 pte1_store(pte1p, PTE1_LINK(pt2_pa)); 2462 2463 return (m); 2464} 2465 2466static vm_page_t 2467pmap_allocpte2(pmap_t pmap, vm_offset_t va, u_int flags) 2468{ 2469 u_int pte1_idx; 2470 pt1_entry_t *pte1p, pte1; 2471 vm_page_t m; 2472 2473 pte1_idx = pte1_index(va); 2474retry: 2475 pte1p = pmap->pm_pt1 + pte1_idx; 2476 pte1 = pte1_load(pte1p); 2477 2478 /* 2479 * This supports switching from a 1MB page to a 2480 * normal 4K page. 2481 */ 2482 if (pte1_is_section(pte1)) { 2483 (void)pmap_demote_pte1(pmap, pte1p, va); 2484 /* 2485 * Reload pte1 after demotion. 2486 * 2487 * Note: Demotion can even fail, as either the PT2 is not found 2488 * for the virtual address or the PT2PG cannot be allocated. 2489 */ 2490 pte1 = pte1_load(pte1p); 2491 } 2492 2493 /* 2494 * If the L2 page table page is mapped, we just increment the 2495 * hold count, and activate it. 2496 */ 2497 if (pte1_is_link(pte1)) { 2498 m = PHYS_TO_VM_PAGE(pte1_link_pa(pte1)); 2499 pt2_wirecount_inc(m, pte1_idx); 2500 } else { 2501 /* 2502 * We get here if the PT2 isn't mapped, or if it has 2503 * been deallocated. 2504 */ 2505 m = _pmap_allocpte2(pmap, va, flags); 2506 if (m == NULL && (flags & PMAP_ENTER_NOSLEEP) == 0) 2507 goto retry; 2508 } 2509 2510 return (m); 2511} 2512 2513static __inline void 2514pmap_free_zero_pages(struct spglist *free) 2515{ 2516 vm_page_t m; 2517 2518 while ((m = SLIST_FIRST(free)) != NULL) { 2519 SLIST_REMOVE_HEAD(free, plinks.s.ss); 2520 /* Preserve the page's PG_ZERO setting. */ 2521 vm_page_free_toq(m); 2522 } 2523} 2524 2525/* 2526 * Schedule the specified unused L2 page table page to be freed. Specifically, 2527 * add the page to the specified list of pages that will be released to the 2528 * physical memory manager after the TLB has been updated. 2529 */ 2530static __inline void 2531pmap_add_delayed_free_list(vm_page_t m, struct spglist *free) 2532{ 2533 2534 /* 2535 * Put the page on a list so that it is released after 2536 * *ALL* TLB shootdown is done. 2537 */ 2538#ifdef PMAP_DEBUG 2539 pmap_zero_page_check(m); 2540#endif 2541 m->flags |= PG_ZERO; 2542 SLIST_INSERT_HEAD(free, m, plinks.s.ss); 2543} 2544 2545/* 2546 * Unwire a page of L2 page tables. 2547 */ 2548static void 2549pmap_unwire_pt2pg(pmap_t pmap, vm_offset_t va, vm_page_t m) 2550{ 2551 pt1_entry_t *pte1p, opte1 __unused; 2552 pt2_entry_t *pte2p; 2553 uint32_t i; 2554 2555 KASSERT(pt2pg_is_empty(m), 2556 ("%s: pmap %p PT2PG %p wired", __func__, pmap, m)); 2557 2558 /* 2559 * Unmap all L2 page tables in the page from L1 page table. 2560 * 2561 * QQQ: Individual L2 page tables (except the last one) could be 2562 * unmapped earlier. However, we are doing it this way. 2563 */ 2564 KASSERT(m->pindex == (pte1_index(va) & ~PT2PG_MASK), 2565 ("%s: pmap %p va %#x PT2PG %p bad index", __func__, pmap, va, m)); 2566 pte1p = pmap->pm_pt1 + m->pindex; 2567 for (i = 0; i < NPT2_IN_PG; i++, pte1p++) { 2568 KASSERT(m->md.pt2_wirecount[i] == 0, 2569 ("%s: pmap %p PT2 %u (PG %p) wired", __func__, pmap, i, m)); 2570 opte1 = pte1_load(pte1p); 2571 if (pte1_is_link(opte1)) { 2572 pte1_clear(pte1p); 2573 /* 2574 * Flush intermediate TLB cache.
2575 */ 2576 pmap_tlb_flush(pmap, (m->pindex + i) << PTE1_SHIFT); 2577 } 2578#ifdef INVARIANTS 2579 else 2580 KASSERT((opte1 == 0) || pte1_is_section(opte1), 2581 ("%s: pmap %p va %#x bad pte1 %x at %u", __func__, 2582 pmap, va, opte1, i)); 2583#endif 2584 } 2585 2586 /* 2587 * Unmap the page from PT2TAB. 2588 */ 2589 pte2p = pmap_pt2tab_entry(pmap, va); 2590 (void)pt2tab_load_clear(pte2p); 2591 pmap_tlb_flush(pmap, pt2map_pt2pg(va)); 2592 2593 m->wire_count = 0; 2594 pmap->pm_stats.resident_count--; 2595 2596 /* 2597 * This is a release store so that the ordinary store unmapping 2598 * the L2 page table page is globally performed before TLB shoot- 2599 * down is begun. 2600 */ 2601 atomic_subtract_rel_int(&vm_cnt.v_wire_count, 1); 2602} 2603 2604/* 2605 * Decrements an L2 page table page's wire count, which is used to record the 2606 * number of valid page table entries within the page. If the wire count 2607 * drops to zero, then the page table page is unmapped. Returns TRUE if the 2608 * page table page was unmapped and FALSE otherwise. 2609 */ 2610static __inline boolean_t 2611pmap_unwire_pt2(pmap_t pmap, vm_offset_t va, vm_page_t m, struct spglist *free) 2612{ 2613 pt2_wirecount_dec(m, pte1_index(va)); 2614 if (pt2pg_is_empty(m)) { 2615 /* 2616 * QQQ: The wire count is zero, so the whole page should be 2617 * zero and we can set the PG_ZERO flag on it. 2618 * Note that when promotion is enabled, it takes somewhat 2619 * more effort. See pmap_unwire_pt2_all() below. 2620 */ 2621 pmap_unwire_pt2pg(pmap, va, m); 2622 pmap_add_delayed_free_list(m, free); 2623 return (TRUE); 2624 } else 2625 return (FALSE); 2626} 2627 2628/* 2629 * Drop an L2 page table page's wire count at once, which is used to record 2630 * the number of valid L2 page table entries within the page. If the wire 2631 * count drops to zero, then the L2 page table page is unmapped. 2632 */ 2633static __inline void 2634pmap_unwire_pt2_all(pmap_t pmap, vm_offset_t va, vm_page_t m, 2635 struct spglist *free) 2636{ 2637 u_int pte1_idx = pte1_index(va); 2638 2639 KASSERT(m->pindex == (pte1_idx & ~PT2PG_MASK), 2640 ("%s: PT2 page's pindex is wrong", __func__)); 2641 KASSERT(m->wire_count > pt2_wirecount_get(m, pte1_idx), 2642 ("%s: bad pt2 wire count %u > %u", __func__, m->wire_count, 2643 pt2_wirecount_get(m, pte1_idx))); 2644 2645 /* 2646 * It's possible that the L2 page table was never used. 2647 * This happens when a section was created without promotion. 2648 */ 2649 if (pt2_is_full(m, va)) { 2650 pt2_wirecount_set(m, pte1_idx, 0); 2651 2652 /* 2653 * QQQ: We clear the L2 page table now, so that when the L2 2654 * page table page is going to be freed, we can set the 2655 * PG_ZERO flag on it ... This function is called only on 2656 * section mappings, so hopefully it's not too big an overhead. 2657 * 2658 * XXX: If the pmap is current, the existing PT2MAP mapping 2659 * could be used for zeroing. 2660 */ 2661 pmap_zero_page_area(m, page_pt2off(pte1_idx), NB_IN_PT2); 2662 } 2663#ifdef INVARIANTS 2664 else 2665 KASSERT(pt2_is_empty(m, va), ("%s: PT2 is not empty (%u)", 2666 __func__, pt2_wirecount_get(m, pte1_idx))); 2667#endif 2668 if (pt2pg_is_empty(m)) { 2669 pmap_unwire_pt2pg(pmap, va, m); 2670 pmap_add_delayed_free_list(m, free); 2671 } 2672} 2673 2674/* 2675 * After removing an L2 page table entry, this routine is used to 2676 * conditionally free the page, and manage the hold/wire counts.
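 *
 * A typical removal path pairs this routine with the delayed free
 * list (an illustrative sketch using only names from this file):
 *
 *	struct spglist free;
 *
 *	SLIST_INIT(&free);
 *	... clear the pte2 and flush the TLB ...
 *	pmap_unuse_pt2(pmap, va, &free);
 *	...
 *	pmap_free_zero_pages(&free);
 *
 * so that the L2 page table page is returned to the physical memory
 * allocator only after all TLB shootdown is done.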
2677 */ 2678static boolean_t 2679pmap_unuse_pt2(pmap_t pmap, vm_offset_t va, struct spglist *free) 2680{ 2681 pt1_entry_t pte1; 2682 vm_page_t mpte; 2683 2684 if (va >= VM_MAXUSER_ADDRESS) 2685 return (FALSE); 2686 pte1 = pte1_load(pmap_pte1(pmap, va)); 2687 mpte = PHYS_TO_VM_PAGE(pte1_link_pa(pte1)); 2688 return (pmap_unwire_pt2(pmap, va, mpte, free)); 2689} 2690 2691/************************************* 2692 * 2693 * Page management routines. 2694 * 2695 *************************************/ 2696 2697CTASSERT(sizeof(struct pv_chunk) == PAGE_SIZE); 2698CTASSERT(_NPCM == 11); 2699CTASSERT(_NPCPV == 336); 2700 2701static __inline struct pv_chunk * 2702pv_to_chunk(pv_entry_t pv) 2703{ 2704 2705 return ((struct pv_chunk *)((uintptr_t)pv & ~(uintptr_t)PAGE_MASK)); 2706} 2707 2708#define PV_PMAP(pv) (pv_to_chunk(pv)->pc_pmap) 2709 2710#define PC_FREE0_9 0xfffffffful /* Free values for index 0 through 9 */ 2711#define PC_FREE10 0x0000fffful /* Free values for index 10 */ 2712 2713static const uint32_t pc_freemask[_NPCM] = { 2714 PC_FREE0_9, PC_FREE0_9, PC_FREE0_9, 2715 PC_FREE0_9, PC_FREE0_9, PC_FREE0_9, 2716 PC_FREE0_9, PC_FREE0_9, PC_FREE0_9, 2717 PC_FREE0_9, PC_FREE10 2718}; 2719 2720SYSCTL_INT(_vm_pmap, OID_AUTO, pv_entry_count, CTLFLAG_RD, &pv_entry_count, 0, 2721 "Current number of pv entries"); 2722 2723#ifdef PV_STATS 2724static int pc_chunk_count, pc_chunk_allocs, pc_chunk_frees, pc_chunk_tryfail; 2725 2726SYSCTL_INT(_vm_pmap, OID_AUTO, pc_chunk_count, CTLFLAG_RD, &pc_chunk_count, 0, 2727 "Current number of pv entry chunks"); 2728SYSCTL_INT(_vm_pmap, OID_AUTO, pc_chunk_allocs, CTLFLAG_RD, &pc_chunk_allocs, 0, 2729 "Current number of pv entry chunks allocated"); 2730SYSCTL_INT(_vm_pmap, OID_AUTO, pc_chunk_frees, CTLFLAG_RD, &pc_chunk_frees, 0, 2731 "Current number of pv entry chunks frees"); 2732SYSCTL_INT(_vm_pmap, OID_AUTO, pc_chunk_tryfail, CTLFLAG_RD, &pc_chunk_tryfail, 2733 0, "Number of times tried to get a chunk page but failed."); 2734 2735static long pv_entry_frees, pv_entry_allocs; 2736static int pv_entry_spare; 2737 2738SYSCTL_LONG(_vm_pmap, OID_AUTO, pv_entry_frees, CTLFLAG_RD, &pv_entry_frees, 0, 2739 "Current number of pv entry frees"); 2740SYSCTL_LONG(_vm_pmap, OID_AUTO, pv_entry_allocs, CTLFLAG_RD, &pv_entry_allocs, 2741 0, "Current number of pv entry allocs"); 2742SYSCTL_INT(_vm_pmap, OID_AUTO, pv_entry_spare, CTLFLAG_RD, &pv_entry_spare, 0, 2743 "Current number of spare pv entries"); 2744#endif 2745 2746/* 2747 * Is given page managed? 2748 */ 2749static __inline boolean_t 2750is_managed(vm_paddr_t pa) 2751{ 2752 vm_offset_t pgnum; 2753 vm_page_t m; 2754 2755 pgnum = atop(pa); 2756 if (pgnum >= first_page) { 2757 m = PHYS_TO_VM_PAGE(pa); 2758 if (m == NULL) 2759 return (FALSE); 2760 if ((m->oflags & VPO_UNMANAGED) == 0) 2761 return (TRUE); 2762 } 2763 return (FALSE); 2764} 2765 2766static __inline boolean_t 2767pte1_is_managed(pt1_entry_t pte1) 2768{ 2769 2770 return (is_managed(pte1_pa(pte1))); 2771} 2772 2773static __inline boolean_t 2774pte2_is_managed(pt2_entry_t pte2) 2775{ 2776 2777 return (is_managed(pte2_pa(pte2))); 2778} 2779 2780/* 2781 * We are in a serious low memory condition. Resort to 2782 * drastic measures to free some pages so we can allocate 2783 * another pv entry chunk. 
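 *
 * Note on the locking below (summarizing the code, not adding new
 * rules): a foreign pmap is locked unconditionally only when it sorts
 * after locked_pmap, which keeps the lock order consistent; otherwise
 * PMAP_TRYLOCK() is used and, on failure, the chunk is put aside and
 * revisited later.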
2784 */ 2785static vm_page_t 2786pmap_pv_reclaim(pmap_t locked_pmap) 2787{ 2788 struct pch newtail; 2789 struct pv_chunk *pc; 2790 struct md_page *pvh; 2791 pt1_entry_t *pte1p; 2792 pmap_t pmap; 2793 pt2_entry_t *pte2p, tpte2; 2794 pv_entry_t pv; 2795 vm_offset_t va; 2796 vm_page_t m, m_pc; 2797 struct spglist free; 2798 uint32_t inuse; 2799 int bit, field, freed; 2800 2801 PMAP_LOCK_ASSERT(locked_pmap, MA_OWNED); 2802 pmap = NULL; 2803 m_pc = NULL; 2804 SLIST_INIT(&free); 2805 TAILQ_INIT(&newtail); 2806 while ((pc = TAILQ_FIRST(&pv_chunks)) != NULL && (pv_vafree == 0 || 2807 SLIST_EMPTY(&free))) { 2808 TAILQ_REMOVE(&pv_chunks, pc, pc_lru); 2809 if (pmap != pc->pc_pmap) { 2810 if (pmap != NULL) { 2811 if (pmap != locked_pmap) 2812 PMAP_UNLOCK(pmap); 2813 } 2814 pmap = pc->pc_pmap; 2815 /* Avoid deadlock and lock recursion. */ 2816 if (pmap > locked_pmap) 2817 PMAP_LOCK(pmap); 2818 else if (pmap != locked_pmap && !PMAP_TRYLOCK(pmap)) { 2819 pmap = NULL; 2820 TAILQ_INSERT_TAIL(&newtail, pc, pc_lru); 2821 continue; 2822 } 2823 } 2824 2825 /* 2826 * Destroy every non-wired, 4 KB page mapping in the chunk. 2827 */ 2828 freed = 0; 2829 for (field = 0; field < _NPCM; field++) { 2830 for (inuse = ~pc->pc_map[field] & pc_freemask[field]; 2831 inuse != 0; inuse &= ~(1UL << bit)) { 2832 bit = ffs(inuse) - 1; 2833 pv = &pc->pc_pventry[field * 32 + bit]; 2834 va = pv->pv_va; 2835 pte1p = pmap_pte1(pmap, va); 2836 if (pte1_is_section(pte1_load(pte1p))) 2837 continue; 2838 pte2p = pmap_pte2(pmap, va); 2839 tpte2 = pte2_load(pte2p); 2840 if ((tpte2 & PTE2_W) == 0) 2841 tpte2 = pte2_load_clear(pte2p); 2842 pmap_pte2_release(pte2p); 2843 if ((tpte2 & PTE2_W) != 0) 2844 continue; 2845 KASSERT(tpte2 != 0, 2846 ("pmap_pv_reclaim: pmap %p va %#x zero pte", 2847 pmap, va)); 2848 pmap_tlb_flush(pmap, va); 2849 m = PHYS_TO_VM_PAGE(pte2_pa(tpte2)); 2850 if (pte2_is_dirty(tpte2)) 2851 vm_page_dirty(m); 2852 if ((tpte2 & PTE2_A) != 0) 2853 vm_page_aflag_set(m, PGA_REFERENCED); 2854 TAILQ_REMOVE(&m->md.pv_list, pv, pv_next); 2855 if (TAILQ_EMPTY(&m->md.pv_list) && 2856 (m->flags & PG_FICTITIOUS) == 0) { 2857 pvh = pa_to_pvh(VM_PAGE_TO_PHYS(m)); 2858 if (TAILQ_EMPTY(&pvh->pv_list)) { 2859 vm_page_aflag_clear(m, 2860 PGA_WRITEABLE); 2861 } 2862 } 2863 pc->pc_map[field] |= 1UL << bit; 2864 pmap_unuse_pt2(pmap, va, &free); 2865 freed++; 2866 } 2867 } 2868 if (freed == 0) { 2869 TAILQ_INSERT_TAIL(&newtail, pc, pc_lru); 2870 continue; 2871 } 2872 /* Every freed mapping is for a 4 KB page. */ 2873 pmap->pm_stats.resident_count -= freed; 2874 PV_STAT(pv_entry_frees += freed); 2875 PV_STAT(pv_entry_spare += freed); 2876 pv_entry_count -= freed; 2877 TAILQ_REMOVE(&pmap->pm_pvchunk, pc, pc_list); 2878 for (field = 0; field < _NPCM; field++) 2879 if (pc->pc_map[field] != pc_freemask[field]) { 2880 TAILQ_INSERT_HEAD(&pmap->pm_pvchunk, pc, 2881 pc_list); 2882 TAILQ_INSERT_TAIL(&newtail, pc, pc_lru); 2883 2884 /* 2885 * One freed pv entry in locked_pmap is 2886 * sufficient. 2887 */ 2888 if (pmap == locked_pmap) 2889 goto out; 2890 break; 2891 } 2892 if (field == _NPCM) { 2893 PV_STAT(pv_entry_spare -= _NPCPV); 2894 PV_STAT(pc_chunk_count--); 2895 PV_STAT(pc_chunk_frees++); 2896 /* Entire chunk is free; return it. 
*/ 2897 m_pc = PHYS_TO_VM_PAGE(pmap_kextract((vm_offset_t)pc)); 2898 pmap_qremove((vm_offset_t)pc, 1); 2899 pmap_pte2list_free(&pv_vafree, (vm_offset_t)pc); 2900 break; 2901 } 2902 } 2903out: 2904 TAILQ_CONCAT(&pv_chunks, &newtail, pc_lru); 2905 if (pmap != NULL) { 2906 if (pmap != locked_pmap) 2907 PMAP_UNLOCK(pmap); 2908 } 2909 if (m_pc == NULL && pv_vafree != 0 && SLIST_EMPTY(&free)) { 2910 m_pc = SLIST_FIRST(&free); 2911 SLIST_REMOVE_HEAD(&free, plinks.s.ss); 2912 /* Recycle a freed page table page. */ 2913 m_pc->wire_count = 1; 2914 atomic_add_int(&vm_cnt.v_wire_count, 1); 2915 } 2916 pmap_free_zero_pages(&free); 2917 return (m_pc); 2918} 2919 2920static void 2921free_pv_chunk(struct pv_chunk *pc) 2922{ 2923 vm_page_t m; 2924 2925 TAILQ_REMOVE(&pv_chunks, pc, pc_lru); 2926 PV_STAT(pv_entry_spare -= _NPCPV); 2927 PV_STAT(pc_chunk_count--); 2928 PV_STAT(pc_chunk_frees++); 2929 /* entire chunk is free, return it */ 2930 m = PHYS_TO_VM_PAGE(pmap_kextract((vm_offset_t)pc)); 2931 pmap_qremove((vm_offset_t)pc, 1); 2932 vm_page_unwire(m, PQ_NONE); 2933 vm_page_free(m); 2934 pmap_pte2list_free(&pv_vafree, (vm_offset_t)pc); 2935} 2936 2937/* 2938 * Free the pv_entry back to the free list. 2939 */ 2940static void 2941free_pv_entry(pmap_t pmap, pv_entry_t pv) 2942{ 2943 struct pv_chunk *pc; 2944 int idx, field, bit; 2945 2946 rw_assert(&pvh_global_lock, RA_WLOCKED); 2947 PMAP_LOCK_ASSERT(pmap, MA_OWNED); 2948 PV_STAT(pv_entry_frees++); 2949 PV_STAT(pv_entry_spare++); 2950 pv_entry_count--; 2951 pc = pv_to_chunk(pv); 2952 idx = pv - &pc->pc_pventry[0]; 2953 field = idx / 32; 2954 bit = idx % 32; 2955 pc->pc_map[field] |= 1ul << bit; 2956 for (idx = 0; idx < _NPCM; idx++) 2957 if (pc->pc_map[idx] != pc_freemask[idx]) { 2958 /* 2959 * 98% of the time, pc is already at the head of the 2960 * list. If it isn't already, move it to the head. 2961 */ 2962 if (__predict_false(TAILQ_FIRST(&pmap->pm_pvchunk) != 2963 pc)) { 2964 TAILQ_REMOVE(&pmap->pm_pvchunk, pc, pc_list); 2965 TAILQ_INSERT_HEAD(&pmap->pm_pvchunk, pc, 2966 pc_list); 2967 } 2968 return; 2969 } 2970 TAILQ_REMOVE(&pmap->pm_pvchunk, pc, pc_list); 2971 free_pv_chunk(pc); 2972} 2973 2974/* 2975 * Get a new pv_entry, allocating a block from the system 2976 * when needed. 
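 *
 * Usage sketch (illustrative only): callers that can back off pass
 * try == TRUE and must handle a NULL return, e.g.:
 *
 *	pv = get_pv_entry(pmap, TRUE);
 *	if (pv == NULL)
 *		return (FALSE);
 *	pv->pv_va = va;
 *	TAILQ_INSERT_TAIL(&m->md.pv_list, pv, pv_next);
 *
 * With try == FALSE the routine never returns NULL: it retries after
 * reclaiming pv chunks via pmap_pv_reclaim().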
2977 */ 2978static pv_entry_t 2979get_pv_entry(pmap_t pmap, boolean_t try) 2980{ 2981 static const struct timeval printinterval = { 60, 0 }; 2982 static struct timeval lastprint; 2983 int bit, field; 2984 pv_entry_t pv; 2985 struct pv_chunk *pc; 2986 vm_page_t m; 2987 2988 rw_assert(&pvh_global_lock, RA_WLOCKED); 2989 PMAP_LOCK_ASSERT(pmap, MA_OWNED); 2990 PV_STAT(pv_entry_allocs++); 2991 pv_entry_count++; 2992 if (pv_entry_count > pv_entry_high_water) 2993 if (ratecheck(&lastprint, &printinterval)) 2994 printf("Approaching the limit on PV entries, consider " 2995 "increasing either the vm.pmap.shpgperproc or the " 2996 "vm.pmap.pv_entry_max tunable.\n"); 2997retry: 2998 pc = TAILQ_FIRST(&pmap->pm_pvchunk); 2999 if (pc != NULL) { 3000 for (field = 0; field < _NPCM; field++) { 3001 if (pc->pc_map[field]) { 3002 bit = ffs(pc->pc_map[field]) - 1; 3003 break; 3004 } 3005 } 3006 if (field < _NPCM) { 3007 pv = &pc->pc_pventry[field * 32 + bit]; 3008 pc->pc_map[field] &= ~(1ul << bit); 3009 /* If this was the last item, move it to tail */ 3010 for (field = 0; field < _NPCM; field++) 3011 if (pc->pc_map[field] != 0) { 3012 PV_STAT(pv_entry_spare--); 3013 return (pv); /* not full, return */ 3014 } 3015 TAILQ_REMOVE(&pmap->pm_pvchunk, pc, pc_list); 3016 TAILQ_INSERT_TAIL(&pmap->pm_pvchunk, pc, pc_list); 3017 PV_STAT(pv_entry_spare--); 3018 return (pv); 3019 } 3020 } 3021 /* 3022 * Access to the pte2list "pv_vafree" is synchronized by the pvh 3023 * global lock. If "pv_vafree" is currently non-empty, it will 3024 * remain non-empty until pmap_pte2list_alloc() completes. 3025 */ 3026 if (pv_vafree == 0 || (m = vm_page_alloc(NULL, 0, VM_ALLOC_NORMAL | 3027 VM_ALLOC_NOOBJ | VM_ALLOC_WIRED)) == NULL) { 3028 if (try) { 3029 pv_entry_count--; 3030 PV_STAT(pc_chunk_tryfail++); 3031 return (NULL); 3032 } 3033 m = pmap_pv_reclaim(pmap); 3034 if (m == NULL) 3035 goto retry; 3036 } 3037 PV_STAT(pc_chunk_count++); 3038 PV_STAT(pc_chunk_allocs++); 3039 pc = (struct pv_chunk *)pmap_pte2list_alloc(&pv_vafree); 3040 pmap_qenter((vm_offset_t)pc, &m, 1); 3041 pc->pc_pmap = pmap; 3042 pc->pc_map[0] = pc_freemask[0] & ~1ul; /* preallocated bit 0 */ 3043 for (field = 1; field < _NPCM; field++) 3044 pc->pc_map[field] = pc_freemask[field]; 3045 TAILQ_INSERT_TAIL(&pv_chunks, pc, pc_lru); 3046 pv = &pc->pc_pventry[0]; 3047 TAILQ_INSERT_HEAD(&pmap->pm_pvchunk, pc, pc_list); 3048 PV_STAT(pv_entry_spare += _NPCPV - 1); 3049 return (pv); 3050} 3051 3052/* 3053 * Create a pv entry for page at pa for 3054 * (pmap, va). 
3055 */ 3056static void 3057pmap_insert_entry(pmap_t pmap, vm_offset_t va, vm_page_t m) 3058{ 3059 pv_entry_t pv; 3060 3061 rw_assert(&pvh_global_lock, RA_WLOCKED); 3062 PMAP_LOCK_ASSERT(pmap, MA_OWNED); 3063 pv = get_pv_entry(pmap, FALSE); 3064 pv->pv_va = va; 3065 TAILQ_INSERT_TAIL(&m->md.pv_list, pv, pv_next); 3066} 3067 3068static __inline pv_entry_t 3069pmap_pvh_remove(struct md_page *pvh, pmap_t pmap, vm_offset_t va) 3070{ 3071 pv_entry_t pv; 3072 3073 rw_assert(&pvh_global_lock, RA_WLOCKED); 3074 TAILQ_FOREACH(pv, &pvh->pv_list, pv_next) { 3075 if (pmap == PV_PMAP(pv) && va == pv->pv_va) { 3076 TAILQ_REMOVE(&pvh->pv_list, pv, pv_next); 3077 break; 3078 } 3079 } 3080 return (pv); 3081} 3082 3083static void 3084pmap_pvh_free(struct md_page *pvh, pmap_t pmap, vm_offset_t va) 3085{ 3086 pv_entry_t pv; 3087 3088 pv = pmap_pvh_remove(pvh, pmap, va); 3089 KASSERT(pv != NULL, ("pmap_pvh_free: pv not found")); 3090 free_pv_entry(pmap, pv); 3091} 3092 3093static void 3094pmap_remove_entry(pmap_t pmap, vm_page_t m, vm_offset_t va) 3095{ 3096 struct md_page *pvh; 3097 3098 rw_assert(&pvh_global_lock, RA_WLOCKED); 3099 pmap_pvh_free(&m->md, pmap, va); 3100 if (TAILQ_EMPTY(&m->md.pv_list) && (m->flags & PG_FICTITIOUS) == 0) { 3101 pvh = pa_to_pvh(VM_PAGE_TO_PHYS(m)); 3102 if (TAILQ_EMPTY(&pvh->pv_list)) 3103 vm_page_aflag_clear(m, PGA_WRITEABLE); 3104 } 3105} 3106 3107static void 3108pmap_pv_demote_pte1(pmap_t pmap, vm_offset_t va, vm_paddr_t pa) 3109{ 3110 struct md_page *pvh; 3111 pv_entry_t pv; 3112 vm_offset_t va_last; 3113 vm_page_t m; 3114 3115 rw_assert(&pvh_global_lock, RA_WLOCKED); 3116 KASSERT((pa & PTE1_OFFSET) == 0, 3117 ("pmap_pv_demote_pte1: pa is not 1mpage aligned")); 3118 3119 /* 3120 * Transfer the 1mpage's pv entry for this mapping to the first 3121 * page's pv list. 3122 */ 3123 pvh = pa_to_pvh(pa); 3124 va = pte1_trunc(va); 3125 pv = pmap_pvh_remove(pvh, pmap, va); 3126 KASSERT(pv != NULL, ("pmap_pv_demote_pte1: pv not found")); 3127 m = PHYS_TO_VM_PAGE(pa); 3128 TAILQ_INSERT_TAIL(&m->md.pv_list, pv, pv_next); 3129 /* Instantiate the remaining NPTE2_IN_PT2 - 1 pv entries. */ 3130 va_last = va + PTE1_SIZE - PAGE_SIZE; 3131 do { 3132 m++; 3133 KASSERT((m->oflags & VPO_UNMANAGED) == 0, 3134 ("pmap_pv_demote_pte1: page %p is not managed", m)); 3135 va += PAGE_SIZE; 3136 pmap_insert_entry(pmap, va, m); 3137 } while (va < va_last); 3138} 3139 3140static void 3141pmap_pv_promote_pte1(pmap_t pmap, vm_offset_t va, vm_paddr_t pa) 3142{ 3143 struct md_page *pvh; 3144 pv_entry_t pv; 3145 vm_offset_t va_last; 3146 vm_page_t m; 3147 3148 rw_assert(&pvh_global_lock, RA_WLOCKED); 3149 KASSERT((pa & PTE1_OFFSET) == 0, 3150 ("pmap_pv_promote_pte1: pa is not 1mpage aligned")); 3151 3152 /* 3153 * Transfer the first page's pv entry for this mapping to the 3154 * 1mpage's pv list. Aside from avoiding the cost of a call 3155 * to get_pv_entry(), a transfer avoids the possibility that 3156 * get_pv_entry() calls pmap_pv_reclaim() and that pmap_pv_reclaim() 3157 * removes one of the mappings that is being promoted. 3158 */ 3159 m = PHYS_TO_VM_PAGE(pa); 3160 va = pte1_trunc(va); 3161 pv = pmap_pvh_remove(&m->md, pmap, va); 3162 KASSERT(pv != NULL, ("pmap_pv_promote_pte1: pv not found")); 3163 pvh = pa_to_pvh(pa); 3164 TAILQ_INSERT_TAIL(&pvh->pv_list, pv, pv_next); 3165 /* Free the remaining NPTE2_IN_PT2 - 1 pv entries. 
*/ 3166 va_last = va + PTE1_SIZE - PAGE_SIZE; 3167 do { 3168 m++; 3169 va += PAGE_SIZE; 3170 pmap_pvh_free(&m->md, pmap, va); 3171 } while (va < va_last); 3172} 3173 3174/* 3175 * Conditionally create a pv entry. 3176 */ 3177static boolean_t 3178pmap_try_insert_pv_entry(pmap_t pmap, vm_offset_t va, vm_page_t m) 3179{ 3180 pv_entry_t pv; 3181 3182 rw_assert(&pvh_global_lock, RA_WLOCKED); 3183 PMAP_LOCK_ASSERT(pmap, MA_OWNED); 3184 if (pv_entry_count < pv_entry_high_water && 3185 (pv = get_pv_entry(pmap, TRUE)) != NULL) { 3186 pv->pv_va = va; 3187 TAILQ_INSERT_TAIL(&m->md.pv_list, pv, pv_next); 3188 return (TRUE); 3189 } else 3190 return (FALSE); 3191} 3192 3193/* 3194 * Create the pv entries for each of the pages within a section. 3195 */ 3196static boolean_t 3197pmap_pv_insert_pte1(pmap_t pmap, vm_offset_t va, vm_paddr_t pa) 3198{ 3199 struct md_page *pvh; 3200 pv_entry_t pv; 3201 3202 rw_assert(&pvh_global_lock, RA_WLOCKED); 3203 if (pv_entry_count < pv_entry_high_water && 3204 (pv = get_pv_entry(pmap, TRUE)) != NULL) { 3205 pv->pv_va = va; 3206 pvh = pa_to_pvh(pa); 3207 TAILQ_INSERT_TAIL(&pvh->pv_list, pv, pv_next); 3208 return (TRUE); 3209 } else 3210 return (FALSE); 3211} 3212 3213static inline void 3214pmap_tlb_flush_pte1(pmap_t pmap, vm_offset_t va, pt1_entry_t npte1) 3215{ 3216 3217 /* Kill all the small mappings or the big one only. */ 3218 if (pte1_is_section(npte1)) 3219 pmap_tlb_flush_range(pmap, pte1_trunc(va), PTE1_SIZE); 3220 else 3221 pmap_tlb_flush(pmap, pte1_trunc(va)); 3222} 3223 3224/* 3225 * Update kernel pte1 on all pmaps. 3226 * 3227 * The following function is called only on one cpu with disabled interrupts. 3228 * In SMP case, smp_rendezvous_cpus() is used to stop other cpus. This way 3229 * nobody can invoke explicit hardware table walk during the update of pte1. 3230 * Unsolicited hardware table walk can still happen, invoked by speculative 3231 * data or instruction prefetch or even by speculative hardware table walk. 3232 * 3233 * The break-before-make approach should be implemented here. However, it's 3234 * not so easy to do that for kernel mappings as it would be unhappy to unmap 3235 * itself unexpectedly but voluntarily. 3236 */ 3237static void 3238pmap_update_pte1_kernel(vm_offset_t va, pt1_entry_t npte1) 3239{ 3240 pmap_t pmap; 3241 pt1_entry_t *pte1p; 3242 3243 /* 3244 * Get current pmap. Interrupts should be disabled here 3245 * so PCPU_GET() is done atomically. 3246 */ 3247 pmap = PCPU_GET(curpmap); 3248 if (pmap == NULL) 3249 pmap = kernel_pmap; 3250 3251 /* 3252 * (1) Change pte1 on current pmap. 3253 * (2) Flush all obsolete TLB entries on current CPU. 3254 * (3) Change pte1 on all pmaps. 3255 * (4) Flush all obsolete TLB entries on all CPUs in SMP case. 3256 */ 3257 3258 pte1p = pmap_pte1(pmap, va); 3259 pte1_store(pte1p, npte1); 3260 3261 /* Kill all the small mappings or the big one only. */ 3262 if (pte1_is_section(npte1)) { 3263 pmap_pte1_kern_promotions++; 3264 tlb_flush_range_local(pte1_trunc(va), PTE1_SIZE); 3265 } else { 3266 pmap_pte1_kern_demotions++; 3267 tlb_flush_local(pte1_trunc(va)); 3268 } 3269 3270 /* 3271 * In SMP case, this function is called when all cpus are at smp 3272 * rendezvous, so there is no need to use 'allpmaps_lock' lock here. 3273 * In UP case, the function is called with this lock locked. 3274 */ 3275 LIST_FOREACH(pmap, &allpmaps, pm_list) { 3276 pte1p = pmap_pte1(pmap, va); 3277 pte1_store(pte1p, npte1); 3278 } 3279 3280#ifdef SMP 3281 /* Kill all the small mappings or the big one only. 
*/ 3282 if (pte1_is_section(npte1)) 3283 tlb_flush_range(pte1_trunc(va), PTE1_SIZE); 3284 else 3285 tlb_flush(pte1_trunc(va)); 3286#endif 3287} 3288 3289#ifdef SMP 3290struct pte1_action { 3291 vm_offset_t va; 3292 pt1_entry_t npte1; 3293 u_int update; /* CPU that updates the PTE1 */ 3294}; 3295 3296static void 3297pmap_update_pte1_action(void *arg) 3298{ 3299 struct pte1_action *act = arg; 3300 3301 if (act->update == PCPU_GET(cpuid)) 3302 pmap_update_pte1_kernel(act->va, act->npte1); 3303} 3304 3305/* 3306 * Change pte1 on current pmap. 3307 * Note that kernel pte1 must be changed on all pmaps. 3308 * 3309 * According to the architecture reference manual published by ARM, 3310 * the behaviour is UNPREDICTABLE when two or more TLB entries map the same VA. 3311 * According to this manual, UNPREDICTABLE behaviours must never happen in 3312 * a viable system. In contrast, on x86 processors, it is not specified which 3313 * TLB entry mapping the virtual address will be used, but the MMU doesn't 3314 * generate a bogus translation the way it does on Cortex-A8 rev 2 (Beaglebone 3315 * Black). 3316 * 3317 * It's a problem when either promotion or demotion is being done. The pte1 3318 * update and appropriate TLB flush must be done atomically in general. 3319 */ 3320static void 3321pmap_change_pte1(pmap_t pmap, pt1_entry_t *pte1p, vm_offset_t va, 3322 pt1_entry_t npte1) 3323{ 3324 3325 if (pmap == kernel_pmap) { 3326 struct pte1_action act; 3327 3328 sched_pin(); 3329 act.va = va; 3330 act.npte1 = npte1; 3331 act.update = PCPU_GET(cpuid); 3332 smp_rendezvous_cpus(all_cpus, smp_no_rendevous_barrier, 3333 pmap_update_pte1_action, NULL, &act); 3334 sched_unpin(); 3335 } else { 3336 register_t cspr; 3337 3338 /* 3339 * Use break-before-make approach for changing userland 3340 * mappings. It can cause L1 translation aborts on other 3341 * cores in SMP case. So, special treatment is implemented 3342 * in pmap_fault(). To reduce the likelihood that another core 3343 * will be affected by the broken mapping, disable interrupts 3344 * until the mapping change is completed. 3345 */ 3346 cspr = disable_interrupts(PSR_I | PSR_F); 3347 pte1_clear(pte1p); 3348 pmap_tlb_flush_pte1(pmap, va, npte1); 3349 pte1_store(pte1p, npte1); 3350 restore_interrupts(cspr); 3351 } 3352} 3353#else 3354static void 3355pmap_change_pte1(pmap_t pmap, pt1_entry_t *pte1p, vm_offset_t va, 3356 pt1_entry_t npte1) 3357{ 3358 3359 if (pmap == kernel_pmap) { 3360 mtx_lock_spin(&allpmaps_lock); 3361 pmap_update_pte1_kernel(va, npte1); 3362 mtx_unlock_spin(&allpmaps_lock); 3363 } else { 3364 register_t cspr; 3365 3366 /* 3367 * Use break-before-make approach for changing userland 3368 * mappings. It's absolutely safe in UP case when interrupts 3369 * are disabled. 3370 */ 3371 cspr = disable_interrupts(PSR_I | PSR_F); 3372 pte1_clear(pte1p); 3373 pmap_tlb_flush_pte1(pmap, va, npte1); 3374 pte1_store(pte1p, npte1); 3375 restore_interrupts(cspr); 3376 } 3377} 3378#endif 3379 3380/* 3381 * Tries to promote the NPTE2_IN_PT2, contiguous 4KB page mappings that are 3382 * within a single page table page (PT2) to a single 1MB page mapping. 3383 * For promotion to occur, two conditions must be met: (1) the 4KB page 3384 * mappings must map aligned, contiguous physical memory and (2) the 4KB page 3385 * mappings must have identical characteristics. 3386 * 3387 * Managed (PG_MANAGED) mappings within the kernel address space are not 3388 * promoted. 
The reason is that kernel PTE1s are replicated in each pmap but 3389 * pmap_remove_write(), pmap_clear_modify(), and pmap_clear_reference() only 3390 * read the PTE1 from the kernel pmap. 3391 */ 3392static void 3393pmap_promote_pte1(pmap_t pmap, pt1_entry_t *pte1p, vm_offset_t va) 3394{ 3395 pt1_entry_t npte1; 3396 pt2_entry_t *fpte2p, fpte2, fpte2_fav; 3397 pt2_entry_t *pte2p, pte2; 3398 vm_offset_t pteva __unused; 3399 vm_page_t m __unused; 3400 3401 PDEBUG(6, printf("%s(%p): try for va %#x pte1 %#x at %p\n", __func__, 3402 pmap, va, pte1_load(pte1p), pte1p)); 3403 3404 PMAP_LOCK_ASSERT(pmap, MA_OWNED); 3405 3406 /* 3407 * Examine the first PTE2 in the specified PT2. Abort if this PTE2 is 3408 * either invalid, unused, or does not map the first 4KB physical page 3409 * within a 1MB page. 3410 */ 3411 fpte2p = pmap_pte2_quick(pmap, pte1_trunc(va)); 3412 fpte2 = pte2_load(fpte2p); 3413 if ((fpte2 & ((PTE2_FRAME & PTE1_OFFSET) | PTE2_A | PTE2_V)) != 3414 (PTE2_A | PTE2_V)) { 3415 pmap_pte1_p_failures++; 3416 CTR3(KTR_PMAP, "%s: failure(1) for va %#x in pmap %p", 3417 __func__, va, pmap); 3418 return; 3419 } 3420 if (pte2_is_managed(fpte2) && pmap == kernel_pmap) { 3421 pmap_pte1_p_failures++; 3422 CTR3(KTR_PMAP, "%s: failure(2) for va %#x in pmap %p", 3423 __func__, va, pmap); 3424 return; 3425 } 3426 if ((fpte2 & (PTE2_NM | PTE2_RO)) == PTE2_NM) { 3427 /* 3428 * When page is not modified, PTE2_RO can be set without 3429 * a TLB invalidation. 3430 */ 3431 fpte2 |= PTE2_RO; 3432 pte2_store(fpte2p, fpte2); 3433 } 3434 3435 /* 3436 * Examine each of the other PTE2s in the specified PT2. Abort if this 3437 * PTE2 maps an unexpected 4KB physical page or does not have identical 3438 * characteristics to the first PTE2. 3439 */ 3440 fpte2_fav = (fpte2 & (PTE2_FRAME | PTE2_A | PTE2_V)); 3441 fpte2_fav += PTE1_SIZE - PTE2_SIZE; /* examine from the end */ 3442 for (pte2p = fpte2p + NPTE2_IN_PT2 - 1; pte2p > fpte2p; pte2p--) { 3443 pte2 = pte2_load(pte2p); 3444 if ((pte2 & (PTE2_FRAME | PTE2_A | PTE2_V)) != fpte2_fav) { 3445 pmap_pte1_p_failures++; 3446 CTR3(KTR_PMAP, "%s: failure(3) for va %#x in pmap %p", 3447 __func__, va, pmap); 3448 return; 3449 } 3450 if ((pte2 & (PTE2_NM | PTE2_RO)) == PTE2_NM) { 3451 /* 3452 * When page is not modified, PTE2_RO can be set 3453 * without a TLB invalidation. See note above. 3454 */ 3455 pte2 |= PTE2_RO; 3456 pte2_store(pte2p, pte2); 3457 pteva = pte1_trunc(va) | (pte2 & PTE1_OFFSET & 3458 PTE2_FRAME); 3459 CTR3(KTR_PMAP, "%s: protect for va %#x in pmap %p", 3460 __func__, pteva, pmap); 3461 } 3462 if ((pte2 & PTE2_PROMOTE) != (fpte2 & PTE2_PROMOTE)) { 3463 pmap_pte1_p_failures++; 3464 CTR3(KTR_PMAP, "%s: failure(4) for va %#x in pmap %p", 3465 __func__, va, pmap); 3466 return; 3467 } 3468 3469 fpte2_fav -= PTE2_SIZE; 3470 } 3471 /* 3472 * The page table page in its current state will stay in PT2TAB 3473 * until the PTE1 mapping the section is demoted by pmap_demote_pte1() 3474 * or destroyed by pmap_remove_pte1(). 3475 * 3476 * Note that L2 page table size is not equal to PAGE_SIZE. 3477 */ 3478 m = PHYS_TO_VM_PAGE(trunc_page(pte1_link_pa(pte1_load(pte1p)))); 3479 KASSERT(m >= vm_page_array && m < &vm_page_array[vm_page_array_size], 3480 ("%s: PT2 page is out of range", __func__)); 3481 KASSERT(m->pindex == (pte1_index(va) & ~PT2PG_MASK), 3482 ("%s: PT2 page's pindex is wrong", __func__)); 3483 3484 /* 3485 * Get pte1 from pte2 format. 3486 */ 3487 npte1 = (fpte2 & PTE1_FRAME) | ATTR_TO_L1(fpte2) | PTE1_V; 3488 3489 /* 3490 * Promote the pv entries. 
3491 */ 3492 if (pte2_is_managed(fpte2)) 3493 pmap_pv_promote_pte1(pmap, va, pte1_pa(npte1)); 3494 3495 /* 3496 * Promote the mappings. 3497 */ 3498 pmap_change_pte1(pmap, pte1p, va, npte1); 3499 3500 pmap_pte1_promotions++; 3501 CTR3(KTR_PMAP, "%s: success for va %#x in pmap %p", 3502 __func__, va, pmap); 3503 3504 PDEBUG(6, printf("%s(%p): success for va %#x pte1 %#x(%#x) at %p\n", 3505 __func__, pmap, va, npte1, pte1_load(pte1p), pte1p)); 3506} 3507 3508/* 3509 * Zero L2 page table page. 3510 */ 3511static __inline void 3512pmap_clear_pt2(pt2_entry_t *fpte2p) 3513{ 3514 pt2_entry_t *pte2p; 3515 3516 for (pte2p = fpte2p; pte2p < fpte2p + NPTE2_IN_PT2; pte2p++) 3517 pte2_clear(pte2p); 3518 3519} 3520 3521/* 3522 * Removes a 1MB page mapping from the kernel pmap. 3523 */ 3524static void 3525pmap_remove_kernel_pte1(pmap_t pmap, pt1_entry_t *pte1p, vm_offset_t va) 3526{ 3527 vm_page_t m; 3528 uint32_t pte1_idx; 3529 pt2_entry_t *fpte2p; 3530 vm_paddr_t pt2_pa; 3531 3532 PMAP_LOCK_ASSERT(pmap, MA_OWNED); 3533 m = pmap_pt2_page(pmap, va); 3534 if (m == NULL) 3535 /* 3536 * QQQ: Is this function called only on promoted pte1? 3537 * We certainly do section mappings directly 3538 * (without promotion) in kernel !!! 3539 */ 3540 panic("%s: missing pt2 page", __func__); 3541 3542 pte1_idx = pte1_index(va); 3543 3544 /* 3545 * Initialize the L2 page table. 3546 */ 3547 fpte2p = page_pt2(pt2map_pt2pg(va), pte1_idx); 3548 pmap_clear_pt2(fpte2p); 3549 3550 /* 3551 * Remove the mapping. 3552 */ 3553 pt2_pa = page_pt2pa(VM_PAGE_TO_PHYS(m), pte1_idx); 3554 pmap_kenter_pte1(va, PTE1_LINK(pt2_pa)); 3555 3556 /* 3557 * QQQ: We do not need to invalidate PT2MAP mapping 3558 * as we did not change it. I.e. the L2 page table page 3559 * was and still is mapped the same way. 3560 */ 3561} 3562 3563/* 3564 * Do the things to unmap a section in a process 3565 */ 3566static void 3567pmap_remove_pte1(pmap_t pmap, pt1_entry_t *pte1p, vm_offset_t sva, 3568 struct spglist *free) 3569{ 3570 pt1_entry_t opte1; 3571 struct md_page *pvh; 3572 vm_offset_t eva, va; 3573 vm_page_t m; 3574 3575 PDEBUG(6, printf("%s(%p): va %#x pte1 %#x at %p\n", __func__, pmap, sva, 3576 pte1_load(pte1p), pte1p)); 3577 3578 PMAP_LOCK_ASSERT(pmap, MA_OWNED); 3579 KASSERT((sva & PTE1_OFFSET) == 0, 3580 ("%s: sva is not 1mpage aligned", __func__)); 3581 3582 /* 3583 * Clear and invalidate the mapping. It should occupy one and only TLB 3584 * entry. So, pmap_tlb_flush() called with aligned address should be 3585 * sufficient. 3586 */ 3587 opte1 = pte1_load_clear(pte1p); 3588 pmap_tlb_flush(pmap, sva); 3589 3590 if (pte1_is_wired(opte1)) 3591 pmap->pm_stats.wired_count -= PTE1_SIZE / PAGE_SIZE; 3592 pmap->pm_stats.resident_count -= PTE1_SIZE / PAGE_SIZE; 3593 if (pte1_is_managed(opte1)) { 3594 pvh = pa_to_pvh(pte1_pa(opte1)); 3595 pmap_pvh_free(pvh, pmap, sva); 3596 eva = sva + PTE1_SIZE; 3597 for (va = sva, m = PHYS_TO_VM_PAGE(pte1_pa(opte1)); 3598 va < eva; va += PAGE_SIZE, m++) { 3599 if (pte1_is_dirty(opte1)) 3600 vm_page_dirty(m); 3601 if (opte1 & PTE1_A) 3602 vm_page_aflag_set(m, PGA_REFERENCED); 3603 if (TAILQ_EMPTY(&m->md.pv_list) && 3604 TAILQ_EMPTY(&pvh->pv_list)) 3605 vm_page_aflag_clear(m, PGA_WRITEABLE); 3606 } 3607 } 3608 if (pmap == kernel_pmap) { 3609 /* 3610 * L2 page table(s) can't be removed from kernel map as 3611 * kernel counts on it (stuff around pmap_growkernel()). 3612 */ 3613 pmap_remove_kernel_pte1(pmap, pte1p, sva); 3614 } else { 3615 /* 3616 * Get associated L2 page table page. 
3617 * It's possible that the page was never allocated. 3618 */ 3619 m = pmap_pt2_page(pmap, sva); 3620 if (m != NULL) 3621 pmap_unwire_pt2_all(pmap, sva, m, free); 3622 } 3623} 3624 3625/* 3626 * Fills L2 page table page with mappings to consecutive physical pages. 3627 */ 3628static __inline void 3629pmap_fill_pt2(pt2_entry_t *fpte2p, pt2_entry_t npte2) 3630{ 3631 pt2_entry_t *pte2p; 3632 3633 for (pte2p = fpte2p; pte2p < fpte2p + NPTE2_IN_PT2; pte2p++) { 3634 pte2_store(pte2p, npte2); 3635 npte2 += PTE2_SIZE; 3636 } 3637} 3638 3639/* 3640 * Tries to demote a 1MB page mapping. If demotion fails, the 3641 * 1MB page mapping is invalidated. 3642 */ 3643static boolean_t 3644pmap_demote_pte1(pmap_t pmap, pt1_entry_t *pte1p, vm_offset_t va) 3645{ 3646 pt1_entry_t opte1, npte1; 3647 pt2_entry_t *fpte2p, npte2; 3648 vm_paddr_t pt2pg_pa, pt2_pa; 3649 vm_page_t m; 3650 struct spglist free; 3651 uint32_t pte1_idx, isnew = 0; 3652 3653 PDEBUG(6, printf("%s(%p): try for va %#x pte1 %#x at %p\n", __func__, 3654 pmap, va, pte1_load(pte1p), pte1p)); 3655 3656 PMAP_LOCK_ASSERT(pmap, MA_OWNED); 3657 3658 opte1 = pte1_load(pte1p); 3659 KASSERT(pte1_is_section(opte1), ("%s: opte1 not a section", __func__)); 3660 3661 if ((opte1 & PTE1_A) == 0 || (m = pmap_pt2_page(pmap, va)) == NULL) { 3662 KASSERT(!pte1_is_wired(opte1), 3663 ("%s: PT2 page for a wired mapping is missing", __func__)); 3664 3665 /* 3666 * Invalidate the 1MB page mapping and return 3667 * "failure" if the mapping was never accessed or the 3668 * allocation of the new page table page fails. 3669 */ 3670 if ((opte1 & PTE1_A) == 0 || (m = vm_page_alloc(NULL, 3671 pte1_index(va) & ~PT2PG_MASK, VM_ALLOC_NOOBJ | 3672 VM_ALLOC_NORMAL | VM_ALLOC_WIRED)) == NULL) { 3673 SLIST_INIT(&free); 3674 pmap_remove_pte1(pmap, pte1p, pte1_trunc(va), &free); 3675 pmap_free_zero_pages(&free); 3676 CTR3(KTR_PMAP, "%s: failure for va %#x in pmap %p", 3677 __func__, va, pmap); 3678 return (FALSE); 3679 } 3680 if (va < VM_MAXUSER_ADDRESS) 3681 pmap->pm_stats.resident_count++; 3682 3683 isnew = 1; 3684 3685 /* 3686 * We init all L2 page tables in the page even if 3687 * we are going to change everything for one L2 page 3688 * table in a while. 3689 */ 3690 pt2pg_pa = pmap_pt2pg_init(pmap, va, m); 3691 } else { 3692 if (va < VM_MAXUSER_ADDRESS) { 3693 if (pt2_is_empty(m, va)) 3694 isnew = 1; /* Demoting section w/o promotion. */ 3695#ifdef INVARIANTS 3696 else 3697 KASSERT(pt2_is_full(m, va), ("%s: bad PT2 wire" 3698 " count %u", __func__, 3699 pt2_wirecount_get(m, pte1_index(va)))); 3700#endif 3701 } 3702 } 3703 3704 pt2pg_pa = VM_PAGE_TO_PHYS(m); 3705 pte1_idx = pte1_index(va); 3706 /* 3707 * If the pmap is current, then the PT2MAP can provide access to 3708 * the page table page (promoted L2 page tables are not unmapped). 3709 * Otherwise, temporarily map the L2 page table page (m) into 3710 * the kernel's address space at either PADDR1 or PADDR2. 3711 * 3712 * Note that L2 page table size is not equal to PAGE_SIZE. 
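 *
 * For scale (stated as an assumption about the usual ARM
 * short-descriptor format, not taken from this file): one L2 page
 * table is 256 entries * 4 bytes == 1 KB (NB_IN_PT2), so a 4 KB page
 * holds NPT2_IN_PG == 4 L2 page tables, and page_pt2() picks the
 * right 1 KB slice within the page by pte1_idx.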
3713 */ 3714 if (pmap_is_current(pmap)) 3715 fpte2p = page_pt2(pt2map_pt2pg(va), pte1_idx); 3716 else if (curthread->td_pinned > 0 && rw_wowned(&pvh_global_lock)) { 3717 if (pte2_pa(pte2_load(PMAP1)) != pt2pg_pa) { 3718 pte2_store(PMAP1, PTE2_KPT(pt2pg_pa)); 3719#ifdef SMP 3720 PMAP1cpu = PCPU_GET(cpuid); 3721#endif 3722 tlb_flush_local((vm_offset_t)PADDR1); 3723 PMAP1changed++; 3724 } else 3725#ifdef SMP 3726 if (PMAP1cpu != PCPU_GET(cpuid)) { 3727 PMAP1cpu = PCPU_GET(cpuid); 3728 tlb_flush_local((vm_offset_t)PADDR1); 3729 PMAP1changedcpu++; 3730 } else 3731#endif 3732 PMAP1unchanged++; 3733 fpte2p = page_pt2((vm_offset_t)PADDR1, pte1_idx); 3734 } else { 3735 mtx_lock(&PMAP2mutex); 3736 if (pte2_pa(pte2_load(PMAP2)) != pt2pg_pa) { 3737 pte2_store(PMAP2, PTE2_KPT(pt2pg_pa)); 3738 tlb_flush((vm_offset_t)PADDR2); 3739 } 3740 fpte2p = page_pt2((vm_offset_t)PADDR2, pte1_idx); 3741 } 3742 pt2_pa = page_pt2pa(pt2pg_pa, pte1_idx); 3743 npte1 = PTE1_LINK(pt2_pa); 3744 3745 KASSERT((opte1 & PTE1_A) != 0, 3746 ("%s: opte1 is missing PTE1_A", __func__)); 3747 KASSERT((opte1 & (PTE1_NM | PTE1_RO)) != PTE1_NM, 3748 ("%s: opte1 has PTE1_NM", __func__)); 3749 3750 /* 3751 * Get pte2 from pte1 format. 3752 */ 3753 npte2 = pte1_pa(opte1) | ATTR_TO_L2(opte1) | PTE2_V; 3754 3755 /* 3756 * If the L2 page table page is new, initialize it. If the mapping 3757 * has changed attributes, update the page table entries. 3758 */ 3759 if (isnew != 0) { 3760 pt2_wirecount_set(m, pte1_idx, NPTE2_IN_PT2); 3761 pmap_fill_pt2(fpte2p, npte2); 3762 } else if ((pte2_load(fpte2p) & PTE2_PROMOTE) != 3763 (npte2 & PTE2_PROMOTE)) 3764 pmap_fill_pt2(fpte2p, npte2); 3765 3766 KASSERT(pte2_pa(pte2_load(fpte2p)) == pte2_pa(npte2), 3767 ("%s: fpte2p and npte2 map different physical addresses", 3768 __func__)); 3769 3770 if (fpte2p == PADDR2) 3771 mtx_unlock(&PMAP2mutex); 3772 3773 /* 3774 * Demote the mapping. This pmap is locked. The old PTE1 has 3775 * PTE1_A set. If the old PTE1 has not PTE1_RO set, it also 3776 * has not PTE1_NM set. Thus, there is no danger of a race with 3777 * another processor changing the setting of PTE1_A and/or PTE1_NM 3778 * between the read above and the store below. 3779 */ 3780 pmap_change_pte1(pmap, pte1p, va, npte1); 3781 3782 /* 3783 * Demote the pv entry. This depends on the earlier demotion 3784 * of the mapping. Specifically, the (re)creation of a per- 3785 * page pv entry might trigger the execution of pmap_pv_reclaim(), 3786 * which might reclaim a newly (re)created per-page pv entry 3787 * and destroy the associated mapping. In order to destroy 3788 * the mapping, the PTE1 must have already changed from mapping 3789 * the 1mpage to referencing the page table page. 3790 */ 3791 if (pte1_is_managed(opte1)) 3792 pmap_pv_demote_pte1(pmap, va, pte1_pa(opte1)); 3793 3794 pmap_pte1_demotions++; 3795 CTR3(KTR_PMAP, "%s: success for va %#x in pmap %p", 3796 __func__, va, pmap); 3797 3798 PDEBUG(6, printf("%s(%p): success for va %#x pte1 %#x(%#x) at %p\n", 3799 __func__, pmap, va, npte1, pte1_load(pte1p), pte1p)); 3800 return (TRUE); 3801} 3802 3803/* 3804 * Insert the given physical page (p) at 3805 * the specified virtual address (v) in the 3806 * target physical map with the protection requested. 3807 * 3808 * If specified, the page will be wired down, meaning 3809 * that the related pte can not be reclaimed. 3810 * 3811 * NB: This is the only routine which MAY NOT lazy-evaluate 3812 * or lose information. That is, this routine must actually 3813 * insert this page into the given map NOW. 
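 *
 * An illustrative call, a sketch only; "va" and "m" are hypothetical
 * and the usual object/page locking rules still apply:
 *
 *	error = pmap_enter(pmap, va, m, VM_PROT_READ | VM_PROT_WRITE,
 *	    PMAP_ENTER_NOSLEEP, 0);
 *	if (error == KERN_RESOURCE_SHORTAGE)
 *		handle the failed page table allocation and retry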
3814 */
3815 int
3816 pmap_enter(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot,
3817 u_int flags, int8_t psind)
3818 {
3819 pt1_entry_t *pte1p;
3820 pt2_entry_t *pte2p;
3821 pt2_entry_t npte2, opte2;
3822 pv_entry_t pv;
3823 vm_paddr_t opa, pa;
3824 vm_page_t mpte2, om;
3825 boolean_t wired;
3826
3827 va = trunc_page(va);
3828 mpte2 = NULL;
3829 wired = (flags & PMAP_ENTER_WIRED) != 0;
3830
3831 KASSERT(va <= vm_max_kernel_address, ("%s: toobig", __func__));
3832 KASSERT(va < UPT2V_MIN_ADDRESS || va >= UPT2V_MAX_ADDRESS,
3833 ("%s: invalid to pmap_enter page table pages (va: 0x%x)", __func__,
3834 va));
3835 if ((m->oflags & VPO_UNMANAGED) == 0 && !vm_page_xbusied(m))
3836 VM_OBJECT_ASSERT_LOCKED(m->object);
3837
3838 rw_wlock(&pvh_global_lock);
3839 PMAP_LOCK(pmap);
3840 sched_pin();
3841
3842 /*
3843 * In the case that a page table page is not
3844 * resident, we are creating it here.
3845 */
3846 if (va < VM_MAXUSER_ADDRESS) {
3847 mpte2 = pmap_allocpte2(pmap, va, flags);
3848 if (mpte2 == NULL) {
3849 KASSERT((flags & PMAP_ENTER_NOSLEEP) != 0,
3850 ("pmap_allocpte2 failed with sleep allowed"));
3851 sched_unpin();
3852 rw_wunlock(&pvh_global_lock);
3853 PMAP_UNLOCK(pmap);
3854 return (KERN_RESOURCE_SHORTAGE);
3855 }
3856 }
3857 pte1p = pmap_pte1(pmap, va);
3858 if (pte1_is_section(pte1_load(pte1p)))
3859 panic("%s: attempted on 1MB page", __func__);
3860 pte2p = pmap_pte2_quick(pmap, va);
3861 if (pte2p == NULL)
3862 panic("%s: invalid L1 page table entry va=%#x", __func__, va);
3863
3864 om = NULL;
3865 pa = VM_PAGE_TO_PHYS(m);
3866 opte2 = pte2_load(pte2p);
3867 opa = pte2_pa(opte2);
3868 /*
3869 * Mapping has not changed, must be protection or wiring change.
3870 */
3871 if (pte2_is_valid(opte2) && (opa == pa)) {
3872 /*
3873 * Wiring change, just update stats. We don't worry about
3874 * wiring PT2 pages as they remain resident as long as there
3875 * are valid mappings in them. Hence, if a user page is wired,
3876 * the PT2 page will be also.
3877 */
3878 if (wired && !pte2_is_wired(opte2))
3879 pmap->pm_stats.wired_count++;
3880 else if (!wired && pte2_is_wired(opte2))
3881 pmap->pm_stats.wired_count--;
3882
3883 /*
3884 * Remove extra pte2 reference
3885 */
3886 if (mpte2)
3887 pt2_wirecount_dec(mpte2, pte1_index(va));
3888 if (pte2_is_managed(opte2))
3889 om = m;
3890 goto validate;
3891 }
3892
3893 /*
3894 * QQQ: We think that changing physical address on writeable mapping
3895 * is not safe. Well, maybe on kernel address space with correct
3896 * locking, it can make sense. However, we have no idea why
3897 * anyone would do that on user address space. Are we wrong?
3898 */
3899 KASSERT((opa == 0) || (opa == pa) ||
3900 !pte2_is_valid(opte2) || ((opte2 & PTE2_RO) != 0),
3901 ("%s: pmap %p va %#x(%#x) opa %#x pa %#x - gotcha %#x %#x!",
3902 __func__, pmap, va, opte2, opa, pa, flags, prot));
3903
3904 pv = NULL;
3905
3906 /*
3907 * Mapping has changed, invalidate old range and fall through to
3908 * handle validating new mapping.
3909 */
3910 if (opa) {
3911 if (pte2_is_wired(opte2))
3912 pmap->pm_stats.wired_count--;
3913 if (pte2_is_managed(opte2)) {
3914 om = PHYS_TO_VM_PAGE(opa);
3915 pv = pmap_pvh_remove(&om->md, pmap, va);
3916 }
3917 /*
3918 * Remove extra pte2 reference
3919 */
3920 if (mpte2 != NULL)
3921 pt2_wirecount_dec(mpte2, va >> PTE1_SHIFT);
3922 } else
3923 pmap->pm_stats.resident_count++;
3924
3925 /*
3926 * Enter on the PV list if part of our managed memory.
3927 */
3928 if ((m->oflags & VPO_UNMANAGED) == 0) {
3929 KASSERT(va < kmi.clean_sva || va >= kmi.clean_eva,
3930 ("%s: managed mapping within the clean submap", __func__));
3931 if (pv == NULL)
3932 pv = get_pv_entry(pmap, FALSE);
3933 pv->pv_va = va;
3934 TAILQ_INSERT_TAIL(&m->md.pv_list, pv, pv_next);
3935 } else if (pv != NULL)
3936 free_pv_entry(pmap, pv);
3937
3938 /*
3939 * Increment counters
3940 */
3941 if (wired)
3942 pmap->pm_stats.wired_count++;
3943
3944 validate:
3945 /*
3946 * Now validate mapping with desired protection/wiring.
3947 */
3948 npte2 = PTE2(pa, PTE2_NM, vm_page_pte2_attr(m));
3949 if (prot & VM_PROT_WRITE) {
3950 if (pte2_is_managed(npte2))
3951 vm_page_aflag_set(m, PGA_WRITEABLE);
3952 }
3953 else
3954 npte2 |= PTE2_RO;
3955 if ((prot & VM_PROT_EXECUTE) == 0)
3956 npte2 |= PTE2_NX;
3957 if (wired)
3958 npte2 |= PTE2_W;
3959 if (va < VM_MAXUSER_ADDRESS)
3960 npte2 |= PTE2_U;
3961 if (pmap != kernel_pmap)
3962 npte2 |= PTE2_NG;
3963
3964 /*
3965 * If the mapping or permission bits are different, we need
3966 * to update the pte2.
3967 *
3968 * QQQ: Think again and again what to do
3969 * if the mapping is going to be changed!
3970 */
3971 if ((opte2 & ~(PTE2_NM | PTE2_A)) != (npte2 & ~(PTE2_NM | PTE2_A))) {
3972 /*
3973 * Sync icache if exec permission and attribute VM_MEMATTR_WB_WA
3974 * is set. Do it now, before the mapping is stored and made
3975 * valid for hardware table walk. If done later, there is a race
3976 * for other threads of the current process in the lazy loading
3977 * case. Don't do it for kernel memory which is mapped with exec
3978 * permission even if the memory isn't going to hold executable
3979 * code. The only time when icache sync is needed is after a
3980 * kernel module is loaded and the relocation info is processed.
3981 * And it's done in elf_cpu_load_file().
3982 *
3983 * QQQ: (1) Is there any better way or place
3984 * to sync the icache?
3985 * (2) Now, we do it on a page basis.
3986 */
3987 if ((prot & VM_PROT_EXECUTE) && pmap != kernel_pmap &&
3988 m->md.pat_mode == VM_MEMATTR_WB_WA &&
3989 (opa != pa || (opte2 & PTE2_NX)))
3990 cache_icache_sync_fresh(va, pa, PAGE_SIZE);
3991
3992 npte2 |= PTE2_A;
3993 if (flags & VM_PROT_WRITE)
3994 npte2 &= ~PTE2_NM;
3995 if (opte2 & PTE2_V) {
3996 /* Change mapping with break-before-make approach. */
3997 opte2 = pte2_load_clear(pte2p);
3998 pmap_tlb_flush(pmap, va);
3999 pte2_store(pte2p, npte2);
4000 if (opte2 & PTE2_A) {
4001 if (pte2_is_managed(opte2))
4002 vm_page_aflag_set(om, PGA_REFERENCED);
4003 }
4004 if (pte2_is_dirty(opte2)) {
4005 if (pte2_is_managed(opte2))
4006 vm_page_dirty(om);
4007 }
4008 if (pte2_is_managed(opte2) &&
4009 TAILQ_EMPTY(&om->md.pv_list) &&
4010 ((om->flags & PG_FICTITIOUS) != 0 ||
4011 TAILQ_EMPTY(&pa_to_pvh(opa)->pv_list)))
4012 vm_page_aflag_clear(om, PGA_WRITEABLE);
4013 } else
4014 pte2_store(pte2p, npte2);
4015 }
4016 #if 0
4017 else {
4018 /*
4019 * QQQ: In times when both the access and not-modified bits
4020 * are emulated by software, this should not happen. Some
4021 * analysis is needed if this really happens. A missing
4022 * TLB flush somewhere could be the reason.
4023 */
4024 panic("%s: pmap %p va %#x opte2 %x npte2 %x !!", __func__, pmap,
4025 va, opte2, npte2);
4026 }
4027 #endif
4028 /*
4029 * If both the L2 page table page and the reservation are fully
4030 * populated, then attempt promotion.
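 *
 * (A sketch of what "fully populated" unfolds to: pt2_is_full()
 * means the PT2 wire count for this 1MB slot reached NPTE2_IN_PT2,
 * and vm_reserv_level_iffullpop() == 0 means the backing 1MB
 * reservation has no missing 4KB pages.)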
4031 */ 4032 if ((mpte2 == NULL || pt2_is_full(mpte2, va)) && 4033 sp_enabled && (m->flags & PG_FICTITIOUS) == 0 && 4034 vm_reserv_level_iffullpop(m) == 0) 4035 pmap_promote_pte1(pmap, pte1p, va); 4036 sched_unpin(); 4037 rw_wunlock(&pvh_global_lock); 4038 PMAP_UNLOCK(pmap); 4039 return (KERN_SUCCESS); 4040} 4041 4042/* 4043 * Do the things to unmap a page in a process. 4044 */ 4045static int 4046pmap_remove_pte2(pmap_t pmap, pt2_entry_t *pte2p, vm_offset_t va, 4047 struct spglist *free) 4048{ 4049 pt2_entry_t opte2; 4050 vm_page_t m; 4051 4052 rw_assert(&pvh_global_lock, RA_WLOCKED); 4053 PMAP_LOCK_ASSERT(pmap, MA_OWNED); 4054 4055 /* Clear and invalidate the mapping. */ 4056 opte2 = pte2_load_clear(pte2p); 4057 pmap_tlb_flush(pmap, va); 4058 4059 KASSERT(pte2_is_valid(opte2), ("%s: pmap %p va %#x not link pte2 %#x", 4060 __func__, pmap, va, opte2)); 4061 4062 if (opte2 & PTE2_W) 4063 pmap->pm_stats.wired_count -= 1; 4064 pmap->pm_stats.resident_count -= 1; 4065 if (pte2_is_managed(opte2)) { 4066 m = PHYS_TO_VM_PAGE(pte2_pa(opte2)); 4067 if (pte2_is_dirty(opte2)) 4068 vm_page_dirty(m); 4069 if (opte2 & PTE2_A) 4070 vm_page_aflag_set(m, PGA_REFERENCED); 4071 pmap_remove_entry(pmap, m, va); 4072 } 4073 return (pmap_unuse_pt2(pmap, va, free)); 4074} 4075 4076/* 4077 * Remove a single page from a process address space. 4078 */ 4079static void 4080pmap_remove_page(pmap_t pmap, vm_offset_t va, struct spglist *free) 4081{ 4082 pt2_entry_t *pte2p; 4083 4084 rw_assert(&pvh_global_lock, RA_WLOCKED); 4085 KASSERT(curthread->td_pinned > 0, 4086 ("%s: curthread not pinned", __func__)); 4087 PMAP_LOCK_ASSERT(pmap, MA_OWNED); 4088 if ((pte2p = pmap_pte2_quick(pmap, va)) == NULL || 4089 !pte2_is_valid(pte2_load(pte2p))) 4090 return; 4091 pmap_remove_pte2(pmap, pte2p, va, free); 4092} 4093 4094/* 4095 * Remove the given range of addresses from the specified map. 4096 * 4097 * It is assumed that the start and end are properly 4098 * rounded to the page size. 4099 */ 4100void 4101pmap_remove(pmap_t pmap, vm_offset_t sva, vm_offset_t eva) 4102{ 4103 vm_offset_t nextva; 4104 pt1_entry_t *pte1p, pte1; 4105 pt2_entry_t *pte2p, pte2; 4106 struct spglist free; 4107 4108 /* 4109 * Perform an unsynchronized read. This is, however, safe. 4110 */ 4111 if (pmap->pm_stats.resident_count == 0) 4112 return; 4113 4114 SLIST_INIT(&free); 4115 4116 rw_wlock(&pvh_global_lock); 4117 sched_pin(); 4118 PMAP_LOCK(pmap); 4119 4120 /* 4121 * Special handling of removing one page. A very common 4122 * operation and easy to short circuit some code. 4123 */ 4124 if (sva + PAGE_SIZE == eva) { 4125 pte1 = pte1_load(pmap_pte1(pmap, sva)); 4126 if (pte1_is_link(pte1)) { 4127 pmap_remove_page(pmap, sva, &free); 4128 goto out; 4129 } 4130 } 4131 4132 for (; sva < eva; sva = nextva) { 4133 /* 4134 * Calculate address for next L2 page table. 4135 */ 4136 nextva = pte1_trunc(sva + PTE1_SIZE); 4137 if (nextva < sva) 4138 nextva = eva; 4139 if (pmap->pm_stats.resident_count == 0) 4140 break; 4141 4142 pte1p = pmap_pte1(pmap, sva); 4143 pte1 = pte1_load(pte1p); 4144 4145 /* 4146 * Weed out invalid mappings. Note: we assume that the L1 page 4147 * table is always allocated, and in kernel virtual. 4148 */ 4149 if (pte1 == 0) 4150 continue; 4151 4152 if (pte1_is_section(pte1)) { 4153 /* 4154 * Are we removing the entire large page? If not, 4155 * demote the mapping and fall through. 
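 *
 * Worked example with illustrative numbers (PTE1_SIZE is 1MB):
 * for sva = 0x00100000 the next boundary is nextva = 0x00200000;
 * the section is removed whole only when eva >= 0x00200000,
 * otherwise it is demoted and trimmed by the 4KB loop below.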
4156 */ 4157 if (sva + PTE1_SIZE == nextva && eva >= nextva) { 4158 pmap_remove_pte1(pmap, pte1p, sva, &free); 4159 continue; 4160 } else if (!pmap_demote_pte1(pmap, pte1p, sva)) { 4161 /* The large page mapping was destroyed. */ 4162 continue; 4163 } 4164#ifdef INVARIANTS 4165 else { 4166 /* Update pte1 after demotion. */ 4167 pte1 = pte1_load(pte1p); 4168 } 4169#endif 4170 } 4171 4172 KASSERT(pte1_is_link(pte1), ("%s: pmap %p va %#x pte1 %#x at %p" 4173 " is not link", __func__, pmap, sva, pte1, pte1p)); 4174 4175 /* 4176 * Limit our scan to either the end of the va represented 4177 * by the current L2 page table page, or to the end of the 4178 * range being removed. 4179 */ 4180 if (nextva > eva) 4181 nextva = eva; 4182 4183 for (pte2p = pmap_pte2_quick(pmap, sva); sva != nextva; 4184 pte2p++, sva += PAGE_SIZE) { 4185 pte2 = pte2_load(pte2p); 4186 if (!pte2_is_valid(pte2)) 4187 continue; 4188 if (pmap_remove_pte2(pmap, pte2p, sva, &free)) 4189 break; 4190 } 4191 } 4192out: 4193 sched_unpin(); 4194 rw_wunlock(&pvh_global_lock); 4195 PMAP_UNLOCK(pmap); 4196 pmap_free_zero_pages(&free); 4197} 4198 4199/* 4200 * Routine: pmap_remove_all 4201 * Function: 4202 * Removes this physical page from 4203 * all physical maps in which it resides. 4204 * Reflects back modify bits to the pager. 4205 * 4206 * Notes: 4207 * Original versions of this routine were very 4208 * inefficient because they iteratively called 4209 * pmap_remove (slow...) 4210 */ 4211 4212void 4213pmap_remove_all(vm_page_t m) 4214{ 4215 struct md_page *pvh; 4216 pv_entry_t pv; 4217 pmap_t pmap; 4218 pt2_entry_t *pte2p, opte2; 4219 pt1_entry_t *pte1p; 4220 vm_offset_t va; 4221 struct spglist free; 4222 4223 KASSERT((m->oflags & VPO_UNMANAGED) == 0, 4224 ("%s: page %p is not managed", __func__, m)); 4225 SLIST_INIT(&free); 4226 rw_wlock(&pvh_global_lock); 4227 sched_pin(); 4228 if ((m->flags & PG_FICTITIOUS) != 0) 4229 goto small_mappings; 4230 pvh = pa_to_pvh(VM_PAGE_TO_PHYS(m)); 4231 while ((pv = TAILQ_FIRST(&pvh->pv_list)) != NULL) { 4232 va = pv->pv_va; 4233 pmap = PV_PMAP(pv); 4234 PMAP_LOCK(pmap); 4235 pte1p = pmap_pte1(pmap, va); 4236 (void)pmap_demote_pte1(pmap, pte1p, va); 4237 PMAP_UNLOCK(pmap); 4238 } 4239small_mappings: 4240 while ((pv = TAILQ_FIRST(&m->md.pv_list)) != NULL) { 4241 pmap = PV_PMAP(pv); 4242 PMAP_LOCK(pmap); 4243 pmap->pm_stats.resident_count--; 4244 pte1p = pmap_pte1(pmap, pv->pv_va); 4245 KASSERT(!pte1_is_section(pte1_load(pte1p)), ("%s: found " 4246 "a 1mpage in page %p's pv list", __func__, m)); 4247 pte2p = pmap_pte2_quick(pmap, pv->pv_va); 4248 opte2 = pte2_load_clear(pte2p); 4249 pmap_tlb_flush(pmap, pv->pv_va); 4250 KASSERT(pte2_is_valid(opte2), ("%s: pmap %p va %x zero pte2", 4251 __func__, pmap, pv->pv_va)); 4252 if (pte2_is_wired(opte2)) 4253 pmap->pm_stats.wired_count--; 4254 if (opte2 & PTE2_A) 4255 vm_page_aflag_set(m, PGA_REFERENCED); 4256 4257 /* 4258 * Update the vm_page_t clean and reference bits. 4259 */ 4260 if (pte2_is_dirty(opte2)) 4261 vm_page_dirty(m); 4262 pmap_unuse_pt2(pmap, pv->pv_va, &free); 4263 TAILQ_REMOVE(&m->md.pv_list, pv, pv_next); 4264 free_pv_entry(pmap, pv); 4265 PMAP_UNLOCK(pmap); 4266 } 4267 vm_page_aflag_clear(m, PGA_WRITEABLE); 4268 sched_unpin(); 4269 rw_wunlock(&pvh_global_lock); 4270 pmap_free_zero_pages(&free); 4271} 4272 4273/* 4274 * Just subroutine for pmap_remove_pages() to reasonably satisfy 4275 * good coding style, a.k.a. 80 character line width limit hell. 
4276 */
4277 static __inline void
4278 pmap_remove_pte1_quick(pmap_t pmap, pt1_entry_t pte1, pv_entry_t pv,
4279 struct spglist *free)
4280 {
4281 vm_paddr_t pa;
4282 vm_page_t m, mt, mpt2pg;
4283 struct md_page *pvh;
4284
4285 pa = pte1_pa(pte1);
4286 m = PHYS_TO_VM_PAGE(pa);
4287
4288 KASSERT(m->phys_addr == pa, ("%s: vm_page_t %p addr mismatch %#x %#x",
4289 __func__, m, m->phys_addr, pa));
4290 KASSERT((m->flags & PG_FICTITIOUS) != 0 ||
4291 m < &vm_page_array[vm_page_array_size],
4292 ("%s: bad pte1 %#x", __func__, pte1));
4293
4294 if (pte1_is_dirty(pte1)) {
4295 for (mt = m; mt < &m[PTE1_SIZE / PAGE_SIZE]; mt++)
4296 vm_page_dirty(mt);
4297 }
4298
4299 pmap->pm_stats.resident_count -= PTE1_SIZE / PAGE_SIZE;
4300 pvh = pa_to_pvh(pa);
4301 TAILQ_REMOVE(&pvh->pv_list, pv, pv_next);
4302 if (TAILQ_EMPTY(&pvh->pv_list)) {
4303 for (mt = m; mt < &m[PTE1_SIZE / PAGE_SIZE]; mt++)
4304 if (TAILQ_EMPTY(&mt->md.pv_list))
4305 vm_page_aflag_clear(mt, PGA_WRITEABLE);
4306 }
4307 mpt2pg = pmap_pt2_page(pmap, pv->pv_va);
4308 if (mpt2pg != NULL)
4309 pmap_unwire_pt2_all(pmap, pv->pv_va, mpt2pg, free);
4310 }
4311
4312 /*
4313 * Just subroutine for pmap_remove_pages() to reasonably satisfy
4314 * good coding style, a.k.a. 80 character line width limit hell.
4315 */
4316 static __inline void
4317 pmap_remove_pte2_quick(pmap_t pmap, pt2_entry_t pte2, pv_entry_t pv,
4318 struct spglist *free)
4319 {
4320 vm_paddr_t pa;
4321 vm_page_t m;
4322 struct md_page *pvh;
4323
4324 pa = pte2_pa(pte2);
4325 m = PHYS_TO_VM_PAGE(pa);
4326
4327 KASSERT(m->phys_addr == pa, ("%s: vm_page_t %p addr mismatch %#x %#x",
4328 __func__, m, m->phys_addr, pa));
4329 KASSERT((m->flags & PG_FICTITIOUS) != 0 ||
4330 m < &vm_page_array[vm_page_array_size],
4331 ("%s: bad pte2 %#x", __func__, pte2));
4332
4333 if (pte2_is_dirty(pte2))
4334 vm_page_dirty(m);
4335
4336 pmap->pm_stats.resident_count--;
4337 TAILQ_REMOVE(&m->md.pv_list, pv, pv_next);
4338 if (TAILQ_EMPTY(&m->md.pv_list) && (m->flags & PG_FICTITIOUS) == 0) {
4339 pvh = pa_to_pvh(pa);
4340 if (TAILQ_EMPTY(&pvh->pv_list))
4341 vm_page_aflag_clear(m, PGA_WRITEABLE);
4342 }
4343 pmap_unuse_pt2(pmap, pv->pv_va, free);
4344 }
4345
4346 /*
4347 * Remove all pages from the specified address space; this aids process
4348 * exit speeds. Also, this code is special-cased for the current process
4349 * only, but can have the more generic (and slightly slower) mode enabled.
4350 * This is much faster than pmap_remove() in the case of running down
4351 * an entire address space.
4352 */
4353 void
4354 pmap_remove_pages(pmap_t pmap)
4355 {
4356 pt1_entry_t *pte1p, pte1;
4357 pt2_entry_t *pte2p, pte2;
4358 pv_entry_t pv;
4359 struct pv_chunk *pc, *npc;
4360 struct spglist free;
4361 int field, idx;
4362 int32_t bit;
4363 uint32_t inuse, bitmask;
4364 boolean_t allfree;
4365
4366 /*
4367 * Assert that the given pmap is only active on the current
4368 * CPU. Unfortunately, we cannot block another CPU from
4369 * activating the pmap while this function is executing.
4370 */ 4371 KASSERT(pmap == vmspace_pmap(curthread->td_proc->p_vmspace), 4372 ("%s: non-current pmap %p", __func__, pmap)); 4373#if defined(SMP) && defined(INVARIANTS) 4374 { 4375 cpuset_t other_cpus; 4376 4377 sched_pin(); 4378 other_cpus = pmap->pm_active; 4379 CPU_CLR(PCPU_GET(cpuid), &other_cpus); 4380 sched_unpin(); 4381 KASSERT(CPU_EMPTY(&other_cpus), 4382 ("%s: pmap %p active on other cpus", __func__, pmap)); 4383 } 4384#endif 4385 SLIST_INIT(&free); 4386 rw_wlock(&pvh_global_lock); 4387 PMAP_LOCK(pmap); 4388 sched_pin(); 4389 TAILQ_FOREACH_SAFE(pc, &pmap->pm_pvchunk, pc_list, npc) { 4390 KASSERT(pc->pc_pmap == pmap, ("%s: wrong pmap %p %p", 4391 __func__, pmap, pc->pc_pmap)); 4392 allfree = TRUE; 4393 for (field = 0; field < _NPCM; field++) { 4394 inuse = (~(pc->pc_map[field])) & pc_freemask[field]; 4395 while (inuse != 0) { 4396 bit = ffs(inuse) - 1; 4397 bitmask = 1UL << bit; 4398 idx = field * 32 + bit; 4399 pv = &pc->pc_pventry[idx]; 4400 inuse &= ~bitmask; 4401 4402 /* 4403 * Note that we cannot remove wired pages 4404 * from a process' mapping at this time 4405 */ 4406 pte1p = pmap_pte1(pmap, pv->pv_va); 4407 pte1 = pte1_load(pte1p); 4408 if (pte1_is_section(pte1)) { 4409 if (pte1_is_wired(pte1)) { 4410 allfree = FALSE; 4411 continue; 4412 } 4413 pte1_clear(pte1p); 4414 pmap_remove_pte1_quick(pmap, pte1, pv, 4415 &free); 4416 } 4417 else if (pte1_is_link(pte1)) { 4418 pte2p = pt2map_entry(pv->pv_va); 4419 pte2 = pte2_load(pte2p); 4420 4421 if (!pte2_is_valid(pte2)) { 4422 printf("%s: pmap %p va %#x " 4423 "pte2 %#x\n", __func__, 4424 pmap, pv->pv_va, pte2); 4425 panic("bad pte2"); 4426 } 4427 4428 if (pte2_is_wired(pte2)) { 4429 allfree = FALSE; 4430 continue; 4431 } 4432 pte2_clear(pte2p); 4433 pmap_remove_pte2_quick(pmap, pte2, pv, 4434 &free); 4435 } else { 4436 printf("%s: pmap %p va %#x pte1 %#x\n", 4437 __func__, pmap, pv->pv_va, pte1); 4438 panic("bad pte1"); 4439 } 4440 4441 /* Mark free */ 4442 PV_STAT(pv_entry_frees++); 4443 PV_STAT(pv_entry_spare++); 4444 pv_entry_count--; 4445 pc->pc_map[field] |= bitmask; 4446 } 4447 } 4448 if (allfree) { 4449 TAILQ_REMOVE(&pmap->pm_pvchunk, pc, pc_list); 4450 free_pv_chunk(pc); 4451 } 4452 } 4453 tlb_flush_all_ng_local(); 4454 sched_unpin(); 4455 rw_wunlock(&pvh_global_lock); 4456 PMAP_UNLOCK(pmap); 4457 pmap_free_zero_pages(&free); 4458} 4459 4460/* 4461 * This code makes some *MAJOR* assumptions: 4462 * 1. Current pmap & pmap exists. 4463 * 2. Not wired. 4464 * 3. Read access. 4465 * 4. No L2 page table pages. 4466 * but is *MUCH* faster than pmap_enter... 4467 */ 4468static vm_page_t 4469pmap_enter_quick_locked(pmap_t pmap, vm_offset_t va, vm_page_t m, 4470 vm_prot_t prot, vm_page_t mpt2pg) 4471{ 4472 pt2_entry_t *pte2p, pte2; 4473 vm_paddr_t pa; 4474 struct spglist free; 4475 uint32_t l2prot; 4476 4477 KASSERT(va < kmi.clean_sva || va >= kmi.clean_eva || 4478 (m->oflags & VPO_UNMANAGED) != 0, 4479 ("%s: managed mapping within the clean submap", __func__)); 4480 rw_assert(&pvh_global_lock, RA_WLOCKED); 4481 PMAP_LOCK_ASSERT(pmap, MA_OWNED); 4482 4483 /* 4484 * In the case that a L2 page table page is not 4485 * resident, we are creating it here. 4486 */ 4487 if (va < VM_MAXUSER_ADDRESS) { 4488 u_int pte1_idx; 4489 pt1_entry_t pte1, *pte1p; 4490 vm_paddr_t pt2_pa; 4491 4492 /* 4493 * Get L1 page table things. 
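 * (pte1_index(va) is simply the number of the 1MB slot that "va"
 * falls into; one PT2 page backs several consecutive slots, which
 * is why the code below compares mpt2pg->pindex against
 * pte1_idx & ~PT2PG_MASK.)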
4494 */
4495 pte1_idx = pte1_index(va);
4496 pte1p = pmap_pte1(pmap, va);
4497 pte1 = pte1_load(pte1p);
4498
4499 if (mpt2pg && (mpt2pg->pindex == (pte1_idx & ~PT2PG_MASK))) {
4500 /*
4501 * Each of NPT2_IN_PG L2 page tables on the page can
4502 * come here. Make sure that associated L1 page table
4503 * link is established.
4504 *
4505 * QQQ: It turns out that we don't establish all the
4506 * links to the L2 page tables for a newly allocated
4507 * L2 page table page.
4508 */
4509 KASSERT(!pte1_is_section(pte1),
4510 ("%s: pte1 %#x is section", __func__, pte1));
4511 if (!pte1_is_link(pte1)) {
4512 pt2_pa = page_pt2pa(VM_PAGE_TO_PHYS(mpt2pg),
4513 pte1_idx);
4514 pte1_store(pte1p, PTE1_LINK(pt2_pa));
4515 }
4516 pt2_wirecount_inc(mpt2pg, pte1_idx);
4517 } else {
4518 /*
4519 * If the L2 page table page is mapped, we just
4520 * increment the hold count, and activate it.
4521 */
4522 if (pte1_is_section(pte1)) {
4523 return (NULL);
4524 } else if (pte1_is_link(pte1)) {
4525 mpt2pg = PHYS_TO_VM_PAGE(pte1_link_pa(pte1));
4526 pt2_wirecount_inc(mpt2pg, pte1_idx);
4527 } else {
4528 mpt2pg = _pmap_allocpte2(pmap, va,
4529 PMAP_ENTER_NOSLEEP);
4530 if (mpt2pg == NULL)
4531 return (NULL);
4532 }
4533 }
4534 } else {
4535 mpt2pg = NULL;
4536 }
4537
4538 /*
4539 * This call to pt2map_entry() makes the assumption that we are
4540 * entering the page into the current pmap. In order to support
4541 * quick entry into any pmap, one would likely use pmap_pte2_quick().
4542 * But that isn't as quick as pt2map_entry().
4543 */
4544 pte2p = pt2map_entry(va);
4545 pte2 = pte2_load(pte2p);
4546 if (pte2_is_valid(pte2)) {
4547 if (mpt2pg != NULL) {
4548 /*
4549 * Remove extra pte2 reference
4550 */
4551 pt2_wirecount_dec(mpt2pg, pte1_index(va));
4552 mpt2pg = NULL;
4553 }
4554 return (NULL);
4555 }
4556
4557 /*
4558 * Enter on the PV list if part of our managed memory.
4559 */
4560 if ((m->oflags & VPO_UNMANAGED) == 0 &&
4561 !pmap_try_insert_pv_entry(pmap, va, m)) {
4562 if (mpt2pg != NULL) {
4563 SLIST_INIT(&free);
4564 if (pmap_unwire_pt2(pmap, va, mpt2pg, &free)) {
4565 pmap_tlb_flush(pmap, va);
4566 pmap_free_zero_pages(&free);
4567 }
4568
4569 mpt2pg = NULL;
4570 }
4571 return (NULL);
4572 }
4573
4574 /*
4575 * Increment counters
4576 */
4577 pmap->pm_stats.resident_count++;
4578
4579 /*
4580 * Now validate mapping with RO protection
4581 */
4582 pa = VM_PAGE_TO_PHYS(m);
4583 l2prot = PTE2_RO | PTE2_NM;
4584 if (va < VM_MAXUSER_ADDRESS)
4585 l2prot |= PTE2_U | PTE2_NG;
4586 if ((prot & VM_PROT_EXECUTE) == 0)
4587 l2prot |= PTE2_NX;
4588 else if (m->md.pat_mode == VM_MEMATTR_WB_WA && pmap != kernel_pmap) {
4589 /*
4590 * Sync icache if exec permission and attribute VM_MEMATTR_WB_WA
4591 * is set. QQQ: For more info, see comments in pmap_enter().
4592 */
4593 cache_icache_sync_fresh(va, pa, PAGE_SIZE);
4594 }
4595 pte2_store(pte2p, PTE2(pa, l2prot, vm_page_pte2_attr(m)));
4596
4597 return (mpt2pg);
4598 }
4599
4600 void
4601 pmap_enter_quick(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot)
4602 {
4603
4604 rw_wlock(&pvh_global_lock);
4605 PMAP_LOCK(pmap);
4606 (void)pmap_enter_quick_locked(pmap, va, m, prot, NULL);
4607 rw_wunlock(&pvh_global_lock);
4608 PMAP_UNLOCK(pmap);
4609 }
4610
4611 /*
4612 * Tries to create a 1MB page mapping. Returns TRUE if successful and
4613 * FALSE otherwise. Fails if (1) a page table page cannot be allocated without
4614 * blocking, (2) a mapping already exists at the specified virtual address, or
4615 * (3) a pv entry cannot be allocated without reclaiming another pv entry.
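 *
 * Illustrative preconditions, a sketch of what pmap_enter_object()
 * checks before trying this path:
 *
 *	(va & PTE1_OFFSET) == 0		start is 1MB aligned
 *	va + PTE1_SIZE <= end		the whole section fits
 *	m->psind == 1 && sp_enabled	a fully populated reservation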
4616 */
4617 static boolean_t
4618 pmap_enter_pte1(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot)
4619 {
4620 pt1_entry_t *pte1p;
4621 vm_paddr_t pa;
4622 uint32_t l1prot;
4623
4624 rw_assert(&pvh_global_lock, RA_WLOCKED);
4625 PMAP_LOCK_ASSERT(pmap, MA_OWNED);
4626 pte1p = pmap_pte1(pmap, va);
4627 if (pte1_is_valid(pte1_load(pte1p))) {
4628 CTR3(KTR_PMAP, "%s: failure for va %#lx in pmap %p", __func__,
4629 va, pmap);
4630 return (FALSE);
4631 }
4632 if ((m->oflags & VPO_UNMANAGED) == 0) {
4633 /*
4634 * Abort this mapping if its PV entry could not be created.
4635 */
4636 if (!pmap_pv_insert_pte1(pmap, va, VM_PAGE_TO_PHYS(m))) {
4637 CTR3(KTR_PMAP, "%s: failure for va %#lx in pmap %p",
4638 __func__, va, pmap);
4639 return (FALSE);
4640 }
4641 }
4642 /*
4643 * Increment counters.
4644 */
4645 pmap->pm_stats.resident_count += PTE1_SIZE / PAGE_SIZE;
4646
4647 /*
4648 * Map the section.
4649 *
4650 * QQQ: Why is VM_PROT_WRITE not evaluated, and why is the
4651 * mapping made read-only?
4652 */
4653 pa = VM_PAGE_TO_PHYS(m);
4654 l1prot = PTE1_RO | PTE1_NM;
4655 if (va < VM_MAXUSER_ADDRESS)
4656 l1prot |= PTE1_U | PTE1_NG;
4657 if ((prot & VM_PROT_EXECUTE) == 0)
4658 l1prot |= PTE1_NX;
4659 else if (m->md.pat_mode == VM_MEMATTR_WB_WA && pmap != kernel_pmap) {
4660 /*
4661 * Sync icache if exec permission and attribute VM_MEMATTR_WB_WA
4662 * is set. QQQ: For more info, see comments in pmap_enter().
4663 */
4664 cache_icache_sync_fresh(va, pa, PTE1_SIZE);
4665 }
4666 pte1_store(pte1p, PTE1(pa, l1prot, ATTR_TO_L1(vm_page_pte2_attr(m))));
4667
4668 pmap_pte1_mappings++;
4669 CTR3(KTR_PMAP, "%s: success for va %#lx in pmap %p", __func__, va,
4670 pmap);
4671 return (TRUE);
4672 }
4673
4674 /*
4675 * Maps a sequence of resident pages belonging to the same object.
4676 * The sequence begins with the given page m_start. This page is
4677 * mapped at the given virtual address start. Each subsequent page is
4678 * mapped at a virtual address that is offset from start by the same
4679 * amount as the page is offset from m_start within the object. The
4680 * last page in the sequence is the page with the largest offset from
4681 * m_start that can be mapped at a virtual address less than the given
4682 * virtual address end. Not every virtual page between start and end
4683 * is mapped; only those for which a resident page exists with the
4684 * corresponding offset from m_start are mapped.
4685 */
4686 void
4687 pmap_enter_object(pmap_t pmap, vm_offset_t start, vm_offset_t end,
4688 vm_page_t m_start, vm_prot_t prot)
4689 {
4690 vm_offset_t va;
4691 vm_page_t m, mpt2pg;
4692 vm_pindex_t diff, psize;
4693
4694 PDEBUG(6, printf("%s: pmap %p start %#x end %#x m %p prot %#x\n",
4695 __func__, pmap, start, end, m_start, prot));
4696
4697 VM_OBJECT_ASSERT_LOCKED(m_start->object);
4698 psize = atop(end - start);
4699 mpt2pg = NULL;
4700 m = m_start;
4701 rw_wlock(&pvh_global_lock);
4702 PMAP_LOCK(pmap);
4703 while (m != NULL && (diff = m->pindex - m_start->pindex) < psize) {
4704 va = start + ptoa(diff);
4705 if ((va & PTE1_OFFSET) == 0 && va + PTE1_SIZE <= end &&
4706 m->psind == 1 && sp_enabled &&
4707 pmap_enter_pte1(pmap, va, m, prot))
4708 m = &m[PTE1_SIZE / PAGE_SIZE - 1];
4709 else
4710 mpt2pg = pmap_enter_quick_locked(pmap, va, m, prot,
4711 mpt2pg);
4712 m = TAILQ_NEXT(m, listq);
4713 }
4714 rw_wunlock(&pvh_global_lock);
4715 PMAP_UNLOCK(pmap);
4716 }
4717
4718 /*
4719 * This code maps large physical mmap regions into the
4720 * processor address space.
Note that some shortcuts
4721 * are taken, but the code works.
4722 */
4723 void
4724 pmap_object_init_pt(pmap_t pmap, vm_offset_t addr, vm_object_t object,
4725 vm_pindex_t pindex, vm_size_t size)
4726 {
4727 pt1_entry_t *pte1p;
4728 vm_paddr_t pa, pte2_pa;
4729 vm_page_t p;
4730 vm_memattr_t pat_mode;
4731 u_int l1attr, l1prot;
4732
4733 VM_OBJECT_ASSERT_WLOCKED(object);
4734 KASSERT(object->type == OBJT_DEVICE || object->type == OBJT_SG,
4735 ("%s: non-device object", __func__));
4736 if ((addr & PTE1_OFFSET) == 0 && (size & PTE1_OFFSET) == 0) {
4737 if (!vm_object_populate(object, pindex, pindex + atop(size)))
4738 return;
4739 p = vm_page_lookup(object, pindex);
4740 KASSERT(p->valid == VM_PAGE_BITS_ALL,
4741 ("%s: invalid page %p", __func__, p));
4742 pat_mode = p->md.pat_mode;
4743
4744 /*
4745 * Abort the mapping if the first page is not physically
4746 * aligned to a 1MB page boundary.
4747 */
4748 pte2_pa = VM_PAGE_TO_PHYS(p);
4749 if (pte2_pa & PTE1_OFFSET)
4750 return;
4751
4752 /*
4753 * Skip the first page. Abort the mapping if the rest of
4754 * the pages are not physically contiguous or have differing
4755 * memory attributes.
4756 */
4757 p = TAILQ_NEXT(p, listq);
4758 for (pa = pte2_pa + PAGE_SIZE; pa < pte2_pa + size;
4759 pa += PAGE_SIZE) {
4760 KASSERT(p->valid == VM_PAGE_BITS_ALL,
4761 ("%s: invalid page %p", __func__, p));
4762 if (pa != VM_PAGE_TO_PHYS(p) ||
4763 pat_mode != p->md.pat_mode)
4764 return;
4765 p = TAILQ_NEXT(p, listq);
4766 }
4767
4768 /*
4769 * Map using 1MB pages.
4770 *
4771 * QQQ: Well, we are mapping a section, so the same
4772 * conditions must hold as during promotion. It looks like
4773 * only RW mappings are done here, so read-only mappings
4774 * must be done elsewhere.
4775 */
4776 l1prot = PTE1_U | PTE1_NG | PTE1_RW | PTE1_M | PTE1_A;
4777 l1attr = ATTR_TO_L1(vm_memattr_to_pte2(pat_mode));
4778 PMAP_LOCK(pmap);
4779 for (pa = pte2_pa; pa < pte2_pa + size; pa += PTE1_SIZE) {
4780 pte1p = pmap_pte1(pmap, addr);
4781 if (!pte1_is_valid(pte1_load(pte1p))) {
4782 pte1_store(pte1p, PTE1(pa, l1prot, l1attr));
4783 pmap->pm_stats.resident_count += PTE1_SIZE /
4784 PAGE_SIZE;
4785 pmap_pte1_mappings++;
4786 }
4787 /* Else continue on if the PTE1 is already valid. */
4788 addr += PTE1_SIZE;
4789 }
4790 PMAP_UNLOCK(pmap);
4791 }
4792 }
4793
4794 /*
4795 * Do the things to protect a 1mpage in a process.
4796 */
4797 static void
4798 pmap_protect_pte1(pmap_t pmap, pt1_entry_t *pte1p, vm_offset_t sva,
4799 vm_prot_t prot)
4800 {
4801 pt1_entry_t npte1, opte1;
4802 vm_offset_t eva, va;
4803 vm_page_t m;
4804
4805 PMAP_LOCK_ASSERT(pmap, MA_OWNED);
4806 KASSERT((sva & PTE1_OFFSET) == 0,
4807 ("%s: sva is not 1mpage aligned", __func__));
4808
4809 opte1 = npte1 = pte1_load(pte1p);
4810 if (pte1_is_managed(opte1) && pte1_is_dirty(opte1)) {
4811 eva = sva + PTE1_SIZE;
4812 for (va = sva, m = PHYS_TO_VM_PAGE(pte1_pa(opte1));
4813 va < eva; va += PAGE_SIZE, m++)
4814 vm_page_dirty(m);
4815 }
4816 if ((prot & VM_PROT_WRITE) == 0)
4817 npte1 |= PTE1_RO | PTE1_NM;
4818 if ((prot & VM_PROT_EXECUTE) == 0)
4819 npte1 |= PTE1_NX;
4820
4821 /*
4822 * QQQ: Herein, execute permission is never set.
4823 * It can only be cleared. So, no icache
4824 * syncing is needed.
4825 */
4826
4827 if (npte1 != opte1) {
4828 pte1_store(pte1p, npte1);
4829 pmap_tlb_flush(pmap, sva);
4830 }
4831 }
4832
4833 /*
4834 * Set the physical protection on the
4835 * specified range of this map as requested.
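 *
 * Usage sketch (the range is hypothetical and page aligned):
 *
 *	pmap_protect(pmap, sva, eva, VM_PROT_READ);	write-protect
 *	pmap_protect(pmap, sva, eva, VM_PROT_NONE);	same as pmap_remove()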
4836 */
4837 void
4838 pmap_protect(pmap_t pmap, vm_offset_t sva, vm_offset_t eva, vm_prot_t prot)
4839 {
4840 boolean_t pv_lists_locked;
4841 vm_offset_t nextva;
4842 pt1_entry_t *pte1p, pte1;
4843 pt2_entry_t *pte2p, opte2, npte2;
4844
4845 KASSERT((prot & ~VM_PROT_ALL) == 0, ("invalid prot %x", prot));
4846 if (prot == VM_PROT_NONE) {
4847 pmap_remove(pmap, sva, eva);
4848 return;
4849 }
4850
4851 if ((prot & (VM_PROT_WRITE | VM_PROT_EXECUTE)) ==
4852 (VM_PROT_WRITE | VM_PROT_EXECUTE))
4853 return;
4854
4855 if (pmap_is_current(pmap))
4856 pv_lists_locked = FALSE;
4857 else {
4858 pv_lists_locked = TRUE;
4859 resume:
4860 rw_wlock(&pvh_global_lock);
4861 sched_pin();
4862 }
4863
4864 PMAP_LOCK(pmap);
4865 for (; sva < eva; sva = nextva) {
4866 /*
4867 * Calculate address for next L2 page table.
4868 */
4869 nextva = pte1_trunc(sva + PTE1_SIZE);
4870 if (nextva < sva)
4871 nextva = eva;
4872
4873 pte1p = pmap_pte1(pmap, sva);
4874 pte1 = pte1_load(pte1p);
4875
4876 /*
4877 * Weed out invalid mappings. Note: we assume that the L1
4878 * page table is always allocated, and in kernel virtual.
4879 */
4880 if (pte1 == 0)
4881 continue;
4882
4883 if (pte1_is_section(pte1)) {
4884 /*
4885 * Are we protecting the entire large page? If not,
4886 * demote the mapping and fall through.
4887 */
4888 if (sva + PTE1_SIZE == nextva && eva >= nextva) {
4889 pmap_protect_pte1(pmap, pte1p, sva, prot);
4890 continue;
4891 } else {
4892 if (!pv_lists_locked) {
4893 pv_lists_locked = TRUE;
4894 if (!rw_try_wlock(&pvh_global_lock)) {
4895 PMAP_UNLOCK(pmap);
4896 goto resume;
4897 }
4898 sched_pin();
4899 }
4900 if (!pmap_demote_pte1(pmap, pte1p, sva)) {
4901 /*
4902 * The large page mapping
4903 * was destroyed.
4904 */
4905 continue;
4906 }
4907 #ifdef INVARIANTS
4908 else {
4909 /* Update pte1 after demotion */
4910 pte1 = pte1_load(pte1p);
4911 }
4912 #endif
4913 }
4914 }
4915
4916 KASSERT(pte1_is_link(pte1), ("%s: pmap %p va %#x pte1 %#x at %p"
4917 " is not link", __func__, pmap, sva, pte1, pte1p));
4918
4919 /*
4920 * Limit our scan to either the end of the va represented
4921 * by the current L2 page table page, or to the end of the
4922 * range being protected.
4923 */
4924 if (nextva > eva)
4925 nextva = eva;
4926
4927 for (pte2p = pmap_pte2_quick(pmap, sva); sva != nextva; pte2p++,
4928 sva += PAGE_SIZE) {
4929 vm_page_t m;
4930
4931 opte2 = npte2 = pte2_load(pte2p);
4932 if (!pte2_is_valid(opte2))
4933 continue;
4934
4935 if ((prot & VM_PROT_WRITE) == 0) {
4936 if (pte2_is_managed(opte2) &&
4937 pte2_is_dirty(opte2)) {
4938 m = PHYS_TO_VM_PAGE(pte2_pa(opte2));
4939 vm_page_dirty(m);
4940 }
4941 npte2 |= PTE2_RO | PTE2_NM;
4942 }
4943
4944 if ((prot & VM_PROT_EXECUTE) == 0)
4945 npte2 |= PTE2_NX;
4946
4947 /*
4948 * QQQ: Herein, execute permission is never set.
4949 * It can only be cleared. So, no icache
4950 * syncing is needed.
4951 */
4952
4953 if (npte2 != opte2) {
4954 pte2_store(pte2p, npte2);
4955 pmap_tlb_flush(pmap, sva);
4956 }
4957 }
4958 }
4959 if (pv_lists_locked) {
4960 sched_unpin();
4961 rw_wunlock(&pvh_global_lock);
4962 }
4963 PMAP_UNLOCK(pmap);
4964 }
4965
4966 /*
4967 * pmap_pvh_wired_mappings:
4968 *
4969 * Return the updated number "count" of managed mappings that are wired.
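 *
 * (Meant to be chained, as pmap_page_wired_mappings() below does:
 * once over the page's own pv list and once over the pa_to_pvh()
 * list that tracks 1mpage mappings.)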
4969 */ 4970static int 4971pmap_pvh_wired_mappings(struct md_page *pvh, int count) 4972{ 4973 pmap_t pmap; 4974 pt1_entry_t pte1; 4975 pt2_entry_t pte2; 4976 pv_entry_t pv; 4977 4978 rw_assert(&pvh_global_lock, RA_WLOCKED); 4979 sched_pin(); 4980 TAILQ_FOREACH(pv, &pvh->pv_list, pv_next) { 4981 pmap = PV_PMAP(pv); 4982 PMAP_LOCK(pmap); 4983 pte1 = pte1_load(pmap_pte1(pmap, pv->pv_va)); 4984 if (pte1_is_section(pte1)) { 4985 if (pte1_is_wired(pte1)) 4986 count++; 4987 } else { 4988 KASSERT(pte1_is_link(pte1), 4989 ("%s: pte1 %#x is not link", __func__, pte1)); 4990 pte2 = pte2_load(pmap_pte2_quick(pmap, pv->pv_va)); 4991 if (pte2_is_wired(pte2)) 4992 count++; 4993 } 4994 PMAP_UNLOCK(pmap); 4995 } 4996 sched_unpin(); 4997 return (count); 4998} 4999 5000/* 5001 * pmap_page_wired_mappings: 5002 * 5003 * Return the number of managed mappings to the given physical page 5004 * that are wired. 5005 */ 5006int 5007pmap_page_wired_mappings(vm_page_t m) 5008{ 5009 int count; 5010 5011 count = 0; 5012 if ((m->oflags & VPO_UNMANAGED) != 0) 5013 return (count); 5014 rw_wlock(&pvh_global_lock); 5015 count = pmap_pvh_wired_mappings(&m->md, count); 5016 if ((m->flags & PG_FICTITIOUS) == 0) { 5017 count = pmap_pvh_wired_mappings(pa_to_pvh(VM_PAGE_TO_PHYS(m)), 5018 count); 5019 } 5020 rw_wunlock(&pvh_global_lock); 5021 return (count); 5022} 5023 5024/* 5025 * Returns TRUE if any of the given mappings were used to modify 5026 * physical memory. Otherwise, returns FALSE. Both page and 1mpage 5027 * mappings are supported. 5028 */ 5029static boolean_t 5030pmap_is_modified_pvh(struct md_page *pvh) 5031{ 5032 pv_entry_t pv; 5033 pt1_entry_t pte1; 5034 pt2_entry_t pte2; 5035 pmap_t pmap; 5036 boolean_t rv; 5037 5038 rw_assert(&pvh_global_lock, RA_WLOCKED); 5039 rv = FALSE; 5040 sched_pin(); 5041 TAILQ_FOREACH(pv, &pvh->pv_list, pv_next) { 5042 pmap = PV_PMAP(pv); 5043 PMAP_LOCK(pmap); 5044 pte1 = pte1_load(pmap_pte1(pmap, pv->pv_va)); 5045 if (pte1_is_section(pte1)) { 5046 rv = pte1_is_dirty(pte1); 5047 } else { 5048 KASSERT(pte1_is_link(pte1), 5049 ("%s: pte1 %#x is not link", __func__, pte1)); 5050 pte2 = pte2_load(pmap_pte2_quick(pmap, pv->pv_va)); 5051 rv = pte2_is_dirty(pte2); 5052 } 5053 PMAP_UNLOCK(pmap); 5054 if (rv) 5055 break; 5056 } 5057 sched_unpin(); 5058 return (rv); 5059} 5060 5061/* 5062 * pmap_is_modified: 5063 * 5064 * Return whether or not the specified physical page was modified 5065 * in any physical maps. 5066 */ 5067boolean_t 5068pmap_is_modified(vm_page_t m) 5069{ 5070 boolean_t rv; 5071 5072 KASSERT((m->oflags & VPO_UNMANAGED) == 0, 5073 ("%s: page %p is not managed", __func__, m)); 5074 5075 /* 5076 * If the page is not exclusive busied, then PGA_WRITEABLE cannot be 5077 * concurrently set while the object is locked. Thus, if PGA_WRITEABLE 5078 * is clear, no PTE2s can have PG_M set. 5079 */ 5080 VM_OBJECT_ASSERT_WLOCKED(m->object); 5081 if (!vm_page_xbusied(m) && (m->aflags & PGA_WRITEABLE) == 0) 5082 return (FALSE); 5083 rw_wlock(&pvh_global_lock); 5084 rv = pmap_is_modified_pvh(&m->md) || 5085 ((m->flags & PG_FICTITIOUS) == 0 && 5086 pmap_is_modified_pvh(pa_to_pvh(VM_PAGE_TO_PHYS(m)))); 5087 rw_wunlock(&pvh_global_lock); 5088 return (rv); 5089} 5090 5091/* 5092 * pmap_is_prefaultable: 5093 * 5094 * Return whether or not the specified virtual address is eligible 5095 * for prefault. 
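 *
 * (A sketch of the test below: an address is prefaultable only when
 * its L1 entry is already a link to an L2 page table and the 4KB
 * entry itself is still invalid, i.e. the fault can be satisfied
 * without allocating anything.)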
5096 */
5097 boolean_t
5098 pmap_is_prefaultable(pmap_t pmap, vm_offset_t addr)
5099 {
5100 pt1_entry_t pte1;
5101 pt2_entry_t pte2;
5102 boolean_t rv;
5103
5104 rv = FALSE;
5105 PMAP_LOCK(pmap);
5106 pte1 = pte1_load(pmap_pte1(pmap, addr));
5107 if (pte1_is_link(pte1)) {
5108 pte2 = pte2_load(pt2map_entry(addr));
5109 rv = !pte2_is_valid(pte2);
5110 }
5111 PMAP_UNLOCK(pmap);
5112 return (rv);
5113 }
5114
5115 /*
5116 * Returns TRUE if any of the given mappings were referenced and FALSE
5117 * otherwise. Both page and 1mpage mappings are supported.
5118 */
5119 static boolean_t
5120 pmap_is_referenced_pvh(struct md_page *pvh)
5121 {
5122
5123 pv_entry_t pv;
5124 pt1_entry_t pte1;
5125 pt2_entry_t pte2;
5126 pmap_t pmap;
5127 boolean_t rv;
5128
5129 rw_assert(&pvh_global_lock, RA_WLOCKED);
5130 rv = FALSE;
5131 sched_pin();
5132 TAILQ_FOREACH(pv, &pvh->pv_list, pv_next) {
5133 pmap = PV_PMAP(pv);
5134 PMAP_LOCK(pmap);
5135 pte1 = pte1_load(pmap_pte1(pmap, pv->pv_va));
5136 if (pte1_is_section(pte1)) {
5137 rv = (pte1 & (PTE1_A | PTE1_V)) == (PTE1_A | PTE1_V);
5138 } else {
5139 pte2 = pte2_load(pmap_pte2_quick(pmap, pv->pv_va));
5140 rv = (pte2 & (PTE2_A | PTE2_V)) == (PTE2_A | PTE2_V);
5141 }
5142 PMAP_UNLOCK(pmap);
5143 if (rv)
5144 break;
5145 }
5146 sched_unpin();
5147 return (rv);
5148 }
5149
5150 /*
5151 * pmap_is_referenced:
5152 *
5153 * Return whether or not the specified physical page was referenced
5154 * in any physical maps.
5155 */
5156 boolean_t
5157 pmap_is_referenced(vm_page_t m)
5158 {
5159 boolean_t rv;
5160
5161 KASSERT((m->oflags & VPO_UNMANAGED) == 0,
5162 ("%s: page %p is not managed", __func__, m));
5163 rw_wlock(&pvh_global_lock);
5164 rv = pmap_is_referenced_pvh(&m->md) ||
5165 ((m->flags & PG_FICTITIOUS) == 0 &&
5166 pmap_is_referenced_pvh(pa_to_pvh(VM_PAGE_TO_PHYS(m))));
5167 rw_wunlock(&pvh_global_lock);
5168 return (rv);
5169 }
5170
5171 #define PMAP_TS_REFERENCED_MAX 5
5172
5173 /*
5174 * pmap_ts_referenced:
5175 *
5176 * Return a count of reference bits for a page, clearing those bits.
5177 * It is not necessary for every reference bit to be cleared, but it
5178 * is necessary that 0 only be returned when there are truly no
5179 * reference bits set.
5180 *
5181 * XXX: The exact number of bits to check and clear is a matter that
5182 * should be tested and standardized at some point in the future for
5183 * optimal aging of shared pages.
5184 *
5185 * As an optimization, update the page's dirty field if a modified bit is
5186 * found while counting reference bits. This opportunistic update can be
5187 * performed at low cost and can eliminate the need for some future calls
5188 * to pmap_is_modified(). However, since this function stops after
5189 * finding PMAP_TS_REFERENCED_MAX reference bits, it may not detect some
5190 * dirty pages. Those dirty pages will only be detected by a future call
5191 * to pmap_is_modified().
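 *
 * Usage sketch; the page daemon is the assumed caller and
 * "act_delta" a hypothetical name:
 *
 *	act_delta = pmap_ts_referenced(m);	0..PMAP_TS_REFERENCED_MAX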
5192 */ 5193int 5194pmap_ts_referenced(vm_page_t m) 5195{ 5196 struct md_page *pvh; 5197 pv_entry_t pv, pvf; 5198 pmap_t pmap; 5199 pt1_entry_t *pte1p, opte1; 5200 pt2_entry_t *pte2p, opte2; 5201 vm_paddr_t pa; 5202 int rtval = 0; 5203 5204 KASSERT((m->oflags & VPO_UNMANAGED) == 0, 5205 ("%s: page %p is not managed", __func__, m)); 5206 pa = VM_PAGE_TO_PHYS(m); 5207 pvh = pa_to_pvh(pa); 5208 rw_wlock(&pvh_global_lock); 5209 sched_pin(); 5210 if ((m->flags & PG_FICTITIOUS) != 0 || 5211 (pvf = TAILQ_FIRST(&pvh->pv_list)) == NULL) 5212 goto small_mappings; 5213 pv = pvf; 5214 do { 5215 pmap = PV_PMAP(pv); 5216 PMAP_LOCK(pmap); 5217 pte1p = pmap_pte1(pmap, pv->pv_va); 5218 opte1 = pte1_load(pte1p); 5219 if (pte1_is_dirty(opte1)) { 5220 /* 5221 * Although "opte1" is mapping a 1MB page, because 5222 * this function is called at a 4KB page granularity, 5223 * we only update the 4KB page under test. 5224 */ 5225 vm_page_dirty(m); 5226 } 5227 if ((opte1 & PTE1_A) != 0) { 5228 /* 5229 * Since this reference bit is shared by 256 4KB pages, 5230 * it should not be cleared every time it is tested. 5231 * Apply a simple "hash" function on the physical page 5232 * number, the virtual section number, and the pmap 5233 * address to select one 4KB page out of the 256 5234 * on which testing the reference bit will result 5235 * in clearing that bit. This function is designed 5236 * to avoid the selection of the same 4KB page 5237 * for every 1MB page mapping. 5238 * 5239 * On demotion, a mapping that hasn't been referenced 5240 * is simply destroyed. To avoid the possibility of a 5241 * subsequent page fault on a demoted wired mapping, 5242 * always leave its reference bit set. Moreover, 5243 * since the section is wired, the current state of 5244 * its reference bit won't affect page replacement. 5245 */ 5246 if ((((pa >> PAGE_SHIFT) ^ (pv->pv_va >> PTE1_SHIFT) ^ 5247 (uintptr_t)pmap) & (NPTE2_IN_PG - 1)) == 0 && 5248 !pte1_is_wired(opte1)) { 5249 pte1_clear_bit(pte1p, PTE1_A); 5250 pmap_tlb_flush(pmap, pv->pv_va); 5251 } 5252 rtval++; 5253 } 5254 PMAP_UNLOCK(pmap); 5255 /* Rotate the PV list if it has more than one entry. */ 5256 if (TAILQ_NEXT(pv, pv_next) != NULL) { 5257 TAILQ_REMOVE(&pvh->pv_list, pv, pv_next); 5258 TAILQ_INSERT_TAIL(&pvh->pv_list, pv, pv_next); 5259 } 5260 if (rtval >= PMAP_TS_REFERENCED_MAX) 5261 goto out; 5262 } while ((pv = TAILQ_FIRST(&pvh->pv_list)) != pvf); 5263small_mappings: 5264 if ((pvf = TAILQ_FIRST(&m->md.pv_list)) == NULL) 5265 goto out; 5266 pv = pvf; 5267 do { 5268 pmap = PV_PMAP(pv); 5269 PMAP_LOCK(pmap); 5270 pte1p = pmap_pte1(pmap, pv->pv_va); 5271 KASSERT(pte1_is_link(pte1_load(pte1p)), 5272 ("%s: not found a link in page %p's pv list", __func__, m)); 5273 5274 pte2p = pmap_pte2_quick(pmap, pv->pv_va); 5275 opte2 = pte2_load(pte2p); 5276 if (pte2_is_dirty(opte2)) 5277 vm_page_dirty(m); 5278 if ((opte2 & PTE2_A) != 0) { 5279 pte2_clear_bit(pte2p, PTE2_A); 5280 pmap_tlb_flush(pmap, pv->pv_va); 5281 rtval++; 5282 } 5283 PMAP_UNLOCK(pmap); 5284 /* Rotate the PV list if it has more than one entry. */ 5285 if (TAILQ_NEXT(pv, pv_next) != NULL) { 5286 TAILQ_REMOVE(&m->md.pv_list, pv, pv_next); 5287 TAILQ_INSERT_TAIL(&m->md.pv_list, pv, pv_next); 5288 } 5289 } while ((pv = TAILQ_FIRST(&m->md.pv_list)) != pvf && rtval < 5290 PMAP_TS_REFERENCED_MAX); 5291out: 5292 sched_unpin(); 5293 rw_wunlock(&pvh_global_lock); 5294 return (rtval); 5295} 5296 5297/* 5298 * Clear the wired attribute from the mappings for the specified range of 5299 * addresses in the given pmap. 
Every valid mapping within that range
5300 * must have the wired attribute set. In contrast, invalid mappings
5301 * cannot have the wired attribute set, so they are ignored.
5302 *
5303 * The wired attribute of the page table entry is not a hardware feature,
5304 * so there is no need to invalidate any TLB entries.
5305 */
5306 void
5307 pmap_unwire(pmap_t pmap, vm_offset_t sva, vm_offset_t eva)
5308 {
5309 vm_offset_t nextva;
5310 pt1_entry_t *pte1p, pte1;
5311 pt2_entry_t *pte2p, pte2;
5312 boolean_t pv_lists_locked;
5313
5314 if (pmap_is_current(pmap))
5315 pv_lists_locked = FALSE;
5316 else {
5317 pv_lists_locked = TRUE;
5318 resume:
5319 rw_wlock(&pvh_global_lock);
5320 sched_pin();
5321 }
5322 PMAP_LOCK(pmap);
5323 for (; sva < eva; sva = nextva) {
5324 nextva = pte1_trunc(sva + PTE1_SIZE);
5325 if (nextva < sva)
5326 nextva = eva;
5327
5328 pte1p = pmap_pte1(pmap, sva);
5329 pte1 = pte1_load(pte1p);
5330
5331 /*
5332 * Weed out invalid mappings. Note: we assume that the L1
5333 * page table is always allocated, and in kernel virtual.
5334 */
5335 if (pte1 == 0)
5336 continue;
5337
5338 if (pte1_is_section(pte1)) {
5339 if (!pte1_is_wired(pte1))
5340 panic("%s: pte1 %#x not wired", __func__, pte1);
5341
5342 /*
5343 * Are we unwiring the entire large page? If not,
5344 * demote the mapping and fall through.
5345 */
5346 if (sva + PTE1_SIZE == nextva && eva >= nextva) {
5347 pte1_clear_bit(pte1p, PTE1_W);
5348 pmap->pm_stats.wired_count -= PTE1_SIZE /
5349 PAGE_SIZE;
5350 continue;
5351 } else {
5352 if (!pv_lists_locked) {
5353 pv_lists_locked = TRUE;
5354 if (!rw_try_wlock(&pvh_global_lock)) {
5355 PMAP_UNLOCK(pmap);
5356 /* Repeat sva. */
5357 goto resume;
5358 }
5359 sched_pin();
5360 }
5361 if (!pmap_demote_pte1(pmap, pte1p, sva))
5362 panic("%s: demotion failed", __func__);
5363 #ifdef INVARIANTS
5364 else {
5365 /* Update pte1 after demotion */
5366 pte1 = pte1_load(pte1p);
5367 }
5368 #endif
5369 }
5370 }
5371
5372 KASSERT(pte1_is_link(pte1), ("%s: pmap %p va %#x pte1 %#x at %p"
5373 " is not link", __func__, pmap, sva, pte1, pte1p));
5374
5375 /*
5376 * Limit our scan to either the end of the va represented
5377 * by the current L2 page table page, or to the end of the
5378 * range being unwired.
5379 */
5380 if (nextva > eva)
5381 nextva = eva;
5382
5383 for (pte2p = pmap_pte2_quick(pmap, sva); sva != nextva; pte2p++,
5384 sva += PAGE_SIZE) {
5385 pte2 = pte2_load(pte2p);
5386 if (!pte2_is_valid(pte2))
5387 continue;
5388 if (!pte2_is_wired(pte2))
5389 panic("%s: pte2 %#x is missing PTE2_W",
5390 __func__, pte2);
5391
5392 /*
5393 * PTE2_W must be cleared atomically. Although the pmap
5394 * lock synchronizes access to PTE2_W, another processor
5395 * could be changing PTE2_NM and/or PTE2_A concurrently.
5396 */
5397 pte2_clear_bit(pte2p, PTE2_W);
5398 pmap->pm_stats.wired_count--;
5399 }
5400 }
5401 if (pv_lists_locked) {
5402 sched_unpin();
5403 rw_wunlock(&pvh_global_lock);
5404 }
5405 PMAP_UNLOCK(pmap);
5406 }
5407
5408 /*
5409 * Clear the write and modified bits in each of the given page's mappings.
5410 */
5411 void
5412 pmap_remove_write(vm_page_t m)
5413 {
5414 struct md_page *pvh;
5415 pv_entry_t next_pv, pv;
5416 pmap_t pmap;
5417 pt1_entry_t *pte1p;
5418 pt2_entry_t *pte2p, opte2;
5419 vm_offset_t va;
5420
5421 KASSERT((m->oflags & VPO_UNMANAGED) == 0,
5422 ("%s: page %p is not managed", __func__, m));
5423
5424 /*
5425 * If the page is not exclusive busied, then PGA_WRITEABLE cannot be
5426 * set by another thread while the object is locked.
Thus, 5427 * if PGA_WRITEABLE is clear, no page table entries need updating. 5428 */ 5429 VM_OBJECT_ASSERT_WLOCKED(m->object); 5430 if (!vm_page_xbusied(m) && (m->aflags & PGA_WRITEABLE) == 0) 5431 return; 5432 rw_wlock(&pvh_global_lock); 5433 sched_pin(); 5434 if ((m->flags & PG_FICTITIOUS) != 0) 5435 goto small_mappings; 5436 pvh = pa_to_pvh(VM_PAGE_TO_PHYS(m)); 5437 TAILQ_FOREACH_SAFE(pv, &pvh->pv_list, pv_next, next_pv) { 5438 va = pv->pv_va; 5439 pmap = PV_PMAP(pv); 5440 PMAP_LOCK(pmap); 5441 pte1p = pmap_pte1(pmap, va); 5442 if (!(pte1_load(pte1p) & PTE1_RO)) 5443 (void)pmap_demote_pte1(pmap, pte1p, va); 5444 PMAP_UNLOCK(pmap); 5445 } 5446small_mappings: 5447 TAILQ_FOREACH(pv, &m->md.pv_list, pv_next) { 5448 pmap = PV_PMAP(pv); 5449 PMAP_LOCK(pmap); 5450 pte1p = pmap_pte1(pmap, pv->pv_va); 5451 KASSERT(!pte1_is_section(pte1_load(pte1p)), ("%s: found" 5452 " a section in page %p's pv list", __func__, m)); 5453 pte2p = pmap_pte2_quick(pmap, pv->pv_va); 5454 opte2 = pte2_load(pte2p); 5455 if (!(opte2 & PTE2_RO)) { 5456 pte2_store(pte2p, opte2 | PTE2_RO | PTE2_NM); 5457 if (pte2_is_dirty(opte2)) 5458 vm_page_dirty(m); 5459 pmap_tlb_flush(pmap, pv->pv_va); 5460 } 5461 PMAP_UNLOCK(pmap); 5462 } 5463 vm_page_aflag_clear(m, PGA_WRITEABLE); 5464 sched_unpin(); 5465 rw_wunlock(&pvh_global_lock); 5466} 5467 5468/* 5469 * Apply the given advice to the specified range of addresses within the 5470 * given pmap. Depending on the advice, clear the referenced and/or 5471 * modified flags in each mapping and set the mapped page's dirty field. 5472 */ 5473void 5474pmap_advise(pmap_t pmap, vm_offset_t sva, vm_offset_t eva, int advice) 5475{ 5476 pt1_entry_t *pte1p, opte1; 5477 pt2_entry_t *pte2p, pte2; 5478 vm_offset_t pdnxt; 5479 vm_page_t m; 5480 boolean_t pv_lists_locked; 5481 5482 if (advice != MADV_DONTNEED && advice != MADV_FREE) 5483 return; 5484 if (pmap_is_current(pmap)) 5485 pv_lists_locked = FALSE; 5486 else { 5487 pv_lists_locked = TRUE; 5488resume: 5489 rw_wlock(&pvh_global_lock); 5490 sched_pin(); 5491 } 5492 PMAP_LOCK(pmap); 5493 for (; sva < eva; sva = pdnxt) { 5494 pdnxt = pte1_trunc(sva + PTE1_SIZE); 5495 if (pdnxt < sva) 5496 pdnxt = eva; 5497 pte1p = pmap_pte1(pmap, sva); 5498 opte1 = pte1_load(pte1p); 5499 if (!pte1_is_valid(opte1)) /* XXX */ 5500 continue; 5501 else if (pte1_is_section(opte1)) { 5502 if (!pte1_is_managed(opte1)) 5503 continue; 5504 if (!pv_lists_locked) { 5505 pv_lists_locked = TRUE; 5506 if (!rw_try_wlock(&pvh_global_lock)) { 5507 PMAP_UNLOCK(pmap); 5508 goto resume; 5509 } 5510 sched_pin(); 5511 } 5512 if (!pmap_demote_pte1(pmap, pte1p, sva)) { 5513 /* 5514 * The large page mapping was destroyed. 5515 */ 5516 continue; 5517 } 5518 5519 /* 5520 * Unless the page mappings are wired, remove the 5521 * mapping to a single page so that a subsequent 5522 * access may repromote. Since the underlying L2 page 5523 * table is fully populated, this removal never 5524 * frees a L2 page table page. 
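 *
 * (Sketch of the reasoning: a successful demotion leaves the PT2
 * wire count at NPTE2_IN_PT2, so removing one 4KB mapping keeps it
 * nonzero and pmap_unuse_pt2() cannot free the page table page.)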
5525 */
5526 if (!pte1_is_wired(opte1)) {
5527 pte2p = pmap_pte2_quick(pmap, sva);
5528 KASSERT(pte2_is_valid(pte2_load(pte2p)),
5529 ("%s: invalid PTE2", __func__));
5530 pmap_remove_pte2(pmap, pte2p, sva, NULL);
5531 }
5532 }
5533 if (pdnxt > eva)
5534 pdnxt = eva;
5535 for (pte2p = pmap_pte2_quick(pmap, sva); sva != pdnxt; pte2p++,
5536 sva += PAGE_SIZE) {
5537 pte2 = pte2_load(pte2p);
5538 if (!pte2_is_valid(pte2) || !pte2_is_managed(pte2))
5539 continue;
5540 else if (pte2_is_dirty(pte2)) {
5541 if (advice == MADV_DONTNEED) {
5542 /*
5543 * Future calls to pmap_is_modified()
5544 * can be avoided by making the page
5545 * dirty now.
5546 */
5547 m = PHYS_TO_VM_PAGE(pte2_pa(pte2));
5548 vm_page_dirty(m);
5549 }
5550 pte2_set_bit(pte2p, PTE2_NM);
5551 pte2_clear_bit(pte2p, PTE2_A);
5552 } else if ((pte2 & PTE2_A) != 0)
5553 pte2_clear_bit(pte2p, PTE2_A);
5554 else
5555 continue;
5556 pmap_tlb_flush(pmap, sva);
5557 }
5558 }
5559 if (pv_lists_locked) {
5560 sched_unpin();
5561 rw_wunlock(&pvh_global_lock);
5562 }
5563 PMAP_UNLOCK(pmap);
5564 }
5565
5566 /*
5567 * Clear the modify bits on the specified physical page.
5568 */
5569 void
5570 pmap_clear_modify(vm_page_t m)
5571 {
5572 struct md_page *pvh;
5573 pv_entry_t next_pv, pv;
5574 pmap_t pmap;
5575 pt1_entry_t *pte1p, opte1;
5576 pt2_entry_t *pte2p, opte2;
5577 vm_offset_t va;
5578
5579 KASSERT((m->oflags & VPO_UNMANAGED) == 0,
5580 ("%s: page %p is not managed", __func__, m));
5581 VM_OBJECT_ASSERT_WLOCKED(m->object);
5582 KASSERT(!vm_page_xbusied(m),
5583 ("%s: page %p is exclusive busy", __func__, m));
5584
5585 /*
5586 * If the page is not PGA_WRITEABLE, then no PTE2s can have PTE2_NM
5587 * cleared. If the object containing the page is locked and the page
5588 * is not exclusive busied, then PGA_WRITEABLE cannot be concurrently
5589 * set.
5590 */
5591 if ((m->aflags & PGA_WRITEABLE) == 0)
5592 return;
5593 rw_wlock(&pvh_global_lock);
5594 sched_pin();
5595 if ((m->flags & PG_FICTITIOUS) != 0)
5596 goto small_mappings;
5597 pvh = pa_to_pvh(VM_PAGE_TO_PHYS(m));
5598 TAILQ_FOREACH_SAFE(pv, &pvh->pv_list, pv_next, next_pv) {
5599 va = pv->pv_va;
5600 pmap = PV_PMAP(pv);
5601 PMAP_LOCK(pmap);
5602 pte1p = pmap_pte1(pmap, va);
5603 opte1 = pte1_load(pte1p);
5604 if (!(opte1 & PTE1_RO)) {
5605 if (pmap_demote_pte1(pmap, pte1p, va) &&
5606 !pte1_is_wired(opte1)) {
5607 /*
5608 * Write protect the mapping to a
5609 * single page so that a subsequent
5610 * write access may repromote.
5611 */
5612 va += VM_PAGE_TO_PHYS(m) - pte1_pa(opte1);
5613 pte2p = pmap_pte2_quick(pmap, va);
5614 opte2 = pte2_load(pte2p);
5615 if ((opte2 & PTE2_V)) {
5616 pte2_set_bit(pte2p, PTE2_NM | PTE2_RO);
5617 vm_page_dirty(m);
5618 pmap_tlb_flush(pmap, va);
5619 }
5620 }
5621 }
5622 PMAP_UNLOCK(pmap);
5623 }
5624 small_mappings:
5625 TAILQ_FOREACH(pv, &m->md.pv_list, pv_next) {
5626 pmap = PV_PMAP(pv);
5627 PMAP_LOCK(pmap);
5628 pte1p = pmap_pte1(pmap, pv->pv_va);
5629 KASSERT(!pte1_is_section(pte1_load(pte1p)), ("%s: found"
5630 " a section in page %p's pv list", __func__, m));
5631 pte2p = pmap_pte2_quick(pmap, pv->pv_va);
5632 if (pte2_is_dirty(pte2_load(pte2p))) {
5633 pte2_set_bit(pte2p, PTE2_NM);
5634 pmap_tlb_flush(pmap, pv->pv_va);
5635 }
5636 PMAP_UNLOCK(pmap);
5637 }
5638 sched_unpin();
5639 rw_wunlock(&pvh_global_lock);
5640 }
5641
5642
5643 /*
5644 * Sets the memory attribute for the specified page.
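 *
 * Usage sketch (hypothetical caller; "ma" is whatever vm_memattr_t
 * the platform supports, e.g. VM_MEMATTR_WB_WA):
 *
 *	pmap_page_set_memattr(m, ma);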
5645 */ 5646void 5647pmap_page_set_memattr(vm_page_t m, vm_memattr_t ma) 5648{ 5649	pt2_entry_t *cmap2_pte2p; 5650	vm_memattr_t oma; 5651	vm_paddr_t pa; 5652	struct pcpu *pc; 5653 5654	oma = m->md.pat_mode; 5655	m->md.pat_mode = ma; 5656 5657	CTR5(KTR_PMAP, "%s: page %p - 0x%08X oma: %d, ma: %d", __func__, m, 5658	    VM_PAGE_TO_PHYS(m), oma, ma); 5659	if ((m->flags & PG_FICTITIOUS) != 0) 5660		return; 5661#if 0 5662	/* 5663	 * If "m" is a normal page, flush it from the cache. 5664	 * 5665	 * First, try to find an existing mapping of the page by sf 5666	 * buffer. sf_buf_invalidate_cache() modifies the mapping and 5667	 * flushes the cache. 5668	 */ 5669	if (sf_buf_invalidate_cache(m, oma)) 5670		return; 5671#endif 5672	/* 5673	 * If the page is not mapped by an sf buffer, map the page 5674	 * transiently and do the invalidation. 5675	 */ 5676	if (ma != oma) { 5677		pa = VM_PAGE_TO_PHYS(m); 5678		sched_pin(); 5679		pc = get_pcpu(); 5680		cmap2_pte2p = pc->pc_cmap2_pte2p; 5681		mtx_lock(&pc->pc_cmap_lock); 5682		if (pte2_load(cmap2_pte2p) != 0) 5683			panic("%s: CMAP2 busy", __func__); 5684		pte2_store(cmap2_pte2p, PTE2_KERN_NG(pa, PTE2_AP_KRW, 5685		    vm_memattr_to_pte2(ma))); 5686		dcache_wbinv_poc((vm_offset_t)pc->pc_cmap2_addr, pa, PAGE_SIZE); 5687		pte2_clear(cmap2_pte2p); 5688		tlb_flush((vm_offset_t)pc->pc_cmap2_addr); 5689		sched_unpin(); 5690		mtx_unlock(&pc->pc_cmap_lock); 5691	} 5692} 5693 5694/* 5695 * Miscellaneous support routines follow 5696 */ 5697 5698/* 5699 * Returns TRUE if the given page is mapped individually or as part of 5700 * a 1mpage. Otherwise, returns FALSE. 5701 */ 5702boolean_t 5703pmap_page_is_mapped(vm_page_t m) 5704{ 5705	boolean_t rv; 5706 5707	if ((m->oflags & VPO_UNMANAGED) != 0) 5708		return (FALSE); 5709	rw_wlock(&pvh_global_lock); 5710	rv = !TAILQ_EMPTY(&m->md.pv_list) || 5711	    ((m->flags & PG_FICTITIOUS) == 0 && 5712	    !TAILQ_EMPTY(&pa_to_pvh(VM_PAGE_TO_PHYS(m))->pv_list)); 5713	rw_wunlock(&pvh_global_lock); 5714	return (rv); 5715} 5716 5717/* 5718 * Returns true if the pmap's pv is one of the first 5719 * 16 pvs linked to from this page. This count may 5720 * be changed upwards or downwards in the future; it 5721 * is only necessary that true be returned for a small 5722 * subset of pmaps for proper page aging. 5723 */ 5724boolean_t 5725pmap_page_exists_quick(pmap_t pmap, vm_page_t m) 5726{ 5727	struct md_page *pvh; 5728	pv_entry_t pv; 5729	int loops = 0; 5730	boolean_t rv; 5731 5732	KASSERT((m->oflags & VPO_UNMANAGED) == 0, 5733	    ("%s: page %p is not managed", __func__, m)); 5734	rv = FALSE; 5735	rw_wlock(&pvh_global_lock); 5736	TAILQ_FOREACH(pv, &m->md.pv_list, pv_next) { 5737		if (PV_PMAP(pv) == pmap) { 5738			rv = TRUE; 5739			break; 5740		} 5741		loops++; 5742		if (loops >= 16) 5743			break; 5744	} 5745	if (!rv && loops < 16 && (m->flags & PG_FICTITIOUS) == 0) { 5746		pvh = pa_to_pvh(VM_PAGE_TO_PHYS(m)); 5747		TAILQ_FOREACH(pv, &pvh->pv_list, pv_next) { 5748			if (PV_PMAP(pv) == pmap) { 5749				rv = TRUE; 5750				break; 5751			} 5752			loops++; 5753			if (loops >= 16) 5754				break; 5755		} 5756	} 5757	rw_wunlock(&pvh_global_lock); 5758	return (rv); 5759} 5760 5761/* 5762 * pmap_zero_page zeros the specified hardware page by mapping 5763 * the page into KVM and using bzero to clear its contents.
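 * The temporary mapping is made through the per-CPU CMAP2 slot, so the thread is pinned to its CPU for as long as the mapping exists.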
5764 */ 5765void 5766pmap_zero_page(vm_page_t m) 5767{ 5768 pt2_entry_t *cmap2_pte2p; 5769 struct pcpu *pc; 5770 5771 sched_pin(); 5772 pc = get_pcpu(); 5773 cmap2_pte2p = pc->pc_cmap2_pte2p; 5774 mtx_lock(&pc->pc_cmap_lock); 5775 if (pte2_load(cmap2_pte2p) != 0) 5776 panic("%s: CMAP2 busy", __func__); 5777 pte2_store(cmap2_pte2p, PTE2_KERN_NG(VM_PAGE_TO_PHYS(m), PTE2_AP_KRW, 5778 vm_page_pte2_attr(m))); 5779 pagezero(pc->pc_cmap2_addr); 5780 pte2_clear(cmap2_pte2p); 5781 tlb_flush((vm_offset_t)pc->pc_cmap2_addr); 5782 sched_unpin(); 5783 mtx_unlock(&pc->pc_cmap_lock); 5784} 5785 5786/* 5787 * pmap_zero_page_area zeros the specified hardware page by mapping 5788 * the page into KVM and using bzero to clear its contents. 5789 * 5790 * off and size may not cover an area beyond a single hardware page. 5791 */ 5792void 5793pmap_zero_page_area(vm_page_t m, int off, int size) 5794{ 5795 pt2_entry_t *cmap2_pte2p; 5796 struct pcpu *pc; 5797 5798 sched_pin(); 5799 pc = get_pcpu(); 5800 cmap2_pte2p = pc->pc_cmap2_pte2p; 5801 mtx_lock(&pc->pc_cmap_lock); 5802 if (pte2_load(cmap2_pte2p) != 0) 5803 panic("%s: CMAP2 busy", __func__); 5804 pte2_store(cmap2_pte2p, PTE2_KERN_NG(VM_PAGE_TO_PHYS(m), PTE2_AP_KRW, 5805 vm_page_pte2_attr(m))); 5806 if (off == 0 && size == PAGE_SIZE) 5807 pagezero(pc->pc_cmap2_addr); 5808 else 5809 bzero(pc->pc_cmap2_addr + off, size); 5810 pte2_clear(cmap2_pte2p); 5811 tlb_flush((vm_offset_t)pc->pc_cmap2_addr); 5812 sched_unpin(); 5813 mtx_unlock(&pc->pc_cmap_lock); 5814} 5815 5816/* 5817 * pmap_zero_page_idle zeros the specified hardware page by mapping 5818 * the page into KVM and using bzero to clear its contents. This 5819 * is intended to be called from the vm_pagezero process only and 5820 * outside of Giant. 5821 */ 5822void 5823pmap_zero_page_idle(vm_page_t m) 5824{ 5825 5826 if (pte2_load(CMAP3) != 0) 5827 panic("%s: CMAP3 busy", __func__); 5828 sched_pin(); 5829 pte2_store(CMAP3, PTE2_KERN_NG(VM_PAGE_TO_PHYS(m), PTE2_AP_KRW, 5830 vm_page_pte2_attr(m))); 5831 pagezero(CADDR3); 5832 pte2_clear(CMAP3); 5833 tlb_flush((vm_offset_t)CADDR3); 5834 sched_unpin(); 5835} 5836 5837/* 5838 * pmap_copy_page copies the specified (machine independent) 5839 * page by mapping the page into virtual memory and using 5840 * bcopy to copy the page, one machine dependent page at a 5841 * time. 
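 * The source page is mapped read-only through the per-CPU CMAP1 slot and the destination read-write through CMAP2, with the thread pinned to its CPU for the duration of the copy.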
5842 */ 5843void 5844pmap_copy_page(vm_page_t src, vm_page_t dst) 5845{ 5846 pt2_entry_t *cmap1_pte2p, *cmap2_pte2p; 5847 struct pcpu *pc; 5848 5849 sched_pin(); 5850 pc = get_pcpu(); 5851 cmap1_pte2p = pc->pc_cmap1_pte2p; 5852 cmap2_pte2p = pc->pc_cmap2_pte2p; 5853 mtx_lock(&pc->pc_cmap_lock); 5854 if (pte2_load(cmap1_pte2p) != 0) 5855 panic("%s: CMAP1 busy", __func__); 5856 if (pte2_load(cmap2_pte2p) != 0) 5857 panic("%s: CMAP2 busy", __func__); 5858 pte2_store(cmap1_pte2p, PTE2_KERN_NG(VM_PAGE_TO_PHYS(src), 5859 PTE2_AP_KR | PTE2_NM, vm_page_pte2_attr(src))); 5860 pte2_store(cmap2_pte2p, PTE2_KERN_NG(VM_PAGE_TO_PHYS(dst), 5861 PTE2_AP_KRW, vm_page_pte2_attr(dst))); 5862 bcopy(pc->pc_cmap1_addr, pc->pc_cmap2_addr, PAGE_SIZE); 5863 pte2_clear(cmap1_pte2p); 5864 tlb_flush((vm_offset_t)pc->pc_cmap1_addr); 5865 pte2_clear(cmap2_pte2p); 5866 tlb_flush((vm_offset_t)pc->pc_cmap2_addr); 5867 sched_unpin(); 5868 mtx_unlock(&pc->pc_cmap_lock); 5869} 5870 5871int unmapped_buf_allowed = 1; 5872 5873void 5874pmap_copy_pages(vm_page_t ma[], vm_offset_t a_offset, vm_page_t mb[], 5875 vm_offset_t b_offset, int xfersize) 5876{ 5877 pt2_entry_t *cmap1_pte2p, *cmap2_pte2p; 5878 vm_page_t a_pg, b_pg; 5879 char *a_cp, *b_cp; 5880 vm_offset_t a_pg_offset, b_pg_offset; 5881 struct pcpu *pc; 5882 int cnt; 5883 5884 sched_pin(); 5885 pc = get_pcpu(); 5886 cmap1_pte2p = pc->pc_cmap1_pte2p; 5887 cmap2_pte2p = pc->pc_cmap2_pte2p; 5888 mtx_lock(&pc->pc_cmap_lock); 5889 if (pte2_load(cmap1_pte2p) != 0) 5890 panic("pmap_copy_pages: CMAP1 busy"); 5891 if (pte2_load(cmap2_pte2p) != 0) 5892 panic("pmap_copy_pages: CMAP2 busy"); 5893 while (xfersize > 0) { 5894 a_pg = ma[a_offset >> PAGE_SHIFT]; 5895 a_pg_offset = a_offset & PAGE_MASK; 5896 cnt = min(xfersize, PAGE_SIZE - a_pg_offset); 5897 b_pg = mb[b_offset >> PAGE_SHIFT]; 5898 b_pg_offset = b_offset & PAGE_MASK; 5899 cnt = min(cnt, PAGE_SIZE - b_pg_offset); 5900 pte2_store(cmap1_pte2p, PTE2_KERN_NG(VM_PAGE_TO_PHYS(a_pg), 5901 PTE2_AP_KR | PTE2_NM, vm_page_pte2_attr(a_pg))); 5902 tlb_flush_local((vm_offset_t)pc->pc_cmap1_addr); 5903 pte2_store(cmap2_pte2p, PTE2_KERN_NG(VM_PAGE_TO_PHYS(b_pg), 5904 PTE2_AP_KRW, vm_page_pte2_attr(b_pg))); 5905 tlb_flush_local((vm_offset_t)pc->pc_cmap2_addr); 5906 a_cp = pc->pc_cmap1_addr + a_pg_offset; 5907 b_cp = pc->pc_cmap2_addr + b_pg_offset; 5908 bcopy(a_cp, b_cp, cnt); 5909 a_offset += cnt; 5910 b_offset += cnt; 5911 xfersize -= cnt; 5912 } 5913 pte2_clear(cmap1_pte2p); 5914 tlb_flush((vm_offset_t)pc->pc_cmap1_addr); 5915 pte2_clear(cmap2_pte2p); 5916 tlb_flush((vm_offset_t)pc->pc_cmap2_addr); 5917 sched_unpin(); 5918 mtx_unlock(&pc->pc_cmap_lock); 5919} 5920 5921vm_offset_t 5922pmap_quick_enter_page(vm_page_t m) 5923{ 5924 struct pcpu *pc; 5925 pt2_entry_t *pte2p; 5926 5927 critical_enter(); 5928 pc = get_pcpu(); 5929 pte2p = pc->pc_qmap_pte2p; 5930 5931 KASSERT(pte2_load(pte2p) == 0, ("%s: PTE2 busy", __func__)); 5932 5933 pte2_store(pte2p, PTE2_KERN_NG(VM_PAGE_TO_PHYS(m), PTE2_AP_KRW, 5934 vm_page_pte2_attr(m))); 5935 return (pc->pc_qmap_addr); 5936} 5937 5938void 5939pmap_quick_remove_page(vm_offset_t addr) 5940{ 5941 struct pcpu *pc; 5942 pt2_entry_t *pte2p; 5943 5944 pc = get_pcpu(); 5945 pte2p = pc->pc_qmap_pte2p; 5946 5947 KASSERT(addr == pc->pc_qmap_addr, ("%s: invalid address", __func__)); 5948 KASSERT(pte2_load(pte2p) != 0, ("%s: PTE2 not in use", __func__)); 5949 5950 pte2_clear(pte2p); 5951 tlb_flush(pc->pc_qmap_addr); 5952 critical_exit(); 5953} 5954 5955/* 5956 * Copy the range specified by src_addr/len 5957 * from the 
source map to the range dst_addr/len 5958 * in the destination map. 5959 * 5960 * This routine is only advisory and need not do anything. 5961 */ 5962void 5963pmap_copy(pmap_t dst_pmap, pmap_t src_pmap, vm_offset_t dst_addr, vm_size_t len, 5964    vm_offset_t src_addr) 5965{ 5966	struct spglist free; 5967	vm_offset_t addr; 5968	vm_offset_t end_addr = src_addr + len; 5969	vm_offset_t nextva; 5970 5971	if (dst_addr != src_addr) 5972		return; 5973 5974	if (!pmap_is_current(src_pmap)) 5975		return; 5976 5977	rw_wlock(&pvh_global_lock); 5978	if (dst_pmap < src_pmap) { 5979		PMAP_LOCK(dst_pmap); 5980		PMAP_LOCK(src_pmap); 5981	} else { 5982		PMAP_LOCK(src_pmap); 5983		PMAP_LOCK(dst_pmap); 5984	} 5985	sched_pin(); 5986	for (addr = src_addr; addr < end_addr; addr = nextva) { 5987		pt2_entry_t *src_pte2p, *dst_pte2p; 5988		vm_page_t dst_mpt2pg, src_mpt2pg; 5989		pt1_entry_t src_pte1; 5990		u_int pte1_idx; 5991 5992		KASSERT(addr < VM_MAXUSER_ADDRESS, 5993		    ("%s: invalid to pmap_copy page tables", __func__)); 5994 5995		nextva = pte1_trunc(addr + PTE1_SIZE); 5996		if (nextva < addr) 5997			nextva = end_addr; 5998 5999		pte1_idx = pte1_index(addr); 6000		src_pte1 = src_pmap->pm_pt1[pte1_idx]; 6001		if (pte1_is_section(src_pte1)) { 6002			if ((addr & PTE1_OFFSET) != 0 || 6003			    (addr + PTE1_SIZE) > end_addr) 6004				continue; 6005			if (dst_pmap->pm_pt1[pte1_idx] == 0 && 6006			    (!pte1_is_managed(src_pte1) || 6007			    pmap_pv_insert_pte1(dst_pmap, addr, 6008			    pte1_pa(src_pte1)))) { 6009				dst_pmap->pm_pt1[pte1_idx] = src_pte1 & 6010				    ~PTE1_W; 6011				dst_pmap->pm_stats.resident_count += 6012				    PTE1_SIZE / PAGE_SIZE; 6013				pmap_pte1_mappings++; 6014			} 6015			continue; 6016		} else if (!pte1_is_link(src_pte1)) 6017			continue; 6018 6019		src_mpt2pg = PHYS_TO_VM_PAGE(pte1_link_pa(src_pte1)); 6020 6021		/* 6022		 * PT2s are left linked from the PT1 even when no longer 6023		 * referenced, until all PT2s in their page are unreferenced. 6024		 * 6025		 * QQQ: It could be changed ... 6026		 */ 6027#if 0 /* single_pt2_link_is_cleared */ 6028		KASSERT(pt2_wirecount_get(src_mpt2pg, pte1_idx) > 0, 6029		    ("%s: source page table page is unused", __func__)); 6030#else 6031		if (pt2_wirecount_get(src_mpt2pg, pte1_idx) == 0) 6032			continue; 6033#endif 6034		if (nextva > end_addr) 6035			nextva = end_addr; 6036 6037		src_pte2p = pt2map_entry(addr); 6038		while (addr < nextva) { 6039			pt2_entry_t temp_pte2; 6040			temp_pte2 = pte2_load(src_pte2p); 6041			/* 6042			 * We only do a virtual copy of managed pages. 6043			 */ 6044			if (pte2_is_managed(temp_pte2)) { 6045				dst_mpt2pg = pmap_allocpte2(dst_pmap, addr, 6046				    PMAP_ENTER_NOSLEEP); 6047				if (dst_mpt2pg == NULL) 6048					goto out; 6049				dst_pte2p = pmap_pte2_quick(dst_pmap, addr); 6050				if (!pte2_is_valid(pte2_load(dst_pte2p)) && 6051				    pmap_try_insert_pv_entry(dst_pmap, addr, 6052				    PHYS_TO_VM_PAGE(pte2_pa(temp_pte2)))) { 6053					/* 6054					 * Clear the wired, modified, and 6055					 * accessed (referenced) bits 6056					 * during the copy.
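					 * The copied mapping thus starts out clean and unreferenced in the destination pmap: PTE2_NM is set and PTE2_A is cleared, so the first access and the first write are caught again by the abort-driven emulation of the referenced and modified bits.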
6057 */ 6058 temp_pte2 &= ~(PTE2_W | PTE2_A); 6059 temp_pte2 |= PTE2_NM; 6060 pte2_store(dst_pte2p, temp_pte2); 6061 dst_pmap->pm_stats.resident_count++; 6062 } else { 6063 SLIST_INIT(&free); 6064 if (pmap_unwire_pt2(dst_pmap, addr, 6065 dst_mpt2pg, &free)) { 6066 pmap_tlb_flush(dst_pmap, addr); 6067 pmap_free_zero_pages(&free); 6068 } 6069 goto out; 6070 } 6071 if (pt2_wirecount_get(dst_mpt2pg, pte1_idx) >= 6072 pt2_wirecount_get(src_mpt2pg, pte1_idx)) 6073 break; 6074 } 6075 addr += PAGE_SIZE; 6076 src_pte2p++; 6077 } 6078 } 6079out: 6080 sched_unpin(); 6081 rw_wunlock(&pvh_global_lock); 6082 PMAP_UNLOCK(src_pmap); 6083 PMAP_UNLOCK(dst_pmap); 6084} 6085 6086/* 6087 * Increase the starting virtual address of the given mapping if a 6088 * different alignment might result in more section mappings. 6089 */ 6090void 6091pmap_align_superpage(vm_object_t object, vm_ooffset_t offset, 6092 vm_offset_t *addr, vm_size_t size) 6093{ 6094 vm_offset_t pte1_offset; 6095 6096 if (size < PTE1_SIZE) 6097 return; 6098 if (object != NULL && (object->flags & OBJ_COLORED) != 0) 6099 offset += ptoa(object->pg_color); 6100 pte1_offset = offset & PTE1_OFFSET; 6101 if (size - ((PTE1_SIZE - pte1_offset) & PTE1_OFFSET) < PTE1_SIZE || 6102 (*addr & PTE1_OFFSET) == pte1_offset) 6103 return; 6104 if ((*addr & PTE1_OFFSET) < pte1_offset) 6105 *addr = pte1_trunc(*addr) + pte1_offset; 6106 else 6107 *addr = pte1_roundup(*addr) + pte1_offset; 6108} 6109 6110void 6111pmap_activate(struct thread *td) 6112{ 6113 pmap_t pmap, oldpmap; 6114 u_int cpuid, ttb; 6115 6116 PDEBUG(9, printf("%s: td = %08x\n", __func__, (uint32_t)td)); 6117 6118 critical_enter(); 6119 pmap = vmspace_pmap(td->td_proc->p_vmspace); 6120 oldpmap = PCPU_GET(curpmap); 6121 cpuid = PCPU_GET(cpuid); 6122 6123#if defined(SMP) 6124 CPU_CLR_ATOMIC(cpuid, &oldpmap->pm_active); 6125 CPU_SET_ATOMIC(cpuid, &pmap->pm_active); 6126#else 6127 CPU_CLR(cpuid, &oldpmap->pm_active); 6128 CPU_SET(cpuid, &pmap->pm_active); 6129#endif 6130 6131 ttb = pmap_ttb_get(pmap); 6132 6133 /* 6134 * pmap_activate is for the current thread on the current cpu 6135 */ 6136 td->td_pcb->pcb_pagedir = ttb; 6137 cp15_ttbr_set(ttb); 6138 PCPU_SET(curpmap, pmap); 6139 critical_exit(); 6140} 6141 6142/* 6143 * Perform the pmap work for mincore. 
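 * Returns a MINCORE_* bit mask for the page mapped at addr, including MINCORE_SUPER when the address is covered by a 1 MB section; for managed pages whose referenced or modified state may need a rescan of other mappings, the physical address is handed back locked through locked_pa so that the page's object association cannot change under the caller.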
6144 */ 6145int 6146pmap_mincore(pmap_t pmap, vm_offset_t addr, vm_paddr_t *locked_pa) 6147{ 6148 pt1_entry_t *pte1p, pte1; 6149 pt2_entry_t *pte2p, pte2; 6150 vm_paddr_t pa; 6151 boolean_t managed; 6152 int val; 6153 6154 PMAP_LOCK(pmap); 6155retry: 6156 pte1p = pmap_pte1(pmap, addr); 6157 pte1 = pte1_load(pte1p); 6158 if (pte1_is_section(pte1)) { 6159 pa = trunc_page(pte1_pa(pte1) | (addr & PTE1_OFFSET)); 6160 managed = pte1_is_managed(pte1); 6161 val = MINCORE_SUPER | MINCORE_INCORE; 6162 if (pte1_is_dirty(pte1)) 6163 val |= MINCORE_MODIFIED | MINCORE_MODIFIED_OTHER; 6164 if (pte1 & PTE1_A) 6165 val |= MINCORE_REFERENCED | MINCORE_REFERENCED_OTHER; 6166 } else if (pte1_is_link(pte1)) { 6167 pte2p = pmap_pte2(pmap, addr); 6168 pte2 = pte2_load(pte2p); 6169 pmap_pte2_release(pte2p); 6170 pa = pte2_pa(pte2); 6171 managed = pte2_is_managed(pte2); 6172 val = MINCORE_INCORE; 6173 if (pte2_is_dirty(pte2)) 6174 val |= MINCORE_MODIFIED | MINCORE_MODIFIED_OTHER; 6175 if (pte2 & PTE2_A) 6176 val |= MINCORE_REFERENCED | MINCORE_REFERENCED_OTHER; 6177 } else { 6178 managed = FALSE; 6179 val = 0; 6180 } 6181 if ((val & (MINCORE_MODIFIED_OTHER | MINCORE_REFERENCED_OTHER)) != 6182 (MINCORE_MODIFIED_OTHER | MINCORE_REFERENCED_OTHER) && managed) { 6183 /* Ensure that "PHYS_TO_VM_PAGE(pa)->object" doesn't change. */ 6184 if (vm_page_pa_tryrelock(pmap, pa, locked_pa)) 6185 goto retry; 6186 } else 6187 PA_UNLOCK_COND(*locked_pa); 6188 PMAP_UNLOCK(pmap); 6189 return (val); 6190} 6191 6192void 6193pmap_kenter_device(vm_offset_t va, vm_size_t size, vm_paddr_t pa) 6194{ 6195 vm_offset_t sva; 6196 uint32_t l2attr; 6197 6198 KASSERT((size & PAGE_MASK) == 0, 6199 ("%s: device mapping not page-sized", __func__)); 6200 6201 sva = va; 6202 l2attr = vm_memattr_to_pte2(VM_MEMATTR_DEVICE); 6203 while (size != 0) { 6204 pmap_kenter_prot_attr(va, pa, PTE2_AP_KRW, l2attr); 6205 va += PAGE_SIZE; 6206 pa += PAGE_SIZE; 6207 size -= PAGE_SIZE; 6208 } 6209 tlb_flush_range(sva, va - sva); 6210} 6211 6212void 6213pmap_kremove_device(vm_offset_t va, vm_size_t size) 6214{ 6215 vm_offset_t sva; 6216 6217 KASSERT((size & PAGE_MASK) == 0, 6218 ("%s: device mapping not page-sized", __func__)); 6219 6220 sva = va; 6221 while (size != 0) { 6222 pmap_kremove(va); 6223 va += PAGE_SIZE; 6224 size -= PAGE_SIZE; 6225 } 6226 tlb_flush_range(sva, va - sva); 6227} 6228 6229void 6230pmap_set_pcb_pagedir(pmap_t pmap, struct pcb *pcb) 6231{ 6232 6233 pcb->pcb_pagedir = pmap_ttb_get(pmap); 6234} 6235 6236 6237/* 6238 * Clean L1 data cache range by physical address. 6239 * The range must be within a single page. 6240 */ 6241static void 6242pmap_dcache_wb_pou(vm_paddr_t pa, vm_size_t size, uint32_t attr) 6243{ 6244 pt2_entry_t *cmap2_pte2p; 6245 struct pcpu *pc; 6246 6247 KASSERT(((pa & PAGE_MASK) + size) <= PAGE_SIZE, 6248 ("%s: not on single page", __func__)); 6249 6250 sched_pin(); 6251 pc = get_pcpu(); 6252 cmap2_pte2p = pc->pc_cmap2_pte2p; 6253 mtx_lock(&pc->pc_cmap_lock); 6254 if (pte2_load(cmap2_pte2p) != 0) 6255 panic("%s: CMAP2 busy", __func__); 6256 pte2_store(cmap2_pte2p, PTE2_KERN_NG(pa, PTE2_AP_KRW, attr)); 6257 dcache_wb_pou((vm_offset_t)pc->pc_cmap2_addr + (pa & PAGE_MASK), size); 6258 pte2_clear(cmap2_pte2p); 6259 tlb_flush((vm_offset_t)pc->pc_cmap2_addr); 6260 sched_unpin(); 6261 mtx_unlock(&pc->pc_cmap_lock); 6262} 6263 6264/* 6265 * Sync instruction cache range which is not mapped yet. 
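 * The physical range is cleaned in the data cache to the point of unification page by page through a transient mapping, and then the whole instruction cache is invalidated.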
6266 */ 6267void 6268cache_icache_sync_fresh(vm_offset_t va, vm_paddr_t pa, vm_size_t size) 6269{ 6270	uint32_t len, offset; 6271	vm_page_t m; 6272 6273	/* Write back d-cache on given address range. */ 6274	offset = pa & PAGE_MASK; 6275	for ( ; size != 0; size -= len, pa += len, offset = 0) { 6276		len = min(PAGE_SIZE - offset, size); 6277		m = PHYS_TO_VM_PAGE(pa); 6278		KASSERT(m != NULL, ("%s: vm_page_t is null for %#x", 6279		    __func__, pa)); 6280		pmap_dcache_wb_pou(pa, len, vm_page_pte2_attr(m)); 6281	} 6282	/* 6283	 * The I-cache is VIPT. The only way to flush all virtual mappings 6284	 * of a given physical address is to invalidate the whole i-cache. 6285	 */ 6286	icache_inv_all(); 6287} 6288 6289void 6290pmap_sync_icache(pmap_t pmap, vm_offset_t va, vm_size_t size) 6291{ 6292 6293	/* Write back d-cache on given address range. */ 6294	if (va >= VM_MIN_KERNEL_ADDRESS) { 6295		dcache_wb_pou(va, size); 6296	} else { 6297		uint32_t len, offset; 6298		vm_paddr_t pa; 6299		vm_page_t m; 6300 6301		offset = va & PAGE_MASK; 6302		for ( ; size != 0; size -= len, va += len, offset = 0) { 6303			pa = pmap_extract(pmap, va); /* offset is preserved */ 6304			len = min(PAGE_SIZE - offset, size); 6305			m = PHYS_TO_VM_PAGE(pa); 6306			KASSERT(m != NULL, ("%s: vm_page_t is null for %#x", 6307			    __func__, pa)); 6308			pmap_dcache_wb_pou(pa, len, vm_page_pte2_attr(m)); 6309		} 6310	} 6311	/* 6312	 * The I-cache is VIPT. The only way to flush all virtual mappings 6313	 * of a given physical address is to invalidate the whole i-cache. 6314	 */ 6315	icache_inv_all(); 6316} 6317 6318/* 6319 * The implementation of pmap_fault() uses the IN_RANGE2() macro, which 6320 * depends on the given range size being a power of 2. 6321 */ 6322CTASSERT(powerof2(NB_IN_PT1)); 6323CTASSERT(powerof2(PT2MAP_SIZE)); 6324 6325#define IN_RANGE2(addr, start, size)	\ 6326    ((vm_offset_t)(start) == ((vm_offset_t)(addr) & ~((size) - 1))) 6327 6328/* 6329 * Handle access and R/W emulation faults. 6330 */ 6331int 6332pmap_fault(pmap_t pmap, vm_offset_t far, uint32_t fsr, int idx, bool usermode) 6333{ 6334	pt1_entry_t *pte1p, pte1; 6335	pt2_entry_t *pte2p, pte2; 6336 6337	if (pmap == NULL) 6338		pmap = kernel_pmap; 6339 6340	/* 6341	 * In the kernel, we should never get an abort with a FAR that is 6342	 * within the pmap->pm_pt1 or PT2MAP address spaces. If it happens, 6343	 * stop here, print a useful abort message, and even get to the 6344	 * debugger; otherwise it likely ends in a never-ending loop of aborts. 6345	 */ 6346	if (__predict_false(IN_RANGE2(far, pmap->pm_pt1, NB_IN_PT1))) { 6347		/* 6348		 * All L1 tables should always be mapped and present. However, 6349		 * we check only the current one here. For user mode, only a 6350		 * permission abort from a malicious user is not fatal, nor is 6351		 * an alignment abort, as it may have higher priority. 6352		 */ 6353		if (!usermode || (idx != FAULT_ALIGN && idx != FAULT_PERM_L2)) { 6354			CTR4(KTR_PMAP, "%s: pmap %#x pm_pt1 %#x far %#x", 6355			    __func__, pmap, pmap->pm_pt1, far); 6356			panic("%s: pm_pt1 abort", __func__); 6357		} 6358		return (KERN_INVALID_ADDRESS); 6359	} 6360	if (__predict_false(IN_RANGE2(far, PT2MAP, PT2MAP_SIZE))) { 6361		/* 6362		 * PT2MAP should always be mapped and present in the current 6363		 * L1 table. However, only existing L2 tables are mapped in 6364		 * PT2MAP. For user mode, only an L2 translation abort or a 6365		 * permission abort from a malicious user is not fatal, nor is 6366		 * an alignment abort, as it may have higher priority.
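		 * (A user-mode L2 translation abort here merely means that no L2 table exists yet for the address, i.e. the address is unmapped, so KERN_INVALID_ADDRESS is returned instead of panicking.)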
6367		 */ 6368		if (!usermode || (idx != FAULT_ALIGN && 6369		    idx != FAULT_TRAN_L2 && idx != FAULT_PERM_L2)) { 6370			CTR4(KTR_PMAP, "%s: pmap %#x PT2MAP %#x far %#x", 6371			    __func__, pmap, PT2MAP, far); 6372			panic("%s: PT2MAP abort", __func__); 6373		} 6374		return (KERN_INVALID_ADDRESS); 6375	} 6376 6377	/* 6378	 * A pmap lock is used below for handling of access and R/W emulation 6379	 * aborts. They were handled by atomic operations before, so some 6380	 * analysis of the new situation is needed to answer the following 6381	 * question: Is it safe to use the lock even for these aborts? 6382	 * 6383	 * Two cases may happen in general: 6384	 * 6385	 * (1) Aborts while the pmap lock is already held - this should not 6386	 * happen as the pmap lock is not recursive. However, under the pmap 6387	 * lock only internal kernel data should be accessed, and such data 6388	 * should be mapped with the A bit set and the NM bit cleared. If a 6389	 * double abort happens, then the mapping of the data which caused it 6390	 * must be fixed. Further, all new mappings are always made with the 6391	 * A bit set and the bit can be cleared only on managed mappings. 6392	 * 6393	 * (2) Aborts while another lock (or locks) is held - this can already 6394	 * happen. However, there is no difference here whether it is an access 6395	 * or R/W emulation abort, or some other abort. 6396	 */ 6397 6398	PMAP_LOCK(pmap); 6399#ifdef SMP 6400	/* 6401	 * Special treatment is needed due to the break-before-make approach 6402	 * used when a pte1 is updated for a userland mapping during section 6403	 * promotion or demotion. If not caught here, pmap_enter() can find a 6404	 * section mapping on the faulting address. That is not allowed. 6405	 */ 6406	if (idx == FAULT_TRAN_L1 && usermode && cp15_ats1cur_check(far) == 0) { 6407		PMAP_UNLOCK(pmap); 6408		return (KERN_SUCCESS); 6409	} 6410#endif 6411	/* 6412	 * Access bits for page and section. Note that the entry 6413	 * is not in the TLB yet, so TLB flush is not necessary. 6414	 * 6415	 * QQQ: This is hardware emulation, we do not call userret() 6416	 *      for aborts from user mode. 6417	 */ 6418	if (idx == FAULT_ACCESS_L2) { 6419		pte2p = pt2map_entry(far); 6420		pte2 = pte2_load(pte2p); 6421		if (pte2_is_valid(pte2)) { 6422			pte2_store(pte2p, pte2 | PTE2_A); 6423			PMAP_UNLOCK(pmap); 6424			return (KERN_SUCCESS); 6425		} 6426	} 6427	if (idx == FAULT_ACCESS_L1) { 6428		pte1p = pmap_pte1(pmap, far); 6429		pte1 = pte1_load(pte1p); 6430		if (pte1_is_section(pte1)) { 6431			pte1_store(pte1p, pte1 | PTE1_A); 6432			PMAP_UNLOCK(pmap); 6433			return (KERN_SUCCESS); 6434		} 6435	} 6436 6437	/* 6438	 * Handle modify bits for page and section. Note that the modify 6439	 * bit is emulated by software. So PTEx_RO is the software read-only 6440	 * bit and the PTEx_NM flag is the real hardware read-only bit. 6441	 * 6442	 * QQQ: This is hardware emulation, we do not call userret() 6443	 *      for aborts from user mode.
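	 * For example, the first write to a clean writable page raises a permission abort: PTE2_RO is clear but PTE2_NM still write-protects the page in hardware. Clearing PTE2_NM below marks the mapping dirty and makes it hardware writable at once.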
6444	 */ 6445	if ((fsr & FSR_WNR) && (idx == FAULT_PERM_L2)) { 6446		pte2p = pt2map_entry(far); 6447		pte2 = pte2_load(pte2p); 6448		if (pte2_is_valid(pte2) && !(pte2 & PTE2_RO) && 6449		    (pte2 & PTE2_NM)) { 6450			pte2_store(pte2p, pte2 & ~PTE2_NM); 6451			tlb_flush(trunc_page(far)); 6452			PMAP_UNLOCK(pmap); 6453			return (KERN_SUCCESS); 6454		} 6455	} 6456	if ((fsr & FSR_WNR) && (idx == FAULT_PERM_L1)) { 6457		pte1p = pmap_pte1(pmap, far); 6458		pte1 = pte1_load(pte1p); 6459		if (pte1_is_section(pte1) && !(pte1 & PTE1_RO) && 6460		    (pte1 & PTE1_NM)) { 6461			pte1_store(pte1p, pte1 & ~PTE1_NM); 6462			tlb_flush(pte1_trunc(far)); 6463			PMAP_UNLOCK(pmap); 6464			return (KERN_SUCCESS); 6465		} 6466	} 6467 6468	/* 6469	 * QQQ: The previous code, mainly the fast handling of access and 6470	 *      modify bit aborts, could be moved to ASM. Now we start 6471	 *      to deal with the slower aborts. 6472	 */ 6473 6474#ifdef INVARIANTS 6475	/* 6476	 * Read an entry in PT2TAB associated with both pmap and far. 6477	 * It's safe because PT2TAB is always mapped. 6478	 */ 6479	pte2 = pt2tab_load(pmap_pt2tab_entry(pmap, far)); 6480	if (pte2_is_valid(pte2)) { 6481		/* 6482		 * Now that we know the L2 page table is allocated, 6483		 * we can use PT2MAP to get the L2 page table entry. 6484		 */ 6485		pte2 = pte2_load(pt2map_entry(far)); 6486		if (pte2_is_valid(pte2)) { 6487			/* 6488			 * If the L2 page table entry is valid, make sure that 6489			 * the L1 page table entry is valid too. Note that we 6490			 * leave L2 page entries untouched when promoted. 6491			 */ 6492			pte1 = pte1_load(pmap_pte1(pmap, far)); 6493			if (!pte1_is_valid(pte1)) { 6494				panic("%s: missing L1 page entry (%p, %#x)", 6495				    __func__, pmap, far); 6496			} 6497		} 6498	} 6499#endif 6500	PMAP_UNLOCK(pmap); 6501	return (KERN_FAILURE); 6502} 6503 6504#if defined(PMAP_DEBUG) 6505/* 6506 * Reuses the KVA used by the pmap_zero_page() function !!! 6507 */ 6508static void 6509pmap_zero_page_check(vm_page_t m) 6510{ 6511	pt2_entry_t *cmap2_pte2p; 6512	uint32_t *p, *end; 6513	struct pcpu *pc; 6514 6515	sched_pin(); 6516	pc = get_pcpu(); 6517	cmap2_pte2p = pc->pc_cmap2_pte2p; 6518	mtx_lock(&pc->pc_cmap_lock); 6519	if (pte2_load(cmap2_pte2p) != 0) 6520		panic("%s: CMAP2 busy", __func__); 6521	pte2_store(cmap2_pte2p, PTE2_KERN_NG(VM_PAGE_TO_PHYS(m), PTE2_AP_KRW, 6522	    vm_page_pte2_attr(m))); 6523	end = (uint32_t*)(pc->pc_cmap2_addr + PAGE_SIZE); 6524	for (p = (uint32_t*)pc->pc_cmap2_addr; p < end; p++) 6525		if (*p != 0) 6526			panic("%s: page %p not zero, va: %p", __func__, m, 6527			    pc->pc_cmap2_addr); 6528	pte2_clear(cmap2_pte2p); 6529	tlb_flush((vm_offset_t)pc->pc_cmap2_addr); 6530	sched_unpin(); 6531	mtx_unlock(&pc->pc_cmap_lock); 6532} 6533 6534int 6535pmap_pid_dump(int pid) 6536{ 6537	pmap_t pmap; 6538	struct proc *p; 6539	int npte2 = 0; 6540	int i, j, index; 6541 6542	sx_slock(&allproc_lock); 6543	FOREACH_PROC_IN_SYSTEM(p) { 6544		if (p->p_pid != pid || p->p_vmspace == NULL) 6545			continue; 6546		index = 0; 6547		pmap = vmspace_pmap(p->p_vmspace); 6548		for (i = 0; i < NPTE1_IN_PT1; i++) { 6549			pt1_entry_t pte1; 6550			pt2_entry_t *pte2p, pte2; 6551			vm_offset_t base, va; 6552			vm_paddr_t pa; 6553			vm_page_t m; 6554 6555			base = i << PTE1_SHIFT; 6556			pte1 = pte1_load(&pmap->pm_pt1[i]); 6557 6558			if (pte1_is_section(pte1)) { 6559				/* 6560				 * QQQ: Do something here!
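				 * (Section mappings are not dumped yet; only pages reached through L2 links are printed below.)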
6561 */ 6562 } else if (pte1_is_link(pte1)) { 6563 for (j = 0; j < NPTE2_IN_PT2; j++) { 6564 va = base + (j << PAGE_SHIFT); 6565 if (va >= VM_MIN_KERNEL_ADDRESS) { 6566 if (index) { 6567 index = 0; 6568 printf("\n"); 6569 } 6570 sx_sunlock(&allproc_lock); 6571 return (npte2); 6572 } 6573 pte2p = pmap_pte2(pmap, va); 6574 pte2 = pte2_load(pte2p); 6575 pmap_pte2_release(pte2p); 6576 if (!pte2_is_valid(pte2)) 6577 continue; 6578 6579 pa = pte2_pa(pte2); 6580 m = PHYS_TO_VM_PAGE(pa); 6581 printf("va: 0x%x, pa: 0x%x, h: %d, w:" 6582 " %d, f: 0x%x", va, pa, 6583 m->hold_count, m->wire_count, 6584 m->flags); 6585 npte2++; 6586 index++; 6587 if (index >= 2) { 6588 index = 0; 6589 printf("\n"); 6590 } else { 6591 printf(" "); 6592 } 6593 } 6594 } 6595 } 6596 } 6597 sx_sunlock(&allproc_lock); 6598 return (npte2); 6599} 6600 6601#endif 6602 6603#ifdef DDB 6604static pt2_entry_t * 6605pmap_pte2_ddb(pmap_t pmap, vm_offset_t va) 6606{ 6607 pt1_entry_t pte1; 6608 vm_paddr_t pt2pg_pa; 6609 6610 pte1 = pte1_load(pmap_pte1(pmap, va)); 6611 if (!pte1_is_link(pte1)) 6612 return (NULL); 6613 6614 if (pmap_is_current(pmap)) 6615 return (pt2map_entry(va)); 6616 6617 /* Note that L2 page table size is not equal to PAGE_SIZE. */ 6618 pt2pg_pa = trunc_page(pte1_link_pa(pte1)); 6619 if (pte2_pa(pte2_load(PMAP3)) != pt2pg_pa) { 6620 pte2_store(PMAP3, PTE2_KPT(pt2pg_pa)); 6621#ifdef SMP 6622 PMAP3cpu = PCPU_GET(cpuid); 6623#endif 6624 tlb_flush_local((vm_offset_t)PADDR3); 6625 } 6626#ifdef SMP 6627 else if (PMAP3cpu != PCPU_GET(cpuid)) { 6628 PMAP3cpu = PCPU_GET(cpuid); 6629 tlb_flush_local((vm_offset_t)PADDR3); 6630 } 6631#endif 6632 return (PADDR3 + (arm32_btop(va) & (NPTE2_IN_PG - 1))); 6633} 6634 6635static void 6636dump_pmap(pmap_t pmap) 6637{ 6638 6639 printf("pmap %p\n", pmap); 6640 printf(" pm_pt1: %p\n", pmap->pm_pt1); 6641 printf(" pm_pt2tab: %p\n", pmap->pm_pt2tab); 6642 printf(" pm_active: 0x%08lX\n", pmap->pm_active.__bits[0]); 6643} 6644 6645DB_SHOW_COMMAND(pmaps, pmap_list_pmaps) 6646{ 6647 6648 pmap_t pmap; 6649 LIST_FOREACH(pmap, &allpmaps, pm_list) { 6650 dump_pmap(pmap); 6651 } 6652} 6653 6654static int 6655pte2_class(pt2_entry_t pte2) 6656{ 6657 int cls; 6658 6659 cls = (pte2 >> 2) & 0x03; 6660 cls |= (pte2 >> 4) & 0x04; 6661 return (cls); 6662} 6663 6664static void 6665dump_section(pmap_t pmap, uint32_t pte1_idx) 6666{ 6667} 6668 6669static void 6670dump_link(pmap_t pmap, uint32_t pte1_idx, boolean_t invalid_ok) 6671{ 6672 uint32_t i; 6673 vm_offset_t va; 6674 pt2_entry_t *pte2p, pte2; 6675 vm_page_t m; 6676 6677 va = pte1_idx << PTE1_SHIFT; 6678 pte2p = pmap_pte2_ddb(pmap, va); 6679 for (i = 0; i < NPTE2_IN_PT2; i++, pte2p++, va += PAGE_SIZE) { 6680 pte2 = pte2_load(pte2p); 6681 if (pte2 == 0) 6682 continue; 6683 if (!pte2_is_valid(pte2)) { 6684 printf(" 0x%08X: 0x%08X", va, pte2); 6685 if (!invalid_ok) 6686 printf(" - not valid !!!"); 6687 printf("\n"); 6688 continue; 6689 } 6690 m = PHYS_TO_VM_PAGE(pte2_pa(pte2)); 6691 printf(" 0x%08X: 0x%08X, TEX%d, s:%d, g:%d, m:%p", va , pte2, 6692 pte2_class(pte2), !!(pte2 & PTE2_S), !(pte2 & PTE2_NG), m); 6693 if (m != NULL) { 6694 printf(" v:%d h:%d w:%d f:0x%04X\n", m->valid, 6695 m->hold_count, m->wire_count, m->flags); 6696 } else { 6697 printf("\n"); 6698 } 6699 } 6700} 6701 6702static __inline boolean_t 6703is_pv_chunk_space(vm_offset_t va) 6704{ 6705 6706 if ((((vm_offset_t)pv_chunkbase) <= va) && 6707 (va < ((vm_offset_t)pv_chunkbase + PAGE_SIZE * pv_maxchunks))) 6708 return (TRUE); 6709 return (FALSE); 6710} 6711 6712DB_SHOW_COMMAND(pmap, 
pmap_pmap_print) 6713{ 6714 /* XXX convert args. */ 6715 pmap_t pmap = (pmap_t)addr; 6716 pt1_entry_t pte1; 6717 pt2_entry_t pte2; 6718 vm_offset_t va, eva; 6719 vm_page_t m; 6720 uint32_t i; 6721 boolean_t invalid_ok, dump_link_ok, dump_pv_chunk; 6722 6723 if (have_addr) { 6724 pmap_t pm; 6725 6726 LIST_FOREACH(pm, &allpmaps, pm_list) 6727 if (pm == pmap) break; 6728 if (pm == NULL) { 6729 printf("given pmap %p is not in allpmaps list\n", pmap); 6730 return; 6731 } 6732 } else 6733 pmap = PCPU_GET(curpmap); 6734 6735 eva = (modif[0] == 'u') ? VM_MAXUSER_ADDRESS : 0xFFFFFFFF; 6736 dump_pv_chunk = FALSE; /* XXX evaluate from modif[] */ 6737 6738 printf("pmap: 0x%08X\n", (uint32_t)pmap); 6739 printf("PT2MAP: 0x%08X\n", (uint32_t)PT2MAP); 6740 printf("pt2tab: 0x%08X\n", (uint32_t)pmap->pm_pt2tab); 6741 6742 for(i = 0; i < NPTE1_IN_PT1; i++) { 6743 pte1 = pte1_load(&pmap->pm_pt1[i]); 6744 if (pte1 == 0) 6745 continue; 6746 va = i << PTE1_SHIFT; 6747 if (va >= eva) 6748 break; 6749 6750 if (pte1_is_section(pte1)) { 6751 printf("0x%08X: Section 0x%08X, s:%d g:%d\n", va, pte1, 6752 !!(pte1 & PTE1_S), !(pte1 & PTE1_NG)); 6753 dump_section(pmap, i); 6754 } else if (pte1_is_link(pte1)) { 6755 dump_link_ok = TRUE; 6756 invalid_ok = FALSE; 6757 pte2 = pte2_load(pmap_pt2tab_entry(pmap, va)); 6758 m = PHYS_TO_VM_PAGE(pte1_link_pa(pte1)); 6759 printf("0x%08X: Link 0x%08X, pt2tab: 0x%08X m: %p", 6760 va, pte1, pte2, m); 6761 if (is_pv_chunk_space(va)) { 6762 printf(" - pv_chunk space"); 6763 if (dump_pv_chunk) 6764 invalid_ok = TRUE; 6765 else 6766 dump_link_ok = FALSE; 6767 } 6768 else if (m != NULL) 6769 printf(" w:%d w2:%u", m->wire_count, 6770 pt2_wirecount_get(m, pte1_index(va))); 6771 if (pte2 == 0) 6772 printf(" !!! pt2tab entry is ZERO"); 6773 else if (pte2_pa(pte1) != pte2_pa(pte2)) 6774 printf(" !!! pt2tab entry is DIFFERENT - m: %p", 6775 PHYS_TO_VM_PAGE(pte2_pa(pte2))); 6776 printf("\n"); 6777 if (dump_link_ok) 6778 dump_link(pmap, i, invalid_ok); 6779 } else 6780 printf("0x%08X: Invalid entry 0x%08X\n", va, pte1); 6781 } 6782} 6783 6784static void 6785dump_pt2tab(pmap_t pmap) 6786{ 6787 uint32_t i; 6788 pt2_entry_t pte2; 6789 vm_offset_t va; 6790 vm_paddr_t pa; 6791 vm_page_t m; 6792 6793 printf("PT2TAB:\n"); 6794 for (i = 0; i < PT2TAB_ENTRIES; i++) { 6795 pte2 = pte2_load(&pmap->pm_pt2tab[i]); 6796 if (!pte2_is_valid(pte2)) 6797 continue; 6798 va = i << PT2TAB_SHIFT; 6799 pa = pte2_pa(pte2); 6800 m = PHYS_TO_VM_PAGE(pa); 6801 printf(" 0x%08X: 0x%08X, TEX%d, s:%d, m:%p", va, pte2, 6802 pte2_class(pte2), !!(pte2 & PTE2_S), m); 6803 if (m != NULL) 6804 printf(" , h: %d, w: %d, f: 0x%04X pidx: %lld", 6805 m->hold_count, m->wire_count, m->flags, m->pindex); 6806 printf("\n"); 6807 } 6808} 6809 6810DB_SHOW_COMMAND(pmap_pt2tab, pmap_pt2tab_print) 6811{ 6812 /* XXX convert args. 
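 * Unlike "show pmap", this command accepts no address argument and always dumps the PT2TAB of the current pmap.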
*/ 6813 pmap_t pmap = (pmap_t)addr; 6814 pt1_entry_t pte1; 6815 pt2_entry_t pte2; 6816 vm_offset_t va; 6817 uint32_t i, start; 6818 6819 if (have_addr) { 6820 printf("supported only on current pmap\n"); 6821 return; 6822 } 6823 6824 pmap = PCPU_GET(curpmap); 6825 printf("curpmap: 0x%08X\n", (uint32_t)pmap); 6826 printf("PT2MAP: 0x%08X\n", (uint32_t)PT2MAP); 6827 printf("pt2tab: 0x%08X\n", (uint32_t)pmap->pm_pt2tab); 6828 6829 start = pte1_index((vm_offset_t)PT2MAP); 6830 for (i = start; i < (start + NPT2_IN_PT2TAB); i++) { 6831 pte1 = pte1_load(&pmap->pm_pt1[i]); 6832 if (pte1 == 0) 6833 continue; 6834 va = i << PTE1_SHIFT; 6835 if (pte1_is_section(pte1)) { 6836 printf("0x%08X: Section 0x%08X, s:%d\n", va, pte1, 6837 !!(pte1 & PTE1_S)); 6838 dump_section(pmap, i); 6839 } else if (pte1_is_link(pte1)) { 6840 pte2 = pte2_load(pmap_pt2tab_entry(pmap, va)); 6841 printf("0x%08X: Link 0x%08X, pt2tab: 0x%08X\n", va, 6842 pte1, pte2); 6843 if (pte2 == 0) 6844 printf(" !!! pt2tab entry is ZERO\n"); 6845 } else 6846 printf("0x%08X: Invalid entry 0x%08X\n", va, pte1); 6847 } 6848 dump_pt2tab(pmap); 6849} 6850#endif 6851