pmap.c revision 324400
1/*- 2 * Copyright (c) 1991 Regents of the University of California. 3 * All rights reserved. 4 * Copyright (c) 1994 John S. Dyson 5 * All rights reserved. 6 * Copyright (c) 1994 David Greenman 7 * All rights reserved. 8 * 9 * This code is derived from software contributed to Berkeley by 10 * the Systems Programming Group of the University of Utah Computer 11 * Science Department and William Jolitz of UUNET Technologies Inc. 12 * 13 * Redistribution and use in source and binary forms, with or without 14 * modification, are permitted provided that the following conditions 15 * are met: 16 * 1. Redistributions of source code must retain the above copyright 17 * notice, this list of conditions and the following disclaimer. 18 * 2. Redistributions in binary form must reproduce the above copyright 19 * notice, this list of conditions and the following disclaimer in the 20 * documentation and/or other materials provided with the distribution. 21 * 4. Neither the name of the University nor the names of its contributors 22 * may be used to endorse or promote products derived from this software 23 * without specific prior written permission. 24 * 25 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND 26 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 27 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 28 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE 29 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 30 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 31 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 32 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 33 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 34 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 35 * SUCH DAMAGE. 36 * 37 * from: @(#)pmap.c 7.7 (Berkeley) 5/12/91 38 */ 39 40#include <sys/cdefs.h> 41__FBSDID("$FreeBSD: stable/11/sys/sparc64/sparc64/pmap.c 324400 2017-10-07 21:13:54Z alc $"); 42 43/* 44 * Manages physical address maps. 45 * 46 * Since the information managed by this module is also stored by the 47 * logical address mapping module, this module may throw away valid virtual 48 * to physical mappings at almost any time. However, invalidations of 49 * mappings must be done as requested. 50 * 51 * In order to cope with hardware architectures which make virtual to 52 * physical map invalidates expensive, this module may delay invalidate 53 * reduced protection operations until such time as they are actually 54 * necessary. This module is given full information as to which processors 55 * are currently using which maps, and to when physical maps must be made 56 * correct. 
57 */ 58 59#include "opt_kstack_pages.h" 60#include "opt_pmap.h" 61 62#include <sys/param.h> 63#include <sys/kernel.h> 64#include <sys/ktr.h> 65#include <sys/lock.h> 66#include <sys/msgbuf.h> 67#include <sys/mutex.h> 68#include <sys/proc.h> 69#include <sys/rwlock.h> 70#include <sys/smp.h> 71#include <sys/sysctl.h> 72#include <sys/systm.h> 73#include <sys/vmmeter.h> 74 75#include <dev/ofw/openfirm.h> 76 77#include <vm/vm.h> 78#include <vm/vm_param.h> 79#include <vm/vm_kern.h> 80#include <vm/vm_page.h> 81#include <vm/vm_map.h> 82#include <vm/vm_object.h> 83#include <vm/vm_extern.h> 84#include <vm/vm_pageout.h> 85#include <vm/vm_pager.h> 86#include <vm/vm_phys.h> 87 88#include <machine/cache.h> 89#include <machine/frame.h> 90#include <machine/instr.h> 91#include <machine/md_var.h> 92#include <machine/metadata.h> 93#include <machine/ofw_mem.h> 94#include <machine/smp.h> 95#include <machine/tlb.h> 96#include <machine/tte.h> 97#include <machine/tsb.h> 98#include <machine/ver.h> 99 100/* 101 * Virtual address of message buffer 102 */ 103struct msgbuf *msgbufp; 104 105/* 106 * Map of physical memory regions 107 */ 108vm_paddr_t phys_avail[128]; 109static struct ofw_mem_region mra[128]; 110struct ofw_mem_region sparc64_memreg[128]; 111int sparc64_nmemreg; 112static struct ofw_map translations[128]; 113static int translations_size; 114 115static vm_offset_t pmap_idle_map; 116static vm_offset_t pmap_temp_map_1; 117static vm_offset_t pmap_temp_map_2; 118 119/* 120 * First and last available kernel virtual addresses 121 */ 122vm_offset_t virtual_avail; 123vm_offset_t virtual_end; 124vm_offset_t kernel_vm_end; 125 126vm_offset_t vm_max_kernel_address; 127 128/* 129 * Kernel pmap 130 */ 131struct pmap kernel_pmap_store; 132 133struct rwlock_padalign tte_list_global_lock; 134 135/* 136 * Allocate physical memory for use in pmap_bootstrap. 137 */ 138static vm_paddr_t pmap_bootstrap_alloc(vm_size_t size, uint32_t colors); 139 140static void pmap_bootstrap_set_tte(struct tte *tp, u_long vpn, u_long data); 141static void pmap_cache_remove(vm_page_t m, vm_offset_t va); 142static int pmap_protect_tte(struct pmap *pm1, struct pmap *pm2, 143 struct tte *tp, vm_offset_t va); 144static int pmap_unwire_tte(pmap_t pm, pmap_t pm2, struct tte *tp, 145 vm_offset_t va); 146static void pmap_init_qpages(void); 147 148/* 149 * Map the given physical page at the specified virtual address in the 150 * target pmap with the protection requested. If specified the page 151 * will be wired down. 152 * 153 * The page queues and pmap must be locked. 
154 */ 155static int pmap_enter_locked(pmap_t pm, vm_offset_t va, vm_page_t m, 156 vm_prot_t prot, u_int flags, int8_t psind); 157 158extern int tl1_dmmu_miss_direct_patch_tsb_phys_1[]; 159extern int tl1_dmmu_miss_direct_patch_tsb_phys_end_1[]; 160extern int tl1_dmmu_miss_patch_asi_1[]; 161extern int tl1_dmmu_miss_patch_quad_ldd_1[]; 162extern int tl1_dmmu_miss_patch_tsb_1[]; 163extern int tl1_dmmu_miss_patch_tsb_2[]; 164extern int tl1_dmmu_miss_patch_tsb_mask_1[]; 165extern int tl1_dmmu_miss_patch_tsb_mask_2[]; 166extern int tl1_dmmu_prot_patch_asi_1[]; 167extern int tl1_dmmu_prot_patch_quad_ldd_1[]; 168extern int tl1_dmmu_prot_patch_tsb_1[]; 169extern int tl1_dmmu_prot_patch_tsb_2[]; 170extern int tl1_dmmu_prot_patch_tsb_mask_1[]; 171extern int tl1_dmmu_prot_patch_tsb_mask_2[]; 172extern int tl1_immu_miss_patch_asi_1[]; 173extern int tl1_immu_miss_patch_quad_ldd_1[]; 174extern int tl1_immu_miss_patch_tsb_1[]; 175extern int tl1_immu_miss_patch_tsb_2[]; 176extern int tl1_immu_miss_patch_tsb_mask_1[]; 177extern int tl1_immu_miss_patch_tsb_mask_2[]; 178 179/* 180 * If user pmap is processed with pmap_remove and the 181 * resident count drops to 0, there are no more pages to remove, so we 182 * need not continue. 183 */ 184#define PMAP_REMOVE_DONE(pm) \ 185 ((pm) != kernel_pmap && (pm)->pm_stats.resident_count == 0) 186 187/* 188 * The threshold (in bytes) above which tsb_foreach() is used in pmap_remove() 189 * and pmap_protect() instead of trying each virtual address. 190 */ 191#define PMAP_TSB_THRESH ((TSB_SIZE / 2) * PAGE_SIZE) 192 193SYSCTL_NODE(_debug, OID_AUTO, pmap_stats, CTLFLAG_RD, 0, ""); 194 195PMAP_STATS_VAR(pmap_nenter); 196PMAP_STATS_VAR(pmap_nenter_update); 197PMAP_STATS_VAR(pmap_nenter_replace); 198PMAP_STATS_VAR(pmap_nenter_new); 199PMAP_STATS_VAR(pmap_nkenter); 200PMAP_STATS_VAR(pmap_nkenter_oc); 201PMAP_STATS_VAR(pmap_nkenter_stupid); 202PMAP_STATS_VAR(pmap_nkremove); 203PMAP_STATS_VAR(pmap_nqenter); 204PMAP_STATS_VAR(pmap_nqremove); 205PMAP_STATS_VAR(pmap_ncache_enter); 206PMAP_STATS_VAR(pmap_ncache_enter_c); 207PMAP_STATS_VAR(pmap_ncache_enter_oc); 208PMAP_STATS_VAR(pmap_ncache_enter_cc); 209PMAP_STATS_VAR(pmap_ncache_enter_coc); 210PMAP_STATS_VAR(pmap_ncache_enter_nc); 211PMAP_STATS_VAR(pmap_ncache_enter_cnc); 212PMAP_STATS_VAR(pmap_ncache_remove); 213PMAP_STATS_VAR(pmap_ncache_remove_c); 214PMAP_STATS_VAR(pmap_ncache_remove_oc); 215PMAP_STATS_VAR(pmap_ncache_remove_cc); 216PMAP_STATS_VAR(pmap_ncache_remove_coc); 217PMAP_STATS_VAR(pmap_ncache_remove_nc); 218PMAP_STATS_VAR(pmap_nzero_page); 219PMAP_STATS_VAR(pmap_nzero_page_c); 220PMAP_STATS_VAR(pmap_nzero_page_oc); 221PMAP_STATS_VAR(pmap_nzero_page_nc); 222PMAP_STATS_VAR(pmap_nzero_page_area); 223PMAP_STATS_VAR(pmap_nzero_page_area_c); 224PMAP_STATS_VAR(pmap_nzero_page_area_oc); 225PMAP_STATS_VAR(pmap_nzero_page_area_nc); 226PMAP_STATS_VAR(pmap_nzero_page_idle); 227PMAP_STATS_VAR(pmap_nzero_page_idle_c); 228PMAP_STATS_VAR(pmap_nzero_page_idle_oc); 229PMAP_STATS_VAR(pmap_nzero_page_idle_nc); 230PMAP_STATS_VAR(pmap_ncopy_page); 231PMAP_STATS_VAR(pmap_ncopy_page_c); 232PMAP_STATS_VAR(pmap_ncopy_page_oc); 233PMAP_STATS_VAR(pmap_ncopy_page_nc); 234PMAP_STATS_VAR(pmap_ncopy_page_dc); 235PMAP_STATS_VAR(pmap_ncopy_page_doc); 236PMAP_STATS_VAR(pmap_ncopy_page_sc); 237PMAP_STATS_VAR(pmap_ncopy_page_soc); 238 239PMAP_STATS_VAR(pmap_nnew_thread); 240PMAP_STATS_VAR(pmap_nnew_thread_oc); 241 242static inline u_long dtlb_get_data(u_int tlb, u_int slot); 243 244/* 245 * Quick sort callout for comparing memory 
regions 246 */ 247static int mr_cmp(const void *a, const void *b); 248static int om_cmp(const void *a, const void *b); 249 250static int 251mr_cmp(const void *a, const void *b) 252{ 253 const struct ofw_mem_region *mra; 254 const struct ofw_mem_region *mrb; 255 256 mra = a; 257 mrb = b; 258 if (mra->mr_start < mrb->mr_start) 259 return (-1); 260 else if (mra->mr_start > mrb->mr_start) 261 return (1); 262 else 263 return (0); 264} 265 266static int 267om_cmp(const void *a, const void *b) 268{ 269 const struct ofw_map *oma; 270 const struct ofw_map *omb; 271 272 oma = a; 273 omb = b; 274 if (oma->om_start < omb->om_start) 275 return (-1); 276 else if (oma->om_start > omb->om_start) 277 return (1); 278 else 279 return (0); 280} 281 282static inline u_long 283dtlb_get_data(u_int tlb, u_int slot) 284{ 285 u_long data; 286 register_t s; 287 288 slot = TLB_DAR_SLOT(tlb, slot); 289 /* 290 * We read ASI_DTLB_DATA_ACCESS_REG twice back-to-back in order to 291 * work around errata of USIII and beyond. 292 */ 293 s = intr_disable(); 294 (void)ldxa(slot, ASI_DTLB_DATA_ACCESS_REG); 295 data = ldxa(slot, ASI_DTLB_DATA_ACCESS_REG); 296 intr_restore(s); 297 return (data); 298} 299 300/* 301 * Bootstrap the system enough to run with virtual memory. 302 */ 303void 304pmap_bootstrap(u_int cpu_impl) 305{ 306 struct pmap *pm; 307 struct tte *tp; 308 vm_offset_t off; 309 vm_offset_t va; 310 vm_paddr_t pa; 311 vm_size_t physsz; 312 vm_size_t virtsz; 313 u_long data; 314 u_long vpn; 315 phandle_t pmem; 316 phandle_t vmem; 317 u_int dtlb_slots_avail; 318 int i; 319 int j; 320 int sz; 321 uint32_t asi; 322 uint32_t colors; 323 uint32_t ldd; 324 325 /* 326 * Set the kernel context. 327 */ 328 pmap_set_kctx(); 329 330 colors = dcache_color_ignore != 0 ? 1 : DCACHE_COLORS; 331 332 /* 333 * Find out what physical memory is available from the PROM and 334 * initialize the phys_avail array. This must be done before 335 * pmap_bootstrap_alloc is called. 
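 * (For reference: phys_avail[] is filled below with { start, end } pairs of available physical addresses; the remaining entries of the static array stay zero, which is how later loops such as the one in pmap_bootstrap_alloc() detect the end of the list.)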
336 */ 337 if ((pmem = OF_finddevice("/memory")) == -1) 338 OF_panic("%s: finddevice /memory", __func__); 339 if ((sz = OF_getproplen(pmem, "available")) == -1) 340 OF_panic("%s: getproplen /memory/available", __func__); 341 if (sizeof(phys_avail) < sz) 342 OF_panic("%s: phys_avail too small", __func__); 343 if (sizeof(mra) < sz) 344 OF_panic("%s: mra too small", __func__); 345 bzero(mra, sz); 346 if (OF_getprop(pmem, "available", mra, sz) == -1) 347 OF_panic("%s: getprop /memory/available", __func__); 348 sz /= sizeof(*mra); 349#ifdef DIAGNOSTIC 350 OF_printf("pmap_bootstrap: physical memory\n"); 351#endif 352 qsort(mra, sz, sizeof (*mra), mr_cmp); 353 physsz = 0; 354 getenv_quad("hw.physmem", &physmem); 355 physmem = btoc(physmem); 356 for (i = 0, j = 0; i < sz; i++, j += 2) { 357#ifdef DIAGNOSTIC 358 OF_printf("start=%#lx size=%#lx\n", mra[i].mr_start, 359 mra[i].mr_size); 360#endif 361 if (physmem != 0 && btoc(physsz + mra[i].mr_size) >= physmem) { 362 if (btoc(physsz) < physmem) { 363 phys_avail[j] = mra[i].mr_start; 364 phys_avail[j + 1] = mra[i].mr_start + 365 (ctob(physmem) - physsz); 366 physsz = ctob(physmem); 367 } 368 break; 369 } 370 phys_avail[j] = mra[i].mr_start; 371 phys_avail[j + 1] = mra[i].mr_start + mra[i].mr_size; 372 physsz += mra[i].mr_size; 373 } 374 physmem = btoc(physsz); 375 376 /* 377 * Calculate the size of kernel virtual memory, and the size and mask 378 * for the kernel TSB based on the physical memory size but limited 379 * by the amount of dTLB slots available for locked entries if we have 380 * to lock the TSB in the TLB (given that for spitfire-class CPUs all 381 * of the dt64 slots can hold locked entries but there is no large 382 * dTLB for unlocked ones, we don't use more than half of it for the 383 * TSB). 384 * Note that for reasons unknown OpenSolaris doesn't take advantage of 385 * ASI_ATOMIC_QUAD_LDD_PHYS on UltraSPARC-III. However, given that no 386 * public documentation is available for these, the latter just might 387 * not support it, yet. 388 */ 389 if (cpu_impl == CPU_IMPL_SPARC64V || 390 cpu_impl >= CPU_IMPL_ULTRASPARCIIIp) { 391 tsb_kernel_ldd_phys = 1; 392 virtsz = roundup(5 / 3 * physsz, PAGE_SIZE_4M << 393 (PAGE_SHIFT - TTE_SHIFT)); 394 } else { 395 dtlb_slots_avail = 0; 396 for (i = 0; i < dtlb_slots; i++) { 397 data = dtlb_get_data(cpu_impl == 398 CPU_IMPL_ULTRASPARCIII ? TLB_DAR_T16 : 399 TLB_DAR_T32, i); 400 if ((data & (TD_V | TD_L)) != (TD_V | TD_L)) 401 dtlb_slots_avail++; 402 } 403#ifdef SMP 404 dtlb_slots_avail -= PCPU_PAGES; 405#endif 406 if (cpu_impl >= CPU_IMPL_ULTRASPARCI && 407 cpu_impl < CPU_IMPL_ULTRASPARCIII) 408 dtlb_slots_avail /= 2; 409 virtsz = roundup(physsz, PAGE_SIZE_4M << 410 (PAGE_SHIFT - TTE_SHIFT)); 411 virtsz = MIN(virtsz, (dtlb_slots_avail * PAGE_SIZE_4M) << 412 (PAGE_SHIFT - TTE_SHIFT)); 413 } 414 vm_max_kernel_address = VM_MIN_KERNEL_ADDRESS + virtsz; 415 tsb_kernel_size = virtsz >> (PAGE_SHIFT - TTE_SHIFT); 416 tsb_kernel_mask = (tsb_kernel_size >> TTE_SHIFT) - 1; 417 418 /* 419 * Allocate the kernel TSB and lock it in the TLB if necessary. 
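 * (A worked example of the sizing above: one TTE maps one 8K page, so the TSB needs virtsz >> (PAGE_SHIFT - TTE_SHIFT) bytes; assuming 8K base pages and 16-byte TTEs that is virtsz / 512, i.e. roughly 2MB of TSB per 1GB of kernel virtual address space.)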
420 */ 421 pa = pmap_bootstrap_alloc(tsb_kernel_size, colors); 422 if (pa & PAGE_MASK_4M) 423 OF_panic("%s: TSB unaligned", __func__); 424 tsb_kernel_phys = pa; 425 if (tsb_kernel_ldd_phys == 0) { 426 tsb_kernel = 427 (struct tte *)(VM_MIN_KERNEL_ADDRESS - tsb_kernel_size); 428 pmap_map_tsb(); 429 bzero(tsb_kernel, tsb_kernel_size); 430 } else { 431 tsb_kernel = 432 (struct tte *)TLB_PHYS_TO_DIRECT(tsb_kernel_phys); 433 aszero(ASI_PHYS_USE_EC, tsb_kernel_phys, tsb_kernel_size); 434 } 435 436 /* 437 * Allocate and map the dynamic per-CPU area for the BSP. 438 */ 439 pa = pmap_bootstrap_alloc(DPCPU_SIZE, colors); 440 dpcpu0 = (void *)TLB_PHYS_TO_DIRECT(pa); 441 442 /* 443 * Allocate and map the message buffer. 444 */ 445 pa = pmap_bootstrap_alloc(msgbufsize, colors); 446 msgbufp = (struct msgbuf *)TLB_PHYS_TO_DIRECT(pa); 447 448 /* 449 * Patch the TSB addresses and mask as well as the ASIs used to load 450 * it into the trap table. 451 */ 452 453#define LDDA_R_I_R(rd, imm_asi, rs1, rs2) \ 454 (EIF_OP(IOP_LDST) | EIF_F3_RD(rd) | EIF_F3_OP3(INS3_LDDA) | \ 455 EIF_F3_RS1(rs1) | EIF_F3_I(0) | EIF_F3_IMM_ASI(imm_asi) | \ 456 EIF_F3_RS2(rs2)) 457#define OR_R_I_R(rd, imm13, rs1) \ 458 (EIF_OP(IOP_MISC) | EIF_F3_RD(rd) | EIF_F3_OP3(INS2_OR) | \ 459 EIF_F3_RS1(rs1) | EIF_F3_I(1) | EIF_IMM(imm13, 13)) 460#define SETHI(rd, imm22) \ 461 (EIF_OP(IOP_FORM2) | EIF_F2_RD(rd) | EIF_F2_OP2(INS0_SETHI) | \ 462 EIF_IMM((imm22) >> 10, 22)) 463#define WR_R_I(rd, imm13, rs1) \ 464 (EIF_OP(IOP_MISC) | EIF_F3_RD(rd) | EIF_F3_OP3(INS2_WR) | \ 465 EIF_F3_RS1(rs1) | EIF_F3_I(1) | EIF_IMM(imm13, 13)) 466 467#define PATCH_ASI(addr, asi) do { \ 468 if (addr[0] != WR_R_I(IF_F3_RD(addr[0]), 0x0, \ 469 IF_F3_RS1(addr[0]))) \ 470 OF_panic("%s: patched instructions have changed", \ 471 __func__); \ 472 addr[0] |= EIF_IMM((asi), 13); \ 473 flush(addr); \ 474} while (0) 475 476#define PATCH_LDD(addr, asi) do { \ 477 if (addr[0] != LDDA_R_I_R(IF_F3_RD(addr[0]), 0x0, \ 478 IF_F3_RS1(addr[0]), IF_F3_RS2(addr[0]))) \ 479 OF_panic("%s: patched instructions have changed", \ 480 __func__); \ 481 addr[0] |= EIF_F3_IMM_ASI(asi); \ 482 flush(addr); \ 483} while (0) 484 485#define PATCH_TSB(addr, val) do { \ 486 if (addr[0] != SETHI(IF_F2_RD(addr[0]), 0x0) || \ 487 addr[1] != OR_R_I_R(IF_F3_RD(addr[1]), 0x0, \ 488 IF_F3_RS1(addr[1])) || \ 489 addr[3] != SETHI(IF_F2_RD(addr[3]), 0x0)) \ 490 OF_panic("%s: patched instructions have changed", \ 491 __func__); \ 492 addr[0] |= EIF_IMM((val) >> 42, 22); \ 493 addr[1] |= EIF_IMM((val) >> 32, 10); \ 494 addr[3] |= EIF_IMM((val) >> 10, 22); \ 495 flush(addr); \ 496 flush(addr + 1); \ 497 flush(addr + 3); \ 498} while (0) 499 500#define PATCH_TSB_MASK(addr, val) do { \ 501 if (addr[0] != SETHI(IF_F2_RD(addr[0]), 0x0) || \ 502 addr[1] != OR_R_I_R(IF_F3_RD(addr[1]), 0x0, \ 503 IF_F3_RS1(addr[1]))) \ 504 OF_panic("%s: patched instructions have changed", \ 505 __func__); \ 506 addr[0] |= EIF_IMM((val) >> 10, 22); \ 507 addr[1] |= EIF_IMM((val), 10); \ 508 flush(addr); \ 509 flush(addr + 1); \ 510} while (0) 511 512 if (tsb_kernel_ldd_phys == 0) { 513 asi = ASI_N; 514 ldd = ASI_NUCLEUS_QUAD_LDD; 515 off = (vm_offset_t)tsb_kernel; 516 } else { 517 asi = ASI_PHYS_USE_EC; 518 ldd = ASI_ATOMIC_QUAD_LDD_PHYS; 519 off = (vm_offset_t)tsb_kernel_phys; 520 } 521 PATCH_TSB(tl1_dmmu_miss_direct_patch_tsb_phys_1, tsb_kernel_phys); 522 PATCH_TSB(tl1_dmmu_miss_direct_patch_tsb_phys_end_1, 523 tsb_kernel_phys + tsb_kernel_size - 1); 524 PATCH_ASI(tl1_dmmu_miss_patch_asi_1, asi); 525 
PATCH_LDD(tl1_dmmu_miss_patch_quad_ldd_1, ldd); 526 PATCH_TSB(tl1_dmmu_miss_patch_tsb_1, off); 527 PATCH_TSB(tl1_dmmu_miss_patch_tsb_2, off); 528 PATCH_TSB_MASK(tl1_dmmu_miss_patch_tsb_mask_1, tsb_kernel_mask); 529 PATCH_TSB_MASK(tl1_dmmu_miss_patch_tsb_mask_2, tsb_kernel_mask); 530 PATCH_ASI(tl1_dmmu_prot_patch_asi_1, asi); 531 PATCH_LDD(tl1_dmmu_prot_patch_quad_ldd_1, ldd); 532 PATCH_TSB(tl1_dmmu_prot_patch_tsb_1, off); 533 PATCH_TSB(tl1_dmmu_prot_patch_tsb_2, off); 534 PATCH_TSB_MASK(tl1_dmmu_prot_patch_tsb_mask_1, tsb_kernel_mask); 535 PATCH_TSB_MASK(tl1_dmmu_prot_patch_tsb_mask_2, tsb_kernel_mask); 536 PATCH_ASI(tl1_immu_miss_patch_asi_1, asi); 537 PATCH_LDD(tl1_immu_miss_patch_quad_ldd_1, ldd); 538 PATCH_TSB(tl1_immu_miss_patch_tsb_1, off); 539 PATCH_TSB(tl1_immu_miss_patch_tsb_2, off); 540 PATCH_TSB_MASK(tl1_immu_miss_patch_tsb_mask_1, tsb_kernel_mask); 541 PATCH_TSB_MASK(tl1_immu_miss_patch_tsb_mask_2, tsb_kernel_mask); 542 543 /* 544 * Enter fake 8k pages for the 4MB kernel pages, so that 545 * pmap_kextract() will work for them. 546 */ 547 for (i = 0; i < kernel_tlb_slots; i++) { 548 pa = kernel_tlbs[i].te_pa; 549 va = kernel_tlbs[i].te_va; 550 for (off = 0; off < PAGE_SIZE_4M; off += PAGE_SIZE) { 551 tp = tsb_kvtotte(va + off); 552 vpn = TV_VPN(va + off, TS_8K); 553 data = TD_V | TD_8K | TD_PA(pa + off) | TD_REF | 554 TD_SW | TD_CP | TD_CV | TD_P | TD_W; 555 pmap_bootstrap_set_tte(tp, vpn, data); 556 } 557 } 558 559 /* 560 * Set the start and end of KVA. The kernel is loaded starting 561 * at the first available 4MB super page, so we advance to the 562 * end of the last one used for it. 563 */ 564 virtual_avail = KERNBASE + kernel_tlb_slots * PAGE_SIZE_4M; 565 virtual_end = vm_max_kernel_address; 566 kernel_vm_end = vm_max_kernel_address; 567 568 /* 569 * Allocate kva space for temporary mappings. 570 */ 571 pmap_idle_map = virtual_avail; 572 virtual_avail += PAGE_SIZE * colors; 573 pmap_temp_map_1 = virtual_avail; 574 virtual_avail += PAGE_SIZE * colors; 575 pmap_temp_map_2 = virtual_avail; 576 virtual_avail += PAGE_SIZE * colors; 577 578 /* 579 * Allocate a kernel stack with guard page for thread0 and map it 580 * into the kernel TSB. We must ensure that the virtual address is 581 * colored properly for corresponding CPUs, since we're allocating 582 * from phys_avail so the memory won't have an associated vm_page_t. 583 */ 584 pa = pmap_bootstrap_alloc(KSTACK_PAGES * PAGE_SIZE, colors); 585 kstack0_phys = pa; 586 virtual_avail += roundup(KSTACK_GUARD_PAGES, colors) * PAGE_SIZE; 587 kstack0 = virtual_avail; 588 virtual_avail += roundup(KSTACK_PAGES, colors) * PAGE_SIZE; 589 if (dcache_color_ignore == 0) 590 KASSERT(DCACHE_COLOR(kstack0) == DCACHE_COLOR(kstack0_phys), 591 ("pmap_bootstrap: kstack0 miscolored")); 592 for (i = 0; i < KSTACK_PAGES; i++) { 593 pa = kstack0_phys + i * PAGE_SIZE; 594 va = kstack0 + i * PAGE_SIZE; 595 tp = tsb_kvtotte(va); 596 vpn = TV_VPN(va, TS_8K); 597 data = TD_V | TD_8K | TD_PA(pa) | TD_REF | TD_SW | TD_CP | 598 TD_CV | TD_P | TD_W; 599 pmap_bootstrap_set_tte(tp, vpn, data); 600 } 601 602 /* 603 * Calculate the last available physical address. 604 */ 605 for (i = 0; phys_avail[i + 2] != 0; i += 2) 606 ; 607 Maxmem = sparc64_btop(phys_avail[i + 1]); 608 609 /* 610 * Add the PROM mappings to the kernel TSB. 
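 * (These translations are read from the "translations" property of the /virtual-memory node and entered as individual 8K TTEs, presumably so the firmware's own mappings remain usable once the kernel handles its MMU traps itself.)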
611 */ 612 if ((vmem = OF_finddevice("/virtual-memory")) == -1) 613 OF_panic("%s: finddevice /virtual-memory", __func__); 614 if ((sz = OF_getproplen(vmem, "translations")) == -1) 615 OF_panic("%s: getproplen translations", __func__); 616 if (sizeof(translations) < sz) 617 OF_panic("%s: translations too small", __func__); 618 bzero(translations, sz); 619 if (OF_getprop(vmem, "translations", translations, sz) == -1) 620 OF_panic("%s: getprop /virtual-memory/translations", 621 __func__); 622 sz /= sizeof(*translations); 623 translations_size = sz; 624#ifdef DIAGNOSTIC 625 OF_printf("pmap_bootstrap: translations\n"); 626#endif 627 qsort(translations, sz, sizeof (*translations), om_cmp); 628 for (i = 0; i < sz; i++) { 629#ifdef DIAGNOSTIC 630 OF_printf("translation: start=%#lx size=%#lx tte=%#lx\n", 631 translations[i].om_start, translations[i].om_size, 632 translations[i].om_tte); 633#endif 634 if ((translations[i].om_tte & TD_V) == 0) 635 continue; 636 if (translations[i].om_start < VM_MIN_PROM_ADDRESS || 637 translations[i].om_start > VM_MAX_PROM_ADDRESS) 638 continue; 639 for (off = 0; off < translations[i].om_size; 640 off += PAGE_SIZE) { 641 va = translations[i].om_start + off; 642 tp = tsb_kvtotte(va); 643 vpn = TV_VPN(va, TS_8K); 644 data = ((translations[i].om_tte & 645 ~((TD_SOFT2_MASK << TD_SOFT2_SHIFT) | 646 (cpu_impl >= CPU_IMPL_ULTRASPARCI && 647 cpu_impl < CPU_IMPL_ULTRASPARCIII ? 648 (TD_DIAG_SF_MASK << TD_DIAG_SF_SHIFT) : 649 (TD_RSVD_CH_MASK << TD_RSVD_CH_SHIFT)) | 650 (TD_SOFT_MASK << TD_SOFT_SHIFT))) | TD_EXEC) + 651 off; 652 pmap_bootstrap_set_tte(tp, vpn, data); 653 } 654 } 655 656 /* 657 * Get the available physical memory ranges from /memory/reg. These 658 * are only used for kernel dumps, but it may not be wise to do PROM 659 * calls in that situation. 660 */ 661 if ((sz = OF_getproplen(pmem, "reg")) == -1) 662 OF_panic("%s: getproplen /memory/reg", __func__); 663 if (sizeof(sparc64_memreg) < sz) 664 OF_panic("%s: sparc64_memreg too small", __func__); 665 if (OF_getprop(pmem, "reg", sparc64_memreg, sz) == -1) 666 OF_panic("%s: getprop /memory/reg", __func__); 667 sparc64_nmemreg = sz / sizeof(*sparc64_memreg); 668 669 /* 670 * Initialize the kernel pmap (which is statically allocated). 671 */ 672 pm = kernel_pmap; 673 PMAP_LOCK_INIT(pm); 674 for (i = 0; i < MAXCPU; i++) 675 pm->pm_context[i] = TLB_CTX_KERNEL; 676 CPU_FILL(&pm->pm_active); 677 678 /* 679 * Initialize the global tte list lock, which is more commonly 680 * known as the pmap pv global lock. 681 */ 682 rw_init(&tte_list_global_lock, "pmap pv global"); 683 684 /* 685 * Flush all non-locked TLB entries possibly left over by the 686 * firmware. 687 */ 688 tlb_flush_nonlocked(); 689} 690 691static void 692pmap_init_qpages(void) 693{ 694 struct pcpu *pc; 695 int i; 696 697 if (dcache_color_ignore != 0) 698 return; 699 700 CPU_FOREACH(i) { 701 pc = pcpu_find(i); 702 pc->pc_qmap_addr = kva_alloc(PAGE_SIZE * DCACHE_COLORS); 703 if (pc->pc_qmap_addr == 0) 704 panic("pmap_init_qpages: unable to allocate KVA"); 705 } 706} 707 708SYSINIT(qpages_init, SI_SUB_CPU, SI_ORDER_ANY, pmap_init_qpages, NULL); 709 710/* 711 * Map the 4MB kernel TSB pages. 
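 * (The TSB pages are entered as locked (TD_L) 4M dTLB entries so that looking up a TTE from the TLB miss handlers cannot itself cause a TLB miss.)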
712 */ 713void 714pmap_map_tsb(void) 715{ 716 vm_offset_t va; 717 vm_paddr_t pa; 718 u_long data; 719 int i; 720 721 for (i = 0; i < tsb_kernel_size; i += PAGE_SIZE_4M) { 722 va = (vm_offset_t)tsb_kernel + i; 723 pa = tsb_kernel_phys + i; 724 data = TD_V | TD_4M | TD_PA(pa) | TD_L | TD_CP | TD_CV | 725 TD_P | TD_W; 726 stxa(AA_DMMU_TAR, ASI_DMMU, TLB_TAR_VA(va) | 727 TLB_TAR_CTX(TLB_CTX_KERNEL)); 728 stxa_sync(0, ASI_DTLB_DATA_IN_REG, data); 729 } 730} 731 732/* 733 * Set the secondary context to be the kernel context (needed for FP block 734 * operations in the kernel). 735 */ 736void 737pmap_set_kctx(void) 738{ 739 740 stxa(AA_DMMU_SCXR, ASI_DMMU, (ldxa(AA_DMMU_SCXR, ASI_DMMU) & 741 TLB_CXR_PGSZ_MASK) | TLB_CTX_KERNEL); 742 flush(KERNBASE); 743} 744 745/* 746 * Allocate a physical page of memory directly from the phys_avail map. 747 * Can only be called from pmap_bootstrap before avail start and end are 748 * calculated. 749 */ 750static vm_paddr_t 751pmap_bootstrap_alloc(vm_size_t size, uint32_t colors) 752{ 753 vm_paddr_t pa; 754 int i; 755 756 size = roundup(size, PAGE_SIZE * colors); 757 for (i = 0; phys_avail[i + 1] != 0; i += 2) { 758 if (phys_avail[i + 1] - phys_avail[i] < size) 759 continue; 760 pa = phys_avail[i]; 761 phys_avail[i] += size; 762 return (pa); 763 } 764 OF_panic("%s: no suitable region found", __func__); 765} 766 767/* 768 * Set a TTE. This function is intended as a helper when tsb_kernel is 769 * direct-mapped but we haven't taken over the trap table, yet, as it's the 770 * case when we are taking advantage of ASI_ATOMIC_QUAD_LDD_PHYS to access 771 * the kernel TSB. 772 */ 773void 774pmap_bootstrap_set_tte(struct tte *tp, u_long vpn, u_long data) 775{ 776 777 if (tsb_kernel_ldd_phys == 0) { 778 tp->tte_vpn = vpn; 779 tp->tte_data = data; 780 } else { 781 stxa((vm_paddr_t)tp + offsetof(struct tte, tte_vpn), 782 ASI_PHYS_USE_EC, vpn); 783 stxa((vm_paddr_t)tp + offsetof(struct tte, tte_data), 784 ASI_PHYS_USE_EC, data); 785 } 786} 787 788/* 789 * Initialize a vm_page's machine-dependent fields. 790 */ 791void 792pmap_page_init(vm_page_t m) 793{ 794 795 TAILQ_INIT(&m->md.tte_list); 796 m->md.color = DCACHE_COLOR(VM_PAGE_TO_PHYS(m)); 797 m->md.pmap = NULL; 798} 799 800/* 801 * Initialize the pmap module. 802 */ 803void 804pmap_init(void) 805{ 806 vm_offset_t addr; 807 vm_size_t size; 808 int result; 809 int i; 810 811 for (i = 0; i < translations_size; i++) { 812 addr = translations[i].om_start; 813 size = translations[i].om_size; 814 if ((translations[i].om_tte & TD_V) == 0) 815 continue; 816 if (addr < VM_MIN_PROM_ADDRESS || addr > VM_MAX_PROM_ADDRESS) 817 continue; 818 result = vm_map_find(kernel_map, NULL, 0, &addr, size, 0, 819 VMFS_NO_SPACE, VM_PROT_ALL, VM_PROT_ALL, MAP_NOFAULT); 820 if (result != KERN_SUCCESS || addr != translations[i].om_start) 821 panic("pmap_init: vm_map_find"); 822 } 823} 824 825/* 826 * Extract the physical page address associated with the given 827 * map/virtual_address pair. 828 */ 829vm_paddr_t 830pmap_extract(pmap_t pm, vm_offset_t va) 831{ 832 struct tte *tp; 833 vm_paddr_t pa; 834 835 if (pm == kernel_pmap) 836 return (pmap_kextract(va)); 837 PMAP_LOCK(pm); 838 tp = tsb_tte_lookup(pm, va); 839 if (tp == NULL) 840 pa = 0; 841 else 842 pa = TTE_GET_PA(tp) | (va & TTE_GET_PAGE_MASK(tp)); 843 PMAP_UNLOCK(pm); 844 return (pa); 845} 846 847/* 848 * Atomically extract and hold the physical page with the given 849 * pmap and virtual address pair if that mapping permits the given 850 * protection. 
851 */ 852vm_page_t 853pmap_extract_and_hold(pmap_t pm, vm_offset_t va, vm_prot_t prot) 854{ 855 struct tte *tp; 856 vm_page_t m; 857 vm_paddr_t pa; 858 859 m = NULL; 860 pa = 0; 861 PMAP_LOCK(pm); 862retry: 863 if (pm == kernel_pmap) { 864 if (va >= VM_MIN_DIRECT_ADDRESS) { 865 tp = NULL; 866 m = PHYS_TO_VM_PAGE(TLB_DIRECT_TO_PHYS(va)); 867 (void)vm_page_pa_tryrelock(pm, TLB_DIRECT_TO_PHYS(va), 868 &pa); 869 vm_page_hold(m); 870 } else { 871 tp = tsb_kvtotte(va); 872 if ((tp->tte_data & TD_V) == 0) 873 tp = NULL; 874 } 875 } else 876 tp = tsb_tte_lookup(pm, va); 877 if (tp != NULL && ((tp->tte_data & TD_SW) || 878 (prot & VM_PROT_WRITE) == 0)) { 879 if (vm_page_pa_tryrelock(pm, TTE_GET_PA(tp), &pa)) 880 goto retry; 881 m = PHYS_TO_VM_PAGE(TTE_GET_PA(tp)); 882 vm_page_hold(m); 883 } 884 PA_UNLOCK_COND(pa); 885 PMAP_UNLOCK(pm); 886 return (m); 887} 888 889/* 890 * Extract the physical page address associated with the given kernel virtual 891 * address. 892 */ 893vm_paddr_t 894pmap_kextract(vm_offset_t va) 895{ 896 struct tte *tp; 897 898 if (va >= VM_MIN_DIRECT_ADDRESS) 899 return (TLB_DIRECT_TO_PHYS(va)); 900 tp = tsb_kvtotte(va); 901 if ((tp->tte_data & TD_V) == 0) 902 return (0); 903 return (TTE_GET_PA(tp) | (va & TTE_GET_PAGE_MASK(tp))); 904} 905 906int 907pmap_cache_enter(vm_page_t m, vm_offset_t va) 908{ 909 struct tte *tp; 910 int color; 911 912 rw_assert(&tte_list_global_lock, RA_WLOCKED); 913 KASSERT((m->flags & PG_FICTITIOUS) == 0, 914 ("pmap_cache_enter: fake page")); 915 PMAP_STATS_INC(pmap_ncache_enter); 916 917 if (dcache_color_ignore != 0) 918 return (1); 919 920 /* 921 * Find the color for this virtual address and note the added mapping. 922 */ 923 color = DCACHE_COLOR(va); 924 m->md.colors[color]++; 925 926 /* 927 * If all existing mappings have the same color, the mapping is 928 * cacheable. 929 */ 930 if (m->md.color == color) { 931 KASSERT(m->md.colors[DCACHE_OTHER_COLOR(color)] == 0, 932 ("pmap_cache_enter: cacheable, mappings of other color")); 933 if (m->md.color == DCACHE_COLOR(VM_PAGE_TO_PHYS(m))) 934 PMAP_STATS_INC(pmap_ncache_enter_c); 935 else 936 PMAP_STATS_INC(pmap_ncache_enter_oc); 937 return (1); 938 } 939 940 /* 941 * If there are no mappings of the other color, and the page still has 942 * the wrong color, this must be a new mapping. Change the color to 943 * match the new mapping, which is cacheable. We must flush the page 944 * from the cache now. 945 */ 946 if (m->md.colors[DCACHE_OTHER_COLOR(color)] == 0) { 947 KASSERT(m->md.colors[color] == 1, 948 ("pmap_cache_enter: changing color, not new mapping")); 949 dcache_page_inval(VM_PAGE_TO_PHYS(m)); 950 m->md.color = color; 951 if (m->md.color == DCACHE_COLOR(VM_PAGE_TO_PHYS(m))) 952 PMAP_STATS_INC(pmap_ncache_enter_cc); 953 else 954 PMAP_STATS_INC(pmap_ncache_enter_coc); 955 return (1); 956 } 957 958 /* 959 * If the mapping is already non-cacheable, just return. 960 */ 961 if (m->md.color == -1) { 962 PMAP_STATS_INC(pmap_ncache_enter_nc); 963 return (0); 964 } 965 966 PMAP_STATS_INC(pmap_ncache_enter_cnc); 967 968 /* 969 * Mark all mappings as uncacheable, flush any lines with the other 970 * color out of the dcache, and set the color to none (-1). 
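 * (Clearing TD_CV in every TTE and demapping it makes each alias bypass the virtually-indexed data cache, so mappings with conflicting colors can safely coexist.)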
971 */ 972 TAILQ_FOREACH(tp, &m->md.tte_list, tte_link) { 973 atomic_clear_long(&tp->tte_data, TD_CV); 974 tlb_page_demap(TTE_GET_PMAP(tp), TTE_GET_VA(tp)); 975 } 976 dcache_page_inval(VM_PAGE_TO_PHYS(m)); 977 m->md.color = -1; 978 return (0); 979} 980 981static void 982pmap_cache_remove(vm_page_t m, vm_offset_t va) 983{ 984 struct tte *tp; 985 int color; 986 987 rw_assert(&tte_list_global_lock, RA_WLOCKED); 988 CTR3(KTR_PMAP, "pmap_cache_remove: m=%p va=%#lx c=%d", m, va, 989 m->md.colors[DCACHE_COLOR(va)]); 990 KASSERT((m->flags & PG_FICTITIOUS) == 0, 991 ("pmap_cache_remove: fake page")); 992 PMAP_STATS_INC(pmap_ncache_remove); 993 994 if (dcache_color_ignore != 0) 995 return; 996 997 KASSERT(m->md.colors[DCACHE_COLOR(va)] > 0, 998 ("pmap_cache_remove: no mappings %d <= 0", 999 m->md.colors[DCACHE_COLOR(va)])); 1000 1001 /* 1002 * Find the color for this virtual address and note the removal of 1003 * the mapping. 1004 */ 1005 color = DCACHE_COLOR(va); 1006 m->md.colors[color]--; 1007 1008 /* 1009 * If the page is cacheable, just return and keep the same color, even 1010 * if there are no longer any mappings. 1011 */ 1012 if (m->md.color != -1) { 1013 if (m->md.color == DCACHE_COLOR(VM_PAGE_TO_PHYS(m))) 1014 PMAP_STATS_INC(pmap_ncache_remove_c); 1015 else 1016 PMAP_STATS_INC(pmap_ncache_remove_oc); 1017 return; 1018 } 1019 1020 KASSERT(m->md.colors[DCACHE_OTHER_COLOR(color)] != 0, 1021 ("pmap_cache_remove: uncacheable, no mappings of other color")); 1022 1023 /* 1024 * If the page is not cacheable (color is -1), and the number of 1025 * mappings for this color is not zero, just return. There are 1026 * mappings of the other color still, so remain non-cacheable. 1027 */ 1028 if (m->md.colors[color] != 0) { 1029 PMAP_STATS_INC(pmap_ncache_remove_nc); 1030 return; 1031 } 1032 1033 /* 1034 * The number of mappings for this color is now zero. Recache the 1035 * other colored mappings, and change the page color to the other 1036 * color. There should be no lines in the data cache for this page, 1037 * so flushing should not be needed. 1038 */ 1039 TAILQ_FOREACH(tp, &m->md.tte_list, tte_link) { 1040 atomic_set_long(&tp->tte_data, TD_CV); 1041 tlb_page_demap(TTE_GET_PMAP(tp), TTE_GET_VA(tp)); 1042 } 1043 m->md.color = DCACHE_OTHER_COLOR(color); 1044 1045 if (m->md.color == DCACHE_COLOR(VM_PAGE_TO_PHYS(m))) 1046 PMAP_STATS_INC(pmap_ncache_remove_cc); 1047 else 1048 PMAP_STATS_INC(pmap_ncache_remove_coc); 1049} 1050 1051/* 1052 * Map a wired page into kernel virtual address space. 1053 */ 1054void 1055pmap_kenter(vm_offset_t va, vm_page_t m) 1056{ 1057 vm_offset_t ova; 1058 struct tte *tp; 1059 vm_page_t om; 1060 u_long data; 1061 1062 rw_assert(&tte_list_global_lock, RA_WLOCKED); 1063 PMAP_STATS_INC(pmap_nkenter); 1064 tp = tsb_kvtotte(va); 1065 CTR4(KTR_PMAP, "pmap_kenter: va=%#lx pa=%#lx tp=%p data=%#lx", 1066 va, VM_PAGE_TO_PHYS(m), tp, tp->tte_data); 1067 if (DCACHE_COLOR(VM_PAGE_TO_PHYS(m)) != DCACHE_COLOR(va)) { 1068 CTR5(KTR_SPARE2, 1069 "pmap_kenter: off color va=%#lx pa=%#lx o=%p ot=%d pi=%#lx", 1070 va, VM_PAGE_TO_PHYS(m), m->object, 1071 m->object ? 
m->object->type : -1, 1072 m->pindex); 1073 PMAP_STATS_INC(pmap_nkenter_oc); 1074 } 1075 if ((tp->tte_data & TD_V) != 0) { 1076 om = PHYS_TO_VM_PAGE(TTE_GET_PA(tp)); 1077 ova = TTE_GET_VA(tp); 1078 if (m == om && va == ova) { 1079 PMAP_STATS_INC(pmap_nkenter_stupid); 1080 return; 1081 } 1082 TAILQ_REMOVE(&om->md.tte_list, tp, tte_link); 1083 pmap_cache_remove(om, ova); 1084 if (va != ova) 1085 tlb_page_demap(kernel_pmap, ova); 1086 } 1087 data = TD_V | TD_8K | VM_PAGE_TO_PHYS(m) | TD_REF | TD_SW | TD_CP | 1088 TD_P | TD_W; 1089 if (pmap_cache_enter(m, va) != 0) 1090 data |= TD_CV; 1091 tp->tte_vpn = TV_VPN(va, TS_8K); 1092 tp->tte_data = data; 1093 TAILQ_INSERT_TAIL(&m->md.tte_list, tp, tte_link); 1094} 1095 1096/* 1097 * Map a wired page into kernel virtual address space. This additionally 1098 * takes a flag argument which is or'ed to the TTE data. This is used by 1099 * sparc64_bus_mem_map(). 1100 * NOTE: if the mapping is non-cacheable, it's the caller's responsibility 1101 * to flush entries that might still be in the cache, if applicable. 1102 */ 1103void 1104pmap_kenter_flags(vm_offset_t va, vm_paddr_t pa, u_long flags) 1105{ 1106 struct tte *tp; 1107 1108 tp = tsb_kvtotte(va); 1109 CTR4(KTR_PMAP, "pmap_kenter_flags: va=%#lx pa=%#lx tp=%p data=%#lx", 1110 va, pa, tp, tp->tte_data); 1111 tp->tte_vpn = TV_VPN(va, TS_8K); 1112 tp->tte_data = TD_V | TD_8K | TD_PA(pa) | TD_REF | TD_P | flags; 1113} 1114 1115/* 1116 * Remove a wired page from kernel virtual address space. 1117 */ 1118void 1119pmap_kremove(vm_offset_t va) 1120{ 1121 struct tte *tp; 1122 vm_page_t m; 1123 1124 rw_assert(&tte_list_global_lock, RA_WLOCKED); 1125 PMAP_STATS_INC(pmap_nkremove); 1126 tp = tsb_kvtotte(va); 1127 CTR3(KTR_PMAP, "pmap_kremove: va=%#lx tp=%p data=%#lx", va, tp, 1128 tp->tte_data); 1129 if ((tp->tte_data & TD_V) == 0) 1130 return; 1131 m = PHYS_TO_VM_PAGE(TTE_GET_PA(tp)); 1132 TAILQ_REMOVE(&m->md.tte_list, tp, tte_link); 1133 pmap_cache_remove(m, va); 1134 TTE_ZERO(tp); 1135} 1136 1137/* 1138 * Inverse of pmap_kenter_flags, used by bus_space_unmap(). 1139 */ 1140void 1141pmap_kremove_flags(vm_offset_t va) 1142{ 1143 struct tte *tp; 1144 1145 tp = tsb_kvtotte(va); 1146 CTR3(KTR_PMAP, "pmap_kremove_flags: va=%#lx tp=%p data=%#lx", va, tp, 1147 tp->tte_data); 1148 TTE_ZERO(tp); 1149} 1150 1151/* 1152 * Map a range of physical addresses into kernel virtual address space. 1153 * 1154 * The value passed in *virt is a suggested virtual address for the mapping. 1155 * Architectures which can support a direct-mapped physical to virtual region 1156 * can return the appropriate address within that region, leaving '*virt' 1157 * unchanged. 1158 */ 1159vm_offset_t 1160pmap_map(vm_offset_t *virt, vm_paddr_t start, vm_paddr_t end, int prot) 1161{ 1162 1163 return (TLB_PHYS_TO_DIRECT(start)); 1164} 1165 1166/* 1167 * Map a list of wired pages into kernel virtual address space. This is 1168 * intended for temporary mappings which do not need page modification or 1169 * references recorded. Existing mappings in the region are overwritten. 1170 */ 1171void 1172pmap_qenter(vm_offset_t sva, vm_page_t *m, int count) 1173{ 1174 vm_offset_t va; 1175 1176 PMAP_STATS_INC(pmap_nqenter); 1177 va = sva; 1178 rw_wlock(&tte_list_global_lock); 1179 while (count-- > 0) { 1180 pmap_kenter(va, *m); 1181 va += PAGE_SIZE; 1182 m++; 1183 } 1184 rw_wunlock(&tte_list_global_lock); 1185 tlb_range_demap(kernel_pmap, sva, va); 1186} 1187 1188/* 1189 * Remove page mappings from kernel virtual address space. 
Intended for 1190 * temporary mappings entered by pmap_qenter. 1191 */ 1192void 1193pmap_qremove(vm_offset_t sva, int count) 1194{ 1195 vm_offset_t va; 1196 1197 PMAP_STATS_INC(pmap_nqremove); 1198 va = sva; 1199 rw_wlock(&tte_list_global_lock); 1200 while (count-- > 0) { 1201 pmap_kremove(va); 1202 va += PAGE_SIZE; 1203 } 1204 rw_wunlock(&tte_list_global_lock); 1205 tlb_range_demap(kernel_pmap, sva, va); 1206} 1207 1208/* 1209 * Initialize the pmap associated with process 0. 1210 */ 1211void 1212pmap_pinit0(pmap_t pm) 1213{ 1214 int i; 1215 1216 PMAP_LOCK_INIT(pm); 1217 for (i = 0; i < MAXCPU; i++) 1218 pm->pm_context[i] = TLB_CTX_KERNEL; 1219 CPU_ZERO(&pm->pm_active); 1220 pm->pm_tsb = NULL; 1221 pm->pm_tsb_obj = NULL; 1222 bzero(&pm->pm_stats, sizeof(pm->pm_stats)); 1223} 1224 1225/* 1226 * Initialize a preallocated and zeroed pmap structure, such as one in a 1227 * vmspace structure. 1228 */ 1229int 1230pmap_pinit(pmap_t pm) 1231{ 1232 vm_page_t ma[TSB_PAGES]; 1233 int i; 1234 1235 /* 1236 * Allocate KVA space for the TSB. 1237 */ 1238 if (pm->pm_tsb == NULL) { 1239 pm->pm_tsb = (struct tte *)kva_alloc(TSB_BSIZE); 1240 if (pm->pm_tsb == NULL) 1241 return (0); 1242 } 1243 1244 /* 1245 * Allocate an object for it. 1246 */ 1247 if (pm->pm_tsb_obj == NULL) 1248 pm->pm_tsb_obj = vm_object_allocate(OBJT_PHYS, TSB_PAGES); 1249 1250 for (i = 0; i < MAXCPU; i++) 1251 pm->pm_context[i] = -1; 1252 CPU_ZERO(&pm->pm_active); 1253 1254 VM_OBJECT_WLOCK(pm->pm_tsb_obj); 1255 (void)vm_page_grab_pages(pm->pm_tsb_obj, 0, VM_ALLOC_NORMAL | 1256 VM_ALLOC_NOBUSY | VM_ALLOC_WIRED | VM_ALLOC_ZERO, ma, TSB_PAGES); 1257 VM_OBJECT_WUNLOCK(pm->pm_tsb_obj); 1258 for (i = 0; i < TSB_PAGES; i++) 1259 ma[i]->md.pmap = pm; 1260 pmap_qenter((vm_offset_t)pm->pm_tsb, ma, TSB_PAGES); 1261 1262 bzero(&pm->pm_stats, sizeof(pm->pm_stats)); 1263 return (1); 1264} 1265 1266/* 1267 * Release any resources held by the given physical map. 1268 * Called when a pmap initialized by pmap_pinit is being released. 1269 * Should only be called if the map contains no valid mappings. 1270 */ 1271void 1272pmap_release(pmap_t pm) 1273{ 1274 vm_object_t obj; 1275 vm_page_t m; 1276#ifdef SMP 1277 struct pcpu *pc; 1278#endif 1279 1280 CTR2(KTR_PMAP, "pmap_release: ctx=%#x tsb=%p", 1281 pm->pm_context[curcpu], pm->pm_tsb); 1282 KASSERT(pmap_resident_count(pm) == 0, 1283 ("pmap_release: resident pages %ld != 0", 1284 pmap_resident_count(pm))); 1285 1286 /* 1287 * After the pmap was freed, it might be reallocated to a new process. 1288 * When switching, this might lead us to wrongly assume that we need 1289 * not switch contexts because old and new pmap pointer are equal. 1290 * Therefore, make sure that this pmap is not referenced by any PCPU 1291 * pointer any more. This could happen in two cases: 1292 * - A process that referenced the pmap is currently exiting on a CPU. 1293 * However, it is guaranteed to not switch in any more after setting 1294 * its state to PRS_ZOMBIE. 1295 * - A process that referenced this pmap ran on a CPU, but we switched 1296 * to a kernel thread, leaving the pmap pointer unchanged. 
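 * In either case, the code below clears a per-CPU pmap pointer only if it still references this pmap; pointers to other pmaps are left untouched.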
1297 */ 1298#ifdef SMP 1299 sched_pin(); 1300 STAILQ_FOREACH(pc, &cpuhead, pc_allcpu) 1301 atomic_cmpset_rel_ptr((uintptr_t *)&pc->pc_pmap, 1302 (uintptr_t)pm, (uintptr_t)NULL); 1303 sched_unpin(); 1304#else 1305 critical_enter(); 1306 if (PCPU_GET(pmap) == pm) 1307 PCPU_SET(pmap, NULL); 1308 critical_exit(); 1309#endif 1310 1311 pmap_qremove((vm_offset_t)pm->pm_tsb, TSB_PAGES); 1312 obj = pm->pm_tsb_obj; 1313 VM_OBJECT_WLOCK(obj); 1314 KASSERT(obj->ref_count == 1, ("pmap_release: tsbobj ref count != 1")); 1315 while (!TAILQ_EMPTY(&obj->memq)) { 1316 m = TAILQ_FIRST(&obj->memq); 1317 m->md.pmap = NULL; 1318 m->wire_count--; 1319 atomic_subtract_int(&vm_cnt.v_wire_count, 1); 1320 vm_page_free_zero(m); 1321 } 1322 VM_OBJECT_WUNLOCK(obj); 1323} 1324 1325/* 1326 * Grow the number of kernel page table entries. Unneeded. 1327 */ 1328void 1329pmap_growkernel(vm_offset_t addr) 1330{ 1331 1332 panic("pmap_growkernel: can't grow kernel"); 1333} 1334 1335int 1336pmap_remove_tte(struct pmap *pm, struct pmap *pm2, struct tte *tp, 1337 vm_offset_t va) 1338{ 1339 vm_page_t m; 1340 u_long data; 1341 1342 rw_assert(&tte_list_global_lock, RA_WLOCKED); 1343 data = atomic_readandclear_long(&tp->tte_data); 1344 if ((data & TD_FAKE) == 0) { 1345 m = PHYS_TO_VM_PAGE(TD_PA(data)); 1346 TAILQ_REMOVE(&m->md.tte_list, tp, tte_link); 1347 if ((data & TD_WIRED) != 0) 1348 pm->pm_stats.wired_count--; 1349 if ((data & TD_PV) != 0) { 1350 if ((data & TD_W) != 0) 1351 vm_page_dirty(m); 1352 if ((data & TD_REF) != 0) 1353 vm_page_aflag_set(m, PGA_REFERENCED); 1354 if (TAILQ_EMPTY(&m->md.tte_list)) 1355 vm_page_aflag_clear(m, PGA_WRITEABLE); 1356 pm->pm_stats.resident_count--; 1357 } 1358 pmap_cache_remove(m, va); 1359 } 1360 TTE_ZERO(tp); 1361 if (PMAP_REMOVE_DONE(pm)) 1362 return (0); 1363 return (1); 1364} 1365 1366/* 1367 * Remove the given range of addresses from the specified map. 
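 * For ranges larger than PMAP_TSB_THRESH the TSB is scanned as a whole using tsb_foreach() and the entire context is demapped, instead of looking up every page in the range individually.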
1368 */ 1369void 1370pmap_remove(pmap_t pm, vm_offset_t start, vm_offset_t end) 1371{ 1372 struct tte *tp; 1373 vm_offset_t va; 1374 1375 CTR3(KTR_PMAP, "pmap_remove: ctx=%#lx start=%#lx end=%#lx", 1376 pm->pm_context[curcpu], start, end); 1377 if (PMAP_REMOVE_DONE(pm)) 1378 return; 1379 rw_wlock(&tte_list_global_lock); 1380 PMAP_LOCK(pm); 1381 if (end - start > PMAP_TSB_THRESH) { 1382 tsb_foreach(pm, NULL, start, end, pmap_remove_tte); 1383 tlb_context_demap(pm); 1384 } else { 1385 for (va = start; va < end; va += PAGE_SIZE) 1386 if ((tp = tsb_tte_lookup(pm, va)) != NULL && 1387 !pmap_remove_tte(pm, NULL, tp, va)) 1388 break; 1389 tlb_range_demap(pm, start, end - 1); 1390 } 1391 PMAP_UNLOCK(pm); 1392 rw_wunlock(&tte_list_global_lock); 1393} 1394 1395void 1396pmap_remove_all(vm_page_t m) 1397{ 1398 struct pmap *pm; 1399 struct tte *tpn; 1400 struct tte *tp; 1401 vm_offset_t va; 1402 1403 KASSERT((m->oflags & VPO_UNMANAGED) == 0, 1404 ("pmap_remove_all: page %p is not managed", m)); 1405 rw_wlock(&tte_list_global_lock); 1406 for (tp = TAILQ_FIRST(&m->md.tte_list); tp != NULL; tp = tpn) { 1407 tpn = TAILQ_NEXT(tp, tte_link); 1408 if ((tp->tte_data & TD_PV) == 0) 1409 continue; 1410 pm = TTE_GET_PMAP(tp); 1411 va = TTE_GET_VA(tp); 1412 PMAP_LOCK(pm); 1413 if ((tp->tte_data & TD_WIRED) != 0) 1414 pm->pm_stats.wired_count--; 1415 if ((tp->tte_data & TD_REF) != 0) 1416 vm_page_aflag_set(m, PGA_REFERENCED); 1417 if ((tp->tte_data & TD_W) != 0) 1418 vm_page_dirty(m); 1419 tp->tte_data &= ~TD_V; 1420 tlb_page_demap(pm, va); 1421 TAILQ_REMOVE(&m->md.tte_list, tp, tte_link); 1422 pm->pm_stats.resident_count--; 1423 pmap_cache_remove(m, va); 1424 TTE_ZERO(tp); 1425 PMAP_UNLOCK(pm); 1426 } 1427 vm_page_aflag_clear(m, PGA_WRITEABLE); 1428 rw_wunlock(&tte_list_global_lock); 1429} 1430 1431static int 1432pmap_protect_tte(struct pmap *pm, struct pmap *pm2, struct tte *tp, 1433 vm_offset_t va) 1434{ 1435 u_long data; 1436 vm_page_t m; 1437 1438 PMAP_LOCK_ASSERT(pm, MA_OWNED); 1439 data = atomic_clear_long(&tp->tte_data, TD_SW | TD_W); 1440 if ((data & (TD_PV | TD_W)) == (TD_PV | TD_W)) { 1441 m = PHYS_TO_VM_PAGE(TD_PA(data)); 1442 vm_page_dirty(m); 1443 } 1444 return (1); 1445} 1446 1447/* 1448 * Set the physical protection on the specified range of this map as requested. 1449 */ 1450void 1451pmap_protect(pmap_t pm, vm_offset_t sva, vm_offset_t eva, vm_prot_t prot) 1452{ 1453 vm_offset_t va; 1454 struct tte *tp; 1455 1456 CTR4(KTR_PMAP, "pmap_protect: ctx=%#lx sva=%#lx eva=%#lx prot=%#lx", 1457 pm->pm_context[curcpu], sva, eva, prot); 1458 1459 if ((prot & VM_PROT_READ) == VM_PROT_NONE) { 1460 pmap_remove(pm, sva, eva); 1461 return; 1462 } 1463 1464 if (prot & VM_PROT_WRITE) 1465 return; 1466 1467 PMAP_LOCK(pm); 1468 if (eva - sva > PMAP_TSB_THRESH) { 1469 tsb_foreach(pm, NULL, sva, eva, pmap_protect_tte); 1470 tlb_context_demap(pm); 1471 } else { 1472 for (va = sva; va < eva; va += PAGE_SIZE) 1473 if ((tp = tsb_tte_lookup(pm, va)) != NULL) 1474 pmap_protect_tte(pm, NULL, tp, va); 1475 tlb_range_demap(pm, sva, eva - 1); 1476 } 1477 PMAP_UNLOCK(pm); 1478} 1479 1480/* 1481 * Map the given physical page at the specified virtual address in the 1482 * target pmap with the protection requested. If specified the page 1483 * will be wired down. 
1484 */ 1485int 1486pmap_enter(pmap_t pm, vm_offset_t va, vm_page_t m, vm_prot_t prot, 1487 u_int flags, int8_t psind) 1488{ 1489 int rv; 1490 1491 rw_wlock(&tte_list_global_lock); 1492 PMAP_LOCK(pm); 1493 rv = pmap_enter_locked(pm, va, m, prot, flags, psind); 1494 rw_wunlock(&tte_list_global_lock); 1495 PMAP_UNLOCK(pm); 1496 return (rv); 1497} 1498 1499/* 1500 * Map the given physical page at the specified virtual address in the 1501 * target pmap with the protection requested. If specified the page 1502 * will be wired down. 1503 * 1504 * The page queues and pmap must be locked. 1505 */ 1506static int 1507pmap_enter_locked(pmap_t pm, vm_offset_t va, vm_page_t m, vm_prot_t prot, 1508 u_int flags, int8_t psind __unused) 1509{ 1510 struct tte *tp; 1511 vm_paddr_t pa; 1512 vm_page_t real; 1513 u_long data; 1514 boolean_t wired; 1515 1516 rw_assert(&tte_list_global_lock, RA_WLOCKED); 1517 PMAP_LOCK_ASSERT(pm, MA_OWNED); 1518 if ((m->oflags & VPO_UNMANAGED) == 0 && !vm_page_xbusied(m)) 1519 VM_OBJECT_ASSERT_LOCKED(m->object); 1520 PMAP_STATS_INC(pmap_nenter); 1521 pa = VM_PAGE_TO_PHYS(m); 1522 wired = (flags & PMAP_ENTER_WIRED) != 0; 1523 1524 /* 1525 * If this is a fake page from the device_pager, but it covers actual 1526 * physical memory, convert to the real backing page. 1527 */ 1528 if ((m->flags & PG_FICTITIOUS) != 0) { 1529 real = vm_phys_paddr_to_vm_page(pa); 1530 if (real != NULL) 1531 m = real; 1532 } 1533 1534 CTR6(KTR_PMAP, 1535 "pmap_enter_locked: ctx=%p m=%p va=%#lx pa=%#lx prot=%#x wired=%d", 1536 pm->pm_context[curcpu], m, va, pa, prot, wired); 1537 1538 /* 1539 * If there is an existing mapping, and the physical address has not 1540 * changed, must be protection or wiring change. 1541 */ 1542 if ((tp = tsb_tte_lookup(pm, va)) != NULL && TTE_GET_PA(tp) == pa) { 1543 CTR0(KTR_PMAP, "pmap_enter_locked: update"); 1544 PMAP_STATS_INC(pmap_nenter_update); 1545 1546 /* 1547 * Wiring change, just update stats. 1548 */ 1549 if (wired) { 1550 if ((tp->tte_data & TD_WIRED) == 0) { 1551 tp->tte_data |= TD_WIRED; 1552 pm->pm_stats.wired_count++; 1553 } 1554 } else { 1555 if ((tp->tte_data & TD_WIRED) != 0) { 1556 tp->tte_data &= ~TD_WIRED; 1557 pm->pm_stats.wired_count--; 1558 } 1559 } 1560 1561 /* 1562 * Save the old bits and clear the ones we're interested in. 1563 */ 1564 data = tp->tte_data; 1565 tp->tte_data &= ~(TD_EXEC | TD_SW | TD_W); 1566 1567 /* 1568 * If we're turning off write permissions, sense modify status. 1569 */ 1570 if ((prot & VM_PROT_WRITE) != 0) { 1571 tp->tte_data |= TD_SW; 1572 if (wired) 1573 tp->tte_data |= TD_W; 1574 if ((m->oflags & VPO_UNMANAGED) == 0) 1575 vm_page_aflag_set(m, PGA_WRITEABLE); 1576 } else if ((data & TD_W) != 0) 1577 vm_page_dirty(m); 1578 1579 /* 1580 * If we're turning on execute permissions, flush the icache. 1581 */ 1582 if ((prot & VM_PROT_EXECUTE) != 0) { 1583 if ((data & TD_EXEC) == 0) 1584 icache_page_inval(pa); 1585 tp->tte_data |= TD_EXEC; 1586 } 1587 1588 /* 1589 * Delete the old mapping. 1590 */ 1591 tlb_page_demap(pm, TTE_GET_VA(tp)); 1592 } else { 1593 /* 1594 * If there is an existing mapping, but its for a different 1595 * physical address, delete the old mapping. 1596 */ 1597 if (tp != NULL) { 1598 CTR0(KTR_PMAP, "pmap_enter_locked: replace"); 1599 PMAP_STATS_INC(pmap_nenter_replace); 1600 pmap_remove_tte(pm, NULL, tp, va); 1601 tlb_page_demap(pm, va); 1602 } else { 1603 CTR0(KTR_PMAP, "pmap_enter_locked: new"); 1604 PMAP_STATS_INC(pmap_nenter_new); 1605 } 1606 1607 /* 1608 * Now set up the data and install the new mapping. 
1609 */ 1610 data = TD_V | TD_8K | TD_PA(pa); 1611 if (pm == kernel_pmap) 1612 data |= TD_P; 1613 if ((prot & VM_PROT_WRITE) != 0) { 1614 data |= TD_SW; 1615 if ((m->oflags & VPO_UNMANAGED) == 0) 1616 vm_page_aflag_set(m, PGA_WRITEABLE); 1617 } 1618 if (prot & VM_PROT_EXECUTE) { 1619 data |= TD_EXEC; 1620 icache_page_inval(pa); 1621 } 1622 1623 /* 1624 * If its wired update stats. We also don't need reference or 1625 * modify tracking for wired mappings, so set the bits now. 1626 */ 1627 if (wired) { 1628 pm->pm_stats.wired_count++; 1629 data |= TD_REF | TD_WIRED; 1630 if ((prot & VM_PROT_WRITE) != 0) 1631 data |= TD_W; 1632 } 1633 1634 tsb_tte_enter(pm, m, va, TS_8K, data); 1635 } 1636 1637 return (KERN_SUCCESS); 1638} 1639 1640/* 1641 * Maps a sequence of resident pages belonging to the same object. 1642 * The sequence begins with the given page m_start. This page is 1643 * mapped at the given virtual address start. Each subsequent page is 1644 * mapped at a virtual address that is offset from start by the same 1645 * amount as the page is offset from m_start within the object. The 1646 * last page in the sequence is the page with the largest offset from 1647 * m_start that can be mapped at a virtual address less than the given 1648 * virtual address end. Not every virtual page between start and end 1649 * is mapped; only those for which a resident page exists with the 1650 * corresponding offset from m_start are mapped. 1651 */ 1652void 1653pmap_enter_object(pmap_t pm, vm_offset_t start, vm_offset_t end, 1654 vm_page_t m_start, vm_prot_t prot) 1655{ 1656 vm_page_t m; 1657 vm_pindex_t diff, psize; 1658 1659 VM_OBJECT_ASSERT_LOCKED(m_start->object); 1660 1661 psize = atop(end - start); 1662 m = m_start; 1663 rw_wlock(&tte_list_global_lock); 1664 PMAP_LOCK(pm); 1665 while (m != NULL && (diff = m->pindex - m_start->pindex) < psize) { 1666 pmap_enter_locked(pm, start + ptoa(diff), m, prot & 1667 (VM_PROT_READ | VM_PROT_EXECUTE), 0, 0); 1668 m = TAILQ_NEXT(m, listq); 1669 } 1670 rw_wunlock(&tte_list_global_lock); 1671 PMAP_UNLOCK(pm); 1672} 1673 1674void 1675pmap_enter_quick(pmap_t pm, vm_offset_t va, vm_page_t m, vm_prot_t prot) 1676{ 1677 1678 rw_wlock(&tte_list_global_lock); 1679 PMAP_LOCK(pm); 1680 pmap_enter_locked(pm, va, m, prot & (VM_PROT_READ | VM_PROT_EXECUTE), 1681 0, 0); 1682 rw_wunlock(&tte_list_global_lock); 1683 PMAP_UNLOCK(pm); 1684} 1685 1686void 1687pmap_object_init_pt(pmap_t pm, vm_offset_t addr, vm_object_t object, 1688 vm_pindex_t pindex, vm_size_t size) 1689{ 1690 1691 VM_OBJECT_ASSERT_WLOCKED(object); 1692 KASSERT(object->type == OBJT_DEVICE || object->type == OBJT_SG, 1693 ("pmap_object_init_pt: non-device object")); 1694} 1695 1696static int 1697pmap_unwire_tte(pmap_t pm, pmap_t pm2, struct tte *tp, vm_offset_t va) 1698{ 1699 1700 PMAP_LOCK_ASSERT(pm, MA_OWNED); 1701 if ((tp->tte_data & TD_WIRED) == 0) 1702 panic("pmap_unwire_tte: tp %p is missing TD_WIRED", tp); 1703 atomic_clear_long(&tp->tte_data, TD_WIRED); 1704 pm->pm_stats.wired_count--; 1705 return (1); 1706} 1707 1708/* 1709 * Clear the wired attribute from the mappings for the specified range of 1710 * addresses in the given pmap. Every valid mapping within that range must 1711 * have the wired attribute set. In contrast, invalid mappings cannot have 1712 * the wired attribute set, so they are ignored. 1713 * 1714 * The wired attribute of the translation table entry is not a hardware 1715 * feature, so there is no need to invalidate any TLB entries. 
1716 */ 1717void 1718pmap_unwire(pmap_t pm, vm_offset_t sva, vm_offset_t eva) 1719{ 1720 vm_offset_t va; 1721 struct tte *tp; 1722 1723 PMAP_LOCK(pm); 1724 if (eva - sva > PMAP_TSB_THRESH) 1725 tsb_foreach(pm, NULL, sva, eva, pmap_unwire_tte); 1726 else { 1727 for (va = sva; va < eva; va += PAGE_SIZE) 1728 if ((tp = tsb_tte_lookup(pm, va)) != NULL) 1729 pmap_unwire_tte(pm, NULL, tp, va); 1730 } 1731 PMAP_UNLOCK(pm); 1732} 1733 1734static int 1735pmap_copy_tte(pmap_t src_pmap, pmap_t dst_pmap, struct tte *tp, 1736 vm_offset_t va) 1737{ 1738 vm_page_t m; 1739 u_long data; 1740 1741 if ((tp->tte_data & TD_FAKE) != 0) 1742 return (1); 1743 if (tsb_tte_lookup(dst_pmap, va) == NULL) { 1744 data = tp->tte_data & 1745 ~(TD_PV | TD_REF | TD_SW | TD_CV | TD_W); 1746 m = PHYS_TO_VM_PAGE(TTE_GET_PA(tp)); 1747 tsb_tte_enter(dst_pmap, m, va, TS_8K, data); 1748 } 1749 return (1); 1750} 1751 1752void 1753pmap_copy(pmap_t dst_pmap, pmap_t src_pmap, vm_offset_t dst_addr, 1754 vm_size_t len, vm_offset_t src_addr) 1755{ 1756 struct tte *tp; 1757 vm_offset_t va; 1758 1759 if (dst_addr != src_addr) 1760 return; 1761 rw_wlock(&tte_list_global_lock); 1762 if (dst_pmap < src_pmap) { 1763 PMAP_LOCK(dst_pmap); 1764 PMAP_LOCK(src_pmap); 1765 } else { 1766 PMAP_LOCK(src_pmap); 1767 PMAP_LOCK(dst_pmap); 1768 } 1769 if (len > PMAP_TSB_THRESH) { 1770 tsb_foreach(src_pmap, dst_pmap, src_addr, src_addr + len, 1771 pmap_copy_tte); 1772 tlb_context_demap(dst_pmap); 1773 } else { 1774 for (va = src_addr; va < src_addr + len; va += PAGE_SIZE) 1775 if ((tp = tsb_tte_lookup(src_pmap, va)) != NULL) 1776 pmap_copy_tte(src_pmap, dst_pmap, tp, va); 1777 tlb_range_demap(dst_pmap, src_addr, src_addr + len - 1); 1778 } 1779 rw_wunlock(&tte_list_global_lock); 1780 PMAP_UNLOCK(src_pmap); 1781 PMAP_UNLOCK(dst_pmap); 1782} 1783 1784void 1785pmap_zero_page(vm_page_t m) 1786{ 1787 struct tte *tp; 1788 vm_offset_t va; 1789 vm_paddr_t pa; 1790 1791 KASSERT((m->flags & PG_FICTITIOUS) == 0, 1792 ("pmap_zero_page: fake page")); 1793 PMAP_STATS_INC(pmap_nzero_page); 1794 pa = VM_PAGE_TO_PHYS(m); 1795 if (dcache_color_ignore != 0 || m->md.color == DCACHE_COLOR(pa)) { 1796 PMAP_STATS_INC(pmap_nzero_page_c); 1797 va = TLB_PHYS_TO_DIRECT(pa); 1798 cpu_block_zero((void *)va, PAGE_SIZE); 1799 } else if (m->md.color == -1) { 1800 PMAP_STATS_INC(pmap_nzero_page_nc); 1801 aszero(ASI_PHYS_USE_EC, pa, PAGE_SIZE); 1802 } else { 1803 PMAP_STATS_INC(pmap_nzero_page_oc); 1804 PMAP_LOCK(kernel_pmap); 1805 va = pmap_temp_map_1 + (m->md.color * PAGE_SIZE); 1806 tp = tsb_kvtotte(va); 1807 tp->tte_data = TD_V | TD_8K | TD_PA(pa) | TD_CP | TD_CV | TD_W; 1808 tp->tte_vpn = TV_VPN(va, TS_8K); 1809 cpu_block_zero((void *)va, PAGE_SIZE); 1810 tlb_page_demap(kernel_pmap, va); 1811 PMAP_UNLOCK(kernel_pmap); 1812 } 1813} 1814 1815void 1816pmap_zero_page_area(vm_page_t m, int off, int size) 1817{ 1818 struct tte *tp; 1819 vm_offset_t va; 1820 vm_paddr_t pa; 1821 1822 KASSERT((m->flags & PG_FICTITIOUS) == 0, 1823 ("pmap_zero_page_area: fake page")); 1824 KASSERT(off + size <= PAGE_SIZE, ("pmap_zero_page_area: bad off/size")); 1825 PMAP_STATS_INC(pmap_nzero_page_area); 1826 pa = VM_PAGE_TO_PHYS(m); 1827 if (dcache_color_ignore != 0 || m->md.color == DCACHE_COLOR(pa)) { 1828 PMAP_STATS_INC(pmap_nzero_page_area_c); 1829 va = TLB_PHYS_TO_DIRECT(pa); 1830 bzero((void *)(va + off), size); 1831 } else if (m->md.color == -1) { 1832 PMAP_STATS_INC(pmap_nzero_page_area_nc); 1833 aszero(ASI_PHYS_USE_EC, pa + off, size); 1834 } else { 1835 PMAP_STATS_INC(pmap_nzero_page_area_oc); 1836 
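/* Off-color page: zero it through a temporary kernel mapping whose virtual color matches the page's current color. */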
PMAP_LOCK(kernel_pmap); 1837 va = pmap_temp_map_1 + (m->md.color * PAGE_SIZE); 1838 tp = tsb_kvtotte(va); 1839 tp->tte_data = TD_V | TD_8K | TD_PA(pa) | TD_CP | TD_CV | TD_W; 1840 tp->tte_vpn = TV_VPN(va, TS_8K); 1841 bzero((void *)(va + off), size); 1842 tlb_page_demap(kernel_pmap, va); 1843 PMAP_UNLOCK(kernel_pmap); 1844 } 1845} 1846 1847void 1848pmap_zero_page_idle(vm_page_t m) 1849{ 1850 struct tte *tp; 1851 vm_offset_t va; 1852 vm_paddr_t pa; 1853 1854 KASSERT((m->flags & PG_FICTITIOUS) == 0, 1855 ("pmap_zero_page_idle: fake page")); 1856 PMAP_STATS_INC(pmap_nzero_page_idle); 1857 pa = VM_PAGE_TO_PHYS(m); 1858 if (dcache_color_ignore != 0 || m->md.color == DCACHE_COLOR(pa)) { 1859 PMAP_STATS_INC(pmap_nzero_page_idle_c); 1860 va = TLB_PHYS_TO_DIRECT(pa); 1861 cpu_block_zero((void *)va, PAGE_SIZE); 1862 } else if (m->md.color == -1) { 1863 PMAP_STATS_INC(pmap_nzero_page_idle_nc); 1864 aszero(ASI_PHYS_USE_EC, pa, PAGE_SIZE); 1865 } else { 1866 PMAP_STATS_INC(pmap_nzero_page_idle_oc); 1867 va = pmap_idle_map + (m->md.color * PAGE_SIZE); 1868 tp = tsb_kvtotte(va); 1869 tp->tte_data = TD_V | TD_8K | TD_PA(pa) | TD_CP | TD_CV | TD_W; 1870 tp->tte_vpn = TV_VPN(va, TS_8K); 1871 cpu_block_zero((void *)va, PAGE_SIZE); 1872 tlb_page_demap(kernel_pmap, va); 1873 } 1874} 1875 1876void 1877pmap_copy_page(vm_page_t msrc, vm_page_t mdst) 1878{ 1879 vm_offset_t vdst; 1880 vm_offset_t vsrc; 1881 vm_paddr_t pdst; 1882 vm_paddr_t psrc; 1883 struct tte *tp; 1884 1885 KASSERT((mdst->flags & PG_FICTITIOUS) == 0, 1886 ("pmap_copy_page: fake dst page")); 1887 KASSERT((msrc->flags & PG_FICTITIOUS) == 0, 1888 ("pmap_copy_page: fake src page")); 1889 PMAP_STATS_INC(pmap_ncopy_page); 1890 pdst = VM_PAGE_TO_PHYS(mdst); 1891 psrc = VM_PAGE_TO_PHYS(msrc); 1892 if (dcache_color_ignore != 0 || 1893 (msrc->md.color == DCACHE_COLOR(psrc) && 1894 mdst->md.color == DCACHE_COLOR(pdst))) { 1895 PMAP_STATS_INC(pmap_ncopy_page_c); 1896 vdst = TLB_PHYS_TO_DIRECT(pdst); 1897 vsrc = TLB_PHYS_TO_DIRECT(psrc); 1898 cpu_block_copy((void *)vsrc, (void *)vdst, PAGE_SIZE); 1899 } else if (msrc->md.color == -1 && mdst->md.color == -1) { 1900 PMAP_STATS_INC(pmap_ncopy_page_nc); 1901 ascopy(ASI_PHYS_USE_EC, psrc, pdst, PAGE_SIZE); 1902 } else if (msrc->md.color == -1) { 1903 if (mdst->md.color == DCACHE_COLOR(pdst)) { 1904 PMAP_STATS_INC(pmap_ncopy_page_dc); 1905 vdst = TLB_PHYS_TO_DIRECT(pdst); 1906 ascopyfrom(ASI_PHYS_USE_EC, psrc, (void *)vdst, 1907 PAGE_SIZE); 1908 } else { 1909 PMAP_STATS_INC(pmap_ncopy_page_doc); 1910 PMAP_LOCK(kernel_pmap); 1911 vdst = pmap_temp_map_1 + (mdst->md.color * PAGE_SIZE); 1912 tp = tsb_kvtotte(vdst); 1913 tp->tte_data = 1914 TD_V | TD_8K | TD_PA(pdst) | TD_CP | TD_CV | TD_W; 1915 tp->tte_vpn = TV_VPN(vdst, TS_8K); 1916 ascopyfrom(ASI_PHYS_USE_EC, psrc, (void *)vdst, 1917 PAGE_SIZE); 1918 tlb_page_demap(kernel_pmap, vdst); 1919 PMAP_UNLOCK(kernel_pmap); 1920 } 1921 } else if (mdst->md.color == -1) { 1922 if (msrc->md.color == DCACHE_COLOR(psrc)) { 1923 PMAP_STATS_INC(pmap_ncopy_page_sc); 1924 vsrc = TLB_PHYS_TO_DIRECT(psrc); 1925 ascopyto((void *)vsrc, ASI_PHYS_USE_EC, pdst, 1926 PAGE_SIZE); 1927 } else { 1928 PMAP_STATS_INC(pmap_ncopy_page_soc); 1929 PMAP_LOCK(kernel_pmap); 1930 vsrc = pmap_temp_map_1 + (msrc->md.color * PAGE_SIZE); 1931 tp = tsb_kvtotte(vsrc); 1932 tp->tte_data = 1933 TD_V | TD_8K | TD_PA(psrc) | TD_CP | TD_CV | TD_W; 1934 tp->tte_vpn = TV_VPN(vsrc, TS_8K); 1935 ascopyto((void *)vsrc, ASI_PHYS_USE_EC, pdst, 1936 PAGE_SIZE); 1937 tlb_page_demap(kernel_pmap, vsrc); 1938 
		PMAP_STATS_INC(pmap_ncopy_page_oc);
		PMAP_LOCK(kernel_pmap);
		vdst = pmap_temp_map_1 + (mdst->md.color * PAGE_SIZE);
		tp = tsb_kvtotte(vdst);
		tp->tte_data =
		    TD_V | TD_8K | TD_PA(pdst) | TD_CP | TD_CV | TD_W;
		tp->tte_vpn = TV_VPN(vdst, TS_8K);
		vsrc = pmap_temp_map_2 + (msrc->md.color * PAGE_SIZE);
		tp = tsb_kvtotte(vsrc);
		tp->tte_data =
		    TD_V | TD_8K | TD_PA(psrc) | TD_CP | TD_CV | TD_W;
		tp->tte_vpn = TV_VPN(vsrc, TS_8K);
		cpu_block_copy((void *)vsrc, (void *)vdst, PAGE_SIZE);
		tlb_page_demap(kernel_pmap, vdst);
		tlb_page_demap(kernel_pmap, vsrc);
		PMAP_UNLOCK(kernel_pmap);
	}
}

vm_offset_t
pmap_quick_enter_page(vm_page_t m)
{
	vm_paddr_t pa;
	vm_offset_t qaddr;
	struct tte *tp;

	pa = VM_PAGE_TO_PHYS(m);
	if (dcache_color_ignore != 0 || m->md.color == DCACHE_COLOR(pa))
		return (TLB_PHYS_TO_DIRECT(pa));

	critical_enter();
	qaddr = PCPU_GET(qmap_addr);
	qaddr += (PAGE_SIZE * ((DCACHE_COLORS + DCACHE_COLOR(pa) -
	    DCACHE_COLOR(qaddr)) % DCACHE_COLORS));
	tp = tsb_kvtotte(qaddr);

	KASSERT(tp->tte_data == 0, ("pmap_quick_enter_page: PTE busy"));

	tp->tte_data = TD_V | TD_8K | TD_PA(pa) | TD_CP | TD_CV | TD_W;
	tp->tte_vpn = TV_VPN(qaddr, TS_8K);

	return (qaddr);
}

void
pmap_quick_remove_page(vm_offset_t addr)
{
	vm_offset_t qaddr;
	struct tte *tp;

	if (addr >= VM_MIN_DIRECT_ADDRESS)
		return;

	tp = tsb_kvtotte(addr);
	qaddr = PCPU_GET(qmap_addr);

	KASSERT((addr >= qaddr) && (addr < (qaddr + (PAGE_SIZE * DCACHE_COLORS))),
	    ("pmap_quick_remove_page: invalid address"));
	KASSERT(tp->tte_data != 0, ("pmap_quick_remove_page: PTE not in use"));

	stxa(TLB_DEMAP_VA(addr) | TLB_DEMAP_NUCLEUS | TLB_DEMAP_PAGE, ASI_DMMU_DEMAP, 0);
	stxa(TLB_DEMAP_VA(addr) | TLB_DEMAP_NUCLEUS | TLB_DEMAP_PAGE, ASI_IMMU_DEMAP, 0);
	flush(KERNBASE);
	TTE_ZERO(tp);
	critical_exit();
}

int unmapped_buf_allowed;

void
pmap_copy_pages(vm_page_t ma[], vm_offset_t a_offset, vm_page_t mb[],
    vm_offset_t b_offset, int xfersize)
{

	panic("pmap_copy_pages: not implemented");
}

/*
 * Returns true if the pmap's pv is one of the first
 * 16 pvs linked to from this page.  This count may
 * be changed upwards or downwards in the future; it
 * is only necessary that true be returned for a small
 * subset of pmaps for proper page aging.
 */
boolean_t
pmap_page_exists_quick(pmap_t pm, vm_page_t m)
{
	struct tte *tp;
	int loops;
	boolean_t rv;

	KASSERT((m->oflags & VPO_UNMANAGED) == 0,
	    ("pmap_page_exists_quick: page %p is not managed", m));
	loops = 0;
	rv = FALSE;
	rw_wlock(&tte_list_global_lock);
	TAILQ_FOREACH(tp, &m->md.tte_list, tte_link) {
		if ((tp->tte_data & TD_PV) == 0)
			continue;
		if (TTE_GET_PMAP(tp) == pm) {
			rv = TRUE;
			break;
		}
		if (++loops >= 16)
			break;
	}
	rw_wunlock(&tte_list_global_lock);
	return (rv);
}

/*
 * Return the number of managed mappings to the given physical page
 * that are wired.
 */
int
pmap_page_wired_mappings(vm_page_t m)
{
	struct tte *tp;
	int count;

	count = 0;
	if ((m->oflags & VPO_UNMANAGED) != 0)
		return (count);
	rw_wlock(&tte_list_global_lock);
	TAILQ_FOREACH(tp, &m->md.tte_list, tte_link)
		if ((tp->tte_data & (TD_PV | TD_WIRED)) == (TD_PV | TD_WIRED))
			count++;
	rw_wunlock(&tte_list_global_lock);
	return (count);
}

/*
 * Remove all pages from the specified address space; this aids process exit
 * speeds.  This is much faster than pmap_remove in the case of running down
 * an entire address space.  Only works for the current pmap.
 */
void
pmap_remove_pages(pmap_t pm)
{

}

/*
 * Returns TRUE if the given page has a managed mapping.
 */
boolean_t
pmap_page_is_mapped(vm_page_t m)
{
	struct tte *tp;
	boolean_t rv;

	rv = FALSE;
	if ((m->oflags & VPO_UNMANAGED) != 0)
		return (rv);
	rw_wlock(&tte_list_global_lock);
	TAILQ_FOREACH(tp, &m->md.tte_list, tte_link)
		if ((tp->tte_data & TD_PV) != 0) {
			rv = TRUE;
			break;
		}
	rw_wunlock(&tte_list_global_lock);
	return (rv);
}

/*
 * Return a count of reference bits for a page, clearing those bits.
 * It is not necessary for every reference bit to be cleared, but it
 * is necessary that 0 only be returned when there are truly no
 * reference bits set.
 *
 * As an optimization, update the page's dirty field if a modified bit is
 * found while counting reference bits.  This opportunistic update can be
 * performed at low cost and can eliminate the need for some future calls
 * to pmap_is_modified().  However, since this function stops after
 * finding PMAP_TS_REFERENCED_MAX reference bits, it may not detect some
 * dirty pages.  Those dirty pages will only be detected by a future call
 * to pmap_is_modified().
 */
int
pmap_ts_referenced(vm_page_t m)
{
	struct tte *tpf;
	struct tte *tpn;
	struct tte *tp;
	u_long data;
	int count;

	KASSERT((m->oflags & VPO_UNMANAGED) == 0,
	    ("pmap_ts_referenced: page %p is not managed", m));
	count = 0;
	rw_wlock(&tte_list_global_lock);
	if ((tp = TAILQ_FIRST(&m->md.tte_list)) != NULL) {
		tpf = tp;
		do {
			tpn = TAILQ_NEXT(tp, tte_link);
			TAILQ_REMOVE(&m->md.tte_list, tp, tte_link);
			TAILQ_INSERT_TAIL(&m->md.tte_list, tp, tte_link);
			if ((tp->tte_data & TD_PV) == 0)
				continue;
			data = atomic_clear_long(&tp->tte_data, TD_REF);
			if ((data & TD_W) != 0)
				vm_page_dirty(m);
			if ((data & TD_REF) != 0 && ++count >=
			    PMAP_TS_REFERENCED_MAX)
				break;
		} while ((tp = tpn) != NULL && tp != tpf);
	}
	rw_wunlock(&tte_list_global_lock);
	return (count);
}

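/*
 * Return whether the given physical page was modified (written to) through
 * any of its managed mappings.
 */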
boolean_t
pmap_is_modified(vm_page_t m)
{
	struct tte *tp;
	boolean_t rv;

	KASSERT((m->oflags & VPO_UNMANAGED) == 0,
	    ("pmap_is_modified: page %p is not managed", m));
	rv = FALSE;

	/*
	 * If the page is not exclusive busied, then PGA_WRITEABLE cannot be
	 * concurrently set while the object is locked.  Thus, if PGA_WRITEABLE
	 * is clear, no TTEs can have TD_W set.
	 */
	VM_OBJECT_ASSERT_WLOCKED(m->object);
	if (!vm_page_xbusied(m) && (m->aflags & PGA_WRITEABLE) == 0)
		return (rv);
	rw_wlock(&tte_list_global_lock);
	TAILQ_FOREACH(tp, &m->md.tte_list, tte_link) {
		if ((tp->tte_data & TD_PV) == 0)
			continue;
		if ((tp->tte_data & TD_W) != 0) {
			rv = TRUE;
			break;
		}
	}
	rw_wunlock(&tte_list_global_lock);
	return (rv);
}

/*
 * pmap_is_prefaultable:
 *
 *	Return whether or not the specified virtual address is eligible
 *	for prefault.
 */
boolean_t
pmap_is_prefaultable(pmap_t pmap, vm_offset_t addr)
{
	boolean_t rv;

	PMAP_LOCK(pmap);
	rv = tsb_tte_lookup(pmap, addr) == NULL;
	PMAP_UNLOCK(pmap);
	return (rv);
}

/*
 * Return whether or not the specified physical page was referenced
 * in any physical maps.
 */
boolean_t
pmap_is_referenced(vm_page_t m)
{
	struct tte *tp;
	boolean_t rv;

	KASSERT((m->oflags & VPO_UNMANAGED) == 0,
	    ("pmap_is_referenced: page %p is not managed", m));
	rv = FALSE;
	rw_wlock(&tte_list_global_lock);
	TAILQ_FOREACH(tp, &m->md.tte_list, tte_link) {
		if ((tp->tte_data & TD_PV) == 0)
			continue;
		if ((tp->tte_data & TD_REF) != 0) {
			rv = TRUE;
			break;
		}
	}
	rw_wunlock(&tte_list_global_lock);
	return (rv);
}

/*
 * This function is advisory.
 */
void
pmap_advise(pmap_t pmap, vm_offset_t sva, vm_offset_t eva, int advice)
{
}

void
pmap_clear_modify(vm_page_t m)
{
	struct tte *tp;
	u_long data;

	KASSERT((m->oflags & VPO_UNMANAGED) == 0,
	    ("pmap_clear_modify: page %p is not managed", m));
	VM_OBJECT_ASSERT_WLOCKED(m->object);
	KASSERT(!vm_page_xbusied(m),
	    ("pmap_clear_modify: page %p is exclusive busied", m));

	/*
	 * If the page is not PGA_WRITEABLE, then no TTEs can have TD_W set.
	 * If the object containing the page is locked and the page is not
	 * exclusive busied, then PGA_WRITEABLE cannot be concurrently set.
	 */
	if ((m->aflags & PGA_WRITEABLE) == 0)
		return;
	rw_wlock(&tte_list_global_lock);
	TAILQ_FOREACH(tp, &m->md.tte_list, tte_link) {
		if ((tp->tte_data & TD_PV) == 0)
			continue;
		data = atomic_clear_long(&tp->tte_data, TD_W);
		if ((data & TD_W) != 0)
			tlb_page_demap(TTE_GET_PMAP(tp), TTE_GET_VA(tp));
	}
	rw_wunlock(&tte_list_global_lock);
}

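/*
 * Revoke write access to the given page from all of its managed mappings.
 */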
void
pmap_remove_write(vm_page_t m)
{
	struct tte *tp;
	u_long data;

	KASSERT((m->oflags & VPO_UNMANAGED) == 0,
	    ("pmap_remove_write: page %p is not managed", m));

	/*
	 * If the page is not exclusive busied, then PGA_WRITEABLE cannot be
	 * set by another thread while the object is locked.  Thus,
	 * if PGA_WRITEABLE is clear, no page table entries need updating.
	 */
	VM_OBJECT_ASSERT_WLOCKED(m->object);
	if (!vm_page_xbusied(m) && (m->aflags & PGA_WRITEABLE) == 0)
		return;
	rw_wlock(&tte_list_global_lock);
	TAILQ_FOREACH(tp, &m->md.tte_list, tte_link) {
		if ((tp->tte_data & TD_PV) == 0)
			continue;
		data = atomic_clear_long(&tp->tte_data, TD_SW | TD_W);
		if ((data & TD_W) != 0) {
			vm_page_dirty(m);
			tlb_page_demap(TTE_GET_PMAP(tp), TTE_GET_VA(tp));
		}
	}
	vm_page_aflag_clear(m, PGA_WRITEABLE);
	rw_wunlock(&tte_list_global_lock);
}

int
pmap_mincore(pmap_t pm, vm_offset_t addr, vm_paddr_t *locked_pa)
{

	/* TODO; */
	return (0);
}

/*
 * Activate a user pmap.  The pmap must be activated before its address space
 * can be accessed in any way.
 */
void
pmap_activate(struct thread *td)
{
	struct vmspace *vm;
	struct pmap *pm;
	int context;

	critical_enter();
	vm = td->td_proc->p_vmspace;
	pm = vmspace_pmap(vm);

	context = PCPU_GET(tlb_ctx);
	if (context == PCPU_GET(tlb_ctx_max)) {
		tlb_flush_user();
		context = PCPU_GET(tlb_ctx_min);
	}
	PCPU_SET(tlb_ctx, context + 1);

	pm->pm_context[curcpu] = context;
#ifdef SMP
	CPU_SET_ATOMIC(PCPU_GET(cpuid), &pm->pm_active);
	atomic_store_acq_ptr((uintptr_t *)PCPU_PTR(pmap), (uintptr_t)pm);
#else
	CPU_SET(PCPU_GET(cpuid), &pm->pm_active);
	PCPU_SET(pmap, pm);
#endif

	stxa(AA_DMMU_TSB, ASI_DMMU, pm->pm_tsb);
	stxa(AA_IMMU_TSB, ASI_IMMU, pm->pm_tsb);
	stxa(AA_DMMU_PCXR, ASI_DMMU, (ldxa(AA_DMMU_PCXR, ASI_DMMU) &
	    TLB_CXR_PGSZ_MASK) | context);
	flush(KERNBASE);
	critical_exit();
}

void
pmap_sync_icache(pmap_t pm, vm_offset_t va, vm_size_t sz)
{

}

/*
 * Increase the starting virtual address of the given mapping if a
 * different alignment might result in more superpage mappings.
 */
void
pmap_align_superpage(vm_object_t object, vm_ooffset_t offset,
    vm_offset_t *addr, vm_size_t size)
{

}