pmap.c revision 239170
/*
 * Copyright (c) 1991 Regents of the University of California.
 * All rights reserved.
 * Copyright (c) 1994 John S. Dyson
 * All rights reserved.
 * Copyright (c) 1994 David Greenman
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * the Systems Programming Group of the University of Utah Computer
 * Science Department and William Jolitz of UUNET Technologies Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	from: @(#)pmap.c	7.7 (Berkeley)	5/12/91
 *	from: src/sys/i386/i386/pmap.c,v 1.250.2.8 2000/11/21 00:09:14 ps
 *	JNPR: pmap.c,v 1.11.2.1 2007/08/16 11:51:06 girish
 */

/*
 * Manages physical address maps.
 *
 * In addition to hardware address maps, this
 * module is called upon to provide software-use-only
 * maps which may or may not be stored in the same
 * form as hardware maps.  These pseudo-maps are
 * used to store intermediate results from copy
 * operations to and from address spaces.
 *
 * Since the information managed by this module is
 * also stored by the logical address mapping module,
 * this module may throw away valid virtual-to-physical
 * mappings at almost any time.  However, invalidations
 * of virtual-to-physical mappings must be done as
 * requested.
 *
 * In order to cope with hardware architectures which
 * make virtual-to-physical map invalidates expensive,
 * this module may delay invalidation or reduced-protection
 * operations until such time as they are actually
 * necessary.  This module is given full information as
 * to which processors are currently using which maps,
 * and as to when physical maps must be made correct.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: head/sys/mips/mips/pmap.c 239170 2012-08-10 05:00:50Z alc $");

#include "opt_ddb.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/msgbuf.h>
#include <sys/vmmeter.h>
#include <sys/mman.h>
#include <sys/smp.h>
#ifdef DDB
#include <ddb/ddb.h>
#endif

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/vm_phys.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <vm/vm_kern.h>
#include <vm/vm_page.h>
#include <vm/vm_map.h>
#include <vm/vm_object.h>
#include <vm/vm_extern.h>
#include <vm/vm_pageout.h>
#include <vm/vm_pager.h>
#include <vm/uma.h>
#include <sys/pcpu.h>
#include <sys/sched.h>
#ifdef SMP
#include <sys/smp.h>
#endif

#include <machine/cache.h>
#include <machine/md_var.h>
#include <machine/tlb.h>

#undef PMAP_DEBUG

#ifndef PMAP_SHPGPERPROC
#define	PMAP_SHPGPERPROC 200
#endif

#if !defined(DIAGNOSTIC)
#define	PMAP_INLINE __inline
#else
#define	PMAP_INLINE
#endif

/*
 * Get PDEs and PTEs for user/kernel address space
 */
#define	pmap_seg_index(v)	(((v) >> SEGSHIFT) & (NPDEPG - 1))
#define	pmap_pde_index(v)	(((v) >> PDRSHIFT) & (NPDEPG - 1))
#define	pmap_pte_index(v)	(((v) >> PAGE_SHIFT) & (NPTEPG - 1))
#define	pmap_pde_pindex(v)	((v) >> PDRSHIFT)

#ifdef __mips_n64
#define	NUPDE			(NPDEPG * NPDEPG)
#define	NUSERPGTBLS		(NUPDE + NPDEPG)
#else
#define	NUPDE			(NPDEPG)
#define	NUSERPGTBLS		(NUPDE)
#endif

#define	is_kernel_pmap(x)	((x) == kernel_pmap)
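
/*
 * Illustrative summary (not compiled): the macros above resolve a VA in
 * at most three steps on n64 and two on o32:
 *
 *	pm_segtab[pmap_seg_index(va)]	-> page directory page (n64 only)
 *	pde[pmap_pde_index(va)]		-> page table page
 *	pte[pmap_pte_index(va)]		-> pt_entry_t for the page
 *
 * On o32, PDRSHIFT equals SEGSHIFT, so segment table entries point
 * directly at page table pages and the middle step collapses.
 */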

struct pmap kernel_pmap_store;
pd_entry_t *kernel_segmap;

vm_offset_t virtual_avail;	/* VA of first avail page (after kernel bss) */
vm_offset_t virtual_end;	/* VA of last avail page (end of kernel AS) */

static int nkpt;
unsigned pmap_max_asid;		/* max ASID supported by the system */

#define	PMAP_ASID_RESERVED	0

vm_offset_t kernel_vm_end = VM_MIN_KERNEL_ADDRESS;

static void pmap_asid_alloc(pmap_t pmap);

/*
 * Data for the pv entry allocation mechanism
 */
static uma_zone_t pvzone;
static struct vm_object pvzone_obj;
static int pv_entry_count = 0, pv_entry_max = 0, pv_entry_high_water = 0;

static PMAP_INLINE void free_pv_entry(pv_entry_t pv);
static pv_entry_t get_pv_entry(pmap_t locked_pmap);
static void pmap_pvh_free(struct md_page *pvh, pmap_t pmap, vm_offset_t va);
static pv_entry_t pmap_pvh_remove(struct md_page *pvh, pmap_t pmap,
    vm_offset_t va);
static __inline void pmap_changebit(vm_page_t m, int bit, boolean_t setem);
static vm_page_t pmap_enter_quick_locked(pmap_t pmap, vm_offset_t va,
    vm_page_t m, vm_prot_t prot, vm_page_t mpte);
static int pmap_remove_pte(struct pmap *pmap, pt_entry_t *ptq, vm_offset_t va,
    pd_entry_t pde);
static void pmap_remove_page(struct pmap *pmap, vm_offset_t va);
static void pmap_remove_entry(struct pmap *pmap, vm_page_t m, vm_offset_t va);
static boolean_t pmap_try_insert_pv_entry(pmap_t pmap, vm_page_t mpte,
    vm_offset_t va, vm_page_t m);
static void pmap_update_page(pmap_t pmap, vm_offset_t va, pt_entry_t pte);
static void pmap_invalidate_all(pmap_t pmap);
static void pmap_invalidate_page(pmap_t pmap, vm_offset_t va);
static int _pmap_unwire_pte_hold(pmap_t pmap, vm_offset_t va, vm_page_t m);

static vm_page_t pmap_allocpte(pmap_t pmap, vm_offset_t va, int flags);
static vm_page_t _pmap_allocpte(pmap_t pmap, unsigned ptepindex, int flags);
static int pmap_unuse_pt(pmap_t, vm_offset_t, pd_entry_t);
static pt_entry_t init_pte_prot(vm_offset_t va, vm_page_t m, vm_prot_t prot);

#ifdef SMP
static void pmap_invalidate_page_action(void *arg);
static void pmap_update_page_action(void *arg);
#endif

#ifndef __mips_n64
/*
 * This structure is for high memory (memory above 512Meg in 32 bit) support.
 * The highmem area does not have a KSEG0 mapping, and we need a mechanism to
 * do temporary per-CPU mappings for pmap_zero_page, pmap_copy_page etc.
 *
 * At bootup, we reserve 2 virtual pages per CPU for mapping highmem pages. To
 * access a highmem physical address on a CPU, we map the physical address to
 * the reserved virtual address for the CPU in the kernel pagetable.  This is
 * done with interrupts disabled (although a spinlock and sched_pin would be
 * sufficient).
 */
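
/*
 * Sketch of the intended calling pattern (illustrative only; see
 * pmap_lmem_map1() and pmap_lmem_unmap() below):
 *
 *	vm_offset_t va = pmap_lmem_map1(pa);	disables interrupts and
 *						installs a wired PTE
 *	bzero((void *)va, PAGE_SIZE);		access the high page
 *	pmap_lmem_unmap();			invalidates the TLB entry
 *						and restores interrupts
 */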
struct local_sysmaps {
	vm_offset_t	base;
	uint32_t	saved_intr;
	uint16_t	valid1, valid2;
};
static struct local_sysmaps sysmap_lmem[MAXCPU];

static __inline void
pmap_alloc_lmem_map(void)
{
	int i;

	for (i = 0; i < MAXCPU; i++) {
		sysmap_lmem[i].base = virtual_avail;
		virtual_avail += PAGE_SIZE * 2;
		sysmap_lmem[i].valid1 = sysmap_lmem[i].valid2 = 0;
	}
}

static __inline vm_offset_t
pmap_lmem_map1(vm_paddr_t phys)
{
	struct local_sysmaps *sysm;
	pt_entry_t *pte, npte;
	vm_offset_t va;
	uint32_t intr;
	int cpu;

	intr = intr_disable();
	cpu = PCPU_GET(cpuid);
	sysm = &sysmap_lmem[cpu];
	sysm->saved_intr = intr;
	va = sysm->base;
	npte = TLBLO_PA_TO_PFN(phys) |
	    PTE_D | PTE_V | PTE_G | PTE_W | PTE_C_CACHE;
	pte = pmap_pte(kernel_pmap, va);
	*pte = npte;
	sysm->valid1 = 1;
	return (va);
}

static __inline vm_offset_t
pmap_lmem_map2(vm_paddr_t phys1, vm_paddr_t phys2)
{
	struct local_sysmaps *sysm;
	pt_entry_t *pte, npte;
	vm_offset_t va1, va2;
	uint32_t intr;
	int cpu;

	intr = intr_disable();
	cpu = PCPU_GET(cpuid);
	sysm = &sysmap_lmem[cpu];
	sysm->saved_intr = intr;
	va1 = sysm->base;
	va2 = sysm->base + PAGE_SIZE;
	npte = TLBLO_PA_TO_PFN(phys1) |
	    PTE_D | PTE_V | PTE_G | PTE_W | PTE_C_CACHE;
	pte = pmap_pte(kernel_pmap, va1);
	*pte = npte;
	npte = TLBLO_PA_TO_PFN(phys2) |
	    PTE_D | PTE_V | PTE_G | PTE_W | PTE_C_CACHE;
	pte = pmap_pte(kernel_pmap, va2);
	*pte = npte;
	sysm->valid1 = 1;
	sysm->valid2 = 1;
	return (va1);
}

static __inline void
pmap_lmem_unmap(void)
{
	struct local_sysmaps *sysm;
	pt_entry_t *pte;
	int cpu;

	cpu = PCPU_GET(cpuid);
	sysm = &sysmap_lmem[cpu];
	pte = pmap_pte(kernel_pmap, sysm->base);
	*pte = PTE_G;
	tlb_invalidate_address(kernel_pmap, sysm->base);
	sysm->valid1 = 0;
	if (sysm->valid2) {
		pte = pmap_pte(kernel_pmap, sysm->base + PAGE_SIZE);
		*pte = PTE_G;
		tlb_invalidate_address(kernel_pmap, sysm->base + PAGE_SIZE);
		sysm->valid2 = 0;
	}
	intr_restore(sysm->saved_intr);
}
#else /* __mips_n64 */

static __inline void
pmap_alloc_lmem_map(void)
{
}

static __inline vm_offset_t
pmap_lmem_map1(vm_paddr_t phys)
{

	return (0);
}

static __inline vm_offset_t
pmap_lmem_map2(vm_paddr_t phys1, vm_paddr_t phys2)
{

	return (0);
}

static __inline vm_offset_t
pmap_lmem_unmap(void)
{

	return (0);
}
#endif /* !__mips_n64 */

/*
 * Page table entry lookup routines.
 */
static __inline pd_entry_t *
pmap_segmap(pmap_t pmap, vm_offset_t va)
{

	return (&pmap->pm_segtab[pmap_seg_index(va)]);
}

#ifdef __mips_n64
static __inline pd_entry_t *
pmap_pdpe_to_pde(pd_entry_t *pdpe, vm_offset_t va)
{
	pd_entry_t *pde;

	pde = (pd_entry_t *)*pdpe;
	return (&pde[pmap_pde_index(va)]);
}

static __inline pd_entry_t *
pmap_pde(pmap_t pmap, vm_offset_t va)
{
	pd_entry_t *pdpe;

	pdpe = pmap_segmap(pmap, va);
	if (pdpe == NULL || *pdpe == NULL)
		return (NULL);

	return (pmap_pdpe_to_pde(pdpe, va));
}
#else
static __inline pd_entry_t *
pmap_pdpe_to_pde(pd_entry_t *pdpe, vm_offset_t va)
{

	return (pdpe);
}

static __inline pd_entry_t *
pmap_pde(pmap_t pmap, vm_offset_t va)
{

	return (pmap_segmap(pmap, va));
}
#endif

static __inline pt_entry_t *
pmap_pde_to_pte(pd_entry_t *pde, vm_offset_t va)
{
	pt_entry_t *pte;

	pte = (pt_entry_t *)*pde;
	return (&pte[pmap_pte_index(va)]);
}

pt_entry_t *
pmap_pte(pmap_t pmap, vm_offset_t va)
{
	pd_entry_t *pde;

	pde = pmap_pde(pmap, va);
	if (pde == NULL || *pde == NULL)
		return (NULL);

	return (pmap_pde_to_pte(pde, va));
}
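
/*
 * Illustrative sketch (not compiled): translating a VA by hand with the
 * routines above, much as pmap_extract() does later under the pmap lock:
 *
 *	pt_entry_t *pte = pmap_pte(pmap, va);
 *	if (pte != NULL && pte_test(pte, PTE_V))
 *		pa = TLBLO_PTE_TO_PA(*pte) | (va & PAGE_MASK);
 *
 * A NULL return from pmap_pte() means some level of the tree is not yet
 * populated.
 */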
512Meg?"); 413 va = MIPS_PHYS_TO_DIRECT(pa); 414 bzero((caddr_t)va, size); 415 return (va); 416} 417 418/* 419 * Bootstrap the system enough to run with virtual memory. This 420 * assumes that the phys_avail array has been initialized. 421 */ 422static void 423pmap_create_kernel_pagetable(void) 424{ 425 int i, j; 426 vm_offset_t ptaddr; 427 pt_entry_t *pte; 428#ifdef __mips_n64 429 pd_entry_t *pde; 430 vm_offset_t pdaddr; 431 int npt, npde; 432#endif 433 434 /* 435 * Allocate segment table for the kernel 436 */ 437 kernel_segmap = (pd_entry_t *)pmap_steal_memory(PAGE_SIZE); 438 439 /* 440 * Allocate second level page tables for the kernel 441 */ 442#ifdef __mips_n64 443 npde = howmany(NKPT, NPDEPG); 444 pdaddr = pmap_steal_memory(PAGE_SIZE * npde); 445#endif 446 nkpt = NKPT; 447 ptaddr = pmap_steal_memory(PAGE_SIZE * nkpt); 448 449 /* 450 * The R[4-7]?00 stores only one copy of the Global bit in the 451 * translation lookaside buffer for each 2 page entry. Thus invalid 452 * entrys must have the Global bit set so when Entry LO and Entry HI 453 * G bits are anded together they will produce a global bit to store 454 * in the tlb. 455 */ 456 for (i = 0, pte = (pt_entry_t *)ptaddr; i < (nkpt * NPTEPG); i++, pte++) 457 *pte = PTE_G; 458 459#ifdef __mips_n64 460 for (i = 0, npt = nkpt; npt > 0; i++) { 461 kernel_segmap[i] = (pd_entry_t)(pdaddr + i * PAGE_SIZE); 462 pde = (pd_entry_t *)kernel_segmap[i]; 463 464 for (j = 0; j < NPDEPG && npt > 0; j++, npt--) 465 pde[j] = (pd_entry_t)(ptaddr + (i * NPDEPG + j) * PAGE_SIZE); 466 } 467#else 468 for (i = 0, j = pmap_seg_index(VM_MIN_KERNEL_ADDRESS); i < nkpt; i++, j++) 469 kernel_segmap[j] = (pd_entry_t)(ptaddr + (i * PAGE_SIZE)); 470#endif 471 472 PMAP_LOCK_INIT(kernel_pmap); 473 kernel_pmap->pm_segtab = kernel_segmap; 474 CPU_FILL(&kernel_pmap->pm_active); 475 TAILQ_INIT(&kernel_pmap->pm_pvlist); 476 kernel_pmap->pm_asid[0].asid = PMAP_ASID_RESERVED; 477 kernel_pmap->pm_asid[0].gen = 0; 478 kernel_vm_end += nkpt * NPTEPG * PAGE_SIZE; 479} 480 481void 482pmap_bootstrap(void) 483{ 484 int i; 485 int need_local_mappings = 0; 486 487 /* Sort. */ 488again: 489 for (i = 0; phys_avail[i + 1] != 0; i += 2) { 490 /* 491 * Keep the memory aligned on page boundary. 492 */ 493 phys_avail[i] = round_page(phys_avail[i]); 494 phys_avail[i + 1] = trunc_page(phys_avail[i + 1]); 495 496 if (i < 2) 497 continue; 498 if (phys_avail[i - 2] > phys_avail[i]) { 499 vm_paddr_t ptemp[2]; 500 501 ptemp[0] = phys_avail[i + 0]; 502 ptemp[1] = phys_avail[i + 1]; 503 504 phys_avail[i + 0] = phys_avail[i - 2]; 505 phys_avail[i + 1] = phys_avail[i - 1]; 506 507 phys_avail[i - 2] = ptemp[0]; 508 phys_avail[i - 1] = ptemp[1]; 509 goto again; 510 } 511 } 512 513 /* 514 * In 32 bit, we may have memory which cannot be mapped directly. 515 * This memory will need temporary mapping before it can be 516 * accessed. 517 */ 518 if (!MIPS_DIRECT_MAPPABLE(phys_avail[i - 1] - 1)) 519 need_local_mappings = 1; 520 521 /* 522 * Copy the phys_avail[] array before we start stealing memory from it. 

void
pmap_bootstrap(void)
{
	int i;
	int need_local_mappings = 0;

	/* Sort. */
again:
	for (i = 0; phys_avail[i + 1] != 0; i += 2) {
		/*
		 * Keep the memory aligned on page boundary.
		 */
		phys_avail[i] = round_page(phys_avail[i]);
		phys_avail[i + 1] = trunc_page(phys_avail[i + 1]);

		if (i < 2)
			continue;
		if (phys_avail[i - 2] > phys_avail[i]) {
			vm_paddr_t ptemp[2];

			ptemp[0] = phys_avail[i + 0];
			ptemp[1] = phys_avail[i + 1];

			phys_avail[i + 0] = phys_avail[i - 2];
			phys_avail[i + 1] = phys_avail[i - 1];

			phys_avail[i - 2] = ptemp[0];
			phys_avail[i - 1] = ptemp[1];
			goto again;
		}
	}

	/*
	 * In 32 bit, we may have memory which cannot be mapped directly.
	 * This memory will need temporary mapping before it can be
	 * accessed.
	 */
	if (!MIPS_DIRECT_MAPPABLE(phys_avail[i - 1] - 1))
		need_local_mappings = 1;

	/*
	 * Copy the phys_avail[] array before we start stealing memory from it.
	 */
	for (i = 0; phys_avail[i + 1] != 0; i += 2) {
		physmem_desc[i] = phys_avail[i];
		physmem_desc[i + 1] = phys_avail[i + 1];
	}

	Maxmem = atop(phys_avail[i - 1]);

	if (bootverbose) {
		printf("Physical memory chunk(s):\n");
		for (i = 0; phys_avail[i + 1] != 0; i += 2) {
			vm_paddr_t size;

			size = phys_avail[i + 1] - phys_avail[i];
			printf("%#08jx - %#08jx, %ju bytes (%ju pages)\n",
			    (uintmax_t) phys_avail[i],
			    (uintmax_t) phys_avail[i + 1] - 1,
			    (uintmax_t) size, (uintmax_t) size / PAGE_SIZE);
		}
		printf("Maxmem is 0x%0jx\n", ptoa((uintmax_t)Maxmem));
	}
	/*
	 * Steal the message buffer from the beginning of memory.
	 */
	msgbufp = (struct msgbuf *)pmap_steal_memory(msgbufsize);
	msgbufinit(msgbufp, msgbufsize);

	/*
	 * Steal thread0 kstack.
	 */
	kstack0 = pmap_steal_memory(KSTACK_PAGES << PAGE_SHIFT);

	virtual_avail = VM_MIN_KERNEL_ADDRESS;
	virtual_end = VM_MAX_KERNEL_ADDRESS;

#ifdef SMP
	/*
	 * Steal some virtual address space to map the pcpu area.
	 */
	virtual_avail = roundup2(virtual_avail, PAGE_SIZE * 2);
	pcpup = (struct pcpu *)virtual_avail;
	virtual_avail += PAGE_SIZE * 2;

	/*
	 * Initialize the wired TLB entry mapping the pcpu region for
	 * the BSP at 'pcpup'. Up until this point we were operating
	 * with the 'pcpup' for the BSP pointing to a virtual address
	 * in KSEG0 so there was no need for a TLB mapping.
	 */
	mips_pcpu_tlb_init(PCPU_ADDR(0));

	if (bootverbose)
		printf("pcpu is available at virtual address %p.\n", pcpup);
#endif

	if (need_local_mappings)
		pmap_alloc_lmem_map();
	pmap_create_kernel_pagetable();
	pmap_max_asid = VMNUM_PIDS;
	mips_wr_entryhi(0);
	mips_wr_pagemask(0);
}

/*
 * Initialize a vm_page's machine-dependent fields.
 */
void
pmap_page_init(vm_page_t m)
{

	TAILQ_INIT(&m->md.pv_list);
	m->md.pv_list_count = 0;
	m->md.pv_flags = 0;
}

/*
 * Initialize the pmap module.
 * Called by vm_init, to initialize any structures that the pmap
 * system needs to map virtual memory.
 * pmap_init has been enhanced to support, in a fairly consistent
 * way, discontiguous physical memory.
 */
void
pmap_init(void)
{

	/*
	 * Initialize the address space (zone) for the pv entries.  Set a
	 * high water mark so that the system can recover from excessive
	 * numbers of pv entries.
	 */
	pvzone = uma_zcreate("PV ENTRY", sizeof(struct pv_entry), NULL, NULL,
	    NULL, NULL, UMA_ALIGN_PTR, UMA_ZONE_VM | UMA_ZONE_NOFREE);
	pv_entry_max = PMAP_SHPGPERPROC * maxproc + cnt.v_page_count;
	pv_entry_high_water = 9 * (pv_entry_max / 10);
	uma_zone_set_obj(pvzone, &pvzone_obj, pv_entry_max);
}
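
/*
 * Sizing example (illustrative): with the default PMAP_SHPGPERPROC of
 * 200 and, say, maxproc = 1000, pv_entry_max is 200,000 entries plus
 * one per physical page; pv_entry_high_water is 90% of that, the point
 * past which get_pv_entry() starts waking the pagedaemon.
 */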

/***************************************************
 * Low level helper routines.....
 ***************************************************/

#ifdef SMP
static __inline void
pmap_call_on_active_cpus(pmap_t pmap, void (*fn)(void *), void *arg)
{
	int cpuid, cpu, self;
	cpuset_t active_cpus;

	sched_pin();
	if (is_kernel_pmap(pmap)) {
		smp_rendezvous(NULL, fn, NULL, arg);
		goto out;
	}
	/* Force ASID update on inactive CPUs */
	CPU_FOREACH(cpu) {
		if (!CPU_ISSET(cpu, &pmap->pm_active))
			pmap->pm_asid[cpu].gen = 0;
	}
	cpuid = PCPU_GET(cpuid);
	/*
	 * XXX: barrier/locking for active?
	 *
	 * Take a snapshot of active here, any further changes are ignored.
	 * tlb update/invalidate should be harmless on inactive CPUs
	 */
	active_cpus = pmap->pm_active;
	self = CPU_ISSET(cpuid, &active_cpus);
	CPU_CLR(cpuid, &active_cpus);
	/* Optimize for the case where this cpu is the only active one */
	if (CPU_EMPTY(&active_cpus)) {
		if (self)
			fn(arg);
	} else {
		if (self)
			CPU_SET(cpuid, &active_cpus);
		smp_rendezvous_cpus(active_cpus, NULL, fn, NULL, arg);
	}
out:
	sched_unpin();
}
#else /* !SMP */
static __inline void
pmap_call_on_active_cpus(pmap_t pmap, void (*fn)(void *), void *arg)
{
	int cpuid;

	if (is_kernel_pmap(pmap)) {
		fn(arg);
		return;
	}
	cpuid = PCPU_GET(cpuid);
	if (!CPU_ISSET(cpuid, &pmap->pm_active))
		pmap->pm_asid[cpuid].gen = 0;
	else
		fn(arg);
}
#endif /* SMP */

static void
pmap_invalidate_all(pmap_t pmap)
{

	pmap_call_on_active_cpus(pmap,
	    (void (*)(void *))tlb_invalidate_all_user, pmap);
}

struct pmap_invalidate_page_arg {
	pmap_t pmap;
	vm_offset_t va;
};

static void
pmap_invalidate_page_action(void *arg)
{
	struct pmap_invalidate_page_arg *p = arg;

	tlb_invalidate_address(p->pmap, p->va);
}

static void
pmap_invalidate_page(pmap_t pmap, vm_offset_t va)
{
	struct pmap_invalidate_page_arg arg;

	arg.pmap = pmap;
	arg.va = va;
	pmap_call_on_active_cpus(pmap, pmap_invalidate_page_action, &arg);
}

struct pmap_update_page_arg {
	pmap_t pmap;
	vm_offset_t va;
	pt_entry_t pte;
};

static void
pmap_update_page_action(void *arg)
{
	struct pmap_update_page_arg *p = arg;

	tlb_update(p->pmap, p->va, p->pte);
}

static void
pmap_update_page(pmap_t pmap, vm_offset_t va, pt_entry_t pte)
{
	struct pmap_update_page_arg arg;

	arg.pmap = pmap;
	arg.va = va;
	arg.pte = pte;
	pmap_call_on_active_cpus(pmap, pmap_update_page_action, &arg);
}

/*
 *	Routine:	pmap_extract
 *	Function:
 *		Extract the physical page address associated
 *		with the given map/virtual_address pair.
 */
vm_paddr_t
pmap_extract(pmap_t pmap, vm_offset_t va)
{
	pt_entry_t *pte;
	vm_offset_t retval = 0;

	PMAP_LOCK(pmap);
	pte = pmap_pte(pmap, va);
	if (pte) {
		retval = TLBLO_PTE_TO_PA(*pte) | (va & PAGE_MASK);
	}
	PMAP_UNLOCK(pmap);
	return (retval);
}

/*
 *	Routine:	pmap_extract_and_hold
 *	Function:
 *		Atomically extract and hold the physical page
 *		with the given pmap and virtual address pair
 *		if that mapping permits the given protection.
 */
vm_page_t
pmap_extract_and_hold(pmap_t pmap, vm_offset_t va, vm_prot_t prot)
{
	pt_entry_t *ptep;
	pt_entry_t pte;
	vm_page_t m;
	vm_paddr_t pa;

	m = NULL;
	pa = 0;
	PMAP_LOCK(pmap);
retry:
	ptep = pmap_pte(pmap, va);
	if ((ptep != NULL) && ((pte = *ptep) != 0) &&
	    pte_test(&pte, PTE_V) &&
	    (pte_test(&pte, PTE_D) || (prot & VM_PROT_WRITE) == 0)) {
		if (vm_page_pa_tryrelock(pmap, TLBLO_PTE_TO_PA(pte), &pa))
			goto retry;

		m = PHYS_TO_VM_PAGE(TLBLO_PTE_TO_PA(pte));
		vm_page_hold(m);
	}
	PA_UNLOCK_COND(pa);
	PMAP_UNLOCK(pmap);
	return (m);
}
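
/*
 * Typical use (illustrative sketch, assuming the vm_page lock protocol
 * of this era): a caller holds the page across an operation and then
 * releases it, e.g.
 *
 *	m = pmap_extract_and_hold(pmap, va, VM_PROT_READ);
 *	if (m != NULL) {
 *		... access the page ...
 *		vm_page_lock(m);
 *		vm_page_unhold(m);
 *		vm_page_unlock(m);
 *	}
 */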

/***************************************************
 * Low level mapping routines.....
 ***************************************************/

/*
 * add a wired page to the kva
 */
void
pmap_kenter_attr(vm_offset_t va, vm_paddr_t pa, int attr)
{
	pt_entry_t *pte;
	pt_entry_t opte, npte;

#ifdef PMAP_DEBUG
	printf("pmap_kenter:  va: %p -> pa: %p\n", (void *)va, (void *)pa);
#endif
	npte = TLBLO_PA_TO_PFN(pa) | PTE_D | PTE_V | PTE_G | PTE_W | attr;

	pte = pmap_pte(kernel_pmap, va);
	opte = *pte;
	*pte = npte;
	if (pte_test(&opte, PTE_V) && opte != npte)
		pmap_update_page(kernel_pmap, va, npte);
}

void
pmap_kenter(vm_offset_t va, vm_paddr_t pa)
{

	KASSERT(is_cacheable_mem(pa),
	    ("pmap_kenter: memory at 0x%lx is not cacheable", (u_long)pa));

	pmap_kenter_attr(va, pa, PTE_C_CACHE);
}

/*
 * remove a page from the kernel pagetables
 */
 /* PMAP_INLINE */ void
pmap_kremove(vm_offset_t va)
{
	pt_entry_t *pte;

	/*
	 * Write back all caches from the page being destroyed
	 */
	mips_dcache_wbinv_range_index(va, PAGE_SIZE);

	pte = pmap_pte(kernel_pmap, va);
	*pte = PTE_G;
	pmap_invalidate_page(kernel_pmap, va);
}

/*
 *	Used to map a range of physical addresses into kernel
 *	virtual address space.
 *
 *	The value passed in '*virt' is a suggested virtual address for
 *	the mapping. Architectures which can support a direct-mapped
 *	physical to virtual region can return the appropriate address
 *	within that region, leaving '*virt' unchanged. Other
 *	architectures should map the pages starting at '*virt' and
 *	update '*virt' with the first usable address after the mapped
 *	region.
 *
 *	Use XKPHYS for 64 bit, and KSEG0 where possible for 32 bit.
 */
vm_offset_t
pmap_map(vm_offset_t *virt, vm_paddr_t start, vm_paddr_t end, int prot)
{
	vm_offset_t va, sva;

	if (MIPS_DIRECT_MAPPABLE(end - 1))
		return (MIPS_PHYS_TO_DIRECT(start));

	va = sva = *virt;
	while (start < end) {
		pmap_kenter(va, start);
		va += PAGE_SIZE;
		start += PAGE_SIZE;
	}
	*virt = va;
	return (sva);
}

/*
 * Add a list of wired pages to the kva
 * this routine is only used for temporary
 * kernel mappings that do not need to have
 * page modification or references recorded.
 * Note that old mappings are simply written
 * over.  The page *must* be wired.
 */
void
pmap_qenter(vm_offset_t va, vm_page_t *m, int count)
{
	int i;
	vm_offset_t origva = va;

	for (i = 0; i < count; i++) {
		pmap_flush_pvcache(m[i]);
		pmap_kenter(va, VM_PAGE_TO_PHYS(m[i]));
		va += PAGE_SIZE;
	}

	mips_dcache_wbinv_range_index(origva, PAGE_SIZE*count);
}

/*
 * this routine jerks page mappings from the
 * kernel -- it is meant only for temporary mappings.
 */
void
pmap_qremove(vm_offset_t va, int count)
{
	/*
	 * No need to wb/inv caches here,
	 *   pmap_kremove will do it for us
	 */

	while (count-- > 0) {
		pmap_kremove(va);
		va += PAGE_SIZE;
	}
}
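
/*
 * Usage sketch (illustrative): the MI layer uses these for short-lived
 * batch mappings of wired pages, e.g.
 *
 *	pmap_qenter(va, ma, npages);	map npages wired pages at va
 *	... copy to or from the run ...
 *	pmap_qremove(va, npages);	tear the mappings down again
 */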

/***************************************************
 * Page table page management routines.....
 ***************************************************/

/* Revision 1.507
 *
 * Simplify the reference counting of page table pages.  Specifically, use
 * the page table page's wired count rather than its hold count to contain
 * the reference count.
 */

/*
 * This routine unholds page table pages, and if the hold count
 * drops to zero, then it decrements the wire count.
 */
static PMAP_INLINE int
pmap_unwire_pte_hold(pmap_t pmap, vm_offset_t va, vm_page_t m)
{
	--m->wire_count;
	if (m->wire_count == 0)
		return (_pmap_unwire_pte_hold(pmap, va, m));
	else
		return (0);
}

static int
_pmap_unwire_pte_hold(pmap_t pmap, vm_offset_t va, vm_page_t m)
{
	pd_entry_t *pde;

	PMAP_LOCK_ASSERT(pmap, MA_OWNED);
	/*
	 * unmap the page table page
	 */
#ifdef __mips_n64
	if (m->pindex < NUPDE)
		pde = pmap_pde(pmap, va);
	else
		pde = pmap_segmap(pmap, va);
#else
	pde = pmap_pde(pmap, va);
#endif
	*pde = 0;
	pmap->pm_stats.resident_count--;

#ifdef __mips_n64
	if (m->pindex < NUPDE) {
		pd_entry_t *pdp;
		vm_page_t pdpg;

		/*
		 * Recursively decrement next level pagetable refcount
		 */
		pdp = (pd_entry_t *)*pmap_segmap(pmap, va);
		pdpg = PHYS_TO_VM_PAGE(MIPS_DIRECT_TO_PHYS(pdp));
		pmap_unwire_pte_hold(pmap, va, pdpg);
	}
#endif

	/*
	 * If the page is finally unwired, simply free it.
	 */
	vm_page_free_zero(m);
	atomic_subtract_int(&cnt.v_wire_count, 1);
	return (1);
}

/*
 * After removing a page table entry, this routine is used to
 * conditionally free the page, and manage the hold/wire counts.
 */
static int
pmap_unuse_pt(pmap_t pmap, vm_offset_t va, pd_entry_t pde)
{
	vm_page_t mpte;

	if (va >= VM_MAXUSER_ADDRESS)
		return (0);
	KASSERT(pde != 0, ("pmap_unuse_pt: pde != 0"));
	mpte = PHYS_TO_VM_PAGE(MIPS_DIRECT_TO_PHYS(pde));
	return (pmap_unwire_pte_hold(pmap, va, mpte));
}

void
pmap_pinit0(pmap_t pmap)
{
	int i;

	PMAP_LOCK_INIT(pmap);
	pmap->pm_segtab = kernel_segmap;
	CPU_ZERO(&pmap->pm_active);
	for (i = 0; i < MAXCPU; i++) {
		pmap->pm_asid[i].asid = PMAP_ASID_RESERVED;
		pmap->pm_asid[i].gen = 0;
	}
	PCPU_SET(curpmap, pmap);
	TAILQ_INIT(&pmap->pm_pvlist);
	bzero(&pmap->pm_stats, sizeof pmap->pm_stats);
}

void
pmap_grow_direct_page_cache(void)
{

#ifdef __mips_n64
	vm_pageout_grow_cache(3, 0, MIPS_XKPHYS_LARGEST_PHYS);
#else
	vm_pageout_grow_cache(3, 0, MIPS_KSEG0_LARGEST_PHYS);
#endif
}

vm_page_t
pmap_alloc_direct_page(unsigned int index, int req)
{
	vm_page_t m;

	m = vm_page_alloc_freelist(VM_FREELIST_DIRECT, req | VM_ALLOC_WIRED |
	    VM_ALLOC_ZERO);
	if (m == NULL)
		return (NULL);

	if ((m->flags & PG_ZERO) == 0)
		pmap_zero_page(m);

	m->pindex = index;
	return (m);
}

/*
 * Initialize a preallocated and zeroed pmap structure,
 * such as one in a vmspace structure.
 */
int
pmap_pinit(pmap_t pmap)
{
	vm_offset_t ptdva;
	vm_page_t ptdpg;
	int i;

	PMAP_LOCK_INIT(pmap);

	/*
	 * allocate the page directory page
	 */
	while ((ptdpg = pmap_alloc_direct_page(NUSERPGTBLS, VM_ALLOC_NORMAL)) == NULL)
		pmap_grow_direct_page_cache();

	ptdva = MIPS_PHYS_TO_DIRECT(VM_PAGE_TO_PHYS(ptdpg));
	pmap->pm_segtab = (pd_entry_t *)ptdva;
	CPU_ZERO(&pmap->pm_active);
	for (i = 0; i < MAXCPU; i++) {
		pmap->pm_asid[i].asid = PMAP_ASID_RESERVED;
		pmap->pm_asid[i].gen = 0;
	}
	TAILQ_INIT(&pmap->pm_pvlist);
	bzero(&pmap->pm_stats, sizeof pmap->pm_stats);

	return (1);
}

/*
 * this routine is called if the page table page is not
 * mapped correctly.
 */
static vm_page_t
_pmap_allocpte(pmap_t pmap, unsigned ptepindex, int flags)
{
	vm_offset_t pageva;
	vm_page_t m;

	KASSERT((flags & (M_NOWAIT | M_WAITOK)) == M_NOWAIT ||
	    (flags & (M_NOWAIT | M_WAITOK)) == M_WAITOK,
	    ("_pmap_allocpte: flags is neither M_NOWAIT nor M_WAITOK"));

	/*
	 * Find or fabricate a new pagetable page
	 */
	if ((m = pmap_alloc_direct_page(ptepindex, VM_ALLOC_NORMAL)) == NULL) {
		if (flags & M_WAITOK) {
			PMAP_UNLOCK(pmap);
			vm_page_unlock_queues();
			pmap_grow_direct_page_cache();
			vm_page_lock_queues();
			PMAP_LOCK(pmap);
		}

		/*
		 * Indicate the need to retry.  While waiting, the page
		 * table page may have been allocated.
		 */
		return (NULL);
	}

	/*
	 * Map the pagetable page into the process address space, if it
	 * isn't already there.
	 */
	pageva = MIPS_PHYS_TO_DIRECT(VM_PAGE_TO_PHYS(m));

#ifdef __mips_n64
	if (ptepindex >= NUPDE) {
		pmap->pm_segtab[ptepindex - NUPDE] = (pd_entry_t)pageva;
	} else {
		pd_entry_t *pdep, *pde;
		int segindex = ptepindex >> (SEGSHIFT - PDRSHIFT);
		int pdeindex = ptepindex & (NPDEPG - 1);
		vm_page_t pg;

		pdep = &pmap->pm_segtab[segindex];
		if (*pdep == NULL) {
			/* recurse for allocating page dir */
			if (_pmap_allocpte(pmap, NUPDE + segindex,
			    flags) == NULL) {
				/* alloc failed, release current */
				--m->wire_count;
				atomic_subtract_int(&cnt.v_wire_count, 1);
				vm_page_free_zero(m);
				return (NULL);
			}
		} else {
			pg = PHYS_TO_VM_PAGE(MIPS_DIRECT_TO_PHYS(*pdep));
			pg->wire_count++;
		}
		/* Next level entry */
		pde = (pd_entry_t *)*pdep;
		pde[pdeindex] = (pd_entry_t)pageva;
	}
#else
	pmap->pm_segtab[ptepindex] = (pd_entry_t)pageva;
#endif
	pmap->pm_stats.resident_count++;
	return (m);
}
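
/*
 * Note on the pindex encoding used above (n64): indices below NUPDE
 * name leaf page table pages; indices NUPDE .. NUPDE + NPDEPG - 1 name
 * page directory pages hung directly off pm_segtab.  The recursive
 * call with NUPDE + segindex therefore allocates the missing directory
 * level before the leaf page is linked in.
 */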

static vm_page_t
pmap_allocpte(pmap_t pmap, vm_offset_t va, int flags)
{
	unsigned ptepindex;
	pd_entry_t *pde;
	vm_page_t m;

	KASSERT((flags & (M_NOWAIT | M_WAITOK)) == M_NOWAIT ||
	    (flags & (M_NOWAIT | M_WAITOK)) == M_WAITOK,
	    ("pmap_allocpte: flags is neither M_NOWAIT nor M_WAITOK"));

	/*
	 * Calculate pagetable page index
	 */
	ptepindex = pmap_pde_pindex(va);
retry:
	/*
	 * Get the page directory entry
	 */
	pde = pmap_pde(pmap, va);

	/*
	 * If the page table page is mapped, we just increment the hold
	 * count, and activate it.
	 */
	if (pde != NULL && *pde != NULL) {
		m = PHYS_TO_VM_PAGE(MIPS_DIRECT_TO_PHYS(*pde));
		m->wire_count++;
	} else {
		/*
		 * Here if the pte page isn't mapped, or if it has been
		 * deallocated.
		 */
		m = _pmap_allocpte(pmap, ptepindex, flags);
		if (m == NULL && (flags & M_WAITOK))
			goto retry;
	}
	return (m);
}

/***************************************************
 * Pmap allocation/deallocation routines.
 ***************************************************/
/*
 * Revision 1.397
 * - Merged pmap_release and pmap_release_free_page.  When pmap_release is
 *   called only the page directory page(s) can be left in the pmap pte
 *   object, since all page table pages will have been freed by
 *   pmap_remove_pages and pmap_remove.  In addition, there can only be one
 *   reference to the pmap and the page directory is wired, so the page(s)
 *   can never be busy.  So all there is to do is clear the magic mappings
 *   from the page directory and free the page(s).
 */

/*
 * Release any resources held by the given physical map.
 * Called when a pmap initialized by pmap_pinit is being released.
 * Should only be called if the map contains no valid mappings.
 */
void
pmap_release(pmap_t pmap)
{
	vm_offset_t ptdva;
	vm_page_t ptdpg;

	KASSERT(pmap->pm_stats.resident_count == 0,
	    ("pmap_release: pmap resident count %ld != 0",
	    pmap->pm_stats.resident_count));

	ptdva = (vm_offset_t)pmap->pm_segtab;
	ptdpg = PHYS_TO_VM_PAGE(MIPS_DIRECT_TO_PHYS(ptdva));

	ptdpg->wire_count--;
	atomic_subtract_int(&cnt.v_wire_count, 1);
	vm_page_free_zero(ptdpg);
	PMAP_LOCK_DESTROY(pmap);
}

/*
 * grow the number of kernel page table entries, if needed
 */
void
pmap_growkernel(vm_offset_t addr)
{
	vm_page_t nkpg;
	pd_entry_t *pde, *pdpe;
	pt_entry_t *pte;
	int i;

	mtx_assert(&kernel_map->system_mtx, MA_OWNED);
	addr = roundup2(addr, NBSEG);
	if (addr - 1 >= kernel_map->max_offset)
		addr = kernel_map->max_offset;
	while (kernel_vm_end < addr) {
		pdpe = pmap_segmap(kernel_pmap, kernel_vm_end);
#ifdef __mips_n64
		if (*pdpe == 0) {
			/* new intermediate page table entry */
			nkpg = pmap_alloc_direct_page(nkpt, VM_ALLOC_INTERRUPT);
			if (nkpg == NULL)
				panic("pmap_growkernel: no memory to grow kernel");
			*pdpe = (pd_entry_t)MIPS_PHYS_TO_DIRECT(VM_PAGE_TO_PHYS(nkpg));
			continue; /* try again */
		}
#endif
		pde = pmap_pdpe_to_pde(pdpe, kernel_vm_end);
		if (*pde != 0) {
			kernel_vm_end = (kernel_vm_end + NBPDR) & ~PDRMASK;
			if (kernel_vm_end - 1 >= kernel_map->max_offset) {
				kernel_vm_end = kernel_map->max_offset;
				break;
			}
			continue;
		}

		/*
		 * This index is bogus, but out of the way
		 */
		nkpg = pmap_alloc_direct_page(nkpt, VM_ALLOC_INTERRUPT);
		if (!nkpg)
			panic("pmap_growkernel: no memory to grow kernel");
		nkpt++;
		*pde = (pd_entry_t)MIPS_PHYS_TO_DIRECT(VM_PAGE_TO_PHYS(nkpg));

		/*
		 * The R[4-7]?00 stores only one copy of the Global bit in
		 * the translation lookaside buffer for each 2 page entry.
		 * Thus invalid entries must have the Global bit set so that
		 * when Entry LO and Entry HI G bits are ANDed together they
		 * will produce a global bit to store in the tlb.
		 */
		pte = (pt_entry_t *)*pde;
		for (i = 0; i < NPTEPG; i++)
			pte[i] = PTE_G;

		kernel_vm_end = (kernel_vm_end + NBPDR) & ~PDRMASK;
		if (kernel_vm_end - 1 >= kernel_map->max_offset) {
			kernel_vm_end = kernel_map->max_offset;
			break;
		}
	}
}
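
/*
 * Growth note (illustrative): each successful iteration above links in
 * one new page table page and advances kernel_vm_end by NBPDR bytes
 * (NPTEPG * PAGE_SIZE, e.g. 4MB with 4KB pages and 4-byte PTEs), so
 * kernel VA grows one page-directory slot at a time.
 */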

/***************************************************
 * page management routines.
 ***************************************************/

/*
 * free the pv_entry back to the free list
 */
static PMAP_INLINE void
free_pv_entry(pv_entry_t pv)
{

	pv_entry_count--;
	uma_zfree(pvzone, pv);
}

/*
 * get a new pv_entry, allocating a block from the system
 * when needed.
 * the memory allocation is performed bypassing the malloc code
 * because of the possibility of allocations at interrupt time.
 */
static pv_entry_t
get_pv_entry(pmap_t locked_pmap)
{
	static const struct timeval printinterval = { 60, 0 };
	static struct timeval lastprint;
	struct vpgqueues *vpq;
	pd_entry_t *pde;
	pt_entry_t *pte, oldpte;
	pmap_t pmap;
	pv_entry_t allocated_pv, next_pv, pv;
	vm_offset_t va;
	vm_page_t m;

	PMAP_LOCK_ASSERT(locked_pmap, MA_OWNED);
	mtx_assert(&vm_page_queue_mtx, MA_OWNED);
	allocated_pv = uma_zalloc(pvzone, M_NOWAIT);
	if (allocated_pv != NULL) {
		pv_entry_count++;
		if (pv_entry_count > pv_entry_high_water)
			pagedaemon_wakeup();
		else
			return (allocated_pv);
	}
	/*
	 * Reclaim pv entries: At first, destroy mappings to inactive
	 * pages.  After that, if a pv entry is still needed, destroy
	 * mappings to active pages.
	 */
	if (ratecheck(&lastprint, &printinterval))
		printf("Approaching the limit on PV entries, "
		    "increase the vm.pmap.shpgperproc tunable.\n");
	vpq = &vm_page_queues[PQ_INACTIVE];
retry:
	TAILQ_FOREACH(m, &vpq->pl, pageq) {
		if ((m->flags & PG_MARKER) != 0 || m->hold_count || m->busy)
			continue;
		TAILQ_FOREACH_SAFE(pv, &m->md.pv_list, pv_list, next_pv) {
			va = pv->pv_va;
			pmap = pv->pv_pmap;
			/* Avoid deadlock and lock recursion. */
			if (pmap > locked_pmap)
				PMAP_LOCK(pmap);
			else if (pmap != locked_pmap && !PMAP_TRYLOCK(pmap))
				continue;
			pmap->pm_stats.resident_count--;
			pde = pmap_pde(pmap, va);
			KASSERT(pde != NULL && *pde != 0,
			    ("get_pv_entry: pde"));
			pte = pmap_pde_to_pte(pde, va);
			oldpte = *pte;
			if (is_kernel_pmap(pmap))
				*pte = PTE_G;
			else
				*pte = 0;
			KASSERT(!pte_test(&oldpte, PTE_W),
			    ("wired pte for unwired page"));
			if (m->md.pv_flags & PV_TABLE_REF)
				vm_page_aflag_set(m, PGA_REFERENCED);
			if (pte_test(&oldpte, PTE_D))
				vm_page_dirty(m);
			pmap_invalidate_page(pmap, va);
			TAILQ_REMOVE(&pmap->pm_pvlist, pv, pv_plist);
			m->md.pv_list_count--;
			TAILQ_REMOVE(&m->md.pv_list, pv, pv_list);
			pmap_unuse_pt(pmap, va, *pde);
			if (pmap != locked_pmap)
				PMAP_UNLOCK(pmap);
			if (allocated_pv == NULL)
				allocated_pv = pv;
			else
				free_pv_entry(pv);
		}
		if (TAILQ_EMPTY(&m->md.pv_list)) {
			vm_page_aflag_clear(m, PGA_WRITEABLE);
			m->md.pv_flags &= ~(PV_TABLE_REF | PV_TABLE_MOD);
		}
	}
	if (allocated_pv == NULL) {
		if (vpq == &vm_page_queues[PQ_INACTIVE]) {
			vpq = &vm_page_queues[PQ_ACTIVE];
			goto retry;
		}
		panic("get_pv_entry: increase the vm.pmap.shpgperproc tunable");
	}
	return (allocated_pv);
}

/*
 * Revision 1.370
 *
 * Move pmap_collect() out of the machine-dependent code, rename it
 * to reflect its new location, and add page queue and flag locking.
 *
 * Notes: (1) alpha, i386, and ia64 had identical implementations
 * of pmap_collect() in terms of machine-independent interfaces;
 * (2) sparc64 doesn't require it; (3) powerpc had it as a TODO.
 *
 * MIPS implementation was identical to alpha [Junos 8.2]
 */

/*
 * If it is the first entry on the list, it is actually
 * in the header and we must copy the following entry up
 * to the header.  Otherwise we must search the list for
 * the entry.  In either case we free the now unused entry.
 */
static pv_entry_t
pmap_pvh_remove(struct md_page *pvh, pmap_t pmap, vm_offset_t va)
{
	pv_entry_t pv;

	PMAP_LOCK_ASSERT(pmap, MA_OWNED);
	mtx_assert(&vm_page_queue_mtx, MA_OWNED);
	if (pvh->pv_list_count < pmap->pm_stats.resident_count) {
		TAILQ_FOREACH(pv, &pvh->pv_list, pv_list) {
			if (pmap == pv->pv_pmap && va == pv->pv_va)
				break;
		}
	} else {
		TAILQ_FOREACH(pv, &pmap->pm_pvlist, pv_plist) {
			if (va == pv->pv_va)
				break;
		}
	}
	if (pv != NULL) {
		TAILQ_REMOVE(&pvh->pv_list, pv, pv_list);
		pvh->pv_list_count--;
		TAILQ_REMOVE(&pmap->pm_pvlist, pv, pv_plist);
	}
	return (pv);
}

static void
pmap_pvh_free(struct md_page *pvh, pmap_t pmap, vm_offset_t va)
{
	pv_entry_t pv;

	pv = pmap_pvh_remove(pvh, pmap, va);
	KASSERT(pv != NULL, ("pmap_pvh_free: pv not found, pa %lx va %lx",
	    (u_long)VM_PAGE_TO_PHYS(member2struct(vm_page, md, pvh)),
	    (u_long)va));
	free_pv_entry(pv);
}

static void
pmap_remove_entry(pmap_t pmap, vm_page_t m, vm_offset_t va)
{

	mtx_assert(&vm_page_queue_mtx, MA_OWNED);
	pmap_pvh_free(&m->md, pmap, va);
	if (TAILQ_EMPTY(&m->md.pv_list))
		vm_page_aflag_clear(m, PGA_WRITEABLE);
}

/*
 * Conditionally create a pv entry.
 */
static boolean_t
pmap_try_insert_pv_entry(pmap_t pmap, vm_page_t mpte, vm_offset_t va,
    vm_page_t m)
{
	pv_entry_t pv;

	PMAP_LOCK_ASSERT(pmap, MA_OWNED);
	mtx_assert(&vm_page_queue_mtx, MA_OWNED);
	if (pv_entry_count < pv_entry_high_water &&
	    (pv = uma_zalloc(pvzone, M_NOWAIT)) != NULL) {
		pv_entry_count++;
		pv->pv_va = va;
		pv->pv_pmap = pmap;
		TAILQ_INSERT_TAIL(&pmap->pm_pvlist, pv, pv_plist);
		TAILQ_INSERT_TAIL(&m->md.pv_list, pv, pv_list);
		m->md.pv_list_count++;
		return (TRUE);
	} else
		return (FALSE);
}

/*
 * pmap_remove_pte: do the things to unmap a page in a process
 */
static int
pmap_remove_pte(struct pmap *pmap, pt_entry_t *ptq, vm_offset_t va,
    pd_entry_t pde)
{
	pt_entry_t oldpte;
	vm_page_t m;
	vm_paddr_t pa;

	mtx_assert(&vm_page_queue_mtx, MA_OWNED);
	PMAP_LOCK_ASSERT(pmap, MA_OWNED);

	oldpte = *ptq;
	if (is_kernel_pmap(pmap))
		*ptq = PTE_G;
	else
		*ptq = 0;

	if (pte_test(&oldpte, PTE_W))
		pmap->pm_stats.wired_count -= 1;

	pmap->pm_stats.resident_count -= 1;
	pa = TLBLO_PTE_TO_PA(oldpte);

	if (page_is_managed(pa)) {
		m = PHYS_TO_VM_PAGE(pa);
		if (pte_test(&oldpte, PTE_D)) {
			KASSERT(!pte_test(&oldpte, PTE_RO),
			    ("%s: modified page not writable: va: %p, pte: %#jx",
			    __func__, (void *)va, (uintmax_t)oldpte));
			vm_page_dirty(m);
		}
		if (m->md.pv_flags & PV_TABLE_REF)
			vm_page_aflag_set(m, PGA_REFERENCED);
		m->md.pv_flags &= ~(PV_TABLE_REF | PV_TABLE_MOD);

		pmap_remove_entry(pmap, m, va);
	}
	return (pmap_unuse_pt(pmap, va, pde));
}

/*
 * Remove a single page from a process address space
 */
static void
pmap_remove_page(struct pmap *pmap, vm_offset_t va)
{
	pd_entry_t *pde;
	pt_entry_t *ptq;

	mtx_assert(&vm_page_queue_mtx, MA_OWNED);
	PMAP_LOCK_ASSERT(pmap, MA_OWNED);
	pde = pmap_pde(pmap, va);
	if (pde == NULL || *pde == 0)
		return;
	ptq = pmap_pde_to_pte(pde, va);

	/*
	 * if there is no pte for this address, just skip it!!!
	 */
	if (!pte_test(ptq, PTE_V)) {
		return;
	}

	/*
	 * Write back all caches from the page being destroyed
	 */
	mips_dcache_wbinv_range_index(va, PAGE_SIZE);

	/*
	 * get a local va for mappings for this pmap.
	 */
	(void)pmap_remove_pte(pmap, ptq, va, *pde);
	pmap_invalidate_page(pmap, va);

	return;
}
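
/*
 * Note: pmap_remove_pte() above writes PTE_G, not 0, into kernel pmap
 * entries it invalidates.  Paired TLB entries share a single Global
 * bit (see the comment in pmap_create_kernel_pagetable()), so even an
 * invalid kernel PTE must keep PTE_G set or it would clear the G bit
 * of its neighbor in the TLB pair.
 */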

/*
 * Remove the given range of addresses from the specified map.
 *
 * It is assumed that the start and end are properly
 * rounded to the page size.
 */
void
pmap_remove(struct pmap *pmap, vm_offset_t sva, vm_offset_t eva)
{
	vm_offset_t va_next;
	pd_entry_t *pde, *pdpe;
	pt_entry_t *pte;

	if (pmap->pm_stats.resident_count == 0)
		return;

	vm_page_lock_queues();
	PMAP_LOCK(pmap);

	/*
	 * special handling of removing one page.  a very common operation
	 * and easy to short circuit some code.
	 */
	if ((sva + PAGE_SIZE) == eva) {
		pmap_remove_page(pmap, sva);
		goto out;
	}
	for (; sva < eva; sva = va_next) {
		pdpe = pmap_segmap(pmap, sva);
#ifdef __mips_n64
		if (*pdpe == 0) {
			va_next = (sva + NBSEG) & ~SEGMASK;
			if (va_next < sva)
				va_next = eva;
			continue;
		}
#endif
		va_next = (sva + NBPDR) & ~PDRMASK;
		if (va_next < sva)
			va_next = eva;

		pde = pmap_pdpe_to_pde(pdpe, sva);
		if (*pde == 0)
			continue;
		if (va_next > eva)
			va_next = eva;
		for (pte = pmap_pde_to_pte(pde, sva); sva != va_next;
		    pte++, sva += PAGE_SIZE) {
			pmap_remove_page(pmap, sva);
		}
	}
out:
	vm_page_unlock_queues();
	PMAP_UNLOCK(pmap);
}

/*
 *	Routine:	pmap_remove_all
 *	Function:
 *		Removes this physical page from
 *		all physical maps in which it resides.
 *		Reflects back modify bits to the pager.
 *
 *	Notes:
 *		Original versions of this routine were very
 *		inefficient because they iteratively called
 *		pmap_remove (slow...)
 */
void
pmap_remove_all(vm_page_t m)
{
	pv_entry_t pv;
	pd_entry_t *pde;
	pt_entry_t *pte, tpte;

	KASSERT((m->oflags & VPO_UNMANAGED) == 0,
	    ("pmap_remove_all: page %p is not managed", m));
	vm_page_lock_queues();

	if (m->md.pv_flags & PV_TABLE_REF)
		vm_page_aflag_set(m, PGA_REFERENCED);

	while ((pv = TAILQ_FIRST(&m->md.pv_list)) != NULL) {
		PMAP_LOCK(pv->pv_pmap);

		/*
		 * If it's the last mapping, write back all caches from
		 * the page being destroyed.
		 */
		if (m->md.pv_list_count == 1)
			mips_dcache_wbinv_range_index(pv->pv_va, PAGE_SIZE);

		pv->pv_pmap->pm_stats.resident_count--;

		pde = pmap_pde(pv->pv_pmap, pv->pv_va);
		KASSERT(pde != NULL && *pde != 0, ("pmap_remove_all: pde"));
		pte = pmap_pde_to_pte(pde, pv->pv_va);

		tpte = *pte;
		if (is_kernel_pmap(pv->pv_pmap))
			*pte = PTE_G;
		else
			*pte = 0;

		if (pte_test(&tpte, PTE_W))
			pv->pv_pmap->pm_stats.wired_count--;

		/*
		 * Update the vm_page_t clean and reference bits.
		 */
		if (pte_test(&tpte, PTE_D)) {
			KASSERT(!pte_test(&tpte, PTE_RO),
			    ("%s: modified page not writable: va: %p, pte: %#jx",
			    __func__, (void *)pv->pv_va, (uintmax_t)tpte));
			vm_page_dirty(m);
		}
		pmap_invalidate_page(pv->pv_pmap, pv->pv_va);

		TAILQ_REMOVE(&pv->pv_pmap->pm_pvlist, pv, pv_plist);
		TAILQ_REMOVE(&m->md.pv_list, pv, pv_list);
		m->md.pv_list_count--;
		pmap_unuse_pt(pv->pv_pmap, pv->pv_va, *pde);
		PMAP_UNLOCK(pv->pv_pmap);
		free_pv_entry(pv);
	}

	vm_page_aflag_clear(m, PGA_WRITEABLE);
	m->md.pv_flags &= ~(PV_TABLE_REF | PV_TABLE_MOD);
	vm_page_unlock_queues();
}

/*
 *	Set the physical protection on the
 *	specified range of this map as requested.
 */
void
pmap_protect(pmap_t pmap, vm_offset_t sva, vm_offset_t eva, vm_prot_t prot)
{
	pt_entry_t *pte;
	pd_entry_t *pde, *pdpe;
	vm_offset_t va_next;

	if ((prot & VM_PROT_READ) == VM_PROT_NONE) {
		pmap_remove(pmap, sva, eva);
		return;
	}
	if (prot & VM_PROT_WRITE)
		return;

	vm_page_lock_queues();
	PMAP_LOCK(pmap);
	for (; sva < eva; sva = va_next) {
		pt_entry_t pbits;
		vm_page_t m;
		vm_paddr_t pa;

		pdpe = pmap_segmap(pmap, sva);
#ifdef __mips_n64
		if (*pdpe == 0) {
			va_next = (sva + NBSEG) & ~SEGMASK;
			if (va_next < sva)
				va_next = eva;
			continue;
		}
#endif
		va_next = (sva + NBPDR) & ~PDRMASK;
		if (va_next < sva)
			va_next = eva;

		pde = pmap_pdpe_to_pde(pdpe, sva);
		if (pde == NULL || *pde == NULL)
			continue;
		if (va_next > eva)
			va_next = eva;

		for (pte = pmap_pde_to_pte(pde, sva); sva != va_next; pte++,
		    sva += PAGE_SIZE) {

			/* Skip invalid PTEs */
			if (!pte_test(pte, PTE_V))
				continue;
			pbits = *pte;
			pa = TLBLO_PTE_TO_PA(pbits);
			if (page_is_managed(pa) && pte_test(&pbits, PTE_D)) {
				m = PHYS_TO_VM_PAGE(pa);
				vm_page_dirty(m);
				m->md.pv_flags &= ~PV_TABLE_MOD;
			}
			pte_clear(&pbits, PTE_D);
			pte_set(&pbits, PTE_RO);

			if (pbits != *pte) {
				*pte = pbits;
				pmap_update_page(pmap, sva, pbits);
			}
		}
	}
	vm_page_unlock_queues();
	PMAP_UNLOCK(pmap);
}
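
/*
 * Background note (illustrative): PTE_D is the MIPS TLB dirty/
 * write-enable bit.  Clearing it and setting the software PTE_RO flag
 * means the next store to the page takes a TLB Modified exception;
 * with PTE_RO set, that fault is treated as a protection violation
 * rather than an occasion to silently re-dirty the page.
 */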

/*
 *	Insert the given physical page (p) at
 *	the specified virtual address (v) in the
 *	target physical map with the protection requested.
 *
 *	If specified, the page will be wired down, meaning
 *	that the related pte cannot be reclaimed.
 *
 *	NB:  This is the only routine which MAY NOT lazy-evaluate
 *	or lose information.  That is, this routine must actually
 *	insert this page into the given map NOW.
 */
void
pmap_enter(pmap_t pmap, vm_offset_t va, vm_prot_t access, vm_page_t m,
    vm_prot_t prot, boolean_t wired)
{
	vm_paddr_t pa, opa;
	pt_entry_t *pte;
	pt_entry_t origpte, newpte;
	pv_entry_t pv;
	vm_page_t mpte, om;
	pt_entry_t rw = 0;

	va &= ~PAGE_MASK;
	KASSERT(va <= VM_MAX_KERNEL_ADDRESS, ("pmap_enter: toobig"));
	KASSERT((m->oflags & (VPO_UNMANAGED | VPO_BUSY)) != 0,
	    ("pmap_enter: page %p is not busy", m));

	mpte = NULL;

	vm_page_lock_queues();
	PMAP_LOCK(pmap);

	/*
	 * In the case that a page table page is not resident, we are
	 * creating it here.
	 */
	if (va < VM_MAXUSER_ADDRESS) {
		mpte = pmap_allocpte(pmap, va, M_WAITOK);
	}
	pte = pmap_pte(pmap, va);

	/*
	 * Page Directory table entry not valid, we need a new PT page
	 */
	if (pte == NULL) {
		panic("pmap_enter: invalid page directory, pdir=%p, va=%p",
		    (void *)pmap->pm_segtab, (void *)va);
	}
	pa = VM_PAGE_TO_PHYS(m);
	om = NULL;
	origpte = *pte;
	opa = TLBLO_PTE_TO_PA(origpte);

	/*
	 * Mapping has not changed, must be protection or wiring change.
	 */
	if (pte_test(&origpte, PTE_V) && opa == pa) {
		/*
		 * Wiring change, just update stats. We don't worry about
		 * wiring PT pages as they remain resident as long as there
		 * are valid mappings in them. Hence, if a user page is
		 * wired, the PT page will be also.
		 */
		if (wired && !pte_test(&origpte, PTE_W))
			pmap->pm_stats.wired_count++;
		else if (!wired && pte_test(&origpte, PTE_W))
			pmap->pm_stats.wired_count--;

		KASSERT(!pte_test(&origpte, PTE_D | PTE_RO),
		    ("%s: modified page not writable: va: %p, pte: %#jx",
		    __func__, (void *)va, (uintmax_t)origpte));

		/*
		 * Remove extra pte reference
		 */
		if (mpte)
			mpte->wire_count--;

		if (page_is_managed(opa)) {
			om = m;
		}
		goto validate;
	}

	pv = NULL;

	/*
	 * Mapping has changed, invalidate old range and fall through to
	 * handle validating new mapping.
	 */
	if (opa) {
		if (pte_test(&origpte, PTE_W))
			pmap->pm_stats.wired_count--;

		if (page_is_managed(opa)) {
			om = PHYS_TO_VM_PAGE(opa);
			pv = pmap_pvh_remove(&om->md, pmap, va);
		}
		if (mpte != NULL) {
			mpte->wire_count--;
			KASSERT(mpte->wire_count > 0,
			    ("pmap_enter: missing reference to page table page,"
			    " va: %p", (void *)va));
		}
	} else
		pmap->pm_stats.resident_count++;

	/*
	 * Enter on the PV list if part of our managed memory.  Note that we
	 * raise IPL while manipulating pv_table since pmap_enter can be
	 * called at interrupt time.
	 */
	if ((m->oflags & VPO_UNMANAGED) == 0) {
		KASSERT(va < kmi.clean_sva || va >= kmi.clean_eva,
		    ("pmap_enter: managed mapping within the clean submap"));
		if (pv == NULL)
			pv = get_pv_entry(pmap);
		pv->pv_va = va;
		pv->pv_pmap = pmap;
		TAILQ_INSERT_TAIL(&pmap->pm_pvlist, pv, pv_plist);
		TAILQ_INSERT_TAIL(&m->md.pv_list, pv, pv_list);
		m->md.pv_list_count++;
	} else if (pv != NULL)
		free_pv_entry(pv);

	/*
	 * Increment counters
	 */
	if (wired)
		pmap->pm_stats.wired_count++;

validate:
	if ((access & VM_PROT_WRITE) != 0)
		m->md.pv_flags |= PV_TABLE_MOD | PV_TABLE_REF;
	rw = init_pte_prot(va, m, prot);

#ifdef PMAP_DEBUG
	printf("pmap_enter:  va: %p -> pa: %p\n", (void *)va, (void *)pa);
#endif
	/*
	 * Now validate mapping with desired protection/wiring.
	 */
	newpte = TLBLO_PA_TO_PFN(pa) | rw | PTE_V;

	if (is_cacheable_mem(pa))
		newpte |= PTE_C_CACHE;
	else
		newpte |= PTE_C_UNCACHED;

	if (wired)
		newpte |= PTE_W;

	if (is_kernel_pmap(pmap))
		newpte |= PTE_G;

	/*
	 * if the mapping or permission bits are different, we need to
	 * update the pte.
	 */
	if (origpte != newpte) {
		if (pte_test(&origpte, PTE_V)) {
			*pte = newpte;
			if (page_is_managed(opa) && (opa != pa)) {
				if (om->md.pv_flags & PV_TABLE_REF)
					vm_page_aflag_set(om, PGA_REFERENCED);
				om->md.pv_flags &=
				    ~(PV_TABLE_REF | PV_TABLE_MOD);
			}
			if (pte_test(&origpte, PTE_D)) {
				KASSERT(!pte_test(&origpte, PTE_RO),
				    ("pmap_enter: modified page not writable:"
				    " va: %p, pte: %#jx", (void *)va, (uintmax_t)origpte));
				if (page_is_managed(opa))
					vm_page_dirty(om);
			}
			if (page_is_managed(opa) &&
			    TAILQ_EMPTY(&om->md.pv_list))
				vm_page_aflag_clear(om, PGA_WRITEABLE);
		} else {
			*pte = newpte;
		}
	}
	pmap_update_page(pmap, va, newpte);

	/*
	 * Sync I & D caches for executable pages.  Do this only if the
	 * target pmap belongs to the current process.  Otherwise, an
	 * unresolvable TLB miss may occur.
	 */
	/*
	 * Now validate mapping with desired protection/wiring.
	 */
	newpte = TLBLO_PA_TO_PFN(pa) | rw | PTE_V;

	if (is_cacheable_mem(pa))
		newpte |= PTE_C_CACHE;
	else
		newpte |= PTE_C_UNCACHED;

	if (wired)
		newpte |= PTE_W;

	if (is_kernel_pmap(pmap))
		newpte |= PTE_G;

	/*
	 * If the mapping or permission bits are different, we need to
	 * update the pte.
	 */
	if (origpte != newpte) {
		if (pte_test(&origpte, PTE_V)) {
			*pte = newpte;
			if (page_is_managed(opa) && (opa != pa)) {
				if (om->md.pv_flags & PV_TABLE_REF)
					vm_page_aflag_set(om, PGA_REFERENCED);
				om->md.pv_flags &=
				    ~(PV_TABLE_REF | PV_TABLE_MOD);
			}
			if (pte_test(&origpte, PTE_D)) {
				KASSERT(!pte_test(&origpte, PTE_RO),
				    ("pmap_enter: modified page not writable:"
				    " va: %p, pte: %#jx", (void *)va,
				    (uintmax_t)origpte));
				if (page_is_managed(opa))
					vm_page_dirty(om);
			}
			if (page_is_managed(opa) &&
			    TAILQ_EMPTY(&om->md.pv_list))
				vm_page_aflag_clear(om, PGA_WRITEABLE);
		} else {
			*pte = newpte;
		}
	}
	pmap_update_page(pmap, va, newpte);

	/*
	 * Sync I & D caches for executable pages.  Do this only if the
	 * target pmap belongs to the current process.  Otherwise, an
	 * unresolvable TLB miss may occur.
	 */
	if (!is_kernel_pmap(pmap) && (pmap == &curproc->p_vmspace->vm_pmap) &&
	    (prot & VM_PROT_EXECUTE)) {
		mips_icache_sync_range(va, PAGE_SIZE);
		mips_dcache_wbinv_range(va, PAGE_SIZE);
	}
	vm_page_unlock_queues();
	PMAP_UNLOCK(pmap);
}

/*
 * This code makes some *MAJOR* assumptions:
 * 1. The current pmap and the given pmap exist.
 * 2. The mapping is not wired.
 * 3. Read access only.
 * 4. No page table pages.
 * It is, however, *MUCH* faster than pmap_enter...
 */
void
pmap_enter_quick(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot)
{

	vm_page_lock_queues();
	PMAP_LOCK(pmap);
	(void)pmap_enter_quick_locked(pmap, va, m, prot, NULL);
	vm_page_unlock_queues();
	PMAP_UNLOCK(pmap);
}

static vm_page_t
pmap_enter_quick_locked(pmap_t pmap, vm_offset_t va, vm_page_t m,
    vm_prot_t prot, vm_page_t mpte)
{
	pt_entry_t *pte;
	vm_paddr_t pa;

	KASSERT(va < kmi.clean_sva || va >= kmi.clean_eva ||
	    (m->oflags & VPO_UNMANAGED) != 0,
	    ("pmap_enter_quick_locked: managed mapping within the clean submap"));
	mtx_assert(&vm_page_queue_mtx, MA_OWNED);
	PMAP_LOCK_ASSERT(pmap, MA_OWNED);

	/*
	 * In the case that a page table page is not resident, we are
	 * creating it here.
	 */
	if (va < VM_MAXUSER_ADDRESS) {
		pd_entry_t *pde;
		unsigned ptepindex;

		/*
		 * Calculate the pagetable page index
		 */
		ptepindex = pmap_pde_pindex(va);
		if (mpte && (mpte->pindex == ptepindex)) {
			mpte->wire_count++;
		} else {
			/*
			 * Get the page directory entry
			 */
			pde = pmap_pde(pmap, va);

			/*
			 * If the page table page is mapped, we just
			 * increment the hold count, and activate it.
			 */
			if (pde && *pde != 0) {
				mpte = PHYS_TO_VM_PAGE(
				    MIPS_DIRECT_TO_PHYS(*pde));
				mpte->wire_count++;
			} else {
				mpte = _pmap_allocpte(pmap, ptepindex,
				    M_NOWAIT);
				if (mpte == NULL)
					return (mpte);
			}
		}
	} else {
		mpte = NULL;
	}

	pte = pmap_pte(pmap, va);
	if (pte_test(pte, PTE_V)) {
		if (mpte != NULL) {
			mpte->wire_count--;
			mpte = NULL;
		}
		return (mpte);
	}

	/*
	 * Enter on the PV list if part of our managed memory.
	 */
	if ((m->oflags & VPO_UNMANAGED) == 0 &&
	    !pmap_try_insert_pv_entry(pmap, mpte, va, m)) {
		if (mpte != NULL) {
			pmap_unwire_pte_hold(pmap, va, mpte);
			mpte = NULL;
		}
		return (mpte);
	}

	/*
	 * Increment counters
	 */
	pmap->pm_stats.resident_count++;

	pa = VM_PAGE_TO_PHYS(m);

	/*
	 * Now validate mapping with RO protection
	 */
	*pte = TLBLO_PA_TO_PFN(pa) | PTE_V;

	if (is_cacheable_mem(pa))
		*pte |= PTE_C_CACHE;
	else
		*pte |= PTE_C_UNCACHED;

	if (is_kernel_pmap(pmap))
		*pte |= PTE_G;
	else {
		*pte |= PTE_RO;
		/*
		 * Sync I & D caches.  Do this only if the target pmap
		 * belongs to the current process.  Otherwise, an
		 * unresolvable TLB miss may occur.
		 */
		if (pmap == &curproc->p_vmspace->vm_pmap) {
			va &= ~PAGE_MASK;
			mips_icache_sync_range(va, PAGE_SIZE);
			mips_dcache_wbinv_range(va, PAGE_SIZE);
		}
	}
	return (mpte);
}

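/*
 * pmap_enter_quick() is the path used for speculative mappings, e.g. by
 * pmap_enter_object() below.  The mappings it creates start out clean,
 * unwired and, for user pmaps, read-only, and may be silently dropped
 * when resources (pv entries, page table pages) cannot be obtained
 * without sleeping; the caller falls back to the full pmap_enter() path
 * via a later fault.
 */
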
/*
 * Make a temporary mapping for a physical address.  This is only intended
 * to be used for panic dumps.
 *
 * Use XKPHYS for 64 bit, and KSEG0 where possible for 32 bit.
 */
void *
pmap_kenter_temporary(vm_paddr_t pa, int i)
{
	vm_offset_t va;

	if (i != 0)
		printf("%s: ERROR!!! More than one page of virtual address mapping not supported\n",
		    __func__);

	if (MIPS_DIRECT_MAPPABLE(pa)) {
		va = MIPS_PHYS_TO_DIRECT(pa);
	} else {
#ifndef __mips_n64	/* XXX : to be converted to new style */
		int cpu;
		register_t intr;
		struct local_sysmaps *sysm;
		pt_entry_t *pte, npte;

		/*
		 * If this is used for anything other than dumps, we may
		 * need to leave interrupts disabled on return.  If crash
		 * dumps don't work when we get to this point, we might
		 * want to consider that (leaving things disabled as a
		 * starting point).
		 */
		intr = intr_disable();
		cpu = PCPU_GET(cpuid);
		sysm = &sysmap_lmem[cpu];
		/* Since this is for the debugger, no locks or any other fun */
		npte = TLBLO_PA_TO_PFN(pa) | PTE_D | PTE_V | PTE_G | PTE_W |
		    PTE_C_CACHE;
		pte = pmap_pte(kernel_pmap, sysm->base);
		*pte = npte;
		sysm->valid1 = 1;
		pmap_update_page(kernel_pmap, sysm->base, npte);
		va = sysm->base;
		intr_restore(intr);
#endif
	}
	return ((void *)va);
}

void
pmap_kenter_temporary_free(vm_paddr_t pa)
{
#ifndef __mips_n64	/* XXX : to be converted to new style */
	int cpu;
	register_t intr;
	struct local_sysmaps *sysm;
#endif

	if (MIPS_DIRECT_MAPPABLE(pa)) {
		/* nothing to do for this case */
		return;
	}
#ifndef __mips_n64	/* XXX : to be converted to new style */
	cpu = PCPU_GET(cpuid);
	sysm = &sysmap_lmem[cpu];
	if (sysm->valid1) {
		pt_entry_t *pte;

		intr = intr_disable();
		pte = pmap_pte(kernel_pmap, sysm->base);
		*pte = PTE_G;
		pmap_invalidate_page(kernel_pmap, sysm->base);
		intr_restore(intr);
		sysm->valid1 = 0;
	}
#endif
}

/*
 * Moved the code to Machine Independent
 *	 vm_map_pmap_enter()
 */

/*
 * Maps a sequence of resident pages belonging to the same object.
 * The sequence begins with the given page m_start.  This page is
 * mapped at the given virtual address start.  Each subsequent page is
 * mapped at a virtual address that is offset from start by the same
 * amount as the page is offset from m_start within the object.  The
 * last page in the sequence is the page with the largest offset from
 * m_start that can be mapped at a virtual address less than the given
 * virtual address end.  Not every virtual page between start and end
 * is mapped; only those for which a resident page exists with the
 * corresponding offset from m_start are mapped.
 */
void
pmap_enter_object(pmap_t pmap, vm_offset_t start, vm_offset_t end,
    vm_page_t m_start, vm_prot_t prot)
{
	vm_page_t m, mpte;
	vm_pindex_t diff, psize;

	VM_OBJECT_LOCK_ASSERT(m_start->object, MA_OWNED);
	psize = atop(end - start);
	mpte = NULL;
	m = m_start;
	vm_page_lock_queues();
	PMAP_LOCK(pmap);
	while (m != NULL && (diff = m->pindex - m_start->pindex) < psize) {
		mpte = pmap_enter_quick_locked(pmap, start + ptoa(diff), m,
		    prot, mpte);
		m = TAILQ_NEXT(m, listq);
	}
	vm_page_unlock_queues();
	PMAP_UNLOCK(pmap);
}

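/*
 * Note that pmap_enter_object() relies entirely on the "quick" path
 * above: a failure to map any individual page is tolerated, since that
 * page will simply be faulted in later through pmap_enter().
 */
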
/*
 * pmap_object_init_pt preloads the ptes for a given object
 * into the specified pmap.  This eliminates the blast of soft
 * faults on process startup and immediately after an mmap.
 */
void
pmap_object_init_pt(pmap_t pmap, vm_offset_t addr,
    vm_object_t object, vm_pindex_t pindex, vm_size_t size)
{
	VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
	KASSERT(object->type == OBJT_DEVICE || object->type == OBJT_SG,
	    ("pmap_object_init_pt: non-device object"));
}

/*
 * Routine:	pmap_change_wiring
 * Function:	Change the wiring attribute for a map/virtual-address
 *		pair.
 * In/out conditions:
 *		The mapping must already exist in the pmap.
 */
void
pmap_change_wiring(pmap_t pmap, vm_offset_t va, boolean_t wired)
{
	pt_entry_t *pte;

	PMAP_LOCK(pmap);
	pte = pmap_pte(pmap, va);

	if (wired && !pte_test(pte, PTE_W))
		pmap->pm_stats.wired_count++;
	else if (!wired && pte_test(pte, PTE_W))
		pmap->pm_stats.wired_count--;

	/*
	 * Wiring is not a hardware characteristic, so there is no need
	 * to invalidate the TLB.
	 */
	if (wired)
		pte_set(pte, PTE_W);
	else
		pte_clear(pte, PTE_W);
	PMAP_UNLOCK(pmap);
}

/*
 * Copy the range specified by src_addr/len
 * from the source map to the range dst_addr/len
 * in the destination map.
 *
 * This routine is only advisory and need not do anything.
 */
void
pmap_copy(pmap_t dst_pmap, pmap_t src_pmap, vm_offset_t dst_addr,
    vm_size_t len, vm_offset_t src_addr)
{
}

/*
 * pmap_zero_page zeros the specified hardware page by mapping
 * the page into KVM and using bzero to clear its contents.
 *
 * Use XKPHYS for 64 bit, and KSEG0 where possible for 32 bit.
 */
void
pmap_zero_page(vm_page_t m)
{
	vm_offset_t va;
	vm_paddr_t phys = VM_PAGE_TO_PHYS(m);

	if (MIPS_DIRECT_MAPPABLE(phys)) {
		va = MIPS_PHYS_TO_DIRECT(phys);
		bzero((caddr_t)va, PAGE_SIZE);
		mips_dcache_wbinv_range(va, PAGE_SIZE);
	} else {
		va = pmap_lmem_map1(phys);
		bzero((caddr_t)va, PAGE_SIZE);
		mips_dcache_wbinv_range(va, PAGE_SIZE);
		pmap_lmem_unmap();
	}
}

/*
 * pmap_zero_page_area zeros the specified hardware page by mapping
 * the page into KVM and using bzero to clear its contents.
 *
 * off and size may not cover an area beyond a single hardware page.
 */
void
pmap_zero_page_area(vm_page_t m, int off, int size)
{
	vm_offset_t va;
	vm_paddr_t phys = VM_PAGE_TO_PHYS(m);

	if (MIPS_DIRECT_MAPPABLE(phys)) {
		va = MIPS_PHYS_TO_DIRECT(phys);
		bzero((char *)va + off, size);
		mips_dcache_wbinv_range(va + off, size);
	} else {
		va = pmap_lmem_map1(phys);
		bzero((char *)va + off, size);
		mips_dcache_wbinv_range(va + off, size);
		pmap_lmem_unmap();
	}
}

void
pmap_zero_page_idle(vm_page_t m)
{
	vm_offset_t va;
	vm_paddr_t phys = VM_PAGE_TO_PHYS(m);

	if (MIPS_DIRECT_MAPPABLE(phys)) {
		va = MIPS_PHYS_TO_DIRECT(phys);
		bzero((caddr_t)va, PAGE_SIZE);
		mips_dcache_wbinv_range(va, PAGE_SIZE);
	} else {
		va = pmap_lmem_map1(phys);
		bzero((caddr_t)va, PAGE_SIZE);
		mips_dcache_wbinv_range(va, PAGE_SIZE);
		pmap_lmem_unmap();
	}
}

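/*
 * The zero/copy helpers above and below share one pattern: if the
 * physical page is reachable through a direct-mapped segment (XKPHYS
 * on n64, KSEG0 on 32 bit), it is touched through that window;
 * otherwise a temporary per-CPU mapping is set up with
 * pmap_lmem_map1()/pmap_lmem_map2() and torn down with
 * pmap_lmem_unmap().  The trailing dcache write-back/invalidate keeps
 * the physical page consistent for a later access through a different
 * virtual address.
 */
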
/*
 * pmap_copy_page copies the specified (machine independent)
 * page by mapping the page into virtual memory and using
 * bcopy to copy the page, one machine dependent page at a
 * time.
 *
 * Use XKPHYS for 64 bit, and KSEG0 where possible for 32 bit.
 */
void
pmap_copy_page(vm_page_t src, vm_page_t dst)
{
	vm_offset_t va_src, va_dst;
	vm_paddr_t phys_src = VM_PAGE_TO_PHYS(src);
	vm_paddr_t phys_dst = VM_PAGE_TO_PHYS(dst);

	if (MIPS_DIRECT_MAPPABLE(phys_src) && MIPS_DIRECT_MAPPABLE(phys_dst)) {
		/* easy case, all can be accessed via KSEG0 */
		/*
		 * Flush all caches for VA that are mapped to this page
		 * to make sure that data in SDRAM is up to date.
		 */
		pmap_flush_pvcache(src);
		mips_dcache_wbinv_range_index(
		    MIPS_PHYS_TO_DIRECT(phys_dst), PAGE_SIZE);
		va_src = MIPS_PHYS_TO_DIRECT(phys_src);
		va_dst = MIPS_PHYS_TO_DIRECT(phys_dst);
		bcopy((caddr_t)va_src, (caddr_t)va_dst, PAGE_SIZE);
		mips_dcache_wbinv_range(va_dst, PAGE_SIZE);
	} else {
		va_src = pmap_lmem_map2(phys_src, phys_dst);
		va_dst = va_src + PAGE_SIZE;
		bcopy((void *)va_src, (void *)va_dst, PAGE_SIZE);
		mips_dcache_wbinv_range(va_dst, PAGE_SIZE);
		pmap_lmem_unmap();
	}
}

/*
 * Returns true if the pmap's pv is one of the first
 * 16 pvs linked to from this page.  This count may
 * be changed upwards or downwards in the future; it
 * is only necessary that true be returned for a small
 * subset of pmaps for proper page aging.
 */
boolean_t
pmap_page_exists_quick(pmap_t pmap, vm_page_t m)
{
	pv_entry_t pv;
	int loops = 0;
	boolean_t rv;

	KASSERT((m->oflags & VPO_UNMANAGED) == 0,
	    ("pmap_page_exists_quick: page %p is not managed", m));
	rv = FALSE;
	vm_page_lock_queues();
	TAILQ_FOREACH(pv, &m->md.pv_list, pv_list) {
		if (pv->pv_pmap == pmap) {
			rv = TRUE;
			break;
		}
		loops++;
		if (loops >= 16)
			break;
	}
	vm_page_unlock_queues();
	return (rv);
}

/*
 * Remove all pages from the specified address space; this aids process
 * exit speeds.  Also, this code is special cased for the current
 * process only, but can have the more generic (and slightly slower)
 * mode enabled.  This is much faster than pmap_remove in the case of
 * running down an entire address space.
 */
void
pmap_remove_pages(pmap_t pmap)
{
	pd_entry_t *pde;
	pt_entry_t *pte, tpte;
	pv_entry_t pv, npv;
	vm_page_t m;

	if (pmap != vmspace_pmap(curthread->td_proc->p_vmspace)) {
		printf("warning: pmap_remove_pages called with non-current pmap\n");
		return;
	}
	vm_page_lock_queues();
	PMAP_LOCK(pmap);
	for (pv = TAILQ_FIRST(&pmap->pm_pvlist); pv != NULL; pv = npv) {

		pde = pmap_pde(pmap, pv->pv_va);
		KASSERT(pde != NULL && *pde != 0, ("pmap_remove_pages: pde"));
		pte = pmap_pde_to_pte(pde, pv->pv_va);
		if (!pte_test(pte, PTE_V))
			panic("pmap_remove_pages: page on pm_pvlist has no pte");
		tpte = *pte;

		/*
		 * We cannot remove wired pages from a process' mapping
		 * at this time.
		 */
		if (pte_test(&tpte, PTE_W)) {
			npv = TAILQ_NEXT(pv, pv_plist);
			continue;
		}
		*pte = is_kernel_pmap(pmap) ? PTE_G : 0;

		m = PHYS_TO_VM_PAGE(TLBLO_PTE_TO_PA(tpte));
		KASSERT(m != NULL,
		    ("pmap_remove_pages: bad tpte %#jx", (uintmax_t)tpte));

		pmap->pm_stats.resident_count--;

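		/*
		 * PTE_D can only have been set by an actual store: the
		 * first write takes a TLB modified exception, and
		 * pmap_emulate_modified() (later in this file) sets the
		 * bit.  Testing it here is therefore sufficient to
		 * decide whether the page is dirty.
		 */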
		/*
		 * Update the vm_page_t clean and reference bits.
		 */
		if (pte_test(&tpte, PTE_D)) {
			vm_page_dirty(m);
		}
		npv = TAILQ_NEXT(pv, pv_plist);
		TAILQ_REMOVE(&pmap->pm_pvlist, pv, pv_plist);

		m->md.pv_list_count--;
		TAILQ_REMOVE(&m->md.pv_list, pv, pv_list);
		if (TAILQ_FIRST(&m->md.pv_list) == NULL) {
			vm_page_aflag_clear(m, PGA_WRITEABLE);
		}
		pmap_unuse_pt(pmap, pv->pv_va, *pde);
		free_pv_entry(pv);
	}
	pmap_invalidate_all(pmap);
	PMAP_UNLOCK(pmap);
	vm_page_unlock_queues();
}

/*
 * pmap_testbit tests bits in ptes.
 * Note that the testbit/changebit routines are inlined,
 * and a lot of things compile-time evaluate.
 */
static boolean_t
pmap_testbit(vm_page_t m, int bit)
{
	pv_entry_t pv;
	pt_entry_t *pte;
	boolean_t rv = FALSE;

	if (m->oflags & VPO_UNMANAGED)
		return (rv);

	if (TAILQ_FIRST(&m->md.pv_list) == NULL)
		return (rv);

	mtx_assert(&vm_page_queue_mtx, MA_OWNED);
	TAILQ_FOREACH(pv, &m->md.pv_list, pv_list) {
		PMAP_LOCK(pv->pv_pmap);
		pte = pmap_pte(pv->pv_pmap, pv->pv_va);
		rv = pte_test(pte, bit);
		PMAP_UNLOCK(pv->pv_pmap);
		if (rv)
			break;
	}
	return (rv);
}

/*
 * This routine is used to set or clear bits in ptes, most notably
 * to clear the dirty (PTE_D) bit.
 */
static __inline void
pmap_changebit(vm_page_t m, int bit, boolean_t setem)
{
	pv_entry_t pv;
	pt_entry_t *pte;

	if (m->oflags & VPO_UNMANAGED)
		return;

	mtx_assert(&vm_page_queue_mtx, MA_OWNED);
	/*
	 * Loop over all current mappings, setting/clearing as
	 * appropriate.  If setting RO, do we need to clear the VAC?
	 */
	TAILQ_FOREACH(pv, &m->md.pv_list, pv_list) {
		PMAP_LOCK(pv->pv_pmap);
		pte = pmap_pte(pv->pv_pmap, pv->pv_va);
		if (setem) {
			*pte |= bit;
			pmap_update_page(pv->pv_pmap, pv->pv_va, *pte);
		} else {
			pt_entry_t pbits = *pte;

			if (pbits & bit) {
				if (bit == PTE_D) {
					vm_page_dirty(m);
					*pte = (pbits & ~PTE_D) | PTE_RO;
				} else {
					*pte = pbits & ~bit;
				}
				pmap_update_page(pv->pv_pmap, pv->pv_va, *pte);
			}
		}
		PMAP_UNLOCK(pv->pv_pmap);
	}
	if (!setem && bit == PTE_D)
		vm_page_aflag_clear(m, PGA_WRITEABLE);
}

/*
 * pmap_page_wired_mappings:
 *
 *	Return the number of managed mappings to the given physical page
 *	that are wired.
 */
int
pmap_page_wired_mappings(vm_page_t m)
{
	pv_entry_t pv;
	pmap_t pmap;
	pt_entry_t *pte;
	int count;

	count = 0;
	if ((m->oflags & VPO_UNMANAGED) != 0)
		return (count);
	vm_page_lock_queues();
	TAILQ_FOREACH(pv, &m->md.pv_list, pv_list) {
		pmap = pv->pv_pmap;
		PMAP_LOCK(pmap);
		pte = pmap_pte(pmap, pv->pv_va);
		if (pte_test(pte, PTE_W))
			count++;
		PMAP_UNLOCK(pmap);
	}
	vm_page_unlock_queues();
	return (count);
}

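/*
 * PTE_W is purely a software attribute; the MIPS TLB has no notion of
 * wiring.  It exists so that pmap_remove_pages() above can skip wired
 * mappings and so that the per-pmap wired_count statistic can be
 * maintained and reported here.
 */
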
/*
 * Clear the write and modified bits in each of the given page's mappings.
 */
void
pmap_remove_write(vm_page_t m)
{
	pv_entry_t pv, npv;
	vm_offset_t va;
	pt_entry_t *pte;

	KASSERT((m->oflags & VPO_UNMANAGED) == 0,
	    ("pmap_remove_write: page %p is not managed", m));

	/*
	 * If the page is not VPO_BUSY, then PGA_WRITEABLE cannot be set by
	 * another thread while the object is locked.  Thus, if PGA_WRITEABLE
	 * is clear, no page table entries need updating.
	 */
	VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED);
	if ((m->oflags & VPO_BUSY) == 0 &&
	    (m->aflags & PGA_WRITEABLE) == 0)
		return;

	/*
	 * Loop over all current mappings, write-protecting each one.
	 */
	vm_page_lock_queues();
	for (pv = TAILQ_FIRST(&m->md.pv_list); pv; pv = npv) {
		npv = TAILQ_NEXT(pv, pv_list);
		pte = pmap_pte(pv->pv_pmap, pv->pv_va);
		if (pte == NULL || !pte_test(pte, PTE_V))
			panic("page on pm_pvlist has no pte");

		va = pv->pv_va;
		pmap_protect(pv->pv_pmap, va, va + PAGE_SIZE,
		    VM_PROT_READ | VM_PROT_EXECUTE);
	}
	vm_page_aflag_clear(m, PGA_WRITEABLE);
	vm_page_unlock_queues();
}

/*
 * pmap_ts_referenced:
 *
 *	Return the count of reference bits for a page, clearing all of them.
 */
int
pmap_ts_referenced(vm_page_t m)
{

	KASSERT((m->oflags & VPO_UNMANAGED) == 0,
	    ("pmap_ts_referenced: page %p is not managed", m));
	if (m->md.pv_flags & PV_TABLE_REF) {
		vm_page_lock_queues();
		m->md.pv_flags &= ~PV_TABLE_REF;
		vm_page_unlock_queues();
		return (1);
	}
	return (0);
}

/*
 * pmap_is_modified:
 *
 *	Return whether or not the specified physical page was modified
 *	in any physical maps.
 */
boolean_t
pmap_is_modified(vm_page_t m)
{
	boolean_t rv;

	KASSERT((m->oflags & VPO_UNMANAGED) == 0,
	    ("pmap_is_modified: page %p is not managed", m));

	/*
	 * If the page is not VPO_BUSY, then PGA_WRITEABLE cannot be
	 * concurrently set while the object is locked.  Thus, if
	 * PGA_WRITEABLE is clear, no PTEs can have PTE_D set.
	 */
	VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED);
	if ((m->oflags & VPO_BUSY) == 0 &&
	    (m->aflags & PGA_WRITEABLE) == 0)
		return (FALSE);
	vm_page_lock_queues();
	if (m->md.pv_flags & PV_TABLE_MOD)
		rv = TRUE;
	else
		rv = pmap_testbit(m, PTE_D);
	vm_page_unlock_queues();
	return (rv);
}

/*
 * pmap_is_prefaultable:
 *
 *	Return whether or not the specified virtual address is eligible
 *	for prefault.
 */
boolean_t
pmap_is_prefaultable(pmap_t pmap, vm_offset_t addr)
{
	pd_entry_t *pde;
	pt_entry_t *pte;
	boolean_t rv;

	rv = FALSE;
	PMAP_LOCK(pmap);
	pde = pmap_pde(pmap, addr);
	if (pde != NULL && *pde != 0) {
		pte = pmap_pde_to_pte(pde, addr);
		rv = (*pte == 0);
	}
	PMAP_UNLOCK(pmap);
	return (rv);
}

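/*
 * The PV_TABLE_MOD and PV_TABLE_REF flags in md.pv_flags act as a
 * per-page software summary of the modified/referenced state, so the
 * common cases in pmap_is_modified() and pmap_ts_referenced() above
 * can be answered without walking every mapping's PTE.
 */
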
/*
 * Clear the modify bits on the specified physical page.
 */
void
pmap_clear_modify(vm_page_t m)
{

	KASSERT((m->oflags & VPO_UNMANAGED) == 0,
	    ("pmap_clear_modify: page %p is not managed", m));
	VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED);
	KASSERT((m->oflags & VPO_BUSY) == 0,
	    ("pmap_clear_modify: page %p is busy", m));

	/*
	 * If the page is not PGA_WRITEABLE, then no PTEs can have PTE_D set.
	 * If the object containing the page is locked and the page is not
	 * VPO_BUSY, then PGA_WRITEABLE cannot be concurrently set.
	 */
	if ((m->aflags & PGA_WRITEABLE) == 0)
		return;
	vm_page_lock_queues();
	if (m->md.pv_flags & PV_TABLE_MOD) {
		pmap_changebit(m, PTE_D, FALSE);
		m->md.pv_flags &= ~PV_TABLE_MOD;
	}
	vm_page_unlock_queues();
}

/*
 * pmap_is_referenced:
 *
 *	Return whether or not the specified physical page was referenced
 *	in any physical maps.
 */
boolean_t
pmap_is_referenced(vm_page_t m)
{

	KASSERT((m->oflags & VPO_UNMANAGED) == 0,
	    ("pmap_is_referenced: page %p is not managed", m));
	return ((m->md.pv_flags & PV_TABLE_REF) != 0);
}

/*
 * pmap_clear_reference:
 *
 *	Clear the reference bit on the specified physical page.
 */
void
pmap_clear_reference(vm_page_t m)
{

	KASSERT((m->oflags & VPO_UNMANAGED) == 0,
	    ("pmap_clear_reference: page %p is not managed", m));
	vm_page_lock_queues();
	if (m->md.pv_flags & PV_TABLE_REF) {
		m->md.pv_flags &= ~PV_TABLE_REF;
	}
	vm_page_unlock_queues();
}

/*
 * Miscellaneous support routines follow
 */

/*
 * Map a set of physical memory pages into the kernel virtual
 * address space.  Return a pointer to where it is mapped.  This
 * routine is intended to be used for mapping device memory,
 * NOT real memory.
 *
 * Use XKPHYS uncached for 64 bit, and KSEG1 where possible for 32 bit.
 */
void *
pmap_mapdev(vm_paddr_t pa, vm_size_t size)
{
	vm_offset_t va, tmpva, offset;

	/*
	 * KSEG1 maps only the first 512M of the physical address space.
	 * For pa > 0x20000000 we must create a proper mapping
	 * using pmap_kenter.
	 */
	if (MIPS_DIRECT_MAPPABLE(pa + size - 1))
		return ((void *)MIPS_PHYS_TO_DIRECT_UNCACHED(pa));
	else {
		offset = pa & PAGE_MASK;
		size = roundup(size + offset, PAGE_SIZE);

		va = kmem_alloc_nofault(kernel_map, size);
		if (!va)
			panic("pmap_mapdev: Couldn't alloc kernel virtual memory");
		pa = trunc_page(pa);
		for (tmpva = va; size > 0;) {
			pmap_kenter_attr(tmpva, pa, PTE_C_UNCACHED);
			size -= PAGE_SIZE;
			tmpva += PAGE_SIZE;
			pa += PAGE_SIZE;
		}
	}

	return ((void *)(va + offset));
}

void
pmap_unmapdev(vm_offset_t va, vm_size_t size)
{
#ifndef __mips_n64
	vm_offset_t base, offset, tmpva;

	/* If the address is within KSEG1 then there is nothing to do */
	if (va >= MIPS_KSEG1_START && va <= MIPS_KSEG1_END)
		return;

	base = trunc_page(va);
	offset = va & PAGE_MASK;
	size = roundup(size + offset, PAGE_SIZE);
	for (tmpva = base; tmpva < base + size; tmpva += PAGE_SIZE)
		pmap_kremove(tmpva);
	kmem_free(kernel_map, base, size);
#endif
}

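/*
 * A typical use of the pair above, sketched from a hypothetical bus
 * driver (the physical address and size are illustrative only):
 *
 *	void *regs;
 *
 *	regs = pmap_mapdev(0x1e000000, 0x1000);
 *	... access device registers through "regs" ...
 *	pmap_unmapdev((vm_offset_t)regs, 0x1000);
 *
 * On 32 bit, addresses below 512M come back as KSEG1 pointers, and
 * pmap_unmapdev() is then a no-op.
 */
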
/*
 * Perform the pmap work for mincore.
 */
int
pmap_mincore(pmap_t pmap, vm_offset_t addr, vm_paddr_t *locked_pa)
{
	pt_entry_t *ptep, pte;
	vm_paddr_t pa;
	vm_page_t m;
	int val;
	boolean_t managed;

	PMAP_LOCK(pmap);
retry:
	ptep = pmap_pte(pmap, addr);
	pte = (ptep != NULL) ? *ptep : 0;
	if (!pte_test(&pte, PTE_V)) {
		val = 0;
		goto out;
	}
	val = MINCORE_INCORE;
	if (pte_test(&pte, PTE_D))
		val |= MINCORE_MODIFIED | MINCORE_MODIFIED_OTHER;
	pa = TLBLO_PTE_TO_PA(pte);
	managed = page_is_managed(pa);
	if (managed) {
		/*
		 * This may falsely report the given address as
		 * MINCORE_REFERENCED.  Unfortunately, due to the lack of
		 * per-PTE reference information, it is impossible to
		 * determine if the address is MINCORE_REFERENCED.
		 */
		m = PHYS_TO_VM_PAGE(pa);
		if ((m->aflags & PGA_REFERENCED) != 0)
			val |= MINCORE_REFERENCED | MINCORE_REFERENCED_OTHER;
	}
	if ((val & (MINCORE_MODIFIED_OTHER | MINCORE_REFERENCED_OTHER)) !=
	    (MINCORE_MODIFIED_OTHER | MINCORE_REFERENCED_OTHER) && managed) {
		/* Ensure that "PHYS_TO_VM_PAGE(pa)->object" doesn't change. */
		if (vm_page_pa_tryrelock(pmap, pa, locked_pa))
			goto retry;
	} else
out:
		PA_UNLOCK_COND(*locked_pa);
	PMAP_UNLOCK(pmap);
	return (val);
}

void
pmap_activate(struct thread *td)
{
	pmap_t pmap, oldpmap;
	struct proc *p = td->td_proc;
	u_int cpuid;

	critical_enter();

	pmap = vmspace_pmap(p->p_vmspace);
	oldpmap = PCPU_GET(curpmap);
	cpuid = PCPU_GET(cpuid);

	if (oldpmap)
		CPU_CLR_ATOMIC(cpuid, &oldpmap->pm_active);
	CPU_SET_ATOMIC(cpuid, &pmap->pm_active);
	pmap_asid_alloc(pmap);
	if (td == curthread) {
		PCPU_SET(segbase, pmap->pm_segtab);
		mips_wr_entryhi(pmap->pm_asid[cpuid].asid);
	}

	PCPU_SET(curpmap, pmap);
	critical_exit();
}

void
pmap_sync_icache(pmap_t pm, vm_offset_t va, vm_size_t sz)
{
}

/*
 * Increase the starting virtual address of the given mapping if a
 * different alignment might result in more superpage mappings.
 */
void
pmap_align_superpage(vm_object_t object, vm_ooffset_t offset,
    vm_offset_t *addr, vm_size_t size)
{
	vm_offset_t superpage_offset;

	if (size < NBSEG)
		return;
	if (object != NULL && (object->flags & OBJ_COLORED) != 0)
		offset += ptoa(object->pg_color);
	superpage_offset = offset & SEGMASK;
	if (size - ((NBSEG - superpage_offset) & SEGMASK) < NBSEG ||
	    (*addr & SEGMASK) == superpage_offset)
		return;
	if ((*addr & SEGMASK) < superpage_offset)
		*addr = (*addr & ~SEGMASK) + superpage_offset;
	else
		*addr = ((*addr + SEGMASK) & ~SEGMASK) + superpage_offset;
}

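/*
 * A worked example of the adjustment above, assuming 4K pages so that
 * NBSEG is 4MB: for a mapping of size 8MB whose object offset is
 * 0x1000 into its segment (superpage_offset == 0x1000) and whose
 * proposed start is *addr == 0x20000000, the code advances *addr to
 * 0x20001000.  The virtual address then shares the segment-relative
 * position of the backing object, which is the precondition for
 * covering whole segments of the mapping with large TLB entries.
 */
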
/*
 * Increase the starting virtual address of the given mapping so
 * that it is aligned to not be the second page in a TLB entry.
 * This routine assumes that the length is appropriately-sized so
 * that the allocation does not share a TLB entry at all if required.
 */
void
pmap_align_tlb(vm_offset_t *addr)
{
	if ((*addr & PAGE_SIZE) == 0)
		return;
	*addr += PAGE_SIZE;
}

#ifdef DDB
DB_SHOW_COMMAND(ptable, ddb_pid_dump)
{
	pmap_t pmap;
	struct thread *td = NULL;
	struct proc *p;
	int i, j, k;
	vm_paddr_t pa;
	vm_offset_t va;

	if (have_addr) {
		td = db_lookup_thread(addr, TRUE);
		if (td == NULL) {
			db_printf("Invalid pid or tid\n");
			return;
		}
		p = td->td_proc;
		if (p->p_vmspace == NULL) {
			db_printf("No vmspace for process\n");
			return;
		}
		pmap = vmspace_pmap(p->p_vmspace);
	} else
		pmap = kernel_pmap;

	db_printf("pmap:%p segtab:%p asid:%x generation:%x\n",
	    pmap, pmap->pm_segtab, pmap->pm_asid[0].asid,
	    pmap->pm_asid[0].gen);
	for (i = 0; i < NPDEPG; i++) {
		pd_entry_t *pdpe;
		pt_entry_t *pde;
		pt_entry_t pte;

		pdpe = (pd_entry_t *)pmap->pm_segtab[i];
		if (pdpe == NULL)
			continue;
		db_printf("[%4d] %p\n", i, pdpe);
#ifdef __mips_n64
		for (j = 0; j < NPDEPG; j++) {
			pde = (pt_entry_t *)pdpe[j];
			if (pde == NULL)
				continue;
			db_printf("\t[%4d] %p\n", j, pde);
#else
		{
			j = 0;
			pde = (pt_entry_t *)pdpe;
#endif
			for (k = 0; k < NPTEPG; k++) {
				pte = pde[k];
				if (pte == 0 || !pte_test(&pte, PTE_V))
					continue;
				pa = TLBLO_PTE_TO_PA(pte);
				va = ((u_long)i << SEGSHIFT) |
				    (j << PDRSHIFT) | (k << PAGE_SHIFT);
				db_printf("\t\t[%04d] va: %p pte: %8jx pa:%jx\n",
				    k, (void *)va, (uintmax_t)pte,
				    (uintmax_t)pa);
			}
		}
	}
}
#endif

#if defined(DEBUG)

static void pads(pmap_t pm);
void pmap_pvdump(vm_offset_t pa);

/* print address space of pmap */
static void
pads(pmap_t pm)
{
	unsigned va, i, j;
	pt_entry_t *ptep;

	if (pm == kernel_pmap)
		return;
	for (i = 0; i < NPDEPG; i++)
		if (pm->pm_segtab[i])
			for (j = 0; j < NPTEPG; j++) {
				va = (i << SEGSHIFT) + (j << PAGE_SHIFT);
				if (pm == kernel_pmap && va < KERNBASE)
					continue;
				if (pm != kernel_pmap &&
				    va >= VM_MAXUSER_ADDRESS)
					continue;
				ptep = pmap_pte(pm, va);
				if (pte_test(ptep, PTE_V))
					printf("%x:%x ", va, *(int *)ptep);
			}
}

void
pmap_pvdump(vm_offset_t pa)
{
	pv_entry_t pv;
	vm_page_t m;

	printf("pa %x", pa);
	m = PHYS_TO_VM_PAGE(pa);
	for (pv = TAILQ_FIRST(&m->md.pv_list); pv;
	    pv = TAILQ_NEXT(pv, pv_list)) {
		printf(" -> pmap %p, va %x", (void *)pv->pv_pmap, pv->pv_va);
		pads(pv->pv_pmap);
	}
	printf(" ");
}
#endif

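/*
 * The page-table dump above is reached from the ddb prompt with, for
 * example (the thread id is illustrative):
 *
 *	db> show ptable <tid>
 *
 * With no address argument, the kernel pmap is dumped instead.
 */
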
/*
 * Allocate a TLB address space tag (called an ASID or TLBPID) and return it.
 * It takes almost as much or more time to search the TLB for a
 * specific ASID and flush those entries as it does to flush the entire TLB.
 * Therefore, when we allocate a new ASID, we just take the next number.  When
 * we run out of numbers, we flush the TLB, increment the generation count
 * and start over.  ASID zero is reserved for kernel use.
 */
static void
pmap_asid_alloc(pmap_t pmap)
{
	if (pmap->pm_asid[PCPU_GET(cpuid)].asid != PMAP_ASID_RESERVED &&
	    pmap->pm_asid[PCPU_GET(cpuid)].gen == PCPU_GET(asid_generation)) {
		/* The pmap's ASID for this CPU is still valid; reuse it. */
		return;
	}
	if (PCPU_GET(next_asid) == pmap_max_asid) {
		tlb_invalidate_all_user(NULL);
		PCPU_SET(asid_generation,
		    (PCPU_GET(asid_generation) + 1) & ASIDGEN_MASK);
		if (PCPU_GET(asid_generation) == 0) {
			PCPU_SET(asid_generation, 1);
		}
		PCPU_SET(next_asid, 1);	/* 0 means invalid */
	}
	pmap->pm_asid[PCPU_GET(cpuid)].asid = PCPU_GET(next_asid);
	pmap->pm_asid[PCPU_GET(cpuid)].gen = PCPU_GET(asid_generation);
	PCPU_SET(next_asid, PCPU_GET(next_asid) + 1);
}

int
page_is_managed(vm_paddr_t pa)
{
	vm_offset_t pgnum = atop(pa);

	if (pgnum >= first_page) {
		vm_page_t m;

		m = PHYS_TO_VM_PAGE(pa);
		if (m == NULL)
			return (0);
		if ((m->oflags & VPO_UNMANAGED) == 0)
			return (1);
	}
	return (0);
}

static pt_entry_t
init_pte_prot(vm_offset_t va, vm_page_t m, vm_prot_t prot)
{
	pt_entry_t rw;

	if (!(prot & VM_PROT_WRITE))
		rw = PTE_V | PTE_RO;
	else if ((m->oflags & VPO_UNMANAGED) == 0) {
		if ((m->md.pv_flags & PV_TABLE_MOD) != 0)
			rw = PTE_V | PTE_D;
		else
			rw = PTE_V;
		vm_page_aflag_set(m, PGA_WRITEABLE);
	} else
		/* Needn't emulate a modified bit for unmanaged pages. */
		rw = PTE_V | PTE_D;
	return (rw);
}

/*
 * pmap_emulate_modified: do dirty bit emulation.
 *
 * On SMP, update just the local TLB; other CPUs will update their
 * TLBs from the PTE lazily, if they take the exception.
 * Returns 0 in case of success, 1 if the page is read only and we
 * need to fault.
 */
int
pmap_emulate_modified(pmap_t pmap, vm_offset_t va)
{
	vm_page_t m;
	pt_entry_t *pte;
	vm_paddr_t pa;

	PMAP_LOCK(pmap);
	pte = pmap_pte(pmap, va);
	if (pte == NULL)
		panic("pmap_emulate_modified: can't find PTE");
#ifdef SMP
	/* It is possible that some other CPU changed m-bit */
	if (!pte_test(pte, PTE_V) || pte_test(pte, PTE_D)) {
		tlb_update(pmap, va, *pte);
		PMAP_UNLOCK(pmap);
		return (0);
	}
#else
	if (!pte_test(pte, PTE_V) || pte_test(pte, PTE_D))
		panic("pmap_emulate_modified: invalid pte");
#endif
	if (pte_test(pte, PTE_RO)) {
		/* write to a read only page */
		PMAP_UNLOCK(pmap);
		return (1);
	}
	pte_set(pte, PTE_D);
	tlb_update(pmap, va, *pte);
	pa = TLBLO_PTE_TO_PA(*pte);
	if (!page_is_managed(pa))
		panic("pmap_emulate_modified: unmanaged page");
	m = PHYS_TO_VM_PAGE(pa);
	m->md.pv_flags |= (PV_TABLE_REF | PV_TABLE_MOD);
	PMAP_UNLOCK(pmap);
	return (0);
}

/*
 * Routine:	pmap_kextract
 * Function:
 *	Extract the physical page address associated with the given
 *	virtual address.
 */
vm_paddr_t
pmap_kextract(vm_offset_t va)
{
	int mapped;

	/*
	 * First, the direct-mapped regions.
	 */
#if defined(__mips_n64)
	if (va >= MIPS_XKPHYS_START && va < MIPS_XKPHYS_END)
		return (MIPS_XKPHYS_TO_PHYS(va));
#endif
	if (va >= MIPS_KSEG0_START && va < MIPS_KSEG0_END)
		return (MIPS_KSEG0_TO_PHYS(va));

	if (va >= MIPS_KSEG1_START && va < MIPS_KSEG1_END)
		return (MIPS_KSEG1_TO_PHYS(va));

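	/*
	 * Nothing matched above, so va is not in a direct-mapped region
	 * (XKPHYS/KSEG0/KSEG1 translate to physical addresses by simple
	 * arithmetic and never take a TLB miss).  Anything else must be
	 * looked up in the page tables: either a user address through
	 * the current process's pmap, or a mapped kernel address (KSEG2,
	 * or XKSEG on n64) through the kernel pmap.
	 */
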
	/*
	 * User virtual addresses.
	 */
	if (va < VM_MAXUSER_ADDRESS) {
		pt_entry_t *ptep;

		if (curproc && curproc->p_vmspace) {
			ptep = pmap_pte(&curproc->p_vmspace->vm_pmap, va);
			if (ptep) {
				return (TLBLO_PTE_TO_PA(*ptep) |
				    (va & PAGE_MASK));
			}
			return (0);
		}
	}

	/*
	 * Should be kernel virtual here, otherwise fail.
	 */
	mapped = (va >= MIPS_KSEG2_START && va < MIPS_KSEG2_END);
#if defined(__mips_n64)
	mapped = mapped || (va >= MIPS_XKSEG_START && va < MIPS_XKSEG_END);
#endif
	/*
	 * Kernel virtual.
	 */
	if (mapped) {
		pt_entry_t *ptep;

		/* Is the kernel pmap initialized? */
		if (!CPU_EMPTY(&kernel_pmap->pm_active)) {
			/* It's inside the virtual address range */
			ptep = pmap_pte(kernel_pmap, va);
			if (ptep) {
				return (TLBLO_PTE_TO_PA(*ptep) |
				    (va & PAGE_MASK));
			}
		}
		return (0);
	}

	panic("%s for unknown address space %p.", __func__, (void *)va);
}

void
pmap_flush_pvcache(vm_page_t m)
{
	pv_entry_t pv;

	if (m != NULL) {
		for (pv = TAILQ_FIRST(&m->md.pv_list); pv;
		    pv = TAILQ_NEXT(pv, pv_list)) {
			mips_dcache_wbinv_range_index(pv->pv_va, PAGE_SIZE);
		}
	}
}