pmap.c revision 239964
1/* 2 * Copyright (c) 1991 Regents of the University of California. 3 * All rights reserved. 4 * Copyright (c) 1994 John S. Dyson 5 * All rights reserved. 6 * Copyright (c) 1994 David Greenman 7 * All rights reserved. 8 * 9 * This code is derived from software contributed to Berkeley by 10 * the Systems Programming Group of the University of Utah Computer 11 * Science Department and William Jolitz of UUNET Technologies Inc. 12 * 13 * Redistribution and use in source and binary forms, with or without 14 * modification, are permitted provided that the following conditions 15 * are met: 16 * 1. Redistributions of source code must retain the above copyright 17 * notice, this list of conditions and the following disclaimer. 18 * 2. Redistributions in binary form must reproduce the above copyright 19 * notice, this list of conditions and the following disclaimer in the 20 * documentation and/or other materials provided with the distribution. 21 * 4. Neither the name of the University nor the names of its contributors 22 * may be used to endorse or promote products derived from this software 23 * without specific prior written permission. 24 * 25 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND 26 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 27 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 28 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE 29 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 30 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 31 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 32 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 33 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 34 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 35 * SUCH DAMAGE. 36 * 37 * from: @(#)pmap.c 7.7 (Berkeley) 5/12/91 38 * from: src/sys/i386/i386/pmap.c,v 1.250.2.8 2000/11/21 00:09:14 ps 39 * JNPR: pmap.c,v 1.11.2.1 2007/08/16 11:51:06 girish 40 */ 41 42/* 43 * Manages physical address maps. 44 * 45 * In addition to hardware address maps, this 46 * module is called upon to provide software-use-only 47 * maps which may or may not be stored in the same 48 * form as hardware maps. These pseudo-maps are 49 * used to store intermediate results from copy 50 * operations to and from address spaces. 51 * 52 * Since the information managed by this module is 53 * also stored by the logical address mapping module, 54 * this module may throw away valid virtual-to-physical 55 * mappings at almost any time. However, invalidations 56 * of virtual-to-physical mappings must be done as 57 * requested. 58 * 59 * In order to cope with hardware architectures which 60 * make virtual-to-physical map invalidates expensive, 61 * this module may delay invalidate or reduced protection 62 * operations until such time as they are actually 63 * necessary. This module is given full information as 64 * to which processors are currently using which maps, 65 * and to when physical maps must be made correct. 
66 */ 67 68#include <sys/cdefs.h> 69__FBSDID("$FreeBSD: head/sys/mips/mips/pmap.c 239964 2012-09-01 03:46:28Z alc $"); 70 71#include "opt_ddb.h" 72#include "opt_pmap.h" 73 74#include <sys/param.h> 75#include <sys/systm.h> 76#include <sys/lock.h> 77#include <sys/mman.h> 78#include <sys/msgbuf.h> 79#include <sys/mutex.h> 80#include <sys/pcpu.h> 81#include <sys/proc.h> 82#include <sys/rwlock.h> 83#include <sys/sched.h> 84#ifdef SMP 85#include <sys/smp.h> 86#else 87#include <sys/cpuset.h> 88#endif 89#include <sys/sysctl.h> 90#include <sys/vmmeter.h> 91 92#ifdef DDB 93#include <ddb/ddb.h> 94#endif 95 96#include <vm/vm.h> 97#include <vm/vm_param.h> 98#include <vm/vm_kern.h> 99#include <vm/vm_page.h> 100#include <vm/vm_map.h> 101#include <vm/vm_object.h> 102#include <vm/vm_extern.h> 103#include <vm/vm_pageout.h> 104#include <vm/vm_pager.h> 105#include <vm/uma.h> 106 107#include <machine/cache.h> 108#include <machine/md_var.h> 109#include <machine/tlb.h> 110 111#undef PMAP_DEBUG 112 113#if !defined(DIAGNOSTIC) 114#define PMAP_INLINE __inline 115#else 116#define PMAP_INLINE 117#endif 118 119#ifdef PV_STATS 120#define PV_STAT(x) do { x ; } while (0) 121#else 122#define PV_STAT(x) do { } while (0) 123#endif 124 125/* 126 * Get PDEs and PTEs for user/kernel address space 127 */ 128#define pmap_seg_index(v) (((v) >> SEGSHIFT) & (NPDEPG - 1)) 129#define pmap_pde_index(v) (((v) >> PDRSHIFT) & (NPDEPG - 1)) 130#define pmap_pte_index(v) (((v) >> PAGE_SHIFT) & (NPTEPG - 1)) 131#define pmap_pde_pindex(v) ((v) >> PDRSHIFT) 132 133#ifdef __mips_n64 134#define NUPDE (NPDEPG * NPDEPG) 135#define NUSERPGTBLS (NUPDE + NPDEPG) 136#else 137#define NUPDE (NPDEPG) 138#define NUSERPGTBLS (NUPDE) 139#endif 140 141#define is_kernel_pmap(x) ((x) == kernel_pmap) 142 143struct pmap kernel_pmap_store; 144pd_entry_t *kernel_segmap; 145 146vm_offset_t virtual_avail; /* VA of first avail page (after kernel bss) */ 147vm_offset_t virtual_end; /* VA of last avail page (end of kernel AS) */ 148 149static int nkpt; 150unsigned pmap_max_asid; /* max ASID supported by the system */ 151 152#define PMAP_ASID_RESERVED 0 153 154vm_offset_t kernel_vm_end = VM_MIN_KERNEL_ADDRESS; 155 156static void pmap_asid_alloc(pmap_t pmap); 157 158/* 159 * Isolate the global pv list lock from data and other locks to prevent false 160 * sharing within the cache. 
161 */ 162static struct { 163 struct rwlock lock; 164 char padding[CACHE_LINE_SIZE - sizeof(struct rwlock)]; 165} pvh_global __aligned(CACHE_LINE_SIZE); 166 167#define pvh_global_lock pvh_global.lock 168 169/* 170 * Data for the pv entry allocation mechanism 171 */ 172static TAILQ_HEAD(pch, pv_chunk) pv_chunks = TAILQ_HEAD_INITIALIZER(pv_chunks); 173static int pv_entry_count; 174 175static void free_pv_chunk(struct pv_chunk *pc); 176static void free_pv_entry(pmap_t pmap, pv_entry_t pv); 177static pv_entry_t get_pv_entry(pmap_t pmap, boolean_t try); 178static vm_page_t pmap_pv_reclaim(pmap_t locked_pmap); 179static void pmap_pvh_free(struct md_page *pvh, pmap_t pmap, vm_offset_t va); 180static pv_entry_t pmap_pvh_remove(struct md_page *pvh, pmap_t pmap, 181 vm_offset_t va); 182static vm_page_t pmap_enter_quick_locked(pmap_t pmap, vm_offset_t va, 183 vm_page_t m, vm_prot_t prot, vm_page_t mpte); 184static int pmap_remove_pte(struct pmap *pmap, pt_entry_t *ptq, vm_offset_t va, 185 pd_entry_t pde); 186static void pmap_remove_page(struct pmap *pmap, vm_offset_t va); 187static void pmap_remove_entry(struct pmap *pmap, vm_page_t m, vm_offset_t va); 188static boolean_t pmap_try_insert_pv_entry(pmap_t pmap, vm_page_t mpte, 189 vm_offset_t va, vm_page_t m); 190static void pmap_update_page(pmap_t pmap, vm_offset_t va, pt_entry_t pte); 191static void pmap_invalidate_all(pmap_t pmap); 192static void pmap_invalidate_page(pmap_t pmap, vm_offset_t va); 193static int _pmap_unwire_pte_hold(pmap_t pmap, vm_offset_t va, vm_page_t m); 194 195static vm_page_t pmap_allocpte(pmap_t pmap, vm_offset_t va, int flags); 196static vm_page_t _pmap_allocpte(pmap_t pmap, unsigned ptepindex, int flags); 197static int pmap_unuse_pt(pmap_t, vm_offset_t, pd_entry_t); 198static pt_entry_t init_pte_prot(vm_page_t m, vm_prot_t access, vm_prot_t prot); 199 200#ifdef SMP 201static void pmap_invalidate_page_action(void *arg); 202static void pmap_update_page_action(void *arg); 203#endif 204 205#ifndef __mips_n64 206/* 207 * This structure is for high memory (memory above 512Meg in 32 bit) support. 208 * The highmem area does not have a KSEG0 mapping, and we need a mechanism to 209 * do temporary per-CPU mappings for pmap_zero_page, pmap_copy_page etc. 210 * 211 * At bootup, we reserve 2 virtual pages per CPU for mapping highmem pages. To 212 * access a highmem physical address on a CPU, we map the physical address to 213 * the reserved virtual address for the CPU in the kernel pagetable. This is 214 * done with interrupts disabled(although a spinlock and sched_pin would be 215 * sufficient). 
216 */ 217struct local_sysmaps { 218 vm_offset_t base; 219 uint32_t saved_intr; 220 uint16_t valid1, valid2; 221}; 222static struct local_sysmaps sysmap_lmem[MAXCPU]; 223 224static __inline void 225pmap_alloc_lmem_map(void) 226{ 227 int i; 228 229 for (i = 0; i < MAXCPU; i++) { 230 sysmap_lmem[i].base = virtual_avail; 231 virtual_avail += PAGE_SIZE * 2; 232 sysmap_lmem[i].valid1 = sysmap_lmem[i].valid2 = 0; 233 } 234} 235 236static __inline vm_offset_t 237pmap_lmem_map1(vm_paddr_t phys) 238{ 239 struct local_sysmaps *sysm; 240 pt_entry_t *pte, npte; 241 vm_offset_t va; 242 uint32_t intr; 243 int cpu; 244 245 intr = intr_disable(); 246 cpu = PCPU_GET(cpuid); 247 sysm = &sysmap_lmem[cpu]; 248 sysm->saved_intr = intr; 249 va = sysm->base; 250 npte = TLBLO_PA_TO_PFN(phys) | 251 PTE_D | PTE_V | PTE_G | PTE_W | PTE_C_CACHE; 252 pte = pmap_pte(kernel_pmap, va); 253 *pte = npte; 254 sysm->valid1 = 1; 255 return (va); 256} 257 258static __inline vm_offset_t 259pmap_lmem_map2(vm_paddr_t phys1, vm_paddr_t phys2) 260{ 261 struct local_sysmaps *sysm; 262 pt_entry_t *pte, npte; 263 vm_offset_t va1, va2; 264 uint32_t intr; 265 int cpu; 266 267 intr = intr_disable(); 268 cpu = PCPU_GET(cpuid); 269 sysm = &sysmap_lmem[cpu]; 270 sysm->saved_intr = intr; 271 va1 = sysm->base; 272 va2 = sysm->base + PAGE_SIZE; 273 npte = TLBLO_PA_TO_PFN(phys1) | 274 PTE_D | PTE_V | PTE_G | PTE_W | PTE_C_CACHE; 275 pte = pmap_pte(kernel_pmap, va1); 276 *pte = npte; 277 npte = TLBLO_PA_TO_PFN(phys2) | 278 PTE_D | PTE_V | PTE_G | PTE_W | PTE_C_CACHE; 279 pte = pmap_pte(kernel_pmap, va2); 280 *pte = npte; 281 sysm->valid1 = 1; 282 sysm->valid2 = 1; 283 return (va1); 284} 285 286static __inline void 287pmap_lmem_unmap(void) 288{ 289 struct local_sysmaps *sysm; 290 pt_entry_t *pte; 291 int cpu; 292 293 cpu = PCPU_GET(cpuid); 294 sysm = &sysmap_lmem[cpu]; 295 pte = pmap_pte(kernel_pmap, sysm->base); 296 *pte = PTE_G; 297 tlb_invalidate_address(kernel_pmap, sysm->base); 298 sysm->valid1 = 0; 299 if (sysm->valid2) { 300 pte = pmap_pte(kernel_pmap, sysm->base + PAGE_SIZE); 301 *pte = PTE_G; 302 tlb_invalidate_address(kernel_pmap, sysm->base + PAGE_SIZE); 303 sysm->valid2 = 0; 304 } 305 intr_restore(sysm->saved_intr); 306} 307#else /* __mips_n64 */ 308 309static __inline void 310pmap_alloc_lmem_map(void) 311{ 312} 313 314static __inline vm_offset_t 315pmap_lmem_map1(vm_paddr_t phys) 316{ 317 318 return (0); 319} 320 321static __inline vm_offset_t 322pmap_lmem_map2(vm_paddr_t phys1, vm_paddr_t phys2) 323{ 324 325 return (0); 326} 327 328static __inline vm_offset_t 329pmap_lmem_unmap(void) 330{ 331 332 return (0); 333} 334#endif /* !__mips_n64 */ 335 336/* 337 * Page table entry lookup routines. 
338 */ 339static __inline pd_entry_t * 340pmap_segmap(pmap_t pmap, vm_offset_t va) 341{ 342 343 return (&pmap->pm_segtab[pmap_seg_index(va)]); 344} 345 346#ifdef __mips_n64 347static __inline pd_entry_t * 348pmap_pdpe_to_pde(pd_entry_t *pdpe, vm_offset_t va) 349{ 350 pd_entry_t *pde; 351 352 pde = (pd_entry_t *)*pdpe; 353 return (&pde[pmap_pde_index(va)]); 354} 355 356static __inline pd_entry_t * 357pmap_pde(pmap_t pmap, vm_offset_t va) 358{ 359 pd_entry_t *pdpe; 360 361 pdpe = pmap_segmap(pmap, va); 362 if (pdpe == NULL || *pdpe == NULL) 363 return (NULL); 364 365 return (pmap_pdpe_to_pde(pdpe, va)); 366} 367#else 368static __inline pd_entry_t * 369pmap_pdpe_to_pde(pd_entry_t *pdpe, vm_offset_t va) 370{ 371 372 return (pdpe); 373} 374 375static __inline 376pd_entry_t *pmap_pde(pmap_t pmap, vm_offset_t va) 377{ 378 379 return (pmap_segmap(pmap, va)); 380} 381#endif 382 383static __inline pt_entry_t * 384pmap_pde_to_pte(pd_entry_t *pde, vm_offset_t va) 385{ 386 pt_entry_t *pte; 387 388 pte = (pt_entry_t *)*pde; 389 return (&pte[pmap_pte_index(va)]); 390} 391 392pt_entry_t * 393pmap_pte(pmap_t pmap, vm_offset_t va) 394{ 395 pd_entry_t *pde; 396 397 pde = pmap_pde(pmap, va); 398 if (pde == NULL || *pde == NULL) 399 return (NULL); 400 401 return (pmap_pde_to_pte(pde, va)); 402} 403 404vm_offset_t 405pmap_steal_memory(vm_size_t size) 406{ 407 vm_paddr_t bank_size, pa; 408 vm_offset_t va; 409 410 size = round_page(size); 411 bank_size = phys_avail[1] - phys_avail[0]; 412 while (size > bank_size) { 413 int i; 414 415 for (i = 0; phys_avail[i + 2]; i += 2) { 416 phys_avail[i] = phys_avail[i + 2]; 417 phys_avail[i + 1] = phys_avail[i + 3]; 418 } 419 phys_avail[i] = 0; 420 phys_avail[i + 1] = 0; 421 if (!phys_avail[0]) 422 panic("pmap_steal_memory: out of memory"); 423 bank_size = phys_avail[1] - phys_avail[0]; 424 } 425 426 pa = phys_avail[0]; 427 phys_avail[0] += size; 428 if (MIPS_DIRECT_MAPPABLE(pa) == 0) 429 panic("Out of memory below 512Meg?"); 430 va = MIPS_PHYS_TO_DIRECT(pa); 431 bzero((caddr_t)va, size); 432 return (va); 433} 434 435/* 436 * Bootstrap the system enough to run with virtual memory. This 437 * assumes that the phys_avail array has been initialized. 438 */ 439static void 440pmap_create_kernel_pagetable(void) 441{ 442 int i, j; 443 vm_offset_t ptaddr; 444 pt_entry_t *pte; 445#ifdef __mips_n64 446 pd_entry_t *pde; 447 vm_offset_t pdaddr; 448 int npt, npde; 449#endif 450 451 /* 452 * Allocate segment table for the kernel 453 */ 454 kernel_segmap = (pd_entry_t *)pmap_steal_memory(PAGE_SIZE); 455 456 /* 457 * Allocate second level page tables for the kernel 458 */ 459#ifdef __mips_n64 460 npde = howmany(NKPT, NPDEPG); 461 pdaddr = pmap_steal_memory(PAGE_SIZE * npde); 462#endif 463 nkpt = NKPT; 464 ptaddr = pmap_steal_memory(PAGE_SIZE * nkpt); 465 466 /* 467 * The R[4-7]?00 stores only one copy of the Global bit in the 468 * translation lookaside buffer for each 2 page entry. Thus invalid 469 * entrys must have the Global bit set so when Entry LO and Entry HI 470 * G bits are anded together they will produce a global bit to store 471 * in the tlb. 
472 */ 473 for (i = 0, pte = (pt_entry_t *)ptaddr; i < (nkpt * NPTEPG); i++, pte++) 474 *pte = PTE_G; 475 476#ifdef __mips_n64 477 for (i = 0, npt = nkpt; npt > 0; i++) { 478 kernel_segmap[i] = (pd_entry_t)(pdaddr + i * PAGE_SIZE); 479 pde = (pd_entry_t *)kernel_segmap[i]; 480 481 for (j = 0; j < NPDEPG && npt > 0; j++, npt--) 482 pde[j] = (pd_entry_t)(ptaddr + (i * NPDEPG + j) * PAGE_SIZE); 483 } 484#else 485 for (i = 0, j = pmap_seg_index(VM_MIN_KERNEL_ADDRESS); i < nkpt; i++, j++) 486 kernel_segmap[j] = (pd_entry_t)(ptaddr + (i * PAGE_SIZE)); 487#endif 488 489 PMAP_LOCK_INIT(kernel_pmap); 490 kernel_pmap->pm_segtab = kernel_segmap; 491 CPU_FILL(&kernel_pmap->pm_active); 492 TAILQ_INIT(&kernel_pmap->pm_pvchunk); 493 kernel_pmap->pm_asid[0].asid = PMAP_ASID_RESERVED; 494 kernel_pmap->pm_asid[0].gen = 0; 495 kernel_vm_end += nkpt * NPTEPG * PAGE_SIZE; 496} 497 498void 499pmap_bootstrap(void) 500{ 501 int i; 502 int need_local_mappings = 0; 503 504 /* Sort. */ 505again: 506 for (i = 0; phys_avail[i + 1] != 0; i += 2) { 507 /* 508 * Keep the memory aligned on page boundary. 509 */ 510 phys_avail[i] = round_page(phys_avail[i]); 511 phys_avail[i + 1] = trunc_page(phys_avail[i + 1]); 512 513 if (i < 2) 514 continue; 515 if (phys_avail[i - 2] > phys_avail[i]) { 516 vm_paddr_t ptemp[2]; 517 518 ptemp[0] = phys_avail[i + 0]; 519 ptemp[1] = phys_avail[i + 1]; 520 521 phys_avail[i + 0] = phys_avail[i - 2]; 522 phys_avail[i + 1] = phys_avail[i - 1]; 523 524 phys_avail[i - 2] = ptemp[0]; 525 phys_avail[i - 1] = ptemp[1]; 526 goto again; 527 } 528 } 529 530 /* 531 * In 32 bit, we may have memory which cannot be mapped directly. 532 * This memory will need temporary mapping before it can be 533 * accessed. 534 */ 535 if (!MIPS_DIRECT_MAPPABLE(phys_avail[i - 1] - 1)) 536 need_local_mappings = 1; 537 538 /* 539 * Copy the phys_avail[] array before we start stealing memory from it. 540 */ 541 for (i = 0; phys_avail[i + 1] != 0; i += 2) { 542 physmem_desc[i] = phys_avail[i]; 543 physmem_desc[i + 1] = phys_avail[i + 1]; 544 } 545 546 Maxmem = atop(phys_avail[i - 1]); 547 548 if (bootverbose) { 549 printf("Physical memory chunk(s):\n"); 550 for (i = 0; phys_avail[i + 1] != 0; i += 2) { 551 vm_paddr_t size; 552 553 size = phys_avail[i + 1] - phys_avail[i]; 554 printf("%#08jx - %#08jx, %ju bytes (%ju pages)\n", 555 (uintmax_t) phys_avail[i], 556 (uintmax_t) phys_avail[i + 1] - 1, 557 (uintmax_t) size, (uintmax_t) size / PAGE_SIZE); 558 } 559 printf("Maxmem is 0x%0jx\n", ptoa((uintmax_t)Maxmem)); 560 } 561 /* 562 * Steal the message buffer from the beginning of memory. 563 */ 564 msgbufp = (struct msgbuf *)pmap_steal_memory(msgbufsize); 565 msgbufinit(msgbufp, msgbufsize); 566 567 /* 568 * Steal thread0 kstack. 569 */ 570 kstack0 = pmap_steal_memory(KSTACK_PAGES << PAGE_SHIFT); 571 572 virtual_avail = VM_MIN_KERNEL_ADDRESS; 573 virtual_end = VM_MAX_KERNEL_ADDRESS; 574 575#ifdef SMP 576 /* 577 * Steal some virtual address space to map the pcpu area. 578 */ 579 virtual_avail = roundup2(virtual_avail, PAGE_SIZE * 2); 580 pcpup = (struct pcpu *)virtual_avail; 581 virtual_avail += PAGE_SIZE * 2; 582 583 /* 584 * Initialize the wired TLB entry mapping the pcpu region for 585 * the BSP at 'pcpup'. Up until this point we were operating 586 * with the 'pcpup' for the BSP pointing to a virtual address 587 * in KSEG0 so there was no need for a TLB mapping. 
588 */ 589 mips_pcpu_tlb_init(PCPU_ADDR(0)); 590 591 if (bootverbose) 592 printf("pcpu is available at virtual address %p.\n", pcpup); 593#endif 594 595 if (need_local_mappings) 596 pmap_alloc_lmem_map(); 597 pmap_create_kernel_pagetable(); 598 pmap_max_asid = VMNUM_PIDS; 599 mips_wr_entryhi(0); 600 mips_wr_pagemask(0); 601 602 /* 603 * Initialize the global pv list lock. 604 */ 605 rw_init(&pvh_global_lock, "pmap pv global"); 606} 607 608/* 609 * Initialize a vm_page's machine-dependent fields. 610 */ 611void 612pmap_page_init(vm_page_t m) 613{ 614 615 TAILQ_INIT(&m->md.pv_list); 616 m->md.pv_flags = 0; 617} 618 619/* 620 * Initialize the pmap module. 621 * Called by vm_init, to initialize any structures that the pmap 622 * system needs to map virtual memory. 623 */ 624void 625pmap_init(void) 626{ 627} 628 629/*************************************************** 630 * Low level helper routines..... 631 ***************************************************/ 632 633#ifdef SMP 634static __inline void 635pmap_call_on_active_cpus(pmap_t pmap, void (*fn)(void *), void *arg) 636{ 637 int cpuid, cpu, self; 638 cpuset_t active_cpus; 639 640 sched_pin(); 641 if (is_kernel_pmap(pmap)) { 642 smp_rendezvous(NULL, fn, NULL, arg); 643 goto out; 644 } 645 /* Force ASID update on inactive CPUs */ 646 CPU_FOREACH(cpu) { 647 if (!CPU_ISSET(cpu, &pmap->pm_active)) 648 pmap->pm_asid[cpu].gen = 0; 649 } 650 cpuid = PCPU_GET(cpuid); 651 /* 652 * XXX: barrier/locking for active? 653 * 654 * Take a snapshot of active here, any further changes are ignored. 655 * tlb update/invalidate should be harmless on inactive CPUs 656 */ 657 active_cpus = pmap->pm_active; 658 self = CPU_ISSET(cpuid, &active_cpus); 659 CPU_CLR(cpuid, &active_cpus); 660 /* Optimize for the case where this cpu is the only active one */ 661 if (CPU_EMPTY(&active_cpus)) { 662 if (self) 663 fn(arg); 664 } else { 665 if (self) 666 CPU_SET(cpuid, &active_cpus); 667 smp_rendezvous_cpus(active_cpus, NULL, fn, NULL, arg); 668 } 669out: 670 sched_unpin(); 671} 672#else /* !SMP */ 673static __inline void 674pmap_call_on_active_cpus(pmap_t pmap, void (*fn)(void *), void *arg) 675{ 676 int cpuid; 677 678 if (is_kernel_pmap(pmap)) { 679 fn(arg); 680 return; 681 } 682 cpuid = PCPU_GET(cpuid); 683 if (!CPU_ISSET(cpuid, &pmap->pm_active)) 684 pmap->pm_asid[cpuid].gen = 0; 685 else 686 fn(arg); 687} 688#endif /* SMP */ 689 690static void 691pmap_invalidate_all(pmap_t pmap) 692{ 693 694 pmap_call_on_active_cpus(pmap, 695 (void (*)(void *))tlb_invalidate_all_user, pmap); 696} 697 698struct pmap_invalidate_page_arg { 699 pmap_t pmap; 700 vm_offset_t va; 701}; 702 703static void 704pmap_invalidate_page_action(void *arg) 705{ 706 struct pmap_invalidate_page_arg *p = arg; 707 708 tlb_invalidate_address(p->pmap, p->va); 709} 710 711static void 712pmap_invalidate_page(pmap_t pmap, vm_offset_t va) 713{ 714 struct pmap_invalidate_page_arg arg; 715 716 arg.pmap = pmap; 717 arg.va = va; 718 pmap_call_on_active_cpus(pmap, pmap_invalidate_page_action, &arg); 719} 720 721struct pmap_update_page_arg { 722 pmap_t pmap; 723 vm_offset_t va; 724 pt_entry_t pte; 725}; 726 727static void 728pmap_update_page_action(void *arg) 729{ 730 struct pmap_update_page_arg *p = arg; 731 732 tlb_update(p->pmap, p->va, p->pte); 733} 734 735static void 736pmap_update_page(pmap_t pmap, vm_offset_t va, pt_entry_t pte) 737{ 738 struct pmap_update_page_arg arg; 739 740 arg.pmap = pmap; 741 arg.va = va; 742 arg.pte = pte; 743 pmap_call_on_active_cpus(pmap, pmap_update_page_action, &arg); 744} 745 746/* 747 
* Routine: pmap_extract 748 * Function: 749 * Extract the physical page address associated 750 * with the given map/virtual_address pair. 751 */ 752vm_paddr_t 753pmap_extract(pmap_t pmap, vm_offset_t va) 754{ 755 pt_entry_t *pte; 756 vm_offset_t retval = 0; 757 758 PMAP_LOCK(pmap); 759 pte = pmap_pte(pmap, va); 760 if (pte) { 761 retval = TLBLO_PTE_TO_PA(*pte) | (va & PAGE_MASK); 762 } 763 PMAP_UNLOCK(pmap); 764 return (retval); 765} 766 767/* 768 * Routine: pmap_extract_and_hold 769 * Function: 770 * Atomically extract and hold the physical page 771 * with the given pmap and virtual address pair 772 * if that mapping permits the given protection. 773 */ 774vm_page_t 775pmap_extract_and_hold(pmap_t pmap, vm_offset_t va, vm_prot_t prot) 776{ 777 pt_entry_t *ptep; 778 pt_entry_t pte; 779 vm_page_t m; 780 vm_paddr_t pa; 781 782 m = NULL; 783 pa = 0; 784 PMAP_LOCK(pmap); 785retry: 786 ptep = pmap_pte(pmap, va); 787 if ((ptep != NULL) && ((pte = *ptep) != 0) && 788 pte_test(&pte, PTE_V) && 789 (pte_test(&pte, PTE_D) || (prot & VM_PROT_WRITE) == 0)) { 790 if (vm_page_pa_tryrelock(pmap, TLBLO_PTE_TO_PA(pte), &pa)) 791 goto retry; 792 793 m = PHYS_TO_VM_PAGE(TLBLO_PTE_TO_PA(pte)); 794 vm_page_hold(m); 795 } 796 PA_UNLOCK_COND(pa); 797 PMAP_UNLOCK(pmap); 798 return (m); 799} 800 801/*************************************************** 802 * Low level mapping routines..... 803 ***************************************************/ 804 805/* 806 * add a wired page to the kva 807 */ 808void 809pmap_kenter_attr(vm_offset_t va, vm_paddr_t pa, int attr) 810{ 811 pt_entry_t *pte; 812 pt_entry_t opte, npte; 813 814#ifdef PMAP_DEBUG 815 printf("pmap_kenter: va: %p -> pa: %p\n", (void *)va, (void *)pa); 816#endif 817 npte = TLBLO_PA_TO_PFN(pa) | PTE_D | PTE_V | PTE_G | PTE_W | attr; 818 819 pte = pmap_pte(kernel_pmap, va); 820 opte = *pte; 821 *pte = npte; 822 if (pte_test(&opte, PTE_V) && opte != npte) 823 pmap_update_page(kernel_pmap, va, npte); 824} 825 826void 827pmap_kenter(vm_offset_t va, vm_paddr_t pa) 828{ 829 830 KASSERT(is_cacheable_mem(pa), 831 ("pmap_kenter: memory at 0x%lx is not cacheable", (u_long)pa)); 832 833 pmap_kenter_attr(va, pa, PTE_C_CACHE); 834} 835 836/* 837 * remove a page from the kernel pagetables 838 */ 839 /* PMAP_INLINE */ void 840pmap_kremove(vm_offset_t va) 841{ 842 pt_entry_t *pte; 843 844 /* 845 * Write back all caches from the page being destroyed 846 */ 847 mips_dcache_wbinv_range_index(va, PAGE_SIZE); 848 849 pte = pmap_pte(kernel_pmap, va); 850 *pte = PTE_G; 851 pmap_invalidate_page(kernel_pmap, va); 852} 853 854/* 855 * Used to map a range of physical addresses into kernel 856 * virtual address space. 857 * 858 * The value passed in '*virt' is a suggested virtual address for 859 * the mapping. Architectures which can support a direct-mapped 860 * physical to virtual region can return the appropriate address 861 * within that region, leaving '*virt' unchanged. Other 862 * architectures should map the pages starting at '*virt' and 863 * update '*virt' with the first usable address after the mapped 864 * region. 865 * 866 * Use XKPHYS for 64 bit, and KSEG0 where possible for 32 bit. 
867 */ 868vm_offset_t 869pmap_map(vm_offset_t *virt, vm_paddr_t start, vm_paddr_t end, int prot) 870{ 871 vm_offset_t va, sva; 872 873 if (MIPS_DIRECT_MAPPABLE(end - 1)) 874 return (MIPS_PHYS_TO_DIRECT(start)); 875 876 va = sva = *virt; 877 while (start < end) { 878 pmap_kenter(va, start); 879 va += PAGE_SIZE; 880 start += PAGE_SIZE; 881 } 882 *virt = va; 883 return (sva); 884} 885 886/* 887 * Add a list of wired pages to the kva 888 * this routine is only used for temporary 889 * kernel mappings that do not need to have 890 * page modification or references recorded. 891 * Note that old mappings are simply written 892 * over. The page *must* be wired. 893 */ 894void 895pmap_qenter(vm_offset_t va, vm_page_t *m, int count) 896{ 897 int i; 898 vm_offset_t origva = va; 899 900 for (i = 0; i < count; i++) { 901 pmap_flush_pvcache(m[i]); 902 pmap_kenter(va, VM_PAGE_TO_PHYS(m[i])); 903 va += PAGE_SIZE; 904 } 905 906 mips_dcache_wbinv_range_index(origva, PAGE_SIZE*count); 907} 908 909/* 910 * this routine jerks page mappings from the 911 * kernel -- it is meant only for temporary mappings. 912 */ 913void 914pmap_qremove(vm_offset_t va, int count) 915{ 916 /* 917 * No need to wb/inv caches here, 918 * pmap_kremove will do it for us 919 */ 920 921 while (count-- > 0) { 922 pmap_kremove(va); 923 va += PAGE_SIZE; 924 } 925} 926 927/*************************************************** 928 * Page table page management routines..... 929 ***************************************************/ 930 931/* Revision 1.507 932 * 933 * Simplify the reference counting of page table pages. Specifically, use 934 * the page table page's wired count rather than its hold count to contain 935 * the reference count. 936 */ 937 938/* 939 * This routine unholds page table pages, and if the hold count 940 * drops to zero, then it decrements the wire count. 941 */ 942static PMAP_INLINE int 943pmap_unwire_pte_hold(pmap_t pmap, vm_offset_t va, vm_page_t m) 944{ 945 --m->wire_count; 946 if (m->wire_count == 0) 947 return (_pmap_unwire_pte_hold(pmap, va, m)); 948 else 949 return (0); 950} 951 952static int 953_pmap_unwire_pte_hold(pmap_t pmap, vm_offset_t va, vm_page_t m) 954{ 955 pd_entry_t *pde; 956 957 PMAP_LOCK_ASSERT(pmap, MA_OWNED); 958 /* 959 * unmap the page table page 960 */ 961#ifdef __mips_n64 962 if (m->pindex < NUPDE) 963 pde = pmap_pde(pmap, va); 964 else 965 pde = pmap_segmap(pmap, va); 966#else 967 pde = pmap_pde(pmap, va); 968#endif 969 *pde = 0; 970 pmap->pm_stats.resident_count--; 971 972#ifdef __mips_n64 973 if (m->pindex < NUPDE) { 974 pd_entry_t *pdp; 975 vm_page_t pdpg; 976 977 /* 978 * Recursively decrement next level pagetable refcount 979 */ 980 pdp = (pd_entry_t *)*pmap_segmap(pmap, va); 981 pdpg = PHYS_TO_VM_PAGE(MIPS_DIRECT_TO_PHYS(pdp)); 982 pmap_unwire_pte_hold(pmap, va, pdpg); 983 } 984#endif 985 986 /* 987 * If the page is finally unwired, simply free it. 988 */ 989 vm_page_free_zero(m); 990 atomic_subtract_int(&cnt.v_wire_count, 1); 991 return (1); 992} 993 994/* 995 * After removing a page table entry, this routine is used to 996 * conditionally free the page, and manage the hold/wire counts. 
997 */ 998static int 999pmap_unuse_pt(pmap_t pmap, vm_offset_t va, pd_entry_t pde) 1000{ 1001 vm_page_t mpte; 1002 1003 if (va >= VM_MAXUSER_ADDRESS) 1004 return (0); 1005 KASSERT(pde != 0, ("pmap_unuse_pt: pde != 0")); 1006 mpte = PHYS_TO_VM_PAGE(MIPS_DIRECT_TO_PHYS(pde)); 1007 return (pmap_unwire_pte_hold(pmap, va, mpte)); 1008} 1009 1010void 1011pmap_pinit0(pmap_t pmap) 1012{ 1013 int i; 1014 1015 PMAP_LOCK_INIT(pmap); 1016 pmap->pm_segtab = kernel_segmap; 1017 CPU_ZERO(&pmap->pm_active); 1018 for (i = 0; i < MAXCPU; i++) { 1019 pmap->pm_asid[i].asid = PMAP_ASID_RESERVED; 1020 pmap->pm_asid[i].gen = 0; 1021 } 1022 PCPU_SET(curpmap, pmap); 1023 TAILQ_INIT(&pmap->pm_pvchunk); 1024 bzero(&pmap->pm_stats, sizeof pmap->pm_stats); 1025} 1026 1027void 1028pmap_grow_direct_page_cache() 1029{ 1030 1031#ifdef __mips_n64 1032 vm_pageout_grow_cache(3, 0, MIPS_XKPHYS_LARGEST_PHYS); 1033#else 1034 vm_pageout_grow_cache(3, 0, MIPS_KSEG0_LARGEST_PHYS); 1035#endif 1036} 1037 1038vm_page_t 1039pmap_alloc_direct_page(unsigned int index, int req) 1040{ 1041 vm_page_t m; 1042 1043 m = vm_page_alloc_freelist(VM_FREELIST_DIRECT, req | VM_ALLOC_WIRED | 1044 VM_ALLOC_ZERO); 1045 if (m == NULL) 1046 return (NULL); 1047 1048 if ((m->flags & PG_ZERO) == 0) 1049 pmap_zero_page(m); 1050 1051 m->pindex = index; 1052 return (m); 1053} 1054 1055/* 1056 * Initialize a preallocated and zeroed pmap structure, 1057 * such as one in a vmspace structure. 1058 */ 1059int 1060pmap_pinit(pmap_t pmap) 1061{ 1062 vm_offset_t ptdva; 1063 vm_page_t ptdpg; 1064 int i; 1065 1066 PMAP_LOCK_INIT(pmap); 1067 1068 /* 1069 * allocate the page directory page 1070 */ 1071 while ((ptdpg = pmap_alloc_direct_page(NUSERPGTBLS, VM_ALLOC_NORMAL)) == NULL) 1072 pmap_grow_direct_page_cache(); 1073 1074 ptdva = MIPS_PHYS_TO_DIRECT(VM_PAGE_TO_PHYS(ptdpg)); 1075 pmap->pm_segtab = (pd_entry_t *)ptdva; 1076 CPU_ZERO(&pmap->pm_active); 1077 for (i = 0; i < MAXCPU; i++) { 1078 pmap->pm_asid[i].asid = PMAP_ASID_RESERVED; 1079 pmap->pm_asid[i].gen = 0; 1080 } 1081 TAILQ_INIT(&pmap->pm_pvchunk); 1082 bzero(&pmap->pm_stats, sizeof pmap->pm_stats); 1083 1084 return (1); 1085} 1086 1087/* 1088 * this routine is called if the page table page is not 1089 * mapped correctly. 1090 */ 1091static vm_page_t 1092_pmap_allocpte(pmap_t pmap, unsigned ptepindex, int flags) 1093{ 1094 vm_offset_t pageva; 1095 vm_page_t m; 1096 1097 KASSERT((flags & (M_NOWAIT | M_WAITOK)) == M_NOWAIT || 1098 (flags & (M_NOWAIT | M_WAITOK)) == M_WAITOK, 1099 ("_pmap_allocpte: flags is neither M_NOWAIT nor M_WAITOK")); 1100 1101 /* 1102 * Find or fabricate a new pagetable page 1103 */ 1104 if ((m = pmap_alloc_direct_page(ptepindex, VM_ALLOC_NORMAL)) == NULL) { 1105 if (flags & M_WAITOK) { 1106 PMAP_UNLOCK(pmap); 1107 rw_wunlock(&pvh_global_lock); 1108 pmap_grow_direct_page_cache(); 1109 rw_wlock(&pvh_global_lock); 1110 PMAP_LOCK(pmap); 1111 } 1112 1113 /* 1114 * Indicate the need to retry. While waiting, the page 1115 * table page may have been allocated. 1116 */ 1117 return (NULL); 1118 } 1119 1120 /* 1121 * Map the pagetable page into the process address space, if it 1122 * isn't already there. 
1123 */ 1124 pageva = MIPS_PHYS_TO_DIRECT(VM_PAGE_TO_PHYS(m)); 1125 1126#ifdef __mips_n64 1127 if (ptepindex >= NUPDE) { 1128 pmap->pm_segtab[ptepindex - NUPDE] = (pd_entry_t)pageva; 1129 } else { 1130 pd_entry_t *pdep, *pde; 1131 int segindex = ptepindex >> (SEGSHIFT - PDRSHIFT); 1132 int pdeindex = ptepindex & (NPDEPG - 1); 1133 vm_page_t pg; 1134 1135 pdep = &pmap->pm_segtab[segindex]; 1136 if (*pdep == NULL) { 1137 /* recurse for allocating page dir */ 1138 if (_pmap_allocpte(pmap, NUPDE + segindex, 1139 flags) == NULL) { 1140 /* alloc failed, release current */ 1141 --m->wire_count; 1142 atomic_subtract_int(&cnt.v_wire_count, 1); 1143 vm_page_free_zero(m); 1144 return (NULL); 1145 } 1146 } else { 1147 pg = PHYS_TO_VM_PAGE(MIPS_DIRECT_TO_PHYS(*pdep)); 1148 pg->wire_count++; 1149 } 1150 /* Next level entry */ 1151 pde = (pd_entry_t *)*pdep; 1152 pde[pdeindex] = (pd_entry_t)pageva; 1153 } 1154#else 1155 pmap->pm_segtab[ptepindex] = (pd_entry_t)pageva; 1156#endif 1157 pmap->pm_stats.resident_count++; 1158 return (m); 1159} 1160 1161static vm_page_t 1162pmap_allocpte(pmap_t pmap, vm_offset_t va, int flags) 1163{ 1164 unsigned ptepindex; 1165 pd_entry_t *pde; 1166 vm_page_t m; 1167 1168 KASSERT((flags & (M_NOWAIT | M_WAITOK)) == M_NOWAIT || 1169 (flags & (M_NOWAIT | M_WAITOK)) == M_WAITOK, 1170 ("pmap_allocpte: flags is neither M_NOWAIT nor M_WAITOK")); 1171 1172 /* 1173 * Calculate pagetable page index 1174 */ 1175 ptepindex = pmap_pde_pindex(va); 1176retry: 1177 /* 1178 * Get the page directory entry 1179 */ 1180 pde = pmap_pde(pmap, va); 1181 1182 /* 1183 * If the page table page is mapped, we just increment the hold 1184 * count, and activate it. 1185 */ 1186 if (pde != NULL && *pde != NULL) { 1187 m = PHYS_TO_VM_PAGE(MIPS_DIRECT_TO_PHYS(*pde)); 1188 m->wire_count++; 1189 } else { 1190 /* 1191 * Here if the pte page isn't mapped, or if it has been 1192 * deallocated. 1193 */ 1194 m = _pmap_allocpte(pmap, ptepindex, flags); 1195 if (m == NULL && (flags & M_WAITOK)) 1196 goto retry; 1197 } 1198 return (m); 1199} 1200 1201 1202/*************************************************** 1203 * Pmap allocation/deallocation routines. 1204 ***************************************************/ 1205 1206/* 1207 * Release any resources held by the given physical map. 1208 * Called when a pmap initialized by pmap_pinit is being released. 1209 * Should only be called if the map contains no valid mappings. 
1210 */ 1211void 1212pmap_release(pmap_t pmap) 1213{ 1214 vm_offset_t ptdva; 1215 vm_page_t ptdpg; 1216 1217 KASSERT(pmap->pm_stats.resident_count == 0, 1218 ("pmap_release: pmap resident count %ld != 0", 1219 pmap->pm_stats.resident_count)); 1220 1221 ptdva = (vm_offset_t)pmap->pm_segtab; 1222 ptdpg = PHYS_TO_VM_PAGE(MIPS_DIRECT_TO_PHYS(ptdva)); 1223 1224 ptdpg->wire_count--; 1225 atomic_subtract_int(&cnt.v_wire_count, 1); 1226 vm_page_free_zero(ptdpg); 1227 PMAP_LOCK_DESTROY(pmap); 1228} 1229 1230/* 1231 * grow the number of kernel page table entries, if needed 1232 */ 1233void 1234pmap_growkernel(vm_offset_t addr) 1235{ 1236 vm_page_t nkpg; 1237 pd_entry_t *pde, *pdpe; 1238 pt_entry_t *pte; 1239 int i; 1240 1241 mtx_assert(&kernel_map->system_mtx, MA_OWNED); 1242 addr = roundup2(addr, NBSEG); 1243 if (addr - 1 >= kernel_map->max_offset) 1244 addr = kernel_map->max_offset; 1245 while (kernel_vm_end < addr) { 1246 pdpe = pmap_segmap(kernel_pmap, kernel_vm_end); 1247#ifdef __mips_n64 1248 if (*pdpe == 0) { 1249 /* new intermediate page table entry */ 1250 nkpg = pmap_alloc_direct_page(nkpt, VM_ALLOC_INTERRUPT); 1251 if (nkpg == NULL) 1252 panic("pmap_growkernel: no memory to grow kernel"); 1253 *pdpe = (pd_entry_t)MIPS_PHYS_TO_DIRECT(VM_PAGE_TO_PHYS(nkpg)); 1254 continue; /* try again */ 1255 } 1256#endif 1257 pde = pmap_pdpe_to_pde(pdpe, kernel_vm_end); 1258 if (*pde != 0) { 1259 kernel_vm_end = (kernel_vm_end + NBPDR) & ~PDRMASK; 1260 if (kernel_vm_end - 1 >= kernel_map->max_offset) { 1261 kernel_vm_end = kernel_map->max_offset; 1262 break; 1263 } 1264 continue; 1265 } 1266 1267 /* 1268 * This index is bogus, but out of the way 1269 */ 1270 nkpg = pmap_alloc_direct_page(nkpt, VM_ALLOC_INTERRUPT); 1271 if (!nkpg) 1272 panic("pmap_growkernel: no memory to grow kernel"); 1273 nkpt++; 1274 *pde = (pd_entry_t)MIPS_PHYS_TO_DIRECT(VM_PAGE_TO_PHYS(nkpg)); 1275 1276 /* 1277 * The R[4-7]?00 stores only one copy of the Global bit in 1278 * the translation lookaside buffer for each 2 page entry. 1279 * Thus invalid entrys must have the Global bit set so when 1280 * Entry LO and Entry HI G bits are anded together they will 1281 * produce a global bit to store in the tlb. 1282 */ 1283 pte = (pt_entry_t *)*pde; 1284 for (i = 0; i < NPTEPG; i++) 1285 pte[i] = PTE_G; 1286 1287 kernel_vm_end = (kernel_vm_end + NBPDR) & ~PDRMASK; 1288 if (kernel_vm_end - 1 >= kernel_map->max_offset) { 1289 kernel_vm_end = kernel_map->max_offset; 1290 break; 1291 } 1292 } 1293} 1294 1295/*************************************************** 1296 * page management routines. 
1297 ***************************************************/ 1298 1299CTASSERT(sizeof(struct pv_chunk) == PAGE_SIZE); 1300#ifdef __mips_n64 1301CTASSERT(_NPCM == 3); 1302CTASSERT(_NPCPV == 168); 1303#else 1304CTASSERT(_NPCM == 11); 1305CTASSERT(_NPCPV == 336); 1306#endif 1307 1308static __inline struct pv_chunk * 1309pv_to_chunk(pv_entry_t pv) 1310{ 1311 1312 return ((struct pv_chunk *)((uintptr_t)pv & ~(uintptr_t)PAGE_MASK)); 1313} 1314 1315#define PV_PMAP(pv) (pv_to_chunk(pv)->pc_pmap) 1316 1317#ifdef __mips_n64 1318#define PC_FREE0_1 0xfffffffffffffffful 1319#define PC_FREE2 0x000000fffffffffful 1320#else 1321#define PC_FREE0_9 0xfffffffful /* Free values for index 0 through 9 */ 1322#define PC_FREE10 0x0000fffful /* Free values for index 10 */ 1323#endif 1324 1325static const u_long pc_freemask[_NPCM] = { 1326#ifdef __mips_n64 1327 PC_FREE0_1, PC_FREE0_1, PC_FREE2 1328#else 1329 PC_FREE0_9, PC_FREE0_9, PC_FREE0_9, 1330 PC_FREE0_9, PC_FREE0_9, PC_FREE0_9, 1331 PC_FREE0_9, PC_FREE0_9, PC_FREE0_9, 1332 PC_FREE0_9, PC_FREE10 1333#endif 1334}; 1335 1336static SYSCTL_NODE(_vm, OID_AUTO, pmap, CTLFLAG_RD, 0, "VM/pmap parameters"); 1337 1338SYSCTL_INT(_vm_pmap, OID_AUTO, pv_entry_count, CTLFLAG_RD, &pv_entry_count, 0, 1339 "Current number of pv entries"); 1340 1341#ifdef PV_STATS 1342static int pc_chunk_count, pc_chunk_allocs, pc_chunk_frees, pc_chunk_tryfail; 1343 1344SYSCTL_INT(_vm_pmap, OID_AUTO, pc_chunk_count, CTLFLAG_RD, &pc_chunk_count, 0, 1345 "Current number of pv entry chunks"); 1346SYSCTL_INT(_vm_pmap, OID_AUTO, pc_chunk_allocs, CTLFLAG_RD, &pc_chunk_allocs, 0, 1347 "Current number of pv entry chunks allocated"); 1348SYSCTL_INT(_vm_pmap, OID_AUTO, pc_chunk_frees, CTLFLAG_RD, &pc_chunk_frees, 0, 1349 "Current number of pv entry chunks frees"); 1350SYSCTL_INT(_vm_pmap, OID_AUTO, pc_chunk_tryfail, CTLFLAG_RD, &pc_chunk_tryfail, 0, 1351 "Number of times tried to get a chunk page but failed."); 1352 1353static long pv_entry_frees, pv_entry_allocs; 1354static int pv_entry_spare; 1355 1356SYSCTL_LONG(_vm_pmap, OID_AUTO, pv_entry_frees, CTLFLAG_RD, &pv_entry_frees, 0, 1357 "Current number of pv entry frees"); 1358SYSCTL_LONG(_vm_pmap, OID_AUTO, pv_entry_allocs, CTLFLAG_RD, &pv_entry_allocs, 0, 1359 "Current number of pv entry allocs"); 1360SYSCTL_INT(_vm_pmap, OID_AUTO, pv_entry_spare, CTLFLAG_RD, &pv_entry_spare, 0, 1361 "Current number of spare pv entries"); 1362#endif 1363 1364/* 1365 * We are in a serious low memory condition. Resort to 1366 * drastic measures to free some pages so we can allocate 1367 * another pv entry chunk. 1368 */ 1369static vm_page_t 1370pmap_pv_reclaim(pmap_t locked_pmap) 1371{ 1372 struct pch newtail; 1373 struct pv_chunk *pc; 1374 pd_entry_t *pde; 1375 pmap_t pmap; 1376 pt_entry_t *pte, oldpte; 1377 pv_entry_t pv; 1378 vm_offset_t va; 1379 vm_page_t m, m_pc; 1380 u_long inuse; 1381 int bit, field, freed, idx; 1382 1383 PMAP_LOCK_ASSERT(locked_pmap, MA_OWNED); 1384 pmap = NULL; 1385 m_pc = NULL; 1386 TAILQ_INIT(&newtail); 1387 while ((pc = TAILQ_FIRST(&pv_chunks)) != NULL) { 1388 TAILQ_REMOVE(&pv_chunks, pc, pc_lru); 1389 if (pmap != pc->pc_pmap) { 1390 if (pmap != NULL) { 1391 pmap_invalidate_all(pmap); 1392 if (pmap != locked_pmap) 1393 PMAP_UNLOCK(pmap); 1394 } 1395 pmap = pc->pc_pmap; 1396 /* Avoid deadlock and lock recursion. 
*/ 1397 if (pmap > locked_pmap) 1398 PMAP_LOCK(pmap); 1399 else if (pmap != locked_pmap && !PMAP_TRYLOCK(pmap)) { 1400 pmap = NULL; 1401 TAILQ_INSERT_TAIL(&newtail, pc, pc_lru); 1402 continue; 1403 } 1404 } 1405 1406 /* 1407 * Destroy every non-wired, 4 KB page mapping in the chunk. 1408 */ 1409 freed = 0; 1410 for (field = 0; field < _NPCM; field++) { 1411 for (inuse = ~pc->pc_map[field] & pc_freemask[field]; 1412 inuse != 0; inuse &= ~(1UL << bit)) { 1413 bit = ffsl(inuse) - 1; 1414 idx = field * sizeof(inuse) * NBBY + bit; 1415 pv = &pc->pc_pventry[idx]; 1416 va = pv->pv_va; 1417 pde = pmap_pde(pmap, va); 1418 KASSERT(pde != NULL && *pde != 0, 1419 ("pmap_pv_reclaim: pde")); 1420 pte = pmap_pde_to_pte(pde, va); 1421 oldpte = *pte; 1422 KASSERT(!pte_test(&oldpte, PTE_W), 1423 ("wired pte for unwired page")); 1424 if (is_kernel_pmap(pmap)) 1425 *pte = PTE_G; 1426 else 1427 *pte = 0; 1428 pmap_invalidate_page(pmap, va); 1429 m = PHYS_TO_VM_PAGE(TLBLO_PTE_TO_PA(oldpte)); 1430 if (pte_test(&oldpte, PTE_D)) 1431 vm_page_dirty(m); 1432 if (m->md.pv_flags & PV_TABLE_REF) 1433 vm_page_aflag_set(m, PGA_REFERENCED); 1434 m->md.pv_flags &= ~PV_TABLE_REF; 1435 TAILQ_REMOVE(&m->md.pv_list, pv, pv_list); 1436 if (TAILQ_EMPTY(&m->md.pv_list)) 1437 vm_page_aflag_clear(m, PGA_WRITEABLE); 1438 pc->pc_map[field] |= 1UL << bit; 1439 pmap_unuse_pt(pmap, va, *pde); 1440 freed++; 1441 } 1442 } 1443 if (freed == 0) { 1444 TAILQ_INSERT_TAIL(&newtail, pc, pc_lru); 1445 continue; 1446 } 1447 /* Every freed mapping is for a 4 KB page. */ 1448 pmap->pm_stats.resident_count -= freed; 1449 PV_STAT(pv_entry_frees += freed); 1450 PV_STAT(pv_entry_spare += freed); 1451 pv_entry_count -= freed; 1452 TAILQ_REMOVE(&pmap->pm_pvchunk, pc, pc_list); 1453 for (field = 0; field < _NPCM; field++) 1454 if (pc->pc_map[field] != pc_freemask[field]) { 1455 TAILQ_INSERT_HEAD(&pmap->pm_pvchunk, pc, 1456 pc_list); 1457 TAILQ_INSERT_TAIL(&newtail, pc, pc_lru); 1458 1459 /* 1460 * One freed pv entry in locked_pmap is 1461 * sufficient. 1462 */ 1463 if (pmap == locked_pmap) 1464 goto out; 1465 break; 1466 } 1467 if (field == _NPCM) { 1468 PV_STAT(pv_entry_spare -= _NPCPV); 1469 PV_STAT(pc_chunk_count--); 1470 PV_STAT(pc_chunk_frees++); 1471 /* Entire chunk is free; return it. */ 1472 m_pc = PHYS_TO_VM_PAGE(MIPS_DIRECT_TO_PHYS( 1473 (vm_offset_t)pc)); 1474 break; 1475 } 1476 } 1477out: 1478 TAILQ_CONCAT(&pv_chunks, &newtail, pc_lru); 1479 if (pmap != NULL) { 1480 pmap_invalidate_all(pmap); 1481 if (pmap != locked_pmap) 1482 PMAP_UNLOCK(pmap); 1483 } 1484 return (m_pc); 1485} 1486 1487/* 1488 * free the pv_entry back to the free list 1489 */ 1490static void 1491free_pv_entry(pmap_t pmap, pv_entry_t pv) 1492{ 1493 struct pv_chunk *pc; 1494 int bit, field, idx; 1495 1496 rw_assert(&pvh_global_lock, RA_WLOCKED); 1497 PMAP_LOCK_ASSERT(pmap, MA_OWNED); 1498 PV_STAT(pv_entry_frees++); 1499 PV_STAT(pv_entry_spare++); 1500 pv_entry_count--; 1501 pc = pv_to_chunk(pv); 1502 idx = pv - &pc->pc_pventry[0]; 1503 field = idx / (sizeof(u_long) * NBBY); 1504 bit = idx % (sizeof(u_long) * NBBY); 1505 pc->pc_map[field] |= 1ul << bit; 1506 for (idx = 0; idx < _NPCM; idx++) 1507 if (pc->pc_map[idx] != pc_freemask[idx]) { 1508 /* 1509 * 98% of the time, pc is already at the head of the 1510 * list. If it isn't already, move it to the head. 
1511 */ 1512 if (__predict_false(TAILQ_FIRST(&pmap->pm_pvchunk) != 1513 pc)) { 1514 TAILQ_REMOVE(&pmap->pm_pvchunk, pc, pc_list); 1515 TAILQ_INSERT_HEAD(&pmap->pm_pvchunk, pc, 1516 pc_list); 1517 } 1518 return; 1519 } 1520 TAILQ_REMOVE(&pmap->pm_pvchunk, pc, pc_list); 1521 free_pv_chunk(pc); 1522} 1523 1524static void 1525free_pv_chunk(struct pv_chunk *pc) 1526{ 1527 vm_page_t m; 1528 1529 TAILQ_REMOVE(&pv_chunks, pc, pc_lru); 1530 PV_STAT(pv_entry_spare -= _NPCPV); 1531 PV_STAT(pc_chunk_count--); 1532 PV_STAT(pc_chunk_frees++); 1533 /* entire chunk is free, return it */ 1534 m = PHYS_TO_VM_PAGE(MIPS_DIRECT_TO_PHYS((vm_offset_t)pc)); 1535 vm_page_unwire(m, 0); 1536 vm_page_free(m); 1537} 1538 1539/* 1540 * get a new pv_entry, allocating a block from the system 1541 * when needed. 1542 */ 1543static pv_entry_t 1544get_pv_entry(pmap_t pmap, boolean_t try) 1545{ 1546 struct pv_chunk *pc; 1547 pv_entry_t pv; 1548 vm_page_t m; 1549 int bit, field, idx; 1550 1551 rw_assert(&pvh_global_lock, RA_WLOCKED); 1552 PMAP_LOCK_ASSERT(pmap, MA_OWNED); 1553 PV_STAT(pv_entry_allocs++); 1554 pv_entry_count++; 1555retry: 1556 pc = TAILQ_FIRST(&pmap->pm_pvchunk); 1557 if (pc != NULL) { 1558 for (field = 0; field < _NPCM; field++) { 1559 if (pc->pc_map[field]) { 1560 bit = ffsl(pc->pc_map[field]) - 1; 1561 break; 1562 } 1563 } 1564 if (field < _NPCM) { 1565 idx = field * sizeof(pc->pc_map[field]) * NBBY + bit; 1566 pv = &pc->pc_pventry[idx]; 1567 pc->pc_map[field] &= ~(1ul << bit); 1568 /* If this was the last item, move it to tail */ 1569 for (field = 0; field < _NPCM; field++) 1570 if (pc->pc_map[field] != 0) { 1571 PV_STAT(pv_entry_spare--); 1572 return (pv); /* not full, return */ 1573 } 1574 TAILQ_REMOVE(&pmap->pm_pvchunk, pc, pc_list); 1575 TAILQ_INSERT_TAIL(&pmap->pm_pvchunk, pc, pc_list); 1576 PV_STAT(pv_entry_spare--); 1577 return (pv); 1578 } 1579 } 1580 /* No free items, allocate another chunk */ 1581 m = vm_page_alloc_freelist(VM_FREELIST_DIRECT, VM_ALLOC_NORMAL | 1582 VM_ALLOC_WIRED); 1583 if (m == NULL) { 1584 if (try) { 1585 pv_entry_count--; 1586 PV_STAT(pc_chunk_tryfail++); 1587 return (NULL); 1588 } 1589 m = pmap_pv_reclaim(pmap); 1590 if (m == NULL) 1591 goto retry; 1592 } 1593 PV_STAT(pc_chunk_count++); 1594 PV_STAT(pc_chunk_allocs++); 1595 pc = (struct pv_chunk *)MIPS_PHYS_TO_DIRECT(VM_PAGE_TO_PHYS(m)); 1596 pc->pc_pmap = pmap; 1597 pc->pc_map[0] = pc_freemask[0] & ~1ul; /* preallocated bit 0 */ 1598 for (field = 1; field < _NPCM; field++) 1599 pc->pc_map[field] = pc_freemask[field]; 1600 TAILQ_INSERT_TAIL(&pv_chunks, pc, pc_lru); 1601 pv = &pc->pc_pventry[0]; 1602 TAILQ_INSERT_HEAD(&pmap->pm_pvchunk, pc, pc_list); 1603 PV_STAT(pv_entry_spare += _NPCPV - 1); 1604 return (pv); 1605} 1606 1607/* 1608 * If it is the first entry on the list, it is actually 1609 * in the header and we must copy the following entry up 1610 * to the header. Otherwise we must search the list for 1611 * the entry. In either case we free the now unused entry. 
1612 */ 1613 1614static pv_entry_t 1615pmap_pvh_remove(struct md_page *pvh, pmap_t pmap, vm_offset_t va) 1616{ 1617 pv_entry_t pv; 1618 1619 rw_assert(&pvh_global_lock, RA_WLOCKED); 1620 TAILQ_FOREACH(pv, &pvh->pv_list, pv_list) { 1621 if (pmap == PV_PMAP(pv) && va == pv->pv_va) { 1622 TAILQ_REMOVE(&pvh->pv_list, pv, pv_list); 1623 break; 1624 } 1625 } 1626 return (pv); 1627} 1628 1629static void 1630pmap_pvh_free(struct md_page *pvh, pmap_t pmap, vm_offset_t va) 1631{ 1632 pv_entry_t pv; 1633 1634 pv = pmap_pvh_remove(pvh, pmap, va); 1635 KASSERT(pv != NULL, ("pmap_pvh_free: pv not found, pa %lx va %lx", 1636 (u_long)VM_PAGE_TO_PHYS(member2struct(vm_page, md, pvh)), 1637 (u_long)va)); 1638 free_pv_entry(pmap, pv); 1639} 1640 1641static void 1642pmap_remove_entry(pmap_t pmap, vm_page_t m, vm_offset_t va) 1643{ 1644 1645 rw_assert(&pvh_global_lock, RA_WLOCKED); 1646 pmap_pvh_free(&m->md, pmap, va); 1647 if (TAILQ_EMPTY(&m->md.pv_list)) 1648 vm_page_aflag_clear(m, PGA_WRITEABLE); 1649} 1650 1651/* 1652 * Conditionally create a pv entry. 1653 */ 1654static boolean_t 1655pmap_try_insert_pv_entry(pmap_t pmap, vm_page_t mpte, vm_offset_t va, 1656 vm_page_t m) 1657{ 1658 pv_entry_t pv; 1659 1660 rw_assert(&pvh_global_lock, RA_WLOCKED); 1661 PMAP_LOCK_ASSERT(pmap, MA_OWNED); 1662 if ((pv = get_pv_entry(pmap, TRUE)) != NULL) { 1663 pv->pv_va = va; 1664 TAILQ_INSERT_TAIL(&m->md.pv_list, pv, pv_list); 1665 return (TRUE); 1666 } else 1667 return (FALSE); 1668} 1669 1670/* 1671 * pmap_remove_pte: do the things to unmap a page in a process 1672 */ 1673static int 1674pmap_remove_pte(struct pmap *pmap, pt_entry_t *ptq, vm_offset_t va, 1675 pd_entry_t pde) 1676{ 1677 pt_entry_t oldpte; 1678 vm_page_t m; 1679 vm_paddr_t pa; 1680 1681 rw_assert(&pvh_global_lock, RA_WLOCKED); 1682 PMAP_LOCK_ASSERT(pmap, MA_OWNED); 1683 1684 oldpte = *ptq; 1685 if (is_kernel_pmap(pmap)) 1686 *ptq = PTE_G; 1687 else 1688 *ptq = 0; 1689 1690 if (pte_test(&oldpte, PTE_W)) 1691 pmap->pm_stats.wired_count -= 1; 1692 1693 pmap->pm_stats.resident_count -= 1; 1694 1695 if (pte_test(&oldpte, PTE_MANAGED)) { 1696 pa = TLBLO_PTE_TO_PA(oldpte); 1697 m = PHYS_TO_VM_PAGE(pa); 1698 if (pte_test(&oldpte, PTE_D)) { 1699 KASSERT(!pte_test(&oldpte, PTE_RO), 1700 ("%s: modified page not writable: va: %p, pte: %#jx", 1701 __func__, (void *)va, (uintmax_t)oldpte)); 1702 vm_page_dirty(m); 1703 } 1704 if (m->md.pv_flags & PV_TABLE_REF) 1705 vm_page_aflag_set(m, PGA_REFERENCED); 1706 m->md.pv_flags &= ~PV_TABLE_REF; 1707 1708 pmap_remove_entry(pmap, m, va); 1709 } 1710 return (pmap_unuse_pt(pmap, va, pde)); 1711} 1712 1713/* 1714 * Remove a single page from a process address space 1715 */ 1716static void 1717pmap_remove_page(struct pmap *pmap, vm_offset_t va) 1718{ 1719 pd_entry_t *pde; 1720 pt_entry_t *ptq; 1721 1722 rw_assert(&pvh_global_lock, RA_WLOCKED); 1723 PMAP_LOCK_ASSERT(pmap, MA_OWNED); 1724 pde = pmap_pde(pmap, va); 1725 if (pde == NULL || *pde == 0) 1726 return; 1727 ptq = pmap_pde_to_pte(pde, va); 1728 1729 /* 1730 * if there is no pte for this address, just skip it!!! 1731 */ 1732 if (!pte_test(ptq, PTE_V)) { 1733 return; 1734 } 1735 1736 /* 1737 * Write back all caches from the page being destroyed 1738 */ 1739 mips_dcache_wbinv_range_index(va, PAGE_SIZE); 1740 1741 /* 1742 * get a local va for mappings for this pmap. 1743 */ 1744 (void)pmap_remove_pte(pmap, ptq, va, *pde); 1745 pmap_invalidate_page(pmap, va); 1746 1747 return; 1748} 1749 1750/* 1751 * Remove the given range of addresses from the specified map. 
1752 * 1753 * It is assumed that the start and end are properly 1754 * rounded to the page size. 1755 */ 1756void 1757pmap_remove(struct pmap *pmap, vm_offset_t sva, vm_offset_t eva) 1758{ 1759 vm_offset_t va_next; 1760 pd_entry_t *pde, *pdpe; 1761 pt_entry_t *pte; 1762 1763 if (pmap->pm_stats.resident_count == 0) 1764 return; 1765 1766 rw_wlock(&pvh_global_lock); 1767 PMAP_LOCK(pmap); 1768 1769 /* 1770 * special handling of removing one page. a very common operation 1771 * and easy to short circuit some code. 1772 */ 1773 if ((sva + PAGE_SIZE) == eva) { 1774 pmap_remove_page(pmap, sva); 1775 goto out; 1776 } 1777 for (; sva < eva; sva = va_next) { 1778 pdpe = pmap_segmap(pmap, sva); 1779#ifdef __mips_n64 1780 if (*pdpe == 0) { 1781 va_next = (sva + NBSEG) & ~SEGMASK; 1782 if (va_next < sva) 1783 va_next = eva; 1784 continue; 1785 } 1786#endif 1787 va_next = (sva + NBPDR) & ~PDRMASK; 1788 if (va_next < sva) 1789 va_next = eva; 1790 1791 pde = pmap_pdpe_to_pde(pdpe, sva); 1792 if (*pde == 0) 1793 continue; 1794 if (va_next > eva) 1795 va_next = eva; 1796 for (pte = pmap_pde_to_pte(pde, sva); sva != va_next; 1797 pte++, sva += PAGE_SIZE) { 1798 pmap_remove_page(pmap, sva); 1799 } 1800 } 1801out: 1802 rw_wunlock(&pvh_global_lock); 1803 PMAP_UNLOCK(pmap); 1804} 1805 1806/* 1807 * Routine: pmap_remove_all 1808 * Function: 1809 * Removes this physical page from 1810 * all physical maps in which it resides. 1811 * Reflects back modify bits to the pager. 1812 * 1813 * Notes: 1814 * Original versions of this routine were very 1815 * inefficient because they iteratively called 1816 * pmap_remove (slow...) 1817 */ 1818 1819void 1820pmap_remove_all(vm_page_t m) 1821{ 1822 pv_entry_t pv; 1823 pmap_t pmap; 1824 pd_entry_t *pde; 1825 pt_entry_t *pte, tpte; 1826 1827 KASSERT((m->oflags & VPO_UNMANAGED) == 0, 1828 ("pmap_remove_all: page %p is not managed", m)); 1829 rw_wlock(&pvh_global_lock); 1830 1831 if (m->md.pv_flags & PV_TABLE_REF) 1832 vm_page_aflag_set(m, PGA_REFERENCED); 1833 1834 while ((pv = TAILQ_FIRST(&m->md.pv_list)) != NULL) { 1835 pmap = PV_PMAP(pv); 1836 PMAP_LOCK(pmap); 1837 1838 /* 1839 * If it's last mapping writeback all caches from 1840 * the page being destroyed 1841 */ 1842 if (TAILQ_NEXT(pv, pv_list) == NULL) 1843 mips_dcache_wbinv_range_index(pv->pv_va, PAGE_SIZE); 1844 1845 pmap->pm_stats.resident_count--; 1846 1847 pde = pmap_pde(pmap, pv->pv_va); 1848 KASSERT(pde != NULL && *pde != 0, ("pmap_remove_all: pde")); 1849 pte = pmap_pde_to_pte(pde, pv->pv_va); 1850 1851 tpte = *pte; 1852 if (is_kernel_pmap(pmap)) 1853 *pte = PTE_G; 1854 else 1855 *pte = 0; 1856 1857 if (pte_test(&tpte, PTE_W)) 1858 pmap->pm_stats.wired_count--; 1859 1860 /* 1861 * Update the vm_page_t clean and reference bits. 1862 */ 1863 if (pte_test(&tpte, PTE_D)) { 1864 KASSERT(!pte_test(&tpte, PTE_RO), 1865 ("%s: modified page not writable: va: %p, pte: %#jx", 1866 __func__, (void *)pv->pv_va, (uintmax_t)tpte)); 1867 vm_page_dirty(m); 1868 } 1869 pmap_invalidate_page(pmap, pv->pv_va); 1870 1871 TAILQ_REMOVE(&m->md.pv_list, pv, pv_list); 1872 pmap_unuse_pt(pmap, pv->pv_va, *pde); 1873 free_pv_entry(pmap, pv); 1874 PMAP_UNLOCK(pmap); 1875 } 1876 1877 vm_page_aflag_clear(m, PGA_WRITEABLE); 1878 m->md.pv_flags &= ~PV_TABLE_REF; 1879 rw_wunlock(&pvh_global_lock); 1880} 1881 1882/* 1883 * Set the physical protection on the 1884 * specified range of this map as requested. 
1885 */ 1886void 1887pmap_protect(pmap_t pmap, vm_offset_t sva, vm_offset_t eva, vm_prot_t prot) 1888{ 1889 pt_entry_t *pte; 1890 pd_entry_t *pde, *pdpe; 1891 vm_offset_t va_next; 1892 1893 if ((prot & VM_PROT_READ) == VM_PROT_NONE) { 1894 pmap_remove(pmap, sva, eva); 1895 return; 1896 } 1897 if (prot & VM_PROT_WRITE) 1898 return; 1899 1900 rw_wlock(&pvh_global_lock); 1901 PMAP_LOCK(pmap); 1902 for (; sva < eva; sva = va_next) { 1903 pt_entry_t pbits; 1904 vm_page_t m; 1905 vm_paddr_t pa; 1906 1907 pdpe = pmap_segmap(pmap, sva); 1908#ifdef __mips_n64 1909 if (*pdpe == 0) { 1910 va_next = (sva + NBSEG) & ~SEGMASK; 1911 if (va_next < sva) 1912 va_next = eva; 1913 continue; 1914 } 1915#endif 1916 va_next = (sva + NBPDR) & ~PDRMASK; 1917 if (va_next < sva) 1918 va_next = eva; 1919 1920 pde = pmap_pdpe_to_pde(pdpe, sva); 1921 if (pde == NULL || *pde == NULL) 1922 continue; 1923 if (va_next > eva) 1924 va_next = eva; 1925 1926 for (pte = pmap_pde_to_pte(pde, sva); sva != va_next; pte++, 1927 sva += PAGE_SIZE) { 1928 1929 /* Skip invalid PTEs */ 1930 if (!pte_test(pte, PTE_V)) 1931 continue; 1932 pbits = *pte; 1933 if (pte_test(&pbits, PTE_MANAGED | PTE_D)) { 1934 pa = TLBLO_PTE_TO_PA(pbits); 1935 m = PHYS_TO_VM_PAGE(pa); 1936 vm_page_dirty(m); 1937 } 1938 pte_clear(&pbits, PTE_D); 1939 pte_set(&pbits, PTE_RO); 1940 1941 if (pbits != *pte) { 1942 *pte = pbits; 1943 pmap_update_page(pmap, sva, pbits); 1944 } 1945 } 1946 } 1947 rw_wunlock(&pvh_global_lock); 1948 PMAP_UNLOCK(pmap); 1949} 1950 1951/* 1952 * Insert the given physical page (p) at 1953 * the specified virtual address (v) in the 1954 * target physical map with the protection requested. 1955 * 1956 * If specified, the page will be wired down, meaning 1957 * that the related pte can not be reclaimed. 1958 * 1959 * NB: This is the only routine which MAY NOT lazy-evaluate 1960 * or lose information. That is, this routine must actually 1961 * insert this page into the given map NOW. 1962 */ 1963void 1964pmap_enter(pmap_t pmap, vm_offset_t va, vm_prot_t access, vm_page_t m, 1965 vm_prot_t prot, boolean_t wired) 1966{ 1967 vm_paddr_t pa, opa; 1968 pt_entry_t *pte; 1969 pt_entry_t origpte, newpte; 1970 pv_entry_t pv; 1971 vm_page_t mpte, om; 1972 pt_entry_t rw; 1973 1974 va &= ~PAGE_MASK; 1975 KASSERT(va <= VM_MAX_KERNEL_ADDRESS, ("pmap_enter: toobig")); 1976 KASSERT((m->oflags & (VPO_UNMANAGED | VPO_BUSY)) != 0, 1977 ("pmap_enter: page %p is not busy", m)); 1978 pa = VM_PAGE_TO_PHYS(m); 1979 newpte = TLBLO_PA_TO_PFN(pa) | PTE_V; 1980 1981 mpte = NULL; 1982 1983 rw_wlock(&pvh_global_lock); 1984 PMAP_LOCK(pmap); 1985 1986 /* 1987 * In the case that a page table page is not resident, we are 1988 * creating it here. 1989 */ 1990 if (va < VM_MAXUSER_ADDRESS) { 1991 mpte = pmap_allocpte(pmap, va, M_WAITOK); 1992 } 1993 pte = pmap_pte(pmap, va); 1994 1995 /* 1996 * Page Directory table entry not valid, we need a new PT page 1997 */ 1998 if (pte == NULL) { 1999 panic("pmap_enter: invalid page directory, pdir=%p, va=%p", 2000 (void *)pmap->pm_segtab, (void *)va); 2001 } 2002 om = NULL; 2003 origpte = *pte; 2004 opa = TLBLO_PTE_TO_PA(origpte); 2005 2006 /* 2007 * Mapping has not changed, must be protection or wiring change. 2008 */ 2009 if (pte_test(&origpte, PTE_V) && opa == pa) { 2010 /* 2011 * Wiring change, just update stats. We don't worry about 2012 * wiring PT pages as they remain resident as long as there 2013 * are valid mappings in them. Hence, if a user page is 2014 * wired, the PT page will be also. 
2015 */ 2016 if (wired && !pte_test(&origpte, PTE_W)) 2017 pmap->pm_stats.wired_count++; 2018 else if (!wired && pte_test(&origpte, PTE_W)) 2019 pmap->pm_stats.wired_count--; 2020 2021 KASSERT(!pte_test(&origpte, PTE_D | PTE_RO), 2022 ("%s: modified page not writable: va: %p, pte: %#jx", 2023 __func__, (void *)va, (uintmax_t)origpte)); 2024 2025 /* 2026 * Remove extra pte reference 2027 */ 2028 if (mpte) 2029 mpte->wire_count--; 2030 2031 if (pte_test(&origpte, PTE_MANAGED)) { 2032 om = m; 2033 newpte |= PTE_MANAGED; 2034 } 2035 goto validate; 2036 } 2037 2038 pv = NULL; 2039 2040 /* 2041 * Mapping has changed, invalidate old range and fall through to 2042 * handle validating new mapping. 2043 */ 2044 if (opa) { 2045 if (pte_test(&origpte, PTE_W)) 2046 pmap->pm_stats.wired_count--; 2047 2048 if (pte_test(&origpte, PTE_MANAGED)) { 2049 om = PHYS_TO_VM_PAGE(opa); 2050 pv = pmap_pvh_remove(&om->md, pmap, va); 2051 } 2052 if (mpte != NULL) { 2053 mpte->wire_count--; 2054 KASSERT(mpte->wire_count > 0, 2055 ("pmap_enter: missing reference to page table page," 2056 " va: %p", (void *)va)); 2057 } 2058 } else 2059 pmap->pm_stats.resident_count++; 2060 2061 /* 2062 * Enter on the PV list if part of our managed memory. Note that we 2063 * raise IPL while manipulating pv_table since pmap_enter can be 2064 * called at interrupt time. 2065 */ 2066 if ((m->oflags & VPO_UNMANAGED) == 0) { 2067 KASSERT(va < kmi.clean_sva || va >= kmi.clean_eva, 2068 ("pmap_enter: managed mapping within the clean submap")); 2069 if (pv == NULL) 2070 pv = get_pv_entry(pmap, FALSE); 2071 pv->pv_va = va; 2072 TAILQ_INSERT_TAIL(&m->md.pv_list, pv, pv_list); 2073 newpte |= PTE_MANAGED; 2074 } else if (pv != NULL) 2075 free_pv_entry(pmap, pv); 2076 2077 /* 2078 * Increment counters 2079 */ 2080 if (wired) 2081 pmap->pm_stats.wired_count++; 2082 2083validate: 2084 if ((access & VM_PROT_WRITE) != 0) 2085 m->md.pv_flags |= PV_TABLE_REF; 2086 rw = init_pte_prot(m, access, prot); 2087 2088#ifdef PMAP_DEBUG 2089 printf("pmap_enter: va: %p -> pa: %p\n", (void *)va, (void *)pa); 2090#endif 2091 /* 2092 * Now validate mapping with desired protection/wiring. 2093 */ 2094 newpte |= rw; 2095 2096 if (is_cacheable_mem(pa)) 2097 newpte |= PTE_C_CACHE; 2098 else 2099 newpte |= PTE_C_UNCACHED; 2100 2101 if (wired) 2102 newpte |= PTE_W; 2103 2104 if (is_kernel_pmap(pmap)) 2105 newpte |= PTE_G; 2106 2107 /* 2108 * if the mapping or permission bits are different, we need to 2109 * update the pte. 2110 */ 2111 if (origpte != newpte) { 2112 if (pte_test(&origpte, PTE_V)) { 2113 *pte = newpte; 2114 if (pte_test(&origpte, PTE_MANAGED) && opa != pa) { 2115 if (om->md.pv_flags & PV_TABLE_REF) 2116 vm_page_aflag_set(om, PGA_REFERENCED); 2117 om->md.pv_flags &= ~PV_TABLE_REF; 2118 } 2119 if (pte_test(&origpte, PTE_D)) { 2120 KASSERT(!pte_test(&origpte, PTE_RO), 2121 ("pmap_enter: modified page not writable:" 2122 " va: %p, pte: %#jx", (void *)va, (uintmax_t)origpte)); 2123 if (pte_test(&origpte, PTE_MANAGED)) 2124 vm_page_dirty(om); 2125 } 2126 if (pte_test(&origpte, PTE_MANAGED) && 2127 TAILQ_EMPTY(&om->md.pv_list)) 2128 vm_page_aflag_clear(om, PGA_WRITEABLE); 2129 } else { 2130 *pte = newpte; 2131 } 2132 } 2133 pmap_update_page(pmap, va, newpte); 2134 2135 /* 2136 * Sync I & D caches for executable pages. Do this only if the 2137 * target pmap belongs to the current process. Otherwise, an 2138 * unresolvable TLB miss may occur. 
2139 */ 2140 if (!is_kernel_pmap(pmap) && (pmap == &curproc->p_vmspace->vm_pmap) && 2141 (prot & VM_PROT_EXECUTE)) { 2142 mips_icache_sync_range(va, PAGE_SIZE); 2143 mips_dcache_wbinv_range(va, PAGE_SIZE); 2144 } 2145 rw_wunlock(&pvh_global_lock); 2146 PMAP_UNLOCK(pmap); 2147} 2148 2149/* 2150 * this code makes some *MAJOR* assumptions: 2151 * 1. Current pmap & pmap exists. 2152 * 2. Not wired. 2153 * 3. Read access. 2154 * 4. No page table pages. 2155 * but is *MUCH* faster than pmap_enter... 2156 */ 2157 2158void 2159pmap_enter_quick(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot) 2160{ 2161 2162 rw_wlock(&pvh_global_lock); 2163 PMAP_LOCK(pmap); 2164 (void)pmap_enter_quick_locked(pmap, va, m, prot, NULL); 2165 rw_wunlock(&pvh_global_lock); 2166 PMAP_UNLOCK(pmap); 2167} 2168 2169static vm_page_t 2170pmap_enter_quick_locked(pmap_t pmap, vm_offset_t va, vm_page_t m, 2171 vm_prot_t prot, vm_page_t mpte) 2172{ 2173 pt_entry_t *pte; 2174 vm_paddr_t pa; 2175 2176 KASSERT(va < kmi.clean_sva || va >= kmi.clean_eva || 2177 (m->oflags & VPO_UNMANAGED) != 0, 2178 ("pmap_enter_quick_locked: managed mapping within the clean submap")); 2179 rw_assert(&pvh_global_lock, RA_WLOCKED); 2180 PMAP_LOCK_ASSERT(pmap, MA_OWNED); 2181 2182 /* 2183 * In the case that a page table page is not resident, we are 2184 * creating it here. 2185 */ 2186 if (va < VM_MAXUSER_ADDRESS) { 2187 pd_entry_t *pde; 2188 unsigned ptepindex; 2189 2190 /* 2191 * Calculate pagetable page index 2192 */ 2193 ptepindex = pmap_pde_pindex(va); 2194 if (mpte && (mpte->pindex == ptepindex)) { 2195 mpte->wire_count++; 2196 } else { 2197 /* 2198 * Get the page directory entry 2199 */ 2200 pde = pmap_pde(pmap, va); 2201 2202 /* 2203 * If the page table page is mapped, we just 2204 * increment the hold count, and activate it. 2205 */ 2206 if (pde && *pde != 0) { 2207 mpte = PHYS_TO_VM_PAGE( 2208 MIPS_DIRECT_TO_PHYS(*pde)); 2209 mpte->wire_count++; 2210 } else { 2211 mpte = _pmap_allocpte(pmap, ptepindex, 2212 M_NOWAIT); 2213 if (mpte == NULL) 2214 return (mpte); 2215 } 2216 } 2217 } else { 2218 mpte = NULL; 2219 } 2220 2221 pte = pmap_pte(pmap, va); 2222 if (pte_test(pte, PTE_V)) { 2223 if (mpte != NULL) { 2224 mpte->wire_count--; 2225 mpte = NULL; 2226 } 2227 return (mpte); 2228 } 2229 2230 /* 2231 * Enter on the PV list if part of our managed memory. 2232 */ 2233 if ((m->oflags & VPO_UNMANAGED) == 0 && 2234 !pmap_try_insert_pv_entry(pmap, mpte, va, m)) { 2235 if (mpte != NULL) { 2236 pmap_unwire_pte_hold(pmap, va, mpte); 2237 mpte = NULL; 2238 } 2239 return (mpte); 2240 } 2241 2242 /* 2243 * Increment counters 2244 */ 2245 pmap->pm_stats.resident_count++; 2246 2247 pa = VM_PAGE_TO_PHYS(m); 2248 2249 /* 2250 * Now validate mapping with RO protection 2251 */ 2252 *pte = TLBLO_PA_TO_PFN(pa) | PTE_V; 2253 if ((m->oflags & VPO_UNMANAGED) == 0) 2254 *pte |= PTE_MANAGED; 2255 2256 if (is_cacheable_mem(pa)) 2257 *pte |= PTE_C_CACHE; 2258 else 2259 *pte |= PTE_C_UNCACHED; 2260 2261 if (is_kernel_pmap(pmap)) 2262 *pte |= PTE_G; 2263 else { 2264 *pte |= PTE_RO; 2265 /* 2266 * Sync I & D caches. Do this only if the target pmap 2267 * belongs to the current process. Otherwise, an 2268 * unresolvable TLB miss may occur. */ 2269 if (pmap == &curproc->p_vmspace->vm_pmap) { 2270 va &= ~PAGE_MASK; 2271 mips_icache_sync_range(va, PAGE_SIZE); 2272 mips_dcache_wbinv_range(va, PAGE_SIZE); 2273 } 2274 } 2275 return (mpte); 2276} 2277 2278/* 2279 * Make a temporary mapping for a physical address. This is only intended 2280 * to be used for panic dumps. 
2281 *
2282 * Use XKPHYS for 64 bit, and KSEG0 where possible for 32 bit.
2283 */
2284void *
2285pmap_kenter_temporary(vm_paddr_t pa, int i)
2286{
2287 vm_offset_t va;
2288
2289 if (i != 0)
2290 printf("%s: ERROR!!! More than one page of virtual address mapping not supported\n",
2291 __func__);
2292
2293 if (MIPS_DIRECT_MAPPABLE(pa)) {
2294 va = MIPS_PHYS_TO_DIRECT(pa);
2295 } else {
2296#ifndef __mips_n64 /* XXX : to be converted to new style */
2297 int cpu;
2298 register_t intr;
2299 struct local_sysmaps *sysm;
2300 pt_entry_t *pte, npte;
2301
2302 /* If this is used other than for dumps, we may need to leave
2303 * interrupts disabled on return. If crash dumps don't work when
2304 * we get to this point, we might want to consider this (leaving things
2305 * disabled as a starting point ;-)
2306 */
2307 intr = intr_disable();
2308 cpu = PCPU_GET(cpuid);
2309 sysm = &sysmap_lmem[cpu];
2310 /* Since this is for the debugger, no locks or any other fun */
2311 npte = TLBLO_PA_TO_PFN(pa) | PTE_D | PTE_V | PTE_G | PTE_W | PTE_C_CACHE;
2312 pte = pmap_pte(kernel_pmap, sysm->base);
2313 *pte = npte;
2314 sysm->valid1 = 1;
2315 pmap_update_page(kernel_pmap, sysm->base, npte);
2316 va = sysm->base;
2317 intr_restore(intr);
2318#endif
2319 }
2320 return ((void *)va);
2321}
2322
2323void
2324pmap_kenter_temporary_free(vm_paddr_t pa)
2325{
2326#ifndef __mips_n64 /* XXX : to be converted to new style */
2327 int cpu;
2328 register_t intr;
2329 struct local_sysmaps *sysm;
2330#endif
2331
2332 if (MIPS_DIRECT_MAPPABLE(pa)) {
2333 /* nothing to do for this case */
2334 return;
2335 }
2336#ifndef __mips_n64 /* XXX : to be converted to new style */
2337 cpu = PCPU_GET(cpuid);
2338 sysm = &sysmap_lmem[cpu];
2339 if (sysm->valid1) {
2340 pt_entry_t *pte;
2341
2342 intr = intr_disable();
2343 pte = pmap_pte(kernel_pmap, sysm->base);
2344 *pte = PTE_G;
2345 pmap_invalidate_page(kernel_pmap, sysm->base);
2346 intr_restore(intr);
2347 sysm->valid1 = 0;
2348 }
2349#endif
2350}
2351
2352/*
2353 * Maps a sequence of resident pages belonging to the same object.
2354 * The sequence begins with the given page m_start. This page is
2355 * mapped at the given virtual address start. Each subsequent page is
2356 * mapped at a virtual address that is offset from start by the same
2357 * amount as the page is offset from m_start within the object. The
2358 * last page in the sequence is the page with the largest offset from
2359 * m_start that can be mapped at a virtual address less than the given
2360 * virtual address end. Not every virtual page between start and end
2361 * is mapped; only those for which a resident page exists with the
2362 * corresponding offset from m_start are mapped.
2363 */
2364void
2365pmap_enter_object(pmap_t pmap, vm_offset_t start, vm_offset_t end,
2366 vm_page_t m_start, vm_prot_t prot)
2367{
2368 vm_page_t m, mpte;
2369 vm_pindex_t diff, psize;
2370
2371 VM_OBJECT_LOCK_ASSERT(m_start->object, MA_OWNED);
2372 psize = atop(end - start);
2373 mpte = NULL;
2374 m = m_start;
2375 rw_wlock(&pvh_global_lock);
2376 PMAP_LOCK(pmap);
2377 while (m != NULL && (diff = m->pindex - m_start->pindex) < psize) {
2378 mpte = pmap_enter_quick_locked(pmap, start + ptoa(diff), m,
2379 prot, mpte);
2380 m = TAILQ_NEXT(m, listq);
2381 }
2382 rw_wunlock(&pvh_global_lock);
2383 PMAP_UNLOCK(pmap);
2384}
2385
2386/*
2387 * pmap_object_init_pt preloads the ptes for a given object
2388 * into the specified pmap. This eliminates the blast of soft
2389 * faults on process startup and immediately after an mmap.
2390 */ 2391void 2392pmap_object_init_pt(pmap_t pmap, vm_offset_t addr, 2393 vm_object_t object, vm_pindex_t pindex, vm_size_t size) 2394{ 2395 VM_OBJECT_LOCK_ASSERT(object, MA_OWNED); 2396 KASSERT(object->type == OBJT_DEVICE || object->type == OBJT_SG, 2397 ("pmap_object_init_pt: non-device object")); 2398} 2399 2400/* 2401 * Routine: pmap_change_wiring 2402 * Function: Change the wiring attribute for a map/virtual-address 2403 * pair. 2404 * In/out conditions: 2405 * The mapping must already exist in the pmap. 2406 */ 2407void 2408pmap_change_wiring(pmap_t pmap, vm_offset_t va, boolean_t wired) 2409{ 2410 pt_entry_t *pte; 2411 2412 PMAP_LOCK(pmap); 2413 pte = pmap_pte(pmap, va); 2414 2415 if (wired && !pte_test(pte, PTE_W)) 2416 pmap->pm_stats.wired_count++; 2417 else if (!wired && pte_test(pte, PTE_W)) 2418 pmap->pm_stats.wired_count--; 2419 2420 /* 2421 * Wiring is not a hardware characteristic so there is no need to 2422 * invalidate TLB. 2423 */ 2424 if (wired) 2425 pte_set(pte, PTE_W); 2426 else 2427 pte_clear(pte, PTE_W); 2428 PMAP_UNLOCK(pmap); 2429} 2430 2431/* 2432 * Copy the range specified by src_addr/len 2433 * from the source map to the range dst_addr/len 2434 * in the destination map. 2435 * 2436 * This routine is only advisory and need not do anything. 2437 */ 2438 2439void 2440pmap_copy(pmap_t dst_pmap, pmap_t src_pmap, vm_offset_t dst_addr, 2441 vm_size_t len, vm_offset_t src_addr) 2442{ 2443} 2444 2445/* 2446 * pmap_zero_page zeros the specified hardware page by mapping 2447 * the page into KVM and using bzero to clear its contents. 2448 * 2449 * Use XKPHYS for 64 bit, and KSEG0 where possible for 32 bit. 2450 */ 2451void 2452pmap_zero_page(vm_page_t m) 2453{ 2454 vm_offset_t va; 2455 vm_paddr_t phys = VM_PAGE_TO_PHYS(m); 2456 2457 if (MIPS_DIRECT_MAPPABLE(phys)) { 2458 va = MIPS_PHYS_TO_DIRECT(phys); 2459 bzero((caddr_t)va, PAGE_SIZE); 2460 mips_dcache_wbinv_range(va, PAGE_SIZE); 2461 } else { 2462 va = pmap_lmem_map1(phys); 2463 bzero((caddr_t)va, PAGE_SIZE); 2464 mips_dcache_wbinv_range(va, PAGE_SIZE); 2465 pmap_lmem_unmap(); 2466 } 2467} 2468 2469/* 2470 * pmap_zero_page_area zeros the specified hardware page by mapping 2471 * the page into KVM and using bzero to clear its contents. 2472 * 2473 * off and size may not cover an area beyond a single hardware page. 2474 */ 2475void 2476pmap_zero_page_area(vm_page_t m, int off, int size) 2477{ 2478 vm_offset_t va; 2479 vm_paddr_t phys = VM_PAGE_TO_PHYS(m); 2480 2481 if (MIPS_DIRECT_MAPPABLE(phys)) { 2482 va = MIPS_PHYS_TO_DIRECT(phys); 2483 bzero((char *)(caddr_t)va + off, size); 2484 mips_dcache_wbinv_range(va + off, size); 2485 } else { 2486 va = pmap_lmem_map1(phys); 2487 bzero((char *)va + off, size); 2488 mips_dcache_wbinv_range(va + off, size); 2489 pmap_lmem_unmap(); 2490 } 2491} 2492 2493void 2494pmap_zero_page_idle(vm_page_t m) 2495{ 2496 vm_offset_t va; 2497 vm_paddr_t phys = VM_PAGE_TO_PHYS(m); 2498 2499 if (MIPS_DIRECT_MAPPABLE(phys)) { 2500 va = MIPS_PHYS_TO_DIRECT(phys); 2501 bzero((caddr_t)va, PAGE_SIZE); 2502 mips_dcache_wbinv_range(va, PAGE_SIZE); 2503 } else { 2504 va = pmap_lmem_map1(phys); 2505 bzero((caddr_t)va, PAGE_SIZE); 2506 mips_dcache_wbinv_range(va, PAGE_SIZE); 2507 pmap_lmem_unmap(); 2508 } 2509} 2510 2511/* 2512 * pmap_copy_page copies the specified (machine independent) 2513 * page by mapping the page into virtual memory and using 2514 * bcopy to copy the page, one machine dependent page at a 2515 * time. 2516 * 2517 * Use XKPHYS for 64 bit, and KSEG0 where possible for 32 bit. 
2518 */ 2519void 2520pmap_copy_page(vm_page_t src, vm_page_t dst) 2521{ 2522 vm_offset_t va_src, va_dst; 2523 vm_paddr_t phys_src = VM_PAGE_TO_PHYS(src); 2524 vm_paddr_t phys_dst = VM_PAGE_TO_PHYS(dst); 2525 2526 if (MIPS_DIRECT_MAPPABLE(phys_src) && MIPS_DIRECT_MAPPABLE(phys_dst)) { 2527 /* easy case, all can be accessed via KSEG0 */ 2528 /* 2529 * Flush all caches for VA that are mapped to this page 2530 * to make sure that data in SDRAM is up to date 2531 */ 2532 pmap_flush_pvcache(src); 2533 mips_dcache_wbinv_range_index( 2534 MIPS_PHYS_TO_DIRECT(phys_dst), PAGE_SIZE); 2535 va_src = MIPS_PHYS_TO_DIRECT(phys_src); 2536 va_dst = MIPS_PHYS_TO_DIRECT(phys_dst); 2537 bcopy((caddr_t)va_src, (caddr_t)va_dst, PAGE_SIZE); 2538 mips_dcache_wbinv_range(va_dst, PAGE_SIZE); 2539 } else { 2540 va_src = pmap_lmem_map2(phys_src, phys_dst); 2541 va_dst = va_src + PAGE_SIZE; 2542 bcopy((void *)va_src, (void *)va_dst, PAGE_SIZE); 2543 mips_dcache_wbinv_range(va_dst, PAGE_SIZE); 2544 pmap_lmem_unmap(); 2545 } 2546} 2547 2548/* 2549 * Returns true if the pmap's pv is one of the first 2550 * 16 pvs linked to from this page. This count may 2551 * be changed upwards or downwards in the future; it 2552 * is only necessary that true be returned for a small 2553 * subset of pmaps for proper page aging. 2554 */ 2555boolean_t 2556pmap_page_exists_quick(pmap_t pmap, vm_page_t m) 2557{ 2558 pv_entry_t pv; 2559 int loops = 0; 2560 boolean_t rv; 2561 2562 KASSERT((m->oflags & VPO_UNMANAGED) == 0, 2563 ("pmap_page_exists_quick: page %p is not managed", m)); 2564 rv = FALSE; 2565 rw_wlock(&pvh_global_lock); 2566 TAILQ_FOREACH(pv, &m->md.pv_list, pv_list) { 2567 if (PV_PMAP(pv) == pmap) { 2568 rv = TRUE; 2569 break; 2570 } 2571 loops++; 2572 if (loops >= 16) 2573 break; 2574 } 2575 rw_wunlock(&pvh_global_lock); 2576 return (rv); 2577} 2578 2579/* 2580 * Remove all pages from specified address space 2581 * this aids process exit speeds. Also, this code 2582 * is special cased for current process only, but 2583 * can have the more generic (and slightly slower) 2584 * mode enabled. This is much faster than pmap_remove 2585 * in the case of running down an entire address space. 2586 */ 2587void 2588pmap_remove_pages(pmap_t pmap) 2589{ 2590 pd_entry_t *pde; 2591 pt_entry_t *pte, tpte; 2592 pv_entry_t pv; 2593 vm_page_t m; 2594 struct pv_chunk *pc, *npc; 2595 u_long inuse, bitmask; 2596 int allfree, bit, field, idx; 2597 2598 if (pmap != vmspace_pmap(curthread->td_proc->p_vmspace)) { 2599 printf("warning: pmap_remove_pages called with non-current pmap\n"); 2600 return; 2601 } 2602 rw_wlock(&pvh_global_lock); 2603 PMAP_LOCK(pmap); 2604 TAILQ_FOREACH_SAFE(pc, &pmap->pm_pvchunk, pc_list, npc) { 2605 allfree = 1; 2606 for (field = 0; field < _NPCM; field++) { 2607 inuse = ~pc->pc_map[field] & pc_freemask[field]; 2608 while (inuse != 0) { 2609 bit = ffsl(inuse) - 1; 2610 bitmask = 1UL << bit; 2611 idx = field * sizeof(inuse) * NBBY + bit; 2612 pv = &pc->pc_pventry[idx]; 2613 inuse &= ~bitmask; 2614 2615 pde = pmap_pde(pmap, pv->pv_va); 2616 KASSERT(pde != NULL && *pde != 0, 2617 ("pmap_remove_pages: pde")); 2618 pte = pmap_pde_to_pte(pde, pv->pv_va); 2619 if (!pte_test(pte, PTE_V)) 2620 panic("pmap_remove_pages: bad pte"); 2621 tpte = *pte; 2622 2623/* 2624 * We cannot remove wired pages from a process' mapping at this time 2625 */ 2626 if (pte_test(&tpte, PTE_W)) { 2627 allfree = 0; 2628 continue; 2629 } 2630 *pte = is_kernel_pmap(pmap) ? 
PTE_G : 0; 2631 2632 m = PHYS_TO_VM_PAGE(TLBLO_PTE_TO_PA(tpte)); 2633 KASSERT(m != NULL, 2634 ("pmap_remove_pages: bad tpte %#jx", 2635 (uintmax_t)tpte)); 2636 2637 /* 2638 * Update the vm_page_t clean and reference bits. 2639 */ 2640 if (pte_test(&tpte, PTE_D)) 2641 vm_page_dirty(m); 2642 2643 /* Mark free */ 2644 PV_STAT(pv_entry_frees++); 2645 PV_STAT(pv_entry_spare++); 2646 pv_entry_count--; 2647 pc->pc_map[field] |= bitmask; 2648 pmap->pm_stats.resident_count--; 2649 TAILQ_REMOVE(&m->md.pv_list, pv, pv_list); 2650 if (TAILQ_EMPTY(&m->md.pv_list)) 2651 vm_page_aflag_clear(m, PGA_WRITEABLE); 2652 pmap_unuse_pt(pmap, pv->pv_va, *pde); 2653 } 2654 } 2655 if (allfree) { 2656 TAILQ_REMOVE(&pmap->pm_pvchunk, pc, pc_list); 2657 free_pv_chunk(pc); 2658 } 2659 } 2660 pmap_invalidate_all(pmap); 2661 PMAP_UNLOCK(pmap); 2662 rw_wunlock(&pvh_global_lock); 2663} 2664 2665/* 2666 * pmap_testbit tests bits in pte's 2667 */ 2668static boolean_t 2669pmap_testbit(vm_page_t m, int bit) 2670{ 2671 pv_entry_t pv; 2672 pmap_t pmap; 2673 pt_entry_t *pte; 2674 boolean_t rv = FALSE; 2675 2676 if (m->oflags & VPO_UNMANAGED) 2677 return (rv); 2678 2679 rw_assert(&pvh_global_lock, RA_WLOCKED); 2680 TAILQ_FOREACH(pv, &m->md.pv_list, pv_list) { 2681 pmap = PV_PMAP(pv); 2682 PMAP_LOCK(pmap); 2683 pte = pmap_pte(pmap, pv->pv_va); 2684 rv = pte_test(pte, bit); 2685 PMAP_UNLOCK(pmap); 2686 if (rv) 2687 break; 2688 } 2689 return (rv); 2690} 2691 2692/* 2693 * pmap_page_wired_mappings: 2694 * 2695 * Return the number of managed mappings to the given physical page 2696 * that are wired. 2697 */ 2698int 2699pmap_page_wired_mappings(vm_page_t m) 2700{ 2701 pv_entry_t pv; 2702 pmap_t pmap; 2703 pt_entry_t *pte; 2704 int count; 2705 2706 count = 0; 2707 if ((m->oflags & VPO_UNMANAGED) != 0) 2708 return (count); 2709 rw_wlock(&pvh_global_lock); 2710 TAILQ_FOREACH(pv, &m->md.pv_list, pv_list) { 2711 pmap = PV_PMAP(pv); 2712 PMAP_LOCK(pmap); 2713 pte = pmap_pte(pmap, pv->pv_va); 2714 if (pte_test(pte, PTE_W)) 2715 count++; 2716 PMAP_UNLOCK(pmap); 2717 } 2718 rw_wunlock(&pvh_global_lock); 2719 return (count); 2720} 2721 2722/* 2723 * Clear the write and modified bits in each of the given page's mappings. 2724 */ 2725void 2726pmap_remove_write(vm_page_t m) 2727{ 2728 pmap_t pmap; 2729 pt_entry_t pbits, *pte; 2730 pv_entry_t pv; 2731 2732 KASSERT((m->oflags & VPO_UNMANAGED) == 0, 2733 ("pmap_remove_write: page %p is not managed", m)); 2734 2735 /* 2736 * If the page is not VPO_BUSY, then PGA_WRITEABLE cannot be set by 2737 * another thread while the object is locked. Thus, if PGA_WRITEABLE 2738 * is clear, no page table entries need updating. 2739 */ 2740 VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED); 2741 if ((m->oflags & VPO_BUSY) == 0 && 2742 (m->aflags & PGA_WRITEABLE) == 0) 2743 return; 2744 rw_wlock(&pvh_global_lock); 2745 TAILQ_FOREACH(pv, &m->md.pv_list, pv_list) { 2746 pmap = PV_PMAP(pv); 2747 PMAP_LOCK(pmap); 2748 pte = pmap_pte(pmap, pv->pv_va); 2749 KASSERT(pte != NULL && pte_test(pte, PTE_V), 2750 ("page on pv_list has no pte")); 2751 pbits = *pte; 2752 if (pte_test(&pbits, PTE_D)) { 2753 pte_clear(&pbits, PTE_D); 2754 vm_page_dirty(m); 2755 } 2756 pte_set(&pbits, PTE_RO); 2757 if (pbits != *pte) { 2758 *pte = pbits; 2759 pmap_update_page(pmap, pv->pv_va, pbits); 2760 } 2761 PMAP_UNLOCK(pmap); 2762 } 2763 vm_page_aflag_clear(m, PGA_WRITEABLE); 2764 rw_wunlock(&pvh_global_lock); 2765} 2766 2767/* 2768 * pmap_ts_referenced: 2769 * 2770 * Return the count of reference bits for a page, clearing all of them. 
2771 */
2772int
2773pmap_ts_referenced(vm_page_t m)
2774{
2775
2776 KASSERT((m->oflags & VPO_UNMANAGED) == 0,
2777 ("pmap_ts_referenced: page %p is not managed", m));
2778 if (m->md.pv_flags & PV_TABLE_REF) {
2779 rw_wlock(&pvh_global_lock);
2780 m->md.pv_flags &= ~PV_TABLE_REF;
2781 rw_wunlock(&pvh_global_lock);
2782 return (1);
2783 }
2784 return (0);
2785}
2786
2787/*
2788 * pmap_is_modified:
2789 *
2790 * Return whether or not the specified physical page was modified
2791 * in any physical maps.
2792 */
2793boolean_t
2794pmap_is_modified(vm_page_t m)
2795{
2796 boolean_t rv;
2797
2798 KASSERT((m->oflags & VPO_UNMANAGED) == 0,
2799 ("pmap_is_modified: page %p is not managed", m));
2800
2801 /*
2802 * If the page is not VPO_BUSY, then PGA_WRITEABLE cannot be
2803 * concurrently set while the object is locked. Thus, if PGA_WRITEABLE
2804 * is clear, no PTEs can have PTE_D set.
2805 */
2806 VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED);
2807 if ((m->oflags & VPO_BUSY) == 0 &&
2808 (m->aflags & PGA_WRITEABLE) == 0)
2809 return (FALSE);
2810 rw_wlock(&pvh_global_lock);
2811 rv = pmap_testbit(m, PTE_D);
2812 rw_wunlock(&pvh_global_lock);
2813 return (rv);
2814}
2815
2816/* N/C */
2817
2818/*
2819 * pmap_is_prefaultable:
2820 *
2821 * Return whether or not the specified virtual address is eligible
2822 * for prefault.
2823 */
2824boolean_t
2825pmap_is_prefaultable(pmap_t pmap, vm_offset_t addr)
2826{
2827 pd_entry_t *pde;
2828 pt_entry_t *pte;
2829 boolean_t rv;
2830
2831 rv = FALSE;
2832 PMAP_LOCK(pmap);
2833 pde = pmap_pde(pmap, addr);
2834 if (pde != NULL && *pde != 0) {
2835 pte = pmap_pde_to_pte(pde, addr);
2836 rv = (*pte == 0);
2837 }
2838 PMAP_UNLOCK(pmap);
2839 return (rv);
2840}
2841
2842/*
2843 * Clear the modify bits on the specified physical page.
2844 */
2845void
2846pmap_clear_modify(vm_page_t m)
2847{
2848 pmap_t pmap;
2849 pt_entry_t *pte;
2850 pv_entry_t pv;
2851
2852 KASSERT((m->oflags & VPO_UNMANAGED) == 0,
2853 ("pmap_clear_modify: page %p is not managed", m));
2854 VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED);
2855 KASSERT((m->oflags & VPO_BUSY) == 0,
2856 ("pmap_clear_modify: page %p is busy", m));
2857
2858 /*
2859 * If the page is not PGA_WRITEABLE, then no PTEs can have PTE_D set.
2860 * If the object containing the page is locked and the page is not
2861 * VPO_BUSY, then PGA_WRITEABLE cannot be concurrently set.
2862 */
2863 if ((m->aflags & PGA_WRITEABLE) == 0)
2864 return;
2865 rw_wlock(&pvh_global_lock);
2866 TAILQ_FOREACH(pv, &m->md.pv_list, pv_list) {
2867 pmap = PV_PMAP(pv);
2868 PMAP_LOCK(pmap);
2869 pte = pmap_pte(pmap, pv->pv_va);
2870 if (pte_test(pte, PTE_D)) {
2871 pte_clear(pte, PTE_D);
2872 pmap_update_page(pmap, pv->pv_va, *pte);
2873 }
2874 PMAP_UNLOCK(pmap);
2875 }
2876 rw_wunlock(&pvh_global_lock);
2877}
2878
2879/*
2880 * pmap_is_referenced:
2881 *
2882 * Return whether or not the specified physical page was referenced
2883 * in any physical maps.
2884 */
2885boolean_t
2886pmap_is_referenced(vm_page_t m)
2887{
2888
2889 KASSERT((m->oflags & VPO_UNMANAGED) == 0,
2890 ("pmap_is_referenced: page %p is not managed", m));
2891 return ((m->md.pv_flags & PV_TABLE_REF) != 0);
2892}
2893
2894/*
2895 * pmap_clear_reference:
2896 *
2897 * Clear the reference bit on the specified physical page.
2898 */
2899void
2900pmap_clear_reference(vm_page_t m)
2901{
2902
2903 KASSERT((m->oflags & VPO_UNMANAGED) == 0,
2904 ("pmap_clear_reference: page %p is not managed", m));
2905 rw_wlock(&pvh_global_lock);
2906 if (m->md.pv_flags & PV_TABLE_REF) {
2907 m->md.pv_flags &= ~PV_TABLE_REF;
2908 }
2909 rw_wunlock(&pvh_global_lock);
2910}
2911
2912/*
2913 * Miscellaneous support routines follow
2914 */
2915
2923/*
2924 * Map a set of physical memory pages into the kernel virtual
2925 * address space. Return a pointer to where it is mapped. This
2926 * routine is intended to be used for mapping device memory,
2927 * NOT real memory.
2928 *
2929 * Use XKPHYS uncached for 64 bit, and KSEG1 where possible for 32 bit.
2930 */
2931void *
2932pmap_mapdev(vm_paddr_t pa, vm_size_t size)
2933{
2934 vm_offset_t va, tmpva, offset;
2935
2936 /*
2937 * KSEG1 maps only the first 512M of the physical address space. For
2938 * pa > 0x20000000 we should make a proper mapping using pmap_kenter.
2939 */
2940 if (MIPS_DIRECT_MAPPABLE(pa + size - 1))
2941 return ((void *)MIPS_PHYS_TO_DIRECT_UNCACHED(pa));
2942 else {
2943 offset = pa & PAGE_MASK;
2944 size = roundup(size + offset, PAGE_SIZE);
2945
2946 va = kmem_alloc_nofault(kernel_map, size);
2947 if (!va)
2948 panic("pmap_mapdev: Couldn't alloc kernel virtual memory");
2949 pa = trunc_page(pa);
2950 for (tmpva = va; size > 0;) {
2951 pmap_kenter_attr(tmpva, pa, PTE_C_UNCACHED);
2952 size -= PAGE_SIZE;
2953 tmpva += PAGE_SIZE;
2954 pa += PAGE_SIZE;
2955 }
2956 }
2957
2958 return ((void *)(va + offset));
2959}
2960
2961void
2962pmap_unmapdev(vm_offset_t va, vm_size_t size)
2963{
2964#ifndef __mips_n64
2965 vm_offset_t base, offset, tmpva;
2966
2967 /* If the address is within KSEG1 then there is nothing to do */
2968 if (va >= MIPS_KSEG1_START && va <= MIPS_KSEG1_END)
2969 return;
2970
2971 base = trunc_page(va);
2972 offset = va & PAGE_MASK;
2973 size = roundup(size + offset, PAGE_SIZE);
2974 for (tmpva = base; tmpva < base + size; tmpva += PAGE_SIZE)
2975 pmap_kremove(tmpva);
2976 kmem_free(kernel_map, base, size);
2977#endif
2978}
2979
2980/*
2981 * perform the pmap work for mincore
2982 */
2983int
2984pmap_mincore(pmap_t pmap, vm_offset_t addr, vm_paddr_t *locked_pa)
2985{
2986 pt_entry_t *ptep, pte;
2987 vm_paddr_t pa;
2988 vm_page_t m;
2989 int val;
2990
2991 PMAP_LOCK(pmap);
2992retry:
2993 ptep = pmap_pte(pmap, addr);
2994 pte = (ptep != NULL) ? *ptep : 0;
2995 if (!pte_test(&pte, PTE_V)) {
2996 val = 0;
2997 goto out;
2998 }
2999 val = MINCORE_INCORE;
3000 if (pte_test(&pte, PTE_D))
3001 val |= MINCORE_MODIFIED | MINCORE_MODIFIED_OTHER;
3002 pa = TLBLO_PTE_TO_PA(pte);
3003 if (pte_test(&pte, PTE_MANAGED)) {
3004 /*
3005 * This may falsely report the given address as
3006 * MINCORE_REFERENCED. Unfortunately, due to the lack of
3007 * per-PTE reference information, it is impossible to
3008 * determine if the address is MINCORE_REFERENCED.
3009 */
3010 m = PHYS_TO_VM_PAGE(pa);
3011 if ((m->aflags & PGA_REFERENCED) != 0)
3012 val |= MINCORE_REFERENCED | MINCORE_REFERENCED_OTHER;
3013 }
3014 if ((val & (MINCORE_MODIFIED_OTHER | MINCORE_REFERENCED_OTHER)) !=
3015 (MINCORE_MODIFIED_OTHER | MINCORE_REFERENCED_OTHER) &&
3016 pte_test(&pte, PTE_MANAGED)) {
3017 /* Ensure that "PHYS_TO_VM_PAGE(pa)->object" doesn't change.
*/ 3018 if (vm_page_pa_tryrelock(pmap, pa, locked_pa)) 3019 goto retry; 3020 } else 3021out: 3022 PA_UNLOCK_COND(*locked_pa); 3023 PMAP_UNLOCK(pmap); 3024 return (val); 3025} 3026 3027void 3028pmap_activate(struct thread *td) 3029{ 3030 pmap_t pmap, oldpmap; 3031 struct proc *p = td->td_proc; 3032 u_int cpuid; 3033 3034 critical_enter(); 3035 3036 pmap = vmspace_pmap(p->p_vmspace); 3037 oldpmap = PCPU_GET(curpmap); 3038 cpuid = PCPU_GET(cpuid); 3039 3040 if (oldpmap) 3041 CPU_CLR_ATOMIC(cpuid, &oldpmap->pm_active); 3042 CPU_SET_ATOMIC(cpuid, &pmap->pm_active); 3043 pmap_asid_alloc(pmap); 3044 if (td == curthread) { 3045 PCPU_SET(segbase, pmap->pm_segtab); 3046 mips_wr_entryhi(pmap->pm_asid[cpuid].asid); 3047 } 3048 3049 PCPU_SET(curpmap, pmap); 3050 critical_exit(); 3051} 3052 3053void 3054pmap_sync_icache(pmap_t pm, vm_offset_t va, vm_size_t sz) 3055{ 3056} 3057 3058/* 3059 * Increase the starting virtual address of the given mapping if a 3060 * different alignment might result in more superpage mappings. 3061 */ 3062void 3063pmap_align_superpage(vm_object_t object, vm_ooffset_t offset, 3064 vm_offset_t *addr, vm_size_t size) 3065{ 3066 vm_offset_t superpage_offset; 3067 3068 if (size < NBSEG) 3069 return; 3070 if (object != NULL && (object->flags & OBJ_COLORED) != 0) 3071 offset += ptoa(object->pg_color); 3072 superpage_offset = offset & SEGMASK; 3073 if (size - ((NBSEG - superpage_offset) & SEGMASK) < NBSEG || 3074 (*addr & SEGMASK) == superpage_offset) 3075 return; 3076 if ((*addr & SEGMASK) < superpage_offset) 3077 *addr = (*addr & ~SEGMASK) + superpage_offset; 3078 else 3079 *addr = ((*addr + SEGMASK) & ~SEGMASK) + superpage_offset; 3080} 3081 3082/* 3083 * Increase the starting virtual address of the given mapping so 3084 * that it is aligned to not be the second page in a TLB entry. 3085 * This routine assumes that the length is appropriately-sized so 3086 * that the allocation does not share a TLB entry at all if required. 
3087 */ 3088void 3089pmap_align_tlb(vm_offset_t *addr) 3090{ 3091 if ((*addr & PAGE_SIZE) == 0) 3092 return; 3093 *addr += PAGE_SIZE; 3094 return; 3095} 3096 3097#ifdef DDB 3098DB_SHOW_COMMAND(ptable, ddb_pid_dump) 3099{ 3100 pmap_t pmap; 3101 struct thread *td = NULL; 3102 struct proc *p; 3103 int i, j, k; 3104 vm_paddr_t pa; 3105 vm_offset_t va; 3106 3107 if (have_addr) { 3108 td = db_lookup_thread(addr, TRUE); 3109 if (td == NULL) { 3110 db_printf("Invalid pid or tid"); 3111 return; 3112 } 3113 p = td->td_proc; 3114 if (p->p_vmspace == NULL) { 3115 db_printf("No vmspace for process"); 3116 return; 3117 } 3118 pmap = vmspace_pmap(p->p_vmspace); 3119 } else 3120 pmap = kernel_pmap; 3121 3122 db_printf("pmap:%p segtab:%p asid:%x generation:%x\n", 3123 pmap, pmap->pm_segtab, pmap->pm_asid[0].asid, 3124 pmap->pm_asid[0].gen); 3125 for (i = 0; i < NPDEPG; i++) { 3126 pd_entry_t *pdpe; 3127 pt_entry_t *pde; 3128 pt_entry_t pte; 3129 3130 pdpe = (pd_entry_t *)pmap->pm_segtab[i]; 3131 if (pdpe == NULL) 3132 continue; 3133 db_printf("[%4d] %p\n", i, pdpe); 3134#ifdef __mips_n64 3135 for (j = 0; j < NPDEPG; j++) { 3136 pde = (pt_entry_t *)pdpe[j]; 3137 if (pde == NULL) 3138 continue; 3139 db_printf("\t[%4d] %p\n", j, pde); 3140#else 3141 { 3142 j = 0; 3143 pde = (pt_entry_t *)pdpe; 3144#endif 3145 for (k = 0; k < NPTEPG; k++) { 3146 pte = pde[k]; 3147 if (pte == 0 || !pte_test(&pte, PTE_V)) 3148 continue; 3149 pa = TLBLO_PTE_TO_PA(pte); 3150 va = ((u_long)i << SEGSHIFT) | (j << PDRSHIFT) | (k << PAGE_SHIFT); 3151 db_printf("\t\t[%04d] va: %p pte: %8jx pa:%jx\n", 3152 k, (void *)va, (uintmax_t)pte, (uintmax_t)pa); 3153 } 3154 } 3155 } 3156} 3157#endif 3158 3159#if defined(DEBUG) 3160 3161static void pads(pmap_t pm); 3162void pmap_pvdump(vm_offset_t pa); 3163 3164/* print address space of pmap*/ 3165static void 3166pads(pmap_t pm) 3167{ 3168 unsigned va, i, j; 3169 pt_entry_t *ptep; 3170 3171 if (pm == kernel_pmap) 3172 return; 3173 for (i = 0; i < NPTEPG; i++) 3174 if (pm->pm_segtab[i]) 3175 for (j = 0; j < NPTEPG; j++) { 3176 va = (i << SEGSHIFT) + (j << PAGE_SHIFT); 3177 if (pm == kernel_pmap && va < KERNBASE) 3178 continue; 3179 if (pm != kernel_pmap && 3180 va >= VM_MAXUSER_ADDRESS) 3181 continue; 3182 ptep = pmap_pte(pm, va); 3183 if (pte_test(ptep, PTE_V)) 3184 printf("%x:%x ", va, *(int *)ptep); 3185 } 3186 3187} 3188 3189void 3190pmap_pvdump(vm_offset_t pa) 3191{ 3192 register pv_entry_t pv; 3193 vm_page_t m; 3194 3195 printf("pa %x", pa); 3196 m = PHYS_TO_VM_PAGE(pa); 3197 for (pv = TAILQ_FIRST(&m->md.pv_list); pv; 3198 pv = TAILQ_NEXT(pv, pv_list)) { 3199 printf(" -> pmap %p, va %x", (void *)pv->pv_pmap, pv->pv_va); 3200 pads(pv->pv_pmap); 3201 } 3202 printf(" "); 3203} 3204 3205/* N/C */ 3206#endif 3207 3208 3209/* 3210 * Allocate TLB address space tag (called ASID or TLBPID) and return it. 3211 * It takes almost as much or more time to search the TLB for a 3212 * specific ASID and flush those entries as it does to flush the entire TLB. 3213 * Therefore, when we allocate a new ASID, we just take the next number. When 3214 * we run out of numbers, we flush the TLB, increment the generation count 3215 * and start over. ASID zero is reserved for kernel use. 
3216 */
3217static void
3218pmap_asid_alloc(pmap_t pmap)
3220{
3221 if (pmap->pm_asid[PCPU_GET(cpuid)].asid == PMAP_ASID_RESERVED ||
3222 pmap->pm_asid[PCPU_GET(cpuid)].gen != PCPU_GET(asid_generation)) {
3224 if (PCPU_GET(next_asid) == pmap_max_asid) {
3225 tlb_invalidate_all_user(NULL);
3226 PCPU_SET(asid_generation,
3227 (PCPU_GET(asid_generation) + 1) & ASIDGEN_MASK);
3228 if (PCPU_GET(asid_generation) == 0) {
3229 PCPU_SET(asid_generation, 1);
3230 }
3231 PCPU_SET(next_asid, 1); /* 0 means invalid */
3232 }
3233 pmap->pm_asid[PCPU_GET(cpuid)].asid = PCPU_GET(next_asid);
3234 pmap->pm_asid[PCPU_GET(cpuid)].gen = PCPU_GET(asid_generation);
3235 PCPU_SET(next_asid, PCPU_GET(next_asid) + 1);
3236 }
3237}
3238
3239static pt_entry_t
3240init_pte_prot(vm_page_t m, vm_prot_t access, vm_prot_t prot)
3241{
3242 pt_entry_t rw;
3243
3244 if (!(prot & VM_PROT_WRITE))
3245 rw = PTE_V | PTE_RO;
3246 else if ((m->oflags & VPO_UNMANAGED) == 0) {
3247 if ((access & VM_PROT_WRITE) != 0)
3248 rw = PTE_V | PTE_D;
3249 else
3250 rw = PTE_V;
3251 vm_page_aflag_set(m, PGA_WRITEABLE);
3252 } else
3253 /* Needn't emulate a modified bit for unmanaged pages. */
3254 rw = PTE_V | PTE_D;
3255 return (rw);
3256}
3257
3258/*
3259 * pmap_emulate_modified: do dirty bit emulation
3260 *
3261 * On SMP, update just the local TLB; other CPUs will update their
3262 * TLBs from the PTE lazily when they take the exception.
3263 * Returns 0 on success, 1 if the page is read-only and we
3264 * need to fault.
3265 */
3266int
3267pmap_emulate_modified(pmap_t pmap, vm_offset_t va)
3268{
3269 vm_page_t m;
3270 pt_entry_t *pte;
3271 vm_paddr_t pa;
3272
3273 PMAP_LOCK(pmap);
3274 pte = pmap_pte(pmap, va);
3275 if (pte == NULL)
3276 panic("pmap_emulate_modified: can't find PTE");
3277#ifdef SMP
3278 /* It is possible that some other CPU changed m-bit */
3279 if (!pte_test(pte, PTE_V) || pte_test(pte, PTE_D)) {
3280 tlb_update(pmap, va, *pte);
3281 PMAP_UNLOCK(pmap);
3282 return (0);
3283 }
3284#else
3285 if (!pte_test(pte, PTE_V) || pte_test(pte, PTE_D))
3286 panic("pmap_emulate_modified: invalid pte");
3287#endif
3288 if (pte_test(pte, PTE_RO)) {
3289 /* write to read only page in the kernel */
3290 PMAP_UNLOCK(pmap);
3291 return (1);
3292 }
3293 pte_set(pte, PTE_D);
3294 tlb_update(pmap, va, *pte);
3295 if (!pte_test(pte, PTE_MANAGED))
3296 panic("pmap_emulate_modified: unmanaged page");
3297 pa = TLBLO_PTE_TO_PA(*pte);
3298 m = PHYS_TO_VM_PAGE(pa);
3299 m->md.pv_flags |= PV_TABLE_REF;
3300 PMAP_UNLOCK(pmap);
3301 return (0);
3302}
3303
3304/*
3305 * Routine: pmap_kextract
3306 * Function:
3307 * Extract the physical page address associated with the given
3308 * virtual address.
3309 */
3310vm_paddr_t
3311pmap_kextract(vm_offset_t va)
3312{
3313 int mapped;
3314
3315 /*
3316 * First, the direct-mapped regions.
3317 */
3318#if defined(__mips_n64)
3319 if (va >= MIPS_XKPHYS_START && va < MIPS_XKPHYS_END)
3320 return (MIPS_XKPHYS_TO_PHYS(va));
3321#endif
3322 if (va >= MIPS_KSEG0_START && va < MIPS_KSEG0_END)
3323 return (MIPS_KSEG0_TO_PHYS(va));
3324
3325 if (va >= MIPS_KSEG1_START && va < MIPS_KSEG1_END)
3326 return (MIPS_KSEG1_TO_PHYS(va));
3327
3328 /*
3329 * User virtual addresses.
3330 */
3331 if (va < VM_MAXUSER_ADDRESS) {
3332 pt_entry_t *ptep;
3333
3334 if (curproc && curproc->p_vmspace) {
3335 ptep = pmap_pte(&curproc->p_vmspace->vm_pmap, va);
3336 if (ptep) {
3337 return (TLBLO_PTE_TO_PA(*ptep) |
3338 (va & PAGE_MASK));
3339 }
3340 return (0);
3341 }
3342 }
3343
3344 /*
3345 * Should be kernel virtual here, otherwise fail
3346 */
3347 mapped = (va >= MIPS_KSEG2_START && va < MIPS_KSEG2_END);
3348#if defined(__mips_n64)
3349 mapped = mapped || (va >= MIPS_XKSEG_START && va < MIPS_XKSEG_END);
3350#endif
3351 /*
3352 * Kernel virtual.
3353 */
3354
3355 if (mapped) {
3356 pt_entry_t *ptep;
3357
3358 /* Is the kernel pmap initialized? */
3359 if (!CPU_EMPTY(&kernel_pmap->pm_active)) {
3360 /* It's inside the virtual address range */
3361 ptep = pmap_pte(kernel_pmap, va);
3362 if (ptep) {
3363 return (TLBLO_PTE_TO_PA(*ptep) |
3364 (va & PAGE_MASK));
3365 }
3366 }
3367 return (0);
3368 }
3369
3370 panic("%s for unknown address space %p.", __func__, (void *)va);
3371}
3372
3373
3374void
3375pmap_flush_pvcache(vm_page_t m)
3376{
3377 pv_entry_t pv;
3378
3379 if (m != NULL) {
3380 for (pv = TAILQ_FIRST(&m->md.pv_list); pv;
3381 pv = TAILQ_NEXT(pv, pv_list)) {
3382 mips_dcache_wbinv_range_index(pv->pv_va, PAGE_SIZE);
3383 }
3384 }
3385}
3386
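
/*
 * Illustrative sketch only -- a minimal, stand-alone user-space model of the
 * ASID generation scheme described above pmap_asid_alloc().  Every name below
 * (struct asid_state, asid_alloc_model, MAX_ASID, GEN_MASK,
 * tlb_flush_all_user_stub) is hypothetical and exists only for this example;
 * the real code keeps the equivalent state per CPU (next_asid,
 * asid_generation) and flushes the user TLB entries on rollover.
 */
#include <stdio.h>

#define MAX_ASID	256	/* assumed size of the ASID space */
#define GEN_MASK	0xff	/* assumed generation wrap mask */

struct asid_state {
	unsigned next_asid;	/* next free ASID; 0 is reserved/invalid */
	unsigned generation;	/* bumped each time the ASID space recycles */
};

/* Stand-in for flushing all user TLB entries on ASID rollover. */
static void
tlb_flush_all_user_stub(void)
{
	printf("-- ASID space exhausted: flush user TLB, new generation --\n");
}

/* Hand out the next ASID; recycle the whole space when it runs out. */
static unsigned
asid_alloc_model(struct asid_state *st, unsigned *gen_out)
{
	if (st->next_asid == MAX_ASID) {
		tlb_flush_all_user_stub();
		st->generation = (st->generation + 1) & GEN_MASK;
		if (st->generation == 0)	/* generation 0 is never used */
			st->generation = 1;
		st->next_asid = 1;		/* ASID 0 stays reserved */
	}
	*gen_out = st->generation;
	return (st->next_asid++);
}

int
main(void)
{
	struct asid_state st = { .next_asid = 1, .generation = 1 };
	unsigned asid, gen;
	int i;

	/* Allocate a few more ASIDs than exist to show the rollover. */
	for (i = 0; i < MAX_ASID + 2; i++) {
		asid = asid_alloc_model(&st, &gen);
		if (i < 2 || i >= MAX_ASID - 2)
			printf("allocation %3d -> asid %3u (generation %u)\n",
			    i, asid, gen);
	}
	return (0);
}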