pmap.c revision 233308

/*
 * Copyright (c) 1991 Regents of the University of California.
 * All rights reserved.
 * Copyright (c) 1994 John S. Dyson
 * All rights reserved.
 * Copyright (c) 1994 David Greenman
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * the Systems Programming Group of the University of Utah Computer
 * Science Department and William Jolitz of UUNET Technologies Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	from: @(#)pmap.c	7.7 (Berkeley)	5/12/91
 *	from: src/sys/i386/i386/pmap.c,v 1.250.2.8 2000/11/21 00:09:14 ps
 *	JNPR: pmap.c,v 1.11.2.1 2007/08/16 11:51:06 girish
 */

/*
 * Manages physical address maps.
 *
 * In addition to hardware address maps, this
 * module is called upon to provide software-use-only
 * maps which may or may not be stored in the same
 * form as hardware maps.  These pseudo-maps are
 * used to store intermediate results from copy
 * operations to and from address spaces.
 *
 * Since the information managed by this module is
 * also stored by the logical address mapping module,
 * this module may throw away valid virtual-to-physical
 * mappings at almost any time.  However, invalidations
 * of virtual-to-physical mappings must be done as
 * requested.
 *
 * In order to cope with hardware architectures which
 * make virtual-to-physical map invalidates expensive,
 * this module may delay invalidate or reduced protection
 * operations until such time as they are actually
 * necessary.  This module is given full information as
 * to which processors are currently using which maps,
 * and to when physical maps must be made correct.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: head/sys/mips/mips/pmap.c 233308 2012-03-22 15:14:10Z jchandra $");

#include "opt_ddb.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/msgbuf.h>
#include <sys/vmmeter.h>
#include <sys/mman.h>
#include <sys/smp.h>
#ifdef DDB
#include <ddb/ddb.h>
#endif

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/vm_phys.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <vm/vm_kern.h>
#include <vm/vm_page.h>
#include <vm/vm_map.h>
#include <vm/vm_object.h>
#include <vm/vm_extern.h>
#include <vm/vm_pageout.h>
#include <vm/vm_pager.h>
#include <vm/uma.h>
#include <sys/pcpu.h>
#include <sys/sched.h>
#ifdef SMP
#include <sys/smp.h>
#endif

#include <machine/cache.h>
#include <machine/md_var.h>
#include <machine/tlb.h>

#undef PMAP_DEBUG

#ifndef PMAP_SHPGPERPROC
#define PMAP_SHPGPERPROC 200
#endif

#if !defined(DIAGNOSTIC)
#define PMAP_INLINE __inline
#else
#define PMAP_INLINE
#endif

/*
 * Get PDEs and PTEs for user/kernel address space
 */
#define pmap_seg_index(v)	(((v) >> SEGSHIFT) & (NPDEPG - 1))
#define pmap_pde_index(v)	(((v) >> PDRSHIFT) & (NPDEPG - 1))
#define pmap_pte_index(v)	(((v) >> PAGE_SHIFT) & (NPTEPG - 1))
#define pmap_pde_pindex(v)	((v) >> PDRSHIFT)

#ifdef __mips_n64
#define NUPDE			(NPDEPG * NPDEPG)
#define NUSERPGTBLS		(NUPDE + NPDEPG)
#else
#define NUPDE			(NPDEPG)
#define NUSERPGTBLS		(NUPDE)
#endif

#define is_kernel_pmap(x)	((x) == kernel_pmap)

struct pmap kernel_pmap_store;
pd_entry_t *kernel_segmap;

vm_offset_t virtual_avail;	/* VA of first avail page (after kernel bss) */
vm_offset_t virtual_end;	/* VA of last avail page (end of kernel AS) */

static int nkpt;
unsigned pmap_max_asid;		/* max ASID supported by the system */

#define PMAP_ASID_RESERVED	0

vm_offset_t kernel_vm_end = VM_MIN_KERNEL_ADDRESS;

static void pmap_asid_alloc(pmap_t pmap);

/*
 * Data for the pv entry allocation mechanism
 */
static uma_zone_t pvzone;
static struct vm_object pvzone_obj;
static int pv_entry_count = 0, pv_entry_max = 0, pv_entry_high_water = 0;

static PMAP_INLINE void free_pv_entry(pv_entry_t pv);
static pv_entry_t get_pv_entry(pmap_t locked_pmap);
static void pmap_pvh_free(struct md_page *pvh, pmap_t pmap, vm_offset_t va);
static pv_entry_t pmap_pvh_remove(struct md_page *pvh, pmap_t pmap,
    vm_offset_t va);
static __inline void pmap_changebit(vm_page_t m, int bit, boolean_t setem);
static vm_page_t pmap_enter_quick_locked(pmap_t pmap, vm_offset_t va,
    vm_page_t m, vm_prot_t prot, vm_page_t mpte);
static int pmap_remove_pte(struct pmap *pmap, pt_entry_t *ptq, vm_offset_t va);
static void pmap_remove_page(struct pmap *pmap, vm_offset_t va);
static void pmap_remove_entry(struct pmap *pmap, vm_page_t m, vm_offset_t va);
static boolean_t pmap_try_insert_pv_entry(pmap_t pmap, vm_page_t mpte,
    vm_offset_t va, vm_page_t m);
static void pmap_update_page(pmap_t pmap, vm_offset_t va, pt_entry_t pte);
static void pmap_invalidate_all(pmap_t pmap);
static void pmap_invalidate_page(pmap_t pmap, vm_offset_t va);
static int _pmap_unwire_pte_hold(pmap_t pmap, vm_offset_t va, vm_page_t m);

static vm_page_t pmap_allocpte(pmap_t pmap, vm_offset_t va, int flags);
static vm_page_t _pmap_allocpte(pmap_t pmap, unsigned ptepindex, int flags);
static int pmap_unuse_pt(pmap_t, vm_offset_t, vm_page_t);
static pt_entry_t init_pte_prot(vm_offset_t va, vm_page_t m, vm_prot_t prot);

#ifdef SMP
static void pmap_invalidate_page_action(void *arg);
static void pmap_update_page_action(void *arg);
#endif

#ifndef __mips_n64
/*
 * This structure is for high memory (memory above 512Meg in 32 bit) support.
 * The highmem area does not have a KSEG0 mapping, and we need a mechanism to
 * do temporary per-CPU mappings for pmap_zero_page, pmap_copy_page etc.
 *
 * At bootup, we reserve 2 virtual pages per CPU for mapping highmem pages. To
 * access a highmem physical address on a CPU, we map the physical address to
 * the reserved virtual address for the CPU in the kernel pagetable.  This is
 * done with interrupts disabled (although a spinlock and sched_pin would be
 * sufficient).
 */
struct local_sysmaps {
	vm_offset_t base;
	uint32_t saved_intr;
	uint16_t valid1, valid2;
};
static struct local_sysmaps sysmap_lmem[MAXCPU];

static __inline void
pmap_alloc_lmem_map(void)
{
	int i;

	for (i = 0; i < MAXCPU; i++) {
		sysmap_lmem[i].base = virtual_avail;
		virtual_avail += PAGE_SIZE * 2;
		sysmap_lmem[i].valid1 = sysmap_lmem[i].valid2 = 0;
	}
}

static __inline vm_offset_t
pmap_lmem_map1(vm_paddr_t phys)
{
	struct local_sysmaps *sysm;
	pt_entry_t *pte, npte;
	vm_offset_t va;
	uint32_t intr;
	int cpu;

	intr = intr_disable();
	cpu = PCPU_GET(cpuid);
	sysm = &sysmap_lmem[cpu];
	sysm->saved_intr = intr;
	va = sysm->base;
	npte = TLBLO_PA_TO_PFN(phys) |
	    PTE_D | PTE_V | PTE_G | PTE_W | PTE_C_CACHE;
	pte = pmap_pte(kernel_pmap, va);
	*pte = npte;
	sysm->valid1 = 1;
	return (va);
}

static __inline vm_offset_t
pmap_lmem_map2(vm_paddr_t phys1, vm_paddr_t phys2)
{
	struct local_sysmaps *sysm;
	pt_entry_t *pte, npte;
	vm_offset_t va1, va2;
	uint32_t intr;
	int cpu;

	intr = intr_disable();
	cpu = PCPU_GET(cpuid);
	sysm = &sysmap_lmem[cpu];
	sysm->saved_intr = intr;
	va1 = sysm->base;
	va2 = sysm->base + PAGE_SIZE;
	npte = TLBLO_PA_TO_PFN(phys1) |
	    PTE_D | PTE_V | PTE_G | PTE_W | PTE_C_CACHE;
	pte = pmap_pte(kernel_pmap, va1);
	*pte = npte;
	npte = TLBLO_PA_TO_PFN(phys2) |
	    PTE_D | PTE_V | PTE_G | PTE_W | PTE_C_CACHE;
	pte = pmap_pte(kernel_pmap, va2);
	*pte = npte;
	sysm->valid1 = 1;
	sysm->valid2 = 1;
	return (va1);
}

static __inline void
pmap_lmem_unmap(void)
{
	struct local_sysmaps *sysm;
	pt_entry_t *pte;
	int cpu;

	cpu = PCPU_GET(cpuid);
	sysm = &sysmap_lmem[cpu];
	pte = pmap_pte(kernel_pmap, sysm->base);
	*pte = PTE_G;
	tlb_invalidate_address(kernel_pmap, sysm->base);
	sysm->valid1 = 0;
	if (sysm->valid2) {
		pte = pmap_pte(kernel_pmap, sysm->base + PAGE_SIZE);
		*pte = PTE_G;
		tlb_invalidate_address(kernel_pmap, sysm->base + PAGE_SIZE);
		sysm->valid2 = 0;
	}
	intr_restore(sysm->saved_intr);
}
#else /* __mips_n64 */

static __inline void
pmap_alloc_lmem_map(void)
{
}

static __inline vm_offset_t
pmap_lmem_map1(vm_paddr_t phys)
{

	return (0);
}

static __inline vm_offset_t
pmap_lmem_map2(vm_paddr_t phys1, vm_paddr_t phys2)
{

	return (0);
}

static __inline vm_offset_t
pmap_lmem_unmap(void)
{

	return (0);
}
#endif /* !__mips_n64 */

/*
 * Page table entry lookup routines.
 */
static __inline pd_entry_t *
pmap_segmap(pmap_t pmap, vm_offset_t va)
{

	return (&pmap->pm_segtab[pmap_seg_index(va)]);
}

#ifdef __mips_n64
static __inline pd_entry_t *
pmap_pdpe_to_pde(pd_entry_t *pdpe, vm_offset_t va)
{
	pd_entry_t *pde;

	pde = (pd_entry_t *)*pdpe;
	return (&pde[pmap_pde_index(va)]);
}

static __inline pd_entry_t *
pmap_pde(pmap_t pmap, vm_offset_t va)
{
	pd_entry_t *pdpe;

	pdpe = pmap_segmap(pmap, va);
	if (pdpe == NULL || *pdpe == NULL)
		return (NULL);

	return (pmap_pdpe_to_pde(pdpe, va));
}
#else
static __inline pd_entry_t *
pmap_pdpe_to_pde(pd_entry_t *pdpe, vm_offset_t va)
{

	return (pdpe);
}

static __inline
pd_entry_t *pmap_pde(pmap_t pmap, vm_offset_t va)
{

	return (pmap_segmap(pmap, va));
}
#endif

static __inline pt_entry_t *
pmap_pde_to_pte(pd_entry_t *pde, vm_offset_t va)
{
	pt_entry_t *pte;

	pte = (pt_entry_t *)*pde;
	return (&pte[pmap_pte_index(va)]);
}

pt_entry_t *
pmap_pte(pmap_t pmap, vm_offset_t va)
{
	pd_entry_t *pde;

	pde = pmap_pde(pmap, va);
	if (pde == NULL || *pde == NULL)
		return (NULL);

	return (pmap_pde_to_pte(pde, va));
}

vm_offset_t
pmap_steal_memory(vm_size_t size)
{
	vm_paddr_t bank_size, pa;
	vm_offset_t va;

	size = round_page(size);
	bank_size = phys_avail[1] - phys_avail[0];
	while (size > bank_size) {
		int i;

		for (i = 0; phys_avail[i + 2]; i += 2) {
			phys_avail[i] = phys_avail[i + 2];
			phys_avail[i + 1] = phys_avail[i + 3];
		}
		phys_avail[i] = 0;
		phys_avail[i + 1] = 0;
		if (!phys_avail[0])
			panic("pmap_steal_memory: out of memory");
		bank_size = phys_avail[1] - phys_avail[0];
	}

	pa = phys_avail[0];
	phys_avail[0] += size;
	if (MIPS_DIRECT_MAPPABLE(pa) == 0)
		panic("Out of memory below 512Meg?");
	va = MIPS_PHYS_TO_DIRECT(pa);
	bzero((caddr_t)va, size);
	return (va);
}

/*
 * Bootstrap the system enough to run with virtual memory.  This
 * assumes that the phys_avail array has been initialized.
 */
static void
pmap_create_kernel_pagetable(void)
{
	int i, j;
	vm_offset_t ptaddr;
	pt_entry_t *pte;
#ifdef __mips_n64
	pd_entry_t *pde;
	vm_offset_t pdaddr;
	int npt, npde;
#endif

	/*
	 * Allocate segment table for the kernel
	 */
	kernel_segmap = (pd_entry_t *)pmap_steal_memory(PAGE_SIZE);

	/*
	 * Allocate second level page tables for the kernel
	 */
#ifdef __mips_n64
	npde = howmany(NKPT, NPDEPG);
	pdaddr = pmap_steal_memory(PAGE_SIZE * npde);
#endif
	nkpt = NKPT;
	ptaddr = pmap_steal_memory(PAGE_SIZE * nkpt);

	/*
	 * The R[4-7]?00 stores only one copy of the Global bit in the
	 * translation lookaside buffer for each 2 page entry.  Thus invalid
	 * entries must have the Global bit set so when Entry LO and Entry HI
	 * G bits are ANDed together they will produce a global bit to store
	 * in the tlb.
454 */ 455 for (i = 0, pte = (pt_entry_t *)ptaddr; i < (nkpt * NPTEPG); i++, pte++) 456 *pte = PTE_G; 457 458#ifdef __mips_n64 459 for (i = 0, npt = nkpt; npt > 0; i++) { 460 kernel_segmap[i] = (pd_entry_t)(pdaddr + i * PAGE_SIZE); 461 pde = (pd_entry_t *)kernel_segmap[i]; 462 463 for (j = 0; j < NPDEPG && npt > 0; j++, npt--) 464 pde[j] = (pd_entry_t)(ptaddr + (i * NPDEPG + j) * PAGE_SIZE); 465 } 466#else 467 for (i = 0, j = pmap_seg_index(VM_MIN_KERNEL_ADDRESS); i < nkpt; i++, j++) 468 kernel_segmap[j] = (pd_entry_t)(ptaddr + (i * PAGE_SIZE)); 469#endif 470 471 PMAP_LOCK_INIT(kernel_pmap); 472 kernel_pmap->pm_segtab = kernel_segmap; 473 CPU_FILL(&kernel_pmap->pm_active); 474 TAILQ_INIT(&kernel_pmap->pm_pvlist); 475 kernel_pmap->pm_asid[0].asid = PMAP_ASID_RESERVED; 476 kernel_pmap->pm_asid[0].gen = 0; 477 kernel_vm_end += nkpt * NPTEPG * PAGE_SIZE; 478} 479 480void 481pmap_bootstrap(void) 482{ 483 int i; 484 int need_local_mappings = 0; 485 486 /* Sort. */ 487again: 488 for (i = 0; phys_avail[i + 1] != 0; i += 2) { 489 /* 490 * Keep the memory aligned on page boundary. 491 */ 492 phys_avail[i] = round_page(phys_avail[i]); 493 phys_avail[i + 1] = trunc_page(phys_avail[i + 1]); 494 495 if (i < 2) 496 continue; 497 if (phys_avail[i - 2] > phys_avail[i]) { 498 vm_paddr_t ptemp[2]; 499 500 ptemp[0] = phys_avail[i + 0]; 501 ptemp[1] = phys_avail[i + 1]; 502 503 phys_avail[i + 0] = phys_avail[i - 2]; 504 phys_avail[i + 1] = phys_avail[i - 1]; 505 506 phys_avail[i - 2] = ptemp[0]; 507 phys_avail[i - 1] = ptemp[1]; 508 goto again; 509 } 510 } 511 512 /* 513 * In 32 bit, we may have memory which cannot be mapped directly. 514 * This memory will need temporary mapping before it can be 515 * accessed. 516 */ 517 if (!MIPS_DIRECT_MAPPABLE(phys_avail[i - 1] - 1)) 518 need_local_mappings = 1; 519 520 /* 521 * Copy the phys_avail[] array before we start stealing memory from it. 522 */ 523 for (i = 0; phys_avail[i + 1] != 0; i += 2) { 524 physmem_desc[i] = phys_avail[i]; 525 physmem_desc[i + 1] = phys_avail[i + 1]; 526 } 527 528 Maxmem = atop(phys_avail[i - 1]); 529 530 if (bootverbose) { 531 printf("Physical memory chunk(s):\n"); 532 for (i = 0; phys_avail[i + 1] != 0; i += 2) { 533 vm_paddr_t size; 534 535 size = phys_avail[i + 1] - phys_avail[i]; 536 printf("%#08jx - %#08jx, %ju bytes (%ju pages)\n", 537 (uintmax_t) phys_avail[i], 538 (uintmax_t) phys_avail[i + 1] - 1, 539 (uintmax_t) size, (uintmax_t) size / PAGE_SIZE); 540 } 541 printf("Maxmem is 0x%0jx\n", ptoa((uintmax_t)Maxmem)); 542 } 543 /* 544 * Steal the message buffer from the beginning of memory. 545 */ 546 msgbufp = (struct msgbuf *)pmap_steal_memory(msgbufsize); 547 msgbufinit(msgbufp, msgbufsize); 548 549 /* 550 * Steal thread0 kstack. 551 */ 552 kstack0 = pmap_steal_memory(KSTACK_PAGES << PAGE_SHIFT); 553 554 virtual_avail = VM_MIN_KERNEL_ADDRESS; 555 virtual_end = VM_MAX_KERNEL_ADDRESS; 556 557#ifdef SMP 558 /* 559 * Steal some virtual address space to map the pcpu area. 560 */ 561 virtual_avail = roundup2(virtual_avail, PAGE_SIZE * 2); 562 pcpup = (struct pcpu *)virtual_avail; 563 virtual_avail += PAGE_SIZE * 2; 564 565 /* 566 * Initialize the wired TLB entry mapping the pcpu region for 567 * the BSP at 'pcpup'. Up until this point we were operating 568 * with the 'pcpup' for the BSP pointing to a virtual address 569 * in KSEG0 so there was no need for a TLB mapping. 
570 */ 571 mips_pcpu_tlb_init(PCPU_ADDR(0)); 572 573 if (bootverbose) 574 printf("pcpu is available at virtual address %p.\n", pcpup); 575#endif 576 577 if (need_local_mappings) 578 pmap_alloc_lmem_map(); 579 pmap_create_kernel_pagetable(); 580 pmap_max_asid = VMNUM_PIDS; 581 mips_wr_entryhi(0); 582 mips_wr_pagemask(0); 583} 584 585/* 586 * Initialize a vm_page's machine-dependent fields. 587 */ 588void 589pmap_page_init(vm_page_t m) 590{ 591 592 TAILQ_INIT(&m->md.pv_list); 593 m->md.pv_list_count = 0; 594 m->md.pv_flags = 0; 595} 596 597/* 598 * Initialize the pmap module. 599 * Called by vm_init, to initialize any structures that the pmap 600 * system needs to map virtual memory. 601 * pmap_init has been enhanced to support in a fairly consistant 602 * way, discontiguous physical memory. 603 */ 604void 605pmap_init(void) 606{ 607 608 /* 609 * Initialize the address space (zone) for the pv entries. Set a 610 * high water mark so that the system can recover from excessive 611 * numbers of pv entries. 612 */ 613 pvzone = uma_zcreate("PV ENTRY", sizeof(struct pv_entry), NULL, NULL, 614 NULL, NULL, UMA_ALIGN_PTR, UMA_ZONE_VM | UMA_ZONE_NOFREE); 615 pv_entry_max = PMAP_SHPGPERPROC * maxproc + cnt.v_page_count; 616 pv_entry_high_water = 9 * (pv_entry_max / 10); 617 uma_zone_set_obj(pvzone, &pvzone_obj, pv_entry_max); 618} 619 620/*************************************************** 621 * Low level helper routines..... 622 ***************************************************/ 623 624#ifdef SMP 625static __inline void 626pmap_call_on_active_cpus(pmap_t pmap, void (*fn)(void *), void *arg) 627{ 628 int cpuid, cpu, self; 629 cpuset_t active_cpus; 630 631 sched_pin(); 632 if (is_kernel_pmap(pmap)) { 633 smp_rendezvous(NULL, fn, NULL, arg); 634 goto out; 635 } 636 /* Force ASID update on inactive CPUs */ 637 CPU_FOREACH(cpu) { 638 if (!CPU_ISSET(cpu, &pmap->pm_active)) 639 pmap->pm_asid[cpu].gen = 0; 640 } 641 cpuid = PCPU_GET(cpuid); 642 /* 643 * XXX: barrier/locking for active? 644 * 645 * Take a snapshot of active here, any further changes are ignored. 
646 * tlb update/invalidate should be harmless on inactive CPUs 647 */ 648 active_cpus = pmap->pm_active; 649 self = CPU_ISSET(cpuid, &active_cpus); 650 CPU_CLR(cpuid, &active_cpus); 651 /* Optimize for the case where this cpu is the only active one */ 652 if (CPU_EMPTY(&active_cpus)) { 653 if (self) 654 fn(arg); 655 } else { 656 if (self) 657 CPU_SET(cpuid, &active_cpus); 658 smp_rendezvous_cpus(active_cpus, NULL, fn, NULL, arg); 659 } 660out: 661 sched_unpin(); 662} 663#else /* !SMP */ 664static __inline void 665pmap_call_on_active_cpus(pmap_t pmap, void (*fn)(void *), void *arg) 666{ 667 int cpuid; 668 669 if (is_kernel_pmap(pmap)) { 670 fn(arg); 671 return; 672 } 673 cpuid = PCPU_GET(cpuid); 674 if (!CPU_ISSET(cpuid, &pmap->pm_active)) 675 pmap->pm_asid[cpuid].gen = 0; 676 else 677 fn(arg); 678} 679#endif /* SMP */ 680 681static void 682pmap_invalidate_all(pmap_t pmap) 683{ 684 685 pmap_call_on_active_cpus(pmap, 686 (void (*)(void *))tlb_invalidate_all_user, pmap); 687} 688 689struct pmap_invalidate_page_arg { 690 pmap_t pmap; 691 vm_offset_t va; 692}; 693 694static void 695pmap_invalidate_page_action(void *arg) 696{ 697 struct pmap_invalidate_page_arg *p = arg; 698 699 tlb_invalidate_address(p->pmap, p->va); 700} 701 702static void 703pmap_invalidate_page(pmap_t pmap, vm_offset_t va) 704{ 705 struct pmap_invalidate_page_arg arg; 706 707 arg.pmap = pmap; 708 arg.va = va; 709 pmap_call_on_active_cpus(pmap, pmap_invalidate_page_action, &arg); 710} 711 712struct pmap_update_page_arg { 713 pmap_t pmap; 714 vm_offset_t va; 715 pt_entry_t pte; 716}; 717 718static void 719pmap_update_page_action(void *arg) 720{ 721 struct pmap_update_page_arg *p = arg; 722 723 tlb_update(p->pmap, p->va, p->pte); 724} 725 726static void 727pmap_update_page(pmap_t pmap, vm_offset_t va, pt_entry_t pte) 728{ 729 struct pmap_update_page_arg arg; 730 731 arg.pmap = pmap; 732 arg.va = va; 733 arg.pte = pte; 734 pmap_call_on_active_cpus(pmap, pmap_update_page_action, &arg); 735} 736 737/* 738 * Routine: pmap_extract 739 * Function: 740 * Extract the physical page address associated 741 * with the given map/virtual_address pair. 742 */ 743vm_paddr_t 744pmap_extract(pmap_t pmap, vm_offset_t va) 745{ 746 pt_entry_t *pte; 747 vm_offset_t retval = 0; 748 749 PMAP_LOCK(pmap); 750 pte = pmap_pte(pmap, va); 751 if (pte) { 752 retval = TLBLO_PTE_TO_PA(*pte) | (va & PAGE_MASK); 753 } 754 PMAP_UNLOCK(pmap); 755 return (retval); 756} 757 758/* 759 * Routine: pmap_extract_and_hold 760 * Function: 761 * Atomically extract and hold the physical page 762 * with the given pmap and virtual address pair 763 * if that mapping permits the given protection. 764 */ 765vm_page_t 766pmap_extract_and_hold(pmap_t pmap, vm_offset_t va, vm_prot_t prot) 767{ 768 pt_entry_t pte; 769 vm_page_t m; 770 vm_paddr_t pa; 771 772 m = NULL; 773 pa = 0; 774 PMAP_LOCK(pmap); 775retry: 776 pte = *pmap_pte(pmap, va); 777 if (pte != 0 && pte_test(&pte, PTE_V) && 778 (pte_test(&pte, PTE_D) || (prot & VM_PROT_WRITE) == 0)) { 779 if (vm_page_pa_tryrelock(pmap, TLBLO_PTE_TO_PA(pte), &pa)) 780 goto retry; 781 782 m = PHYS_TO_VM_PAGE(TLBLO_PTE_TO_PA(pte)); 783 vm_page_hold(m); 784 } 785 PA_UNLOCK_COND(pa); 786 PMAP_UNLOCK(pmap); 787 return (m); 788} 789 790/*************************************************** 791 * Low level mapping routines..... 
792 ***************************************************/ 793 794/* 795 * add a wired page to the kva 796 */ 797void 798pmap_kenter_attr(vm_offset_t va, vm_paddr_t pa, int attr) 799{ 800 pt_entry_t *pte; 801 pt_entry_t opte, npte; 802 803#ifdef PMAP_DEBUG 804 printf("pmap_kenter: va: %p -> pa: %p\n", (void *)va, (void *)pa); 805#endif 806 npte = TLBLO_PA_TO_PFN(pa) | PTE_D | PTE_V | PTE_G | PTE_W | attr; 807 808 pte = pmap_pte(kernel_pmap, va); 809 opte = *pte; 810 *pte = npte; 811 if (pte_test(&opte, PTE_V) && opte != npte) 812 pmap_update_page(kernel_pmap, va, npte); 813} 814 815void 816pmap_kenter(vm_offset_t va, vm_paddr_t pa) 817{ 818 819 KASSERT(is_cacheable_mem(pa), 820 ("pmap_kenter: memory at 0x%lx is not cacheable", (u_long)pa)); 821 822 pmap_kenter_attr(va, pa, PTE_C_CACHE); 823} 824 825/* 826 * remove a page from the kernel pagetables 827 */ 828 /* PMAP_INLINE */ void 829pmap_kremove(vm_offset_t va) 830{ 831 pt_entry_t *pte; 832 833 /* 834 * Write back all caches from the page being destroyed 835 */ 836 mips_dcache_wbinv_range_index(va, PAGE_SIZE); 837 838 pte = pmap_pte(kernel_pmap, va); 839 *pte = PTE_G; 840 pmap_invalidate_page(kernel_pmap, va); 841} 842 843/* 844 * Used to map a range of physical addresses into kernel 845 * virtual address space. 846 * 847 * The value passed in '*virt' is a suggested virtual address for 848 * the mapping. Architectures which can support a direct-mapped 849 * physical to virtual region can return the appropriate address 850 * within that region, leaving '*virt' unchanged. Other 851 * architectures should map the pages starting at '*virt' and 852 * update '*virt' with the first usable address after the mapped 853 * region. 854 * 855 * Use XKPHYS for 64 bit, and KSEG0 where possible for 32 bit. 856 */ 857vm_offset_t 858pmap_map(vm_offset_t *virt, vm_paddr_t start, vm_paddr_t end, int prot) 859{ 860 vm_offset_t va, sva; 861 862 if (MIPS_DIRECT_MAPPABLE(end - 1)) 863 return (MIPS_PHYS_TO_DIRECT(start)); 864 865 va = sva = *virt; 866 while (start < end) { 867 pmap_kenter(va, start); 868 va += PAGE_SIZE; 869 start += PAGE_SIZE; 870 } 871 *virt = va; 872 return (sva); 873} 874 875/* 876 * Add a list of wired pages to the kva 877 * this routine is only used for temporary 878 * kernel mappings that do not need to have 879 * page modification or references recorded. 880 * Note that old mappings are simply written 881 * over. The page *must* be wired. 882 */ 883void 884pmap_qenter(vm_offset_t va, vm_page_t *m, int count) 885{ 886 int i; 887 vm_offset_t origva = va; 888 889 for (i = 0; i < count; i++) { 890 pmap_flush_pvcache(m[i]); 891 pmap_kenter(va, VM_PAGE_TO_PHYS(m[i])); 892 va += PAGE_SIZE; 893 } 894 895 mips_dcache_wbinv_range_index(origva, PAGE_SIZE*count); 896} 897 898/* 899 * this routine jerks page mappings from the 900 * kernel -- it is meant only for temporary mappings. 901 */ 902void 903pmap_qremove(vm_offset_t va, int count) 904{ 905 /* 906 * No need to wb/inv caches here, 907 * pmap_kremove will do it for us 908 */ 909 910 while (count-- > 0) { 911 pmap_kremove(va); 912 va += PAGE_SIZE; 913 } 914} 915 916/*************************************************** 917 * Page table page management routines..... 918 ***************************************************/ 919 920/* Revision 1.507 921 * 922 * Simplify the reference counting of page table pages. Specifically, use 923 * the page table page's wired count rather than its hold count to contain 924 * the reference count. 
925 */ 926 927/* 928 * This routine unholds page table pages, and if the hold count 929 * drops to zero, then it decrements the wire count. 930 */ 931static PMAP_INLINE int 932pmap_unwire_pte_hold(pmap_t pmap, vm_offset_t va, vm_page_t m) 933{ 934 --m->wire_count; 935 if (m->wire_count == 0) 936 return (_pmap_unwire_pte_hold(pmap, va, m)); 937 else 938 return (0); 939} 940 941static int 942_pmap_unwire_pte_hold(pmap_t pmap, vm_offset_t va, vm_page_t m) 943{ 944 pd_entry_t *pde; 945 946 PMAP_LOCK_ASSERT(pmap, MA_OWNED); 947 /* 948 * unmap the page table page 949 */ 950#ifdef __mips_n64 951 if (m->pindex < NUPDE) 952 pde = pmap_pde(pmap, va); 953 else 954 pde = pmap_segmap(pmap, va); 955#else 956 pde = pmap_pde(pmap, va); 957#endif 958 *pde = 0; 959 pmap->pm_stats.resident_count--; 960 961#ifdef __mips_n64 962 if (m->pindex < NUPDE) { 963 pd_entry_t *pdp; 964 vm_page_t pdpg; 965 966 /* 967 * Recursively decrement next level pagetable refcount 968 */ 969 pdp = (pd_entry_t *)*pmap_segmap(pmap, va); 970 pdpg = PHYS_TO_VM_PAGE(MIPS_DIRECT_TO_PHYS(pdp)); 971 pmap_unwire_pte_hold(pmap, va, pdpg); 972 } 973#endif 974 if (pmap->pm_ptphint == m) 975 pmap->pm_ptphint = NULL; 976 977 /* 978 * If the page is finally unwired, simply free it. 979 */ 980 vm_page_free_zero(m); 981 atomic_subtract_int(&cnt.v_wire_count, 1); 982 return (1); 983} 984 985/* 986 * After removing a page table entry, this routine is used to 987 * conditionally free the page, and manage the hold/wire counts. 988 */ 989static int 990pmap_unuse_pt(pmap_t pmap, vm_offset_t va, vm_page_t mpte) 991{ 992 unsigned ptepindex; 993 pd_entry_t pteva; 994 995 if (va >= VM_MAXUSER_ADDRESS) 996 return (0); 997 998 if (mpte == NULL) { 999 ptepindex = pmap_pde_pindex(va); 1000 if (pmap->pm_ptphint && 1001 (pmap->pm_ptphint->pindex == ptepindex)) { 1002 mpte = pmap->pm_ptphint; 1003 } else { 1004 pteva = *pmap_pde(pmap, va); 1005 mpte = PHYS_TO_VM_PAGE(MIPS_DIRECT_TO_PHYS(pteva)); 1006 pmap->pm_ptphint = mpte; 1007 } 1008 } 1009 return (pmap_unwire_pte_hold(pmap, va, mpte)); 1010} 1011 1012void 1013pmap_pinit0(pmap_t pmap) 1014{ 1015 int i; 1016 1017 PMAP_LOCK_INIT(pmap); 1018 pmap->pm_segtab = kernel_segmap; 1019 CPU_ZERO(&pmap->pm_active); 1020 pmap->pm_ptphint = NULL; 1021 for (i = 0; i < MAXCPU; i++) { 1022 pmap->pm_asid[i].asid = PMAP_ASID_RESERVED; 1023 pmap->pm_asid[i].gen = 0; 1024 } 1025 PCPU_SET(curpmap, pmap); 1026 TAILQ_INIT(&pmap->pm_pvlist); 1027 bzero(&pmap->pm_stats, sizeof pmap->pm_stats); 1028} 1029 1030void 1031pmap_grow_direct_page_cache() 1032{ 1033 1034#ifdef __mips_n64 1035 vm_contig_grow_cache(3, 0, MIPS_XKPHYS_LARGEST_PHYS); 1036#else 1037 vm_contig_grow_cache(3, 0, MIPS_KSEG0_LARGEST_PHYS); 1038#endif 1039} 1040 1041vm_page_t 1042pmap_alloc_direct_page(unsigned int index, int req) 1043{ 1044 vm_page_t m; 1045 1046 m = vm_page_alloc_freelist(VM_FREELIST_DIRECT, req | VM_ALLOC_WIRED | 1047 VM_ALLOC_ZERO); 1048 if (m == NULL) 1049 return (NULL); 1050 1051 if ((m->flags & PG_ZERO) == 0) 1052 pmap_zero_page(m); 1053 1054 m->pindex = index; 1055 return (m); 1056} 1057 1058/* 1059 * Initialize a preallocated and zeroed pmap structure, 1060 * such as one in a vmspace structure. 
1061 */ 1062int 1063pmap_pinit(pmap_t pmap) 1064{ 1065 vm_offset_t ptdva; 1066 vm_page_t ptdpg; 1067 int i; 1068 1069 PMAP_LOCK_INIT(pmap); 1070 1071 /* 1072 * allocate the page directory page 1073 */ 1074 while ((ptdpg = pmap_alloc_direct_page(NUSERPGTBLS, VM_ALLOC_NORMAL)) == NULL) 1075 pmap_grow_direct_page_cache(); 1076 1077 ptdva = MIPS_PHYS_TO_DIRECT(VM_PAGE_TO_PHYS(ptdpg)); 1078 pmap->pm_segtab = (pd_entry_t *)ptdva; 1079 CPU_ZERO(&pmap->pm_active); 1080 pmap->pm_ptphint = NULL; 1081 for (i = 0; i < MAXCPU; i++) { 1082 pmap->pm_asid[i].asid = PMAP_ASID_RESERVED; 1083 pmap->pm_asid[i].gen = 0; 1084 } 1085 TAILQ_INIT(&pmap->pm_pvlist); 1086 bzero(&pmap->pm_stats, sizeof pmap->pm_stats); 1087 1088 return (1); 1089} 1090 1091/* 1092 * this routine is called if the page table page is not 1093 * mapped correctly. 1094 */ 1095static vm_page_t 1096_pmap_allocpte(pmap_t pmap, unsigned ptepindex, int flags) 1097{ 1098 vm_offset_t pageva; 1099 vm_page_t m; 1100 1101 KASSERT((flags & (M_NOWAIT | M_WAITOK)) == M_NOWAIT || 1102 (flags & (M_NOWAIT | M_WAITOK)) == M_WAITOK, 1103 ("_pmap_allocpte: flags is neither M_NOWAIT nor M_WAITOK")); 1104 1105 /* 1106 * Find or fabricate a new pagetable page 1107 */ 1108 if ((m = pmap_alloc_direct_page(ptepindex, VM_ALLOC_NORMAL)) == NULL) { 1109 if (flags & M_WAITOK) { 1110 PMAP_UNLOCK(pmap); 1111 vm_page_unlock_queues(); 1112 pmap_grow_direct_page_cache(); 1113 vm_page_lock_queues(); 1114 PMAP_LOCK(pmap); 1115 } 1116 1117 /* 1118 * Indicate the need to retry. While waiting, the page 1119 * table page may have been allocated. 1120 */ 1121 return (NULL); 1122 } 1123 1124 /* 1125 * Map the pagetable page into the process address space, if it 1126 * isn't already there. 1127 */ 1128 pageva = MIPS_PHYS_TO_DIRECT(VM_PAGE_TO_PHYS(m)); 1129 1130#ifdef __mips_n64 1131 if (ptepindex >= NUPDE) { 1132 pmap->pm_segtab[ptepindex - NUPDE] = (pd_entry_t)pageva; 1133 } else { 1134 pd_entry_t *pdep, *pde; 1135 int segindex = ptepindex >> (SEGSHIFT - PDRSHIFT); 1136 int pdeindex = ptepindex & (NPDEPG - 1); 1137 vm_page_t pg; 1138 1139 pdep = &pmap->pm_segtab[segindex]; 1140 if (*pdep == NULL) { 1141 /* recurse for allocating page dir */ 1142 if (_pmap_allocpte(pmap, NUPDE + segindex, 1143 flags) == NULL) { 1144 /* alloc failed, release current */ 1145 --m->wire_count; 1146 atomic_subtract_int(&cnt.v_wire_count, 1); 1147 vm_page_free_zero(m); 1148 return (NULL); 1149 } 1150 } else { 1151 pg = PHYS_TO_VM_PAGE(MIPS_DIRECT_TO_PHYS(*pdep)); 1152 pg->wire_count++; 1153 } 1154 /* Next level entry */ 1155 pde = (pd_entry_t *)*pdep; 1156 pde[pdeindex] = (pd_entry_t)pageva; 1157 pmap->pm_ptphint = m; 1158 } 1159#else 1160 pmap->pm_segtab[ptepindex] = (pd_entry_t)pageva; 1161#endif 1162 pmap->pm_stats.resident_count++; 1163 1164 /* 1165 * Set the page table hint 1166 */ 1167 pmap->pm_ptphint = m; 1168 return (m); 1169} 1170 1171static vm_page_t 1172pmap_allocpte(pmap_t pmap, vm_offset_t va, int flags) 1173{ 1174 unsigned ptepindex; 1175 pd_entry_t *pde; 1176 vm_page_t m; 1177 1178 KASSERT((flags & (M_NOWAIT | M_WAITOK)) == M_NOWAIT || 1179 (flags & (M_NOWAIT | M_WAITOK)) == M_WAITOK, 1180 ("pmap_allocpte: flags is neither M_NOWAIT nor M_WAITOK")); 1181 1182 /* 1183 * Calculate pagetable page index 1184 */ 1185 ptepindex = pmap_pde_pindex(va); 1186retry: 1187 /* 1188 * Get the page directory entry 1189 */ 1190 pde = pmap_pde(pmap, va); 1191 1192 /* 1193 * If the page table page is mapped, we just increment the hold 1194 * count, and activate it. 
1195 */ 1196 if (pde != NULL && *pde != NULL) { 1197 /* 1198 * In order to get the page table page, try the hint first. 1199 */ 1200 if (pmap->pm_ptphint && 1201 (pmap->pm_ptphint->pindex == ptepindex)) { 1202 m = pmap->pm_ptphint; 1203 } else { 1204 m = PHYS_TO_VM_PAGE(MIPS_DIRECT_TO_PHYS(*pde)); 1205 pmap->pm_ptphint = m; 1206 } 1207 m->wire_count++; 1208 } else { 1209 /* 1210 * Here if the pte page isn't mapped, or if it has been 1211 * deallocated. 1212 */ 1213 m = _pmap_allocpte(pmap, ptepindex, flags); 1214 if (m == NULL && (flags & M_WAITOK)) 1215 goto retry; 1216 } 1217 return (m); 1218} 1219 1220 1221/*************************************************** 1222* Pmap allocation/deallocation routines. 1223 ***************************************************/ 1224/* 1225 * Revision 1.397 1226 * - Merged pmap_release and pmap_release_free_page. When pmap_release is 1227 * called only the page directory page(s) can be left in the pmap pte 1228 * object, since all page table pages will have been freed by 1229 * pmap_remove_pages and pmap_remove. In addition, there can only be one 1230 * reference to the pmap and the page directory is wired, so the page(s) 1231 * can never be busy. So all there is to do is clear the magic mappings 1232 * from the page directory and free the page(s). 1233 */ 1234 1235 1236/* 1237 * Release any resources held by the given physical map. 1238 * Called when a pmap initialized by pmap_pinit is being released. 1239 * Should only be called if the map contains no valid mappings. 1240 */ 1241void 1242pmap_release(pmap_t pmap) 1243{ 1244 vm_offset_t ptdva; 1245 vm_page_t ptdpg; 1246 1247 KASSERT(pmap->pm_stats.resident_count == 0, 1248 ("pmap_release: pmap resident count %ld != 0", 1249 pmap->pm_stats.resident_count)); 1250 1251 ptdva = (vm_offset_t)pmap->pm_segtab; 1252 ptdpg = PHYS_TO_VM_PAGE(MIPS_DIRECT_TO_PHYS(ptdva)); 1253 1254 ptdpg->wire_count--; 1255 atomic_subtract_int(&cnt.v_wire_count, 1); 1256 vm_page_free_zero(ptdpg); 1257 PMAP_LOCK_DESTROY(pmap); 1258} 1259 1260/* 1261 * grow the number of kernel page table entries, if needed 1262 */ 1263void 1264pmap_growkernel(vm_offset_t addr) 1265{ 1266 vm_page_t nkpg; 1267 pd_entry_t *pde, *pdpe; 1268 pt_entry_t *pte; 1269 int i; 1270 1271 mtx_assert(&kernel_map->system_mtx, MA_OWNED); 1272 addr = roundup2(addr, NBSEG); 1273 if (addr - 1 >= kernel_map->max_offset) 1274 addr = kernel_map->max_offset; 1275 while (kernel_vm_end < addr) { 1276 pdpe = pmap_segmap(kernel_pmap, kernel_vm_end); 1277#ifdef __mips_n64 1278 if (*pdpe == 0) { 1279 /* new intermediate page table entry */ 1280 nkpg = pmap_alloc_direct_page(nkpt, VM_ALLOC_INTERRUPT); 1281 if (nkpg == NULL) 1282 panic("pmap_growkernel: no memory to grow kernel"); 1283 *pdpe = (pd_entry_t)MIPS_PHYS_TO_DIRECT(VM_PAGE_TO_PHYS(nkpg)); 1284 continue; /* try again */ 1285 } 1286#endif 1287 pde = pmap_pdpe_to_pde(pdpe, kernel_vm_end); 1288 if (*pde != 0) { 1289 kernel_vm_end = (kernel_vm_end + NBPDR) & ~PDRMASK; 1290 if (kernel_vm_end - 1 >= kernel_map->max_offset) { 1291 kernel_vm_end = kernel_map->max_offset; 1292 break; 1293 } 1294 continue; 1295 } 1296 1297 /* 1298 * This index is bogus, but out of the way 1299 */ 1300 nkpg = pmap_alloc_direct_page(nkpt, VM_ALLOC_INTERRUPT); 1301 if (!nkpg) 1302 panic("pmap_growkernel: no memory to grow kernel"); 1303 nkpt++; 1304 *pde = (pd_entry_t)MIPS_PHYS_TO_DIRECT(VM_PAGE_TO_PHYS(nkpg)); 1305 1306 /* 1307 * The R[4-7]?00 stores only one copy of the Global bit in 1308 * the translation lookaside buffer for each 2 page entry. 
1309 * Thus invalid entrys must have the Global bit set so when 1310 * Entry LO and Entry HI G bits are anded together they will 1311 * produce a global bit to store in the tlb. 1312 */ 1313 pte = (pt_entry_t *)*pde; 1314 for (i = 0; i < NPTEPG; i++) 1315 pte[i] = PTE_G; 1316 1317 kernel_vm_end = (kernel_vm_end + NBPDR) & ~PDRMASK; 1318 if (kernel_vm_end - 1 >= kernel_map->max_offset) { 1319 kernel_vm_end = kernel_map->max_offset; 1320 break; 1321 } 1322 } 1323} 1324 1325/*************************************************** 1326* page management routines. 1327 ***************************************************/ 1328 1329/* 1330 * free the pv_entry back to the free list 1331 */ 1332static PMAP_INLINE void 1333free_pv_entry(pv_entry_t pv) 1334{ 1335 1336 pv_entry_count--; 1337 uma_zfree(pvzone, pv); 1338} 1339 1340/* 1341 * get a new pv_entry, allocating a block from the system 1342 * when needed. 1343 * the memory allocation is performed bypassing the malloc code 1344 * because of the possibility of allocations at interrupt time. 1345 */ 1346static pv_entry_t 1347get_pv_entry(pmap_t locked_pmap) 1348{ 1349 static const struct timeval printinterval = { 60, 0 }; 1350 static struct timeval lastprint; 1351 struct vpgqueues *vpq; 1352 pt_entry_t *pte, oldpte; 1353 pmap_t pmap; 1354 pv_entry_t allocated_pv, next_pv, pv; 1355 vm_offset_t va; 1356 vm_page_t m; 1357 1358 PMAP_LOCK_ASSERT(locked_pmap, MA_OWNED); 1359 mtx_assert(&vm_page_queue_mtx, MA_OWNED); 1360 allocated_pv = uma_zalloc(pvzone, M_NOWAIT); 1361 if (allocated_pv != NULL) { 1362 pv_entry_count++; 1363 if (pv_entry_count > pv_entry_high_water) 1364 pagedaemon_wakeup(); 1365 else 1366 return (allocated_pv); 1367 } 1368 /* 1369 * Reclaim pv entries: At first, destroy mappings to inactive 1370 * pages. After that, if a pv entry is still needed, destroy 1371 * mappings to active pages. 1372 */ 1373 if (ratecheck(&lastprint, &printinterval)) 1374 printf("Approaching the limit on PV entries, " 1375 "increase the vm.pmap.shpgperproc tunable.\n"); 1376 vpq = &vm_page_queues[PQ_INACTIVE]; 1377retry: 1378 TAILQ_FOREACH(m, &vpq->pl, pageq) { 1379 if ((m->flags & PG_MARKER) != 0 || m->hold_count || m->busy) 1380 continue; 1381 TAILQ_FOREACH_SAFE(pv, &m->md.pv_list, pv_list, next_pv) { 1382 va = pv->pv_va; 1383 pmap = pv->pv_pmap; 1384 /* Avoid deadlock and lock recursion. 
*/ 1385 if (pmap > locked_pmap) 1386 PMAP_LOCK(pmap); 1387 else if (pmap != locked_pmap && !PMAP_TRYLOCK(pmap)) 1388 continue; 1389 pmap->pm_stats.resident_count--; 1390 pte = pmap_pte(pmap, va); 1391 KASSERT(pte != NULL, ("pte")); 1392 oldpte = *pte; 1393 if (is_kernel_pmap(pmap)) 1394 *pte = PTE_G; 1395 else 1396 *pte = 0; 1397 KASSERT(!pte_test(&oldpte, PTE_W), 1398 ("wired pte for unwired page")); 1399 if (m->md.pv_flags & PV_TABLE_REF) 1400 vm_page_aflag_set(m, PGA_REFERENCED); 1401 if (pte_test(&oldpte, PTE_D)) 1402 vm_page_dirty(m); 1403 pmap_invalidate_page(pmap, va); 1404 TAILQ_REMOVE(&pmap->pm_pvlist, pv, pv_plist); 1405 m->md.pv_list_count--; 1406 TAILQ_REMOVE(&m->md.pv_list, pv, pv_list); 1407 pmap_unuse_pt(pmap, va, pv->pv_ptem); 1408 if (pmap != locked_pmap) 1409 PMAP_UNLOCK(pmap); 1410 if (allocated_pv == NULL) 1411 allocated_pv = pv; 1412 else 1413 free_pv_entry(pv); 1414 } 1415 if (TAILQ_EMPTY(&m->md.pv_list)) { 1416 vm_page_aflag_clear(m, PGA_WRITEABLE); 1417 m->md.pv_flags &= ~(PV_TABLE_REF | PV_TABLE_MOD); 1418 } 1419 } 1420 if (allocated_pv == NULL) { 1421 if (vpq == &vm_page_queues[PQ_INACTIVE]) { 1422 vpq = &vm_page_queues[PQ_ACTIVE]; 1423 goto retry; 1424 } 1425 panic("get_pv_entry: increase the vm.pmap.shpgperproc tunable"); 1426 } 1427 return (allocated_pv); 1428} 1429 1430/* 1431 * Revision 1.370 1432 * 1433 * Move pmap_collect() out of the machine-dependent code, rename it 1434 * to reflect its new location, and add page queue and flag locking. 1435 * 1436 * Notes: (1) alpha, i386, and ia64 had identical implementations 1437 * of pmap_collect() in terms of machine-independent interfaces; 1438 * (2) sparc64 doesn't require it; (3) powerpc had it as a TODO. 1439 * 1440 * MIPS implementation was identical to alpha [Junos 8.2] 1441 */ 1442 1443/* 1444 * If it is the first entry on the list, it is actually 1445 * in the header and we must copy the following entry up 1446 * to the header. Otherwise we must search the list for 1447 * the entry. In either case we free the now unused entry. 1448 */ 1449 1450static pv_entry_t 1451pmap_pvh_remove(struct md_page *pvh, pmap_t pmap, vm_offset_t va) 1452{ 1453 pv_entry_t pv; 1454 1455 PMAP_LOCK_ASSERT(pmap, MA_OWNED); 1456 mtx_assert(&vm_page_queue_mtx, MA_OWNED); 1457 if (pvh->pv_list_count < pmap->pm_stats.resident_count) { 1458 TAILQ_FOREACH(pv, &pvh->pv_list, pv_list) { 1459 if (pmap == pv->pv_pmap && va == pv->pv_va) 1460 break; 1461 } 1462 } else { 1463 TAILQ_FOREACH(pv, &pmap->pm_pvlist, pv_plist) { 1464 if (va == pv->pv_va) 1465 break; 1466 } 1467 } 1468 if (pv != NULL) { 1469 TAILQ_REMOVE(&pvh->pv_list, pv, pv_list); 1470 pvh->pv_list_count--; 1471 TAILQ_REMOVE(&pmap->pm_pvlist, pv, pv_plist); 1472 } 1473 return (pv); 1474} 1475 1476static void 1477pmap_pvh_free(struct md_page *pvh, pmap_t pmap, vm_offset_t va) 1478{ 1479 pv_entry_t pv; 1480 1481 pv = pmap_pvh_remove(pvh, pmap, va); 1482 KASSERT(pv != NULL, ("pmap_pvh_free: pv not found, pa %lx va %lx", 1483 (u_long)VM_PAGE_TO_PHYS(member2struct(vm_page, md, pvh)), 1484 (u_long)va)); 1485 free_pv_entry(pv); 1486} 1487 1488static void 1489pmap_remove_entry(pmap_t pmap, vm_page_t m, vm_offset_t va) 1490{ 1491 1492 mtx_assert(&vm_page_queue_mtx, MA_OWNED); 1493 pmap_pvh_free(&m->md, pmap, va); 1494 if (TAILQ_EMPTY(&m->md.pv_list)) 1495 vm_page_aflag_clear(m, PGA_WRITEABLE); 1496} 1497 1498/* 1499 * Conditionally create a pv entry. 
1500 */ 1501static boolean_t 1502pmap_try_insert_pv_entry(pmap_t pmap, vm_page_t mpte, vm_offset_t va, 1503 vm_page_t m) 1504{ 1505 pv_entry_t pv; 1506 1507 PMAP_LOCK_ASSERT(pmap, MA_OWNED); 1508 mtx_assert(&vm_page_queue_mtx, MA_OWNED); 1509 if (pv_entry_count < pv_entry_high_water && 1510 (pv = uma_zalloc(pvzone, M_NOWAIT)) != NULL) { 1511 pv_entry_count++; 1512 pv->pv_va = va; 1513 pv->pv_pmap = pmap; 1514 pv->pv_ptem = mpte; 1515 TAILQ_INSERT_TAIL(&pmap->pm_pvlist, pv, pv_plist); 1516 TAILQ_INSERT_TAIL(&m->md.pv_list, pv, pv_list); 1517 m->md.pv_list_count++; 1518 return (TRUE); 1519 } else 1520 return (FALSE); 1521} 1522 1523/* 1524 * pmap_remove_pte: do the things to unmap a page in a process 1525 */ 1526static int 1527pmap_remove_pte(struct pmap *pmap, pt_entry_t *ptq, vm_offset_t va) 1528{ 1529 pt_entry_t oldpte; 1530 vm_page_t m; 1531 vm_paddr_t pa; 1532 1533 mtx_assert(&vm_page_queue_mtx, MA_OWNED); 1534 PMAP_LOCK_ASSERT(pmap, MA_OWNED); 1535 1536 oldpte = *ptq; 1537 if (is_kernel_pmap(pmap)) 1538 *ptq = PTE_G; 1539 else 1540 *ptq = 0; 1541 1542 if (pte_test(&oldpte, PTE_W)) 1543 pmap->pm_stats.wired_count -= 1; 1544 1545 pmap->pm_stats.resident_count -= 1; 1546 pa = TLBLO_PTE_TO_PA(oldpte); 1547 1548 if (page_is_managed(pa)) { 1549 m = PHYS_TO_VM_PAGE(pa); 1550 if (pte_test(&oldpte, PTE_D)) { 1551 KASSERT(!pte_test(&oldpte, PTE_RO), 1552 ("%s: modified page not writable: va: %p, pte: %#jx", 1553 __func__, (void *)va, (uintmax_t)oldpte)); 1554 vm_page_dirty(m); 1555 } 1556 if (m->md.pv_flags & PV_TABLE_REF) 1557 vm_page_aflag_set(m, PGA_REFERENCED); 1558 m->md.pv_flags &= ~(PV_TABLE_REF | PV_TABLE_MOD); 1559 1560 pmap_remove_entry(pmap, m, va); 1561 } 1562 return (pmap_unuse_pt(pmap, va, NULL)); 1563} 1564 1565/* 1566 * Remove a single page from a process address space 1567 */ 1568static void 1569pmap_remove_page(struct pmap *pmap, vm_offset_t va) 1570{ 1571 pt_entry_t *ptq; 1572 1573 mtx_assert(&vm_page_queue_mtx, MA_OWNED); 1574 PMAP_LOCK_ASSERT(pmap, MA_OWNED); 1575 ptq = pmap_pte(pmap, va); 1576 1577 /* 1578 * if there is no pte for this address, just skip it!!! 1579 */ 1580 if (!ptq || !pte_test(ptq, PTE_V)) { 1581 return; 1582 } 1583 1584 /* 1585 * Write back all caches from the page being destroyed 1586 */ 1587 mips_dcache_wbinv_range_index(va, PAGE_SIZE); 1588 1589 /* 1590 * get a local va for mappings for this pmap. 1591 */ 1592 (void)pmap_remove_pte(pmap, ptq, va); 1593 pmap_invalidate_page(pmap, va); 1594 1595 return; 1596} 1597 1598/* 1599 * Remove the given range of addresses from the specified map. 1600 * 1601 * It is assumed that the start and end are properly 1602 * rounded to the page size. 1603 */ 1604void 1605pmap_remove(struct pmap *pmap, vm_offset_t sva, vm_offset_t eva) 1606{ 1607 vm_offset_t va_next; 1608 pd_entry_t *pde, *pdpe; 1609 pt_entry_t *pte; 1610 1611 if (pmap == NULL) 1612 return; 1613 1614 if (pmap->pm_stats.resident_count == 0) 1615 return; 1616 1617 vm_page_lock_queues(); 1618 PMAP_LOCK(pmap); 1619 1620 /* 1621 * special handling of removing one page. a very common operation 1622 * and easy to short circuit some code. 
1623 */ 1624 if ((sva + PAGE_SIZE) == eva) { 1625 pmap_remove_page(pmap, sva); 1626 goto out; 1627 } 1628 for (; sva < eva; sva = va_next) { 1629 pdpe = pmap_segmap(pmap, sva); 1630#ifdef __mips_n64 1631 if (*pdpe == 0) { 1632 va_next = (sva + NBSEG) & ~SEGMASK; 1633 if (va_next < sva) 1634 va_next = eva; 1635 continue; 1636 } 1637#endif 1638 va_next = (sva + NBPDR) & ~PDRMASK; 1639 if (va_next < sva) 1640 va_next = eva; 1641 1642 pde = pmap_pdpe_to_pde(pdpe, sva); 1643 if (*pde == 0) 1644 continue; 1645 if (va_next > eva) 1646 va_next = eva; 1647 for (pte = pmap_pde_to_pte(pde, sva); sva != va_next; 1648 pte++, sva += PAGE_SIZE) { 1649 pmap_remove_page(pmap, sva); 1650 } 1651 } 1652out: 1653 vm_page_unlock_queues(); 1654 PMAP_UNLOCK(pmap); 1655} 1656 1657/* 1658 * Routine: pmap_remove_all 1659 * Function: 1660 * Removes this physical page from 1661 * all physical maps in which it resides. 1662 * Reflects back modify bits to the pager. 1663 * 1664 * Notes: 1665 * Original versions of this routine were very 1666 * inefficient because they iteratively called 1667 * pmap_remove (slow...) 1668 */ 1669 1670void 1671pmap_remove_all(vm_page_t m) 1672{ 1673 pv_entry_t pv; 1674 pt_entry_t *pte, tpte; 1675 1676 KASSERT((m->oflags & VPO_UNMANAGED) == 0, 1677 ("pmap_remove_all: page %p is not managed", m)); 1678 vm_page_lock_queues(); 1679 1680 if (m->md.pv_flags & PV_TABLE_REF) 1681 vm_page_aflag_set(m, PGA_REFERENCED); 1682 1683 while ((pv = TAILQ_FIRST(&m->md.pv_list)) != NULL) { 1684 PMAP_LOCK(pv->pv_pmap); 1685 1686 /* 1687 * If it's last mapping writeback all caches from 1688 * the page being destroyed 1689 */ 1690 if (m->md.pv_list_count == 1) 1691 mips_dcache_wbinv_range_index(pv->pv_va, PAGE_SIZE); 1692 1693 pv->pv_pmap->pm_stats.resident_count--; 1694 1695 pte = pmap_pte(pv->pv_pmap, pv->pv_va); 1696 1697 tpte = *pte; 1698 if (is_kernel_pmap(pv->pv_pmap)) 1699 *pte = PTE_G; 1700 else 1701 *pte = 0; 1702 1703 if (pte_test(&tpte, PTE_W)) 1704 pv->pv_pmap->pm_stats.wired_count--; 1705 1706 /* 1707 * Update the vm_page_t clean and reference bits. 1708 */ 1709 if (pte_test(&tpte, PTE_D)) { 1710 KASSERT(!pte_test(&tpte, PTE_RO), 1711 ("%s: modified page not writable: va: %p, pte: %#jx", 1712 __func__, (void *)pv->pv_va, (uintmax_t)tpte)); 1713 vm_page_dirty(m); 1714 } 1715 pmap_invalidate_page(pv->pv_pmap, pv->pv_va); 1716 1717 TAILQ_REMOVE(&pv->pv_pmap->pm_pvlist, pv, pv_plist); 1718 TAILQ_REMOVE(&m->md.pv_list, pv, pv_list); 1719 m->md.pv_list_count--; 1720 pmap_unuse_pt(pv->pv_pmap, pv->pv_va, pv->pv_ptem); 1721 PMAP_UNLOCK(pv->pv_pmap); 1722 free_pv_entry(pv); 1723 } 1724 1725 vm_page_aflag_clear(m, PGA_WRITEABLE); 1726 m->md.pv_flags &= ~(PV_TABLE_REF | PV_TABLE_MOD); 1727 vm_page_unlock_queues(); 1728} 1729 1730/* 1731 * Set the physical protection on the 1732 * specified range of this map as requested. 
1733 */ 1734void 1735pmap_protect(pmap_t pmap, vm_offset_t sva, vm_offset_t eva, vm_prot_t prot) 1736{ 1737 pt_entry_t *pte; 1738 pd_entry_t *pde, *pdpe; 1739 vm_offset_t va_next; 1740 1741 if (pmap == NULL) 1742 return; 1743 1744 if ((prot & VM_PROT_READ) == VM_PROT_NONE) { 1745 pmap_remove(pmap, sva, eva); 1746 return; 1747 } 1748 if (prot & VM_PROT_WRITE) 1749 return; 1750 1751 vm_page_lock_queues(); 1752 PMAP_LOCK(pmap); 1753 for (; sva < eva; sva = va_next) { 1754 pt_entry_t pbits; 1755 vm_page_t m; 1756 vm_paddr_t pa; 1757 1758 pdpe = pmap_segmap(pmap, sva); 1759#ifdef __mips_n64 1760 if (*pdpe == 0) { 1761 va_next = (sva + NBSEG) & ~SEGMASK; 1762 if (va_next < sva) 1763 va_next = eva; 1764 continue; 1765 } 1766#endif 1767 va_next = (sva + NBPDR) & ~PDRMASK; 1768 if (va_next < sva) 1769 va_next = eva; 1770 1771 pde = pmap_pdpe_to_pde(pdpe, sva); 1772 if (pde == NULL || *pde == NULL) 1773 continue; 1774 if (va_next > eva) 1775 va_next = eva; 1776 1777 for (pte = pmap_pde_to_pte(pde, sva); sva != va_next; pte++, 1778 sva += PAGE_SIZE) { 1779 1780 /* Skip invalid PTEs */ 1781 if (!pte_test(pte, PTE_V)) 1782 continue; 1783 pbits = *pte; 1784 pa = TLBLO_PTE_TO_PA(pbits); 1785 if (page_is_managed(pa) && pte_test(&pbits, PTE_D)) { 1786 m = PHYS_TO_VM_PAGE(pa); 1787 vm_page_dirty(m); 1788 m->md.pv_flags &= ~PV_TABLE_MOD; 1789 } 1790 pte_clear(&pbits, PTE_D); 1791 pte_set(&pbits, PTE_RO); 1792 1793 if (pbits != *pte) { 1794 *pte = pbits; 1795 pmap_update_page(pmap, sva, pbits); 1796 } 1797 } 1798 } 1799 vm_page_unlock_queues(); 1800 PMAP_UNLOCK(pmap); 1801} 1802 1803/* 1804 * Insert the given physical page (p) at 1805 * the specified virtual address (v) in the 1806 * target physical map with the protection requested. 1807 * 1808 * If specified, the page will be wired down, meaning 1809 * that the related pte can not be reclaimed. 1810 * 1811 * NB: This is the only routine which MAY NOT lazy-evaluate 1812 * or lose information. That is, this routine must actually 1813 * insert this page into the given map NOW. 1814 */ 1815void 1816pmap_enter(pmap_t pmap, vm_offset_t va, vm_prot_t access, vm_page_t m, 1817 vm_prot_t prot, boolean_t wired) 1818{ 1819 vm_paddr_t pa, opa; 1820 pt_entry_t *pte; 1821 pt_entry_t origpte, newpte; 1822 pv_entry_t pv; 1823 vm_page_t mpte, om; 1824 pt_entry_t rw = 0; 1825 1826 if (pmap == NULL) 1827 return; 1828 1829 va &= ~PAGE_MASK; 1830 KASSERT(va <= VM_MAX_KERNEL_ADDRESS, ("pmap_enter: toobig")); 1831 KASSERT((m->oflags & (VPO_UNMANAGED | VPO_BUSY)) != 0, 1832 ("pmap_enter: page %p is not busy", m)); 1833 1834 mpte = NULL; 1835 1836 vm_page_lock_queues(); 1837 PMAP_LOCK(pmap); 1838 1839 /* 1840 * In the case that a page table page is not resident, we are 1841 * creating it here. 1842 */ 1843 if (va < VM_MAXUSER_ADDRESS) { 1844 mpte = pmap_allocpte(pmap, va, M_WAITOK); 1845 } 1846 pte = pmap_pte(pmap, va); 1847 1848 /* 1849 * Page Directory table entry not valid, we need a new PT page 1850 */ 1851 if (pte == NULL) { 1852 panic("pmap_enter: invalid page directory, pdir=%p, va=%p", 1853 (void *)pmap->pm_segtab, (void *)va); 1854 } 1855 pa = VM_PAGE_TO_PHYS(m); 1856 om = NULL; 1857 origpte = *pte; 1858 opa = TLBLO_PTE_TO_PA(origpte); 1859 1860 /* 1861 * Mapping has not changed, must be protection or wiring change. 1862 */ 1863 if (pte_test(&origpte, PTE_V) && opa == pa) { 1864 /* 1865 * Wiring change, just update stats. We don't worry about 1866 * wiring PT pages as they remain resident as long as there 1867 * are valid mappings in them. 
Hence, if a user page is 1868 * wired, the PT page will be also. 1869 */ 1870 if (wired && !pte_test(&origpte, PTE_W)) 1871 pmap->pm_stats.wired_count++; 1872 else if (!wired && pte_test(&origpte, PTE_W)) 1873 pmap->pm_stats.wired_count--; 1874 1875 KASSERT(!pte_test(&origpte, PTE_D | PTE_RO), 1876 ("%s: modified page not writable: va: %p, pte: %#jx", 1877 __func__, (void *)va, (uintmax_t)origpte)); 1878 1879 /* 1880 * Remove extra pte reference 1881 */ 1882 if (mpte) 1883 mpte->wire_count--; 1884 1885 if (page_is_managed(opa)) { 1886 om = m; 1887 } 1888 goto validate; 1889 } 1890 1891 pv = NULL; 1892 1893 /* 1894 * Mapping has changed, invalidate old range and fall through to 1895 * handle validating new mapping. 1896 */ 1897 if (opa) { 1898 if (pte_test(&origpte, PTE_W)) 1899 pmap->pm_stats.wired_count--; 1900 1901 if (page_is_managed(opa)) { 1902 om = PHYS_TO_VM_PAGE(opa); 1903 pv = pmap_pvh_remove(&om->md, pmap, va); 1904 } 1905 if (mpte != NULL) { 1906 mpte->wire_count--; 1907 KASSERT(mpte->wire_count > 0, 1908 ("pmap_enter: missing reference to page table page," 1909 " va: %p", (void *)va)); 1910 } 1911 } else 1912 pmap->pm_stats.resident_count++; 1913 1914 /* 1915 * Enter on the PV list if part of our managed memory. Note that we 1916 * raise IPL while manipulating pv_table since pmap_enter can be 1917 * called at interrupt time. 1918 */ 1919 if ((m->oflags & VPO_UNMANAGED) == 0) { 1920 KASSERT(va < kmi.clean_sva || va >= kmi.clean_eva, 1921 ("pmap_enter: managed mapping within the clean submap")); 1922 if (pv == NULL) 1923 pv = get_pv_entry(pmap); 1924 pv->pv_va = va; 1925 pv->pv_pmap = pmap; 1926 pv->pv_ptem = mpte; 1927 TAILQ_INSERT_TAIL(&pmap->pm_pvlist, pv, pv_plist); 1928 TAILQ_INSERT_TAIL(&m->md.pv_list, pv, pv_list); 1929 m->md.pv_list_count++; 1930 } else if (pv != NULL) 1931 free_pv_entry(pv); 1932 1933 /* 1934 * Increment counters 1935 */ 1936 if (wired) 1937 pmap->pm_stats.wired_count++; 1938 1939validate: 1940 if ((access & VM_PROT_WRITE) != 0) 1941 m->md.pv_flags |= PV_TABLE_MOD | PV_TABLE_REF; 1942 rw = init_pte_prot(va, m, prot); 1943 1944#ifdef PMAP_DEBUG 1945 printf("pmap_enter: va: %p -> pa: %p\n", (void *)va, (void *)pa); 1946#endif 1947 /* 1948 * Now validate mapping with desired protection/wiring. 1949 */ 1950 newpte = TLBLO_PA_TO_PFN(pa) | rw | PTE_V; 1951 1952 if (is_cacheable_mem(pa)) 1953 newpte |= PTE_C_CACHE; 1954 else 1955 newpte |= PTE_C_UNCACHED; 1956 1957 if (wired) 1958 newpte |= PTE_W; 1959 1960 if (is_kernel_pmap(pmap)) 1961 newpte |= PTE_G; 1962 1963 /* 1964 * if the mapping or permission bits are different, we need to 1965 * update the pte. 1966 */ 1967 if (origpte != newpte) { 1968 if (pte_test(&origpte, PTE_V)) { 1969 *pte = newpte; 1970 if (page_is_managed(opa) && (opa != pa)) { 1971 if (om->md.pv_flags & PV_TABLE_REF) 1972 vm_page_aflag_set(om, PGA_REFERENCED); 1973 om->md.pv_flags &= 1974 ~(PV_TABLE_REF | PV_TABLE_MOD); 1975 } 1976 if (pte_test(&origpte, PTE_D)) { 1977 KASSERT(!pte_test(&origpte, PTE_RO), 1978 ("pmap_enter: modified page not writable:" 1979 " va: %p, pte: %#jx", (void *)va, (uintmax_t)origpte)); 1980 if (page_is_managed(opa)) 1981 vm_page_dirty(om); 1982 } 1983 if (page_is_managed(opa) && 1984 TAILQ_EMPTY(&om->md.pv_list)) 1985 vm_page_aflag_clear(om, PGA_WRITEABLE); 1986 } else { 1987 *pte = newpte; 1988 } 1989 } 1990 pmap_update_page(pmap, va, newpte); 1991 1992 /* 1993 * Sync I & D caches for executable pages. Do this only if the 1994 * target pmap belongs to the current process. 
Otherwise, an 1995 * unresolvable TLB miss may occur. 1996 */ 1997 if (!is_kernel_pmap(pmap) && (pmap == &curproc->p_vmspace->vm_pmap) && 1998 (prot & VM_PROT_EXECUTE)) { 1999 mips_icache_sync_range(va, PAGE_SIZE); 2000 mips_dcache_wbinv_range(va, PAGE_SIZE); 2001 } 2002 vm_page_unlock_queues(); 2003 PMAP_UNLOCK(pmap); 2004} 2005 2006/* 2007 * this code makes some *MAJOR* assumptions: 2008 * 1. Current pmap & pmap exists. 2009 * 2. Not wired. 2010 * 3. Read access. 2011 * 4. No page table pages. 2012 * but is *MUCH* faster than pmap_enter... 2013 */ 2014 2015void 2016pmap_enter_quick(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot) 2017{ 2018 2019 vm_page_lock_queues(); 2020 PMAP_LOCK(pmap); 2021 (void)pmap_enter_quick_locked(pmap, va, m, prot, NULL); 2022 vm_page_unlock_queues(); 2023 PMAP_UNLOCK(pmap); 2024} 2025 2026static vm_page_t 2027pmap_enter_quick_locked(pmap_t pmap, vm_offset_t va, vm_page_t m, 2028 vm_prot_t prot, vm_page_t mpte) 2029{ 2030 pt_entry_t *pte; 2031 vm_paddr_t pa; 2032 2033 KASSERT(va < kmi.clean_sva || va >= kmi.clean_eva || 2034 (m->oflags & VPO_UNMANAGED) != 0, 2035 ("pmap_enter_quick_locked: managed mapping within the clean submap")); 2036 mtx_assert(&vm_page_queue_mtx, MA_OWNED); 2037 PMAP_LOCK_ASSERT(pmap, MA_OWNED); 2038 2039 /* 2040 * In the case that a page table page is not resident, we are 2041 * creating it here. 2042 */ 2043 if (va < VM_MAXUSER_ADDRESS) { 2044 pd_entry_t *pde; 2045 unsigned ptepindex; 2046 2047 /* 2048 * Calculate pagetable page index 2049 */ 2050 ptepindex = pmap_pde_pindex(va); 2051 if (mpte && (mpte->pindex == ptepindex)) { 2052 mpte->wire_count++; 2053 } else { 2054 /* 2055 * Get the page directory entry 2056 */ 2057 pde = pmap_pde(pmap, va); 2058 2059 /* 2060 * If the page table page is mapped, we just 2061 * increment the hold count, and activate it. 2062 */ 2063 if (pde && *pde != 0) { 2064 if (pmap->pm_ptphint && 2065 (pmap->pm_ptphint->pindex == ptepindex)) { 2066 mpte = pmap->pm_ptphint; 2067 } else { 2068 mpte = PHYS_TO_VM_PAGE( 2069 MIPS_DIRECT_TO_PHYS(*pde)); 2070 pmap->pm_ptphint = mpte; 2071 } 2072 mpte->wire_count++; 2073 } else { 2074 mpte = _pmap_allocpte(pmap, ptepindex, 2075 M_NOWAIT); 2076 if (mpte == NULL) 2077 return (mpte); 2078 } 2079 } 2080 } else { 2081 mpte = NULL; 2082 } 2083 2084 pte = pmap_pte(pmap, va); 2085 if (pte_test(pte, PTE_V)) { 2086 if (mpte != NULL) { 2087 mpte->wire_count--; 2088 mpte = NULL; 2089 } 2090 return (mpte); 2091 } 2092 2093 /* 2094 * Enter on the PV list if part of our managed memory. 2095 */ 2096 if ((m->oflags & VPO_UNMANAGED) == 0 && 2097 !pmap_try_insert_pv_entry(pmap, mpte, va, m)) { 2098 if (mpte != NULL) { 2099 pmap_unwire_pte_hold(pmap, va, mpte); 2100 mpte = NULL; 2101 } 2102 return (mpte); 2103 } 2104 2105 /* 2106 * Increment counters 2107 */ 2108 pmap->pm_stats.resident_count++; 2109 2110 pa = VM_PAGE_TO_PHYS(m); 2111 2112 /* 2113 * Now validate mapping with RO protection 2114 */ 2115 *pte = TLBLO_PA_TO_PFN(pa) | PTE_V; 2116 2117 if (is_cacheable_mem(pa)) 2118 *pte |= PTE_C_CACHE; 2119 else 2120 *pte |= PTE_C_UNCACHED; 2121 2122 if (is_kernel_pmap(pmap)) 2123 *pte |= PTE_G; 2124 else { 2125 *pte |= PTE_RO; 2126 /* 2127 * Sync I & D caches. Do this only if the target pmap 2128 * belongs to the current process. Otherwise, an 2129 * unresolvable TLB miss may occur. 
*/ 2130 if (pmap == &curproc->p_vmspace->vm_pmap) { 2131 va &= ~PAGE_MASK; 2132 mips_icache_sync_range(va, PAGE_SIZE); 2133 mips_dcache_wbinv_range(va, PAGE_SIZE); 2134 } 2135 } 2136 return (mpte); 2137} 2138 2139/* 2140 * Make a temporary mapping for a physical address. This is only intended 2141 * to be used for panic dumps. 2142 * 2143 * Use XKPHYS for 64 bit, and KSEG0 where possible for 32 bit. 2144 */ 2145void * 2146pmap_kenter_temporary(vm_paddr_t pa, int i) 2147{ 2148 vm_offset_t va; 2149 2150 if (i != 0) 2151 printf("%s: ERROR!!! More than one page of virtual address mapping not supported\n", 2152 __func__); 2153 2154 if (MIPS_DIRECT_MAPPABLE(pa)) { 2155 va = MIPS_PHYS_TO_DIRECT(pa); 2156 } else { 2157#ifndef __mips_n64 /* XXX : to be converted to new style */ 2158 int cpu; 2159 register_t intr; 2160 struct local_sysmaps *sysm; 2161 pt_entry_t *pte, npte; 2162 2163 /* If this is used other than for dumps, we may need to leave 2164 * interrupts disabled on return. If crash dumps don't work when 2165 * we get to this point, we might want to consider this (leaving things 2166 * disabled as a starting point ;-) 2167 */ 2168 intr = intr_disable(); 2169 cpu = PCPU_GET(cpuid); 2170 sysm = &sysmap_lmem[cpu]; 2171 /* Since this is for the debugger, no locks or any other fun */ 2172 npte = TLBLO_PA_TO_PFN(pa) | PTE_D | PTE_V | PTE_G | PTE_W | PTE_C_CACHE; 2173 pte = pmap_pte(kernel_pmap, sysm->base); 2174 *pte = npte; 2175 sysm->valid1 = 1; 2176 pmap_update_page(kernel_pmap, sysm->base, npte); 2177 va = sysm->base; 2178 intr_restore(intr); 2179#endif 2180 } 2181 return ((void *)va); 2182} 2183 2184void 2185pmap_kenter_temporary_free(vm_paddr_t pa) 2186{ 2187#ifndef __mips_n64 /* XXX : to be converted to new style */ 2188 int cpu; 2189 register_t intr; 2190 struct local_sysmaps *sysm; 2191#endif 2192 2193 if (MIPS_DIRECT_MAPPABLE(pa)) { 2194 /* nothing to do for this case */ 2195 return; 2196 } 2197#ifndef __mips_n64 /* XXX : to be converted to new style */ 2198 cpu = PCPU_GET(cpuid); 2199 sysm = &sysmap_lmem[cpu]; 2200 if (sysm->valid1) { 2201 pt_entry_t *pte; 2202 2203 intr = intr_disable(); 2204 pte = pmap_pte(kernel_pmap, sysm->base); 2205 *pte = PTE_G; 2206 pmap_invalidate_page(kernel_pmap, sysm->base); 2207 intr_restore(intr); 2208 sysm->valid1 = 0; 2209 } 2210#endif 2211} 2212 2213/* 2214 * Moved the code to Machine Independent 2215 * vm_map_pmap_enter() 2216 */ 2217 2218/* 2219 * Maps a sequence of resident pages belonging to the same object. 2220 * The sequence begins with the given page m_start. This page is 2221 * mapped at the given virtual address start. Each subsequent page is 2222 * mapped at a virtual address that is offset from start by the same 2223 * amount as the page is offset from m_start within the object. The 2224 * last page in the sequence is the page with the largest offset from 2225 * m_start that can be mapped at a virtual address less than the given 2226 * virtual address end. Not every virtual page between start and end 2227 * is mapped; only those for which a resident page exists with the 2228 * corresponding offset from m_start are mapped.
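 *
 * As an illustrative sketch (hypothetical values, mirroring the loop
 * below): each resident page m in the run is entered at
 *
 *	va = start + ptoa(m->pindex - m_start->pindex);
 *
 * so a page three slots past m_start lands at start + 3 * PAGE_SIZE,
 * and object offsets with no resident page are simply skipped.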
2229 */ 2230void 2231pmap_enter_object(pmap_t pmap, vm_offset_t start, vm_offset_t end, 2232 vm_page_t m_start, vm_prot_t prot) 2233{ 2234 vm_page_t m, mpte; 2235 vm_pindex_t diff, psize; 2236 2237 VM_OBJECT_LOCK_ASSERT(m_start->object, MA_OWNED); 2238 psize = atop(end - start); 2239 mpte = NULL; 2240 m = m_start; 2241 vm_page_lock_queues(); 2242 PMAP_LOCK(pmap); 2243 while (m != NULL && (diff = m->pindex - m_start->pindex) < psize) { 2244 mpte = pmap_enter_quick_locked(pmap, start + ptoa(diff), m, 2245 prot, mpte); 2246 m = TAILQ_NEXT(m, listq); 2247 } 2248 vm_page_unlock_queues(); 2249 PMAP_UNLOCK(pmap); 2250} 2251 2252/* 2253 * pmap_object_init_pt preloads the ptes for a given object 2254 * into the specified pmap. This eliminates the blast of soft 2255 * faults on process startup and immediately after an mmap. 2256 */ 2257void 2258pmap_object_init_pt(pmap_t pmap, vm_offset_t addr, 2259 vm_object_t object, vm_pindex_t pindex, vm_size_t size) 2260{ 2261 VM_OBJECT_LOCK_ASSERT(object, MA_OWNED); 2262 KASSERT(object->type == OBJT_DEVICE || object->type == OBJT_SG, 2263 ("pmap_object_init_pt: non-device object")); 2264} 2265 2266/* 2267 * Routine: pmap_change_wiring 2268 * Function: Change the wiring attribute for a map/virtual-address 2269 * pair. 2270 * In/out conditions: 2271 * The mapping must already exist in the pmap. 2272 */ 2273void 2274pmap_change_wiring(pmap_t pmap, vm_offset_t va, boolean_t wired) 2275{ 2276 pt_entry_t *pte; 2277 2278 if (pmap == NULL) 2279 return; 2280 2281 PMAP_LOCK(pmap); 2282 pte = pmap_pte(pmap, va); 2283 2284 if (wired && !pte_test(pte, PTE_W)) 2285 pmap->pm_stats.wired_count++; 2286 else if (!wired && pte_test(pte, PTE_W)) 2287 pmap->pm_stats.wired_count--; 2288 2289 /* 2290 * Wiring is not a hardware characteristic so there is no need to 2291 * invalidate TLB. 2292 */ 2293 if (wired) 2294 pte_set(pte, PTE_W); 2295 else 2296 pte_clear(pte, PTE_W); 2297 PMAP_UNLOCK(pmap); 2298} 2299 2300/* 2301 * Copy the range specified by src_addr/len 2302 * from the source map to the range dst_addr/len 2303 * in the destination map. 2304 * 2305 * This routine is only advisory and need not do anything. 2306 */ 2307 2308void 2309pmap_copy(pmap_t dst_pmap, pmap_t src_pmap, vm_offset_t dst_addr, 2310 vm_size_t len, vm_offset_t src_addr) 2311{ 2312} 2313 2314/* 2315 * pmap_zero_page zeros the specified hardware page by mapping 2316 * the page into KVM and using bzero to clear its contents. 2317 * 2318 * Use XKPHYS for 64 bit, and KSEG0 where possible for 32 bit. 2319 */ 2320void 2321pmap_zero_page(vm_page_t m) 2322{ 2323 vm_offset_t va; 2324 vm_paddr_t phys = VM_PAGE_TO_PHYS(m); 2325 2326 if (MIPS_DIRECT_MAPPABLE(phys)) { 2327 va = MIPS_PHYS_TO_DIRECT(phys); 2328 bzero((caddr_t)va, PAGE_SIZE); 2329 mips_dcache_wbinv_range(va, PAGE_SIZE); 2330 } else { 2331 va = pmap_lmem_map1(phys); 2332 bzero((caddr_t)va, PAGE_SIZE); 2333 mips_dcache_wbinv_range(va, PAGE_SIZE); 2334 pmap_lmem_unmap(); 2335 } 2336} 2337 2338/* 2339 * pmap_zero_page_area zeros the specified hardware page by mapping 2340 * the page into KVM and using bzero to clear its contents. 2341 * 2342 * off and size may not cover an area beyond a single hardware page. 
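 *
 * As a hedged usage sketch (hypothetical caller, not code from this
 * file), a caller clearing only the tail of a page might issue
 *
 *	pmap_zero_page_area(m, off, PAGE_SIZE - off);
 *
 * with 0 <= off < PAGE_SIZE, so that off + size stays within the page.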
2343 */ 2344void 2345pmap_zero_page_area(vm_page_t m, int off, int size) 2346{ 2347 vm_offset_t va; 2348 vm_paddr_t phys = VM_PAGE_TO_PHYS(m); 2349 2350 if (MIPS_DIRECT_MAPPABLE(phys)) { 2351 va = MIPS_PHYS_TO_DIRECT(phys); 2352 bzero((char *)(caddr_t)va + off, size); 2353 mips_dcache_wbinv_range(va + off, size); 2354 } else { 2355 va = pmap_lmem_map1(phys); 2356 bzero((char *)va + off, size); 2357 mips_dcache_wbinv_range(va + off, size); 2358 pmap_lmem_unmap(); 2359 } 2360} 2361 2362void 2363pmap_zero_page_idle(vm_page_t m) 2364{ 2365 vm_offset_t va; 2366 vm_paddr_t phys = VM_PAGE_TO_PHYS(m); 2367 2368 if (MIPS_DIRECT_MAPPABLE(phys)) { 2369 va = MIPS_PHYS_TO_DIRECT(phys); 2370 bzero((caddr_t)va, PAGE_SIZE); 2371 mips_dcache_wbinv_range(va, PAGE_SIZE); 2372 } else { 2373 va = pmap_lmem_map1(phys); 2374 bzero((caddr_t)va, PAGE_SIZE); 2375 mips_dcache_wbinv_range(va, PAGE_SIZE); 2376 pmap_lmem_unmap(); 2377 } 2378} 2379 2380/* 2381 * pmap_copy_page copies the specified (machine independent) 2382 * page by mapping the page into virtual memory and using 2383 * bcopy to copy the page, one machine dependent page at a 2384 * time. 2385 * 2386 * Use XKPHYS for 64 bit, and KSEG0 where possible for 32 bit. 2387 */ 2388void 2389pmap_copy_page(vm_page_t src, vm_page_t dst) 2390{ 2391 vm_offset_t va_src, va_dst; 2392 vm_paddr_t phys_src = VM_PAGE_TO_PHYS(src); 2393 vm_paddr_t phys_dst = VM_PAGE_TO_PHYS(dst); 2394 2395 if (MIPS_DIRECT_MAPPABLE(phys_src) && MIPS_DIRECT_MAPPABLE(phys_dst)) { 2396 /* easy case, all can be accessed via KSEG0 */ 2397 /* 2398 * Flush all caches for VA that are mapped to this page 2399 * to make sure that data in SDRAM is up to date 2400 */ 2401 pmap_flush_pvcache(src); 2402 mips_dcache_wbinv_range_index( 2403 MIPS_PHYS_TO_DIRECT(phys_dst), PAGE_SIZE); 2404 va_src = MIPS_PHYS_TO_DIRECT(phys_src); 2405 va_dst = MIPS_PHYS_TO_DIRECT(phys_dst); 2406 bcopy((caddr_t)va_src, (caddr_t)va_dst, PAGE_SIZE); 2407 mips_dcache_wbinv_range(va_dst, PAGE_SIZE); 2408 } else { 2409 va_src = pmap_lmem_map2(phys_src, phys_dst); 2410 va_dst = va_src + PAGE_SIZE; 2411 bcopy((void *)va_src, (void *)va_dst, PAGE_SIZE); 2412 mips_dcache_wbinv_range(va_dst, PAGE_SIZE); 2413 pmap_lmem_unmap(); 2414 } 2415} 2416 2417/* 2418 * Returns true if the pmap's pv is one of the first 2419 * 16 pvs linked to from this page. This count may 2420 * be changed upwards or downwards in the future; it 2421 * is only necessary that true be returned for a small 2422 * subset of pmaps for proper page aging. 2423 */ 2424boolean_t 2425pmap_page_exists_quick(pmap_t pmap, vm_page_t m) 2426{ 2427 pv_entry_t pv; 2428 int loops = 0; 2429 boolean_t rv; 2430 2431 KASSERT((m->oflags & VPO_UNMANAGED) == 0, 2432 ("pmap_page_exists_quick: page %p is not managed", m)); 2433 rv = FALSE; 2434 vm_page_lock_queues(); 2435 TAILQ_FOREACH(pv, &m->md.pv_list, pv_list) { 2436 if (pv->pv_pmap == pmap) { 2437 rv = TRUE; 2438 break; 2439 } 2440 loops++; 2441 if (loops >= 16) 2442 break; 2443 } 2444 vm_page_unlock_queues(); 2445 return (rv); 2446} 2447 2448/* 2449 * Remove all pages from the specified address space; 2450 * this aids process exit speed. Also, this code 2451 * is special cased for the current process only, but 2452 * can have the more generic (and slightly slower) 2453 * mode enabled. This is much faster than pmap_remove 2454 * in the case of running down an entire address space.
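 *
 * As a hedged usage sketch (hypothetical caller, not code from this
 * file), the expected use is tearing down the exiting process's own
 * address space:
 *
 *	pmap_remove_pages(vmspace_pmap(curthread->td_proc->p_vmspace));
 *
 * a pmap other than the current one is rejected with a warning below.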
2455 */ 2456void 2457pmap_remove_pages(pmap_t pmap) 2458{ 2459 pt_entry_t *pte, tpte; 2460 pv_entry_t pv, npv; 2461 vm_page_t m; 2462 2463 if (pmap != vmspace_pmap(curthread->td_proc->p_vmspace)) { 2464 printf("warning: pmap_remove_pages called with non-current pmap\n"); 2465 return; 2466 } 2467 vm_page_lock_queues(); 2468 PMAP_LOCK(pmap); 2469 for (pv = TAILQ_FIRST(&pmap->pm_pvlist); pv != NULL; pv = npv) { 2470 2471 pte = pmap_pte(pv->pv_pmap, pv->pv_va); 2472 if (!pte_test(pte, PTE_V)) 2473 panic("pmap_remove_pages: page on pm_pvlist has no pte"); 2474 tpte = *pte; 2475 2476/* 2477 * We cannot remove wired pages from a process' mapping at this time 2478 */ 2479 if (pte_test(&tpte, PTE_W)) { 2480 npv = TAILQ_NEXT(pv, pv_plist); 2481 continue; 2482 } 2483 *pte = is_kernel_pmap(pmap) ? PTE_G : 0; 2484 2485 m = PHYS_TO_VM_PAGE(TLBLO_PTE_TO_PA(tpte)); 2486 KASSERT(m != NULL, 2487 ("pmap_remove_pages: bad tpte %#jx", (uintmax_t)tpte)); 2488 2489 pv->pv_pmap->pm_stats.resident_count--; 2490 2491 /* 2492 * Update the vm_page_t clean and reference bits. 2493 */ 2494 if (pte_test(&tpte, PTE_D)) { 2495 vm_page_dirty(m); 2496 } 2497 npv = TAILQ_NEXT(pv, pv_plist); 2498 TAILQ_REMOVE(&pv->pv_pmap->pm_pvlist, pv, pv_plist); 2499 2500 m->md.pv_list_count--; 2501 TAILQ_REMOVE(&m->md.pv_list, pv, pv_list); 2502 if (TAILQ_FIRST(&m->md.pv_list) == NULL) { 2503 vm_page_aflag_clear(m, PGA_WRITEABLE); 2504 } 2505 pmap_unuse_pt(pv->pv_pmap, pv->pv_va, pv->pv_ptem); 2506 free_pv_entry(pv); 2507 } 2508 pmap_invalidate_all(pmap); 2509 PMAP_UNLOCK(pmap); 2510 vm_page_unlock_queues(); 2511} 2512 2513/* 2514 * pmap_testbit tests bits in pte's 2515 * note that the testbit/changebit routines are inline, 2516 * and a lot of things compile-time evaluate. 2517 */ 2518static boolean_t 2519pmap_testbit(vm_page_t m, int bit) 2520{ 2521 pv_entry_t pv; 2522 pt_entry_t *pte; 2523 boolean_t rv = FALSE; 2524 2525 if (m->oflags & VPO_UNMANAGED) 2526 return (rv); 2527 2528 if (TAILQ_FIRST(&m->md.pv_list) == NULL) 2529 return (rv); 2530 2531 mtx_assert(&vm_page_queue_mtx, MA_OWNED); 2532 TAILQ_FOREACH(pv, &m->md.pv_list, pv_list) { 2533 PMAP_LOCK(pv->pv_pmap); 2534 pte = pmap_pte(pv->pv_pmap, pv->pv_va); 2535 rv = pte_test(pte, bit); 2536 PMAP_UNLOCK(pv->pv_pmap); 2537 if (rv) 2538 break; 2539 } 2540 return (rv); 2541} 2542 2543/* 2544 * this routine is used to clear dirty bits in ptes 2545 */ 2546static __inline void 2547pmap_changebit(vm_page_t m, int bit, boolean_t setem) 2548{ 2549 pv_entry_t pv; 2550 pt_entry_t *pte; 2551 2552 if (m->oflags & VPO_UNMANAGED) 2553 return; 2554 2555 mtx_assert(&vm_page_queue_mtx, MA_OWNED); 2556 /* 2557 * Loop over all current mappings setting/clearing as appropriate. If 2558 * setting RO do we need to clear the VAC? 2559 */ 2560 TAILQ_FOREACH(pv, &m->md.pv_list, pv_list) { 2561 PMAP_LOCK(pv->pv_pmap); 2562 pte = pmap_pte(pv->pv_pmap, pv->pv_va); 2563 if (setem) { 2564 *pte |= bit; 2565 pmap_update_page(pv->pv_pmap, pv->pv_va, *pte); 2566 } else { 2567 pt_entry_t pbits = *pte; 2568 2569 if (pbits & bit) { 2570 if (bit == PTE_D) { 2571 if (pbits & PTE_D) 2572 vm_page_dirty(m); 2573 *pte = (pbits & ~PTE_D) | PTE_RO; 2574 } else { 2575 *pte = pbits & ~bit; 2576 } 2577 pmap_update_page(pv->pv_pmap, pv->pv_va, *pte); 2578 } 2579 } 2580 PMAP_UNLOCK(pv->pv_pmap); 2581 } 2582 if (!setem && bit == PTE_D) 2583 vm_page_aflag_clear(m, PGA_WRITEABLE); 2584} 2585 2586/* 2587 * pmap_page_wired_mappings: 2588 * 2589 * Return the number of managed mappings to the given physical page 2590 * that are wired.
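 *
 * As a hedged usage sketch (hypothetical caller, not code from this
 * file), a caller deciding whether a page may be reclaimed could test
 *
 *	if (pmap_page_wired_mappings(m) == 0)
 *		... no mapping pins the page in memory ...
 *
 * unmanaged pages are not tracked by pv entries and always report 0.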
2591 */ 2592int 2593pmap_page_wired_mappings(vm_page_t m) 2594{ 2595 pv_entry_t pv; 2596 pmap_t pmap; 2597 pt_entry_t *pte; 2598 int count; 2599 2600 count = 0; 2601 if ((m->oflags & VPO_UNMANAGED) != 0) 2602 return (count); 2603 vm_page_lock_queues(); 2604 TAILQ_FOREACH(pv, &m->md.pv_list, pv_list) { 2605 pmap = pv->pv_pmap; 2606 PMAP_LOCK(pmap); 2607 pte = pmap_pte(pmap, pv->pv_va); 2608 if (pte_test(pte, PTE_W)) 2609 count++; 2610 PMAP_UNLOCK(pmap); 2611 } 2612 vm_page_unlock_queues(); 2613 return (count); 2614} 2615 2616/* 2617 * Clear the write and modified bits in each of the given page's mappings. 2618 */ 2619void 2620pmap_remove_write(vm_page_t m) 2621{ 2622 pv_entry_t pv, npv; 2623 vm_offset_t va; 2624 pt_entry_t *pte; 2625 2626 KASSERT((m->oflags & VPO_UNMANAGED) == 0, 2627 ("pmap_remove_write: page %p is not managed", m)); 2628 2629 /* 2630 * If the page is not VPO_BUSY, then PGA_WRITEABLE cannot be set by 2631 * another thread while the object is locked. Thus, if PGA_WRITEABLE 2632 * is clear, no page table entries need updating. 2633 */ 2634 VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED); 2635 if ((m->oflags & VPO_BUSY) == 0 && 2636 (m->aflags & PGA_WRITEABLE) == 0) 2637 return; 2638 2639 /* 2640 * Loop over all current mappings setting/clearing as appropriate. 2641 */ 2642 vm_page_lock_queues(); 2643 for (pv = TAILQ_FIRST(&m->md.pv_list); pv; pv = npv) { 2644 npv = TAILQ_NEXT(pv, pv_plist); 2645 pte = pmap_pte(pv->pv_pmap, pv->pv_va); 2646 if (pte == NULL || !pte_test(pte, PTE_V)) 2647 panic("page on pm_pvlist has no pte"); 2648 2649 va = pv->pv_va; 2650 pmap_protect(pv->pv_pmap, va, va + PAGE_SIZE, 2651 VM_PROT_READ | VM_PROT_EXECUTE); 2652 } 2653 vm_page_aflag_clear(m, PGA_WRITEABLE); 2654 vm_page_unlock_queues(); 2655} 2656 2657/* 2658 * pmap_ts_referenced: 2659 * 2660 * Return the count of reference bits for a page, clearing all of them. 2661 */ 2662int 2663pmap_ts_referenced(vm_page_t m) 2664{ 2665 2666 KASSERT((m->oflags & VPO_UNMANAGED) == 0, 2667 ("pmap_ts_referenced: page %p is not managed", m)); 2668 if (m->md.pv_flags & PV_TABLE_REF) { 2669 vm_page_lock_queues(); 2670 m->md.pv_flags &= ~PV_TABLE_REF; 2671 vm_page_unlock_queues(); 2672 return (1); 2673 } 2674 return (0); 2675} 2676 2677/* 2678 * pmap_is_modified: 2679 * 2680 * Return whether or not the specified physical page was modified 2681 * in any physical maps. 2682 */ 2683boolean_t 2684pmap_is_modified(vm_page_t m) 2685{ 2686 boolean_t rv; 2687 2688 KASSERT((m->oflags & VPO_UNMANAGED) == 0, 2689 ("pmap_is_modified: page %p is not managed", m)); 2690 2691 /* 2692 * If the page is not VPO_BUSY, then PGA_WRITEABLE cannot be 2693 * concurrently set while the object is locked. Thus, if PGA_WRITEABLE 2694 * is clear, no PTEs can have PTE_D set. 2695 */ 2696 VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED); 2697 if ((m->oflags & VPO_BUSY) == 0 && 2698 (m->aflags & PGA_WRITEABLE) == 0) 2699 return (FALSE); 2700 vm_page_lock_queues(); 2701 if (m->md.pv_flags & PV_TABLE_MOD) 2702 rv = TRUE; 2703 else 2704 rv = pmap_testbit(m, PTE_D); 2705 vm_page_unlock_queues(); 2706 return (rv); 2707} 2708 2709/* N/C */ 2710 2711/* 2712 * pmap_is_prefaultable: 2713 * 2714 * Return whether or not the specified virtual address is eligible 2715 * for prefault.
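 *
 * As a hedged sketch of the intended use (hypothetical caller, not code
 * from this file), machine-independent fault handling might pair this
 * with pmap_enter_quick():
 *
 *	if (pmap_is_prefaultable(pmap, addr))
 *		pmap_enter_quick(pmap, addr, m, prot);
 *
 * i.e. prefault only where no PTE exists yet for the address.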
2716 */ 2717boolean_t 2718pmap_is_prefaultable(pmap_t pmap, vm_offset_t addr) 2719{ 2720 pd_entry_t *pde; 2721 pt_entry_t *pte; 2722 boolean_t rv; 2723 2724 rv = FALSE; 2725 PMAP_LOCK(pmap); 2726 pde = pmap_pde(pmap, addr); 2727 if (pde != NULL && *pde != 0) { 2728 pte = pmap_pde_to_pte(pde, addr); 2729 rv = (*pte == 0); 2730 } 2731 PMAP_UNLOCK(pmap); 2732 return (rv); 2733} 2734 2735/* 2736 * Clear the modify bits on the specified physical page. 2737 */ 2738void 2739pmap_clear_modify(vm_page_t m) 2740{ 2741 2742 KASSERT((m->oflags & VPO_UNMANAGED) == 0, 2743 ("pmap_clear_modify: page %p is not managed", m)); 2744 VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED); 2745 KASSERT((m->oflags & VPO_BUSY) == 0, 2746 ("pmap_clear_modify: page %p is busy", m)); 2747 2748 /* 2749 * If the page is not PGA_WRITEABLE, then no PTEs can have PTE_D set. 2750 * If the object containing the page is locked and the page is not 2751 * VPO_BUSY, then PGA_WRITEABLE cannot be concurrently set. 2752 */ 2753 if ((m->aflags & PGA_WRITEABLE) == 0) 2754 return; 2755 vm_page_lock_queues(); 2756 if (m->md.pv_flags & PV_TABLE_MOD) { 2757 pmap_changebit(m, PTE_D, FALSE); 2758 m->md.pv_flags &= ~PV_TABLE_MOD; 2759 } 2760 vm_page_unlock_queues(); 2761} 2762 2763/* 2764 * pmap_is_referenced: 2765 * 2766 * Return whether or not the specified physical page was referenced 2767 * in any physical maps. 2768 */ 2769boolean_t 2770pmap_is_referenced(vm_page_t m) 2771{ 2772 2773 KASSERT((m->oflags & VPO_UNMANAGED) == 0, 2774 ("pmap_is_referenced: page %p is not managed", m)); 2775 return ((m->md.pv_flags & PV_TABLE_REF) != 0); 2776} 2777 2778/* 2779 * pmap_clear_reference: 2780 * 2781 * Clear the reference bit on the specified physical page. 2782 */ 2783void 2784pmap_clear_reference(vm_page_t m) 2785{ 2786 2787 KASSERT((m->oflags & VPO_UNMANAGED) == 0, 2788 ("pmap_clear_reference: page %p is not managed", m)); 2789 vm_page_lock_queues(); 2790 if (m->md.pv_flags & PV_TABLE_REF) { 2791 m->md.pv_flags &= ~PV_TABLE_REF; 2792 } 2793 vm_page_unlock_queues(); 2794} 2795 2796/* 2797 * Miscellaneous support routines follow 2798 */ 2799 2807/* 2808 * Map a set of physical memory pages into the kernel virtual 2809 * address space. Return a pointer to where it is mapped. This 2810 * routine is intended to be used for mapping device memory, 2811 * NOT real memory. 2812 * 2813 * Use XKPHYS uncached for 64 bit, and KSEG1 where possible for 32 bit. 2814 */ 2815void * 2816pmap_mapdev(vm_paddr_t pa, vm_size_t size) 2817{ 2818 vm_offset_t va, tmpva, offset; 2819 2820 /* 2821 * KSEG1 maps only first 512M of phys address space. For 2822 * pa > 0x20000000 we should make a proper mapping using pmap_kenter.
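 *
 * As a hedged example (hypothetical device address): a request such as
 * pmap_mapdev(0x1c000000, PAGE_SIZE) falls below the 512M boundary and
 * returns the uncached direct-mapped alias, while anything above that
 * range takes the kmem_alloc_nofault()/pmap_kenter_attr() path below.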
2823 */ 2824 if (MIPS_DIRECT_MAPPABLE(pa + size - 1)) 2825 return ((void *)MIPS_PHYS_TO_DIRECT_UNCACHED(pa)); 2826 else { 2827 offset = pa & PAGE_MASK; 2828 size = roundup(size + offset, PAGE_SIZE); 2829 2830 va = kmem_alloc_nofault(kernel_map, size); 2831 if (!va) 2832 panic("pmap_mapdev: Couldn't alloc kernel virtual memory"); 2833 pa = trunc_page(pa); 2834 for (tmpva = va; size > 0;) { 2835 pmap_kenter_attr(tmpva, pa, PTE_C_UNCACHED); 2836 size -= PAGE_SIZE; 2837 tmpva += PAGE_SIZE; 2838 pa += PAGE_SIZE; 2839 } 2840 } 2841 2842 return ((void *)(va + offset)); 2843} 2844 2845void 2846pmap_unmapdev(vm_offset_t va, vm_size_t size) 2847{ 2848#ifndef __mips_n64 2849 vm_offset_t base, offset, tmpva; 2850 2851 /* If the address is within KSEG1 then there is nothing to do */ 2852 if (va >= MIPS_KSEG1_START && va <= MIPS_KSEG1_END) 2853 return; 2854 2855 base = trunc_page(va); 2856 offset = va & PAGE_MASK; 2857 size = roundup(size + offset, PAGE_SIZE); 2858 for (tmpva = base; tmpva < base + size; tmpva += PAGE_SIZE) 2859 pmap_kremove(tmpva); 2860 kmem_free(kernel_map, base, size); 2861#endif 2862} 2863 2864/* 2865 * perform the pmap work for mincore 2866 */ 2867int 2868pmap_mincore(pmap_t pmap, vm_offset_t addr, vm_paddr_t *locked_pa) 2869{ 2870 pt_entry_t *ptep, pte; 2871 vm_paddr_t pa; 2872 vm_page_t m; 2873 int val; 2874 boolean_t managed; 2875 2876 PMAP_LOCK(pmap); 2877retry: 2878 ptep = pmap_pte(pmap, addr); 2879 pte = (ptep != NULL) ? *ptep : 0; 2880 if (!pte_test(&pte, PTE_V)) { 2881 val = 0; 2882 goto out; 2883 } 2884 val = MINCORE_INCORE; 2885 if (pte_test(&pte, PTE_D)) 2886 val |= MINCORE_MODIFIED | MINCORE_MODIFIED_OTHER; 2887 pa = TLBLO_PTE_TO_PA(pte); 2888 managed = page_is_managed(pa); 2889 if (managed) { 2890 /* 2891 * This may falsely report the given address as 2892 * MINCORE_REFERENCED. Unfortunately, due to the lack of 2893 * per-PTE reference information, it is impossible to 2894 * determine if the address is MINCORE_REFERENCED. 2895 */ 2896 m = PHYS_TO_VM_PAGE(pa); 2897 if ((m->aflags & PGA_REFERENCED) != 0) 2898 val |= MINCORE_REFERENCED | MINCORE_REFERENCED_OTHER; 2899 } 2900 if ((val & (MINCORE_MODIFIED_OTHER | MINCORE_REFERENCED_OTHER)) != 2901 (MINCORE_MODIFIED_OTHER | MINCORE_REFERENCED_OTHER) && managed) { 2902 /* Ensure that "PHYS_TO_VM_PAGE(pa)->object" doesn't change. */ 2903 if (vm_page_pa_tryrelock(pmap, pa, locked_pa)) 2904 goto retry; 2905 } else 2906out: 2907 PA_UNLOCK_COND(*locked_pa); 2908 PMAP_UNLOCK(pmap); 2909 return (val); 2910} 2911 2912void 2913pmap_activate(struct thread *td) 2914{ 2915 pmap_t pmap, oldpmap; 2916 struct proc *p = td->td_proc; 2917 u_int cpuid; 2918 2919 critical_enter(); 2920 2921 pmap = vmspace_pmap(p->p_vmspace); 2922 oldpmap = PCPU_GET(curpmap); 2923 cpuid = PCPU_GET(cpuid); 2924 2925 if (oldpmap) 2926 CPU_CLR_ATOMIC(cpuid, &oldpmap->pm_active); 2927 CPU_SET_ATOMIC(cpuid, &pmap->pm_active); 2928 pmap_asid_alloc(pmap); 2929 if (td == curthread) { 2930 PCPU_SET(segbase, pmap->pm_segtab); 2931 mips_wr_entryhi(pmap->pm_asid[cpuid].asid); 2932 } 2933 2934 PCPU_SET(curpmap, pmap); 2935 critical_exit(); 2936} 2937 2938void 2939pmap_sync_icache(pmap_t pm, vm_offset_t va, vm_size_t sz) 2940{ 2941} 2942 2943/* 2944 * Increase the starting virtual address of the given mapping if a 2945 * different alignment might result in more superpage mappings. 
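 *
 * As a worked example (hypothetical numbers, assuming size is at least
 * NBSEG): with offset & SEGMASK equal to 0x3000 and a proposed *addr
 * whose low bits are 0x1000, *addr is advanced so that
 * (*addr & SEGMASK) == (offset & SEGMASK), letting whole segments of
 * the mapping line up with page table page boundaries.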
2946 */ 2947void 2948pmap_align_superpage(vm_object_t object, vm_ooffset_t offset, 2949 vm_offset_t *addr, vm_size_t size) 2950{ 2951 vm_offset_t superpage_offset; 2952 2953 if (size < NBSEG) 2954 return; 2955 if (object != NULL && (object->flags & OBJ_COLORED) != 0) 2956 offset += ptoa(object->pg_color); 2957 superpage_offset = offset & SEGMASK; 2958 if (size - ((NBSEG - superpage_offset) & SEGMASK) < NBSEG || 2959 (*addr & SEGMASK) == superpage_offset) 2960 return; 2961 if ((*addr & SEGMASK) < superpage_offset) 2962 *addr = (*addr & ~SEGMASK) + superpage_offset; 2963 else 2964 *addr = ((*addr + SEGMASK) & ~SEGMASK) + superpage_offset; 2965} 2966 2967/* 2968 * Increase the starting virtual address of the given mapping so 2969 * that it is aligned to not be the second page in a TLB entry. 2970 * This routine assumes that the length is appropriately-sized so 2971 * that the allocation does not share a TLB entry at all if required. 2972 */ 2973void 2974pmap_align_tlb(vm_offset_t *addr) 2975{ 2976 if ((*addr & PAGE_SIZE) == 0) 2977 return; 2978 *addr += PAGE_SIZE; 2979 return; 2980} 2981 2982#ifdef DDB 2983DB_SHOW_COMMAND(ptable, ddb_pid_dump) 2984{ 2985 pmap_t pmap; 2986 struct thread *td = NULL; 2987 struct proc *p; 2988 int i, j, k; 2989 vm_paddr_t pa; 2990 vm_offset_t va; 2991 2992 if (have_addr) { 2993 td = db_lookup_thread(addr, TRUE); 2994 if (td == NULL) { 2995 db_printf("Invalid pid or tid"); 2996 return; 2997 } 2998 p = td->td_proc; 2999 if (p->p_vmspace == NULL) { 3000 db_printf("No vmspace for process"); 3001 return; 3002 } 3003 pmap = vmspace_pmap(p->p_vmspace); 3004 } else 3005 pmap = kernel_pmap; 3006 3007 db_printf("pmap:%p segtab:%p asid:%x generation:%x\n", 3008 pmap, pmap->pm_segtab, pmap->pm_asid[0].asid, 3009 pmap->pm_asid[0].gen); 3010 for (i = 0; i < NPDEPG; i++) { 3011 pd_entry_t *pdpe; 3012 pt_entry_t *pde; 3013 pt_entry_t pte; 3014 3015 pdpe = (pd_entry_t *)pmap->pm_segtab[i]; 3016 if (pdpe == NULL) 3017 continue; 3018 db_printf("[%4d] %p\n", i, pdpe); 3019#ifdef __mips_n64 3020 for (j = 0; j < NPDEPG; j++) { 3021 pde = (pt_entry_t *)pdpe[j]; 3022 if (pde == NULL) 3023 continue; 3024 db_printf("\t[%4d] %p\n", j, pde); 3025#else 3026 { 3027 j = 0; 3028 pde = (pt_entry_t *)pdpe; 3029#endif 3030 for (k = 0; k < NPTEPG; k++) { 3031 pte = pde[k]; 3032 if (pte == 0 || !pte_test(&pte, PTE_V)) 3033 continue; 3034 pa = TLBLO_PTE_TO_PA(pte); 3035 va = ((u_long)i << SEGSHIFT) | (j << PDRSHIFT) | (k << PAGE_SHIFT); 3036 db_printf("\t\t[%04d] va: %p pte: %8jx pa:%jx\n", 3037 k, (void *)va, (uintmax_t)pte, (uintmax_t)pa); 3038 } 3039 } 3040 } 3041} 3042#endif 3043 3044#if defined(DEBUG) 3045 3046static void pads(pmap_t pm); 3047void pmap_pvdump(vm_offset_t pa); 3048 3049/* print address space of pmap*/ 3050static void 3051pads(pmap_t pm) 3052{ 3053 unsigned va, i, j; 3054 pt_entry_t *ptep; 3055 3056 if (pm == kernel_pmap) 3057 return; 3058 for (i = 0; i < NPTEPG; i++) 3059 if (pm->pm_segtab[i]) 3060 for (j = 0; j < NPTEPG; j++) { 3061 va = (i << SEGSHIFT) + (j << PAGE_SHIFT); 3062 if (pm == kernel_pmap && va < KERNBASE) 3063 continue; 3064 if (pm != kernel_pmap && 3065 va >= VM_MAXUSER_ADDRESS) 3066 continue; 3067 ptep = pmap_pte(pm, va); 3068 if (pte_test(ptep, PTE_V)) 3069 printf("%x:%x ", va, *(int *)ptep); 3070 } 3071 3072} 3073 3074void 3075pmap_pvdump(vm_offset_t pa) 3076{ 3077 register pv_entry_t pv; 3078 vm_page_t m; 3079 3080 printf("pa %x", pa); 3081 m = PHYS_TO_VM_PAGE(pa); 3082 for (pv = TAILQ_FIRST(&m->md.pv_list); pv; 3083 pv = TAILQ_NEXT(pv, pv_list)) { 3084 
printf(" -> pmap %p, va %x", (void *)pv->pv_pmap, pv->pv_va); 3085 pads(pv->pv_pmap); 3086 } 3087 printf(" "); 3088} 3089 3090/* N/C */ 3091#endif 3092 3093 3094/* 3095 * Allocate TLB address space tag (called ASID or TLBPID) and return it. 3096 * It takes almost as much or more time to search the TLB for a 3097 * specific ASID and flush those entries as it does to flush the entire TLB. 3098 * Therefore, when we allocate a new ASID, we just take the next number. When 3099 * we run out of numbers, we flush the TLB, increment the generation count 3100 * and start over. ASID zero is reserved for kernel use. 3101 */ 3102static void 3103pmap_asid_alloc(pmap) 3104 pmap_t pmap; 3105{ 3106 if (pmap->pm_asid[PCPU_GET(cpuid)].asid != PMAP_ASID_RESERVED && 3107 pmap->pm_asid[PCPU_GET(cpuid)].gen == PCPU_GET(asid_generation)); 3108 else { 3109 if (PCPU_GET(next_asid) == pmap_max_asid) { 3110 tlb_invalidate_all_user(NULL); 3111 PCPU_SET(asid_generation, 3112 (PCPU_GET(asid_generation) + 1) & ASIDGEN_MASK); 3113 if (PCPU_GET(asid_generation) == 0) { 3114 PCPU_SET(asid_generation, 1); 3115 } 3116 PCPU_SET(next_asid, 1); /* 0 means invalid */ 3117 } 3118 pmap->pm_asid[PCPU_GET(cpuid)].asid = PCPU_GET(next_asid); 3119 pmap->pm_asid[PCPU_GET(cpuid)].gen = PCPU_GET(asid_generation); 3120 PCPU_SET(next_asid, PCPU_GET(next_asid) + 1); 3121 } 3122} 3123 3124int 3125page_is_managed(vm_paddr_t pa) 3126{ 3127 vm_offset_t pgnum = atop(pa); 3128 3129 if (pgnum >= first_page) { 3130 vm_page_t m; 3131 3132 m = PHYS_TO_VM_PAGE(pa); 3133 if (m == NULL) 3134 return (0); 3135 if ((m->oflags & VPO_UNMANAGED) == 0) 3136 return (1); 3137 } 3138 return (0); 3139} 3140 3141static pt_entry_t 3142init_pte_prot(vm_offset_t va, vm_page_t m, vm_prot_t prot) 3143{ 3144 pt_entry_t rw; 3145 3146 if (!(prot & VM_PROT_WRITE)) 3147 rw = PTE_V | PTE_RO | PTE_C_CACHE; 3148 else if ((m->oflags & VPO_UNMANAGED) == 0) { 3149 if ((m->md.pv_flags & PV_TABLE_MOD) != 0) 3150 rw = PTE_V | PTE_D | PTE_C_CACHE; 3151 else 3152 rw = PTE_V | PTE_C_CACHE; 3153 vm_page_aflag_set(m, PGA_WRITEABLE); 3154 } else 3155 /* Needn't emulate a modified bit for unmanaged pages. */ 3156 rw = PTE_V | PTE_D | PTE_C_CACHE; 3157 return (rw); 3158} 3159 3160/* 3161 * pmap_emulate_modified : do dirty bit emulation 3162 * 3163 * On SMP, update just the local TLB, other CPUs will update their 3164 * TLBs from PTE lazily, if they get the exception. 3165 * Returns 0 in case of sucess, 1 if the page is read only and we 3166 * need to fault. 
3167 */ 3168int 3169pmap_emulate_modified(pmap_t pmap, vm_offset_t va) 3170{ 3171 vm_page_t m; 3172 pt_entry_t *pte; 3173 vm_paddr_t pa; 3174 3175 PMAP_LOCK(pmap); 3176 pte = pmap_pte(pmap, va); 3177 if (pte == NULL) 3178 panic("pmap_emulate_modified: can't find PTE"); 3179#ifdef SMP 3180 /* It is possible that some other CPU changed m-bit */ 3181 if (!pte_test(pte, PTE_V) || pte_test(pte, PTE_D)) { 3182 tlb_update(pmap, va, *pte); 3183 PMAP_UNLOCK(pmap); 3184 return (0); 3185 } 3186#else 3187 if (!pte_test(pte, PTE_V) || pte_test(pte, PTE_D)) 3188 panic("pmap_emulate_modified: invalid pte"); 3189#endif 3190 if (pte_test(pte, PTE_RO)) { 3191 /* write to read only page in the kernel */ 3192 PMAP_UNLOCK(pmap); 3193 return (1); 3194 } 3195 pte_set(pte, PTE_D); 3196 tlb_update(pmap, va, *pte); 3197 pa = TLBLO_PTE_TO_PA(*pte); 3198 if (!page_is_managed(pa)) 3199 panic("pmap_emulate_modified: unmanaged page"); 3200 m = PHYS_TO_VM_PAGE(pa); 3201 m->md.pv_flags |= (PV_TABLE_REF | PV_TABLE_MOD); 3202 PMAP_UNLOCK(pmap); 3203 return (0); 3204} 3205 3206/* 3207 * Routine: pmap_kextract 3208 * Function: 3209 * Extract the physical page address associated with the 3210 * given virtual address. 3211 */ 3212vm_paddr_t 3213pmap_kextract(vm_offset_t va) 3214{ 3215 int mapped; 3216 3217 /* 3218 * First, the direct-mapped regions. 3219 */ 3220#if defined(__mips_n64) 3221 if (va >= MIPS_XKPHYS_START && va < MIPS_XKPHYS_END) 3222 return (MIPS_XKPHYS_TO_PHYS(va)); 3223#endif 3224 if (va >= MIPS_KSEG0_START && va < MIPS_KSEG0_END) 3225 return (MIPS_KSEG0_TO_PHYS(va)); 3226 3227 if (va >= MIPS_KSEG1_START && va < MIPS_KSEG1_END) 3228 return (MIPS_KSEG1_TO_PHYS(va)); 3229 3230 /* 3231 * User virtual addresses. 3232 */ 3233 if (va < VM_MAXUSER_ADDRESS) { 3234 pt_entry_t *ptep; 3235 3236 if (curproc && curproc->p_vmspace) { 3237 ptep = pmap_pte(&curproc->p_vmspace->vm_pmap, va); 3238 if (ptep) { 3239 return (TLBLO_PTE_TO_PA(*ptep) | 3240 (va & PAGE_MASK)); 3241 } 3242 return (0); 3243 } 3244 } 3245 3246 /* 3247 * Should be kernel virtual here, otherwise fail 3248 */ 3249 mapped = (va >= MIPS_KSEG2_START && va < MIPS_KSEG2_END); 3250#if defined(__mips_n64) 3251 mapped = mapped || (va >= MIPS_XKSEG_START && va < MIPS_XKSEG_END); 3252#endif 3253 /* 3254 * Kernel virtual. 3255 */ 3256 3257 if (mapped) { 3258 pt_entry_t *ptep; 3259 3260 /* Is the kernel pmap initialized? */ 3261 if (!CPU_EMPTY(&kernel_pmap->pm_active)) { 3262 /* It's inside the virtual address range */ 3263 ptep = pmap_pte(kernel_pmap, va); 3264 if (ptep) { 3265 return (TLBLO_PTE_TO_PA(*ptep) | 3266 (va & PAGE_MASK)); 3267 } 3268 } 3269 return (0); 3270 } 3271 3272 panic("%s for unknown address space %p.", __func__, (void *)va); 3273} 3274 3275 3276void 3277pmap_flush_pvcache(vm_page_t m) 3278{ 3279 pv_entry_t pv; 3280 3281 if (m != NULL) { 3282 for (pv = TAILQ_FIRST(&m->md.pv_list); pv; 3283 pv = TAILQ_NEXT(pv, pv_list)) { 3284 mips_dcache_wbinv_range_index(pv->pv_va, PAGE_SIZE); 3285 } 3286 } 3287} 3288
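
/*
 * A hedged, illustrative example (not part of the original source; "buf"
 * is a hypothetical direct-mapped kernel pointer): for such an address,
 * pmap_kextract() and the direct-map macros invert each other, e.g.
 *
 *	vm_paddr_t pa = pmap_kextract((vm_offset_t)buf);
 *	void *alias = (void *)MIPS_PHYS_TO_DIRECT(pa);
 *
 * whereas mapped KSEG2/XKSEG addresses are resolved through the kernel
 * pmap's page tables above, and user addresses through the current
 * process's pmap.
 */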