pmap.c revision 216315
1/* 2 * Copyright (c) 1991 Regents of the University of California. 3 * All rights reserved. 4 * Copyright (c) 1994 John S. Dyson 5 * All rights reserved. 6 * Copyright (c) 1994 David Greenman 7 * All rights reserved. 8 * 9 * This code is derived from software contributed to Berkeley by 10 * the Systems Programming Group of the University of Utah Computer 11 * Science Department and William Jolitz of UUNET Technologies Inc. 12 * 13 * Redistribution and use in source and binary forms, with or without 14 * modification, are permitted provided that the following conditions 15 * are met: 16 * 1. Redistributions of source code must retain the above copyright 17 * notice, this list of conditions and the following disclaimer. 18 * 2. Redistributions in binary form must reproduce the above copyright 19 * notice, this list of conditions and the following disclaimer in the 20 * documentation and/or other materials provided with the distribution. 21 * 4. Neither the name of the University nor the names of its contributors 22 * may be used to endorse or promote products derived from this software 23 * without specific prior written permission. 24 * 25 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND 26 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 27 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 28 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE 29 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 30 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 31 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 32 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 33 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 34 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 35 * SUCH DAMAGE. 36 * 37 * from: @(#)pmap.c 7.7 (Berkeley) 5/12/91 38 * from: src/sys/i386/i386/pmap.c,v 1.250.2.8 2000/11/21 00:09:14 ps 39 * JNPR: pmap.c,v 1.11.2.1 2007/08/16 11:51:06 girish 40 */ 41 42/* 43 * Manages physical address maps. 44 * 45 * In addition to hardware address maps, this 46 * module is called upon to provide software-use-only 47 * maps which may or may not be stored in the same 48 * form as hardware maps. These pseudo-maps are 49 * used to store intermediate results from copy 50 * operations to and from address spaces. 51 * 52 * Since the information managed by this module is 53 * also stored by the logical address mapping module, 54 * this module may throw away valid virtual-to-physical 55 * mappings at almost any time. However, invalidations 56 * of virtual-to-physical mappings must be done as 57 * requested. 58 * 59 * In order to cope with hardware architectures which 60 * make virtual-to-physical map invalidates expensive, 61 * this module may delay invalidate or reduced protection 62 * operations until such time as they are actually 63 * necessary. This module is given full information as 64 * to which processors are currently using which maps, 65 * and to when physical maps must be made correct. 
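 * (Note: this MIPS pmap defers much of that invalidation work through per-CPU ASID generations; when a pmap is not active on a CPU, the pmap_invalidate_*_local() helpers below can simply reset that pmap's ASID generation instead of touching the TLB.)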
66 */ 67 68#include <sys/cdefs.h> 69__FBSDID("$FreeBSD: head/sys/mips/mips/pmap.c 216315 2010-12-09 06:34:28Z jchandra $"); 70 71#include "opt_msgbuf.h" 72#include "opt_ddb.h" 73 74#include <sys/param.h> 75#include <sys/systm.h> 76#include <sys/proc.h> 77#include <sys/msgbuf.h> 78#include <sys/vmmeter.h> 79#include <sys/mman.h> 80#include <sys/smp.h> 81#ifdef DDB 82#include <ddb/ddb.h> 83#endif 84 85#include <vm/vm.h> 86#include <vm/vm_param.h> 87#include <vm/vm_phys.h> 88#include <sys/lock.h> 89#include <sys/mutex.h> 90#include <vm/vm_kern.h> 91#include <vm/vm_page.h> 92#include <vm/vm_map.h> 93#include <vm/vm_object.h> 94#include <vm/vm_extern.h> 95#include <vm/vm_pageout.h> 96#include <vm/vm_pager.h> 97#include <vm/uma.h> 98#include <sys/pcpu.h> 99#include <sys/sched.h> 100#ifdef SMP 101#include <sys/smp.h> 102#endif 103 104#include <machine/cache.h> 105#include <machine/md_var.h> 106#include <machine/tlb.h> 107 108#undef PMAP_DEBUG 109 110#ifndef PMAP_SHPGPERPROC 111#define PMAP_SHPGPERPROC 200 112#endif 113 114#if !defined(DIAGNOSTIC) 115#define PMAP_INLINE __inline 116#else 117#define PMAP_INLINE 118#endif 119 120/* 121 * Get PDEs and PTEs for user/kernel address space 122 * 123 * XXX The & for pmap_segshift() is wrong, as is the fact that it doesn't 124 * trim off gratuitous bits of the address space. By having the & 125 * there, we break defining NUSERPGTBLS below because the address space 126 * is defined such that it ends immediately after NPDEPG*NPTEPG*PAGE_SIZE, 127 * so we end up getting NUSERPGTBLS of 0. 128 */ 129#define pmap_seg_index(v) (((v) >> SEGSHIFT) & (NPDEPG - 1)) 130#define pmap_pde_index(v) (((v) >> PDRSHIFT) & (NPDEPG - 1)) 131#define pmap_pte_index(v) (((v) >> PAGE_SHIFT) & (NPTEPG - 1)) 132#define pmap_pde_pindex(v) ((v) >> PDRSHIFT) 133 134#ifdef __mips_n64 135#define NUPDE (NPDEPG * NPDEPG) 136#define NUSERPGTBLS (NUPDE + NPDEPG) 137#else 138#define NUPDE (NPDEPG) 139#define NUSERPGTBLS (NUPDE) 140#endif 141 142#define is_kernel_pmap(x) ((x) == kernel_pmap) 143 144struct pmap kernel_pmap_store; 145pd_entry_t *kernel_segmap; 146 147vm_offset_t virtual_avail; /* VA of first avail page (after kernel bss) */ 148vm_offset_t virtual_end; /* VA of last avail page (end of kernel AS) */ 149 150static int nkpt; 151unsigned pmap_max_asid; /* max ASID supported by the system */ 152 153#define PMAP_ASID_RESERVED 0 154 155vm_offset_t kernel_vm_end = VM_MIN_KERNEL_ADDRESS; 156 157static void pmap_asid_alloc(pmap_t pmap); 158 159/* 160 * Data for the pv entry allocation mechanism 161 */ 162static uma_zone_t pvzone; 163static struct vm_object pvzone_obj; 164static int pv_entry_count = 0, pv_entry_max = 0, pv_entry_high_water = 0; 165 166static PMAP_INLINE void free_pv_entry(pv_entry_t pv); 167static pv_entry_t get_pv_entry(pmap_t locked_pmap); 168static void pmap_pvh_free(struct md_page *pvh, pmap_t pmap, vm_offset_t va); 169static pv_entry_t pmap_pvh_remove(struct md_page *pvh, pmap_t pmap, 170 vm_offset_t va); 171static __inline void pmap_changebit(vm_page_t m, int bit, boolean_t setem); 172static vm_page_t pmap_enter_quick_locked(pmap_t pmap, vm_offset_t va, 173 vm_page_t m, vm_prot_t prot, vm_page_t mpte); 174static int pmap_remove_pte(struct pmap *pmap, pt_entry_t *ptq, vm_offset_t va); 175static void pmap_remove_page(struct pmap *pmap, vm_offset_t va); 176static void pmap_remove_entry(struct pmap *pmap, vm_page_t m, vm_offset_t va); 177static boolean_t pmap_try_insert_pv_entry(pmap_t pmap, vm_page_t mpte, 178 vm_offset_t va, vm_page_t m); 179static void 
pmap_update_page(pmap_t pmap, vm_offset_t va, pt_entry_t pte); 180static void pmap_invalidate_all(pmap_t pmap); 181static void pmap_invalidate_page(pmap_t pmap, vm_offset_t va); 182static int _pmap_unwire_pte_hold(pmap_t pmap, vm_offset_t va, vm_page_t m); 183 184static vm_page_t pmap_allocpte(pmap_t pmap, vm_offset_t va, int flags); 185static vm_page_t _pmap_allocpte(pmap_t pmap, unsigned ptepindex, int flags); 186static int pmap_unuse_pt(pmap_t, vm_offset_t, vm_page_t); 187static int init_pte_prot(vm_offset_t va, vm_page_t m, vm_prot_t prot); 188 189#ifdef SMP 190static void pmap_invalidate_page_action(void *arg); 191static void pmap_invalidate_all_action(void *arg); 192static void pmap_update_page_action(void *arg); 193#endif 194 195#ifndef __mips_n64 196/* 197 * This structure is for high memory (memory above 512Meg in 32 bit) support. 198 * The highmem area does not have a KSEG0 mapping, and we need a mechanism to 199 * do temporary per-CPU mappings for pmap_zero_page, pmap_copy_page etc. 200 * 201 * At bootup, we reserve 2 virtual pages per CPU for mapping highmem pages. To 202 * access a highmem physical address on a CPU, we map the physical address to 203 * the reserved virtual address for the CPU in the kernel pagetable. This is 204 * done with interrupts disabled(although a spinlock and sched_pin would be 205 * sufficient). 206 */ 207struct local_sysmaps { 208 vm_offset_t base; 209 uint32_t saved_intr; 210 uint16_t valid1, valid2; 211}; 212static struct local_sysmaps sysmap_lmem[MAXCPU]; 213 214static __inline void 215pmap_alloc_lmem_map(void) 216{ 217 int i; 218 219 for (i = 0; i < MAXCPU; i++) { 220 sysmap_lmem[i].base = virtual_avail; 221 virtual_avail += PAGE_SIZE * 2; 222 sysmap_lmem[i].valid1 = sysmap_lmem[i].valid2 = 0; 223 } 224} 225 226static __inline vm_offset_t 227pmap_lmem_map1(vm_paddr_t phys) 228{ 229 struct local_sysmaps *sysm; 230 pt_entry_t *pte, npte; 231 vm_offset_t va; 232 uint32_t intr; 233 int cpu; 234 235 intr = intr_disable(); 236 cpu = PCPU_GET(cpuid); 237 sysm = &sysmap_lmem[cpu]; 238 sysm->saved_intr = intr; 239 va = sysm->base; 240 npte = TLBLO_PA_TO_PFN(phys) | 241 PTE_D | PTE_V | PTE_G | PTE_W | PTE_C_CACHE; 242 pte = pmap_pte(kernel_pmap, va); 243 *pte = npte; 244 sysm->valid1 = 1; 245 return (va); 246} 247 248static __inline vm_offset_t 249pmap_lmem_map2(vm_paddr_t phys1, vm_paddr_t phys2) 250{ 251 struct local_sysmaps *sysm; 252 pt_entry_t *pte, npte; 253 vm_offset_t va1, va2; 254 uint32_t intr; 255 int cpu; 256 257 intr = intr_disable(); 258 cpu = PCPU_GET(cpuid); 259 sysm = &sysmap_lmem[cpu]; 260 sysm->saved_intr = intr; 261 va1 = sysm->base; 262 va2 = sysm->base + PAGE_SIZE; 263 npte = TLBLO_PA_TO_PFN(phys1) | 264 PTE_D | PTE_V | PTE_G | PTE_W | PTE_C_CACHE; 265 pte = pmap_pte(kernel_pmap, va1); 266 *pte = npte; 267 npte = TLBLO_PA_TO_PFN(phys2) | 268 PTE_D | PTE_V | PTE_G | PTE_W | PTE_C_CACHE; 269 pte = pmap_pte(kernel_pmap, va2); 270 *pte = npte; 271 sysm->valid1 = 1; 272 sysm->valid2 = 1; 273 return (va1); 274} 275 276static __inline void 277pmap_lmem_unmap(void) 278{ 279 struct local_sysmaps *sysm; 280 pt_entry_t *pte; 281 int cpu; 282 283 cpu = PCPU_GET(cpuid); 284 sysm = &sysmap_lmem[cpu]; 285 pte = pmap_pte(kernel_pmap, sysm->base); 286 *pte = PTE_G; 287 tlb_invalidate_address(kernel_pmap, sysm->base); 288 sysm->valid1 = 0; 289 if (sysm->valid2) { 290 pte = pmap_pte(kernel_pmap, sysm->base + PAGE_SIZE); 291 *pte = PTE_G; 292 tlb_invalidate_address(kernel_pmap, sysm->base + PAGE_SIZE); 293 sysm->valid2 = 0; 294 } 295 
intr_restore(sysm->saved_intr); 296} 297#else /* __mips_n64 */ 298 299static __inline void 300pmap_alloc_lmem_map(void) 301{ 302} 303 304static __inline vm_offset_t 305pmap_lmem_map1(vm_paddr_t phys) 306{ 307 308 return (0); 309} 310 311static __inline vm_offset_t 312pmap_lmem_map2(vm_paddr_t phys1, vm_paddr_t phys2) 313{ 314 315 return (0); 316} 317 318static __inline vm_offset_t 319pmap_lmem_unmap(void) 320{ 321 322 return (0); 323} 324#endif /* !__mips_n64 */ 325 326/* 327 * Page table entry lookup routines. 328 */ 329static __inline pd_entry_t * 330pmap_segmap(pmap_t pmap, vm_offset_t va) 331{ 332 333 return (&pmap->pm_segtab[pmap_seg_index(va)]); 334} 335 336#ifdef __mips_n64 337static __inline pd_entry_t * 338pmap_pdpe_to_pde(pd_entry_t *pdpe, vm_offset_t va) 339{ 340 pd_entry_t *pde; 341 342 pde = (pd_entry_t *)*pdpe; 343 return (&pde[pmap_pde_index(va)]); 344} 345 346static __inline pd_entry_t * 347pmap_pde(pmap_t pmap, vm_offset_t va) 348{ 349 pd_entry_t *pdpe; 350 351 pdpe = pmap_segmap(pmap, va); 352 if (pdpe == NULL || *pdpe == NULL) 353 return (NULL); 354 355 return (pmap_pdpe_to_pde(pdpe, va)); 356} 357#else 358static __inline pd_entry_t * 359pmap_pdpe_to_pde(pd_entry_t *pdpe, vm_offset_t va) 360{ 361 362 return (pdpe); 363} 364 365static __inline 366pd_entry_t *pmap_pde(pmap_t pmap, vm_offset_t va) 367{ 368 369 return (pmap_segmap(pmap, va)); 370} 371#endif 372 373static __inline pt_entry_t * 374pmap_pde_to_pte(pd_entry_t *pde, vm_offset_t va) 375{ 376 pt_entry_t *pte; 377 378 pte = (pt_entry_t *)*pde; 379 return (&pte[pmap_pte_index(va)]); 380} 381 382pt_entry_t * 383pmap_pte(pmap_t pmap, vm_offset_t va) 384{ 385 pd_entry_t *pde; 386 387 pde = pmap_pde(pmap, va); 388 if (pde == NULL || *pde == NULL) 389 return (NULL); 390 391 return (pmap_pde_to_pte(pde, va)); 392} 393 394vm_offset_t 395pmap_steal_memory(vm_size_t size) 396{ 397 vm_size_t bank_size; 398 vm_offset_t pa, va; 399 400 size = round_page(size); 401 402 bank_size = phys_avail[1] - phys_avail[0]; 403 while (size > bank_size) { 404 int i; 405 406 for (i = 0; phys_avail[i + 2]; i += 2) { 407 phys_avail[i] = phys_avail[i + 2]; 408 phys_avail[i + 1] = phys_avail[i + 3]; 409 } 410 phys_avail[i] = 0; 411 phys_avail[i + 1] = 0; 412 if (!phys_avail[0]) 413 panic("pmap_steal_memory: out of memory"); 414 bank_size = phys_avail[1] - phys_avail[0]; 415 } 416 417 pa = phys_avail[0]; 418 phys_avail[0] += size; 419 if (MIPS_DIRECT_MAPPABLE(pa) == 0) 420 panic("Out of memory below 512Meg?"); 421 va = MIPS_PHYS_TO_DIRECT(pa); 422 bzero((caddr_t)va, size); 423 return (va); 424} 425 426/* 427 * Bootstrap the system enough to run with virtual memory. This 428 * assumes that the phys_avail array has been initialized. 429 */ 430static void 431pmap_create_kernel_pagetable(void) 432{ 433 int i, j; 434 vm_offset_t ptaddr; 435 pt_entry_t *pte; 436#ifdef __mips_n64 437 pd_entry_t *pde; 438 vm_offset_t pdaddr; 439 int npt, npde; 440#endif 441 442 /* 443 * Allocate segment table for the kernel 444 */ 445 kernel_segmap = (pd_entry_t *)pmap_steal_memory(PAGE_SIZE); 446 447 /* 448 * Allocate second level page tables for the kernel 449 */ 450#ifdef __mips_n64 451 npde = howmany(NKPT, NPDEPG); 452 pdaddr = pmap_steal_memory(PAGE_SIZE * npde); 453#endif 454 nkpt = NKPT; 455 ptaddr = pmap_steal_memory(PAGE_SIZE * nkpt); 456 457 /* 458 * The R[4-7]?00 stores only one copy of the Global bit in the 459 * translation lookaside buffer for each 2 page entry. 
Thus invalid 460 * entrys must have the Global bit set so when Entry LO and Entry HI 461 * G bits are anded together they will produce a global bit to store 462 * in the tlb. 463 */ 464 for (i = 0, pte = (pt_entry_t *)ptaddr; i < (nkpt * NPTEPG); i++, pte++) 465 *pte = PTE_G; 466 467#ifdef __mips_n64 468 for (i = 0, npt = nkpt; npt > 0; i++) { 469 kernel_segmap[i] = (pd_entry_t)(pdaddr + i * PAGE_SIZE); 470 pde = (pd_entry_t *)kernel_segmap[i]; 471 472 for (j = 0; j < NPDEPG && npt > 0; j++, npt--) 473 pde[j] = (pd_entry_t)(ptaddr + (i * NPDEPG + j) * PAGE_SIZE); 474 } 475#else 476 for (i = 0, j = pmap_seg_index(VM_MIN_KERNEL_ADDRESS); i < nkpt; i++, j++) 477 kernel_segmap[j] = (pd_entry_t)(ptaddr + (i * PAGE_SIZE)); 478#endif 479 480 PMAP_LOCK_INIT(kernel_pmap); 481 kernel_pmap->pm_segtab = kernel_segmap; 482 kernel_pmap->pm_active = ~0; 483 TAILQ_INIT(&kernel_pmap->pm_pvlist); 484 kernel_pmap->pm_asid[0].asid = PMAP_ASID_RESERVED; 485 kernel_pmap->pm_asid[0].gen = 0; 486 kernel_vm_end += nkpt * NPTEPG * PAGE_SIZE; 487} 488 489void 490pmap_bootstrap(void) 491{ 492 int i; 493 int need_local_mappings = 0; 494 495 /* Sort. */ 496again: 497 for (i = 0; phys_avail[i + 1] != 0; i += 2) { 498 /* 499 * Keep the memory aligned on page boundary. 500 */ 501 phys_avail[i] = round_page(phys_avail[i]); 502 phys_avail[i + 1] = trunc_page(phys_avail[i + 1]); 503 504 if (i < 2) 505 continue; 506 if (phys_avail[i - 2] > phys_avail[i]) { 507 vm_paddr_t ptemp[2]; 508 509 ptemp[0] = phys_avail[i + 0]; 510 ptemp[1] = phys_avail[i + 1]; 511 512 phys_avail[i + 0] = phys_avail[i - 2]; 513 phys_avail[i + 1] = phys_avail[i - 1]; 514 515 phys_avail[i - 2] = ptemp[0]; 516 phys_avail[i - 1] = ptemp[1]; 517 goto again; 518 } 519 } 520 521 /* 522 * In 32 bit, we may have memory which cannot be mapped directly. 523 * This memory will need temporary mapping before it can be 524 * accessed. 525 */ 526 if (!MIPS_DIRECT_MAPPABLE(phys_avail[i - 1] - 1)) 527 need_local_mappings = 1; 528 529 /* 530 * Copy the phys_avail[] array before we start stealing memory from it. 531 */ 532 for (i = 0; phys_avail[i + 1] != 0; i += 2) { 533 physmem_desc[i] = phys_avail[i]; 534 physmem_desc[i + 1] = phys_avail[i + 1]; 535 } 536 537 Maxmem = atop(phys_avail[i - 1]); 538 539 if (bootverbose) { 540 printf("Physical memory chunk(s):\n"); 541 for (i = 0; phys_avail[i + 1] != 0; i += 2) { 542 vm_paddr_t size; 543 544 size = phys_avail[i + 1] - phys_avail[i]; 545 printf("%#08jx - %#08jx, %ju bytes (%ju pages)\n", 546 (uintmax_t) phys_avail[i], 547 (uintmax_t) phys_avail[i + 1] - 1, 548 (uintmax_t) size, (uintmax_t) size / PAGE_SIZE); 549 } 550 printf("Maxmem is 0x%0lx\n", ptoa(Maxmem)); 551 } 552 /* 553 * Steal the message buffer from the beginning of memory. 554 */ 555 msgbufp = (struct msgbuf *)pmap_steal_memory(MSGBUF_SIZE); 556 msgbufinit(msgbufp, MSGBUF_SIZE); 557 558 /* 559 * Steal thread0 kstack. 560 */ 561 kstack0 = pmap_steal_memory(KSTACK_PAGES << PAGE_SHIFT); 562 563 virtual_avail = VM_MIN_KERNEL_ADDRESS; 564 virtual_end = VM_MAX_KERNEL_ADDRESS; 565 566#ifdef SMP 567 /* 568 * Steal some virtual address space to map the pcpu area. 569 */ 570 virtual_avail = roundup2(virtual_avail, PAGE_SIZE * 2); 571 pcpup = (struct pcpu *)virtual_avail; 572 virtual_avail += PAGE_SIZE * 2; 573 574 /* 575 * Initialize the wired TLB entry mapping the pcpu region for 576 * the BSP at 'pcpup'. Up until this point we were operating 577 * with the 'pcpup' for the BSP pointing to a virtual address 578 * in KSEG0 so there was no need for a TLB mapping. 
579 */ 580 mips_pcpu_tlb_init(PCPU_ADDR(0)); 581 582 if (bootverbose) 583 printf("pcpu is available at virtual address %p.\n", pcpup); 584#endif 585 586 if (need_local_mappings) 587 pmap_alloc_lmem_map(); 588 pmap_create_kernel_pagetable(); 589 pmap_max_asid = VMNUM_PIDS; 590 mips_wr_entryhi(0); 591 mips_wr_pagemask(0); 592} 593 594/* 595 * Initialize a vm_page's machine-dependent fields. 596 */ 597void 598pmap_page_init(vm_page_t m) 599{ 600 601 TAILQ_INIT(&m->md.pv_list); 602 m->md.pv_list_count = 0; 603 m->md.pv_flags = 0; 604} 605 606/* 607 * Initialize the pmap module. 608 * Called by vm_init, to initialize any structures that the pmap 609 * system needs to map virtual memory. 610 * pmap_init has been enhanced to support discontiguous physical 611 * memory in a fairly consistent way. 612 */ 613void 614pmap_init(void) 615{ 616 617 /* 618 * Initialize the address space (zone) for the pv entries. Set a 619 * high water mark so that the system can recover from excessive 620 * numbers of pv entries. 621 */ 622 pvzone = uma_zcreate("PV ENTRY", sizeof(struct pv_entry), NULL, NULL, 623 NULL, NULL, UMA_ALIGN_PTR, UMA_ZONE_VM | UMA_ZONE_NOFREE); 624 pv_entry_max = PMAP_SHPGPERPROC * maxproc + cnt.v_page_count; 625 pv_entry_high_water = 9 * (pv_entry_max / 10); 626 uma_zone_set_obj(pvzone, &pvzone_obj, pv_entry_max); 627} 628 629/*************************************************** 630 * Low level helper routines..... 631 ***************************************************/ 632 633static __inline void 634pmap_invalidate_all_local(pmap_t pmap) 635{ 636 637 if (pmap == kernel_pmap) { 638 tlb_invalidate_all(); 639 return; 640 } 641 if (pmap->pm_active & PCPU_GET(cpumask)) 642 tlb_invalidate_all_user(pmap); 643 else 644 pmap->pm_asid[PCPU_GET(cpuid)].gen = 0; 645} 646 647#ifdef SMP 648static void 649pmap_invalidate_all(pmap_t pmap) 650{ 651 652 smp_rendezvous(0, pmap_invalidate_all_action, 0, pmap); 653} 654 655static void 656pmap_invalidate_all_action(void *arg) 657{ 658 659 pmap_invalidate_all_local((pmap_t)arg); 660} 661#else 662static void 663pmap_invalidate_all(pmap_t pmap) 664{ 665 666 pmap_invalidate_all_local(pmap); 667} 668#endif 669 670static __inline void 671pmap_invalidate_page_local(pmap_t pmap, vm_offset_t va) 672{ 673 674 if (is_kernel_pmap(pmap)) { 675 tlb_invalidate_address(pmap, va); 676 return; 677 } 678 if (pmap->pm_asid[PCPU_GET(cpuid)].gen != PCPU_GET(asid_generation)) 679 return; 680 else if (!(pmap->pm_active & PCPU_GET(cpumask))) { 681 pmap->pm_asid[PCPU_GET(cpuid)].gen = 0; 682 return; 683 } 684 tlb_invalidate_address(pmap, va); 685} 686 687#ifdef SMP 688struct pmap_invalidate_page_arg { 689 pmap_t pmap; 690 vm_offset_t va; 691}; 692 693static void 694pmap_invalidate_page(pmap_t pmap, vm_offset_t va) 695{ 696 struct pmap_invalidate_page_arg arg; 697 698 arg.pmap = pmap; 699 arg.va = va; 700 smp_rendezvous(0, pmap_invalidate_page_action, 0, &arg); 701} 702 703static void 704pmap_invalidate_page_action(void *arg) 705{ 706 struct pmap_invalidate_page_arg *p = arg; 707 708 pmap_invalidate_page_local(p->pmap, p->va); 709} 710#else 711static void 712pmap_invalidate_page(pmap_t pmap, vm_offset_t va) 713{ 714 715 pmap_invalidate_page_local(pmap, va); 716} 717#endif 718 719static __inline void 720pmap_update_page_local(pmap_t pmap, vm_offset_t va, pt_entry_t pte) 721{ 722 723 if (is_kernel_pmap(pmap)) { 724 tlb_update(pmap, va, pte); 725 return; 726 } 727 if (pmap->pm_asid[PCPU_GET(cpuid)].gen != PCPU_GET(asid_generation)) 728 return; 729 else if (!(pmap->pm_active &
PCPU_GET(cpumask))) { 730 pmap->pm_asid[PCPU_GET(cpuid)].gen = 0; 731 return; 732 } 733 tlb_update(pmap, va, pte); 734} 735 736#ifdef SMP 737struct pmap_update_page_arg { 738 pmap_t pmap; 739 vm_offset_t va; 740 pt_entry_t pte; 741}; 742 743static void 744pmap_update_page(pmap_t pmap, vm_offset_t va, pt_entry_t pte) 745{ 746 struct pmap_update_page_arg arg; 747 748 arg.pmap = pmap; 749 arg.va = va; 750 arg.pte = pte; 751 smp_rendezvous(0, pmap_update_page_action, 0, &arg); 752} 753 754static void 755pmap_update_page_action(void *arg) 756{ 757 struct pmap_update_page_arg *p = arg; 758 759 pmap_update_page_local(p->pmap, p->va, p->pte); 760} 761#else 762static void 763pmap_update_page(pmap_t pmap, vm_offset_t va, pt_entry_t pte) 764{ 765 766 pmap_update_page_local(pmap, va, pte); 767} 768#endif 769 770/* 771 * Routine: pmap_extract 772 * Function: 773 * Extract the physical page address associated 774 * with the given map/virtual_address pair. 775 */ 776vm_paddr_t 777pmap_extract(pmap_t pmap, vm_offset_t va) 778{ 779 pt_entry_t *pte; 780 vm_offset_t retval = 0; 781 782 PMAP_LOCK(pmap); 783 pte = pmap_pte(pmap, va); 784 if (pte) { 785 retval = TLBLO_PTE_TO_PA(*pte) | (va & PAGE_MASK); 786 } 787 PMAP_UNLOCK(pmap); 788 return (retval); 789} 790 791/* 792 * Routine: pmap_extract_and_hold 793 * Function: 794 * Atomically extract and hold the physical page 795 * with the given pmap and virtual address pair 796 * if that mapping permits the given protection. 797 */ 798vm_page_t 799pmap_extract_and_hold(pmap_t pmap, vm_offset_t va, vm_prot_t prot) 800{ 801 pt_entry_t pte; 802 vm_page_t m; 803 vm_paddr_t pa; 804 805 m = NULL; 806 pa = 0; 807 PMAP_LOCK(pmap); 808retry: 809 pte = *pmap_pte(pmap, va); 810 if (pte != 0 && pte_test(&pte, PTE_V) && 811 (pte_test(&pte, PTE_D) || (prot & VM_PROT_WRITE) == 0)) { 812 if (vm_page_pa_tryrelock(pmap, TLBLO_PTE_TO_PA(pte), &pa)) 813 goto retry; 814 815 m = PHYS_TO_VM_PAGE(TLBLO_PTE_TO_PA(pte)); 816 vm_page_hold(m); 817 } 818 PA_UNLOCK_COND(pa); 819 PMAP_UNLOCK(pmap); 820 return (m); 821} 822 823/*************************************************** 824 * Low level mapping routines..... 825 ***************************************************/ 826 827/* 828 * add a wired page to the kva 829 */ 830void 831pmap_kenter_attr(vm_offset_t va, vm_paddr_t pa, int attr) 832{ 833 pt_entry_t *pte; 834 pt_entry_t opte, npte; 835 836#ifdef PMAP_DEBUG 837 printf("pmap_kenter: va: %p -> pa: %p\n", (void *)va, (void *)pa); 838#endif 839 npte = TLBLO_PA_TO_PFN(pa) | PTE_D | PTE_V | PTE_G | PTE_W | attr; 840 841 pte = pmap_pte(kernel_pmap, va); 842 opte = *pte; 843 *pte = npte; 844 if (pte_test(&opte, PTE_V) && opte != npte) 845 pmap_update_page(kernel_pmap, va, npte); 846} 847 848void 849pmap_kenter(vm_offset_t va, vm_paddr_t pa) 850{ 851 852 KASSERT(is_cacheable_mem(pa), 853 ("pmap_kenter: memory at 0x%lx is not cacheable", (u_long)pa)); 854 855 pmap_kenter_attr(va, pa, PTE_C_CACHE); 856} 857 858/* 859 * remove a page from the kernel pagetables 860 */ 861 /* PMAP_INLINE */ void 862pmap_kremove(vm_offset_t va) 863{ 864 pt_entry_t *pte; 865 866 /* 867 * Write back all caches from the page being destroyed 868 */ 869 mips_dcache_wbinv_range_index(va, PAGE_SIZE); 870 871 pte = pmap_pte(kernel_pmap, va); 872 *pte = PTE_G; 873 pmap_invalidate_page(kernel_pmap, va); 874} 875 876/* 877 * Used to map a range of physical addresses into kernel 878 * virtual address space. 879 * 880 * The value passed in '*virt' is a suggested virtual address for 881 * the mapping. 
Architectures which can support a direct-mapped 882 * physical to virtual region can return the appropriate address 883 * within that region, leaving '*virt' unchanged. Other 884 * architectures should map the pages starting at '*virt' and 885 * update '*virt' with the first usable address after the mapped 886 * region. 887 * 888 * Use XKPHYS for 64 bit, and KSEG0 where possible for 32 bit. 889 */ 890vm_offset_t 891pmap_map(vm_offset_t *virt, vm_offset_t start, vm_offset_t end, int prot) 892{ 893 vm_offset_t va, sva; 894 895 if (MIPS_DIRECT_MAPPABLE(end - 1)) 896 return (MIPS_PHYS_TO_DIRECT(start)); 897 898 va = sva = *virt; 899 while (start < end) { 900 pmap_kenter(va, start); 901 va += PAGE_SIZE; 902 start += PAGE_SIZE; 903 } 904 *virt = va; 905 return (sva); 906} 907 908/* 909 * Add a list of wired pages to the kva 910 * this routine is only used for temporary 911 * kernel mappings that do not need to have 912 * page modification or references recorded. 913 * Note that old mappings are simply written 914 * over. The page *must* be wired. 915 */ 916void 917pmap_qenter(vm_offset_t va, vm_page_t *m, int count) 918{ 919 int i; 920 vm_offset_t origva = va; 921 922 for (i = 0; i < count; i++) { 923 pmap_flush_pvcache(m[i]); 924 pmap_kenter(va, VM_PAGE_TO_PHYS(m[i])); 925 va += PAGE_SIZE; 926 } 927 928 mips_dcache_wbinv_range_index(origva, PAGE_SIZE*count); 929} 930 931/* 932 * this routine jerks page mappings from the 933 * kernel -- it is meant only for temporary mappings. 934 */ 935void 936pmap_qremove(vm_offset_t va, int count) 937{ 938 /* 939 * No need to wb/inv caches here, 940 * pmap_kremove will do it for us 941 */ 942 943 while (count-- > 0) { 944 pmap_kremove(va); 945 va += PAGE_SIZE; 946 } 947} 948 949/*************************************************** 950 * Page table page management routines..... 951 ***************************************************/ 952 953/* Revision 1.507 954 * 955 * Simplify the reference counting of page table pages. Specifically, use 956 * the page table page's wired count rather than its hold count to contain 957 * the reference count. 958 */ 959 960/* 961 * This routine unholds page table pages, and if the hold count 962 * drops to zero, then it decrements the wire count. 963 */ 964static PMAP_INLINE int 965pmap_unwire_pte_hold(pmap_t pmap, vm_offset_t va, vm_page_t m) 966{ 967 --m->wire_count; 968 if (m->wire_count == 0) 969 return (_pmap_unwire_pte_hold(pmap, va, m)); 970 else 971 return (0); 972} 973 974static int 975_pmap_unwire_pte_hold(pmap_t pmap, vm_offset_t va, vm_page_t m) 976{ 977 pd_entry_t *pde; 978 979 PMAP_LOCK_ASSERT(pmap, MA_OWNED); 980 /* 981 * unmap the page table page 982 */ 983#ifdef __mips_n64 984 if (m->pindex < NUPDE) 985 pde = pmap_pde(pmap, va); 986 else 987 pde = pmap_segmap(pmap, va); 988#else 989 pde = pmap_pde(pmap, va); 990#endif 991 *pde = 0; 992 pmap->pm_stats.resident_count--; 993 994#ifdef __mips_n64 995 if (m->pindex < NUPDE) { 996 pd_entry_t *pdp; 997 vm_page_t pdpg; 998 999 /* 1000 * Recursively decrement next level pagetable refcount 1001 */ 1002 pdp = (pd_entry_t *)*pmap_segmap(pmap, va); 1003 pdpg = PHYS_TO_VM_PAGE(MIPS_DIRECT_TO_PHYS(pdp)); 1004 pmap_unwire_pte_hold(pmap, va, pdpg); 1005 } 1006#endif 1007 if (pmap->pm_ptphint == m) 1008 pmap->pm_ptphint = NULL; 1009 1010 /* 1011 * If the page is finally unwired, simply free it. 
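 * (As the revision 1.507 note above says, the page table page's wire_count is used as its reference count, so reaching zero here means no entries in the page remain in use and the page can be freed.)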
1012 */ 1013 vm_page_free_zero(m); 1014 atomic_subtract_int(&cnt.v_wire_count, 1); 1015 return (1); 1016} 1017 1018/* 1019 * After removing a page table entry, this routine is used to 1020 * conditionally free the page, and manage the hold/wire counts. 1021 */ 1022static int 1023pmap_unuse_pt(pmap_t pmap, vm_offset_t va, vm_page_t mpte) 1024{ 1025 unsigned ptepindex; 1026 pd_entry_t pteva; 1027 1028 if (va >= VM_MAXUSER_ADDRESS) 1029 return (0); 1030 1031 if (mpte == NULL) { 1032 ptepindex = pmap_pde_pindex(va); 1033 if (pmap->pm_ptphint && 1034 (pmap->pm_ptphint->pindex == ptepindex)) { 1035 mpte = pmap->pm_ptphint; 1036 } else { 1037 pteva = *pmap_pde(pmap, va); 1038 mpte = PHYS_TO_VM_PAGE(MIPS_DIRECT_TO_PHYS(pteva)); 1039 pmap->pm_ptphint = mpte; 1040 } 1041 } 1042 return (pmap_unwire_pte_hold(pmap, va, mpte)); 1043} 1044 1045void 1046pmap_pinit0(pmap_t pmap) 1047{ 1048 int i; 1049 1050 PMAP_LOCK_INIT(pmap); 1051 pmap->pm_segtab = kernel_segmap; 1052 pmap->pm_active = 0; 1053 pmap->pm_ptphint = NULL; 1054 for (i = 0; i < MAXCPU; i++) { 1055 pmap->pm_asid[i].asid = PMAP_ASID_RESERVED; 1056 pmap->pm_asid[i].gen = 0; 1057 } 1058 PCPU_SET(curpmap, pmap); 1059 TAILQ_INIT(&pmap->pm_pvlist); 1060 bzero(&pmap->pm_stats, sizeof pmap->pm_stats); 1061} 1062 1063void 1064pmap_grow_direct_page_cache() 1065{ 1066 1067#ifdef __mips_n64 1068 vm_contig_grow_cache(3, 0, MIPS_XKPHYS_LARGEST_PHYS); 1069#else 1070 vm_contig_grow_cache(3, 0, MIPS_KSEG0_LARGEST_PHYS); 1071#endif 1072} 1073 1074vm_page_t 1075pmap_alloc_direct_page(unsigned int index, int req) 1076{ 1077 vm_page_t m; 1078 1079 m = vm_page_alloc_freelist(VM_FREELIST_DIRECT, req); 1080 if (m == NULL) 1081 return (NULL); 1082 1083 if ((m->flags & PG_ZERO) == 0) 1084 pmap_zero_page(m); 1085 1086 m->pindex = index; 1087 atomic_add_int(&cnt.v_wire_count, 1); 1088 m->wire_count = 1; 1089 return (m); 1090} 1091 1092/* 1093 * Initialize a preallocated and zeroed pmap structure, 1094 * such as one in a vmspace structure. 1095 */ 1096int 1097pmap_pinit(pmap_t pmap) 1098{ 1099 vm_offset_t ptdva; 1100 vm_page_t ptdpg; 1101 int i; 1102 1103 PMAP_LOCK_INIT(pmap); 1104 1105 /* 1106 * allocate the page directory page 1107 */ 1108 while ((ptdpg = pmap_alloc_direct_page(NUSERPGTBLS, VM_ALLOC_NORMAL)) == NULL) 1109 pmap_grow_direct_page_cache(); 1110 1111 ptdva = MIPS_PHYS_TO_DIRECT(VM_PAGE_TO_PHYS(ptdpg)); 1112 pmap->pm_segtab = (pd_entry_t *)ptdva; 1113 pmap->pm_active = 0; 1114 pmap->pm_ptphint = NULL; 1115 for (i = 0; i < MAXCPU; i++) { 1116 pmap->pm_asid[i].asid = PMAP_ASID_RESERVED; 1117 pmap->pm_asid[i].gen = 0; 1118 } 1119 TAILQ_INIT(&pmap->pm_pvlist); 1120 bzero(&pmap->pm_stats, sizeof pmap->pm_stats); 1121 1122 return (1); 1123} 1124 1125/* 1126 * this routine is called if the page table page is not 1127 * mapped correctly. 1128 */ 1129static vm_page_t 1130_pmap_allocpte(pmap_t pmap, unsigned ptepindex, int flags) 1131{ 1132 vm_offset_t pageva; 1133 vm_page_t m; 1134 1135 KASSERT((flags & (M_NOWAIT | M_WAITOK)) == M_NOWAIT || 1136 (flags & (M_NOWAIT | M_WAITOK)) == M_WAITOK, 1137 ("_pmap_allocpte: flags is neither M_NOWAIT nor M_WAITOK")); 1138 1139 /* 1140 * Find or fabricate a new pagetable page 1141 */ 1142 if ((m = pmap_alloc_direct_page(ptepindex, VM_ALLOC_NORMAL)) == NULL) { 1143 if (flags & M_WAITOK) { 1144 PMAP_UNLOCK(pmap); 1145 vm_page_unlock_queues(); 1146 pmap_grow_direct_page_cache(); 1147 vm_page_lock_queues(); 1148 PMAP_LOCK(pmap); 1149 } 1150 1151 /* 1152 * Indicate the need to retry. 
While waiting, the page 1153 * table page may have been allocated. 1154 */ 1155 return (NULL); 1156 } 1157 1158 /* 1159 * Map the pagetable page into the process address space, if it 1160 * isn't already there. 1161 */ 1162 pageva = MIPS_PHYS_TO_DIRECT(VM_PAGE_TO_PHYS(m)); 1163 1164#ifdef __mips_n64 1165 if (ptepindex >= NUPDE) { 1166 pmap->pm_segtab[ptepindex - NUPDE] = (pd_entry_t)pageva; 1167 } else { 1168 pd_entry_t *pdep, *pde; 1169 int segindex = ptepindex >> (SEGSHIFT - PDRSHIFT); 1170 int pdeindex = ptepindex & (NPDEPG - 1); 1171 vm_page_t pg; 1172 1173 pdep = &pmap->pm_segtab[segindex]; 1174 if (*pdep == NULL) { 1175 /* recurse for allocating page dir */ 1176 if (_pmap_allocpte(pmap, NUPDE + segindex, 1177 flags) == NULL) { 1178 /* alloc failed, release current */ 1179 --m->wire_count; 1180 atomic_subtract_int(&cnt.v_wire_count, 1); 1181 vm_page_free_zero(m); 1182 return (NULL); 1183 } 1184 } else { 1185 pg = PHYS_TO_VM_PAGE(MIPS_DIRECT_TO_PHYS(*pdep)); 1186 pg->wire_count++; 1187 } 1188 /* Next level entry */ 1189 pde = (pd_entry_t *)*pdep; 1190 pde[pdeindex] = (pd_entry_t)pageva; 1191 pmap->pm_ptphint = m; 1192 } 1193#else 1194 pmap->pm_segtab[ptepindex] = (pd_entry_t)pageva; 1195#endif 1196 pmap->pm_stats.resident_count++; 1197 1198 /* 1199 * Set the page table hint 1200 */ 1201 pmap->pm_ptphint = m; 1202 return (m); 1203} 1204 1205static vm_page_t 1206pmap_allocpte(pmap_t pmap, vm_offset_t va, int flags) 1207{ 1208 unsigned ptepindex; 1209 pd_entry_t *pde; 1210 vm_page_t m; 1211 1212 KASSERT((flags & (M_NOWAIT | M_WAITOK)) == M_NOWAIT || 1213 (flags & (M_NOWAIT | M_WAITOK)) == M_WAITOK, 1214 ("pmap_allocpte: flags is neither M_NOWAIT nor M_WAITOK")); 1215 1216 /* 1217 * Calculate pagetable page index 1218 */ 1219 ptepindex = pmap_pde_pindex(va); 1220retry: 1221 /* 1222 * Get the page directory entry 1223 */ 1224 pde = pmap_pde(pmap, va); 1225 1226 /* 1227 * If the page table page is mapped, we just increment the hold 1228 * count, and activate it. 1229 */ 1230 if (pde != NULL && *pde != NULL) { 1231 /* 1232 * In order to get the page table page, try the hint first. 1233 */ 1234 if (pmap->pm_ptphint && 1235 (pmap->pm_ptphint->pindex == ptepindex)) { 1236 m = pmap->pm_ptphint; 1237 } else { 1238 m = PHYS_TO_VM_PAGE(MIPS_DIRECT_TO_PHYS(*pde)); 1239 pmap->pm_ptphint = m; 1240 } 1241 m->wire_count++; 1242 } else { 1243 /* 1244 * Here if the pte page isn't mapped, or if it has been 1245 * deallocated. 1246 */ 1247 m = _pmap_allocpte(pmap, ptepindex, flags); 1248 if (m == NULL && (flags & M_WAITOK)) 1249 goto retry; 1250 } 1251 return (m); 1252} 1253 1254 1255/*************************************************** 1256* Pmap allocation/deallocation routines. 1257 ***************************************************/ 1258/* 1259 * Revision 1.397 1260 * - Merged pmap_release and pmap_release_free_page. When pmap_release is 1261 * called only the page directory page(s) can be left in the pmap pte 1262 * object, since all page table pages will have been freed by 1263 * pmap_remove_pages and pmap_remove. In addition, there can only be one 1264 * reference to the pmap and the page directory is wired, so the page(s) 1265 * can never be busy. So all there is to do is clear the magic mappings 1266 * from the page directory and free the page(s). 1267 */ 1268 1269 1270/* 1271 * Release any resources held by the given physical map. 1272 * Called when a pmap initialized by pmap_pinit is being released. 1273 * Should only be called if the map contains no valid mappings. 
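 * (At this point only the segment-table page allocated in pmap_pinit() should remain resident; it is unwired and freed below.)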
1274 */ 1275void 1276pmap_release(pmap_t pmap) 1277{ 1278 vm_offset_t ptdva; 1279 vm_page_t ptdpg; 1280 1281 KASSERT(pmap->pm_stats.resident_count == 0, 1282 ("pmap_release: pmap resident count %ld != 0", 1283 pmap->pm_stats.resident_count)); 1284 1285 ptdva = (vm_offset_t)pmap->pm_segtab; 1286 ptdpg = PHYS_TO_VM_PAGE(MIPS_DIRECT_TO_PHYS(ptdva)); 1287 1288 ptdpg->wire_count--; 1289 atomic_subtract_int(&cnt.v_wire_count, 1); 1290 vm_page_free_zero(ptdpg); 1291 PMAP_LOCK_DESTROY(pmap); 1292} 1293 1294/* 1295 * grow the number of kernel page table entries, if needed 1296 */ 1297void 1298pmap_growkernel(vm_offset_t addr) 1299{ 1300 vm_page_t nkpg; 1301 pd_entry_t *pde, *pdpe; 1302 pt_entry_t *pte; 1303 int i; 1304 1305 mtx_assert(&kernel_map->system_mtx, MA_OWNED); 1306 addr = roundup2(addr, NBSEG); 1307 if (addr - 1 >= kernel_map->max_offset) 1308 addr = kernel_map->max_offset; 1309 while (kernel_vm_end < addr) { 1310 pdpe = pmap_segmap(kernel_pmap, kernel_vm_end); 1311#ifdef __mips_n64 1312 if (*pdpe == 0) { 1313 /* new intermediate page table entry */ 1314 nkpg = pmap_alloc_direct_page(nkpt, VM_ALLOC_INTERRUPT); 1315 if (nkpg == NULL) 1316 panic("pmap_growkernel: no memory to grow kernel"); 1317 *pdpe = (pd_entry_t)MIPS_PHYS_TO_DIRECT(VM_PAGE_TO_PHYS(nkpg)); 1318 continue; /* try again */ 1319 } 1320#endif 1321 pde = pmap_pdpe_to_pde(pdpe, kernel_vm_end); 1322 if (*pde != 0) { 1323 kernel_vm_end = (kernel_vm_end + NBPDR) & ~PDRMASK; 1324 if (kernel_vm_end - 1 >= kernel_map->max_offset) { 1325 kernel_vm_end = kernel_map->max_offset; 1326 break; 1327 } 1328 continue; 1329 } 1330 1331 /* 1332 * This index is bogus, but out of the way 1333 */ 1334 nkpg = pmap_alloc_direct_page(nkpt, VM_ALLOC_INTERRUPT); 1335 if (!nkpg) 1336 panic("pmap_growkernel: no memory to grow kernel"); 1337 nkpt++; 1338 *pde = (pd_entry_t)MIPS_PHYS_TO_DIRECT(VM_PAGE_TO_PHYS(nkpg)); 1339 1340 /* 1341 * The R[4-7]?00 stores only one copy of the Global bit in 1342 * the translation lookaside buffer for each 2 page entry. 1343 * Thus invalid entrys must have the Global bit set so when 1344 * Entry LO and Entry HI G bits are anded together they will 1345 * produce a global bit to store in the tlb. 1346 */ 1347 pte = (pt_entry_t *)*pde; 1348 for (i = 0; i < NPTEPG; i++) 1349 pte[i] = PTE_G; 1350 1351 kernel_vm_end = (kernel_vm_end + NBPDR) & ~PDRMASK; 1352 if (kernel_vm_end - 1 >= kernel_map->max_offset) { 1353 kernel_vm_end = kernel_map->max_offset; 1354 break; 1355 } 1356 } 1357} 1358 1359/*************************************************** 1360* page management routines. 1361 ***************************************************/ 1362 1363/* 1364 * free the pv_entry back to the free list 1365 */ 1366static PMAP_INLINE void 1367free_pv_entry(pv_entry_t pv) 1368{ 1369 1370 pv_entry_count--; 1371 uma_zfree(pvzone, pv); 1372} 1373 1374/* 1375 * get a new pv_entry, allocating a block from the system 1376 * when needed. 1377 * the memory allocation is performed bypassing the malloc code 1378 * because of the possibility of allocations at interrupt time. 
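 * (pv entries come from the "PV ENTRY" UMA zone created in pmap_init(); if the M_NOWAIT allocation below fails, pv entries are reclaimed from mappings of inactive pages first and then from active pages.)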
1379 */ 1380static pv_entry_t 1381get_pv_entry(pmap_t locked_pmap) 1382{ 1383 static const struct timeval printinterval = { 60, 0 }; 1384 static struct timeval lastprint; 1385 struct vpgqueues *vpq; 1386 pt_entry_t *pte, oldpte; 1387 pmap_t pmap; 1388 pv_entry_t allocated_pv, next_pv, pv; 1389 vm_offset_t va; 1390 vm_page_t m; 1391 1392 PMAP_LOCK_ASSERT(locked_pmap, MA_OWNED); 1393 mtx_assert(&vm_page_queue_mtx, MA_OWNED); 1394 allocated_pv = uma_zalloc(pvzone, M_NOWAIT); 1395 if (allocated_pv != NULL) { 1396 pv_entry_count++; 1397 if (pv_entry_count > pv_entry_high_water) 1398 pagedaemon_wakeup(); 1399 else 1400 return (allocated_pv); 1401 } 1402 /* 1403 * Reclaim pv entries: At first, destroy mappings to inactive 1404 * pages. After that, if a pv entry is still needed, destroy 1405 * mappings to active pages. 1406 */ 1407 if (ratecheck(&lastprint, &printinterval)) 1408 printf("Approaching the limit on PV entries, " 1409 "increase the vm.pmap.shpgperproc tunable.\n"); 1410 vpq = &vm_page_queues[PQ_INACTIVE]; 1411retry: 1412 TAILQ_FOREACH(m, &vpq->pl, pageq) { 1413 if (m->hold_count || m->busy) 1414 continue; 1415 TAILQ_FOREACH_SAFE(pv, &m->md.pv_list, pv_list, next_pv) { 1416 va = pv->pv_va; 1417 pmap = pv->pv_pmap; 1418 /* Avoid deadlock and lock recursion. */ 1419 if (pmap > locked_pmap) 1420 PMAP_LOCK(pmap); 1421 else if (pmap != locked_pmap && !PMAP_TRYLOCK(pmap)) 1422 continue; 1423 pmap->pm_stats.resident_count--; 1424 pte = pmap_pte(pmap, va); 1425 KASSERT(pte != NULL, ("pte")); 1426 oldpte = *pte; 1427 if (is_kernel_pmap(pmap)) 1428 *pte = PTE_G; 1429 else 1430 *pte = 0; 1431 KASSERT(!pte_test(&oldpte, PTE_W), 1432 ("wired pte for unwired page")); 1433 if (m->md.pv_flags & PV_TABLE_REF) 1434 vm_page_flag_set(m, PG_REFERENCED); 1435 if (pte_test(&oldpte, PTE_D)) 1436 vm_page_dirty(m); 1437 pmap_invalidate_page(pmap, va); 1438 TAILQ_REMOVE(&pmap->pm_pvlist, pv, pv_plist); 1439 m->md.pv_list_count--; 1440 TAILQ_REMOVE(&m->md.pv_list, pv, pv_list); 1441 pmap_unuse_pt(pmap, va, pv->pv_ptem); 1442 if (pmap != locked_pmap) 1443 PMAP_UNLOCK(pmap); 1444 if (allocated_pv == NULL) 1445 allocated_pv = pv; 1446 else 1447 free_pv_entry(pv); 1448 } 1449 if (TAILQ_EMPTY(&m->md.pv_list)) { 1450 vm_page_flag_clear(m, PG_WRITEABLE); 1451 m->md.pv_flags &= ~(PV_TABLE_REF | PV_TABLE_MOD); 1452 } 1453 } 1454 if (allocated_pv == NULL) { 1455 if (vpq == &vm_page_queues[PQ_INACTIVE]) { 1456 vpq = &vm_page_queues[PQ_ACTIVE]; 1457 goto retry; 1458 } 1459 panic("get_pv_entry: increase the vm.pmap.shpgperproc tunable"); 1460 } 1461 return (allocated_pv); 1462} 1463 1464/* 1465 * Revision 1.370 1466 * 1467 * Move pmap_collect() out of the machine-dependent code, rename it 1468 * to reflect its new location, and add page queue and flag locking. 1469 * 1470 * Notes: (1) alpha, i386, and ia64 had identical implementations 1471 * of pmap_collect() in terms of machine-independent interfaces; 1472 * (2) sparc64 doesn't require it; (3) powerpc had it as a TODO. 1473 * 1474 * MIPS implementation was identical to alpha [Junos 8.2] 1475 */ 1476 1477/* 1478 * If it is the first entry on the list, it is actually 1479 * in the header and we must copy the following entry up 1480 * to the header. Otherwise we must search the list for 1481 * the entry. In either case we free the now unused entry. 
1482 */ 1483 1484static pv_entry_t 1485pmap_pvh_remove(struct md_page *pvh, pmap_t pmap, vm_offset_t va) 1486{ 1487 pv_entry_t pv; 1488 1489 PMAP_LOCK_ASSERT(pmap, MA_OWNED); 1490 mtx_assert(&vm_page_queue_mtx, MA_OWNED); 1491 if (pvh->pv_list_count < pmap->pm_stats.resident_count) { 1492 TAILQ_FOREACH(pv, &pvh->pv_list, pv_list) { 1493 if (pmap == pv->pv_pmap && va == pv->pv_va) 1494 break; 1495 } 1496 } else { 1497 TAILQ_FOREACH(pv, &pmap->pm_pvlist, pv_plist) { 1498 if (va == pv->pv_va) 1499 break; 1500 } 1501 } 1502 if (pv != NULL) { 1503 TAILQ_REMOVE(&pvh->pv_list, pv, pv_list); 1504 pvh->pv_list_count--; 1505 TAILQ_REMOVE(&pmap->pm_pvlist, pv, pv_plist); 1506 } 1507 return (pv); 1508} 1509 1510static void 1511pmap_pvh_free(struct md_page *pvh, pmap_t pmap, vm_offset_t va) 1512{ 1513 pv_entry_t pv; 1514 1515 pv = pmap_pvh_remove(pvh, pmap, va); 1516 KASSERT(pv != NULL, ("pmap_pvh_free: pv not found, pa %lx va %lx", 1517 (u_long)VM_PAGE_TO_PHYS(member2struct(vm_page, md, pvh)), 1518 (u_long)va)); 1519 free_pv_entry(pv); 1520} 1521 1522static void 1523pmap_remove_entry(pmap_t pmap, vm_page_t m, vm_offset_t va) 1524{ 1525 1526 mtx_assert(&vm_page_queue_mtx, MA_OWNED); 1527 pmap_pvh_free(&m->md, pmap, va); 1528 if (TAILQ_EMPTY(&m->md.pv_list)) 1529 vm_page_flag_clear(m, PG_WRITEABLE); 1530} 1531 1532/* 1533 * Conditionally create a pv entry. 1534 */ 1535static boolean_t 1536pmap_try_insert_pv_entry(pmap_t pmap, vm_page_t mpte, vm_offset_t va, 1537 vm_page_t m) 1538{ 1539 pv_entry_t pv; 1540 1541 PMAP_LOCK_ASSERT(pmap, MA_OWNED); 1542 mtx_assert(&vm_page_queue_mtx, MA_OWNED); 1543 if (pv_entry_count < pv_entry_high_water && 1544 (pv = uma_zalloc(pvzone, M_NOWAIT)) != NULL) { 1545 pv_entry_count++; 1546 pv->pv_va = va; 1547 pv->pv_pmap = pmap; 1548 pv->pv_ptem = mpte; 1549 TAILQ_INSERT_TAIL(&pmap->pm_pvlist, pv, pv_plist); 1550 TAILQ_INSERT_TAIL(&m->md.pv_list, pv, pv_list); 1551 m->md.pv_list_count++; 1552 return (TRUE); 1553 } else 1554 return (FALSE); 1555} 1556 1557/* 1558 * pmap_remove_pte: do the things to unmap a page in a process 1559 */ 1560static int 1561pmap_remove_pte(struct pmap *pmap, pt_entry_t *ptq, vm_offset_t va) 1562{ 1563 pt_entry_t oldpte; 1564 vm_page_t m; 1565 vm_offset_t pa; 1566 1567 mtx_assert(&vm_page_queue_mtx, MA_OWNED); 1568 PMAP_LOCK_ASSERT(pmap, MA_OWNED); 1569 1570 oldpte = *ptq; 1571 if (is_kernel_pmap(pmap)) 1572 *ptq = PTE_G; 1573 else 1574 *ptq = 0; 1575 1576 if (pte_test(&oldpte, PTE_W)) 1577 pmap->pm_stats.wired_count -= 1; 1578 1579 pmap->pm_stats.resident_count -= 1; 1580 pa = TLBLO_PTE_TO_PA(oldpte); 1581 1582 if (page_is_managed(pa)) { 1583 m = PHYS_TO_VM_PAGE(pa); 1584 if (pte_test(&oldpte, PTE_D)) { 1585 KASSERT(!pte_test(&oldpte, PTE_RO), 1586 ("%s: modified page not writable: va: %p, pte: 0x%x", 1587 __func__, (void *)va, oldpte)); 1588 vm_page_dirty(m); 1589 } 1590 if (m->md.pv_flags & PV_TABLE_REF) 1591 vm_page_flag_set(m, PG_REFERENCED); 1592 m->md.pv_flags &= ~(PV_TABLE_REF | PV_TABLE_MOD); 1593 1594 pmap_remove_entry(pmap, m, va); 1595 } 1596 return (pmap_unuse_pt(pmap, va, NULL)); 1597} 1598 1599/* 1600 * Remove a single page from a process address space 1601 */ 1602static void 1603pmap_remove_page(struct pmap *pmap, vm_offset_t va) 1604{ 1605 pt_entry_t *ptq; 1606 1607 mtx_assert(&vm_page_queue_mtx, MA_OWNED); 1608 PMAP_LOCK_ASSERT(pmap, MA_OWNED); 1609 ptq = pmap_pte(pmap, va); 1610 1611 /* 1612 * if there is no pte for this address, just skip it!!! 
1613 */ 1614 if (!ptq || !pte_test(ptq, PTE_V)) { 1615 return; 1616 } 1617 1618 /* 1619 * Write back all caches from the page being destroyed 1620 */ 1621 mips_dcache_wbinv_range_index(va, PAGE_SIZE); 1622 1623 /* 1624 * get a local va for mappings for this pmap. 1625 */ 1626 (void)pmap_remove_pte(pmap, ptq, va); 1627 pmap_invalidate_page(pmap, va); 1628 1629 return; 1630} 1631 1632/* 1633 * Remove the given range of addresses from the specified map. 1634 * 1635 * It is assumed that the start and end are properly 1636 * rounded to the page size. 1637 */ 1638void 1639pmap_remove(struct pmap *pmap, vm_offset_t sva, vm_offset_t eva) 1640{ 1641 vm_offset_t va_next; 1642 pd_entry_t *pde, *pdpe; 1643 pt_entry_t *pte; 1644 1645 if (pmap == NULL) 1646 return; 1647 1648 if (pmap->pm_stats.resident_count == 0) 1649 return; 1650 1651 vm_page_lock_queues(); 1652 PMAP_LOCK(pmap); 1653 1654 /* 1655 * special handling of removing one page. a very common operation 1656 * and easy to short circuit some code. 1657 */ 1658 if ((sva + PAGE_SIZE) == eva) { 1659 pmap_remove_page(pmap, sva); 1660 goto out; 1661 } 1662 for (; sva < eva; sva = va_next) { 1663 pdpe = pmap_segmap(pmap, sva); 1664#ifdef __mips_n64 1665 if (*pdpe == 0) { 1666 va_next = (sva + NBSEG) & ~SEGMASK; 1667 if (va_next < sva) 1668 va_next = eva; 1669 continue; 1670 } 1671#endif 1672 va_next = (sva + NBPDR) & ~PDRMASK; 1673 if (va_next < sva) 1674 va_next = eva; 1675 1676 pde = pmap_pdpe_to_pde(pdpe, sva); 1677 if (*pde == 0) 1678 continue; 1679 if (va_next > eva) 1680 va_next = eva; 1681 for (pte = pmap_pde_to_pte(pde, sva); sva != va_next; 1682 pte++, sva += PAGE_SIZE) { 1683 pmap_remove_page(pmap, sva); 1684 } 1685 } 1686out: 1687 vm_page_unlock_queues(); 1688 PMAP_UNLOCK(pmap); 1689} 1690 1691/* 1692 * Routine: pmap_remove_all 1693 * Function: 1694 * Removes this physical page from 1695 * all physical maps in which it resides. 1696 * Reflects back modify bits to the pager. 1697 * 1698 * Notes: 1699 * Original versions of this routine were very 1700 * inefficient because they iteratively called 1701 * pmap_remove (slow...) 1702 */ 1703 1704void 1705pmap_remove_all(vm_page_t m) 1706{ 1707 pv_entry_t pv; 1708 pt_entry_t *pte, tpte; 1709 1710 KASSERT((m->flags & PG_FICTITIOUS) == 0, 1711 ("pmap_remove_all: page %p is fictitious", m)); 1712 vm_page_lock_queues(); 1713 1714 if (m->md.pv_flags & PV_TABLE_REF) 1715 vm_page_flag_set(m, PG_REFERENCED); 1716 1717 while ((pv = TAILQ_FIRST(&m->md.pv_list)) != NULL) { 1718 PMAP_LOCK(pv->pv_pmap); 1719 1720 /* 1721 * If it's last mapping writeback all caches from 1722 * the page being destroyed 1723 */ 1724 if (m->md.pv_list_count == 1) 1725 mips_dcache_wbinv_range_index(pv->pv_va, PAGE_SIZE); 1726 1727 pv->pv_pmap->pm_stats.resident_count--; 1728 1729 pte = pmap_pte(pv->pv_pmap, pv->pv_va); 1730 1731 tpte = *pte; 1732 if (is_kernel_pmap(pv->pv_pmap)) 1733 *pte = PTE_G; 1734 else 1735 *pte = 0; 1736 1737 if (pte_test(&tpte, PTE_W)) 1738 pv->pv_pmap->pm_stats.wired_count--; 1739 1740 /* 1741 * Update the vm_page_t clean and reference bits. 
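 * (PTE_D is the MIPS dirty/modified bit, so a mapping with PTE_D set on a managed page is reflected back via vm_page_dirty() below.)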
1742 */ 1743 if (pte_test(&tpte, PTE_D)) { 1744 KASSERT(!pte_test(&tpte, PTE_RO), 1745 ("%s: modified page not writable: va: %p, pte: 0x%x", 1746 __func__, (void *)pv->pv_va, tpte)); 1747 vm_page_dirty(m); 1748 } 1749 pmap_invalidate_page(pv->pv_pmap, pv->pv_va); 1750 1751 TAILQ_REMOVE(&pv->pv_pmap->pm_pvlist, pv, pv_plist); 1752 TAILQ_REMOVE(&m->md.pv_list, pv, pv_list); 1753 m->md.pv_list_count--; 1754 pmap_unuse_pt(pv->pv_pmap, pv->pv_va, pv->pv_ptem); 1755 PMAP_UNLOCK(pv->pv_pmap); 1756 free_pv_entry(pv); 1757 } 1758 1759 vm_page_flag_clear(m, PG_WRITEABLE); 1760 m->md.pv_flags &= ~(PV_TABLE_REF | PV_TABLE_MOD); 1761 vm_page_unlock_queues(); 1762} 1763 1764/* 1765 * Set the physical protection on the 1766 * specified range of this map as requested. 1767 */ 1768void 1769pmap_protect(pmap_t pmap, vm_offset_t sva, vm_offset_t eva, vm_prot_t prot) 1770{ 1771 pt_entry_t *pte; 1772 pd_entry_t *pde, *pdpe; 1773 vm_offset_t va_next; 1774 1775 if (pmap == NULL) 1776 return; 1777 1778 if ((prot & VM_PROT_READ) == VM_PROT_NONE) { 1779 pmap_remove(pmap, sva, eva); 1780 return; 1781 } 1782 if (prot & VM_PROT_WRITE) 1783 return; 1784 1785 vm_page_lock_queues(); 1786 PMAP_LOCK(pmap); 1787 for (; sva < eva; sva = va_next) { 1788 pt_entry_t pbits; 1789 vm_page_t m; 1790 vm_paddr_t pa; 1791 1792 pdpe = pmap_segmap(pmap, sva); 1793#ifdef __mips_n64 1794 if (*pdpe == 0) { 1795 va_next = (sva + NBSEG) & ~SEGMASK; 1796 if (va_next < sva) 1797 va_next = eva; 1798 continue; 1799 } 1800#endif 1801 va_next = (sva + NBPDR) & ~PDRMASK; 1802 if (va_next < sva) 1803 va_next = eva; 1804 1805 pde = pmap_pdpe_to_pde(pdpe, sva); 1806 if (pde == NULL || *pde == NULL) 1807 continue; 1808 if (va_next > eva) 1809 va_next = eva; 1810 1811 for (pte = pmap_pde_to_pte(pde, sva); sva != va_next; pte++, 1812 sva += PAGE_SIZE) { 1813 1814 /* Skip invalid PTEs */ 1815 if (!pte_test(pte, PTE_V)) 1816 continue; 1817 pbits = *pte; 1818 pa = TLBLO_PTE_TO_PA(pbits); 1819 if (page_is_managed(pa) && pte_test(&pbits, PTE_D)) { 1820 m = PHYS_TO_VM_PAGE(pa); 1821 vm_page_dirty(m); 1822 m->md.pv_flags &= ~PV_TABLE_MOD; 1823 } 1824 pte_clear(&pbits, PTE_D); 1825 pte_set(&pbits, PTE_RO); 1826 1827 if (pbits != *pte) { 1828 *pte = pbits; 1829 pmap_update_page(pmap, sva, pbits); 1830 } 1831 } 1832 } 1833 vm_page_unlock_queues(); 1834 PMAP_UNLOCK(pmap); 1835} 1836 1837/* 1838 * Insert the given physical page (p) at 1839 * the specified virtual address (v) in the 1840 * target physical map with the protection requested. 1841 * 1842 * If specified, the page will be wired down, meaning 1843 * that the related pte can not be reclaimed. 1844 * 1845 * NB: This is the only routine which MAY NOT lazy-evaluate 1846 * or lose information. That is, this routine must actually 1847 * insert this page into the given map NOW. 1848 */ 1849void 1850pmap_enter(pmap_t pmap, vm_offset_t va, vm_prot_t access, vm_page_t m, 1851 vm_prot_t prot, boolean_t wired) 1852{ 1853 vm_offset_t pa, opa; 1854 pt_entry_t *pte; 1855 pt_entry_t origpte, newpte; 1856 pv_entry_t pv; 1857 vm_page_t mpte, om; 1858 int rw = 0; 1859 1860 if (pmap == NULL) 1861 return; 1862 1863 va &= ~PAGE_MASK; 1864 KASSERT(va <= VM_MAX_KERNEL_ADDRESS, ("pmap_enter: toobig")); 1865 KASSERT((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) != 0 || 1866 (m->oflags & VPO_BUSY) != 0, 1867 ("pmap_enter: page %p is not busy", m)); 1868 1869 mpte = NULL; 1870 1871 vm_page_lock_queues(); 1872 PMAP_LOCK(pmap); 1873 1874 /* 1875 * In the case that a page table page is not resident, we are 1876 * creating it here. 
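 * (Only user addresses take this path; kernel page tables are preallocated by pmap_create_kernel_pagetable() and grown by pmap_growkernel(), so no page table page is allocated here for kernel VAs.)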
1877 */ 1878 if (va < VM_MAXUSER_ADDRESS) { 1879 mpte = pmap_allocpte(pmap, va, M_WAITOK); 1880 } 1881 pte = pmap_pte(pmap, va); 1882 1883 /* 1884 * Page Directory table entry not valid, we need a new PT page 1885 */ 1886 if (pte == NULL) { 1887 panic("pmap_enter: invalid page directory, pdir=%p, va=%p", 1888 (void *)pmap->pm_segtab, (void *)va); 1889 } 1890 pa = VM_PAGE_TO_PHYS(m); 1891 om = NULL; 1892 origpte = *pte; 1893 opa = TLBLO_PTE_TO_PA(origpte); 1894 1895 /* 1896 * Mapping has not changed, must be protection or wiring change. 1897 */ 1898 if (pte_test(&origpte, PTE_V) && opa == pa) { 1899 /* 1900 * Wiring change, just update stats. We don't worry about 1901 * wiring PT pages as they remain resident as long as there 1902 * are valid mappings in them. Hence, if a user page is 1903 * wired, the PT page will be also. 1904 */ 1905 if (wired && !pte_test(&origpte, PTE_W)) 1906 pmap->pm_stats.wired_count++; 1907 else if (!wired && pte_test(&origpte, PTE_W)) 1908 pmap->pm_stats.wired_count--; 1909 1910 KASSERT(!pte_test(&origpte, PTE_D | PTE_RO), 1911 ("%s: modified page not writable: va: %p, pte: 0x%x", 1912 __func__, (void *)va, origpte)); 1913 1914 /* 1915 * Remove extra pte reference 1916 */ 1917 if (mpte) 1918 mpte->wire_count--; 1919 1920 if (page_is_managed(opa)) { 1921 om = m; 1922 } 1923 goto validate; 1924 } 1925 1926 pv = NULL; 1927 1928 /* 1929 * Mapping has changed, invalidate old range and fall through to 1930 * handle validating new mapping. 1931 */ 1932 if (opa) { 1933 if (pte_test(&origpte, PTE_W)) 1934 pmap->pm_stats.wired_count--; 1935 1936 if (page_is_managed(opa)) { 1937 om = PHYS_TO_VM_PAGE(opa); 1938 pv = pmap_pvh_remove(&om->md, pmap, va); 1939 } 1940 if (mpte != NULL) { 1941 mpte->wire_count--; 1942 KASSERT(mpte->wire_count > 0, 1943 ("pmap_enter: missing reference to page table page," 1944 " va: %p", (void *)va)); 1945 } 1946 } else 1947 pmap->pm_stats.resident_count++; 1948 1949 /* 1950 * Enter on the PV list if part of our managed memory. Note that we 1951 * raise IPL while manipulating pv_table since pmap_enter can be 1952 * called at interrupt time. 1953 */ 1954 if ((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) == 0) { 1955 KASSERT(va < kmi.clean_sva || va >= kmi.clean_eva, 1956 ("pmap_enter: managed mapping within the clean submap")); 1957 if (pv == NULL) 1958 pv = get_pv_entry(pmap); 1959 pv->pv_va = va; 1960 pv->pv_pmap = pmap; 1961 pv->pv_ptem = mpte; 1962 TAILQ_INSERT_TAIL(&pmap->pm_pvlist, pv, pv_plist); 1963 TAILQ_INSERT_TAIL(&m->md.pv_list, pv, pv_list); 1964 m->md.pv_list_count++; 1965 } else if (pv != NULL) 1966 free_pv_entry(pv); 1967 1968 /* 1969 * Increment counters 1970 */ 1971 if (wired) 1972 pmap->pm_stats.wired_count++; 1973 1974validate: 1975 if ((access & VM_PROT_WRITE) != 0) 1976 m->md.pv_flags |= PV_TABLE_MOD | PV_TABLE_REF; 1977 rw = init_pte_prot(va, m, prot); 1978 1979#ifdef PMAP_DEBUG 1980 printf("pmap_enter: va: %p -> pa: %p\n", (void *)va, (void *)pa); 1981#endif 1982 /* 1983 * Now validate mapping with desired protection/wiring. 1984 */ 1985 newpte = TLBLO_PA_TO_PFN(pa) | rw | PTE_V; 1986 1987 if (is_cacheable_mem(pa)) 1988 newpte |= PTE_C_CACHE; 1989 else 1990 newpte |= PTE_C_UNCACHED; 1991 1992 if (wired) 1993 newpte |= PTE_W; 1994 1995 if (is_kernel_pmap(pmap)) 1996 newpte |= PTE_G; 1997 1998 /* 1999 * if the mapping or permission bits are different, we need to 2000 * update the pte. 
2001 */ 2002 if (origpte != newpte) { 2003 if (pte_test(&origpte, PTE_V)) { 2004 *pte = newpte; 2005 if (page_is_managed(opa) && (opa != pa)) { 2006 if (om->md.pv_flags & PV_TABLE_REF) 2007 vm_page_flag_set(om, PG_REFERENCED); 2008 om->md.pv_flags &= 2009 ~(PV_TABLE_REF | PV_TABLE_MOD); 2010 } 2011 if (pte_test(&origpte, PTE_D)) { 2012 KASSERT(!pte_test(&origpte, PTE_RO), 2013 ("pmap_enter: modified page not writable:" 2014 " va: %p, pte: 0x%x", (void *)va, origpte)); 2015 if (page_is_managed(opa)) 2016 vm_page_dirty(om); 2017 } 2018 if (page_is_managed(opa) && 2019 TAILQ_EMPTY(&om->md.pv_list)) 2020 vm_page_flag_clear(om, PG_WRITEABLE); 2021 } else { 2022 *pte = newpte; 2023 } 2024 } 2025 pmap_update_page(pmap, va, newpte); 2026 2027 /* 2028 * Sync I & D caches for executable pages. Do this only if the 2029 * target pmap belongs to the current process. Otherwise, an 2030 * unresolvable TLB miss may occur. 2031 */ 2032 if (!is_kernel_pmap(pmap) && (pmap == &curproc->p_vmspace->vm_pmap) && 2033 (prot & VM_PROT_EXECUTE)) { 2034 mips_icache_sync_range(va, PAGE_SIZE); 2035 mips_dcache_wbinv_range(va, PAGE_SIZE); 2036 } 2037 vm_page_unlock_queues(); 2038 PMAP_UNLOCK(pmap); 2039} 2040 2041/* 2042 * this code makes some *MAJOR* assumptions: 2043 * 1. Current pmap & pmap exists. 2044 * 2. Not wired. 2045 * 3. Read access. 2046 * 4. No page table pages. 2047 * but is *MUCH* faster than pmap_enter... 2048 */ 2049 2050void 2051pmap_enter_quick(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot) 2052{ 2053 2054 vm_page_lock_queues(); 2055 PMAP_LOCK(pmap); 2056 (void)pmap_enter_quick_locked(pmap, va, m, prot, NULL); 2057 vm_page_unlock_queues(); 2058 PMAP_UNLOCK(pmap); 2059} 2060 2061static vm_page_t 2062pmap_enter_quick_locked(pmap_t pmap, vm_offset_t va, vm_page_t m, 2063 vm_prot_t prot, vm_page_t mpte) 2064{ 2065 pt_entry_t *pte; 2066 vm_offset_t pa; 2067 2068 KASSERT(va < kmi.clean_sva || va >= kmi.clean_eva || 2069 (m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) != 0, 2070 ("pmap_enter_quick_locked: managed mapping within the clean submap")); 2071 mtx_assert(&vm_page_queue_mtx, MA_OWNED); 2072 PMAP_LOCK_ASSERT(pmap, MA_OWNED); 2073 2074 /* 2075 * In the case that a page table page is not resident, we are 2076 * creating it here. 2077 */ 2078 if (va < VM_MAXUSER_ADDRESS) { 2079 pd_entry_t *pde; 2080 unsigned ptepindex; 2081 2082 /* 2083 * Calculate pagetable page index 2084 */ 2085 ptepindex = pmap_pde_pindex(va); 2086 if (mpte && (mpte->pindex == ptepindex)) { 2087 mpte->wire_count++; 2088 } else { 2089 /* 2090 * Get the page directory entry 2091 */ 2092 pde = pmap_pde(pmap, va); 2093 2094 /* 2095 * If the page table page is mapped, we just 2096 * increment the hold count, and activate it. 2097 */ 2098 if (pde && *pde != 0) { 2099 if (pmap->pm_ptphint && 2100 (pmap->pm_ptphint->pindex == ptepindex)) { 2101 mpte = pmap->pm_ptphint; 2102 } else { 2103 mpte = PHYS_TO_VM_PAGE( 2104 MIPS_DIRECT_TO_PHYS(*pde)); 2105 pmap->pm_ptphint = mpte; 2106 } 2107 mpte->wire_count++; 2108 } else { 2109 mpte = _pmap_allocpte(pmap, ptepindex, 2110 M_NOWAIT); 2111 if (mpte == NULL) 2112 return (mpte); 2113 } 2114 } 2115 } else { 2116 mpte = NULL; 2117 } 2118 2119 pte = pmap_pte(pmap, va); 2120 if (pte_test(pte, PTE_V)) { 2121 if (mpte != NULL) { 2122 mpte->wire_count--; 2123 mpte = NULL; 2124 } 2125 return (mpte); 2126 } 2127 2128 /* 2129 * Enter on the PV list if part of our managed memory.
2130 */ 2131 if ((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) == 0 && 2132 !pmap_try_insert_pv_entry(pmap, mpte, va, m)) { 2133 if (mpte != NULL) { 2134 pmap_unwire_pte_hold(pmap, va, mpte); 2135 mpte = NULL; 2136 } 2137 return (mpte); 2138 } 2139 2140 /* 2141 * Increment counters 2142 */ 2143 pmap->pm_stats.resident_count++; 2144 2145 pa = VM_PAGE_TO_PHYS(m); 2146 2147 /* 2148 * Now validate mapping with RO protection 2149 */ 2150 *pte = TLBLO_PA_TO_PFN(pa) | PTE_V; 2151 2152 if (is_cacheable_mem(pa)) 2153 *pte |= PTE_C_CACHE; 2154 else 2155 *pte |= PTE_C_UNCACHED; 2156 2157 if (is_kernel_pmap(pmap)) 2158 *pte |= PTE_G; 2159 else { 2160 *pte |= PTE_RO; 2161 /* 2162 * Sync I & D caches. Do this only if the the target pmap 2163 * belongs to the current process. Otherwise, an 2164 * unresolvable TLB miss may occur. */ 2165 if (pmap == &curproc->p_vmspace->vm_pmap) { 2166 va &= ~PAGE_MASK; 2167 mips_icache_sync_range(va, PAGE_SIZE); 2168 mips_dcache_wbinv_range(va, PAGE_SIZE); 2169 } 2170 } 2171 return (mpte); 2172} 2173 2174/* 2175 * Make a temporary mapping for a physical address. This is only intended 2176 * to be used for panic dumps. 2177 * 2178 * Use XKPHYS for 64 bit, and KSEG0 where possible for 32 bit. 2179 */ 2180void * 2181pmap_kenter_temporary(vm_paddr_t pa, int i) 2182{ 2183 vm_offset_t va; 2184 2185 if (i != 0) 2186 printf("%s: ERROR!!! More than one page of virtual address mapping not supported\n", 2187 __func__); 2188 2189 if (MIPS_DIRECT_MAPPABLE(pa)) { 2190 va = MIPS_PHYS_TO_DIRECT(pa); 2191 } else { 2192#ifndef __mips_n64 /* XXX : to be converted to new style */ 2193 int cpu; 2194 register_t intr; 2195 struct local_sysmaps *sysm; 2196 pt_entry_t *pte, npte; 2197 2198 /* If this is used other than for dumps, we may need to leave 2199 * interrupts disasbled on return. If crash dumps don't work when 2200 * we get to this point, we might want to consider this (leaving things 2201 * disabled as a starting point ;-) 2202 */ 2203 intr = intr_disable(); 2204 cpu = PCPU_GET(cpuid); 2205 sysm = &sysmap_lmem[cpu]; 2206 /* Since this is for the debugger, no locks or any other fun */ 2207 npte = TLBLO_PA_TO_PFN(pa) | PTE_D | PTE_V | PTE_G | PTE_W | PTE_C_CACHE; 2208 pte = pmap_pte(kernel_pmap, sysm->base); 2209 *pte = npte; 2210 sysm->valid1 = 1; 2211 pmap_update_page(kernel_pmap, sysm->base, npte); 2212 va = sysm->base; 2213 intr_restore(intr); 2214#endif 2215 } 2216 return ((void *)va); 2217} 2218 2219void 2220pmap_kenter_temporary_free(vm_paddr_t pa) 2221{ 2222#ifndef __mips_n64 /* XXX : to be converted to new style */ 2223 int cpu; 2224 register_t intr; 2225 struct local_sysmaps *sysm; 2226#endif 2227 2228 if (MIPS_DIRECT_MAPPABLE(pa)) { 2229 /* nothing to do for this case */ 2230 return; 2231 } 2232#ifndef __mips_n64 /* XXX : to be converted to new style */ 2233 cpu = PCPU_GET(cpuid); 2234 sysm = &sysmap_lmem[cpu]; 2235 if (sysm->valid1) { 2236 pt_entry_t *pte; 2237 2238 intr = intr_disable(); 2239 pte = pmap_pte(kernel_pmap, sysm->base); 2240 *pte = PTE_G; 2241 pmap_invalidate_page(kernel_pmap, sysm->base); 2242 intr_restore(intr); 2243 sysm->valid1 = 0; 2244 } 2245#endif 2246} 2247 2248/* 2249 * Moved the code to Machine Independent 2250 * vm_map_pmap_enter() 2251 */ 2252 2253/* 2254 * Maps a sequence of resident pages belonging to the same object. 2255 * The sequence begins with the given page m_start. This page is 2256 * mapped at the given virtual address start. 
Each subsequent page is 2257 * mapped at a virtual address that is offset from start by the same 2258 * amount as the page is offset from m_start within the object. The 2259 * last page in the sequence is the page with the largest offset from 2260 * m_start that can be mapped at a virtual address less than the given 2261 * virtual address end. Not every virtual page between start and end 2262 * is mapped; only those for which a resident page exists with the 2263 * corresponding offset from m_start are mapped. 2264 */ 2265void 2266pmap_enter_object(pmap_t pmap, vm_offset_t start, vm_offset_t end, 2267 vm_page_t m_start, vm_prot_t prot) 2268{ 2269 vm_page_t m, mpte; 2270 vm_pindex_t diff, psize; 2271 2272 VM_OBJECT_LOCK_ASSERT(m_start->object, MA_OWNED); 2273 psize = atop(end - start); 2274 mpte = NULL; 2275 m = m_start; 2276 vm_page_lock_queues(); 2277 PMAP_LOCK(pmap); 2278 while (m != NULL && (diff = m->pindex - m_start->pindex) < psize) { 2279 mpte = pmap_enter_quick_locked(pmap, start + ptoa(diff), m, 2280 prot, mpte); 2281 m = TAILQ_NEXT(m, listq); 2282 } 2283 vm_page_unlock_queues(); 2284 PMAP_UNLOCK(pmap); 2285} 2286 2287/* 2288 * pmap_object_init_pt preloads the ptes for a given object 2289 * into the specified pmap. This eliminates the blast of soft 2290 * faults on process startup and immediately after an mmap. 2291 */ 2292void 2293pmap_object_init_pt(pmap_t pmap, vm_offset_t addr, 2294 vm_object_t object, vm_pindex_t pindex, vm_size_t size) 2295{ 2296 VM_OBJECT_LOCK_ASSERT(object, MA_OWNED); 2297 KASSERT(object->type == OBJT_DEVICE || object->type == OBJT_SG, 2298 ("pmap_object_init_pt: non-device object")); 2299} 2300 2301/* 2302 * Routine: pmap_change_wiring 2303 * Function: Change the wiring attribute for a map/virtual-address 2304 * pair. 2305 * In/out conditions: 2306 * The mapping must already exist in the pmap. 2307 */ 2308void 2309pmap_change_wiring(pmap_t pmap, vm_offset_t va, boolean_t wired) 2310{ 2311 pt_entry_t *pte; 2312 2313 if (pmap == NULL) 2314 return; 2315 2316 PMAP_LOCK(pmap); 2317 pte = pmap_pte(pmap, va); 2318 2319 if (wired && !pte_test(pte, PTE_W)) 2320 pmap->pm_stats.wired_count++; 2321 else if (!wired && pte_test(pte, PTE_W)) 2322 pmap->pm_stats.wired_count--; 2323 2324 /* 2325 * Wiring is not a hardware characteristic so there is no need to 2326 * invalidate TLB. 2327 */ 2328 if (wired) 2329 pte_set(pte, PTE_W); 2330 else 2331 pte_clear(pte, PTE_W); 2332 PMAP_UNLOCK(pmap); 2333} 2334 2335/* 2336 * Copy the range specified by src_addr/len 2337 * from the source map to the range dst_addr/len 2338 * in the destination map. 2339 * 2340 * This routine is only advisory and need not do anything. 2341 */ 2342 2343void 2344pmap_copy(pmap_t dst_pmap, pmap_t src_pmap, vm_offset_t dst_addr, 2345 vm_size_t len, vm_offset_t src_addr) 2346{ 2347} 2348 2349/* 2350 * pmap_zero_page zeros the specified hardware page by mapping 2351 * the page into KVM and using bzero to clear its contents. 2352 * 2353 * Use XKPHYS for 64 bit, and KSEG0 where possible for 32 bit. 
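 * Pages that are not direct-mappable are zeroed through a temporary
 * local mapping established by pmap_lmem_map1() and torn down with
 * pmap_lmem_unmap().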
2354 */ 2355void 2356pmap_zero_page(vm_page_t m) 2357{ 2358 vm_offset_t va; 2359 vm_paddr_t phys = VM_PAGE_TO_PHYS(m); 2360 2361 if (MIPS_DIRECT_MAPPABLE(phys)) { 2362 va = MIPS_PHYS_TO_DIRECT(phys); 2363 bzero((caddr_t)va, PAGE_SIZE); 2364 mips_dcache_wbinv_range(va, PAGE_SIZE); 2365 } else { 2366 va = pmap_lmem_map1(phys); 2367 bzero((caddr_t)va, PAGE_SIZE); 2368 mips_dcache_wbinv_range(va, PAGE_SIZE); 2369 pmap_lmem_unmap(); 2370 } 2371} 2372 2373/* 2374 * pmap_zero_page_area zeros the specified hardware page by mapping 2375 * the page into KVM and using bzero to clear its contents. 2376 * 2377 * off and size may not cover an area beyond a single hardware page. 2378 */ 2379void 2380pmap_zero_page_area(vm_page_t m, int off, int size) 2381{ 2382 vm_offset_t va; 2383 vm_paddr_t phys = VM_PAGE_TO_PHYS(m); 2384 2385 if (MIPS_DIRECT_MAPPABLE(phys)) { 2386 va = MIPS_PHYS_TO_DIRECT(phys); 2387 bzero((char *)(caddr_t)va + off, size); 2388 mips_dcache_wbinv_range(va + off, size); 2389 } else { 2390 va = pmap_lmem_map1(phys); 2391 bzero((char *)va + off, size); 2392 mips_dcache_wbinv_range(va + off, size); 2393 pmap_lmem_unmap(); 2394 } 2395} 2396 2397void 2398pmap_zero_page_idle(vm_page_t m) 2399{ 2400 vm_offset_t va; 2401 vm_paddr_t phys = VM_PAGE_TO_PHYS(m); 2402 2403 if (MIPS_DIRECT_MAPPABLE(phys)) { 2404 va = MIPS_PHYS_TO_DIRECT(phys); 2405 bzero((caddr_t)va, PAGE_SIZE); 2406 mips_dcache_wbinv_range(va, PAGE_SIZE); 2407 } else { 2408 va = pmap_lmem_map1(phys); 2409 bzero((caddr_t)va, PAGE_SIZE); 2410 mips_dcache_wbinv_range(va, PAGE_SIZE); 2411 pmap_lmem_unmap(); 2412 } 2413} 2414 2415/* 2416 * pmap_copy_page copies the specified (machine independent) 2417 * page by mapping the page into virtual memory and using 2418 * bcopy to copy the page, one machine dependent page at a 2419 * time. 2420 * 2421 * Use XKPHYS for 64 bit, and KSEG0 where possible for 32 bit. 2422 */ 2423void 2424pmap_copy_page(vm_page_t src, vm_page_t dst) 2425{ 2426 vm_offset_t va_src, va_dst; 2427 vm_paddr_t phys_src = VM_PAGE_TO_PHYS(src); 2428 vm_paddr_t phys_dst = VM_PAGE_TO_PHYS(dst); 2429 2430 if (MIPS_DIRECT_MAPPABLE(phys_src) && MIPS_DIRECT_MAPPABLE(phys_dst)) { 2431 /* easy case, all can be accessed via KSEG0 */ 2432 /* 2433 * Flush all caches for VA that are mapped to this page 2434 * to make sure that data in SDRAM is up to date 2435 */ 2436 pmap_flush_pvcache(src); 2437 mips_dcache_wbinv_range_index( 2438 MIPS_PHYS_TO_DIRECT(phys_dst), PAGE_SIZE); 2439 va_src = MIPS_PHYS_TO_DIRECT(phys_src); 2440 va_dst = MIPS_PHYS_TO_DIRECT(phys_dst); 2441 bcopy((caddr_t)va_src, (caddr_t)va_dst, PAGE_SIZE); 2442 mips_dcache_wbinv_range(va_dst, PAGE_SIZE); 2443 } else { 2444 va_src = pmap_lmem_map2(phys_src, phys_dst); 2445 va_dst = va_src + PAGE_SIZE; 2446 bcopy((void *)va_src, (void *)va_dst, PAGE_SIZE); 2447 mips_dcache_wbinv_range(va_dst, PAGE_SIZE); 2448 pmap_lmem_unmap(); 2449 } 2450} 2451 2452/* 2453 * Returns true if the pmap's pv is one of the first 2454 * 16 pvs linked to from this page. This count may 2455 * be changed upwards or downwards in the future; it 2456 * is only necessary that true be returned for a small 2457 * subset of pmaps for proper page aging. 
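 * The page queues lock is acquired and released internally, so the caller
 * only needs to supply a managed page.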
2458 */ 2459boolean_t 2460pmap_page_exists_quick(pmap_t pmap, vm_page_t m) 2461{ 2462 pv_entry_t pv; 2463 int loops = 0; 2464 boolean_t rv; 2465 2466 KASSERT((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) == 0, 2467 ("pmap_page_exists_quick: page %p is not managed", m)); 2468 rv = FALSE; 2469 vm_page_lock_queues(); 2470 TAILQ_FOREACH(pv, &m->md.pv_list, pv_list) { 2471 if (pv->pv_pmap == pmap) { 2472 rv = TRUE; 2473 break; 2474 } 2475 loops++; 2476 if (loops >= 16) 2477 break; 2478 } 2479 vm_page_unlock_queues(); 2480 return (rv); 2481} 2482 2483/* 2484 * Remove all pages from specified address space 2485 * this aids process exit speeds. Also, this code 2486 * is special cased for current process only, but 2487 * can have the more generic (and slightly slower) 2488 * mode enabled. This is much faster than pmap_remove 2489 * in the case of running down an entire address space. 2490 */ 2491void 2492pmap_remove_pages(pmap_t pmap) 2493{ 2494 pt_entry_t *pte, tpte; 2495 pv_entry_t pv, npv; 2496 vm_page_t m; 2497 2498 if (pmap != vmspace_pmap(curthread->td_proc->p_vmspace)) { 2499 printf("warning: pmap_remove_pages called with non-current pmap\n"); 2500 return; 2501 } 2502 vm_page_lock_queues(); 2503 PMAP_LOCK(pmap); 2504 for (pv = TAILQ_FIRST(&pmap->pm_pvlist); pv != NULL; pv = npv) { 2505 2506 pte = pmap_pte(pv->pv_pmap, pv->pv_va); 2507 if (!pte_test(pte, PTE_V)) 2508 panic("pmap_remove_pages: page on pm_pvlist has no pte"); 2509 tpte = *pte; 2510 2511/* 2512 * We cannot remove wired pages from a process' mapping at this time 2513 */ 2514 if (pte_test(&tpte, PTE_W)) { 2515 npv = TAILQ_NEXT(pv, pv_plist); 2516 continue; 2517 } 2518 *pte = is_kernel_pmap(pmap) ? PTE_G : 0; 2519 2520 m = PHYS_TO_VM_PAGE(TLBLO_PTE_TO_PA(tpte)); 2521 KASSERT(m != NULL, 2522 ("pmap_remove_pages: bad tpte %x", tpte)); 2523 2524 pv->pv_pmap->pm_stats.resident_count--; 2525 2526 /* 2527 * Update the vm_page_t clean and reference bits. 2528 */ 2529 if (pte_test(&tpte, PTE_D)) { 2530 vm_page_dirty(m); 2531 } 2532 npv = TAILQ_NEXT(pv, pv_plist); 2533 TAILQ_REMOVE(&pv->pv_pmap->pm_pvlist, pv, pv_plist); 2534 2535 m->md.pv_list_count--; 2536 TAILQ_REMOVE(&m->md.pv_list, pv, pv_list); 2537 if (TAILQ_FIRST(&m->md.pv_list) == NULL) { 2538 vm_page_flag_clear(m, PG_WRITEABLE); 2539 } 2540 pmap_unuse_pt(pv->pv_pmap, pv->pv_va, pv->pv_ptem); 2541 free_pv_entry(pv); 2542 } 2543 pmap_invalidate_all(pmap); 2544 PMAP_UNLOCK(pmap); 2545 vm_page_unlock_queues(); 2546} 2547 2548/* 2549 * pmap_testbit tests bits in pte's 2550 * note that the testbit/changebit routines are inline, 2551 * and a lot of things compile-time evaluate. 
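 * pmap_testbit() stops at the first mapping found with the bit set,
 * while pmap_changebit() visits every mapping of the page.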
2552 */ 2553static boolean_t 2554pmap_testbit(vm_page_t m, int bit) 2555{ 2556 pv_entry_t pv; 2557 pt_entry_t *pte; 2558 boolean_t rv = FALSE; 2559 2560 if (m->flags & PG_FICTITIOUS) 2561 return (rv); 2562 2563 if (TAILQ_FIRST(&m->md.pv_list) == NULL) 2564 return (rv); 2565 2566 mtx_assert(&vm_page_queue_mtx, MA_OWNED); 2567 TAILQ_FOREACH(pv, &m->md.pv_list, pv_list) { 2568 PMAP_LOCK(pv->pv_pmap); 2569 pte = pmap_pte(pv->pv_pmap, pv->pv_va); 2570 rv = pte_test(pte, bit); 2571 PMAP_UNLOCK(pv->pv_pmap); 2572 if (rv) 2573 break; 2574 } 2575 return (rv); 2576} 2577 2578/* 2579 * this routine is used to clear dirty bits in ptes 2580 */ 2581static __inline void 2582pmap_changebit(vm_page_t m, int bit, boolean_t setem) 2583{ 2584 pv_entry_t pv; 2585 pt_entry_t *pte; 2586 2587 if (m->flags & PG_FICTITIOUS) 2588 return; 2589 2590 mtx_assert(&vm_page_queue_mtx, MA_OWNED); 2591 /* 2592 * Loop over all current mappings setting/clearing as appropos If 2593 * setting RO do we need to clear the VAC? 2594 */ 2595 TAILQ_FOREACH(pv, &m->md.pv_list, pv_list) { 2596 PMAP_LOCK(pv->pv_pmap); 2597 pte = pmap_pte(pv->pv_pmap, pv->pv_va); 2598 if (setem) { 2599 *pte |= bit; 2600 pmap_update_page(pv->pv_pmap, pv->pv_va, *pte); 2601 } else { 2602 pt_entry_t pbits = *pte; 2603 2604 if (pbits & bit) { 2605 if (bit == PTE_D) { 2606 if (pbits & PTE_D) 2607 vm_page_dirty(m); 2608 *pte = (pbits & ~PTE_D) | PTE_RO; 2609 } else { 2610 *pte = pbits & ~bit; 2611 } 2612 pmap_update_page(pv->pv_pmap, pv->pv_va, *pte); 2613 } 2614 } 2615 PMAP_UNLOCK(pv->pv_pmap); 2616 } 2617 if (!setem && bit == PTE_D) 2618 vm_page_flag_clear(m, PG_WRITEABLE); 2619} 2620 2621/* 2622 * pmap_page_wired_mappings: 2623 * 2624 * Return the number of managed mappings to the given physical page 2625 * that are wired. 2626 */ 2627int 2628pmap_page_wired_mappings(vm_page_t m) 2629{ 2630 pv_entry_t pv; 2631 pmap_t pmap; 2632 pt_entry_t *pte; 2633 int count; 2634 2635 count = 0; 2636 if ((m->flags & PG_FICTITIOUS) != 0) 2637 return (count); 2638 vm_page_lock_queues(); 2639 TAILQ_FOREACH(pv, &m->md.pv_list, pv_list) { 2640 pmap = pv->pv_pmap; 2641 PMAP_LOCK(pmap); 2642 pte = pmap_pte(pmap, pv->pv_va); 2643 if (pte_test(pte, PTE_W)) 2644 count++; 2645 PMAP_UNLOCK(pmap); 2646 } 2647 vm_page_unlock_queues(); 2648 return (count); 2649} 2650 2651/* 2652 * Clear the write and modified bits in each of the given page's mappings. 2653 */ 2654void 2655pmap_remove_write(vm_page_t m) 2656{ 2657 pv_entry_t pv, npv; 2658 vm_offset_t va; 2659 pt_entry_t *pte; 2660 2661 KASSERT((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) == 0, 2662 ("pmap_remove_write: page %p is not managed", m)); 2663 2664 /* 2665 * If the page is not VPO_BUSY, then PG_WRITEABLE cannot be set by 2666 * another thread while the object is locked. Thus, if PG_WRITEABLE 2667 * is clear, no page table entries need updating. 2668 */ 2669 VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED); 2670 if ((m->oflags & VPO_BUSY) == 0 && 2671 (m->flags & PG_WRITEABLE) == 0) 2672 return; 2673 2674 /* 2675 * Loop over all current mappings setting/clearing as appropos. 
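 * Write access is revoked by narrowing each mapping to
 * VM_PROT_READ | VM_PROT_EXECUTE with pmap_protect().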
2676 */ 2677 vm_page_lock_queues(); 2678 for (pv = TAILQ_FIRST(&m->md.pv_list); pv; pv = npv) { 2679 npv = TAILQ_NEXT(pv, pv_plist); 2680 pte = pmap_pte(pv->pv_pmap, pv->pv_va); 2681 if (pte == NULL || !pte_test(pte, PTE_V)) 2682 panic("page on pm_pvlist has no pte"); 2683 2684 va = pv->pv_va; 2685 pmap_protect(pv->pv_pmap, va, va + PAGE_SIZE, 2686 VM_PROT_READ | VM_PROT_EXECUTE); 2687 } 2688 vm_page_flag_clear(m, PG_WRITEABLE); 2689 vm_page_unlock_queues(); 2690} 2691 2692/* 2693 * pmap_ts_referenced: 2694 * 2695 * Return the count of reference bits for a page, clearing all of them. 2696 */ 2697int 2698pmap_ts_referenced(vm_page_t m) 2699{ 2700 2701 KASSERT((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) == 0, 2702 ("pmap_ts_referenced: page %p is not managed", m)); 2703 if (m->md.pv_flags & PV_TABLE_REF) { 2704 vm_page_lock_queues(); 2705 m->md.pv_flags &= ~PV_TABLE_REF; 2706 vm_page_unlock_queues(); 2707 return (1); 2708 } 2709 return (0); 2710} 2711 2712/* 2713 * pmap_is_modified: 2714 * 2715 * Return whether or not the specified physical page was modified 2716 * in any physical maps. 2717 */ 2718boolean_t 2719pmap_is_modified(vm_page_t m) 2720{ 2721 boolean_t rv; 2722 2723 KASSERT((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) == 0, 2724 ("pmap_is_modified: page %p is not managed", m)); 2725 2726 /* 2727 * If the page is not VPO_BUSY, then PG_WRITEABLE cannot be 2728 * concurrently set while the object is locked. Thus, if PG_WRITEABLE 2729 * is clear, no PTEs can have PTE_D set. 2730 */ 2731 VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED); 2732 if ((m->oflags & VPO_BUSY) == 0 && 2733 (m->flags & PG_WRITEABLE) == 0) 2734 return (FALSE); 2735 vm_page_lock_queues(); 2736 if (m->md.pv_flags & PV_TABLE_MOD) 2737 rv = TRUE; 2738 else 2739 rv = pmap_testbit(m, PTE_D); 2740 vm_page_unlock_queues(); 2741 return (rv); 2742} 2743 2744/* N/C */ 2745 2746/* 2747 * pmap_is_prefaultable: 2748 * 2749 * Return whether or not the specified virtual address is elgible 2750 * for prefault. 2751 */ 2752boolean_t 2753pmap_is_prefaultable(pmap_t pmap, vm_offset_t addr) 2754{ 2755 pd_entry_t *pde; 2756 pt_entry_t *pte; 2757 boolean_t rv; 2758 2759 rv = FALSE; 2760 PMAP_LOCK(pmap); 2761 pde = pmap_pde(pmap, addr); 2762 if (pde != NULL && *pde != 0) { 2763 pte = pmap_pde_to_pte(pde, addr); 2764 rv = (*pte == 0); 2765 } 2766 PMAP_UNLOCK(pmap); 2767 return (rv); 2768} 2769 2770/* 2771 * Clear the modify bits on the specified physical page. 2772 */ 2773void 2774pmap_clear_modify(vm_page_t m) 2775{ 2776 2777 KASSERT((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) == 0, 2778 ("pmap_clear_modify: page %p is not managed", m)); 2779 VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED); 2780 KASSERT((m->oflags & VPO_BUSY) == 0, 2781 ("pmap_clear_modify: page %p is busy", m)); 2782 2783 /* 2784 * If the page is not PG_WRITEABLE, then no PTEs can have PTE_D set. 2785 * If the object containing the page is locked and the page is not 2786 * VPO_BUSY, then PG_WRITEABLE cannot be concurrently set. 2787 */ 2788 if ((m->flags & PG_WRITEABLE) == 0) 2789 return; 2790 vm_page_lock_queues(); 2791 if (m->md.pv_flags & PV_TABLE_MOD) { 2792 pmap_changebit(m, PTE_D, FALSE); 2793 m->md.pv_flags &= ~PV_TABLE_MOD; 2794 } 2795 vm_page_unlock_queues(); 2796} 2797 2798/* 2799 * pmap_is_referenced: 2800 * 2801 * Return whether or not the specified physical page was referenced 2802 * in any physical maps. 
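 * The referenced state is tracked purely in software (PV_TABLE_REF in
 * md.pv_flags); MIPS TLB entries carry no hardware referenced bit.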
2803 */
2804boolean_t
2805pmap_is_referenced(vm_page_t m)
2806{
2807
2808	KASSERT((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) == 0,
2809	    ("pmap_is_referenced: page %p is not managed", m));
2810	return ((m->md.pv_flags & PV_TABLE_REF) != 0);
2811}
2812
2813/*
2814 * pmap_clear_reference:
2815 *
2816 *	Clear the reference bit on the specified physical page.
2817 */
2818void
2819pmap_clear_reference(vm_page_t m)
2820{
2821
2822	KASSERT((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) == 0,
2823	    ("pmap_clear_reference: page %p is not managed", m));
2824	vm_page_lock_queues();
2825	if (m->md.pv_flags & PV_TABLE_REF) {
2826		m->md.pv_flags &= ~PV_TABLE_REF;
2827	}
2828	vm_page_unlock_queues();
2829}
2830
2831/*
2832 * Miscellaneous support routines follow
2833 */
2834
2842/*
2843 * Map a set of physical memory pages into the kernel virtual
2844 * address space. Return a pointer to where it is mapped. This
2845 * routine is intended to be used for mapping device memory,
2846 * NOT real memory.
2847 *
2848 * Use XKPHYS uncached for 64 bit, and KSEG1 where possible for 32 bit.
2849 */
2850void *
2851pmap_mapdev(vm_offset_t pa, vm_size_t size)
2852{
2853	vm_offset_t va, tmpva, offset;
2854
2855	/*
2856	 * KSEG1 maps only the first 512M of the physical address space.
2857	 * For pa > 0x20000000 we should make a proper mapping using pmap_kenter.
2858	 */
2859	if (MIPS_DIRECT_MAPPABLE(pa + size - 1))
2860		return ((void *)MIPS_PHYS_TO_DIRECT_UNCACHED(pa));
2861	else {
2862		offset = pa & PAGE_MASK;
2863		size = roundup(size + offset, PAGE_SIZE);
2864
2865		va = kmem_alloc_nofault(kernel_map, size);
2866		if (!va)
2867			panic("pmap_mapdev: Couldn't alloc kernel virtual memory");
2868		pa = trunc_page(pa);
2869		for (tmpva = va; size > 0;) {
2870			pmap_kenter_attr(tmpva, pa, PTE_C_UNCACHED);
2871			size -= PAGE_SIZE;
2872			tmpva += PAGE_SIZE;
2873			pa += PAGE_SIZE;
2874		}
2875	}
2876
2877	return ((void *)(va + offset));
2878}
2879
2880void
2881pmap_unmapdev(vm_offset_t va, vm_size_t size)
2882{
2883#ifndef __mips_n64
2884	vm_offset_t base, offset, tmpva;
2885
2886	/* If the address is within KSEG1 then there is nothing to do */
2887	if (va >= MIPS_KSEG1_START && va <= MIPS_KSEG1_END)
2888		return;
2889
2890	base = trunc_page(va);
2891	offset = va & PAGE_MASK;
2892	size = roundup(size + offset, PAGE_SIZE);
2893	for (tmpva = base; tmpva < base + size; tmpva += PAGE_SIZE)
2894		pmap_kremove(tmpva);
2895	kmem_free(kernel_map, base, size);
2896#endif
2897}
2898
2899/*
2900 * Perform the pmap work for mincore.
2901 */
2902int
2903pmap_mincore(pmap_t pmap, vm_offset_t addr, vm_paddr_t *locked_pa)
2904{
2905	pt_entry_t *ptep, pte;
2906	vm_offset_t pa;
2907	vm_page_t m;
2908	int val;
2909	boolean_t managed;
2910
2911	PMAP_LOCK(pmap);
2912retry:
2913	ptep = pmap_pte(pmap, addr);
2914	pte = (ptep != NULL) ? *ptep : 0;
2915	if (!pte_test(&pte, PTE_V)) {
2916		val = 0;
2917		goto out;
2918	}
2919	val = MINCORE_INCORE;
2920	if (pte_test(&pte, PTE_D))
2921		val |= MINCORE_MODIFIED | MINCORE_MODIFIED_OTHER;
2922	pa = TLBLO_PTE_TO_PA(pte);
2923	managed = page_is_managed(pa);
2924	if (managed) {
2925		/*
2926		 * This may falsely report the given address as
2927		 * MINCORE_REFERENCED.
Unfortunately, due to the lack of 2928 * per-PTE reference information, it is impossible to 2929 * determine if the address is MINCORE_REFERENCED. 2930 */ 2931 m = PHYS_TO_VM_PAGE(pa); 2932 if ((m->flags & PG_REFERENCED) != 0) 2933 val |= MINCORE_REFERENCED | MINCORE_REFERENCED_OTHER; 2934 } 2935 if ((val & (MINCORE_MODIFIED_OTHER | MINCORE_REFERENCED_OTHER)) != 2936 (MINCORE_MODIFIED_OTHER | MINCORE_REFERENCED_OTHER) && managed) { 2937 /* Ensure that "PHYS_TO_VM_PAGE(pa)->object" doesn't change. */ 2938 if (vm_page_pa_tryrelock(pmap, pa, locked_pa)) 2939 goto retry; 2940 } else 2941out: 2942 PA_UNLOCK_COND(*locked_pa); 2943 PMAP_UNLOCK(pmap); 2944 return (val); 2945} 2946 2947void 2948pmap_activate(struct thread *td) 2949{ 2950 pmap_t pmap, oldpmap; 2951 struct proc *p = td->td_proc; 2952 2953 critical_enter(); 2954 2955 pmap = vmspace_pmap(p->p_vmspace); 2956 oldpmap = PCPU_GET(curpmap); 2957 2958 if (oldpmap) 2959 atomic_clear_32(&oldpmap->pm_active, PCPU_GET(cpumask)); 2960 atomic_set_32(&pmap->pm_active, PCPU_GET(cpumask)); 2961 pmap_asid_alloc(pmap); 2962 if (td == curthread) { 2963 PCPU_SET(segbase, pmap->pm_segtab); 2964 mips_wr_entryhi(pmap->pm_asid[PCPU_GET(cpuid)].asid); 2965 } 2966 2967 PCPU_SET(curpmap, pmap); 2968 critical_exit(); 2969} 2970 2971void 2972pmap_sync_icache(pmap_t pm, vm_offset_t va, vm_size_t sz) 2973{ 2974} 2975 2976/* 2977 * Increase the starting virtual address of the given mapping if a 2978 * different alignment might result in more superpage mappings. 2979 */ 2980void 2981pmap_align_superpage(vm_object_t object, vm_ooffset_t offset, 2982 vm_offset_t *addr, vm_size_t size) 2983{ 2984 vm_offset_t superpage_offset; 2985 2986 if (size < NBSEG) 2987 return; 2988 if (object != NULL && (object->flags & OBJ_COLORED) != 0) 2989 offset += ptoa(object->pg_color); 2990 superpage_offset = offset & SEGMASK; 2991 if (size - ((NBSEG - superpage_offset) & SEGMASK) < NBSEG || 2992 (*addr & SEGMASK) == superpage_offset) 2993 return; 2994 if ((*addr & SEGMASK) < superpage_offset) 2995 *addr = (*addr & ~SEGMASK) + superpage_offset; 2996 else 2997 *addr = ((*addr + SEGMASK) & ~SEGMASK) + superpage_offset; 2998} 2999 3000/* 3001 * Increase the starting virtual address of the given mapping so 3002 * that it is aligned to not be the second page in a TLB entry. 3003 * This routine assumes that the length is appropriately-sized so 3004 * that the allocation does not share a TLB entry at all if required. 
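 * (A MIPS TLB entry maps an even/odd pair of virtual pages, so starting
 * the mapping on an even page boundary keeps it from sharing an entry
 * pair with an unrelated preceding page.)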
3005 */ 3006void 3007pmap_align_tlb(vm_offset_t *addr) 3008{ 3009 if ((*addr & PAGE_SIZE) == 0) 3010 return; 3011 *addr += PAGE_SIZE; 3012 return; 3013} 3014 3015#ifdef DDB 3016DB_SHOW_COMMAND(ptable, ddb_pid_dump) 3017{ 3018 pmap_t pmap; 3019 struct thread *td = NULL; 3020 struct proc *p; 3021 int i, j, k; 3022 vm_paddr_t pa; 3023 vm_offset_t va; 3024 3025 if (have_addr) { 3026 td = db_lookup_thread(addr, TRUE); 3027 if (td == NULL) { 3028 db_printf("Invalid pid or tid"); 3029 return; 3030 } 3031 p = td->td_proc; 3032 if (p->p_vmspace == NULL) { 3033 db_printf("No vmspace for process"); 3034 return; 3035 } 3036 pmap = vmspace_pmap(p->p_vmspace); 3037 } else 3038 pmap = kernel_pmap; 3039 3040 db_printf("pmap:%p segtab:%p asid:%x generation:%x\n", 3041 pmap, pmap->pm_segtab, pmap->pm_asid[0].asid, 3042 pmap->pm_asid[0].gen); 3043 for (i = 0; i < NPDEPG; i++) { 3044 pd_entry_t *pdpe; 3045 pt_entry_t *pde; 3046 pt_entry_t pte; 3047 3048 pdpe = (pd_entry_t *)pmap->pm_segtab[i]; 3049 if (pdpe == NULL) 3050 continue; 3051 db_printf("[%4d] %p\n", i, pdpe); 3052#ifdef __mips_n64 3053 for (j = 0; j < NPDEPG; j++) { 3054 pde = (pt_entry_t *)pdpe[j]; 3055 if (pde == NULL) 3056 continue; 3057 db_printf("\t[%4d] %p\n", j, pde); 3058#else 3059 { 3060 j = 0; 3061 pde = (pt_entry_t *)pdpe; 3062#endif 3063 for (k = 0; k < NPTEPG; k++) { 3064 pte = pde[k]; 3065 if (pte == 0 || !pte_test(&pte, PTE_V)) 3066 continue; 3067 pa = TLBLO_PTE_TO_PA(pte); 3068 va = ((u_long)i << SEGSHIFT) | (j << PDRSHIFT) | (k << PAGE_SHIFT); 3069 db_printf("\t\t[%04d] va: %p pte: %8x pa:%lx\n", 3070 k, (void *)va, pte, (u_long)pa); 3071 } 3072 } 3073 } 3074} 3075#endif 3076 3077#if defined(DEBUG) 3078 3079static void pads(pmap_t pm); 3080void pmap_pvdump(vm_offset_t pa); 3081 3082/* print address space of pmap*/ 3083static void 3084pads(pmap_t pm) 3085{ 3086 unsigned va, i, j; 3087 pt_entry_t *ptep; 3088 3089 if (pm == kernel_pmap) 3090 return; 3091 for (i = 0; i < NPTEPG; i++) 3092 if (pm->pm_segtab[i]) 3093 for (j = 0; j < NPTEPG; j++) { 3094 va = (i << SEGSHIFT) + (j << PAGE_SHIFT); 3095 if (pm == kernel_pmap && va < KERNBASE) 3096 continue; 3097 if (pm != kernel_pmap && 3098 va >= VM_MAXUSER_ADDRESS) 3099 continue; 3100 ptep = pmap_pte(pm, va); 3101 if (pmap_pte_v(ptep)) 3102 printf("%x:%x ", va, *(int *)ptep); 3103 } 3104 3105} 3106 3107void 3108pmap_pvdump(vm_offset_t pa) 3109{ 3110 register pv_entry_t pv; 3111 vm_page_t m; 3112 3113 printf("pa %x", pa); 3114 m = PHYS_TO_VM_PAGE(pa); 3115 for (pv = TAILQ_FIRST(&m->md.pv_list); pv; 3116 pv = TAILQ_NEXT(pv, pv_list)) { 3117 printf(" -> pmap %p, va %x", (void *)pv->pv_pmap, pv->pv_va); 3118 pads(pv->pv_pmap); 3119 } 3120 printf(" "); 3121} 3122 3123/* N/C */ 3124#endif 3125 3126 3127/* 3128 * Allocate TLB address space tag (called ASID or TLBPID) and return it. 3129 * It takes almost as much or more time to search the TLB for a 3130 * specific ASID and flush those entries as it does to flush the entire TLB. 3131 * Therefore, when we allocate a new ASID, we just take the next number. When 3132 * we run out of numbers, we flush the TLB, increment the generation count 3133 * and start over. ASID zero is reserved for kernel use. 
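 * A pmap's ASID is valid only while its recorded generation matches the
 * per-CPU generation; after a flush, each pmap is lazily given a fresh
 * ASID the next time it is activated.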
3134 */
3135static void
3136pmap_asid_alloc(pmap_t pmap)
3137{
3139	if (pmap->pm_asid[PCPU_GET(cpuid)].asid != PMAP_ASID_RESERVED &&
3140	    pmap->pm_asid[PCPU_GET(cpuid)].gen == PCPU_GET(asid_generation))
3141		return;
3142	if (PCPU_GET(next_asid) == pmap_max_asid) {
3143		tlb_invalidate_all_user(NULL);
3144		PCPU_SET(asid_generation,
3145		    (PCPU_GET(asid_generation) + 1) & ASIDGEN_MASK);
3146		if (PCPU_GET(asid_generation) == 0) {
3147			PCPU_SET(asid_generation, 1);
3148		}
3149		PCPU_SET(next_asid, 1);	/* 0 means invalid */
3150	}
3151	pmap->pm_asid[PCPU_GET(cpuid)].asid = PCPU_GET(next_asid);
3152	pmap->pm_asid[PCPU_GET(cpuid)].gen = PCPU_GET(asid_generation);
3153	PCPU_SET(next_asid, PCPU_GET(next_asid) + 1);
3155}
3156
3157int
3158page_is_managed(vm_offset_t pa)
3159{
3160	vm_offset_t pgnum = mips_btop(pa);
3161
3162	if (pgnum >= first_page) {
3163		vm_page_t m;
3164
3165		m = PHYS_TO_VM_PAGE(pa);
3166		if (m == NULL)
3167			return (0);
3168		if ((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) == 0)
3169			return (1);
3170	}
3171	return (0);
3172}
3173
3174static int
3175init_pte_prot(vm_offset_t va, vm_page_t m, vm_prot_t prot)
3176{
3177	int rw;
3178
3179	if (!(prot & VM_PROT_WRITE))
3180		rw = PTE_V | PTE_RO | PTE_C_CACHE;
3181	else if ((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) == 0) {
3182		if ((m->md.pv_flags & PV_TABLE_MOD) != 0)
3183			rw = PTE_V | PTE_D | PTE_C_CACHE;
3184		else
3185			rw = PTE_V | PTE_C_CACHE;
3186		vm_page_flag_set(m, PG_WRITEABLE);
3187	} else
3188		/* Needn't emulate a modified bit for unmanaged pages. */
3189		rw = PTE_V | PTE_D | PTE_C_CACHE;
3190	return (rw);
3191}
3192
3193/*
3194 * pmap_emulate_modified: do dirty bit emulation.
3195 *
3196 * On SMP, update just the local TLB; other CPUs will update their
3197 * TLBs from the PTE lazily if they take the exception.
3198 * Returns 0 in case of success, 1 if the page is read only and we
3199 * need to fault.
3200 */
3201int
3202pmap_emulate_modified(pmap_t pmap, vm_offset_t va)
3203{
3204	vm_page_t m;
3205	pt_entry_t *pte;
3206	vm_offset_t pa;
3207
3208	PMAP_LOCK(pmap);
3209	pte = pmap_pte(pmap, va);
3210	if (pte == NULL)
3211		panic("pmap_emulate_modified: can't find PTE");
3212#ifdef SMP
3213	/* It is possible that some other CPU changed m-bit */
3214	if (!pte_test(pte, PTE_V) || pte_test(pte, PTE_D)) {
3215		pmap_update_page_local(pmap, va, *pte);
3216		PMAP_UNLOCK(pmap);
3217		return (0);
3218	}
3219#else
3220	if (!pte_test(pte, PTE_V) || pte_test(pte, PTE_D))
3221		panic("pmap_emulate_modified: invalid pte");
3222#endif
3223	if (pte_test(pte, PTE_RO)) {
3224		/* write to read only page in the kernel */
3225		PMAP_UNLOCK(pmap);
3226		return (1);
3227	}
3228	pte_set(pte, PTE_D);
3229	pmap_update_page_local(pmap, va, *pte);
3230	pa = TLBLO_PTE_TO_PA(*pte);
3231	if (!page_is_managed(pa))
3232		panic("pmap_emulate_modified: unmanaged page");
3233	m = PHYS_TO_VM_PAGE(pa);
3234	m->md.pv_flags |= (PV_TABLE_REF | PV_TABLE_MOD);
3235	PMAP_UNLOCK(pmap);
3236	return (0);
3237}
3238
3239/*
3240 * Routine:	pmap_kextract
3241 * Function:
3242 *	Extract the physical page address associated with the given
3243 *	virtual address.
3244 */
3245 /* PMAP_INLINE */ vm_offset_t
3246pmap_kextract(vm_offset_t va)
3247{
3248	int mapped;
3249
3250	/*
3251	 * First, the direct-mapped regions.
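	 * (XKPHYS and KSEG0/KSEG1 addresses translate to physical
	 * addresses arithmetically; no page table lookup is needed.)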
3252	 */
3253#if defined(__mips_n64)
3254	if (va >= MIPS_XKPHYS_START && va < MIPS_XKPHYS_END)
3255		return (MIPS_XKPHYS_TO_PHYS(va));
3256#endif
3257	if (va >= MIPS_KSEG0_START && va < MIPS_KSEG0_END)
3258		return (MIPS_KSEG0_TO_PHYS(va));
3259
3260	if (va >= MIPS_KSEG1_START && va < MIPS_KSEG1_END)
3261		return (MIPS_KSEG1_TO_PHYS(va));
3262
3263	/*
3264	 * User virtual addresses.
3265	 */
3266	if (va < VM_MAXUSER_ADDRESS) {
3267		pt_entry_t *ptep;
3268
3269		if (curproc && curproc->p_vmspace) {
3270			ptep = pmap_pte(&curproc->p_vmspace->vm_pmap, va);
3271			if (ptep) {
3272				return (TLBLO_PTE_TO_PA(*ptep) |
3273				    (va & PAGE_MASK));
3274			}
3275			return (0);
3276		}
3277	}
3278
3279	/*
3280	 * Should be a kernel virtual address here; otherwise fail.
3281	 */
3282	mapped = (va >= MIPS_KSEG2_START && va < MIPS_KSEG2_END);
3283#if defined(__mips_n64)
3284	mapped = mapped || (va >= MIPS_XKSEG_START && va < MIPS_XKSEG_END);
3285#endif
3286	/*
3287	 * Kernel virtual.
3288	 */
3289
3290	if (mapped) {
3291		pt_entry_t *ptep;
3292
3293		/* Is the kernel pmap initialized? */
3294		if (kernel_pmap->pm_active) {
3295			/* It's inside the virtual address range */
3296			ptep = pmap_pte(kernel_pmap, va);
3297			if (ptep) {
3298				return (TLBLO_PTE_TO_PA(*ptep) |
3299				    (va & PAGE_MASK));
3300			}
3301		}
3302		return (0);
3303	}
3304
3305	panic("%s for unknown address space %p.", __func__, (void *)va);
3306}
3307
3308
3309void
3310pmap_flush_pvcache(vm_page_t m)
3311{
3312	pv_entry_t pv;
3313
3314	if (m != NULL) {
3315		for (pv = TAILQ_FIRST(&m->md.pv_list); pv;
3316		    pv = TAILQ_NEXT(pv, pv_list)) {
3317			mips_dcache_wbinv_range_index(pv->pv_va, PAGE_SIZE);
3318		}
3319	}
3320}
3321