pmap.c revision 255028
/*
 * Copyright (c) 1991 Regents of the University of California.
 * All rights reserved.
 * Copyright (c) 1994 John S. Dyson
 * All rights reserved.
 * Copyright (c) 1994 David Greenman
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * the Systems Programming Group of the University of Utah Computer
 * Science Department and William Jolitz of UUNET Technologies Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	from: @(#)pmap.c	7.7 (Berkeley)	5/12/91
 *	from: src/sys/i386/i386/pmap.c,v 1.250.2.8 2000/11/21 00:09:14 ps
 *	JNPR: pmap.c,v 1.11.2.1 2007/08/16 11:51:06 girish
 */

/*
 * Manages physical address maps.
 *
 * Since the information managed by this module is
 * also stored by the logical address mapping module,
 * this module may throw away valid virtual-to-physical
 * mappings at almost any time. However, invalidations
 * of virtual-to-physical mappings must be done as
 * requested.
 *
 * In order to cope with hardware architectures which
 * make virtual-to-physical map invalidates expensive,
 * this module may delay invalidate or reduced protection
 * operations until such time as they are actually
 * necessary. This module is given full information as
 * to which processors are currently using which maps,
 * and to when physical maps must be made correct.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: head/sys/mips/mips/pmap.c 255028 2013-08-29 15:49:05Z alc $");

#include "opt_ddb.h"
#include "opt_pmap.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/lock.h>
#include <sys/mman.h>
#include <sys/msgbuf.h>
#include <sys/mutex.h>
#include <sys/pcpu.h>
#include <sys/proc.h>
#include <sys/rwlock.h>
#include <sys/sched.h>
#ifdef SMP
#include <sys/smp.h>
#else
#include <sys/cpuset.h>
#endif
#include <sys/sysctl.h>
#include <sys/vmmeter.h>

#ifdef DDB
#include <ddb/ddb.h>
#endif

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/vm_kern.h>
#include <vm/vm_page.h>
#include <vm/vm_map.h>
#include <vm/vm_object.h>
#include <vm/vm_extern.h>
#include <vm/vm_pageout.h>
#include <vm/vm_pager.h>
#include <vm/uma.h>

#include <machine/cache.h>
#include <machine/md_var.h>
#include <machine/tlb.h>

#undef PMAP_DEBUG

#if !defined(DIAGNOSTIC)
#define PMAP_INLINE __inline
#else
#define PMAP_INLINE
#endif

#ifdef PV_STATS
#define PV_STAT(x) do { x ; } while (0)
#else
#define PV_STAT(x) do { } while (0)
#endif

/*
 * Get PDEs and PTEs for user/kernel address space
 */
#define pmap_seg_index(v)	(((v) >> SEGSHIFT) & (NPDEPG - 1))
#define pmap_pde_index(v)	(((v) >> PDRSHIFT) & (NPDEPG - 1))
#define pmap_pte_index(v)	(((v) >> PAGE_SHIFT) & (NPTEPG - 1))
#define pmap_pde_pindex(v)	((v) >> PDRSHIFT)

#ifdef __mips_n64
#define NUPDE		(NPDEPG * NPDEPG)
#define NUSERPGTBLS	(NUPDE + NPDEPG)
#else
#define NUPDE		(NPDEPG)
#define NUSERPGTBLS	(NUPDE)
#endif

#define is_kernel_pmap(x)	((x) == kernel_pmap)

struct pmap kernel_pmap_store;
pd_entry_t *kernel_segmap;

vm_offset_t virtual_avail;	/* VA of first avail page (after kernel bss) */
vm_offset_t virtual_end;	/* VA of last avail page (end of kernel AS) */

static int nkpt;
unsigned pmap_max_asid;		/* max ASID supported by the system */

#define PMAP_ASID_RESERVED	0

vm_offset_t kernel_vm_end = VM_MIN_KERNEL_ADDRESS;

static void pmap_asid_alloc(pmap_t pmap);

static struct rwlock_padalign pvh_global_lock;

/*
 * Data for the pv entry allocation mechanism
 */
static TAILQ_HEAD(pch, pv_chunk) pv_chunks = TAILQ_HEAD_INITIALIZER(pv_chunks);
static int pv_entry_count;

static void free_pv_chunk(struct pv_chunk *pc);
static void free_pv_entry(pmap_t pmap, pv_entry_t pv);
static pv_entry_t get_pv_entry(pmap_t pmap, boolean_t try);
static vm_page_t pmap_pv_reclaim(pmap_t locked_pmap);
static void pmap_pvh_free(struct md_page *pvh, pmap_t pmap, vm_offset_t va);
static pv_entry_t pmap_pvh_remove(struct md_page *pvh, pmap_t pmap,
    vm_offset_t va);
static vm_page_t pmap_alloc_direct_page(unsigned int index, int req);
static vm_page_t pmap_enter_quick_locked(pmap_t pmap, vm_offset_t va,
    vm_page_t m, vm_prot_t prot, vm_page_t mpte);
static int pmap_remove_pte(struct pmap *pmap, pt_entry_t *ptq, vm_offset_t va,
    pd_entry_t pde);
static void pmap_remove_page(struct pmap *pmap, vm_offset_t va);
static void pmap_remove_entry(struct pmap *pmap, vm_page_t m, vm_offset_t va);
static boolean_t pmap_try_insert_pv_entry(pmap_t pmap, vm_page_t mpte,
    vm_offset_t va, vm_page_t m);
static void pmap_update_page(pmap_t pmap, vm_offset_t va, pt_entry_t pte);
static void pmap_invalidate_all(pmap_t pmap);
static void
pmap_invalidate_page(pmap_t pmap, vm_offset_t va); 178static void _pmap_unwire_ptp(pmap_t pmap, vm_offset_t va, vm_page_t m); 179 180static vm_page_t pmap_allocpte(pmap_t pmap, vm_offset_t va, int flags); 181static vm_page_t _pmap_allocpte(pmap_t pmap, unsigned ptepindex, int flags); 182static int pmap_unuse_pt(pmap_t, vm_offset_t, pd_entry_t); 183static pt_entry_t init_pte_prot(vm_page_t m, vm_prot_t access, vm_prot_t prot); 184 185static void pmap_invalidate_page_action(void *arg); 186static void pmap_invalidate_range_action(void *arg); 187static void pmap_update_page_action(void *arg); 188 189#ifndef __mips_n64 190/* 191 * This structure is for high memory (memory above 512Meg in 32 bit) support. 192 * The highmem area does not have a KSEG0 mapping, and we need a mechanism to 193 * do temporary per-CPU mappings for pmap_zero_page, pmap_copy_page etc. 194 * 195 * At bootup, we reserve 2 virtual pages per CPU for mapping highmem pages. To 196 * access a highmem physical address on a CPU, we map the physical address to 197 * the reserved virtual address for the CPU in the kernel pagetable. This is 198 * done with interrupts disabled(although a spinlock and sched_pin would be 199 * sufficient). 200 */ 201struct local_sysmaps { 202 vm_offset_t base; 203 uint32_t saved_intr; 204 uint16_t valid1, valid2; 205}; 206static struct local_sysmaps sysmap_lmem[MAXCPU]; 207 208static __inline void 209pmap_alloc_lmem_map(void) 210{ 211 int i; 212 213 for (i = 0; i < MAXCPU; i++) { 214 sysmap_lmem[i].base = virtual_avail; 215 virtual_avail += PAGE_SIZE * 2; 216 sysmap_lmem[i].valid1 = sysmap_lmem[i].valid2 = 0; 217 } 218} 219 220static __inline vm_offset_t 221pmap_lmem_map1(vm_paddr_t phys) 222{ 223 struct local_sysmaps *sysm; 224 pt_entry_t *pte, npte; 225 vm_offset_t va; 226 uint32_t intr; 227 int cpu; 228 229 intr = intr_disable(); 230 cpu = PCPU_GET(cpuid); 231 sysm = &sysmap_lmem[cpu]; 232 sysm->saved_intr = intr; 233 va = sysm->base; 234 npte = TLBLO_PA_TO_PFN(phys) | PTE_C_CACHE | PTE_D | PTE_V | PTE_G; 235 pte = pmap_pte(kernel_pmap, va); 236 *pte = npte; 237 sysm->valid1 = 1; 238 return (va); 239} 240 241static __inline vm_offset_t 242pmap_lmem_map2(vm_paddr_t phys1, vm_paddr_t phys2) 243{ 244 struct local_sysmaps *sysm; 245 pt_entry_t *pte, npte; 246 vm_offset_t va1, va2; 247 uint32_t intr; 248 int cpu; 249 250 intr = intr_disable(); 251 cpu = PCPU_GET(cpuid); 252 sysm = &sysmap_lmem[cpu]; 253 sysm->saved_intr = intr; 254 va1 = sysm->base; 255 va2 = sysm->base + PAGE_SIZE; 256 npte = TLBLO_PA_TO_PFN(phys1) | PTE_C_CACHE | PTE_D | PTE_V | PTE_G; 257 pte = pmap_pte(kernel_pmap, va1); 258 *pte = npte; 259 npte = TLBLO_PA_TO_PFN(phys2) | PTE_C_CACHE | PTE_D | PTE_V | PTE_G; 260 pte = pmap_pte(kernel_pmap, va2); 261 *pte = npte; 262 sysm->valid1 = 1; 263 sysm->valid2 = 1; 264 return (va1); 265} 266 267static __inline void 268pmap_lmem_unmap(void) 269{ 270 struct local_sysmaps *sysm; 271 pt_entry_t *pte; 272 int cpu; 273 274 cpu = PCPU_GET(cpuid); 275 sysm = &sysmap_lmem[cpu]; 276 pte = pmap_pte(kernel_pmap, sysm->base); 277 *pte = PTE_G; 278 tlb_invalidate_address(kernel_pmap, sysm->base); 279 sysm->valid1 = 0; 280 if (sysm->valid2) { 281 pte = pmap_pte(kernel_pmap, sysm->base + PAGE_SIZE); 282 *pte = PTE_G; 283 tlb_invalidate_address(kernel_pmap, sysm->base + PAGE_SIZE); 284 sysm->valid2 = 0; 285 } 286 intr_restore(sysm->saved_intr); 287} 288#else /* __mips_n64 */ 289 290static __inline void 291pmap_alloc_lmem_map(void) 292{ 293} 294 295static __inline vm_offset_t 296pmap_lmem_map1(vm_paddr_t phys) 
297{ 298 299 return (0); 300} 301 302static __inline vm_offset_t 303pmap_lmem_map2(vm_paddr_t phys1, vm_paddr_t phys2) 304{ 305 306 return (0); 307} 308 309static __inline vm_offset_t 310pmap_lmem_unmap(void) 311{ 312 313 return (0); 314} 315#endif /* !__mips_n64 */ 316 317/* 318 * Page table entry lookup routines. 319 */ 320static __inline pd_entry_t * 321pmap_segmap(pmap_t pmap, vm_offset_t va) 322{ 323 324 return (&pmap->pm_segtab[pmap_seg_index(va)]); 325} 326 327#ifdef __mips_n64 328static __inline pd_entry_t * 329pmap_pdpe_to_pde(pd_entry_t *pdpe, vm_offset_t va) 330{ 331 pd_entry_t *pde; 332 333 pde = (pd_entry_t *)*pdpe; 334 return (&pde[pmap_pde_index(va)]); 335} 336 337static __inline pd_entry_t * 338pmap_pde(pmap_t pmap, vm_offset_t va) 339{ 340 pd_entry_t *pdpe; 341 342 pdpe = pmap_segmap(pmap, va); 343 if (*pdpe == NULL) 344 return (NULL); 345 346 return (pmap_pdpe_to_pde(pdpe, va)); 347} 348#else 349static __inline pd_entry_t * 350pmap_pdpe_to_pde(pd_entry_t *pdpe, vm_offset_t va) 351{ 352 353 return (pdpe); 354} 355 356static __inline 357pd_entry_t *pmap_pde(pmap_t pmap, vm_offset_t va) 358{ 359 360 return (pmap_segmap(pmap, va)); 361} 362#endif 363 364static __inline pt_entry_t * 365pmap_pde_to_pte(pd_entry_t *pde, vm_offset_t va) 366{ 367 pt_entry_t *pte; 368 369 pte = (pt_entry_t *)*pde; 370 return (&pte[pmap_pte_index(va)]); 371} 372 373pt_entry_t * 374pmap_pte(pmap_t pmap, vm_offset_t va) 375{ 376 pd_entry_t *pde; 377 378 pde = pmap_pde(pmap, va); 379 if (pde == NULL || *pde == NULL) 380 return (NULL); 381 382 return (pmap_pde_to_pte(pde, va)); 383} 384 385vm_offset_t 386pmap_steal_memory(vm_size_t size) 387{ 388 vm_paddr_t bank_size, pa; 389 vm_offset_t va; 390 391 size = round_page(size); 392 bank_size = phys_avail[1] - phys_avail[0]; 393 while (size > bank_size) { 394 int i; 395 396 for (i = 0; phys_avail[i + 2]; i += 2) { 397 phys_avail[i] = phys_avail[i + 2]; 398 phys_avail[i + 1] = phys_avail[i + 3]; 399 } 400 phys_avail[i] = 0; 401 phys_avail[i + 1] = 0; 402 if (!phys_avail[0]) 403 panic("pmap_steal_memory: out of memory"); 404 bank_size = phys_avail[1] - phys_avail[0]; 405 } 406 407 pa = phys_avail[0]; 408 phys_avail[0] += size; 409 if (MIPS_DIRECT_MAPPABLE(pa) == 0) 410 panic("Out of memory below 512Meg?"); 411 va = MIPS_PHYS_TO_DIRECT(pa); 412 bzero((caddr_t)va, size); 413 return (va); 414} 415 416/* 417 * Bootstrap the system enough to run with virtual memory. This 418 * assumes that the phys_avail array has been initialized. 419 */ 420static void 421pmap_create_kernel_pagetable(void) 422{ 423 int i, j; 424 vm_offset_t ptaddr; 425 pt_entry_t *pte; 426#ifdef __mips_n64 427 pd_entry_t *pde; 428 vm_offset_t pdaddr; 429 int npt, npde; 430#endif 431 432 /* 433 * Allocate segment table for the kernel 434 */ 435 kernel_segmap = (pd_entry_t *)pmap_steal_memory(PAGE_SIZE); 436 437 /* 438 * Allocate second level page tables for the kernel 439 */ 440#ifdef __mips_n64 441 npde = howmany(NKPT, NPDEPG); 442 pdaddr = pmap_steal_memory(PAGE_SIZE * npde); 443#endif 444 nkpt = NKPT; 445 ptaddr = pmap_steal_memory(PAGE_SIZE * nkpt); 446 447 /* 448 * The R[4-7]?00 stores only one copy of the Global bit in the 449 * translation lookaside buffer for each 2 page entry. Thus invalid 450 * entrys must have the Global bit set so when Entry LO and Entry HI 451 * G bits are anded together they will produce a global bit to store 452 * in the tlb. 
453 */ 454 for (i = 0, pte = (pt_entry_t *)ptaddr; i < (nkpt * NPTEPG); i++, pte++) 455 *pte = PTE_G; 456 457#ifdef __mips_n64 458 for (i = 0, npt = nkpt; npt > 0; i++) { 459 kernel_segmap[i] = (pd_entry_t)(pdaddr + i * PAGE_SIZE); 460 pde = (pd_entry_t *)kernel_segmap[i]; 461 462 for (j = 0; j < NPDEPG && npt > 0; j++, npt--) 463 pde[j] = (pd_entry_t)(ptaddr + (i * NPDEPG + j) * PAGE_SIZE); 464 } 465#else 466 for (i = 0, j = pmap_seg_index(VM_MIN_KERNEL_ADDRESS); i < nkpt; i++, j++) 467 kernel_segmap[j] = (pd_entry_t)(ptaddr + (i * PAGE_SIZE)); 468#endif 469 470 PMAP_LOCK_INIT(kernel_pmap); 471 kernel_pmap->pm_segtab = kernel_segmap; 472 CPU_FILL(&kernel_pmap->pm_active); 473 TAILQ_INIT(&kernel_pmap->pm_pvchunk); 474 kernel_pmap->pm_asid[0].asid = PMAP_ASID_RESERVED; 475 kernel_pmap->pm_asid[0].gen = 0; 476 kernel_vm_end += nkpt * NPTEPG * PAGE_SIZE; 477} 478 479void 480pmap_bootstrap(void) 481{ 482 int i; 483 int need_local_mappings = 0; 484 485 /* Sort. */ 486again: 487 for (i = 0; phys_avail[i + 1] != 0; i += 2) { 488 /* 489 * Keep the memory aligned on page boundary. 490 */ 491 phys_avail[i] = round_page(phys_avail[i]); 492 phys_avail[i + 1] = trunc_page(phys_avail[i + 1]); 493 494 if (i < 2) 495 continue; 496 if (phys_avail[i - 2] > phys_avail[i]) { 497 vm_paddr_t ptemp[2]; 498 499 ptemp[0] = phys_avail[i + 0]; 500 ptemp[1] = phys_avail[i + 1]; 501 502 phys_avail[i + 0] = phys_avail[i - 2]; 503 phys_avail[i + 1] = phys_avail[i - 1]; 504 505 phys_avail[i - 2] = ptemp[0]; 506 phys_avail[i - 1] = ptemp[1]; 507 goto again; 508 } 509 } 510 511 /* 512 * In 32 bit, we may have memory which cannot be mapped directly. 513 * This memory will need temporary mapping before it can be 514 * accessed. 515 */ 516 if (!MIPS_DIRECT_MAPPABLE(phys_avail[i - 1] - 1)) 517 need_local_mappings = 1; 518 519 /* 520 * Copy the phys_avail[] array before we start stealing memory from it. 521 */ 522 for (i = 0; phys_avail[i + 1] != 0; i += 2) { 523 physmem_desc[i] = phys_avail[i]; 524 physmem_desc[i + 1] = phys_avail[i + 1]; 525 } 526 527 Maxmem = atop(phys_avail[i - 1]); 528 529 if (bootverbose) { 530 printf("Physical memory chunk(s):\n"); 531 for (i = 0; phys_avail[i + 1] != 0; i += 2) { 532 vm_paddr_t size; 533 534 size = phys_avail[i + 1] - phys_avail[i]; 535 printf("%#08jx - %#08jx, %ju bytes (%ju pages)\n", 536 (uintmax_t) phys_avail[i], 537 (uintmax_t) phys_avail[i + 1] - 1, 538 (uintmax_t) size, (uintmax_t) size / PAGE_SIZE); 539 } 540 printf("Maxmem is 0x%0jx\n", ptoa((uintmax_t)Maxmem)); 541 } 542 /* 543 * Steal the message buffer from the beginning of memory. 544 */ 545 msgbufp = (struct msgbuf *)pmap_steal_memory(msgbufsize); 546 msgbufinit(msgbufp, msgbufsize); 547 548 /* 549 * Steal thread0 kstack. 550 */ 551 kstack0 = pmap_steal_memory(KSTACK_PAGES << PAGE_SHIFT); 552 553 virtual_avail = VM_MIN_KERNEL_ADDRESS; 554 virtual_end = VM_MAX_KERNEL_ADDRESS; 555 556#ifdef SMP 557 /* 558 * Steal some virtual address space to map the pcpu area. 559 */ 560 virtual_avail = roundup2(virtual_avail, PAGE_SIZE * 2); 561 pcpup = (struct pcpu *)virtual_avail; 562 virtual_avail += PAGE_SIZE * 2; 563 564 /* 565 * Initialize the wired TLB entry mapping the pcpu region for 566 * the BSP at 'pcpup'. Up until this point we were operating 567 * with the 'pcpup' for the BSP pointing to a virtual address 568 * in KSEG0 so there was no need for a TLB mapping. 
569 */ 570 mips_pcpu_tlb_init(PCPU_ADDR(0)); 571 572 if (bootverbose) 573 printf("pcpu is available at virtual address %p.\n", pcpup); 574#endif 575 576 if (need_local_mappings) 577 pmap_alloc_lmem_map(); 578 pmap_create_kernel_pagetable(); 579 pmap_max_asid = VMNUM_PIDS; 580 mips_wr_entryhi(0); 581 mips_wr_pagemask(0); 582 583 /* 584 * Initialize the global pv list lock. 585 */ 586 rw_init(&pvh_global_lock, "pmap pv global"); 587} 588 589/* 590 * Initialize a vm_page's machine-dependent fields. 591 */ 592void 593pmap_page_init(vm_page_t m) 594{ 595 596 TAILQ_INIT(&m->md.pv_list); 597 m->md.pv_flags = 0; 598} 599 600/* 601 * Initialize the pmap module. 602 * Called by vm_init, to initialize any structures that the pmap 603 * system needs to map virtual memory. 604 */ 605void 606pmap_init(void) 607{ 608} 609 610/*************************************************** 611 * Low level helper routines..... 612 ***************************************************/ 613 614#ifdef SMP 615static __inline void 616pmap_call_on_active_cpus(pmap_t pmap, void (*fn)(void *), void *arg) 617{ 618 int cpuid, cpu, self; 619 cpuset_t active_cpus; 620 621 sched_pin(); 622 if (is_kernel_pmap(pmap)) { 623 smp_rendezvous(NULL, fn, NULL, arg); 624 goto out; 625 } 626 /* Force ASID update on inactive CPUs */ 627 CPU_FOREACH(cpu) { 628 if (!CPU_ISSET(cpu, &pmap->pm_active)) 629 pmap->pm_asid[cpu].gen = 0; 630 } 631 cpuid = PCPU_GET(cpuid); 632 /* 633 * XXX: barrier/locking for active? 634 * 635 * Take a snapshot of active here, any further changes are ignored. 636 * tlb update/invalidate should be harmless on inactive CPUs 637 */ 638 active_cpus = pmap->pm_active; 639 self = CPU_ISSET(cpuid, &active_cpus); 640 CPU_CLR(cpuid, &active_cpus); 641 /* Optimize for the case where this cpu is the only active one */ 642 if (CPU_EMPTY(&active_cpus)) { 643 if (self) 644 fn(arg); 645 } else { 646 if (self) 647 CPU_SET(cpuid, &active_cpus); 648 smp_rendezvous_cpus(active_cpus, NULL, fn, NULL, arg); 649 } 650out: 651 sched_unpin(); 652} 653#else /* !SMP */ 654static __inline void 655pmap_call_on_active_cpus(pmap_t pmap, void (*fn)(void *), void *arg) 656{ 657 int cpuid; 658 659 if (is_kernel_pmap(pmap)) { 660 fn(arg); 661 return; 662 } 663 cpuid = PCPU_GET(cpuid); 664 if (!CPU_ISSET(cpuid, &pmap->pm_active)) 665 pmap->pm_asid[cpuid].gen = 0; 666 else 667 fn(arg); 668} 669#endif /* SMP */ 670 671static void 672pmap_invalidate_all(pmap_t pmap) 673{ 674 675 pmap_call_on_active_cpus(pmap, 676 (void (*)(void *))tlb_invalidate_all_user, pmap); 677} 678 679struct pmap_invalidate_page_arg { 680 pmap_t pmap; 681 vm_offset_t va; 682}; 683 684static void 685pmap_invalidate_page_action(void *arg) 686{ 687 struct pmap_invalidate_page_arg *p = arg; 688 689 tlb_invalidate_address(p->pmap, p->va); 690} 691 692static void 693pmap_invalidate_page(pmap_t pmap, vm_offset_t va) 694{ 695 struct pmap_invalidate_page_arg arg; 696 697 arg.pmap = pmap; 698 arg.va = va; 699 pmap_call_on_active_cpus(pmap, pmap_invalidate_page_action, &arg); 700} 701 702struct pmap_invalidate_range_arg { 703 pmap_t pmap; 704 vm_offset_t sva; 705 vm_offset_t eva; 706}; 707 708static void 709pmap_invalidate_range_action(void *arg) 710{ 711 struct pmap_invalidate_range_arg *p = arg; 712 713 tlb_invalidate_range(p->pmap, p->sva, p->eva); 714} 715 716static void 717pmap_invalidate_range(pmap_t pmap, vm_offset_t sva, vm_offset_t eva) 718{ 719 struct pmap_invalidate_range_arg arg; 720 721 arg.pmap = pmap; 722 arg.sva = sva; 723 arg.eva = eva; 724 pmap_call_on_active_cpus(pmap, 
pmap_invalidate_range_action, &arg); 725} 726 727struct pmap_update_page_arg { 728 pmap_t pmap; 729 vm_offset_t va; 730 pt_entry_t pte; 731}; 732 733static void 734pmap_update_page_action(void *arg) 735{ 736 struct pmap_update_page_arg *p = arg; 737 738 tlb_update(p->pmap, p->va, p->pte); 739} 740 741static void 742pmap_update_page(pmap_t pmap, vm_offset_t va, pt_entry_t pte) 743{ 744 struct pmap_update_page_arg arg; 745 746 arg.pmap = pmap; 747 arg.va = va; 748 arg.pte = pte; 749 pmap_call_on_active_cpus(pmap, pmap_update_page_action, &arg); 750} 751 752/* 753 * Routine: pmap_extract 754 * Function: 755 * Extract the physical page address associated 756 * with the given map/virtual_address pair. 757 */ 758vm_paddr_t 759pmap_extract(pmap_t pmap, vm_offset_t va) 760{ 761 pt_entry_t *pte; 762 vm_offset_t retval = 0; 763 764 PMAP_LOCK(pmap); 765 pte = pmap_pte(pmap, va); 766 if (pte) { 767 retval = TLBLO_PTE_TO_PA(*pte) | (va & PAGE_MASK); 768 } 769 PMAP_UNLOCK(pmap); 770 return (retval); 771} 772 773/* 774 * Routine: pmap_extract_and_hold 775 * Function: 776 * Atomically extract and hold the physical page 777 * with the given pmap and virtual address pair 778 * if that mapping permits the given protection. 779 */ 780vm_page_t 781pmap_extract_and_hold(pmap_t pmap, vm_offset_t va, vm_prot_t prot) 782{ 783 pt_entry_t pte, *ptep; 784 vm_paddr_t pa, pte_pa; 785 vm_page_t m; 786 787 m = NULL; 788 pa = 0; 789 PMAP_LOCK(pmap); 790retry: 791 ptep = pmap_pte(pmap, va); 792 if (ptep != NULL) { 793 pte = *ptep; 794 if (pte_test(&pte, PTE_V) && (!pte_test(&pte, PTE_RO) || 795 (prot & VM_PROT_WRITE) == 0)) { 796 pte_pa = TLBLO_PTE_TO_PA(pte); 797 if (vm_page_pa_tryrelock(pmap, pte_pa, &pa)) 798 goto retry; 799 m = PHYS_TO_VM_PAGE(pte_pa); 800 vm_page_hold(m); 801 } 802 } 803 PA_UNLOCK_COND(pa); 804 PMAP_UNLOCK(pmap); 805 return (m); 806} 807 808/*************************************************** 809 * Low level mapping routines..... 810 ***************************************************/ 811 812/* 813 * add a wired page to the kva 814 */ 815void 816pmap_kenter_attr(vm_offset_t va, vm_paddr_t pa, int attr) 817{ 818 pt_entry_t *pte; 819 pt_entry_t opte, npte; 820 821#ifdef PMAP_DEBUG 822 printf("pmap_kenter: va: %p -> pa: %p\n", (void *)va, (void *)pa); 823#endif 824 825 pte = pmap_pte(kernel_pmap, va); 826 opte = *pte; 827 npte = TLBLO_PA_TO_PFN(pa) | attr | PTE_D | PTE_V | PTE_G; 828 *pte = npte; 829 if (pte_test(&opte, PTE_V) && opte != npte) 830 pmap_update_page(kernel_pmap, va, npte); 831} 832 833void 834pmap_kenter(vm_offset_t va, vm_paddr_t pa) 835{ 836 837 KASSERT(is_cacheable_mem(pa), 838 ("pmap_kenter: memory at 0x%lx is not cacheable", (u_long)pa)); 839 840 pmap_kenter_attr(va, pa, PTE_C_CACHE); 841} 842 843/* 844 * remove a page from the kernel pagetables 845 */ 846 /* PMAP_INLINE */ void 847pmap_kremove(vm_offset_t va) 848{ 849 pt_entry_t *pte; 850 851 /* 852 * Write back all caches from the page being destroyed 853 */ 854 mips_dcache_wbinv_range_index(va, PAGE_SIZE); 855 856 pte = pmap_pte(kernel_pmap, va); 857 *pte = PTE_G; 858 pmap_invalidate_page(kernel_pmap, va); 859} 860 861/* 862 * Used to map a range of physical addresses into kernel 863 * virtual address space. 864 * 865 * The value passed in '*virt' is a suggested virtual address for 866 * the mapping. Architectures which can support a direct-mapped 867 * physical to virtual region can return the appropriate address 868 * within that region, leaving '*virt' unchanged. 
Other 869 * architectures should map the pages starting at '*virt' and 870 * update '*virt' with the first usable address after the mapped 871 * region. 872 * 873 * Use XKPHYS for 64 bit, and KSEG0 where possible for 32 bit. 874 */ 875vm_offset_t 876pmap_map(vm_offset_t *virt, vm_paddr_t start, vm_paddr_t end, int prot) 877{ 878 vm_offset_t va, sva; 879 880 if (MIPS_DIRECT_MAPPABLE(end - 1)) 881 return (MIPS_PHYS_TO_DIRECT(start)); 882 883 va = sva = *virt; 884 while (start < end) { 885 pmap_kenter(va, start); 886 va += PAGE_SIZE; 887 start += PAGE_SIZE; 888 } 889 *virt = va; 890 return (sva); 891} 892 893/* 894 * Add a list of wired pages to the kva 895 * this routine is only used for temporary 896 * kernel mappings that do not need to have 897 * page modification or references recorded. 898 * Note that old mappings are simply written 899 * over. The page *must* be wired. 900 */ 901void 902pmap_qenter(vm_offset_t va, vm_page_t *m, int count) 903{ 904 int i; 905 vm_offset_t origva = va; 906 907 for (i = 0; i < count; i++) { 908 pmap_flush_pvcache(m[i]); 909 pmap_kenter(va, VM_PAGE_TO_PHYS(m[i])); 910 va += PAGE_SIZE; 911 } 912 913 mips_dcache_wbinv_range_index(origva, PAGE_SIZE*count); 914} 915 916/* 917 * this routine jerks page mappings from the 918 * kernel -- it is meant only for temporary mappings. 919 */ 920void 921pmap_qremove(vm_offset_t va, int count) 922{ 923 pt_entry_t *pte; 924 vm_offset_t origva; 925 926 if (count < 1) 927 return; 928 mips_dcache_wbinv_range_index(va, PAGE_SIZE * count); 929 origva = va; 930 do { 931 pte = pmap_pte(kernel_pmap, va); 932 *pte = PTE_G; 933 va += PAGE_SIZE; 934 } while (--count > 0); 935 pmap_invalidate_range(kernel_pmap, origva, va); 936} 937 938/*************************************************** 939 * Page table page management routines..... 940 ***************************************************/ 941 942/* 943 * Decrements a page table page's wire count, which is used to record the 944 * number of valid page table entries within the page. If the wire count 945 * drops to zero, then the page table page is unmapped. Returns TRUE if the 946 * page table page was unmapped and FALSE otherwise. 947 */ 948static PMAP_INLINE boolean_t 949pmap_unwire_ptp(pmap_t pmap, vm_offset_t va, vm_page_t m) 950{ 951 952 --m->wire_count; 953 if (m->wire_count == 0) { 954 _pmap_unwire_ptp(pmap, va, m); 955 return (TRUE); 956 } else 957 return (FALSE); 958} 959 960static void 961_pmap_unwire_ptp(pmap_t pmap, vm_offset_t va, vm_page_t m) 962{ 963 pd_entry_t *pde; 964 965 PMAP_LOCK_ASSERT(pmap, MA_OWNED); 966 /* 967 * unmap the page table page 968 */ 969#ifdef __mips_n64 970 if (m->pindex < NUPDE) 971 pde = pmap_pde(pmap, va); 972 else 973 pde = pmap_segmap(pmap, va); 974#else 975 pde = pmap_pde(pmap, va); 976#endif 977 *pde = 0; 978 pmap->pm_stats.resident_count--; 979 980#ifdef __mips_n64 981 if (m->pindex < NUPDE) { 982 pd_entry_t *pdp; 983 vm_page_t pdpg; 984 985 /* 986 * Recursively decrement next level pagetable refcount 987 */ 988 pdp = (pd_entry_t *)*pmap_segmap(pmap, va); 989 pdpg = PHYS_TO_VM_PAGE(MIPS_DIRECT_TO_PHYS(pdp)); 990 pmap_unwire_ptp(pmap, va, pdpg); 991 } 992#endif 993 994 /* 995 * If the page is finally unwired, simply free it. 996 */ 997 vm_page_free_zero(m); 998 atomic_subtract_int(&cnt.v_wire_count, 1); 999} 1000 1001/* 1002 * After removing a page table entry, this routine is used to 1003 * conditionally free the page, and manage the hold/wire counts. 
1004 */ 1005static int 1006pmap_unuse_pt(pmap_t pmap, vm_offset_t va, pd_entry_t pde) 1007{ 1008 vm_page_t mpte; 1009 1010 if (va >= VM_MAXUSER_ADDRESS) 1011 return (0); 1012 KASSERT(pde != 0, ("pmap_unuse_pt: pde != 0")); 1013 mpte = PHYS_TO_VM_PAGE(MIPS_DIRECT_TO_PHYS(pde)); 1014 return (pmap_unwire_ptp(pmap, va, mpte)); 1015} 1016 1017void 1018pmap_pinit0(pmap_t pmap) 1019{ 1020 int i; 1021 1022 PMAP_LOCK_INIT(pmap); 1023 pmap->pm_segtab = kernel_segmap; 1024 CPU_ZERO(&pmap->pm_active); 1025 for (i = 0; i < MAXCPU; i++) { 1026 pmap->pm_asid[i].asid = PMAP_ASID_RESERVED; 1027 pmap->pm_asid[i].gen = 0; 1028 } 1029 PCPU_SET(curpmap, pmap); 1030 TAILQ_INIT(&pmap->pm_pvchunk); 1031 bzero(&pmap->pm_stats, sizeof pmap->pm_stats); 1032} 1033 1034void 1035pmap_grow_direct_page_cache() 1036{ 1037 1038#ifdef __mips_n64 1039 vm_pageout_grow_cache(3, 0, MIPS_XKPHYS_LARGEST_PHYS); 1040#else 1041 vm_pageout_grow_cache(3, 0, MIPS_KSEG0_LARGEST_PHYS); 1042#endif 1043} 1044 1045static vm_page_t 1046pmap_alloc_direct_page(unsigned int index, int req) 1047{ 1048 vm_page_t m; 1049 1050 m = vm_page_alloc_freelist(VM_FREELIST_DIRECT, req | VM_ALLOC_WIRED | 1051 VM_ALLOC_ZERO); 1052 if (m == NULL) 1053 return (NULL); 1054 1055 if ((m->flags & PG_ZERO) == 0) 1056 pmap_zero_page(m); 1057 1058 m->pindex = index; 1059 return (m); 1060} 1061 1062/* 1063 * Initialize a preallocated and zeroed pmap structure, 1064 * such as one in a vmspace structure. 1065 */ 1066int 1067pmap_pinit(pmap_t pmap) 1068{ 1069 vm_offset_t ptdva; 1070 vm_page_t ptdpg; 1071 int i; 1072 1073 /* 1074 * allocate the page directory page 1075 */ 1076 while ((ptdpg = pmap_alloc_direct_page(NUSERPGTBLS, VM_ALLOC_NORMAL)) == NULL) 1077 pmap_grow_direct_page_cache(); 1078 1079 ptdva = MIPS_PHYS_TO_DIRECT(VM_PAGE_TO_PHYS(ptdpg)); 1080 pmap->pm_segtab = (pd_entry_t *)ptdva; 1081 CPU_ZERO(&pmap->pm_active); 1082 for (i = 0; i < MAXCPU; i++) { 1083 pmap->pm_asid[i].asid = PMAP_ASID_RESERVED; 1084 pmap->pm_asid[i].gen = 0; 1085 } 1086 TAILQ_INIT(&pmap->pm_pvchunk); 1087 bzero(&pmap->pm_stats, sizeof pmap->pm_stats); 1088 1089 return (1); 1090} 1091 1092/* 1093 * this routine is called if the page table page is not 1094 * mapped correctly. 1095 */ 1096static vm_page_t 1097_pmap_allocpte(pmap_t pmap, unsigned ptepindex, int flags) 1098{ 1099 vm_offset_t pageva; 1100 vm_page_t m; 1101 1102 KASSERT((flags & (M_NOWAIT | M_WAITOK)) == M_NOWAIT || 1103 (flags & (M_NOWAIT | M_WAITOK)) == M_WAITOK, 1104 ("_pmap_allocpte: flags is neither M_NOWAIT nor M_WAITOK")); 1105 1106 /* 1107 * Find or fabricate a new pagetable page 1108 */ 1109 if ((m = pmap_alloc_direct_page(ptepindex, VM_ALLOC_NORMAL)) == NULL) { 1110 if (flags & M_WAITOK) { 1111 PMAP_UNLOCK(pmap); 1112 rw_wunlock(&pvh_global_lock); 1113 pmap_grow_direct_page_cache(); 1114 rw_wlock(&pvh_global_lock); 1115 PMAP_LOCK(pmap); 1116 } 1117 1118 /* 1119 * Indicate the need to retry. While waiting, the page 1120 * table page may have been allocated. 1121 */ 1122 return (NULL); 1123 } 1124 1125 /* 1126 * Map the pagetable page into the process address space, if it 1127 * isn't already there. 
1128 */ 1129 pageva = MIPS_PHYS_TO_DIRECT(VM_PAGE_TO_PHYS(m)); 1130 1131#ifdef __mips_n64 1132 if (ptepindex >= NUPDE) { 1133 pmap->pm_segtab[ptepindex - NUPDE] = (pd_entry_t)pageva; 1134 } else { 1135 pd_entry_t *pdep, *pde; 1136 int segindex = ptepindex >> (SEGSHIFT - PDRSHIFT); 1137 int pdeindex = ptepindex & (NPDEPG - 1); 1138 vm_page_t pg; 1139 1140 pdep = &pmap->pm_segtab[segindex]; 1141 if (*pdep == NULL) { 1142 /* recurse for allocating page dir */ 1143 if (_pmap_allocpte(pmap, NUPDE + segindex, 1144 flags) == NULL) { 1145 /* alloc failed, release current */ 1146 --m->wire_count; 1147 atomic_subtract_int(&cnt.v_wire_count, 1); 1148 vm_page_free_zero(m); 1149 return (NULL); 1150 } 1151 } else { 1152 pg = PHYS_TO_VM_PAGE(MIPS_DIRECT_TO_PHYS(*pdep)); 1153 pg->wire_count++; 1154 } 1155 /* Next level entry */ 1156 pde = (pd_entry_t *)*pdep; 1157 pde[pdeindex] = (pd_entry_t)pageva; 1158 } 1159#else 1160 pmap->pm_segtab[ptepindex] = (pd_entry_t)pageva; 1161#endif 1162 pmap->pm_stats.resident_count++; 1163 return (m); 1164} 1165 1166static vm_page_t 1167pmap_allocpte(pmap_t pmap, vm_offset_t va, int flags) 1168{ 1169 unsigned ptepindex; 1170 pd_entry_t *pde; 1171 vm_page_t m; 1172 1173 KASSERT((flags & (M_NOWAIT | M_WAITOK)) == M_NOWAIT || 1174 (flags & (M_NOWAIT | M_WAITOK)) == M_WAITOK, 1175 ("pmap_allocpte: flags is neither M_NOWAIT nor M_WAITOK")); 1176 1177 /* 1178 * Calculate pagetable page index 1179 */ 1180 ptepindex = pmap_pde_pindex(va); 1181retry: 1182 /* 1183 * Get the page directory entry 1184 */ 1185 pde = pmap_pde(pmap, va); 1186 1187 /* 1188 * If the page table page is mapped, we just increment the hold 1189 * count, and activate it. 1190 */ 1191 if (pde != NULL && *pde != NULL) { 1192 m = PHYS_TO_VM_PAGE(MIPS_DIRECT_TO_PHYS(*pde)); 1193 m->wire_count++; 1194 } else { 1195 /* 1196 * Here if the pte page isn't mapped, or if it has been 1197 * deallocated. 1198 */ 1199 m = _pmap_allocpte(pmap, ptepindex, flags); 1200 if (m == NULL && (flags & M_WAITOK)) 1201 goto retry; 1202 } 1203 return (m); 1204} 1205 1206 1207/*************************************************** 1208 * Pmap allocation/deallocation routines. 1209 ***************************************************/ 1210 1211/* 1212 * Release any resources held by the given physical map. 1213 * Called when a pmap initialized by pmap_pinit is being released. 1214 * Should only be called if the map contains no valid mappings. 
1215 */ 1216void 1217pmap_release(pmap_t pmap) 1218{ 1219 vm_offset_t ptdva; 1220 vm_page_t ptdpg; 1221 1222 KASSERT(pmap->pm_stats.resident_count == 0, 1223 ("pmap_release: pmap resident count %ld != 0", 1224 pmap->pm_stats.resident_count)); 1225 1226 ptdva = (vm_offset_t)pmap->pm_segtab; 1227 ptdpg = PHYS_TO_VM_PAGE(MIPS_DIRECT_TO_PHYS(ptdva)); 1228 1229 ptdpg->wire_count--; 1230 atomic_subtract_int(&cnt.v_wire_count, 1); 1231 vm_page_free_zero(ptdpg); 1232} 1233 1234/* 1235 * grow the number of kernel page table entries, if needed 1236 */ 1237void 1238pmap_growkernel(vm_offset_t addr) 1239{ 1240 vm_page_t nkpg; 1241 pd_entry_t *pde, *pdpe; 1242 pt_entry_t *pte; 1243 int i; 1244 1245 mtx_assert(&kernel_map->system_mtx, MA_OWNED); 1246 addr = roundup2(addr, NBSEG); 1247 if (addr - 1 >= kernel_map->max_offset) 1248 addr = kernel_map->max_offset; 1249 while (kernel_vm_end < addr) { 1250 pdpe = pmap_segmap(kernel_pmap, kernel_vm_end); 1251#ifdef __mips_n64 1252 if (*pdpe == 0) { 1253 /* new intermediate page table entry */ 1254 nkpg = pmap_alloc_direct_page(nkpt, VM_ALLOC_INTERRUPT); 1255 if (nkpg == NULL) 1256 panic("pmap_growkernel: no memory to grow kernel"); 1257 *pdpe = (pd_entry_t)MIPS_PHYS_TO_DIRECT(VM_PAGE_TO_PHYS(nkpg)); 1258 continue; /* try again */ 1259 } 1260#endif 1261 pde = pmap_pdpe_to_pde(pdpe, kernel_vm_end); 1262 if (*pde != 0) { 1263 kernel_vm_end = (kernel_vm_end + NBPDR) & ~PDRMASK; 1264 if (kernel_vm_end - 1 >= kernel_map->max_offset) { 1265 kernel_vm_end = kernel_map->max_offset; 1266 break; 1267 } 1268 continue; 1269 } 1270 1271 /* 1272 * This index is bogus, but out of the way 1273 */ 1274 nkpg = pmap_alloc_direct_page(nkpt, VM_ALLOC_INTERRUPT); 1275 if (!nkpg) 1276 panic("pmap_growkernel: no memory to grow kernel"); 1277 nkpt++; 1278 *pde = (pd_entry_t)MIPS_PHYS_TO_DIRECT(VM_PAGE_TO_PHYS(nkpg)); 1279 1280 /* 1281 * The R[4-7]?00 stores only one copy of the Global bit in 1282 * the translation lookaside buffer for each 2 page entry. 1283 * Thus invalid entrys must have the Global bit set so when 1284 * Entry LO and Entry HI G bits are anded together they will 1285 * produce a global bit to store in the tlb. 1286 */ 1287 pte = (pt_entry_t *)*pde; 1288 for (i = 0; i < NPTEPG; i++) 1289 pte[i] = PTE_G; 1290 1291 kernel_vm_end = (kernel_vm_end + NBPDR) & ~PDRMASK; 1292 if (kernel_vm_end - 1 >= kernel_map->max_offset) { 1293 kernel_vm_end = kernel_map->max_offset; 1294 break; 1295 } 1296 } 1297} 1298 1299/*************************************************** 1300 * page management routines. 
1301 ***************************************************/ 1302 1303CTASSERT(sizeof(struct pv_chunk) == PAGE_SIZE); 1304#ifdef __mips_n64 1305CTASSERT(_NPCM == 3); 1306CTASSERT(_NPCPV == 168); 1307#else 1308CTASSERT(_NPCM == 11); 1309CTASSERT(_NPCPV == 336); 1310#endif 1311 1312static __inline struct pv_chunk * 1313pv_to_chunk(pv_entry_t pv) 1314{ 1315 1316 return ((struct pv_chunk *)((uintptr_t)pv & ~(uintptr_t)PAGE_MASK)); 1317} 1318 1319#define PV_PMAP(pv) (pv_to_chunk(pv)->pc_pmap) 1320 1321#ifdef __mips_n64 1322#define PC_FREE0_1 0xfffffffffffffffful 1323#define PC_FREE2 0x000000fffffffffful 1324#else 1325#define PC_FREE0_9 0xfffffffful /* Free values for index 0 through 9 */ 1326#define PC_FREE10 0x0000fffful /* Free values for index 10 */ 1327#endif 1328 1329static const u_long pc_freemask[_NPCM] = { 1330#ifdef __mips_n64 1331 PC_FREE0_1, PC_FREE0_1, PC_FREE2 1332#else 1333 PC_FREE0_9, PC_FREE0_9, PC_FREE0_9, 1334 PC_FREE0_9, PC_FREE0_9, PC_FREE0_9, 1335 PC_FREE0_9, PC_FREE0_9, PC_FREE0_9, 1336 PC_FREE0_9, PC_FREE10 1337#endif 1338}; 1339 1340static SYSCTL_NODE(_vm, OID_AUTO, pmap, CTLFLAG_RD, 0, "VM/pmap parameters"); 1341 1342SYSCTL_INT(_vm_pmap, OID_AUTO, pv_entry_count, CTLFLAG_RD, &pv_entry_count, 0, 1343 "Current number of pv entries"); 1344 1345#ifdef PV_STATS 1346static int pc_chunk_count, pc_chunk_allocs, pc_chunk_frees, pc_chunk_tryfail; 1347 1348SYSCTL_INT(_vm_pmap, OID_AUTO, pc_chunk_count, CTLFLAG_RD, &pc_chunk_count, 0, 1349 "Current number of pv entry chunks"); 1350SYSCTL_INT(_vm_pmap, OID_AUTO, pc_chunk_allocs, CTLFLAG_RD, &pc_chunk_allocs, 0, 1351 "Current number of pv entry chunks allocated"); 1352SYSCTL_INT(_vm_pmap, OID_AUTO, pc_chunk_frees, CTLFLAG_RD, &pc_chunk_frees, 0, 1353 "Current number of pv entry chunks frees"); 1354SYSCTL_INT(_vm_pmap, OID_AUTO, pc_chunk_tryfail, CTLFLAG_RD, &pc_chunk_tryfail, 0, 1355 "Number of times tried to get a chunk page but failed."); 1356 1357static long pv_entry_frees, pv_entry_allocs; 1358static int pv_entry_spare; 1359 1360SYSCTL_LONG(_vm_pmap, OID_AUTO, pv_entry_frees, CTLFLAG_RD, &pv_entry_frees, 0, 1361 "Current number of pv entry frees"); 1362SYSCTL_LONG(_vm_pmap, OID_AUTO, pv_entry_allocs, CTLFLAG_RD, &pv_entry_allocs, 0, 1363 "Current number of pv entry allocs"); 1364SYSCTL_INT(_vm_pmap, OID_AUTO, pv_entry_spare, CTLFLAG_RD, &pv_entry_spare, 0, 1365 "Current number of spare pv entries"); 1366#endif 1367 1368/* 1369 * We are in a serious low memory condition. Resort to 1370 * drastic measures to free some pages so we can allocate 1371 * another pv entry chunk. 1372 */ 1373static vm_page_t 1374pmap_pv_reclaim(pmap_t locked_pmap) 1375{ 1376 struct pch newtail; 1377 struct pv_chunk *pc; 1378 pd_entry_t *pde; 1379 pmap_t pmap; 1380 pt_entry_t *pte, oldpte; 1381 pv_entry_t pv; 1382 vm_offset_t va; 1383 vm_page_t m, m_pc; 1384 u_long inuse; 1385 int bit, field, freed, idx; 1386 1387 PMAP_LOCK_ASSERT(locked_pmap, MA_OWNED); 1388 pmap = NULL; 1389 m_pc = NULL; 1390 TAILQ_INIT(&newtail); 1391 while ((pc = TAILQ_FIRST(&pv_chunks)) != NULL) { 1392 TAILQ_REMOVE(&pv_chunks, pc, pc_lru); 1393 if (pmap != pc->pc_pmap) { 1394 if (pmap != NULL) { 1395 pmap_invalidate_all(pmap); 1396 if (pmap != locked_pmap) 1397 PMAP_UNLOCK(pmap); 1398 } 1399 pmap = pc->pc_pmap; 1400 /* Avoid deadlock and lock recursion. 
*/ 1401 if (pmap > locked_pmap) 1402 PMAP_LOCK(pmap); 1403 else if (pmap != locked_pmap && !PMAP_TRYLOCK(pmap)) { 1404 pmap = NULL; 1405 TAILQ_INSERT_TAIL(&newtail, pc, pc_lru); 1406 continue; 1407 } 1408 } 1409 1410 /* 1411 * Destroy every non-wired, 4 KB page mapping in the chunk. 1412 */ 1413 freed = 0; 1414 for (field = 0; field < _NPCM; field++) { 1415 for (inuse = ~pc->pc_map[field] & pc_freemask[field]; 1416 inuse != 0; inuse &= ~(1UL << bit)) { 1417 bit = ffsl(inuse) - 1; 1418 idx = field * sizeof(inuse) * NBBY + bit; 1419 pv = &pc->pc_pventry[idx]; 1420 va = pv->pv_va; 1421 pde = pmap_pde(pmap, va); 1422 KASSERT(pde != NULL && *pde != 0, 1423 ("pmap_pv_reclaim: pde")); 1424 pte = pmap_pde_to_pte(pde, va); 1425 oldpte = *pte; 1426 if (pte_test(&oldpte, PTE_W)) 1427 continue; 1428 if (is_kernel_pmap(pmap)) 1429 *pte = PTE_G; 1430 else 1431 *pte = 0; 1432 m = PHYS_TO_VM_PAGE(TLBLO_PTE_TO_PA(oldpte)); 1433 if (pte_test(&oldpte, PTE_D)) 1434 vm_page_dirty(m); 1435 if (m->md.pv_flags & PV_TABLE_REF) 1436 vm_page_aflag_set(m, PGA_REFERENCED); 1437 m->md.pv_flags &= ~PV_TABLE_REF; 1438 TAILQ_REMOVE(&m->md.pv_list, pv, pv_list); 1439 if (TAILQ_EMPTY(&m->md.pv_list)) 1440 vm_page_aflag_clear(m, PGA_WRITEABLE); 1441 pc->pc_map[field] |= 1UL << bit; 1442 pmap_unuse_pt(pmap, va, *pde); 1443 freed++; 1444 } 1445 } 1446 if (freed == 0) { 1447 TAILQ_INSERT_TAIL(&newtail, pc, pc_lru); 1448 continue; 1449 } 1450 /* Every freed mapping is for a 4 KB page. */ 1451 pmap->pm_stats.resident_count -= freed; 1452 PV_STAT(pv_entry_frees += freed); 1453 PV_STAT(pv_entry_spare += freed); 1454 pv_entry_count -= freed; 1455 TAILQ_REMOVE(&pmap->pm_pvchunk, pc, pc_list); 1456 for (field = 0; field < _NPCM; field++) 1457 if (pc->pc_map[field] != pc_freemask[field]) { 1458 TAILQ_INSERT_HEAD(&pmap->pm_pvchunk, pc, 1459 pc_list); 1460 TAILQ_INSERT_TAIL(&newtail, pc, pc_lru); 1461 1462 /* 1463 * One freed pv entry in locked_pmap is 1464 * sufficient. 1465 */ 1466 if (pmap == locked_pmap) 1467 goto out; 1468 break; 1469 } 1470 if (field == _NPCM) { 1471 PV_STAT(pv_entry_spare -= _NPCPV); 1472 PV_STAT(pc_chunk_count--); 1473 PV_STAT(pc_chunk_frees++); 1474 /* Entire chunk is free; return it. */ 1475 m_pc = PHYS_TO_VM_PAGE(MIPS_DIRECT_TO_PHYS( 1476 (vm_offset_t)pc)); 1477 break; 1478 } 1479 } 1480out: 1481 TAILQ_CONCAT(&pv_chunks, &newtail, pc_lru); 1482 if (pmap != NULL) { 1483 pmap_invalidate_all(pmap); 1484 if (pmap != locked_pmap) 1485 PMAP_UNLOCK(pmap); 1486 } 1487 return (m_pc); 1488} 1489 1490/* 1491 * free the pv_entry back to the free list 1492 */ 1493static void 1494free_pv_entry(pmap_t pmap, pv_entry_t pv) 1495{ 1496 struct pv_chunk *pc; 1497 int bit, field, idx; 1498 1499 rw_assert(&pvh_global_lock, RA_WLOCKED); 1500 PMAP_LOCK_ASSERT(pmap, MA_OWNED); 1501 PV_STAT(pv_entry_frees++); 1502 PV_STAT(pv_entry_spare++); 1503 pv_entry_count--; 1504 pc = pv_to_chunk(pv); 1505 idx = pv - &pc->pc_pventry[0]; 1506 field = idx / (sizeof(u_long) * NBBY); 1507 bit = idx % (sizeof(u_long) * NBBY); 1508 pc->pc_map[field] |= 1ul << bit; 1509 for (idx = 0; idx < _NPCM; idx++) 1510 if (pc->pc_map[idx] != pc_freemask[idx]) { 1511 /* 1512 * 98% of the time, pc is already at the head of the 1513 * list. If it isn't already, move it to the head. 
1514 */ 1515 if (__predict_false(TAILQ_FIRST(&pmap->pm_pvchunk) != 1516 pc)) { 1517 TAILQ_REMOVE(&pmap->pm_pvchunk, pc, pc_list); 1518 TAILQ_INSERT_HEAD(&pmap->pm_pvchunk, pc, 1519 pc_list); 1520 } 1521 return; 1522 } 1523 TAILQ_REMOVE(&pmap->pm_pvchunk, pc, pc_list); 1524 free_pv_chunk(pc); 1525} 1526 1527static void 1528free_pv_chunk(struct pv_chunk *pc) 1529{ 1530 vm_page_t m; 1531 1532 TAILQ_REMOVE(&pv_chunks, pc, pc_lru); 1533 PV_STAT(pv_entry_spare -= _NPCPV); 1534 PV_STAT(pc_chunk_count--); 1535 PV_STAT(pc_chunk_frees++); 1536 /* entire chunk is free, return it */ 1537 m = PHYS_TO_VM_PAGE(MIPS_DIRECT_TO_PHYS((vm_offset_t)pc)); 1538 vm_page_unwire(m, 0); 1539 vm_page_free(m); 1540} 1541 1542/* 1543 * get a new pv_entry, allocating a block from the system 1544 * when needed. 1545 */ 1546static pv_entry_t 1547get_pv_entry(pmap_t pmap, boolean_t try) 1548{ 1549 struct pv_chunk *pc; 1550 pv_entry_t pv; 1551 vm_page_t m; 1552 int bit, field, idx; 1553 1554 rw_assert(&pvh_global_lock, RA_WLOCKED); 1555 PMAP_LOCK_ASSERT(pmap, MA_OWNED); 1556 PV_STAT(pv_entry_allocs++); 1557 pv_entry_count++; 1558retry: 1559 pc = TAILQ_FIRST(&pmap->pm_pvchunk); 1560 if (pc != NULL) { 1561 for (field = 0; field < _NPCM; field++) { 1562 if (pc->pc_map[field]) { 1563 bit = ffsl(pc->pc_map[field]) - 1; 1564 break; 1565 } 1566 } 1567 if (field < _NPCM) { 1568 idx = field * sizeof(pc->pc_map[field]) * NBBY + bit; 1569 pv = &pc->pc_pventry[idx]; 1570 pc->pc_map[field] &= ~(1ul << bit); 1571 /* If this was the last item, move it to tail */ 1572 for (field = 0; field < _NPCM; field++) 1573 if (pc->pc_map[field] != 0) { 1574 PV_STAT(pv_entry_spare--); 1575 return (pv); /* not full, return */ 1576 } 1577 TAILQ_REMOVE(&pmap->pm_pvchunk, pc, pc_list); 1578 TAILQ_INSERT_TAIL(&pmap->pm_pvchunk, pc, pc_list); 1579 PV_STAT(pv_entry_spare--); 1580 return (pv); 1581 } 1582 } 1583 /* No free items, allocate another chunk */ 1584 m = vm_page_alloc_freelist(VM_FREELIST_DIRECT, VM_ALLOC_NORMAL | 1585 VM_ALLOC_WIRED); 1586 if (m == NULL) { 1587 if (try) { 1588 pv_entry_count--; 1589 PV_STAT(pc_chunk_tryfail++); 1590 return (NULL); 1591 } 1592 m = pmap_pv_reclaim(pmap); 1593 if (m == NULL) 1594 goto retry; 1595 } 1596 PV_STAT(pc_chunk_count++); 1597 PV_STAT(pc_chunk_allocs++); 1598 pc = (struct pv_chunk *)MIPS_PHYS_TO_DIRECT(VM_PAGE_TO_PHYS(m)); 1599 pc->pc_pmap = pmap; 1600 pc->pc_map[0] = pc_freemask[0] & ~1ul; /* preallocated bit 0 */ 1601 for (field = 1; field < _NPCM; field++) 1602 pc->pc_map[field] = pc_freemask[field]; 1603 TAILQ_INSERT_TAIL(&pv_chunks, pc, pc_lru); 1604 pv = &pc->pc_pventry[0]; 1605 TAILQ_INSERT_HEAD(&pmap->pm_pvchunk, pc, pc_list); 1606 PV_STAT(pv_entry_spare += _NPCPV - 1); 1607 return (pv); 1608} 1609 1610static pv_entry_t 1611pmap_pvh_remove(struct md_page *pvh, pmap_t pmap, vm_offset_t va) 1612{ 1613 pv_entry_t pv; 1614 1615 rw_assert(&pvh_global_lock, RA_WLOCKED); 1616 TAILQ_FOREACH(pv, &pvh->pv_list, pv_list) { 1617 if (pmap == PV_PMAP(pv) && va == pv->pv_va) { 1618 TAILQ_REMOVE(&pvh->pv_list, pv, pv_list); 1619 break; 1620 } 1621 } 1622 return (pv); 1623} 1624 1625static void 1626pmap_pvh_free(struct md_page *pvh, pmap_t pmap, vm_offset_t va) 1627{ 1628 pv_entry_t pv; 1629 1630 pv = pmap_pvh_remove(pvh, pmap, va); 1631 KASSERT(pv != NULL, ("pmap_pvh_free: pv not found, pa %lx va %lx", 1632 (u_long)VM_PAGE_TO_PHYS(__containerof(pvh, struct vm_page, md)), 1633 (u_long)va)); 1634 free_pv_entry(pmap, pv); 1635} 1636 1637static void 1638pmap_remove_entry(pmap_t pmap, vm_page_t m, vm_offset_t va) 1639{ 
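	/*
	 * Remove the pv entry for this (pmap, va) pair from the page's
	 * pv list; once that list is empty the page can no longer be
	 * written through any mapping, so PGA_WRITEABLE is cleared.
	 */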
1640 1641 rw_assert(&pvh_global_lock, RA_WLOCKED); 1642 pmap_pvh_free(&m->md, pmap, va); 1643 if (TAILQ_EMPTY(&m->md.pv_list)) 1644 vm_page_aflag_clear(m, PGA_WRITEABLE); 1645} 1646 1647/* 1648 * Conditionally create a pv entry. 1649 */ 1650static boolean_t 1651pmap_try_insert_pv_entry(pmap_t pmap, vm_page_t mpte, vm_offset_t va, 1652 vm_page_t m) 1653{ 1654 pv_entry_t pv; 1655 1656 rw_assert(&pvh_global_lock, RA_WLOCKED); 1657 PMAP_LOCK_ASSERT(pmap, MA_OWNED); 1658 if ((pv = get_pv_entry(pmap, TRUE)) != NULL) { 1659 pv->pv_va = va; 1660 TAILQ_INSERT_TAIL(&m->md.pv_list, pv, pv_list); 1661 return (TRUE); 1662 } else 1663 return (FALSE); 1664} 1665 1666/* 1667 * pmap_remove_pte: do the things to unmap a page in a process 1668 */ 1669static int 1670pmap_remove_pte(struct pmap *pmap, pt_entry_t *ptq, vm_offset_t va, 1671 pd_entry_t pde) 1672{ 1673 pt_entry_t oldpte; 1674 vm_page_t m; 1675 vm_paddr_t pa; 1676 1677 rw_assert(&pvh_global_lock, RA_WLOCKED); 1678 PMAP_LOCK_ASSERT(pmap, MA_OWNED); 1679 1680 /* 1681 * Write back all cache lines from the page being unmapped. 1682 */ 1683 mips_dcache_wbinv_range_index(va, PAGE_SIZE); 1684 1685 oldpte = *ptq; 1686 if (is_kernel_pmap(pmap)) 1687 *ptq = PTE_G; 1688 else 1689 *ptq = 0; 1690 1691 if (pte_test(&oldpte, PTE_W)) 1692 pmap->pm_stats.wired_count -= 1; 1693 1694 pmap->pm_stats.resident_count -= 1; 1695 1696 if (pte_test(&oldpte, PTE_MANAGED)) { 1697 pa = TLBLO_PTE_TO_PA(oldpte); 1698 m = PHYS_TO_VM_PAGE(pa); 1699 if (pte_test(&oldpte, PTE_D)) { 1700 KASSERT(!pte_test(&oldpte, PTE_RO), 1701 ("%s: modified page not writable: va: %p, pte: %#jx", 1702 __func__, (void *)va, (uintmax_t)oldpte)); 1703 vm_page_dirty(m); 1704 } 1705 if (m->md.pv_flags & PV_TABLE_REF) 1706 vm_page_aflag_set(m, PGA_REFERENCED); 1707 m->md.pv_flags &= ~PV_TABLE_REF; 1708 1709 pmap_remove_entry(pmap, m, va); 1710 } 1711 return (pmap_unuse_pt(pmap, va, pde)); 1712} 1713 1714/* 1715 * Remove a single page from a process address space 1716 */ 1717static void 1718pmap_remove_page(struct pmap *pmap, vm_offset_t va) 1719{ 1720 pd_entry_t *pde; 1721 pt_entry_t *ptq; 1722 1723 rw_assert(&pvh_global_lock, RA_WLOCKED); 1724 PMAP_LOCK_ASSERT(pmap, MA_OWNED); 1725 pde = pmap_pde(pmap, va); 1726 if (pde == NULL || *pde == 0) 1727 return; 1728 ptq = pmap_pde_to_pte(pde, va); 1729 1730 /* 1731 * If there is no pte for this address, just skip it! 1732 */ 1733 if (!pte_test(ptq, PTE_V)) 1734 return; 1735 1736 (void)pmap_remove_pte(pmap, ptq, va, *pde); 1737 pmap_invalidate_page(pmap, va); 1738} 1739 1740/* 1741 * Remove the given range of addresses from the specified map. 1742 * 1743 * It is assumed that the start and end are properly 1744 * rounded to the page size. 1745 */ 1746void 1747pmap_remove(pmap_t pmap, vm_offset_t sva, vm_offset_t eva) 1748{ 1749 pd_entry_t *pde, *pdpe; 1750 pt_entry_t *pte; 1751 vm_offset_t va, va_next; 1752 1753 /* 1754 * Perform an unsynchronized read. This is, however, safe. 1755 */ 1756 if (pmap->pm_stats.resident_count == 0) 1757 return; 1758 1759 rw_wlock(&pvh_global_lock); 1760 PMAP_LOCK(pmap); 1761 1762 /* 1763 * special handling of removing one page. a very common operation 1764 * and easy to short circuit some code. 
1765 */ 1766 if ((sva + PAGE_SIZE) == eva) { 1767 pmap_remove_page(pmap, sva); 1768 goto out; 1769 } 1770 for (; sva < eva; sva = va_next) { 1771 pdpe = pmap_segmap(pmap, sva); 1772#ifdef __mips_n64 1773 if (*pdpe == 0) { 1774 va_next = (sva + NBSEG) & ~SEGMASK; 1775 if (va_next < sva) 1776 va_next = eva; 1777 continue; 1778 } 1779#endif 1780 va_next = (sva + NBPDR) & ~PDRMASK; 1781 if (va_next < sva) 1782 va_next = eva; 1783 1784 pde = pmap_pdpe_to_pde(pdpe, sva); 1785 if (*pde == NULL) 1786 continue; 1787 1788 /* 1789 * Limit our scan to either the end of the va represented 1790 * by the current page table page, or to the end of the 1791 * range being removed. 1792 */ 1793 if (va_next > eva) 1794 va_next = eva; 1795 1796 va = va_next; 1797 for (pte = pmap_pde_to_pte(pde, sva); sva != va_next; pte++, 1798 sva += PAGE_SIZE) { 1799 if (!pte_test(pte, PTE_V)) { 1800 if (va != va_next) { 1801 pmap_invalidate_range(pmap, va, sva); 1802 va = va_next; 1803 } 1804 continue; 1805 } 1806 if (va == va_next) 1807 va = sva; 1808 if (pmap_remove_pte(pmap, pte, sva, *pde)) { 1809 sva += PAGE_SIZE; 1810 break; 1811 } 1812 } 1813 if (va != va_next) 1814 pmap_invalidate_range(pmap, va, sva); 1815 } 1816out: 1817 rw_wunlock(&pvh_global_lock); 1818 PMAP_UNLOCK(pmap); 1819} 1820 1821/* 1822 * Routine: pmap_remove_all 1823 * Function: 1824 * Removes this physical page from 1825 * all physical maps in which it resides. 1826 * Reflects back modify bits to the pager. 1827 * 1828 * Notes: 1829 * Original versions of this routine were very 1830 * inefficient because they iteratively called 1831 * pmap_remove (slow...) 1832 */ 1833 1834void 1835pmap_remove_all(vm_page_t m) 1836{ 1837 pv_entry_t pv; 1838 pmap_t pmap; 1839 pd_entry_t *pde; 1840 pt_entry_t *pte, tpte; 1841 1842 KASSERT((m->oflags & VPO_UNMANAGED) == 0, 1843 ("pmap_remove_all: page %p is not managed", m)); 1844 rw_wlock(&pvh_global_lock); 1845 1846 if (m->md.pv_flags & PV_TABLE_REF) 1847 vm_page_aflag_set(m, PGA_REFERENCED); 1848 1849 while ((pv = TAILQ_FIRST(&m->md.pv_list)) != NULL) { 1850 pmap = PV_PMAP(pv); 1851 PMAP_LOCK(pmap); 1852 1853 /* 1854 * If it's last mapping writeback all caches from 1855 * the page being destroyed 1856 */ 1857 if (TAILQ_NEXT(pv, pv_list) == NULL) 1858 mips_dcache_wbinv_range_index(pv->pv_va, PAGE_SIZE); 1859 1860 pmap->pm_stats.resident_count--; 1861 1862 pde = pmap_pde(pmap, pv->pv_va); 1863 KASSERT(pde != NULL && *pde != 0, ("pmap_remove_all: pde")); 1864 pte = pmap_pde_to_pte(pde, pv->pv_va); 1865 1866 tpte = *pte; 1867 if (is_kernel_pmap(pmap)) 1868 *pte = PTE_G; 1869 else 1870 *pte = 0; 1871 1872 if (pte_test(&tpte, PTE_W)) 1873 pmap->pm_stats.wired_count--; 1874 1875 /* 1876 * Update the vm_page_t clean and reference bits. 1877 */ 1878 if (pte_test(&tpte, PTE_D)) { 1879 KASSERT(!pte_test(&tpte, PTE_RO), 1880 ("%s: modified page not writable: va: %p, pte: %#jx", 1881 __func__, (void *)pv->pv_va, (uintmax_t)tpte)); 1882 vm_page_dirty(m); 1883 } 1884 pmap_invalidate_page(pmap, pv->pv_va); 1885 1886 TAILQ_REMOVE(&m->md.pv_list, pv, pv_list); 1887 pmap_unuse_pt(pmap, pv->pv_va, *pde); 1888 free_pv_entry(pmap, pv); 1889 PMAP_UNLOCK(pmap); 1890 } 1891 1892 vm_page_aflag_clear(m, PGA_WRITEABLE); 1893 m->md.pv_flags &= ~PV_TABLE_REF; 1894 rw_wunlock(&pvh_global_lock); 1895} 1896 1897/* 1898 * Set the physical protection on the 1899 * specified range of this map as requested. 
1900 */ 1901void 1902pmap_protect(pmap_t pmap, vm_offset_t sva, vm_offset_t eva, vm_prot_t prot) 1903{ 1904 pt_entry_t pbits, *pte; 1905 pd_entry_t *pde, *pdpe; 1906 vm_offset_t va, va_next; 1907 vm_paddr_t pa; 1908 vm_page_t m; 1909 1910 if ((prot & VM_PROT_READ) == VM_PROT_NONE) { 1911 pmap_remove(pmap, sva, eva); 1912 return; 1913 } 1914 if (prot & VM_PROT_WRITE) 1915 return; 1916 1917 rw_wlock(&pvh_global_lock); 1918 PMAP_LOCK(pmap); 1919 for (; sva < eva; sva = va_next) { 1920 pdpe = pmap_segmap(pmap, sva); 1921#ifdef __mips_n64 1922 if (*pdpe == 0) { 1923 va_next = (sva + NBSEG) & ~SEGMASK; 1924 if (va_next < sva) 1925 va_next = eva; 1926 continue; 1927 } 1928#endif 1929 va_next = (sva + NBPDR) & ~PDRMASK; 1930 if (va_next < sva) 1931 va_next = eva; 1932 1933 pde = pmap_pdpe_to_pde(pdpe, sva); 1934 if (*pde == NULL) 1935 continue; 1936 1937 /* 1938 * Limit our scan to either the end of the va represented 1939 * by the current page table page, or to the end of the 1940 * range being write protected. 1941 */ 1942 if (va_next > eva) 1943 va_next = eva; 1944 1945 va = va_next; 1946 for (pte = pmap_pde_to_pte(pde, sva); sva != va_next; pte++, 1947 sva += PAGE_SIZE) { 1948 pbits = *pte; 1949 if (!pte_test(&pbits, PTE_V) || pte_test(&pbits, 1950 PTE_RO)) { 1951 if (va != va_next) { 1952 pmap_invalidate_range(pmap, va, sva); 1953 va = va_next; 1954 } 1955 continue; 1956 } 1957 pte_set(&pbits, PTE_RO); 1958 if (pte_test(&pbits, PTE_D)) { 1959 pte_clear(&pbits, PTE_D); 1960 if (pte_test(&pbits, PTE_MANAGED)) { 1961 pa = TLBLO_PTE_TO_PA(pbits); 1962 m = PHYS_TO_VM_PAGE(pa); 1963 vm_page_dirty(m); 1964 } 1965 if (va == va_next) 1966 va = sva; 1967 } else { 1968 /* 1969 * Unless PTE_D is set, any TLB entries 1970 * mapping "sva" don't allow write access, so 1971 * they needn't be invalidated. 1972 */ 1973 if (va != va_next) { 1974 pmap_invalidate_range(pmap, va, sva); 1975 va = va_next; 1976 } 1977 } 1978 *pte = pbits; 1979 } 1980 if (va != va_next) 1981 pmap_invalidate_range(pmap, va, sva); 1982 } 1983 rw_wunlock(&pvh_global_lock); 1984 PMAP_UNLOCK(pmap); 1985} 1986 1987/* 1988 * Insert the given physical page (p) at 1989 * the specified virtual address (v) in the 1990 * target physical map with the protection requested. 1991 * 1992 * If specified, the page will be wired down, meaning 1993 * that the related pte can not be reclaimed. 1994 * 1995 * NB: This is the only routine which MAY NOT lazy-evaluate 1996 * or lose information. That is, this routine must actually 1997 * insert this page into the given map NOW. 
1998 */ 1999void 2000pmap_enter(pmap_t pmap, vm_offset_t va, vm_prot_t access, vm_page_t m, 2001 vm_prot_t prot, boolean_t wired) 2002{ 2003 vm_paddr_t pa, opa; 2004 pt_entry_t *pte; 2005 pt_entry_t origpte, newpte; 2006 pv_entry_t pv; 2007 vm_page_t mpte, om; 2008 2009 va &= ~PAGE_MASK; 2010 KASSERT(va <= VM_MAX_KERNEL_ADDRESS, ("pmap_enter: toobig")); 2011 KASSERT((m->oflags & VPO_UNMANAGED) != 0 || va < kmi.clean_sva || 2012 va >= kmi.clean_eva, 2013 ("pmap_enter: managed mapping within the clean submap")); 2014 KASSERT((m->oflags & VPO_UNMANAGED) != 0 || vm_page_xbusied(m), 2015 ("pmap_enter: page %p is not busy", m)); 2016 pa = VM_PAGE_TO_PHYS(m); 2017 newpte = TLBLO_PA_TO_PFN(pa) | init_pte_prot(m, access, prot); 2018 if (wired) 2019 newpte |= PTE_W; 2020 if (is_kernel_pmap(pmap)) 2021 newpte |= PTE_G; 2022 if (is_cacheable_mem(pa)) 2023 newpte |= PTE_C_CACHE; 2024 else 2025 newpte |= PTE_C_UNCACHED; 2026 2027 mpte = NULL; 2028 2029 rw_wlock(&pvh_global_lock); 2030 PMAP_LOCK(pmap); 2031 2032 /* 2033 * In the case that a page table page is not resident, we are 2034 * creating it here. 2035 */ 2036 if (va < VM_MAXUSER_ADDRESS) { 2037 mpte = pmap_allocpte(pmap, va, M_WAITOK); 2038 } 2039 pte = pmap_pte(pmap, va); 2040 2041 /* 2042 * Page Directory table entry not valid, we need a new PT page 2043 */ 2044 if (pte == NULL) { 2045 panic("pmap_enter: invalid page directory, pdir=%p, va=%p", 2046 (void *)pmap->pm_segtab, (void *)va); 2047 } 2048 om = NULL; 2049 origpte = *pte; 2050 opa = TLBLO_PTE_TO_PA(origpte); 2051 2052 /* 2053 * Mapping has not changed, must be protection or wiring change. 2054 */ 2055 if (pte_test(&origpte, PTE_V) && opa == pa) { 2056 /* 2057 * Wiring change, just update stats. We don't worry about 2058 * wiring PT pages as they remain resident as long as there 2059 * are valid mappings in them. Hence, if a user page is 2060 * wired, the PT page will be also. 2061 */ 2062 if (wired && !pte_test(&origpte, PTE_W)) 2063 pmap->pm_stats.wired_count++; 2064 else if (!wired && pte_test(&origpte, PTE_W)) 2065 pmap->pm_stats.wired_count--; 2066 2067 KASSERT(!pte_test(&origpte, PTE_D | PTE_RO), 2068 ("%s: modified page not writable: va: %p, pte: %#jx", 2069 __func__, (void *)va, (uintmax_t)origpte)); 2070 2071 /* 2072 * Remove extra pte reference 2073 */ 2074 if (mpte) 2075 mpte->wire_count--; 2076 2077 if (pte_test(&origpte, PTE_MANAGED)) { 2078 m->md.pv_flags |= PV_TABLE_REF; 2079 om = m; 2080 newpte |= PTE_MANAGED; 2081 if (!pte_test(&newpte, PTE_RO)) 2082 vm_page_aflag_set(m, PGA_WRITEABLE); 2083 } 2084 goto validate; 2085 } 2086 2087 pv = NULL; 2088 2089 /* 2090 * Mapping has changed, invalidate old range and fall through to 2091 * handle validating new mapping. 2092 */ 2093 if (opa) { 2094 if (pte_test(&origpte, PTE_W)) 2095 pmap->pm_stats.wired_count--; 2096 2097 if (pte_test(&origpte, PTE_MANAGED)) { 2098 om = PHYS_TO_VM_PAGE(opa); 2099 pv = pmap_pvh_remove(&om->md, pmap, va); 2100 } 2101 if (mpte != NULL) { 2102 mpte->wire_count--; 2103 KASSERT(mpte->wire_count > 0, 2104 ("pmap_enter: missing reference to page table page," 2105 " va: %p", (void *)va)); 2106 } 2107 } else 2108 pmap->pm_stats.resident_count++; 2109 2110 /* 2111 * Enter on the PV list if part of our managed memory. 
2112 */ 2113 if ((m->oflags & VPO_UNMANAGED) == 0) { 2114 m->md.pv_flags |= PV_TABLE_REF; 2115 if (pv == NULL) 2116 pv = get_pv_entry(pmap, FALSE); 2117 pv->pv_va = va; 2118 TAILQ_INSERT_TAIL(&m->md.pv_list, pv, pv_list); 2119 newpte |= PTE_MANAGED; 2120 if (!pte_test(&newpte, PTE_RO)) 2121 vm_page_aflag_set(m, PGA_WRITEABLE); 2122 } else if (pv != NULL) 2123 free_pv_entry(pmap, pv); 2124 2125 /* 2126 * Increment counters 2127 */ 2128 if (wired) 2129 pmap->pm_stats.wired_count++; 2130 2131validate: 2132 2133#ifdef PMAP_DEBUG 2134 printf("pmap_enter: va: %p -> pa: %p\n", (void *)va, (void *)pa); 2135#endif 2136 2137 /* 2138 * if the mapping or permission bits are different, we need to 2139 * update the pte. 2140 */ 2141 if (origpte != newpte) { 2142 *pte = newpte; 2143 if (pte_test(&origpte, PTE_V)) { 2144 if (pte_test(&origpte, PTE_MANAGED) && opa != pa) { 2145 if (om->md.pv_flags & PV_TABLE_REF) 2146 vm_page_aflag_set(om, PGA_REFERENCED); 2147 om->md.pv_flags &= ~PV_TABLE_REF; 2148 } 2149 if (pte_test(&origpte, PTE_D)) { 2150 KASSERT(!pte_test(&origpte, PTE_RO), 2151 ("pmap_enter: modified page not writable:" 2152 " va: %p, pte: %#jx", (void *)va, (uintmax_t)origpte)); 2153 if (pte_test(&origpte, PTE_MANAGED)) 2154 vm_page_dirty(om); 2155 } 2156 if (pte_test(&origpte, PTE_MANAGED) && 2157 TAILQ_EMPTY(&om->md.pv_list)) 2158 vm_page_aflag_clear(om, PGA_WRITEABLE); 2159 pmap_update_page(pmap, va, newpte); 2160 } 2161 } 2162 2163 /* 2164 * Sync I & D caches for executable pages. Do this only if the 2165 * target pmap belongs to the current process. Otherwise, an 2166 * unresolvable TLB miss may occur. 2167 */ 2168 if (!is_kernel_pmap(pmap) && (pmap == &curproc->p_vmspace->vm_pmap) && 2169 (prot & VM_PROT_EXECUTE)) { 2170 mips_icache_sync_range(va, PAGE_SIZE); 2171 mips_dcache_wbinv_range(va, PAGE_SIZE); 2172 } 2173 rw_wunlock(&pvh_global_lock); 2174 PMAP_UNLOCK(pmap); 2175} 2176 2177/* 2178 * this code makes some *MAJOR* assumptions: 2179 * 1. Current pmap & pmap exists. 2180 * 2. Not wired. 2181 * 3. Read access. 2182 * 4. No page table pages. 2183 * but is *MUCH* faster than pmap_enter... 2184 */ 2185 2186void 2187pmap_enter_quick(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot) 2188{ 2189 2190 rw_wlock(&pvh_global_lock); 2191 PMAP_LOCK(pmap); 2192 (void)pmap_enter_quick_locked(pmap, va, m, prot, NULL); 2193 rw_wunlock(&pvh_global_lock); 2194 PMAP_UNLOCK(pmap); 2195} 2196 2197static vm_page_t 2198pmap_enter_quick_locked(pmap_t pmap, vm_offset_t va, vm_page_t m, 2199 vm_prot_t prot, vm_page_t mpte) 2200{ 2201 pt_entry_t *pte; 2202 vm_paddr_t pa; 2203 2204 KASSERT(va < kmi.clean_sva || va >= kmi.clean_eva || 2205 (m->oflags & VPO_UNMANAGED) != 0, 2206 ("pmap_enter_quick_locked: managed mapping within the clean submap")); 2207 rw_assert(&pvh_global_lock, RA_WLOCKED); 2208 PMAP_LOCK_ASSERT(pmap, MA_OWNED); 2209 2210 /* 2211 * In the case that a page table page is not resident, we are 2212 * creating it here. 2213 */ 2214 if (va < VM_MAXUSER_ADDRESS) { 2215 pd_entry_t *pde; 2216 unsigned ptepindex; 2217 2218 /* 2219 * Calculate pagetable page index 2220 */ 2221 ptepindex = pmap_pde_pindex(va); 2222 if (mpte && (mpte->pindex == ptepindex)) { 2223 mpte->wire_count++; 2224 } else { 2225 /* 2226 * Get the page directory entry 2227 */ 2228 pde = pmap_pde(pmap, va); 2229 2230 /* 2231 * If the page table page is mapped, we just 2232 * increment the hold count, and activate it. 
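 *
 * (The wire_count of a page table page tracks how many mappings reference
 * it; pmap_unwire_ptp(), used further down, drops a reference again and
 * releases the page table page once no mappings remain.)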
2233 */ 2234 if (pde && *pde != 0) { 2235 mpte = PHYS_TO_VM_PAGE( 2236 MIPS_DIRECT_TO_PHYS(*pde)); 2237 mpte->wire_count++; 2238 } else { 2239 mpte = _pmap_allocpte(pmap, ptepindex, 2240 M_NOWAIT); 2241 if (mpte == NULL) 2242 return (mpte); 2243 } 2244 } 2245 } else { 2246 mpte = NULL; 2247 } 2248 2249 pte = pmap_pte(pmap, va); 2250 if (pte_test(pte, PTE_V)) { 2251 if (mpte != NULL) { 2252 mpte->wire_count--; 2253 mpte = NULL; 2254 } 2255 return (mpte); 2256 } 2257 2258 /* 2259 * Enter on the PV list if part of our managed memory. 2260 */ 2261 if ((m->oflags & VPO_UNMANAGED) == 0 && 2262 !pmap_try_insert_pv_entry(pmap, mpte, va, m)) { 2263 if (mpte != NULL) { 2264 pmap_unwire_ptp(pmap, va, mpte); 2265 mpte = NULL; 2266 } 2267 return (mpte); 2268 } 2269 2270 /* 2271 * Increment counters 2272 */ 2273 pmap->pm_stats.resident_count++; 2274 2275 pa = VM_PAGE_TO_PHYS(m); 2276 2277 /* 2278 * Now validate mapping with RO protection 2279 */ 2280 *pte = PTE_RO | TLBLO_PA_TO_PFN(pa) | PTE_V; 2281 if ((m->oflags & VPO_UNMANAGED) == 0) 2282 *pte |= PTE_MANAGED; 2283 2284 if (is_cacheable_mem(pa)) 2285 *pte |= PTE_C_CACHE; 2286 else 2287 *pte |= PTE_C_UNCACHED; 2288 2289 if (is_kernel_pmap(pmap)) 2290 *pte |= PTE_G; 2291 else { 2292 /* 2293 * Sync I & D caches. Do this only if the target pmap 2294 * belongs to the current process. Otherwise, an 2295 * unresolvable TLB miss may occur. */ 2296 if (pmap == &curproc->p_vmspace->vm_pmap) { 2297 va &= ~PAGE_MASK; 2298 mips_icache_sync_range(va, PAGE_SIZE); 2299 mips_dcache_wbinv_range(va, PAGE_SIZE); 2300 } 2301 } 2302 return (mpte); 2303} 2304 2305/* 2306 * Make a temporary mapping for a physical address. This is only intended 2307 * to be used for panic dumps. 2308 * 2309 * Use XKPHYS for 64 bit, and KSEG0 where possible for 32 bit. 2310 */ 2311void * 2312pmap_kenter_temporary(vm_paddr_t pa, int i) 2313{ 2314 vm_offset_t va; 2315 2316 if (i != 0) 2317 printf("%s: ERROR!!! More than one page of virtual address mapping not supported\n", 2318 __func__); 2319 2320 if (MIPS_DIRECT_MAPPABLE(pa)) { 2321 va = MIPS_PHYS_TO_DIRECT(pa); 2322 } else { 2323#ifndef __mips_n64 /* XXX : to be converted to new style */ 2324 int cpu; 2325 register_t intr; 2326 struct local_sysmaps *sysm; 2327 pt_entry_t *pte, npte; 2328 2329 /* If this is used other than for dumps, we may need to leave 2330 * interrupts disasbled on return. 
If crash dumps don't work when 2331 * we get to this point, we might want to consider this (leaving things 2332 * disabled as a starting point ;-) 2333 */ 2334 intr = intr_disable(); 2335 cpu = PCPU_GET(cpuid); 2336 sysm = &sysmap_lmem[cpu]; 2337 /* Since this is for the debugger, no locks or any other fun */ 2338 npte = TLBLO_PA_TO_PFN(pa) | PTE_C_CACHE | PTE_D | PTE_V | 2339 PTE_G; 2340 pte = pmap_pte(kernel_pmap, sysm->base); 2341 *pte = npte; 2342 sysm->valid1 = 1; 2343 pmap_update_page(kernel_pmap, sysm->base, npte); 2344 va = sysm->base; 2345 intr_restore(intr); 2346#endif 2347 } 2348 return ((void *)va); 2349} 2350 2351void 2352pmap_kenter_temporary_free(vm_paddr_t pa) 2353{ 2354#ifndef __mips_n64 /* XXX : to be converted to new style */ 2355 int cpu; 2356 register_t intr; 2357 struct local_sysmaps *sysm; 2358#endif 2359 2360 if (MIPS_DIRECT_MAPPABLE(pa)) { 2361 /* nothing to do for this case */ 2362 return; 2363 } 2364#ifndef __mips_n64 /* XXX : to be converted to new style */ 2365 cpu = PCPU_GET(cpuid); 2366 sysm = &sysmap_lmem[cpu]; 2367 if (sysm->valid1) { 2368 pt_entry_t *pte; 2369 2370 intr = intr_disable(); 2371 pte = pmap_pte(kernel_pmap, sysm->base); 2372 *pte = PTE_G; 2373 pmap_invalidate_page(kernel_pmap, sysm->base); 2374 intr_restore(intr); 2375 sysm->valid1 = 0; 2376 } 2377#endif 2378} 2379 2380/* 2381 * Maps a sequence of resident pages belonging to the same object. 2382 * The sequence begins with the given page m_start. This page is 2383 * mapped at the given virtual address start. Each subsequent page is 2384 * mapped at a virtual address that is offset from start by the same 2385 * amount as the page is offset from m_start within the object. The 2386 * last page in the sequence is the page with the largest offset from 2387 * m_start that can be mapped at a virtual address less than the given 2388 * virtual address end. Not every virtual page between start and end 2389 * is mapped; only those for which a resident page exists with the 2390 * corresponding offset from m_start are mapped. 2391 */ 2392void 2393pmap_enter_object(pmap_t pmap, vm_offset_t start, vm_offset_t end, 2394 vm_page_t m_start, vm_prot_t prot) 2395{ 2396 vm_page_t m, mpte; 2397 vm_pindex_t diff, psize; 2398 2399 VM_OBJECT_ASSERT_LOCKED(m_start->object); 2400 2401 psize = atop(end - start); 2402 mpte = NULL; 2403 m = m_start; 2404 rw_wlock(&pvh_global_lock); 2405 PMAP_LOCK(pmap); 2406 while (m != NULL && (diff = m->pindex - m_start->pindex) < psize) { 2407 mpte = pmap_enter_quick_locked(pmap, start + ptoa(diff), m, 2408 prot, mpte); 2409 m = TAILQ_NEXT(m, listq); 2410 } 2411 rw_wunlock(&pvh_global_lock); 2412 PMAP_UNLOCK(pmap); 2413} 2414 2415/* 2416 * pmap_object_init_pt preloads the ptes for a given object 2417 * into the specified pmap. This eliminates the blast of soft 2418 * faults on process startup and immediately after an mmap. 2419 */ 2420void 2421pmap_object_init_pt(pmap_t pmap, vm_offset_t addr, 2422 vm_object_t object, vm_pindex_t pindex, vm_size_t size) 2423{ 2424 VM_OBJECT_ASSERT_WLOCKED(object); 2425 KASSERT(object->type == OBJT_DEVICE || object->type == OBJT_SG, 2426 ("pmap_object_init_pt: non-device object")); 2427} 2428 2429/* 2430 * Routine: pmap_change_wiring 2431 * Function: Change the wiring attribute for a map/virtual-address 2432 * pair. 2433 * In/out conditions: 2434 * The mapping must already exist in the pmap. 
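 *
 *	A minimal calling sketch (illustrative only, in the style of the
 *	wiring done from vm_fault): once the data is resident, each page
 *	of the wired range is marked with
 *
 *		pmap_change_wiring(pmap, va, TRUE);
 *
 *	and later released again with the same call and FALSE.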
2435 */ 2436void 2437pmap_change_wiring(pmap_t pmap, vm_offset_t va, boolean_t wired) 2438{ 2439 pt_entry_t *pte; 2440 2441 PMAP_LOCK(pmap); 2442 pte = pmap_pte(pmap, va); 2443 2444 if (wired && !pte_test(pte, PTE_W)) 2445 pmap->pm_stats.wired_count++; 2446 else if (!wired && pte_test(pte, PTE_W)) 2447 pmap->pm_stats.wired_count--; 2448 2449 /* 2450 * Wiring is not a hardware characteristic so there is no need to 2451 * invalidate TLB. 2452 */ 2453 if (wired) 2454 pte_set(pte, PTE_W); 2455 else 2456 pte_clear(pte, PTE_W); 2457 PMAP_UNLOCK(pmap); 2458} 2459 2460/* 2461 * Copy the range specified by src_addr/len 2462 * from the source map to the range dst_addr/len 2463 * in the destination map. 2464 * 2465 * This routine is only advisory and need not do anything. 2466 */ 2467 2468void 2469pmap_copy(pmap_t dst_pmap, pmap_t src_pmap, vm_offset_t dst_addr, 2470 vm_size_t len, vm_offset_t src_addr) 2471{ 2472} 2473 2474/* 2475 * pmap_zero_page zeros the specified hardware page by mapping 2476 * the page into KVM and using bzero to clear its contents. 2477 * 2478 * Use XKPHYS for 64 bit, and KSEG0 where possible for 32 bit. 2479 */ 2480void 2481pmap_zero_page(vm_page_t m) 2482{ 2483 vm_offset_t va; 2484 vm_paddr_t phys = VM_PAGE_TO_PHYS(m); 2485 2486 if (MIPS_DIRECT_MAPPABLE(phys)) { 2487 va = MIPS_PHYS_TO_DIRECT(phys); 2488 bzero((caddr_t)va, PAGE_SIZE); 2489 mips_dcache_wbinv_range(va, PAGE_SIZE); 2490 } else { 2491 va = pmap_lmem_map1(phys); 2492 bzero((caddr_t)va, PAGE_SIZE); 2493 mips_dcache_wbinv_range(va, PAGE_SIZE); 2494 pmap_lmem_unmap(); 2495 } 2496} 2497 2498/* 2499 * pmap_zero_page_area zeros the specified hardware page by mapping 2500 * the page into KVM and using bzero to clear its contents. 2501 * 2502 * off and size may not cover an area beyond a single hardware page. 2503 */ 2504void 2505pmap_zero_page_area(vm_page_t m, int off, int size) 2506{ 2507 vm_offset_t va; 2508 vm_paddr_t phys = VM_PAGE_TO_PHYS(m); 2509 2510 if (MIPS_DIRECT_MAPPABLE(phys)) { 2511 va = MIPS_PHYS_TO_DIRECT(phys); 2512 bzero((char *)(caddr_t)va + off, size); 2513 mips_dcache_wbinv_range(va + off, size); 2514 } else { 2515 va = pmap_lmem_map1(phys); 2516 bzero((char *)va + off, size); 2517 mips_dcache_wbinv_range(va + off, size); 2518 pmap_lmem_unmap(); 2519 } 2520} 2521 2522void 2523pmap_zero_page_idle(vm_page_t m) 2524{ 2525 vm_offset_t va; 2526 vm_paddr_t phys = VM_PAGE_TO_PHYS(m); 2527 2528 if (MIPS_DIRECT_MAPPABLE(phys)) { 2529 va = MIPS_PHYS_TO_DIRECT(phys); 2530 bzero((caddr_t)va, PAGE_SIZE); 2531 mips_dcache_wbinv_range(va, PAGE_SIZE); 2532 } else { 2533 va = pmap_lmem_map1(phys); 2534 bzero((caddr_t)va, PAGE_SIZE); 2535 mips_dcache_wbinv_range(va, PAGE_SIZE); 2536 pmap_lmem_unmap(); 2537 } 2538} 2539 2540/* 2541 * pmap_copy_page copies the specified (machine independent) 2542 * page by mapping the page into virtual memory and using 2543 * bcopy to copy the page, one machine dependent page at a 2544 * time. 2545 * 2546 * Use XKPHYS for 64 bit, and KSEG0 where possible for 32 bit. 
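 *
 * For direct-mappable pages the translation is pure arithmetic; the fast
 * path below boils down to (sketch of the code that follows, cache
 * maintenance omitted):
 *
 *	va_src = MIPS_PHYS_TO_DIRECT(VM_PAGE_TO_PHYS(src));
 *	va_dst = MIPS_PHYS_TO_DIRECT(VM_PAGE_TO_PHYS(dst));
 *	bcopy((caddr_t)va_src, (caddr_t)va_dst, PAGE_SIZE);
 *
 * Only when a page lies outside the direct map is a temporary window set
 * up with pmap_lmem_map2().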
2547 */ 2548void 2549pmap_copy_page(vm_page_t src, vm_page_t dst) 2550{ 2551 vm_offset_t va_src, va_dst; 2552 vm_paddr_t phys_src = VM_PAGE_TO_PHYS(src); 2553 vm_paddr_t phys_dst = VM_PAGE_TO_PHYS(dst); 2554 2555 if (MIPS_DIRECT_MAPPABLE(phys_src) && MIPS_DIRECT_MAPPABLE(phys_dst)) { 2556 /* easy case, all can be accessed via KSEG0 */ 2557 /* 2558 * Flush all caches for VA that are mapped to this page 2559 * to make sure that data in SDRAM is up to date 2560 */ 2561 pmap_flush_pvcache(src); 2562 mips_dcache_wbinv_range_index( 2563 MIPS_PHYS_TO_DIRECT(phys_dst), PAGE_SIZE); 2564 va_src = MIPS_PHYS_TO_DIRECT(phys_src); 2565 va_dst = MIPS_PHYS_TO_DIRECT(phys_dst); 2566 bcopy((caddr_t)va_src, (caddr_t)va_dst, PAGE_SIZE); 2567 mips_dcache_wbinv_range(va_dst, PAGE_SIZE); 2568 } else { 2569 va_src = pmap_lmem_map2(phys_src, phys_dst); 2570 va_dst = va_src + PAGE_SIZE; 2571 bcopy((void *)va_src, (void *)va_dst, PAGE_SIZE); 2572 mips_dcache_wbinv_range(va_dst, PAGE_SIZE); 2573 pmap_lmem_unmap(); 2574 } 2575} 2576 2577int unmapped_buf_allowed; 2578 2579void 2580pmap_copy_pages(vm_page_t ma[], vm_offset_t a_offset, vm_page_t mb[], 2581 vm_offset_t b_offset, int xfersize) 2582{ 2583 char *a_cp, *b_cp; 2584 vm_page_t a_m, b_m; 2585 vm_offset_t a_pg_offset, b_pg_offset; 2586 vm_paddr_t a_phys, b_phys; 2587 int cnt; 2588 2589 while (xfersize > 0) { 2590 a_pg_offset = a_offset & PAGE_MASK; 2591 cnt = min(xfersize, PAGE_SIZE - a_pg_offset); 2592 a_m = ma[a_offset >> PAGE_SHIFT]; 2593 a_phys = VM_PAGE_TO_PHYS(a_m); 2594 b_pg_offset = b_offset & PAGE_MASK; 2595 cnt = min(cnt, PAGE_SIZE - b_pg_offset); 2596 b_m = mb[b_offset >> PAGE_SHIFT]; 2597 b_phys = VM_PAGE_TO_PHYS(b_m); 2598 if (MIPS_DIRECT_MAPPABLE(a_phys) && 2599 MIPS_DIRECT_MAPPABLE(b_phys)) { 2600 pmap_flush_pvcache(a_m); 2601 mips_dcache_wbinv_range_index( 2602 MIPS_PHYS_TO_DIRECT(b_phys), PAGE_SIZE); 2603 a_cp = (char *)MIPS_PHYS_TO_DIRECT(a_phys) + 2604 a_pg_offset; 2605 b_cp = (char *)MIPS_PHYS_TO_DIRECT(b_phys) + 2606 b_pg_offset; 2607 bcopy(a_cp, b_cp, cnt); 2608 mips_dcache_wbinv_range((vm_offset_t)b_cp, cnt); 2609 } else { 2610 a_cp = (char *)pmap_lmem_map2(a_phys, b_phys); 2611 b_cp = (char *)a_cp + PAGE_SIZE; 2612 a_cp += a_pg_offset; 2613 b_cp += b_pg_offset; 2614 bcopy(a_cp, b_cp, cnt); 2615 mips_dcache_wbinv_range((vm_offset_t)b_cp, cnt); 2616 pmap_lmem_unmap(); 2617 } 2618 a_offset += cnt; 2619 b_offset += cnt; 2620 xfersize -= cnt; 2621 } 2622} 2623 2624/* 2625 * Returns true if the pmap's pv is one of the first 2626 * 16 pvs linked to from this page. This count may 2627 * be changed upwards or downwards in the future; it 2628 * is only necessary that true be returned for a small 2629 * subset of pmaps for proper page aging. 2630 */ 2631boolean_t 2632pmap_page_exists_quick(pmap_t pmap, vm_page_t m) 2633{ 2634 pv_entry_t pv; 2635 int loops = 0; 2636 boolean_t rv; 2637 2638 KASSERT((m->oflags & VPO_UNMANAGED) == 0, 2639 ("pmap_page_exists_quick: page %p is not managed", m)); 2640 rv = FALSE; 2641 rw_wlock(&pvh_global_lock); 2642 TAILQ_FOREACH(pv, &m->md.pv_list, pv_list) { 2643 if (PV_PMAP(pv) == pmap) { 2644 rv = TRUE; 2645 break; 2646 } 2647 loops++; 2648 if (loops >= 16) 2649 break; 2650 } 2651 rw_wunlock(&pvh_global_lock); 2652 return (rv); 2653} 2654 2655/* 2656 * Remove all pages from specified address space 2657 * this aids process exit speeds. Also, this code 2658 * is special cased for current process only, but 2659 * can have the more generic (and slightly slower) 2660 * mode enabled. 
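 * (It is normally reached from the vmspace teardown path at process exit,
 * shortly before the pmap itself is released.)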
This is much faster than pmap_remove 2661 * in the case of running down an entire address space. 2662 */ 2663void 2664pmap_remove_pages(pmap_t pmap) 2665{ 2666 pd_entry_t *pde; 2667 pt_entry_t *pte, tpte; 2668 pv_entry_t pv; 2669 vm_page_t m; 2670 struct pv_chunk *pc, *npc; 2671 u_long inuse, bitmask; 2672 int allfree, bit, field, idx; 2673 2674 if (pmap != vmspace_pmap(curthread->td_proc->p_vmspace)) { 2675 printf("warning: pmap_remove_pages called with non-current pmap\n"); 2676 return; 2677 } 2678 rw_wlock(&pvh_global_lock); 2679 PMAP_LOCK(pmap); 2680 TAILQ_FOREACH_SAFE(pc, &pmap->pm_pvchunk, pc_list, npc) { 2681 allfree = 1; 2682 for (field = 0; field < _NPCM; field++) { 2683 inuse = ~pc->pc_map[field] & pc_freemask[field]; 2684 while (inuse != 0) { 2685 bit = ffsl(inuse) - 1; 2686 bitmask = 1UL << bit; 2687 idx = field * sizeof(inuse) * NBBY + bit; 2688 pv = &pc->pc_pventry[idx]; 2689 inuse &= ~bitmask; 2690 2691 pde = pmap_pde(pmap, pv->pv_va); 2692 KASSERT(pde != NULL && *pde != 0, 2693 ("pmap_remove_pages: pde")); 2694 pte = pmap_pde_to_pte(pde, pv->pv_va); 2695 if (!pte_test(pte, PTE_V)) 2696 panic("pmap_remove_pages: bad pte"); 2697 tpte = *pte; 2698 2699/* 2700 * We cannot remove wired pages from a process' mapping at this time 2701 */ 2702 if (pte_test(&tpte, PTE_W)) { 2703 allfree = 0; 2704 continue; 2705 } 2706 *pte = is_kernel_pmap(pmap) ? PTE_G : 0; 2707 2708 m = PHYS_TO_VM_PAGE(TLBLO_PTE_TO_PA(tpte)); 2709 KASSERT(m != NULL, 2710 ("pmap_remove_pages: bad tpte %#jx", 2711 (uintmax_t)tpte)); 2712 2713 /* 2714 * Update the vm_page_t clean and reference bits. 2715 */ 2716 if (pte_test(&tpte, PTE_D)) 2717 vm_page_dirty(m); 2718 2719 /* Mark free */ 2720 PV_STAT(pv_entry_frees++); 2721 PV_STAT(pv_entry_spare++); 2722 pv_entry_count--; 2723 pc->pc_map[field] |= bitmask; 2724 pmap->pm_stats.resident_count--; 2725 TAILQ_REMOVE(&m->md.pv_list, pv, pv_list); 2726 if (TAILQ_EMPTY(&m->md.pv_list)) 2727 vm_page_aflag_clear(m, PGA_WRITEABLE); 2728 pmap_unuse_pt(pmap, pv->pv_va, *pde); 2729 } 2730 } 2731 if (allfree) { 2732 TAILQ_REMOVE(&pmap->pm_pvchunk, pc, pc_list); 2733 free_pv_chunk(pc); 2734 } 2735 } 2736 pmap_invalidate_all(pmap); 2737 PMAP_UNLOCK(pmap); 2738 rw_wunlock(&pvh_global_lock); 2739} 2740 2741/* 2742 * pmap_testbit tests bits in pte's 2743 */ 2744static boolean_t 2745pmap_testbit(vm_page_t m, int bit) 2746{ 2747 pv_entry_t pv; 2748 pmap_t pmap; 2749 pt_entry_t *pte; 2750 boolean_t rv = FALSE; 2751 2752 if (m->oflags & VPO_UNMANAGED) 2753 return (rv); 2754 2755 rw_assert(&pvh_global_lock, RA_WLOCKED); 2756 TAILQ_FOREACH(pv, &m->md.pv_list, pv_list) { 2757 pmap = PV_PMAP(pv); 2758 PMAP_LOCK(pmap); 2759 pte = pmap_pte(pmap, pv->pv_va); 2760 rv = pte_test(pte, bit); 2761 PMAP_UNLOCK(pmap); 2762 if (rv) 2763 break; 2764 } 2765 return (rv); 2766} 2767 2768/* 2769 * pmap_page_wired_mappings: 2770 * 2771 * Return the number of managed mappings to the given physical page 2772 * that are wired. 
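 *
 * A usage sketch (the caller shown is hypothetical, only the interface is
 * real): code that must leave pinned pages alone can check
 *
 *	if (pmap_page_wired_mappings(m) != 0)
 *		return;
 *
 * before tearing down the page's mappings.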
2773 */ 2774int 2775pmap_page_wired_mappings(vm_page_t m) 2776{ 2777 pv_entry_t pv; 2778 pmap_t pmap; 2779 pt_entry_t *pte; 2780 int count; 2781 2782 count = 0; 2783 if ((m->oflags & VPO_UNMANAGED) != 0) 2784 return (count); 2785 rw_wlock(&pvh_global_lock); 2786 TAILQ_FOREACH(pv, &m->md.pv_list, pv_list) { 2787 pmap = PV_PMAP(pv); 2788 PMAP_LOCK(pmap); 2789 pte = pmap_pte(pmap, pv->pv_va); 2790 if (pte_test(pte, PTE_W)) 2791 count++; 2792 PMAP_UNLOCK(pmap); 2793 } 2794 rw_wunlock(&pvh_global_lock); 2795 return (count); 2796} 2797 2798/* 2799 * Clear the write and modified bits in each of the given page's mappings. 2800 */ 2801void 2802pmap_remove_write(vm_page_t m) 2803{ 2804 pmap_t pmap; 2805 pt_entry_t pbits, *pte; 2806 pv_entry_t pv; 2807 2808 KASSERT((m->oflags & VPO_UNMANAGED) == 0, 2809 ("pmap_remove_write: page %p is not managed", m)); 2810 2811 /* 2812 * If the page is not exclusive busied, then PGA_WRITEABLE cannot be 2813 * set by another thread while the object is locked. Thus, 2814 * if PGA_WRITEABLE is clear, no page table entries need updating. 2815 */ 2816 VM_OBJECT_ASSERT_WLOCKED(m->object); 2817 if (!vm_page_xbusied(m) && (m->aflags & PGA_WRITEABLE) == 0) 2818 return; 2819 rw_wlock(&pvh_global_lock); 2820 TAILQ_FOREACH(pv, &m->md.pv_list, pv_list) { 2821 pmap = PV_PMAP(pv); 2822 PMAP_LOCK(pmap); 2823 pte = pmap_pte(pmap, pv->pv_va); 2824 KASSERT(pte != NULL && pte_test(pte, PTE_V), 2825 ("page on pv_list has no pte")); 2826 pbits = *pte; 2827 if (pte_test(&pbits, PTE_D)) { 2828 pte_clear(&pbits, PTE_D); 2829 vm_page_dirty(m); 2830 } 2831 pte_set(&pbits, PTE_RO); 2832 if (pbits != *pte) { 2833 *pte = pbits; 2834 pmap_update_page(pmap, pv->pv_va, pbits); 2835 } 2836 PMAP_UNLOCK(pmap); 2837 } 2838 vm_page_aflag_clear(m, PGA_WRITEABLE); 2839 rw_wunlock(&pvh_global_lock); 2840} 2841 2842/* 2843 * pmap_ts_referenced: 2844 * 2845 * Return the count of reference bits for a page, clearing all of them. 2846 */ 2847int 2848pmap_ts_referenced(vm_page_t m) 2849{ 2850 2851 KASSERT((m->oflags & VPO_UNMANAGED) == 0, 2852 ("pmap_ts_referenced: page %p is not managed", m)); 2853 if (m->md.pv_flags & PV_TABLE_REF) { 2854 rw_wlock(&pvh_global_lock); 2855 m->md.pv_flags &= ~PV_TABLE_REF; 2856 rw_wunlock(&pvh_global_lock); 2857 return (1); 2858 } 2859 return (0); 2860} 2861 2862/* 2863 * pmap_is_modified: 2864 * 2865 * Return whether or not the specified physical page was modified 2866 * in any physical maps. 2867 */ 2868boolean_t 2869pmap_is_modified(vm_page_t m) 2870{ 2871 boolean_t rv; 2872 2873 KASSERT((m->oflags & VPO_UNMANAGED) == 0, 2874 ("pmap_is_modified: page %p is not managed", m)); 2875 2876 /* 2877 * If the page is not exclusive busied, then PGA_WRITEABLE cannot be 2878 * concurrently set while the object is locked. Thus, if PGA_WRITEABLE 2879 * is clear, no PTEs can have PTE_D set. 2880 */ 2881 VM_OBJECT_ASSERT_WLOCKED(m->object); 2882 if (!vm_page_xbusied(m) && (m->aflags & PGA_WRITEABLE) == 0) 2883 return (FALSE); 2884 rw_wlock(&pvh_global_lock); 2885 rv = pmap_testbit(m, PTE_D); 2886 rw_wunlock(&pvh_global_lock); 2887 return (rv); 2888} 2889 2890/* N/C */ 2891 2892/* 2893 * pmap_is_prefaultable: 2894 * 2895 * Return whether or not the specified virtual address is elgible 2896 * for prefault. 
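 *
 * Sketch of the intended use (an illustrative prefault loop in the spirit
 * of vm_fault_prefault()): only addresses that currently have no valid
 * PTE are worth the effort, e.g.
 *
 *	for (addr = start; addr < end; addr += PAGE_SIZE)
 *		if (pmap_is_prefaultable(pmap, addr))
 *			pmap_enter_quick(pmap, addr, m, prot);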
2897 */ 2898boolean_t 2899pmap_is_prefaultable(pmap_t pmap, vm_offset_t addr) 2900{ 2901 pd_entry_t *pde; 2902 pt_entry_t *pte; 2903 boolean_t rv; 2904 2905 rv = FALSE; 2906 PMAP_LOCK(pmap); 2907 pde = pmap_pde(pmap, addr); 2908 if (pde != NULL && *pde != 0) { 2909 pte = pmap_pde_to_pte(pde, addr); 2910 rv = (*pte == 0); 2911 } 2912 PMAP_UNLOCK(pmap); 2913 return (rv); 2914} 2915 2916/* 2917 * This function is advisory. 2918 */ 2919void 2920pmap_advise(pmap_t pmap, vm_offset_t sva, vm_offset_t eva, int advice) 2921{ 2922} 2923 2924/* 2925 * Clear the modify bits on the specified physical page. 2926 */ 2927void 2928pmap_clear_modify(vm_page_t m) 2929{ 2930 pmap_t pmap; 2931 pt_entry_t *pte; 2932 pv_entry_t pv; 2933 2934 KASSERT((m->oflags & VPO_UNMANAGED) == 0, 2935 ("pmap_clear_modify: page %p is not managed", m)); 2936 VM_OBJECT_ASSERT_WLOCKED(m->object); 2937 KASSERT(!vm_page_xbusied(m), 2938 ("pmap_clear_modify: page %p is exclusive busied", m)); 2939 2940 /* 2941 * If the page is not PGA_WRITEABLE, then no PTEs can have PTE_D set. 2942 * If the object containing the page is locked and the page is not 2943 * write busied, then PGA_WRITEABLE cannot be concurrently set. 2944 */ 2945 if ((m->aflags & PGA_WRITEABLE) == 0) 2946 return; 2947 rw_wlock(&pvh_global_lock); 2948 TAILQ_FOREACH(pv, &m->md.pv_list, pv_list) { 2949 pmap = PV_PMAP(pv); 2950 PMAP_LOCK(pmap); 2951 pte = pmap_pte(pmap, pv->pv_va); 2952 if (pte_test(pte, PTE_D)) { 2953 pte_clear(pte, PTE_D); 2954 pmap_update_page(pmap, pv->pv_va, *pte); 2955 } 2956 PMAP_UNLOCK(pmap); 2957 } 2958 rw_wunlock(&pvh_global_lock); 2959} 2960 2961/* 2962 * pmap_is_referenced: 2963 * 2964 * Return whether or not the specified physical page was referenced 2965 * in any physical maps. 2966 */ 2967boolean_t 2968pmap_is_referenced(vm_page_t m) 2969{ 2970 2971 KASSERT((m->oflags & VPO_UNMANAGED) == 0, 2972 ("pmap_is_referenced: page %p is not managed", m)); 2973 return ((m->md.pv_flags & PV_TABLE_REF) != 0); 2974} 2975 2976/* 2977 * pmap_clear_reference: 2978 * 2979 * Clear the reference bit on the specified physical page. 2980 */ 2981void 2982pmap_clear_reference(vm_page_t m) 2983{ 2984 2985 KASSERT((m->oflags & VPO_UNMANAGED) == 0, 2986 ("pmap_clear_reference: page %p is not managed", m)); 2987 rw_wlock(&pvh_global_lock); 2988 if (m->md.pv_flags & PV_TABLE_REF) { 2989 m->md.pv_flags &= ~PV_TABLE_REF; 2990 } 2991 rw_wunlock(&pvh_global_lock); 2992} 2993 2994/* 2995 * Miscellaneous support routines follow 2996 */ 2997 2998/* 2999 * Map a set of physical memory pages into the kernel virtual 3000 * address space. Return a pointer to where it is mapped. This 3001 * routine is intended to be used for mapping device memory, 3002 * NOT real memory. 3003 * 3004 * Use XKPHYS uncached for 64 bit, and KSEG1 where possible for 32 bit. 3005 */ 3006void * 3007pmap_mapdev(vm_paddr_t pa, vm_size_t size) 3008{ 3009 vm_offset_t va, tmpva, offset; 3010 3011 /* 3012 * KSEG1 maps only first 512M of phys address space. For 3013 * pa > 0x20000000 we should make proper mapping * using pmap_kenter. 
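 * (KSEG1 spans 0xa0000000-0xbfffffff and windows physical addresses
 * 0x00000000-0x1fffffff uncached, so a device at pa 0x1fc00000 is directly
 * reachable at 0xbfc00000, whereas anything at or above 0x20000000 has to
 * take the kva_alloc()/pmap_kenter_attr() path below.)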
3014 */ 3015 if (MIPS_DIRECT_MAPPABLE(pa + size - 1)) 3016 return ((void *)MIPS_PHYS_TO_DIRECT_UNCACHED(pa)); 3017 else { 3018 offset = pa & PAGE_MASK; 3019 size = roundup(size + offset, PAGE_SIZE); 3020 3021 va = kva_alloc(size); 3022 if (!va) 3023 panic("pmap_mapdev: Couldn't alloc kernel virtual memory"); 3024 pa = trunc_page(pa); 3025 for (tmpva = va; size > 0;) { 3026 pmap_kenter_attr(tmpva, pa, PTE_C_UNCACHED); 3027 size -= PAGE_SIZE; 3028 tmpva += PAGE_SIZE; 3029 pa += PAGE_SIZE; 3030 } 3031 } 3032 3033 return ((void *)(va + offset)); 3034} 3035 3036void 3037pmap_unmapdev(vm_offset_t va, vm_size_t size) 3038{ 3039#ifndef __mips_n64 3040 vm_offset_t base, offset; 3041 3042 /* If the address is within KSEG1 then there is nothing to do */ 3043 if (va >= MIPS_KSEG1_START && va <= MIPS_KSEG1_END) 3044 return; 3045 3046 base = trunc_page(va); 3047 offset = va & PAGE_MASK; 3048 size = roundup(size + offset, PAGE_SIZE); 3049 kva_free(base, size); 3050#endif 3051} 3052 3053/* 3054 * perform the pmap work for mincore 3055 */ 3056int 3057pmap_mincore(pmap_t pmap, vm_offset_t addr, vm_paddr_t *locked_pa) 3058{ 3059 pt_entry_t *ptep, pte; 3060 vm_paddr_t pa; 3061 vm_page_t m; 3062 int val; 3063 3064 PMAP_LOCK(pmap); 3065retry: 3066 ptep = pmap_pte(pmap, addr); 3067 pte = (ptep != NULL) ? *ptep : 0; 3068 if (!pte_test(&pte, PTE_V)) { 3069 val = 0; 3070 goto out; 3071 } 3072 val = MINCORE_INCORE; 3073 if (pte_test(&pte, PTE_D)) 3074 val |= MINCORE_MODIFIED | MINCORE_MODIFIED_OTHER; 3075 pa = TLBLO_PTE_TO_PA(pte); 3076 if (pte_test(&pte, PTE_MANAGED)) { 3077 /* 3078 * This may falsely report the given address as 3079 * MINCORE_REFERENCED. Unfortunately, due to the lack of 3080 * per-PTE reference information, it is impossible to 3081 * determine if the address is MINCORE_REFERENCED. 3082 */ 3083 m = PHYS_TO_VM_PAGE(pa); 3084 if ((m->aflags & PGA_REFERENCED) != 0) 3085 val |= MINCORE_REFERENCED | MINCORE_REFERENCED_OTHER; 3086 } 3087 if ((val & (MINCORE_MODIFIED_OTHER | MINCORE_REFERENCED_OTHER)) != 3088 (MINCORE_MODIFIED_OTHER | MINCORE_REFERENCED_OTHER) && 3089 pte_test(&pte, PTE_MANAGED)) { 3090 /* Ensure that "PHYS_TO_VM_PAGE(pa)->object" doesn't change. */ 3091 if (vm_page_pa_tryrelock(pmap, pa, locked_pa)) 3092 goto retry; 3093 } else 3094out: 3095 PA_UNLOCK_COND(*locked_pa); 3096 PMAP_UNLOCK(pmap); 3097 return (val); 3098} 3099 3100void 3101pmap_activate(struct thread *td) 3102{ 3103 pmap_t pmap, oldpmap; 3104 struct proc *p = td->td_proc; 3105 u_int cpuid; 3106 3107 critical_enter(); 3108 3109 pmap = vmspace_pmap(p->p_vmspace); 3110 oldpmap = PCPU_GET(curpmap); 3111 cpuid = PCPU_GET(cpuid); 3112 3113 if (oldpmap) 3114 CPU_CLR_ATOMIC(cpuid, &oldpmap->pm_active); 3115 CPU_SET_ATOMIC(cpuid, &pmap->pm_active); 3116 pmap_asid_alloc(pmap); 3117 if (td == curthread) { 3118 PCPU_SET(segbase, pmap->pm_segtab); 3119 mips_wr_entryhi(pmap->pm_asid[cpuid].asid); 3120 } 3121 3122 PCPU_SET(curpmap, pmap); 3123 critical_exit(); 3124} 3125 3126void 3127pmap_sync_icache(pmap_t pm, vm_offset_t va, vm_size_t sz) 3128{ 3129} 3130 3131/* 3132 * Increase the starting virtual address of the given mapping if a 3133 * different alignment might result in more superpage mappings. 
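 *
 * Worked example (illustrative numbers, ignoring page coloring and
 * assuming the 32-bit layout where NBSEG is 4MB): for an 8MB mapping at
 * object offset 0x503000, superpage_offset is 0x103000, so an address
 * hint of 0x20000000 is bumped to 0x20103000.  Object offset 0x800000
 * then lands at 0x20400000, a 4MB boundary, which is what makes a later
 * superpage mapping of that chunk possible.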
3134 */ 3135void 3136pmap_align_superpage(vm_object_t object, vm_ooffset_t offset, 3137 vm_offset_t *addr, vm_size_t size) 3138{ 3139 vm_offset_t superpage_offset; 3140 3141 if (size < NBSEG) 3142 return; 3143 if (object != NULL && (object->flags & OBJ_COLORED) != 0) 3144 offset += ptoa(object->pg_color); 3145 superpage_offset = offset & SEGMASK; 3146 if (size - ((NBSEG - superpage_offset) & SEGMASK) < NBSEG || 3147 (*addr & SEGMASK) == superpage_offset) 3148 return; 3149 if ((*addr & SEGMASK) < superpage_offset) 3150 *addr = (*addr & ~SEGMASK) + superpage_offset; 3151 else 3152 *addr = ((*addr + SEGMASK) & ~SEGMASK) + superpage_offset; 3153} 3154 3155#ifdef DDB 3156DB_SHOW_COMMAND(ptable, ddb_pid_dump) 3157{ 3158 pmap_t pmap; 3159 struct thread *td = NULL; 3160 struct proc *p; 3161 int i, j, k; 3162 vm_paddr_t pa; 3163 vm_offset_t va; 3164 3165 if (have_addr) { 3166 td = db_lookup_thread(addr, TRUE); 3167 if (td == NULL) { 3168 db_printf("Invalid pid or tid"); 3169 return; 3170 } 3171 p = td->td_proc; 3172 if (p->p_vmspace == NULL) { 3173 db_printf("No vmspace for process"); 3174 return; 3175 } 3176 pmap = vmspace_pmap(p->p_vmspace); 3177 } else 3178 pmap = kernel_pmap; 3179 3180 db_printf("pmap:%p segtab:%p asid:%x generation:%x\n", 3181 pmap, pmap->pm_segtab, pmap->pm_asid[0].asid, 3182 pmap->pm_asid[0].gen); 3183 for (i = 0; i < NPDEPG; i++) { 3184 pd_entry_t *pdpe; 3185 pt_entry_t *pde; 3186 pt_entry_t pte; 3187 3188 pdpe = (pd_entry_t *)pmap->pm_segtab[i]; 3189 if (pdpe == NULL) 3190 continue; 3191 db_printf("[%4d] %p\n", i, pdpe); 3192#ifdef __mips_n64 3193 for (j = 0; j < NPDEPG; j++) { 3194 pde = (pt_entry_t *)pdpe[j]; 3195 if (pde == NULL) 3196 continue; 3197 db_printf("\t[%4d] %p\n", j, pde); 3198#else 3199 { 3200 j = 0; 3201 pde = (pt_entry_t *)pdpe; 3202#endif 3203 for (k = 0; k < NPTEPG; k++) { 3204 pte = pde[k]; 3205 if (pte == 0 || !pte_test(&pte, PTE_V)) 3206 continue; 3207 pa = TLBLO_PTE_TO_PA(pte); 3208 va = ((u_long)i << SEGSHIFT) | (j << PDRSHIFT) | (k << PAGE_SHIFT); 3209 db_printf("\t\t[%04d] va: %p pte: %8jx pa:%jx\n", 3210 k, (void *)va, (uintmax_t)pte, (uintmax_t)pa); 3211 } 3212 } 3213 } 3214} 3215#endif 3216 3217#if defined(DEBUG) 3218 3219static void pads(pmap_t pm); 3220void pmap_pvdump(vm_offset_t pa); 3221 3222/* print address space of pmap*/ 3223static void 3224pads(pmap_t pm) 3225{ 3226 unsigned va, i, j; 3227 pt_entry_t *ptep; 3228 3229 if (pm == kernel_pmap) 3230 return; 3231 for (i = 0; i < NPTEPG; i++) 3232 if (pm->pm_segtab[i]) 3233 for (j = 0; j < NPTEPG; j++) { 3234 va = (i << SEGSHIFT) + (j << PAGE_SHIFT); 3235 if (pm == kernel_pmap && va < KERNBASE) 3236 continue; 3237 if (pm != kernel_pmap && 3238 va >= VM_MAXUSER_ADDRESS) 3239 continue; 3240 ptep = pmap_pte(pm, va); 3241 if (pte_test(ptep, PTE_V)) 3242 printf("%x:%x ", va, *(int *)ptep); 3243 } 3244 3245} 3246 3247void 3248pmap_pvdump(vm_offset_t pa) 3249{ 3250 register pv_entry_t pv; 3251 vm_page_t m; 3252 3253 printf("pa %x", pa); 3254 m = PHYS_TO_VM_PAGE(pa); 3255 for (pv = TAILQ_FIRST(&m->md.pv_list); pv; 3256 pv = TAILQ_NEXT(pv, pv_list)) { 3257 printf(" -> pmap %p, va %x", (void *)pv->pv_pmap, pv->pv_va); 3258 pads(pv->pv_pmap); 3259 } 3260 printf(" "); 3261} 3262 3263/* N/C */ 3264#endif 3265 3266 3267/* 3268 * Allocate TLB address space tag (called ASID or TLBPID) and return it. 3269 * It takes almost as much or more time to search the TLB for a 3270 * specific ASID and flush those entries as it does to flush the entire TLB. 
3271 * Therefore, when we allocate a new ASID, we just take the next number. When 3272 * we run out of numbers, we flush the TLB, increment the generation count 3273 * and start over. ASID zero is reserved for kernel use. 3274 */ 3275static void 3276pmap_asid_alloc(pmap) 3277 pmap_t pmap; 3278{ 3279 if (pmap->pm_asid[PCPU_GET(cpuid)].asid != PMAP_ASID_RESERVED && 3280 pmap->pm_asid[PCPU_GET(cpuid)].gen == PCPU_GET(asid_generation)); 3281 else { 3282 if (PCPU_GET(next_asid) == pmap_max_asid) { 3283 tlb_invalidate_all_user(NULL); 3284 PCPU_SET(asid_generation, 3285 (PCPU_GET(asid_generation) + 1) & ASIDGEN_MASK); 3286 if (PCPU_GET(asid_generation) == 0) { 3287 PCPU_SET(asid_generation, 1); 3288 } 3289 PCPU_SET(next_asid, 1); /* 0 means invalid */ 3290 } 3291 pmap->pm_asid[PCPU_GET(cpuid)].asid = PCPU_GET(next_asid); 3292 pmap->pm_asid[PCPU_GET(cpuid)].gen = PCPU_GET(asid_generation); 3293 PCPU_SET(next_asid, PCPU_GET(next_asid) + 1); 3294 } 3295} 3296 3297static pt_entry_t 3298init_pte_prot(vm_page_t m, vm_prot_t access, vm_prot_t prot) 3299{ 3300 pt_entry_t rw; 3301 3302 if (!(prot & VM_PROT_WRITE)) 3303 rw = PTE_V | PTE_RO; 3304 else if ((m->oflags & VPO_UNMANAGED) == 0) { 3305 if ((access & VM_PROT_WRITE) != 0) 3306 rw = PTE_V | PTE_D; 3307 else 3308 rw = PTE_V; 3309 } else 3310 /* Needn't emulate a modified bit for unmanaged pages. */ 3311 rw = PTE_V | PTE_D; 3312 return (rw); 3313} 3314 3315/* 3316 * pmap_emulate_modified: do dirty bit emulation 3317 * 3318 * On SMP, update just the local TLB, other CPUs will update their 3319 * TLBs from PTE lazily, if they get the exception. 3320 * Returns 0 in case of success, 1 if the page is read-only and we 3321 * need to fault. 3322 */ 3323int 3324pmap_emulate_modified(pmap_t pmap, vm_offset_t va) 3325{ 3326 pt_entry_t *pte; 3327 3328 PMAP_LOCK(pmap); 3329 pte = pmap_pte(pmap, va); 3330 if (pte == NULL) 3331 panic("pmap_emulate_modified: can't find PTE"); 3332#ifdef SMP 3333 /* It is possible that some other CPU changed m-bit */ 3334 if (!pte_test(pte, PTE_V) || pte_test(pte, PTE_D)) { 3335 tlb_update(pmap, va, *pte); 3336 PMAP_UNLOCK(pmap); 3337 return (0); 3338 } 3339#else 3340 if (!pte_test(pte, PTE_V) || pte_test(pte, PTE_D)) 3341 panic("pmap_emulate_modified: invalid pte"); 3342#endif 3343 if (pte_test(pte, PTE_RO)) { 3344 PMAP_UNLOCK(pmap); 3345 return (1); 3346 } 3347 pte_set(pte, PTE_D); 3348 tlb_update(pmap, va, *pte); 3349 if (!pte_test(pte, PTE_MANAGED)) 3350 panic("pmap_emulate_modified: unmanaged page"); 3351 PMAP_UNLOCK(pmap); 3352 return (0); 3353} 3354 3355/* 3356 * Routine: pmap_kextract 3357 * Function: 3358 * Extract the physical page address associated with 3359 * the given virtual address. 3360 */ 3361vm_paddr_t 3362pmap_kextract(vm_offset_t va) 3363{ 3364 int mapped; 3365 3366 /* 3367 * First, the direct-mapped regions. 3368 */ 3369#if defined(__mips_n64) 3370 if (va >= MIPS_XKPHYS_START && va < MIPS_XKPHYS_END) 3371 return (MIPS_XKPHYS_TO_PHYS(va)); 3372#endif 3373 if (va >= MIPS_KSEG0_START && va < MIPS_KSEG0_END) 3374 return (MIPS_KSEG0_TO_PHYS(va)); 3375 3376 if (va >= MIPS_KSEG1_START && va < MIPS_KSEG1_END) 3377 return (MIPS_KSEG1_TO_PHYS(va)); 3378 3379 /* 3380 * User virtual addresses.
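 *
 * (For example, with 4K pages a lookup of user va 0x00403a30 fetches the
 * PTE for page 0x00403000 from the current process's pmap, converts the
 * frame number with TLBLO_PTE_TO_PA(), and ORs the byte offset 0xa30 back
 * in to form the physical address.)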
3381 */ 3382 if (va < VM_MAXUSER_ADDRESS) { 3383 pt_entry_t *ptep; 3384 3385 if (curproc && curproc->p_vmspace) { 3386 ptep = pmap_pte(&curproc->p_vmspace->vm_pmap, va); 3387 if (ptep) { 3388 return (TLBLO_PTE_TO_PA(*ptep) | 3389 (va & PAGE_MASK)); 3390 } 3391 return (0); 3392 } 3393 } 3394 3395 /* 3396 * Should be kernel virtual here, otherwise fail 3397 */ 3398 mapped = (va >= MIPS_KSEG2_START || va < MIPS_KSEG2_END); 3399#if defined(__mips_n64) 3400 mapped = mapped || (va >= MIPS_XKSEG_START || va < MIPS_XKSEG_END); 3401#endif 3402 /* 3403 * Kernel virtual. 3404 */ 3405 3406 if (mapped) { 3407 pt_entry_t *ptep; 3408 3409 /* Is the kernel pmap initialized? */ 3410 if (!CPU_EMPTY(&kernel_pmap->pm_active)) { 3411 /* It's inside the virtual address range */ 3412 ptep = pmap_pte(kernel_pmap, va); 3413 if (ptep) { 3414 return (TLBLO_PTE_TO_PA(*ptep) | 3415 (va & PAGE_MASK)); 3416 } 3417 } 3418 return (0); 3419 } 3420 3421 panic("%s for unknown address space %p.", __func__, (void *)va); 3422} 3423 3424 3425void 3426pmap_flush_pvcache(vm_page_t m) 3427{ 3428 pv_entry_t pv; 3429 3430 if (m != NULL) { 3431 for (pv = TAILQ_FIRST(&m->md.pv_list); pv; 3432 pv = TAILQ_NEXT(pv, pv_list)) { 3433 mips_dcache_wbinv_range_index(pv->pv_va, PAGE_SIZE); 3434 } 3435 } 3436} 3437