pmap.c revision 73903
/*
 * Copyright (c) 1991 Regents of the University of California.
 * All rights reserved.
 * Copyright (c) 1994 John S. Dyson
 * All rights reserved.
 * Copyright (c) 1994 David Greenman
 * All rights reserved.
 * Copyright (c) 1998,2000 Doug Rabson
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * the Systems Programming Group of the University of Utah Computer
 * Science Department and William Jolitz of UUNET Technologies Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	from:	@(#)pmap.c	7.7 (Berkeley)	5/12/91
 *	from:	i386 Id: pmap.c,v 1.193 1998/04/19 15:22:48 bde Exp
 *		with some ideas from NetBSD's alpha pmap
 * $FreeBSD: head/sys/ia64/ia64/pmap.c 73903 2001-03-07 01:04:17Z jhb $
 */

/*
 *	Manages physical address maps.
 *
 *	In addition to hardware address maps, this
 *	module is called upon to provide software-use-only
 *	maps which may or may not be stored in the same
 *	form as hardware maps.  These pseudo-maps are
 *	used to store intermediate results from copy
 *	operations to and from address spaces.
 *
 *	Since the information managed by this module is
 *	also stored by the logical address mapping module,
 *	this module may throw away valid virtual-to-physical
 *	mappings at almost any time.  However, invalidations
 *	of virtual-to-physical mappings must be done as
 *	requested.
 *
 *	In order to cope with hardware architectures which
 *	make virtual-to-physical map invalidates expensive,
 *	this module may delay invalidate or reduced protection
 *	operations until such time as they are actually
 *	necessary.  This module is given full information as
 *	to which processors are currently using which maps,
 *	and to when physical maps must be made correct.
 */

/*
 * Following the Linux model, region IDs are allocated in groups of
 * eight so that a single region ID can be used for as many RRs as we
 * want by encoding the RR number into the low bits of the ID.
 *
 * We reserve region ID 0 for the kernel and allocate the remaining
 * IDs for user pmaps.
 *
 * Region 0..4
 *	User virtually mapped
 *
 * Region 5
 *	Kernel virtually mapped
 *
 * Region 6
 *	Kernel physically mapped uncacheable
 *
 * Region 7
 *	Kernel physically mapped cacheable
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/msgbuf.h>
#include <sys/vmmeter.h>
#include <sys/mman.h>
#include <sys/malloc.h>
#include <sys/kernel.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <sys/lock.h>
#include <vm/vm_kern.h>
#include <vm/vm_page.h>
#include <vm/vm_map.h>
#include <vm/vm_object.h>
#include <vm/vm_extern.h>
#include <vm/vm_pageout.h>
#include <vm/vm_pager.h>
#include <vm/vm_zone.h>

#include <sys/user.h>

#include <machine/md_var.h>

MALLOC_DEFINE(M_PMAP, "PMAP", "PMAP Structures");

#ifndef PMAP_SHPGPERPROC
#define PMAP_SHPGPERPROC 200
#endif

#if defined(DIAGNOSTIC)
#define PMAP_DIAGNOSTIC
#endif

#define MINPV 2048

#if 0
#define PMAP_DIAGNOSTIC
#define PMAP_DEBUG
#endif

#if !defined(PMAP_DIAGNOSTIC)
#define PMAP_INLINE __inline
#else
#define PMAP_INLINE
#endif

#if 0

static void
pmap_break(void)
{
}

/* #define PMAP_DEBUG_VA(va) if ((va) == 0x120058000) pmap_break(); else */

#endif

#ifndef PMAP_DEBUG_VA
#define PMAP_DEBUG_VA(va)	do {} while(0)
#endif

/*
 * Get PDEs and PTEs for user/kernel address space
 */
#define pmap_pte_w(pte)		((pte)->pte_ig & PTE_IG_WIRED)
#define pmap_pte_managed(pte)	((pte)->pte_ig & PTE_IG_MANAGED)
#define pmap_pte_v(pte)		((pte)->pte_p)
#define pmap_pte_pa(pte)	(((pte)->pte_ppn) << 12)
#define pmap_pte_prot(pte)	(((pte)->pte_ar << 2) | (pte)->pte_pl)

#define pmap_pte_set_w(pte, v) ((v)?((pte)->pte_ig |= PTE_IG_WIRED) \
				   :((pte)->pte_ig &= ~PTE_IG_WIRED))
#define pmap_pte_set_prot(pte, v) do {		\
	(pte)->pte_ar = v >> 2;			\
	(pte)->pte_pl = v & 3;			\
} while (0)

/*
 * Given a map and a machine independent protection code,
 * convert to an ia64 protection code.
 */
#define pte_prot(m, p)	(protection_codes[m == pmap_kernel() ? 0 : 1][p])
int	protection_codes[2][8];

/*
 * Return non-zero if this pmap is currently active
 */
#define pmap_isactive(pmap)	(pmap->pm_active)

/*
 * Statically allocated kernel pmap
 */
static struct pmap kernel_pmap_store;
pmap_t kernel_pmap;

vm_offset_t avail_start;	/* PA of first available physical page */
vm_offset_t avail_end;		/* PA of last available physical page */
vm_offset_t virtual_avail;	/* VA of first avail page (after kernel bss) */
vm_offset_t virtual_end;	/* VA of last avail page (end of kernel AS) */
static boolean_t pmap_initialized = FALSE;	/* Has pmap_init completed? */

vm_offset_t kernel_vm_end;

/*
 * Values for ptc.e. XXX values for SKI.
 */
static u_int64_t pmap_pte_e_base = 0x100000000;
static u_int64_t pmap_pte_e_count1 = 3;
static u_int64_t pmap_pte_e_count2 = 2;
static u_int64_t pmap_pte_e_stride1 = 0x2000;
static u_int64_t pmap_pte_e_stride2 = 0x100000000;

/*
 * Data for the RID allocator
 */
static int pmap_nextrid;
static int pmap_ridbits = 18;

/*
 * Data for the pv entry allocation mechanism
 */
static vm_zone_t pvzone;
static struct vm_zone pvzone_store;
static struct vm_object pvzone_obj;
static vm_zone_t pvbootzone;
static struct vm_zone pvbootzone_store;
static int pv_entry_count=0, pv_entry_max=0, pv_entry_high_water=0;
static int pmap_pagedaemon_waken = 0;
static struct pv_entry *pvinit;
static struct pv_entry *pvbootinit;

static PMAP_INLINE void	free_pv_entry __P((pv_entry_t pv));
static pv_entry_t get_pv_entry __P((void));
static void	ia64_protection_init __P((void));

static void	pmap_remove_all __P((vm_page_t m));
static void	pmap_enter_quick __P((pmap_t pmap, vm_offset_t va, vm_page_t m));

vm_offset_t
pmap_steal_memory(vm_size_t size)
{
	vm_size_t bank_size;
	vm_offset_t pa, va;

	size = round_page(size);

	bank_size = phys_avail[1] - phys_avail[0];
	while (size > bank_size) {
		int i;
		for (i = 0; phys_avail[i+2]; i+= 2) {
			phys_avail[i] = phys_avail[i+2];
			phys_avail[i+1] = phys_avail[i+3];
		}
		phys_avail[i] = 0;
		phys_avail[i+1] = 0;
		if (!phys_avail[0])
			panic("pmap_steal_memory: out of memory");
		bank_size = phys_avail[1] - phys_avail[0];
	}

	pa = phys_avail[0];
	phys_avail[0] += size;

	va = IA64_PHYS_TO_RR7(pa);
	bzero((caddr_t) va, size);
	return va;
}

/*
 * Bootstrap the system enough to run with virtual memory.
 */
void
pmap_bootstrap()
{
	int i;
	int boot_pvs;

	/*
	 * Setup RIDs. We use the bits above pmap_ridbits for a
	 * generation counter, saving generation zero for
	 * 'invalid'. RIDs 0..7 are reserved for the kernel.
	 */
	pmap_nextrid = (1 << pmap_ridbits) + 8;

	avail_start = phys_avail[0];
	for (i = 0; phys_avail[i+2]; i+= 2) ;
	avail_end = phys_avail[i+1];

	virtual_avail = IA64_RR_BASE(5);
	virtual_end = IA64_RR_BASE(6)-1;

	/*
	 * Initialize protection array.
	 */
	ia64_protection_init();

	/*
	 * The kernel's pmap is statically allocated so we don't have to use
	 * pmap_create, which is unlikely to work correctly at this part of
	 * the boot sequence (XXX and which no longer exists).
	 */
	kernel_pmap = &kernel_pmap_store;
	kernel_pmap->pm_rid = 0;
	kernel_pmap->pm_count = 1;
	kernel_pmap->pm_active = 1;
	TAILQ_INIT(&kernel_pmap->pm_pvlist);

	/*
	 * Region 5 is mapped via the vhpt.
	 */
	ia64_set_rr(IA64_RR_BASE(5),
		    (5 << 8) | (PAGE_SHIFT << 2) | 1);

	/*
	 * Region 6 is direct mapped UC and region 7 is direct mapped
	 * WC. The details of this are controlled by the Alt {I,D}TLB
	 * handlers. Here we just make sure that they have the largest
	 * possible page size to minimise TLB usage.
	 */
	ia64_set_rr(IA64_RR_BASE(6), (6 << 8) | (28 << 2));
	ia64_set_rr(IA64_RR_BASE(7), (7 << 8) | (28 << 2));

	/*
	 * We need some PVs to cope with pmap_kenter() calls prior to
	 * pmap_init(). This is all a bit flaky and needs to be
	 * rethought, probably by avoiding the zone allocator
	 * entirely.
	 */
	boot_pvs = 32768;
	pvbootzone = &pvbootzone_store;
	pvbootinit = (struct pv_entry *)
		pmap_steal_memory(boot_pvs * sizeof (struct pv_entry));
	zbootinit(pvbootzone, "PV ENTRY", sizeof (struct pv_entry),
		  pvbootinit, boot_pvs);

	/*
	 * Set up proc0's PCB.
	 */
#if 0
	proc0.p_addr->u_pcb.pcb_hw.apcb_asn = 0;
#endif
}

/*
 *	Initialize the pmap module.
 *	Called by vm_init, to initialize any structures that the pmap
 *	system needs to map virtual memory.
 *	pmap_init has been enhanced to support, in a fairly consistent
 *	way, discontiguous physical memory.
 */
void
pmap_init(phys_start, phys_end)
	vm_offset_t phys_start, phys_end;
{
	int i;
	int initial_pvs;

	/*
	 * Allocate memory for random pmap data structures.  Includes the
	 * pv_head_table.
	 */

	for(i = 0; i < vm_page_array_size; i++) {
		vm_page_t m;

		m = &vm_page_array[i];
		TAILQ_INIT(&m->md.pv_list);
		m->md.pv_list_count = 0;
	}

	/*
	 * init the pv free list
	 */
	initial_pvs = vm_page_array_size;
	if (initial_pvs < MINPV)
		initial_pvs = MINPV;
	pvzone = &pvzone_store;
	pvinit = (struct pv_entry *) kmem_alloc(kernel_map,
		initial_pvs * sizeof (struct pv_entry));
	zbootinit(pvzone, "PV ENTRY", sizeof (struct pv_entry), pvinit,
		  vm_page_array_size);

	/*
	 * Now it is safe to enable pv_table recording.
	 */
	pmap_initialized = TRUE;
}

/*
 * Initialize the address space (zone) for the pv_entries.  Set a
 * high water mark so that the system can recover from excessive
 * numbers of pv entries.
 */
void
pmap_init2()
{
	pv_entry_max = PMAP_SHPGPERPROC * maxproc + vm_page_array_size;
	pv_entry_high_water = 9 * (pv_entry_max / 10);
	zinitna(pvzone, &pvzone_obj, NULL, 0, pv_entry_max, ZONE_INTERRUPT, 1);
}


/***************************************************
 * Manipulate TLBs for a pmap
 ***************************************************/

static void
pmap_invalidate_rid(pmap_t pmap)
{
	KASSERT(pmap != kernel_pmap,
		("changing kernel_pmap's RID"));
	KASSERT(pmap == PCPU_GET(current_pmap),
		("invalidating RID of non-current pmap"));
	pmap_remove_pages(pmap, IA64_RR_BASE(0), IA64_RR_BASE(5));
	pmap->pm_rid = 0;
}

static void
pmap_invalidate_page(pmap_t pmap, vm_offset_t va)
{
	KASSERT(pmap == PCPU_GET(current_pmap),
		("invalidating TLB for non-current pmap"));
	ia64_ptc_l(va, PAGE_SHIFT << 2);
}

static void
pmap_invalidate_all(pmap_t pmap)
{
	u_int64_t addr;
	int i, j;
	u_int32_t psr;

	KASSERT(pmap == PCPU_GET(current_pmap),
		("invalidating TLB for non-current pmap"));

	psr = save_intr();
	disable_intr();
	addr = pmap_pte_e_base;
	for (i = 0; i < pmap_pte_e_count1; i++) {
		for (j = 0; j < pmap_pte_e_count2; j++) {
			ia64_ptc_e(addr);
			addr += pmap_pte_e_stride2;
		}
		addr += pmap_pte_e_stride1;
	}
	restore_intr(psr);
}

static void
pmap_get_rid(pmap_t pmap)
{
	if ((pmap_nextrid & ((1 << pmap_ridbits) - 1)) == 0) {
		/*
		 * Start a new ASN generation.
		 *
		 * Invalidate all per-process mappings and I-cache
		 */
		pmap_nextrid += 8;

		/*
		 * Since we are about to start re-using ASNs, we must
		 * clear out the TLB entries tagged with the ASN.
		 */
#if 0
		IA64_TBIAP();
		ia64_pal_imb();		/* XXX overkill? */
#endif
	}
	pmap->pm_rid = pmap_nextrid;
	pmap_nextrid += 8;
}
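
/*
 * A worked example of the RID scheme described at the top of the file:
 * pm_rid carries the generation number in the bits above pmap_ridbits and
 * the base region ID in the low bits.  pmap_install() adds the region
 * number (0..4) to that base, so a single allocation of eight IDs covers
 * all five user regions.  With pmap_ridbits == 18, a pmap whose pm_rid is
 * (gen << 18) + 16 runs with hardware RIDs 16..20 installed in region
 * registers 0..4.
 */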

/***************************************************
 * Low level helper routines.....
 ***************************************************/

/*
 * Install a pte into the VHPT
 */
static PMAP_INLINE void
pmap_install_pte(struct ia64_lpte *vhpte, struct ia64_lpte *pte)
{
	u_int64_t *vhp, *p;

	/* invalidate the pte */
	atomic_set_64(&vhpte->pte_tag, 1L << 63);
	ia64_mf();			/* make sure everyone sees */

	vhp = (u_int64_t *) vhpte;
	p = (u_int64_t *) pte;

	vhp[0] = p[0];
	vhp[1] = p[1];
	vhp[2] = p[2];			/* sets ti to one */

	ia64_mf();
}

/*
 * Compare essential parts of pte.
 */
static PMAP_INLINE int
pmap_equal_pte(struct ia64_lpte *pte1, struct ia64_lpte *pte2)
{
	return *(u_int64_t *) pte1 == *(u_int64_t *) pte2;
}

/*
 * this routine defines the region(s) of memory that should
 * not be tested for the modified bit.
 */
static PMAP_INLINE int
pmap_track_modified(vm_offset_t va)
{
	if ((va < clean_sva) || (va >= clean_eva))
		return 1;
	else
		return 0;
}

/*
 * Create the UPAGES for a new process.
 * This routine directly affects the fork perf for a process.
 */
void
pmap_new_proc(struct proc *p)
{
	struct user *up;

	/*
	 * Use contigmalloc for user area so that we can use a region
	 * 7 address for it which makes it impossible to accidentally
	 * lose when recording a trapframe.
	 */
	up = contigmalloc(UPAGES * PAGE_SIZE, M_PMAP,
			  M_WAITOK,
			  0ul,
			  256*1024*1024 - 1,
			  PAGE_SIZE,
			  256*1024*1024);

	p->p_md.md_uservirt = up;
	p->p_addr = (struct user *)
		IA64_PHYS_TO_RR7(ia64_tpa((u_int64_t) up));
}

/*
 * Dispose the UPAGES for a process that has exited.
 * This routine directly impacts the exit perf of a process.
 */
void
pmap_dispose_proc(p)
	struct proc *p;
{
	contigfree(p->p_md.md_uservirt, UPAGES * PAGE_SIZE, M_PMAP);
	p->p_md.md_uservirt = 0;
	p->p_addr = 0;
}

/*
 * Allow the UPAGES for a process to be prejudicially paged out.
 */
void
pmap_swapout_proc(p)
	struct proc *p;
{
#if 0
	int i;
	vm_object_t upobj;
	vm_page_t m;

	/*
	 * Make sure we aren't fpcurproc.
	 */
	ia64_fpstate_save(p, 1);

	upobj = p->p_upages_obj;
	/*
	 * let the upages be paged
	 */
	for(i=0;i<UPAGES;i++) {
		if ((m = vm_page_lookup(upobj, i)) == NULL)
			panic("pmap_swapout_proc: upage already missing???");
		vm_page_dirty(m);
		vm_page_unwire(m, 0);
		pmap_kremove((vm_offset_t)p->p_addr + PAGE_SIZE * i);
	}
#endif
}

/*
 * Bring the UPAGES for a specified process back in.
 */
void
pmap_swapin_proc(p)
	struct proc *p;
{
#if 0
	int i,rv;
	vm_object_t upobj;
	vm_page_t m;

	upobj = p->p_upages_obj;
	for(i=0;i<UPAGES;i++) {

		m = vm_page_grab(upobj, i, VM_ALLOC_NORMAL | VM_ALLOC_RETRY);

		pmap_kenter(((vm_offset_t) p->p_addr) + i * PAGE_SIZE,
			VM_PAGE_TO_PHYS(m));

		if (m->valid != VM_PAGE_BITS_ALL) {
			rv = vm_pager_get_pages(upobj, &m, 1, 0);
			if (rv != VM_PAGER_OK)
				panic("pmap_swapin_proc: cannot get upages for proc: %d\n", p->p_pid);
			m = vm_page_lookup(upobj, i);
			m->valid = VM_PAGE_BITS_ALL;
		}

		vm_page_wire(m);
		vm_page_wakeup(m);
		vm_page_flag_set(m, PG_MAPPED | PG_WRITEABLE);
	}
#endif
}

/***************************************************
 * Page table page management routines.....
 ***************************************************/

void
pmap_pinit0(pmap)
	struct pmap *pmap;
{
	/*
	 * kernel_pmap is the same as any other pmap.
	 */
	pmap_pinit(pmap);
	pmap->pm_flags = 0;
	pmap->pm_rid = 0;
	pmap->pm_count = 1;
	pmap->pm_ptphint = NULL;
	pmap->pm_active = 0;
	TAILQ_INIT(&pmap->pm_pvlist);
	bzero(&pmap->pm_stats, sizeof pmap->pm_stats);
}

/*
 * Initialize a preallocated and zeroed pmap structure,
 * such as one in a vmspace structure.
 */
void
pmap_pinit(pmap)
	register struct pmap *pmap;
{
	pmap->pm_flags = 0;
	pmap->pm_rid = 0;
	pmap->pm_count = 1;
	pmap->pm_ptphint = NULL;
	pmap->pm_active = 0;
	TAILQ_INIT(&pmap->pm_pvlist);
	bzero(&pmap->pm_stats, sizeof pmap->pm_stats);
}

/*
 * Wire in kernel global address entries.  To avoid a race condition
 * between pmap initialization and pmap_growkernel, this procedure
 * should be called after the vmspace is attached to the process
 * but before this pmap is activated.
 */
void
pmap_pinit2(pmap)
	struct pmap *pmap;
{
}

/***************************************************
 * Pmap allocation/deallocation routines.
 ***************************************************/

/*
 * Release any resources held by the given physical map.
 * Called when a pmap initialized by pmap_pinit is being released.
 * Should only be called if the map contains no valid mappings.
 */
void
pmap_release(pmap_t pmap)
{
#if defined(DIAGNOSTIC)
	if (object->ref_count != 1)
		panic("pmap_release: pteobj reference count != 1");
#endif
}

/*
 * grow the number of kernel page table entries, if needed
 */
void
pmap_growkernel(vm_offset_t addr)
{
}

/*
 * Retire the given physical map from service.
 * Should only be called if the map contains
 * no valid mappings.
 */
void
pmap_destroy(pmap_t pmap)
{
	int count;

	if (pmap == NULL)
		return;

	count = --pmap->pm_count;
	if (count == 0) {
		pmap_release(pmap);
		panic("destroying a pmap is not yet implemented");
	}
}

/*
 * Add a reference to the specified pmap.
 */
void
pmap_reference(pmap_t pmap)
{
	if (pmap != NULL) {
		pmap->pm_count++;
	}
}

/***************************************************
 * page management routines.
 ***************************************************/

/*
 * free the pv_entry back to the free list
 */
static PMAP_INLINE void
free_pv_entry(pv_entry_t pv)
{
	pv_entry_count--;
	zfree(pvzone, pv);
}

/*
 * get a new pv_entry, allocating a block from the system
 * when needed.
 * the memory allocation is performed bypassing the malloc code
 * because of the possibility of allocations at interrupt time.
 */
static pv_entry_t
get_pv_entry(void)
{
	if (!pvinit)
		return zalloc(pvbootzone);

	pv_entry_count++;
	if (pv_entry_high_water &&
	    (pv_entry_count > pv_entry_high_water) &&
	    (pmap_pagedaemon_waken == 0)) {
		pmap_pagedaemon_waken = 1;
		wakeup (&vm_pages_needed);
	}
	return (pv_entry_t) IA64_PHYS_TO_RR7(vtophys(zalloc(pvzone)));
}

/*
 * Add a pv_entry to the VHPT.
 */
static void
pmap_enter_vhpt(pv_entry_t pv)
{
	struct ia64_lpte *vhpte;

	vhpte = (struct ia64_lpte *) ia64_thash(pv->pv_va);

	pv->pv_pte.pte_chain = vhpte->pte_chain;
	vhpte->pte_chain = ia64_tpa((vm_offset_t) pv);

	if (!vhpte->pte_p && pv->pv_pte.pte_p)
		pmap_install_pte(vhpte, &pv->pv_pte);
	else
		ia64_mf();
}

/*
 * Update VHPT after pv->pv_pte has changed.
 */
static void
pmap_update_vhpt(pv_entry_t pv)
{
	struct ia64_lpte *vhpte;

	vhpte = (struct ia64_lpte *) ia64_thash(pv->pv_va);

	if ((!vhpte->pte_p || vhpte->pte_tag == pv->pv_pte.pte_tag)
	    && pv->pv_pte.pte_p)
		pmap_install_pte(vhpte, &pv->pv_pte);
}

/*
 * Remove a pv_entry from the VHPT. Return true if it worked.
 */
static int
pmap_remove_vhpt(pv_entry_t pv)
{
	struct ia64_lpte *pte;
	struct ia64_lpte *lpte;
	struct ia64_lpte *vhpte;
	u_int64_t tag;

	vhpte = (struct ia64_lpte *) ia64_thash(pv->pv_va);

	/*
	 * If the VHPTE is invalid, there can't be a collision chain.
	 */
	if (!vhpte->pte_p) {
		KASSERT(!vhpte->pte_chain, ("bad vhpte"));
		return 0;
	}

	lpte = vhpte;
	pte = (struct ia64_lpte *) IA64_PHYS_TO_RR7(vhpte->pte_chain);
	tag = ia64_ttag(pv->pv_va);

	while (pte->pte_tag != tag) {
		lpte = pte;
		if (pte->pte_chain)
			pte = (struct ia64_lpte *) IA64_PHYS_TO_RR7(pte->pte_chain);
		else
			return 0; /* error here? */
	}

	/*
	 * Snip this pv_entry out of the collision chain.
	 */
	lpte->pte_chain = pte->pte_chain;

	/*
	 * If the VHPTE matches as well, change it to map the first
	 * element from the chain if there is one.
	 */
	if (vhpte->pte_tag == tag) {
		if (vhpte->pte_chain) {
			pte = (struct ia64_lpte *)
				IA64_PHYS_TO_RR7(vhpte->pte_chain);
			pmap_install_pte(vhpte, pte);
		} else {
			vhpte->pte_p = 0;
			ia64_mf();
		}
	}

	return 1;
}
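
/*
 * As implemented above, each pv_entry embeds a long-format PTE (pv_pte)
 * and doubles as a VHPT collision-chain element: pmap_enter_vhpt() links
 * the pv into the chain headed at the slot returned by ia64_thash(pv_va)
 * using physical addresses, pmap_install_pte() copies the entry into the
 * hashed VHPT slot itself, and pmap_remove_vhpt() (like pmap_find_pv()
 * below) walks the chain comparing ia64_ttag() tags.
 */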
/*
 * Make a pv_entry_t which maps the given virtual address. The pte
 * will be initialised with pte_p = 0. The function pmap_set_pv()
 * should be called to change the value of the pte.
 * Must be called at splvm().
 */
static pv_entry_t
pmap_make_pv(pmap_t pmap, vm_offset_t va)
{
	pv_entry_t pv;

	pv = get_pv_entry();
	bzero(pv, sizeof(*pv));
	pv->pv_va = va;
	pv->pv_pmap = pmap;

	pv->pv_pte.pte_p = 0;		/* invalid for now */
	pv->pv_pte.pte_ma = PTE_MA_WB;	/* cacheable, write-back */
	pv->pv_pte.pte_a = 0;
	pv->pv_pte.pte_d = 0;
	pv->pv_pte.pte_pl = 0;		/* privilege level 0 */
	pv->pv_pte.pte_ar = 3;		/* read/write/execute */
	pv->pv_pte.pte_ppn = 0;		/* physical address */
	pv->pv_pte.pte_ed = 0;
	pv->pv_pte.pte_ig = 0;

	pv->pv_pte.pte_ps = PAGE_SHIFT;	/* page size */
	pv->pv_pte.pte_key = 0;		/* protection key */

	pv->pv_pte.pte_tag = ia64_ttag(va);

	pmap_enter_vhpt(pv);

	TAILQ_INSERT_TAIL(&pmap->pm_pvlist, pv, pv_plist);
	pmap->pm_stats.resident_count++;

	return pv;
}

/*
 * Initialise a pv_entry_t with a given physical address and
 * protection code. If the passed vm_page_t is non-zero, the entry is
 * added to its list of mappings.
 * Must be called at splvm().
 */
static void
pmap_set_pv(pmap_t pmap, pv_entry_t pv, vm_offset_t pa,
	    int prot, vm_page_t m)
{
	if (pv->pv_pte.pte_p && pv->pv_pte.pte_ig & PTE_IG_MANAGED) {
		vm_offset_t opa = pv->pv_pte.pte_ppn << 12;
		vm_page_t om = PHYS_TO_VM_PAGE(opa);

		TAILQ_REMOVE(&om->md.pv_list, pv, pv_list);
		om->md.pv_list_count--;

		if (TAILQ_FIRST(&om->md.pv_list) == NULL)
			vm_page_flag_clear(om, PG_MAPPED | PG_WRITEABLE);
	}

	pv->pv_pte.pte_p = 1;		/* set to valid */

	/*
	 * Only track access/modify for managed pages.
	 */
	if (m) {
		pv->pv_pte.pte_a = 0;
		pv->pv_pte.pte_d = 0;
	} else {
		pv->pv_pte.pte_a = 1;
		pv->pv_pte.pte_d = 1;
	}

	pv->pv_pte.pte_pl = prot & 3;	/* privilege level */
	pv->pv_pte.pte_ar = prot >> 2;	/* access rights */
	pv->pv_pte.pte_ppn = pa >> 12;	/* physical address */

	if (m) {
		pv->pv_pte.pte_ig |= PTE_IG_MANAGED;
		TAILQ_INSERT_TAIL(&m->md.pv_list, pv, pv_list);
		m->md.pv_list_count++;
	}

	/*
	 * Update the VHPT entry if it needs to change.
	 */
	pmap_update_vhpt(pv);
}

/*
 * Remove a mapping represented by a particular pv_entry_t. If the
 * passed vm_page_t is non-zero, then the entry is removed from it.
 * Must be called at splvm().
 */
static int
pmap_remove_pv(pmap_t pmap, pv_entry_t pv, vm_page_t m)
{
	int rtval;

	/*
	 * First remove from the VHPT.
	 */
	rtval = pmap_remove_vhpt(pv);
	if (!rtval)
		return rtval;

	if ((pv->pv_pte.pte_ig & PTE_IG_MANAGED) && m) {
		TAILQ_REMOVE(&m->md.pv_list, pv, pv_list);
		m->md.pv_list_count--;

		if (TAILQ_FIRST(&m->md.pv_list) == NULL)
			vm_page_flag_clear(m, PG_MAPPED | PG_WRITEABLE);
	}

	TAILQ_REMOVE(&pmap->pm_pvlist, pv, pv_plist);
	pmap->pm_stats.resident_count--;

	free_pv_entry(pv);

	return (rtval);
}

/*
 * Find a pv given a pmap and virtual address.
 */
static pv_entry_t
pmap_find_pv(pmap_t pmap, vm_offset_t va)
{
	struct ia64_lpte *pte;
	u_int64_t tag;

	pte = (struct ia64_lpte *) ia64_thash(va);
	if (!pte->pte_chain)
		return 0;

	tag = ia64_ttag(va);
	pte = (struct ia64_lpte *) IA64_PHYS_TO_RR7(pte->pte_chain);

	while (pte->pte_tag != tag) {
		if (pte->pte_chain)
			pte = (struct ia64_lpte *) IA64_PHYS_TO_RR7(pte->pte_chain);
		else
			return 0;
	}

	return (pv_entry_t) pte;	/* XXX wrong va */
}

/*
 *	Routine:	pmap_extract
 *	Function:
 *		Extract the physical page address associated
 *		with the given map/virtual_address pair.
 */
vm_offset_t
pmap_extract(pmap, va)
	register pmap_t pmap;
	vm_offset_t va;
{
	pv_entry_t pv = pmap_find_pv(pmap, va);
	if (pv)
		return pmap_pte_pa(&pv->pv_pte);
	else
		return 0;
}

/***************************************************
 * Low level mapping routines.....
 ***************************************************/

/*
 * Add a list of wired pages to the kva
 * this routine is only used for temporary
 * kernel mappings that do not need to have
 * page modification or references recorded.
 * Note that old mappings are simply written
 * over.  The page *must* be wired.
 */
void
pmap_qenter(vm_offset_t va, vm_page_t *m, int count)
{
	int i, inval;
	pv_entry_t pv;

	for (i = 0; i < count; i++) {
		vm_offset_t tva = va + i * PAGE_SIZE;
		pv = pmap_find_pv(kernel_pmap, tva);
		inval = 0;
		if (!pv)
			pv = pmap_make_pv(kernel_pmap, tva);
		else
			inval = 1;

		PMAP_DEBUG_VA(va);
		pmap_set_pv(kernel_pmap, pv,
			    VM_PAGE_TO_PHYS(m[i]),
			    (PTE_AR_RWX<<2) | PTE_PL_KERN, 0);
		if (inval)
			pmap_invalidate_page(kernel_pmap, tva);
	}
}

/*
 * this routine jerks page mappings from the
 * kernel -- it is meant only for temporary mappings.
 */
void
pmap_qremove(va, count)
	vm_offset_t va;
	int count;
{
	int i;
	pv_entry_t pv;

	for (i = 0; i < count; i++) {
		pv = pmap_find_pv(kernel_pmap, va);
		PMAP_DEBUG_VA(va);
		if (pv) {
			pmap_remove_pv(kernel_pmap, pv, 0);
			pmap_invalidate_page(kernel_pmap, va);
		}
		va += PAGE_SIZE;
	}
}

/*
 * Add a wired page to the kva.
 */
void
pmap_kenter(vm_offset_t va, vm_offset_t pa)
{
	pv_entry_t pv;

	pv = pmap_find_pv(kernel_pmap, va);
	if (!pv)
		pv = pmap_make_pv(kernel_pmap, va);
	pmap_set_pv(kernel_pmap, pv,
		    pa, (PTE_AR_RWX<<2) | PTE_PL_KERN, 0);
	pmap_invalidate_page(kernel_pmap, va);
}

/*
 * Remove a page from the kva
 */
void
pmap_kremove(vm_offset_t va)
{
	pv_entry_t pv;

	pv = pmap_find_pv(kernel_pmap, va);
	if (pv) {
		pmap_remove_pv(kernel_pmap, pv, 0);
		pmap_invalidate_page(kernel_pmap, va);
	}
}

/*
 * Used to map a range of physical addresses into kernel
 * virtual address space.
 *
 * For now, VM is already on, we only need to map the
 * specified memory.
 */
vm_offset_t
pmap_map(vm_offset_t virt, vm_offset_t start, vm_offset_t end, int prot)
{
	/*
	 * XXX We should really try to use larger pagesizes here to
	 * cut down the number of PVs used.
	 */
	while (start < end) {
		pmap_kenter(virt, start);
		virt += PAGE_SIZE;
		start += PAGE_SIZE;
	}
	return (virt);
}

/*
 * This routine is very drastic, but can save the system
 * in a pinch.
 */
void
pmap_collect()
{
	int i;
	vm_page_t m;
	static int warningdone=0;

	if (pmap_pagedaemon_waken == 0)
		return;

	if (warningdone < 5) {
		printf("pmap_collect: collecting pv entries -- suggest increasing PMAP_SHPGPERPROC\n");
		warningdone++;
	}

	for(i = 0; i < vm_page_array_size; i++) {
		m = &vm_page_array[i];
		if (m->wire_count || m->hold_count || m->busy ||
		    (m->flags & PG_BUSY))
			continue;
		pmap_remove_all(m);
	}
	pmap_pagedaemon_waken = 0;
}

/*
 * Remove a single page from a process address space
 */
static void
pmap_remove_page(pmap_t pmap, vm_offset_t va)
{
	pv_entry_t pv;
	vm_page_t m;
	int rtval;
	int s;

	s = splvm();

	pv = pmap_find_pv(pmap, va);

	rtval = 0;
	if (pv) {
		m = PHYS_TO_VM_PAGE(pmap_pte_pa(&pv->pv_pte));
		rtval = pmap_remove_pv(pmap, pv, m);
		pmap_invalidate_page(pmap, va);
	}

	splx(s);
	return;
}

/*
 *	Remove the given range of addresses from the specified map.
 *
 *	It is assumed that the start and end are properly
 *	rounded to the page size.
 */
void
pmap_remove(pmap_t pmap, vm_offset_t sva, vm_offset_t eva)
{
	vm_offset_t va, nva;

	if (pmap == NULL)
		return;

	if (pmap->pm_stats.resident_count == 0)
		return;

	/*
	 * special handling of removing one page.  a very
	 * common operation and easy to short circuit some
	 * code.
	 */
	if (sva + PAGE_SIZE == eva) {
		pmap_remove_page(pmap, sva);
		return;
	}

	if (atop(eva - sva) < pmap->pm_stats.resident_count) {
		for (va = sva; va < eva; va = nva) {
			pmap_remove_page(pmap, va);
			nva = va + PAGE_SIZE;
		}
	} else {
		pv_entry_t pv, pvnext;
		int s;

		s = splvm();
		for (pv = TAILQ_FIRST(&pmap->pm_pvlist);
		     pv;
		     pv = pvnext) {
			pvnext = TAILQ_NEXT(pv, pv_plist);
			if (pv->pv_va >= sva && pv->pv_va < eva) {
				vm_page_t m = PHYS_TO_VM_PAGE(pmap_pte_pa(&pv->pv_pte));
				va = pv->pv_va;
				pmap_remove_pv(pmap, pv, m);
				pmap_invalidate_page(pmap, va);
			}
		}
		splx(s);
	}
}

/*
 *	Routine:	pmap_remove_all
 *	Function:
 *		Removes this physical page from
 *		all physical maps in which it resides.
 *		Reflects back modify bits to the pager.
 *
 *	Notes:
 *		Original versions of this routine were very
 *		inefficient because they iteratively called
 *		pmap_remove (slow...)
 */

static void
pmap_remove_all(vm_page_t m)
{
	register pv_entry_t pv;
	int nmodify;
	int s;

	nmodify = 0;
#if defined(PMAP_DIAGNOSTIC)
	/*
	 * XXX this makes pmap_page_protect(NONE) illegal for non-managed
	 * pages!
	 */
	if (!pmap_initialized || (m->flags & PG_FICTITIOUS)) {
		panic("pmap_page_protect: illegal for unmanaged page, va: 0x%lx", VM_PAGE_TO_PHYS(m));
	}
#endif

	s = splvm();

	while ((pv = TAILQ_FIRST(&m->md.pv_list)) != NULL) {
		vm_page_t m = PHYS_TO_VM_PAGE(pmap_pte_pa(&pv->pv_pte));
		vm_offset_t va = pv->pv_va;
		pmap_remove_pv(pv->pv_pmap, pv, m);
		pmap_invalidate_page(pv->pv_pmap, va);
	}

	vm_page_flag_clear(m, PG_MAPPED | PG_WRITEABLE);

	splx(s);
	return;
}

/*
 *	Set the physical protection on the
 *	specified range of this map as requested.
 */
void
pmap_protect(pmap_t pmap, vm_offset_t sva, vm_offset_t eva, vm_prot_t prot)
{
	pmap_t oldpmap;
	pv_entry_t pv;
	int newprot;

	if (pmap == NULL)
		return;

	oldpmap = pmap_install(pmap);

	if ((prot & VM_PROT_READ) == VM_PROT_NONE) {
		pmap_remove(pmap, sva, eva);
		pmap_install(oldpmap);
		return;
	}

	if (prot & VM_PROT_WRITE) {
		pmap_install(oldpmap);
		return;
	}

	newprot = pte_prot(pmap, prot);

	if ((sva & PAGE_MASK) || (eva & PAGE_MASK))
		panic("pmap_protect: unaligned addresses");

	while (sva < eva) {
		/*
		 * If page is invalid, skip this page
		 */
		pv = pmap_find_pv(pmap, sva);
		if (!pv) {
			sva += PAGE_SIZE;
			continue;
		}

		if (pmap_pte_prot(&pv->pv_pte) != newprot) {
			pmap_pte_set_prot(&pv->pv_pte, newprot);
			pmap_update_vhpt(pv);
			pmap_invalidate_page(pmap, sva);
		}

		sva += PAGE_SIZE;
	}
	pmap_install(oldpmap);
}

/*
 *	Insert the given physical page (p) at
 *	the specified virtual address (v) in the
 *	target physical map with the protection requested.
 *
 *	If specified, the page will be wired down, meaning
 *	that the related pte can not be reclaimed.
 *
 *	NB:  This is the only routine which MAY NOT lazy-evaluate
 *	or lose information.  That is, this routine must actually
 *	insert this page into the given map NOW.
 */
void
pmap_enter(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot,
	   boolean_t wired)
{
	pmap_t oldpmap;
	vm_offset_t pa;
	pv_entry_t pv;
	vm_offset_t opa;
	struct ia64_lpte origpte;
	int managed;

	if (pmap == NULL)
		return;

	oldpmap = pmap_install(pmap);

	va &= ~PAGE_MASK;
#ifdef PMAP_DIAGNOSTIC
	if (va > VM_MAX_KERNEL_ADDRESS)
		panic("pmap_enter: toobig");
#endif

	pv = pmap_find_pv(pmap, va);
	if (!pv)
		pv = pmap_make_pv(pmap, va);

	origpte = pv->pv_pte;
	if (origpte.pte_p)
		opa = pmap_pte_pa(&origpte);
	else
		opa = 0;

	pa = VM_PAGE_TO_PHYS(m) & ~PAGE_MASK;
	managed = 0;

	/*
	 * Mapping has not changed, must be protection or wiring change.
	 */
	if (origpte.pte_p && (opa == pa)) {
		/*
		 * Wiring change, just update stats. We don't worry about
		 * wiring PT pages as they remain resident as long as there
		 * are valid mappings in them. Hence, if a user page is wired,
		 * the PT page will be also.
		 */
		if (wired && ((origpte.pte_ig & PTE_IG_WIRED) == 0))
			pmap->pm_stats.wired_count++;
		else if (!wired && (origpte.pte_ig & PTE_IG_WIRED))
			pmap->pm_stats.wired_count--;

		managed = origpte.pte_ig & PTE_IG_MANAGED;
		goto validate;
	} else {
		/*
		 * Mapping has changed, invalidate old range and fall
		 * through to handle validating new mapping.
		 */
	}

	/*
	 * Increment counters
	 */
	if (wired)
		pmap->pm_stats.wired_count++;

validate:
	/*
	 * Now validate mapping with desired protection/wiring.
	 * This enters the pv_entry_t on the page's list if necessary.
	 */
	pmap_set_pv(pmap, pv, pa, pte_prot(pmap, prot), m);

	if (wired)
		pv->pv_pte.pte_ig |= PTE_IG_WIRED;

	/*
	 * if the mapping or permission bits are different, we need
	 * to invalidate the page.
	 */
	if (!pmap_equal_pte(&origpte, &pv->pv_pte)) {
		PMAP_DEBUG_VA(va);
		pmap_invalidate_page(pmap, va);
	}

	pmap_install(oldpmap);
}

/*
 * this code makes some *MAJOR* assumptions:
 * 1. Current pmap & pmap exists.
 * 2. Not wired.
 * 3. Read access.
 * 4. No page table pages.
 * 5. Tlbflush is deferred to calling procedure.
 * 6. Page IS managed.
 * but is *MUCH* faster than pmap_enter...
 */

static void
pmap_enter_quick(pmap_t pmap, vm_offset_t va, vm_page_t m)
{
	pv_entry_t pv;
	int s;

	s = splvm();

	pv = pmap_find_pv(pmap, va);
	if (!pv)
		pv = pmap_make_pv(pmap, va);

	/*
	 * Enter on the PV list if part of our managed memory. Note that we
	 * raise IPL while manipulating pv_table since pmap_enter can be
	 * called at interrupt time.
	 */
	PMAP_DEBUG_VA(va);
	pmap_set_pv(pmap, pv, VM_PAGE_TO_PHYS(m),
		    (PTE_AR_R << 2) | PTE_PL_USER, m);

	splx(s);
}

/*
 * Make temporary mapping for a physical address. This is called
 * during dump.
 */
void *
pmap_kenter_temporary(vm_offset_t pa, int i)
{
	return (void *) IA64_PHYS_TO_RR7(pa - (i * PAGE_SIZE));
}

#define MAX_INIT_PT (96)
/*
 * pmap_object_init_pt preloads the ptes for a given object
 * into the specified pmap.  This eliminates the blast of soft
 * faults on process startup and immediately after an mmap.
 */
void
pmap_object_init_pt(pmap_t pmap, vm_offset_t addr,
		    vm_object_t object, vm_pindex_t pindex,
		    vm_size_t size, int limit)
{
	pmap_t oldpmap;
	vm_offset_t tmpidx;
	int psize;
	vm_page_t p;
	int objpgs;

	if (pmap == NULL || object == NULL)
		return;

	oldpmap = pmap_install(pmap);

	psize = ia64_btop(size);

	if ((object->type != OBJT_VNODE) ||
	    (limit && (psize > MAX_INIT_PT) &&
	     (object->resident_page_count > MAX_INIT_PT))) {
		pmap_install(oldpmap);
		return;
	}

	if (psize + pindex > object->size)
		psize = object->size - pindex;

	/*
	 * if we are processing a major portion of the object, then scan the
	 * entire thing.
	 */
	if (psize > (object->resident_page_count >> 2)) {
		objpgs = psize;

		for (p = TAILQ_FIRST(&object->memq);
		     ((objpgs > 0) && (p != NULL));
		     p = TAILQ_NEXT(p, listq)) {

			tmpidx = p->pindex;
			if (tmpidx < pindex) {
				continue;
			}
			tmpidx -= pindex;
			if (tmpidx >= psize) {
				continue;
			}
			if (((p->valid & VM_PAGE_BITS_ALL) == VM_PAGE_BITS_ALL) &&
			    (p->flags & (PG_BUSY | PG_FICTITIOUS)) == 0) {
				if ((p->queue - p->pc) == PQ_CACHE)
					vm_page_deactivate(p);
				vm_page_busy(p);
				pmap_enter_quick(pmap,
						 addr + ia64_ptob(tmpidx), p);
				vm_page_flag_set(p, PG_MAPPED);
				vm_page_wakeup(p);
			}
			objpgs -= 1;
		}
	} else {
		/*
		 * else lookup the pages one-by-one.
		 */
		for (tmpidx = 0; tmpidx < psize; tmpidx += 1) {
			p = vm_page_lookup(object, tmpidx + pindex);
			if (p &&
			    ((p->valid & VM_PAGE_BITS_ALL) == VM_PAGE_BITS_ALL) &&
			    (p->flags & (PG_BUSY | PG_FICTITIOUS)) == 0) {
				if ((p->queue - p->pc) == PQ_CACHE)
					vm_page_deactivate(p);
				vm_page_busy(p);
				pmap_enter_quick(pmap,
						 addr + ia64_ptob(tmpidx), p);
				vm_page_flag_set(p, PG_MAPPED);
				vm_page_wakeup(p);
			}
		}
	}
	pmap_install(oldpmap);
	return;
}

/*
 * pmap_prefault provides a quick way of clustering
 * pagefaults into a processes address space.  It is a "cousin"
 * of pmap_object_init_pt, except it runs at page fault time instead
 * of mmap time.
 */
#define PFBAK 4
#define PFFOR 4
#define PAGEORDER_SIZE (PFBAK+PFFOR)

static int pmap_prefault_pageorder[] = {
	-PAGE_SIZE, PAGE_SIZE,
	-2 * PAGE_SIZE, 2 * PAGE_SIZE,
	-3 * PAGE_SIZE, 3 * PAGE_SIZE,
	-4 * PAGE_SIZE, 4 * PAGE_SIZE
};

void
pmap_prefault(pmap, addra, entry)
	pmap_t pmap;
	vm_offset_t addra;
	vm_map_entry_t entry;
{
	int i;
	vm_offset_t starta;
	vm_offset_t addr;
	vm_pindex_t pindex;
	vm_page_t m, mpte;
	vm_object_t object;

	if (!curproc || (pmap != vmspace_pmap(curproc->p_vmspace)))
		return;

	object = entry->object.vm_object;

	starta = addra - PFBAK * PAGE_SIZE;
	if (starta < entry->start) {
		starta = entry->start;
	} else if (starta > addra) {
		starta = 0;
	}

	mpte = NULL;
	for (i = 0; i < PAGEORDER_SIZE; i++) {
		vm_object_t lobject;
		pv_entry_t pv;

		addr = addra + pmap_prefault_pageorder[i];
		if (addr > addra + (PFFOR * PAGE_SIZE))
			addr = 0;

		if (addr < starta || addr >= entry->end)
			continue;

		pv = pmap_find_pv(pmap, addr);
		if (pv)
			continue;

		pindex = ((addr - entry->start) + entry->offset) >> PAGE_SHIFT;
		lobject = object;
		for (m = vm_page_lookup(lobject, pindex);
		     (!m && (lobject->type == OBJT_DEFAULT) && (lobject->backing_object));
		     lobject = lobject->backing_object) {
			if (lobject->backing_object_offset & PAGE_MASK)
				break;
			pindex += (lobject->backing_object_offset >> PAGE_SHIFT);
			m = vm_page_lookup(lobject->backing_object, pindex);
		}

		/*
		 * give-up when a page is not in memory
		 */
		if (m == NULL)
			break;

		if (((m->valid & VM_PAGE_BITS_ALL) == VM_PAGE_BITS_ALL) &&
		    (m->flags & (PG_BUSY | PG_FICTITIOUS)) == 0) {

			if ((m->queue - m->pc) == PQ_CACHE) {
				vm_page_deactivate(m);
			}
			vm_page_busy(m);
			pmap_enter_quick(pmap, addr, m);
			vm_page_flag_set(m, PG_MAPPED);
			vm_page_wakeup(m);
		}
	}
}
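
/*
 * Summary of pmap_prefault() above: pmap_prefault_pageorder supplies the
 * eight (PAGEORDER_SIZE) byte offsets, +/-1 through +/-4 pages around the
 * faulting address, probed in order of increasing distance.  Addresses
 * outside [starta, entry->end), or already mapped in the pmap, are
 * skipped, and the scan stops at the first page that is not resident in
 * the object chain.
 */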

/*
 *	Routine:	pmap_change_wiring
 *	Function:	Change the wiring attribute for a map/virtual-address
 *			pair.
 *	In/out conditions:
 *			The mapping must already exist in the pmap.
 */
void
pmap_change_wiring(pmap, va, wired)
	register pmap_t pmap;
	vm_offset_t va;
	boolean_t wired;
{
	pmap_t oldpmap;
	pv_entry_t pv;

	if (pmap == NULL)
		return;

	oldpmap = pmap_install(pmap);

	pv = pmap_find_pv(pmap, va);

	if (wired && !pmap_pte_w(&pv->pv_pte))
		pmap->pm_stats.wired_count++;
	else if (!wired && pmap_pte_w(&pv->pv_pte))
		pmap->pm_stats.wired_count--;

	/*
	 * Wiring is not a hardware characteristic so there is no need to
	 * invalidate TLB.
	 */
	pmap_pte_set_w(&pv->pv_pte, wired);

	pmap_install(oldpmap);
}



/*
 *	Copy the range specified by src_addr/len
 *	from the source map to the range dst_addr/len
 *	in the destination map.
 *
 *	This routine is only advisory and need not do anything.
 */

void
pmap_copy(pmap_t dst_pmap, pmap_t src_pmap, vm_offset_t dst_addr, vm_size_t len,
	  vm_offset_t src_addr)
{
}

/*
 *	Routine:	pmap_kernel
 *	Function:
 *		Returns the physical map handle for the kernel.
 */
pmap_t
pmap_kernel()
{
	return (kernel_pmap);
}

/*
 *	pmap_zero_page zeros the specified hardware page by
 *	mapping it into virtual memory and using bzero to clear
 *	its contents.
 */
void
pmap_zero_page(vm_offset_t pa)
{
	vm_offset_t va = IA64_PHYS_TO_RR7(pa);
	bzero((caddr_t) va, PAGE_SIZE);
}


/*
 *	pmap_zero_page_area zeros the specified hardware page by
 *	mapping it into virtual memory and using bzero to clear
 *	its contents.
 *
 *	off and size must reside within a single page.
 */
void
pmap_zero_page_area(vm_offset_t pa, int off, int size)
{
	vm_offset_t va = IA64_PHYS_TO_RR7(pa);
	bzero((char *)(caddr_t)va + off, size);
}

/*
 *	pmap_copy_page copies the specified (machine independent)
 *	page by mapping the page into virtual memory and using
 *	bcopy to copy the page, one machine dependent page at a
 *	time.
 */
void
pmap_copy_page(vm_offset_t src, vm_offset_t dst)
{
	src = IA64_PHYS_TO_RR7(src);
	dst = IA64_PHYS_TO_RR7(dst);
	bcopy((caddr_t) src, (caddr_t) dst, PAGE_SIZE);
}


/*
 *	Routine:	pmap_pageable
 *	Function:
 *		Make the specified pages (by pmap, offset)
 *		pageable (or not) as requested.
 *
 *		A page which is not pageable may not take
 *		a fault; therefore, its page table entry
 *		must remain valid for the duration.
 *
 *		This routine is merely advisory; pmap_enter
 *		will specify that these pages are to be wired
 *		down (or not) as appropriate.
 */
void
pmap_pageable(pmap, sva, eva, pageable)
	pmap_t pmap;
	vm_offset_t sva, eva;
	boolean_t pageable;
{
}

/*
 * this routine returns true if a physical page resides
 * in the given pmap.
 */
boolean_t
pmap_page_exists(pmap, m)
	pmap_t pmap;
	vm_page_t m;
{
	register pv_entry_t pv;
	int s;

	if (!pmap_initialized || (m->flags & PG_FICTITIOUS))
		return FALSE;

	s = splvm();

	/*
	 * Not found, check current mappings returning immediately if found.
	 */
	for (pv = TAILQ_FIRST(&m->md.pv_list);
	     pv;
	     pv = TAILQ_NEXT(pv, pv_list)) {
		if (pv->pv_pmap == pmap) {
			splx(s);
			return TRUE;
		}
	}
	splx(s);
	return (FALSE);
}

#define PMAP_REMOVE_PAGES_CURPROC_ONLY
/*
 * Remove all pages from specified address space
 * this aids process exit speeds.  Also, this code
 * is special cased for current process only, but
 * can have the more generic (and slightly slower)
 * mode enabled.  This is much faster than pmap_remove
 * in the case of running down an entire address space.
 */
void
pmap_remove_pages(pmap, sva, eva)
	pmap_t pmap;
	vm_offset_t sva, eva;
{
	pv_entry_t pv, npv;
	int s;

#ifdef PMAP_REMOVE_PAGES_CURPROC_ONLY
	if (!curproc || (pmap != vmspace_pmap(curproc->p_vmspace))) {
		printf("warning: pmap_remove_pages called with non-current pmap\n");
		return;
	}
#endif

	s = splvm();
	for (pv = TAILQ_FIRST(&pmap->pm_pvlist);
	     pv;
	     pv = npv) {
		vm_page_t m;

		npv = TAILQ_NEXT(pv, pv_plist);

		if (pv->pv_va >= eva || pv->pv_va < sva) {
			continue;
		}

		/*
		 * We cannot remove wired pages from a process' mapping at this time
		 */
		if (pv->pv_pte.pte_ig & PTE_IG_WIRED) {
			continue;
		}

		PMAP_DEBUG_VA(pv->pv_va);

		m = PHYS_TO_VM_PAGE(pmap_pte_pa(&pv->pv_pte));
		pmap_remove_pv(pmap, pv, m);
	}
	splx(s);

	pmap_invalidate_all(pmap);
}

/*
 *	pmap_page_protect:
 *
 *	Lower the permission for all mappings to a given page.
 */
void
pmap_page_protect(vm_page_t m, vm_prot_t prot)
{
	pv_entry_t pv;

	if ((prot & VM_PROT_WRITE) != 0)
		return;
	if (prot & (VM_PROT_READ | VM_PROT_EXECUTE)) {
		for (pv = TAILQ_FIRST(&m->md.pv_list);
		     pv;
		     pv = TAILQ_NEXT(pv, pv_list)) {
			int newprot = pte_prot(pv->pv_pmap, prot);
			pmap_t oldpmap = pmap_install(pv->pv_pmap);
			pmap_pte_set_prot(&pv->pv_pte, newprot);
			pmap_update_vhpt(pv);
			pmap_invalidate_page(pv->pv_pmap, pv->pv_va);
			pmap_install(oldpmap);
		}
	} else {
		pmap_remove_all(m);
	}
}

vm_offset_t
pmap_phys_address(ppn)
	int ppn;
{
	return (ia64_ptob(ppn));
}

/*
 *	pmap_ts_referenced:
 *
 *	Return the count of reference bits for a page, clearing all of them.
 */
int
pmap_ts_referenced(vm_page_t m)
{
	pv_entry_t pv;
	int count = 0;

	if (!pmap_initialized || (m->flags & PG_FICTITIOUS))
		return 0;

	for (pv = TAILQ_FIRST(&m->md.pv_list);
	     pv;
	     pv = TAILQ_NEXT(pv, pv_list)) {
		if (pv->pv_pte.pte_a) {
			pmap_t oldpmap = pmap_install(pv->pv_pmap);
			count++;
			pv->pv_pte.pte_a = 0;
			pmap_update_vhpt(pv);
			pmap_invalidate_page(pv->pv_pmap, pv->pv_va);
			pmap_install(oldpmap);
		}
	}

	return count;
}

#if 0
/*
 *	pmap_is_referenced:
 *
 *	Return whether or not the specified physical page was referenced
 *	in any physical maps.
 */
static boolean_t
pmap_is_referenced(vm_page_t m)
{
	pv_entry_t pv;

	if (!pmap_initialized || (m->flags & PG_FICTITIOUS))
		return FALSE;

	for (pv = TAILQ_FIRST(&m->md.pv_list);
	     pv;
	     pv = TAILQ_NEXT(pv, pv_list)) {
		if (pv->pv_pte.pte_a) {
			return 1;
		}
	}

	return 0;
}
#endif

/*
 *	pmap_is_modified:
 *
 *	Return whether or not the specified physical page was modified
 *	in any physical maps.
 */
boolean_t
pmap_is_modified(vm_page_t m)
{
	pv_entry_t pv;

	if (!pmap_initialized || (m->flags & PG_FICTITIOUS))
		return FALSE;

	for (pv = TAILQ_FIRST(&m->md.pv_list);
	     pv;
	     pv = TAILQ_NEXT(pv, pv_list)) {
		if (pv->pv_pte.pte_d) {
			return 1;
		}
	}

	return 0;
}

/*
 *	Clear the modify bits on the specified physical page.
 */
void
pmap_clear_modify(vm_page_t m)
{
	pv_entry_t pv;

	if (!pmap_initialized || (m->flags & PG_FICTITIOUS))
		return;

	for (pv = TAILQ_FIRST(&m->md.pv_list);
	     pv;
	     pv = TAILQ_NEXT(pv, pv_list)) {
		if (pv->pv_pte.pte_d) {
			pmap_t oldpmap = pmap_install(pv->pv_pmap);
			pv->pv_pte.pte_d = 0;
			pmap_update_vhpt(pv);
			pmap_invalidate_page(pv->pv_pmap, pv->pv_va);
			pmap_install(oldpmap);
		}
	}
}

/*
 *	pmap_clear_reference:
 *
 *	Clear the reference bit on the specified physical page.
 */
void
pmap_clear_reference(vm_page_t m)
{
	pv_entry_t pv;

	if (!pmap_initialized || (m->flags & PG_FICTITIOUS))
		return;

	for (pv = TAILQ_FIRST(&m->md.pv_list);
	     pv;
	     pv = TAILQ_NEXT(pv, pv_list)) {
		if (pv->pv_pte.pte_a) {
			pmap_t oldpmap = pmap_install(pv->pv_pmap);
			pv->pv_pte.pte_a = 0;
			pmap_update_vhpt(pv);
			pmap_invalidate_page(pv->pv_pmap, pv->pv_va);
			pmap_install(oldpmap);
		}
	}
}

/*
 * Miscellaneous support routines follow
 */

static void
ia64_protection_init()
{
	int prot, *kp, *up;

	kp = protection_codes[0];
	up = protection_codes[1];

	for (prot = 0; prot < 8; prot++) {
		switch (prot) {
		case VM_PROT_NONE | VM_PROT_NONE | VM_PROT_NONE:
			*kp++ = (PTE_AR_R << 2) | PTE_PL_KERN;
			*up++ = (PTE_AR_R << 2) | PTE_PL_KERN;
			break;

		case VM_PROT_NONE | VM_PROT_NONE | VM_PROT_EXECUTE:
			*kp++ = (PTE_AR_X_RX << 2) | PTE_PL_KERN;
			*up++ = (PTE_AR_X_RX << 2) | PTE_PL_USER;
			break;

		case VM_PROT_NONE | VM_PROT_WRITE | VM_PROT_NONE:
			*kp++ = (PTE_AR_RW << 2) | PTE_PL_KERN;
			*up++ = (PTE_AR_RW << 2) | PTE_PL_USER;
			break;

		case VM_PROT_NONE | VM_PROT_WRITE | VM_PROT_EXECUTE:
			*kp++ = (PTE_AR_RWX << 2) | PTE_PL_KERN;
			*up++ = (PTE_AR_RWX << 2) | PTE_PL_USER;
			break;

		case VM_PROT_READ | VM_PROT_NONE | VM_PROT_NONE:
			*kp++ = (PTE_AR_R << 2) | PTE_PL_KERN;
			*up++ = (PTE_AR_R << 2) | PTE_PL_USER;
			break;

		case VM_PROT_READ | VM_PROT_NONE | VM_PROT_EXECUTE:
			*kp++ = (PTE_AR_RX << 2) | PTE_PL_KERN;
			*up++ = (PTE_AR_RX << 2) | PTE_PL_USER;
			break;

		case VM_PROT_READ | VM_PROT_WRITE | VM_PROT_NONE:
			*kp++ = (PTE_AR_RW << 2) | PTE_PL_KERN;
			*up++ = (PTE_AR_RW << 2) | PTE_PL_USER;
			break;

		case VM_PROT_READ | VM_PROT_WRITE | VM_PROT_EXECUTE:
			*kp++ = (PTE_AR_RWX << 2) | PTE_PL_KERN;
			*up++ = (PTE_AR_RWX << 2) | PTE_PL_USER;
			break;
		}
	}
}

/*
 * Map a set of physical memory pages into the kernel virtual
 * address space.  Return a pointer to where it is mapped.  This
 * routine is intended to be used for mapping device memory,
 * NOT real memory.
 */
void *
pmap_mapdev(pa, size)
	vm_offset_t pa;
	vm_size_t size;
{
	return (void*) IA64_PHYS_TO_RR6(pa);
}

/*
 * perform the pmap work for mincore
 */
int
pmap_mincore(pmap, addr)
	pmap_t pmap;
	vm_offset_t addr;
{
	pv_entry_t pv;
	struct ia64_lpte *pte;
	int val = 0;

	pv = pmap_find_pv(pmap, addr);
	if (pv == 0) {
		return 0;
	}
	pte = &pv->pv_pte;

	if (pmap_pte_v(pte)) {
		vm_page_t m;
		vm_offset_t pa;

		val = MINCORE_INCORE;
		if ((pte->pte_ig & PTE_IG_MANAGED) == 0)
			return val;

		pa = pmap_pte_pa(pte);

		m = PHYS_TO_VM_PAGE(pa);

		/*
		 * Modified by us
		 */
		if (pte->pte_d)
			val |= MINCORE_MODIFIED|MINCORE_MODIFIED_OTHER;
		/*
		 * Modified by someone
		 */
		else if (pmap_is_modified(m))
			val |= MINCORE_MODIFIED_OTHER;
		/*
		 * Referenced by us
		 */
		if (pte->pte_a)
			val |= MINCORE_REFERENCED|MINCORE_REFERENCED_OTHER;

		/*
		 * Referenced by someone
		 */
		else if (pmap_ts_referenced(m)) {
			val |= MINCORE_REFERENCED_OTHER;
			vm_page_flag_set(m, PG_REFERENCED);
		}
	}
	return val;
}

void
pmap_activate(struct proc *p)
{
	pmap_install(vmspace_pmap(p->p_vmspace));
}

pmap_t
pmap_install(pmap_t pmap)
{
	pmap_t oldpmap;
	int rid;

	oldpmap = PCPU_GET(current_pmap);

	if (pmap == oldpmap || pmap == kernel_pmap)
		return pmap;

	PCPU_SET(current_pmap, pmap);
	if (!pmap) {
		/*
		 * RIDs 0..4 have no mappings to make sure we generate
		 * page faults on accesses.
		 */
		ia64_set_rr(IA64_RR_BASE(0), (0 << 8)|(PAGE_SHIFT << 2)|1);
		ia64_set_rr(IA64_RR_BASE(1), (1 << 8)|(PAGE_SHIFT << 2)|1);
		ia64_set_rr(IA64_RR_BASE(2), (2 << 8)|(PAGE_SHIFT << 2)|1);
		ia64_set_rr(IA64_RR_BASE(3), (3 << 8)|(PAGE_SHIFT << 2)|1);
		ia64_set_rr(IA64_RR_BASE(4), (4 << 8)|(PAGE_SHIFT << 2)|1);
		return oldpmap;
	}

	pmap->pm_active = 1;	/* XXX use bitmap for SMP */

 reinstall:
	rid = pmap->pm_rid & ((1 << pmap_ridbits) - 1);
	ia64_set_rr(IA64_RR_BASE(0), ((rid + 0) << 8)|(PAGE_SHIFT << 2)|1);
	ia64_set_rr(IA64_RR_BASE(1), ((rid + 1) << 8)|(PAGE_SHIFT << 2)|1);
	ia64_set_rr(IA64_RR_BASE(2), ((rid + 2) << 8)|(PAGE_SHIFT << 2)|1);
	ia64_set_rr(IA64_RR_BASE(3), ((rid + 3) << 8)|(PAGE_SHIFT << 2)|1);
	ia64_set_rr(IA64_RR_BASE(4), ((rid + 4) << 8)|(PAGE_SHIFT << 2)|1);

	/*
	 * If we need a new RID, get it now. Note that we need to
	 * remove our old mappings (if any) from the VHPT, so we will
	 * run on the old RID for a moment while we invalidate the old
	 * one. XXX maybe we should just clear out the VHPT when the
	 * RID generation rolls over.
	 */
	if ((pmap->pm_rid>>pmap_ridbits) != (pmap_nextrid>>pmap_ridbits)) {
		if (pmap->pm_rid)
			pmap_invalidate_rid(pmap);
		pmap_get_rid(pmap);
		goto reinstall;
	}

	return oldpmap;
}

vm_offset_t
pmap_addr_hint(vm_object_t obj, vm_offset_t addr, vm_size_t size)
{

	return addr;
}

#if 0
#if defined(PMAP_DEBUG)
pmap_pid_dump(int pid)
{
	pmap_t pmap;
	struct proc *p;
	int npte = 0;
	int index;
	ALLPROC_LOCK(AP_SHARED);
	LIST_FOREACH(p, &allproc, p_list) {
		if (p->p_pid != pid)
			continue;

		if (p->p_vmspace) {
			int i,j;
			index = 0;
			pmap = vmspace_pmap(p->p_vmspace);
			for(i=0;i<1024;i++) {
				pd_entry_t *pde;
				pt_entry_t *pte;
				unsigned base = i << PDRSHIFT;

				pde = &pmap->pm_pdir[i];
				if (pde && pmap_pde_v(pde)) {
					for(j=0;j<1024;j++) {
						unsigned va = base + (j << PAGE_SHIFT);
						if (va >= (vm_offset_t) VM_MIN_KERNEL_ADDRESS) {
							if (index) {
								index = 0;
								printf("\n");
							}
							ALLPROC_LOCK(AP_RELEASE);
							return npte;
						}
						pte = pmap_pte_quick( pmap, va);
						if (pte && pmap_pte_v(pte)) {
							vm_offset_t pa;
							vm_page_t m;
							pa = *(int *)pte;
							m = PHYS_TO_VM_PAGE(pa);
							printf("va: 0x%x, pt: 0x%x, h: %d, w: %d, f: 0x%x",
								va, pa, m->hold_count, m->wire_count, m->flags);
							npte++;
							index++;
							if (index >= 2) {
								index = 0;
								printf("\n");
							} else {
								printf(" ");
							}
						}
					}
				}
			}
		}
	}
	ALLPROC_LOCK(AP_RELEASE);
	return npte;
}
#endif

#if defined(DEBUG)

static void	pads __P((pmap_t pm));
static void	pmap_pvdump __P((vm_page_t m));

/* print address space of pmap*/
static void
pads(pm)
	pmap_t pm;
{
	int i, j;
	vm_offset_t va;
	pt_entry_t *ptep;

	if (pm == kernel_pmap)
		return;
	for (i = 0; i < 1024; i++)
		if (pm->pm_pdir[i])
			for (j = 0; j < 1024; j++) {
				va = (i << PDRSHIFT) + (j << PAGE_SHIFT);
				if (pm == kernel_pmap && va < KERNBASE)
					continue;
				if (pm != kernel_pmap && va > UPT_MAX_ADDRESS)
					continue;
				ptep = pmap_pte_quick(pm, va);
				if (pmap_pte_v(ptep))
					printf("%x:%x ", va, *(int *) ptep);
			};

}

static void
pmap_pvdump(pa)
	vm_offset_t pa;
{
	pv_entry_t pv;

	printf("pa %x", pa);
	m = PHYS_TO_VM_PAGE(pa);
	for (pv = TAILQ_FIRST(&m->md.pv_list);
	     pv;
	     pv = TAILQ_NEXT(pv, pv_list)) {
		printf(" -> pmap %x, va %x",
			pv->pv_pmap, pv->pv_va);
		pads(pv->pv_pmap);
	}
	printf(" ");
}
#endif
#endif