/*
 * Copyright (c) 1991 Regents of the University of California.
 * All rights reserved.
 * Copyright (c) 1994 John S. Dyson
 * All rights reserved.
 * Copyright (c) 1994 David Greenman
 * All rights reserved.
 * Copyright (c) 1998,2000 Doug Rabson
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * the Systems Programming Group of the University of Utah Computer
 * Science Department and William Jolitz of UUNET Technologies Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	from:	@(#)pmap.c	7.7 (Berkeley)	5/12/91
 *	from:	i386 Id: pmap.c,v 1.193 1998/04/19 15:22:48 bde Exp
 *		with some ideas from NetBSD's alpha pmap
 * $FreeBSD: head/sys/ia64/ia64/pmap.c 75668 2001-04-18 15:08:37Z dfr $
 */

/*
 *	Manages physical address maps.
 *
 *	In addition to hardware address maps, this
 *	module is called upon to provide software-use-only
 *	maps which may or may not be stored in the same
 *	form as hardware maps.  These pseudo-maps are
 *	used to store intermediate results from copy
 *	operations to and from address spaces.
 *
 *	Since the information managed by this module is
 *	also stored by the logical address mapping module,
 *	this module may throw away valid virtual-to-physical
 *	mappings at almost any time.  However, invalidations
 *	of virtual-to-physical mappings must be done as
 *	requested.
 *
 *	In order to cope with hardware architectures which
 *	make virtual-to-physical map invalidates expensive,
 *	this module may delay invalidate or reduced protection
 *	operations until such time as they are actually
 *	necessary.  This module is given full information as
 *	to which processors are currently using which maps,
 *	and to when physical maps must be made correct.
 */

/*
 * Following the Linux model, region IDs are allocated in groups of
 * eight so that a single region ID can be used for as many RRs as we
 * want by encoding the RR number into the low bits of the ID.
 *
 * We reserve region ID 0 for the kernel and allocate the remaining
 * IDs for user pmaps.
 *
 * Region 0..4
 *	User virtually mapped
 *
 * Region 5
 *	Kernel virtually mapped
 *
 * Region 6
 *	Kernel physically mapped uncacheable
 *
 * Region 7
 *	Kernel physically mapped cacheable
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/msgbuf.h>
#include <sys/vmmeter.h>
#include <sys/mman.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/sx.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <sys/lock.h>
#include <vm/vm_kern.h>
#include <vm/vm_page.h>
#include <vm/vm_map.h>
#include <vm/vm_object.h>
#include <vm/vm_extern.h>
#include <vm/vm_pageout.h>
#include <vm/vm_pager.h>
#include <vm/vm_zone.h>

#include <sys/user.h>

#include <machine/md_var.h>

MALLOC_DEFINE(M_PMAP, "PMAP", "PMAP Structures");

#ifndef PMAP_SHPGPERPROC
#define PMAP_SHPGPERPROC 200
#endif

#if defined(DIAGNOSTIC)
#define PMAP_DIAGNOSTIC
#endif

#define MINPV 2048

#if 0
#define PMAP_DIAGNOSTIC
#define PMAP_DEBUG
#endif

#if !defined(PMAP_DIAGNOSTIC)
#define PMAP_INLINE __inline
#else
#define PMAP_INLINE
#endif

#if 0

static void
pmap_break(void)
{
}

/* #define PMAP_DEBUG_VA(va) if ((va) == 0x120058000) pmap_break(); else */

#endif

#ifndef PMAP_DEBUG_VA
#define PMAP_DEBUG_VA(va)	do {} while(0)
#endif

/*
 * Get PDEs and PTEs for user/kernel address space
 */
#define pmap_pte_w(pte)		((pte)->pte_ig & PTE_IG_WIRED)
#define pmap_pte_managed(pte)	((pte)->pte_ig & PTE_IG_MANAGED)
#define pmap_pte_v(pte)		((pte)->pte_p)
#define pmap_pte_pa(pte)	(((pte)->pte_ppn) << 12)
#define pmap_pte_prot(pte)	(((pte)->pte_ar << 2) | (pte)->pte_pl)

#define pmap_pte_set_w(pte, v)	((v) ? ((pte)->pte_ig |= PTE_IG_WIRED) \
				     : ((pte)->pte_ig &= ~PTE_IG_WIRED))
#define pmap_pte_set_prot(pte, v) do {		\
	(pte)->pte_ar = v >> 2;			\
	(pte)->pte_pl = v & 3;			\
} while (0)

/*
 * Given a map and a machine independent protection code,
 * convert to an ia64 protection code.
 */
#define pte_prot(m, p)	(protection_codes[m == pmap_kernel() ? 0 : 1][p])
int	protection_codes[2][8];
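
/*
 * Illustrative note (not part of the original source): the ia64
 * protection codes stored in protection_codes[][] and handled by the
 * macros above pack the access-rights field in bits 2 and up and the
 * privilege level in the low two bits.  A minimal sketch of the
 * encoding, using a hypothetical helper and only names already used
 * elsewhere in this file:
 */
#if 0
/* Example only: pack and unpack a protection code as pte_prot() expects. */
static void
pmap_prot_example(struct ia64_lpte *pte)
{
	int prot;

	/* Pack an (ar, pl) pair the way ia64_protection_init() does. */
	prot = (PTE_AR_RWX << 2) | PTE_PL_KERN;

	/* Unpack it again, mirroring pmap_pte_set_prot(). */
	pte->pte_ar = prot >> 2;	/* access rights */
	pte->pte_pl = prot & 3;		/* privilege level */
}
#endif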

/*
 * Return non-zero if this pmap is currently active
 */
#define pmap_isactive(pmap)	(pmap->pm_active)

/*
 * Statically allocated kernel pmap
 */
static struct pmap kernel_pmap_store;
pmap_t kernel_pmap;

vm_offset_t avail_start;	/* PA of first available physical page */
vm_offset_t avail_end;		/* PA of last available physical page */
vm_offset_t virtual_avail;	/* VA of first avail page (after kernel bss) */
vm_offset_t virtual_end;	/* VA of last avail page (end of kernel AS) */
static boolean_t pmap_initialized = FALSE;	/* Has pmap_init completed? */

vm_offset_t kernel_vm_end;

/*
 * Values for ptc.e. XXX values for SKI.
 */
static u_int64_t pmap_pte_e_base = 0x100000000;
static u_int64_t pmap_pte_e_count1 = 3;
static u_int64_t pmap_pte_e_count2 = 2;
static u_int64_t pmap_pte_e_stride1 = 0x2000;
static u_int64_t pmap_pte_e_stride2 = 0x100000000;

/*
 * Data for the RID allocator
 */
static int pmap_nextrid;
static int pmap_ridbits = 18;

/*
 * Data for the pv entry allocation mechanism
 */
static vm_zone_t pvzone;
static struct vm_zone pvzone_store;
static struct vm_object pvzone_obj;
static int pv_entry_count = 0, pv_entry_max = 0, pv_entry_high_water = 0;
static int pmap_pagedaemon_waken = 0;
static struct pv_entry *pvinit;

static PMAP_INLINE void	free_pv_entry __P((pv_entry_t pv));
static pv_entry_t get_pv_entry __P((void));
static void	ia64_protection_init __P((void));

static void	pmap_remove_all __P((vm_page_t m));
static void	pmap_enter_quick __P((pmap_t pmap, vm_offset_t va, vm_page_t m));

vm_offset_t
pmap_steal_memory(vm_size_t size)
{
	vm_size_t bank_size;
	vm_offset_t pa, va;

	size = round_page(size);

	bank_size = phys_avail[1] - phys_avail[0];
	while (size > bank_size) {
		int i;
		for (i = 0; phys_avail[i+2]; i += 2) {
			phys_avail[i] = phys_avail[i+2];
			phys_avail[i+1] = phys_avail[i+3];
		}
		phys_avail[i] = 0;
		phys_avail[i+1] = 0;
		if (!phys_avail[0])
			panic("pmap_steal_memory: out of memory");
		bank_size = phys_avail[1] - phys_avail[0];
	}

	pa = phys_avail[0];
	phys_avail[0] += size;

	va = IA64_PHYS_TO_RR7(pa);
	bzero((caddr_t) va, size);
	return va;
}

/*
 * Bootstrap the system enough to run with virtual memory.
 */
void
pmap_bootstrap()
{
	int i;

	/*
	 * Setup RIDs. We use the bits above pmap_ridbits for a
	 * generation counter, saving generation zero for
	 * 'invalid'. RIDs 0..7 are reserved for the kernel.
	 */
	pmap_nextrid = (1 << pmap_ridbits) + 8;

	avail_start = phys_avail[0];
	for (i = 0; phys_avail[i+2]; i += 2)
		;
	avail_end = phys_avail[i+1];

	virtual_avail = IA64_RR_BASE(5);
	virtual_end = IA64_RR_BASE(6)-1;

	/*
	 * Initialize protection array.
	 */
	ia64_protection_init();

	/*
	 * The kernel's pmap is statically allocated so we don't have to use
	 * pmap_create, which is unlikely to work correctly at this part of
	 * the boot sequence (XXX and which no longer exists).
	 */
	kernel_pmap = &kernel_pmap_store;
	kernel_pmap->pm_rid = 0;
	kernel_pmap->pm_count = 1;
	kernel_pmap->pm_active = 1;
	TAILQ_INIT(&kernel_pmap->pm_pvlist);

	/*
	 * Region 5 is mapped via the vhpt.
	 */
	ia64_set_rr(IA64_RR_BASE(5),
		    (5 << 8) | (PAGE_SHIFT << 2) | 1);

	/*
	 * Region 6 is direct mapped UC and region 7 is direct mapped
	 * WB. The details of this are controlled by the Alt {I,D}TLB
	 * handlers. Here we just make sure that they have the largest
	 * possible page size to minimise TLB usage.
	 */
	ia64_set_rr(IA64_RR_BASE(6), (6 << 8) | (28 << 2));
	ia64_set_rr(IA64_RR_BASE(7), (7 << 8) | (28 << 2));

	/*
	 * Set up proc0's PCB.
	 */
#if 0
	proc0.p_addr->u_pcb.pcb_hw.apcb_asn = 0;
#endif
}
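
/*
 * Illustrative sketch (not part of the original source): the two
 * physically mapped kernel regions set up above give every physical
 * address a cacheable alias in region 7 and an uncacheable alias in
 * region 6.  Assuming IA64_RR_BASE(n) simply places n in the top
 * three (region select) bits of the virtual address, either alias of
 * a physical address can be formed directly:
 */
#if 0
/* Example only: form the direct-mapped aliases of a physical address. */
static void
pmap_direct_map_example(vm_offset_t pa, vm_offset_t *uc, vm_offset_t *wb)
{
	*uc = IA64_PHYS_TO_RR6(pa);	/* uncacheable: used by pmap_mapdev() */
	*wb = IA64_PHYS_TO_RR7(pa);	/* cacheable: pmap_steal_memory() etc. */
}
#endif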

/*
 *	Initialize the pmap module.
 *	Called by vm_init, to initialize any structures that the pmap
 *	system needs to map virtual memory.
 *	pmap_init has been enhanced to support discontiguous physical
 *	memory in a fairly consistent way.
 */
void
pmap_init(phys_start, phys_end)
	vm_offset_t phys_start, phys_end;
{
	int i;
	int initial_pvs;

	/*
	 * Allocate memory for random pmap data structures.  Includes the
	 * pv_head_table.
	 */
	for (i = 0; i < vm_page_array_size; i++) {
		vm_page_t m;

		m = &vm_page_array[i];
		TAILQ_INIT(&m->md.pv_list);
		m->md.pv_list_count = 0;
	}

	/*
	 * init the pv free list
	 */
	initial_pvs = vm_page_array_size;
	if (initial_pvs < MINPV)
		initial_pvs = MINPV;
	pvzone = &pvzone_store;
	pvinit = (struct pv_entry *) kmem_alloc(kernel_map,
		initial_pvs * sizeof (struct pv_entry));
	zbootinit(pvzone, "PV ENTRY", sizeof (struct pv_entry), pvinit,
		  initial_pvs);

	/*
	 * Now it is safe to enable pv_table recording.
	 */
	pmap_initialized = TRUE;
}

/*
 * Initialize the address space (zone) for the pv_entries.  Set a
 * high water mark so that the system can recover from excessive
 * numbers of pv entries.
 */
void
pmap_init2()
{
	pv_entry_max = PMAP_SHPGPERPROC * maxproc + vm_page_array_size;
	pv_entry_high_water = 9 * (pv_entry_max / 10);
	zinitna(pvzone, &pvzone_obj, NULL, 0, pv_entry_max, ZONE_INTERRUPT, 1);
}


/***************************************************
 * Manipulate TLBs for a pmap
 ***************************************************/

static void
pmap_invalidate_rid(pmap_t pmap)
{
	KASSERT(pmap != kernel_pmap,
		("changing kernel_pmap's RID"));
	KASSERT(pmap == PCPU_GET(current_pmap),
		("invalidating RID of non-current pmap"));
	pmap_remove_pages(pmap, IA64_RR_BASE(0), IA64_RR_BASE(5));
	pmap->pm_rid = 0;
}

static void
pmap_invalidate_page(pmap_t pmap, vm_offset_t va)
{
	KASSERT((pmap == kernel_pmap || pmap == PCPU_GET(current_pmap)),
		("invalidating TLB for non-current pmap"));
	ia64_ptc_l(va, PAGE_SHIFT << 2);
}

static void
pmap_invalidate_all(pmap_t pmap)
{
	u_int64_t addr;
	int i, j;
	critical_t psr;

	KASSERT((pmap == kernel_pmap || pmap == PCPU_GET(current_pmap)),
		("invalidating TLB for non-current pmap"));

	psr = critical_enter();
	addr = pmap_pte_e_base;
	for (i = 0; i < pmap_pte_e_count1; i++) {
		for (j = 0; j < pmap_pte_e_count2; j++) {
			ia64_ptc_e(addr);
			addr += pmap_pte_e_stride2;
		}
		addr += pmap_pte_e_stride1;
	}
	critical_exit(psr);
}

static void
pmap_get_rid(pmap_t pmap)
{
	if ((pmap_nextrid & ((1 << pmap_ridbits) - 1)) == 0) {
		/*
		 * Start a new RID generation.
		 *
		 * Invalidate all per-process mappings and I-cache
		 */
		pmap_nextrid += 8;

		/*
		 * Since we are about to start re-using RIDs, we must
		 * clear out the TLB entries which are tagged with the
		 * old RIDs.
		 */
#if 0
		IA64_TBIAP();
		ia64_pal_imb();	/* XXX overkill? */
#endif
	}
	pmap->pm_rid = pmap_nextrid;
	pmap_nextrid += 8;
}
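
/*
 * Illustrative note (not part of the original source): with
 * pmap_ridbits = 18, pm_rid values are handed out eight at a time
 * from the low 18 bits, so one generation covers (1 << 15) groups of
 * RIDs, the first of which (RIDs 0..7) is reserved for the kernel.
 * A minimal sketch (hypothetical helper) of how a pm_rid value
 * splits into its pieces, mirroring the expressions used by
 * pmap_install() later in this file:
 */
#if 0
/* Example only: decompose a pm_rid value. */
static void
pmap_rid_example(pmap_t pmap, u_int64_t *generation, u_int64_t *group)
{
	*generation = pmap->pm_rid >> pmap_ridbits;		/* 0 means invalid */
	*group = pmap->pm_rid & ((1 << pmap_ridbits) - 1);	/* multiple of 8 */
}
#endif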

/***************************************************
 * Low level helper routines.....
 ***************************************************/

/*
 * Install a pte into the VHPT
 */
static PMAP_INLINE void
pmap_install_pte(struct ia64_lpte *vhpte, struct ia64_lpte *pte)
{
	u_int64_t *vhp, *p;

	/* invalidate the pte */
	atomic_set_64(&vhpte->pte_tag, 1L << 63);
	ia64_mf();			/* make sure everyone sees */

	vhp = (u_int64_t *) vhpte;
	p = (u_int64_t *) pte;

	vhp[0] = p[0];
	vhp[1] = p[1];
	vhp[2] = p[2];			/* sets ti to one */

	ia64_mf();
}

/*
 * Compare essential parts of pte.
 */
static PMAP_INLINE int
pmap_equal_pte(struct ia64_lpte *pte1, struct ia64_lpte *pte2)
{
	return *(u_int64_t *) pte1 == *(u_int64_t *) pte2;
}

/*
 * this routine defines the region(s) of memory that should
 * not be tested for the modified bit.
 */
static PMAP_INLINE int
pmap_track_modified(vm_offset_t va)
{
	if ((va < clean_sva) || (va >= clean_eva))
		return 1;
	else
		return 0;
}

/*
 * Create the UPAGES for a new process.
 * This routine directly affects the fork perf for a process.
 */
void
pmap_new_proc(struct proc *p)
{
	struct user *up;

	/*
	 * Use contigmalloc for user area so that we can use a region
	 * 7 address for it which makes it impossible to accidentally
	 * lose when recording a trapframe.
	 */
	up = contigmalloc(UPAGES * PAGE_SIZE, M_PMAP,
			  M_WAITOK,
			  0ul,
			  256*1024*1024 - 1,
			  PAGE_SIZE,
			  256*1024*1024);

	p->p_md.md_uservirt = up;
	p->p_addr = (struct user *)
		IA64_PHYS_TO_RR7(ia64_tpa((u_int64_t) up));
}

/*
 * Dispose the UPAGES for a process that has exited.
 * This routine directly impacts the exit perf of a process.
 */
void
pmap_dispose_proc(p)
	struct proc *p;
{
	contigfree(p->p_md.md_uservirt, UPAGES * PAGE_SIZE, M_PMAP);
	p->p_md.md_uservirt = 0;
	p->p_addr = 0;
}

/*
 * Allow the UPAGES for a process to be prejudicially paged out.
 */
void
pmap_swapout_proc(p)
	struct proc *p;
{
#if 0
	int i;
	vm_object_t upobj;
	vm_page_t m;

	/*
	 * Make sure we aren't fpcurproc.
	 */
	ia64_fpstate_save(p, 1);

	upobj = p->p_upages_obj;
	/*
	 * let the upages be paged
	 */
	for (i = 0; i < UPAGES; i++) {
		if ((m = vm_page_lookup(upobj, i)) == NULL)
			panic("pmap_swapout_proc: upage already missing???");
		vm_page_dirty(m);
		vm_page_unwire(m, 0);
		pmap_kremove((vm_offset_t)p->p_addr + PAGE_SIZE * i);
	}
#endif
}

/*
 * Bring the UPAGES for a specified process back in.
 */
void
pmap_swapin_proc(p)
	struct proc *p;
{
#if 0
	int i, rv;
	vm_object_t upobj;
	vm_page_t m;

	upobj = p->p_upages_obj;
	for (i = 0; i < UPAGES; i++) {

		m = vm_page_grab(upobj, i, VM_ALLOC_NORMAL | VM_ALLOC_RETRY);

		pmap_kenter(((vm_offset_t) p->p_addr) + i * PAGE_SIZE,
			VM_PAGE_TO_PHYS(m));

		if (m->valid != VM_PAGE_BITS_ALL) {
			rv = vm_pager_get_pages(upobj, &m, 1, 0);
			if (rv != VM_PAGER_OK)
				panic("pmap_swapin_proc: cannot get upages for proc: %d\n", p->p_pid);
			m = vm_page_lookup(upobj, i);
			m->valid = VM_PAGE_BITS_ALL;
		}

		vm_page_wire(m);
		vm_page_wakeup(m);
		vm_page_flag_set(m, PG_MAPPED | PG_WRITEABLE);
	}
#endif
}

/***************************************************
 * Page table page management routines.....
 ***************************************************/

void
pmap_pinit0(pmap)
	struct pmap *pmap;
{
	/*
	 * kernel_pmap is the same as any other pmap.
	 */
	pmap_pinit(pmap);
	pmap->pm_flags = 0;
	pmap->pm_rid = 0;
	pmap->pm_count = 1;
	pmap->pm_ptphint = NULL;
	pmap->pm_active = 0;
	TAILQ_INIT(&pmap->pm_pvlist);
	bzero(&pmap->pm_stats, sizeof pmap->pm_stats);
}

/*
 * Initialize a preallocated and zeroed pmap structure,
 * such as one in a vmspace structure.
 */
void
pmap_pinit(pmap)
	register struct pmap *pmap;
{
	pmap->pm_flags = 0;
	pmap->pm_rid = 0;
	pmap->pm_count = 1;
	pmap->pm_ptphint = NULL;
	pmap->pm_active = 0;
	TAILQ_INIT(&pmap->pm_pvlist);
	bzero(&pmap->pm_stats, sizeof pmap->pm_stats);
}

/*
 * Wire in kernel global address entries.  To avoid a race condition
 * between pmap initialization and pmap_growkernel, this procedure
 * should be called after the vmspace is attached to the process
 * but before this pmap is activated.
 */
void
pmap_pinit2(pmap)
	struct pmap *pmap;
{
}

/***************************************************
 * Pmap allocation/deallocation routines.
 ***************************************************/

/*
 * Release any resources held by the given physical map.
 * Called when a pmap initialized by pmap_pinit is being released.
 * Should only be called if the map contains no valid mappings.
 */
void
pmap_release(pmap_t pmap)
{
#if 0
	/* There is no pte object on ia64 for this check to apply to. */
	if (object->ref_count != 1)
		panic("pmap_release: pteobj reference count != 1");
#endif
}

/*
 * grow the number of kernel page table entries, if needed
 */
void
pmap_growkernel(vm_offset_t addr)
{
}

/*
 * Retire the given physical map from service.
 * Should only be called if the map contains
 * no valid mappings.
 */
void
pmap_destroy(pmap_t pmap)
{
	int count;

	if (pmap == NULL)
		return;

	count = --pmap->pm_count;
	if (count == 0) {
		pmap_release(pmap);
		panic("destroying a pmap is not yet implemented");
	}
}

/*
 * Add a reference to the specified pmap.
 */
void
pmap_reference(pmap_t pmap)
{
	if (pmap != NULL) {
		pmap->pm_count++;
	}
}

/***************************************************
 * page management routines.
 ***************************************************/

/*
 * free the pv_entry back to the free list
 */
static PMAP_INLINE void
free_pv_entry(pv_entry_t pv)
{
	pv_entry_count--;
	zfree(pvzone, pv);
}

/*
 * get a new pv_entry, allocating a block from the system
 * when needed.
 * the memory allocation is performed bypassing the malloc code
 * because of the possibility of allocations at interrupt time.
 */
static pv_entry_t
get_pv_entry(void)
{
	/*
	 * We can get called a few times really early before
	 * pmap_init() has finished allocating the pvzone (mostly as a
	 * result of the call to kmem_alloc() in pmap_init()).  We allow
	 * a small number of entries to be allocated statically to
	 * cover this.
	 */
	if (!pvinit) {
#define PV_BOOTSTRAP_NEEDED	512
		static struct pv_entry pvbootentries[PV_BOOTSTRAP_NEEDED];
		static int pvbootnext = 0;

		if (pvbootnext == PV_BOOTSTRAP_NEEDED)
			panic("get_pv_entry: called too many times"
			      " before pmap_init is finished");
		return &pvbootentries[pvbootnext++];
	}

	pv_entry_count++;
	if (pv_entry_high_water &&
	    (pv_entry_count > pv_entry_high_water) &&
	    (pmap_pagedaemon_waken == 0)) {
		pmap_pagedaemon_waken = 1;
		wakeup(&vm_pages_needed);
	}
	return (pv_entry_t) IA64_PHYS_TO_RR7(vtophys(zalloc(pvzone)));
}

/*
 * Add a pv_entry to the VHPT.
 */
static void
pmap_enter_vhpt(pv_entry_t pv)
{
	struct ia64_lpte *vhpte;

	vhpte = (struct ia64_lpte *) ia64_thash(pv->pv_va);

	pv->pv_pte.pte_chain = vhpte->pte_chain;
	vhpte->pte_chain = ia64_tpa((vm_offset_t) pv);

	if (!vhpte->pte_p && pv->pv_pte.pte_p)
		pmap_install_pte(vhpte, &pv->pv_pte);
	else
		ia64_mf();
}

/*
 * Update VHPT after pv->pv_pte has changed.
 */
static void
pmap_update_vhpt(pv_entry_t pv)
{
	struct ia64_lpte *vhpte;

	vhpte = (struct ia64_lpte *) ia64_thash(pv->pv_va);

	if ((!vhpte->pte_p || vhpte->pte_tag == pv->pv_pte.pte_tag)
	    && pv->pv_pte.pte_p)
		pmap_install_pte(vhpte, &pv->pv_pte);
}

/*
 * Remove a pv_entry from the VHPT. Return true if it worked.
 */
static int
pmap_remove_vhpt(pv_entry_t pv)
{
	struct ia64_lpte *pte;
	struct ia64_lpte *lpte;
	struct ia64_lpte *vhpte;
	u_int64_t tag;

	vhpte = (struct ia64_lpte *) ia64_thash(pv->pv_va);

	/*
	 * If the VHPTE is invalid, there can't be a collision chain.
	 */
	if (!vhpte->pte_p) {
		KASSERT(!vhpte->pte_chain, ("bad vhpte"));
		return 0;
	}

	lpte = vhpte;
	pte = (struct ia64_lpte *) IA64_PHYS_TO_RR7(vhpte->pte_chain);
	tag = ia64_ttag(pv->pv_va);

	while (pte->pte_tag != tag) {
		lpte = pte;
		if (pte->pte_chain)
			pte = (struct ia64_lpte *) IA64_PHYS_TO_RR7(pte->pte_chain);
		else
			return 0; /* error here? */
	}

	/*
	 * Snip this pv_entry out of the collision chain.
	 */
	lpte->pte_chain = pte->pte_chain;

	/*
	 * If the VHPTE matches as well, change it to map the first
	 * element from the chain if there is one.
	 */
	if (vhpte->pte_tag == tag) {
		if (vhpte->pte_chain) {
			pte = (struct ia64_lpte *)
				IA64_PHYS_TO_RR7(vhpte->pte_chain);
			pmap_install_pte(vhpte, pte);
		} else {
			vhpte->pte_p = 0;
			ia64_mf();
		}
	}

	return 1;
}
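
/*
 * Descriptive note (not part of the original source): the three
 * routines above maintain a simple hash table.  ia64_thash(va)
 * selects the VHPT head entry for a virtual address and
 * ia64_ttag(va) gives the tag that identifies the mapping.  Each
 * pv_entry embeds a complete pte whose pte_chain field holds the
 * physical address of the next entry hashing to the same head, so a
 * lookup converts each link back to a region 7 pointer with
 * IA64_PHYS_TO_RR7() and walks the chain comparing tags, exactly as
 * pmap_find_pv() does below.  The head entry itself is kept as a
 * copy of one chain member so that the hardware VHPT walker can
 * satisfy most translations without faulting into the collision
 * chain search.
 */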

/*
 * Make a pv_entry_t which maps the given virtual address. The pte
 * will be initialised with pte_p = 0. The function pmap_set_pv()
 * should be called to change the value of the pte.
 * Must be called at splvm().
 */
static pv_entry_t
pmap_make_pv(pmap_t pmap, vm_offset_t va)
{
	pv_entry_t pv;

	pv = get_pv_entry();
	bzero(pv, sizeof(*pv));
	pv->pv_va = va;
	pv->pv_pmap = pmap;

	pv->pv_pte.pte_p = 0;		/* invalid for now */
	pv->pv_pte.pte_ma = PTE_MA_WB;	/* cacheable, write-back */
	pv->pv_pte.pte_a = 0;
	pv->pv_pte.pte_d = 0;
	pv->pv_pte.pte_pl = 0;		/* privilege level 0 */
	pv->pv_pte.pte_ar = 3;		/* read/write/execute */
	pv->pv_pte.pte_ppn = 0;		/* physical address */
	pv->pv_pte.pte_ed = 0;
	pv->pv_pte.pte_ig = 0;

	pv->pv_pte.pte_ps = PAGE_SHIFT;	/* page size */
	pv->pv_pte.pte_key = 0;		/* protection key */

	pv->pv_pte.pte_tag = ia64_ttag(va);

	pmap_enter_vhpt(pv);

	TAILQ_INSERT_TAIL(&pmap->pm_pvlist, pv, pv_plist);
	pmap->pm_stats.resident_count++;

	return pv;
}

/*
 * Initialise a pv_entry_t with a given physical address and
 * protection code. If the passed vm_page_t is non-zero, the entry is
 * added to its list of mappings.
 * Must be called at splvm().
 */
static void
pmap_set_pv(pmap_t pmap, pv_entry_t pv, vm_offset_t pa,
	    int prot, vm_page_t m)
{
	if (pv->pv_pte.pte_p && pv->pv_pte.pte_ig & PTE_IG_MANAGED) {
		vm_offset_t opa = pv->pv_pte.pte_ppn << 12;
		vm_page_t om = PHYS_TO_VM_PAGE(opa);

		TAILQ_REMOVE(&om->md.pv_list, pv, pv_list);
		om->md.pv_list_count--;

		if (TAILQ_FIRST(&om->md.pv_list) == NULL)
			vm_page_flag_clear(om, PG_MAPPED | PG_WRITEABLE);
	}

	pv->pv_pte.pte_p = 1;		/* set to valid */

	/*
	 * Only track access/modify for managed pages.
	 */
	if (m) {
		pv->pv_pte.pte_a = 0;
		pv->pv_pte.pte_d = 0;
	} else {
		pv->pv_pte.pte_a = 1;
		pv->pv_pte.pte_d = 1;
	}

	pv->pv_pte.pte_pl = prot & 3;	/* privilege level */
	pv->pv_pte.pte_ar = prot >> 2;	/* access rights */
	pv->pv_pte.pte_ppn = pa >> 12;	/* physical address */

	if (m) {
		pv->pv_pte.pte_ig |= PTE_IG_MANAGED;
		TAILQ_INSERT_TAIL(&m->md.pv_list, pv, pv_list);
		m->md.pv_list_count++;
	}

	/*
	 * Update the VHPT entry if it needs to change.
	 */
	pmap_update_vhpt(pv);
}

/*
 * Remove a mapping represented by a particular pv_entry_t. If the
 * passed vm_page_t is non-zero, then the entry is removed from it.
 * Must be called at splvm().
 */
static int
pmap_remove_pv(pmap_t pmap, pv_entry_t pv, vm_page_t m)
{
	int rtval;

	/*
	 * First remove from the VHPT.
	 */
	rtval = pmap_remove_vhpt(pv);
	if (!rtval)
		return rtval;

	if ((pv->pv_pte.pte_ig & PTE_IG_MANAGED) && m) {
		TAILQ_REMOVE(&m->md.pv_list, pv, pv_list);
		m->md.pv_list_count--;

		if (TAILQ_FIRST(&m->md.pv_list) == NULL)
			vm_page_flag_clear(m, PG_MAPPED | PG_WRITEABLE);
	}

	TAILQ_REMOVE(&pmap->pm_pvlist, pv, pv_plist);
	pmap->pm_stats.resident_count--;

	free_pv_entry(pv);

	return (rtval);
}

/*
 * Find a pv given a pmap and virtual address.
 */
static pv_entry_t
pmap_find_pv(pmap_t pmap, vm_offset_t va)
{
	struct ia64_lpte *pte;
	u_int64_t tag;

	pte = (struct ia64_lpte *) ia64_thash(va);
	if (!pte->pte_chain)
		return 0;

	tag = ia64_ttag(va);
	pte = (struct ia64_lpte *) IA64_PHYS_TO_RR7(pte->pte_chain);

	while (pte->pte_tag != tag) {
		if (pte->pte_chain)
			pte = (struct ia64_lpte *) IA64_PHYS_TO_RR7(pte->pte_chain);
		else
			return 0;
	}

	return (pv_entry_t) pte;	/* XXX wrong va */
}

/*
 *	Routine:	pmap_extract
 *	Function:
 *		Extract the physical page address associated
 *		with the given map/virtual_address pair.
 */
vm_offset_t
pmap_extract(pmap, va)
	register pmap_t pmap;
	vm_offset_t va;
{
	pv_entry_t pv = pmap_find_pv(pmap, va);
	if (pv)
		return pmap_pte_pa(&pv->pv_pte);
	else
		return 0;
}

/***************************************************
 * Low level mapping routines.....
 ***************************************************/

/*
 * Add a list of wired pages to the kva
 * this routine is only used for temporary
 * kernel mappings that do not need to have
 * page modification or references recorded.
 * Note that old mappings are simply written
 * over.  The page *must* be wired.
 */
void
pmap_qenter(vm_offset_t va, vm_page_t *m, int count)
{
	int i, inval;
	pv_entry_t pv;

	for (i = 0; i < count; i++) {
		vm_offset_t tva = va + i * PAGE_SIZE;
		pv = pmap_find_pv(kernel_pmap, tva);
		inval = 0;
		if (!pv)
			pv = pmap_make_pv(kernel_pmap, tva);
		else
			inval = 1;

		PMAP_DEBUG_VA(va);
		pmap_set_pv(kernel_pmap, pv,
			    VM_PAGE_TO_PHYS(m[i]),
			    (PTE_AR_RWX<<2) | PTE_PL_KERN, 0);
		if (inval)
			pmap_invalidate_page(kernel_pmap, tva);
	}
}

/*
 * this routine jerks page mappings from the
 * kernel -- it is meant only for temporary mappings.
 */
void
pmap_qremove(va, count)
	vm_offset_t va;
	int count;
{
	int i;
	pv_entry_t pv;

	for (i = 0; i < count; i++) {
		pv = pmap_find_pv(kernel_pmap, va);
		PMAP_DEBUG_VA(va);
		if (pv) {
			pmap_remove_pv(kernel_pmap, pv, 0);
			pmap_invalidate_page(kernel_pmap, va);
		}
		va += PAGE_SIZE;
	}
}

/*
 * Add a wired page to the kva.
 */
void
pmap_kenter(vm_offset_t va, vm_offset_t pa)
{
	pv_entry_t pv;

	pv = pmap_find_pv(kernel_pmap, va);
	if (!pv)
		pv = pmap_make_pv(kernel_pmap, va);
	pmap_set_pv(kernel_pmap, pv,
		    pa, (PTE_AR_RWX<<2) | PTE_PL_KERN, 0);
	pmap_invalidate_page(kernel_pmap, va);
}

/*
 * Remove a page from the kva
 */
void
pmap_kremove(vm_offset_t va)
{
	pv_entry_t pv;

	pv = pmap_find_pv(kernel_pmap, va);
	if (pv) {
		pmap_remove_pv(kernel_pmap, pv, 0);
		pmap_invalidate_page(kernel_pmap, va);
	}
}
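
/*
 * Usage sketch (not part of the original source): pmap_kenter() and
 * pmap_kremove() make unmanaged, wired kernel mappings, so a caller
 * needing a transient window onto a physical page might do something
 * like the fragment below.  Here va, pa and buf are placeholders;
 * va would be a free kernel virtual address obtained elsewhere.
 */
#if 0
	pmap_kenter(va, pa);			/* map the physical page at va */
	bcopy((void *) va, buf, PAGE_SIZE);	/* ... use the mapping ... */
	pmap_kremove(va);			/* and tear it down again */
#endif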

/*
 * Used to map a range of physical addresses into kernel
 * virtual address space.
 *
 * The value passed in '*virt' is a suggested virtual address for
 * the mapping. Architectures which can support a direct-mapped
 * physical to virtual region can return the appropriate address
 * within that region, leaving '*virt' unchanged.  Other
 * architectures should map the pages starting at '*virt' and
 * update '*virt' with the first usable address after the mapped
 * region.
 */
vm_offset_t
pmap_map(vm_offset_t *virt, vm_offset_t start, vm_offset_t end, int prot)
{
	return IA64_PHYS_TO_RR7(start);
}

/*
 * This routine is very drastic, but can save the system
 * in a pinch.
 */
void
pmap_collect()
{
	int i;
	vm_page_t m;
	static int warningdone = 0;

	if (pmap_pagedaemon_waken == 0)
		return;

	if (warningdone < 5) {
		printf("pmap_collect: collecting pv entries -- suggest increasing PMAP_SHPGPERPROC\n");
		warningdone++;
	}

	for (i = 0; i < vm_page_array_size; i++) {
		m = &vm_page_array[i];
		if (m->wire_count || m->hold_count || m->busy ||
		    (m->flags & PG_BUSY))
			continue;
		pmap_remove_all(m);
	}
	pmap_pagedaemon_waken = 0;
}

/*
 * Remove a single page from a process address space
 */
static void
pmap_remove_page(pmap_t pmap, vm_offset_t va)
{
	pv_entry_t pv;
	vm_page_t m;
	int rtval;
	int s;

	s = splvm();

	pv = pmap_find_pv(pmap, va);

	rtval = 0;
	if (pv) {
		m = PHYS_TO_VM_PAGE(pmap_pte_pa(&pv->pv_pte));
		rtval = pmap_remove_pv(pmap, pv, m);
		pmap_invalidate_page(pmap, va);
	}

	splx(s);
	return;
}

/*
 *	Remove the given range of addresses from the specified map.
 *
 *	It is assumed that the start and end are properly
 *	rounded to the page size.
 */
void
pmap_remove(pmap_t pmap, vm_offset_t sva, vm_offset_t eva)
{
	vm_offset_t va, nva;

	if (pmap == NULL)
		return;

	if (pmap->pm_stats.resident_count == 0)
		return;

	/*
	 * special handling of removing one page.  a very
	 * common operation and easy to short circuit some
	 * code.
	 */
	if (sva + PAGE_SIZE == eva) {
		pmap_remove_page(pmap, sva);
		return;
	}

	if (atop(eva - sva) < pmap->pm_stats.resident_count) {
		for (va = sva; va < eva; va = nva) {
			pmap_remove_page(pmap, va);
			nva = va + PAGE_SIZE;
		}
	} else {
		pv_entry_t pv, pvnext;
		int s;

		s = splvm();
		for (pv = TAILQ_FIRST(&pmap->pm_pvlist);
		     pv;
		     pv = pvnext) {
			pvnext = TAILQ_NEXT(pv, pv_plist);
			if (pv->pv_va >= sva && pv->pv_va < eva) {
				vm_page_t m = PHYS_TO_VM_PAGE(pmap_pte_pa(&pv->pv_pte));
				va = pv->pv_va;
				pmap_remove_pv(pmap, pv, m);
				pmap_invalidate_page(pmap, va);
			}
		}
		splx(s);
	}
}

/*
 *	Routine:	pmap_remove_all
 *	Function:
 *		Removes this physical page from
 *		all physical maps in which it resides.
 *		Reflects back modify bits to the pager.
 *
 *	Notes:
 *		Original versions of this routine were very
 *		inefficient because they iteratively called
 *		pmap_remove (slow...)
 */

static void
pmap_remove_all(vm_page_t m)
{
	register pv_entry_t pv;
	int nmodify;
	int s;

	nmodify = 0;
#if defined(PMAP_DIAGNOSTIC)
	/*
	 * XXX this makes pmap_page_protect(NONE) illegal for non-managed
	 * pages!
	 */
	if (!pmap_initialized || (m->flags & PG_FICTITIOUS)) {
		panic("pmap_page_protect: illegal for unmanaged page, va: 0x%lx", VM_PAGE_TO_PHYS(m));
	}
#endif

	s = splvm();

	while ((pv = TAILQ_FIRST(&m->md.pv_list)) != NULL) {
		vm_page_t m = PHYS_TO_VM_PAGE(pmap_pte_pa(&pv->pv_pte));
		vm_offset_t va = pv->pv_va;
		pmap_remove_pv(pv->pv_pmap, pv, m);
		pmap_invalidate_page(pv->pv_pmap, va);
	}

	vm_page_flag_clear(m, PG_MAPPED | PG_WRITEABLE);

	splx(s);
	return;
}

/*
 *	Set the physical protection on the
 *	specified range of this map as requested.
 */
void
pmap_protect(pmap_t pmap, vm_offset_t sva, vm_offset_t eva, vm_prot_t prot)
{
	pmap_t oldpmap;
	pv_entry_t pv;
	int newprot;

	if (pmap == NULL)
		return;

	oldpmap = pmap_install(pmap);

	if ((prot & VM_PROT_READ) == VM_PROT_NONE) {
		pmap_remove(pmap, sva, eva);
		pmap_install(oldpmap);
		return;
	}

	if (prot & VM_PROT_WRITE) {
		pmap_install(oldpmap);
		return;
	}

	newprot = pte_prot(pmap, prot);

	if ((sva & PAGE_MASK) || (eva & PAGE_MASK))
		panic("pmap_protect: unaligned addresses");

	while (sva < eva) {
		/*
		 * If page is invalid, skip this page
		 */
		pv = pmap_find_pv(pmap, sva);
		if (!pv) {
			sva += PAGE_SIZE;
			continue;
		}

		if (pmap_pte_prot(&pv->pv_pte) != newprot) {
			pmap_pte_set_prot(&pv->pv_pte, newprot);
			pmap_update_vhpt(pv);
			pmap_invalidate_page(pmap, sva);
		}

		sva += PAGE_SIZE;
	}
	pmap_install(oldpmap);
}

/*
 *	Insert the given physical page (p) at
 *	the specified virtual address (v) in the
 *	target physical map with the protection requested.
 *
 *	If specified, the page will be wired down, meaning
 *	that the related pte can not be reclaimed.
 *
 *	NB:  This is the only routine which MAY NOT lazy-evaluate
 *	or lose information.  That is, this routine must actually
 *	insert this page into the given map NOW.
 */
void
pmap_enter(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot,
	   boolean_t wired)
{
	pmap_t oldpmap;
	vm_offset_t pa;
	pv_entry_t pv;
	vm_offset_t opa;
	struct ia64_lpte origpte;
	int managed;

	if (pmap == NULL)
		return;

	oldpmap = pmap_install(pmap);

	va &= ~PAGE_MASK;
#ifdef PMAP_DIAGNOSTIC
	if (va > VM_MAX_KERNEL_ADDRESS)
		panic("pmap_enter: toobig");
#endif

	pv = pmap_find_pv(pmap, va);
	if (!pv)
		pv = pmap_make_pv(pmap, va);

	origpte = pv->pv_pte;
	if (origpte.pte_p)
		opa = pmap_pte_pa(&origpte);
	else
		opa = 0;

	pa = VM_PAGE_TO_PHYS(m) & ~PAGE_MASK;
	managed = 0;

	/*
	 * Mapping has not changed, must be protection or wiring change.
	 */
	if (origpte.pte_p && (opa == pa)) {
		/*
		 * Wiring change, just update stats. We don't worry about
		 * wiring PT pages as they remain resident as long as there
		 * are valid mappings in them. Hence, if a user page is wired,
		 * the PT page will be also.
		 */
		if (wired && ((origpte.pte_ig & PTE_IG_WIRED) == 0))
			pmap->pm_stats.wired_count++;
		else if (!wired && (origpte.pte_ig & PTE_IG_WIRED))
			pmap->pm_stats.wired_count--;

		managed = origpte.pte_ig & PTE_IG_MANAGED;
		goto validate;
	} else {
		/*
		 * Mapping has changed, invalidate old range and fall
		 * through to handle validating new mapping.
		 */
	}

	/*
	 * Increment counters
	 */
	if (wired)
		pmap->pm_stats.wired_count++;

validate:
	/*
	 * Now validate mapping with desired protection/wiring.
	 * This enters the pv_entry_t on the page's list if necessary.
	 */
	pmap_set_pv(pmap, pv, pa, pte_prot(pmap, prot), m);

	if (wired)
		pv->pv_pte.pte_ig |= PTE_IG_WIRED;

	/*
	 * if the mapping or permission bits are different, we need
	 * to invalidate the page.
	 */
	if (!pmap_equal_pte(&origpte, &pv->pv_pte)) {
		PMAP_DEBUG_VA(va);
		pmap_invalidate_page(pmap, va);
	}

	pmap_install(oldpmap);
}

/*
 * this code makes some *MAJOR* assumptions:
 * 1. Current pmap & pmap exists.
 * 2. Not wired.
 * 3. Read access.
 * 4. No page table pages.
 * 5. Tlbflush is deferred to calling procedure.
 * 6. Page IS managed.
 * but is *MUCH* faster than pmap_enter...
 */

static void
pmap_enter_quick(pmap_t pmap, vm_offset_t va, vm_page_t m)
{
	pv_entry_t pv;
	int s;

	s = splvm();

	pv = pmap_find_pv(pmap, va);
	if (!pv)
		pv = pmap_make_pv(pmap, va);

	/*
	 * Enter on the PV list if part of our managed memory. Note that we
	 * raise IPL while manipulating pv_table since pmap_enter can be
	 * called at interrupt time.
	 */
	PMAP_DEBUG_VA(va);
	pmap_set_pv(pmap, pv, VM_PAGE_TO_PHYS(m),
		    (PTE_AR_R << 2) | PTE_PL_USER, m);

	splx(s);
}

/*
 * Make temporary mapping for a physical address. This is called
 * during dump.
 */
void *
pmap_kenter_temporary(vm_offset_t pa, int i)
{
	return (void *) IA64_PHYS_TO_RR7(pa - (i * PAGE_SIZE));
}

#define MAX_INIT_PT (96)

/*
 * pmap_object_init_pt preloads the ptes for a given object
 * into the specified pmap.  This eliminates the blast of soft
 * faults on process startup and immediately after an mmap.
 */
void
pmap_object_init_pt(pmap_t pmap, vm_offset_t addr,
		    vm_object_t object, vm_pindex_t pindex,
		    vm_size_t size, int limit)
{
	pmap_t oldpmap;
	vm_offset_t tmpidx;
	int psize;
	vm_page_t p;
	int objpgs;

	if (pmap == NULL || object == NULL)
		return;

	oldpmap = pmap_install(pmap);

	psize = ia64_btop(size);

	if ((object->type != OBJT_VNODE) ||
	    (limit && (psize > MAX_INIT_PT) &&
	     (object->resident_page_count > MAX_INIT_PT))) {
		pmap_install(oldpmap);
		return;
	}

	if (psize + pindex > object->size)
		psize = object->size - pindex;

	/*
	 * if we are processing a major portion of the object, then scan the
	 * entire thing.
	 */
	if (psize > (object->resident_page_count >> 2)) {
		objpgs = psize;

		for (p = TAILQ_FIRST(&object->memq);
		     ((objpgs > 0) && (p != NULL));
		     p = TAILQ_NEXT(p, listq)) {

			tmpidx = p->pindex;
			if (tmpidx < pindex) {
				continue;
			}
			tmpidx -= pindex;
			if (tmpidx >= psize) {
				continue;
			}
			if (((p->valid & VM_PAGE_BITS_ALL) == VM_PAGE_BITS_ALL) &&
			    (p->flags & (PG_BUSY | PG_FICTITIOUS)) == 0) {
				if ((p->queue - p->pc) == PQ_CACHE)
					vm_page_deactivate(p);
				vm_page_busy(p);
				pmap_enter_quick(pmap,
						 addr + ia64_ptob(tmpidx), p);
				vm_page_flag_set(p, PG_MAPPED);
				vm_page_wakeup(p);
			}
			objpgs -= 1;
		}
	} else {
		/*
		 * else lookup the pages one-by-one.
		 */
		for (tmpidx = 0; tmpidx < psize; tmpidx += 1) {
			p = vm_page_lookup(object, tmpidx + pindex);
			if (p &&
			    ((p->valid & VM_PAGE_BITS_ALL) == VM_PAGE_BITS_ALL) &&
			    (p->flags & (PG_BUSY | PG_FICTITIOUS)) == 0) {
				if ((p->queue - p->pc) == PQ_CACHE)
					vm_page_deactivate(p);
				vm_page_busy(p);
				pmap_enter_quick(pmap,
						 addr + ia64_ptob(tmpidx), p);
				vm_page_flag_set(p, PG_MAPPED);
				vm_page_wakeup(p);
			}
		}
	}
	pmap_install(oldpmap);
	return;
}

/*
 * pmap_prefault provides a quick way of clustering
 * pagefaults into a process's address space.  It is a "cousin"
 * of pmap_object_init_pt, except it runs at page fault time instead
 * of mmap time.
 */
#define PFBAK 4
#define PFFOR 4
#define PAGEORDER_SIZE (PFBAK+PFFOR)

static int pmap_prefault_pageorder[] = {
	-PAGE_SIZE, PAGE_SIZE,
	-2 * PAGE_SIZE, 2 * PAGE_SIZE,
	-3 * PAGE_SIZE, 3 * PAGE_SIZE,
	-4 * PAGE_SIZE, 4 * PAGE_SIZE
};
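
/*
 * Descriptive note (not part of the original source): the table
 * above makes pmap_prefault() probe outward from the faulting
 * address, alternating sides at increasing distance: -1, +1, -2, +2,
 * -3, +3, -4 and +4 pages, matching the PFBAK/PFFOR window.
 */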
void
pmap_prefault(pmap, addra, entry)
	pmap_t pmap;
	vm_offset_t addra;
	vm_map_entry_t entry;
{
	int i;
	vm_offset_t starta;
	vm_offset_t addr;
	vm_pindex_t pindex;
	vm_page_t m, mpte;
	vm_object_t object;

	if (!curproc || (pmap != vmspace_pmap(curproc->p_vmspace)))
		return;

	object = entry->object.vm_object;

	starta = addra - PFBAK * PAGE_SIZE;
	if (starta < entry->start) {
		starta = entry->start;
	} else if (starta > addra) {
		starta = 0;
	}

	mpte = NULL;
	for (i = 0; i < PAGEORDER_SIZE; i++) {
		vm_object_t lobject;
		pv_entry_t pv;

		addr = addra + pmap_prefault_pageorder[i];
		if (addr > addra + (PFFOR * PAGE_SIZE))
			addr = 0;

		if (addr < starta || addr >= entry->end)
			continue;

		pv = pmap_find_pv(pmap, addr);
		if (pv)
			continue;

		pindex = ((addr - entry->start) + entry->offset) >> PAGE_SHIFT;
		lobject = object;
		for (m = vm_page_lookup(lobject, pindex);
		     (!m && (lobject->type == OBJT_DEFAULT) && (lobject->backing_object));
		     lobject = lobject->backing_object) {
			if (lobject->backing_object_offset & PAGE_MASK)
				break;
			pindex += (lobject->backing_object_offset >> PAGE_SHIFT);
			m = vm_page_lookup(lobject->backing_object, pindex);
		}

		/*
		 * give-up when a page is not in memory
		 */
		if (m == NULL)
			break;

		if (((m->valid & VM_PAGE_BITS_ALL) == VM_PAGE_BITS_ALL) &&
		    (m->flags & (PG_BUSY | PG_FICTITIOUS)) == 0) {

			if ((m->queue - m->pc) == PQ_CACHE) {
				vm_page_deactivate(m);
			}
			vm_page_busy(m);
			pmap_enter_quick(pmap, addr, m);
			vm_page_flag_set(m, PG_MAPPED);
			vm_page_wakeup(m);
		}
	}
}

/*
 *	Routine:	pmap_change_wiring
 *	Function:	Change the wiring attribute for a map/virtual-address
 *			pair.
 *	In/out conditions:
 *			The mapping must already exist in the pmap.
 */
void
pmap_change_wiring(pmap, va, wired)
	register pmap_t pmap;
	vm_offset_t va;
	boolean_t wired;
{
	pmap_t oldpmap;
	pv_entry_t pv;

	if (pmap == NULL)
		return;

	oldpmap = pmap_install(pmap);

	pv = pmap_find_pv(pmap, va);

	if (wired && !pmap_pte_w(&pv->pv_pte))
		pmap->pm_stats.wired_count++;
	else if (!wired && pmap_pte_w(&pv->pv_pte))
		pmap->pm_stats.wired_count--;

	/*
	 * Wiring is not a hardware characteristic so there is no need to
	 * invalidate TLB.
	 */
	pmap_pte_set_w(&pv->pv_pte, wired);

	pmap_install(oldpmap);
}



/*
 *	Copy the range specified by src_addr/len
 *	from the source map to the range dst_addr/len
 *	in the destination map.
 *
 *	This routine is only advisory and need not do anything.
 */

void
pmap_copy(pmap_t dst_pmap, pmap_t src_pmap, vm_offset_t dst_addr, vm_size_t len,
	  vm_offset_t src_addr)
{
}

/*
 *	Routine:	pmap_kernel
 *	Function:
 *		Returns the physical map handle for the kernel.
 */
pmap_t
pmap_kernel()
{
	return (kernel_pmap);
}

/*
 *	pmap_zero_page zeros the specified hardware page by
 *	mapping it into virtual memory and using bzero to clear
 *	its contents.
 */

void
pmap_zero_page(vm_offset_t pa)
{
	vm_offset_t va = IA64_PHYS_TO_RR7(pa);
	bzero((caddr_t) va, PAGE_SIZE);
}


/*
 *	pmap_zero_page_area zeros the specified hardware page by
 *	mapping it into virtual memory and using bzero to clear
 *	its contents.
 *
 *	off and size must reside within a single page.
 */

void
pmap_zero_page_area(vm_offset_t pa, int off, int size)
{
	vm_offset_t va = IA64_PHYS_TO_RR7(pa);
	bzero((char *)(caddr_t)va + off, size);
}

/*
 *	pmap_copy_page copies the specified (machine independent)
 *	page by mapping the page into virtual memory and using
 *	bcopy to copy the page, one machine dependent page at a
 *	time.
 */
void
pmap_copy_page(vm_offset_t src, vm_offset_t dst)
{
	src = IA64_PHYS_TO_RR7(src);
	dst = IA64_PHYS_TO_RR7(dst);
	bcopy((caddr_t) src, (caddr_t) dst, PAGE_SIZE);
}


/*
 *	Routine:	pmap_pageable
 *	Function:
 *		Make the specified pages (by pmap, offset)
 *		pageable (or not) as requested.
 *
 *		A page which is not pageable may not take
 *		a fault; therefore, its page table entry
 *		must remain valid for the duration.
 *
 *		This routine is merely advisory; pmap_enter
 *		will specify that these pages are to be wired
 *		down (or not) as appropriate.
 */
void
pmap_pageable(pmap, sva, eva, pageable)
	pmap_t pmap;
	vm_offset_t sva, eva;
	boolean_t pageable;
{
}

/*
 * this routine returns true if a physical page resides
 * in the given pmap.
 */
boolean_t
pmap_page_exists(pmap, m)
	pmap_t pmap;
	vm_page_t m;
{
	register pv_entry_t pv;
	int s;

	if (!pmap_initialized || (m->flags & PG_FICTITIOUS))
		return FALSE;

	s = splvm();

	/*
	 * Not found, check current mappings returning immediately if found.
	 */
	for (pv = TAILQ_FIRST(&m->md.pv_list);
	     pv;
	     pv = TAILQ_NEXT(pv, pv_list)) {
		if (pv->pv_pmap == pmap) {
			splx(s);
			return TRUE;
		}
	}
	splx(s);
	return (FALSE);
}

#define PMAP_REMOVE_PAGES_CURPROC_ONLY
/*
 * Remove all pages from specified address space
 * this aids process exit speeds.  Also, this code
 * is special cased for current process only, but
 * can have the more generic (and slightly slower)
 * mode enabled.  This is much faster than pmap_remove
 * in the case of running down an entire address space.
 */
void
pmap_remove_pages(pmap, sva, eva)
	pmap_t pmap;
	vm_offset_t sva, eva;
{
	pv_entry_t pv, npv;
	int s;

#ifdef PMAP_REMOVE_PAGES_CURPROC_ONLY
	if (!curproc || (pmap != vmspace_pmap(curproc->p_vmspace))) {
		printf("warning: pmap_remove_pages called with non-current pmap\n");
		return;
	}
#endif

	s = splvm();
	for (pv = TAILQ_FIRST(&pmap->pm_pvlist);
	     pv;
	     pv = npv) {
		vm_page_t m;

		npv = TAILQ_NEXT(pv, pv_plist);

		if (pv->pv_va >= eva || pv->pv_va < sva) {
			continue;
		}

/*
 * We cannot remove wired pages from a process' mapping at this time
 */
		if (pv->pv_pte.pte_ig & PTE_IG_WIRED) {
			continue;
		}

		PMAP_DEBUG_VA(pv->pv_va);

		m = PHYS_TO_VM_PAGE(pmap_pte_pa(&pv->pv_pte));
		pmap_remove_pv(pmap, pv, m);
	}
	splx(s);

	pmap_invalidate_all(pmap);
}

/*
 *	pmap_page_protect:
 *
 *	Lower the permission for all mappings to a given page.
 */
void
pmap_page_protect(vm_page_t m, vm_prot_t prot)
{
	pv_entry_t pv;

	if ((prot & VM_PROT_WRITE) != 0)
		return;
	if (prot & (VM_PROT_READ | VM_PROT_EXECUTE)) {
		for (pv = TAILQ_FIRST(&m->md.pv_list);
		     pv;
		     pv = TAILQ_NEXT(pv, pv_list)) {
			int newprot = pte_prot(pv->pv_pmap, prot);
			pmap_t oldpmap = pmap_install(pv->pv_pmap);
			pmap_pte_set_prot(&pv->pv_pte, newprot);
			pmap_update_vhpt(pv);
			pmap_invalidate_page(pv->pv_pmap, pv->pv_va);
			pmap_install(oldpmap);
		}
	} else {
		pmap_remove_all(m);
	}
}

vm_offset_t
pmap_phys_address(ppn)
	int ppn;
{
	return (ia64_ptob(ppn));
}

/*
 *	pmap_ts_referenced:
 *
 *	Return the count of reference bits for a page, clearing all of them.
 *
 */
int
pmap_ts_referenced(vm_page_t m)
{
	pv_entry_t pv;
	int count = 0;

	if (!pmap_initialized || (m->flags & PG_FICTITIOUS))
		return 0;

	for (pv = TAILQ_FIRST(&m->md.pv_list);
	     pv;
	     pv = TAILQ_NEXT(pv, pv_list)) {
		if (pv->pv_pte.pte_a) {
			pmap_t oldpmap = pmap_install(pv->pv_pmap);
			count++;
			pv->pv_pte.pte_a = 0;
			pmap_update_vhpt(pv);
			pmap_invalidate_page(pv->pv_pmap, pv->pv_va);
			pmap_install(oldpmap);
		}
	}

	return count;
}
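
/*
 * Descriptive note (not part of the original source): the routines
 * above and below use the pte_a (accessed) and pte_d (dirty) bits of
 * each mapping's pte as the machine-independent referenced and
 * modified bits; pmap_set_pv() starts managed mappings with both
 * bits clear so that later references and stores to the page can be
 * detected and reported or cleared here on a per-mapping basis.
 */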

#if 0
/*
 *	pmap_is_referenced:
 *
 *	Return whether or not the specified physical page was referenced
 *	in any physical maps.
 */
static boolean_t
pmap_is_referenced(vm_page_t m)
{
	pv_entry_t pv;

	if (!pmap_initialized || (m->flags & PG_FICTITIOUS))
		return FALSE;

	for (pv = TAILQ_FIRST(&m->md.pv_list);
	     pv;
	     pv = TAILQ_NEXT(pv, pv_list)) {
		if (pv->pv_pte.pte_a) {
			return 1;
		}
	}

	return 0;
}
#endif

/*
 *	pmap_is_modified:
 *
 *	Return whether or not the specified physical page was modified
 *	in any physical maps.
 */
boolean_t
pmap_is_modified(vm_page_t m)
{
	pv_entry_t pv;

	if (!pmap_initialized || (m->flags & PG_FICTITIOUS))
		return FALSE;

	for (pv = TAILQ_FIRST(&m->md.pv_list);
	     pv;
	     pv = TAILQ_NEXT(pv, pv_list)) {
		if (pv->pv_pte.pte_d) {
			return 1;
		}
	}

	return 0;
}

/*
 *	Clear the modify bits on the specified physical page.
 */
void
pmap_clear_modify(vm_page_t m)
{
	pv_entry_t pv;

	if (!pmap_initialized || (m->flags & PG_FICTITIOUS))
		return;

	for (pv = TAILQ_FIRST(&m->md.pv_list);
	     pv;
	     pv = TAILQ_NEXT(pv, pv_list)) {
		if (pv->pv_pte.pte_d) {
			pmap_t oldpmap = pmap_install(pv->pv_pmap);
			pv->pv_pte.pte_d = 0;
			pmap_update_vhpt(pv);
			pmap_invalidate_page(pv->pv_pmap, pv->pv_va);
			pmap_install(oldpmap);
		}
	}
}

/*
 *	pmap_clear_reference:
 *
 *	Clear the reference bit on the specified physical page.
 */
void
pmap_clear_reference(vm_page_t m)
{
	pv_entry_t pv;

	if (!pmap_initialized || (m->flags & PG_FICTITIOUS))
		return;

	for (pv = TAILQ_FIRST(&m->md.pv_list);
	     pv;
	     pv = TAILQ_NEXT(pv, pv_list)) {
		if (pv->pv_pte.pte_a) {
			pmap_t oldpmap = pmap_install(pv->pv_pmap);
			pv->pv_pte.pte_a = 0;
			pmap_update_vhpt(pv);
			pmap_invalidate_page(pv->pv_pmap, pv->pv_va);
			pmap_install(oldpmap);
		}
	}
}

/*
 * Miscellaneous support routines follow
 */

static void
ia64_protection_init()
{
	int prot, *kp, *up;

	kp = protection_codes[0];
	up = protection_codes[1];

	for (prot = 0; prot < 8; prot++) {
		switch (prot) {
		case VM_PROT_NONE | VM_PROT_NONE | VM_PROT_NONE:
			*kp++ = (PTE_AR_R << 2) | PTE_PL_KERN;
			*up++ = (PTE_AR_R << 2) | PTE_PL_KERN;
			break;

		case VM_PROT_NONE | VM_PROT_NONE | VM_PROT_EXECUTE:
			*kp++ = (PTE_AR_X_RX << 2) | PTE_PL_KERN;
			*up++ = (PTE_AR_X_RX << 2) | PTE_PL_USER;
			break;

		case VM_PROT_NONE | VM_PROT_WRITE | VM_PROT_NONE:
			*kp++ = (PTE_AR_RW << 2) | PTE_PL_KERN;
			*up++ = (PTE_AR_RW << 2) | PTE_PL_USER;
			break;

		case VM_PROT_NONE | VM_PROT_WRITE | VM_PROT_EXECUTE:
			*kp++ = (PTE_AR_RWX << 2) | PTE_PL_KERN;
			*up++ = (PTE_AR_RWX << 2) | PTE_PL_USER;
			break;

		case VM_PROT_READ | VM_PROT_NONE | VM_PROT_NONE:
			*kp++ = (PTE_AR_R << 2) | PTE_PL_KERN;
			*up++ = (PTE_AR_R << 2) | PTE_PL_USER;
			break;

		case VM_PROT_READ | VM_PROT_NONE | VM_PROT_EXECUTE:
			*kp++ = (PTE_AR_RX << 2) | PTE_PL_KERN;
			*up++ = (PTE_AR_RX << 2) | PTE_PL_USER;
			break;

		case VM_PROT_READ | VM_PROT_WRITE | VM_PROT_NONE:
			*kp++ = (PTE_AR_RW << 2) | PTE_PL_KERN;
			*up++ = (PTE_AR_RW << 2) | PTE_PL_USER;
			break;

		case VM_PROT_READ | VM_PROT_WRITE | VM_PROT_EXECUTE:
			*kp++ = (PTE_AR_RWX << 2) | PTE_PL_KERN;
			*up++ = (PTE_AR_RWX << 2) | PTE_PL_USER;
			break;
		}
	}
}

/*
 * Map a set of physical memory pages into the kernel virtual
 * address space. Return a pointer to where it is mapped. This
 * routine is intended to be used for mapping device memory,
 * NOT real memory.
 */
void *
pmap_mapdev(pa, size)
	vm_offset_t pa;
	vm_size_t size;
{
	return (void*) IA64_PHYS_TO_RR6(pa);
}

/*
 * perform the pmap work for mincore
 */
int
pmap_mincore(pmap, addr)
	pmap_t pmap;
	vm_offset_t addr;
{
	pv_entry_t pv;
	struct ia64_lpte *pte;
	int val = 0;

	pv = pmap_find_pv(pmap, addr);
	if (pv == 0) {
		return 0;
	}
	pte = &pv->pv_pte;

	if (pmap_pte_v(pte)) {
		vm_page_t m;
		vm_offset_t pa;

		val = MINCORE_INCORE;
		if ((pte->pte_ig & PTE_IG_MANAGED) == 0)
			return val;

		pa = pmap_pte_pa(pte);

		m = PHYS_TO_VM_PAGE(pa);

		/*
		 * Modified by us
		 */
		if (pte->pte_d)
			val |= MINCORE_MODIFIED|MINCORE_MODIFIED_OTHER;
		/*
		 * Modified by someone
		 */
		else if (pmap_is_modified(m))
			val |= MINCORE_MODIFIED_OTHER;
		/*
		 * Referenced by us
		 */
		if (pte->pte_a)
			val |= MINCORE_REFERENCED|MINCORE_REFERENCED_OTHER;

		/*
		 * Referenced by someone
		 */
		else if (pmap_ts_referenced(m)) {
			val |= MINCORE_REFERENCED_OTHER;
			vm_page_flag_set(m, PG_REFERENCED);
		}
	}
	return val;
}

void
pmap_activate(struct proc *p)
{
	pmap_install(vmspace_pmap(p->p_vmspace));
}

pmap_t
pmap_install(pmap_t pmap)
{
	pmap_t oldpmap;
	int rid;

	oldpmap = PCPU_GET(current_pmap);

	if (pmap == oldpmap || pmap == kernel_pmap)
		return pmap;

	PCPU_SET(current_pmap, pmap);
	if (!pmap) {
		/*
		 * RIDs 0..4 have no mappings to make sure we generate
		 * page faults on accesses.
		 */
		ia64_set_rr(IA64_RR_BASE(0), (0 << 8)|(PAGE_SHIFT << 2)|1);
		ia64_set_rr(IA64_RR_BASE(1), (1 << 8)|(PAGE_SHIFT << 2)|1);
		ia64_set_rr(IA64_RR_BASE(2), (2 << 8)|(PAGE_SHIFT << 2)|1);
		ia64_set_rr(IA64_RR_BASE(3), (3 << 8)|(PAGE_SHIFT << 2)|1);
		ia64_set_rr(IA64_RR_BASE(4), (4 << 8)|(PAGE_SHIFT << 2)|1);
		return oldpmap;
	}

	pmap->pm_active = 1;	/* XXX use bitmap for SMP */

 reinstall:
	rid = pmap->pm_rid & ((1 << pmap_ridbits) - 1);
	ia64_set_rr(IA64_RR_BASE(0), ((rid + 0) << 8)|(PAGE_SHIFT << 2)|1);
	ia64_set_rr(IA64_RR_BASE(1), ((rid + 1) << 8)|(PAGE_SHIFT << 2)|1);
	ia64_set_rr(IA64_RR_BASE(2), ((rid + 2) << 8)|(PAGE_SHIFT << 2)|1);
	ia64_set_rr(IA64_RR_BASE(3), ((rid + 3) << 8)|(PAGE_SHIFT << 2)|1);
	ia64_set_rr(IA64_RR_BASE(4), ((rid + 4) << 8)|(PAGE_SHIFT << 2)|1);

	/*
	 * If we need a new RID, get it now. Note that we need to
	 * remove our old mappings (if any) from the VHPT, so we will
	 * run on the old RID for a moment while we invalidate the old
	 * one. XXX maybe we should just clear out the VHPT when the
	 * RID generation rolls over.
	 */
	if ((pmap->pm_rid>>pmap_ridbits) != (pmap_nextrid>>pmap_ridbits)) {
		if (pmap->pm_rid)
			pmap_invalidate_rid(pmap);
		pmap_get_rid(pmap);
		goto reinstall;
	}

	return oldpmap;
}
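
/*
 * Illustrative note (not part of the original source): the region
 * register values written by pmap_install() above pack the RID into
 * bits 8 and up, the preferred page size into bits 7..2 and the
 * "VHPT walker enabled" bit into bit 0, so installing a pmap amounts
 * to programming regions 0..4 with consecutive RIDs from the pmap's
 * group.  A minimal sketch (hypothetical helper) of the value for
 * one region:
 */
#if 0
/* Example only: build the rr value for region 'region' of a pmap. */
static u_int64_t
pmap_rr_example(pmap_t pmap, int region)
{
	int rid = (pmap->pm_rid & ((1 << pmap_ridbits) - 1)) + region;

	return (((u_int64_t) rid << 8) | (PAGE_SHIFT << 2) | 1);
}
#endif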

vm_offset_t
pmap_addr_hint(vm_object_t obj, vm_offset_t addr, vm_size_t size)
{

	return addr;
}

#if 0
#if defined(PMAP_DEBUG)
pmap_pid_dump(int pid)
{
	pmap_t pmap;
	struct proc *p;
	int npte = 0;
	int index;

	sx_slock(&allproc_lock);
	LIST_FOREACH(p, &allproc, p_list) {
		if (p->p_pid != pid)
			continue;

		if (p->p_vmspace) {
			int i, j;
			index = 0;
			pmap = vmspace_pmap(p->p_vmspace);
			for (i = 0; i < 1024; i++) {
				pd_entry_t *pde;
				pt_entry_t *pte;
				unsigned base = i << PDRSHIFT;

				pde = &pmap->pm_pdir[i];
				if (pde && pmap_pde_v(pde)) {
					for (j = 0; j < 1024; j++) {
						unsigned va = base + (j << PAGE_SHIFT);
						if (va >= (vm_offset_t) VM_MIN_KERNEL_ADDRESS) {
							if (index) {
								index = 0;
								printf("\n");
							}
							sx_sunlock(&allproc_lock);
							return npte;
						}
						pte = pmap_pte_quick(pmap, va);
						if (pte && pmap_pte_v(pte)) {
							vm_offset_t pa;
							vm_page_t m;
							pa = *(int *)pte;
							m = PHYS_TO_VM_PAGE(pa);
							printf("va: 0x%x, pt: 0x%x, h: %d, w: %d, f: 0x%x",
								va, pa, m->hold_count, m->wire_count, m->flags);
							npte++;
							index++;
							if (index >= 2) {
								index = 0;
								printf("\n");
							} else {
								printf(" ");
							}
						}
					}
				}
			}
		}
	}
	sx_sunlock(&allproc_lock);
	return npte;
}
#endif

#if defined(DEBUG)

static void	pads __P((pmap_t pm));
static void	pmap_pvdump __P((vm_offset_t pa));

/* print address space of pmap*/
static void
pads(pm)
	pmap_t pm;
{
	int i, j;
	vm_offset_t va;
	pt_entry_t *ptep;

	if (pm == kernel_pmap)
		return;
	for (i = 0; i < 1024; i++)
		if (pm->pm_pdir[i])
			for (j = 0; j < 1024; j++) {
				va = (i << PDRSHIFT) + (j << PAGE_SHIFT);
				if (pm == kernel_pmap && va < KERNBASE)
					continue;
				if (pm != kernel_pmap && va > UPT_MAX_ADDRESS)
					continue;
				ptep = pmap_pte_quick(pm, va);
				if (pmap_pte_v(ptep))
					printf("%x:%x ", va, *(int *) ptep);
			};

}

static void
pmap_pvdump(pa)
	vm_offset_t pa;
{
	pv_entry_t pv;
	vm_page_t m;

	printf("pa %x", pa);
	m = PHYS_TO_VM_PAGE(pa);
	for (pv = TAILQ_FIRST(&m->md.pv_list);
	     pv;
	     pv = TAILQ_NEXT(pv, pv_list)) {
		printf(" -> pmap %x, va %x",
			pv->pv_pmap, pv->pv_va);
		pads(pv->pv_pmap);
	}
	printf(" ");
}
#endif
#endif