/*
 * Copyright (C) 1995, 1996 Wolfgang Solfrank.
 * Copyright (C) 1995, 1996 TooLs GmbH.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by TooLs GmbH.
 * 4. The name of TooLs GmbH may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY TOOLS GMBH ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL TOOLS GMBH BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
 * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
 * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * $NetBSD: pmap.c,v 1.28 2000/03/26 20:42:36 kleink Exp $
 */
/*
 * Copyright (C) 2001 Benno Rice.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY Benno Rice ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL TOOLS GMBH BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
 * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
 * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#ifndef lint
static const char rcsid[] =
	"$FreeBSD: head/sys/powerpc/aim/mmu_oea.c 83682 2001-09-20 00:47:17Z mp $";
#endif /* not lint */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/proc.h>
#include <sys/malloc.h>
#include <sys/msgbuf.h>
#include <sys/vmmeter.h>
#include <sys/mman.h>
#include <sys/queue.h>
#include <sys/mutex.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <sys/lock.h>
#include <vm/vm_kern.h>
#include <vm/vm_page.h>
#include <vm/vm_map.h>
#include <vm/vm_object.h>
#include <vm/vm_extern.h>
#include <vm/vm_pageout.h>
#include <vm/vm_pager.h>
#include <vm/vm_zone.h>

#include <sys/user.h>

#include <machine/pcb.h>
#include <machine/powerpc.h>
#include <machine/pte.h>

pte_t	*ptable;
int	ptab_cnt;
u_int	ptab_mask;
#define	HTABSIZE	(ptab_cnt * 64)

#define	MINPV	2048

struct pte_ovfl {
	LIST_ENTRY(pte_ovfl) po_list;	/* Linked list of overflow entries */
	struct pte po_pte;		/* PTE for this mapping */
};

LIST_HEAD(pte_ovtab, pte_ovfl) *potable; /* Overflow entries for ptable */

static struct pmap kernel_pmap_store;
pmap_t kernel_pmap;

static int npgs;
static u_int nextavail;

#ifndef MSGBUFADDR
extern vm_offset_t msgbuf_paddr;
#endif

static struct mem_region *mem, *avail;

vm_offset_t avail_start;
vm_offset_t avail_end;
vm_offset_t virtual_avail;
vm_offset_t virtual_end;

vm_offset_t kernel_vm_end;

static int pmap_pagedaemon_waken = 0;

extern unsigned int Maxmem;

#define	ATTRSHFT	4

struct pv_entry *pv_table;

static vm_zone_t pvzone;
static struct vm_zone pvzone_store;
static struct vm_object pvzone_obj;
static int pv_entry_count = 0, pv_entry_max = 0, pv_entry_high_water = 0;
static struct pv_entry *pvinit;

#if !defined(PMAP_SHPGPERPROC)
#define	PMAP_SHPGPERPROC	200
#endif

struct pv_page;
struct pv_page_info {
	LIST_ENTRY(pv_page) pgi_list;
	struct pv_entry *pgi_freelist;
	int pgi_nfree;
};
#define	NPVPPG	((PAGE_SIZE - sizeof(struct pv_page_info)) / sizeof(struct pv_entry))
struct pv_page {
	struct pv_page_info pvp_pgi;
	struct pv_entry pvp_pv[NPVPPG];
};
LIST_HEAD(pv_page_list, pv_page) pv_page_freelist;
int pv_nfree;
int pv_pcnt;
static struct pv_entry *pmap_alloc_pv(void);
static void pmap_free_pv(struct pv_entry *);

struct po_page;
struct po_page_info {
	LIST_ENTRY(po_page) pgi_list;
	vm_page_t pgi_page;
	LIST_HEAD(po_freelist, pte_ovfl) pgi_freelist;
	int pgi_nfree;
};
#define	NPOPPG	((PAGE_SIZE - sizeof(struct po_page_info)) / sizeof(struct pte_ovfl))
struct po_page {
	struct po_page_info pop_pgi;
	struct pte_ovfl pop_po[NPOPPG];
};
LIST_HEAD(po_page_list, po_page) po_page_freelist;
int po_nfree;
int po_pcnt;
static struct pte_ovfl *poalloc(void);
static void pofree(struct pte_ovfl *, int);

static u_int usedsr[NPMAPS / sizeof(u_int) / 8];

static int pmap_initialized;

int pte_spill(vm_offset_t);
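
/*
 * The OEA page table is an array of ptab_cnt page table entry groups
 * (PTEGs).  Each PTEG holds eight 8-byte PTEs, which is where the factor
 * of 64 in HTABSIZE comes from.  A given mapping may live in one of two
 * PTEGs: the one selected by the primary hash of its VSID and page index,
 * or the one at that index XORed with ptab_mask (the secondary hash,
 * marked by PTE_HID).  Mappings that fit in neither PTEG are kept on the
 * potable overflow lists and faulted in on demand by pte_spill().
 */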

/*
 * These small routines may have to be replaced,
 * if/when we support processors other than the 604.
 */
static __inline void
tlbie(vm_offset_t ea)
{

	__asm __volatile ("tlbie %0" :: "r"(ea));
}

static __inline void
tlbsync(void)
{

	__asm __volatile ("sync; tlbsync; sync");
}

static __inline void
tlbia(void)
{
	vm_offset_t i;

	__asm __volatile ("sync");
	for (i = 0; i < (vm_offset_t)0x00040000; i += 0x00001000) {
		tlbie(i);
	}
	tlbsync();
}

static __inline int
ptesr(sr_t *sr, vm_offset_t addr)
{

	return sr[(u_int)addr >> ADDR_SR_SHFT];
}

static __inline int
pteidx(sr_t sr, vm_offset_t addr)
{
	int hash;

	hash = (sr & SR_VSID) ^ (((u_int)addr & ADDR_PIDX) >> ADDR_PIDX_SHFT);
	return hash & ptab_mask;
}

static __inline int
ptematch(pte_t *ptp, sr_t sr, vm_offset_t va, int which)
{

	return ptp->pte_hi == (((sr & SR_VSID) << PTE_VSID_SHFT) |
	    (((u_int)va >> ADDR_API_SHFT) & PTE_API) | which);
}

static __inline struct pv_entry *
pa_to_pv(vm_offset_t pa)
{
#if 0	/* XXX */
	int bank, pg;

	bank = vm_physseg_find(atop(pa), &pg);
	if (bank == -1)
		return NULL;
	return &vm_physmem[bank].pmseg.pvent[pg];
#endif
	return (NULL);
}

static __inline char *
pa_to_attr(vm_offset_t pa)
{
#if 0	/* XXX */
	int bank, pg;

	bank = vm_physseg_find(atop(pa), &pg);
	if (bank == -1)
		return NULL;
	return &vm_physmem[bank].pmseg.attrs[pg];
#endif
	return (NULL);
}

/*
 * Try to insert page table entry *pt into the ptable at idx.
 *
 * Note: *pt mustn't have PTE_VALID set.
 * This is done here as required by Book III, 4.12.
 */
static int
pte_insert(int idx, pte_t *pt)
{
	pte_t *ptp;
	int i;

	/*
	 * First try primary hash.
	 */
	for (ptp = ptable + idx * 8, i = 8; --i >= 0; ptp++) {
		if (!(ptp->pte_hi & PTE_VALID)) {
			*ptp = *pt;
			ptp->pte_hi &= ~PTE_HID;
			__asm __volatile ("sync");
			ptp->pte_hi |= PTE_VALID;
			return 1;
		}
	}

	/*
	 * Then try secondary hash.
	 */
	idx ^= ptab_mask;

	for (ptp = ptable + idx * 8, i = 8; --i >= 0; ptp++) {
		if (!(ptp->pte_hi & PTE_VALID)) {
			*ptp = *pt;
			ptp->pte_hi |= PTE_HID;
			__asm __volatile ("sync");
			ptp->pte_hi |= PTE_VALID;
			return 1;
		}
	}

	return 0;
}
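
/*
 * The two-step update in pte_insert() follows the architected sequence
 * for adding a PTE: fill in the entry while PTE_VALID is still clear,
 * execute a sync so both words are globally visible, and only then set
 * PTE_VALID in pte_hi.  A hardware table walk racing with the insertion
 * thus sees either no entry or a complete one, never a half-written PTE.
 */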

/*
 * Spill handler.
 *
 * Tries to spill a page table entry from the overflow area.
 * Note that this routine runs in real mode on a separate stack,
 * with interrupts disabled.
 */
int
pte_spill(vm_offset_t addr)
{
	int idx, i;
	sr_t sr;
	struct pte_ovfl *po;
	pte_t ps;
	pte_t *pt;

	__asm ("mfsrin %0,%1" : "=r"(sr) : "r"(addr));
	idx = pteidx(sr, addr);
	for (po = potable[idx].lh_first; po; po = po->po_list.le_next) {
		if (ptematch(&po->po_pte, sr, addr, 0)) {
			/*
			 * Now found an entry to be spilled into the real
			 * ptable.
			 */
			if (pte_insert(idx, &po->po_pte)) {
				LIST_REMOVE(po, po_list);
				pofree(po, 0);
				return 1;
			}
			/*
			 * Have to substitute some entry.  Use the primary
			 * hash for this.
			 *
			 * Use low bits of timebase as random generator.
			 */
			__asm ("mftb %0" : "=r"(i));
			pt = ptable + idx * 8 + (i & 7);
			pt->pte_hi &= ~PTE_VALID;
			ps = *pt;
			__asm __volatile ("sync");
			tlbie(addr);
			tlbsync();
			*pt = po->po_pte;
			__asm __volatile ("sync");
			pt->pte_hi |= PTE_VALID;
			po->po_pte = ps;
			if (ps.pte_hi & PTE_HID) {
				/*
				 * We took an entry that was on the alternate
				 * hash chain, so move it to its original
				 * chain.
				 */
				po->po_pte.pte_hi &= ~PTE_HID;
				LIST_REMOVE(po, po_list);
				LIST_INSERT_HEAD(potable + (idx ^ ptab_mask),
				    po, po_list);
			}
			return 1;
		}
	}

	return 0;
}

/*
 * This is called during powerpc_init, before the system is really initialized.
 */
void
pmap_setavailmem(u_int kernelstart, u_int kernelend)
{
	struct mem_region *mp, *mp1;
	int cnt, i;
	u_int s, e, sz;

	/*
	 * Get memory.
	 */
	mem_regions(&mem, &avail);
	for (mp = mem; mp->size; mp++)
		Maxmem += btoc(mp->size);

	/*
	 * Count the number of available entries.
	 */
	for (cnt = 0, mp = avail; mp->size; mp++) {
		cnt++;
	}

	/*
	 * Page align all regions.
	 * Non-page aligned memory isn't very interesting to us.
	 * Also, sort the entries for ascending addresses.
	 */
	kernelstart &= ~PAGE_MASK;
	kernelend = (kernelend + PAGE_MASK) & ~PAGE_MASK;
	for (mp = avail; mp->size; mp++) {
		s = mp->start;
		e = mp->start + mp->size;
		/*
		 * Check whether this region holds all of the kernel.
		 */
		if (s < kernelstart && e > kernelend) {
			avail[cnt].start = kernelend;
			avail[cnt++].size = e - kernelend;
			e = kernelstart;
		}
		/*
		 * Look whether this region starts within the kernel.
		 */
		if (s >= kernelstart && s < kernelend) {
			if (e <= kernelend)
				goto empty;
			s = kernelend;
		}
		/*
		 * Now look whether this region ends within the kernel.
		 */
		if (e > kernelstart && e <= kernelend) {
			if (s >= kernelstart)
				goto empty;
			e = kernelstart;
		}
		/*
		 * Now page align the start and size of the region.
		 */
		s = round_page(s);
		e = trunc_page(e);
		if (e < s) {
			e = s;
		}
		sz = e - s;
		/*
		 * Check whether some memory is left here.
		 */
		if (sz == 0) {
		empty:
			bcopy(mp + 1, mp,
			    (cnt - (mp - avail)) * sizeof *mp);
			cnt--;
			mp--;
			continue;
		}

		/*
		 * Do an insertion sort.
		 */
		npgs += btoc(sz);

		for (mp1 = avail; mp1 < mp; mp1++) {
			if (s < mp1->start) {
				break;
			}
		}

		if (mp1 < mp) {
			bcopy(mp1, mp1 + 1, (char *)mp - (char *)mp1);
			mp1->start = s;
			mp1->size = sz;
		} else {
			mp->start = s;
			mp->size = sz;
		}
	}

#ifdef HTABENTS
	ptab_cnt = HTABENTS;
#else
	ptab_cnt = (Maxmem + 1) / 2;

	/* The minimum is 1024 PTEGs. */
	if (ptab_cnt < 1024) {
		ptab_cnt = 1024;
	}

	/* Round up to power of 2. */
	__asm ("cntlzw %0,%1" : "=r"(i) : "r"(ptab_cnt - 1));
	ptab_cnt = 1 << (32 - i);
#endif
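
	/*
	 * The untuned sizing above allots one PTEG for every two physical
	 * pages, i.e. a hash table 1/128th the size of physical memory.
	 * For example, a 128MB machine has Maxmem = 32768 pages, giving
	 * ptab_cnt = 16384 PTEGs after rounding, or a 1MB table; the
	 * 1024-PTEG floor corresponds to the 64KB architectural minimum
	 * table size.
	 */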

	/*
	 * Find suitably aligned memory for HTAB.
	 */
	for (mp = avail; mp->size; mp++) {
		s = roundup(mp->start, HTABSIZE) - mp->start;

		if (mp->size < s + HTABSIZE) {
			continue;
		}

		ptable = (pte_t *)(mp->start + s);

		if (mp->size == s + HTABSIZE) {
			if (s)
				mp->size = s;
			else {
				bcopy(mp + 1, mp,
				    (cnt - (mp - avail)) * sizeof *mp);
				mp = avail;
			}
			break;
		}

		if (s != 0) {
			bcopy(mp, mp + 1,
			    (cnt - (mp - avail)) * sizeof *mp);
			mp++->size = s;
			cnt++;
		}

		mp->start += s + HTABSIZE;
		mp->size -= s + HTABSIZE;
		break;
	}

	if (!mp->size) {
		panic("not enough memory?");
	}

	npgs -= btoc(HTABSIZE);
	bzero((void *)ptable, HTABSIZE);
	ptab_mask = ptab_cnt - 1;

	/*
	 * We cannot do pmap_steal_memory here,
	 * since we don't run with translation enabled yet.
	 */
	s = sizeof(struct pte_ovtab) * ptab_cnt;
	sz = round_page(s);

	for (mp = avail; mp->size; mp++) {
		if (mp->size >= sz) {
			break;
		}
	}

	if (!mp->size) {
		panic("not enough memory?");
	}

	npgs -= btoc(sz);
	potable = (struct pte_ovtab *)mp->start;
	mp->size -= sz;
	mp->start += sz;

	if (mp->size <= 0) {
		bcopy(mp + 1, mp, (cnt - (mp - avail)) * sizeof *mp);
	}

	for (i = 0; i < ptab_cnt; i++) {
		LIST_INIT(potable + i);
	}

#ifndef MSGBUFADDR
	/*
	 * Allow for msgbuf.
	 */
	sz = round_page(MSGBUFSIZE);
	mp = NULL;

	for (mp1 = avail; mp1->size; mp1++) {
		if (mp1->size >= sz) {
			mp = mp1;
		}
	}

	if (mp == NULL) {
		panic("not enough memory?");
	}

	npgs -= btoc(sz);
	msgbuf_paddr = mp->start + mp->size - sz;
	mp->size -= sz;

	if (mp->size <= 0) {
		bcopy(mp + 1, mp, (cnt - (mp - avail)) * sizeof *mp);
	}
#endif

	nextavail = avail->start;
	avail_start = avail->start;
	for (mp = avail, i = 0; mp->size; mp++) {
		avail_end = mp->start + mp->size;
		phys_avail[i++] = mp->start;
		phys_avail[i++] = mp->start + mp->size;
	}
}

void
pmap_bootstrap(void)
{
	int i;

	/*
	 * Initialize kernel pmap and hardware.
	 */
	kernel_pmap = &kernel_pmap_store;

	{
		int batu, batl;

		batu = 0x80001ffe;
		batl = 0x80000012;

		__asm ("mtdbatu 1,%0; mtdbatl 1,%1" :: "r" (batu), "r" (batl));
	}

#if NPMAPS >= KERNEL_SEGMENT / 16
	usedsr[KERNEL_SEGMENT / 16 / (sizeof usedsr[0] * 8)]
	    |= 1 << ((KERNEL_SEGMENT / 16) % (sizeof usedsr[0] * 8));
#endif

#if 0 /* XXX */
	for (i = 0; i < 16; i++) {
		kernel_pmap->pm_sr[i] = EMPTY_SEGMENT;
		__asm __volatile ("mtsrin %0,%1"
		    :: "r"(EMPTY_SEGMENT), "r"(i << ADDR_SR_SHFT));
	}
#endif

	for (i = 0; i < 16; i++) {
		int j;

		__asm __volatile ("mfsrin %0,%1"
		    : "=r" (j)
		    : "r" (i << ADDR_SR_SHFT));

		kernel_pmap->pm_sr[i] = j;
	}

	kernel_pmap->pm_sr[KERNEL_SR] = KERNEL_SEGMENT;
	__asm __volatile ("mtsr %0,%1"
	    :: "n"(KERNEL_SR), "r"(KERNEL_SEGMENT));

	__asm __volatile ("sync; mtsdr1 %0; isync"
	    :: "r"((u_int)ptable | (ptab_mask >> 10)));

	tlbia();

	virtual_avail = VM_MIN_KERNEL_ADDRESS;
	virtual_end = VM_MAX_KERNEL_ADDRESS;
}
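
/*
 * The mtsdr1 above installs the hash table: the upper bits of SDR1 carry
 * HTABORG, the physical base of the table (valid because the table is
 * aligned to HTABSIZE), and the low nine bits carry HTABMASK.  Since
 * ptab_mask is ptab_cnt - 1 and the bottom ten hash bits are always
 * used, ptab_mask >> 10 yields exactly the mask of additional hash bits
 * for tables larger than the 1024-PTEG minimum.
 */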

/*
 * Initialize anything else for pmap handling.
 * Called during vm_init().
 */
void
pmap_init(vm_offset_t phys_start, vm_offset_t phys_end)
{
	int initial_pvs;

	/*
	 * init the pv free list
	 */
	initial_pvs = vm_page_array_size;
	if (initial_pvs < MINPV) {
		initial_pvs = MINPV;
	}
	pvzone = &pvzone_store;
	pvinit = (struct pv_entry *) kmem_alloc(kernel_map,
	    initial_pvs * sizeof(struct pv_entry));
	zbootinit(pvzone, "PV ENTRY", sizeof(struct pv_entry), pvinit,
	    vm_page_array_size);

	pmap_initialized = TRUE;
}

/*
 * Initialize a preallocated and zeroed pmap structure.
 */
void
pmap_pinit(struct pmap *pm)
{
	int i, j;

	/*
	 * Allocate some segment registers for this pmap.
	 */
	pm->pm_refs = 1;
	for (i = 0; i < sizeof usedsr / sizeof usedsr[0]; i++) {
		if (usedsr[i] != 0xffffffff) {
			j = ffs(~usedsr[i]) - 1;
			usedsr[i] |= 1 << j;
			pm->pm_sr[0] = (i * sizeof usedsr[0] * 8 + j) * 16;
			for (i = 1; i < 16; i++) {
				pm->pm_sr[i] = pm->pm_sr[i - 1] + 1;
			}
			return;
		}
	}
	panic("out of segments");
}

void
pmap_pinit2(pmap_t pmap)
{

	/*
	 * Nothing to be done.
	 */
	return;
}

/*
 * Add a reference to the given pmap.
 */
void
pmap_reference(struct pmap *pm)
{

	pm->pm_refs++;
}

/*
 * Retire the given pmap from service.
 * Should only be called if the map contains no valid mappings.
 */
void
pmap_destroy(struct pmap *pm)
{

	if (--pm->pm_refs == 0) {
		pmap_release(pm);
		free((caddr_t)pm, M_VMPGDATA);
	}
}

/*
 * Release any resources held by the given physical map.
 * Called when a pmap initialized by pmap_pinit is being released.
 */
void
pmap_release(struct pmap *pm)
{
	int i, j;

	if (!pm->pm_sr[0]) {
		panic("pmap_release");
	}
	i = pm->pm_sr[0] / 16;
	j = i % (sizeof usedsr[0] * 8);
	i /= sizeof usedsr[0] * 8;
	usedsr[i] &= ~(1 << j);
}

/*
 * Copy the range specified by src_addr/len
 * from the source map to the range dst_addr/len
 * in the destination map.
 *
 * This routine is only advisory and need not do anything.
 */
void
pmap_copy(struct pmap *dst_pmap, struct pmap *src_pmap, vm_offset_t dst_addr,
    vm_size_t len, vm_offset_t src_addr)
{

	return;
}

/*
 * Garbage collects the physical map system for
 * pages which are no longer used.
 * Success need not be guaranteed -- that is, there
 * may well be pages which are not referenced, but
 * others may be collected.
 * Called by the pageout daemon when pages are scarce.
 */
void
pmap_collect(void)
{

	return;
}

/*
 * Fill the given physical page with zeroes.
 */
void
pmap_zero_page(vm_offset_t pa)
{
#if 0
	bzero((caddr_t)pa, PAGE_SIZE);
#else
	int i;

	for (i = PAGE_SIZE/CACHELINESIZE; i > 0; i--) {
		__asm __volatile ("dcbz 0,%0" :: "r"(pa));
		pa += CACHELINESIZE;
	}
#endif
}
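
/*
 * pmap_zero_page() establishes each cache block as zero with dcbz,
 * avoiding the read-for-ownership traffic a plain bzero() of the page
 * would generate.  This assumes CACHELINESIZE matches the CPU's actual
 * cache block size (32 bytes on the 604 this file currently targets);
 * dcbz on a cache-inhibited mapping can take an alignment interrupt
 * instead.
 */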

void
pmap_zero_page_area(vm_offset_t pa, int off, int size)
{

	bzero((caddr_t)pa + off, size);
}

/*
 * Copy the given physical source page to its destination.
 */
void
pmap_copy_page(vm_offset_t src, vm_offset_t dst)
{

	bcopy((caddr_t)src, (caddr_t)dst, PAGE_SIZE);
}

static struct pv_entry *
pmap_alloc_pv(void)
{
	pv_entry_count++;

	if (pv_entry_high_water &&
	    (pv_entry_count > pv_entry_high_water) &&
	    (pmap_pagedaemon_waken == 0)) {
		pmap_pagedaemon_waken = 1;
		wakeup(&vm_pages_needed);
	}

	return zalloc(pvzone);
}

static void
pmap_free_pv(struct pv_entry *pv)
{

	pv_entry_count--;
	zfree(pvzone, pv);
}

/*
 * We really hope that we don't need overflow entries
 * before the VM system is initialized!
 *
 * XXX: Should really be switched over to the zone allocator.
 */
static struct pte_ovfl *
poalloc(void)
{
	struct po_page *pop;
	struct pte_ovfl *po;
	vm_page_t mem;
	int i;

	if (!pmap_initialized) {
		panic("poalloc");
	}

	if (po_nfree == 0) {
		/*
		 * Since we cannot use maps for potable allocation,
		 * we have to steal some memory from the VM system. XXX
		 */
		mem = vm_page_alloc(NULL, 0, VM_ALLOC_SYSTEM);
		po_pcnt++;
		pop = (struct po_page *)VM_PAGE_TO_PHYS(mem);
		pop->pop_pgi.pgi_page = mem;
		LIST_INIT(&pop->pop_pgi.pgi_freelist);
		for (i = NPOPPG - 1, po = pop->pop_po + 1; --i >= 0; po++) {
			LIST_INSERT_HEAD(&pop->pop_pgi.pgi_freelist, po,
			    po_list);
		}
		po_nfree += pop->pop_pgi.pgi_nfree = NPOPPG - 1;
		LIST_INSERT_HEAD(&po_page_freelist, pop, pop_pgi.pgi_list);
		po = pop->pop_po;
	} else {
		po_nfree--;
		pop = po_page_freelist.lh_first;
		if (--pop->pop_pgi.pgi_nfree <= 0) {
			LIST_REMOVE(pop, pop_pgi.pgi_list);
		}
		po = pop->pop_pgi.pgi_freelist.lh_first;
		LIST_REMOVE(po, po_list);
	}

	return po;
}

static void
pofree(struct pte_ovfl *po, int freepage)
{
	struct po_page *pop;

	pop = (struct po_page *)trunc_page((vm_offset_t)po);
	switch (++pop->pop_pgi.pgi_nfree) {
	case NPOPPG:
		if (!freepage) {
			break;
		}
		po_nfree -= NPOPPG - 1;
		po_pcnt--;
		LIST_REMOVE(pop, pop_pgi.pgi_list);
		vm_page_free(pop->pop_pgi.pgi_page);
		return;
	case 1:
		LIST_INSERT_HEAD(&po_page_freelist, pop, pop_pgi.pgi_list);
	default:
		break;
	}
	LIST_INSERT_HEAD(&pop->pop_pgi.pgi_freelist, po, po_list);
	po_nfree++;
}

/*
 * This returns whether this is the first mapping of a page.
 */
static int
pmap_enter_pv(int pteidx, vm_offset_t va, vm_offset_t pa)
{
	struct pv_entry *pv, *npv;
	int s, first;

	if (!pmap_initialized) {
		return 0;
	}

	s = splimp();

	pv = pa_to_pv(pa);
	first = pv->pv_idx;
	if (pv->pv_idx == -1) {
		/*
		 * No entries yet, use header as the first entry.
		 */
		pv->pv_va = va;
		pv->pv_idx = pteidx;
		pv->pv_next = NULL;
	} else {
		/*
		 * There is at least one other VA mapping this page.
		 * Place this entry after the header.
		 */
		npv = pmap_alloc_pv();
		npv->pv_va = va;
		npv->pv_idx = pteidx;
		npv->pv_next = pv->pv_next;
		pv->pv_next = npv;
	}
	splx(s);
	return first;
}
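
/*
 * Note that pa_to_pv() and pa_to_attr() are still stubbed out to return
 * NULL above, so the PV-entry bookkeeping in pmap_enter_pv() and
 * pmap_remove_pv() is effectively disabled until the commented-out
 * vm_physseg lookups are brought back to life.
 */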

static void
pmap_remove_pv(int pteidx, vm_offset_t va, vm_offset_t pa, struct pte *pte)
{
	struct pv_entry *pv, *npv;
	char *attr;

	/*
	 * First transfer reference/change bits to cache.
	 */
	attr = pa_to_attr(pa);
	if (attr == NULL) {
		return;
	}
	*attr |= (pte->pte_lo & (PTE_REF | PTE_CHG)) >> ATTRSHFT;

	/*
	 * Remove from the PV table.
	 */
	pv = pa_to_pv(pa);

	/*
	 * If it is the first entry on the list, it is actually
	 * in the header and we must copy the following entry up
	 * to the header.  Otherwise we must search the list for
	 * the entry.  In either case we free the now unused entry.
	 */
	if (pteidx == pv->pv_idx && va == pv->pv_va) {
		npv = pv->pv_next;
		if (npv) {
			*pv = *npv;
			pmap_free_pv(npv);
		} else {
			pv->pv_idx = -1;
		}
	} else {
		for (; (npv = pv->pv_next); pv = npv) {
			if (pteidx == npv->pv_idx && va == npv->pv_va) {
				break;
			}
		}
		if (npv) {
			pv->pv_next = npv->pv_next;
			pmap_free_pv(npv);
		}
#ifdef DIAGNOSTIC
		else {
			panic("pmap_remove_pv: not on list\n");
		}
#endif
	}
}

/*
 * Insert physical page at pa into the given pmap at virtual address va.
 */
void
pmap_enter(pmap_t pm, vm_offset_t va, vm_page_t pg, vm_prot_t prot,
    boolean_t wired)
{
	sr_t sr;
	int idx, s;
	pte_t pte;
	struct pte_ovfl *po;
	struct mem_region *mp;
	vm_offset_t pa;

	pa = VM_PAGE_TO_PHYS(pg) & ~PAGE_MASK;

	/*
	 * Have to remove any existing mapping first.
	 */
	pmap_remove(pm, va, va + PAGE_SIZE);

	/*
	 * Compute the HTAB index.
	 */
	idx = pteidx(sr = ptesr(pm->pm_sr, va), va);
	/*
	 * Construct the PTE.
	 *
	 * Note: Don't set the valid bit for correct operation of tlb update.
	 */
	pte.pte_hi = ((sr & SR_VSID) << PTE_VSID_SHFT)
	    | ((va & ADDR_PIDX) >> ADDR_API_SHFT);
	pte.pte_lo = (pa & PTE_RPGN) | PTE_M | PTE_I | PTE_G;

	for (mp = mem; mp->size; mp++) {
		if (pa >= mp->start && pa < mp->start + mp->size) {
			pte.pte_lo &= ~(PTE_I | PTE_G);
			break;
		}
	}
	if (prot & VM_PROT_WRITE) {
		pte.pte_lo |= PTE_RW;
	} else {
		pte.pte_lo |= PTE_RO;
	}

	/*
	 * Now record mapping for later back-translation.
	 */
	if (pmap_initialized && (pg->flags & PG_FICTITIOUS) == 0) {
		if (pmap_enter_pv(idx, va, pa)) {
			/*
			 * Flush the real memory from the cache.
			 */
			__syncicache((void *)pa, PAGE_SIZE);
		}
	}

	s = splimp();
	pm->pm_stats.resident_count++;
	/*
	 * Try to insert directly into HTAB.
	 */
	if (pte_insert(idx, &pte)) {
		splx(s);
		return;
	}

	/*
	 * Have to allocate an overflow entry.
	 *
	 * Note that we must use real addresses for these.
	 */
	po = poalloc();
	po->po_pte = pte;
	LIST_INSERT_HEAD(potable + idx, po, po_list);
	splx(s);
}

void
pmap_kenter(vm_offset_t va, vm_offset_t pa)
{
	struct vm_page pg;

	pg.phys_addr = pa;
	pmap_enter(kernel_pmap, va, &pg, VM_PROT_READ|VM_PROT_WRITE, TRUE);
}

void
pmap_kremove(vm_offset_t va)
{
	pmap_remove(kernel_pmap, va, va + PAGE_SIZE);
}
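
/*
 * The WIMG policy in pmap_enter(): every PTE starts out memory-coherent
 * (PTE_M) plus cache-inhibited and guarded (PTE_I | PTE_G), and the I/G
 * bits are stripped only when the physical address falls inside a known
 * RAM region.  Device space therefore stays uncached by default, while
 * ordinary memory is mapped cacheable.
 */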

/*
 * Remove the given range of mapping entries.
 */
void
pmap_remove(struct pmap *pm, vm_offset_t va, vm_offset_t endva)
{
	int idx, i, s;
	sr_t sr;
	pte_t *ptp;
	struct pte_ovfl *po, *npo;

	s = splimp();
	while (va < endva) {
		idx = pteidx(sr = ptesr(pm->pm_sr, va), va);
		for (ptp = ptable + idx * 8, i = 8; --i >= 0; ptp++) {
			if (ptematch(ptp, sr, va, PTE_VALID)) {
				pmap_remove_pv(idx, va, ptp->pte_lo, ptp);
				ptp->pte_hi &= ~PTE_VALID;
				__asm __volatile ("sync");
				tlbie(va);
				tlbsync();
				pm->pm_stats.resident_count--;
			}
		}
		for (ptp = ptable + (idx ^ ptab_mask) * 8, i = 8; --i >= 0;
		    ptp++) {
			if (ptematch(ptp, sr, va, PTE_VALID | PTE_HID)) {
				pmap_remove_pv(idx, va, ptp->pte_lo, ptp);
				ptp->pte_hi &= ~PTE_VALID;
				__asm __volatile ("sync");
				tlbie(va);
				tlbsync();
				pm->pm_stats.resident_count--;
			}
		}
		for (po = potable[idx].lh_first; po; po = npo) {
			npo = po->po_list.le_next;
			if (ptematch(&po->po_pte, sr, va, 0)) {
				pmap_remove_pv(idx, va, po->po_pte.pte_lo,
				    &po->po_pte);
				LIST_REMOVE(po, po_list);
				pofree(po, 1);
				pm->pm_stats.resident_count--;
			}
		}
		va += PAGE_SIZE;
	}
	splx(s);
}

static pte_t *
pte_find(struct pmap *pm, vm_offset_t va)
{
	int idx, i;
	sr_t sr;
	pte_t *ptp;
	struct pte_ovfl *po;

	idx = pteidx(sr = ptesr(pm->pm_sr, va), va);
	for (ptp = ptable + idx * 8, i = 8; --i >= 0; ptp++) {
		if (ptematch(ptp, sr, va, PTE_VALID)) {
			return ptp;
		}
	}
	for (ptp = ptable + (idx ^ ptab_mask) * 8, i = 8; --i >= 0; ptp++) {
		if (ptematch(ptp, sr, va, PTE_VALID | PTE_HID)) {
			return ptp;
		}
	}
	for (po = potable[idx].lh_first; po; po = po->po_list.le_next) {
		if (ptematch(&po->po_pte, sr, va, 0)) {
			return &po->po_pte;
		}
	}
	return 0;
}

/*
 * Get the physical page address for the given pmap/virtual address.
 */
vm_offset_t
pmap_extract(pmap_t pm, vm_offset_t va)
{
	pte_t *ptp;
	int s;

	s = splimp();

	if (!(ptp = pte_find(pm, va))) {
		splx(s);
		return (0);
	}
	splx(s);
	return ((ptp->pte_lo & PTE_RPGN) | (va & ADDR_POFF));
}

/*
 * Lower the protection on the specified range of this pmap.
 *
 * There are only two cases: either the protection is going to 0,
 * or it is going to read-only.
 */
void
pmap_protect(struct pmap *pm, vm_offset_t sva, vm_offset_t eva, vm_prot_t prot)
{
	pte_t *ptp;
	int valid, s;

	if (prot & VM_PROT_READ) {
		s = splimp();
		while (sva < eva) {
			ptp = pte_find(pm, sva);
			if (ptp) {
				valid = ptp->pte_hi & PTE_VALID;
				ptp->pte_hi &= ~PTE_VALID;
				__asm __volatile ("sync");
				tlbie(sva);
				tlbsync();
				ptp->pte_lo &= ~PTE_PP;
				ptp->pte_lo |= PTE_RO;
				__asm __volatile ("sync");
				ptp->pte_hi |= valid;
			}
			sva += PAGE_SIZE;
		}
		splx(s);
		return;
	}
	pmap_remove(pm, sva, eva);
}
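
/*
 * pmap_protect() (and ptemodify() below) follow the architected sequence
 * for changing a live PTE: clear PTE_VALID, sync, flush the stale TLB
 * entry with tlbie/tlbsync, rewrite the low word, sync again, and only
 * then restore PTE_VALID.  Modifying a valid PTE in place would let a
 * concurrent table walk observe an inconsistent entry.
 */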

boolean_t
ptemodify(vm_page_t pg, u_int mask, u_int val)
{
	vm_offset_t pa;
	struct pv_entry *pv;
	pte_t *ptp;
	struct pte_ovfl *po;
	int i, s;
	char *attr;
	int rv;

	pa = VM_PAGE_TO_PHYS(pg);

	/*
	 * First modify bits in cache.
	 */
	attr = pa_to_attr(pa);
	if (attr == NULL) {
		return FALSE;
	}

	*attr &= ~mask >> ATTRSHFT;
	*attr |= val >> ATTRSHFT;

	pv = pa_to_pv(pa);
	if (pv->pv_idx < 0) {
		return FALSE;
	}

	rv = FALSE;
	s = splimp();
	for (; pv; pv = pv->pv_next) {
		for (ptp = ptable + pv->pv_idx * 8, i = 8; --i >= 0; ptp++) {
			if ((ptp->pte_hi & PTE_VALID)
			    && (ptp->pte_lo & PTE_RPGN) == pa) {
				ptp->pte_hi &= ~PTE_VALID;
				__asm __volatile ("sync");
				tlbie(pv->pv_va);
				tlbsync();
				rv |= ptp->pte_lo & mask;
				ptp->pte_lo &= ~mask;
				ptp->pte_lo |= val;
				__asm __volatile ("sync");
				ptp->pte_hi |= PTE_VALID;
			}
		}
		for (ptp = ptable + (pv->pv_idx ^ ptab_mask) * 8, i = 8;
		    --i >= 0; ptp++) {
			if ((ptp->pte_hi & PTE_VALID)
			    && (ptp->pte_lo & PTE_RPGN) == pa) {
				ptp->pte_hi &= ~PTE_VALID;
				__asm __volatile ("sync");
				tlbie(pv->pv_va);
				tlbsync();
				rv |= ptp->pte_lo & mask;
				ptp->pte_lo &= ~mask;
				ptp->pte_lo |= val;
				__asm __volatile ("sync");
				ptp->pte_hi |= PTE_VALID;
			}
		}
		for (po = potable[pv->pv_idx].lh_first; po;
		    po = po->po_list.le_next) {
			if ((po->po_pte.pte_lo & PTE_RPGN) == pa) {
				rv |= po->po_pte.pte_lo & mask;
				po->po_pte.pte_lo &= ~mask;
				po->po_pte.pte_lo |= val;
			}
		}
	}
	splx(s);
	return rv != 0;
}

int
ptebits(vm_page_t pg, int bit)
{
	struct pv_entry *pv;
	pte_t *ptp;
	struct pte_ovfl *po;
	int i, s, bits;
	char *attr;
	vm_offset_t pa;

	bits = 0;
	pa = VM_PAGE_TO_PHYS(pg);

	/*
	 * First try the cache.
	 */
	attr = pa_to_attr(pa);
	if (attr == NULL) {
		return 0;
	}
	bits |= (*attr << ATTRSHFT) & bit;
	if (bits == bit) {
		return bits;
	}

	pv = pa_to_pv(pa);
	if (pv->pv_idx < 0) {
		return 0;
	}

	s = splimp();
	for (; pv; pv = pv->pv_next) {
		for (ptp = ptable + pv->pv_idx * 8, i = 8; --i >= 0; ptp++) {
			if ((ptp->pte_hi & PTE_VALID)
			    && (ptp->pte_lo & PTE_RPGN) == pa) {
				bits |= ptp->pte_lo & bit;
				if (bits == bit) {
					splx(s);
					return bits;
				}
			}
		}
		for (ptp = ptable + (pv->pv_idx ^ ptab_mask) * 8, i = 8;
		    --i >= 0; ptp++) {
			if ((ptp->pte_hi & PTE_VALID)
			    && (ptp->pte_lo & PTE_RPGN) == pa) {
				bits |= ptp->pte_lo & bit;
				if (bits == bit) {
					splx(s);
					return bits;
				}
			}
		}
		for (po = potable[pv->pv_idx].lh_first; po;
		    po = po->po_list.le_next) {
			if ((po->po_pte.pte_lo & PTE_RPGN) == pa) {
				bits |= po->po_pte.pte_lo & bit;
				if (bits == bit) {
					splx(s);
					return bits;
				}
			}
		}
	}
	splx(s);
	return bits;
}
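
/*
 * ptemodify() and ptebits() together maintain the referenced/changed
 * state of a page.  PTE_REF and PTE_CHG live in pte_lo; the per-page
 * attribute byte caches them shifted right by ATTRSHFT so both flags
 * fit in a char, which lets ptebits() answer without walking the hash
 * table when the cached bits already satisfy the query.
 */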

/*
 * Lower the protection on the specified physical page.
 *
 * There are only two cases: either the protection is going to 0,
 * or it is going to read-only.
 */
void
pmap_page_protect(vm_page_t m, vm_prot_t prot)
{
	vm_offset_t pa;
	vm_offset_t va;
	pte_t *ptp;
	struct pte_ovfl *po, *npo;
	int i, s, idx;
	struct pv_entry *pv;

	pa = VM_PAGE_TO_PHYS(m);

	pa &= ~ADDR_POFF;
	if (prot & VM_PROT_READ) {
		ptemodify(m, PTE_PP, PTE_RO);
		return;
	}

	pv = pa_to_pv(pa);
	if (pv == NULL) {
		return;
	}

	s = splimp();
	while (pv->pv_idx >= 0) {
		idx = pv->pv_idx;
		va = pv->pv_va;
		for (ptp = ptable + idx * 8, i = 8; --i >= 0; ptp++) {
			if ((ptp->pte_hi & PTE_VALID)
			    && (ptp->pte_lo & PTE_RPGN) == pa) {
				pmap_remove_pv(idx, va, pa, ptp);
				ptp->pte_hi &= ~PTE_VALID;
				__asm __volatile ("sync");
				tlbie(va);
				tlbsync();
				goto next;
			}
		}
		for (ptp = ptable + (idx ^ ptab_mask) * 8, i = 8; --i >= 0;
		    ptp++) {
			if ((ptp->pte_hi & PTE_VALID)
			    && (ptp->pte_lo & PTE_RPGN) == pa) {
				pmap_remove_pv(idx, va, pa, ptp);
				ptp->pte_hi &= ~PTE_VALID;
				__asm __volatile ("sync");
				tlbie(va);
				tlbsync();
				goto next;
			}
		}
		for (po = potable[idx].lh_first; po; po = npo) {
			npo = po->po_list.le_next;
			if ((po->po_pte.pte_lo & PTE_RPGN) == pa) {
				pmap_remove_pv(idx, va, pa, &po->po_pte);
				LIST_REMOVE(po, po_list);
				pofree(po, 1);
				goto next;
			}
		}
next:
		;
	}
	splx(s);
}

/*
 * Activate the address space for the specified process.  If the process
 * is the current process, load the new MMU context.
 */
void
pmap_activate(struct thread *td)
{
	struct pcb *pcb;
	pmap_t pmap;
	pmap_t rpm;
	int psl, i, ksr, seg;

	pcb = td->td_pcb;
	pmap = vmspace_pmap(td->td_proc->p_vmspace);

	/*
	 * XXX Normally performed in cpu_fork().
	 */
	if (pcb->pcb_pm != pmap) {
		pcb->pcb_pm = pmap;
		pcb->pcb_pmreal = (pmap_t)pmap_extract(kernel_pmap,
		    (vm_offset_t)pcb->pcb_pm);
	}

	if (td == curthread) {
		/* Disable interrupts while switching. */
		psl = mfmsr();
		mtmsr(psl & ~PSL_EE);

#if 0 /* XXX */
		/* Store pointer to new current pmap. */
		curpm = pcb->pcb_pmreal;
#endif

		/* Save kernel SR. */
		__asm __volatile("mfsr %0,14" : "=r"(ksr) :);

		/*
		 * Set new segment registers.  We use the pmap's real
		 * address to avoid accessibility problems.
		 */
		rpm = pcb->pcb_pmreal;
		for (i = 0; i < 16; i++) {
			seg = rpm->pm_sr[i];
			__asm __volatile("mtsrin %0,%1"
			    :: "r"(seg), "r"(i << ADDR_SR_SHFT));
		}

		/* Restore kernel SR. */
		__asm __volatile("mtsr 14,%0" :: "r"(ksr));

		/* Interrupts are OK again. */
		mtmsr(psl);
	}
}
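
/*
 * Note that the mtsrin loop in pmap_activate() overwrites all sixteen
 * segment registers, including the kernel's SR 14, with the user pmap's
 * values; the explicit save of SR 14 beforehand and restore afterwards
 * is what keeps the kernel segment mapped across the switch.
 */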

/*
 * Add a list of wired pages to the kva.  This routine is only used for
 * temporary kernel mappings that do not need to have page modification
 * or references recorded.  Note that old mappings are simply written
 * over.  The page *must* be wired.
 */
void
pmap_qenter(vm_offset_t va, vm_page_t *m, int count)
{
	int i;

	for (i = 0; i < count; i++) {
		vm_offset_t tva = va + i * PAGE_SIZE;
		pmap_kenter(tva, VM_PAGE_TO_PHYS(m[i]));
	}
}

/*
 * This routine jerks page mappings from the
 * kernel -- it is meant only for temporary mappings.
 */
void
pmap_qremove(vm_offset_t va, int count)
{
	vm_offset_t end_va;

	end_va = va + count*PAGE_SIZE;

	while (va < end_va) {
		unsigned *pte;

		pte = (unsigned *)vtopte(va);
		*pte = 0;
		tlbie(va);
		va += PAGE_SIZE;
	}
}

/*
 * pmap_ts_referenced:
 *
 * Return the count of reference bits for a page, clearing all of them.
 */
int
pmap_ts_referenced(vm_page_t m)
{

	/* XXX: coming soon... */
	return (0);
}

/*
 * This routine returns true if a physical page resides
 * in the given pmap.
 */
boolean_t
pmap_page_exists(pmap_t pmap, vm_page_t m)
{
#if 0	/* XXX: This must go! */
	register pv_entry_t pv;
	int s;

	if (!pmap_initialized || (m->flags & PG_FICTITIOUS))
		return FALSE;

	s = splvm();

	/*
	 * Not found, check current mappings returning immediately if found.
	 */
	for (pv = pv_table; pv; pv = pv->pv_next) {
		if (pv->pv_pmap == pmap) {
			splx(s);
			return TRUE;
		}
	}
	splx(s);
#endif
	return (FALSE);
}

/*
 * Used to map a range of physical addresses into kernel
 * virtual address space.
 *
 * For now, VM is already on, we only need to map the
 * specified memory.
 */
vm_offset_t
pmap_map(vm_offset_t *virt, vm_offset_t start, vm_offset_t end, int prot)
{
	vm_offset_t sva, va;

	sva = *virt;
	va = sva;

	while (start < end) {
		pmap_kenter(va, start);
		va += PAGE_SIZE;
		start += PAGE_SIZE;
	}

	*virt = va;
	return (sva);
}

vm_offset_t
pmap_addr_hint(vm_object_t obj, vm_offset_t addr, vm_size_t size)
{

	return (addr);
}

int
pmap_mincore(pmap_t pmap, vm_offset_t addr)
{

	/* XXX: coming soon... */
	return (0);
}

void
pmap_object_init_pt(pmap_t pmap, vm_offset_t addr, vm_object_t object,
    vm_pindex_t pindex, vm_size_t size, int limit)
{

	/* XXX: coming soon... */
	return;
}

void
pmap_growkernel(vm_offset_t addr)
{

	/* XXX: coming soon... */
	return;
}

/*
 * Initialize the address space (zone) for the pv_entries.  Set a
 * high water mark so that the system can recover from excessive
 * numbers of pv entries.
 */
void
pmap_init2(void)
{
	int shpgperproc = PMAP_SHPGPERPROC;

	TUNABLE_INT_FETCH("vm.pmap.shpgperproc", &shpgperproc);
	pv_entry_max = shpgperproc * maxproc + vm_page_array_size;
	pv_entry_high_water = 9 * (pv_entry_max / 10);
	zinitna(pvzone, &pvzone_obj, NULL, 0, pv_entry_max, ZONE_INTERRUPT, 1);
}

void
pmap_swapin_proc(struct proc *p)
{

	/* XXX: coming soon... */
	return;
}

void
pmap_swapout_proc(struct proc *p)
{

	/* XXX: coming soon... */
	return;
}

/*
 * Create the kernel stack (including pcb for i386) for a new thread.
 * This routine directly affects the fork perf for a process and
 * create performance for a thread.
 */
void
pmap_new_thread(struct thread *td)
{
	/* XXX: coming soon... */
	return;
}

/*
 * Dispose the kernel stack for a thread that has exited.
 * This routine directly impacts the exit perf of a process and thread.
 */
void
pmap_dispose_thread(struct thread *td)
{
	/* XXX: coming soon... */
	return;
}

/*
 * Allow the kernel stack for a thread to be prejudicially paged out.
 */
void
pmap_swapout_thread(struct thread *td)
{
	int i;
	vm_object_t ksobj;
	vm_offset_t ks;
	vm_page_t m;

	ksobj = td->td_kstack_obj;
	ks = td->td_kstack;
	for (i = 0; i < KSTACK_PAGES; i++) {
		m = vm_page_lookup(ksobj, i);
		if (m == NULL)
			panic("pmap_swapout_thread: kstack already missing?");
		vm_page_dirty(m);
		vm_page_unwire(m, 0);
		pmap_kremove(ks + i * PAGE_SIZE);
	}
}

/*
 * Bring the kernel stack for a specified thread back in.
 */
void
pmap_swapin_thread(struct thread *td)
{
	int i, rv;
	vm_object_t ksobj;
	vm_offset_t ks;
	vm_page_t m;

	ksobj = td->td_kstack_obj;
	ks = td->td_kstack;
	for (i = 0; i < KSTACK_PAGES; i++) {
		m = vm_page_grab(ksobj, i, VM_ALLOC_NORMAL | VM_ALLOC_RETRY);
		pmap_kenter(ks + i * PAGE_SIZE, VM_PAGE_TO_PHYS(m));
		if (m->valid != VM_PAGE_BITS_ALL) {
			rv = vm_pager_get_pages(ksobj, &m, 1, 0);
			if (rv != VM_PAGER_OK)
				panic("pmap_swapin_thread: cannot get kstack for proc: %d\n", td->td_proc->p_pid);
			m = vm_page_lookup(ksobj, i);
			m->valid = VM_PAGE_BITS_ALL;
		}
		vm_page_wire(m);
		vm_page_wakeup(m);
		vm_page_flag_set(m, PG_MAPPED | PG_WRITEABLE);
	}
}

void
pmap_pageable(pmap_t pmap, vm_offset_t sva, vm_offset_t eva, boolean_t pageable)
{

	return;
}

void
pmap_change_wiring(pmap_t pmap, vm_offset_t va, boolean_t wired)
{

	/* XXX: coming soon... */
	return;
}

void
pmap_prefault(pmap_t pmap, vm_offset_t addra, vm_map_entry_t entry)
{

	/* XXX: coming soon... */
	return;
}

void
pmap_remove_pages(pmap_t pmap, vm_offset_t sva, vm_offset_t eva)
{

	/* XXX: coming soon... */
	return;
}

void
pmap_pinit0(pmap_t pmap)
{

	/* XXX: coming soon... */
	return;
}

void
pmap_dispose_proc(struct proc *p)
{

	/* XXX: coming soon... */
	return;
}

vm_offset_t
pmap_steal_memory(vm_size_t size)
{
	vm_size_t bank_size;
	vm_offset_t pa;

	size = round_page(size);

	bank_size = phys_avail[1] - phys_avail[0];
	while (size > bank_size) {
		int i;
		for (i = 0; phys_avail[i+2]; i += 2) {
			phys_avail[i] = phys_avail[i+2];
			phys_avail[i+1] = phys_avail[i+3];
		}
		phys_avail[i] = 0;
		phys_avail[i+1] = 0;
		if (!phys_avail[0])
			panic("pmap_steal_memory: out of memory");
		bank_size = phys_avail[1] - phys_avail[0];
	}

	pa = phys_avail[0];
	phys_avail[0] += size;

	bzero((caddr_t) pa, size);
	return pa;
}
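
/*
 * pmap_steal_memory() carves permanent allocations straight out of
 * phys_avail[]: each bank too small to satisfy the request is dropped
 * from the array entirely, and the stolen range is then taken from the
 * bottom of the first bank that fits.  It is meant to be used only
 * during early boot, since the stolen pages are never entered into the
 * VM system.
 */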

/*
 * Create the UAREA_PAGES for a new process.
 * This routine directly affects the fork perf for a process.
 */
void
pmap_new_proc(struct proc *p)
{
	int i;
	vm_object_t upobj;
	vm_offset_t up;
	vm_page_t m;
	pte_t pte;
	sr_t sr;
	int idx;
	vm_offset_t va;

	/*
	 * Allocate an object for the upages.
	 */
	upobj = p->p_upages_obj;
	if (upobj == NULL) {
		upobj = vm_object_allocate(OBJT_DEFAULT, UAREA_PAGES);
		p->p_upages_obj = upobj;
	}

	/* Get a kernel virtual address for the UAREA_PAGES for this proc. */
	up = (vm_offset_t)p->p_uarea;
	if (up == 0) {
		up = kmem_alloc_nofault(kernel_map, UAREA_PAGES * PAGE_SIZE);
		if (up == 0)
			panic("pmap_new_proc: upage allocation failed");
		p->p_uarea = (struct user *)up;
	}

	for (i = 0; i < UAREA_PAGES; i++) {
		/*
		 * Get a uarea page.
		 */
		m = vm_page_grab(upobj, i, VM_ALLOC_NORMAL | VM_ALLOC_RETRY);

		/*
		 * Wire the page.
		 */
		m->wire_count++;
		cnt.v_wire_count++;

		/*
		 * Enter the page into the kernel address space.
		 */
		va = up + i * PAGE_SIZE;
		idx = pteidx(sr = ptesr(kernel_pmap->pm_sr, va), va);

		pte.pte_hi = ((sr & SR_VSID) << PTE_VSID_SHFT) |
		    ((va & ADDR_PIDX) >> ADDR_API_SHFT);
		pte.pte_lo = (VM_PAGE_TO_PHYS(m) & PTE_RPGN) | PTE_M | PTE_I |
		    PTE_G | PTE_RW;

		if (!pte_insert(idx, &pte)) {
			struct pte_ovfl *po;

			po = poalloc();
			po->po_pte = pte;
			LIST_INSERT_HEAD(potable + idx, po, po_list);
		}

		tlbie(va);

		vm_page_wakeup(m);
		vm_page_flag_clear(m, PG_ZERO);
		vm_page_flag_set(m, PG_MAPPED | PG_WRITEABLE);
		m->valid = VM_PAGE_BITS_ALL;
	}
}