/*
 * Copyright (c) 2001 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Matt Thomas <matt@3am-software.com> of Allegro Networks, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *        This product includes software developed by the NetBSD
 *        Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */
/*
 * Copyright (C) 1995, 1996 Wolfgang Solfrank.
 * Copyright (C) 1995, 1996 TooLs GmbH.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by TooLs GmbH.
 * 4. The name of TooLs GmbH may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY TOOLS GMBH ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL TOOLS GMBH BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
 * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
 * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * $NetBSD: pmap.c,v 1.28 2000/03/26 20:42:36 kleink Exp $
 */
/*
 * Copyright (C) 2001 Benno Rice.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY Benno Rice ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL TOOLS GMBH BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
 * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
 * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: head/sys/powerpc/aim/mmu_oea.c 134453 2004-08-28 20:27:12Z alc $");

/*
 * Manages physical address maps.
 *
 * In addition to hardware address maps, this module is called upon to
 * provide software-use-only maps which may or may not be stored in the
 * same form as hardware maps.  These pseudo-maps are used to store
 * intermediate results from copy operations to and from address spaces.
 *
 * Since the information managed by this module is also stored by the
 * logical address mapping module, this module may throw away valid virtual
 * to physical mappings at almost any time.  However, invalidations of
 * mappings must be done as requested.
 *
 * In order to cope with hardware architectures which make virtual to
 * physical map invalidates expensive, this module may delay invalidate
 * reduced protection operations until such time as they are actually
 * necessary.  This module is given full information as to which processors
 * are currently using which maps, and to when physical maps must be made
 * correct.
 */

#include "opt_kstack_pages.h"

#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/lock.h>
#include <sys/msgbuf.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/sysctl.h>
#include <sys/systm.h>
#include <sys/vmmeter.h>

#include <dev/ofw/openfirm.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/vm_kern.h>
#include <vm/vm_page.h>
#include <vm/vm_map.h>
#include <vm/vm_object.h>
#include <vm/vm_extern.h>
#include <vm/vm_pageout.h>
#include <vm/vm_pager.h>
#include <vm/uma.h>

#include <machine/cpu.h>
#include <machine/powerpc.h>
#include <machine/bat.h>
#include <machine/frame.h>
#include <machine/md_var.h>
#include <machine/psl.h>
#include <machine/pte.h>
#include <machine/sr.h>

#define	PMAP_DEBUG

#define	TODO	panic("%s: not implemented", __func__);

#define	TLBIE(va)	__asm __volatile("tlbie %0" :: "r"(va))
#define	TLBSYNC()	__asm __volatile("tlbsync");
#define	SYNC()		__asm __volatile("sync");
#define	EIEIO()		__asm __volatile("eieio");

#define	VSID_MAKE(sr, hash)	((sr) | (((hash) & 0xfffff) << 4))
#define	VSID_TO_SR(vsid)	((vsid) & 0xf)
#define	VSID_TO_HASH(vsid)	(((vsid) >> 4) & 0xfffff)

#define	PVO_PTEGIDX_MASK	0x0007		/* which PTEG slot */
#define	PVO_PTEGIDX_VALID	0x0008		/* slot is valid */
#define	PVO_WIRED		0x0010		/* PVO entry is wired */
#define	PVO_MANAGED		0x0020		/* PVO entry is managed */
#define	PVO_EXECUTABLE		0x0040		/* PVO entry is executable */
#define	PVO_BOOTSTRAP		0x0080		/* PVO entry allocated during
						   bootstrap */
#define	PVO_VADDR(pvo)		((pvo)->pvo_vaddr & ~ADDR_POFF)
#define	PVO_ISEXECUTABLE(pvo)	((pvo)->pvo_vaddr & PVO_EXECUTABLE)
#define	PVO_PTEGIDX_GET(pvo)	((pvo)->pvo_vaddr & PVO_PTEGIDX_MASK)
#define	PVO_PTEGIDX_ISSET(pvo)	((pvo)->pvo_vaddr & PVO_PTEGIDX_VALID)
#define	PVO_PTEGIDX_CLR(pvo)	\
	((void)((pvo)->pvo_vaddr &= ~(PVO_PTEGIDX_VALID|PVO_PTEGIDX_MASK)))
#define	PVO_PTEGIDX_SET(pvo, i)	\
	((void)((pvo)->pvo_vaddr |= (i)|PVO_PTEGIDX_VALID))

#define	PMAP_PVO_CHECK(pvo)

struct ofw_map {
	vm_offset_t	om_va;
	vm_size_t	om_len;
	vm_offset_t	om_pa;
	u_int		om_mode;
};

int	pmap_bootstrapped = 0;

/*
 * Virtual and physical address of message buffer.
 */
struct		msgbuf *msgbufp;
vm_offset_t	msgbuf_phys;

int pmap_pagedaemon_waken;

/*
 * Map of physical memory regions.
 */
vm_offset_t	phys_avail[128];
u_int		phys_avail_count;
static struct	mem_region *regions;
static struct	mem_region *pregions;
int		regions_sz, pregions_sz;
static struct	ofw_map *translations;

/*
 * First and last available kernel virtual addresses.
 */
vm_offset_t virtual_avail;
vm_offset_t virtual_end;
vm_offset_t kernel_vm_end;

/*
 * Kernel pmap.
 */
struct pmap kernel_pmap_store;
extern struct pmap ofw_pmap;

/*
 * PTEG data.
 */
static struct	pteg *pmap_pteg_table;
u_int		pmap_pteg_count;
u_int		pmap_pteg_mask;

/*
 * PVO data.
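 *
 * Each mapping is described by a pvo entry.  A pvo is linked onto two
 * lists: pvo_olink chains it from pmap_pvo_table[] by PTEG index and
 * serves as the overflow list when all eight hardware slots of that PTEG
 * are in use, while pvo_vlink chains it from the vm_page it maps so that
 * every mapping of a physical page can be found by pmap_page_protect(),
 * pmap_remove_all() and the REF/CHG attribute routines.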
 */
struct	pvo_head *pmap_pvo_table;		/* pvo entries by pteg index */
struct	pvo_head pmap_pvo_kunmanaged =
    LIST_HEAD_INITIALIZER(pmap_pvo_kunmanaged);	/* list of unmanaged pages */
struct	pvo_head pmap_pvo_unmanaged =
    LIST_HEAD_INITIALIZER(pmap_pvo_unmanaged);	/* list of unmanaged pages */

uma_zone_t	pmap_upvo_zone;	/* zone for pvo entries for unmanaged pages */
uma_zone_t	pmap_mpvo_zone;	/* zone for pvo entries for managed pages */

#define	BPVO_POOL_SIZE	32768
static struct	pvo_entry *pmap_bpvo_pool;
static int	pmap_bpvo_pool_index = 0;

#define	VSID_NBPW	(sizeof(u_int32_t) * 8)
static u_int	pmap_vsid_bitmap[NPMAPS / VSID_NBPW];

static boolean_t pmap_initialized = FALSE;

/*
 * Statistics.
 */
u_int	pmap_pte_valid = 0;
u_int	pmap_pte_overflow = 0;
u_int	pmap_pte_replacements = 0;
u_int	pmap_pvo_entries = 0;
u_int	pmap_pvo_enter_calls = 0;
u_int	pmap_pvo_remove_calls = 0;
u_int	pmap_pte_spills = 0;
SYSCTL_INT(_machdep, OID_AUTO, pmap_pte_valid, CTLFLAG_RD, &pmap_pte_valid,
    0, "");
SYSCTL_INT(_machdep, OID_AUTO, pmap_pte_overflow, CTLFLAG_RD,
    &pmap_pte_overflow, 0, "");
SYSCTL_INT(_machdep, OID_AUTO, pmap_pte_replacements, CTLFLAG_RD,
    &pmap_pte_replacements, 0, "");
SYSCTL_INT(_machdep, OID_AUTO, pmap_pvo_entries, CTLFLAG_RD, &pmap_pvo_entries,
    0, "");
SYSCTL_INT(_machdep, OID_AUTO, pmap_pvo_enter_calls, CTLFLAG_RD,
    &pmap_pvo_enter_calls, 0, "");
SYSCTL_INT(_machdep, OID_AUTO, pmap_pvo_remove_calls, CTLFLAG_RD,
    &pmap_pvo_remove_calls, 0, "");
SYSCTL_INT(_machdep, OID_AUTO, pmap_pte_spills, CTLFLAG_RD,
    &pmap_pte_spills, 0, "");

struct	pvo_entry *pmap_pvo_zeropage;

vm_offset_t	pmap_rkva_start = VM_MIN_KERNEL_ADDRESS;
u_int		pmap_rkva_count = 4;

/*
 * Allocate physical memory for use in pmap_bootstrap.
 */
static vm_offset_t	pmap_bootstrap_alloc(vm_size_t, u_int);

/*
 * PTE calls.
 */
static int	pmap_pte_insert(u_int, struct pte *);

/*
 * PVO calls.
 */
static int	pmap_pvo_enter(pmap_t, uma_zone_t, struct pvo_head *,
		    vm_offset_t, vm_offset_t, u_int, int);
static void	pmap_pvo_remove(struct pvo_entry *, int);
static struct	pvo_entry *pmap_pvo_find_va(pmap_t, vm_offset_t, int *);
static struct	pte *pmap_pvo_to_pte(const struct pvo_entry *, int);

/*
 * Utility routines.
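 *
 * These cover the reserved-KVA helpers used to temporarily map arbitrary
 * physical pages (for pmap_zero_page() and friends), instruction-cache
 * synchronization, the REF/CHG attribute query/clear primitives and a
 * whole-TLB invalidation.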
 */
static struct	pvo_entry *pmap_rkva_alloc(void);
static void	pmap_pa_map(struct pvo_entry *, vm_offset_t,
		    struct pte *, int *);
static void	pmap_pa_unmap(struct pvo_entry *, struct pte *, int *);
static void	pmap_syncicache(vm_offset_t, vm_size_t);
static boolean_t pmap_query_bit(vm_page_t, int);
static u_int	pmap_clear_bit(vm_page_t, int, int *);
static void	tlbia(void);

static __inline int
va_to_sr(u_int *sr, vm_offset_t va)
{
	return (sr[(uintptr_t)va >> ADDR_SR_SHFT]);
}

static __inline u_int
va_to_pteg(u_int sr, vm_offset_t addr)
{
	u_int	hash;

	hash = (sr & SR_VSID_MASK) ^ (((u_int)addr & ADDR_PIDX) >>
	    ADDR_PIDX_SHFT);
	return (hash & pmap_pteg_mask);
}

static __inline struct pvo_head *
pa_to_pvoh(vm_offset_t pa, vm_page_t *pg_p)
{
	struct	vm_page *pg;

	pg = PHYS_TO_VM_PAGE(pa);

	if (pg_p != NULL)
		*pg_p = pg;

	if (pg == NULL)
		return (&pmap_pvo_unmanaged);

	return (&pg->md.mdpg_pvoh);
}

static __inline struct pvo_head *
vm_page_to_pvoh(vm_page_t m)
{

	return (&m->md.mdpg_pvoh);
}

static __inline void
pmap_attr_clear(vm_page_t m, int ptebit)
{

	m->md.mdpg_attrs &= ~ptebit;
}

static __inline int
pmap_attr_fetch(vm_page_t m)
{

	return (m->md.mdpg_attrs);
}

static __inline void
pmap_attr_save(vm_page_t m, int ptebit)
{

	m->md.mdpg_attrs |= ptebit;
}

static __inline int
pmap_pte_compare(const struct pte *pt, const struct pte *pvo_pt)
{
	if (pt->pte_hi == pvo_pt->pte_hi)
		return (1);

	return (0);
}

static __inline int
pmap_pte_match(struct pte *pt, u_int sr, vm_offset_t va, int which)
{
	return (pt->pte_hi & ~PTE_VALID) ==
	    (((sr & SR_VSID_MASK) << PTE_VSID_SHFT) |
	    ((va >> ADDR_API_SHFT) & PTE_API) | which);
}

static __inline void
pmap_pte_create(struct pte *pt, u_int sr, vm_offset_t va, u_int pte_lo)
{
	/*
	 * Construct a PTE.  Default to IMB initially.  Valid bit only gets
	 * set when the real pte is set in memory.
	 *
	 * Note: Don't set the valid bit for correct operation of tlb update.
	 */
	pt->pte_hi = ((sr & SR_VSID_MASK) << PTE_VSID_SHFT) |
	    (((va & ADDR_PIDX) >> ADDR_API_SHFT) & PTE_API);
	pt->pte_lo = pte_lo;
}

static __inline void
pmap_pte_synch(struct pte *pt, struct pte *pvo_pt)
{

	pvo_pt->pte_lo |= pt->pte_lo & (PTE_REF | PTE_CHG);
}

static __inline void
pmap_pte_clear(struct pte *pt, vm_offset_t va, int ptebit)
{

	/*
	 * As shown in Section 7.6.3.2.3
	 */
	pt->pte_lo &= ~ptebit;
	TLBIE(va);
	EIEIO();
	TLBSYNC();
	SYNC();
}

static __inline void
pmap_pte_set(struct pte *pt, struct pte *pvo_pt)
{

	pvo_pt->pte_hi |= PTE_VALID;

	/*
	 * Update the PTE as defined in section 7.6.3.1.
	 * Note that the REF/CHG bits are from pvo_pt and thus should have
	 * been saved so this routine can restore them (if desired).
	 */
	pt->pte_lo = pvo_pt->pte_lo;
	EIEIO();
	pt->pte_hi = pvo_pt->pte_hi;
	SYNC();
	pmap_pte_valid++;
}

static __inline void
pmap_pte_unset(struct pte *pt, struct pte *pvo_pt, vm_offset_t va)
{

	pvo_pt->pte_hi &= ~PTE_VALID;

	/*
	 * Force the ref & chg bits back into the PTEs.
	 */
	SYNC();

	/*
	 * Invalidate the pte.
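	 *
	 * The sequence below follows the architected page table update
	 * protocol: clear PTE_VALID in the hardware PTE, SYNC so the store
	 * is performed, then TLBIE/EIEIO/TLBSYNC/SYNC to flush any stale
	 * translation before the REF/CHG bits are harvested.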
455 */ 456 pt->pte_hi &= ~PTE_VALID; 457 458 SYNC(); 459 TLBIE(va); 460 EIEIO(); 461 TLBSYNC(); 462 SYNC(); 463 464 /* 465 * Save the reg & chg bits. 466 */ 467 pmap_pte_synch(pt, pvo_pt); 468 pmap_pte_valid--; 469} 470 471static __inline void 472pmap_pte_change(struct pte *pt, struct pte *pvo_pt, vm_offset_t va) 473{ 474 475 /* 476 * Invalidate the PTE 477 */ 478 pmap_pte_unset(pt, pvo_pt, va); 479 pmap_pte_set(pt, pvo_pt); 480} 481 482/* 483 * Quick sort callout for comparing memory regions. 484 */ 485static int mr_cmp(const void *a, const void *b); 486static int om_cmp(const void *a, const void *b); 487 488static int 489mr_cmp(const void *a, const void *b) 490{ 491 const struct mem_region *regiona; 492 const struct mem_region *regionb; 493 494 regiona = a; 495 regionb = b; 496 if (regiona->mr_start < regionb->mr_start) 497 return (-1); 498 else if (regiona->mr_start > regionb->mr_start) 499 return (1); 500 else 501 return (0); 502} 503 504static int 505om_cmp(const void *a, const void *b) 506{ 507 const struct ofw_map *mapa; 508 const struct ofw_map *mapb; 509 510 mapa = a; 511 mapb = b; 512 if (mapa->om_pa < mapb->om_pa) 513 return (-1); 514 else if (mapa->om_pa > mapb->om_pa) 515 return (1); 516 else 517 return (0); 518} 519 520void 521pmap_bootstrap(vm_offset_t kernelstart, vm_offset_t kernelend) 522{ 523 ihandle_t mmui; 524 phandle_t chosen, mmu; 525 int sz; 526 int i, j; 527 int ofw_mappings; 528 vm_size_t size, physsz; 529 vm_offset_t pa, va, off; 530 u_int batl, batu; 531 532 /* 533 * Set up BAT0 to map the lowest 256 MB area 534 */ 535 battable[0x0].batl = BATL(0x00000000, BAT_M, BAT_PP_RW); 536 battable[0x0].batu = BATU(0x00000000, BAT_BL_256M, BAT_Vs); 537 538 /* 539 * Map PCI memory space. 540 */ 541 battable[0x8].batl = BATL(0x80000000, BAT_I|BAT_G, BAT_PP_RW); 542 battable[0x8].batu = BATU(0x80000000, BAT_BL_256M, BAT_Vs); 543 544 battable[0x9].batl = BATL(0x90000000, BAT_I|BAT_G, BAT_PP_RW); 545 battable[0x9].batu = BATU(0x90000000, BAT_BL_256M, BAT_Vs); 546 547 battable[0xa].batl = BATL(0xa0000000, BAT_I|BAT_G, BAT_PP_RW); 548 battable[0xa].batu = BATU(0xa0000000, BAT_BL_256M, BAT_Vs); 549 550 battable[0xb].batl = BATL(0xb0000000, BAT_I|BAT_G, BAT_PP_RW); 551 battable[0xb].batu = BATU(0xb0000000, BAT_BL_256M, BAT_Vs); 552 553 /* 554 * Map obio devices. 555 */ 556 battable[0xf].batl = BATL(0xf0000000, BAT_I|BAT_G, BAT_PP_RW); 557 battable[0xf].batu = BATU(0xf0000000, BAT_BL_256M, BAT_Vs); 558 559 /* 560 * Use an IBAT and a DBAT to map the bottom segment of memory 561 * where we are. 562 */ 563 batu = BATU(0x00000000, BAT_BL_256M, BAT_Vs); 564 batl = BATL(0x00000000, BAT_M, BAT_PP_RW); 565 __asm ("mtibatu 0,%0; mtibatl 0,%1; isync; \n" 566 "mtdbatu 0,%0; mtdbatl 0,%1; isync" 567 :: "r"(batu), "r"(batl)); 568 569#if 0 570 /* map frame buffer */ 571 batu = BATU(0x90000000, BAT_BL_256M, BAT_Vs); 572 batl = BATL(0x90000000, BAT_I|BAT_G, BAT_PP_RW); 573 __asm ("mtdbatu 1,%0; mtdbatl 1,%1; isync" 574 :: "r"(batu), "r"(batl)); 575#endif 576 577#if 1 578 /* map pci space */ 579 batu = BATU(0x80000000, BAT_BL_256M, BAT_Vs); 580 batl = BATL(0x80000000, BAT_I|BAT_G, BAT_PP_RW); 581 __asm ("mtdbatu 1,%0; mtdbatl 1,%1; isync" 582 :: "r"(batu), "r"(batl)); 583#endif 584 585 /* 586 * Set the start and end of kva. 
587 */ 588 virtual_avail = VM_MIN_KERNEL_ADDRESS; 589 virtual_end = VM_MAX_KERNEL_ADDRESS; 590 591 mem_regions(&pregions, &pregions_sz, ®ions, ®ions_sz); 592 CTR0(KTR_PMAP, "pmap_bootstrap: physical memory"); 593 594 qsort(pregions, pregions_sz, sizeof(*pregions), mr_cmp); 595 for (i = 0; i < pregions_sz; i++) { 596 vm_offset_t pa; 597 vm_offset_t end; 598 599 CTR3(KTR_PMAP, "physregion: %#x - %#x (%#x)", 600 pregions[i].mr_start, 601 pregions[i].mr_start + pregions[i].mr_size, 602 pregions[i].mr_size); 603 /* 604 * Install entries into the BAT table to allow all 605 * of physmem to be convered by on-demand BAT entries. 606 * The loop will sometimes set the same battable element 607 * twice, but that's fine since they won't be used for 608 * a while yet. 609 */ 610 pa = pregions[i].mr_start & 0xf0000000; 611 end = pregions[i].mr_start + pregions[i].mr_size; 612 do { 613 u_int n = pa >> ADDR_SR_SHFT; 614 615 battable[n].batl = BATL(pa, BAT_M, BAT_PP_RW); 616 battable[n].batu = BATU(pa, BAT_BL_256M, BAT_Vs); 617 pa += SEGMENT_LENGTH; 618 } while (pa < end); 619 } 620 621 if (sizeof(phys_avail)/sizeof(phys_avail[0]) < regions_sz) 622 panic("pmap_bootstrap: phys_avail too small"); 623 qsort(regions, regions_sz, sizeof(*regions), mr_cmp); 624 phys_avail_count = 0; 625 physsz = 0; 626 for (i = 0, j = 0; i < regions_sz; i++, j += 2) { 627 CTR3(KTR_PMAP, "region: %#x - %#x (%#x)", regions[i].mr_start, 628 regions[i].mr_start + regions[i].mr_size, 629 regions[i].mr_size); 630 phys_avail[j] = regions[i].mr_start; 631 phys_avail[j + 1] = regions[i].mr_start + regions[i].mr_size; 632 phys_avail_count++; 633 physsz += regions[i].mr_size; 634 } 635 physmem = btoc(physsz); 636 637 /* 638 * Allocate PTEG table. 639 */ 640#ifdef PTEGCOUNT 641 pmap_pteg_count = PTEGCOUNT; 642#else 643 pmap_pteg_count = 0x1000; 644 645 while (pmap_pteg_count < physmem) 646 pmap_pteg_count <<= 1; 647 648 pmap_pteg_count >>= 1; 649#endif /* PTEGCOUNT */ 650 651 size = pmap_pteg_count * sizeof(struct pteg); 652 CTR2(KTR_PMAP, "pmap_bootstrap: %d PTEGs, %d bytes", pmap_pteg_count, 653 size); 654 pmap_pteg_table = (struct pteg *)pmap_bootstrap_alloc(size, size); 655 CTR1(KTR_PMAP, "pmap_bootstrap: PTEG table at %p", pmap_pteg_table); 656 bzero((void *)pmap_pteg_table, pmap_pteg_count * sizeof(struct pteg)); 657 pmap_pteg_mask = pmap_pteg_count - 1; 658 659 /* 660 * Allocate pv/overflow lists. 661 */ 662 size = sizeof(struct pvo_head) * pmap_pteg_count; 663 pmap_pvo_table = (struct pvo_head *)pmap_bootstrap_alloc(size, 664 PAGE_SIZE); 665 CTR1(KTR_PMAP, "pmap_bootstrap: PVO table at %p", pmap_pvo_table); 666 for (i = 0; i < pmap_pteg_count; i++) 667 LIST_INIT(&pmap_pvo_table[i]); 668 669 /* 670 * Allocate the message buffer. 671 */ 672 msgbuf_phys = pmap_bootstrap_alloc(MSGBUF_SIZE, 0); 673 674 /* 675 * Initialise the unmanaged pvo pool. 676 */ 677 pmap_bpvo_pool = (struct pvo_entry *)pmap_bootstrap_alloc( 678 BPVO_POOL_SIZE*sizeof(struct pvo_entry), 0); 679 pmap_bpvo_pool_index = 0; 680 681 /* 682 * Make sure kernel vsid is allocated as well as VSID 0. 683 */ 684 pmap_vsid_bitmap[(KERNEL_VSIDBITS & (NPMAPS - 1)) / VSID_NBPW] 685 |= 1 << (KERNEL_VSIDBITS % VSID_NBPW); 686 pmap_vsid_bitmap[0] |= 1; 687 688 /* 689 * Set up the Open Firmware pmap and add it's mappings. 
690 */ 691 pmap_pinit(&ofw_pmap); 692 ofw_pmap.pm_sr[KERNEL_SR] = KERNEL_SEGMENT; 693 ofw_pmap.pm_sr[KERNEL2_SR] = KERNEL2_SEGMENT; 694 if ((chosen = OF_finddevice("/chosen")) == -1) 695 panic("pmap_bootstrap: can't find /chosen"); 696 OF_getprop(chosen, "mmu", &mmui, 4); 697 if ((mmu = OF_instance_to_package(mmui)) == -1) 698 panic("pmap_bootstrap: can't get mmu package"); 699 if ((sz = OF_getproplen(mmu, "translations")) == -1) 700 panic("pmap_bootstrap: can't get ofw translation count"); 701 translations = NULL; 702 for (i = 0; phys_avail[i] != 0; i += 2) { 703 if (phys_avail[i + 1] >= sz) { 704 translations = (struct ofw_map *)phys_avail[i]; 705 break; 706 } 707 } 708 if (translations == NULL) 709 panic("pmap_bootstrap: no space to copy translations"); 710 bzero(translations, sz); 711 if (OF_getprop(mmu, "translations", translations, sz) == -1) 712 panic("pmap_bootstrap: can't get ofw translations"); 713 CTR0(KTR_PMAP, "pmap_bootstrap: translations"); 714 sz /= sizeof(*translations); 715 qsort(translations, sz, sizeof (*translations), om_cmp); 716 for (i = 0, ofw_mappings = 0; i < sz; i++) { 717 CTR3(KTR_PMAP, "translation: pa=%#x va=%#x len=%#x", 718 translations[i].om_pa, translations[i].om_va, 719 translations[i].om_len); 720 721 /* 722 * If the mapping is 1:1, let the RAM and device on-demand 723 * BAT tables take care of the translation. 724 */ 725 if (translations[i].om_va == translations[i].om_pa) 726 continue; 727 728 /* Enter the pages */ 729 for (off = 0; off < translations[i].om_len; off += PAGE_SIZE) { 730 struct vm_page m; 731 732 m.phys_addr = translations[i].om_pa + off; 733 pmap_enter(&ofw_pmap, translations[i].om_va + off, &m, 734 VM_PROT_ALL, 1); 735 ofw_mappings++; 736 } 737 } 738#ifdef SMP 739 TLBSYNC(); 740#endif 741 742 /* 743 * Initialize the kernel pmap (which is statically allocated). 744 */ 745 PMAP_LOCK_INIT(kernel_pmap); 746 for (i = 0; i < 16; i++) { 747 kernel_pmap->pm_sr[i] = EMPTY_SEGMENT; 748 } 749 kernel_pmap->pm_sr[KERNEL_SR] = KERNEL_SEGMENT; 750 kernel_pmap->pm_sr[KERNEL2_SR] = KERNEL_SEGMENT; 751 kernel_pmap->pm_active = ~0; 752 753 /* 754 * Allocate a kernel stack with a guard page for thread0 and map it 755 * into the kernel page map. 756 */ 757 pa = pmap_bootstrap_alloc(KSTACK_PAGES * PAGE_SIZE, 0); 758 kstack0_phys = pa; 759 kstack0 = virtual_avail + (KSTACK_GUARD_PAGES * PAGE_SIZE); 760 CTR2(KTR_PMAP, "pmap_bootstrap: kstack0 at %#x (%#x)", kstack0_phys, 761 kstack0); 762 virtual_avail += (KSTACK_PAGES + KSTACK_GUARD_PAGES) * PAGE_SIZE; 763 for (i = 0; i < KSTACK_PAGES; i++) { 764 pa = kstack0_phys + i * PAGE_SIZE; 765 va = kstack0 + i * PAGE_SIZE; 766 pmap_kenter(va, pa); 767 TLBIE(va); 768 } 769 770 /* 771 * Calculate the last available physical address. 772 */ 773 for (i = 0; phys_avail[i + 2] != 0; i += 2) 774 ; 775 Maxmem = powerpc_btop(phys_avail[i + 1]); 776 777 /* 778 * Allocate virtual address space for the message buffer. 779 */ 780 msgbufp = (struct msgbuf *)virtual_avail; 781 virtual_avail += round_page(MSGBUF_SIZE); 782 783 /* 784 * Initialize hardware. 785 */ 786 for (i = 0; i < 16; i++) { 787 mtsrin(i << ADDR_SR_SHFT, EMPTY_SEGMENT); 788 } 789 __asm __volatile ("mtsr %0,%1" 790 :: "n"(KERNEL_SR), "r"(KERNEL_SEGMENT)); 791 __asm __volatile ("sync; mtsdr1 %0; isync" 792 :: "r"((u_int)pmap_pteg_table | (pmap_pteg_mask >> 10))); 793 tlbia(); 794 795 pmap_bootstrapped++; 796} 797 798/* 799 * Activate a user pmap. The pmap must be activated before it's address 800 * space can be accessed in any way. 
801 */ 802void 803pmap_activate(struct thread *td) 804{ 805 pmap_t pm, pmr; 806 807 /* 808 * Load all the data we need up front to encourage the compiler to 809 * not issue any loads while we have interrupts disabled below. 810 */ 811 pm = &td->td_proc->p_vmspace->vm_pmap; 812 813 if ((pmr = (pmap_t)pmap_kextract((vm_offset_t)pm)) == NULL) 814 pmr = pm; 815 816 pm->pm_active |= PCPU_GET(cpumask); 817 PCPU_SET(curpmap, pmr); 818} 819 820void 821pmap_deactivate(struct thread *td) 822{ 823 pmap_t pm; 824 825 pm = &td->td_proc->p_vmspace->vm_pmap; 826 pm->pm_active &= ~(PCPU_GET(cpumask)); 827 PCPU_SET(curpmap, NULL); 828} 829 830vm_offset_t 831pmap_addr_hint(vm_object_t object, vm_offset_t va, vm_size_t size) 832{ 833 834 return (va); 835} 836 837void 838pmap_change_wiring(pmap_t pm, vm_offset_t va, boolean_t wired) 839{ 840 struct pvo_entry *pvo; 841 842 PMAP_LOCK(pm); 843 pvo = pmap_pvo_find_va(pm, va & ~ADDR_POFF, NULL); 844 845 if (pvo != NULL) { 846 if (wired) { 847 if ((pvo->pvo_vaddr & PVO_WIRED) == 0) 848 pm->pm_stats.wired_count++; 849 pvo->pvo_vaddr |= PVO_WIRED; 850 } else { 851 if ((pvo->pvo_vaddr & PVO_WIRED) != 0) 852 pm->pm_stats.wired_count--; 853 pvo->pvo_vaddr &= ~PVO_WIRED; 854 } 855 } 856 PMAP_UNLOCK(pm); 857} 858 859void 860pmap_copy(pmap_t dst_pmap, pmap_t src_pmap, vm_offset_t dst_addr, 861 vm_size_t len, vm_offset_t src_addr) 862{ 863 864 /* 865 * This is not needed as it's mainly an optimisation. 866 * It may want to be implemented later though. 867 */ 868} 869 870void 871pmap_copy_page(vm_page_t msrc, vm_page_t mdst) 872{ 873 vm_offset_t dst; 874 vm_offset_t src; 875 876 dst = VM_PAGE_TO_PHYS(mdst); 877 src = VM_PAGE_TO_PHYS(msrc); 878 879 kcopy((void *)src, (void *)dst, PAGE_SIZE); 880} 881 882/* 883 * Zero a page of physical memory by temporarily mapping it into the tlb. 884 */ 885void 886pmap_zero_page(vm_page_t m) 887{ 888 vm_offset_t pa = VM_PAGE_TO_PHYS(m); 889 caddr_t va; 890 891 if (pa < SEGMENT_LENGTH) { 892 va = (caddr_t) pa; 893 } else if (pmap_initialized) { 894 if (pmap_pvo_zeropage == NULL) 895 pmap_pvo_zeropage = pmap_rkva_alloc(); 896 pmap_pa_map(pmap_pvo_zeropage, pa, NULL, NULL); 897 va = (caddr_t)PVO_VADDR(pmap_pvo_zeropage); 898 } else { 899 panic("pmap_zero_page: can't zero pa %#x", pa); 900 } 901 902 bzero(va, PAGE_SIZE); 903 904 if (pa >= SEGMENT_LENGTH) 905 pmap_pa_unmap(pmap_pvo_zeropage, NULL, NULL); 906} 907 908void 909pmap_zero_page_area(vm_page_t m, int off, int size) 910{ 911 vm_offset_t pa = VM_PAGE_TO_PHYS(m); 912 caddr_t va; 913 914 if (pa < SEGMENT_LENGTH) { 915 va = (caddr_t) pa; 916 } else if (pmap_initialized) { 917 if (pmap_pvo_zeropage == NULL) 918 pmap_pvo_zeropage = pmap_rkva_alloc(); 919 pmap_pa_map(pmap_pvo_zeropage, pa, NULL, NULL); 920 va = (caddr_t)PVO_VADDR(pmap_pvo_zeropage); 921 } else { 922 panic("pmap_zero_page: can't zero pa %#x", pa); 923 } 924 925 bzero(va + off, size); 926 927 if (pa >= SEGMENT_LENGTH) 928 pmap_pa_unmap(pmap_pvo_zeropage, NULL, NULL); 929} 930 931void 932pmap_zero_page_idle(vm_page_t m) 933{ 934 935 /* XXX this is called outside of Giant, is pmap_zero_page safe? */ 936 /* XXX maybe have a dedicated mapping for this to avoid the problem? */ 937 mtx_lock(&Giant); 938 pmap_zero_page(m); 939 mtx_unlock(&Giant); 940} 941 942/* 943 * Map the given physical page at the specified virtual address in the 944 * target pmap with the protection requested. If specified the page 945 * will be wired down. 
946 */ 947void 948pmap_enter(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot, 949 boolean_t wired) 950{ 951 struct pvo_head *pvo_head; 952 uma_zone_t zone; 953 vm_page_t pg; 954 u_int pte_lo, pvo_flags, was_exec, i; 955 int error; 956 957 if (!pmap_initialized) { 958 pvo_head = &pmap_pvo_kunmanaged; 959 zone = pmap_upvo_zone; 960 pvo_flags = 0; 961 pg = NULL; 962 was_exec = PTE_EXEC; 963 } else { 964 pvo_head = vm_page_to_pvoh(m); 965 pg = m; 966 zone = pmap_mpvo_zone; 967 pvo_flags = PVO_MANAGED; 968 was_exec = 0; 969 } 970 if (pmap_bootstrapped) { 971 vm_page_lock_queues(); 972 PMAP_LOCK(pmap); 973 } 974 975 /* 976 * If this is a managed page, and it's the first reference to the page, 977 * clear the execness of the page. Otherwise fetch the execness. 978 */ 979 if (pg != NULL) { 980 if (LIST_EMPTY(pvo_head)) { 981 pmap_attr_clear(pg, PTE_EXEC); 982 } else { 983 was_exec = pmap_attr_fetch(pg) & PTE_EXEC; 984 } 985 } 986 987 988 /* 989 * Assume the page is cache inhibited and access is guarded unless 990 * it's in our available memory array. 991 */ 992 pte_lo = PTE_I | PTE_G; 993 for (i = 0; i < pregions_sz; i++) { 994 if ((VM_PAGE_TO_PHYS(m) >= pregions[i].mr_start) && 995 (VM_PAGE_TO_PHYS(m) < 996 (pregions[i].mr_start + pregions[i].mr_size))) { 997 pte_lo &= ~(PTE_I | PTE_G); 998 break; 999 } 1000 } 1001 1002 if (prot & VM_PROT_WRITE) 1003 pte_lo |= PTE_BW; 1004 else 1005 pte_lo |= PTE_BR; 1006 1007 pvo_flags |= (prot & VM_PROT_EXECUTE); 1008 1009 if (wired) 1010 pvo_flags |= PVO_WIRED; 1011 1012 error = pmap_pvo_enter(pmap, zone, pvo_head, va, VM_PAGE_TO_PHYS(m), 1013 pte_lo, pvo_flags); 1014 1015 /* 1016 * Flush the real page from the instruction cache if this page is 1017 * mapped executable and cacheable and was not previously mapped (or 1018 * was not mapped executable). 1019 */ 1020 if (error == 0 && (pvo_flags & PVO_EXECUTABLE) && 1021 (pte_lo & PTE_I) == 0 && was_exec == 0) { 1022 /* 1023 * Flush the real memory from the cache. 1024 */ 1025 pmap_syncicache(VM_PAGE_TO_PHYS(m), PAGE_SIZE); 1026 if (pg != NULL) 1027 pmap_attr_save(pg, PTE_EXEC); 1028 } 1029 if (pmap_bootstrapped) 1030 vm_page_unlock_queues(); 1031 1032 /* XXX syncicache always until problems are sorted */ 1033 pmap_syncicache(VM_PAGE_TO_PHYS(m), PAGE_SIZE); 1034 if (pmap_bootstrapped) 1035 PMAP_UNLOCK(pmap); 1036} 1037 1038vm_page_t 1039pmap_enter_quick(pmap_t pm, vm_offset_t va, vm_page_t m, vm_page_t mpte) 1040{ 1041 1042 mtx_lock(&Giant); 1043 pmap_enter(pm, va, m, VM_PROT_READ | VM_PROT_EXECUTE, FALSE); 1044 mtx_unlock(&Giant); 1045 return (NULL); 1046} 1047 1048vm_paddr_t 1049pmap_extract(pmap_t pm, vm_offset_t va) 1050{ 1051 struct pvo_entry *pvo; 1052 vm_paddr_t pa; 1053 1054 PMAP_LOCK(pm); 1055 pvo = pmap_pvo_find_va(pm, va & ~ADDR_POFF, NULL); 1056 if (pvo == NULL) 1057 pa = 0; 1058 else 1059 pa = (pvo->pvo_pte.pte_lo & PTE_RPGN) | (va & ADDR_POFF); 1060 PMAP_UNLOCK(pm); 1061 return (pa); 1062} 1063 1064/* 1065 * Atomically extract and hold the physical page with the given 1066 * pmap and virtual address pair if that mapping permits the given 1067 * protection. 
1068 */ 1069vm_page_t 1070pmap_extract_and_hold(pmap_t pmap, vm_offset_t va, vm_prot_t prot) 1071{ 1072 struct pvo_entry *pvo; 1073 vm_page_t m; 1074 1075 m = NULL; 1076 mtx_lock(&Giant); 1077 vm_page_lock_queues(); 1078 PMAP_LOCK(pmap); 1079 pvo = pmap_pvo_find_va(pmap, va & ~ADDR_POFF, NULL); 1080 if (pvo != NULL && (pvo->pvo_pte.pte_hi & PTE_VALID) && 1081 ((pvo->pvo_pte.pte_lo & PTE_PP) == PTE_RW || 1082 (prot & VM_PROT_WRITE) == 0)) { 1083 m = PHYS_TO_VM_PAGE(pvo->pvo_pte.pte_lo & PTE_RPGN); 1084 vm_page_hold(m); 1085 } 1086 vm_page_unlock_queues(); 1087 PMAP_UNLOCK(pmap); 1088 mtx_unlock(&Giant); 1089 return (m); 1090} 1091 1092/* 1093 * Grow the number of kernel page table entries. Unneeded. 1094 */ 1095void 1096pmap_growkernel(vm_offset_t addr) 1097{ 1098} 1099 1100void 1101pmap_init(void) 1102{ 1103 1104 CTR0(KTR_PMAP, "pmap_init"); 1105 1106 pmap_upvo_zone = uma_zcreate("UPVO entry", sizeof (struct pvo_entry), 1107 NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 1108 UMA_ZONE_VM | UMA_ZONE_NOFREE); 1109 pmap_mpvo_zone = uma_zcreate("MPVO entry", sizeof(struct pvo_entry), 1110 NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 1111 UMA_ZONE_VM | UMA_ZONE_NOFREE); 1112 pmap_initialized = TRUE; 1113} 1114 1115void 1116pmap_init2(void) 1117{ 1118 1119 CTR0(KTR_PMAP, "pmap_init2"); 1120} 1121 1122boolean_t 1123pmap_is_modified(vm_page_t m) 1124{ 1125 1126 if ((m->flags & (PG_FICTITIOUS |PG_UNMANAGED)) != 0) 1127 return (FALSE); 1128 1129 return (pmap_query_bit(m, PTE_CHG)); 1130} 1131 1132/* 1133 * pmap_is_prefaultable: 1134 * 1135 * Return whether or not the specified virtual address is elgible 1136 * for prefault. 1137 */ 1138boolean_t 1139pmap_is_prefaultable(pmap_t pmap, vm_offset_t addr) 1140{ 1141 1142 return (FALSE); 1143} 1144 1145void 1146pmap_clear_reference(vm_page_t m) 1147{ 1148 1149 if ((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) != 0) 1150 return; 1151 pmap_clear_bit(m, PTE_REF, NULL); 1152} 1153 1154void 1155pmap_clear_modify(vm_page_t m) 1156{ 1157 1158 if ((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) != 0) 1159 return; 1160 pmap_clear_bit(m, PTE_CHG, NULL); 1161} 1162 1163/* 1164 * pmap_ts_referenced: 1165 * 1166 * Return a count of reference bits for a page, clearing those bits. 1167 * It is not necessary for every reference bit to be cleared, but it 1168 * is necessary that 0 only be returned when there are truly no 1169 * reference bits set. 1170 * 1171 * XXX: The exact number of bits to check and clear is a matter that 1172 * should be tested and standardized at some point in the future for 1173 * optimal aging of shared pages. 1174 */ 1175int 1176pmap_ts_referenced(vm_page_t m) 1177{ 1178 int count; 1179 1180 if ((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) != 0) 1181 return (0); 1182 1183 count = pmap_clear_bit(m, PTE_REF, NULL); 1184 1185 return (count); 1186} 1187 1188/* 1189 * Map a wired page into kernel virtual address space. 
1190 */ 1191void 1192pmap_kenter(vm_offset_t va, vm_offset_t pa) 1193{ 1194 u_int pte_lo; 1195 int error; 1196 int i; 1197 1198#if 0 1199 if (va < VM_MIN_KERNEL_ADDRESS) 1200 panic("pmap_kenter: attempt to enter non-kernel address %#x", 1201 va); 1202#endif 1203 1204 pte_lo = PTE_I | PTE_G; 1205 for (i = 0; i < pregions_sz; i++) { 1206 if ((pa >= pregions[i].mr_start) && 1207 (pa < (pregions[i].mr_start + pregions[i].mr_size))) { 1208 pte_lo &= ~(PTE_I | PTE_G); 1209 break; 1210 } 1211 } 1212 1213 error = pmap_pvo_enter(kernel_pmap, pmap_upvo_zone, 1214 &pmap_pvo_kunmanaged, va, pa, pte_lo, PVO_WIRED); 1215 1216 if (error != 0 && error != ENOENT) 1217 panic("pmap_kenter: failed to enter va %#x pa %#x: %d", va, 1218 pa, error); 1219 1220 /* 1221 * Flush the real memory from the instruction cache. 1222 */ 1223 if ((pte_lo & (PTE_I | PTE_G)) == 0) { 1224 pmap_syncicache(pa, PAGE_SIZE); 1225 } 1226} 1227 1228/* 1229 * Extract the physical page address associated with the given kernel virtual 1230 * address. 1231 */ 1232vm_offset_t 1233pmap_kextract(vm_offset_t va) 1234{ 1235 struct pvo_entry *pvo; 1236 vm_paddr_t pa; 1237 1238#ifdef UMA_MD_SMALL_ALLOC 1239 /* 1240 * Allow direct mappings 1241 */ 1242 if (va < VM_MIN_KERNEL_ADDRESS) { 1243 return (va); 1244 } 1245#endif 1246 1247 PMAP_LOCK(kernel_pmap); 1248 pvo = pmap_pvo_find_va(kernel_pmap, va & ~ADDR_POFF, NULL); 1249 KASSERT(pvo != NULL, ("pmap_kextract: no addr found")); 1250 pa = (pvo->pvo_pte.pte_lo & PTE_RPGN) | (va & ADDR_POFF); 1251 PMAP_UNLOCK(kernel_pmap); 1252 return (pa); 1253} 1254 1255/* 1256 * Remove a wired page from kernel virtual address space. 1257 */ 1258void 1259pmap_kremove(vm_offset_t va) 1260{ 1261 1262 pmap_remove(kernel_pmap, va, va + PAGE_SIZE); 1263} 1264 1265/* 1266 * Map a range of physical addresses into kernel virtual address space. 1267 * 1268 * The value passed in *virt is a suggested virtual address for the mapping. 1269 * Architectures which can support a direct-mapped physical to virtual region 1270 * can return the appropriate address within that region, leaving '*virt' 1271 * unchanged. We cannot and therefore do not; *virt is updated with the 1272 * first usable address after the mapped region. 1273 */ 1274vm_offset_t 1275pmap_map(vm_offset_t *virt, vm_offset_t pa_start, vm_offset_t pa_end, int prot) 1276{ 1277 vm_offset_t sva, va; 1278 1279 sva = *virt; 1280 va = sva; 1281 for (; pa_start < pa_end; pa_start += PAGE_SIZE, va += PAGE_SIZE) 1282 pmap_kenter(va, pa_start); 1283 *virt = va; 1284 return (sva); 1285} 1286 1287int 1288pmap_mincore(pmap_t pmap, vm_offset_t addr) 1289{ 1290 TODO; 1291 return (0); 1292} 1293 1294void 1295pmap_object_init_pt(pmap_t pm, vm_offset_t addr, vm_object_t object, 1296 vm_pindex_t pindex, vm_size_t size) 1297{ 1298 1299 VM_OBJECT_LOCK_ASSERT(object, MA_OWNED); 1300 KASSERT(object->type == OBJT_DEVICE, 1301 ("pmap_object_init_pt: non-device object")); 1302 KASSERT(pm == &curproc->p_vmspace->vm_pmap || pm == kernel_pmap, 1303 ("pmap_object_init_pt: non current pmap")); 1304} 1305 1306/* 1307 * Lower the permission for all mappings to a given page. 1308 */ 1309void 1310pmap_page_protect(vm_page_t m, vm_prot_t prot) 1311{ 1312 struct pvo_head *pvo_head; 1313 struct pvo_entry *pvo, *next_pvo; 1314 struct pte *pt; 1315 pmap_t pmap; 1316 1317 /* 1318 * Since the routine only downgrades protection, if the 1319 * maximal protection is desired, there isn't any change 1320 * to be made. 
 */
	if ((prot & (VM_PROT_READ|VM_PROT_WRITE)) ==
	    (VM_PROT_READ|VM_PROT_WRITE))
		return;

	pvo_head = vm_page_to_pvoh(m);
	for (pvo = LIST_FIRST(pvo_head); pvo != NULL; pvo = next_pvo) {
		next_pvo = LIST_NEXT(pvo, pvo_vlink);
		PMAP_PVO_CHECK(pvo);	/* sanity check */
		pmap = pvo->pvo_pmap;
		PMAP_LOCK(pmap);

		/*
		 * Downgrading to no mapping at all, we just remove the entry.
		 */
		if ((prot & VM_PROT_READ) == 0) {
			pmap_pvo_remove(pvo, -1);
			PMAP_UNLOCK(pmap);
			continue;
		}

		/*
		 * If EXEC permission is being revoked, just clear the flag
		 * in the PVO.
		 */
		if ((prot & VM_PROT_EXECUTE) == 0)
			pvo->pvo_vaddr &= ~PVO_EXECUTABLE;

		/*
		 * If this entry is already RO, don't diddle with the page
		 * table.
		 */
		if ((pvo->pvo_pte.pte_lo & PTE_PP) == PTE_BR) {
			PMAP_UNLOCK(pmap);
			PMAP_PVO_CHECK(pvo);
			continue;
		}

		/*
		 * Grab the PTE before we diddle the bits so pvo_to_pte can
		 * verify the pte contents are as expected.
		 */
		pt = pmap_pvo_to_pte(pvo, -1);
		pvo->pvo_pte.pte_lo &= ~PTE_PP;
		pvo->pvo_pte.pte_lo |= PTE_BR;
		if (pt != NULL)
			pmap_pte_change(pt, &pvo->pvo_pte, pvo->pvo_vaddr);
		PMAP_UNLOCK(pmap);
		PMAP_PVO_CHECK(pvo);	/* sanity check */
	}

	/*
	 * Downgrading from writeable: clear the VM page flag
	 */
	if ((prot & VM_PROT_WRITE) != VM_PROT_WRITE)
		vm_page_flag_clear(m, PG_WRITEABLE);
}

/*
 * Returns true if the pmap's pv is one of the first
 * 16 pvs linked to from this page.  This count may
 * be changed upwards or downwards in the future; it
 * is only necessary that true be returned for a small
 * subset of pmaps for proper page aging.
 */
boolean_t
pmap_page_exists_quick(pmap_t pmap, vm_page_t m)
{
	int loops;
	struct pvo_entry *pvo;

	if (!pmap_initialized || (m->flags & PG_FICTITIOUS))
		return FALSE;

	loops = 0;
	LIST_FOREACH(pvo, vm_page_to_pvoh(m), pvo_vlink) {
		if (pvo->pvo_pmap == pmap)
			return (TRUE);
		if (++loops >= 16)
			break;
	}

	return (FALSE);
}

static u_int	pmap_vsidcontext;

void
pmap_pinit(pmap_t pmap)
{
	int	i, mask;
	u_int	entropy;

	KASSERT((int)pmap < VM_MIN_KERNEL_ADDRESS, ("pmap_pinit: virt pmap"));
	PMAP_LOCK_INIT(pmap);

	entropy = 0;
	__asm __volatile("mftb %0" : "=r"(entropy));

	/*
	 * Allocate some segment registers for this pmap.
	 */
	for (i = 0; i < NPMAPS; i += VSID_NBPW) {
		u_int	hash, n;

		/*
		 * Create a new value by multiplying by a prime and adding in
		 * entropy from the timebase register.  This is to make the
		 * VSID more random so that the PT hash function collides
		 * less often.  (Note that the prime causes gcc to do shifts
		 * instead of a multiply.)
		 */
		pmap_vsidcontext = (pmap_vsidcontext * 0x1105) + entropy;
		hash = pmap_vsidcontext & (NPMAPS - 1);
		if (hash == 0)		/* 0 is special, avoid it */
			continue;
		n = hash >> 5;
		mask = 1 << (hash & (VSID_NBPW - 1));
		hash = (pmap_vsidcontext & 0xfffff);
		if (pmap_vsid_bitmap[n] & mask) {	/* collision? */
			/* anything free in this bucket? */
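			/*
			 * If this word of the bitmap is already full, remix
			 * the entropy and retry with a new hash.  Otherwise
			 * claim the lowest clear bit and substitute its index
			 * into the low bits of the VSID so that no two pmaps
			 * share a VSID group.
			 */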
			if (pmap_vsid_bitmap[n] == 0xffffffff) {
				entropy = (pmap_vsidcontext >> 20);
				continue;
			}
			i = ffs(~pmap_vsid_bitmap[n]) - 1;
			mask = 1 << i;
			hash &= 0xfffff & ~(VSID_NBPW - 1);
			hash |= i;
		}
		pmap_vsid_bitmap[n] |= mask;
		for (i = 0; i < 16; i++)
			pmap->pm_sr[i] = VSID_MAKE(i, hash);
		return;
	}

	panic("pmap_pinit: out of segments");
}

/*
 * Initialize the pmap associated with process 0.
 */
void
pmap_pinit0(pmap_t pm)
{

	pmap_pinit(pm);
	bzero(&pm->pm_stats, sizeof(pm->pm_stats));
}

/*
 * Set the physical protection on the specified range of this map as requested.
 */
void
pmap_protect(pmap_t pm, vm_offset_t sva, vm_offset_t eva, vm_prot_t prot)
{
	struct	pvo_entry *pvo;
	struct	pte *pt;
	int	pteidx;

	CTR4(KTR_PMAP, "pmap_protect: pm=%p sva=%#x eva=%#x prot=%#x", pm, sva,
	    eva, prot);

	KASSERT(pm == &curproc->p_vmspace->vm_pmap || pm == kernel_pmap,
	    ("pmap_protect: non current pmap"));

	if ((prot & VM_PROT_READ) == VM_PROT_NONE) {
		mtx_lock(&Giant);
		pmap_remove(pm, sva, eva);
		mtx_unlock(&Giant);
		return;
	}

	mtx_lock(&Giant);
	vm_page_lock_queues();
	PMAP_LOCK(pm);
	for (; sva < eva; sva += PAGE_SIZE) {
		pvo = pmap_pvo_find_va(pm, sva, &pteidx);
		if (pvo == NULL)
			continue;

		if ((prot & VM_PROT_EXECUTE) == 0)
			pvo->pvo_vaddr &= ~PVO_EXECUTABLE;

		/*
		 * Grab the PTE pointer before we diddle with the cached PTE
		 * copy.
		 */
		pt = pmap_pvo_to_pte(pvo, pteidx);
		/*
		 * Change the protection of the page.
		 */
		pvo->pvo_pte.pte_lo &= ~PTE_PP;
		pvo->pvo_pte.pte_lo |= PTE_BR;

		/*
		 * If the PVO is in the page table, update that pte as well.
		 */
		if (pt != NULL)
			pmap_pte_change(pt, &pvo->pvo_pte, pvo->pvo_vaddr);
	}
	vm_page_unlock_queues();
	PMAP_UNLOCK(pm);
	mtx_unlock(&Giant);
}

/*
 * Map a list of wired pages into kernel virtual address space.  This is
 * intended for temporary mappings which do not need page modification or
 * references recorded.  Existing mappings in the region are overwritten.
 */
void
pmap_qenter(vm_offset_t sva, vm_page_t *m, int count)
{
	vm_offset_t va;

	va = sva;
	while (count-- > 0) {
		pmap_kenter(va, VM_PAGE_TO_PHYS(*m));
		va += PAGE_SIZE;
		m++;
	}
}

/*
 * Remove page mappings from kernel virtual address space.  Intended for
 * temporary mappings entered by pmap_qenter.
 */
void
pmap_qremove(vm_offset_t sva, int count)
{
	vm_offset_t va;

	va = sva;
	while (count-- > 0) {
		pmap_kremove(va);
		va += PAGE_SIZE;
	}
}

void
pmap_release(pmap_t pmap)
{
	int idx, mask;

	/*
	 * Free the VSID allocated to this pmap's segment registers.
	 */
	if (pmap->pm_sr[0] == 0)
		panic("pmap_release");

	idx = VSID_TO_HASH(pmap->pm_sr[0]) & (NPMAPS-1);
	mask = 1 << (idx % VSID_NBPW);
	idx /= VSID_NBPW;
	pmap_vsid_bitmap[idx] &= ~mask;
	PMAP_LOCK_DESTROY(pmap);
}

/*
 * Remove the given range of addresses from the specified map.
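 * The range is processed a page at a time: look up the pvo for each
 * page-aligned address and tear it down with pmap_pvo_remove().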
1582 */ 1583void 1584pmap_remove(pmap_t pm, vm_offset_t sva, vm_offset_t eva) 1585{ 1586 struct pvo_entry *pvo; 1587 int pteidx; 1588 1589 vm_page_lock_queues(); 1590 PMAP_LOCK(pm); 1591 for (; sva < eva; sva += PAGE_SIZE) { 1592 pvo = pmap_pvo_find_va(pm, sva, &pteidx); 1593 if (pvo != NULL) { 1594 pmap_pvo_remove(pvo, pteidx); 1595 } 1596 } 1597 vm_page_unlock_queues(); 1598 PMAP_UNLOCK(pm); 1599} 1600 1601/* 1602 * Remove physical page from all pmaps in which it resides. pmap_pvo_remove() 1603 * will reflect changes in pte's back to the vm_page. 1604 */ 1605void 1606pmap_remove_all(vm_page_t m) 1607{ 1608 struct pvo_head *pvo_head; 1609 struct pvo_entry *pvo, *next_pvo; 1610 pmap_t pmap; 1611 1612 mtx_assert(&vm_page_queue_mtx, MA_OWNED); 1613 1614 pvo_head = vm_page_to_pvoh(m); 1615 for (pvo = LIST_FIRST(pvo_head); pvo != NULL; pvo = next_pvo) { 1616 next_pvo = LIST_NEXT(pvo, pvo_vlink); 1617 1618 PMAP_PVO_CHECK(pvo); /* sanity check */ 1619 pmap = pvo->pvo_pmap; 1620 PMAP_LOCK(pmap); 1621 pmap_pvo_remove(pvo, -1); 1622 PMAP_UNLOCK(pmap); 1623 } 1624 vm_page_flag_clear(m, PG_WRITEABLE); 1625} 1626 1627/* 1628 * Remove all pages from specified address space, this aids process exit 1629 * speeds. This is much faster than pmap_remove in the case of running down 1630 * an entire address space. Only works for the current pmap. 1631 */ 1632void 1633pmap_remove_pages(pmap_t pm, vm_offset_t sva, vm_offset_t eva) 1634{ 1635} 1636 1637/* 1638 * Allocate a physical page of memory directly from the phys_avail map. 1639 * Can only be called from pmap_bootstrap before avail start and end are 1640 * calculated. 1641 */ 1642static vm_offset_t 1643pmap_bootstrap_alloc(vm_size_t size, u_int align) 1644{ 1645 vm_offset_t s, e; 1646 int i, j; 1647 1648 size = round_page(size); 1649 for (i = 0; phys_avail[i + 1] != 0; i += 2) { 1650 if (align != 0) 1651 s = (phys_avail[i] + align - 1) & ~(align - 1); 1652 else 1653 s = phys_avail[i]; 1654 e = s + size; 1655 1656 if (s < phys_avail[i] || e > phys_avail[i + 1]) 1657 continue; 1658 1659 if (s == phys_avail[i]) { 1660 phys_avail[i] += size; 1661 } else if (e == phys_avail[i + 1]) { 1662 phys_avail[i + 1] -= size; 1663 } else { 1664 for (j = phys_avail_count * 2; j > i; j -= 2) { 1665 phys_avail[j] = phys_avail[j - 2]; 1666 phys_avail[j + 1] = phys_avail[j - 1]; 1667 } 1668 1669 phys_avail[i + 3] = phys_avail[i + 1]; 1670 phys_avail[i + 1] = s; 1671 phys_avail[i + 2] = e; 1672 phys_avail_count++; 1673 } 1674 1675 return (s); 1676 } 1677 panic("pmap_bootstrap_alloc: could not allocate memory"); 1678} 1679 1680/* 1681 * Return an unmapped pvo for a kernel virtual address. 1682 * Used by pmap functions that operate on physical pages. 
1683 */ 1684static struct pvo_entry * 1685pmap_rkva_alloc(void) 1686{ 1687 struct pvo_entry *pvo; 1688 struct pte *pt; 1689 vm_offset_t kva; 1690 int pteidx; 1691 1692 if (pmap_rkva_count == 0) 1693 panic("pmap_rkva_alloc: no more reserved KVAs"); 1694 1695 kva = pmap_rkva_start + (PAGE_SIZE * --pmap_rkva_count); 1696 pmap_kenter(kva, 0); 1697 1698 pvo = pmap_pvo_find_va(kernel_pmap, kva, &pteidx); 1699 1700 if (pvo == NULL) 1701 panic("pmap_kva_alloc: pmap_pvo_find_va failed"); 1702 1703 pt = pmap_pvo_to_pte(pvo, pteidx); 1704 1705 if (pt == NULL) 1706 panic("pmap_kva_alloc: pmap_pvo_to_pte failed"); 1707 1708 pmap_pte_unset(pt, &pvo->pvo_pte, pvo->pvo_vaddr); 1709 PVO_PTEGIDX_CLR(pvo); 1710 1711 pmap_pte_overflow++; 1712 1713 return (pvo); 1714} 1715 1716static void 1717pmap_pa_map(struct pvo_entry *pvo, vm_offset_t pa, struct pte *saved_pt, 1718 int *depth_p) 1719{ 1720 struct pte *pt; 1721 1722 /* 1723 * If this pvo already has a valid pte, we need to save it so it can 1724 * be restored later. We then just reload the new PTE over the old 1725 * slot. 1726 */ 1727 if (saved_pt != NULL) { 1728 pt = pmap_pvo_to_pte(pvo, -1); 1729 1730 if (pt != NULL) { 1731 pmap_pte_unset(pt, &pvo->pvo_pte, pvo->pvo_vaddr); 1732 PVO_PTEGIDX_CLR(pvo); 1733 pmap_pte_overflow++; 1734 } 1735 1736 *saved_pt = pvo->pvo_pte; 1737 1738 pvo->pvo_pte.pte_lo &= ~PTE_RPGN; 1739 } 1740 1741 pvo->pvo_pte.pte_lo |= pa; 1742 1743 if (!pmap_pte_spill(pvo->pvo_vaddr)) 1744 panic("pmap_pa_map: could not spill pvo %p", pvo); 1745 1746 if (depth_p != NULL) 1747 (*depth_p)++; 1748} 1749 1750static void 1751pmap_pa_unmap(struct pvo_entry *pvo, struct pte *saved_pt, int *depth_p) 1752{ 1753 struct pte *pt; 1754 1755 pt = pmap_pvo_to_pte(pvo, -1); 1756 1757 if (pt != NULL) { 1758 pmap_pte_unset(pt, &pvo->pvo_pte, pvo->pvo_vaddr); 1759 PVO_PTEGIDX_CLR(pvo); 1760 pmap_pte_overflow++; 1761 } 1762 1763 pvo->pvo_pte.pte_lo &= ~PTE_RPGN; 1764 1765 /* 1766 * If there is a saved PTE and it's valid, restore it and return. 1767 */ 1768 if (saved_pt != NULL && (saved_pt->pte_lo & PTE_RPGN) != 0) { 1769 if (depth_p != NULL && --(*depth_p) == 0) 1770 panic("pmap_pa_unmap: restoring but depth == 0"); 1771 1772 pvo->pvo_pte = *saved_pt; 1773 1774 if (!pmap_pte_spill(pvo->pvo_vaddr)) 1775 panic("pmap_pa_unmap: could not spill pvo %p", pvo); 1776 } 1777} 1778 1779static void 1780pmap_syncicache(vm_offset_t pa, vm_size_t len) 1781{ 1782 __syncicache((void *)pa, len); 1783} 1784 1785static void 1786tlbia(void) 1787{ 1788 caddr_t i; 1789 1790 SYNC(); 1791 for (i = 0; i < (caddr_t)0x00040000; i += 0x00001000) { 1792 TLBIE(i); 1793 EIEIO(); 1794 } 1795 TLBSYNC(); 1796 SYNC(); 1797} 1798 1799static int 1800pmap_pvo_enter(pmap_t pm, uma_zone_t zone, struct pvo_head *pvo_head, 1801 vm_offset_t va, vm_offset_t pa, u_int pte_lo, int flags) 1802{ 1803 struct pvo_entry *pvo; 1804 u_int sr; 1805 int first; 1806 u_int ptegidx; 1807 int i; 1808 int bootstrap; 1809 1810 pmap_pvo_enter_calls++; 1811 first = 0; 1812 1813 bootstrap = 0; 1814 1815 /* 1816 * Compute the PTE Group index. 1817 */ 1818 va &= ~ADDR_POFF; 1819 sr = va_to_sr(pm->pm_sr, va); 1820 ptegidx = va_to_pteg(sr, va); 1821 1822 /* 1823 * Remove any existing mapping for this page. Reuse the pvo entry if 1824 * there is a mapping. 
1825 */ 1826 LIST_FOREACH(pvo, &pmap_pvo_table[ptegidx], pvo_olink) { 1827 if (pvo->pvo_pmap == pm && PVO_VADDR(pvo) == va) { 1828 if ((pvo->pvo_pte.pte_lo & PTE_RPGN) == pa && 1829 (pvo->pvo_pte.pte_lo & PTE_PP) == 1830 (pte_lo & PTE_PP)) { 1831 return (0); 1832 } 1833 pmap_pvo_remove(pvo, -1); 1834 break; 1835 } 1836 } 1837 1838 /* 1839 * If we aren't overwriting a mapping, try to allocate. 1840 */ 1841 if (pmap_initialized) { 1842 pvo = uma_zalloc(zone, M_NOWAIT); 1843 } else { 1844 if (pmap_bpvo_pool_index >= BPVO_POOL_SIZE) { 1845 panic("pmap_enter: bpvo pool exhausted, %d, %d, %d", 1846 pmap_bpvo_pool_index, BPVO_POOL_SIZE, 1847 BPVO_POOL_SIZE * sizeof(struct pvo_entry)); 1848 } 1849 pvo = &pmap_bpvo_pool[pmap_bpvo_pool_index]; 1850 pmap_bpvo_pool_index++; 1851 bootstrap = 1; 1852 } 1853 1854 if (pvo == NULL) { 1855 return (ENOMEM); 1856 } 1857 1858 pmap_pvo_entries++; 1859 pvo->pvo_vaddr = va; 1860 pvo->pvo_pmap = pm; 1861 LIST_INSERT_HEAD(&pmap_pvo_table[ptegidx], pvo, pvo_olink); 1862 pvo->pvo_vaddr &= ~ADDR_POFF; 1863 if (flags & VM_PROT_EXECUTE) 1864 pvo->pvo_vaddr |= PVO_EXECUTABLE; 1865 if (flags & PVO_WIRED) 1866 pvo->pvo_vaddr |= PVO_WIRED; 1867 if (pvo_head != &pmap_pvo_kunmanaged) 1868 pvo->pvo_vaddr |= PVO_MANAGED; 1869 if (bootstrap) 1870 pvo->pvo_vaddr |= PVO_BOOTSTRAP; 1871 pmap_pte_create(&pvo->pvo_pte, sr, va, pa | pte_lo); 1872 1873 /* 1874 * Remember if the list was empty and therefore will be the first 1875 * item. 1876 */ 1877 if (LIST_FIRST(pvo_head) == NULL) 1878 first = 1; 1879 1880 LIST_INSERT_HEAD(pvo_head, pvo, pvo_vlink); 1881 if (pvo->pvo_pte.pte_lo & PVO_WIRED) 1882 pm->pm_stats.wired_count++; 1883 pm->pm_stats.resident_count++; 1884 1885 /* 1886 * We hope this succeeds but it isn't required. 1887 */ 1888 i = pmap_pte_insert(ptegidx, &pvo->pvo_pte); 1889 if (i >= 0) { 1890 PVO_PTEGIDX_SET(pvo, i); 1891 } else { 1892 panic("pmap_pvo_enter: overflow"); 1893 pmap_pte_overflow++; 1894 } 1895 1896 return (first ? ENOENT : 0); 1897} 1898 1899static void 1900pmap_pvo_remove(struct pvo_entry *pvo, int pteidx) 1901{ 1902 struct pte *pt; 1903 1904 /* 1905 * If there is an active pte entry, we need to deactivate it (and 1906 * save the ref & cfg bits). 1907 */ 1908 pt = pmap_pvo_to_pte(pvo, pteidx); 1909 if (pt != NULL) { 1910 pmap_pte_unset(pt, &pvo->pvo_pte, pvo->pvo_vaddr); 1911 PVO_PTEGIDX_CLR(pvo); 1912 } else { 1913 pmap_pte_overflow--; 1914 } 1915 1916 /* 1917 * Update our statistics. 1918 */ 1919 pvo->pvo_pmap->pm_stats.resident_count--; 1920 if (pvo->pvo_pte.pte_lo & PVO_WIRED) 1921 pvo->pvo_pmap->pm_stats.wired_count--; 1922 1923 /* 1924 * Save the REF/CHG bits into their cache if the page is managed. 1925 */ 1926 if (pvo->pvo_vaddr & PVO_MANAGED) { 1927 struct vm_page *pg; 1928 1929 pg = PHYS_TO_VM_PAGE(pvo->pvo_pte.pte_lo & PTE_RPGN); 1930 if (pg != NULL) { 1931 pmap_attr_save(pg, pvo->pvo_pte.pte_lo & 1932 (PTE_REF | PTE_CHG)); 1933 } 1934 } 1935 1936 /* 1937 * Remove this PVO from the PV list. 1938 */ 1939 LIST_REMOVE(pvo, pvo_vlink); 1940 1941 /* 1942 * Remove this from the overflow list and return it to the pool 1943 * if we aren't going to reuse it. 1944 */ 1945 LIST_REMOVE(pvo, pvo_olink); 1946 if (!(pvo->pvo_vaddr & PVO_BOOTSTRAP)) 1947 uma_zfree(pvo->pvo_vaddr & PVO_MANAGED ? 
pmap_mpvo_zone : 1948 pmap_upvo_zone, pvo); 1949 pmap_pvo_entries--; 1950 pmap_pvo_remove_calls++; 1951} 1952 1953static __inline int 1954pmap_pvo_pte_index(const struct pvo_entry *pvo, int ptegidx) 1955{ 1956 int pteidx; 1957 1958 /* 1959 * We can find the actual pte entry without searching by grabbing 1960 * the PTEG index from 3 unused bits in pte_lo[11:9] and by 1961 * noticing the HID bit. 1962 */ 1963 pteidx = ptegidx * 8 + PVO_PTEGIDX_GET(pvo); 1964 if (pvo->pvo_pte.pte_hi & PTE_HID) 1965 pteidx ^= pmap_pteg_mask * 8; 1966 1967 return (pteidx); 1968} 1969 1970static struct pvo_entry * 1971pmap_pvo_find_va(pmap_t pm, vm_offset_t va, int *pteidx_p) 1972{ 1973 struct pvo_entry *pvo; 1974 int ptegidx; 1975 u_int sr; 1976 1977 va &= ~ADDR_POFF; 1978 sr = va_to_sr(pm->pm_sr, va); 1979 ptegidx = va_to_pteg(sr, va); 1980 1981 LIST_FOREACH(pvo, &pmap_pvo_table[ptegidx], pvo_olink) { 1982 if (pvo->pvo_pmap == pm && PVO_VADDR(pvo) == va) { 1983 if (pteidx_p) 1984 *pteidx_p = pmap_pvo_pte_index(pvo, ptegidx); 1985 return (pvo); 1986 } 1987 } 1988 1989 return (NULL); 1990} 1991 1992static struct pte * 1993pmap_pvo_to_pte(const struct pvo_entry *pvo, int pteidx) 1994{ 1995 struct pte *pt; 1996 1997 /* 1998 * If we haven't been supplied the ptegidx, calculate it. 1999 */ 2000 if (pteidx == -1) { 2001 int ptegidx; 2002 u_int sr; 2003 2004 sr = va_to_sr(pvo->pvo_pmap->pm_sr, pvo->pvo_vaddr); 2005 ptegidx = va_to_pteg(sr, pvo->pvo_vaddr); 2006 pteidx = pmap_pvo_pte_index(pvo, ptegidx); 2007 } 2008 2009 pt = &pmap_pteg_table[pteidx >> 3].pt[pteidx & 7]; 2010 2011 if ((pvo->pvo_pte.pte_hi & PTE_VALID) && !PVO_PTEGIDX_ISSET(pvo)) { 2012 panic("pmap_pvo_to_pte: pvo %p has valid pte in pvo but no " 2013 "valid pte index", pvo); 2014 } 2015 2016 if ((pvo->pvo_pte.pte_hi & PTE_VALID) == 0 && PVO_PTEGIDX_ISSET(pvo)) { 2017 panic("pmap_pvo_to_pte: pvo %p has valid pte index in pvo " 2018 "pvo but no valid pte", pvo); 2019 } 2020 2021 if ((pt->pte_hi ^ (pvo->pvo_pte.pte_hi & ~PTE_VALID)) == PTE_VALID) { 2022 if ((pvo->pvo_pte.pte_hi & PTE_VALID) == 0) { 2023 panic("pmap_pvo_to_pte: pvo %p has valid pte in " 2024 "pmap_pteg_table %p but invalid in pvo", pvo, pt); 2025 } 2026 2027 if (((pt->pte_lo ^ pvo->pvo_pte.pte_lo) & ~(PTE_CHG|PTE_REF)) 2028 != 0) { 2029 panic("pmap_pvo_to_pte: pvo %p pte does not match " 2030 "pte %p in pmap_pteg_table", pvo, pt); 2031 } 2032 2033 return (pt); 2034 } 2035 2036 if (pvo->pvo_pte.pte_hi & PTE_VALID) { 2037 panic("pmap_pvo_to_pte: pvo %p has invalid pte %p in " 2038 "pmap_pteg_table but valid in pvo", pvo, pt); 2039 } 2040 2041 return (NULL); 2042} 2043 2044/* 2045 * XXX: THIS STUFF SHOULD BE IN pte.c? 2046 */ 2047int 2048pmap_pte_spill(vm_offset_t addr) 2049{ 2050 struct pvo_entry *source_pvo, *victim_pvo; 2051 struct pvo_entry *pvo; 2052 int ptegidx, i, j; 2053 u_int sr; 2054 struct pteg *pteg; 2055 struct pte *pt; 2056 2057 pmap_pte_spills++; 2058 2059 sr = mfsrin(addr); 2060 ptegidx = va_to_pteg(sr, addr); 2061 2062 /* 2063 * Have to substitute some entry. Use the primary hash for this. 2064 * Use low bits of timebase as random generator. 2065 */ 2066 pteg = &pmap_pteg_table[ptegidx]; 2067 __asm __volatile("mftb %0" : "=r"(i)); 2068 i &= 7; 2069 pt = &pteg->pt[i]; 2070 2071 source_pvo = NULL; 2072 victim_pvo = NULL; 2073 LIST_FOREACH(pvo, &pmap_pvo_table[ptegidx], pvo_olink) { 2074 /* 2075 * We need to find a pvo entry for this address. 

int
pmap_pte_spill(vm_offset_t addr)
{
	struct	pvo_entry *source_pvo, *victim_pvo;
	struct	pvo_entry *pvo;
	int	ptegidx, i, j;
	u_int	sr;
	struct	pteg *pteg;
	struct	pte *pt;

	pmap_pte_spills++;

	sr = mfsrin(addr);
	ptegidx = va_to_pteg(sr, addr);

	/*
	 * Have to substitute some entry.  Use the primary hash for this.
	 * Use low bits of timebase as random generator.
	 */
	pteg = &pmap_pteg_table[ptegidx];
	__asm __volatile("mftb %0" : "=r"(i));
	i &= 7;
	pt = &pteg->pt[i];

	source_pvo = NULL;
	victim_pvo = NULL;
	LIST_FOREACH(pvo, &pmap_pvo_table[ptegidx], pvo_olink) {
		/*
		 * We need to find a pvo entry for this address.
		 */
		PMAP_PVO_CHECK(pvo);
		if (source_pvo == NULL &&
		    pmap_pte_match(&pvo->pvo_pte, sr, addr,
		    pvo->pvo_pte.pte_hi & PTE_HID)) {
			/*
			 * Now found an entry to be spilled into the pteg.
			 * The PTE is now valid, so we know it's active.
			 */
			j = pmap_pte_insert(ptegidx, &pvo->pvo_pte);

			if (j >= 0) {
				PVO_PTEGIDX_SET(pvo, j);
				pmap_pte_overflow--;
				PMAP_PVO_CHECK(pvo);
				return (1);
			}

			source_pvo = pvo;

			if (victim_pvo != NULL)
				break;
		}

		/*
		 * We also need the pvo entry of the victim we are replacing
		 * so save the R & C bits of the PTE.
		 */
		if ((pt->pte_hi & PTE_HID) == 0 && victim_pvo == NULL &&
		    pmap_pte_compare(pt, &pvo->pvo_pte)) {
			victim_pvo = pvo;
			if (source_pvo != NULL)
				break;
		}
	}

	if (source_pvo == NULL)
		return (0);

	if (victim_pvo == NULL) {
		if ((pt->pte_hi & PTE_HID) == 0)
			panic("pmap_pte_spill: victim p-pte (%p) has no pvo "
			    "entry", pt);

		/*
		 * If this is a secondary PTE, we need to search its primary
		 * pvo bucket for the matching PVO.
		 */
		LIST_FOREACH(pvo, &pmap_pvo_table[ptegidx ^ pmap_pteg_mask],
		    pvo_olink) {
			PMAP_PVO_CHECK(pvo);
			/*
			 * We also need the pvo entry of the victim we are
			 * replacing so save the R & C bits of the PTE.
			 */
			if (pmap_pte_compare(pt, &pvo->pvo_pte)) {
				victim_pvo = pvo;
				break;
			}
		}

		if (victim_pvo == NULL)
			panic("pmap_pte_spill: victim s-pte (%p) has no pvo "
			    "entry", pt);
	}

	/*
	 * We are invalidating the TLB entry for the EA we are replacing even
	 * though it's valid.  If we don't, we lose any ref/chg bit changes
	 * contained in the TLB entry.
	 */
	source_pvo->pvo_pte.pte_hi &= ~PTE_HID;

	pmap_pte_unset(pt, &victim_pvo->pvo_pte, victim_pvo->pvo_vaddr);
	pmap_pte_set(pt, &source_pvo->pvo_pte);

	PVO_PTEGIDX_CLR(victim_pvo);
	PVO_PTEGIDX_SET(source_pvo, i);
	pmap_pte_replacements++;

	PMAP_PVO_CHECK(victim_pvo);
	PMAP_PVO_CHECK(source_pvo);

	return (1);
}

static int
pmap_pte_insert(u_int ptegidx, struct pte *pvo_pt)
{
	struct	pte *pt;
	int	i;

	/*
	 * First try primary hash.
	 */
	for (pt = pmap_pteg_table[ptegidx].pt, i = 0; i < 8; i++, pt++) {
		if ((pt->pte_hi & PTE_VALID) == 0) {
			pvo_pt->pte_hi &= ~PTE_HID;
			pmap_pte_set(pt, pvo_pt);
			return (i);
		}
	}

	/*
	 * Now try secondary hash.
	 */
	ptegidx ^= pmap_pteg_mask;
	ptegidx++;
	for (pt = pmap_pteg_table[ptegidx].pt, i = 0; i < 8; i++, pt++) {
		if ((pt->pte_hi & PTE_VALID) == 0) {
			pvo_pt->pte_hi |= PTE_HID;
			pmap_pte_set(pt, pvo_pt);
			return (i);
		}
	}

	panic("pmap_pte_insert: overflow");
	return (-1);
}
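
/*
 * Query whether any mapping of the page has the given REF/CHG bit set.
 * The copy of the bit cached in each PVO is checked first; only if that
 * misses are the hardware PTEs synced and consulted.  As a side effect
 * the bit is also cached on the page via pmap_attr_save().
 */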

static boolean_t
pmap_query_bit(vm_page_t m, int ptebit)
{
	struct	pvo_entry *pvo;
	struct	pte *pt;

#if 0
	if (pmap_attr_fetch(m) & ptebit)
		return (TRUE);
#endif

	LIST_FOREACH(pvo, vm_page_to_pvoh(m), pvo_vlink) {
		PMAP_PVO_CHECK(pvo);	/* sanity check */

		/*
		 * See if we saved the bit off.  If so, cache it and return
		 * success.
		 */
		if (pvo->pvo_pte.pte_lo & ptebit) {
			pmap_attr_save(m, ptebit);
			PMAP_PVO_CHECK(pvo);	/* sanity check */
			return (TRUE);
		}
	}

	/*
	 * No luck, now go through the hard part of looking at the PTEs
	 * themselves.  Sync so that any pending REF/CHG bits are flushed to
	 * the PTEs.
	 */
	SYNC();
	LIST_FOREACH(pvo, vm_page_to_pvoh(m), pvo_vlink) {
		PMAP_PVO_CHECK(pvo);	/* sanity check */

		/*
		 * See if this pvo has a valid PTE.  If so, fetch the
		 * REF/CHG bits from the valid PTE.  If the appropriate
		 * ptebit is set, cache it and return success.
		 */
		pt = pmap_pvo_to_pte(pvo, -1);
		if (pt != NULL) {
			pmap_pte_synch(pt, &pvo->pvo_pte);
			if (pvo->pvo_pte.pte_lo & ptebit) {
				pmap_attr_save(m, ptebit);
				PMAP_PVO_CHECK(pvo);	/* sanity check */
				return (TRUE);
			}
		}
	}

	return (FALSE);
}

static u_int
pmap_clear_bit(vm_page_t m, int ptebit, int *origbit)
{
	u_int	count;
	struct	pvo_entry *pvo;
	struct	pte *pt;
	int	rv;

	/*
	 * Clear the cached value.
	 */
	rv = pmap_attr_fetch(m);
	pmap_attr_clear(m, ptebit);

	/*
	 * Sync so that any pending REF/CHG bits are flushed to the PTEs (so
	 * we can reset the right ones).  Note that since the pvo entries and
	 * list heads are accessed via BAT0 and are never placed in the page
	 * table, we don't have to worry about further accesses setting the
	 * REF/CHG bits.
	 */
	SYNC();

	/*
	 * For each pvo entry, clear the pvo's ptebit.  If this pvo has a
	 * valid pte, clear the ptebit from the valid pte as well.
	 */
	count = 0;
	LIST_FOREACH(pvo, vm_page_to_pvoh(m), pvo_vlink) {
		PMAP_PVO_CHECK(pvo);	/* sanity check */
		pt = pmap_pvo_to_pte(pvo, -1);
		if (pt != NULL) {
			pmap_pte_synch(pt, &pvo->pvo_pte);
			if (pvo->pvo_pte.pte_lo & ptebit) {
				count++;
				pmap_pte_clear(pt, PVO_VADDR(pvo), ptebit);
			}
		}
		rv |= pvo->pvo_pte.pte_lo;
		pvo->pvo_pte.pte_lo &= ~ptebit;
		PMAP_PVO_CHECK(pvo);	/* sanity check */
	}

	if (origbit != NULL) {
		*origbit = rv;
	}

	return (count);
}

/*
 * Return 0 if the physical range is encompassed by the battable[idx]
 * entry, otherwise an errno value.
 */
static int
pmap_bat_mapped(int idx, vm_offset_t pa, vm_size_t size)
{
	u_int	prot;
	u_int32_t	start;
	u_int32_t	end;
	u_int32_t	bat_ble;

	/*
	 * Return immediately if this is not a valid mapping.
	 */
	if (!(battable[idx].batu & BAT_Vs))
		return (EINVAL);

	/*
	 * The BAT entry must be cache-inhibited, guarded, and r/w
	 * so it can function as an i/o page.
	 */
	prot = battable[idx].batl & (BAT_I|BAT_G|BAT_PP_RW);
	if (prot != (BAT_I|BAT_G|BAT_PP_RW))
		return (EPERM);

	/*
	 * The address should be within the BAT range.  Assume that the
	 * start address in the BAT has the correct alignment (thus
	 * not requiring masking).
	 */
	start = battable[idx].batl & BAT_PBS;
	bat_ble = (battable[idx].batu & ~(BAT_EBS)) | 0x03;
	end = start | (bat_ble << 15) | 0x7fff;

	if ((pa < start) || ((pa + size) > end))
		return (ERANGE);

	return (0);
}
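
/*
 * To illustrate the length decoding in pmap_bat_mapped() above (assuming
 * BAT_EBS masks only the BEPI field of the upper BAT word): a 256MB block
 * has an 11-bit BL field of 0x7ff, so bat_ble works out to 0x1fff and
 * end = start | (0x1fff << 15) | 0x7fff, i.e. start + 0x0fffffff, the
 * last byte of the block; the minimum 128KB block (BL == 0) gives
 * end = start | 0x1ffff.
 */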

int
pmap_dev_direct_mapped(vm_offset_t pa, vm_size_t size)
{
	int i;

	/*
	 * This currently does not work for entries that
	 * overlap 256M BAT segments.
	 */

	for (i = 0; i < 16; i++)
		if (pmap_bat_mapped(i, pa, size) == 0)
			return (0);

	return (EFAULT);
}

/*
 * Map a set of physical memory pages into the kernel virtual
 * address space.  Return a pointer to where it is mapped.  This
 * routine is intended to be used for mapping device memory,
 * NOT real memory.
 */
void *
pmap_mapdev(vm_offset_t pa, vm_size_t size)
{
	vm_offset_t va, tmpva, ppa, offset;
	int i;

	ppa = trunc_page(pa);
	offset = pa & PAGE_MASK;
	size = roundup(offset + size, PAGE_SIZE);

	GIANT_REQUIRED;

	/*
	 * If the physical address lies within a valid BAT table entry,
	 * return the 1:1 mapping.  This currently doesn't work
	 * for regions that overlap 256M BAT segments.
	 */
	for (i = 0; i < 16; i++) {
		if (pmap_bat_mapped(i, pa, size) == 0)
			return ((void *) pa);
	}

	va = kmem_alloc_nofault(kernel_map, size);
	if (!va)
		panic("pmap_mapdev: Couldn't alloc kernel virtual memory");

	for (tmpva = va; size > 0;) {
		pmap_kenter(tmpva, ppa);
		TLBIE(tmpva); /* XXX or should it be invalidate-all ? */
		size -= PAGE_SIZE;
		tmpva += PAGE_SIZE;
		ppa += PAGE_SIZE;
	}

	return ((void *)(va + offset));
}

void
pmap_unmapdev(vm_offset_t va, vm_size_t size)
{
	vm_offset_t base, offset;

	/*
	 * If this is outside kernel virtual space, then it's a
	 * battable entry and doesn't require unmapping.
	 */
	if ((va >= VM_MIN_KERNEL_ADDRESS) && (va <= VM_MAX_KERNEL_ADDRESS)) {
		base = trunc_page(va);
		offset = va & PAGE_MASK;
		size = roundup(offset + size, PAGE_SIZE);
		kmem_free(kernel_map, base, size);
	}
}
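
/*
 * A sketch of how the two routines above are meant to be used; the
 * physical address and size here are made up for illustration:
 *
 *	void *regs;
 *
 *	regs = pmap_mapdev(0xf3000000, PAGE_SIZE);
 *	... access the device registers through 'regs' ...
 *	pmap_unmapdev((vm_offset_t)regs, PAGE_SIZE);
 *
 * If the physical range is already covered by an I/O BAT entry,
 * pmap_mapdev() just returns the physical address and the matching
 * pmap_unmapdev() call becomes a no-op.
 */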