static tlbtid_t tid_alloc(struct pmap *);

static void tlb_print_entry(int, uint32_t, uint32_t, uint32_t, uint32_t);

static int tlb1_set_entry(vm_offset_t, vm_offset_t, vm_size_t, uint32_t);
static void tlb1_write_entry(unsigned int);
static int tlb1_iomapped(int, vm_paddr_t, vm_size_t, vm_offset_t *);
static vm_size_t tlb1_mapin_region(vm_offset_t, vm_paddr_t, vm_size_t);

static vm_size_t tsize2size(unsigned int);
static unsigned int size2tsize(vm_size_t);
static unsigned int ilog2(unsigned int);

static void set_mas4_defaults(void);

static inline void tlb0_flush_entry(vm_offset_t);
static inline unsigned int tlb0_tableidx(vm_offset_t, unsigned int);

/**************************************************************************/
/* Page table management */
/**************************************************************************/

static struct rwlock_padalign pvh_global_lock;

/* Data for the pv entry allocation mechanism */
static uma_zone_t pvzone;
static int pv_entry_count = 0, pv_entry_max = 0, pv_entry_high_water = 0;

#define PV_ENTRY_ZONE_MIN	2048	/* min pv entries in uma zone */

#ifndef PMAP_SHPGPERPROC
#define PMAP_SHPGPERPROC	200
#endif

static void ptbl_init(void);
static struct ptbl_buf *ptbl_buf_alloc(void);
static void ptbl_buf_free(struct ptbl_buf *);
static void ptbl_free_pmap_ptbl(pmap_t, pte_t *);

static pte_t *ptbl_alloc(mmu_t, pmap_t, unsigned int);
static void ptbl_free(mmu_t, pmap_t, unsigned int);
static void ptbl_hold(mmu_t, pmap_t, unsigned int);
static int ptbl_unhold(mmu_t, pmap_t, unsigned int);

static vm_paddr_t pte_vatopa(mmu_t, pmap_t, vm_offset_t);
static pte_t *pte_find(mmu_t, pmap_t, vm_offset_t);
static void pte_enter(mmu_t, pmap_t, vm_page_t, vm_offset_t, uint32_t);
static int pte_remove(mmu_t, pmap_t, vm_offset_t, uint8_t);

static pv_entry_t pv_alloc(void);
static void pv_free(pv_entry_t);
static void pv_insert(pmap_t, vm_offset_t, vm_page_t);
static void pv_remove(pmap_t, vm_offset_t, vm_page_t);

/* Number of kva ptbl buffers, each covering one ptbl (PTBL_PAGES). */
#define PTBL_BUFS		(128 * 16)

struct ptbl_buf {
	TAILQ_ENTRY(ptbl_buf) link;	/* list link */
	vm_offset_t kva;		/* va of mapping */
};

/* ptbl free list and a lock used for access synchronization. */
static TAILQ_HEAD(, ptbl_buf) ptbl_buf_freelist;
static struct mtx ptbl_buf_freelist_lock;

/* Base address of kva space allocated for ptbl bufs. */
static vm_offset_t ptbl_buf_pool_vabase;

/* Pointer to ptbl_buf structures. */
static struct ptbl_buf *ptbl_bufs;

void pmap_bootstrap_ap(volatile uint32_t *);

/*
 * Kernel MMU interface
 */
static void mmu_booke_change_wiring(mmu_t, pmap_t, vm_offset_t, boolean_t);
static void mmu_booke_clear_modify(mmu_t, vm_page_t);
static void mmu_booke_copy(mmu_t, pmap_t, pmap_t, vm_offset_t,
    vm_size_t, vm_offset_t);
static void mmu_booke_copy_page(mmu_t, vm_page_t, vm_page_t);
static void mmu_booke_copy_pages(mmu_t, vm_page_t *,
    vm_offset_t, vm_page_t *, vm_offset_t, int);
static void mmu_booke_enter(mmu_t, pmap_t, vm_offset_t, vm_page_t,
    vm_prot_t, boolean_t);
static void mmu_booke_enter_object(mmu_t, pmap_t, vm_offset_t, vm_offset_t,
    vm_page_t, vm_prot_t);
static void mmu_booke_enter_quick(mmu_t, pmap_t, vm_offset_t, vm_page_t,
    vm_prot_t);
static vm_paddr_t mmu_booke_extract(mmu_t, pmap_t, vm_offset_t);
static vm_page_t mmu_booke_extract_and_hold(mmu_t, pmap_t, vm_offset_t,
    vm_prot_t);
static void mmu_booke_init(mmu_t);
static boolean_t mmu_booke_is_modified(mmu_t, vm_page_t);
static boolean_t mmu_booke_is_prefaultable(mmu_t, pmap_t, vm_offset_t);
static boolean_t mmu_booke_is_referenced(mmu_t, vm_page_t);
static int mmu_booke_ts_referenced(mmu_t, vm_page_t);
static vm_offset_t mmu_booke_map(mmu_t, vm_offset_t *, vm_paddr_t, vm_paddr_t,
    int);
static int mmu_booke_mincore(mmu_t, pmap_t, vm_offset_t,
    vm_paddr_t *);
static void mmu_booke_object_init_pt(mmu_t, pmap_t, vm_offset_t,
    vm_object_t, vm_pindex_t, vm_size_t);
static boolean_t mmu_booke_page_exists_quick(mmu_t, pmap_t, vm_page_t);
static void mmu_booke_page_init(mmu_t, vm_page_t);
static int mmu_booke_page_wired_mappings(mmu_t, vm_page_t);
static void mmu_booke_pinit(mmu_t, pmap_t);
static void mmu_booke_pinit0(mmu_t, pmap_t);
static void mmu_booke_protect(mmu_t, pmap_t, vm_offset_t, vm_offset_t,
    vm_prot_t);
static void mmu_booke_qenter(mmu_t, vm_offset_t, vm_page_t *, int);
static void mmu_booke_qremove(mmu_t, vm_offset_t, int);
static void mmu_booke_release(mmu_t, pmap_t);
static void mmu_booke_remove(mmu_t, pmap_t, vm_offset_t, vm_offset_t);
static void mmu_booke_remove_all(mmu_t, vm_page_t);
static void mmu_booke_remove_write(mmu_t, vm_page_t);
static void mmu_booke_zero_page(mmu_t, vm_page_t);
static void mmu_booke_zero_page_area(mmu_t, vm_page_t, int, int);
static void mmu_booke_zero_page_idle(mmu_t, vm_page_t);
static void mmu_booke_activate(mmu_t, struct thread *);
static void mmu_booke_deactivate(mmu_t, struct thread *);
static void mmu_booke_bootstrap(mmu_t, vm_offset_t, vm_offset_t);
static void *mmu_booke_mapdev(mmu_t, vm_paddr_t, vm_size_t);
static void *mmu_booke_mapdev_attr(mmu_t, vm_paddr_t, vm_size_t, vm_memattr_t);
static void mmu_booke_unmapdev(mmu_t, vm_offset_t, vm_size_t);
static vm_paddr_t mmu_booke_kextract(mmu_t, vm_offset_t);
static void mmu_booke_kenter(mmu_t, vm_offset_t, vm_paddr_t);
static void mmu_booke_kenter_attr(mmu_t, vm_offset_t, vm_paddr_t, vm_memattr_t);
static void mmu_booke_kremove(mmu_t, vm_offset_t);
static boolean_t mmu_booke_dev_direct_mapped(mmu_t, vm_paddr_t, vm_size_t);
static void mmu_booke_sync_icache(mmu_t, pmap_t, vm_offset_t,
    vm_size_t);
static vm_offset_t mmu_booke_dumpsys_map(mmu_t, struct pmap_md *,
    vm_size_t, vm_size_t *);
static void mmu_booke_dumpsys_unmap(mmu_t, struct pmap_md *,
    vm_size_t, vm_offset_t);
static struct pmap_md *mmu_booke_scan_md(mmu_t, struct pmap_md *);

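/*
 * Book-E back-end for the machine-independent pmap dispatcher.  Each
 * MMUMETHOD() entry in the table below binds one kobj pmap method to the
 * corresponding mmu_booke_*() implementation declared above; MMU_DEF()
 * registers the whole table with the MMU framework.
 */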
static mmu_method_t mmu_booke_methods[] = {
	/* pmap dispatcher interface */
	MMUMETHOD(mmu_change_wiring,	mmu_booke_change_wiring),
	MMUMETHOD(mmu_clear_modify,	mmu_booke_clear_modify),
	MMUMETHOD(mmu_copy,		mmu_booke_copy),
	MMUMETHOD(mmu_copy_page,	mmu_booke_copy_page),
	MMUMETHOD(mmu_copy_pages,	mmu_booke_copy_pages),
	MMUMETHOD(mmu_enter,		mmu_booke_enter),
	MMUMETHOD(mmu_enter_object,	mmu_booke_enter_object),
	MMUMETHOD(mmu_enter_quick,	mmu_booke_enter_quick),
	MMUMETHOD(mmu_extract,		mmu_booke_extract),
	MMUMETHOD(mmu_extract_and_hold,	mmu_booke_extract_and_hold),
	MMUMETHOD(mmu_init,		mmu_booke_init),
	MMUMETHOD(mmu_is_modified,	mmu_booke_is_modified),
	MMUMETHOD(mmu_is_prefaultable,	mmu_booke_is_prefaultable),
	MMUMETHOD(mmu_is_referenced,	mmu_booke_is_referenced),
	MMUMETHOD(mmu_ts_referenced,	mmu_booke_ts_referenced),
	MMUMETHOD(mmu_map,		mmu_booke_map),
	MMUMETHOD(mmu_mincore,		mmu_booke_mincore),
	MMUMETHOD(mmu_object_init_pt,	mmu_booke_object_init_pt),
	MMUMETHOD(mmu_page_exists_quick, mmu_booke_page_exists_quick),
	MMUMETHOD(mmu_page_init,	mmu_booke_page_init),
	MMUMETHOD(mmu_page_wired_mappings, mmu_booke_page_wired_mappings),
	MMUMETHOD(mmu_pinit,		mmu_booke_pinit),
	MMUMETHOD(mmu_pinit0,		mmu_booke_pinit0),
	MMUMETHOD(mmu_protect,		mmu_booke_protect),
	MMUMETHOD(mmu_qenter,		mmu_booke_qenter),
	MMUMETHOD(mmu_qremove,		mmu_booke_qremove),
	MMUMETHOD(mmu_release,		mmu_booke_release),
	MMUMETHOD(mmu_remove,		mmu_booke_remove),
	MMUMETHOD(mmu_remove_all,	mmu_booke_remove_all),
	MMUMETHOD(mmu_remove_write,	mmu_booke_remove_write),
	MMUMETHOD(mmu_sync_icache,	mmu_booke_sync_icache),
	MMUMETHOD(mmu_zero_page,	mmu_booke_zero_page),
	MMUMETHOD(mmu_zero_page_area,	mmu_booke_zero_page_area),
	MMUMETHOD(mmu_zero_page_idle,	mmu_booke_zero_page_idle),
	MMUMETHOD(mmu_activate,		mmu_booke_activate),
	MMUMETHOD(mmu_deactivate,	mmu_booke_deactivate),

	/* Internal interfaces */
	MMUMETHOD(mmu_bootstrap,	mmu_booke_bootstrap),
	MMUMETHOD(mmu_dev_direct_mapped, mmu_booke_dev_direct_mapped),
	MMUMETHOD(mmu_mapdev,		mmu_booke_mapdev),
	MMUMETHOD(mmu_mapdev_attr,	mmu_booke_mapdev_attr),
	MMUMETHOD(mmu_kenter,		mmu_booke_kenter),
	MMUMETHOD(mmu_kenter_attr,	mmu_booke_kenter_attr),
	MMUMETHOD(mmu_kextract,		mmu_booke_kextract),
/*	MMUMETHOD(mmu_kremove,		mmu_booke_kremove),	*/
	MMUMETHOD(mmu_unmapdev,		mmu_booke_unmapdev),

	/* dumpsys() support */
	MMUMETHOD(mmu_dumpsys_map,	mmu_booke_dumpsys_map),
	MMUMETHOD(mmu_dumpsys_unmap,	mmu_booke_dumpsys_unmap),
	MMUMETHOD(mmu_scan_md,		mmu_booke_scan_md),

	{ 0, 0 }
};

MMU_DEF(booke_mmu, MMU_TYPE_BOOKE, mmu_booke_methods, 0);

static __inline uint32_t
tlb_calc_wimg(vm_offset_t pa, vm_memattr_t ma)
{
	uint32_t attrib;
	int i;

	if (ma != VM_MEMATTR_DEFAULT) {
		switch (ma) {
		case VM_MEMATTR_UNCACHEABLE:
			return (PTE_I | PTE_G);
		case VM_MEMATTR_WRITE_COMBINING:
		case VM_MEMATTR_WRITE_BACK:
		case VM_MEMATTR_PREFETCHABLE:
			return (PTE_I);
		case VM_MEMATTR_WRITE_THROUGH:
			return (PTE_W | PTE_M);
		}
	}

	/*
	 * Assume the page is cache inhibited and access is guarded unless
	 * it's in our available memory array.
	 */
	attrib = _TLB_ENTRY_IO;
	for (i = 0; i < physmem_regions_sz; i++) {
		if ((pa >= physmem_regions[i].mr_start) &&
		    (pa < (physmem_regions[i].mr_start +
		    physmem_regions[i].mr_size))) {
			attrib = _TLB_ENTRY_MEM;
			break;
		}
	}

	return (attrib);
}

static inline void
tlb_miss_lock(void)
{
#ifdef SMP
	struct pcpu *pc;

	if (!smp_started)
		return;

	STAILQ_FOREACH(pc, &cpuhead, pc_allcpu) {
		if (pc != pcpup) {

			CTR3(KTR_PMAP, "%s: tlb miss LOCK of CPU=%d, "
			    "tlb_lock=%p", __func__, pc->pc_cpuid,
			    pc->pc_booke_tlb_lock);

			KASSERT((pc->pc_cpuid != PCPU_GET(cpuid)),
			    ("tlb_miss_lock: tried to lock self"));

			tlb_lock(pc->pc_booke_tlb_lock);

			CTR1(KTR_PMAP, "%s: locked", __func__);
		}
	}
#endif
}

static inline void
tlb_miss_unlock(void)
{
#ifdef SMP
	struct pcpu *pc;

	if (!smp_started)
		return;

	STAILQ_FOREACH(pc, &cpuhead, pc_allcpu) {
		if (pc != pcpup) {
			CTR2(KTR_PMAP, "%s: tlb miss UNLOCK of CPU=%d",
			    __func__, pc->pc_cpuid);

			tlb_unlock(pc->pc_booke_tlb_lock);

			CTR1(KTR_PMAP, "%s: unlocked", __func__);
		}
	}
#endif
}

/* Return number of entries in TLB0. */
static __inline void
tlb0_get_tlbconf(void)
{
	uint32_t tlb0_cfg;

	tlb0_cfg = mfspr(SPR_TLB0CFG);
	tlb0_entries = tlb0_cfg & TLBCFG_NENTRY_MASK;
	tlb0_ways = (tlb0_cfg & TLBCFG_ASSOC_MASK) >> TLBCFG_ASSOC_SHIFT;
	tlb0_entries_per_way = tlb0_entries / tlb0_ways;
}

/* Initialize pool of kva ptbl buffers. */
static void
ptbl_init(void)
{
	int i;

	CTR3(KTR_PMAP, "%s: s (ptbl_bufs = 0x%08x size 0x%08x)", __func__,
	    (uint32_t)ptbl_bufs, sizeof(struct ptbl_buf) * PTBL_BUFS);
	CTR3(KTR_PMAP, "%s: s (ptbl_buf_pool_vabase = 0x%08x size = 0x%08x)",
	    __func__, ptbl_buf_pool_vabase, PTBL_BUFS * PTBL_PAGES * PAGE_SIZE);

	mtx_init(&ptbl_buf_freelist_lock, "ptbl bufs lock", NULL, MTX_DEF);
	TAILQ_INIT(&ptbl_buf_freelist);

	for (i = 0; i < PTBL_BUFS; i++) {
		ptbl_bufs[i].kva =
		    ptbl_buf_pool_vabase + i * PTBL_PAGES * PAGE_SIZE;
		TAILQ_INSERT_TAIL(&ptbl_buf_freelist, &ptbl_bufs[i], link);
	}
}

/* Get a ptbl_buf from the freelist. */
static struct ptbl_buf *
ptbl_buf_alloc(void)
{
	struct ptbl_buf *buf;

	mtx_lock(&ptbl_buf_freelist_lock);
	buf = TAILQ_FIRST(&ptbl_buf_freelist);
	if (buf != NULL)
		TAILQ_REMOVE(&ptbl_buf_freelist, buf, link);
	mtx_unlock(&ptbl_buf_freelist_lock);

	CTR2(KTR_PMAP, "%s: buf = %p", __func__, buf);

	return (buf);
}

/* Return a ptbl_buf to the free pool. */
static void
ptbl_buf_free(struct ptbl_buf *buf)
{

	CTR2(KTR_PMAP, "%s: buf = %p", __func__, buf);

	mtx_lock(&ptbl_buf_freelist_lock);
	TAILQ_INSERT_TAIL(&ptbl_buf_freelist, buf, link);
	mtx_unlock(&ptbl_buf_freelist_lock);
}

/*
 * Search the pmap's list of allocated ptbl bufs for the one mapping the
 * given ptbl and release it.
 */
static void
ptbl_free_pmap_ptbl(pmap_t pmap, pte_t *ptbl)
{
	struct ptbl_buf *pbuf;

	CTR2(KTR_PMAP, "%s: ptbl = %p", __func__, ptbl);

	PMAP_LOCK_ASSERT(pmap, MA_OWNED);

	TAILQ_FOREACH(pbuf, &pmap->pm_ptbl_list, link)
		if (pbuf->kva == (vm_offset_t)ptbl) {
			/* Remove from pmap ptbl buf list. */
			TAILQ_REMOVE(&pmap->pm_ptbl_list, pbuf, link);

			/* Free corresponding ptbl buf. */
			ptbl_buf_free(pbuf);
			break;
		}
}

/* Allocate page table. */
static pte_t *
ptbl_alloc(mmu_t mmu, pmap_t pmap, unsigned int pdir_idx)
{
	vm_page_t mtbl[PTBL_PAGES];
	vm_page_t m;
	struct ptbl_buf *pbuf;
	unsigned int pidx;
	pte_t *ptbl;
	int i;

	CTR4(KTR_PMAP, "%s: pmap = %p su = %d pdir_idx = %d", __func__, pmap,
	    (pmap == kernel_pmap), pdir_idx);

	KASSERT((pdir_idx <= (VM_MAXUSER_ADDRESS / PDIR_SIZE)),
	    ("ptbl_alloc: invalid pdir_idx"));
	KASSERT((pmap->pm_pdir[pdir_idx] == NULL),
	    ("pte_alloc: valid ptbl entry exists!"));

	pbuf = ptbl_buf_alloc();
	if (pbuf == NULL)
		panic("pte_alloc: couldn't alloc kernel virtual memory");

	ptbl = (pte_t *)pbuf->kva;

	CTR2(KTR_PMAP, "%s: ptbl kva = %p", __func__, ptbl);

	/* Allocate ptbl pages, this will sleep! */
	for (i = 0; i < PTBL_PAGES; i++) {
		pidx = (PTBL_PAGES * pdir_idx) + i;
		while ((m = vm_page_alloc(NULL, pidx,
		    VM_ALLOC_NOOBJ | VM_ALLOC_WIRED)) == NULL) {

			PMAP_UNLOCK(pmap);
			rw_wunlock(&pvh_global_lock);
			VM_WAIT;
			rw_wlock(&pvh_global_lock);
			PMAP_LOCK(pmap);
		}
		mtbl[i] = m;
	}

	/* Map allocated pages into kernel_pmap. */
	mmu_booke_qenter(mmu, (vm_offset_t)ptbl, mtbl, PTBL_PAGES);

	/* Zero whole ptbl. */
	bzero((caddr_t)ptbl, PTBL_PAGES * PAGE_SIZE);

	/* Add pbuf to the pmap ptbl bufs list. */
	TAILQ_INSERT_TAIL(&pmap->pm_ptbl_list, pbuf, link);

	return (ptbl);
}

/* Free ptbl pages and invalidate pdir entry. */
static void
ptbl_free(mmu_t mmu, pmap_t pmap, unsigned int pdir_idx)
{
	pte_t *ptbl;
	vm_paddr_t pa;
	vm_offset_t va;
	vm_page_t m;
	int i;

	CTR4(KTR_PMAP, "%s: pmap = %p su = %d pdir_idx = %d", __func__, pmap,
	    (pmap == kernel_pmap), pdir_idx);

	KASSERT((pdir_idx <= (VM_MAXUSER_ADDRESS / PDIR_SIZE)),
	    ("ptbl_free: invalid pdir_idx"));

	ptbl = pmap->pm_pdir[pdir_idx];

	CTR2(KTR_PMAP, "%s: ptbl = %p", __func__, ptbl);

	KASSERT((ptbl != NULL), ("ptbl_free: null ptbl"));

	/*
	 * Invalidate the pdir entry as soon as possible, so that other CPUs
	 * don't attempt to look up the page tables we are releasing.
	 */
	mtx_lock_spin(&tlbivax_mutex);
	tlb_miss_lock();

	pmap->pm_pdir[pdir_idx] = NULL;

	tlb_miss_unlock();
	mtx_unlock_spin(&tlbivax_mutex);

	for (i = 0; i < PTBL_PAGES; i++) {
		va = ((vm_offset_t)ptbl + (i * PAGE_SIZE));
		pa = pte_vatopa(mmu, kernel_pmap, va);
		m = PHYS_TO_VM_PAGE(pa);
		vm_page_free_zero(m);
		atomic_subtract_int(&cnt.v_wire_count, 1);
		mmu_booke_kremove(mmu, va);
	}

	ptbl_free_pmap_ptbl(pmap, ptbl);
}

/*
 * Decrement ptbl pages hold count and attempt to free ptbl pages.
 * Called when removing pte entry from ptbl.
 *
 * Return 1 if ptbl pages were freed.
 */
static int
ptbl_unhold(mmu_t mmu, pmap_t pmap, unsigned int pdir_idx)
{
	pte_t *ptbl;
	vm_paddr_t pa;
	vm_page_t m;
	int i;

	CTR4(KTR_PMAP, "%s: pmap = %p su = %d pdir_idx = %d", __func__, pmap,
	    (pmap == kernel_pmap), pdir_idx);

	KASSERT((pdir_idx <= (VM_MAXUSER_ADDRESS / PDIR_SIZE)),
	    ("ptbl_unhold: invalid pdir_idx"));
	KASSERT((pmap != kernel_pmap),
	    ("ptbl_unhold: unholding kernel ptbl!"));

	ptbl = pmap->pm_pdir[pdir_idx];

	//debugf("ptbl_unhold: ptbl = 0x%08x\n", (u_int32_t)ptbl);
	KASSERT(((vm_offset_t)ptbl >= VM_MIN_KERNEL_ADDRESS),
	    ("ptbl_unhold: non kva ptbl"));

	/* decrement hold count */
	for (i = 0; i < PTBL_PAGES; i++) {
		pa = pte_vatopa(mmu, kernel_pmap,
		    (vm_offset_t)ptbl + (i * PAGE_SIZE));
		m = PHYS_TO_VM_PAGE(pa);
		m->wire_count--;
	}

	/*
	 * Free ptbl pages if there are no pte entries in this ptbl.
	 * wire_count has the same value for all ptbl pages, so check the last
	 * page.
	 */
	if (m->wire_count == 0) {
		ptbl_free(mmu, pmap, pdir_idx);

		//debugf("ptbl_unhold: e (freed ptbl)\n");
		return (1);
	}

	return (0);
}

/*
 * Increment hold count for ptbl pages. This routine is used when a new pte
 * entry is being inserted into the ptbl.
 */
static void
ptbl_hold(mmu_t mmu, pmap_t pmap, unsigned int pdir_idx)
{
	vm_paddr_t pa;
	pte_t *ptbl;
	vm_page_t m;
	int i;

	CTR3(KTR_PMAP, "%s: pmap = %p pdir_idx = %d", __func__, pmap,
	    pdir_idx);

	KASSERT((pdir_idx <= (VM_MAXUSER_ADDRESS / PDIR_SIZE)),
	    ("ptbl_hold: invalid pdir_idx"));
	KASSERT((pmap != kernel_pmap),
	    ("ptbl_hold: holding kernel ptbl!"));

	ptbl = pmap->pm_pdir[pdir_idx];

	KASSERT((ptbl != NULL), ("ptbl_hold: null ptbl"));

	for (i = 0; i < PTBL_PAGES; i++) {
		pa = pte_vatopa(mmu, kernel_pmap,
		    (vm_offset_t)ptbl + (i * PAGE_SIZE));
		m = PHYS_TO_VM_PAGE(pa);
		m->wire_count++;
	}
}

/* Allocate pv_entry structure. */
pv_entry_t
pv_alloc(void)
{
	pv_entry_t pv;

	pv_entry_count++;
	if (pv_entry_count > pv_entry_high_water)
		pagedaemon_wakeup();
	pv = uma_zalloc(pvzone, M_NOWAIT);

	return (pv);
}

/* Free pv_entry structure. */
static __inline void
pv_free(pv_entry_t pve)
{

	pv_entry_count--;
	uma_zfree(pvzone, pve);
}

/* Allocate and initialize pv_entry structure. */
static void
pv_insert(pmap_t pmap, vm_offset_t va, vm_page_t m)
{
	pv_entry_t pve;

	//int su = (pmap == kernel_pmap);
	//debugf("pv_insert: s (su = %d pmap = 0x%08x va = 0x%08x m = 0x%08x)\n", su,
	//	(u_int32_t)pmap, va, (u_int32_t)m);

	pve = pv_alloc();
	if (pve == NULL)
		panic("pv_insert: no pv entries!");

	pve->pv_pmap = pmap;
	pve->pv_va = va;

	/* add to pv_list */
	PMAP_LOCK_ASSERT(pmap, MA_OWNED);
	rw_assert(&pvh_global_lock, RA_WLOCKED);

	TAILQ_INSERT_TAIL(&m->md.pv_list, pve, pv_link);

	//debugf("pv_insert: e\n");
}

/* Destroy pv entry. */
static void
pv_remove(pmap_t pmap, vm_offset_t va, vm_page_t m)
{
	pv_entry_t pve;

	//int su = (pmap == kernel_pmap);
	//debugf("pv_remove: s (su = %d pmap = 0x%08x va = 0x%08x)\n", su, (u_int32_t)pmap, va);

	PMAP_LOCK_ASSERT(pmap, MA_OWNED);
	rw_assert(&pvh_global_lock, RA_WLOCKED);

	/* find pv entry */
	TAILQ_FOREACH(pve, &m->md.pv_list, pv_link) {
		if ((pmap == pve->pv_pmap) && (va == pve->pv_va)) {
			/* remove from pv_list */
			TAILQ_REMOVE(&m->md.pv_list, pve, pv_link);
			if (TAILQ_EMPTY(&m->md.pv_list))
				vm_page_aflag_clear(m, PGA_WRITEABLE);

			/* free pv entry struct */
			pv_free(pve);
			break;
		}
	}

	//debugf("pv_remove: e\n");
}

/*
 * Clean pte entry, try to free page table page if requested.
 *
 * Return 1 if ptbl pages were freed, otherwise return 0.
 */
static int
pte_remove(mmu_t mmu, pmap_t pmap, vm_offset_t va, uint8_t flags)
{
	unsigned int pdir_idx = PDIR_IDX(va);
	unsigned int ptbl_idx = PTBL_IDX(va);
	vm_page_t m;
	pte_t *ptbl;
	pte_t *pte;

	//int su = (pmap == kernel_pmap);
	//debugf("pte_remove: s (su = %d pmap = 0x%08x va = 0x%08x flags = %d)\n",
	//	su, (u_int32_t)pmap, va, flags);

	ptbl = pmap->pm_pdir[pdir_idx];
	KASSERT(ptbl, ("pte_remove: null ptbl"));

	pte = &ptbl[ptbl_idx];

	if (pte == NULL || !PTE_ISVALID(pte))
		return (0);

	if (PTE_ISWIRED(pte))
		pmap->pm_stats.wired_count--;

	/* Handle managed entry. */
	if (PTE_ISMANAGED(pte)) {
		/* Get vm_page_t for mapped pte. */
		m = PHYS_TO_VM_PAGE(PTE_PA(pte));

		if (PTE_ISMODIFIED(pte))
			vm_page_dirty(m);

		if (PTE_ISREFERENCED(pte))
			vm_page_aflag_set(m, PGA_REFERENCED);

		pv_remove(pmap, va, m);
	}

	mtx_lock_spin(&tlbivax_mutex);
	tlb_miss_lock();

	tlb0_flush_entry(va);
	pte->flags = 0;
	pte->rpn = 0;

	tlb_miss_unlock();
	mtx_unlock_spin(&tlbivax_mutex);

	pmap->pm_stats.resident_count--;

	if (flags & PTBL_UNHOLD) {
		//debugf("pte_remove: e (unhold)\n");
		return (ptbl_unhold(mmu, pmap, pdir_idx));
	}

	//debugf("pte_remove: e\n");
	return (0);
}

/*
 * Insert PTE for a given page and virtual address.
 */
static void
pte_enter(mmu_t mmu, pmap_t pmap, vm_page_t m, vm_offset_t va, uint32_t flags)
{
	unsigned int pdir_idx = PDIR_IDX(va);
	unsigned int ptbl_idx = PTBL_IDX(va);
	pte_t *ptbl, *pte;

	CTR4(KTR_PMAP, "%s: su = %d pmap = %p va = %p", __func__,
	    pmap == kernel_pmap, pmap, va);

	/* Get the page table pointer. */
	ptbl = pmap->pm_pdir[pdir_idx];

	if (ptbl == NULL) {
		/* Allocate page table pages. */
		ptbl = ptbl_alloc(mmu, pmap, pdir_idx);
	} else {
		/*
		 * Check if there is valid mapping for requested
		 * va, if there is, remove it.
		 */
		pte = &pmap->pm_pdir[pdir_idx][ptbl_idx];
		if (PTE_ISVALID(pte)) {
			pte_remove(mmu, pmap, va, PTBL_HOLD);
		} else {
			/*
			 * pte is not used, increment hold count
			 * for ptbl pages.
			 */
			if (pmap != kernel_pmap)
				ptbl_hold(mmu, pmap, pdir_idx);
		}
	}

	/*
	 * Insert pv_entry into pv_list for mapped page if part of managed
	 * memory.
	 */
	if ((m->oflags & VPO_UNMANAGED) == 0) {
		flags |= PTE_MANAGED;

		/* Create and insert pv entry. */
		pv_insert(pmap, va, m);
	}

	pmap->pm_stats.resident_count++;

	mtx_lock_spin(&tlbivax_mutex);
	tlb_miss_lock();

	tlb0_flush_entry(va);
	if (pmap->pm_pdir[pdir_idx] == NULL) {
		/*
		 * If we just allocated a new page table, hook it in
		 * the pdir.
		 */
		pmap->pm_pdir[pdir_idx] = ptbl;
	}
	pte = &(pmap->pm_pdir[pdir_idx][ptbl_idx]);
	pte->rpn = VM_PAGE_TO_PHYS(m) & ~PTE_PA_MASK;
	pte->flags |= (PTE_VALID | flags);

	tlb_miss_unlock();
	mtx_unlock_spin(&tlbivax_mutex);
}

/* Return the pa for the given pmap/va. */
static vm_paddr_t
pte_vatopa(mmu_t mmu, pmap_t pmap, vm_offset_t va)
{
	vm_paddr_t pa = 0;
	pte_t *pte;

	pte = pte_find(mmu, pmap, va);
	if ((pte != NULL) && PTE_ISVALID(pte))
		pa = (PTE_PA(pte) | (va & PTE_PA_MASK));
	return (pa);
}

/* Get a pointer to a PTE in a page table. */
static pte_t *
pte_find(mmu_t mmu, pmap_t pmap, vm_offset_t va)
{
	unsigned int pdir_idx = PDIR_IDX(va);
	unsigned int ptbl_idx = PTBL_IDX(va);

	KASSERT((pmap != NULL), ("pte_find: invalid pmap"));

	if (pmap->pm_pdir[pdir_idx])
		return (&(pmap->pm_pdir[pdir_idx][ptbl_idx]));

	return (NULL);
}

/**************************************************************************/
/* PMAP related */
/**************************************************************************/

/*
 * This is called during booke_init, before the system is really initialized.
 */
static void
mmu_booke_bootstrap(mmu_t mmu, vm_offset_t start, vm_offset_t kernelend)
{
	vm_offset_t phys_kernelend;
	struct mem_region *mp, *mp1;
	int cnt, i, j;
	u_int s, e, sz;
	u_int phys_avail_count;
	vm_size_t physsz, hwphyssz, kstack0_sz;
	vm_offset_t kernel_pdir, kstack0, va;
	vm_paddr_t kstack0_phys;
	void *dpcpu;
	pte_t *pte;

	debugf("mmu_booke_bootstrap: entered\n");

	/* Initialize invalidation mutex */
	mtx_init(&tlbivax_mutex, "tlbivax", NULL, MTX_SPIN);

	/* Read TLB0 size and associativity. */
	tlb0_get_tlbconf();

	/*
	 * Align kernel start and end address (kernel image).
	 * Note that kernel end does not necessarily relate to kernsize.
	 * kernsize is the size of the kernel that is actually mapped.
	 * Also note that "start - 1" is deliberate. With SMP, the
	 * entry point is exactly a page from the actual load address.
	 * As such, trunc_page() has no effect and we're off by a page.
	 * Since we always have the ELF header between the load address
	 * and the entry point, we can safely subtract 1 to compensate.
	 */
	kernstart = trunc_page(start - 1);
	data_start = round_page(kernelend);
	data_end = data_start;

	/*
	 * Addresses of preloaded modules (like file systems) use
	 * physical addresses. Make sure we relocate those into
	 * virtual addresses.
	 */
	preload_addr_relocate = kernstart - kernload;

	/* Allocate the dynamic per-cpu area. */
	dpcpu = (void *)data_end;
	data_end += DPCPU_SIZE;

	/* Allocate space for the message buffer. */
	msgbufp = (struct msgbuf *)data_end;
	data_end += msgbufsize;
	debugf(" msgbufp at 0x%08x end = 0x%08x\n", (uint32_t)msgbufp,
	    data_end);

	data_end = round_page(data_end);

	/* Allocate space for ptbl_bufs. */
	ptbl_bufs = (struct ptbl_buf *)data_end;
	data_end += sizeof(struct ptbl_buf) * PTBL_BUFS;
	debugf(" ptbl_bufs at 0x%08x end = 0x%08x\n", (uint32_t)ptbl_bufs,
	    data_end);

	data_end = round_page(data_end);

	/* Allocate PTE tables for kernel KVA. */
	kernel_pdir = data_end;
	kernel_ptbls = (VM_MAX_KERNEL_ADDRESS - VM_MIN_KERNEL_ADDRESS +
	    PDIR_SIZE - 1) / PDIR_SIZE;
	data_end += kernel_ptbls * PTBL_PAGES * PAGE_SIZE;
	debugf(" kernel ptbls: %d\n", kernel_ptbls);
	debugf(" kernel pdir at 0x%08x end = 0x%08x\n", kernel_pdir, data_end);

	debugf(" data_end: 0x%08x\n", data_end);
	if (data_end - kernstart > kernsize) {
		kernsize += tlb1_mapin_region(kernstart + kernsize,
		    kernload + kernsize, (data_end - kernstart) - kernsize);
	}
	data_end = kernstart + kernsize;
	debugf(" updated data_end: 0x%08x\n", data_end);

	/*
	 * Clear the structures - note we can only do it safely after the
	 * possible additional TLB1 translations are in place (above) so that
	 * all range up to the currently calculated 'data_end' is covered.
	 */
	dpcpu_init(dpcpu, 0);
	memset((void *)ptbl_bufs, 0, sizeof(struct ptbl_buf) * PTBL_BUFS);
	memset((void *)kernel_pdir, 0, kernel_ptbls * PTBL_PAGES * PAGE_SIZE);

	/*******************************************************/
	/* Set the start and end of kva. */
	/*******************************************************/
	virtual_avail = round_page(data_end);
	virtual_end = VM_MAX_KERNEL_ADDRESS;

	/* Allocate KVA space for page zero/copy operations. */
	zero_page_va = virtual_avail;
	virtual_avail += PAGE_SIZE;
	zero_page_idle_va = virtual_avail;
	virtual_avail += PAGE_SIZE;
	copy_page_src_va = virtual_avail;
	virtual_avail += PAGE_SIZE;
	copy_page_dst_va = virtual_avail;
	virtual_avail += PAGE_SIZE;
	debugf("zero_page_va = 0x%08x\n", zero_page_va);
	debugf("zero_page_idle_va = 0x%08x\n", zero_page_idle_va);
	debugf("copy_page_src_va = 0x%08x\n", copy_page_src_va);
	debugf("copy_page_dst_va = 0x%08x\n", copy_page_dst_va);

	/* Initialize page zero/copy mutexes. */
	mtx_init(&zero_page_mutex, "mmu_booke_zero_page", NULL, MTX_DEF);
	mtx_init(&copy_page_mutex, "mmu_booke_copy_page", NULL, MTX_DEF);

	/* Allocate KVA space for ptbl bufs. */
	ptbl_buf_pool_vabase = virtual_avail;
	virtual_avail += PTBL_BUFS * PTBL_PAGES * PAGE_SIZE;
	debugf("ptbl_buf_pool_vabase = 0x%08x end = 0x%08x\n",
	    ptbl_buf_pool_vabase, virtual_avail);

	/* Calculate corresponding physical addresses for the kernel region. */
	phys_kernelend = kernload + kernsize;
	debugf("kernel image and allocated data:\n");
	debugf(" kernload = 0x%08x\n", kernload);
	debugf(" kernstart = 0x%08x\n", kernstart);
	debugf(" kernsize = 0x%08x\n", kernsize);

	if (sizeof(phys_avail) / sizeof(phys_avail[0]) < availmem_regions_sz)
		panic("mmu_booke_bootstrap: phys_avail too small");

	/*
	 * Remove kernel physical address range from avail regions list. Page
	 * align all regions. Non-page aligned memory isn't very interesting
	 * to us. Also, sort the entries for ascending addresses.
	 */
	/* Retrieve phys/avail mem regions */
	mem_regions(&physmem_regions, &physmem_regions_sz,
	    &availmem_regions, &availmem_regions_sz);
	sz = 0;
	cnt = availmem_regions_sz;
	debugf("processing avail regions:\n");
	for (mp = availmem_regions; mp->mr_size; mp++) {
		s = mp->mr_start;
		e = mp->mr_start + mp->mr_size;
		debugf(" %08x-%08x -> ", s, e);
		/* Check whether this region holds all of the kernel. */
		if (s < kernload && e > phys_kernelend) {
			availmem_regions[cnt].mr_start = phys_kernelend;
			availmem_regions[cnt++].mr_size = e - phys_kernelend;
			e = kernload;
		}
		/* Look whether this region starts within the kernel. */
		if (s >= kernload && s < phys_kernelend) {
			if (e <= phys_kernelend)
				goto empty;
			s = phys_kernelend;
		}
		/* Now look whether this region ends within the kernel. */
		if (e > kernload && e <= phys_kernelend) {
			if (s >= kernload)
				goto empty;
			e = kernload;
		}
		/* Now page align the start and size of the region. */
		s = round_page(s);
		e = trunc_page(e);
		if (e < s)
			e = s;
		sz = e - s;
		debugf("%08x-%08x = %x\n", s, e, sz);

		/* Check whether some memory is left here. */
		if (sz == 0) {
		empty:
			memmove(mp, mp + 1,
			    (cnt - (mp - availmem_regions)) * sizeof(*mp));
			cnt--;
			mp--;
			continue;
		}

		/* Do an insertion sort. */
		for (mp1 = availmem_regions; mp1 < mp; mp1++)
			if (s < mp1->mr_start)
				break;
		if (mp1 < mp) {
			memmove(mp1 + 1, mp1, (char *)mp - (char *)mp1);
			mp1->mr_start = s;
			mp1->mr_size = sz;
		} else {
			mp->mr_start = s;
			mp->mr_size = sz;
		}
	}
	availmem_regions_sz = cnt;

	/*******************************************************/
	/* Steal physical memory for kernel stack from the end */
	/* of the first avail region                           */
	/*******************************************************/
	kstack0_sz = KSTACK_PAGES * PAGE_SIZE;
	kstack0_phys = availmem_regions[0].mr_start +
	    availmem_regions[0].mr_size;
	kstack0_phys -= kstack0_sz;
	availmem_regions[0].mr_size -= kstack0_sz;

	/*******************************************************/
	/* Fill in phys_avail table, based on availmem_regions */
	/*******************************************************/
	phys_avail_count = 0;
	physsz = 0;
	hwphyssz = 0;
	TUNABLE_ULONG_FETCH("hw.physmem", (u_long *)&hwphyssz);

	debugf("fill in phys_avail:\n");
	for (i = 0, j = 0; i < availmem_regions_sz; i++, j += 2) {

		debugf(" region: 0x%08x - 0x%08x (0x%08x)\n",
		    availmem_regions[i].mr_start,
		    availmem_regions[i].mr_start +
		    availmem_regions[i].mr_size,
		    availmem_regions[i].mr_size);

		if (hwphyssz != 0 &&
		    (physsz + availmem_regions[i].mr_size) >= hwphyssz) {
			debugf(" hw.physmem adjust\n");
			if (physsz < hwphyssz) {
				phys_avail[j] = availmem_regions[i].mr_start;
				phys_avail[j + 1] =
				    availmem_regions[i].mr_start +
				    hwphyssz - physsz;
				physsz = hwphyssz;
				phys_avail_count++;
			}
			break;
		}

		phys_avail[j] = availmem_regions[i].mr_start;
		phys_avail[j + 1] = availmem_regions[i].mr_start +
		    availmem_regions[i].mr_size;
		phys_avail_count++;
		physsz += availmem_regions[i].mr_size;
	}
	physmem = btoc(physsz);

	/* Calculate the last available physical address. */
	for (i = 0; phys_avail[i + 2] != 0; i += 2)
		;
	Maxmem = powerpc_btop(phys_avail[i + 1]);

	debugf("Maxmem = 0x%08lx\n", Maxmem);
	debugf("phys_avail_count = %d\n", phys_avail_count);
	debugf("physsz = 0x%08x physmem = %ld (0x%08lx)\n", physsz, physmem,
	    physmem);

	/*******************************************************/
	/* Initialize (statically allocated) kernel pmap. */
	/*******************************************************/
	PMAP_LOCK_INIT(kernel_pmap);
	kptbl_min = VM_MIN_KERNEL_ADDRESS / PDIR_SIZE;

	debugf("kernel_pmap = 0x%08x\n", (uint32_t)kernel_pmap);
	debugf("kptbl_min = %d, kernel_ptbls = %d\n", kptbl_min, kernel_ptbls);
	debugf("kernel pdir range: 0x%08x - 0x%08x\n",
	    kptbl_min * PDIR_SIZE, (kptbl_min + kernel_ptbls) * PDIR_SIZE - 1);

	/* Initialize kernel pdir */
	for (i = 0; i < kernel_ptbls; i++)
		kernel_pmap->pm_pdir[kptbl_min + i] =
		    (pte_t *)(kernel_pdir + (i * PAGE_SIZE * PTBL_PAGES));

	for (i = 0; i < MAXCPU; i++) {
		kernel_pmap->pm_tid[i] = TID_KERNEL;

		/* Initialize each CPU's tidbusy entry 0 with kernel_pmap */
		tidbusy[i][0] = kernel_pmap;
	}

	/*
	 * Fill in PTEs covering kernel code and data. They are not required
	 * for address translation, as this area is covered by static TLB1
	 * entries, but for pte_vatopa() to work correctly with kernel area
	 * addresses.
	 */
	for (va = kernstart; va < data_end; va += PAGE_SIZE) {
		pte = &(kernel_pmap->pm_pdir[PDIR_IDX(va)][PTBL_IDX(va)]);
		pte->rpn = kernload + (va - kernstart);
		pte->flags = PTE_M | PTE_SR | PTE_SW | PTE_SX | PTE_WIRED |
		    PTE_VALID;
	}
	/* Mark kernel_pmap active on all CPUs */
	CPU_FILL(&kernel_pmap->pm_active);

	/*
	 * Initialize the global pv list lock.
	 */
	rw_init(&pvh_global_lock, "pmap pv global");

	/*******************************************************/
	/* Final setup */
	/*******************************************************/

	/* Enter kstack0 into kernel map, provide guard page */
	kstack0 = virtual_avail + KSTACK_GUARD_PAGES * PAGE_SIZE;
	thread0.td_kstack = kstack0;
	thread0.td_kstack_pages = KSTACK_PAGES;

	debugf("kstack_sz = 0x%08x\n", kstack0_sz);
	debugf("kstack0_phys at 0x%08x - 0x%08x\n",
	    kstack0_phys, kstack0_phys + kstack0_sz);
	debugf("kstack0 at 0x%08x - 0x%08x\n", kstack0, kstack0 + kstack0_sz);

	virtual_avail += KSTACK_GUARD_PAGES * PAGE_SIZE + kstack0_sz;
	for (i = 0; i < KSTACK_PAGES; i++) {
		mmu_booke_kenter(mmu, kstack0, kstack0_phys);
		kstack0 += PAGE_SIZE;
		kstack0_phys += PAGE_SIZE;
	}

	debugf("virtual_avail = %08x\n", virtual_avail);
	debugf("virtual_end = %08x\n", virtual_end);

	debugf("mmu_booke_bootstrap: exit\n");
}

void
pmap_bootstrap_ap(volatile uint32_t *trcp __unused)
{
	int i;

	/*
	 * Finish TLB1 configuration: the BSP already set up its TLB1 and we
	 * have the snapshot of its contents in the s/w tlb1[] table, so use
	 * these values directly to (re)program AP's TLB1 hardware.
	 */
	for (i = bp_ntlb1s; i < tlb1_idx; i++) {
		/* Skip invalid entries */
		if (!(tlb1[i].mas1 & MAS1_VALID))
			continue;

		tlb1_write_entry(i);
	}

	set_mas4_defaults();
}

/*
 * Get the physical page address for the given pmap/virtual address.
 */
static vm_paddr_t
mmu_booke_extract(mmu_t mmu, pmap_t pmap, vm_offset_t va)
{
	vm_paddr_t pa;

	PMAP_LOCK(pmap);
	pa = pte_vatopa(mmu, pmap, va);
	PMAP_UNLOCK(pmap);

	return (pa);
}

/*
 * Extract the physical page address associated with the given
 * kernel virtual address.
 */
static vm_paddr_t
mmu_booke_kextract(mmu_t mmu, vm_offset_t va)
{
	int i;

	/* Check TLB1 mappings */
	for (i = 0; i < tlb1_idx; i++) {
		if (!(tlb1[i].mas1 & MAS1_VALID))
			continue;
		if (va >= tlb1[i].virt && va < tlb1[i].virt + tlb1[i].size)
			return (tlb1[i].phys + (va - tlb1[i].virt));
	}

	return (pte_vatopa(mmu, kernel_pmap, va));
}

/*
 * Initialize the pmap module.
 * Called by vm_init, to initialize any structures that the pmap
 * system needs to map virtual memory.
 */
static void
mmu_booke_init(mmu_t mmu)
{
	int shpgperproc = PMAP_SHPGPERPROC;

	/*
	 * Initialize the address space (zone) for the pv entries. Set a
	 * high water mark so that the system can recover from excessive
	 * numbers of pv entries.
	 */
	pvzone = uma_zcreate("PV ENTRY", sizeof(struct pv_entry), NULL, NULL,
	    NULL, NULL, UMA_ALIGN_PTR, UMA_ZONE_VM | UMA_ZONE_NOFREE);

	TUNABLE_INT_FETCH("vm.pmap.shpgperproc", &shpgperproc);
	pv_entry_max = shpgperproc * maxproc + cnt.v_page_count;

	TUNABLE_INT_FETCH("vm.pmap.pv_entries", &pv_entry_max);
	pv_entry_high_water = 9 * (pv_entry_max / 10);

	uma_zone_reserve_kva(pvzone, pv_entry_max);

	/* Pre-fill pvzone with initial number of pv entries. */
	uma_prealloc(pvzone, PV_ENTRY_ZONE_MIN);

	/* Initialize ptbl allocation. */
	ptbl_init();
}

/*
 * Map a list of wired pages into kernel virtual address space. This is
 * intended for temporary mappings which do not need page modification or
 * references recorded. Existing mappings in the region are overwritten.
 */
static void
mmu_booke_qenter(mmu_t mmu, vm_offset_t sva, vm_page_t *m, int count)
{
	vm_offset_t va;

	va = sva;
	while (count-- > 0) {
		mmu_booke_kenter(mmu, va, VM_PAGE_TO_PHYS(*m));
		va += PAGE_SIZE;
		m++;
	}
}

/*
 * Remove page mappings from kernel virtual address space. Intended for
 * temporary mappings entered by mmu_booke_qenter.
 */
static void
mmu_booke_qremove(mmu_t mmu, vm_offset_t sva, int count)
{
	vm_offset_t va;

	va = sva;
	while (count-- > 0) {
		mmu_booke_kremove(mmu, va);
		va += PAGE_SIZE;
	}
}

/*
 * Map a wired page into kernel virtual address space.
 */
static void
mmu_booke_kenter(mmu_t mmu, vm_offset_t va, vm_paddr_t pa)
{

	mmu_booke_kenter_attr(mmu, va, pa, VM_MEMATTR_DEFAULT);
}

static void
mmu_booke_kenter_attr(mmu_t mmu, vm_offset_t va, vm_paddr_t pa, vm_memattr_t ma)
{
	unsigned int pdir_idx = PDIR_IDX(va);
	unsigned int ptbl_idx = PTBL_IDX(va);
	uint32_t flags;
	pte_t *pte;

	KASSERT(((va >= VM_MIN_KERNEL_ADDRESS) &&
	    (va <= VM_MAX_KERNEL_ADDRESS)), ("mmu_booke_kenter: invalid va"));

	flags = PTE_SR | PTE_SW | PTE_SX | PTE_WIRED | PTE_VALID;
	flags |= tlb_calc_wimg(pa, ma);

	pte = &(kernel_pmap->pm_pdir[pdir_idx][ptbl_idx]);

	mtx_lock_spin(&tlbivax_mutex);
	tlb_miss_lock();

	if (PTE_ISVALID(pte)) {

		CTR1(KTR_PMAP, "%s: replacing entry!", __func__);

		/* Flush entry from TLB0 */
		tlb0_flush_entry(va);
	}

	pte->rpn = pa & ~PTE_PA_MASK;
	pte->flags = flags;

	//debugf("mmu_booke_kenter: pdir_idx = %d ptbl_idx = %d va=0x%08x "
	//    "pa=0x%08x rpn=0x%08x flags=0x%08x\n",
	//    pdir_idx, ptbl_idx, va, pa, pte->rpn, pte->flags);

	/* Flush the real memory from the instruction cache. */
	if ((flags & (PTE_I | PTE_G)) == 0) {
		__syncicache((void *)va, PAGE_SIZE);
	}

	tlb_miss_unlock();
	mtx_unlock_spin(&tlbivax_mutex);
}

/*
 * Remove a page from kernel page table.
 */
static void
mmu_booke_kremove(mmu_t mmu, vm_offset_t va)
{
	unsigned int pdir_idx = PDIR_IDX(va);
	unsigned int ptbl_idx = PTBL_IDX(va);
	pte_t *pte;

//	CTR2(KTR_PMAP, ("%s: s (va = 0x%08x)\n", __func__, va));

	KASSERT(((va >= VM_MIN_KERNEL_ADDRESS) &&
	    (va <= VM_MAX_KERNEL_ADDRESS)),
	    ("mmu_booke_kremove: invalid va"));

	pte = &(kernel_pmap->pm_pdir[pdir_idx][ptbl_idx]);

	if (!PTE_ISVALID(pte)) {

		CTR1(KTR_PMAP, "%s: invalid pte", __func__);

		return;
	}

	mtx_lock_spin(&tlbivax_mutex);
	tlb_miss_lock();

	/* Invalidate entry in TLB0, update PTE. */
	tlb0_flush_entry(va);
	pte->flags = 0;
	pte->rpn = 0;

	tlb_miss_unlock();
	mtx_unlock_spin(&tlbivax_mutex);
}

/*
 * Initialize pmap associated with process 0.
 */
static void
mmu_booke_pinit0(mmu_t mmu, pmap_t pmap)
{

	PMAP_LOCK_INIT(pmap);
	mmu_booke_pinit(mmu, pmap);
	PCPU_SET(curpmap, pmap);
}

/*
 * Initialize a preallocated and zeroed pmap structure,
 * such as one in a vmspace structure.
 */
static void
mmu_booke_pinit(mmu_t mmu, pmap_t pmap)
{
	int i;

	CTR4(KTR_PMAP, "%s: pmap = %p, proc %d '%s'", __func__, pmap,
	    curthread->td_proc->p_pid, curthread->td_proc->p_comm);

	KASSERT((pmap != kernel_pmap), ("pmap_pinit: initializing kernel_pmap"));

	for (i = 0; i < MAXCPU; i++)
		pmap->pm_tid[i] = TID_NONE;
	CPU_ZERO(&pmap->pm_active);
	bzero(&pmap->pm_stats, sizeof(pmap->pm_stats));
	bzero(&pmap->pm_pdir, sizeof(pte_t *) * PDIR_NENTRIES);
	TAILQ_INIT(&pmap->pm_ptbl_list);
}

/*
 * Release any resources held by the given physical map.
 * Called when a pmap initialized by mmu_booke_pinit is being released.
 * Should only be called if the map contains no valid mappings.
 */
static void
mmu_booke_release(mmu_t mmu, pmap_t pmap)
{

	KASSERT(pmap->pm_stats.resident_count == 0,
	    ("pmap_release: pmap resident count %ld != 0",
	    pmap->pm_stats.resident_count));
}

/*
 * Insert the given physical page at the specified virtual address in the
 * target physical map with the protection requested. If specified the page
 * will be wired down.
 */
static void
mmu_booke_enter(mmu_t mmu, pmap_t pmap, vm_offset_t va, vm_page_t m,
    vm_prot_t prot, boolean_t wired)
{

	rw_wlock(&pvh_global_lock);
	PMAP_LOCK(pmap);
	mmu_booke_enter_locked(mmu, pmap, va, m, prot, wired);
	rw_wunlock(&pvh_global_lock);
	PMAP_UNLOCK(pmap);
}

static void
mmu_booke_enter_locked(mmu_t mmu, pmap_t pmap, vm_offset_t va, vm_page_t m,
    vm_prot_t prot, boolean_t wired)
{
	pte_t *pte;
	vm_paddr_t pa;
	uint32_t flags;
	int su, sync;

	pa = VM_PAGE_TO_PHYS(m);
	su = (pmap == kernel_pmap);
	sync = 0;

	//debugf("mmu_booke_enter_locked: s (pmap=0x%08x su=%d tid=%d m=0x%08x va=0x%08x "
	//    "pa=0x%08x prot=0x%08x wired=%d)\n",
	//    (u_int32_t)pmap, su, pmap->pm_tid,
	//    (u_int32_t)m, va, pa, prot, wired);

	if (su) {
		KASSERT(((va >= virtual_avail) &&
		    (va <= VM_MAX_KERNEL_ADDRESS)),
		    ("mmu_booke_enter_locked: kernel pmap, non kernel va"));
	} else {
		KASSERT((va <= VM_MAXUSER_ADDRESS),
		    ("mmu_booke_enter_locked: user pmap, non user va"));
	}
	if ((m->oflags & VPO_UNMANAGED) == 0 && !vm_page_xbusied(m))
		VM_OBJECT_ASSERT_LOCKED(m->object);

	PMAP_LOCK_ASSERT(pmap, MA_OWNED);

	/*
	 * If there is an existing mapping, and the physical address has not
	 * changed, must be protection or wiring change.
	 */
	if (((pte = pte_find(mmu, pmap, va)) != NULL) &&
	    (PTE_ISVALID(pte)) && (PTE_PA(pte) == pa)) {

		/*
		 * Before actually updating pte->flags we calculate and
		 * prepare its new value in a helper var.
		 */
		flags = pte->flags;
		flags &= ~(PTE_UW | PTE_UX | PTE_SW | PTE_SX | PTE_MODIFIED);

		/* Wiring change, just update stats. */
		if (wired) {
			if (!PTE_ISWIRED(pte)) {
				flags |= PTE_WIRED;
				pmap->pm_stats.wired_count++;
			}
		} else {
			if (PTE_ISWIRED(pte)) {
				flags &= ~PTE_WIRED;
				pmap->pm_stats.wired_count--;
			}
		}

		if (prot & VM_PROT_WRITE) {
			/* Add write permissions. */
			flags |= PTE_SW;
			if (!su)
				flags |= PTE_UW;

			if ((flags & PTE_MANAGED) != 0)
				vm_page_aflag_set(m, PGA_WRITEABLE);
		} else {
			/* Handle modified pages, sense modify status. */

			/*
			 * The PTE_MODIFIED flag could be set by underlying
			 * TLB misses since we last read it (above), possibly
			 * other CPUs could update it so we check in the PTE
			 * directly rather than rely on that saved local flags
			 * copy.
			 */
			if (PTE_ISMODIFIED(pte))
				vm_page_dirty(m);
		}

		if (prot & VM_PROT_EXECUTE) {
			flags |= PTE_SX;
			if (!su)
				flags |= PTE_UX;

			/*
			 * Check existing flags for execute permissions: if we
			 * are turning execute permissions on, icache should
			 * be flushed.
			 */
			if ((pte->flags & (PTE_UX | PTE_SX)) == 0)
				sync++;
		}

		flags &= ~PTE_REFERENCED;

		/*
		 * The new flags value is all calculated -- only now actually
		 * update the PTE.
		 */
		mtx_lock_spin(&tlbivax_mutex);
		tlb_miss_lock();

		tlb0_flush_entry(va);
		pte->flags = flags;

		tlb_miss_unlock();
		mtx_unlock_spin(&tlbivax_mutex);

	} else {
		/*
		 * If there is an existing mapping, but it's for a different
		 * physical address, pte_enter() will delete the old mapping.
		 */
		//if ((pte != NULL) && PTE_ISVALID(pte))
		//	debugf("mmu_booke_enter_locked: replace\n");
		//else
		//	debugf("mmu_booke_enter_locked: new\n");

		/* Now set up the flags and install the new mapping. */
		flags = (PTE_SR | PTE_VALID);
		flags |= PTE_M;

		if (!su)
			flags |= PTE_UR;

		if (prot & VM_PROT_WRITE) {
			flags |= PTE_SW;
			if (!su)
				flags |= PTE_UW;

			if ((m->oflags & VPO_UNMANAGED) == 0)
				vm_page_aflag_set(m, PGA_WRITEABLE);
		}

		if (prot & VM_PROT_EXECUTE) {
			flags |= PTE_SX;
			if (!su)
				flags |= PTE_UX;
		}

		/* If it's wired, update stats. */
		if (wired) {
			pmap->pm_stats.wired_count++;
			flags |= PTE_WIRED;
		}

		pte_enter(mmu, pmap, m, va, flags);

		/* Flush the real memory from the instruction cache. */
		if (prot & VM_PROT_EXECUTE)
			sync++;
	}

	if (sync && (su || pmap == PCPU_GET(curpmap))) {
		__syncicache((void *)va, PAGE_SIZE);
		sync = 0;
	}
}

/*
 * Maps a sequence of resident pages belonging to the same object.
 * The sequence begins with the given page m_start. This page is
 * mapped at the given virtual address start. Each subsequent page is
 * mapped at a virtual address that is offset from start by the same
 * amount as the page is offset from m_start within the object. The
 * last page in the sequence is the page with the largest offset from
 * m_start that can be mapped at a virtual address less than the given
 * virtual address end. Not every virtual page between start and end
 * is mapped; only those for which a resident page exists with the
 * corresponding offset from m_start are mapped.
 */
static void
mmu_booke_enter_object(mmu_t mmu, pmap_t pmap, vm_offset_t start,
    vm_offset_t end, vm_page_t m_start, vm_prot_t prot)
{
	vm_page_t m;
	vm_pindex_t diff, psize;

	VM_OBJECT_ASSERT_LOCKED(m_start->object);

	psize = atop(end - start);
	m = m_start;
	rw_wlock(&pvh_global_lock);
	PMAP_LOCK(pmap);
	while (m != NULL && (diff = m->pindex - m_start->pindex) < psize) {
		mmu_booke_enter_locked(mmu, pmap, start + ptoa(diff), m,
		    prot & (VM_PROT_READ | VM_PROT_EXECUTE), FALSE);
		m = TAILQ_NEXT(m, listq);
	}
	rw_wunlock(&pvh_global_lock);
	PMAP_UNLOCK(pmap);
}

static void
mmu_booke_enter_quick(mmu_t mmu, pmap_t pmap, vm_offset_t va, vm_page_t m,
    vm_prot_t prot)
{

	rw_wlock(&pvh_global_lock);
	PMAP_LOCK(pmap);
	mmu_booke_enter_locked(mmu, pmap, va, m,
	    prot & (VM_PROT_READ | VM_PROT_EXECUTE), FALSE);
	rw_wunlock(&pvh_global_lock);
	PMAP_UNLOCK(pmap);
}

/*
 * Remove the given range of addresses from the specified map.
 *
 * It is assumed that the start and end are properly rounded to the page size.
 */
static void
mmu_booke_remove(mmu_t mmu, pmap_t pmap, vm_offset_t va, vm_offset_t endva)
{
	pte_t *pte;
	uint8_t hold_flag;

	int su = (pmap == kernel_pmap);

	//debugf("mmu_booke_remove: s (su = %d pmap=0x%08x tid=%d va=0x%08x endva=0x%08x)\n",
	//    su, (u_int32_t)pmap, pmap->pm_tid, va, endva);

	if (su) {
		KASSERT(((va >= virtual_avail) &&
		    (va <= VM_MAX_KERNEL_ADDRESS)),
		    ("mmu_booke_remove: kernel pmap, non kernel va"));
	} else {
		KASSERT((va <= VM_MAXUSER_ADDRESS),
		    ("mmu_booke_remove: user pmap, non user va"));
	}

	if (PMAP_REMOVE_DONE(pmap)) {
		//debugf("mmu_booke_remove: e (empty)\n");
		return;
	}

	hold_flag = PTBL_HOLD_FLAG(pmap);
	//debugf("mmu_booke_remove: hold_flag = %d\n", hold_flag);

	rw_wlock(&pvh_global_lock);
	PMAP_LOCK(pmap);
	for (; va < endva; va += PAGE_SIZE) {
		pte = pte_find(mmu, pmap, va);
		if ((pte != NULL) && PTE_ISVALID(pte))
			pte_remove(mmu, pmap, va, hold_flag);
	}
	PMAP_UNLOCK(pmap);
	rw_wunlock(&pvh_global_lock);

	//debugf("mmu_booke_remove: e\n");
}

/*
 * Remove physical page from all pmaps in which it resides.
 */
static void
mmu_booke_remove_all(mmu_t mmu, vm_page_t m)
{
	pv_entry_t pv, pvn;
	uint8_t hold_flag;

	rw_wlock(&pvh_global_lock);
	for (pv = TAILQ_FIRST(&m->md.pv_list); pv != NULL; pv = pvn) {
		pvn = TAILQ_NEXT(pv, pv_link);

		PMAP_LOCK(pv->pv_pmap);
		hold_flag = PTBL_HOLD_FLAG(pv->pv_pmap);
		pte_remove(mmu, pv->pv_pmap, pv->pv_va, hold_flag);
		PMAP_UNLOCK(pv->pv_pmap);
	}
	vm_page_aflag_clear(m, PGA_WRITEABLE);
	rw_wunlock(&pvh_global_lock);
}

/*
 * Map a range of physical addresses into kernel virtual address space.
 */
static vm_offset_t
mmu_booke_map(mmu_t mmu, vm_offset_t *virt, vm_paddr_t pa_start,
    vm_paddr_t pa_end, int prot)
{
	vm_offset_t sva = *virt;
	vm_offset_t va = sva;

	//debugf("mmu_booke_map: s (sva = 0x%08x pa_start = 0x%08x pa_end = 0x%08x)\n",
	//    sva, pa_start, pa_end);

	while (pa_start < pa_end) {
		mmu_booke_kenter(mmu, va, pa_start);
		va += PAGE_SIZE;
		pa_start += PAGE_SIZE;
	}
	*virt = va;

	//debugf("mmu_booke_map: e (va = 0x%08x)\n", va);
	return (sva);
}

/*
 * The pmap must be activated before its address space can be accessed in any
 * way.
 */
static void
mmu_booke_activate(mmu_t mmu, struct thread *td)
{
	pmap_t pmap;
	u_int cpuid;

	pmap = &td->td_proc->p_vmspace->vm_pmap;

	CTR5(KTR_PMAP, "%s: s (td = %p, proc = '%s', id = %d, pmap = 0x%08x)",
	    __func__, td, td->td_proc->p_comm, td->td_proc->p_pid, pmap);

	KASSERT((pmap != kernel_pmap), ("mmu_booke_activate: kernel_pmap!"));

	mtx_lock_spin(&sched_lock);

	cpuid = PCPU_GET(cpuid);
	CPU_SET_ATOMIC(cpuid, &pmap->pm_active);
	PCPU_SET(curpmap, pmap);

	if (pmap->pm_tid[cpuid] == TID_NONE)
		tid_alloc(pmap);

	/* Load PID0 register with pmap tid value. */
	mtspr(SPR_PID0, pmap->pm_tid[cpuid]);
	__asm __volatile("isync");

	mtx_unlock_spin(&sched_lock);

	CTR3(KTR_PMAP, "%s: e (tid = %d for '%s')", __func__,
	    pmap->pm_tid[PCPU_GET(cpuid)], td->td_proc->p_comm);
}

/*
 * Deactivate the specified process's address space.
 */
static void
mmu_booke_deactivate(mmu_t mmu, struct thread *td)
{
	pmap_t pmap;

	pmap = &td->td_proc->p_vmspace->vm_pmap;

	CTR5(KTR_PMAP, "%s: td=%p, proc = '%s', id = %d, pmap = 0x%08x",
	    __func__, td, td->td_proc->p_comm, td->td_proc->p_pid, pmap);

	CPU_CLR_ATOMIC(PCPU_GET(cpuid), &pmap->pm_active);
	PCPU_SET(curpmap, NULL);
}

/*
 * Copy the range specified by src_addr/len
 * from the source map to the range dst_addr/len
 * in the destination map.
 *
 * This routine is only advisory and need not do anything.
 */
static void
mmu_booke_copy(mmu_t mmu, pmap_t dst_pmap, pmap_t src_pmap,
    vm_offset_t dst_addr, vm_size_t len, vm_offset_t src_addr)
{

}

/*
 * Set the physical protection on the specified range of this map as requested.
 */
static void
mmu_booke_protect(mmu_t mmu, pmap_t pmap, vm_offset_t sva, vm_offset_t eva,
    vm_prot_t prot)
{
	vm_offset_t va;
	vm_page_t m;
	pte_t *pte;

	if ((prot & VM_PROT_READ) == VM_PROT_NONE) {
		mmu_booke_remove(mmu, pmap, sva, eva);
		return;
	}

	if (prot & VM_PROT_WRITE)
		return;

	PMAP_LOCK(pmap);
	for (va = sva; va < eva; va += PAGE_SIZE) {
		if ((pte = pte_find(mmu, pmap, va)) != NULL) {
			if (PTE_ISVALID(pte)) {
				m = PHYS_TO_VM_PAGE(PTE_PA(pte));

				mtx_lock_spin(&tlbivax_mutex);
				tlb_miss_lock();

				/* Handle modified pages. */
				if (PTE_ISMODIFIED(pte) && PTE_ISMANAGED(pte))
					vm_page_dirty(m);

				tlb0_flush_entry(va);
				pte->flags &= ~(PTE_UW | PTE_SW | PTE_MODIFIED);

				tlb_miss_unlock();
				mtx_unlock_spin(&tlbivax_mutex);
			}
		}
	}
	PMAP_UNLOCK(pmap);
}

/*
 * Clear the write and modified bits in each of the given page's mappings.
 */
static void
mmu_booke_remove_write(mmu_t mmu, vm_page_t m)
{
	pv_entry_t pv;
	pte_t *pte;

	KASSERT((m->oflags & VPO_UNMANAGED) == 0,
	    ("mmu_booke_remove_write: page %p is not managed", m));

	/*
	 * If the page is not exclusive busied, then PGA_WRITEABLE cannot be
	 * set by another thread while the object is locked. Thus,
	 * if PGA_WRITEABLE is clear, no page table entries need updating.
	 */
	VM_OBJECT_ASSERT_WLOCKED(m->object);
	if (!vm_page_xbusied(m) && (m->aflags & PGA_WRITEABLE) == 0)
		return;
	rw_wlock(&pvh_global_lock);
	TAILQ_FOREACH(pv, &m->md.pv_list, pv_link) {
		PMAP_LOCK(pv->pv_pmap);
		if ((pte = pte_find(mmu, pv->pv_pmap, pv->pv_va)) != NULL) {
			if (PTE_ISVALID(pte)) {
				m = PHYS_TO_VM_PAGE(PTE_PA(pte));

				mtx_lock_spin(&tlbivax_mutex);
				tlb_miss_lock();

				/* Handle modified pages. */
				if (PTE_ISMODIFIED(pte))
					vm_page_dirty(m);

				/* Flush mapping from TLB0. */
				pte->flags &= ~(PTE_UW | PTE_SW | PTE_MODIFIED);

				tlb_miss_unlock();
				mtx_unlock_spin(&tlbivax_mutex);
			}
		}
		PMAP_UNLOCK(pv->pv_pmap);
	}
	vm_page_aflag_clear(m, PGA_WRITEABLE);
	rw_wunlock(&pvh_global_lock);
}

static void
mmu_booke_sync_icache(mmu_t mmu, pmap_t pm, vm_offset_t va, vm_size_t sz)
{
	pte_t *pte;
	pmap_t pmap;
	vm_page_t m;
	vm_offset_t addr;
	vm_paddr_t pa;
	int active, valid;

	va = trunc_page(va);
	sz = round_page(sz);

	rw_wlock(&pvh_global_lock);
	pmap = PCPU_GET(curpmap);
	active = (pm == kernel_pmap || pm == pmap) ? 1 : 0;
	while (sz > 0) {
		PMAP_LOCK(pm);
		pte = pte_find(mmu, pm, va);
		valid = (pte != NULL && PTE_ISVALID(pte)) ? 1 : 0;
		if (valid)
			pa = PTE_PA(pte);
		PMAP_UNLOCK(pm);
		if (valid) {
			if (!active) {
				/* Create a mapping in the active pmap. */
				addr = 0;
				m = PHYS_TO_VM_PAGE(pa);
				PMAP_LOCK(pmap);
				pte_enter(mmu, pmap, m, addr,
				    PTE_SR | PTE_VALID | PTE_UR);
				__syncicache((void *)addr, PAGE_SIZE);
				pte_remove(mmu, pmap, addr, PTBL_UNHOLD);
				PMAP_UNLOCK(pmap);
			} else
				__syncicache((void *)va, PAGE_SIZE);
		}
		va += PAGE_SIZE;
		sz -= PAGE_SIZE;
	}
	rw_wunlock(&pvh_global_lock);
}

/*
 * Atomically extract and hold the physical page with the given
 * pmap and virtual address pair if that mapping permits the given
 * protection.
 */
static vm_page_t
mmu_booke_extract_and_hold(mmu_t mmu, pmap_t pmap, vm_offset_t va,
    vm_prot_t prot)
{
	pte_t *pte;
	vm_page_t m;
	uint32_t pte_wbit;
	vm_paddr_t pa;

	m = NULL;
	pa = 0;
	PMAP_LOCK(pmap);
retry:
	pte = pte_find(mmu, pmap, va);
	if ((pte != NULL) && PTE_ISVALID(pte)) {
		if (pmap == kernel_pmap)
			pte_wbit = PTE_SW;
		else
			pte_wbit = PTE_UW;

		if ((pte->flags & pte_wbit) || ((prot & VM_PROT_WRITE) == 0)) {
			if (vm_page_pa_tryrelock(pmap, PTE_PA(pte), &pa))
				goto retry;
			m = PHYS_TO_VM_PAGE(PTE_PA(pte));
			vm_page_hold(m);
		}
	}

	PA_UNLOCK_COND(pa);
	PMAP_UNLOCK(pmap);
	return (m);
}

/*
 * Initialize a vm_page's machine-dependent fields.
 */
static void
mmu_booke_page_init(mmu_t mmu, vm_page_t m)
{

	TAILQ_INIT(&m->md.pv_list);
}

/*
 * mmu_booke_zero_page_area zeros the specified hardware page by
 * mapping it into virtual memory and using bzero to clear
 * its contents.
 *
 * off and size must reside within a single page.
 */
static void
mmu_booke_zero_page_area(mmu_t mmu, vm_page_t m, int off, int size)
{
	vm_offset_t va;

	/* XXX KASSERT off and size are within a single page? */

	mtx_lock(&zero_page_mutex);
	va = zero_page_va;

	mmu_booke_kenter(mmu, va, VM_PAGE_TO_PHYS(m));
	bzero((caddr_t)va + off, size);
	mmu_booke_kremove(mmu, va);

	mtx_unlock(&zero_page_mutex);
}

/*
 * mmu_booke_zero_page zeros the specified hardware page.
 */
static void
mmu_booke_zero_page(mmu_t mmu, vm_page_t m)
{

	mmu_booke_zero_page_area(mmu, m, 0, PAGE_SIZE);
}

/*
 * mmu_booke_copy_page copies the specified (machine independent) page by
 * mapping the page into virtual memory and using memcpy to copy the page,
 * one machine dependent page at a time.
 */
 */
static void
mmu_booke_copy_page(mmu_t mmu, vm_page_t sm, vm_page_t dm)
{
	vm_offset_t sva, dva;

	sva = copy_page_src_va;
	dva = copy_page_dst_va;

	mtx_lock(&copy_page_mutex);
	mmu_booke_kenter(mmu, sva, VM_PAGE_TO_PHYS(sm));
	mmu_booke_kenter(mmu, dva, VM_PAGE_TO_PHYS(dm));
	memcpy((caddr_t)dva, (caddr_t)sva, PAGE_SIZE);
	mmu_booke_kremove(mmu, dva);
	mmu_booke_kremove(mmu, sva);
	mtx_unlock(&copy_page_mutex);
}

static inline void
mmu_booke_copy_pages(mmu_t mmu, vm_page_t *ma, vm_offset_t a_offset,
    vm_page_t *mb, vm_offset_t b_offset, int xfersize)
{
	void *a_cp, *b_cp;
	vm_offset_t a_pg_offset, b_pg_offset;
	int cnt;

	mtx_lock(&copy_page_mutex);
	while (xfersize > 0) {
		a_pg_offset = a_offset & PAGE_MASK;
		cnt = min(xfersize, PAGE_SIZE - a_pg_offset);
		mmu_booke_kenter(mmu, copy_page_src_va,
		    VM_PAGE_TO_PHYS(ma[a_offset >> PAGE_SHIFT]));
		a_cp = (char *)copy_page_src_va + a_pg_offset;
		b_pg_offset = b_offset & PAGE_MASK;
		cnt = min(cnt, PAGE_SIZE - b_pg_offset);
		mmu_booke_kenter(mmu, copy_page_dst_va,
		    VM_PAGE_TO_PHYS(mb[b_offset >> PAGE_SHIFT]));
		b_cp = (char *)copy_page_dst_va + b_pg_offset;
		bcopy(a_cp, b_cp, cnt);
		mmu_booke_kremove(mmu, copy_page_dst_va);
		mmu_booke_kremove(mmu, copy_page_src_va);
		a_offset += cnt;
		b_offset += cnt;
		xfersize -= cnt;
	}
	mtx_unlock(&copy_page_mutex);
}

/*
 * mmu_booke_zero_page_idle zeros the specified hardware page by mapping it
 * into virtual memory and using bzero to clear its contents. This is intended
 * to be called from the vm_pagezero process only and outside of Giant. No
 * lock is required.
 */
static void
mmu_booke_zero_page_idle(mmu_t mmu, vm_page_t m)
{
	vm_offset_t va;

	va = zero_page_idle_va;
	mmu_booke_kenter(mmu, va, VM_PAGE_TO_PHYS(m));
	bzero((caddr_t)va, PAGE_SIZE);
	mmu_booke_kremove(mmu, va);
}

/*
 * Return whether or not the specified physical page was modified
 * in any of physical maps.
 */
static boolean_t
mmu_booke_is_modified(mmu_t mmu, vm_page_t m)
{
	pte_t *pte;
	pv_entry_t pv;
	boolean_t rv;

	KASSERT((m->oflags & VPO_UNMANAGED) == 0,
	    ("mmu_booke_is_modified: page %p is not managed", m));
	rv = FALSE;

	/*
	 * If the page is not exclusive busied, then PGA_WRITEABLE cannot be
	 * concurrently set while the object is locked. Thus, if PGA_WRITEABLE
	 * is clear, no PTEs can be modified.
	 */
	VM_OBJECT_ASSERT_WLOCKED(m->object);
	if (!vm_page_xbusied(m) && (m->aflags & PGA_WRITEABLE) == 0)
		return (rv);
	rw_wlock(&pvh_global_lock);
	TAILQ_FOREACH(pv, &m->md.pv_list, pv_link) {
		PMAP_LOCK(pv->pv_pmap);
		if ((pte = pte_find(mmu, pv->pv_pmap, pv->pv_va)) != NULL &&
		    PTE_ISVALID(pte)) {
			if (PTE_ISMODIFIED(pte))
				rv = TRUE;
		}
		PMAP_UNLOCK(pv->pv_pmap);
		if (rv)
			break;
	}
	rw_wunlock(&pvh_global_lock);
	return (rv);
}

/*
 * Return whether or not the specified virtual address is eligible
 * for prefault.
 */
static boolean_t
mmu_booke_is_prefaultable(mmu_t mmu, pmap_t pmap, vm_offset_t addr)
{

	return (FALSE);
}

/*
 * Return whether or not the specified physical page was referenced
 * in any physical maps.
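 * The page's pv list is walked and PTE_REFERENCED is reported from the
 * first valid mapping that has it set; the bit itself is left intact
 * (mmu_booke_ts_referenced() is the variant that also clears it).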
2295 */ 2296static boolean_t 2297mmu_booke_is_referenced(mmu_t mmu, vm_page_t m) 2298{ 2299 pte_t *pte; 2300 pv_entry_t pv; 2301 boolean_t rv; 2302 2303 KASSERT((m->oflags & VPO_UNMANAGED) == 0, 2304 ("mmu_booke_is_referenced: page %p is not managed", m)); 2305 rv = FALSE; 2306 rw_wlock(&pvh_global_lock); 2307 TAILQ_FOREACH(pv, &m->md.pv_list, pv_link) { 2308 PMAP_LOCK(pv->pv_pmap); 2309 if ((pte = pte_find(mmu, pv->pv_pmap, pv->pv_va)) != NULL && 2310 PTE_ISVALID(pte)) { 2311 if (PTE_ISREFERENCED(pte)) 2312 rv = TRUE; 2313 } 2314 PMAP_UNLOCK(pv->pv_pmap); 2315 if (rv) 2316 break; 2317 } 2318 rw_wunlock(&pvh_global_lock); 2319 return (rv); 2320} 2321 2322/* 2323 * Clear the modify bits on the specified physical page. 2324 */ 2325static void 2326mmu_booke_clear_modify(mmu_t mmu, vm_page_t m) 2327{ 2328 pte_t *pte; 2329 pv_entry_t pv; 2330 2331 KASSERT((m->oflags & VPO_UNMANAGED) == 0, 2332 ("mmu_booke_clear_modify: page %p is not managed", m)); 2333 VM_OBJECT_ASSERT_WLOCKED(m->object); 2334 KASSERT(!vm_page_xbusied(m), 2335 ("mmu_booke_clear_modify: page %p is exclusive busied", m)); 2336 2337 /* 2338 * If the page is not PG_AWRITEABLE, then no PTEs can be modified. 2339 * If the object containing the page is locked and the page is not 2340 * exclusive busied, then PG_AWRITEABLE cannot be concurrently set. 2341 */ 2342 if ((m->aflags & PGA_WRITEABLE) == 0) 2343 return; 2344 rw_wlock(&pvh_global_lock); 2345 TAILQ_FOREACH(pv, &m->md.pv_list, pv_link) { 2346 PMAP_LOCK(pv->pv_pmap); 2347 if ((pte = pte_find(mmu, pv->pv_pmap, pv->pv_va)) != NULL && 2348 PTE_ISVALID(pte)) { 2349 mtx_lock_spin(&tlbivax_mutex); 2350 tlb_miss_lock(); 2351 2352 if (pte->flags & (PTE_SW | PTE_UW | PTE_MODIFIED)) { 2353 tlb0_flush_entry(pv->pv_va); 2354 pte->flags &= ~(PTE_SW | PTE_UW | PTE_MODIFIED | 2355 PTE_REFERENCED); 2356 } 2357 2358 tlb_miss_unlock(); 2359 mtx_unlock_spin(&tlbivax_mutex); 2360 } 2361 PMAP_UNLOCK(pv->pv_pmap); 2362 } 2363 rw_wunlock(&pvh_global_lock); 2364} 2365 2366/* 2367 * Return a count of reference bits for a page, clearing those bits. 2368 * It is not necessary for every reference bit to be cleared, but it 2369 * is necessary that 0 only be returned when there are truly no 2370 * reference bits set. 2371 * 2372 * XXX: The exact number of bits to check and clear is a matter that 2373 * should be tested and standardized at some point in the future for 2374 * optimal aging of shared pages. 2375 */ 2376static int 2377mmu_booke_ts_referenced(mmu_t mmu, vm_page_t m) 2378{ 2379 pte_t *pte; 2380 pv_entry_t pv; 2381 int count; 2382 2383 KASSERT((m->oflags & VPO_UNMANAGED) == 0, 2384 ("mmu_booke_ts_referenced: page %p is not managed", m)); 2385 count = 0; 2386 rw_wlock(&pvh_global_lock); 2387 TAILQ_FOREACH(pv, &m->md.pv_list, pv_link) { 2388 PMAP_LOCK(pv->pv_pmap); 2389 if ((pte = pte_find(mmu, pv->pv_pmap, pv->pv_va)) != NULL && 2390 PTE_ISVALID(pte)) { 2391 if (PTE_ISREFERENCED(pte)) { 2392 mtx_lock_spin(&tlbivax_mutex); 2393 tlb_miss_lock(); 2394 2395 tlb0_flush_entry(pv->pv_va); 2396 pte->flags &= ~PTE_REFERENCED; 2397 2398 tlb_miss_unlock(); 2399 mtx_unlock_spin(&tlbivax_mutex); 2400 2401 if (++count > 4) { 2402 PMAP_UNLOCK(pv->pv_pmap); 2403 break; 2404 } 2405 } 2406 } 2407 PMAP_UNLOCK(pv->pv_pmap); 2408 } 2409 rw_wunlock(&pvh_global_lock); 2410 return (count); 2411} 2412 2413/* 2414 * Change wiring attribute for a map/virtual-address pair. 
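 * Only the PTE_WIRED flag and pm_stats.wired_count are adjusted here;
 * the translation itself does not change, so no TLB invalidation is
 * required.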
2415 */ 2416static void 2417mmu_booke_change_wiring(mmu_t mmu, pmap_t pmap, vm_offset_t va, boolean_t wired) 2418{ 2419 pte_t *pte; 2420 2421 PMAP_LOCK(pmap); 2422 if ((pte = pte_find(mmu, pmap, va)) != NULL) { 2423 if (wired) { 2424 if (!PTE_ISWIRED(pte)) { 2425 pte->flags |= PTE_WIRED; 2426 pmap->pm_stats.wired_count++; 2427 } 2428 } else { 2429 if (PTE_ISWIRED(pte)) { 2430 pte->flags &= ~PTE_WIRED; 2431 pmap->pm_stats.wired_count--; 2432 } 2433 } 2434 } 2435 PMAP_UNLOCK(pmap); 2436} 2437 2438/* 2439 * Return true if the pmap's pv is one of the first 16 pvs linked to from this 2440 * page. This count may be changed upwards or downwards in the future; it is 2441 * only necessary that true be returned for a small subset of pmaps for proper 2442 * page aging. 2443 */ 2444static boolean_t 2445mmu_booke_page_exists_quick(mmu_t mmu, pmap_t pmap, vm_page_t m) 2446{ 2447 pv_entry_t pv; 2448 int loops; 2449 boolean_t rv; 2450 2451 KASSERT((m->oflags & VPO_UNMANAGED) == 0, 2452 ("mmu_booke_page_exists_quick: page %p is not managed", m)); 2453 loops = 0; 2454 rv = FALSE; 2455 rw_wlock(&pvh_global_lock); 2456 TAILQ_FOREACH(pv, &m->md.pv_list, pv_link) { 2457 if (pv->pv_pmap == pmap) { 2458 rv = TRUE; 2459 break; 2460 } 2461 if (++loops >= 16) 2462 break; 2463 } 2464 rw_wunlock(&pvh_global_lock); 2465 return (rv); 2466} 2467 2468/* 2469 * Return the number of managed mappings to the given physical page that are 2470 * wired. 2471 */ 2472static int 2473mmu_booke_page_wired_mappings(mmu_t mmu, vm_page_t m) 2474{ 2475 pv_entry_t pv; 2476 pte_t *pte; 2477 int count = 0; 2478 2479 if ((m->oflags & VPO_UNMANAGED) != 0) 2480 return (count); 2481 rw_wlock(&pvh_global_lock); 2482 TAILQ_FOREACH(pv, &m->md.pv_list, pv_link) { 2483 PMAP_LOCK(pv->pv_pmap); 2484 if ((pte = pte_find(mmu, pv->pv_pmap, pv->pv_va)) != NULL) 2485 if (PTE_ISVALID(pte) && PTE_ISWIRED(pte)) 2486 count++; 2487 PMAP_UNLOCK(pv->pv_pmap); 2488 } 2489 rw_wunlock(&pvh_global_lock); 2490 return (count); 2491} 2492 2493static int 2494mmu_booke_dev_direct_mapped(mmu_t mmu, vm_paddr_t pa, vm_size_t size) 2495{ 2496 int i; 2497 vm_offset_t va; 2498 2499 /* 2500 * This currently does not work for entries that 2501 * overlap TLB1 entries. 2502 */ 2503 for (i = 0; i < tlb1_idx; i ++) { 2504 if (tlb1_iomapped(i, pa, size, &va) == 0) 2505 return (0); 2506 } 2507 2508 return (EFAULT); 2509} 2510 2511vm_offset_t 2512mmu_booke_dumpsys_map(mmu_t mmu, struct pmap_md *md, vm_size_t ofs, 2513 vm_size_t *sz) 2514{ 2515 vm_paddr_t pa, ppa; 2516 vm_offset_t va; 2517 vm_size_t gran; 2518 2519 /* Raw physical memory dumps don't have a virtual address. */ 2520 if (md->md_vaddr == ~0UL) { 2521 /* We always map a 256MB page at 256M. */ 2522 gran = 256 * 1024 * 1024; 2523 pa = md->md_paddr + ofs; 2524 ppa = pa & ~(gran - 1); 2525 ofs = pa - ppa; 2526 va = gran; 2527 tlb1_set_entry(va, ppa, gran, _TLB_ENTRY_IO); 2528 if (*sz > (gran - ofs)) 2529 *sz = gran - ofs; 2530 return (va + ofs); 2531 } 2532 2533 /* Minidumps are based on virtual memory addresses. */ 2534 va = md->md_vaddr + ofs; 2535 if (va >= kernstart + kernsize) { 2536 gran = PAGE_SIZE - (va & PAGE_MASK); 2537 if (*sz > gran) 2538 *sz = gran; 2539 } 2540 return (va); 2541} 2542 2543void 2544mmu_booke_dumpsys_unmap(mmu_t mmu, struct pmap_md *md, vm_size_t ofs, 2545 vm_offset_t va) 2546{ 2547 2548 /* Raw physical memory dumps don't have a virtual address. 
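	 * Instead, mmu_booke_dumpsys_map() wired a temporary 256MB TLB1 entry
	 * for them; release it by invalidating the most recently allocated
	 * tlb1[] slot and writing the cleared entry back to the MMU.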
	 */
	if (md->md_vaddr == ~0UL) {
		tlb1_idx--;
		tlb1[tlb1_idx].mas1 = 0;
		tlb1[tlb1_idx].mas2 = 0;
		tlb1[tlb1_idx].mas3 = 0;
		tlb1_write_entry(tlb1_idx);
		return;
	}

	/* Minidumps are based on virtual memory addresses. */
	/* Nothing to do... */
}

struct pmap_md *
mmu_booke_scan_md(mmu_t mmu, struct pmap_md *prev)
{
	static struct pmap_md md;
	pte_t *pte;
	vm_offset_t va;

	if (dumpsys_minidump) {
		md.md_paddr = ~0UL;	/* Minidumps use virtual addresses. */
		if (prev == NULL) {
			/* 1st: kernel .data and .bss. */
			md.md_index = 1;
			md.md_vaddr = trunc_page((uintptr_t)_etext);
			md.md_size = round_page((uintptr_t)_end) - md.md_vaddr;
			return (&md);
		}
		switch (prev->md_index) {
		case 1:
			/* 2nd: msgbuf and tables (see pmap_bootstrap()). */
			md.md_index = 2;
			md.md_vaddr = data_start;
			md.md_size = data_end - data_start;
			break;
		case 2:
			/* 3rd: kernel VM. */
			va = prev->md_vaddr + prev->md_size;
			/* Find start of next chunk (from va). */
			while (va < virtual_end) {
				/* Don't dump the buffer cache. */
				if (va >= kmi.buffer_sva &&
				    va < kmi.buffer_eva) {
					va = kmi.buffer_eva;
					continue;
				}
				pte = pte_find(mmu, kernel_pmap, va);
				if (pte != NULL && PTE_ISVALID(pte))
					break;
				va += PAGE_SIZE;
			}
			if (va < virtual_end) {
				md.md_vaddr = va;
				va += PAGE_SIZE;
				/* Find last page in chunk. */
				while (va < virtual_end) {
					/* Don't run into the buffer cache. */
					if (va == kmi.buffer_sva)
						break;
					pte = pte_find(mmu, kernel_pmap, va);
					if (pte == NULL || !PTE_ISVALID(pte))
						break;
					va += PAGE_SIZE;
				}
				md.md_size = va - md.md_vaddr;
				break;
			}
			md.md_index = 3;
			/* FALLTHROUGH */
		default:
			return (NULL);
		}
	} else { /* Full (physical) dumps. */
		mem_regions(&physmem_regions, &physmem_regions_sz,
		    &availmem_regions, &availmem_regions_sz);

		if (prev == NULL) {
			/* first physical chunk. */
			md.md_paddr = physmem_regions[0].mr_start;
			md.md_size = physmem_regions[0].mr_size;
			md.md_vaddr = ~0UL;
			md.md_index = 1;
		} else if (md.md_index < physmem_regions_sz) {
			md.md_paddr = physmem_regions[md.md_index].mr_start;
			md.md_size = physmem_regions[md.md_index].mr_size;
			md.md_vaddr = ~0UL;
			md.md_index++;
		} else {
			/* There's no next physical chunk. */
			return (NULL);
		}
	}

	return (&md);
}

/*
 * Map a set of physical memory pages into the kernel virtual address space.
 * Return a pointer to where it is mapped. This routine is intended to be used
 * for mapping device memory, NOT real memory.
 */
static void *
mmu_booke_mapdev(mmu_t mmu, vm_paddr_t pa, vm_size_t size)
{

	return (mmu_booke_mapdev_attr(mmu, pa, size, VM_MEMATTR_DEFAULT));
}

static void *
mmu_booke_mapdev_attr(mmu_t mmu, vm_paddr_t pa, vm_size_t size, vm_memattr_t ma)
{
	void *res;
	uintptr_t va;
	vm_size_t sz;
	int i;

	/*
	 * Check if this is premapped in TLB1. Note: this should probably also
	 * check whether a sequence of TLB1 entries exists that matches the
	 * requirement, but for now it only checks the easy case.
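	 * ("Easy case" means the whole [pa, pa + size) range fits inside a
	 * single existing TLB1 entry; a range that would span several entries
	 * is simply treated as not premapped.)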
2670 */ 2671 if (ma == VM_MEMATTR_DEFAULT) { 2672 for (i = 0; i < tlb1_idx; i++) { 2673 if (!(tlb1[i].mas1 & MAS1_VALID)) 2674 continue; 2675 if (pa >= tlb1[i].phys && 2676 (pa + size) <= (tlb1[i].phys + tlb1[i].size)) 2677 return (void *)(tlb1[i].virt + 2678 (pa - tlb1[i].phys)); 2679 } 2680 } 2681 2682 size = roundup(size, PAGE_SIZE); 2683
| 193 194static tlbtid_t tid_alloc(struct pmap *); 195 196static void tlb_print_entry(int, uint32_t, uint32_t, uint32_t, uint32_t); 197 198static int tlb1_set_entry(vm_offset_t, vm_offset_t, vm_size_t, uint32_t); 199static void tlb1_write_entry(unsigned int); 200static int tlb1_iomapped(int, vm_paddr_t, vm_size_t, vm_offset_t *); 201static vm_size_t tlb1_mapin_region(vm_offset_t, vm_paddr_t, vm_size_t); 202 203static vm_size_t tsize2size(unsigned int); 204static unsigned int size2tsize(vm_size_t); 205static unsigned int ilog2(unsigned int); 206 207static void set_mas4_defaults(void); 208 209static inline void tlb0_flush_entry(vm_offset_t); 210static inline unsigned int tlb0_tableidx(vm_offset_t, unsigned int); 211 212/**************************************************************************/ 213/* Page table management */ 214/**************************************************************************/ 215 216static struct rwlock_padalign pvh_global_lock; 217 218/* Data for the pv entry allocation mechanism */ 219static uma_zone_t pvzone; 220static int pv_entry_count = 0, pv_entry_max = 0, pv_entry_high_water = 0; 221 222#define PV_ENTRY_ZONE_MIN 2048 /* min pv entries in uma zone */ 223 224#ifndef PMAP_SHPGPERPROC 225#define PMAP_SHPGPERPROC 200 226#endif 227 228static void ptbl_init(void); 229static struct ptbl_buf *ptbl_buf_alloc(void); 230static void ptbl_buf_free(struct ptbl_buf *); 231static void ptbl_free_pmap_ptbl(pmap_t, pte_t *); 232 233static pte_t *ptbl_alloc(mmu_t, pmap_t, unsigned int); 234static void ptbl_free(mmu_t, pmap_t, unsigned int); 235static void ptbl_hold(mmu_t, pmap_t, unsigned int); 236static int ptbl_unhold(mmu_t, pmap_t, unsigned int); 237 238static vm_paddr_t pte_vatopa(mmu_t, pmap_t, vm_offset_t); 239static pte_t *pte_find(mmu_t, pmap_t, vm_offset_t); 240static void pte_enter(mmu_t, pmap_t, vm_page_t, vm_offset_t, uint32_t); 241static int pte_remove(mmu_t, pmap_t, vm_offset_t, uint8_t); 242 243static pv_entry_t pv_alloc(void); 244static void pv_free(pv_entry_t); 245static void pv_insert(pmap_t, vm_offset_t, vm_page_t); 246static void pv_remove(pmap_t, vm_offset_t, vm_page_t); 247 248/* Number of kva ptbl buffers, each covering one ptbl (PTBL_PAGES). */ 249#define PTBL_BUFS (128 * 16) 250 251struct ptbl_buf { 252 TAILQ_ENTRY(ptbl_buf) link; /* list link */ 253 vm_offset_t kva; /* va of mapping */ 254}; 255 256/* ptbl free list and a lock used for access synchronization. */ 257static TAILQ_HEAD(, ptbl_buf) ptbl_buf_freelist; 258static struct mtx ptbl_buf_freelist_lock; 259 260/* Base address of kva space allocated fot ptbl bufs. */ 261static vm_offset_t ptbl_buf_pool_vabase; 262 263/* Pointer to ptbl_buf structures. 
*/ 264static struct ptbl_buf *ptbl_bufs; 265 266void pmap_bootstrap_ap(volatile uint32_t *); 267 268/* 269 * Kernel MMU interface 270 */ 271static void mmu_booke_change_wiring(mmu_t, pmap_t, vm_offset_t, boolean_t); 272static void mmu_booke_clear_modify(mmu_t, vm_page_t); 273static void mmu_booke_copy(mmu_t, pmap_t, pmap_t, vm_offset_t, 274 vm_size_t, vm_offset_t); 275static void mmu_booke_copy_page(mmu_t, vm_page_t, vm_page_t); 276static void mmu_booke_copy_pages(mmu_t, vm_page_t *, 277 vm_offset_t, vm_page_t *, vm_offset_t, int); 278static void mmu_booke_enter(mmu_t, pmap_t, vm_offset_t, vm_page_t, 279 vm_prot_t, boolean_t); 280static void mmu_booke_enter_object(mmu_t, pmap_t, vm_offset_t, vm_offset_t, 281 vm_page_t, vm_prot_t); 282static void mmu_booke_enter_quick(mmu_t, pmap_t, vm_offset_t, vm_page_t, 283 vm_prot_t); 284static vm_paddr_t mmu_booke_extract(mmu_t, pmap_t, vm_offset_t); 285static vm_page_t mmu_booke_extract_and_hold(mmu_t, pmap_t, vm_offset_t, 286 vm_prot_t); 287static void mmu_booke_init(mmu_t); 288static boolean_t mmu_booke_is_modified(mmu_t, vm_page_t); 289static boolean_t mmu_booke_is_prefaultable(mmu_t, pmap_t, vm_offset_t); 290static boolean_t mmu_booke_is_referenced(mmu_t, vm_page_t); 291static int mmu_booke_ts_referenced(mmu_t, vm_page_t); 292static vm_offset_t mmu_booke_map(mmu_t, vm_offset_t *, vm_paddr_t, vm_paddr_t, 293 int); 294static int mmu_booke_mincore(mmu_t, pmap_t, vm_offset_t, 295 vm_paddr_t *); 296static void mmu_booke_object_init_pt(mmu_t, pmap_t, vm_offset_t, 297 vm_object_t, vm_pindex_t, vm_size_t); 298static boolean_t mmu_booke_page_exists_quick(mmu_t, pmap_t, vm_page_t); 299static void mmu_booke_page_init(mmu_t, vm_page_t); 300static int mmu_booke_page_wired_mappings(mmu_t, vm_page_t); 301static void mmu_booke_pinit(mmu_t, pmap_t); 302static void mmu_booke_pinit0(mmu_t, pmap_t); 303static void mmu_booke_protect(mmu_t, pmap_t, vm_offset_t, vm_offset_t, 304 vm_prot_t); 305static void mmu_booke_qenter(mmu_t, vm_offset_t, vm_page_t *, int); 306static void mmu_booke_qremove(mmu_t, vm_offset_t, int); 307static void mmu_booke_release(mmu_t, pmap_t); 308static void mmu_booke_remove(mmu_t, pmap_t, vm_offset_t, vm_offset_t); 309static void mmu_booke_remove_all(mmu_t, vm_page_t); 310static void mmu_booke_remove_write(mmu_t, vm_page_t); 311static void mmu_booke_zero_page(mmu_t, vm_page_t); 312static void mmu_booke_zero_page_area(mmu_t, vm_page_t, int, int); 313static void mmu_booke_zero_page_idle(mmu_t, vm_page_t); 314static void mmu_booke_activate(mmu_t, struct thread *); 315static void mmu_booke_deactivate(mmu_t, struct thread *); 316static void mmu_booke_bootstrap(mmu_t, vm_offset_t, vm_offset_t); 317static void *mmu_booke_mapdev(mmu_t, vm_paddr_t, vm_size_t); 318static void *mmu_booke_mapdev_attr(mmu_t, vm_paddr_t, vm_size_t, vm_memattr_t); 319static void mmu_booke_unmapdev(mmu_t, vm_offset_t, vm_size_t); 320static vm_paddr_t mmu_booke_kextract(mmu_t, vm_offset_t); 321static void mmu_booke_kenter(mmu_t, vm_offset_t, vm_paddr_t); 322static void mmu_booke_kenter_attr(mmu_t, vm_offset_t, vm_paddr_t, vm_memattr_t); 323static void mmu_booke_kremove(mmu_t, vm_offset_t); 324static boolean_t mmu_booke_dev_direct_mapped(mmu_t, vm_paddr_t, vm_size_t); 325static void mmu_booke_sync_icache(mmu_t, pmap_t, vm_offset_t, 326 vm_size_t); 327static vm_offset_t mmu_booke_dumpsys_map(mmu_t, struct pmap_md *, 328 vm_size_t, vm_size_t *); 329static void mmu_booke_dumpsys_unmap(mmu_t, struct pmap_md *, 330 vm_size_t, vm_offset_t); 331static struct pmap_md 
*mmu_booke_scan_md(mmu_t, struct pmap_md *); 332 333static mmu_method_t mmu_booke_methods[] = { 334 /* pmap dispatcher interface */ 335 MMUMETHOD(mmu_change_wiring, mmu_booke_change_wiring), 336 MMUMETHOD(mmu_clear_modify, mmu_booke_clear_modify), 337 MMUMETHOD(mmu_copy, mmu_booke_copy), 338 MMUMETHOD(mmu_copy_page, mmu_booke_copy_page), 339 MMUMETHOD(mmu_copy_pages, mmu_booke_copy_pages), 340 MMUMETHOD(mmu_enter, mmu_booke_enter), 341 MMUMETHOD(mmu_enter_object, mmu_booke_enter_object), 342 MMUMETHOD(mmu_enter_quick, mmu_booke_enter_quick), 343 MMUMETHOD(mmu_extract, mmu_booke_extract), 344 MMUMETHOD(mmu_extract_and_hold, mmu_booke_extract_and_hold), 345 MMUMETHOD(mmu_init, mmu_booke_init), 346 MMUMETHOD(mmu_is_modified, mmu_booke_is_modified), 347 MMUMETHOD(mmu_is_prefaultable, mmu_booke_is_prefaultable), 348 MMUMETHOD(mmu_is_referenced, mmu_booke_is_referenced), 349 MMUMETHOD(mmu_ts_referenced, mmu_booke_ts_referenced), 350 MMUMETHOD(mmu_map, mmu_booke_map), 351 MMUMETHOD(mmu_mincore, mmu_booke_mincore), 352 MMUMETHOD(mmu_object_init_pt, mmu_booke_object_init_pt), 353 MMUMETHOD(mmu_page_exists_quick,mmu_booke_page_exists_quick), 354 MMUMETHOD(mmu_page_init, mmu_booke_page_init), 355 MMUMETHOD(mmu_page_wired_mappings, mmu_booke_page_wired_mappings), 356 MMUMETHOD(mmu_pinit, mmu_booke_pinit), 357 MMUMETHOD(mmu_pinit0, mmu_booke_pinit0), 358 MMUMETHOD(mmu_protect, mmu_booke_protect), 359 MMUMETHOD(mmu_qenter, mmu_booke_qenter), 360 MMUMETHOD(mmu_qremove, mmu_booke_qremove), 361 MMUMETHOD(mmu_release, mmu_booke_release), 362 MMUMETHOD(mmu_remove, mmu_booke_remove), 363 MMUMETHOD(mmu_remove_all, mmu_booke_remove_all), 364 MMUMETHOD(mmu_remove_write, mmu_booke_remove_write), 365 MMUMETHOD(mmu_sync_icache, mmu_booke_sync_icache), 366 MMUMETHOD(mmu_zero_page, mmu_booke_zero_page), 367 MMUMETHOD(mmu_zero_page_area, mmu_booke_zero_page_area), 368 MMUMETHOD(mmu_zero_page_idle, mmu_booke_zero_page_idle), 369 MMUMETHOD(mmu_activate, mmu_booke_activate), 370 MMUMETHOD(mmu_deactivate, mmu_booke_deactivate), 371 372 /* Internal interfaces */ 373 MMUMETHOD(mmu_bootstrap, mmu_booke_bootstrap), 374 MMUMETHOD(mmu_dev_direct_mapped,mmu_booke_dev_direct_mapped), 375 MMUMETHOD(mmu_mapdev, mmu_booke_mapdev), 376 MMUMETHOD(mmu_mapdev_attr, mmu_booke_mapdev_attr), 377 MMUMETHOD(mmu_kenter, mmu_booke_kenter), 378 MMUMETHOD(mmu_kenter_attr, mmu_booke_kenter_attr), 379 MMUMETHOD(mmu_kextract, mmu_booke_kextract), 380/* MMUMETHOD(mmu_kremove, mmu_booke_kremove), */ 381 MMUMETHOD(mmu_unmapdev, mmu_booke_unmapdev), 382 383 /* dumpsys() support */ 384 MMUMETHOD(mmu_dumpsys_map, mmu_booke_dumpsys_map), 385 MMUMETHOD(mmu_dumpsys_unmap, mmu_booke_dumpsys_unmap), 386 MMUMETHOD(mmu_scan_md, mmu_booke_scan_md), 387 388 { 0, 0 } 389}; 390 391MMU_DEF(booke_mmu, MMU_TYPE_BOOKE, mmu_booke_methods, 0); 392 393static __inline uint32_t 394tlb_calc_wimg(vm_offset_t pa, vm_memattr_t ma) 395{ 396 uint32_t attrib; 397 int i; 398 399 if (ma != VM_MEMATTR_DEFAULT) { 400 switch (ma) { 401 case VM_MEMATTR_UNCACHEABLE: 402 return (PTE_I | PTE_G); 403 case VM_MEMATTR_WRITE_COMBINING: 404 case VM_MEMATTR_WRITE_BACK: 405 case VM_MEMATTR_PREFETCHABLE: 406 return (PTE_I); 407 case VM_MEMATTR_WRITE_THROUGH: 408 return (PTE_W | PTE_M); 409 } 410 } 411 412 /* 413 * Assume the page is cache inhibited and access is guarded unless 414 * it's in our available memory array. 
415 */ 416 attrib = _TLB_ENTRY_IO; 417 for (i = 0; i < physmem_regions_sz; i++) { 418 if ((pa >= physmem_regions[i].mr_start) && 419 (pa < (physmem_regions[i].mr_start + 420 physmem_regions[i].mr_size))) { 421 attrib = _TLB_ENTRY_MEM; 422 break; 423 } 424 } 425 426 return (attrib); 427} 428 429static inline void 430tlb_miss_lock(void) 431{ 432#ifdef SMP 433 struct pcpu *pc; 434 435 if (!smp_started) 436 return; 437 438 STAILQ_FOREACH(pc, &cpuhead, pc_allcpu) { 439 if (pc != pcpup) { 440 441 CTR3(KTR_PMAP, "%s: tlb miss LOCK of CPU=%d, " 442 "tlb_lock=%p", __func__, pc->pc_cpuid, pc->pc_booke_tlb_lock); 443 444 KASSERT((pc->pc_cpuid != PCPU_GET(cpuid)), 445 ("tlb_miss_lock: tried to lock self")); 446 447 tlb_lock(pc->pc_booke_tlb_lock); 448 449 CTR1(KTR_PMAP, "%s: locked", __func__); 450 } 451 } 452#endif 453} 454 455static inline void 456tlb_miss_unlock(void) 457{ 458#ifdef SMP 459 struct pcpu *pc; 460 461 if (!smp_started) 462 return; 463 464 STAILQ_FOREACH(pc, &cpuhead, pc_allcpu) { 465 if (pc != pcpup) { 466 CTR2(KTR_PMAP, "%s: tlb miss UNLOCK of CPU=%d", 467 __func__, pc->pc_cpuid); 468 469 tlb_unlock(pc->pc_booke_tlb_lock); 470 471 CTR1(KTR_PMAP, "%s: unlocked", __func__); 472 } 473 } 474#endif 475} 476 477/* Return number of entries in TLB0. */ 478static __inline void 479tlb0_get_tlbconf(void) 480{ 481 uint32_t tlb0_cfg; 482 483 tlb0_cfg = mfspr(SPR_TLB0CFG); 484 tlb0_entries = tlb0_cfg & TLBCFG_NENTRY_MASK; 485 tlb0_ways = (tlb0_cfg & TLBCFG_ASSOC_MASK) >> TLBCFG_ASSOC_SHIFT; 486 tlb0_entries_per_way = tlb0_entries / tlb0_ways; 487} 488 489/* Initialize pool of kva ptbl buffers. */ 490static void 491ptbl_init(void) 492{ 493 int i; 494 495 CTR3(KTR_PMAP, "%s: s (ptbl_bufs = 0x%08x size 0x%08x)", __func__, 496 (uint32_t)ptbl_bufs, sizeof(struct ptbl_buf) * PTBL_BUFS); 497 CTR3(KTR_PMAP, "%s: s (ptbl_buf_pool_vabase = 0x%08x size = 0x%08x)", 498 __func__, ptbl_buf_pool_vabase, PTBL_BUFS * PTBL_PAGES * PAGE_SIZE); 499 500 mtx_init(&ptbl_buf_freelist_lock, "ptbl bufs lock", NULL, MTX_DEF); 501 TAILQ_INIT(&ptbl_buf_freelist); 502 503 for (i = 0; i < PTBL_BUFS; i++) { 504 ptbl_bufs[i].kva = ptbl_buf_pool_vabase + i * PTBL_PAGES * PAGE_SIZE; 505 TAILQ_INSERT_TAIL(&ptbl_buf_freelist, &ptbl_bufs[i], link); 506 } 507} 508 509/* Get a ptbl_buf from the freelist. */ 510static struct ptbl_buf * 511ptbl_buf_alloc(void) 512{ 513 struct ptbl_buf *buf; 514 515 mtx_lock(&ptbl_buf_freelist_lock); 516 buf = TAILQ_FIRST(&ptbl_buf_freelist); 517 if (buf != NULL) 518 TAILQ_REMOVE(&ptbl_buf_freelist, buf, link); 519 mtx_unlock(&ptbl_buf_freelist_lock); 520 521 CTR2(KTR_PMAP, "%s: buf = %p", __func__, buf); 522 523 return (buf); 524} 525 526/* Return ptbl buff to free pool. */ 527static void 528ptbl_buf_free(struct ptbl_buf *buf) 529{ 530 531 CTR2(KTR_PMAP, "%s: buf = %p", __func__, buf); 532 533 mtx_lock(&ptbl_buf_freelist_lock); 534 TAILQ_INSERT_TAIL(&ptbl_buf_freelist, buf, link); 535 mtx_unlock(&ptbl_buf_freelist_lock); 536} 537 538/* 539 * Search the list of allocated ptbl bufs and find on list of allocated ptbls 540 */ 541static void 542ptbl_free_pmap_ptbl(pmap_t pmap, pte_t *ptbl) 543{ 544 struct ptbl_buf *pbuf; 545 546 CTR2(KTR_PMAP, "%s: ptbl = %p", __func__, ptbl); 547 548 PMAP_LOCK_ASSERT(pmap, MA_OWNED); 549 550 TAILQ_FOREACH(pbuf, &pmap->pm_ptbl_list, link) 551 if (pbuf->kva == (vm_offset_t)ptbl) { 552 /* Remove from pmap ptbl buf list. */ 553 TAILQ_REMOVE(&pmap->pm_ptbl_list, pbuf, link); 554 555 /* Free corresponding ptbl buf. 
*/ 556 ptbl_buf_free(pbuf); 557 break; 558 } 559} 560 561/* Allocate page table. */ 562static pte_t * 563ptbl_alloc(mmu_t mmu, pmap_t pmap, unsigned int pdir_idx) 564{ 565 vm_page_t mtbl[PTBL_PAGES]; 566 vm_page_t m; 567 struct ptbl_buf *pbuf; 568 unsigned int pidx; 569 pte_t *ptbl; 570 int i; 571 572 CTR4(KTR_PMAP, "%s: pmap = %p su = %d pdir_idx = %d", __func__, pmap, 573 (pmap == kernel_pmap), pdir_idx); 574 575 KASSERT((pdir_idx <= (VM_MAXUSER_ADDRESS / PDIR_SIZE)), 576 ("ptbl_alloc: invalid pdir_idx")); 577 KASSERT((pmap->pm_pdir[pdir_idx] == NULL), 578 ("pte_alloc: valid ptbl entry exists!")); 579 580 pbuf = ptbl_buf_alloc(); 581 if (pbuf == NULL) 582 panic("pte_alloc: couldn't alloc kernel virtual memory"); 583 584 ptbl = (pte_t *)pbuf->kva; 585 586 CTR2(KTR_PMAP, "%s: ptbl kva = %p", __func__, ptbl); 587 588 /* Allocate ptbl pages, this will sleep! */ 589 for (i = 0; i < PTBL_PAGES; i++) { 590 pidx = (PTBL_PAGES * pdir_idx) + i; 591 while ((m = vm_page_alloc(NULL, pidx, 592 VM_ALLOC_NOOBJ | VM_ALLOC_WIRED)) == NULL) { 593 594 PMAP_UNLOCK(pmap); 595 rw_wunlock(&pvh_global_lock); 596 VM_WAIT; 597 rw_wlock(&pvh_global_lock); 598 PMAP_LOCK(pmap); 599 } 600 mtbl[i] = m; 601 } 602 603 /* Map allocated pages into kernel_pmap. */ 604 mmu_booke_qenter(mmu, (vm_offset_t)ptbl, mtbl, PTBL_PAGES); 605 606 /* Zero whole ptbl. */ 607 bzero((caddr_t)ptbl, PTBL_PAGES * PAGE_SIZE); 608 609 /* Add pbuf to the pmap ptbl bufs list. */ 610 TAILQ_INSERT_TAIL(&pmap->pm_ptbl_list, pbuf, link); 611 612 return (ptbl); 613} 614 615/* Free ptbl pages and invalidate pdir entry. */ 616static void 617ptbl_free(mmu_t mmu, pmap_t pmap, unsigned int pdir_idx) 618{ 619 pte_t *ptbl; 620 vm_paddr_t pa; 621 vm_offset_t va; 622 vm_page_t m; 623 int i; 624 625 CTR4(KTR_PMAP, "%s: pmap = %p su = %d pdir_idx = %d", __func__, pmap, 626 (pmap == kernel_pmap), pdir_idx); 627 628 KASSERT((pdir_idx <= (VM_MAXUSER_ADDRESS / PDIR_SIZE)), 629 ("ptbl_free: invalid pdir_idx")); 630 631 ptbl = pmap->pm_pdir[pdir_idx]; 632 633 CTR2(KTR_PMAP, "%s: ptbl = %p", __func__, ptbl); 634 635 KASSERT((ptbl != NULL), ("ptbl_free: null ptbl")); 636 637 /* 638 * Invalidate the pdir entry as soon as possible, so that other CPUs 639 * don't attempt to look up the page tables we are releasing. 640 */ 641 mtx_lock_spin(&tlbivax_mutex); 642 tlb_miss_lock(); 643 644 pmap->pm_pdir[pdir_idx] = NULL; 645 646 tlb_miss_unlock(); 647 mtx_unlock_spin(&tlbivax_mutex); 648 649 for (i = 0; i < PTBL_PAGES; i++) { 650 va = ((vm_offset_t)ptbl + (i * PAGE_SIZE)); 651 pa = pte_vatopa(mmu, kernel_pmap, va); 652 m = PHYS_TO_VM_PAGE(pa); 653 vm_page_free_zero(m); 654 atomic_subtract_int(&cnt.v_wire_count, 1); 655 mmu_booke_kremove(mmu, va); 656 } 657 658 ptbl_free_pmap_ptbl(pmap, ptbl); 659} 660 661/* 662 * Decrement ptbl pages hold count and attempt to free ptbl pages. 663 * Called when removing pte entry from ptbl. 664 * 665 * Return 1 if ptbl pages were freed. 
666 */ 667static int 668ptbl_unhold(mmu_t mmu, pmap_t pmap, unsigned int pdir_idx) 669{ 670 pte_t *ptbl; 671 vm_paddr_t pa; 672 vm_page_t m; 673 int i; 674 675 CTR4(KTR_PMAP, "%s: pmap = %p su = %d pdir_idx = %d", __func__, pmap, 676 (pmap == kernel_pmap), pdir_idx); 677 678 KASSERT((pdir_idx <= (VM_MAXUSER_ADDRESS / PDIR_SIZE)), 679 ("ptbl_unhold: invalid pdir_idx")); 680 KASSERT((pmap != kernel_pmap), 681 ("ptbl_unhold: unholding kernel ptbl!")); 682 683 ptbl = pmap->pm_pdir[pdir_idx]; 684 685 //debugf("ptbl_unhold: ptbl = 0x%08x\n", (u_int32_t)ptbl); 686 KASSERT(((vm_offset_t)ptbl >= VM_MIN_KERNEL_ADDRESS), 687 ("ptbl_unhold: non kva ptbl")); 688 689 /* decrement hold count */ 690 for (i = 0; i < PTBL_PAGES; i++) { 691 pa = pte_vatopa(mmu, kernel_pmap, 692 (vm_offset_t)ptbl + (i * PAGE_SIZE)); 693 m = PHYS_TO_VM_PAGE(pa); 694 m->wire_count--; 695 } 696 697 /* 698 * Free ptbl pages if there are no pte etries in this ptbl. 699 * wire_count has the same value for all ptbl pages, so check the last 700 * page. 701 */ 702 if (m->wire_count == 0) { 703 ptbl_free(mmu, pmap, pdir_idx); 704 705 //debugf("ptbl_unhold: e (freed ptbl)\n"); 706 return (1); 707 } 708 709 return (0); 710} 711 712/* 713 * Increment hold count for ptbl pages. This routine is used when a new pte 714 * entry is being inserted into the ptbl. 715 */ 716static void 717ptbl_hold(mmu_t mmu, pmap_t pmap, unsigned int pdir_idx) 718{ 719 vm_paddr_t pa; 720 pte_t *ptbl; 721 vm_page_t m; 722 int i; 723 724 CTR3(KTR_PMAP, "%s: pmap = %p pdir_idx = %d", __func__, pmap, 725 pdir_idx); 726 727 KASSERT((pdir_idx <= (VM_MAXUSER_ADDRESS / PDIR_SIZE)), 728 ("ptbl_hold: invalid pdir_idx")); 729 KASSERT((pmap != kernel_pmap), 730 ("ptbl_hold: holding kernel ptbl!")); 731 732 ptbl = pmap->pm_pdir[pdir_idx]; 733 734 KASSERT((ptbl != NULL), ("ptbl_hold: null ptbl")); 735 736 for (i = 0; i < PTBL_PAGES; i++) { 737 pa = pte_vatopa(mmu, kernel_pmap, 738 (vm_offset_t)ptbl + (i * PAGE_SIZE)); 739 m = PHYS_TO_VM_PAGE(pa); 740 m->wire_count++; 741 } 742} 743 744/* Allocate pv_entry structure. */ 745pv_entry_t 746pv_alloc(void) 747{ 748 pv_entry_t pv; 749 750 pv_entry_count++; 751 if (pv_entry_count > pv_entry_high_water) 752 pagedaemon_wakeup(); 753 pv = uma_zalloc(pvzone, M_NOWAIT); 754 755 return (pv); 756} 757 758/* Free pv_entry structure. */ 759static __inline void 760pv_free(pv_entry_t pve) 761{ 762 763 pv_entry_count--; 764 uma_zfree(pvzone, pve); 765} 766 767 768/* Allocate and initialize pv_entry structure. */ 769static void 770pv_insert(pmap_t pmap, vm_offset_t va, vm_page_t m) 771{ 772 pv_entry_t pve; 773 774 //int su = (pmap == kernel_pmap); 775 //debugf("pv_insert: s (su = %d pmap = 0x%08x va = 0x%08x m = 0x%08x)\n", su, 776 // (u_int32_t)pmap, va, (u_int32_t)m); 777 778 pve = pv_alloc(); 779 if (pve == NULL) 780 panic("pv_insert: no pv entries!"); 781 782 pve->pv_pmap = pmap; 783 pve->pv_va = va; 784 785 /* add to pv_list */ 786 PMAP_LOCK_ASSERT(pmap, MA_OWNED); 787 rw_assert(&pvh_global_lock, RA_WLOCKED); 788 789 TAILQ_INSERT_TAIL(&m->md.pv_list, pve, pv_link); 790 791 //debugf("pv_insert: e\n"); 792} 793 794/* Destroy pv entry. 
*/ 795static void 796pv_remove(pmap_t pmap, vm_offset_t va, vm_page_t m) 797{ 798 pv_entry_t pve; 799 800 //int su = (pmap == kernel_pmap); 801 //debugf("pv_remove: s (su = %d pmap = 0x%08x va = 0x%08x)\n", su, (u_int32_t)pmap, va); 802 803 PMAP_LOCK_ASSERT(pmap, MA_OWNED); 804 rw_assert(&pvh_global_lock, RA_WLOCKED); 805 806 /* find pv entry */ 807 TAILQ_FOREACH(pve, &m->md.pv_list, pv_link) { 808 if ((pmap == pve->pv_pmap) && (va == pve->pv_va)) { 809 /* remove from pv_list */ 810 TAILQ_REMOVE(&m->md.pv_list, pve, pv_link); 811 if (TAILQ_EMPTY(&m->md.pv_list)) 812 vm_page_aflag_clear(m, PGA_WRITEABLE); 813 814 /* free pv entry struct */ 815 pv_free(pve); 816 break; 817 } 818 } 819 820 //debugf("pv_remove: e\n"); 821} 822 823/* 824 * Clean pte entry, try to free page table page if requested. 825 * 826 * Return 1 if ptbl pages were freed, otherwise return 0. 827 */ 828static int 829pte_remove(mmu_t mmu, pmap_t pmap, vm_offset_t va, uint8_t flags) 830{ 831 unsigned int pdir_idx = PDIR_IDX(va); 832 unsigned int ptbl_idx = PTBL_IDX(va); 833 vm_page_t m; 834 pte_t *ptbl; 835 pte_t *pte; 836 837 //int su = (pmap == kernel_pmap); 838 //debugf("pte_remove: s (su = %d pmap = 0x%08x va = 0x%08x flags = %d)\n", 839 // su, (u_int32_t)pmap, va, flags); 840 841 ptbl = pmap->pm_pdir[pdir_idx]; 842 KASSERT(ptbl, ("pte_remove: null ptbl")); 843 844 pte = &ptbl[ptbl_idx]; 845 846 if (pte == NULL || !PTE_ISVALID(pte)) 847 return (0); 848 849 if (PTE_ISWIRED(pte)) 850 pmap->pm_stats.wired_count--; 851 852 /* Handle managed entry. */ 853 if (PTE_ISMANAGED(pte)) { 854 /* Get vm_page_t for mapped pte. */ 855 m = PHYS_TO_VM_PAGE(PTE_PA(pte)); 856 857 if (PTE_ISMODIFIED(pte)) 858 vm_page_dirty(m); 859 860 if (PTE_ISREFERENCED(pte)) 861 vm_page_aflag_set(m, PGA_REFERENCED); 862 863 pv_remove(pmap, va, m); 864 } 865 866 mtx_lock_spin(&tlbivax_mutex); 867 tlb_miss_lock(); 868 869 tlb0_flush_entry(va); 870 pte->flags = 0; 871 pte->rpn = 0; 872 873 tlb_miss_unlock(); 874 mtx_unlock_spin(&tlbivax_mutex); 875 876 pmap->pm_stats.resident_count--; 877 878 if (flags & PTBL_UNHOLD) { 879 //debugf("pte_remove: e (unhold)\n"); 880 return (ptbl_unhold(mmu, pmap, pdir_idx)); 881 } 882 883 //debugf("pte_remove: e\n"); 884 return (0); 885} 886 887/* 888 * Insert PTE for a given page and virtual address. 889 */ 890static void 891pte_enter(mmu_t mmu, pmap_t pmap, vm_page_t m, vm_offset_t va, uint32_t flags) 892{ 893 unsigned int pdir_idx = PDIR_IDX(va); 894 unsigned int ptbl_idx = PTBL_IDX(va); 895 pte_t *ptbl, *pte; 896 897 CTR4(KTR_PMAP, "%s: su = %d pmap = %p va = %p", __func__, 898 pmap == kernel_pmap, pmap, va); 899 900 /* Get the page table pointer. */ 901 ptbl = pmap->pm_pdir[pdir_idx]; 902 903 if (ptbl == NULL) { 904 /* Allocate page table pages. */ 905 ptbl = ptbl_alloc(mmu, pmap, pdir_idx); 906 } else { 907 /* 908 * Check if there is valid mapping for requested 909 * va, if there is, remove it. 910 */ 911 pte = &pmap->pm_pdir[pdir_idx][ptbl_idx]; 912 if (PTE_ISVALID(pte)) { 913 pte_remove(mmu, pmap, va, PTBL_HOLD); 914 } else { 915 /* 916 * pte is not used, increment hold count 917 * for ptbl pages. 918 */ 919 if (pmap != kernel_pmap) 920 ptbl_hold(mmu, pmap, pdir_idx); 921 } 922 } 923 924 /* 925 * Insert pv_entry into pv_list for mapped page if part of managed 926 * memory. 927 */ 928 if ((m->oflags & VPO_UNMANAGED) == 0) { 929 flags |= PTE_MANAGED; 930 931 /* Create and insert pv entry. 
*/ 932 pv_insert(pmap, va, m); 933 } 934 935 pmap->pm_stats.resident_count++; 936 937 mtx_lock_spin(&tlbivax_mutex); 938 tlb_miss_lock(); 939 940 tlb0_flush_entry(va); 941 if (pmap->pm_pdir[pdir_idx] == NULL) { 942 /* 943 * If we just allocated a new page table, hook it in 944 * the pdir. 945 */ 946 pmap->pm_pdir[pdir_idx] = ptbl; 947 } 948 pte = &(pmap->pm_pdir[pdir_idx][ptbl_idx]); 949 pte->rpn = VM_PAGE_TO_PHYS(m) & ~PTE_PA_MASK; 950 pte->flags |= (PTE_VALID | flags); 951 952 tlb_miss_unlock(); 953 mtx_unlock_spin(&tlbivax_mutex); 954} 955 956/* Return the pa for the given pmap/va. */ 957static vm_paddr_t 958pte_vatopa(mmu_t mmu, pmap_t pmap, vm_offset_t va) 959{ 960 vm_paddr_t pa = 0; 961 pte_t *pte; 962 963 pte = pte_find(mmu, pmap, va); 964 if ((pte != NULL) && PTE_ISVALID(pte)) 965 pa = (PTE_PA(pte) | (va & PTE_PA_MASK)); 966 return (pa); 967} 968 969/* Get a pointer to a PTE in a page table. */ 970static pte_t * 971pte_find(mmu_t mmu, pmap_t pmap, vm_offset_t va) 972{ 973 unsigned int pdir_idx = PDIR_IDX(va); 974 unsigned int ptbl_idx = PTBL_IDX(va); 975 976 KASSERT((pmap != NULL), ("pte_find: invalid pmap")); 977 978 if (pmap->pm_pdir[pdir_idx]) 979 return (&(pmap->pm_pdir[pdir_idx][ptbl_idx])); 980 981 return (NULL); 982} 983 984/**************************************************************************/ 985/* PMAP related */ 986/**************************************************************************/ 987 988/* 989 * This is called during booke_init, before the system is really initialized. 990 */ 991static void 992mmu_booke_bootstrap(mmu_t mmu, vm_offset_t start, vm_offset_t kernelend) 993{ 994 vm_offset_t phys_kernelend; 995 struct mem_region *mp, *mp1; 996 int cnt, i, j; 997 u_int s, e, sz; 998 u_int phys_avail_count; 999 vm_size_t physsz, hwphyssz, kstack0_sz; 1000 vm_offset_t kernel_pdir, kstack0, va; 1001 vm_paddr_t kstack0_phys; 1002 void *dpcpu; 1003 pte_t *pte; 1004 1005 debugf("mmu_booke_bootstrap: entered\n"); 1006 1007 /* Initialize invalidation mutex */ 1008 mtx_init(&tlbivax_mutex, "tlbivax", NULL, MTX_SPIN); 1009 1010 /* Read TLB0 size and associativity. */ 1011 tlb0_get_tlbconf(); 1012 1013 /* 1014 * Align kernel start and end address (kernel image). 1015 * Note that kernel end does not necessarily relate to kernsize. 1016 * kernsize is the size of the kernel that is actually mapped. 1017 * Also note that "start - 1" is deliberate. With SMP, the 1018 * entry point is exactly a page from the actual load address. 1019 * As such, trunc_page() has no effect and we're off by a page. 1020 * Since we always have the ELF header between the load address 1021 * and the entry point, we can safely subtract 1 to compensate. 1022 */ 1023 kernstart = trunc_page(start - 1); 1024 data_start = round_page(kernelend); 1025 data_end = data_start; 1026 1027 /* 1028 * Addresses of preloaded modules (like file systems) use 1029 * physical addresses. Make sure we relocate those into 1030 * virtual addresses. 1031 */ 1032 preload_addr_relocate = kernstart - kernload; 1033 1034 /* Allocate the dynamic per-cpu area. */ 1035 dpcpu = (void *)data_end; 1036 data_end += DPCPU_SIZE; 1037 1038 /* Allocate space for the message buffer. */ 1039 msgbufp = (struct msgbuf *)data_end; 1040 data_end += msgbufsize; 1041 debugf(" msgbufp at 0x%08x end = 0x%08x\n", (uint32_t)msgbufp, 1042 data_end); 1043 1044 data_end = round_page(data_end); 1045 1046 /* Allocate space for ptbl_bufs. 
*/ 1047 ptbl_bufs = (struct ptbl_buf *)data_end; 1048 data_end += sizeof(struct ptbl_buf) * PTBL_BUFS; 1049 debugf(" ptbl_bufs at 0x%08x end = 0x%08x\n", (uint32_t)ptbl_bufs, 1050 data_end); 1051 1052 data_end = round_page(data_end); 1053 1054 /* Allocate PTE tables for kernel KVA. */ 1055 kernel_pdir = data_end; 1056 kernel_ptbls = (VM_MAX_KERNEL_ADDRESS - VM_MIN_KERNEL_ADDRESS + 1057 PDIR_SIZE - 1) / PDIR_SIZE; 1058 data_end += kernel_ptbls * PTBL_PAGES * PAGE_SIZE; 1059 debugf(" kernel ptbls: %d\n", kernel_ptbls); 1060 debugf(" kernel pdir at 0x%08x end = 0x%08x\n", kernel_pdir, data_end); 1061 1062 debugf(" data_end: 0x%08x\n", data_end); 1063 if (data_end - kernstart > kernsize) { 1064 kernsize += tlb1_mapin_region(kernstart + kernsize, 1065 kernload + kernsize, (data_end - kernstart) - kernsize); 1066 } 1067 data_end = kernstart + kernsize; 1068 debugf(" updated data_end: 0x%08x\n", data_end); 1069 1070 /* 1071 * Clear the structures - note we can only do it safely after the 1072 * possible additional TLB1 translations are in place (above) so that 1073 * all range up to the currently calculated 'data_end' is covered. 1074 */ 1075 dpcpu_init(dpcpu, 0); 1076 memset((void *)ptbl_bufs, 0, sizeof(struct ptbl_buf) * PTBL_SIZE); 1077 memset((void *)kernel_pdir, 0, kernel_ptbls * PTBL_PAGES * PAGE_SIZE); 1078 1079 /*******************************************************/ 1080 /* Set the start and end of kva. */ 1081 /*******************************************************/ 1082 virtual_avail = round_page(data_end); 1083 virtual_end = VM_MAX_KERNEL_ADDRESS; 1084 1085 /* Allocate KVA space for page zero/copy operations. */ 1086 zero_page_va = virtual_avail; 1087 virtual_avail += PAGE_SIZE; 1088 zero_page_idle_va = virtual_avail; 1089 virtual_avail += PAGE_SIZE; 1090 copy_page_src_va = virtual_avail; 1091 virtual_avail += PAGE_SIZE; 1092 copy_page_dst_va = virtual_avail; 1093 virtual_avail += PAGE_SIZE; 1094 debugf("zero_page_va = 0x%08x\n", zero_page_va); 1095 debugf("zero_page_idle_va = 0x%08x\n", zero_page_idle_va); 1096 debugf("copy_page_src_va = 0x%08x\n", copy_page_src_va); 1097 debugf("copy_page_dst_va = 0x%08x\n", copy_page_dst_va); 1098 1099 /* Initialize page zero/copy mutexes. */ 1100 mtx_init(&zero_page_mutex, "mmu_booke_zero_page", NULL, MTX_DEF); 1101 mtx_init(©_page_mutex, "mmu_booke_copy_page", NULL, MTX_DEF); 1102 1103 /* Allocate KVA space for ptbl bufs. */ 1104 ptbl_buf_pool_vabase = virtual_avail; 1105 virtual_avail += PTBL_BUFS * PTBL_PAGES * PAGE_SIZE; 1106 debugf("ptbl_buf_pool_vabase = 0x%08x end = 0x%08x\n", 1107 ptbl_buf_pool_vabase, virtual_avail); 1108 1109 /* Calculate corresponding physical addresses for the kernel region. */ 1110 phys_kernelend = kernload + kernsize; 1111 debugf("kernel image and allocated data:\n"); 1112 debugf(" kernload = 0x%08x\n", kernload); 1113 debugf(" kernstart = 0x%08x\n", kernstart); 1114 debugf(" kernsize = 0x%08x\n", kernsize); 1115 1116 if (sizeof(phys_avail) / sizeof(phys_avail[0]) < availmem_regions_sz) 1117 panic("mmu_booke_bootstrap: phys_avail too small"); 1118 1119 /* 1120 * Remove kernel physical address range from avail regions list. Page 1121 * align all regions. Non-page aligned memory isn't very interesting 1122 * to us. Also, sort the entries for ascending addresses. 
1123 */ 1124 1125 /* Retrieve phys/avail mem regions */ 1126 mem_regions(&physmem_regions, &physmem_regions_sz, 1127 &availmem_regions, &availmem_regions_sz); 1128 sz = 0; 1129 cnt = availmem_regions_sz; 1130 debugf("processing avail regions:\n"); 1131 for (mp = availmem_regions; mp->mr_size; mp++) { 1132 s = mp->mr_start; 1133 e = mp->mr_start + mp->mr_size; 1134 debugf(" %08x-%08x -> ", s, e); 1135 /* Check whether this region holds all of the kernel. */ 1136 if (s < kernload && e > phys_kernelend) { 1137 availmem_regions[cnt].mr_start = phys_kernelend; 1138 availmem_regions[cnt++].mr_size = e - phys_kernelend; 1139 e = kernload; 1140 } 1141 /* Look whether this regions starts within the kernel. */ 1142 if (s >= kernload && s < phys_kernelend) { 1143 if (e <= phys_kernelend) 1144 goto empty; 1145 s = phys_kernelend; 1146 } 1147 /* Now look whether this region ends within the kernel. */ 1148 if (e > kernload && e <= phys_kernelend) { 1149 if (s >= kernload) 1150 goto empty; 1151 e = kernload; 1152 } 1153 /* Now page align the start and size of the region. */ 1154 s = round_page(s); 1155 e = trunc_page(e); 1156 if (e < s) 1157 e = s; 1158 sz = e - s; 1159 debugf("%08x-%08x = %x\n", s, e, sz); 1160 1161 /* Check whether some memory is left here. */ 1162 if (sz == 0) { 1163 empty: 1164 memmove(mp, mp + 1, 1165 (cnt - (mp - availmem_regions)) * sizeof(*mp)); 1166 cnt--; 1167 mp--; 1168 continue; 1169 } 1170 1171 /* Do an insertion sort. */ 1172 for (mp1 = availmem_regions; mp1 < mp; mp1++) 1173 if (s < mp1->mr_start) 1174 break; 1175 if (mp1 < mp) { 1176 memmove(mp1 + 1, mp1, (char *)mp - (char *)mp1); 1177 mp1->mr_start = s; 1178 mp1->mr_size = sz; 1179 } else { 1180 mp->mr_start = s; 1181 mp->mr_size = sz; 1182 } 1183 } 1184 availmem_regions_sz = cnt; 1185 1186 /*******************************************************/ 1187 /* Steal physical memory for kernel stack from the end */ 1188 /* of the first avail region */ 1189 /*******************************************************/ 1190 kstack0_sz = KSTACK_PAGES * PAGE_SIZE; 1191 kstack0_phys = availmem_regions[0].mr_start + 1192 availmem_regions[0].mr_size; 1193 kstack0_phys -= kstack0_sz; 1194 availmem_regions[0].mr_size -= kstack0_sz; 1195 1196 /*******************************************************/ 1197 /* Fill in phys_avail table, based on availmem_regions */ 1198 /*******************************************************/ 1199 phys_avail_count = 0; 1200 physsz = 0; 1201 hwphyssz = 0; 1202 TUNABLE_ULONG_FETCH("hw.physmem", (u_long *) &hwphyssz); 1203 1204 debugf("fill in phys_avail:\n"); 1205 for (i = 0, j = 0; i < availmem_regions_sz; i++, j += 2) { 1206 1207 debugf(" region: 0x%08x - 0x%08x (0x%08x)\n", 1208 availmem_regions[i].mr_start, 1209 availmem_regions[i].mr_start + 1210 availmem_regions[i].mr_size, 1211 availmem_regions[i].mr_size); 1212 1213 if (hwphyssz != 0 && 1214 (physsz + availmem_regions[i].mr_size) >= hwphyssz) { 1215 debugf(" hw.physmem adjust\n"); 1216 if (physsz < hwphyssz) { 1217 phys_avail[j] = availmem_regions[i].mr_start; 1218 phys_avail[j + 1] = 1219 availmem_regions[i].mr_start + 1220 hwphyssz - physsz; 1221 physsz = hwphyssz; 1222 phys_avail_count++; 1223 } 1224 break; 1225 } 1226 1227 phys_avail[j] = availmem_regions[i].mr_start; 1228 phys_avail[j + 1] = availmem_regions[i].mr_start + 1229 availmem_regions[i].mr_size; 1230 phys_avail_count++; 1231 physsz += availmem_regions[i].mr_size; 1232 } 1233 physmem = btoc(physsz); 1234 1235 /* Calculate the last available physical address. 
*/ 1236 for (i = 0; phys_avail[i + 2] != 0; i += 2) 1237 ; 1238 Maxmem = powerpc_btop(phys_avail[i + 1]); 1239 1240 debugf("Maxmem = 0x%08lx\n", Maxmem); 1241 debugf("phys_avail_count = %d\n", phys_avail_count); 1242 debugf("physsz = 0x%08x physmem = %ld (0x%08lx)\n", physsz, physmem, 1243 physmem); 1244 1245 /*******************************************************/ 1246 /* Initialize (statically allocated) kernel pmap. */ 1247 /*******************************************************/ 1248 PMAP_LOCK_INIT(kernel_pmap); 1249 kptbl_min = VM_MIN_KERNEL_ADDRESS / PDIR_SIZE; 1250 1251 debugf("kernel_pmap = 0x%08x\n", (uint32_t)kernel_pmap); 1252 debugf("kptbl_min = %d, kernel_ptbls = %d\n", kptbl_min, kernel_ptbls); 1253 debugf("kernel pdir range: 0x%08x - 0x%08x\n", 1254 kptbl_min * PDIR_SIZE, (kptbl_min + kernel_ptbls) * PDIR_SIZE - 1); 1255 1256 /* Initialize kernel pdir */ 1257 for (i = 0; i < kernel_ptbls; i++) 1258 kernel_pmap->pm_pdir[kptbl_min + i] = 1259 (pte_t *)(kernel_pdir + (i * PAGE_SIZE * PTBL_PAGES)); 1260 1261 for (i = 0; i < MAXCPU; i++) { 1262 kernel_pmap->pm_tid[i] = TID_KERNEL; 1263 1264 /* Initialize each CPU's tidbusy entry 0 with kernel_pmap */ 1265 tidbusy[i][0] = kernel_pmap; 1266 } 1267 1268 /* 1269 * Fill in PTEs covering kernel code and data. They are not required 1270 * for address translation, as this area is covered by static TLB1 1271 * entries, but for pte_vatopa() to work correctly with kernel area 1272 * addresses. 1273 */ 1274 for (va = kernstart; va < data_end; va += PAGE_SIZE) { 1275 pte = &(kernel_pmap->pm_pdir[PDIR_IDX(va)][PTBL_IDX(va)]); 1276 pte->rpn = kernload + (va - kernstart); 1277 pte->flags = PTE_M | PTE_SR | PTE_SW | PTE_SX | PTE_WIRED | 1278 PTE_VALID; 1279 } 1280 /* Mark kernel_pmap active on all CPUs */ 1281 CPU_FILL(&kernel_pmap->pm_active); 1282 1283 /* 1284 * Initialize the global pv list lock. 1285 */ 1286 rw_init(&pvh_global_lock, "pmap pv global"); 1287 1288 /*******************************************************/ 1289 /* Final setup */ 1290 /*******************************************************/ 1291 1292 /* Enter kstack0 into kernel map, provide guard page */ 1293 kstack0 = virtual_avail + KSTACK_GUARD_PAGES * PAGE_SIZE; 1294 thread0.td_kstack = kstack0; 1295 thread0.td_kstack_pages = KSTACK_PAGES; 1296 1297 debugf("kstack_sz = 0x%08x\n", kstack0_sz); 1298 debugf("kstack0_phys at 0x%08x - 0x%08x\n", 1299 kstack0_phys, kstack0_phys + kstack0_sz); 1300 debugf("kstack0 at 0x%08x - 0x%08x\n", kstack0, kstack0 + kstack0_sz); 1301 1302 virtual_avail += KSTACK_GUARD_PAGES * PAGE_SIZE + kstack0_sz; 1303 for (i = 0; i < KSTACK_PAGES; i++) { 1304 mmu_booke_kenter(mmu, kstack0, kstack0_phys); 1305 kstack0 += PAGE_SIZE; 1306 kstack0_phys += PAGE_SIZE; 1307 } 1308 1309 debugf("virtual_avail = %08x\n", virtual_avail); 1310 debugf("virtual_end = %08x\n", virtual_end); 1311 1312 debugf("mmu_booke_bootstrap: exit\n"); 1313} 1314 1315void 1316pmap_bootstrap_ap(volatile uint32_t *trcp __unused) 1317{ 1318 int i; 1319 1320 /* 1321 * Finish TLB1 configuration: the BSP already set up its TLB1 and we 1322 * have the snapshot of its contents in the s/w tlb1[] table, so use 1323 * these values directly to (re)program AP's TLB1 hardware. 1324 */ 1325 for (i = bp_ntlb1s; i < tlb1_idx; i++) { 1326 /* Skip invalid entries */ 1327 if (!(tlb1[i].mas1 & MAS1_VALID)) 1328 continue; 1329 1330 tlb1_write_entry(i); 1331 } 1332 1333 set_mas4_defaults(); 1334} 1335 1336/* 1337 * Get the physical page address for the given pmap/virtual address. 
1338 */ 1339static vm_paddr_t 1340mmu_booke_extract(mmu_t mmu, pmap_t pmap, vm_offset_t va) 1341{ 1342 vm_paddr_t pa; 1343 1344 PMAP_LOCK(pmap); 1345 pa = pte_vatopa(mmu, pmap, va); 1346 PMAP_UNLOCK(pmap); 1347 1348 return (pa); 1349} 1350 1351/* 1352 * Extract the physical page address associated with the given 1353 * kernel virtual address. 1354 */ 1355static vm_paddr_t 1356mmu_booke_kextract(mmu_t mmu, vm_offset_t va) 1357{ 1358 int i; 1359 1360 /* Check TLB1 mappings */ 1361 for (i = 0; i < tlb1_idx; i++) { 1362 if (!(tlb1[i].mas1 & MAS1_VALID)) 1363 continue; 1364 if (va >= tlb1[i].virt && va < tlb1[i].virt + tlb1[i].size) 1365 return (tlb1[i].phys + (va - tlb1[i].virt)); 1366 } 1367 1368 return (pte_vatopa(mmu, kernel_pmap, va)); 1369} 1370 1371/* 1372 * Initialize the pmap module. 1373 * Called by vm_init, to initialize any structures that the pmap 1374 * system needs to map virtual memory. 1375 */ 1376static void 1377mmu_booke_init(mmu_t mmu) 1378{ 1379 int shpgperproc = PMAP_SHPGPERPROC; 1380 1381 /* 1382 * Initialize the address space (zone) for the pv entries. Set a 1383 * high water mark so that the system can recover from excessive 1384 * numbers of pv entries. 1385 */ 1386 pvzone = uma_zcreate("PV ENTRY", sizeof(struct pv_entry), NULL, NULL, 1387 NULL, NULL, UMA_ALIGN_PTR, UMA_ZONE_VM | UMA_ZONE_NOFREE); 1388 1389 TUNABLE_INT_FETCH("vm.pmap.shpgperproc", &shpgperproc); 1390 pv_entry_max = shpgperproc * maxproc + cnt.v_page_count; 1391 1392 TUNABLE_INT_FETCH("vm.pmap.pv_entries", &pv_entry_max); 1393 pv_entry_high_water = 9 * (pv_entry_max / 10); 1394 1395 uma_zone_reserve_kva(pvzone, pv_entry_max); 1396 1397 /* Pre-fill pvzone with initial number of pv entries. */ 1398 uma_prealloc(pvzone, PV_ENTRY_ZONE_MIN); 1399 1400 /* Initialize ptbl allocation. */ 1401 ptbl_init(); 1402} 1403 1404/* 1405 * Map a list of wired pages into kernel virtual address space. This is 1406 * intended for temporary mappings which do not need page modification or 1407 * references recorded. Existing mappings in the region are overwritten. 1408 */ 1409static void 1410mmu_booke_qenter(mmu_t mmu, vm_offset_t sva, vm_page_t *m, int count) 1411{ 1412 vm_offset_t va; 1413 1414 va = sva; 1415 while (count-- > 0) { 1416 mmu_booke_kenter(mmu, va, VM_PAGE_TO_PHYS(*m)); 1417 va += PAGE_SIZE; 1418 m++; 1419 } 1420} 1421 1422/* 1423 * Remove page mappings from kernel virtual address space. Intended for 1424 * temporary mappings entered by mmu_booke_qenter. 1425 */ 1426static void 1427mmu_booke_qremove(mmu_t mmu, vm_offset_t sva, int count) 1428{ 1429 vm_offset_t va; 1430 1431 va = sva; 1432 while (count-- > 0) { 1433 mmu_booke_kremove(mmu, va); 1434 va += PAGE_SIZE; 1435 } 1436} 1437 1438/* 1439 * Map a wired page into kernel virtual address space. 
1440 */ 1441static void 1442mmu_booke_kenter(mmu_t mmu, vm_offset_t va, vm_paddr_t pa) 1443{ 1444 1445 mmu_booke_kenter_attr(mmu, va, pa, VM_MEMATTR_DEFAULT); 1446} 1447 1448static void 1449mmu_booke_kenter_attr(mmu_t mmu, vm_offset_t va, vm_paddr_t pa, vm_memattr_t ma) 1450{ 1451 unsigned int pdir_idx = PDIR_IDX(va); 1452 unsigned int ptbl_idx = PTBL_IDX(va); 1453 uint32_t flags; 1454 pte_t *pte; 1455 1456 KASSERT(((va >= VM_MIN_KERNEL_ADDRESS) && 1457 (va <= VM_MAX_KERNEL_ADDRESS)), ("mmu_booke_kenter: invalid va")); 1458 1459 flags = PTE_SR | PTE_SW | PTE_SX | PTE_WIRED | PTE_VALID; 1460 flags |= tlb_calc_wimg(pa, ma); 1461 1462 pte = &(kernel_pmap->pm_pdir[pdir_idx][ptbl_idx]); 1463 1464 mtx_lock_spin(&tlbivax_mutex); 1465 tlb_miss_lock(); 1466 1467 if (PTE_ISVALID(pte)) { 1468 1469 CTR1(KTR_PMAP, "%s: replacing entry!", __func__); 1470 1471 /* Flush entry from TLB0 */ 1472 tlb0_flush_entry(va); 1473 } 1474 1475 pte->rpn = pa & ~PTE_PA_MASK; 1476 pte->flags = flags; 1477 1478 //debugf("mmu_booke_kenter: pdir_idx = %d ptbl_idx = %d va=0x%08x " 1479 // "pa=0x%08x rpn=0x%08x flags=0x%08x\n", 1480 // pdir_idx, ptbl_idx, va, pa, pte->rpn, pte->flags); 1481 1482 /* Flush the real memory from the instruction cache. */ 1483 if ((flags & (PTE_I | PTE_G)) == 0) { 1484 __syncicache((void *)va, PAGE_SIZE); 1485 } 1486 1487 tlb_miss_unlock(); 1488 mtx_unlock_spin(&tlbivax_mutex); 1489} 1490 1491/* 1492 * Remove a page from kernel page table. 1493 */ 1494static void 1495mmu_booke_kremove(mmu_t mmu, vm_offset_t va) 1496{ 1497 unsigned int pdir_idx = PDIR_IDX(va); 1498 unsigned int ptbl_idx = PTBL_IDX(va); 1499 pte_t *pte; 1500 1501// CTR2(KTR_PMAP,("%s: s (va = 0x%08x)\n", __func__, va)); 1502 1503 KASSERT(((va >= VM_MIN_KERNEL_ADDRESS) && 1504 (va <= VM_MAX_KERNEL_ADDRESS)), 1505 ("mmu_booke_kremove: invalid va")); 1506 1507 pte = &(kernel_pmap->pm_pdir[pdir_idx][ptbl_idx]); 1508 1509 if (!PTE_ISVALID(pte)) { 1510 1511 CTR1(KTR_PMAP, "%s: invalid pte", __func__); 1512 1513 return; 1514 } 1515 1516 mtx_lock_spin(&tlbivax_mutex); 1517 tlb_miss_lock(); 1518 1519 /* Invalidate entry in TLB0, update PTE. */ 1520 tlb0_flush_entry(va); 1521 pte->flags = 0; 1522 pte->rpn = 0; 1523 1524 tlb_miss_unlock(); 1525 mtx_unlock_spin(&tlbivax_mutex); 1526} 1527 1528/* 1529 * Initialize pmap associated with process 0. 1530 */ 1531static void 1532mmu_booke_pinit0(mmu_t mmu, pmap_t pmap) 1533{ 1534 1535 PMAP_LOCK_INIT(pmap); 1536 mmu_booke_pinit(mmu, pmap); 1537 PCPU_SET(curpmap, pmap); 1538} 1539 1540/* 1541 * Initialize a preallocated and zeroed pmap structure, 1542 * such as one in a vmspace structure. 1543 */ 1544static void 1545mmu_booke_pinit(mmu_t mmu, pmap_t pmap) 1546{ 1547 int i; 1548 1549 CTR4(KTR_PMAP, "%s: pmap = %p, proc %d '%s'", __func__, pmap, 1550 curthread->td_proc->p_pid, curthread->td_proc->p_comm); 1551 1552 KASSERT((pmap != kernel_pmap), ("pmap_pinit: initializing kernel_pmap")); 1553 1554 for (i = 0; i < MAXCPU; i++) 1555 pmap->pm_tid[i] = TID_NONE; 1556 CPU_ZERO(&kernel_pmap->pm_active); 1557 bzero(&pmap->pm_stats, sizeof(pmap->pm_stats)); 1558 bzero(&pmap->pm_pdir, sizeof(pte_t *) * PDIR_NENTRIES); 1559 TAILQ_INIT(&pmap->pm_ptbl_list); 1560} 1561 1562/* 1563 * Release any resources held by the given physical map. 1564 * Called when a pmap initialized by mmu_booke_pinit is being released. 1565 * Should only be called if the map contains no valid mappings. 
1566 */ 1567static void 1568mmu_booke_release(mmu_t mmu, pmap_t pmap) 1569{ 1570 1571 KASSERT(pmap->pm_stats.resident_count == 0, 1572 ("pmap_release: pmap resident count %ld != 0", 1573 pmap->pm_stats.resident_count)); 1574} 1575 1576/* 1577 * Insert the given physical page at the specified virtual address in the 1578 * target physical map with the protection requested. If specified the page 1579 * will be wired down. 1580 */ 1581static void 1582mmu_booke_enter(mmu_t mmu, pmap_t pmap, vm_offset_t va, vm_page_t m, 1583 vm_prot_t prot, boolean_t wired) 1584{ 1585 1586 rw_wlock(&pvh_global_lock); 1587 PMAP_LOCK(pmap); 1588 mmu_booke_enter_locked(mmu, pmap, va, m, prot, wired); 1589 rw_wunlock(&pvh_global_lock); 1590 PMAP_UNLOCK(pmap); 1591} 1592 1593static void 1594mmu_booke_enter_locked(mmu_t mmu, pmap_t pmap, vm_offset_t va, vm_page_t m, 1595 vm_prot_t prot, boolean_t wired) 1596{ 1597 pte_t *pte; 1598 vm_paddr_t pa; 1599 uint32_t flags; 1600 int su, sync; 1601 1602 pa = VM_PAGE_TO_PHYS(m); 1603 su = (pmap == kernel_pmap); 1604 sync = 0; 1605 1606 //debugf("mmu_booke_enter_locked: s (pmap=0x%08x su=%d tid=%d m=0x%08x va=0x%08x " 1607 // "pa=0x%08x prot=0x%08x wired=%d)\n", 1608 // (u_int32_t)pmap, su, pmap->pm_tid, 1609 // (u_int32_t)m, va, pa, prot, wired); 1610 1611 if (su) { 1612 KASSERT(((va >= virtual_avail) && 1613 (va <= VM_MAX_KERNEL_ADDRESS)), 1614 ("mmu_booke_enter_locked: kernel pmap, non kernel va")); 1615 } else { 1616 KASSERT((va <= VM_MAXUSER_ADDRESS), 1617 ("mmu_booke_enter_locked: user pmap, non user va")); 1618 } 1619 if ((m->oflags & VPO_UNMANAGED) == 0 && !vm_page_xbusied(m)) 1620 VM_OBJECT_ASSERT_LOCKED(m->object); 1621 1622 PMAP_LOCK_ASSERT(pmap, MA_OWNED); 1623 1624 /* 1625 * If there is an existing mapping, and the physical address has not 1626 * changed, must be protection or wiring change. 1627 */ 1628 if (((pte = pte_find(mmu, pmap, va)) != NULL) && 1629 (PTE_ISVALID(pte)) && (PTE_PA(pte) == pa)) { 1630 1631 /* 1632 * Before actually updating pte->flags we calculate and 1633 * prepare its new value in a helper var. 1634 */ 1635 flags = pte->flags; 1636 flags &= ~(PTE_UW | PTE_UX | PTE_SW | PTE_SX | PTE_MODIFIED); 1637 1638 /* Wiring change, just update stats. */ 1639 if (wired) { 1640 if (!PTE_ISWIRED(pte)) { 1641 flags |= PTE_WIRED; 1642 pmap->pm_stats.wired_count++; 1643 } 1644 } else { 1645 if (PTE_ISWIRED(pte)) { 1646 flags &= ~PTE_WIRED; 1647 pmap->pm_stats.wired_count--; 1648 } 1649 } 1650 1651 if (prot & VM_PROT_WRITE) { 1652 /* Add write permissions. */ 1653 flags |= PTE_SW; 1654 if (!su) 1655 flags |= PTE_UW; 1656 1657 if ((flags & PTE_MANAGED) != 0) 1658 vm_page_aflag_set(m, PGA_WRITEABLE); 1659 } else { 1660 /* Handle modified pages, sense modify status. */ 1661 1662 /* 1663 * The PTE_MODIFIED flag could be set by underlying 1664 * TLB misses since we last read it (above), possibly 1665 * other CPUs could update it so we check in the PTE 1666 * directly rather than rely on that saved local flags 1667 * copy. 1668 */ 1669 if (PTE_ISMODIFIED(pte)) 1670 vm_page_dirty(m); 1671 } 1672 1673 if (prot & VM_PROT_EXECUTE) { 1674 flags |= PTE_SX; 1675 if (!su) 1676 flags |= PTE_UX; 1677 1678 /* 1679 * Check existing flags for execute permissions: if we 1680 * are turning execute permissions on, icache should 1681 * be flushed. 1682 */ 1683 if ((pte->flags & (PTE_UX | PTE_SX)) == 0) 1684 sync++; 1685 } 1686 1687 flags &= ~PTE_REFERENCED; 1688 1689 /* 1690 * The new flags value is all calculated -- only now actually 1691 * update the PTE. 
1692 */ 1693 mtx_lock_spin(&tlbivax_mutex); 1694 tlb_miss_lock(); 1695 1696 tlb0_flush_entry(va); 1697 pte->flags = flags; 1698 1699 tlb_miss_unlock(); 1700 mtx_unlock_spin(&tlbivax_mutex); 1701 1702 } else { 1703 /* 1704 * If there is an existing mapping, but it's for a different 1705 * physical address, pte_enter() will delete the old mapping. 1706 */ 1707 //if ((pte != NULL) && PTE_ISVALID(pte)) 1708 // debugf("mmu_booke_enter_locked: replace\n"); 1709 //else 1710 // debugf("mmu_booke_enter_locked: new\n"); 1711 1712 /* Now set up the flags and install the new mapping. */ 1713 flags = (PTE_SR | PTE_VALID); 1714 flags |= PTE_M; 1715 1716 if (!su) 1717 flags |= PTE_UR; 1718 1719 if (prot & VM_PROT_WRITE) { 1720 flags |= PTE_SW; 1721 if (!su) 1722 flags |= PTE_UW; 1723 1724 if ((m->oflags & VPO_UNMANAGED) == 0) 1725 vm_page_aflag_set(m, PGA_WRITEABLE); 1726 } 1727 1728 if (prot & VM_PROT_EXECUTE) { 1729 flags |= PTE_SX; 1730 if (!su) 1731 flags |= PTE_UX; 1732 } 1733 1734 /* If its wired update stats. */ 1735 if (wired) { 1736 pmap->pm_stats.wired_count++; 1737 flags |= PTE_WIRED; 1738 } 1739 1740 pte_enter(mmu, pmap, m, va, flags); 1741 1742 /* Flush the real memory from the instruction cache. */ 1743 if (prot & VM_PROT_EXECUTE) 1744 sync++; 1745 } 1746 1747 if (sync && (su || pmap == PCPU_GET(curpmap))) { 1748 __syncicache((void *)va, PAGE_SIZE); 1749 sync = 0; 1750 } 1751} 1752 1753/* 1754 * Maps a sequence of resident pages belonging to the same object. 1755 * The sequence begins with the given page m_start. This page is 1756 * mapped at the given virtual address start. Each subsequent page is 1757 * mapped at a virtual address that is offset from start by the same 1758 * amount as the page is offset from m_start within the object. The 1759 * last page in the sequence is the page with the largest offset from 1760 * m_start that can be mapped at a virtual address less than the given 1761 * virtual address end. Not every virtual page between start and end 1762 * is mapped; only those for which a resident page exists with the 1763 * corresponding offset from m_start are mapped. 1764 */ 1765static void 1766mmu_booke_enter_object(mmu_t mmu, pmap_t pmap, vm_offset_t start, 1767 vm_offset_t end, vm_page_t m_start, vm_prot_t prot) 1768{ 1769 vm_page_t m; 1770 vm_pindex_t diff, psize; 1771 1772 VM_OBJECT_ASSERT_LOCKED(m_start->object); 1773 1774 psize = atop(end - start); 1775 m = m_start; 1776 rw_wlock(&pvh_global_lock); 1777 PMAP_LOCK(pmap); 1778 while (m != NULL && (diff = m->pindex - m_start->pindex) < psize) { 1779 mmu_booke_enter_locked(mmu, pmap, start + ptoa(diff), m, 1780 prot & (VM_PROT_READ | VM_PROT_EXECUTE), FALSE); 1781 m = TAILQ_NEXT(m, listq); 1782 } 1783 rw_wunlock(&pvh_global_lock); 1784 PMAP_UNLOCK(pmap); 1785} 1786 1787static void 1788mmu_booke_enter_quick(mmu_t mmu, pmap_t pmap, vm_offset_t va, vm_page_t m, 1789 vm_prot_t prot) 1790{ 1791 1792 rw_wlock(&pvh_global_lock); 1793 PMAP_LOCK(pmap); 1794 mmu_booke_enter_locked(mmu, pmap, va, m, 1795 prot & (VM_PROT_READ | VM_PROT_EXECUTE), FALSE); 1796 rw_wunlock(&pvh_global_lock); 1797 PMAP_UNLOCK(pmap); 1798} 1799 1800/* 1801 * Remove the given range of addresses from the specified map. 1802 * 1803 * It is assumed that the start and end are properly rounded to the page size. 
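 *
 * A minimal sketch of what "properly rounded" means (illustrative only; the
 * caller, addr and size shown here are hypothetical):
 *
 *	va = trunc_page(addr);
 *	endva = round_page(addr + size);
 *	mmu_booke_remove(mmu, pmap, va, endva);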
1804 */ 1805static void 1806mmu_booke_remove(mmu_t mmu, pmap_t pmap, vm_offset_t va, vm_offset_t endva) 1807{ 1808 pte_t *pte; 1809 uint8_t hold_flag; 1810 1811 int su = (pmap == kernel_pmap); 1812 1813 //debugf("mmu_booke_remove: s (su = %d pmap=0x%08x tid=%d va=0x%08x endva=0x%08x)\n", 1814 // su, (u_int32_t)pmap, pmap->pm_tid, va, endva); 1815 1816 if (su) { 1817 KASSERT(((va >= virtual_avail) && 1818 (va <= VM_MAX_KERNEL_ADDRESS)), 1819 ("mmu_booke_remove: kernel pmap, non kernel va")); 1820 } else { 1821 KASSERT((va <= VM_MAXUSER_ADDRESS), 1822 ("mmu_booke_remove: user pmap, non user va")); 1823 } 1824 1825 if (PMAP_REMOVE_DONE(pmap)) { 1826 //debugf("mmu_booke_remove: e (empty)\n"); 1827 return; 1828 } 1829 1830 hold_flag = PTBL_HOLD_FLAG(pmap); 1831 //debugf("mmu_booke_remove: hold_flag = %d\n", hold_flag); 1832 1833 rw_wlock(&pvh_global_lock); 1834 PMAP_LOCK(pmap); 1835 for (; va < endva; va += PAGE_SIZE) { 1836 pte = pte_find(mmu, pmap, va); 1837 if ((pte != NULL) && PTE_ISVALID(pte)) 1838 pte_remove(mmu, pmap, va, hold_flag); 1839 } 1840 PMAP_UNLOCK(pmap); 1841 rw_wunlock(&pvh_global_lock); 1842 1843 //debugf("mmu_booke_remove: e\n"); 1844} 1845 1846/* 1847 * Remove physical page from all pmaps in which it resides. 1848 */ 1849static void 1850mmu_booke_remove_all(mmu_t mmu, vm_page_t m) 1851{ 1852 pv_entry_t pv, pvn; 1853 uint8_t hold_flag; 1854 1855 rw_wlock(&pvh_global_lock); 1856 for (pv = TAILQ_FIRST(&m->md.pv_list); pv != NULL; pv = pvn) { 1857 pvn = TAILQ_NEXT(pv, pv_link); 1858 1859 PMAP_LOCK(pv->pv_pmap); 1860 hold_flag = PTBL_HOLD_FLAG(pv->pv_pmap); 1861 pte_remove(mmu, pv->pv_pmap, pv->pv_va, hold_flag); 1862 PMAP_UNLOCK(pv->pv_pmap); 1863 } 1864 vm_page_aflag_clear(m, PGA_WRITEABLE); 1865 rw_wunlock(&pvh_global_lock); 1866} 1867 1868/* 1869 * Map a range of physical addresses into kernel virtual address space. 1870 */ 1871static vm_offset_t 1872mmu_booke_map(mmu_t mmu, vm_offset_t *virt, vm_paddr_t pa_start, 1873 vm_paddr_t pa_end, int prot) 1874{ 1875 vm_offset_t sva = *virt; 1876 vm_offset_t va = sva; 1877 1878 //debugf("mmu_booke_map: s (sva = 0x%08x pa_start = 0x%08x pa_end = 0x%08x)\n", 1879 // sva, pa_start, pa_end); 1880 1881 while (pa_start < pa_end) { 1882 mmu_booke_kenter(mmu, va, pa_start); 1883 va += PAGE_SIZE; 1884 pa_start += PAGE_SIZE; 1885 } 1886 *virt = va; 1887 1888 //debugf("mmu_booke_map: e (va = 0x%08x)\n", va); 1889 return (sva); 1890} 1891 1892/* 1893 * The pmap must be activated before it's address space can be accessed in any 1894 * way. 1895 */ 1896static void 1897mmu_booke_activate(mmu_t mmu, struct thread *td) 1898{ 1899 pmap_t pmap; 1900 u_int cpuid; 1901 1902 pmap = &td->td_proc->p_vmspace->vm_pmap; 1903 1904 CTR5(KTR_PMAP, "%s: s (td = %p, proc = '%s', id = %d, pmap = 0x%08x)", 1905 __func__, td, td->td_proc->p_comm, td->td_proc->p_pid, pmap); 1906 1907 KASSERT((pmap != kernel_pmap), ("mmu_booke_activate: kernel_pmap!")); 1908 1909 mtx_lock_spin(&sched_lock); 1910 1911 cpuid = PCPU_GET(cpuid); 1912 CPU_SET_ATOMIC(cpuid, &pmap->pm_active); 1913 PCPU_SET(curpmap, pmap); 1914 1915 if (pmap->pm_tid[cpuid] == TID_NONE) 1916 tid_alloc(pmap); 1917 1918 /* Load PID0 register with pmap tid value. */ 1919 mtspr(SPR_PID0, pmap->pm_tid[cpuid]); 1920 __asm __volatile("isync"); 1921 1922 mtx_unlock_spin(&sched_lock); 1923 1924 CTR3(KTR_PMAP, "%s: e (tid = %d for '%s')", __func__, 1925 pmap->pm_tid[PCPU_GET(cpuid)], td->td_proc->p_comm); 1926} 1927 1928/* 1929 * Deactivate the specified process's address space. 
1930 */ 1931static void 1932mmu_booke_deactivate(mmu_t mmu, struct thread *td) 1933{ 1934 pmap_t pmap; 1935 1936 pmap = &td->td_proc->p_vmspace->vm_pmap; 1937 1938 CTR5(KTR_PMAP, "%s: td=%p, proc = '%s', id = %d, pmap = 0x%08x", 1939 __func__, td, td->td_proc->p_comm, td->td_proc->p_pid, pmap); 1940 1941 CPU_CLR_ATOMIC(PCPU_GET(cpuid), &pmap->pm_active); 1942 PCPU_SET(curpmap, NULL); 1943} 1944 1945/* 1946 * Copy the range specified by src_addr/len 1947 * from the source map to the range dst_addr/len 1948 * in the destination map. 1949 * 1950 * This routine is only advisory and need not do anything. 1951 */ 1952static void 1953mmu_booke_copy(mmu_t mmu, pmap_t dst_pmap, pmap_t src_pmap, 1954 vm_offset_t dst_addr, vm_size_t len, vm_offset_t src_addr) 1955{ 1956 1957} 1958 1959/* 1960 * Set the physical protection on the specified range of this map as requested. 1961 */ 1962static void 1963mmu_booke_protect(mmu_t mmu, pmap_t pmap, vm_offset_t sva, vm_offset_t eva, 1964 vm_prot_t prot) 1965{ 1966 vm_offset_t va; 1967 vm_page_t m; 1968 pte_t *pte; 1969 1970 if ((prot & VM_PROT_READ) == VM_PROT_NONE) { 1971 mmu_booke_remove(mmu, pmap, sva, eva); 1972 return; 1973 } 1974 1975 if (prot & VM_PROT_WRITE) 1976 return; 1977 1978 PMAP_LOCK(pmap); 1979 for (va = sva; va < eva; va += PAGE_SIZE) { 1980 if ((pte = pte_find(mmu, pmap, va)) != NULL) { 1981 if (PTE_ISVALID(pte)) { 1982 m = PHYS_TO_VM_PAGE(PTE_PA(pte)); 1983 1984 mtx_lock_spin(&tlbivax_mutex); 1985 tlb_miss_lock(); 1986 1987 /* Handle modified pages. */ 1988 if (PTE_ISMODIFIED(pte) && PTE_ISMANAGED(pte)) 1989 vm_page_dirty(m); 1990 1991 tlb0_flush_entry(va); 1992 pte->flags &= ~(PTE_UW | PTE_SW | PTE_MODIFIED); 1993 1994 tlb_miss_unlock(); 1995 mtx_unlock_spin(&tlbivax_mutex); 1996 } 1997 } 1998 } 1999 PMAP_UNLOCK(pmap); 2000} 2001 2002/* 2003 * Clear the write and modified bits in each of the given page's mappings. 2004 */ 2005static void 2006mmu_booke_remove_write(mmu_t mmu, vm_page_t m) 2007{ 2008 pv_entry_t pv; 2009 pte_t *pte; 2010 2011 KASSERT((m->oflags & VPO_UNMANAGED) == 0, 2012 ("mmu_booke_remove_write: page %p is not managed", m)); 2013 2014 /* 2015 * If the page is not exclusive busied, then PGA_WRITEABLE cannot be 2016 * set by another thread while the object is locked. Thus, 2017 * if PGA_WRITEABLE is clear, no page table entries need updating. 2018 */ 2019 VM_OBJECT_ASSERT_WLOCKED(m->object); 2020 if (!vm_page_xbusied(m) && (m->aflags & PGA_WRITEABLE) == 0) 2021 return; 2022 rw_wlock(&pvh_global_lock); 2023 TAILQ_FOREACH(pv, &m->md.pv_list, pv_link) { 2024 PMAP_LOCK(pv->pv_pmap); 2025 if ((pte = pte_find(mmu, pv->pv_pmap, pv->pv_va)) != NULL) { 2026 if (PTE_ISVALID(pte)) { 2027 m = PHYS_TO_VM_PAGE(PTE_PA(pte)); 2028 2029 mtx_lock_spin(&tlbivax_mutex); 2030 tlb_miss_lock(); 2031 2032 /* Handle modified pages. */ 2033 if (PTE_ISMODIFIED(pte)) 2034 vm_page_dirty(m); 2035 2036 /* Flush mapping from TLB0. 
*/ 2037 pte->flags &= ~(PTE_UW | PTE_SW | PTE_MODIFIED); 2038 2039 tlb_miss_unlock(); 2040 mtx_unlock_spin(&tlbivax_mutex); 2041 } 2042 } 2043 PMAP_UNLOCK(pv->pv_pmap); 2044 } 2045 vm_page_aflag_clear(m, PGA_WRITEABLE); 2046 rw_wunlock(&pvh_global_lock); 2047} 2048 2049static void 2050mmu_booke_sync_icache(mmu_t mmu, pmap_t pm, vm_offset_t va, vm_size_t sz) 2051{ 2052 pte_t *pte; 2053 pmap_t pmap; 2054 vm_page_t m; 2055 vm_offset_t addr; 2056 vm_paddr_t pa; 2057 int active, valid; 2058 2059 va = trunc_page(va); 2060 sz = round_page(sz); 2061 2062 rw_wlock(&pvh_global_lock); 2063 pmap = PCPU_GET(curpmap); 2064 active = (pm == kernel_pmap || pm == pmap) ? 1 : 0; 2065 while (sz > 0) { 2066 PMAP_LOCK(pm); 2067 pte = pte_find(mmu, pm, va); 2068 valid = (pte != NULL && PTE_ISVALID(pte)) ? 1 : 0; 2069 if (valid) 2070 pa = PTE_PA(pte); 2071 PMAP_UNLOCK(pm); 2072 if (valid) { 2073 if (!active) { 2074 /* Create a mapping in the active pmap. */ 2075 addr = 0; 2076 m = PHYS_TO_VM_PAGE(pa); 2077 PMAP_LOCK(pmap); 2078 pte_enter(mmu, pmap, m, addr, 2079 PTE_SR | PTE_VALID | PTE_UR); 2080 __syncicache((void *)addr, PAGE_SIZE); 2081 pte_remove(mmu, pmap, addr, PTBL_UNHOLD); 2082 PMAP_UNLOCK(pmap); 2083 } else 2084 __syncicache((void *)va, PAGE_SIZE); 2085 } 2086 va += PAGE_SIZE; 2087 sz -= PAGE_SIZE; 2088 } 2089 rw_wunlock(&pvh_global_lock); 2090} 2091 2092/* 2093 * Atomically extract and hold the physical page with the given 2094 * pmap and virtual address pair if that mapping permits the given 2095 * protection. 2096 */ 2097static vm_page_t 2098mmu_booke_extract_and_hold(mmu_t mmu, pmap_t pmap, vm_offset_t va, 2099 vm_prot_t prot) 2100{ 2101 pte_t *pte; 2102 vm_page_t m; 2103 uint32_t pte_wbit; 2104 vm_paddr_t pa; 2105 2106 m = NULL; 2107 pa = 0; 2108 PMAP_LOCK(pmap); 2109retry: 2110 pte = pte_find(mmu, pmap, va); 2111 if ((pte != NULL) && PTE_ISVALID(pte)) { 2112 if (pmap == kernel_pmap) 2113 pte_wbit = PTE_SW; 2114 else 2115 pte_wbit = PTE_UW; 2116 2117 if ((pte->flags & pte_wbit) || ((prot & VM_PROT_WRITE) == 0)) { 2118 if (vm_page_pa_tryrelock(pmap, PTE_PA(pte), &pa)) 2119 goto retry; 2120 m = PHYS_TO_VM_PAGE(PTE_PA(pte)); 2121 vm_page_hold(m); 2122 } 2123 } 2124 2125 PA_UNLOCK_COND(pa); 2126 PMAP_UNLOCK(pmap); 2127 return (m); 2128} 2129 2130/* 2131 * Initialize a vm_page's machine-dependent fields. 2132 */ 2133static void 2134mmu_booke_page_init(mmu_t mmu, vm_page_t m) 2135{ 2136 2137 TAILQ_INIT(&m->md.pv_list); 2138} 2139 2140/* 2141 * mmu_booke_zero_page_area zeros the specified hardware page by 2142 * mapping it into virtual memory and using bzero to clear 2143 * its contents. 2144 * 2145 * off and size must reside within a single page. 2146 */ 2147static void 2148mmu_booke_zero_page_area(mmu_t mmu, vm_page_t m, int off, int size) 2149{ 2150 vm_offset_t va; 2151 2152 /* XXX KASSERT off and size are within a single page? */ 2153 2154 mtx_lock(&zero_page_mutex); 2155 va = zero_page_va; 2156 2157 mmu_booke_kenter(mmu, va, VM_PAGE_TO_PHYS(m)); 2158 bzero((caddr_t)va + off, size); 2159 mmu_booke_kremove(mmu, va); 2160 2161 mtx_unlock(&zero_page_mutex); 2162} 2163 2164/* 2165 * mmu_booke_zero_page zeros the specified hardware page. 2166 */ 2167static void 2168mmu_booke_zero_page(mmu_t mmu, vm_page_t m) 2169{ 2170 2171 mmu_booke_zero_page_area(mmu, m, 0, PAGE_SIZE); 2172} 2173 2174/* 2175 * mmu_booke_copy_page copies the specified (machine independent) page by 2176 * mapping the page into virtual memory and using memcopy to copy the page, 2177 * one machine dependent page at a time. 
2178 */ 2179static void 2180mmu_booke_copy_page(mmu_t mmu, vm_page_t sm, vm_page_t dm) 2181{ 2182 vm_offset_t sva, dva; 2183 2184 sva = copy_page_src_va; 2185 dva = copy_page_dst_va; 2186 2187 mtx_lock(&copy_page_mutex); 2188 mmu_booke_kenter(mmu, sva, VM_PAGE_TO_PHYS(sm)); 2189 mmu_booke_kenter(mmu, dva, VM_PAGE_TO_PHYS(dm)); 2190 memcpy((caddr_t)dva, (caddr_t)sva, PAGE_SIZE); 2191 mmu_booke_kremove(mmu, dva); 2192 mmu_booke_kremove(mmu, sva); 2193 mtx_unlock(&copy_page_mutex); 2194} 2195 2196static inline void 2197mmu_booke_copy_pages(mmu_t mmu, vm_page_t *ma, vm_offset_t a_offset, 2198 vm_page_t *mb, vm_offset_t b_offset, int xfersize) 2199{ 2200 void *a_cp, *b_cp; 2201 vm_offset_t a_pg_offset, b_pg_offset; 2202 int cnt; 2203 2204 mtx_lock(&copy_page_mutex); 2205 while (xfersize > 0) { 2206 a_pg_offset = a_offset & PAGE_MASK; 2207 cnt = min(xfersize, PAGE_SIZE - a_pg_offset); 2208 mmu_booke_kenter(mmu, copy_page_src_va, 2209 VM_PAGE_TO_PHYS(ma[a_offset >> PAGE_SHIFT])); 2210 a_cp = (char *)copy_page_src_va + a_pg_offset; 2211 b_pg_offset = b_offset & PAGE_MASK; 2212 cnt = min(cnt, PAGE_SIZE - b_pg_offset); 2213 mmu_booke_kenter(mmu, copy_page_dst_va, 2214 VM_PAGE_TO_PHYS(mb[b_offset >> PAGE_SHIFT])); 2215 b_cp = (char *)copy_page_dst_va + b_pg_offset; 2216 bcopy(a_cp, b_cp, cnt); 2217 mmu_booke_kremove(mmu, copy_page_dst_va); 2218 mmu_booke_kremove(mmu, copy_page_src_va); 2219 a_offset += cnt; 2220 b_offset += cnt; 2221 xfersize -= cnt; 2222 } 2223 mtx_unlock(&copy_page_mutex); 2224} 2225 2226/* 2227 * mmu_booke_zero_page_idle zeros the specified hardware page by mapping it 2228 * into virtual memory and using bzero to clear its contents. This is intended 2229 * to be called from the vm_pagezero process only and outside of Giant. No 2230 * lock is required. 2231 */ 2232static void 2233mmu_booke_zero_page_idle(mmu_t mmu, vm_page_t m) 2234{ 2235 vm_offset_t va; 2236 2237 va = zero_page_idle_va; 2238 mmu_booke_kenter(mmu, va, VM_PAGE_TO_PHYS(m)); 2239 bzero((caddr_t)va, PAGE_SIZE); 2240 mmu_booke_kremove(mmu, va); 2241} 2242 2243/* 2244 * Return whether or not the specified physical page was modified 2245 * in any of the physical maps. 2246 */ 2247static boolean_t 2248mmu_booke_is_modified(mmu_t mmu, vm_page_t m) 2249{ 2250 pte_t *pte; 2251 pv_entry_t pv; 2252 boolean_t rv; 2253 2254 KASSERT((m->oflags & VPO_UNMANAGED) == 0, 2255 ("mmu_booke_is_modified: page %p is not managed", m)); 2256 rv = FALSE; 2257 2258 /* 2259 * If the page is not exclusive busied, then PGA_WRITEABLE cannot be 2260 * concurrently set while the object is locked. Thus, if PGA_WRITEABLE 2261 * is clear, no PTEs can be modified. 2262 */ 2263 VM_OBJECT_ASSERT_WLOCKED(m->object); 2264 if (!vm_page_xbusied(m) && (m->aflags & PGA_WRITEABLE) == 0) 2265 return (rv); 2266 rw_wlock(&pvh_global_lock); 2267 TAILQ_FOREACH(pv, &m->md.pv_list, pv_link) { 2268 PMAP_LOCK(pv->pv_pmap); 2269 if ((pte = pte_find(mmu, pv->pv_pmap, pv->pv_va)) != NULL && 2270 PTE_ISVALID(pte)) { 2271 if (PTE_ISMODIFIED(pte)) 2272 rv = TRUE; 2273 } 2274 PMAP_UNLOCK(pv->pv_pmap); 2275 if (rv) 2276 break; 2277 } 2278 rw_wunlock(&pvh_global_lock); 2279 return (rv); 2280} 2281 2282/* 2283 * Return whether or not the specified virtual address is eligible 2284 * for prefault. 2285 */ 2286static boolean_t 2287mmu_booke_is_prefaultable(mmu_t mmu, pmap_t pmap, vm_offset_t addr) 2288{ 2289 2290 return (FALSE); 2291} 2292 2293/* 2294 * Return whether or not the specified physical page was referenced 2295 * in any of the physical maps.
2296 */ 2297static boolean_t 2298mmu_booke_is_referenced(mmu_t mmu, vm_page_t m) 2299{ 2300 pte_t *pte; 2301 pv_entry_t pv; 2302 boolean_t rv; 2303 2304 KASSERT((m->oflags & VPO_UNMANAGED) == 0, 2305 ("mmu_booke_is_referenced: page %p is not managed", m)); 2306 rv = FALSE; 2307 rw_wlock(&pvh_global_lock); 2308 TAILQ_FOREACH(pv, &m->md.pv_list, pv_link) { 2309 PMAP_LOCK(pv->pv_pmap); 2310 if ((pte = pte_find(mmu, pv->pv_pmap, pv->pv_va)) != NULL && 2311 PTE_ISVALID(pte)) { 2312 if (PTE_ISREFERENCED(pte)) 2313 rv = TRUE; 2314 } 2315 PMAP_UNLOCK(pv->pv_pmap); 2316 if (rv) 2317 break; 2318 } 2319 rw_wunlock(&pvh_global_lock); 2320 return (rv); 2321} 2322 2323/* 2324 * Clear the modify bits on the specified physical page. 2325 */ 2326static void 2327mmu_booke_clear_modify(mmu_t mmu, vm_page_t m) 2328{ 2329 pte_t *pte; 2330 pv_entry_t pv; 2331 2332 KASSERT((m->oflags & VPO_UNMANAGED) == 0, 2333 ("mmu_booke_clear_modify: page %p is not managed", m)); 2334 VM_OBJECT_ASSERT_WLOCKED(m->object); 2335 KASSERT(!vm_page_xbusied(m), 2336 ("mmu_booke_clear_modify: page %p is exclusive busied", m)); 2337 2338 /* 2339 * If the page is not PG_AWRITEABLE, then no PTEs can be modified. 2340 * If the object containing the page is locked and the page is not 2341 * exclusive busied, then PG_AWRITEABLE cannot be concurrently set. 2342 */ 2343 if ((m->aflags & PGA_WRITEABLE) == 0) 2344 return; 2345 rw_wlock(&pvh_global_lock); 2346 TAILQ_FOREACH(pv, &m->md.pv_list, pv_link) { 2347 PMAP_LOCK(pv->pv_pmap); 2348 if ((pte = pte_find(mmu, pv->pv_pmap, pv->pv_va)) != NULL && 2349 PTE_ISVALID(pte)) { 2350 mtx_lock_spin(&tlbivax_mutex); 2351 tlb_miss_lock(); 2352 2353 if (pte->flags & (PTE_SW | PTE_UW | PTE_MODIFIED)) { 2354 tlb0_flush_entry(pv->pv_va); 2355 pte->flags &= ~(PTE_SW | PTE_UW | PTE_MODIFIED | 2356 PTE_REFERENCED); 2357 } 2358 2359 tlb_miss_unlock(); 2360 mtx_unlock_spin(&tlbivax_mutex); 2361 } 2362 PMAP_UNLOCK(pv->pv_pmap); 2363 } 2364 rw_wunlock(&pvh_global_lock); 2365} 2366 2367/* 2368 * Return a count of reference bits for a page, clearing those bits. 2369 * It is not necessary for every reference bit to be cleared, but it 2370 * is necessary that 0 only be returned when there are truly no 2371 * reference bits set. 2372 * 2373 * XXX: The exact number of bits to check and clear is a matter that 2374 * should be tested and standardized at some point in the future for 2375 * optimal aging of shared pages. 2376 */ 2377static int 2378mmu_booke_ts_referenced(mmu_t mmu, vm_page_t m) 2379{ 2380 pte_t *pte; 2381 pv_entry_t pv; 2382 int count; 2383 2384 KASSERT((m->oflags & VPO_UNMANAGED) == 0, 2385 ("mmu_booke_ts_referenced: page %p is not managed", m)); 2386 count = 0; 2387 rw_wlock(&pvh_global_lock); 2388 TAILQ_FOREACH(pv, &m->md.pv_list, pv_link) { 2389 PMAP_LOCK(pv->pv_pmap); 2390 if ((pte = pte_find(mmu, pv->pv_pmap, pv->pv_va)) != NULL && 2391 PTE_ISVALID(pte)) { 2392 if (PTE_ISREFERENCED(pte)) { 2393 mtx_lock_spin(&tlbivax_mutex); 2394 tlb_miss_lock(); 2395 2396 tlb0_flush_entry(pv->pv_va); 2397 pte->flags &= ~PTE_REFERENCED; 2398 2399 tlb_miss_unlock(); 2400 mtx_unlock_spin(&tlbivax_mutex); 2401 2402 if (++count > 4) { 2403 PMAP_UNLOCK(pv->pv_pmap); 2404 break; 2405 } 2406 } 2407 } 2408 PMAP_UNLOCK(pv->pv_pmap); 2409 } 2410 rw_wunlock(&pvh_global_lock); 2411 return (count); 2412} 2413 2414/* 2415 * Change wiring attribute for a map/virtual-address pair. 
2416 */ 2417static void 2418mmu_booke_change_wiring(mmu_t mmu, pmap_t pmap, vm_offset_t va, boolean_t wired) 2419{ 2420 pte_t *pte; 2421 2422 PMAP_LOCK(pmap); 2423 if ((pte = pte_find(mmu, pmap, va)) != NULL) { 2424 if (wired) { 2425 if (!PTE_ISWIRED(pte)) { 2426 pte->flags |= PTE_WIRED; 2427 pmap->pm_stats.wired_count++; 2428 } 2429 } else { 2430 if (PTE_ISWIRED(pte)) { 2431 pte->flags &= ~PTE_WIRED; 2432 pmap->pm_stats.wired_count--; 2433 } 2434 } 2435 } 2436 PMAP_UNLOCK(pmap); 2437} 2438 2439/* 2440 * Return true if the pmap's pv is one of the first 16 pvs linked to from this 2441 * page. This count may be changed upwards or downwards in the future; it is 2442 * only necessary that true be returned for a small subset of pmaps for proper 2443 * page aging. 2444 */ 2445static boolean_t 2446mmu_booke_page_exists_quick(mmu_t mmu, pmap_t pmap, vm_page_t m) 2447{ 2448 pv_entry_t pv; 2449 int loops; 2450 boolean_t rv; 2451 2452 KASSERT((m->oflags & VPO_UNMANAGED) == 0, 2453 ("mmu_booke_page_exists_quick: page %p is not managed", m)); 2454 loops = 0; 2455 rv = FALSE; 2456 rw_wlock(&pvh_global_lock); 2457 TAILQ_FOREACH(pv, &m->md.pv_list, pv_link) { 2458 if (pv->pv_pmap == pmap) { 2459 rv = TRUE; 2460 break; 2461 } 2462 if (++loops >= 16) 2463 break; 2464 } 2465 rw_wunlock(&pvh_global_lock); 2466 return (rv); 2467} 2468 2469/* 2470 * Return the number of managed mappings to the given physical page that are 2471 * wired. 2472 */ 2473static int 2474mmu_booke_page_wired_mappings(mmu_t mmu, vm_page_t m) 2475{ 2476 pv_entry_t pv; 2477 pte_t *pte; 2478 int count = 0; 2479 2480 if ((m->oflags & VPO_UNMANAGED) != 0) 2481 return (count); 2482 rw_wlock(&pvh_global_lock); 2483 TAILQ_FOREACH(pv, &m->md.pv_list, pv_link) { 2484 PMAP_LOCK(pv->pv_pmap); 2485 if ((pte = pte_find(mmu, pv->pv_pmap, pv->pv_va)) != NULL) 2486 if (PTE_ISVALID(pte) && PTE_ISWIRED(pte)) 2487 count++; 2488 PMAP_UNLOCK(pv->pv_pmap); 2489 } 2490 rw_wunlock(&pvh_global_lock); 2491 return (count); 2492} 2493 2494static int 2495mmu_booke_dev_direct_mapped(mmu_t mmu, vm_paddr_t pa, vm_size_t size) 2496{ 2497 int i; 2498 vm_offset_t va; 2499 2500 /* 2501 * This currently does not work for entries that 2502 * overlap TLB1 entries. 2503 */ 2504 for (i = 0; i < tlb1_idx; i ++) { 2505 if (tlb1_iomapped(i, pa, size, &va) == 0) 2506 return (0); 2507 } 2508 2509 return (EFAULT); 2510} 2511 2512vm_offset_t 2513mmu_booke_dumpsys_map(mmu_t mmu, struct pmap_md *md, vm_size_t ofs, 2514 vm_size_t *sz) 2515{ 2516 vm_paddr_t pa, ppa; 2517 vm_offset_t va; 2518 vm_size_t gran; 2519 2520 /* Raw physical memory dumps don't have a virtual address. */ 2521 if (md->md_vaddr == ~0UL) { 2522 /* We always map a 256MB page at 256M. */ 2523 gran = 256 * 1024 * 1024; 2524 pa = md->md_paddr + ofs; 2525 ppa = pa & ~(gran - 1); 2526 ofs = pa - ppa; 2527 va = gran; 2528 tlb1_set_entry(va, ppa, gran, _TLB_ENTRY_IO); 2529 if (*sz > (gran - ofs)) 2530 *sz = gran - ofs; 2531 return (va + ofs); 2532 } 2533 2534 /* Minidumps are based on virtual memory addresses. */ 2535 va = md->md_vaddr + ofs; 2536 if (va >= kernstart + kernsize) { 2537 gran = PAGE_SIZE - (va & PAGE_MASK); 2538 if (*sz > gran) 2539 *sz = gran; 2540 } 2541 return (va); 2542} 2543 2544void 2545mmu_booke_dumpsys_unmap(mmu_t mmu, struct pmap_md *md, vm_size_t ofs, 2546 vm_offset_t va) 2547{ 2548 2549 /* Raw physical memory dumps don't have a virtual address. 
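 * (mmu_booke_dumpsys_map() set up a temporary 256MB TLB1 window for such a
 * dump; it is undone here by invalidating the most recently written TLB1
 * entry.)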
*/ 2550 if (md->md_vaddr == ~0UL) { 2551 tlb1_idx--; 2552 tlb1[tlb1_idx].mas1 = 0; 2553 tlb1[tlb1_idx].mas2 = 0; 2554 tlb1[tlb1_idx].mas3 = 0; 2555 tlb1_write_entry(tlb1_idx); 2556 return; 2557 } 2558 2559 /* Minidumps are based on virtual memory addresses. */ 2560 /* Nothing to do... */ 2561} 2562 2563struct pmap_md * 2564mmu_booke_scan_md(mmu_t mmu, struct pmap_md *prev) 2565{ 2566 static struct pmap_md md; 2567 pte_t *pte; 2568 vm_offset_t va; 2569 2570 if (dumpsys_minidump) { 2571 md.md_paddr = ~0UL; /* Minidumps use virtual addresses. */ 2572 if (prev == NULL) { 2573 /* 1st: kernel .data and .bss. */ 2574 md.md_index = 1; 2575 md.md_vaddr = trunc_page((uintptr_t)_etext); 2576 md.md_size = round_page((uintptr_t)_end) - md.md_vaddr; 2577 return (&md); 2578 } 2579 switch (prev->md_index) { 2580 case 1: 2581 /* 2nd: msgbuf and tables (see pmap_bootstrap()). */ 2582 md.md_index = 2; 2583 md.md_vaddr = data_start; 2584 md.md_size = data_end - data_start; 2585 break; 2586 case 2: 2587 /* 3rd: kernel VM. */ 2588 va = prev->md_vaddr + prev->md_size; 2589 /* Find start of next chunk (from va). */ 2590 while (va < virtual_end) { 2591 /* Don't dump the buffer cache. */ 2592 if (va >= kmi.buffer_sva && 2593 va < kmi.buffer_eva) { 2594 va = kmi.buffer_eva; 2595 continue; 2596 } 2597 pte = pte_find(mmu, kernel_pmap, va); 2598 if (pte != NULL && PTE_ISVALID(pte)) 2599 break; 2600 va += PAGE_SIZE; 2601 } 2602 if (va < virtual_end) { 2603 md.md_vaddr = va; 2604 va += PAGE_SIZE; 2605 /* Find last page in chunk. */ 2606 while (va < virtual_end) { 2607 /* Don't run into the buffer cache. */ 2608 if (va == kmi.buffer_sva) 2609 break; 2610 pte = pte_find(mmu, kernel_pmap, va); 2611 if (pte == NULL || !PTE_ISVALID(pte)) 2612 break; 2613 va += PAGE_SIZE; 2614 } 2615 md.md_size = va - md.md_vaddr; 2616 break; 2617 } 2618 md.md_index = 3; 2619 /* FALLTHROUGH */ 2620 default: 2621 return (NULL); 2622 } 2623 } else { /* Full physical memory dump. */ 2624 mem_regions(&physmem_regions, &physmem_regions_sz, 2625 &availmem_regions, &availmem_regions_sz); 2626 2627 if (prev == NULL) { 2628 /* first physical chunk. */ 2629 md.md_paddr = physmem_regions[0].mr_start; 2630 md.md_size = physmem_regions[0].mr_size; 2631 md.md_vaddr = ~0UL; 2632 md.md_index = 1; 2633 } else if (md.md_index < physmem_regions_sz) { 2634 md.md_paddr = physmem_regions[md.md_index].mr_start; 2635 md.md_size = physmem_regions[md.md_index].mr_size; 2636 md.md_vaddr = ~0UL; 2637 md.md_index++; 2638 } else { 2639 /* There's no next physical chunk. */ 2640 return (NULL); 2641 } 2642 } 2643 2644 return (&md); 2645} 2646 2647/* 2648 * Map a set of physical memory pages into the kernel virtual address space. 2649 * Return a pointer to where it is mapped. This routine is intended to be used 2650 * for mapping device memory, NOT real memory. 2651 */ 2652static void * 2653mmu_booke_mapdev(mmu_t mmu, vm_paddr_t pa, vm_size_t size) 2654{ 2655 2656 return (mmu_booke_mapdev_attr(mmu, pa, size, VM_MEMATTR_DEFAULT)); 2657} 2658 2659static void * 2660mmu_booke_mapdev_attr(mmu_t mmu, vm_paddr_t pa, vm_size_t size, vm_memattr_t ma) 2661{ 2662 void *res; 2663 uintptr_t va; 2664 vm_size_t sz; 2665 int i; 2666 2667 /* 2668 * Check if this is premapped in TLB1. Note: this should probably also 2669 * check whether a sequence of TLB1 entries exist that match the 2670 * requirement, but for now only the easy case is checked.
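 * The easy case: a single valid TLB1 entry that already covers the whole
 * range [pa, pa + size).  When one is found, its existing virtual mapping is
 * reused and the offset of pa within that entry is added.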
2671 */ 2672 if (ma == VM_MEMATTR_DEFAULT) { 2673 for (i = 0; i < tlb1_idx; i++) { 2674 if (!(tlb1[i].mas1 & MAS1_VALID)) 2675 continue; 2676 if (pa >= tlb1[i].phys && 2677 (pa + size) <= (tlb1[i].phys + tlb1[i].size)) 2678 return (void *)(tlb1[i].virt + 2679 (pa - tlb1[i].phys)); 2680 } 2681 } 2682 2683 size = roundup(size, PAGE_SIZE); 2684