pmap.c (187149) | pmap.c (187151)
---|---
1/*- 2 * Copyright (C) 2007-2008 Semihalf, Rafal Jaworowski <raj@semihalf.com> 3 * Copyright (C) 2006 Semihalf, Marian Balakowicz <m8@semihalf.com> 4 * All rights reserved. 5 * 6 * Redistribution and use in source and binary forms, with or without 7 * modification, are permitted provided that the following conditions 8 * are met: 9 * 1. Redistributions of source code must retain the above copyright 10 * notice, this list of conditions and the following disclaimer. 11 * 2. Redistributions in binary form must reproduce the above copyright 12 * notice, this list of conditions and the following disclaimer in the 13 * documentation and/or other materials provided with the distribution. | 1/*- 2 * Copyright (C) 2007-2008 Semihalf, Rafal Jaworowski <raj@semihalf.com> 3 * Copyright (C) 2006 Semihalf, Marian Balakowicz <m8@semihalf.com> 4 * All rights reserved. 5 * 6 * Redistribution and use in source and binary forms, with or without 7 * modification, are permitted provided that the following conditions 8 * are met: 9 * 1. Redistributions of source code must retain the above copyright 10 * notice, this list of conditions and the following disclaimer. 11 * 2. Redistributions in binary form must reproduce the above copyright 12 * notice, this list of conditions and the following disclaimer in the 13 * documentation and/or other materials provided with the distribution. |
14 * 3. The name of the author may not be used to endorse or promote products 15 * derived from this software without specific prior written permission. | |
16 * 17 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR 18 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES 19 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN 20 * NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 21 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED 22 * TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR 23 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF --- 9 unchanged lines hidden (view full) --- 33 /* 34 * VM layout notes: 35 * 36 * Kernel and user threads run within one common virtual address space 37 * defined by AS=0. 38 * 39 * Virtual address space layout: 40 * ----------------------------- | 14 * 15 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR 16 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES 17 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN 18 * NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 19 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED 20 * TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR 21 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF --- 9 unchanged lines hidden (view full) --- 31 /* 32 * VM layout notes: 33 * 34 * Kernel and user threads run within one common virtual address space 35 * defined by AS=0. 36 * 37 * Virtual address space layout: 38 * ----------------------------- |
41 * 0x0000_0000 - 0xbfff_efff : user process 42 * 0xc000_0000 - 0xc1ff_ffff : kernel reserved 43 * 0xc000_0000 - kernelend : kernel code &data 44 * 0xc1ff_c000 - 0xc200_0000 : kstack0 45 * 0xc200_0000 - 0xffef_ffff : KVA 46 * 0xc200_0000 - 0xc200_3fff : reserved for page zero/copy 47 * 0xc200_4000 - ptbl buf end: reserved for ptbl bufs 48 * ptbl buf end- 0xffef_ffff : actual free KVA space 49 * 0xfff0_0000 - 0xffff_ffff : I/O devices region | 39 * 0x0000_0000 - 0xafff_ffff : user process 40 * 0xb000_0000 - 0xbfff_ffff : pmap_mapdev()-ed area (PCI/PCIE etc.) 41 * 0xc000_0000 - 0xc0ff_ffff : kernel reserved 42 * 0xc000_0000 - kernelend : kernel code+data, env, metadata etc. 43 * 0xc100_0000 - 0xfeef_ffff : KVA 44 * 0xc100_0000 - 0xc100_3fff : reserved for page zero/copy 45 * 0xc100_4000 - 0xc200_3fff : reserved for ptbl bufs 46 * 0xc200_4000 - 0xc200_8fff : guard page + kstack0 47 * 0xc200_9000 - 0xfeef_ffff : actual free KVA space 48 * 0xfef0_0000 - 0xffff_ffff : I/O devices region |
50 */ 51 52#include <sys/cdefs.h> | 49 */ 50 51#include <sys/cdefs.h> |
53__FBSDID("$FreeBSD: head/sys/powerpc/booke/pmap.c 187149 2009-01-13 15:41:58Z raj $"); | 52__FBSDID("$FreeBSD: head/sys/powerpc/booke/pmap.c 187151 2009-01-13 16:15:49Z raj $"); |
54 55#include <sys/types.h> 56#include <sys/param.h> 57#include <sys/malloc.h> 58#include <sys/ktr.h> 59#include <sys/proc.h> 60#include <sys/user.h> 61#include <sys/queue.h> --- 315 unchanged lines hidden (view full) --- 377} 378 379/* Initialize pool of kva ptbl buffers. */ 380static void 381ptbl_init(void) 382{ 383 int i; 384 | 53 54#include <sys/types.h> 55#include <sys/param.h> 56#include <sys/malloc.h> 57#include <sys/ktr.h> 58#include <sys/proc.h> 59#include <sys/user.h> 60#include <sys/queue.h> --- 315 unchanged lines hidden (view full) --- 376} 377 378/* Initialize pool of kva ptbl buffers. */ 379static void 380ptbl_init(void) 381{ 382 int i; 383 |
385 //debugf("ptbl_init: s (ptbl_bufs = 0x%08x size 0x%08x)\n", 386 // (u_int32_t)ptbl_bufs, sizeof(struct ptbl_buf) * PTBL_BUFS); 387 //debugf("ptbl_init: s (ptbl_buf_pool_vabase = 0x%08x size = 0x%08x)\n", 388 // ptbl_buf_pool_vabase, PTBL_BUFS * PTBL_PAGES * PAGE_SIZE); | 384 CTR3(KTR_PMAP, "%s: s (ptbl_bufs = 0x%08x size 0x%08x)", __func__, 385 (uint32_t)ptbl_bufs, sizeof(struct ptbl_buf) * PTBL_BUFS); 386 CTR3(KTR_PMAP, "%s: s (ptbl_buf_pool_vabase = 0x%08x size = 0x%08x)", 387 __func__, ptbl_buf_pool_vabase, PTBL_BUFS * PTBL_PAGES * PAGE_SIZE); |
389 390 mtx_init(&ptbl_buf_freelist_lock, "ptbl bufs lock", NULL, MTX_DEF); 391 TAILQ_INIT(&ptbl_buf_freelist); 392 393 for (i = 0; i < PTBL_BUFS; i++) { 394 ptbl_bufs[i].kva = ptbl_buf_pool_vabase + i * PTBL_PAGES * PAGE_SIZE; 395 TAILQ_INSERT_TAIL(&ptbl_buf_freelist, &ptbl_bufs[i], link); 396 } | 388 389 mtx_init(&ptbl_buf_freelist_lock, "ptbl bufs lock", NULL, MTX_DEF); 390 TAILQ_INIT(&ptbl_buf_freelist); 391 392 for (i = 0; i < PTBL_BUFS; i++) { 393 ptbl_bufs[i].kva = ptbl_buf_pool_vabase + i * PTBL_PAGES * PAGE_SIZE; 394 TAILQ_INSERT_TAIL(&ptbl_buf_freelist, &ptbl_bufs[i], link); 395 } |
397 398 //debugf("ptbl_init: e\n"); | |
399} 400 401/* Get a ptbl_buf from the freelist. */ 402static struct ptbl_buf * 403ptbl_buf_alloc(void) 404{ 405 struct ptbl_buf *buf; 406 | 396} 397 398/* Get a ptbl_buf from the freelist. */ 399static struct ptbl_buf * 400ptbl_buf_alloc(void) 401{ 402 struct ptbl_buf *buf; 403 |
407 //debugf("ptbl_buf_alloc: s\n"); 408 | |
409 mtx_lock(&ptbl_buf_freelist_lock); 410 buf = TAILQ_FIRST(&ptbl_buf_freelist); 411 if (buf != NULL) 412 TAILQ_REMOVE(&ptbl_buf_freelist, buf, link); 413 mtx_unlock(&ptbl_buf_freelist_lock); 414 | 404 mtx_lock(&ptbl_buf_freelist_lock); 405 buf = TAILQ_FIRST(&ptbl_buf_freelist); 406 if (buf != NULL) 407 TAILQ_REMOVE(&ptbl_buf_freelist, buf, link); 408 mtx_unlock(&ptbl_buf_freelist_lock); 409 |
415 //debugf("ptbl_buf_alloc: e (buf = 0x%08x)\n", (u_int32_t)buf); | 410 CTR2(KTR_PMAP, "%s: buf = %p", __func__, buf); 411 |
416 return (buf); 417} 418 419 /* Return ptbl buf to free pool. */ 420static void 421ptbl_buf_free(struct ptbl_buf *buf) 422{ 423 --- 134 unchanged lines hidden (view full) --- 558static int 559ptbl_unhold(mmu_t mmu, pmap_t pmap, unsigned int pdir_idx) 560{ 561 pte_t *ptbl; 562 vm_paddr_t pa; 563 vm_page_t m; 564 int i; 565 | 412 return (buf); 413} 414 415 /* Return ptbl buf to free pool. */ 416static void 417ptbl_buf_free(struct ptbl_buf *buf) 418{ 419 --- 134 unchanged lines hidden (view full) --- 554static int 555ptbl_unhold(mmu_t mmu, pmap_t pmap, unsigned int pdir_idx) 556{ 557 pte_t *ptbl; 558 vm_paddr_t pa; 559 vm_page_t m; 560 int i; 561 |
566 //int su = (pmap == kernel_pmap); 567 //debugf("ptbl_unhold: s (pmap = %08x su = %d pdir_idx = %d)\n", 568 // (u_int32_t)pmap, su, pdir_idx); | 562 CTR4(KTR_PMAP, "%s: pmap = %p su = %d pdir_idx = %d", __func__, pmap, 563 (pmap == kernel_pmap), pdir_idx); |
569 570 KASSERT((pdir_idx <= (VM_MAXUSER_ADDRESS / PDIR_SIZE)), 571 ("ptbl_unhold: invalid pdir_idx")); 572 KASSERT((pmap != kernel_pmap), 573 ("ptbl_unhold: unholding kernel ptbl!")); 574 575 ptbl = pmap->pm_pdir[pdir_idx]; 576 577 //debugf("ptbl_unhold: ptbl = 0x%08x\n", (u_int32_t)ptbl); 578 KASSERT(((vm_offset_t)ptbl >= VM_MIN_KERNEL_ADDRESS), 579 ("ptbl_unhold: non kva ptbl")); 580 581 /* decrement hold count */ 582 for (i = 0; i < PTBL_PAGES; i++) { | 564 565 KASSERT((pdir_idx <= (VM_MAXUSER_ADDRESS / PDIR_SIZE)), 566 ("ptbl_unhold: invalid pdir_idx")); 567 KASSERT((pmap != kernel_pmap), 568 ("ptbl_unhold: unholding kernel ptbl!")); 569 570 ptbl = pmap->pm_pdir[pdir_idx]; 571 572 //debugf("ptbl_unhold: ptbl = 0x%08x\n", (u_int32_t)ptbl); 573 KASSERT(((vm_offset_t)ptbl >= VM_MIN_KERNEL_ADDRESS), 574 ("ptbl_unhold: non kva ptbl")); 575 576 /* decrement hold count */ 577 for (i = 0; i < PTBL_PAGES; i++) { |
583 pa = pte_vatopa(mmu, kernel_pmap, (vm_offset_t)ptbl + (i * PAGE_SIZE)); | 578 pa = pte_vatopa(mmu, kernel_pmap, 579 (vm_offset_t)ptbl + (i * PAGE_SIZE)); |
584 m = PHYS_TO_VM_PAGE(pa); 585 m->wire_count--; 586 } 587 588 /* 589 * Free ptbl pages if there are no pte entries in this ptbl. | 580 m = PHYS_TO_VM_PAGE(pa); 581 m->wire_count--; 582 } 583 584 /* 585 * Free ptbl pages if there are no pte entries in this ptbl. |
590 * wire_count has the same value for all ptbl pages, so check 591 * the last page. | 586 * wire_count has the same value for all ptbl pages, so check the last 587 * page. |
592 */ 593 if (m->wire_count == 0) { 594 ptbl_free(mmu, pmap, pdir_idx); 595 596 //debugf("ptbl_unhold: e (freed ptbl)\n"); 597 return (1); 598 } 599 | 588 */ 589 if (m->wire_count == 0) { 590 ptbl_free(mmu, pmap, pdir_idx); 591 592 //debugf("ptbl_unhold: e (freed ptbl)\n"); 593 return (1); 594 } 595 |
600 //debugf("ptbl_unhold: e\n"); | |
601 return (0); 602} 603 604/* | 596 return (0); 597} 598 599/* |
605 * Increment hold count for ptbl pages. This routine is used when 606 * new pte entry is being inserted into ptbl. | 600 * Increment hold count for ptbl pages. This routine is used when a new pte 601 * entry is being inserted into the ptbl. |
607 */ 608static void 609ptbl_hold(mmu_t mmu, pmap_t pmap, unsigned int pdir_idx) 610{ 611 vm_paddr_t pa; 612 pte_t *ptbl; 613 vm_page_t m; 614 int i; 615 | 602 */ 603static void 604ptbl_hold(mmu_t mmu, pmap_t pmap, unsigned int pdir_idx) 605{ 606 vm_paddr_t pa; 607 pte_t *ptbl; 608 vm_page_t m; 609 int i; 610 |
616 //debugf("ptbl_hold: s (pmap = 0x%08x pdir_idx = %d)\n", (u_int32_t)pmap, pdir_idx); | 611 CTR3(KTR_PMAP, "%s: pmap = %p pdir_idx = %d", __func__, pmap, 612 pdir_idx); |
617 618 KASSERT((pdir_idx <= (VM_MAXUSER_ADDRESS / PDIR_SIZE)), 619 ("ptbl_hold: invalid pdir_idx")); 620 KASSERT((pmap != kernel_pmap), 621 ("ptbl_hold: holding kernel ptbl!")); 622 623 ptbl = pmap->pm_pdir[pdir_idx]; 624 625 KASSERT((ptbl != NULL), ("ptbl_hold: null ptbl")); 626 627 for (i = 0; i < PTBL_PAGES; i++) { | 613 614 KASSERT((pdir_idx <= (VM_MAXUSER_ADDRESS / PDIR_SIZE)), 615 ("ptbl_hold: invalid pdir_idx")); 616 KASSERT((pmap != kernel_pmap), 617 ("ptbl_hold: holding kernel ptbl!")); 618 619 ptbl = pmap->pm_pdir[pdir_idx]; 620 621 KASSERT((ptbl != NULL), ("ptbl_hold: null ptbl")); 622 623 for (i = 0; i < PTBL_PAGES; i++) { |
628 pa = pte_vatopa(mmu, kernel_pmap, (vm_offset_t)ptbl + (i * PAGE_SIZE)); | 624 pa = pte_vatopa(mmu, kernel_pmap, 625 (vm_offset_t)ptbl + (i * PAGE_SIZE)); |
629 m = PHYS_TO_VM_PAGE(pa); 630 m->wire_count++; 631 } | 626 m = PHYS_TO_VM_PAGE(pa); 627 m->wire_count++; 628 } |
632 633 //debugf("ptbl_hold: e\n"); | |
634} 635 636/* Allocate pv_entry structure. */ 637pv_entry_t 638pv_alloc(void) 639{ 640 pv_entry_t pv; 641 | 629} 630 631/* Allocate pv_entry structure. */ 632pv_entry_t 633pv_alloc(void) 634{ 635 pv_entry_t pv; 636 |
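The ptbl_hold()/ptbl_unhold() pair above implements reference counting by borrowing each backing vm_page's wire_count: the count tracks valid PTEs, is kept identical across all PTBL_PAGES pages of one table, and the table is freed once it drops to zero. A simplified sketch of that convention (plain struct, not the kernel's vm_page):

```c
struct ptbl_page {
	unsigned int wire_count;	/* valid PTEs referencing this ptbl */
};

static void
ptbl_hold_sketch(struct ptbl_page *pages, int npages)
{
	int i;

	for (i = 0; i < npages; i++)
		pages[i].wire_count++;		/* one more valid PTE */
}

static int
ptbl_unhold_sketch(struct ptbl_page *pages, int npages)
{
	int i;

	for (i = 0; i < npages; i++)
		pages[i].wire_count--;		/* one fewer valid PTE */

	/* All pages carry the same count, so the last one is decisive. */
	return (pages[npages - 1].wire_count == 0);	/* 1: caller frees */
}
```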
642 debugf("pv_alloc: s\n"); 643 | |
644 pv_entry_count++; | 637 pv_entry_count++; |
645 if ((pv_entry_count > pv_entry_high_water) && (pagedaemon_waken == 0)) { | 638 if ((pv_entry_count > pv_entry_high_water) && 639 (pagedaemon_waken == 0)) { |
646 pagedaemon_waken = 1; | 640 pagedaemon_waken = 1; |
647 wakeup (&vm_pages_needed); | 641 wakeup(&vm_pages_needed); |
648 } 649 pv = uma_zalloc(pvzone, M_NOWAIT); 650 | 642 } 643 pv = uma_zalloc(pvzone, M_NOWAIT); 644 |
651 debugf("pv_alloc: e\n"); | |
652 return (pv); 653} 654 655/* Free pv_entry structure. */ 656static __inline void 657pv_free(pv_entry_t pve) 658{ | 645 return (pv); 646} 647 648/* Free pv_entry structure. */ 649static __inline void 650pv_free(pv_entry_t pve) 651{ |
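pv_alloc() above shows a common VM-layer pattern: allocations are counted against a high-water mark, and crossing it wakes the pagedaemon so reclamation starts before the non-sleeping zone allocation begins to fail. A hedged sketch; the counters mirror the file's names, but the helpers are hypothetical stand-ins for uma_zalloc() and wakeup():

```c
struct pv_entry;
static void wake_pagedaemon(void);		/* hypothetical: wakeup(&vm_pages_needed) */
static struct pv_entry *zalloc_nowait(void);	/* hypothetical: uma_zalloc(pvzone, M_NOWAIT) */

static int pv_entry_count;
static int pv_entry_high_water = 9000;		/* assumed threshold */
static int pagedaemon_waken;

static struct pv_entry *
pv_alloc_sketch(void)
{

	pv_entry_count++;
	if (pv_entry_count > pv_entry_high_water && pagedaemon_waken == 0) {
		pagedaemon_waken = 1;		/* wake the reclaimer once */
		wake_pagedaemon();
	}
	return (zalloc_nowait());		/* may return NULL */
}
```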
659 //debugf("pv_free: s\n"); | |
660 661 pv_entry_count--; 662 uma_zfree(pvzone, pve); | 652 653 pv_entry_count--; 654 uma_zfree(pvzone, pve); |
663 664 //debugf("pv_free: e\n"); | |
665} 666 667 668/* Allocate and initialize pv_entry structure. */ 669static void 670pv_insert(pmap_t pmap, vm_offset_t va, vm_page_t m) 671{ 672 pv_entry_t pve; --- 35 unchanged lines hidden (view full) --- 708 if ((pmap == pve->pv_pmap) && (va == pve->pv_va)) { 709 /* remove from pv_list */ 710 TAILQ_REMOVE(&m->md.pv_list, pve, pv_link); 711 if (TAILQ_EMPTY(&m->md.pv_list)) 712 vm_page_flag_clear(m, PG_WRITEABLE); 713 714 /* free pv entry struct */ 715 pv_free(pve); | 655} 656 657 658/* Allocate and initialize pv_entry structure. */ 659static void 660pv_insert(pmap_t pmap, vm_offset_t va, vm_page_t m) 661{ 662 pv_entry_t pve; --- 35 unchanged lines hidden (view full) --- 698 if ((pmap == pve->pv_pmap) && (va == pve->pv_va)) { 699 /* remove from pv_list */ 700 TAILQ_REMOVE(&m->md.pv_list, pve, pv_link); 701 if (TAILQ_EMPTY(&m->md.pv_list)) 702 vm_page_flag_clear(m, PG_WRITEABLE); 703 704 /* free pv entry struct */ 705 pv_free(pve); |
716 | |
717 break; 718 } 719 } 720 721 //debugf("pv_remove: e\n"); 722} 723 724/* 725 * Clean pte entry, try to free page table page if requested. 726 * 727 * Return 1 if ptbl pages were freed, otherwise return 0. 728 */ 729static int | 706 break; 707 } 708 } 709 710 //debugf("pv_remove: e\n"); 711} 712 713/* 714 * Clean pte entry, try to free page table page if requested. 715 * 716 * Return 1 if ptbl pages were freed, otherwise return 0. 717 */ 718static int |
730pte_remove(mmu_t mmu, pmap_t pmap, vm_offset_t va, u_int8_t flags) | 719pte_remove(mmu_t mmu, pmap_t pmap, vm_offset_t va, uint8_t flags) |
731{ 732 unsigned int pdir_idx = PDIR_IDX(va); 733 unsigned int ptbl_idx = PTBL_IDX(va); 734 vm_page_t m; 735 pte_t *ptbl; 736 pte_t *pte; 737 738 //int su = (pmap == kernel_pmap); --- 259 unchanged lines hidden (view full) --- 998 debugf(" kernelstart = 0x%08x\n", kernelstart); 999 debugf(" kernelend = 0x%08x\n", kernelend); 1000 debugf(" kernel size = 0x%08x\n", kernelend - kernelstart); 1001 1002 if (sizeof(phys_avail) / sizeof(phys_avail[0]) < availmem_regions_sz) 1003 panic("mmu_booke_bootstrap: phys_avail too small"); 1004 1005 /* | 720{ 721 unsigned int pdir_idx = PDIR_IDX(va); 722 unsigned int ptbl_idx = PTBL_IDX(va); 723 vm_page_t m; 724 pte_t *ptbl; 725 pte_t *pte; 726 727 //int su = (pmap == kernel_pmap); --- 259 unchanged lines hidden (view full) --- 987 debugf(" kernelstart = 0x%08x\n", kernelstart); 988 debugf(" kernelend = 0x%08x\n", kernelend); 989 debugf(" kernel size = 0x%08x\n", kernelend - kernelstart); 990 991 if (sizeof(phys_avail) / sizeof(phys_avail[0]) < availmem_regions_sz) 992 panic("mmu_booke_bootstrap: phys_avail too small"); 993 994 /* |
1006 * Removed kernel physical address range from avail 1007 * regions list. Page align all regions. 1008 * Non-page aligned memory isn't very interesting to us. 1009 * Also, sort the entries for ascending addresses. | 995 * Remove kernel physical address range from avail regions list. Page 996 * align all regions. Non-page aligned memory isn't very interesting 997 * to us. Also, sort the entries for ascending addresses. |
1010 */ 1011 sz = 0; 1012 cnt = availmem_regions_sz; 1013 debugf("processing avail regions:\n"); 1014 for (mp = availmem_regions; mp->mr_size; mp++) { 1015 s = mp->mr_start; 1016 e = mp->mr_start + mp->mr_size; 1017 debugf(" %08x-%08x -> ", s, e); --- 66 unchanged lines hidden (view full) --- 1084 hwphyssz = 0; 1085 TUNABLE_ULONG_FETCH("hw.physmem", (u_long *) &hwphyssz); 1086 1087 debugf("fill in phys_avail:\n"); 1088 for (i = 0, j = 0; i < availmem_regions_sz; i++, j += 2) { 1089 1090 debugf(" region: 0x%08x - 0x%08x (0x%08x)\n", 1091 availmem_regions[i].mr_start, | 998 */ 999 sz = 0; 1000 cnt = availmem_regions_sz; 1001 debugf("processing avail regions:\n"); 1002 for (mp = availmem_regions; mp->mr_size; mp++) { 1003 s = mp->mr_start; 1004 e = mp->mr_start + mp->mr_size; 1005 debugf(" %08x-%08x -> ", s, e); --- 66 unchanged lines hidden (view full) --- 1072 hwphyssz = 0; 1073 TUNABLE_ULONG_FETCH("hw.physmem", (u_long *) &hwphyssz); 1074 1075 debugf("fill in phys_avail:\n"); 1076 for (i = 0, j = 0; i < availmem_regions_sz; i++, j += 2) { 1077 1078 debugf(" region: 0x%08x - 0x%08x (0x%08x)\n", 1079 availmem_regions[i].mr_start, |
1092 availmem_regions[i].mr_start + availmem_regions[i].mr_size, | 1080 availmem_regions[i].mr_start + 1081 availmem_regions[i].mr_size, |
1093 availmem_regions[i].mr_size); 1094 1095 if (hwphyssz != 0 && 1096 (physsz + availmem_regions[i].mr_size) >= hwphyssz) { 1097 debugf(" hw.physmem adjust\n"); 1098 if (physsz < hwphyssz) { 1099 phys_avail[j] = availmem_regions[i].mr_start; 1100 phys_avail[j + 1] = --- 15 unchanged lines hidden (view full) --- 1116 1117 /* Calculate the last available physical address. */ 1118 for (i = 0; phys_avail[i + 2] != 0; i += 2) 1119 ; 1120 Maxmem = powerpc_btop(phys_avail[i + 1]); 1121 1122 debugf("Maxmem = 0x%08lx\n", Maxmem); 1123 debugf("phys_avail_count = %d\n", phys_avail_count); | 1082 availmem_regions[i].mr_size); 1083 1084 if (hwphyssz != 0 && 1085 (physsz + availmem_regions[i].mr_size) >= hwphyssz) { 1086 debugf(" hw.physmem adjust\n"); 1087 if (physsz < hwphyssz) { 1088 phys_avail[j] = availmem_regions[i].mr_start; 1089 phys_avail[j + 1] = --- 15 unchanged lines hidden (view full) --- 1105 1106 /* Calculate the last available physical address. */ 1107 for (i = 0; phys_avail[i + 2] != 0; i += 2) 1108 ; 1109 Maxmem = powerpc_btop(phys_avail[i + 1]); 1110 1111 debugf("Maxmem = 0x%08lx\n", Maxmem); 1112 debugf("phys_avail_count = %d\n", phys_avail_count); |
1124 debugf("physsz = 0x%08x physmem = %ld (0x%08lx)\n", physsz, physmem, physmem); | 1113 debugf("physsz = 0x%08x physmem = %ld (0x%08lx)\n", physsz, physmem, 1114 physmem); |
1125 1126 /*******************************************************/ 1127 /* Initialize (statically allocated) kernel pmap. */ 1128 /*******************************************************/ 1129 PMAP_LOCK_INIT(kernel_pmap); 1130 kptbl_min = VM_MIN_KERNEL_ADDRESS / PDIR_SIZE; 1131 1132 debugf("kernel_pmap = 0x%08x\n", (uint32_t)kernel_pmap); --- 73 unchanged lines hidden (view full) --- 1206 * Called by vm_init, to initialize any structures that the pmap 1207 * system needs to map virtual memory. 1208 */ 1209static void 1210mmu_booke_init(mmu_t mmu) 1211{ 1212 int shpgperproc = PMAP_SHPGPERPROC; 1213 | 1115 1116 /*******************************************************/ 1117 /* Initialize (statically allocated) kernel pmap. */ 1118 /*******************************************************/ 1119 PMAP_LOCK_INIT(kernel_pmap); 1120 kptbl_min = VM_MIN_KERNEL_ADDRESS / PDIR_SIZE; 1121 1122 debugf("kernel_pmap = 0x%08x\n", (uint32_t)kernel_pmap); --- 73 unchanged lines hidden (view full) --- 1196 * Called by vm_init, to initialize any structures that the pmap 1197 * system needs to map virtual memory. 1198 */ 1199static void 1200mmu_booke_init(mmu_t mmu) 1201{ 1202 int shpgperproc = PMAP_SHPGPERPROC; 1203 |
1214 //debugf("mmu_booke_init: s\n"); 1215 | |
1216 /* 1217 * Initialize the address space (zone) for the pv entries. Set a 1218 * high water mark so that the system can recover from excessive 1219 * numbers of pv entries. 1220 */ 1221 pvzone = uma_zcreate("PV ENTRY", sizeof(struct pv_entry), NULL, NULL, 1222 NULL, NULL, UMA_ALIGN_PTR, UMA_ZONE_VM | UMA_ZONE_NOFREE); 1223 --- 5 unchanged lines hidden (view full) --- 1229 1230 uma_zone_set_obj(pvzone, &pvzone_obj, pv_entry_max); 1231 1232 /* Pre-fill pvzone with initial number of pv entries. */ 1233 uma_prealloc(pvzone, PV_ENTRY_ZONE_MIN); 1234 1235 /* Initialize ptbl allocation. */ 1236 ptbl_init(); | 1204 /* 1205 * Initialize the address space (zone) for the pv entries. Set a 1206 * high water mark so that the system can recover from excessive 1207 * numbers of pv entries. 1208 */ 1209 pvzone = uma_zcreate("PV ENTRY", sizeof(struct pv_entry), NULL, NULL, 1210 NULL, NULL, UMA_ALIGN_PTR, UMA_ZONE_VM | UMA_ZONE_NOFREE); 1211 --- 5 unchanged lines hidden (view full) --- 1217 1218 uma_zone_set_obj(pvzone, &pvzone_obj, pv_entry_max); 1219 1220 /* Pre-fill pvzone with initial number of pv entries. */ 1221 uma_prealloc(pvzone, PV_ENTRY_ZONE_MIN); 1222 1223 /* Initialize ptbl allocation. */ 1224 ptbl_init(); |
1237 1238 //debugf("mmu_booke_init: e\n"); | |
1239} 1240 1241/* 1242 * Map a list of wired pages into kernel virtual address space. This is 1243 * intended for temporary mappings which do not need page modification or 1244 * references recorded. Existing mappings in the region are overwritten. 1245 */ 1246static void 1247mmu_booke_qenter(mmu_t mmu, vm_offset_t sva, vm_page_t *m, int count) 1248{ 1249 vm_offset_t va; 1250 | 1225} 1226 1227/* 1228 * Map a list of wired pages into kernel virtual address space. This is 1229 * intended for temporary mappings which do not need page modification or 1230 * references recorded. Existing mappings in the region are overwritten. 1231 */ 1232static void 1233mmu_booke_qenter(mmu_t mmu, vm_offset_t sva, vm_page_t *m, int count) 1234{ 1235 vm_offset_t va; 1236 |
1251 //debugf("mmu_booke_qenter: s (sva = 0x%08x count = %d)\n", sva, count); 1252 | |
1253 va = sva; 1254 while (count-- > 0) { 1255 mmu_booke_kenter(mmu, va, VM_PAGE_TO_PHYS(*m)); 1256 va += PAGE_SIZE; 1257 m++; 1258 } | 1237 va = sva; 1238 while (count-- > 0) { 1239 mmu_booke_kenter(mmu, va, VM_PAGE_TO_PHYS(*m)); 1240 va += PAGE_SIZE; 1241 m++; 1242 } |
1259 1260 //debugf("mmu_booke_qenter: e\n"); | |
1261} 1262 1263/* 1264 * Remove page mappings from kernel virtual address space. Intended for 1265 * temporary mappings entered by mmu_booke_qenter. 1266 */ 1267static void 1268mmu_booke_qremove(mmu_t mmu, vm_offset_t sva, int count) 1269{ 1270 vm_offset_t va; 1271 | 1243} 1244 1245/* 1246 * Remove page mappings from kernel virtual address space. Intended for 1247 * temporary mappings entered by mmu_booke_qenter. 1248 */ 1249static void 1250mmu_booke_qremove(mmu_t mmu, vm_offset_t sva, int count) 1251{ 1252 vm_offset_t va; 1253 |
1272 //debugf("mmu_booke_qremove: s (sva = 0x%08x count = %d)\n", sva, count); 1273 | |
1274 va = sva; 1275 while (count-- > 0) { 1276 mmu_booke_kremove(mmu, va); 1277 va += PAGE_SIZE; 1278 } | 1254 va = sva; 1255 while (count-- > 0) { 1256 mmu_booke_kremove(mmu, va); 1257 va += PAGE_SIZE; 1258 } |
1279 1280 //debugf("mmu_booke_qremove: e\n"); | |
1281} 1282 1283/* 1284 * Map a wired page into kernel virtual address space. 1285 */ 1286static void 1287mmu_booke_kenter(mmu_t mmu, vm_offset_t va, vm_offset_t pa) 1288{ 1289 unsigned int pdir_idx = PDIR_IDX(va); 1290 unsigned int ptbl_idx = PTBL_IDX(va); | 1259} 1260 1261/* 1262 * Map a wired page into kernel virtual address space. 1263 */ 1264static void 1265mmu_booke_kenter(mmu_t mmu, vm_offset_t va, vm_offset_t pa) 1266{ 1267 unsigned int pdir_idx = PDIR_IDX(va); 1268 unsigned int ptbl_idx = PTBL_IDX(va); |
1291 u_int32_t flags; | 1269 uint32_t flags; |
1292 pte_t *pte; 1293 | 1270 pte_t *pte; 1271 |
1294 //debugf("mmu_booke_kenter: s (pdir_idx = %d ptbl_idx = %d va=0x%08x pa=0x%08x)\n", 1295 // pdir_idx, ptbl_idx, va, pa); | 1272 KASSERT(((va >= VM_MIN_KERNEL_ADDRESS) && 1273 (va <= VM_MAX_KERNEL_ADDRESS)), ("mmu_booke_kenter: invalid va")); |
1296 | 1274 |
1297 KASSERT(((va >= VM_MIN_KERNEL_ADDRESS) && (va <= VM_MAX_KERNEL_ADDRESS)), 1298 ("mmu_booke_kenter: invalid va")); 1299 | |
1300#if 0 1301 /* assume IO mapping, set I, G bits */ 1302 flags = (PTE_G | PTE_I | PTE_FAKE); 1303 1304 /* if mapping is within system memory, do not set I, G bits */ 1305 for (i = 0; i < totalmem_regions_sz; i++) { 1306 if ((pa >= totalmem_regions[i].mr_start) && 1307 (pa < (totalmem_regions[i].mr_start + --- 72 unchanged lines hidden (view full) --- 1380} 1381 1382/* 1383 * Initialize pmap associated with process 0. 1384 */ 1385static void 1386mmu_booke_pinit0(mmu_t mmu, pmap_t pmap) 1387{ | 1275#if 0 1276 /* assume IO mapping, set I, G bits */ 1277 flags = (PTE_G | PTE_I | PTE_FAKE); 1278 1279 /* if mapping is within system memory, do not set I, G bits */ 1280 for (i = 0; i < totalmem_regions_sz; i++) { 1281 if ((pa >= totalmem_regions[i].mr_start) && 1282 (pa < (totalmem_regions[i].mr_start + --- 72 unchanged lines hidden (view full) --- 1355} 1356 1357/* 1358 * Initialize pmap associated with process 0. 1359 */ 1360static void 1361mmu_booke_pinit0(mmu_t mmu, pmap_t pmap) 1362{ |
1388 //debugf("mmu_booke_pinit0: s (pmap = 0x%08x)\n", (u_int32_t)pmap); | 1363 |
1389 mmu_booke_pinit(mmu, pmap); 1390 PCPU_SET(curpmap, pmap); | 1364 mmu_booke_pinit(mmu, pmap); 1365 PCPU_SET(curpmap, pmap); |
1391 //debugf("mmu_booke_pinit0: e\n"); | |
1392} 1393 1394/* 1395 * Initialize a preallocated and zeroed pmap structure, 1396 * such as one in a vmspace structure. 1397 */ 1398static void 1399mmu_booke_pinit(mmu_t mmu, pmap_t pmap) --- 18 unchanged lines hidden (view full) --- 1418 * Release any resources held by the given physical map. 1419 * Called when a pmap initialized by mmu_booke_pinit is being released. 1420 * Should only be called if the map contains no valid mappings. 1421 */ 1422static void 1423mmu_booke_release(mmu_t mmu, pmap_t pmap) 1424{ 1425 | 1366} 1367 1368/* 1369 * Initialize a preallocated and zeroed pmap structure, 1370 * such as one in a vmspace structure. 1371 */ 1372static void 1373mmu_booke_pinit(mmu_t mmu, pmap_t pmap) --- 18 unchanged lines hidden (view full) --- 1392 * Release any resources held by the given physical map. 1393 * Called when a pmap initialized by mmu_booke_pinit is being released. 1394 * Should only be called if the map contains no valid mappings. 1395 */ 1396static void 1397mmu_booke_release(mmu_t mmu, pmap_t pmap) 1398{ 1399 |
1426 //debugf("mmu_booke_release: s\n"); | 1400 printf("mmu_booke_release: s\n"); |
1427 | 1401 |
1428 PMAP_LOCK_DESTROY(pmap); | 1402 KASSERT(pmap->pm_stats.resident_count == 0, 1403 ("pmap_release: pmap resident count %ld != 0", 1404 pmap->pm_stats.resident_count)); |
1429 | 1405 |
1430 //debugf("mmu_booke_release: e\n"); | 1406 PMAP_LOCK_DESTROY(pmap); |
1431} 1432 1433#if 0 1434/* Not needed, kernel page tables are statically allocated. */ 1435void 1436mmu_booke_growkernel(vm_offset_t maxkvaddr) 1437{ 1438} 1439#endif 1440 1441/* 1442 * Insert the given physical page at the specified virtual address in the 1443 * target physical map with the protection requested. If specified the page 1444 * will be wired down. 1445 */ 1446static void 1447mmu_booke_enter(mmu_t mmu, pmap_t pmap, vm_offset_t va, vm_page_t m, 1448 vm_prot_t prot, boolean_t wired) 1449{ | 1407} 1408 1409#if 0 1410/* Not needed, kernel page tables are statically allocated. */ 1411void 1412mmu_booke_growkernel(vm_offset_t maxkvaddr) 1413{ 1414} 1415#endif 1416 1417/* 1418 * Insert the given physical page at the specified virtual address in the 1419 * target physical map with the protection requested. If specified the page 1420 * will be wired down. 1421 */ 1422static void 1423mmu_booke_enter(mmu_t mmu, pmap_t pmap, vm_offset_t va, vm_page_t m, 1424 vm_prot_t prot, boolean_t wired) 1425{ |
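Note the change in mmu_booke_release() above: r187151 adds a KASSERT that the resident count is zero before destroying the pmap lock (and leaves a stray printf behind). KASSERT panics on a false condition in kernels built with `options INVARIANTS` and compiles to nothing otherwise, which makes it the idiomatic way to encode such release-path invariants. A trivial sketch of the style:

```c
#include <sys/param.h>
#include <sys/systm.h>		/* KASSERT() */

static void
release_sketch(long resident_count)
{

	KASSERT(resident_count == 0,
	    ("release: %ld resident pages still mapped", resident_count));
}
```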
1426 |
|
1450 vm_page_lock_queues(); 1451 PMAP_LOCK(pmap); 1452 mmu_booke_enter_locked(mmu, pmap, va, m, prot, wired); 1453 vm_page_unlock_queues(); 1454 PMAP_UNLOCK(pmap); 1455} 1456 1457static void 1458mmu_booke_enter_locked(mmu_t mmu, pmap_t pmap, vm_offset_t va, vm_page_t m, 1459 vm_prot_t prot, boolean_t wired) 1460{ 1461 pte_t *pte; 1462 vm_paddr_t pa; | 1427 vm_page_lock_queues(); 1428 PMAP_LOCK(pmap); 1429 mmu_booke_enter_locked(mmu, pmap, va, m, prot, wired); 1430 vm_page_unlock_queues(); 1431 PMAP_UNLOCK(pmap); 1432} 1433 1434static void 1435mmu_booke_enter_locked(mmu_t mmu, pmap_t pmap, vm_offset_t va, vm_page_t m, 1436 vm_prot_t prot, boolean_t wired) 1437{ 1438 pte_t *pte; 1439 vm_paddr_t pa; |
1463 u_int32_t flags; | 1440 uint32_t flags; |
1464 int su, sync; 1465 1466 pa = VM_PAGE_TO_PHYS(m); 1467 su = (pmap == kernel_pmap); 1468 sync = 0; 1469 1470 //debugf("mmu_booke_enter_locked: s (pmap=0x%08x su=%d tid=%d m=0x%08x va=0x%08x " 1471 // "pa=0x%08x prot=0x%08x wired=%d)\n", 1472 // (u_int32_t)pmap, su, pmap->pm_tid, 1473 // (u_int32_t)m, va, pa, prot, wired); 1474 1475 if (su) { | 1441 int su, sync; 1442 1443 pa = VM_PAGE_TO_PHYS(m); 1444 su = (pmap == kernel_pmap); 1445 sync = 0; 1446 1447 //debugf("mmu_booke_enter_locked: s (pmap=0x%08x su=%d tid=%d m=0x%08x va=0x%08x " 1448 // "pa=0x%08x prot=0x%08x wired=%d)\n", 1449 // (u_int32_t)pmap, su, pmap->pm_tid, 1450 // (u_int32_t)m, va, pa, prot, wired); 1451 1452 if (su) { |
1476 KASSERT(((va >= virtual_avail) && (va <= VM_MAX_KERNEL_ADDRESS)), 1477 ("mmu_booke_enter_locked: kernel pmap, non kernel va")); | 1453 KASSERT(((va >= virtual_avail) && 1454 (va <= VM_MAX_KERNEL_ADDRESS)), 1455 ("mmu_booke_enter_locked: kernel pmap, non kernel va")); |
1478 } else { 1479 KASSERT((va <= VM_MAXUSER_ADDRESS), | 1456 } else { 1457 KASSERT((va <= VM_MAXUSER_ADDRESS), |
1480 ("mmu_booke_enter_locked: user pmap, non user va")); | 1458 ("mmu_booke_enter_locked: user pmap, non user va")); |
1481 } 1482 1483 PMAP_LOCK_ASSERT(pmap, MA_OWNED); 1484 1485 /* 1486 * If there is an existing mapping, and the physical address has not 1487 * changed, must be protection or wiring change. 1488 */ --- 122 unchanged lines hidden (view full) --- 1611 KASSERT(pte == NULL, ("%s:%d", __func__, __LINE__)); 1612 1613 flags = PTE_SR | PTE_VALID | PTE_UR | PTE_M; 1614 1615 pte_enter(mmu, pmap, m, va, flags); 1616 __syncicache((void *)va, PAGE_SIZE); 1617 pte_remove(mmu, pmap, va, PTBL_UNHOLD); 1618 } | 1459 } 1460 1461 PMAP_LOCK_ASSERT(pmap, MA_OWNED); 1462 1463 /* 1464 * If there is an existing mapping, and the physical address has not 1465 * changed, must be protection or wiring change. 1466 */ --- 122 unchanged lines hidden (view full) --- 1589 KASSERT(pte == NULL, ("%s:%d", __func__, __LINE__)); 1590 1591 flags = PTE_SR | PTE_VALID | PTE_UR | PTE_M; 1592 1593 pte_enter(mmu, pmap, m, va, flags); 1594 __syncicache((void *)va, PAGE_SIZE); 1595 pte_remove(mmu, pmap, va, PTBL_UNHOLD); 1596 } |
1619 1620 //debugf("mmu_booke_enter_locked: e\n"); | |
1621} 1622 1623/* 1624 * Maps a sequence of resident pages belonging to the same object. 1625 * The sequence begins with the given page m_start. This page is 1626 * mapped at the given virtual address start. Each subsequent page is 1627 * mapped at a virtual address that is offset from start by the same 1628 * amount as the page is offset from m_start within the object. The --- 9 unchanged lines hidden (view full) --- 1638{ 1639 vm_page_t m; 1640 vm_pindex_t diff, psize; 1641 1642 psize = atop(end - start); 1643 m = m_start; 1644 PMAP_LOCK(pmap); 1645 while (m != NULL && (diff = m->pindex - m_start->pindex) < psize) { | 1597} 1598 1599/* 1600 * Maps a sequence of resident pages belonging to the same object. 1601 * The sequence begins with the given page m_start. This page is 1602 * mapped at the given virtual address start. Each subsequent page is 1603 * mapped at a virtual address that is offset from start by the same 1604 * amount as the page is offset from m_start within the object. The --- 9 unchanged lines hidden (view full) --- 1614{ 1615 vm_page_t m; 1616 vm_pindex_t diff, psize; 1617 1618 psize = atop(end - start); 1619 m = m_start; 1620 PMAP_LOCK(pmap); 1621 while (m != NULL && (diff = m->pindex - m_start->pindex) < psize) { |
1646 mmu_booke_enter_locked(mmu, pmap, start + ptoa(diff), m, prot & 1647 (VM_PROT_READ | VM_PROT_EXECUTE), FALSE); | 1622 mmu_booke_enter_locked(mmu, pmap, start + ptoa(diff), m, 1623 prot & (VM_PROT_READ | VM_PROT_EXECUTE), FALSE); |
1648 m = TAILQ_NEXT(m, listq); 1649 } 1650 PMAP_UNLOCK(pmap); 1651} 1652 1653static void 1654mmu_booke_enter_quick(mmu_t mmu, pmap_t pmap, vm_offset_t va, vm_page_t m, 1655 vm_prot_t prot) 1656{ 1657 | 1624 m = TAILQ_NEXT(m, listq); 1625 } 1626 PMAP_UNLOCK(pmap); 1627} 1628 1629static void 1630mmu_booke_enter_quick(mmu_t mmu, pmap_t pmap, vm_offset_t va, vm_page_t m, 1631 vm_prot_t prot) 1632{ 1633 |
1658 //debugf("mmu_booke_enter_quick: s\n"); 1659 | |
1660 PMAP_LOCK(pmap); 1661 mmu_booke_enter_locked(mmu, pmap, va, m, 1662 prot & (VM_PROT_READ | VM_PROT_EXECUTE), FALSE); 1663 PMAP_UNLOCK(pmap); | 1634 PMAP_LOCK(pmap); 1635 mmu_booke_enter_locked(mmu, pmap, va, m, 1636 prot & (VM_PROT_READ | VM_PROT_EXECUTE), FALSE); 1637 PMAP_UNLOCK(pmap); |
1664 1665 //debugf("mmu_booke_enter_quick e\n"); | |
1666} 1667 1668/* 1669 * Remove the given range of addresses from the specified map. 1670 * 1671 * It is assumed that the start and end are properly rounded to the page size. 1672 */ 1673static void 1674mmu_booke_remove(mmu_t mmu, pmap_t pmap, vm_offset_t va, vm_offset_t endva) 1675{ 1676 pte_t *pte; | 1638} 1639 1640/* 1641 * Remove the given range of addresses from the specified map. 1642 * 1643 * It is assumed that the start and end are properly rounded to the page size. 1644 */ 1645static void 1646mmu_booke_remove(mmu_t mmu, pmap_t pmap, vm_offset_t va, vm_offset_t endva) 1647{ 1648 pte_t *pte; |
1677 u_int8_t hold_flag; | 1649 uint8_t hold_flag; |
1678 1679 int su = (pmap == kernel_pmap); 1680 1681 //debugf("mmu_booke_remove: s (su = %d pmap=0x%08x tid=%d va=0x%08x endva=0x%08x)\n", 1682 // su, (u_int32_t)pmap, pmap->pm_tid, va, endva); 1683 1684 if (su) { | 1650 1651 int su = (pmap == kernel_pmap); 1652 1653 //debugf("mmu_booke_remove: s (su = %d pmap=0x%08x tid=%d va=0x%08x endva=0x%08x)\n", 1654 // su, (u_int32_t)pmap, pmap->pm_tid, va, endva); 1655 1656 if (su) { |
1685 KASSERT(((va >= virtual_avail) && (va <= VM_MAX_KERNEL_ADDRESS)), 1686 ("mmu_booke_enter: kernel pmap, non kernel va")); | 1657 KASSERT(((va >= virtual_avail) && 1658 (va <= VM_MAX_KERNEL_ADDRESS)), 1659 ("mmu_booke_remove: kernel pmap, non kernel va")); |
1687 } else { 1688 KASSERT((va <= VM_MAXUSER_ADDRESS), | 1660 } else { 1661 KASSERT((va <= VM_MAXUSER_ADDRESS), |
1689 ("mmu_booke_enter: user pmap, non user va")); | 1662 ("mmu_booke_remove: user pmap, non user va")); |
1690 } 1691 1692 if (PMAP_REMOVE_DONE(pmap)) { 1693 //debugf("mmu_booke_remove: e (empty)\n"); 1694 return; 1695 } 1696 1697 hold_flag = PTBL_HOLD_FLAG(pmap); --- 14 unchanged lines hidden (view full) --- 1712 1713/* 1714 * Remove physical page from all pmaps in which it resides. 1715 */ 1716static void 1717mmu_booke_remove_all(mmu_t mmu, vm_page_t m) 1718{ 1719 pv_entry_t pv, pvn; | 1663 } 1664 1665 if (PMAP_REMOVE_DONE(pmap)) { 1666 //debugf("mmu_booke_remove: e (empty)\n"); 1667 return; 1668 } 1669 1670 hold_flag = PTBL_HOLD_FLAG(pmap); --- 14 unchanged lines hidden (view full) --- 1685 1686/* 1687 * Remove physical page from all pmaps in which it resides. 1688 */ 1689static void 1690mmu_booke_remove_all(mmu_t mmu, vm_page_t m) 1691{ 1692 pv_entry_t pv, pvn; |
1720 u_int8_t hold_flag; | 1693 uint8_t hold_flag; |
1721 | 1694 |
1722 //debugf("mmu_booke_remove_all: s\n"); 1723 | |
1724 mtx_assert(&vm_page_queue_mtx, MA_OWNED); 1725 1726 for (pv = TAILQ_FIRST(&m->md.pv_list); pv != NULL; pv = pvn) { 1727 pvn = TAILQ_NEXT(pv, pv_link); 1728 1729 PMAP_LOCK(pv->pv_pmap); 1730 hold_flag = PTBL_HOLD_FLAG(pv->pv_pmap); 1731 pte_remove(mmu, pv->pv_pmap, pv->pv_va, hold_flag); 1732 PMAP_UNLOCK(pv->pv_pmap); 1733 } 1734 vm_page_flag_clear(m, PG_WRITEABLE); | 1695 mtx_assert(&vm_page_queue_mtx, MA_OWNED); 1696 1697 for (pv = TAILQ_FIRST(&m->md.pv_list); pv != NULL; pv = pvn) { 1698 pvn = TAILQ_NEXT(pv, pv_link); 1699 1700 PMAP_LOCK(pv->pv_pmap); 1701 hold_flag = PTBL_HOLD_FLAG(pv->pv_pmap); 1702 pte_remove(mmu, pv->pv_pmap, pv->pv_va, hold_flag); 1703 PMAP_UNLOCK(pv->pv_pmap); 1704 } 1705 vm_page_flag_clear(m, PG_WRITEABLE); |
1735 1736 //debugf("mmu_booke_remove_all: e\n"); | |
1737} 1738 1739/* 1740 * Map a range of physical addresses into kernel virtual address space. | 1706} 1707 1708/* 1709 * Map a range of physical addresses into kernel virtual address space. |
1741 * 1742 * The value passed in *virt is a suggested virtual address for the mapping. 1743 * Architectures which can support a direct-mapped physical to virtual region 1744 * can return the appropriate address within that region, leaving '*virt' 1745 * unchanged. We cannot and therefore do not; *virt is updated with the 1746 * first usable address after the mapped region. | |
1747 */ 1748static vm_offset_t 1749mmu_booke_map(mmu_t mmu, vm_offset_t *virt, vm_offset_t pa_start, 1750 vm_offset_t pa_end, int prot) 1751{ 1752 vm_offset_t sva = *virt; 1753 vm_offset_t va = sva; 1754 --- 193 unchanged lines hidden (view full) --- 1948 * protection. 1949 */ 1950static vm_page_t 1951mmu_booke_extract_and_hold(mmu_t mmu, pmap_t pmap, vm_offset_t va, 1952 vm_prot_t prot) 1953{ 1954 pte_t *pte; 1955 vm_page_t m; | 1710 */ 1711static vm_offset_t 1712mmu_booke_map(mmu_t mmu, vm_offset_t *virt, vm_offset_t pa_start, 1713 vm_offset_t pa_end, int prot) 1714{ 1715 vm_offset_t sva = *virt; 1716 vm_offset_t va = sva; 1717 --- 193 unchanged lines hidden (view full) --- 1911 * protection. 1912 */ 1913static vm_page_t 1914mmu_booke_extract_and_hold(mmu_t mmu, pmap_t pmap, vm_offset_t va, 1915 vm_prot_t prot) 1916{ 1917 pte_t *pte; 1918 vm_page_t m; |
1956 u_int32_t pte_wbit; | 1919 uint32_t pte_wbit; |
1957 1958 m = NULL; 1959 vm_page_lock_queues(); 1960 PMAP_LOCK(pmap); | 1920 1921 m = NULL; 1922 vm_page_lock_queues(); 1923 PMAP_LOCK(pmap); |
1961 pte = pte_find(mmu, pmap, va); | |
1962 | 1924 |
1925 pte = pte_find(mmu, pmap, va); |
|
1963 if ((pte != NULL) && PTE_ISVALID(pte)) { 1964 if (pmap == kernel_pmap) 1965 pte_wbit = PTE_SW; 1966 else 1967 pte_wbit = PTE_UW; 1968 1969 if ((pte->flags & pte_wbit) || ((prot & VM_PROT_WRITE) == 0)) { 1970 m = PHYS_TO_VM_PAGE(PTE_PA(pte)); --- 23 unchanged lines hidden (view full) --- 1994 * 1995 * off and size must reside within a single page. 1996 */ 1997static void 1998mmu_booke_zero_page_area(mmu_t mmu, vm_page_t m, int off, int size) 1999{ 2000 vm_offset_t va; 2001 | 1926 if ((pte != NULL) && PTE_ISVALID(pte)) { 1927 if (pmap == kernel_pmap) 1928 pte_wbit = PTE_SW; 1929 else 1930 pte_wbit = PTE_UW; 1931 1932 if ((pte->flags & pte_wbit) || ((prot & VM_PROT_WRITE) == 0)) { 1933 m = PHYS_TO_VM_PAGE(PTE_PA(pte)); --- 23 unchanged lines hidden (view full) --- 1957 * 1958 * off and size must reside within a single page. 1959 */ 1960static void 1961mmu_booke_zero_page_area(mmu_t mmu, vm_page_t m, int off, int size) 1962{ 1963 vm_offset_t va; 1964 |
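The test in mmu_booke_extract_and_hold() above selects which write-permission bit matters: the supervisor bit (PTE_SW) for the kernel pmap, the user bit (PTE_UW) otherwise, and the page is returned and held only when write permission is present or was not requested. Condensed as a sketch; the bit values below are placeholders, not the machine/pte.h definitions:

```c
#include <stdint.h>

#define PTE_SW	0x1	/* assumed placeholder: supervisor write */
#define PTE_UW	0x2	/* assumed placeholder: user write */

static int
can_return_page(uint32_t pte_flags, int is_kernel_pmap, int want_write)
{
	uint32_t wbit = is_kernel_pmap ? PTE_SW : PTE_UW;

	/* Writable, or write access not requested at all. */
	return ((pte_flags & wbit) != 0 || !want_write);
}
```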
2002 //debugf("mmu_booke_zero_page_area: s\n"); | 1965 /* XXX KASSERT off and size are within a single page? */ |
2003 2004 mtx_lock(&zero_page_mutex); 2005 va = zero_page_va; 2006 2007 mmu_booke_kenter(mmu, va, VM_PAGE_TO_PHYS(m)); 2008 bzero((caddr_t)va + off, size); 2009 mmu_booke_kremove(mmu, va); 2010 2011 mtx_unlock(&zero_page_mutex); | 1966 1967 mtx_lock(&zero_page_mutex); 1968 va = zero_page_va; 1969 1970 mmu_booke_kenter(mmu, va, VM_PAGE_TO_PHYS(m)); 1971 bzero((caddr_t)va + off, size); 1972 mmu_booke_kremove(mmu, va); 1973 1974 mtx_unlock(&zero_page_mutex); |
2012 2013 //debugf("mmu_booke_zero_page_area: e\n"); | |
2014} 2015 2016/* 2017 * mmu_booke_zero_page zeros the specified hardware page. 2018 */ 2019static void 2020mmu_booke_zero_page(mmu_t mmu, vm_page_t m) 2021{ 2022 | 1975} 1976 1977/* 1978 * mmu_booke_zero_page zeros the specified hardware page. 1979 */ 1980static void 1981mmu_booke_zero_page(mmu_t mmu, vm_page_t m) 1982{ 1983 |
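mmu_booke_zero_page_area() above uses a reserved-window technique: one kernel VA (zero_page_va) is set aside, each request temporarily maps the target physical page there under a mutex, zeroes through the window, then unmaps. A sketch of the shape; every helper below is a hypothetical stand-in for mmu_booke_kenter()/mmu_booke_kremove() and the mutex calls:

```c
#include <stdint.h>
#include <string.h>

extern uintptr_t zero_page_va;		/* the reserved window VA */

static void	window_lock(void);	/* stand-in: mtx_lock(&zero_page_mutex) */
static void	window_unlock(void);
static void	map_kva(uintptr_t va, uintptr_t pa);	/* stand-in: kenter */
static void	unmap_kva(uintptr_t va);		/* stand-in: kremove */

static void
zero_page_area_sketch(uintptr_t pa, int off, int size)
{

	window_lock();			/* one user of the window at a time */
	map_kva(zero_page_va, pa);	/* wire pa at the window VA */
	memset((char *)zero_page_va + off, 0, size);
	unmap_kva(zero_page_va);	/* invalidate the window */
	window_unlock();
}
```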
2023 //debugf("mmu_booke_zero_page: s\n"); | |
2024 mmu_booke_zero_page_area(mmu, m, 0, PAGE_SIZE); | 1984 mmu_booke_zero_page_area(mmu, m, 0, PAGE_SIZE); |
2025 //debugf("mmu_booke_zero_page: e\n"); | |
2026} 2027 2028/* 2029 * mmu_booke_copy_page copies the specified (machine independent) page by 2030 * mapping the page into virtual memory and using memcopy to copy the page, 2031 * one machine dependent page at a time. 2032 */ 2033static void --- 31 unchanged lines hidden (view full) --- 2065 * to be called from the vm_pagezero process only and outside of Giant. No 2066 * lock is required. 2067 */ 2068static void 2069mmu_booke_zero_page_idle(mmu_t mmu, vm_page_t m) 2070{ 2071 vm_offset_t va; 2072 | 1985} 1986 1987/* 1988 * mmu_booke_copy_page copies the specified (machine independent) page by 1989 * mapping the page into virtual memory and using memcopy to copy the page, 1990 * one machine dependent page at a time. 1991 */ 1992static void --- 31 unchanged lines hidden (view full) --- 2024 * to be called from the vm_pagezero process only and outside of Giant. No 2025 * lock is required. 2026 */ 2027static void 2028mmu_booke_zero_page_idle(mmu_t mmu, vm_page_t m) 2029{ 2030 vm_offset_t va; 2031 |
2073 //debugf("mmu_booke_zero_page_idle: s\n"); 2074 | |
2075 va = zero_page_idle_va; 2076 mmu_booke_kenter(mmu, va, VM_PAGE_TO_PHYS(m)); 2077 bzero((caddr_t)va, PAGE_SIZE); 2078 mmu_booke_kremove(mmu, va); | 2032 va = zero_page_idle_va; 2033 mmu_booke_kenter(mmu, va, VM_PAGE_TO_PHYS(m)); 2034 bzero((caddr_t)va, PAGE_SIZE); 2035 mmu_booke_kremove(mmu, va); |
2079 2080 //debugf("mmu_booke_zero_page_idle: e\n"); | |
2081} 2082 2083/* 2084 * Return whether or not the specified physical page was modified 2085 * in any of physical maps. 2086 */ 2087static boolean_t 2088mmu_booke_is_modified(mmu_t mmu, vm_page_t m) --- 18 unchanged lines hidden (view full) --- 2107 } 2108make_sure_to_unlock: 2109 PMAP_UNLOCK(pv->pv_pmap); 2110 } 2111 return (FALSE); 2112} 2113 2114/* | 2036} 2037 2038/* 2039 * Return whether or not the specified physical page was modified 2040 * in any of physical maps. 2041 */ 2042static boolean_t 2043mmu_booke_is_modified(mmu_t mmu, vm_page_t m) --- 18 unchanged lines hidden (view full) --- 2062 } 2063make_sure_to_unlock: 2064 PMAP_UNLOCK(pv->pv_pmap); 2065 } 2066 return (FALSE); 2067} 2068 2069/* |
2115 * Return whether or not the specified virtual address is elgible | 2070 * Return whether or not the specified virtual address is eligible |
2116 * for prefault. 2117 */ 2118static boolean_t 2119mmu_booke_is_prefaultable(mmu_t mmu, pmap_t pmap, vm_offset_t addr) 2120{ 2121 2122 return (FALSE); 2123} --- 151 unchanged lines hidden (view full) --- 2275 int loops; 2276 2277 mtx_assert(&vm_page_queue_mtx, MA_OWNED); 2278 if ((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) != 0) 2279 return (FALSE); 2280 2281 loops = 0; 2282 TAILQ_FOREACH(pv, &m->md.pv_list, pv_link) { | 2071 * for prefault. 2072 */ 2073static boolean_t 2074mmu_booke_is_prefaultable(mmu_t mmu, pmap_t pmap, vm_offset_t addr) 2075{ 2076 2077 return (FALSE); 2078} --- 151 unchanged lines hidden (view full) --- 2230 int loops; 2231 2232 mtx_assert(&vm_page_queue_mtx, MA_OWNED); 2233 if ((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) != 0) 2234 return (FALSE); 2235 2236 loops = 0; 2237 TAILQ_FOREACH(pv, &m->md.pv_list, pv_link) { |
2283 | |
2284 if (pv->pv_pmap == pmap) 2285 return (TRUE); 2286 2287 if (++loops >= 16) 2288 break; 2289 } 2290 return (FALSE); 2291} --- 74 unchanged lines hidden (view full) --- 2366/* 2367 * 'Unmap' a range mapped by mmu_booke_mapdev(). 2368 */ 2369static void 2370mmu_booke_unmapdev(mmu_t mmu, vm_offset_t va, vm_size_t size) 2371{ 2372 vm_offset_t base, offset; 2373 | 2238 if (pv->pv_pmap == pmap) 2239 return (TRUE); 2240 2241 if (++loops >= 16) 2242 break; 2243 } 2244 return (FALSE); 2245} --- 74 unchanged lines hidden (view full) --- 2320/* 2321 * 'Unmap' a range mapped by mmu_booke_mapdev(). 2322 */ 2323static void 2324mmu_booke_unmapdev(mmu_t mmu, vm_offset_t va, vm_size_t size) 2325{ 2326 vm_offset_t base, offset; 2327 |
2374 //debugf("mmu_booke_unmapdev: s (va = 0x%08x)\n", va); 2375 | |
2376 /* 2377 * Unmap only if this is inside kernel virtual space. 2378 */ 2379 if ((va >= VM_MIN_KERNEL_ADDRESS) && (va <= VM_MAX_KERNEL_ADDRESS)) { 2380 base = trunc_page(va); 2381 offset = va & PAGE_MASK; 2382 size = roundup(offset + size, PAGE_SIZE); 2383 kmem_free(kernel_map, base, size); 2384 } | 2328 /* 2329 * Unmap only if this is inside kernel virtual space. 2330 */ 2331 if ((va >= VM_MIN_KERNEL_ADDRESS) && (va <= VM_MAX_KERNEL_ADDRESS)) { 2332 base = trunc_page(va); 2333 offset = va & PAGE_MASK; 2334 size = roundup(offset + size, PAGE_SIZE); 2335 kmem_free(kernel_map, base, size); 2336 } |
2385 2386 //debugf("mmu_booke_unmapdev: e\n"); | |
2387} 2388 2389/* | 2337} 2338 2339/* |
2390 * mmu_booke_object_init_pt preloads the ptes for a given object 2391 * into the specified pmap. This eliminates the blast of soft 2392 * faults on process startup and immediately after an mmap. | 2340 * mmu_booke_object_init_pt preloads the ptes for a given object into the 2341 * specified pmap. This eliminates the blast of soft faults on process startup 2342 * and immediately after an mmap. |
2393 */ 2394static void 2395mmu_booke_object_init_pt(mmu_t mmu, pmap_t pmap, vm_offset_t addr, 2396 vm_object_t object, vm_pindex_t pindex, vm_size_t size) 2397{ | 2343 */ 2344static void 2345mmu_booke_object_init_pt(mmu_t mmu, pmap_t pmap, vm_offset_t addr, 2346 vm_object_t object, vm_pindex_t pindex, vm_size_t size) 2347{ |
2348 |
|
2398 VM_OBJECT_LOCK_ASSERT(object, MA_OWNED); 2399 KASSERT(object->type == OBJT_DEVICE, 2400 ("mmu_booke_object_init_pt: non-device object")); 2401} 2402 2403/* 2404 * Perform the pmap work for mincore. 2405 */ --- 164 unchanged lines hidden (view full) --- 2570 2571/* 2572 * Write given entry to TLB1 hardware. 2573 * Use 32 bit pa, clear 4 high-order bits of RPN (mas7). 2574 */ 2575static void 2576tlb1_write_entry(unsigned int idx) 2577{ | 2349 VM_OBJECT_LOCK_ASSERT(object, MA_OWNED); 2350 KASSERT(object->type == OBJT_DEVICE, 2351 ("mmu_booke_object_init_pt: non-device object")); 2352} 2353 2354/* 2355 * Perform the pmap work for mincore. 2356 */ --- 164 unchanged lines hidden (view full) --- 2521 2522/* 2523 * Write given entry to TLB1 hardware. 2524 * Use 32 bit pa, clear 4 high-order bits of RPN (mas7). 2525 */ 2526static void 2527tlb1_write_entry(unsigned int idx) 2528{ |
2578 u_int32_t mas0, mas7; | 2529 uint32_t mas0, mas7; |
2579 2580 //debugf("tlb1_write_entry: s\n"); 2581 2582 /* Clear high order RPN bits */ 2583 mas7 = 0; 2584 2585 /* Select entry */ 2586 mas0 = MAS0_TLBSEL(1) | MAS0_ESEL(idx); 2587 //debugf("tlb1_write_entry: mas0 = 0x%08x\n", mas0); 2588 2589 mtspr(SPR_MAS0, mas0); | 2530 2531 //debugf("tlb1_write_entry: s\n"); 2532 2533 /* Clear high order RPN bits */ 2534 mas7 = 0; 2535 2536 /* Select entry */ 2537 mas0 = MAS0_TLBSEL(1) | MAS0_ESEL(idx); 2538 //debugf("tlb1_write_entry: mas0 = 0x%08x\n", mas0); 2539 2540 mtspr(SPR_MAS0, mas0); |
2590 __asm volatile("isync"); | 2541 __asm __volatile("isync"); |
2591 mtspr(SPR_MAS1, tlb1[idx].mas1); | 2542 mtspr(SPR_MAS1, tlb1[idx].mas1); |
2592 __asm volatile("isync"); | 2543 __asm __volatile("isync"); |
2593 mtspr(SPR_MAS2, tlb1[idx].mas2); | 2544 mtspr(SPR_MAS2, tlb1[idx].mas2); |
2594 __asm volatile("isync"); | 2545 __asm __volatile("isync"); |
2595 mtspr(SPR_MAS3, tlb1[idx].mas3); | 2546 mtspr(SPR_MAS3, tlb1[idx].mas3); |
2596 __asm volatile("isync"); | 2547 __asm __volatile("isync"); |
2597 mtspr(SPR_MAS7, mas7); | 2548 mtspr(SPR_MAS7, mas7); |
2598 __asm volatile("isync; tlbwe; isync; msync"); | 2549 __asm __volatile("isync; tlbwe; isync; msync"); |
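Each mtspr in the sequence above is followed by an isync so the subsequent TLB operation observes the new MAS register contents, and the final tlbwe is bracketed by isync/msync; r187151 also switches to the canonical `__asm __volatile` spelling. A condensed, annotated sketch (register roles per the Book-E programming model; header locations are approximate for the booke port):

```c
#include <sys/types.h>
#include <machine/spr.h>	/* SPR_MASn, mtspr() */
#include <machine/tlb.h>	/* MAS0_TLBSEL(), MAS0_ESEL() */

static void
tlb1_write_sketch(int idx, uint32_t mas1, uint32_t mas2, uint32_t mas3)
{

	mtspr(SPR_MAS0, MAS0_TLBSEL(1) | MAS0_ESEL(idx)); /* pick TLB1[idx] */
	__asm __volatile("isync");	/* order SPR write before tlbwe */
	mtspr(SPR_MAS1, mas1);		/* valid bit, TID, page size */
	__asm __volatile("isync");
	mtspr(SPR_MAS2, mas2);		/* effective page number, WIMGE */
	__asm __volatile("isync");
	mtspr(SPR_MAS3, mas3);		/* real page number, permissions */
	__asm __volatile("isync");
	mtspr(SPR_MAS7, 0);		/* clear high-order RPN bits */
	__asm __volatile("isync; tlbwe; isync; msync");
}
```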
2599 2600 //debugf("tlb1_write_entry: e\n");; 2601} 2602 2603/* 2604 * Return the largest uint value log such that 2^log <= num. 2605 */ 2606static unsigned int --- 88 unchanged lines hidden (view full) --- 2695 return (-1); 2696 else if (*sza < *szb) 2697 return (1); 2698 else 2699 return (0); 2700} 2701 2702/* | 2550 2551 //debugf("tlb1_write_entry: e\n");; 2552} 2553 2554/* 2555 * Return the largest uint value log such that 2^log <= num. 2556 */ 2557static unsigned int --- 88 unchanged lines hidden (view full) --- 2646 return (-1); 2647 else if (*sza < *szb) 2648 return (1); 2649 else 2650 return (0); 2651} 2652 2653/* |
2703 * Mapin contiguous RAM region into the TLB1 using maximum of | 2654 * Map in contiguous RAM region into the TLB1 using maximum of |
2704 * KERNEL_REGION_MAX_TLB_ENTRIES entries. 2705 * | 2655 * KERNEL_REGION_MAX_TLB_ENTRIES entries. 2656 * |
2706 * If necessarry round up last entry size and return total size | 2657 * If necessary round up last entry size and return total size |
2707 * used by all allocated entries. 2708 */ 2709vm_size_t 2710tlb1_mapin_region(vm_offset_t va, vm_offset_t pa, vm_size_t size) 2711{ 2712 vm_size_t entry_size[KERNEL_REGION_MAX_TLB_ENTRIES]; 2713 vm_size_t mapped_size, sz, esz; 2714 unsigned int log; 2715 int i; 2716 | 2658 * used by all allocated entries. 2659 */ 2660vm_size_t 2661tlb1_mapin_region(vm_offset_t va, vm_offset_t pa, vm_size_t size) 2662{ 2663 vm_size_t entry_size[KERNEL_REGION_MAX_TLB_ENTRIES]; 2664 vm_size_t mapped_size, sz, esz; 2665 unsigned int log; 2666 int i; 2667 |
2717 debugf("tlb1_mapin_region:\n"); 2718 debugf(" region size = 0x%08x va = 0x%08x pa = 0x%08x\n", size, va, pa); | 2668 CTR4(KTR_PMAP, "%s: region size = 0x%08x va = 0x%08x pa = 0x%08x", 2669 __func__, size, va, pa); |
2719 2720 mapped_size = 0; 2721 sz = size; 2722 memset(entry_size, 0, sizeof(entry_size)); 2723 2724 /* Calculate entry sizes. */ 2725 for (i = 0; i < KERNEL_REGION_MAX_TLB_ENTRIES && sz > 0; i++) { 2726 --- 19 unchanged lines hidden (view full) --- 2746 qsort(entry_size, KERNEL_REGION_MAX_TLB_ENTRIES, 2747 sizeof(vm_size_t), tlb1_entry_size_cmp); 2748 2749 /* Load TLB1 entries. */ 2750 for (i = 0; i < KERNEL_REGION_MAX_TLB_ENTRIES; i++) { 2751 esz = entry_size[i]; 2752 if (!esz) 2753 break; | 2670 2671 mapped_size = 0; 2672 sz = size; 2673 memset(entry_size, 0, sizeof(entry_size)); 2674 2675 /* Calculate entry sizes. */ 2676 for (i = 0; i < KERNEL_REGION_MAX_TLB_ENTRIES && sz > 0; i++) { 2677 --- 19 unchanged lines hidden (view full) --- 2697 qsort(entry_size, KERNEL_REGION_MAX_TLB_ENTRIES, 2698 sizeof(vm_size_t), tlb1_entry_size_cmp); 2699 2700 /* Load TLB1 entries. */ 2701 for (i = 0; i < KERNEL_REGION_MAX_TLB_ENTRIES; i++) { 2702 esz = entry_size[i]; 2703 if (!esz) 2704 break; |
2754 debugf(" entry %d: sz = 0x%08x (va = 0x%08x pa = 0x%08x)\n", 2755 tlb1_idx, esz, va, pa); | 2705 2706 CTR5(KTR_PMAP, "%s: entry %d: sz = 0x%08x (va = 0x%08x " 2707 "pa = 0x%08x)", __func__, tlb1_idx, esz, va, pa); 2708 |
2756 tlb1_set_entry(va, pa, esz, _TLB_ENTRY_MEM); 2757 2758 va += esz; 2759 pa += esz; 2760 } 2761 | 2709 tlb1_set_entry(va, pa, esz, _TLB_ENTRY_MEM); 2710 2711 va += esz; 2712 pa += esz; 2713 } 2714 |
2762 debugf(" mapped size 0x%08x (wasted space 0x%08x)\n", 2763 mapped_size, mapped_size - size); | 2715 CTR3(KTR_PMAP, "%s: mapped size 0x%08x (wasted space 0x%08x)", 2716 __func__, mapped_size, mapped_size - size); |
2764 2765 return (mapped_size); 2766} 2767 2768/* 2769 * TLB1 initialization routine, to be called after the very first 2770 * assembler level setup done in locore.S. 2771 */ 2772void 2773tlb1_init(vm_offset_t ccsrbar) 2774{ 2775 uint32_t mas0; 2776 | 2717 2718 return (mapped_size); 2719} 2720 2721/* 2722 * TLB1 initialization routine, to be called after the very first 2723 * assembler level setup done in locore.S. 2724 */ 2725void 2726tlb1_init(vm_offset_t ccsrbar) 2727{ 2728 uint32_t mas0; 2729 |
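tlb1_mapin_region() above carves a RAM region into at most KERNEL_REGION_MAX_TLB_ENTRIES variable-size entries, sorts them largest-first, and rounds the last one up so the whole region is covered (possibly mapping some extra space). The greedy carve-and-round idea, sketched with power-of-two sizes for simplicity; the e500 hardware is stricter, since TLB1 page sizes are powers of 4 times 4 KB:

```c
#include <stdint.h>

/* Largest power of two <= x (x > 0); mirrors the ilog2-style sizing. */
static uint32_t
pow2_floor(uint32_t x)
{
	uint32_t p = 1;

	while ((p << 1) != 0 && (p << 1) <= x)
		p <<= 1;
	return (p);
}

/* Returns total size mapped, which may exceed the requested size. */
static uint32_t
chunk_region(uint32_t size, int max_entries, uint32_t *esz)
{
	uint32_t mapped = 0, left = size;
	int i;

	for (i = 0; i < max_entries && left > 0; i++) {
		esz[i] = pow2_floor(left);	/* biggest chunk that fits */
		if (i == max_entries - 1 && esz[i] < left)
			esz[i] <<= 1;		/* round the last entry up */
		mapped += esz[i];
		left = (esz[i] >= left) ? 0 : left - esz[i];
	}
	return (mapped);
}
```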
2777 /* TBL1[1] is used to map the kernel. Save that entry. */ | 2730 /* TLB1[1] is used to map the kernel. Save that entry. */ |
2778 mas0 = MAS0_TLBSEL(1) | MAS0_ESEL(1); 2779 mtspr(SPR_MAS0, mas0); 2780 __asm __volatile("isync; tlbre"); 2781 2782 tlb1[1].mas1 = mfspr(SPR_MAS1); 2783 tlb1[1].mas2 = mfspr(SPR_MAS2); 2784 tlb1[1].mas3 = mfspr(SPR_MAS3); 2785 --- 13 unchanged lines hidden (view full) --- 2799 2800/* 2801 * Setup MAS4 defaults. 2802 * These values are loaded to MAS0-2 on a TLB miss. 2803 */ 2804static void 2805set_mas4_defaults(void) 2806{ | 2731 mas0 = MAS0_TLBSEL(1) | MAS0_ESEL(1); 2732 mtspr(SPR_MAS0, mas0); 2733 __asm __volatile("isync; tlbre"); 2734 2735 tlb1[1].mas1 = mfspr(SPR_MAS1); 2736 tlb1[1].mas2 = mfspr(SPR_MAS2); 2737 tlb1[1].mas3 = mfspr(SPR_MAS3); 2738 --- 13 unchanged lines hidden (view full) --- 2752 2753/* 2754 * Setup MAS4 defaults. 2755 * These values are loaded to MAS0-2 on a TLB miss. 2756 */ 2757static void 2758set_mas4_defaults(void) 2759{ |
2807 u_int32_t mas4; | 2760 uint32_t mas4; |
2808 2809 /* Defaults: TLB0, PID0, TSIZED=4K */ 2810 mas4 = MAS4_TLBSELD0; 2811 mas4 |= (TLB_SIZE_4K << MAS4_TSIZED_SHIFT) & MAS4_TSIZED_MASK; 2812 2813 mtspr(SPR_MAS4, mas4); | 2761 2762 /* Defaults: TLB0, PID0, TSIZED=4K */ 2763 mas4 = MAS4_TLBSELD0; 2764 mas4 |= (TLB_SIZE_4K << MAS4_TSIZED_SHIFT) & MAS4_TSIZED_MASK; 2765 2766 mtspr(SPR_MAS4, mas4); |
2814 __asm volatile("isync"); | 2767 __asm __volatile("isync"); |
2815} 2816 2817 /* 2818 * Print out contents of the MAS registers for each TLB1 entry 2819 */ 2820void 2821tlb1_print_tlbentries(void) 2822{ --- 32 unchanged lines hidden (view full) --- 2855 2856/* 2857 * Return 0 if the physical IO range is encompassed by one of 2858 * the TLB1 entries, otherwise return related error code. 2859 */ 2860static int 2861tlb1_iomapped(int i, vm_paddr_t pa, vm_size_t size, vm_offset_t *va) 2862{ | 2768} 2769 2770 /* 2771 * Print out contents of the MAS registers for each TLB1 entry 2772 */ 2773void 2774tlb1_print_tlbentries(void) 2775{ --- 32 unchanged lines hidden (view full) --- 2808 2809/* 2810 * Return 0 if the physical IO range is encompassed by one of 2811 * the TLB1 entries, otherwise return related error code. 2812 */ 2813static int 2814tlb1_iomapped(int i, vm_paddr_t pa, vm_size_t size, vm_offset_t *va) 2815{ |
2863 u_int32_t prot; | 2816 uint32_t prot; |
2864 vm_paddr_t pa_start; 2865 vm_paddr_t pa_end; 2866 unsigned int entry_tsize; 2867 vm_size_t entry_size; 2868 2869 *va = (vm_offset_t)NULL; 2870 2871 /* Skip invalid entries */ --- 30 unchanged lines hidden --- | 2817 vm_paddr_t pa_start; 2818 vm_paddr_t pa_end; 2819 unsigned int entry_tsize; 2820 vm_size_t entry_size; 2821 2822 *va = (vm_offset_t)NULL; 2823 2824 /* Skip invalid entries */ --- 30 unchanged lines hidden --- |