mmu_oea64.c (282264) → mmu_oea64.c (285148)
1/*-
2 * Copyright (c) 2008-2015 Nathan Whitehorn
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 *

--- 11 unchanged lines hidden ---

20 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
21 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
22 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
23 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
24 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
25 */
26
27#include <sys/cdefs.h>
28__FBSDID("$FreeBSD: head/sys/powerpc/aim/mmu_oea64.c 282264 2015-04-30 01:24:25Z jhibbits $");
28__FBSDID("$FreeBSD: head/sys/powerpc/aim/mmu_oea64.c 285148 2015-07-04 19:00:38Z jhibbits $");
29
30/*
31 * Manages physical address maps.
32 *
33 * Since the information managed by this module is also stored by the
34 * logical address mapping module, this module may throw away valid virtual
35 * to physical mappings at almost any time. However, invalidations of
36 * mappings must be done as requested.

--- 184 unchanged lines hidden ---

221
222/*
223 * Utility routines.
224 */
225static boolean_t moea64_query_bit(mmu_t, vm_page_t, uint64_t);
226static u_int moea64_clear_bit(mmu_t, vm_page_t, uint64_t);
227static void moea64_kremove(mmu_t, vm_offset_t);
228static void moea64_syncicache(mmu_t, pmap_t pmap, vm_offset_t va,
229 vm_offset_t pa, vm_size_t sz);
229 vm_paddr_t pa, vm_size_t sz);
230
231/*
232 * Kernel MMU interface
233 */
234void moea64_clear_modify(mmu_t, vm_page_t);
235void moea64_copy_page(mmu_t, vm_page_t, vm_page_t);
236void moea64_copy_pages(mmu_t mmu, vm_page_t *ma, vm_offset_t a_offset,
237 vm_page_t *mb, vm_offset_t b_offset, int xfersize);

--- 24 unchanged lines hidden ---

262void moea64_remove_write(mmu_t, vm_page_t);
263void moea64_unwire(mmu_t, pmap_t, vm_offset_t, vm_offset_t);
264void moea64_zero_page(mmu_t, vm_page_t);
265void moea64_zero_page_area(mmu_t, vm_page_t, int, int);
266void moea64_zero_page_idle(mmu_t, vm_page_t);
267void moea64_activate(mmu_t, struct thread *);
268void moea64_deactivate(mmu_t, struct thread *);
269void *moea64_mapdev(mmu_t, vm_paddr_t, vm_size_t);
270void *moea64_mapdev_attr(mmu_t, vm_offset_t, vm_size_t, vm_memattr_t);
270void *moea64_mapdev_attr(mmu_t, vm_paddr_t, vm_size_t, vm_memattr_t);
271void moea64_unmapdev(mmu_t, vm_offset_t, vm_size_t);
272vm_paddr_t moea64_kextract(mmu_t, vm_offset_t);
273void moea64_page_set_memattr(mmu_t, vm_page_t m, vm_memattr_t ma);
274void moea64_kenter_attr(mmu_t, vm_offset_t, vm_offset_t, vm_memattr_t ma);
274void moea64_kenter_attr(mmu_t, vm_offset_t, vm_paddr_t, vm_memattr_t ma);
275void moea64_kenter(mmu_t, vm_offset_t, vm_paddr_t);
276boolean_t moea64_dev_direct_mapped(mmu_t, vm_paddr_t, vm_size_t);
277static void moea64_sync_icache(mmu_t, pmap_t, vm_offset_t, vm_size_t);
278void moea64_dumpsys_map(mmu_t mmu, vm_paddr_t pa, size_t sz,
279 void **va);
280void moea64_scan_init(mmu_t mmu);
281
282static mmu_method_t moea64_methods[] = {

--- 131 unchanged lines hidden ---

414 else
415 lpte->pte_lo |= LPTE_BR;
416
417 if (!(pvo->pvo_pte.prot & VM_PROT_EXECUTE))
418 lpte->pte_lo |= LPTE_NOEXEC;
419}
420
421static __inline uint64_t
422moea64_calc_wimg(vm_offset_t pa, vm_memattr_t ma)
422moea64_calc_wimg(vm_paddr_t pa, vm_memattr_t ma)
423{
424 uint64_t pte_lo;
425 int i;
426
427 if (ma != VM_MEMATTR_DEFAULT) {
428 switch (ma) {
429 case VM_MEMATTR_UNCACHEABLE:
430 return (LPTE_I | LPTE_G);

--- 618 unchanged lines hidden ---

1049/*
1050 * This goes through and sets the physical address of our
1051 * special scratch PTE to the PA we want to zero or copy. Because
1052 * of locking issues (this can get called in pvo_enter() by
1053 * the UMA allocator), we can't use most other utility functions here
1054 */
1055
1056static __inline
1057void moea64_set_scratchpage_pa(mmu_t mmup, int which, vm_offset_t pa) {
1057void moea64_set_scratchpage_pa(mmu_t mmup, int which, vm_paddr_t pa) {
1058
1059 KASSERT(!hw_direct_map, ("Using OEA64 scratchpage with a direct map!"));
1060 mtx_assert(&moea64_scratchpage_mtx, MA_OWNED);
1061
1062 moea64_scratchpage_pvo[which]->pvo_pte.pa =
1063 moea64_calc_wimg(pa, VM_MEMATTR_DEFAULT) | (uint64_t)pa;
1064 MOEA64_PTE_REPLACE(mmup, moea64_scratchpage_pvo[which],
1065 MOEA64_PTE_INVALIDATE);

--- 88 unchanged lines hidden ---

1154 moea64_copy_pages_nodmap(mmu, ma, a_offset, mb, b_offset,
1155 xfersize);
1156 }
1157}
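/*
 * Illustrative sketch, not part of mmu_oea64.c: the scratch-PTE helper
 * described above is consumed under moea64_scratchpage_mtx to reach
 * arbitrary physical pages when hw_direct_map is not available, for
 * example to copy one physical page to another using the two scratch
 * slots.  The function name example_copy_phys_page() is hypothetical and
 * only shows the pattern; the file's real copy/zero routines follow.
 */
static void
example_copy_phys_page(mmu_t mmu, vm_paddr_t src, vm_paddr_t dst)
{

	mtx_lock(&moea64_scratchpage_mtx);
	moea64_set_scratchpage_pa(mmu, 0, src);	/* slot 0: source page */
	moea64_set_scratchpage_pa(mmu, 1, dst);	/* slot 1: destination page */
	bcopy((void *)moea64_scratchpage_va[0],
	    (void *)moea64_scratchpage_va[1], PAGE_SIZE);
	mtx_unlock(&moea64_scratchpage_mtx);
}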
1158
1159void
1160moea64_zero_page_area(mmu_t mmu, vm_page_t m, int off, int size)
1161{
1162 vm_offset_t pa = VM_PAGE_TO_PHYS(m);
1162 vm_paddr_t pa = VM_PAGE_TO_PHYS(m);
1163
1164 if (size + off > PAGE_SIZE)
1165 panic("moea64_zero_page: size + off > PAGE_SIZE");
1166
1167 if (hw_direct_map) {
1168 bzero((caddr_t)pa + off, size);
1169 } else {
1170 mtx_lock(&moea64_scratchpage_mtx);

--- 4 unchanged lines hidden ---

1175}
1176
1177/*
1178 * Zero a page of physical memory by temporarily mapping it
1179 */
1180void
1181moea64_zero_page(mmu_t mmu, vm_page_t m)
1182{
1183 vm_offset_t pa = VM_PAGE_TO_PHYS(m);
1183 vm_paddr_t pa = VM_PAGE_TO_PHYS(m);
1184 vm_offset_t va, off;
1185
1186 if (!hw_direct_map) {
1187 mtx_lock(&moea64_scratchpage_mtx);
1188
1189 moea64_set_scratchpage_pa(mmu, 0, pa);
1190 va = moea64_scratchpage_va[0];
1191 } else {

--- 113 unchanged lines hidden ---

1305 (pte_lo & (LPTE_I | LPTE_G | LPTE_NOEXEC)) == 0) {
1306 vm_page_aflag_set(m, PGA_EXECUTABLE);
1307 moea64_syncicache(mmu, pmap, va, VM_PAGE_TO_PHYS(m), PAGE_SIZE);
1308 }
1309 return (KERN_SUCCESS);
1310}
1311
1312static void
1313moea64_syncicache(mmu_t mmu, pmap_t pmap, vm_offset_t va, vm_offset_t pa,
1313moea64_syncicache(mmu_t mmu, pmap_t pmap, vm_offset_t va, vm_paddr_t pa,
1314 vm_size_t sz)
1315{
1316
1317 /*
1318 * This is much trickier than on older systems because
1319 * we can't sync the icache on physical addresses directly
1320 * without a direct map. Instead we check a couple of cases
1321 * where the memory is already mapped in and, failing that,

--- 365 unchanged lines hidden ---
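/*
 * Illustrative sketch, not the hidden body above: the strategy the
 * cut-off comment describes is to reuse an existing mapping of the page
 * when one is known, and otherwise to borrow a scratch slot so that
 * __syncicache() can run on a valid virtual address.  The function name
 * example_sync_icache_pa() is hypothetical, and the sketch assumes the
 * range does not cross a page boundary; only the helpers already shown
 * in this file are used.
 */
static void
example_sync_icache_pa(mmu_t mmu, vm_paddr_t pa, vm_size_t sz)
{

	if (hw_direct_map) {
		/* Physical memory is mapped 1:1, so sync it directly. */
		__syncicache((void *)(uintptr_t)pa, sz);
	} else {
		/* Map the page through scratch slot 1, then sync. */
		mtx_lock(&moea64_scratchpage_mtx);
		moea64_set_scratchpage_pa(mmu, 1, pa & ~ADDR_POFF);
		__syncicache((void *)(moea64_scratchpage_va[1] +
		    (vm_offset_t)(pa & ADDR_POFF)), sz);
		mtx_unlock(&moea64_scratchpage_mtx);
	}
}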

1687 m->md.mdpg_cache_attrs = ma;
1688 PV_PAGE_UNLOCK(m);
1689}
1690
1691/*
1692 * Map a wired page into kernel virtual address space.
1693 */
1694void
1695moea64_kenter_attr(mmu_t mmu, vm_offset_t va, vm_offset_t pa, vm_memattr_t ma)
1695moea64_kenter_attr(mmu_t mmu, vm_offset_t va, vm_paddr_t pa, vm_memattr_t ma)
1696{
1697 int error;
1698 struct pvo_entry *pvo, *oldpvo;
1699
1700 pvo = alloc_pvo_entry(0);
1701 pvo->pvo_pte.prot = VM_PROT_READ | VM_PROT_WRITE | VM_PROT_EXECUTE;
1702 pvo->pvo_pte.pa = (pa & ~ADDR_POFF) | moea64_calc_wimg(pa, ma);
1703 pvo->pvo_vaddr |= PVO_WIRED;

--- 808 unchanged lines hidden ---
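/*
 * Illustrative sketch, not code from this file: moea64_kenter(), whose
 * prototype appears earlier in this diff, maps a wired page with the
 * default memory attribute, and a natural implementation simply defers
 * to moea64_kenter_attr().  The name example_kenter() is hypothetical.
 */
static void
example_kenter(mmu_t mmu, vm_offset_t va, vm_paddr_t pa)
{

	moea64_kenter_attr(mmu, va, pa, VM_MEMATTR_DEFAULT);
}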

2512
2513/*
2514 * Map a set of physical memory pages into the kernel virtual
2515 * address space. Return a pointer to where it is mapped. This
2516 * routine is intended to be used for mapping device memory,
2517 * NOT real memory.
2518 */
2519void *
2520moea64_mapdev_attr(mmu_t mmu, vm_offset_t pa, vm_size_t size, vm_memattr_t ma)
2520moea64_mapdev_attr(mmu_t mmu, vm_paddr_t pa, vm_size_t size, vm_memattr_t ma)
2521{
2522 vm_offset_t va, tmpva, ppa, offset;
2523
2524 ppa = trunc_page(pa);
2525 offset = pa & PAGE_MASK;
2526 size = roundup2(offset + size, PAGE_SIZE);
2527
2528 va = kva_alloc(size);

--- 126 unchanged lines hidden ---
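/*
 * Illustrative sketch, not the hidden remainder of moea64_mapdev_attr()
 * above: the conventional way to finish such a device mapping is to wire
 * each page of the physical range at the freshly allocated KVA and hand
 * back the address of the original offset within it.  The function name
 * example_mapdev_attr() is hypothetical, and returning NULL on a failed
 * kva_alloc() is a simplification of this sketch.
 */
static void *
example_mapdev_attr(mmu_t mmu, vm_paddr_t pa, vm_size_t size, vm_memattr_t ma)
{
	vm_offset_t va, tmpva;
	vm_paddr_t ppa;
	vm_size_t offset;

	ppa = trunc_page(pa);
	offset = pa & PAGE_MASK;
	size = roundup2(offset + size, PAGE_SIZE);

	va = kva_alloc(size);
	if (va == 0)
		return (NULL);

	/* Wire the range page by page with the requested attribute. */
	for (tmpva = va; size > 0; size -= PAGE_SIZE) {
		moea64_kenter_attr(mmu, tmpva, ppa, ma);
		tmpva += PAGE_SIZE;
		ppa += PAGE_SIZE;
	}

	return ((void *)(va + offset));
}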