/*-
 * Copyright (C) 2007-2008 Semihalf, Rafal Jaworowski <raj@semihalf.com>
 * Copyright (C) 2006 Semihalf, Marian Balakowicz <m8@semihalf.com>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN
 * NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
 * TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF

--- 9 unchanged lines hidden ---

/*
 * VM layout notes:
 *
 * Kernel and user threads run within one common virtual address space
 * defined by AS=0.
 *
 * Virtual address space layout:
 * -----------------------------
 * 0x0000_0000 - 0xafff_ffff	: user process
 * 0xb000_0000 - 0xbfff_ffff	: pmap_mapdev()-ed area (PCI/PCIE etc.)
 * 0xc000_0000 - 0xc0ff_ffff	: kernel reserved
 *   0xc000_0000 - kernelend	: kernel code+data, env, metadata etc.
 * 0xc100_0000 - 0xfeef_ffff	: KVA
 *   0xc100_0000 - 0xc100_3fff	: reserved for page zero/copy
 *   0xc100_4000 - 0xc200_3fff	: reserved for ptbl bufs
 *   0xc200_4000 - 0xc200_8fff	: guard page + kstack0
 *   0xc200_9000 - 0xfeef_ffff	: actual free KVA space
 * 0xfef0_0000 - 0xffff_ffff	: I/O devices region
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: head/sys/powerpc/booke/pmap.c 187151 2009-01-13 16:15:49Z raj $");

#include <sys/types.h>
#include <sys/param.h>
#include <sys/malloc.h>
#include <sys/ktr.h>
#include <sys/proc.h>
#include <sys/user.h>
#include <sys/queue.h>

--- 315 unchanged lines hidden ---

}

/* Initialize pool of kva ptbl buffers. */
static void
ptbl_init(void)
{
        int i;

        CTR3(KTR_PMAP, "%s: s (ptbl_bufs = 0x%08x size 0x%08x)", __func__,
            (uint32_t)ptbl_bufs, sizeof(struct ptbl_buf) * PTBL_BUFS);
        CTR3(KTR_PMAP, "%s: s (ptbl_buf_pool_vabase = 0x%08x size = 0x%08x)",
            __func__, ptbl_buf_pool_vabase, PTBL_BUFS * PTBL_PAGES * PAGE_SIZE);
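
        /*
         * All ptbl bufs are carved out of a single, statically sized KVA
         * pool: slice it into PTBL_PAGES-sized chunks and queue each chunk
         * on the free list for ptbl_buf_alloc() to hand out.
         */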

        mtx_init(&ptbl_buf_freelist_lock, "ptbl bufs lock", NULL, MTX_DEF);
        TAILQ_INIT(&ptbl_buf_freelist);

        for (i = 0; i < PTBL_BUFS; i++) {
                ptbl_bufs[i].kva = ptbl_buf_pool_vabase + i * PTBL_PAGES * PAGE_SIZE;
                TAILQ_INSERT_TAIL(&ptbl_buf_freelist, &ptbl_bufs[i], link);
        }
}

/* Get a ptbl_buf from the freelist. */
static struct ptbl_buf *
ptbl_buf_alloc(void)
{
        struct ptbl_buf *buf;

        mtx_lock(&ptbl_buf_freelist_lock);
        buf = TAILQ_FIRST(&ptbl_buf_freelist);
        if (buf != NULL)
                TAILQ_REMOVE(&ptbl_buf_freelist, buf, link);
        mtx_unlock(&ptbl_buf_freelist_lock);

        CTR2(KTR_PMAP, "%s: buf = %p", __func__, buf);

        return (buf);
}

/* Return ptbl buf to free pool. */
static void
ptbl_buf_free(struct ptbl_buf *buf)
{

--- 134 unchanged lines hidden ---

static int
ptbl_unhold(mmu_t mmu, pmap_t pmap, unsigned int pdir_idx)
{
        pte_t *ptbl;
        vm_paddr_t pa;
        vm_page_t m;
        int i;

        CTR4(KTR_PMAP, "%s: pmap = %p su = %d pdir_idx = %d", __func__, pmap,
            (pmap == kernel_pmap), pdir_idx);

        KASSERT((pdir_idx <= (VM_MAXUSER_ADDRESS / PDIR_SIZE)),
            ("ptbl_unhold: invalid pdir_idx"));
        KASSERT((pmap != kernel_pmap),
            ("ptbl_unhold: unholding kernel ptbl!"));

        ptbl = pmap->pm_pdir[pdir_idx];

        //debugf("ptbl_unhold: ptbl = 0x%08x\n", (u_int32_t)ptbl);
        KASSERT(((vm_offset_t)ptbl >= VM_MIN_KERNEL_ADDRESS),
            ("ptbl_unhold: non kva ptbl"));

        /* decrement hold count */
        for (i = 0; i < PTBL_PAGES; i++) {
                pa = pte_vatopa(mmu, kernel_pmap,
                    (vm_offset_t)ptbl + (i * PAGE_SIZE));
                m = PHYS_TO_VM_PAGE(pa);
                m->wire_count--;
        }

        /*
         * Free ptbl pages if there are no pte entries in this ptbl.
         * wire_count has the same value for all ptbl pages, so check the last
         * page.
         */
        if (m->wire_count == 0) {
                ptbl_free(mmu, pmap, pdir_idx);

                //debugf("ptbl_unhold: e (freed ptbl)\n");
                return (1);
        }

        return (0);
}

/*
 * Increment hold count for ptbl pages. This routine is used when a new pte
 * entry is being inserted into the ptbl.
 */
static void
ptbl_hold(mmu_t mmu, pmap_t pmap, unsigned int pdir_idx)
{
        vm_paddr_t pa;
        pte_t *ptbl;
        vm_page_t m;
        int i;

        CTR3(KTR_PMAP, "%s: pmap = %p pdir_idx = %d", __func__, pmap,
            pdir_idx);

        KASSERT((pdir_idx <= (VM_MAXUSER_ADDRESS / PDIR_SIZE)),
            ("ptbl_hold: invalid pdir_idx"));
        KASSERT((pmap != kernel_pmap),
            ("ptbl_hold: holding kernel ptbl!"));

        ptbl = pmap->pm_pdir[pdir_idx];

        KASSERT((ptbl != NULL), ("ptbl_hold: null ptbl"));

        for (i = 0; i < PTBL_PAGES; i++) {
                pa = pte_vatopa(mmu, kernel_pmap,
                    (vm_offset_t)ptbl + (i * PAGE_SIZE));
                m = PHYS_TO_VM_PAGE(pa);
                m->wire_count++;
        }
}

/* Allocate pv_entry structure. */
pv_entry_t
pv_alloc(void)
{
        pv_entry_t pv;

        pv_entry_count++;
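        /*
         * When the number of live pv entries crosses the high water mark,
         * give the pagedaemon a (single) kick so it can start reclaiming
         * memory before the pv zone is exhausted.
         */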
        if ((pv_entry_count > pv_entry_high_water) &&
            (pagedaemon_waken == 0)) {
                pagedaemon_waken = 1;
                wakeup(&vm_pages_needed);
        }
        pv = uma_zalloc(pvzone, M_NOWAIT);

        return (pv);
}

/* Free pv_entry structure. */
static __inline void
pv_free(pv_entry_t pve)
{

        pv_entry_count--;
        uma_zfree(pvzone, pve);
}


/* Allocate and initialize pv_entry structure. */
static void
pv_insert(pmap_t pmap, vm_offset_t va, vm_page_t m)
{
        pv_entry_t pve;

--- 35 unchanged lines hidden ---

                if ((pmap == pve->pv_pmap) && (va == pve->pv_va)) {
                        /* remove from pv_list */
                        TAILQ_REMOVE(&m->md.pv_list, pve, pv_link);
                        if (TAILQ_EMPTY(&m->md.pv_list))
                                vm_page_flag_clear(m, PG_WRITEABLE);

                        /* free pv entry struct */
                        pv_free(pve);
                        break;
                }
        }

        //debugf("pv_remove: e\n");
}

/*
 * Clean pte entry, try to free page table page if requested.
 *
 * Return 1 if ptbl pages were freed, otherwise return 0.
 */
static int
pte_remove(mmu_t mmu, pmap_t pmap, vm_offset_t va, uint8_t flags)
{
        unsigned int pdir_idx = PDIR_IDX(va);
        unsigned int ptbl_idx = PTBL_IDX(va);
        vm_page_t m;
        pte_t *ptbl;
        pte_t *pte;

        //int su = (pmap == kernel_pmap);

--- 259 unchanged lines hidden ---

        debugf(" kernelstart = 0x%08x\n", kernelstart);
        debugf(" kernelend = 0x%08x\n", kernelend);
        debugf(" kernel size = 0x%08x\n", kernelend - kernelstart);

        if (sizeof(phys_avail) / sizeof(phys_avail[0]) < availmem_regions_sz)
                panic("mmu_booke_bootstrap: phys_avail too small");

        /*
         * Remove kernel physical address range from avail regions list. Page
         * align all regions. Non-page aligned memory isn't very interesting
         * to us. Also, sort the entries for ascending addresses.
         */
        sz = 0;
        cnt = availmem_regions_sz;
        debugf("processing avail regions:\n");
        for (mp = availmem_regions; mp->mr_size; mp++) {
                s = mp->mr_start;
                e = mp->mr_start + mp->mr_size;
                debugf(" %08x-%08x -> ", s, e);

--- 66 unchanged lines hidden ---

        hwphyssz = 0;
        TUNABLE_ULONG_FETCH("hw.physmem", (u_long *) &hwphyssz);

        debugf("fill in phys_avail:\n");
        for (i = 0, j = 0; i < availmem_regions_sz; i++, j += 2) {

                debugf(" region: 0x%08x - 0x%08x (0x%08x)\n",
                    availmem_regions[i].mr_start,
                    availmem_regions[i].mr_start +
                    availmem_regions[i].mr_size,
                    availmem_regions[i].mr_size);

                if (hwphyssz != 0 &&
                    (physsz + availmem_regions[i].mr_size) >= hwphyssz) {
                        debugf(" hw.physmem adjust\n");
                        if (physsz < hwphyssz) {
                                phys_avail[j] = availmem_regions[i].mr_start;
                                phys_avail[j + 1] =

--- 15 unchanged lines hidden ---


        /* Calculate the last available physical address. */
        for (i = 0; phys_avail[i + 2] != 0; i += 2)
                ;
        Maxmem = powerpc_btop(phys_avail[i + 1]);

        debugf("Maxmem = 0x%08lx\n", Maxmem);
        debugf("phys_avail_count = %d\n", phys_avail_count);
        debugf("physsz = 0x%08x physmem = %ld (0x%08lx)\n", physsz, physmem,
            physmem);

        /*******************************************************/
        /* Initialize (statically allocated) kernel pmap. */
        /*******************************************************/
        PMAP_LOCK_INIT(kernel_pmap);
        kptbl_min = VM_MIN_KERNEL_ADDRESS / PDIR_SIZE;

        debugf("kernel_pmap = 0x%08x\n", (uint32_t)kernel_pmap);

--- 73 unchanged lines hidden ---

 * Called by vm_init, to initialize any structures that the pmap
 * system needs to map virtual memory.
 */
static void
mmu_booke_init(mmu_t mmu)
{
        int shpgperproc = PMAP_SHPGPERPROC;

        /*
         * Initialize the address space (zone) for the pv entries. Set a
         * high water mark so that the system can recover from excessive
         * numbers of pv entries.
         */
        pvzone = uma_zcreate("PV ENTRY", sizeof(struct pv_entry), NULL, NULL,
            NULL, NULL, UMA_ALIGN_PTR, UMA_ZONE_VM | UMA_ZONE_NOFREE);

--- 5 unchanged lines hidden ---

        uma_zone_set_obj(pvzone, &pvzone_obj, pv_entry_max);

        /* Pre-fill pvzone with initial number of pv entries. */
        uma_prealloc(pvzone, PV_ENTRY_ZONE_MIN);

        /* Initialize ptbl allocation. */
        ptbl_init();
}

/*
 * Map a list of wired pages into kernel virtual address space. This is
 * intended for temporary mappings which do not need page modification or
 * references recorded. Existing mappings in the region are overwritten.
 */
static void
mmu_booke_qenter(mmu_t mmu, vm_offset_t sva, vm_page_t *m, int count)
{
        vm_offset_t va;

        va = sva;
        while (count-- > 0) {
                mmu_booke_kenter(mmu, va, VM_PAGE_TO_PHYS(*m));
                va += PAGE_SIZE;
                m++;
        }
}

/*
 * Remove page mappings from kernel virtual address space. Intended for
 * temporary mappings entered by mmu_booke_qenter.
 */
static void
mmu_booke_qremove(mmu_t mmu, vm_offset_t sva, int count)
{
        vm_offset_t va;

        va = sva;
        while (count-- > 0) {
                mmu_booke_kremove(mmu, va);
                va += PAGE_SIZE;
        }
}

/*
 * Map a wired page into kernel virtual address space.
 */
static void
mmu_booke_kenter(mmu_t mmu, vm_offset_t va, vm_offset_t pa)
{
        unsigned int pdir_idx = PDIR_IDX(va);
        unsigned int ptbl_idx = PTBL_IDX(va);
        uint32_t flags;
        pte_t *pte;

        KASSERT(((va >= VM_MIN_KERNEL_ADDRESS) &&
            (va <= VM_MAX_KERNEL_ADDRESS)), ("mmu_booke_kenter: invalid va"));

#if 0
        /* assume IO mapping, set I, G bits */
        flags = (PTE_G | PTE_I | PTE_FAKE);

        /* if mapping is within system memory, do not set I, G bits */
        for (i = 0; i < totalmem_regions_sz; i++) {
                if ((pa >= totalmem_regions[i].mr_start) &&
                    (pa < (totalmem_regions[i].mr_start +

--- 72 unchanged lines hidden ---

}

/*
 * Initialize pmap associated with process 0.
 */
static void
mmu_booke_pinit0(mmu_t mmu, pmap_t pmap)
{

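        /*
         * Process 0 gets the same setup as any other pmap; additionally,
         * record it as the current pmap of the boot CPU.
         */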
        mmu_booke_pinit(mmu, pmap);
        PCPU_SET(curpmap, pmap);
}

/*
 * Initialize a preallocated and zeroed pmap structure,
 * such as one in a vmspace structure.
 */
static void
mmu_booke_pinit(mmu_t mmu, pmap_t pmap)

--- 18 unchanged lines hidden ---

 * Release any resources held by the given physical map.
 * Called when a pmap initialized by mmu_booke_pinit is being released.
 * Should only be called if the map contains no valid mappings.
 */
static void
mmu_booke_release(mmu_t mmu, pmap_t pmap)
{

        printf("mmu_booke_release: s\n");

        KASSERT(pmap->pm_stats.resident_count == 0,
            ("pmap_release: pmap resident count %ld != 0",
            pmap->pm_stats.resident_count));

        PMAP_LOCK_DESTROY(pmap);
}

#if 0
/* Not needed, kernel page tables are statically allocated. */
void
mmu_booke_growkernel(vm_offset_t maxkvaddr)
{
}
#endif

/*
 * Insert the given physical page at the specified virtual address in the
 * target physical map with the protection requested. If specified the page
 * will be wired down.
 */
static void
mmu_booke_enter(mmu_t mmu, pmap_t pmap, vm_offset_t va, vm_page_t m,
    vm_prot_t prot, boolean_t wired)
{

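        /*
         * Take the page queues and pmap locks here and let
         * mmu_booke_enter_locked() do the actual work.
         */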
        vm_page_lock_queues();
        PMAP_LOCK(pmap);
        mmu_booke_enter_locked(mmu, pmap, va, m, prot, wired);
        vm_page_unlock_queues();
        PMAP_UNLOCK(pmap);
}

static void
mmu_booke_enter_locked(mmu_t mmu, pmap_t pmap, vm_offset_t va, vm_page_t m,
    vm_prot_t prot, boolean_t wired)
{
        pte_t *pte;
        vm_paddr_t pa;
        uint32_t flags;
        int su, sync;

        pa = VM_PAGE_TO_PHYS(m);
        su = (pmap == kernel_pmap);
        sync = 0;

        //debugf("mmu_booke_enter_locked: s (pmap=0x%08x su=%d tid=%d m=0x%08x va=0x%08x "
        //      "pa=0x%08x prot=0x%08x wired=%d)\n",
        //      (u_int32_t)pmap, su, pmap->pm_tid,
        //      (u_int32_t)m, va, pa, prot, wired);

        if (su) {
                KASSERT(((va >= virtual_avail) &&
                    (va <= VM_MAX_KERNEL_ADDRESS)),
                    ("mmu_booke_enter_locked: kernel pmap, non kernel va"));
        } else {
                KASSERT((va <= VM_MAXUSER_ADDRESS),
                    ("mmu_booke_enter_locked: user pmap, non user va"));
        }

        PMAP_LOCK_ASSERT(pmap, MA_OWNED);

        /*
         * If there is an existing mapping, and the physical address has not
         * changed, must be protection or wiring change.
         */

--- 122 unchanged lines hidden ---

                KASSERT(pte == NULL, ("%s:%d", __func__, __LINE__));

                flags = PTE_SR | PTE_VALID | PTE_UR | PTE_M;

                pte_enter(mmu, pmap, m, va, flags);
                __syncicache((void *)va, PAGE_SIZE);
                pte_remove(mmu, pmap, va, PTBL_UNHOLD);
        }
}

/*
 * Maps a sequence of resident pages belonging to the same object.
 * The sequence begins with the given page m_start. This page is
 * mapped at the given virtual address start. Each subsequent page is
 * mapped at a virtual address that is offset from start by the same
 * amount as the page is offset from m_start within the object. The

--- 9 unchanged lines hidden ---

{
        vm_page_t m;
        vm_pindex_t diff, psize;

        psize = atop(end - start);
        m = m_start;
        PMAP_LOCK(pmap);
        while (m != NULL && (diff = m->pindex - m_start->pindex) < psize) {
                mmu_booke_enter_locked(mmu, pmap, start + ptoa(diff), m,
                    prot & (VM_PROT_READ | VM_PROT_EXECUTE), FALSE);
                m = TAILQ_NEXT(m, listq);
        }
        PMAP_UNLOCK(pmap);
}

static void
mmu_booke_enter_quick(mmu_t mmu, pmap_t pmap, vm_offset_t va, vm_page_t m,
    vm_prot_t prot)
{

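        /*
         * "Quick" mappings are never wired and are restricted to read and
         * execute permissions.
         */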
        PMAP_LOCK(pmap);
        mmu_booke_enter_locked(mmu, pmap, va, m,
            prot & (VM_PROT_READ | VM_PROT_EXECUTE), FALSE);
        PMAP_UNLOCK(pmap);
}

/*
 * Remove the given range of addresses from the specified map.
 *
 * It is assumed that the start and end are properly rounded to the page size.
 */
static void
mmu_booke_remove(mmu_t mmu, pmap_t pmap, vm_offset_t va, vm_offset_t endva)
{
        pte_t *pte;
        uint8_t hold_flag;

        int su = (pmap == kernel_pmap);

        //debugf("mmu_booke_remove: s (su = %d pmap=0x%08x tid=%d va=0x%08x endva=0x%08x)\n",
        //        su, (u_int32_t)pmap, pmap->pm_tid, va, endva);

        if (su) {
                KASSERT(((va >= virtual_avail) &&
                    (va <= VM_MAX_KERNEL_ADDRESS)),
                    ("mmu_booke_remove: kernel pmap, non kernel va"));
        } else {
                KASSERT((va <= VM_MAXUSER_ADDRESS),
                    ("mmu_booke_remove: user pmap, non user va"));
        }

        if (PMAP_REMOVE_DONE(pmap)) {
                //debugf("mmu_booke_remove: e (empty)\n");
                return;
        }

        hold_flag = PTBL_HOLD_FLAG(pmap);

--- 14 unchanged lines hidden ---

/*
 * Remove physical page from all pmaps in which it resides.
 */
static void
mmu_booke_remove_all(mmu_t mmu, vm_page_t m)
{
        pv_entry_t pv, pvn;
        uint8_t hold_flag;

        mtx_assert(&vm_page_queue_mtx, MA_OWNED);

        for (pv = TAILQ_FIRST(&m->md.pv_list); pv != NULL; pv = pvn) {
                pvn = TAILQ_NEXT(pv, pv_link);

                PMAP_LOCK(pv->pv_pmap);
                hold_flag = PTBL_HOLD_FLAG(pv->pv_pmap);
                pte_remove(mmu, pv->pv_pmap, pv->pv_va, hold_flag);
                PMAP_UNLOCK(pv->pv_pmap);
        }
        vm_page_flag_clear(m, PG_WRITEABLE);
}

/*
 * Map a range of physical addresses into kernel virtual address space.
 */
static vm_offset_t
mmu_booke_map(mmu_t mmu, vm_offset_t *virt, vm_offset_t pa_start,
    vm_offset_t pa_end, int prot)
{
        vm_offset_t sva = *virt;
        vm_offset_t va = sva;

--- 193 unchanged lines hidden ---

 * protection.
 */
static vm_page_t
mmu_booke_extract_and_hold(mmu_t mmu, pmap_t pmap, vm_offset_t va,
    vm_prot_t prot)
{
        pte_t *pte;
        vm_page_t m;
        uint32_t pte_wbit;

        m = NULL;
        vm_page_lock_queues();
        PMAP_LOCK(pmap);

        pte = pte_find(mmu, pmap, va);
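        /*
         * Hand the page back for a write request only if the mapping is
         * already writable (kernel or user write bit set); read-only
         * requests are satisfied for any valid mapping.
         */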
        if ((pte != NULL) && PTE_ISVALID(pte)) {
                if (pmap == kernel_pmap)
                        pte_wbit = PTE_SW;
                else
                        pte_wbit = PTE_UW;

                if ((pte->flags & pte_wbit) || ((prot & VM_PROT_WRITE) == 0)) {
                        m = PHYS_TO_VM_PAGE(PTE_PA(pte));

--- 23 unchanged lines hidden ---

 *
 * off and size must reside within a single page.
 */
static void
mmu_booke_zero_page_area(mmu_t mmu, vm_page_t m, int off, int size)
{
        vm_offset_t va;

        /* XXX KASSERT off and size are within a single page? */
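        /*
         * A sketch of the check the XXX above asks for (not enabled here),
         * based on the single-page contract stated in the function header:
         *
         *      KASSERT(off + size <= PAGE_SIZE,
         *          ("mmu_booke_zero_page_area: off/size spans page boundary"));
         */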

        mtx_lock(&zero_page_mutex);
        va = zero_page_va;

        mmu_booke_kenter(mmu, va, VM_PAGE_TO_PHYS(m));
        bzero((caddr_t)va + off, size);
        mmu_booke_kremove(mmu, va);

        mtx_unlock(&zero_page_mutex);
}

/*
 * mmu_booke_zero_page zeros the specified hardware page.
 */
static void
mmu_booke_zero_page(mmu_t mmu, vm_page_t m)
{

        mmu_booke_zero_page_area(mmu, m, 0, PAGE_SIZE);
}

/*
 * mmu_booke_copy_page copies the specified (machine independent) page by
 * mapping the page into virtual memory and using memcopy to copy the page,
 * one machine dependent page at a time.
 */
static void

--- 31 unchanged lines hidden ---

 * to be called from the vm_pagezero process only and outside of Giant. No
 * lock is required.
 */
static void
mmu_booke_zero_page_idle(mmu_t mmu, vm_page_t m)
{
        vm_offset_t va;

        va = zero_page_idle_va;
        mmu_booke_kenter(mmu, va, VM_PAGE_TO_PHYS(m));
        bzero((caddr_t)va, PAGE_SIZE);
        mmu_booke_kremove(mmu, va);
}

/*
 * Return whether or not the specified physical page was modified
 * in any of the physical maps.
 */
static boolean_t
mmu_booke_is_modified(mmu_t mmu, vm_page_t m)

--- 18 unchanged lines hidden ---

                }
make_sure_to_unlock:
                PMAP_UNLOCK(pv->pv_pmap);
        }
        return (FALSE);
}

/*
 * Return whether or not the specified virtual address is eligible
 * for prefault.
 */
static boolean_t
mmu_booke_is_prefaultable(mmu_t mmu, pmap_t pmap, vm_offset_t addr)
{

        return (FALSE);
}

--- 151 unchanged lines hidden ---

        int loops;

        mtx_assert(&vm_page_queue_mtx, MA_OWNED);
        if ((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) != 0)
                return (FALSE);

        loops = 0;
        TAILQ_FOREACH(pv, &m->md.pv_list, pv_link) {
                if (pv->pv_pmap == pmap)
                        return (TRUE);

                if (++loops >= 16)
                        break;
        }
        return (FALSE);
}

--- 74 unchanged lines hidden ---

/*
 * 'Unmap' a range mapped by mmu_booke_mapdev().
 */
static void
mmu_booke_unmapdev(mmu_t mmu, vm_offset_t va, vm_size_t size)
{
        vm_offset_t base, offset;

        /*
         * Unmap only if this is inside kernel virtual space.
         */
        if ((va >= VM_MIN_KERNEL_ADDRESS) && (va <= VM_MAX_KERNEL_ADDRESS)) {
                base = trunc_page(va);
                offset = va & PAGE_MASK;
                size = roundup(offset + size, PAGE_SIZE);
                kmem_free(kernel_map, base, size);
        }
}

/*
 * mmu_booke_object_init_pt preloads the ptes for a given object into the
 * specified pmap. This eliminates the blast of soft faults on process startup
 * and immediately after an mmap.
 */
static void
mmu_booke_object_init_pt(mmu_t mmu, pmap_t pmap, vm_offset_t addr,
    vm_object_t object, vm_pindex_t pindex, vm_size_t size)
{

        VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
        KASSERT(object->type == OBJT_DEVICE,
            ("mmu_booke_object_init_pt: non-device object"));
}

/*
 * Perform the pmap work for mincore.
 */

--- 164 unchanged lines hidden ---

/*
 * Write given entry to TLB1 hardware.
 * Use 32 bit pa, clear 4 high-order bits of RPN (mas7).
 */
static void
tlb1_write_entry(unsigned int idx)
{
        uint32_t mas0, mas7;

        //debugf("tlb1_write_entry: s\n");

        /* Clear high order RPN bits */
        mas7 = 0;

        /* Select entry */
        mas0 = MAS0_TLBSEL(1) | MAS0_ESEL(idx);
        //debugf("tlb1_write_entry: mas0 = 0x%08x\n", mas0);

        mtspr(SPR_MAS0, mas0);
        __asm __volatile("isync");
        mtspr(SPR_MAS1, tlb1[idx].mas1);
        __asm __volatile("isync");
        mtspr(SPR_MAS2, tlb1[idx].mas2);
        __asm __volatile("isync");
        mtspr(SPR_MAS3, tlb1[idx].mas3);
        __asm __volatile("isync");
        mtspr(SPR_MAS7, mas7);
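        /*
         * All MAS registers are staged at this point; the tlbwe below
         * commits them to the selected TLB1 entry, bracketed by isync/msync
         * to order the context change against surrounding accesses.
         */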
        __asm __volatile("isync; tlbwe; isync; msync");

        //debugf("tlb1_write_entry: e\n");;
}

/*
 * Return the largest uint value log such that 2^log <= num.
 */
static unsigned int

--- 88 unchanged lines hidden ---

                return (-1);
        else if (*sza < *szb)
                return (1);
        else
                return (0);
}

/*
 * Map a contiguous RAM region into the TLB1 using at most
 * KERNEL_REGION_MAX_TLB_ENTRIES entries.
 *
 * If necessary, round up the last entry size and return the total size
 * used by all allocated entries.
 */
vm_size_t
tlb1_mapin_region(vm_offset_t va, vm_offset_t pa, vm_size_t size)
{
        vm_size_t entry_size[KERNEL_REGION_MAX_TLB_ENTRIES];
        vm_size_t mapped_size, sz, esz;
        unsigned int log;
        int i;

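        /*
         * Overview of the logic below: split the region into a per-entry
         * size array, sort the sizes in descending order, then program one
         * TLB1 entry per non-zero size. Since the last entry may be rounded
         * up, the returned mapped_size can exceed the requested size.
         */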
        CTR4(KTR_PMAP, "%s: region size = 0x%08x va = 0x%08x pa = 0x%08x",
            __func__, size, va, pa);

        mapped_size = 0;
        sz = size;
        memset(entry_size, 0, sizeof(entry_size));

        /* Calculate entry sizes. */
        for (i = 0; i < KERNEL_REGION_MAX_TLB_ENTRIES && sz > 0; i++) {

--- 19 unchanged lines hidden ---

        qsort(entry_size, KERNEL_REGION_MAX_TLB_ENTRIES,
            sizeof(vm_size_t), tlb1_entry_size_cmp);

        /* Load TLB1 entries. */
        for (i = 0; i < KERNEL_REGION_MAX_TLB_ENTRIES; i++) {
                esz = entry_size[i];
                if (!esz)
                        break;

                CTR5(KTR_PMAP, "%s: entry %d: sz = 0x%08x (va = 0x%08x "
                    "pa = 0x%08x)", __func__, tlb1_idx, esz, va, pa);

                tlb1_set_entry(va, pa, esz, _TLB_ENTRY_MEM);

                va += esz;
                pa += esz;
        }

        CTR3(KTR_PMAP, "%s: mapped size 0x%08x (wasted space 0x%08x)",
            __func__, mapped_size, mapped_size - size);

        return (mapped_size);
}

/*
 * TLB1 initialization routine, to be called after the very first
 * assembler level setup done in locore.S.
 */
void
tlb1_init(vm_offset_t ccsrbar)
{
        uint32_t mas0;

        /* TLB1[1] is used to map the kernel. Save that entry. */
        mas0 = MAS0_TLBSEL(1) | MAS0_ESEL(1);
        mtspr(SPR_MAS0, mas0);
        __asm __volatile("isync; tlbre");

        tlb1[1].mas1 = mfspr(SPR_MAS1);
        tlb1[1].mas2 = mfspr(SPR_MAS2);
        tlb1[1].mas3 = mfspr(SPR_MAS3);

--- 13 unchanged lines hidden ---

/*
 * Setup MAS4 defaults.
 * These values are loaded to MAS0-2 on a TLB miss.
 */
static void
set_mas4_defaults(void)
{
        uint32_t mas4;

        /* Defaults: TLB0, PID0, TSIZED=4K */
        mas4 = MAS4_TLBSELD0;
        mas4 |= (TLB_SIZE_4K << MAS4_TSIZED_SHIFT) & MAS4_TSIZED_MASK;

        mtspr(SPR_MAS4, mas4);
        __asm __volatile("isync");
}

/*
 * Print out contents of the MAS registers for each TLB1 entry
 */
void
tlb1_print_tlbentries(void)
{

--- 32 unchanged lines hidden ---

/*
 * Return 0 if the physical IO range is encompassed by one of the
 * TLB1 entries, otherwise return a related error code.
 */
static int
tlb1_iomapped(int i, vm_paddr_t pa, vm_size_t size, vm_offset_t *va)
{
        uint32_t prot;
        vm_paddr_t pa_start;
        vm_paddr_t pa_end;
        unsigned int entry_tsize;
        vm_size_t entry_size;

        *va = (vm_offset_t)NULL;

        /* Skip invalid entries */

--- 30 unchanged lines hidden ---