Deleted Added
pmap.c (305881) pmap.c (305882)
1/*-
2 * Copyright (c) 1991 Regents of the University of California.
3 * All rights reserved.
4 * Copyright (c) 1994 John S. Dyson
5 * All rights reserved.
6 * Copyright (c) 1994 David Greenman
7 * All rights reserved.
8 * Copyright (c) 2003 Peter Wemm

--- 70 unchanged lines hidden (view full) ---

79 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
80 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
81 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
82 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
83 * SUCH DAMAGE.
84 */
85
86#include <sys/cdefs.h>
1/*-
2 * Copyright (c) 1991 Regents of the University of California.
3 * All rights reserved.
4 * Copyright (c) 1994 John S. Dyson
5 * All rights reserved.
6 * Copyright (c) 1994 David Greenman
7 * All rights reserved.
8 * Copyright (c) 2003 Peter Wemm

--- 70 unchanged lines hidden (view full) ---

79 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
80 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
81 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
82 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
83 * SUCH DAMAGE.
84 */
85
86#include <sys/cdefs.h>
87__FBSDID("$FreeBSD: stable/11/sys/arm64/arm64/pmap.c 305881 2016-09-16 12:20:42Z andrew $");
87__FBSDID("$FreeBSD: stable/11/sys/arm64/arm64/pmap.c 305882 2016-09-16 12:36:11Z andrew $");
88
89/*
90 * Manages physical address maps.
91 *
92 * Since the information managed by this module is
93 * also stored by the logical address mapping module,
94 * this module may throw away valid virtual-to-physical
95 * mappings at almost any time. However, invalidations

--- 5 unchanged lines hidden (view full) ---

101 * this module may delay invalidate or reduced protection
102 * operations until such time as they are actually
103 * necessary. This module is given full information as
104 * to which processors are currently using which maps,
105 * and to when physical maps must be made correct.
106 */
107
108#include <sys/param.h>
88
89/*
90 * Manages physical address maps.
91 *
92 * Since the information managed by this module is
93 * also stored by the logical address mapping module,
94 * this module may throw away valid virtual-to-physical
95 * mappings at almost any time. However, invalidations

--- 5 unchanged lines hidden (view full) ---

101 * this module may delay invalidate or reduced protection
102 * operations until such time as they are actually
103 * necessary. This module is given full information as
104 * to which processors are currently using which maps,
105 * and to when physical maps must be made correct.
106 */
107
108#include <sys/param.h>
109#include <sys/bitstring.h>
109#include <sys/bus.h>
110#include <sys/systm.h>
111#include <sys/kernel.h>
112#include <sys/ktr.h>
113#include <sys/lock.h>
114#include <sys/malloc.h>
115#include <sys/mman.h>
116#include <sys/msgbuf.h>

--- 12 unchanged lines hidden (view full) ---

129#include <vm/vm_param.h>
130#include <vm/vm_kern.h>
131#include <vm/vm_page.h>
132#include <vm/vm_map.h>
133#include <vm/vm_object.h>
134#include <vm/vm_extern.h>
135#include <vm/vm_pageout.h>
136#include <vm/vm_pager.h>
110#include <sys/bus.h>
111#include <sys/systm.h>
112#include <sys/kernel.h>
113#include <sys/ktr.h>
114#include <sys/lock.h>
115#include <sys/malloc.h>
116#include <sys/mman.h>
117#include <sys/msgbuf.h>

--- 12 unchanged lines hidden (view full) ---

130#include <vm/vm_param.h>
131#include <vm/vm_kern.h>
132#include <vm/vm_page.h>
133#include <vm/vm_map.h>
134#include <vm/vm_object.h>
135#include <vm/vm_extern.h>
136#include <vm/vm_pageout.h>
137#include <vm/vm_pager.h>
138#include <vm/vm_phys.h>
137#include <vm/vm_radix.h>
138#include <vm/vm_reserv.h>
139#include <vm/uma.h>
140
141#include <machine/machdep.h>
142#include <machine/md_var.h>
143#include <machine/pcb.h>
144

--- 26 unchanged lines hidden (view full) ---

171
172#ifdef PV_STATS
173#define PV_STAT(x) do { x ; } while (0)
174#else
175#define PV_STAT(x) do { } while (0)
176#endif
177
178#define pmap_l2_pindex(v) ((v) >> L2_SHIFT)
139#include <vm/vm_radix.h>
140#include <vm/vm_reserv.h>
141#include <vm/uma.h>
142
143#include <machine/machdep.h>
144#include <machine/md_var.h>
145#include <machine/pcb.h>
146

--- 26 unchanged lines hidden (view full) ---

173
174#ifdef PV_STATS
175#define PV_STAT(x) do { x ; } while (0)
176#else
177#define PV_STAT(x) do { } while (0)
178#endif
179
180#define pmap_l2_pindex(v) ((v) >> L2_SHIFT)
181#define pa_to_pvh(pa) (&pv_table[pmap_l2_pindex(pa)])
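The new pa_to_pvh() macro hands out one pv head per 2MB physical frame: pmap_l2_pindex() shifts the physical address right by L2_SHIFT (21 with the 4KB granule used here), and that value indexes pv_table directly. A minimal illustration with hypothetical addresses:

    /* Hypothetical addresses: both lie in the same 2MB frame, so they
     * resolve to the same superpage pv head. */
    struct md_page *a = pa_to_pvh(0x40200000);  /* -> &pv_table[0x201] */
    struct md_page *b = pa_to_pvh(0x403ff000);  /* -> &pv_table[0x201] */
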
179
180#define NPV_LIST_LOCKS MAXCPU
181
182#define PHYS_TO_PV_LIST_LOCK(pa) \
183 (&pv_list_locks[pa_index(pa) % NPV_LIST_LOCKS])
184
185#define CHANGE_PV_LIST_LOCK_TO_PHYS(lockp, pa) do { \
186 struct rwlock **_lockp = (lockp); \

--- 26 unchanged lines hidden (view full) ---

213struct pmap kernel_pmap_store;
214
215vm_offset_t virtual_avail; /* VA of first avail page (after kernel bss) */
216vm_offset_t virtual_end; /* VA of last avail page (end of kernel AS) */
217vm_offset_t kernel_vm_end = 0;
218
219struct msgbuf *msgbufp = NULL;
220
182
183#define NPV_LIST_LOCKS MAXCPU
184
185#define PHYS_TO_PV_LIST_LOCK(pa) \
186 (&pv_list_locks[pa_index(pa) % NPV_LIST_LOCKS])
187
188#define CHANGE_PV_LIST_LOCK_TO_PHYS(lockp, pa) do { \
189 struct rwlock **_lockp = (lockp); \

--- 26 unchanged lines hidden (view full) ---

216struct pmap kernel_pmap_store;
217
218vm_offset_t virtual_avail; /* VA of first avail page (after kernel bss) */
219vm_offset_t virtual_end; /* VA of last avail page (end of kernel AS) */
220vm_offset_t kernel_vm_end = 0;
221
222struct msgbuf *msgbufp = NULL;
223
224/*
225 * Data for the pv entry allocation mechanism.
226 * Updates to pv_invl_gen are protected by the pv_list_locks[]
227 * elements, but reads are not.
228 */
229static struct md_page *pv_table;
230static struct md_page pv_dummy;
231
221vm_paddr_t dmap_phys_base; /* The start of the dmap region */
222vm_paddr_t dmap_phys_max; /* The limit of the dmap region */
223vm_offset_t dmap_max_addr; /* The virtual address limit of the dmap */
224
225/* This code assumes all L1 DMAP entries will be used */
226CTASSERT((DMAP_MIN_ADDRESS & ~L0_OFFSET) == DMAP_MIN_ADDRESS);
227CTASSERT((DMAP_MAX_ADDRESS & ~L0_OFFSET) == DMAP_MAX_ADDRESS);
228
229#define DMAP_TABLES ((DMAP_MAX_ADDRESS - DMAP_MIN_ADDRESS) >> L0_SHIFT)
230extern pt_entry_t pagetable_dmap[];
231
232vm_paddr_t dmap_phys_base; /* The start of the dmap region */
233vm_paddr_t dmap_phys_max; /* The limit of the dmap region */
234vm_offset_t dmap_max_addr; /* The virtual address limit of the dmap */
235
236/* This code assumes all L1 DMAP entries will be used */
237CTASSERT((DMAP_MIN_ADDRESS & ~L0_OFFSET) == DMAP_MIN_ADDRESS);
238CTASSERT((DMAP_MAX_ADDRESS & ~L0_OFFSET) == DMAP_MAX_ADDRESS);
239
240#define DMAP_TABLES ((DMAP_MAX_ADDRESS - DMAP_MIN_ADDRESS) >> L0_SHIFT)
241extern pt_entry_t pagetable_dmap[];
242
243static SYSCTL_NODE(_vm, OID_AUTO, pmap, CTLFLAG_RD, 0, "VM/pmap parameters");
244
245static int superpages_enabled = 0;
246SYSCTL_INT(_vm_pmap, OID_AUTO, superpages_enabled,
247 CTLFLAG_RDTUN | CTLFLAG_NOFETCH, &superpages_enabled, 0,
248 "Are large page mappings enabled?");
249
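Because superpages_enabled is declared CTLFLAG_RDTUN | CTLFLAG_NOFETCH, it is read-only at runtime and is picked up as a loader tunable by the TUNABLE_INT_FETCH() call added to pmap_init() below. Assuming the usual FreeBSD boot path, promotion would be enabled with a loader.conf entry such as:

    # /boot/loader.conf (illustrative)
    vm.pmap.superpages_enabled=1
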
232/*
233 * Data for the pv entry allocation mechanism
234 */
235static TAILQ_HEAD(pch, pv_chunk) pv_chunks = TAILQ_HEAD_INITIALIZER(pv_chunks);
236static struct mtx pv_chunks_mutex;
237static struct rwlock pv_list_locks[NPV_LIST_LOCKS];
238
239static void free_pv_chunk(struct pv_chunk *pc);
240static void free_pv_entry(pmap_t pmap, pv_entry_t pv);
241static pv_entry_t get_pv_entry(pmap_t pmap, struct rwlock **lockp);
242static vm_page_t reclaim_pv_chunk(pmap_t locked_pmap, struct rwlock **lockp);
243static void pmap_pvh_free(struct md_page *pvh, pmap_t pmap, vm_offset_t va);
244static pv_entry_t pmap_pvh_remove(struct md_page *pvh, pmap_t pmap,
245 vm_offset_t va);
250/*
251 * Data for the pv entry allocation mechanism
252 */
253static TAILQ_HEAD(pch, pv_chunk) pv_chunks = TAILQ_HEAD_INITIALIZER(pv_chunks);
254static struct mtx pv_chunks_mutex;
255static struct rwlock pv_list_locks[NPV_LIST_LOCKS];
256
257static void free_pv_chunk(struct pv_chunk *pc);
258static void free_pv_entry(pmap_t pmap, pv_entry_t pv);
259static pv_entry_t get_pv_entry(pmap_t pmap, struct rwlock **lockp);
260static vm_page_t reclaim_pv_chunk(pmap_t locked_pmap, struct rwlock **lockp);
261static void pmap_pvh_free(struct md_page *pvh, pmap_t pmap, vm_offset_t va);
262static pv_entry_t pmap_pvh_remove(struct md_page *pvh, pmap_t pmap,
263 vm_offset_t va);
264
265static int pmap_change_attr(vm_offset_t va, vm_size_t size, int mode);
266static int pmap_change_attr_locked(vm_offset_t va, vm_size_t size, int mode);
267static pt_entry_t *pmap_demote_l1(pmap_t pmap, pt_entry_t *l1, vm_offset_t va);
268static pt_entry_t *pmap_demote_l2_locked(pmap_t pmap, pt_entry_t *l2,
269 vm_offset_t va, struct rwlock **lockp);
270static pt_entry_t *pmap_demote_l2(pmap_t pmap, pt_entry_t *l2, vm_offset_t va);
246static vm_page_t pmap_enter_quick_locked(pmap_t pmap, vm_offset_t va,
247 vm_page_t m, vm_prot_t prot, vm_page_t mpte, struct rwlock **lockp);
248static int pmap_remove_l3(pmap_t pmap, pt_entry_t *l3, vm_offset_t sva,
249 pd_entry_t ptepde, struct spglist *free, struct rwlock **lockp);
250static boolean_t pmap_try_insert_pv_entry(pmap_t pmap, vm_offset_t va,
251 vm_page_t m, struct rwlock **lockp);
252
253static vm_page_t _pmap_alloc_l3(pmap_t pmap, vm_pindex_t ptepindex,

--- 163 unchanged lines hidden (view full) ---

417 *level = 3;
418 l3 = pmap_l2_to_l3(l2, va);
419 if ((pmap_load(l3) & ATTR_DESCR_MASK) != L3_PAGE)
420 return (NULL);
421
422 return (l3);
423}
424
271static vm_page_t pmap_enter_quick_locked(pmap_t pmap, vm_offset_t va,
272 vm_page_t m, vm_prot_t prot, vm_page_t mpte, struct rwlock **lockp);
273static int pmap_remove_l3(pmap_t pmap, pt_entry_t *l3, vm_offset_t sva,
274 pd_entry_t ptepde, struct spglist *free, struct rwlock **lockp);
275static boolean_t pmap_try_insert_pv_entry(pmap_t pmap, vm_offset_t va,
276 vm_page_t m, struct rwlock **lockp);
277
278static vm_page_t _pmap_alloc_l3(pmap_t pmap, vm_pindex_t ptepindex,

--- 163 unchanged lines hidden (view full) ---

442 *level = 3;
443 l3 = pmap_l2_to_l3(l2, va);
444 if ((pmap_load(l3) & ATTR_DESCR_MASK) != L3_PAGE)
445 return (NULL);
446
447 return (l3);
448}
449
450static inline bool
451pmap_superpages_enabled(void)
452{
453
454 return (superpages_enabled != 0);
455}
456
425bool
426pmap_get_tables(pmap_t pmap, vm_offset_t va, pd_entry_t **l0, pd_entry_t **l1,
427 pd_entry_t **l2, pt_entry_t **l3)
428{
429 pd_entry_t *l0p, *l1p, *l2p;
430
431 if (pmap->pm_l0 == NULL)
432 return (false);

--- 39 unchanged lines hidden (view full) ---

472
473static __inline int
474pmap_l3_valid(pt_entry_t l3)
475{
476
477 return ((l3 & ATTR_DESCR_MASK) == L3_PAGE);
478}
479
457bool
458pmap_get_tables(pmap_t pmap, vm_offset_t va, pd_entry_t **l0, pd_entry_t **l1,
459 pd_entry_t **l2, pt_entry_t **l3)
460{
461 pd_entry_t *l0p, *l1p, *l2p;
462
463 if (pmap->pm_l0 == NULL)
464 return (false);

--- 39 unchanged lines hidden (view full) ---

504
505static __inline int
506pmap_l3_valid(pt_entry_t l3)
507{
508
509 return ((l3 & ATTR_DESCR_MASK) == L3_PAGE);
510}
511
512
 513/* Is a level 1 or 2 entry a valid block and cacheable */
514CTASSERT(L1_BLOCK == L2_BLOCK);
480static __inline int
515static __inline int
516pmap_pte_valid_cacheable(pt_entry_t pte)
517{
518
519 return (((pte & ATTR_DESCR_MASK) == L1_BLOCK) &&
520 ((pte & ATTR_IDX_MASK) == ATTR_IDX(CACHED_MEMORY)));
521}
522
523static __inline int
481pmap_l3_valid_cacheable(pt_entry_t l3)
482{
483
484 return (((l3 & ATTR_DESCR_MASK) == L3_PAGE) &&
485 ((l3 & ATTR_IDX_MASK) == ATTR_IDX(CACHED_MEMORY)));
486}
487
488#define PTE_SYNC(pte) cpu_dcache_wb_range((vm_offset_t)pte, sizeof(*pte))

--- 340 unchanged lines hidden (view full) ---

829/*
830 * Initialize the pmap module.
831 * Called by vm_init, to initialize any structures that the pmap
832 * system needs to map virtual memory.
833 */
834void
835pmap_init(void)
836{
524pmap_l3_valid_cacheable(pt_entry_t l3)
525{
526
527 return (((l3 & ATTR_DESCR_MASK) == L3_PAGE) &&
528 ((l3 & ATTR_IDX_MASK) == ATTR_IDX(CACHED_MEMORY)));
529}
530
531#define PTE_SYNC(pte) cpu_dcache_wb_range((vm_offset_t)pte, sizeof(*pte))

--- 340 unchanged lines hidden (view full) ---

872/*
873 * Initialize the pmap module.
874 * Called by vm_init, to initialize any structures that the pmap
875 * system needs to map virtual memory.
876 */
877void
878pmap_init(void)
879{
837 int i;
880 vm_size_t s;
881 int i, pv_npg;
838
839 /*
882
883 /*
884 * Are large page mappings enabled?
885 */
886 TUNABLE_INT_FETCH("vm.pmap.superpages_enabled", &superpages_enabled);
887
888 /*
840 * Initialize the pv chunk list mutex.
841 */
842 mtx_init(&pv_chunks_mutex, "pmap pv chunk list", NULL, MTX_DEF);
843
844 /*
845 * Initialize the pool of pv list locks.
846 */
847 for (i = 0; i < NPV_LIST_LOCKS; i++)
848 rw_init(&pv_list_locks[i], "pmap pv list");
889 * Initialize the pv chunk list mutex.
890 */
891 mtx_init(&pv_chunks_mutex, "pmap pv chunk list", NULL, MTX_DEF);
892
893 /*
894 * Initialize the pool of pv list locks.
895 */
896 for (i = 0; i < NPV_LIST_LOCKS; i++)
897 rw_init(&pv_list_locks[i], "pmap pv list");
898
899 /*
900 * Calculate the size of the pv head table for superpages.
901 */
902 pv_npg = howmany(vm_phys_segs[vm_phys_nsegs - 1].end, L2_SIZE);
903
904 /*
905 * Allocate memory for the pv head table for superpages.
906 */
907 s = (vm_size_t)(pv_npg * sizeof(struct md_page));
908 s = round_page(s);
909 pv_table = (struct md_page *)kmem_malloc(kernel_arena, s,
910 M_WAITOK | M_ZERO);
911 for (i = 0; i < pv_npg; i++)
912 TAILQ_INIT(&pv_table[i].pv_list);
913 TAILQ_INIT(&pv_dummy.pv_list);
849}
850
914}
915
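As a concrete sizing example: pv_npg is one md_page per 2MB of physical address space, so a machine whose last vm_phys segment ends at 4GB gets howmany(4GB, 2MB) = 2048 entries, and pv_table occupies round_page(2048 * sizeof(struct md_page)) bytes of kernel_arena memory.
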
916static SYSCTL_NODE(_vm_pmap, OID_AUTO, l2, CTLFLAG_RD, 0,
917 "2MB page mapping counters");
918
919static u_long pmap_l2_demotions;
920SYSCTL_ULONG(_vm_pmap_l2, OID_AUTO, demotions, CTLFLAG_RD,
921 &pmap_l2_demotions, 0, "2MB page demotions");
922
923static u_long pmap_l2_p_failures;
924SYSCTL_ULONG(_vm_pmap_l2, OID_AUTO, p_failures, CTLFLAG_RD,
925 &pmap_l2_p_failures, 0, "2MB page promotion failures");
926
927static u_long pmap_l2_promotions;
928SYSCTL_ULONG(_vm_pmap_l2, OID_AUTO, promotions, CTLFLAG_RD,
929 &pmap_l2_promotions, 0, "2MB page promotions");
930
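Once the kernel is running, the new counters can be read back to judge how often promotion succeeds and how often superpages have to be split again; the values below are placeholders:

    # sysctl vm.pmap.l2
    vm.pmap.l2.promotions: <n>
    vm.pmap.l2.p_failures: <n>
    vm.pmap.l2.demotions: <n>
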
851/*
852 * Invalidate a single TLB entry.
853 */
854PMAP_INLINE void
855pmap_invalidate_page(pmap_t pmap, vm_offset_t va)
856{
857
858 sched_pin();

--- 90 unchanged lines hidden (view full) ---

949 * Atomically extract and hold the physical page
950 * with the given pmap and virtual address pair
951 * if that mapping permits the given protection.
952 */
953vm_page_t
954pmap_extract_and_hold(pmap_t pmap, vm_offset_t va, vm_prot_t prot)
955{
956 pt_entry_t *pte, tpte;
931/*
932 * Invalidate a single TLB entry.
933 */
934PMAP_INLINE void
935pmap_invalidate_page(pmap_t pmap, vm_offset_t va)
936{
937
938 sched_pin();

--- 90 unchanged lines hidden (view full) ---

1029 * Atomically extract and hold the physical page
1030 * with the given pmap and virtual address pair
1031 * if that mapping permits the given protection.
1032 */
1033vm_page_t
1034pmap_extract_and_hold(pmap_t pmap, vm_offset_t va, vm_prot_t prot)
1035{
1036 pt_entry_t *pte, tpte;
1037 vm_offset_t off;
957 vm_paddr_t pa;
958 vm_page_t m;
959 int lvl;
960
961 pa = 0;
962 m = NULL;
963 PMAP_LOCK(pmap);
964retry:

--- 5 unchanged lines hidden (view full) ---

970 ("pmap_extract_and_hold: Invalid level %d", lvl));
971 CTASSERT(L1_BLOCK == L2_BLOCK);
972 KASSERT((lvl == 3 && (tpte & ATTR_DESCR_MASK) == L3_PAGE) ||
973 (lvl < 3 && (tpte & ATTR_DESCR_MASK) == L1_BLOCK),
974 ("pmap_extract_and_hold: Invalid pte at L%d: %lx", lvl,
975 tpte & ATTR_DESCR_MASK));
976 if (((tpte & ATTR_AP_RW_BIT) == ATTR_AP(ATTR_AP_RW)) ||
977 ((prot & VM_PROT_WRITE) == 0)) {
1038 vm_paddr_t pa;
1039 vm_page_t m;
1040 int lvl;
1041
1042 pa = 0;
1043 m = NULL;
1044 PMAP_LOCK(pmap);
1045retry:

--- 5 unchanged lines hidden (view full) ---

1051 ("pmap_extract_and_hold: Invalid level %d", lvl));
1052 CTASSERT(L1_BLOCK == L2_BLOCK);
1053 KASSERT((lvl == 3 && (tpte & ATTR_DESCR_MASK) == L3_PAGE) ||
1054 (lvl < 3 && (tpte & ATTR_DESCR_MASK) == L1_BLOCK),
1055 ("pmap_extract_and_hold: Invalid pte at L%d: %lx", lvl,
1056 tpte & ATTR_DESCR_MASK));
1057 if (((tpte & ATTR_AP_RW_BIT) == ATTR_AP(ATTR_AP_RW)) ||
1058 ((prot & VM_PROT_WRITE) == 0)) {
978 if (vm_page_pa_tryrelock(pmap, tpte & ~ATTR_MASK, &pa))
1059 switch(lvl) {
1060 case 1:
1061 off = va & L1_OFFSET;
1062 break;
1063 case 2:
1064 off = va & L2_OFFSET;
1065 break;
1066 case 3:
1067 default:
1068 off = 0;
1069 }
1070 if (vm_page_pa_tryrelock(pmap,
1071 (tpte & ~ATTR_MASK) | off, &pa))
979 goto retry;
1072 goto retry;
980 m = PHYS_TO_VM_PAGE(tpte & ~ATTR_MASK);
1073 m = PHYS_TO_VM_PAGE((tpte & ~ATTR_MASK) | off);
981 vm_page_hold(m);
982 }
983 }
984 PA_UNLOCK_COND(pa);
985 PMAP_UNLOCK(pmap);
986 return (m);
987}
988

--- 351 unchanged lines hidden (view full) ---

1340
1341void
1342pmap_pinit0(pmap_t pmap)
1343{
1344
1345 PMAP_LOCK_INIT(pmap);
1346 bzero(&pmap->pm_stats, sizeof(pmap->pm_stats));
1347 pmap->pm_l0 = kernel_pmap->pm_l0;
1074 vm_page_hold(m);
1075 }
1076 }
1077 PA_UNLOCK_COND(pa);
1078 PMAP_UNLOCK(pmap);
1079 return (m);
1080}
1081
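The new switch makes the held page exact for block mappings instead of always returning the first page of the block. A worked example with hypothetical values: for an L2 (2MB) block whose output address is 0x80200000, a lookup of a va ending in ...40355000 computes off = va & L2_OFFSET = 0x155000, so PHYS_TO_VM_PAGE() now receives 0x80355000, the 4KB frame actually referenced, rather than 0x80200000.
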

--- 351 unchanged lines hidden (view full) ---

1433
1434void
1435pmap_pinit0(pmap_t pmap)
1436{
1437
1438 PMAP_LOCK_INIT(pmap);
1439 bzero(&pmap->pm_stats, sizeof(pmap->pm_stats));
1440 pmap->pm_l0 = kernel_pmap->pm_l0;
1441 pmap->pm_root.rt_root = 0;
1348}
1349
1350int
1351pmap_pinit(pmap_t pmap)
1352{
1353 vm_paddr_t l0phys;
1354 vm_page_t l0pt;
1355

--- 5 unchanged lines hidden (view full) ---

1361 VM_WAIT;
1362
1363 l0phys = VM_PAGE_TO_PHYS(l0pt);
1364 pmap->pm_l0 = (pd_entry_t *)PHYS_TO_DMAP(l0phys);
1365
1366 if ((l0pt->flags & PG_ZERO) == 0)
1367 pagezero(pmap->pm_l0);
1368
1442}
1443
1444int
1445pmap_pinit(pmap_t pmap)
1446{
1447 vm_paddr_t l0phys;
1448 vm_page_t l0pt;
1449

--- 5 unchanged lines hidden (view full) ---

1455 VM_WAIT;
1456
1457 l0phys = VM_PAGE_TO_PHYS(l0pt);
1458 pmap->pm_l0 = (pd_entry_t *)PHYS_TO_DMAP(l0phys);
1459
1460 if ((l0pt->flags & PG_ZERO) == 0)
1461 pagezero(pmap->pm_l0);
1462
1463 pmap->pm_root.rt_root = 0;
1369 bzero(&pmap->pm_stats, sizeof(pmap->pm_stats));
1370
1371 return (1);
1372}
1373
1374/*
1375 * This routine is called if the desired page table page does not exist.
1376 *

--- 129 unchanged lines hidden (view full) ---

1506 return (m);
1507}
1508
1509static vm_page_t
1510pmap_alloc_l3(pmap_t pmap, vm_offset_t va, struct rwlock **lockp)
1511{
1512 vm_pindex_t ptepindex;
1513 pd_entry_t *pde, tpde;
1464 bzero(&pmap->pm_stats, sizeof(pmap->pm_stats));
1465
1466 return (1);
1467}
1468
1469/*
1470 * This routine is called if the desired page table page does not exist.
1471 *

--- 129 unchanged lines hidden (view full) ---

1601 return (m);
1602}
1603
1604static vm_page_t
1605pmap_alloc_l3(pmap_t pmap, vm_offset_t va, struct rwlock **lockp)
1606{
1607 vm_pindex_t ptepindex;
1608 pd_entry_t *pde, tpde;
1609#ifdef INVARIANTS
1610 pt_entry_t *pte;
1611#endif
1514 vm_page_t m;
1515 int lvl;
1516
1517 /*
1518 * Calculate pagetable page index
1519 */
1520 ptepindex = pmap_l2_pindex(va);
1521retry:
1522 /*
1523 * Get the page directory entry
1524 */
1525 pde = pmap_pde(pmap, va, &lvl);
1526
1527 /*
1528 * If the page table page is mapped, we just increment the hold count,
1529 * and activate it. If we get a level 2 pde it will point to a level 3
1530 * table.
1531 */
1612 vm_page_t m;
1613 int lvl;
1614
1615 /*
1616 * Calculate pagetable page index
1617 */
1618 ptepindex = pmap_l2_pindex(va);
1619retry:
1620 /*
1621 * Get the page directory entry
1622 */
1623 pde = pmap_pde(pmap, va, &lvl);
1624
1625 /*
1626 * If the page table page is mapped, we just increment the hold count,
1627 * and activate it. If we get a level 2 pde it will point to a level 3
1628 * table.
1629 */
1532 if (lvl == 2) {
1630 switch (lvl) {
1631 case -1:
1632 break;
1633 case 0:
1634#ifdef INVARIANTS
1635 pte = pmap_l0_to_l1(pde, va);
1636 KASSERT(pmap_load(pte) == 0,
1637 ("pmap_alloc_l3: TODO: l0 superpages"));
1638#endif
1639 break;
1640 case 1:
1641#ifdef INVARIANTS
1642 pte = pmap_l1_to_l2(pde, va);
1643 KASSERT(pmap_load(pte) == 0,
1644 ("pmap_alloc_l3: TODO: l1 superpages"));
1645#endif
1646 break;
1647 case 2:
1533 tpde = pmap_load(pde);
1534 if (tpde != 0) {
1535 m = PHYS_TO_VM_PAGE(tpde & ~ATTR_MASK);
1536 m->wire_count++;
1537 return (m);
1538 }
1648 tpde = pmap_load(pde);
1649 if (tpde != 0) {
1650 m = PHYS_TO_VM_PAGE(tpde & ~ATTR_MASK);
1651 m->wire_count++;
1652 return (m);
1653 }
1654 break;
1655 default:
1656 panic("pmap_alloc_l3: Invalid level %d", lvl);
1539 }
1540
1541 /*
1542 * Here if the pte page isn't mapped, or if it has been deallocated.
1543 */
1544 m = _pmap_alloc_l3(pmap, ptepindex, lockp);
1545 if (m == NULL && lockp != NULL)
1546 goto retry;

--- 14 unchanged lines hidden (view full) ---

1561void
1562pmap_release(pmap_t pmap)
1563{
1564 vm_page_t m;
1565
1566 KASSERT(pmap->pm_stats.resident_count == 0,
1567 ("pmap_release: pmap resident count %ld != 0",
1568 pmap->pm_stats.resident_count));
1657 }
1658
1659 /*
1660 * Here if the pte page isn't mapped, or if it has been deallocated.
1661 */
1662 m = _pmap_alloc_l3(pmap, ptepindex, lockp);
1663 if (m == NULL && lockp != NULL)
1664 goto retry;

--- 14 unchanged lines hidden (view full) ---

1679void
1680pmap_release(pmap_t pmap)
1681{
1682 vm_page_t m;
1683
1684 KASSERT(pmap->pm_stats.resident_count == 0,
1685 ("pmap_release: pmap resident count %ld != 0",
1686 pmap->pm_stats.resident_count));
1687 KASSERT(vm_radix_is_empty(&pmap->pm_root),
1688 ("pmap_release: pmap has reserved page table page(s)"));
1569
1570 m = PHYS_TO_VM_PAGE(DMAP_TO_PHYS((vm_offset_t)pmap->pm_l0));
1571
1572 m->wire_count--;
1573 atomic_subtract_int(&vm_cnt.v_wire_count, 1);
1574 vm_page_free_zero(m);
1575}
1576

--- 268 unchanged lines hidden (view full) ---

1845 pv = &pc->pc_pventry[0];
1846 TAILQ_INSERT_HEAD(&pmap->pm_pvchunk, pc, pc_list);
1847 PV_STAT(atomic_add_long(&pv_entry_count, 1));
1848 PV_STAT(atomic_add_int(&pv_entry_spare, _NPCPV - 1));
1849 return (pv);
1850}
1851
1852/*
1689
1690 m = PHYS_TO_VM_PAGE(DMAP_TO_PHYS((vm_offset_t)pmap->pm_l0));
1691
1692 m->wire_count--;
1693 atomic_subtract_int(&vm_cnt.v_wire_count, 1);
1694 vm_page_free_zero(m);
1695}
1696

--- 268 unchanged lines hidden (view full) ---

1965 pv = &pc->pc_pventry[0];
1966 TAILQ_INSERT_HEAD(&pmap->pm_pvchunk, pc, pc_list);
1967 PV_STAT(atomic_add_long(&pv_entry_count, 1));
1968 PV_STAT(atomic_add_int(&pv_entry_spare, _NPCPV - 1));
1969 return (pv);
1970}
1971
1972/*
1973 * Ensure that the number of spare PV entries in the specified pmap meets or
1974 * exceeds the given count, "needed".
1975 *
1976 * The given PV list lock may be released.
1977 */
1978static void
1979reserve_pv_entries(pmap_t pmap, int needed, struct rwlock **lockp)
1980{
1981 struct pch new_tail;
1982 struct pv_chunk *pc;
1983 int avail, free;
1984 vm_page_t m;
1985
1986 PMAP_LOCK_ASSERT(pmap, MA_OWNED);
1987 KASSERT(lockp != NULL, ("reserve_pv_entries: lockp is NULL"));
1988
1989 /*
1990 * Newly allocated PV chunks must be stored in a private list until
1991 * the required number of PV chunks have been allocated. Otherwise,
1992 * reclaim_pv_chunk() could recycle one of these chunks. In
1993 * contrast, these chunks must be added to the pmap upon allocation.
1994 */
1995 TAILQ_INIT(&new_tail);
1996retry:
1997 avail = 0;
1998 TAILQ_FOREACH(pc, &pmap->pm_pvchunk, pc_list) {
1999 bit_count((bitstr_t *)pc->pc_map, 0,
2000 sizeof(pc->pc_map) * NBBY, &free);
2001 if (free == 0)
2002 break;
2003 avail += free;
2004 if (avail >= needed)
2005 break;
2006 }
2007 for (; avail < needed; avail += _NPCPV) {
2008 m = vm_page_alloc(NULL, 0, VM_ALLOC_NORMAL | VM_ALLOC_NOOBJ |
2009 VM_ALLOC_WIRED);
2010 if (m == NULL) {
2011 m = reclaim_pv_chunk(pmap, lockp);
2012 if (m == NULL)
2013 goto retry;
2014 }
2015 PV_STAT(atomic_add_int(&pc_chunk_count, 1));
2016 PV_STAT(atomic_add_int(&pc_chunk_allocs, 1));
2017 dump_add_page(m->phys_addr);
2018 pc = (void *)PHYS_TO_DMAP(m->phys_addr);
2019 pc->pc_pmap = pmap;
2020 pc->pc_map[0] = PC_FREE0;
2021 pc->pc_map[1] = PC_FREE1;
2022 pc->pc_map[2] = PC_FREE2;
2023 TAILQ_INSERT_HEAD(&pmap->pm_pvchunk, pc, pc_list);
2024 TAILQ_INSERT_TAIL(&new_tail, pc, pc_lru);
2025 PV_STAT(atomic_add_int(&pv_entry_spare, _NPCPV));
2026 }
2027 if (!TAILQ_EMPTY(&new_tail)) {
2028 mtx_lock(&pv_chunks_mutex);
2029 TAILQ_CONCAT(&pv_chunks, &new_tail, pc_lru);
2030 mtx_unlock(&pv_chunks_mutex);
2031 }
2032}
2033
2034/*
1853 * First find and then remove the pv entry for the specified pmap and virtual
1854 * address from the specified pv list. Returns the pv entry if found and NULL
1855 * otherwise. This operation can be performed on pv lists for either 4KB or
1856 * 2MB page mappings.
1857 */
1858static __inline pv_entry_t
1859pmap_pvh_remove(struct md_page *pvh, pmap_t pmap, vm_offset_t va)
1860{

--- 5 unchanged lines hidden (view full) ---

1866 pvh->pv_gen++;
1867 break;
1868 }
1869 }
1870 return (pv);
1871}
1872
1873/*
2035 * First find and then remove the pv entry for the specified pmap and virtual
2036 * address from the specified pv list. Returns the pv entry if found and NULL
2037 * otherwise. This operation can be performed on pv lists for either 4KB or
2038 * 2MB page mappings.
2039 */
2040static __inline pv_entry_t
2041pmap_pvh_remove(struct md_page *pvh, pmap_t pmap, vm_offset_t va)
2042{

--- 5 unchanged lines hidden (view full) ---

2048 pvh->pv_gen++;
2049 break;
2050 }
2051 }
2052 return (pv);
2053}
2054
2055/*
2056 * After demotion from a 2MB page mapping to 512 4KB page mappings,
2057 * destroy the pv entry for the 2MB page mapping and reinstantiate the pv
2058 * entries for each of the 4KB page mappings.
2059 */
2060static void
2061pmap_pv_demote_l2(pmap_t pmap, vm_offset_t va, vm_paddr_t pa,
2062 struct rwlock **lockp)
2063{
2064 struct md_page *pvh;
2065 struct pv_chunk *pc;
2066 pv_entry_t pv;
2067 vm_offset_t va_last;
2068 vm_page_t m;
2069 int bit, field;
2070
2071 PMAP_LOCK_ASSERT(pmap, MA_OWNED);
2072 KASSERT((pa & L2_OFFSET) == 0,
2073 ("pmap_pv_demote_l2: pa is not 2mpage aligned"));
2074 CHANGE_PV_LIST_LOCK_TO_PHYS(lockp, pa);
2075
2076 /*
2077 * Transfer the 2mpage's pv entry for this mapping to the first
2078 * page's pv list. Once this transfer begins, the pv list lock
2079 * must not be released until the last pv entry is reinstantiated.
2080 */
2081 pvh = pa_to_pvh(pa);
2082 va = va & ~L2_OFFSET;
2083 pv = pmap_pvh_remove(pvh, pmap, va);
2084 KASSERT(pv != NULL, ("pmap_pv_demote_l2: pv not found"));
2085 m = PHYS_TO_VM_PAGE(pa);
2086 TAILQ_INSERT_TAIL(&m->md.pv_list, pv, pv_next);
2087 m->md.pv_gen++;
2088 /* Instantiate the remaining Ln_ENTRIES - 1 pv entries. */
2089 PV_STAT(atomic_add_long(&pv_entry_allocs, Ln_ENTRIES - 1));
2090 va_last = va + L2_SIZE - PAGE_SIZE;
2091 for (;;) {
2092 pc = TAILQ_FIRST(&pmap->pm_pvchunk);
2093 KASSERT(pc->pc_map[0] != 0 || pc->pc_map[1] != 0 ||
2094 pc->pc_map[2] != 0, ("pmap_pv_demote_l2: missing spare"));
2095 for (field = 0; field < _NPCM; field++) {
2096 while (pc->pc_map[field]) {
2097 bit = ffsl(pc->pc_map[field]) - 1;
2098 pc->pc_map[field] &= ~(1ul << bit);
2099 pv = &pc->pc_pventry[field * 64 + bit];
2100 va += PAGE_SIZE;
2101 pv->pv_va = va;
2102 m++;
2103 KASSERT((m->oflags & VPO_UNMANAGED) == 0,
2104 ("pmap_pv_demote_l2: page %p is not managed", m));
2105 TAILQ_INSERT_TAIL(&m->md.pv_list, pv, pv_next);
2106 m->md.pv_gen++;
2107 if (va == va_last)
2108 goto out;
2109 }
2110 }
2111 TAILQ_REMOVE(&pmap->pm_pvchunk, pc, pc_list);
2112 TAILQ_INSERT_TAIL(&pmap->pm_pvchunk, pc, pc_list);
2113 }
2114out:
2115 if (pc->pc_map[0] == 0 && pc->pc_map[1] == 0 && pc->pc_map[2] == 0) {
2116 TAILQ_REMOVE(&pmap->pm_pvchunk, pc, pc_list);
2117 TAILQ_INSERT_TAIL(&pmap->pm_pvchunk, pc, pc_list);
2118 }
2119 PV_STAT(atomic_add_long(&pv_entry_count, Ln_ENTRIES - 1));
2120 PV_STAT(atomic_subtract_int(&pv_entry_spare, Ln_ENTRIES - 1));
2121}
2122
2123/*
1874 * First find and then destroy the pv entry for the specified pmap and virtual
1875 * address. This operation can be performed on pv lists for either 4KB or 2MB
1876 * page mappings.
1877 */
1878static void
1879pmap_pvh_free(struct md_page *pvh, pmap_t pmap, vm_offset_t va)
1880{
1881 pv_entry_t pv;

--- 27 unchanged lines hidden (view full) ---

1909
1910/*
1911 * pmap_remove_l3: do the things to unmap a page in a process
1912 */
1913static int
1914pmap_remove_l3(pmap_t pmap, pt_entry_t *l3, vm_offset_t va,
1915 pd_entry_t l2e, struct spglist *free, struct rwlock **lockp)
1916{
2124 * First find and then destroy the pv entry for the specified pmap and virtual
2125 * address. This operation can be performed on pv lists for either 4KB or 2MB
2126 * page mappings.
2127 */
2128static void
2129pmap_pvh_free(struct md_page *pvh, pmap_t pmap, vm_offset_t va)
2130{
2131 pv_entry_t pv;

--- 27 unchanged lines hidden (view full) ---

2159
2160/*
2161 * pmap_remove_l3: do the things to unmap a page in a process
2162 */
2163static int
2164pmap_remove_l3(pmap_t pmap, pt_entry_t *l3, vm_offset_t va,
2165 pd_entry_t l2e, struct spglist *free, struct rwlock **lockp)
2166{
2167 struct md_page *pvh;
1917 pt_entry_t old_l3;
1918 vm_page_t m;
1919
1920 PMAP_LOCK_ASSERT(pmap, MA_OWNED);
1921 if (pmap_is_current(pmap) && pmap_l3_valid_cacheable(pmap_load(l3)))
1922 cpu_dcache_wb_range(va, L3_SIZE);
1923 old_l3 = pmap_load_clear(l3);
1924 PTE_SYNC(l3);

--- 4 unchanged lines hidden (view full) ---

1929 if (old_l3 & ATTR_SW_MANAGED) {
1930 m = PHYS_TO_VM_PAGE(old_l3 & ~ATTR_MASK);
1931 if (pmap_page_dirty(old_l3))
1932 vm_page_dirty(m);
1933 if (old_l3 & ATTR_AF)
1934 vm_page_aflag_set(m, PGA_REFERENCED);
1935 CHANGE_PV_LIST_LOCK_TO_VM_PAGE(lockp, m);
1936 pmap_pvh_free(&m->md, pmap, va);
2168 pt_entry_t old_l3;
2169 vm_page_t m;
2170
2171 PMAP_LOCK_ASSERT(pmap, MA_OWNED);
2172 if (pmap_is_current(pmap) && pmap_l3_valid_cacheable(pmap_load(l3)))
2173 cpu_dcache_wb_range(va, L3_SIZE);
2174 old_l3 = pmap_load_clear(l3);
2175 PTE_SYNC(l3);

--- 4 unchanged lines hidden (view full) ---

2180 if (old_l3 & ATTR_SW_MANAGED) {
2181 m = PHYS_TO_VM_PAGE(old_l3 & ~ATTR_MASK);
2182 if (pmap_page_dirty(old_l3))
2183 vm_page_dirty(m);
2184 if (old_l3 & ATTR_AF)
2185 vm_page_aflag_set(m, PGA_REFERENCED);
2186 CHANGE_PV_LIST_LOCK_TO_VM_PAGE(lockp, m);
2187 pmap_pvh_free(&m->md, pmap, va);
2188 if (TAILQ_EMPTY(&m->md.pv_list) &&
2189 (m->flags & PG_FICTITIOUS) == 0) {
2190 pvh = pa_to_pvh(VM_PAGE_TO_PHYS(m));
2191 if (TAILQ_EMPTY(&pvh->pv_list))
2192 vm_page_aflag_clear(m, PGA_WRITEABLE);
2193 }
1937 }
1938 return (pmap_unuse_l3(pmap, va, l2e, free));
1939}
1940
1941/*
1942 * Remove the given range of addresses from the specified map.
1943 *
1944 * It is assumed that the start and end are properly

--- 50 unchanged lines hidden (view full) ---

1995 va_next = eva;
1996
1997 l2 = pmap_l1_to_l2(l1, sva);
1998 if (l2 == NULL)
1999 continue;
2000
2001 l3_paddr = pmap_load(l2);
2002
2194 }
2195 return (pmap_unuse_l3(pmap, va, l2e, free));
2196}
2197
2198/*
2199 * Remove the given range of addresses from the specified map.
2200 *
2201 * It is assumed that the start and end are properly

--- 50 unchanged lines hidden (view full) ---

2252 va_next = eva;
2253
2254 l2 = pmap_l1_to_l2(l1, sva);
2255 if (l2 == NULL)
2256 continue;
2257
2258 l3_paddr = pmap_load(l2);
2259
2260 if ((l3_paddr & ATTR_DESCR_MASK) == L2_BLOCK) {
2261 /* TODO: Add pmap_remove_l2 */
2262 if (pmap_demote_l2_locked(pmap, l2, sva & ~L2_OFFSET,
2263 &lock) == NULL)
2264 continue;
2265 l3_paddr = pmap_load(l2);
2266 }
2267
2003 /*
2004 * Weed out invalid mappings.
2005 */
2006 if ((l3_paddr & ATTR_DESCR_MASK) != L2_TABLE)
2007 continue;
2008
2009 /*
2010 * Limit our scan to either the end of the va represented

--- 45 unchanged lines hidden (view full) ---

2056 * Original versions of this routine were very
2057 * inefficient because they iteratively called
2058 * pmap_remove (slow...)
2059 */
2060
2061void
2062pmap_remove_all(vm_page_t m)
2063{
2268 /*
2269 * Weed out invalid mappings.
2270 */
2271 if ((l3_paddr & ATTR_DESCR_MASK) != L2_TABLE)
2272 continue;
2273
2274 /*
2275 * Limit our scan to either the end of the va represented

--- 45 unchanged lines hidden (view full) ---

2321 * Original versions of this routine were very
2322 * inefficient because they iteratively called
2323 * pmap_remove (slow...)
2324 */
2325
2326void
2327pmap_remove_all(vm_page_t m)
2328{
2329 struct md_page *pvh;
2064 pv_entry_t pv;
2065 pmap_t pmap;
2066 struct rwlock *lock;
2067 pd_entry_t *pde, tpde;
2068 pt_entry_t *pte, tpte;
2330 pv_entry_t pv;
2331 pmap_t pmap;
2332 struct rwlock *lock;
2333 pd_entry_t *pde, tpde;
2334 pt_entry_t *pte, tpte;
2335 vm_offset_t va;
2069 struct spglist free;
2336 struct spglist free;
2070 int lvl, md_gen;
2337 int lvl, pvh_gen, md_gen;
2071
2072 KASSERT((m->oflags & VPO_UNMANAGED) == 0,
2073 ("pmap_remove_all: page %p is not managed", m));
2074 SLIST_INIT(&free);
2075 lock = VM_PAGE_TO_PV_LIST_LOCK(m);
2338
2339 KASSERT((m->oflags & VPO_UNMANAGED) == 0,
2340 ("pmap_remove_all: page %p is not managed", m));
2341 SLIST_INIT(&free);
2342 lock = VM_PAGE_TO_PV_LIST_LOCK(m);
2343 pvh = (m->flags & PG_FICTITIOUS) != 0 ? &pv_dummy :
2344 pa_to_pvh(VM_PAGE_TO_PHYS(m));
2076retry:
2077 rw_wlock(lock);
2345retry:
2346 rw_wlock(lock);
2347 while ((pv = TAILQ_FIRST(&pvh->pv_list)) != NULL) {
2348 pmap = PV_PMAP(pv);
2349 if (!PMAP_TRYLOCK(pmap)) {
2350 pvh_gen = pvh->pv_gen;
2351 rw_wunlock(lock);
2352 PMAP_LOCK(pmap);
2353 rw_wlock(lock);
2354 if (pvh_gen != pvh->pv_gen) {
2355 rw_wunlock(lock);
2356 PMAP_UNLOCK(pmap);
2357 goto retry;
2358 }
2359 }
2360 va = pv->pv_va;
2361 pte = pmap_pte(pmap, va, &lvl);
2362 KASSERT(pte != NULL,
2363 ("pmap_remove_all: no page table entry found"));
2364 KASSERT(lvl == 2,
2365 ("pmap_remove_all: invalid pte level %d", lvl));
2366
2367 pmap_demote_l2_locked(pmap, pte, va, &lock);
2368 PMAP_UNLOCK(pmap);
2369 }
2078 while ((pv = TAILQ_FIRST(&m->md.pv_list)) != NULL) {
2079 pmap = PV_PMAP(pv);
2080 if (!PMAP_TRYLOCK(pmap)) {
2370 while ((pv = TAILQ_FIRST(&m->md.pv_list)) != NULL) {
2371 pmap = PV_PMAP(pv);
2372 if (!PMAP_TRYLOCK(pmap)) {
2373 pvh_gen = pvh->pv_gen;
2081 md_gen = m->md.pv_gen;
2082 rw_wunlock(lock);
2083 PMAP_LOCK(pmap);
2084 rw_wlock(lock);
2374 md_gen = m->md.pv_gen;
2375 rw_wunlock(lock);
2376 PMAP_LOCK(pmap);
2377 rw_wlock(lock);
2085 if (md_gen != m->md.pv_gen) {
2378 if (pvh_gen != pvh->pv_gen || md_gen != m->md.pv_gen) {
2086 rw_wunlock(lock);
2087 PMAP_UNLOCK(pmap);
2088 goto retry;
2089 }
2090 }
2091 pmap_resident_count_dec(pmap, 1);
2092
2093 pde = pmap_pde(pmap, pv->pv_va, &lvl);

--- 70 unchanged lines hidden (view full) ---

2164 continue;
2165 }
2166
2167 va_next = (sva + L2_SIZE) & ~L2_OFFSET;
2168 if (va_next < sva)
2169 va_next = eva;
2170
2171 l2 = pmap_l1_to_l2(l1, sva);
2379 rw_wunlock(lock);
2380 PMAP_UNLOCK(pmap);
2381 goto retry;
2382 }
2383 }
2384 pmap_resident_count_dec(pmap, 1);
2385
2386 pde = pmap_pde(pmap, pv->pv_va, &lvl);

--- 70 unchanged lines hidden (view full) ---

2457 continue;
2458 }
2459
2460 va_next = (sva + L2_SIZE) & ~L2_OFFSET;
2461 if (va_next < sva)
2462 va_next = eva;
2463
2464 l2 = pmap_l1_to_l2(l1, sva);
2172 if (l2 == NULL || (pmap_load(l2) & ATTR_DESCR_MASK) != L2_TABLE)
2465 if (pmap_load(l2) == 0)
2173 continue;
2174
2466 continue;
2467
2468 if ((pmap_load(l2) & ATTR_DESCR_MASK) == L2_BLOCK) {
2469 l3p = pmap_demote_l2(pmap, l2, sva);
2470 if (l3p == NULL)
2471 continue;
2472 }
2473 KASSERT((pmap_load(l2) & ATTR_DESCR_MASK) == L2_TABLE,
2474 ("pmap_protect: Invalid L2 entry after demotion"));
2475
2175 if (va_next > eva)
2176 va_next = eva;
2177
2178 va = va_next;
2179 for (l3p = pmap_l2_to_l3(l2, sva); sva != va_next; l3p++,
2180 sva += L3_SIZE) {
2181 l3 = pmap_load(l3p);
2182 if (pmap_l3_valid(l3)) {

--- 6 unchanged lines hidden (view full) ---

2189 }
2190 PMAP_UNLOCK(pmap);
2191
2192 /* TODO: Only invalidate entries we are touching */
2193 pmap_invalidate_all(pmap);
2194}
2195
2196/*
2476 if (va_next > eva)
2477 va_next = eva;
2478
2479 va = va_next;
2480 for (l3p = pmap_l2_to_l3(l2, sva); sva != va_next; l3p++,
2481 sva += L3_SIZE) {
2482 l3 = pmap_load(l3p);
2483 if (pmap_l3_valid(l3)) {

--- 6 unchanged lines hidden (view full) ---

2490 }
2491 PMAP_UNLOCK(pmap);
2492
2493 /* TODO: Only invalidate entries we are touching */
2494 pmap_invalidate_all(pmap);
2495}
2496
2497/*
2498 * Inserts the specified page table page into the specified pmap's collection
2499 * of idle page table pages. Each of a pmap's page table pages is responsible
2500 * for mapping a distinct range of virtual addresses. The pmap's collection is
2501 * ordered by this virtual address range.
2502 */
2503static __inline int
2504pmap_insert_pt_page(pmap_t pmap, vm_page_t mpte)
2505{
2506
2507 PMAP_LOCK_ASSERT(pmap, MA_OWNED);
2508 return (vm_radix_insert(&pmap->pm_root, mpte));
2509}
2510
2511/*
2512 * Looks for a page table page mapping the specified virtual address in the
2513 * specified pmap's collection of idle page table pages. Returns NULL if there
2514 * is no page table page corresponding to the specified virtual address.
2515 */
2516static __inline vm_page_t
2517pmap_lookup_pt_page(pmap_t pmap, vm_offset_t va)
2518{
2519
2520 PMAP_LOCK_ASSERT(pmap, MA_OWNED);
2521 return (vm_radix_lookup(&pmap->pm_root, pmap_l2_pindex(va)));
2522}
2523
2524/*
2525 * Removes the specified page table page from the specified pmap's collection
2526 * of idle page table pages. The specified page table page must be a member of
2527 * the pmap's collection.
2528 */
2529static __inline void
2530pmap_remove_pt_page(pmap_t pmap, vm_page_t mpte)
2531{
2532
2533 PMAP_LOCK_ASSERT(pmap, MA_OWNED);
2534 vm_radix_remove(&pmap->pm_root, mpte->pindex);
2535}
2536
2537/*
2538 * Performs a break-before-make update of a pmap entry. This is needed when
2539 * either promoting or demoting pages to ensure the TLB doesn't get into an
2540 * inconsistent state.
2541 */
2542static void
2543pmap_update_entry(pmap_t pmap, pd_entry_t *pte, pd_entry_t newpte,
2544 vm_offset_t va, vm_size_t size)
2545{
2546 register_t intr;
2547
2548 PMAP_LOCK_ASSERT(pmap, MA_OWNED);
2549
2550 /*
2551 * Ensure we don't get switched out with the page table in an
2552 * inconsistent state. We also need to ensure no interrupts fire
2553 * as they may make use of an address we are about to invalidate.
2554 */
2555 intr = intr_disable();
2556 critical_enter();
2557
2558 /* Clear the old mapping */
2559 pmap_load_clear(pte);
2560 PTE_SYNC(pte);
2561 pmap_invalidate_range(pmap, va, va + size);
2562
2563 /* Create the new mapping */
2564 pmap_load_store(pte, newpte);
2565 PTE_SYNC(pte);
2566
2567 critical_exit();
2568 intr_restore(intr);
2569}
2570
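pmap_update_entry() is the break-before-make helper: the old entry is cleared and its TLB entries invalidated before the replacement is written, with interrupts and preemption disabled so nothing can fault on the half-updated table. pmap_promote_l2() below uses it to swap a whole L2 table entry for a 2MB block entry:

    pmap_update_entry(pmap, l2, newl2, sva, L2_SIZE);
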
2571/*
2572 * After promotion from 512 4KB page mappings to a single 2MB page mapping,
2573 * replace the many pv entries for the 4KB page mappings by a single pv entry
2574 * for the 2MB page mapping.
2575 */
2576static void
2577pmap_pv_promote_l2(pmap_t pmap, vm_offset_t va, vm_paddr_t pa,
2578 struct rwlock **lockp)
2579{
2580 struct md_page *pvh;
2581 pv_entry_t pv;
2582 vm_offset_t va_last;
2583 vm_page_t m;
2584
2585 KASSERT((pa & L2_OFFSET) == 0,
2586 ("pmap_pv_promote_l2: pa is not 2mpage aligned"));
2587 CHANGE_PV_LIST_LOCK_TO_PHYS(lockp, pa);
2588
2589 /*
2590 * Transfer the first page's pv entry for this mapping to the 2mpage's
2591 * pv list. Aside from avoiding the cost of a call to get_pv_entry(),
2592 * a transfer avoids the possibility that get_pv_entry() calls
2593 * reclaim_pv_chunk() and that reclaim_pv_chunk() removes one of the
2594 * mappings that is being promoted.
2595 */
2596 m = PHYS_TO_VM_PAGE(pa);
2597 va = va & ~L2_OFFSET;
2598 pv = pmap_pvh_remove(&m->md, pmap, va);
2599 KASSERT(pv != NULL, ("pmap_pv_promote_l2: pv not found"));
2600 pvh = pa_to_pvh(pa);
2601 TAILQ_INSERT_TAIL(&pvh->pv_list, pv, pv_next);
2602 pvh->pv_gen++;
2603 /* Free the remaining NPTEPG - 1 pv entries. */
2604 va_last = va + L2_SIZE - PAGE_SIZE;
2605 do {
2606 m++;
2607 va += PAGE_SIZE;
2608 pmap_pvh_free(&m->md, pmap, va);
2609 } while (va < va_last);
2610}
2611
2612/*
2613 * Tries to promote the 512, contiguous 4KB page mappings that are within a
2614 * single level 2 table entry to a single 2MB page mapping. For promotion
2615 * to occur, two conditions must be met: (1) the 4KB page mappings must map
2616 * aligned, contiguous physical memory and (2) the 4KB page mappings must have
2617 * identical characteristics.
2618 */
2619static void
2620pmap_promote_l2(pmap_t pmap, pd_entry_t *l2, vm_offset_t va,
2621 struct rwlock **lockp)
2622{
2623 pt_entry_t *firstl3, *l3, newl2, oldl3, pa;
2624 vm_page_t mpte;
2625 vm_offset_t sva;
2626
2627 PMAP_LOCK_ASSERT(pmap, MA_OWNED);
2628
2629 sva = va & ~L2_OFFSET;
2630 firstl3 = pmap_l2_to_l3(l2, sva);
2631 newl2 = pmap_load(firstl3);
2632
 2633 /* Check the alignment is valid */
2634 if (((newl2 & ~ATTR_MASK) & L2_OFFSET) != 0) {
2635 atomic_add_long(&pmap_l2_p_failures, 1);
2636 CTR2(KTR_PMAP, "pmap_promote_l2: failure for va %#lx"
2637 " in pmap %p", va, pmap);
2638 return;
2639 }
2640
2641 pa = newl2 + L2_SIZE - PAGE_SIZE;
2642 for (l3 = firstl3 + NL3PG - 1; l3 > firstl3; l3--) {
2643 oldl3 = pmap_load(l3);
2644 if (oldl3 != pa) {
2645 atomic_add_long(&pmap_l2_p_failures, 1);
2646 CTR2(KTR_PMAP, "pmap_promote_l2: failure for va %#lx"
2647 " in pmap %p", va, pmap);
2648 return;
2649 }
2650 pa -= PAGE_SIZE;
2651 }
2652
2653 /*
2654 * Save the page table page in its current state until the L2
2655 * mapping the superpage is demoted by pmap_demote_l2() or
2656 * destroyed by pmap_remove_l3().
2657 */
2658 mpte = PHYS_TO_VM_PAGE(pmap_load(l2) & ~ATTR_MASK);
2659 KASSERT(mpte >= vm_page_array &&
2660 mpte < &vm_page_array[vm_page_array_size],
2661 ("pmap_promote_l2: page table page is out of range"));
2662 KASSERT(mpte->pindex == pmap_l2_pindex(va),
2663 ("pmap_promote_l2: page table page's pindex is wrong"));
2664 if (pmap_insert_pt_page(pmap, mpte)) {
2665 atomic_add_long(&pmap_l2_p_failures, 1);
2666 CTR2(KTR_PMAP,
2667 "pmap_promote_l2: failure for va %#lx in pmap %p", va,
2668 pmap);
2669 return;
2670 }
2671
2672 if ((newl2 & ATTR_SW_MANAGED) != 0)
2673 pmap_pv_promote_l2(pmap, va, newl2 & ~ATTR_MASK, lockp);
2674
2675 newl2 &= ~ATTR_DESCR_MASK;
2676 newl2 |= L2_BLOCK;
2677
2678 pmap_update_entry(pmap, l2, newl2, sva, L2_SIZE);
2679
2680 atomic_add_long(&pmap_l2_promotions, 1);
2681 CTR2(KTR_PMAP, "pmap_promote_l2: success for va %#lx in pmap %p", va,
2682 pmap);
2683}
2684
2685/*
2197 * Insert the given physical page (p) at
2198 * the specified virtual address (v) in the
2199 * target physical map with the protection requested.
2200 *
2201 * If specified, the page will be wired down, meaning
2202 * that the related pte can not be reclaimed.
2203 *
2204 * NB: This is the only routine which MAY NOT lazy-evaluate
2205 * or lose information. That is, this routine must actually
2206 * insert this page into the given map NOW.
2207 */
2208int
2209pmap_enter(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot,
2210 u_int flags, int8_t psind __unused)
2211{
2212 struct rwlock *lock;
2213 pd_entry_t *pde;
2214 pt_entry_t new_l3, orig_l3;
2686 * Insert the given physical page (p) at
2687 * the specified virtual address (v) in the
2688 * target physical map with the protection requested.
2689 *
2690 * If specified, the page will be wired down, meaning
2691 * that the related pte can not be reclaimed.
2692 *
2693 * NB: This is the only routine which MAY NOT lazy-evaluate
2694 * or lose information. That is, this routine must actually
2695 * insert this page into the given map NOW.
2696 */
2697int
2698pmap_enter(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot,
2699 u_int flags, int8_t psind __unused)
2700{
2701 struct rwlock *lock;
2702 pd_entry_t *pde;
2703 pt_entry_t new_l3, orig_l3;
2215 pt_entry_t *l3;
2704 pt_entry_t *l2, *l3;
2216 pv_entry_t pv;
2217 vm_paddr_t opa, pa, l1_pa, l2_pa, l3_pa;
2218 vm_page_t mpte, om, l1_m, l2_m, l3_m;
2219 boolean_t nosleep;
2220 int lvl;
2221
2222 va = trunc_page(va);
2223 if ((m->oflags & VPO_UNMANAGED) == 0 && !vm_page_xbusied(m))

--- 10 unchanged lines hidden (view full) ---

2234
2235 CTR2(KTR_PMAP, "pmap_enter: %.16lx -> %.16lx", va, pa);
2236
2237 mpte = NULL;
2238
2239 lock = NULL;
2240 PMAP_LOCK(pmap);
2241
2705 pv_entry_t pv;
2706 vm_paddr_t opa, pa, l1_pa, l2_pa, l3_pa;
2707 vm_page_t mpte, om, l1_m, l2_m, l3_m;
2708 boolean_t nosleep;
2709 int lvl;
2710
2711 va = trunc_page(va);
2712 if ((m->oflags & VPO_UNMANAGED) == 0 && !vm_page_xbusied(m))

--- 10 unchanged lines hidden (view full) ---

2723
2724 CTR2(KTR_PMAP, "pmap_enter: %.16lx -> %.16lx", va, pa);
2725
2726 mpte = NULL;
2727
2728 lock = NULL;
2729 PMAP_LOCK(pmap);
2730
2731 pde = pmap_pde(pmap, va, &lvl);
2732 if (pde != NULL && lvl == 1) {
2733 l2 = pmap_l1_to_l2(pde, va);
2734 if ((pmap_load(l2) & ATTR_DESCR_MASK) == L2_BLOCK &&
2735 (l3 = pmap_demote_l2_locked(pmap, l2, va & ~L2_OFFSET,
2736 &lock)) != NULL) {
2737 l3 = &l3[pmap_l3_index(va)];
2738 if (va < VM_MAXUSER_ADDRESS) {
2739 mpte = PHYS_TO_VM_PAGE(
2740 pmap_load(l2) & ~ATTR_MASK);
2741 mpte->wire_count++;
2742 }
2743 goto havel3;
2744 }
2745 }
2746
2242 if (va < VM_MAXUSER_ADDRESS) {
2243 nosleep = (flags & PMAP_ENTER_NOSLEEP) != 0;
2244 mpte = pmap_alloc_l3(pmap, va, nosleep ? NULL : &lock);
2245 if (mpte == NULL && nosleep) {
2246 CTR0(KTR_PMAP, "pmap_enter: mpte == NULL");
2247 if (lock != NULL)
2248 rw_wunlock(lock);
2249 PMAP_UNLOCK(pmap);
2250 return (KERN_RESOURCE_SHORTAGE);
2251 }
2252 pde = pmap_pde(pmap, va, &lvl);
2253 KASSERT(pde != NULL,
2254 ("pmap_enter: Invalid page entry, va: 0x%lx", va));
2255 KASSERT(lvl == 2,
2256 ("pmap_enter: Invalid level %d", lvl));
2257
2258 l3 = pmap_l2_to_l3(pde, va);
2259 } else {
2747 if (va < VM_MAXUSER_ADDRESS) {
2748 nosleep = (flags & PMAP_ENTER_NOSLEEP) != 0;
2749 mpte = pmap_alloc_l3(pmap, va, nosleep ? NULL : &lock);
2750 if (mpte == NULL && nosleep) {
2751 CTR0(KTR_PMAP, "pmap_enter: mpte == NULL");
2752 if (lock != NULL)
2753 rw_wunlock(lock);
2754 PMAP_UNLOCK(pmap);
2755 return (KERN_RESOURCE_SHORTAGE);
2756 }
2757 pde = pmap_pde(pmap, va, &lvl);
2758 KASSERT(pde != NULL,
2759 ("pmap_enter: Invalid page entry, va: 0x%lx", va));
2760 KASSERT(lvl == 2,
2761 ("pmap_enter: Invalid level %d", lvl));
2762
2763 l3 = pmap_l2_to_l3(pde, va);
2764 } else {
2260 pde = pmap_pde(pmap, va, &lvl);
2261 /*
2262 * If we get a level 2 pde it must point to a level 3 entry
2263 * otherwise we will need to create the intermediate tables
2264 */
2265 if (lvl < 2) {
2266 switch(lvl) {
2267 default:
2268 case -1:

--- 46 unchanged lines hidden (view full) ---

2315 pmap_load_store(pde, l3_pa | L2_TABLE);
2316 PTE_SYNC(pde);
2317 break;
2318 }
2319 }
2320 l3 = pmap_l2_to_l3(pde, va);
2321 pmap_invalidate_page(pmap, va);
2322 }
2765 /*
2766 * If we get a level 2 pde it must point to a level 3 entry
2767 * otherwise we will need to create the intermediate tables
2768 */
2769 if (lvl < 2) {
2770 switch(lvl) {
2771 default:
2772 case -1:

--- 46 unchanged lines hidden (view full) ---

2819 pmap_load_store(pde, l3_pa | L2_TABLE);
2820 PTE_SYNC(pde);
2821 break;
2822 }
2823 }
2824 l3 = pmap_l2_to_l3(pde, va);
2825 pmap_invalidate_page(pmap, va);
2826 }
2827havel3:
2323
2324 om = NULL;
2325 orig_l3 = pmap_load(l3);
2326 opa = orig_l3 & ~ATTR_MASK;
2327
2328 /*
2329 * Is the specified virtual address already mapped?
2330 */

--- 63 unchanged lines hidden (view full) ---

2394 vm_page_aflag_set(m, PGA_WRITEABLE);
2395 }
2396
2397 /*
2398 * Update the L3 entry.
2399 */
2400 if (orig_l3 != 0) {
2401validate:
2828
2829 om = NULL;
2830 orig_l3 = pmap_load(l3);
2831 opa = orig_l3 & ~ATTR_MASK;
2832
2833 /*
2834 * Is the specified virtual address already mapped?
2835 */

--- 63 unchanged lines hidden (view full) ---

2899 vm_page_aflag_set(m, PGA_WRITEABLE);
2900 }
2901
2902 /*
2903 * Update the L3 entry.
2904 */
2905 if (orig_l3 != 0) {
2906validate:
2402 orig_l3 = pmap_load_store(l3, new_l3);
2403 PTE_SYNC(l3);
2907 orig_l3 = pmap_load(l3);
2404 opa = orig_l3 & ~ATTR_MASK;
2405
2406 if (opa != pa) {
2908 opa = orig_l3 & ~ATTR_MASK;
2909
2910 if (opa != pa) {
2911 pmap_update_entry(pmap, l3, new_l3, va, PAGE_SIZE);
2407 if ((orig_l3 & ATTR_SW_MANAGED) != 0) {
2408 om = PHYS_TO_VM_PAGE(opa);
2409 if (pmap_page_dirty(orig_l3))
2410 vm_page_dirty(om);
2411 if ((orig_l3 & ATTR_AF) != 0)
2412 vm_page_aflag_set(om, PGA_REFERENCED);
2413 CHANGE_PV_LIST_LOCK_TO_PHYS(&lock, opa);
2414 pmap_pvh_free(&om->md, pmap, va);
2912 if ((orig_l3 & ATTR_SW_MANAGED) != 0) {
2913 om = PHYS_TO_VM_PAGE(opa);
2914 if (pmap_page_dirty(orig_l3))
2915 vm_page_dirty(om);
2916 if ((orig_l3 & ATTR_AF) != 0)
2917 vm_page_aflag_set(om, PGA_REFERENCED);
2918 CHANGE_PV_LIST_LOCK_TO_PHYS(&lock, opa);
2919 pmap_pvh_free(&om->md, pmap, va);
2920 if ((om->aflags & PGA_WRITEABLE) != 0 &&
2921 TAILQ_EMPTY(&om->md.pv_list) &&
2922 ((om->flags & PG_FICTITIOUS) != 0 ||
2923 TAILQ_EMPTY(&pa_to_pvh(opa)->pv_list)))
2924 vm_page_aflag_clear(om, PGA_WRITEABLE);
2415 }
2925 }
2416 } else if (pmap_page_dirty(orig_l3)) {
2417 if ((orig_l3 & ATTR_SW_MANAGED) != 0)
2926 } else {
2927 pmap_load_store(l3, new_l3);
2928 PTE_SYNC(l3);
2929 pmap_invalidate_page(pmap, va);
2930 if (pmap_page_dirty(orig_l3) &&
2931 (orig_l3 & ATTR_SW_MANAGED) != 0)
2418 vm_page_dirty(m);
2419 }
2420 } else {
2421 pmap_load_store(l3, new_l3);
2932 vm_page_dirty(m);
2933 }
2934 } else {
2935 pmap_load_store(l3, new_l3);
2422 PTE_SYNC(l3);
2423 }
2936 }
2937
2938 PTE_SYNC(l3);
2424 pmap_invalidate_page(pmap, va);
2939 pmap_invalidate_page(pmap, va);
2425 if ((pmap != pmap_kernel()) && (pmap == &curproc->p_vmspace->vm_pmap))
2426 cpu_icache_sync_range(va, PAGE_SIZE);
2427
2940
2941 if (pmap != pmap_kernel()) {
2942 if (pmap == &curproc->p_vmspace->vm_pmap)
2943 cpu_icache_sync_range(va, PAGE_SIZE);
2944
2945 if ((mpte == NULL || mpte->wire_count == NL3PG) &&
2946 pmap_superpages_enabled() &&
2947 (m->flags & PG_FICTITIOUS) == 0 &&
2948 vm_reserv_level_iffullpop(m) == 0) {
2949 pmap_promote_l2(pmap, pde, va, &lock);
2950 }
2951 }
2952
2428 if (lock != NULL)
2429 rw_wunlock(lock);
2430 PMAP_UNLOCK(pmap);
2431 return (KERN_SUCCESS);
2432}
2433
2434/*
2435 * Maps a sequence of resident pages belonging to the same object.

--- 56 unchanged lines hidden (view full) ---

2492}
2493
2494static vm_page_t
2495pmap_enter_quick_locked(pmap_t pmap, vm_offset_t va, vm_page_t m,
2496 vm_prot_t prot, vm_page_t mpte, struct rwlock **lockp)
2497{
2498 struct spglist free;
2499 pd_entry_t *pde;
2953 if (lock != NULL)
2954 rw_wunlock(lock);
2955 PMAP_UNLOCK(pmap);
2956 return (KERN_SUCCESS);
2957}
2958
2959/*
2960 * Maps a sequence of resident pages belonging to the same object.

--- 56 unchanged lines hidden (view full) ---

3017}
3018
3019static vm_page_t
3020pmap_enter_quick_locked(pmap_t pmap, vm_offset_t va, vm_page_t m,
3021 vm_prot_t prot, vm_page_t mpte, struct rwlock **lockp)
3022{
3023 struct spglist free;
3024 pd_entry_t *pde;
2500 pt_entry_t *l3;
3025 pt_entry_t *l2, *l3;
2501 vm_paddr_t pa;
2502 int lvl;
2503
2504 KASSERT(va < kmi.clean_sva || va >= kmi.clean_eva ||
2505 (m->oflags & VPO_UNMANAGED) != 0,
2506 ("pmap_enter_quick_locked: managed mapping within the clean submap"));
2507 PMAP_LOCK_ASSERT(pmap, MA_OWNED);
2508

--- 18 unchanged lines hidden (view full) ---

2527 pde = pmap_pde(pmap, va, &lvl);
2528
2529 /*
2530 * If the page table page is mapped, we just increment
2531 * the hold count, and activate it. Otherwise, we
2532 * attempt to allocate a page table page. If this
2533 * attempt fails, we don't retry. Instead, we give up.
2534 */
3026 vm_paddr_t pa;
3027 int lvl;
3028
3029 KASSERT(va < kmi.clean_sva || va >= kmi.clean_eva ||
3030 (m->oflags & VPO_UNMANAGED) != 0,
3031 ("pmap_enter_quick_locked: managed mapping within the clean submap"));
3032 PMAP_LOCK_ASSERT(pmap, MA_OWNED);
3033

--- 18 unchanged lines hidden (view full) ---

3052 pde = pmap_pde(pmap, va, &lvl);
3053
3054 /*
3055 * If the page table page is mapped, we just increment
3056 * the hold count, and activate it. Otherwise, we
3057 * attempt to allocate a page table page. If this
3058 * attempt fails, we don't retry. Instead, we give up.
3059 */
3060 if (lvl == 1) {
3061 l2 = pmap_l1_to_l2(pde, va);
3062 if ((pmap_load(l2) & ATTR_DESCR_MASK) ==
3063 L2_BLOCK)
3064 return (NULL);
3065 }
2535 if (lvl == 2 && pmap_load(pde) != 0) {
2536 mpte =
2537 PHYS_TO_VM_PAGE(pmap_load(pde) & ~ATTR_MASK);
2538 mpte->wire_count++;
2539 } else {
2540 /*
2541 * Pass NULL instead of the PV list lock
2542 * pointer, because we don't intend to sleep.

--- 41 unchanged lines hidden (view full) ---

2584 }
2585
2586 /*
2587 * Increment counters
2588 */
2589 pmap_resident_count_inc(pmap, 1);
2590
2591 pa = VM_PAGE_TO_PHYS(m) | ATTR_DEFAULT | ATTR_IDX(m->md.pv_memattr) |
3066 if (lvl == 2 && pmap_load(pde) != 0) {
3067 mpte =
3068 PHYS_TO_VM_PAGE(pmap_load(pde) & ~ATTR_MASK);
3069 mpte->wire_count++;
3070 } else {
3071 /*
3072 * Pass NULL instead of the PV list lock
3073 * pointer, because we don't intend to sleep.

--- 41 unchanged lines hidden (view full) ---

3115 }
3116
3117 /*
3118 * Increment counters
3119 */
3120 pmap_resident_count_inc(pmap, 1);
3121
3122 pa = VM_PAGE_TO_PHYS(m) | ATTR_DEFAULT | ATTR_IDX(m->md.pv_memattr) |
2592 ATTR_AP(ATTR_AP_RW) | L3_PAGE;
3123 ATTR_AP(ATTR_AP_RO) | L3_PAGE;
2593
2594 /*
2595 * Now validate mapping with RO protection
2596 */
2597 if ((m->oflags & VPO_UNMANAGED) == 0)
2598 pa |= ATTR_SW_MANAGED;
2599 pmap_load_store(l3, pa);
2600 PTE_SYNC(l3);

--- 53 unchanged lines hidden (view full) ---

2654 va_next = (sva + L2_SIZE) & ~L2_OFFSET;
2655 if (va_next < sva)
2656 va_next = eva;
2657
2658 l2 = pmap_l1_to_l2(l1, sva);
2659 if (pmap_load(l2) == 0)
2660 continue;
2661
3124
3125 /*
3126 * Now validate mapping with RO protection
3127 */
3128 if ((m->oflags & VPO_UNMANAGED) == 0)
3129 pa |= ATTR_SW_MANAGED;
3130 pmap_load_store(l3, pa);
3131 PTE_SYNC(l3);

--- 53 unchanged lines hidden (view full) ---

3185 va_next = (sva + L2_SIZE) & ~L2_OFFSET;
3186 if (va_next < sva)
3187 va_next = eva;
3188
3189 l2 = pmap_l1_to_l2(l1, sva);
3190 if (pmap_load(l2) == 0)
3191 continue;
3192
3193 if ((pmap_load(l2) & ATTR_DESCR_MASK) == L2_BLOCK) {
3194 l3 = pmap_demote_l2(pmap, l2, sva);
3195 if (l3 == NULL)
3196 continue;
3197 }
3198 KASSERT((pmap_load(l2) & ATTR_DESCR_MASK) == L2_TABLE,
3199 ("pmap_unwire: Invalid l2 entry after demotion"));
3200
2662 if (va_next > eva)
2663 va_next = eva;
2664 for (l3 = pmap_l2_to_l3(l2, sva); sva != va_next; l3++,
2665 sva += L3_SIZE) {
2666 if (pmap_load(l3) == 0)
2667 continue;
2668 if ((pmap_load(l3) & ATTR_SW_WIRED) == 0)
2669 panic("pmap_unwire: l3 %#jx is missing "

--- 138 unchanged lines hidden (view full) ---

2808 * 16 pvs linked to from this page. This count may
2809 * be changed upwards or downwards in the future; it
2810 * is only necessary that true be returned for a small
2811 * subset of pmaps for proper page aging.
2812 */
2813boolean_t
2814pmap_page_exists_quick(pmap_t pmap, vm_page_t m)
2815{
3201 if (va_next > eva)
3202 va_next = eva;
3203 for (l3 = pmap_l2_to_l3(l2, sva); sva != va_next; l3++,
3204 sva += L3_SIZE) {
3205 if (pmap_load(l3) == 0)
3206 continue;
3207 if ((pmap_load(l3) & ATTR_SW_WIRED) == 0)
3208 panic("pmap_unwire: l3 %#jx is missing "

--- 138 unchanged lines hidden (view full) ---

3347 * 16 pvs linked to from this page. This count may
3348 * be changed upwards or downwards in the future; it
3349 * is only necessary that true be returned for a small
3350 * subset of pmaps for proper page aging.
3351 */
3352boolean_t
3353pmap_page_exists_quick(pmap_t pmap, vm_page_t m)
3354{
3355 struct md_page *pvh;
2816 struct rwlock *lock;
2817 pv_entry_t pv;
2818 int loops = 0;
2819 boolean_t rv;
2820
2821 KASSERT((m->oflags & VPO_UNMANAGED) == 0,
2822 ("pmap_page_exists_quick: page %p is not managed", m));
2823 rv = FALSE;
2824 lock = VM_PAGE_TO_PV_LIST_LOCK(m);
2825 rw_rlock(lock);
2826 TAILQ_FOREACH(pv, &m->md.pv_list, pv_next) {
2827 if (PV_PMAP(pv) == pmap) {
2828 rv = TRUE;
2829 break;
2830 }
2831 loops++;
2832 if (loops >= 16)
2833 break;
2834 }
3356 struct rwlock *lock;
3357 pv_entry_t pv;
3358 int loops = 0;
3359 boolean_t rv;
3360
3361 KASSERT((m->oflags & VPO_UNMANAGED) == 0,
3362 ("pmap_page_exists_quick: page %p is not managed", m));
3363 rv = FALSE;
3364 lock = VM_PAGE_TO_PV_LIST_LOCK(m);
3365 rw_rlock(lock);
3366 TAILQ_FOREACH(pv, &m->md.pv_list, pv_next) {
3367 if (PV_PMAP(pv) == pmap) {
3368 rv = TRUE;
3369 break;
3370 }
3371 loops++;
3372 if (loops >= 16)
3373 break;
3374 }
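			/*
			 * Also check the page's 2MB (superpage) mappings,
			 * continuing the same 16-entry limit.
			 */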
3375 if (!rv && loops < 16 && (m->flags & PG_FICTITIOUS) == 0) {
3376 pvh = pa_to_pvh(VM_PAGE_TO_PHYS(m));
3377 TAILQ_FOREACH(pv, &pvh->pv_list, pv_next) {
3378 if (PV_PMAP(pv) == pmap) {
3379 rv = TRUE;
3380 break;
3381 }
3382 loops++;
3383 if (loops >= 16)
3384 break;
3385 }
3386 }
2835 rw_runlock(lock);
2836 return (rv);
2837}
2838
2839/*
2840 * pmap_page_wired_mappings:
2841 *
2842 * Return the number of managed mappings to the given physical page
2843 * that are wired.
2844 */
2845int
2846pmap_page_wired_mappings(vm_page_t m)
2847{
2848 struct rwlock *lock;
3387 rw_runlock(lock);
3388 return (rv);
3389}
3390
3391/*
3392 * pmap_page_wired_mappings:
3393 *
3394 * Return the number of managed mappings to the given physical page
3395 * that are wired.
3396 */
3397int
3398pmap_page_wired_mappings(vm_page_t m)
3399{
3400 struct rwlock *lock;
3401 struct md_page *pvh;
2849 pmap_t pmap;
2850 pt_entry_t *pte;
2851 pv_entry_t pv;
3402 pmap_t pmap;
3403 pt_entry_t *pte;
3404 pv_entry_t pv;
2852 int count, lvl, md_gen;
3405 int count, lvl, md_gen, pvh_gen;
2853
2854 if ((m->oflags & VPO_UNMANAGED) != 0)
2855 return (0);
2856 lock = VM_PAGE_TO_PV_LIST_LOCK(m);
2857 rw_rlock(lock);
2858restart:
2859 count = 0;
2860 TAILQ_FOREACH(pv, &m->md.pv_list, pv_next) {

--- 8 unchanged lines hidden (view full) ---

2869 goto restart;
2870 }
2871 }
2872 pte = pmap_pte(pmap, pv->pv_va, &lvl);
2873 if (pte != NULL && (pmap_load(pte) & ATTR_SW_WIRED) != 0)
2874 count++;
2875 PMAP_UNLOCK(pmap);
2876 }
3406
3407 if ((m->oflags & VPO_UNMANAGED) != 0)
3408 return (0);
3409 lock = VM_PAGE_TO_PV_LIST_LOCK(m);
3410 rw_rlock(lock);
3411restart:
3412 count = 0;
3413 TAILQ_FOREACH(pv, &m->md.pv_list, pv_next) {

--- 8 unchanged lines hidden (view full) ---

3422 goto restart;
3423 }
3424 }
3425 pte = pmap_pte(pmap, pv->pv_va, &lvl);
3426 if (pte != NULL && (pmap_load(pte) & ATTR_SW_WIRED) != 0)
3427 count++;
3428 PMAP_UNLOCK(pmap);
3429 }
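	/*
	 * Also count wired 2MB (superpage) mappings of the page.
	 */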
3430 if ((m->flags & PG_FICTITIOUS) == 0) {
3431 pvh = pa_to_pvh(VM_PAGE_TO_PHYS(m));
3432 TAILQ_FOREACH(pv, &pvh->pv_list, pv_next) {
3433 pmap = PV_PMAP(pv);
3434 if (!PMAP_TRYLOCK(pmap)) {
3435 md_gen = m->md.pv_gen;
3436 pvh_gen = pvh->pv_gen;
3437 rw_runlock(lock);
3438 PMAP_LOCK(pmap);
3439 rw_rlock(lock);
3440 if (md_gen != m->md.pv_gen ||
3441 pvh_gen != pvh->pv_gen) {
3442 PMAP_UNLOCK(pmap);
3443 goto restart;
3444 }
3445 }
3446 pte = pmap_pte(pmap, pv->pv_va, &lvl);
3447 if (pte != NULL &&
3448 (pmap_load(pte) & ATTR_SW_WIRED) != 0)
3449 count++;
3450 PMAP_UNLOCK(pmap);
3451 }
3452 }
2877 rw_runlock(lock);
2878 return (count);
2879}
2880
2881/*
2882 * Destroy all managed, non-wired mappings in the given user-space
2883 * pmap. This pmap cannot be active on any processor besides the
2884 * caller.

--- 10 unchanged lines hidden (view full) ---

2895 * this function starts.
2896 */
2897void
2898pmap_remove_pages(pmap_t pmap)
2899{
2900 pd_entry_t *pde;
2901 pt_entry_t *pte, tpte;
2902 struct spglist free;
3453 rw_runlock(lock);
3454 return (count);
3455}
3456
3457/*
3458 * Destroy all managed, non-wired mappings in the given user-space
3459 * pmap. This pmap cannot be active on any processor besides the
3460 * caller.

--- 10 unchanged lines hidden (view full) ---

3471 * this function starts.
3472 */
3473void
3474pmap_remove_pages(pmap_t pmap)
3475{
3476 pd_entry_t *pde;
3477 pt_entry_t *pte, tpte;
3478 struct spglist free;
2903 vm_page_t m;
3479 vm_page_t m, ml3, mt;
2904 pv_entry_t pv;
3480 pv_entry_t pv;
3481 struct md_page *pvh;
2905 struct pv_chunk *pc, *npc;
2906 struct rwlock *lock;
2907 int64_t bit;
2908 uint64_t inuse, bitmask;
2909 int allfree, field, freed, idx, lvl;
2910 vm_paddr_t pa;
2911
2912 lock = NULL;

--- 10 unchanged lines hidden (view full) ---

2923 bitmask = 1UL << bit;
2924 idx = field * 64 + bit;
2925 pv = &pc->pc_pventry[idx];
2926 inuse &= ~bitmask;
2927
2928 pde = pmap_pde(pmap, pv->pv_va, &lvl);
2929 KASSERT(pde != NULL,
2930 ("Attempting to remove an unmapped page"));
3482 struct pv_chunk *pc, *npc;
3483 struct rwlock *lock;
3484 int64_t bit;
3485 uint64_t inuse, bitmask;
3486 int allfree, field, freed, idx, lvl;
3487 vm_paddr_t pa;
3488
3489 lock = NULL;

--- 10 unchanged lines hidden (view full) ---

3500 bitmask = 1UL << bit;
3501 idx = field * 64 + bit;
3502 pv = &pc->pc_pventry[idx];
3503 inuse &= ~bitmask;
3504
3505 pde = pmap_pde(pmap, pv->pv_va, &lvl);
3506 KASSERT(pde != NULL,
3507 ("Attempting to remove an unmapped page"));
2931 KASSERT(lvl == 2,
2932 ("Invalid page directory level: %d", lvl));
2933
3508
2934 pte = pmap_l2_to_l3(pde, pv->pv_va);
2935 KASSERT(pte != NULL,
2936 ("Attempting to remove an unmapped page"));
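				/*
				 * lvl 1 means this pv maps a 2MB (L2 block)
				 * superpage; lvl 2 means it maps a 4KB (L3)
				 * page. Fetch and sanity-check the
				 * corresponding entry.
				 */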
3509				switch (lvl) {
3510 case 1:
3511 pte = pmap_l1_to_l2(pde, pv->pv_va);
3512 tpte = pmap_load(pte);
3513 KASSERT((tpte & ATTR_DESCR_MASK) ==
3514 L2_BLOCK,
3515 ("Attempting to remove an invalid "
3516 "block: %lx", tpte));
3517 tpte = pmap_load(pte);
3518 break;
3519 case 2:
3520 pte = pmap_l2_to_l3(pde, pv->pv_va);
3521 tpte = pmap_load(pte);
3522 KASSERT((tpte & ATTR_DESCR_MASK) ==
3523 L3_PAGE,
3524 ("Attempting to remove an invalid "
3525 "page: %lx", tpte));
3526 break;
3527 default:
3528 panic(
3529 "Invalid page directory level: %d",
3530 lvl);
3531 }
2937
3532
2938 tpte = pmap_load(pte);
2939
2940/*
2941 * We cannot remove wired pages from a process' mapping at this time
2942 */
2943 if (tpte & ATTR_SW_WIRED) {
2944 allfree = 0;
2945 continue;
2946 }
2947

--- 5 unchanged lines hidden (view full) ---

2953 m, (uintmax_t)m->phys_addr,
2954 (uintmax_t)tpte));
2955
2956 KASSERT((m->flags & PG_FICTITIOUS) != 0 ||
2957 m < &vm_page_array[vm_page_array_size],
2958 ("pmap_remove_pages: bad pte %#jx",
2959 (uintmax_t)tpte));
2960
3533/*
3534 * We cannot remove wired pages from a process' mapping at this time
3535 */
3536 if (tpte & ATTR_SW_WIRED) {
3537 allfree = 0;
3538 continue;
3539 }
3540

--- 5 unchanged lines hidden (view full) ---

3546 m, (uintmax_t)m->phys_addr,
3547 (uintmax_t)tpte));
3548
3549 KASSERT((m->flags & PG_FICTITIOUS) != 0 ||
3550 m < &vm_page_array[vm_page_array_size],
3551 ("pmap_remove_pages: bad pte %#jx",
3552 (uintmax_t)tpte));
3553
2961 /* XXX: assumes tpte is level 3 */
2962 if (pmap_is_current(pmap) &&
2963 pmap_l3_valid_cacheable(tpte))
2964 cpu_dcache_wb_range(pv->pv_va, L3_SIZE);
3554 if (pmap_is_current(pmap)) {
3555 if (lvl == 2 &&
3556 pmap_l3_valid_cacheable(tpte)) {
3557 cpu_dcache_wb_range(pv->pv_va,
3558 L3_SIZE);
3559 } else if (lvl == 1 &&
3560 pmap_pte_valid_cacheable(tpte)) {
3561 cpu_dcache_wb_range(pv->pv_va,
3562 L2_SIZE);
3563 }
3564 }
2965 pmap_load_clear(pte);
2966 PTE_SYNC(pte);
2967 pmap_invalidate_page(pmap, pv->pv_va);
2968
2969 /*
2970 * Update the vm_page_t clean/reference bits.
2971 */
3565 pmap_load_clear(pte);
3566 PTE_SYNC(pte);
3567 pmap_invalidate_page(pmap, pv->pv_va);
3568
3569 /*
3570 * Update the vm_page_t clean/reference bits.
3571 */
2972 if ((tpte & ATTR_AP_RW_BIT) == ATTR_AP(ATTR_AP_RW))
2973 vm_page_dirty(m);
3572 if ((tpte & ATTR_AP_RW_BIT) ==
3573 ATTR_AP(ATTR_AP_RW)) {
3574 switch (lvl) {
3575 case 1:
3576 for (mt = m; mt < &m[L2_SIZE / PAGE_SIZE]; mt++)
3577						vm_page_dirty(mt);
3578 break;
3579 case 2:
3580 vm_page_dirty(m);
3581 break;
3582 }
3583 }
2974
2975 CHANGE_PV_LIST_LOCK_TO_VM_PAGE(&lock, m);
2976
2977 /* Mark free */
2978 pc->pc_map[field] |= bitmask;
3584
3585 CHANGE_PV_LIST_LOCK_TO_VM_PAGE(&lock, m);
3586
3587 /* Mark free */
3588 pc->pc_map[field] |= bitmask;
2979
2980 pmap_resident_count_dec(pmap, 1);
2981 TAILQ_REMOVE(&m->md.pv_list, pv, pv_next);
2982 m->md.pv_gen++;
2983
3589 switch (lvl) {
3590 case 1:
3591 pmap_resident_count_dec(pmap,
3592 L2_SIZE / PAGE_SIZE);
3593 pvh = pa_to_pvh(tpte & ~ATTR_MASK);
3594				TAILQ_REMOVE(&pvh->pv_list, pv, pv_next);
3595 pvh->pv_gen++;
3596 if (TAILQ_EMPTY(&pvh->pv_list)) {
3597 for (mt = m; mt < &m[L2_SIZE / PAGE_SIZE]; mt++)
3598 if ((mt->aflags & PGA_WRITEABLE) != 0 &&
3599 TAILQ_EMPTY(&mt->md.pv_list))
3600 vm_page_aflag_clear(mt, PGA_WRITEABLE);
3601 }
3602 ml3 = pmap_lookup_pt_page(pmap,
3603 pv->pv_va);
3604 if (ml3 != NULL) {
3605 pmap_remove_pt_page(pmap, ml3);
3606					pmap_resident_count_dec(pmap, 1);
3607 KASSERT(ml3->wire_count == NL3PG,
3608 ("pmap_remove_pages: l3 page wire count error"));
3609 ml3->wire_count = 0;
3610 pmap_add_delayed_free_list(ml3,
3611 &free, FALSE);
3612 atomic_subtract_int(
3613 &vm_cnt.v_wire_count, 1);
3614 }
3615 break;
3616 case 2:
3617 pmap_resident_count_dec(pmap, 1);
3618 TAILQ_REMOVE(&m->md.pv_list, pv,
3619 pv_next);
3620 m->md.pv_gen++;
3621 if ((m->aflags & PGA_WRITEABLE) != 0 &&
3622 TAILQ_EMPTY(&m->md.pv_list) &&
3623 (m->flags & PG_FICTITIOUS) == 0) {
3624 pvh = pa_to_pvh(
3625 VM_PAGE_TO_PHYS(m));
3626 if (TAILQ_EMPTY(&pvh->pv_list))
3627 vm_page_aflag_clear(m,
3628 PGA_WRITEABLE);
3629 }
3630 break;
3631 }
2984 pmap_unuse_l3(pmap, pv->pv_va, pmap_load(pde),
2985 &free);
2986 freed++;
2987 }
2988 }
2989 PV_STAT(atomic_add_long(&pv_entry_frees, freed));
2990 PV_STAT(atomic_add_int(&pv_entry_spare, freed));
2991 PV_STAT(atomic_subtract_long(&pv_entry_count, freed));

--- 14 unchanged lines hidden (view full) ---

3006 * don't have a bit to see if it has been modified we have to assume it
3007 * has been if the page is read/write.
3008 */
3009static boolean_t
3010pmap_page_test_mappings(vm_page_t m, boolean_t accessed, boolean_t modified)
3011{
3012 struct rwlock *lock;
3013 pv_entry_t pv;
3632 pmap_unuse_l3(pmap, pv->pv_va, pmap_load(pde),
3633 &free);
3634 freed++;
3635 }
3636 }
3637 PV_STAT(atomic_add_long(&pv_entry_frees, freed));
3638 PV_STAT(atomic_add_int(&pv_entry_spare, freed));
3639 PV_STAT(atomic_subtract_long(&pv_entry_count, freed));

--- 14 unchanged lines hidden (view full) ---

3654 * don't have a bit to see if it has been modified we have to assume it
3655 * has been if the page is read/write.
3656 */
3657static boolean_t
3658pmap_page_test_mappings(vm_page_t m, boolean_t accessed, boolean_t modified)
3659{
3660 struct rwlock *lock;
3661 pv_entry_t pv;
3662 struct md_page *pvh;
3014 pt_entry_t *pte, mask, value;
3015 pmap_t pmap;
3663 pt_entry_t *pte, mask, value;
3664 pmap_t pmap;
3016 int lvl, md_gen;
3665 int lvl, md_gen, pvh_gen;
3017 boolean_t rv;
3018
3019 rv = FALSE;
3020 lock = VM_PAGE_TO_PV_LIST_LOCK(m);
3021 rw_rlock(lock);
3022restart:
3023 TAILQ_FOREACH(pv, &m->md.pv_list, pv_next) {
3024 pmap = PV_PMAP(pv);

--- 20 unchanged lines hidden (view full) ---

3045 mask |= ATTR_AF | ATTR_DESCR_MASK;
3046 value |= ATTR_AF | L3_PAGE;
3047 }
3048 rv = (pmap_load(pte) & mask) == value;
3049 PMAP_UNLOCK(pmap);
3050 if (rv)
3051 goto out;
3052 }
3666 boolean_t rv;
3667
3668 rv = FALSE;
3669 lock = VM_PAGE_TO_PV_LIST_LOCK(m);
3670 rw_rlock(lock);
3671restart:
3672 TAILQ_FOREACH(pv, &m->md.pv_list, pv_next) {
3673 pmap = PV_PMAP(pv);

--- 20 unchanged lines hidden (view full) ---

3694 mask |= ATTR_AF | ATTR_DESCR_MASK;
3695 value |= ATTR_AF | L3_PAGE;
3696 }
3697 rv = (pmap_load(pte) & mask) == value;
3698 PMAP_UNLOCK(pmap);
3699 if (rv)
3700 goto out;
3701 }
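	/*
	 * Also check the page's 2MB mappings; a block entry is matched
	 * against L2_BLOCK in the descriptor bits.
	 */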
3702 if ((m->flags & PG_FICTITIOUS) == 0) {
3703 pvh = pa_to_pvh(VM_PAGE_TO_PHYS(m));
3704 TAILQ_FOREACH(pv, &pvh->pv_list, pv_next) {
3705 pmap = PV_PMAP(pv);
3706 if (!PMAP_TRYLOCK(pmap)) {
3707 md_gen = m->md.pv_gen;
3708 pvh_gen = pvh->pv_gen;
3709 rw_runlock(lock);
3710 PMAP_LOCK(pmap);
3711 rw_rlock(lock);
3712 if (md_gen != m->md.pv_gen ||
3713 pvh_gen != pvh->pv_gen) {
3714 PMAP_UNLOCK(pmap);
3715 goto restart;
3716 }
3717 }
3718 pte = pmap_pte(pmap, pv->pv_va, &lvl);
3719 KASSERT(lvl == 2,
3720 ("pmap_page_test_mappings: Invalid level %d", lvl));
3721 mask = 0;
3722 value = 0;
3723 if (modified) {
3724 mask |= ATTR_AP_RW_BIT;
3725 value |= ATTR_AP(ATTR_AP_RW);
3726 }
3727 if (accessed) {
3728 mask |= ATTR_AF | ATTR_DESCR_MASK;
3729 value |= ATTR_AF | L2_BLOCK;
3730 }
3731 rv = (pmap_load(pte) & mask) == value;
3732 PMAP_UNLOCK(pmap);
3733 if (rv)
3734 goto out;
3735 }
3736 }
3053out:
3054 rw_runlock(lock);
3055 return (rv);
3056}
3057
3058/*
3059 * pmap_is_modified:
3060 *

--- 57 unchanged lines hidden (view full) ---

3118}
3119
3120/*
3121 * Clear the write and modified bits in each of the given page's mappings.
3122 */
3123void
3124pmap_remove_write(vm_page_t m)
3125{
3737out:
3738 rw_runlock(lock);
3739 return (rv);
3740}
3741
3742/*
3743 * pmap_is_modified:
3744 *

--- 57 unchanged lines hidden (view full) ---

3802}
3803
3804/*
3805 * Clear the write and modified bits in each of the given page's mappings.
3806 */
3807void
3808pmap_remove_write(vm_page_t m)
3809{
3810 struct md_page *pvh;
3126 pmap_t pmap;
3127 struct rwlock *lock;
3811 pmap_t pmap;
3812 struct rwlock *lock;
3128 pv_entry_t pv;
3813 pv_entry_t next_pv, pv;
3129 pt_entry_t oldpte, *pte;
3814 pt_entry_t oldpte, *pte;
3130 int lvl, md_gen;
3815 vm_offset_t va;
3816 int lvl, md_gen, pvh_gen;
3131
3132 KASSERT((m->oflags & VPO_UNMANAGED) == 0,
3133 ("pmap_remove_write: page %p is not managed", m));
3134
3135 /*
3136 * If the page is not exclusive busied, then PGA_WRITEABLE cannot be
3137 * set by another thread while the object is locked. Thus,
3138 * if PGA_WRITEABLE is clear, no page table entries need updating.
3139 */
3140 VM_OBJECT_ASSERT_WLOCKED(m->object);
3141 if (!vm_page_xbusied(m) && (m->aflags & PGA_WRITEABLE) == 0)
3142 return;
3143 lock = VM_PAGE_TO_PV_LIST_LOCK(m);
3817
3818 KASSERT((m->oflags & VPO_UNMANAGED) == 0,
3819 ("pmap_remove_write: page %p is not managed", m));
3820
3821 /*
3822 * If the page is not exclusive busied, then PGA_WRITEABLE cannot be
3823 * set by another thread while the object is locked. Thus,
3824 * if PGA_WRITEABLE is clear, no page table entries need updating.
3825 */
3826 VM_OBJECT_ASSERT_WLOCKED(m->object);
3827 if (!vm_page_xbusied(m) && (m->aflags & PGA_WRITEABLE) == 0)
3828 return;
3829 lock = VM_PAGE_TO_PV_LIST_LOCK(m);
3830 pvh = (m->flags & PG_FICTITIOUS) != 0 ? &pv_dummy :
3831 pa_to_pvh(VM_PAGE_TO_PHYS(m));
3144retry_pv_loop:
3145 rw_wlock(lock);
3832retry_pv_loop:
3833 rw_wlock(lock);
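	/*
	 * Demote any writable 2MB mappings of the page first; the loop
	 * below then clears write access on the individual 4KB entries.
	 */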
3834 TAILQ_FOREACH_SAFE(pv, &pvh->pv_list, pv_next, next_pv) {
3835 pmap = PV_PMAP(pv);
3836 if (!PMAP_TRYLOCK(pmap)) {
3837 pvh_gen = pvh->pv_gen;
3838 rw_wunlock(lock);
3839 PMAP_LOCK(pmap);
3840 rw_wlock(lock);
3841 if (pvh_gen != pvh->pv_gen) {
3842 PMAP_UNLOCK(pmap);
3843 rw_wunlock(lock);
3844 goto retry_pv_loop;
3845 }
3846 }
3847 va = pv->pv_va;
3848 pte = pmap_pte(pmap, pv->pv_va, &lvl);
3849 if ((pmap_load(pte) & ATTR_AP_RW_BIT) == ATTR_AP(ATTR_AP_RW))
3850 pmap_demote_l2_locked(pmap, pte, va & ~L2_OFFSET,
3851 &lock);
3852 KASSERT(lock == VM_PAGE_TO_PV_LIST_LOCK(m),
3853 ("inconsistent pv lock %p %p for page %p",
3854 lock, VM_PAGE_TO_PV_LIST_LOCK(m), m));
3855 PMAP_UNLOCK(pmap);
3856 }
3146 TAILQ_FOREACH(pv, &m->md.pv_list, pv_next) {
3147 pmap = PV_PMAP(pv);
3148 if (!PMAP_TRYLOCK(pmap)) {
3857 TAILQ_FOREACH(pv, &m->md.pv_list, pv_next) {
3858 pmap = PV_PMAP(pv);
3859 if (!PMAP_TRYLOCK(pmap)) {
3860 pvh_gen = pvh->pv_gen;
3149 md_gen = m->md.pv_gen;
3150 rw_wunlock(lock);
3151 PMAP_LOCK(pmap);
3152 rw_wlock(lock);
3861 md_gen = m->md.pv_gen;
3862 rw_wunlock(lock);
3863 PMAP_LOCK(pmap);
3864 rw_wlock(lock);
3153 if (md_gen != m->md.pv_gen) {
3865 if (pvh_gen != pvh->pv_gen ||
3866 md_gen != m->md.pv_gen) {
3154 PMAP_UNLOCK(pmap);
3155 rw_wunlock(lock);
3156 goto retry_pv_loop;
3157 }
3158 }
3159 pte = pmap_pte(pmap, pv->pv_va, &lvl);
3160retry:
3161 oldpte = pmap_load(pte);

--- 30 unchanged lines hidden (view full) ---

3192 *
3193 * XXX: The exact number of bits to check and clear is a matter that
3194 * should be tested and standardized at some point in the future for
3195 * optimal aging of shared pages.
3196 */
3197int
3198pmap_ts_referenced(vm_page_t m)
3199{
3867 PMAP_UNLOCK(pmap);
3868 rw_wunlock(lock);
3869 goto retry_pv_loop;
3870 }
3871 }
3872 pte = pmap_pte(pmap, pv->pv_va, &lvl);
3873retry:
3874 oldpte = pmap_load(pte);

--- 30 unchanged lines hidden (view full) ---

3905 *
3906 * XXX: The exact number of bits to check and clear is a matter that
3907 * should be tested and standardized at some point in the future for
3908 * optimal aging of shared pages.
3909 */
3910int
3911pmap_ts_referenced(vm_page_t m)
3912{
3913 struct md_page *pvh;
3200 pv_entry_t pv, pvf;
3201 pmap_t pmap;
3202 struct rwlock *lock;
3203 pd_entry_t *pde, tpde;
3204 pt_entry_t *pte, tpte;
3914 pv_entry_t pv, pvf;
3915 pmap_t pmap;
3916 struct rwlock *lock;
3917 pd_entry_t *pde, tpde;
3918 pt_entry_t *pte, tpte;
3919 pt_entry_t *l3;
3920 vm_offset_t va;
3205 vm_paddr_t pa;
3921 vm_paddr_t pa;
3206 int cleared, md_gen, not_cleared, lvl;
3922 int cleared, md_gen, not_cleared, lvl, pvh_gen;
3207 struct spglist free;
3923 struct spglist free;
3924 bool demoted;
3208
3209 KASSERT((m->oflags & VPO_UNMANAGED) == 0,
3210 ("pmap_ts_referenced: page %p is not managed", m));
3211 SLIST_INIT(&free);
3212 cleared = 0;
3213 pa = VM_PAGE_TO_PHYS(m);
3214 lock = PHYS_TO_PV_LIST_LOCK(pa);
3925
3926 KASSERT((m->oflags & VPO_UNMANAGED) == 0,
3927 ("pmap_ts_referenced: page %p is not managed", m));
3928 SLIST_INIT(&free);
3929 cleared = 0;
3930 pa = VM_PAGE_TO_PHYS(m);
3931 lock = PHYS_TO_PV_LIST_LOCK(pa);
3932 pvh = (m->flags & PG_FICTITIOUS) != 0 ? &pv_dummy : pa_to_pvh(pa);
3215 rw_wlock(lock);
3216retry:
3217 not_cleared = 0;
3933 rw_wlock(lock);
3934retry:
3935 not_cleared = 0;
3936 if ((pvf = TAILQ_FIRST(&pvh->pv_list)) == NULL)
3937 goto small_mappings;
3938 pv = pvf;
3939 do {
3940 if (pvf == NULL)
3941 pvf = pv;
3942 pmap = PV_PMAP(pv);
3943 if (!PMAP_TRYLOCK(pmap)) {
3944 pvh_gen = pvh->pv_gen;
3945 rw_wunlock(lock);
3946 PMAP_LOCK(pmap);
3947 rw_wlock(lock);
3948 if (pvh_gen != pvh->pv_gen) {
3949 PMAP_UNLOCK(pmap);
3950 goto retry;
3951 }
3952 }
3953 va = pv->pv_va;
3954 pde = pmap_pde(pmap, pv->pv_va, &lvl);
3955 KASSERT(pde != NULL, ("pmap_ts_referenced: no l1 table found"));
3956 KASSERT(lvl == 1,
3957 ("pmap_ts_referenced: invalid pde level %d", lvl));
3958 tpde = pmap_load(pde);
3959 KASSERT((tpde & ATTR_DESCR_MASK) == L1_TABLE,
3960 ("pmap_ts_referenced: found an invalid l1 table"));
3961 pte = pmap_l1_to_l2(pde, pv->pv_va);
3962 tpte = pmap_load(pte);
3963 if ((tpte & ATTR_AF) != 0) {
3964 /*
3965 * Since this reference bit is shared by 512 4KB
3966 * pages, it should not be cleared every time it is
3967 * tested. Apply a simple "hash" function on the
3968 * physical page number, the virtual superpage number,
3969 * and the pmap address to select one 4KB page out of
3970 * the 512 on which testing the reference bit will
3971 * result in clearing that reference bit. This
3972 * function is designed to avoid the selection of the
3973 * same 4KB page for every 2MB page mapping.
3974 *
3975 * On demotion, a mapping that hasn't been referenced
3976 * is simply destroyed. To avoid the possibility of a
3977 * subsequent page fault on a demoted wired mapping,
3978 * always leave its reference bit set. Moreover,
3979 * since the superpage is wired, the current state of
3980 * its reference bit won't affect page replacement.
3981 */
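			/*
			 * Illustration (Ln_ENTRIES is 512 with 4KB
			 * granules): the test below succeeds only when the
			 * low 9 bits of
			 * (pa >> PAGE_SHIFT) ^ (va >> L2_SHIFT) ^ pmap are
			 * all zero, so roughly one candidate in 512 has its
			 * reference bit cleared.
			 */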
3982 if ((((pa >> PAGE_SHIFT) ^ (pv->pv_va >> L2_SHIFT) ^
3983 (uintptr_t)pmap) & (Ln_ENTRIES - 1)) == 0 &&
3984 (tpte & ATTR_SW_WIRED) == 0) {
3985 if (safe_to_clear_referenced(pmap, tpte)) {
3986 /*
3987 * TODO: We don't handle the access
3988 * flag at all. We need to be able
3989 * to set it in the exception handler.
3990 */
3991 panic("ARM64TODO: "
3992 "safe_to_clear_referenced\n");
3993 } else if (pmap_demote_l2_locked(pmap, pte,
3994 pv->pv_va, &lock) != NULL) {
3995 demoted = true;
3996 va += VM_PAGE_TO_PHYS(m) -
3997 (tpte & ~ATTR_MASK);
3998 l3 = pmap_l2_to_l3(pte, va);
3999 pmap_remove_l3(pmap, l3, va,
4000 pmap_load(pte), NULL, &lock);
4001 } else
4002 demoted = true;
4003
4004 if (demoted) {
4005 /*
4006 * The superpage mapping was removed
4007 * entirely and therefore 'pv' is no
4008 * longer valid.
4009 */
4010 if (pvf == pv)
4011 pvf = NULL;
4012 pv = NULL;
4013 }
4014 cleared++;
4015 KASSERT(lock == VM_PAGE_TO_PV_LIST_LOCK(m),
4016 ("inconsistent pv lock %p %p for page %p",
4017 lock, VM_PAGE_TO_PV_LIST_LOCK(m), m));
4018 } else
4019 not_cleared++;
4020 }
4021 PMAP_UNLOCK(pmap);
4022 /* Rotate the PV list if it has more than one entry. */
4023 if (pv != NULL && TAILQ_NEXT(pv, pv_next) != NULL) {
4024 TAILQ_REMOVE(&pvh->pv_list, pv, pv_next);
4025 TAILQ_INSERT_TAIL(&pvh->pv_list, pv, pv_next);
4026 pvh->pv_gen++;
4027 }
4028 if (cleared + not_cleared >= PMAP_TS_REFERENCED_MAX)
4029 goto out;
4030 } while ((pv = TAILQ_FIRST(&pvh->pv_list)) != pvf);
4031small_mappings:
3218 if ((pvf = TAILQ_FIRST(&m->md.pv_list)) == NULL)
3219 goto out;
3220 pv = pvf;
3221 do {
3222 if (pvf == NULL)
3223 pvf = pv;
3224 pmap = PV_PMAP(pv);
3225 if (!PMAP_TRYLOCK(pmap)) {
4032 if ((pvf = TAILQ_FIRST(&m->md.pv_list)) == NULL)
4033 goto out;
4034 pv = pvf;
4035 do {
4036 if (pvf == NULL)
4037 pvf = pv;
4038 pmap = PV_PMAP(pv);
4039 if (!PMAP_TRYLOCK(pmap)) {
4040 pvh_gen = pvh->pv_gen;
3226 md_gen = m->md.pv_gen;
3227 rw_wunlock(lock);
3228 PMAP_LOCK(pmap);
3229 rw_wlock(lock);
4041 md_gen = m->md.pv_gen;
4042 rw_wunlock(lock);
4043 PMAP_LOCK(pmap);
4044 rw_wlock(lock);
3230 if (md_gen != m->md.pv_gen) {
4045 if (pvh_gen != pvh->pv_gen || md_gen != m->md.pv_gen) {
3231 PMAP_UNLOCK(pmap);
3232 goto retry;
3233 }
3234 }
3235 pde = pmap_pde(pmap, pv->pv_va, &lvl);
3236 KASSERT(pde != NULL, ("pmap_ts_referenced: no l2 table found"));
3237 KASSERT(lvl == 2,
3238 ("pmap_ts_referenced: invalid pde level %d", lvl));

--- 96 unchanged lines hidden (view full) ---

3335 */
3336void
3337pmap_page_set_memattr(vm_page_t m, vm_memattr_t ma)
3338{
3339
3340 m->md.pv_memattr = ma;
3341
3342 /*
4046 PMAP_UNLOCK(pmap);
4047 goto retry;
4048 }
4049 }
4050 pde = pmap_pde(pmap, pv->pv_va, &lvl);
4051 KASSERT(pde != NULL, ("pmap_ts_referenced: no l2 table found"));
4052 KASSERT(lvl == 2,
4053 ("pmap_ts_referenced: invalid pde level %d", lvl));

--- 96 unchanged lines hidden (view full) ---

4150 */
4151void
4152pmap_page_set_memattr(vm_page_t m, vm_memattr_t ma)
4153{
4154
4155 m->md.pv_memattr = ma;
4156
4157 /*
3343 * ARM64TODO: Implement the below (from the amd64 pmap)
3344 * If "m" is a normal page, update its direct mapping. This update
3345 * can be relied upon to perform any cache operations that are
3346 * required for data coherence.
3347 */
3348 if ((m->flags & PG_FICTITIOUS) == 0 &&
4158 * If "m" is a normal page, update its direct mapping. This update
4159 * can be relied upon to perform any cache operations that are
4160 * required for data coherence.
4161 */
4162 if ((m->flags & PG_FICTITIOUS) == 0 &&
3349 PHYS_IN_DMAP(VM_PAGE_TO_PHYS(m)))
3350 panic("ARM64TODO: pmap_page_set_memattr");
4163 pmap_change_attr(PHYS_TO_DMAP(VM_PAGE_TO_PHYS(m)), PAGE_SIZE,
4164 m->md.pv_memattr) != 0)
4165 panic("memory attribute change on the direct map failed");
3351}
3352
3353/*
4166}
4167
4168/*
4169 * Changes the specified virtual address range's memory type to that given by
4170 * the parameter "mode". The specified virtual address range must be
4171 * completely contained within either the direct map or the kernel map. If
4172 * the virtual address range is contained within the kernel map, then the
4173 * memory type for each of the corresponding ranges of the direct map is also
4174 * changed. (The corresponding ranges of the direct map are those ranges that
4175 * map the same physical pages as the specified virtual address range.) These
4176 * changes to the direct map are necessary because Intel describes the
4177 * behavior of their processors as "undefined" if two or more mappings to the
4178 * same physical page have different memory types.
4179 *
4180 * Returns zero if the change completed successfully, and either EINVAL or
4181 * ENOMEM if the change failed. Specifically, EINVAL is returned if some part
4182 * of the virtual address range was not mapped, and ENOMEM is returned if
4183 * there was insufficient memory available to complete the change. In the
4184 * latter case, the memory type may have been changed on some part of the
4185 * virtual address range or the direct map.
4186 */
4187static int
4188pmap_change_attr(vm_offset_t va, vm_size_t size, int mode)
4189{
4190 int error;
4191
4192 PMAP_LOCK(kernel_pmap);
4193 error = pmap_change_attr_locked(va, size, mode);
4194 PMAP_UNLOCK(kernel_pmap);
4195 return (error);
4196}
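/*
 * For illustration only ("pa" below is a hypothetical physical address):
 * a caller could make a single direct-map page uncacheable with
 *
 *	error = pmap_change_attr(PHYS_TO_DMAP(pa), PAGE_SIZE,
 *	    VM_MEMATTR_UNCACHEABLE);
 *
 * A non-zero return (EINVAL or ENOMEM) means part of the range was not
 * mapped or memory for a demotion could not be allocated.
 */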
4197
4198static int
4199pmap_change_attr_locked(vm_offset_t va, vm_size_t size, int mode)
4200{
4201 vm_offset_t base, offset, tmpva;
4202 pt_entry_t l3, *pte, *newpte;
4203 int lvl;
4204
4205 PMAP_LOCK_ASSERT(kernel_pmap, MA_OWNED);
4206 base = trunc_page(va);
4207 offset = va & PAGE_MASK;
4208 size = round_page(offset + size);
4209
4210 if (!VIRT_IN_DMAP(base))
4211 return (EINVAL);
4212
4213 for (tmpva = base; tmpva < base + size; ) {
4214		pte = pmap_pte(kernel_pmap, tmpva, &lvl);
4215 if (pte == NULL)
4216 return (EINVAL);
4217
4218 if ((pmap_load(pte) & ATTR_IDX_MASK) == ATTR_IDX(mode)) {
4219 /*
4220 * We already have the correct attribute,
4221 * ignore this entry.
4222 */
4223 switch (lvl) {
4224 default:
4225 panic("Invalid DMAP table level: %d\n", lvl);
4226 case 1:
4227 tmpva = (tmpva & ~L1_OFFSET) + L1_SIZE;
4228 break;
4229 case 2:
4230 tmpva = (tmpva & ~L2_OFFSET) + L2_SIZE;
4231 break;
4232 case 3:
4233 tmpva += PAGE_SIZE;
4234 break;
4235 }
4236 } else {
4237 /*
4238			 * Split the entry to a level 3 table, then
4239 * set the new attribute.
4240 */
4241 switch (lvl) {
4242 default:
4243 panic("Invalid DMAP table level: %d\n", lvl);
4244 case 1:
4245 newpte = pmap_demote_l1(kernel_pmap, pte,
4246 tmpva & ~L1_OFFSET);
4247 if (newpte == NULL)
4248 return (EINVAL);
4249 pte = pmap_l1_to_l2(pte, tmpva);
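				/* FALLTHROUGH: demote the resulting L2 entry as well. */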
4250 case 2:
4251 newpte = pmap_demote_l2(kernel_pmap, pte,
4252 tmpva & ~L2_OFFSET);
4253 if (newpte == NULL)
4254 return (EINVAL);
4255 pte = pmap_l2_to_l3(pte, tmpva);
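				/* FALLTHROUGH: update the resulting L3 entry. */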
4256 case 3:
4257 /* Update the entry */
4258 l3 = pmap_load(pte);
4259 l3 &= ~ATTR_IDX_MASK;
4260 l3 |= ATTR_IDX(mode);
4261
4262 pmap_update_entry(kernel_pmap, pte, l3, tmpva,
4263 PAGE_SIZE);
4264
4265 /*
4266 * If moving to a non-cacheable entry flush
4267 * the cache.
4268 */
4269 if (mode == VM_MEMATTR_UNCACHEABLE)
4270 cpu_dcache_wbinv_range(tmpva, L3_SIZE);
4271
4272 break;
4273 }
4274 tmpva += PAGE_SIZE;
4275 }
4276 }
4277
4278 return (0);
4279}
4280
4281/*
4282 * Create an L2 table to map all addresses within an L1 mapping.
4283 */
4284static pt_entry_t *
4285pmap_demote_l1(pmap_t pmap, pt_entry_t *l1, vm_offset_t va)
4286{
4287 pt_entry_t *l2, newl2, oldl1;
4288 vm_offset_t tmpl1;
4289 vm_paddr_t l2phys, phys;
4290 vm_page_t ml2;
4291 int i;
4292
4293 PMAP_LOCK_ASSERT(pmap, MA_OWNED);
4294 oldl1 = pmap_load(l1);
4295 KASSERT((oldl1 & ATTR_DESCR_MASK) == L1_BLOCK,
4296 ("pmap_demote_l1: Demoting a non-block entry"));
4297 KASSERT((va & L1_OFFSET) == 0,
4298 ("pmap_demote_l1: Invalid virtual address %#lx", va));
4299 KASSERT((oldl1 & ATTR_SW_MANAGED) == 0,
4300 ("pmap_demote_l1: Level 1 table shouldn't be managed"));
4301
4302 tmpl1 = 0;
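	/*
	 * If the 1GB block being demoted covers the page that holds this
	 * l1 table itself, a temporary kernel mapping is set up below so
	 * the table stays accessible while its entry is replaced.
	 */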
4303 if (va <= (vm_offset_t)l1 && va + L1_SIZE > (vm_offset_t)l1) {
4304 tmpl1 = kva_alloc(PAGE_SIZE);
4305 if (tmpl1 == 0)
4306 return (NULL);
4307 }
4308
4309 if ((ml2 = vm_page_alloc(NULL, 0, VM_ALLOC_INTERRUPT |
4310 VM_ALLOC_NOOBJ | VM_ALLOC_WIRED)) == NULL) {
4311 CTR2(KTR_PMAP, "pmap_demote_l1: failure for va %#lx"
4312 " in pmap %p", va, pmap);
4313 return (NULL);
4314 }
4315
4316 l2phys = VM_PAGE_TO_PHYS(ml2);
4317 l2 = (pt_entry_t *)PHYS_TO_DMAP(l2phys);
4318
4319	/* The physical address the range points at */
4320 phys = oldl1 & ~ATTR_MASK;
4321	/* The attributes from the old l1 entry to be copied */
4322 newl2 = oldl1 & ATTR_MASK;
4323
4324 /* Create the new entries */
4325 for (i = 0; i < Ln_ENTRIES; i++) {
4326 l2[i] = newl2 | phys;
4327 phys += L2_SIZE;
4328 }
4329 cpu_dcache_wb_range((vm_offset_t)l2, PAGE_SIZE);
4330 KASSERT(l2[0] == ((oldl1 & ~ATTR_DESCR_MASK) | L2_BLOCK),
4331 ("Invalid l2 page (%lx != %lx)", l2[0],
4332 (oldl1 & ~ATTR_DESCR_MASK) | L2_BLOCK));
4333
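	/*
	 * Map the temporary page so we don't lose access to the l1 table
	 * while its block mapping is being replaced.
	 */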
4334 if (tmpl1 != 0) {
4335 pmap_kenter(tmpl1, PAGE_SIZE,
4336 DMAP_TO_PHYS((vm_offset_t)l1) & ~L3_OFFSET, CACHED_MEMORY);
4337 l1 = (pt_entry_t *)(tmpl1 + ((vm_offset_t)l1 & PAGE_MASK));
4338 }
4339
4340 pmap_update_entry(pmap, l1, l2phys | L1_TABLE, va, PAGE_SIZE);
4341
4342 if (tmpl1 != 0) {
4343 pmap_kremove(tmpl1);
4344 kva_free(tmpl1, PAGE_SIZE);
4345 }
4346
4347 return (l2);
4348}
4349
4350/*
4351 * Create an L3 table to map all addresses within an L2 mapping.
4352 */
4353static pt_entry_t *
4354pmap_demote_l2_locked(pmap_t pmap, pt_entry_t *l2, vm_offset_t va,
4355 struct rwlock **lockp)
4356{
4357 pt_entry_t *l3, newl3, oldl2;
4358 vm_offset_t tmpl2;
4359 vm_paddr_t l3phys, phys;
4360 vm_page_t ml3;
4361 int i;
4362
4363 PMAP_LOCK_ASSERT(pmap, MA_OWNED);
4364 l3 = NULL;
4365 oldl2 = pmap_load(l2);
4366 KASSERT((oldl2 & ATTR_DESCR_MASK) == L2_BLOCK,
4367 ("pmap_demote_l2: Demoting a non-block entry"));
4368 KASSERT((va & L2_OFFSET) == 0,
4369 ("pmap_demote_l2: Invalid virtual address %#lx", va));
4370
4371 tmpl2 = 0;
4372 if (va <= (vm_offset_t)l2 && va + L2_SIZE > (vm_offset_t)l2) {
4373 tmpl2 = kva_alloc(PAGE_SIZE);
4374 if (tmpl2 == 0)
4375 return (NULL);
4376 }
4377
4378 if ((ml3 = pmap_lookup_pt_page(pmap, va)) != NULL) {
4379 pmap_remove_pt_page(pmap, ml3);
4380 } else {
4381 ml3 = vm_page_alloc(NULL, pmap_l2_pindex(va),
4382 (VIRT_IN_DMAP(va) ? VM_ALLOC_INTERRUPT : VM_ALLOC_NORMAL) |
4383 VM_ALLOC_NOOBJ | VM_ALLOC_WIRED);
4384 if (ml3 == NULL) {
4385 CTR2(KTR_PMAP, "pmap_demote_l2: failure for va %#lx"
4386 " in pmap %p", va, pmap);
4387 goto fail;
4388 }
4389 if (va < VM_MAXUSER_ADDRESS)
4390 pmap_resident_count_inc(pmap, 1);
4391 }
4392
4393 l3phys = VM_PAGE_TO_PHYS(ml3);
4394 l3 = (pt_entry_t *)PHYS_TO_DMAP(l3phys);
4395
4396	/* The physical address the range points at */
4397 phys = oldl2 & ~ATTR_MASK;
4398	/* The attributes from the old l2 entry to be copied */
4399 newl3 = (oldl2 & (ATTR_MASK & ~ATTR_DESCR_MASK)) | L3_PAGE;
4400
4401 /*
4402 * If the page table page is new, initialize it.
4403 */
4404 if (ml3->wire_count == 1) {
4405 for (i = 0; i < Ln_ENTRIES; i++) {
4406 l3[i] = newl3 | phys;
4407 phys += L3_SIZE;
4408 }
4409 cpu_dcache_wb_range((vm_offset_t)l3, PAGE_SIZE);
4410 }
4411 KASSERT(l3[0] == ((oldl2 & ~ATTR_DESCR_MASK) | L3_PAGE),
4412 ("Invalid l3 page (%lx != %lx)", l3[0],
4413 (oldl2 & ~ATTR_DESCR_MASK) | L3_PAGE));
4414
4415 /*
4416 * Map the temporary page so we don't lose access to the l2 table.
4417 */
4418 if (tmpl2 != 0) {
4419 pmap_kenter(tmpl2, PAGE_SIZE,
4420 DMAP_TO_PHYS((vm_offset_t)l2) & ~L3_OFFSET, CACHED_MEMORY);
4421 l2 = (pt_entry_t *)(tmpl2 + ((vm_offset_t)l2 & PAGE_MASK));
4422 }
4423
4424 /*
4425 * The spare PV entries must be reserved prior to demoting the
4426 * mapping, that is, prior to changing the PDE. Otherwise, the state
4427 * of the L2 and the PV lists will be inconsistent, which can result
4428 * in reclaim_pv_chunk() attempting to remove a PV entry from the
4429 * wrong PV list and pmap_pv_demote_l2() failing to find the expected
4430 * PV entry for the 2MB page mapping that is being demoted.
4431 */
4432 if ((oldl2 & ATTR_SW_MANAGED) != 0)
4433 reserve_pv_entries(pmap, Ln_ENTRIES - 1, lockp);
4434
4435 pmap_update_entry(pmap, l2, l3phys | L2_TABLE, va, PAGE_SIZE);
4436
4437 /*
4438 * Demote the PV entry.
4439 */
4440 if ((oldl2 & ATTR_SW_MANAGED) != 0)
4441 pmap_pv_demote_l2(pmap, va, oldl2 & ~ATTR_MASK, lockp);
4442
4443 atomic_add_long(&pmap_l2_demotions, 1);
4444 CTR3(KTR_PMAP, "pmap_demote_l2: success for va %#lx"
4445 " in pmap %p %lx", va, pmap, l3[0]);
4446
4447fail:
4448 if (tmpl2 != 0) {
4449 pmap_kremove(tmpl2);
4450 kva_free(tmpl2, PAGE_SIZE);
4451 }
4452
4453 return (l3);
4454
4455}
4456
4457static pt_entry_t *
4458pmap_demote_l2(pmap_t pmap, pt_entry_t *l2, vm_offset_t va)
4459{
4460 struct rwlock *lock;
4461 pt_entry_t *l3;
4462
4463 lock = NULL;
4464 l3 = pmap_demote_l2_locked(pmap, l2, va, &lock);
4465 if (lock != NULL)
4466 rw_wunlock(lock);
4467 return (l3);
4468}
4469
4470/*
3354 * perform the pmap work for mincore
3355 */
3356int
3357pmap_mincore(pmap_t pmap, vm_offset_t addr, vm_paddr_t *locked_pa)
3358{
3359 pd_entry_t *l1p, l1;
3360 pd_entry_t *l2p, l2;
3361 pt_entry_t *l3p, l3;

--- 113 unchanged lines hidden (view full) ---

3475 sz -= len;
3476 va += len;
3477 /* Set the length for the next iteration */
3478 len = imin(PAGE_SIZE, sz);
3479 }
3480 }
3481}
3482
4471 * perform the pmap work for mincore
4472 */
4473int
4474pmap_mincore(pmap_t pmap, vm_offset_t addr, vm_paddr_t *locked_pa)
4475{
4476 pd_entry_t *l1p, l1;
4477 pd_entry_t *l2p, l2;
4478 pt_entry_t *l3p, l3;

--- 113 unchanged lines hidden (view full) ---

4592 sz -= len;
4593 va += len;
4594 /* Set the length for the next iteration */
4595 len = imin(PAGE_SIZE, sz);
4596 }
4597 }
4598}
4599
4600int
4601pmap_fault(pmap_t pmap, uint64_t esr, uint64_t far)
4602{
4603#ifdef SMP
4604 uint64_t par;
4605#endif
4606
4607 switch (ESR_ELx_EXCEPTION(esr)) {
4608 case EXCP_DATA_ABORT_L:
4609 case EXCP_DATA_ABORT:
4610 break;
4611 default:
4612 return (KERN_FAILURE);
4613 }
4614
4615#ifdef SMP
4616 PMAP_LOCK(pmap);
4617 switch (esr & ISS_DATA_DFSC_MASK) {
4618 case ISS_DATA_DFSC_TF_L0:
4619 case ISS_DATA_DFSC_TF_L1:
4620 case ISS_DATA_DFSC_TF_L2:
4621 case ISS_DATA_DFSC_TF_L3:
4622 /* Ask the MMU to check the address */
4623 if (pmap == kernel_pmap)
4624 par = arm64_address_translate_s1e1r(far);
4625 else
4626 par = arm64_address_translate_s1e0r(far);
4627
4628 /*
4629 * If the translation was successful the address was invalid
4630 * due to a break-before-make sequence. We can unlock and
4631 * return success to the trap handler.
4632 */
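		/*
		 * (In a break-before-make update the old entry is cleared
		 * and the TLB invalidated before the new entry is written,
		 * so a walk performed in that window sees no mapping.)
		 */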
4633 if (PAR_SUCCESS(par)) {
4634 PMAP_UNLOCK(pmap);
4635 return (KERN_SUCCESS);
4636 }
4637 break;
4638 default:
4639 break;
4640 }
4641 PMAP_UNLOCK(pmap);
4642#endif
4643
4644 return (KERN_FAILURE);
4645}
4646
3483/*
3484 * Increase the starting virtual address of the given mapping if a
3485 * different alignment might result in more superpage mappings.
3486 */
3487void
3488pmap_align_superpage(vm_object_t object, vm_ooffset_t offset,
3489 vm_offset_t *addr, vm_size_t size)
3490{

--- 91 unchanged lines hidden ---
4647/*
4648 * Increase the starting virtual address of the given mapping if a
4649 * different alignment might result in more superpage mappings.
4650 */
4651void
4652pmap_align_superpage(vm_object_t object, vm_ooffset_t offset,
4653 vm_offset_t *addr, vm_size_t size)
4654{

--- 91 unchanged lines hidden ---