1/*-
2 * Copyright (c) 1991 Regents of the University of California.
3 * All rights reserved.
4 * Copyright (c) 1994 John S. Dyson
5 * All rights reserved.
6 * Copyright (c) 1994 David Greenman
7 * All rights reserved.
8 * Copyright (c) 2003 Peter Wemm
9 * All rights reserved.
10 * Copyright (c) 2005-2010 Alan L. Cox <alc@cs.rice.edu>
11 * All rights reserved.
12 * Copyright (c) 2014 Andrew Turner
13 * All rights reserved.
14 * Copyright (c) 2014-2016 The FreeBSD Foundation
15 * All rights reserved.
16 *
17 * This code is derived from software contributed to Berkeley by
18 * the Systems Programming Group of the University of Utah Computer
19 * Science Department and William Jolitz of UUNET Technologies Inc.
20 *
21 * This software was developed by Andrew Turner under sponsorship from
22 * the FreeBSD Foundation.
23 *
24 * Redistribution and use in source and binary forms, with or without
25 * modification, are permitted provided that the following conditions
26 * are met:
27 * 1. Redistributions of source code must retain the above copyright
28 *    notice, this list of conditions and the following disclaimer.
29 * 2. Redistributions in binary form must reproduce the above copyright
30 *    notice, this list of conditions and the following disclaimer in the
31 *    documentation and/or other materials provided with the distribution.
32 * 3. All advertising materials mentioning features or use of this software
33 *    must display the following acknowledgement:
34 *	This product includes software developed by the University of
35 *	California, Berkeley and its contributors.
36 * 4. Neither the name of the University nor the names of its contributors
37 *    may be used to endorse or promote products derived from this software
38 *    without specific prior written permission.
39 *
40 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
41 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
42 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
43 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
44 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
45 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
46 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
47 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
48 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
49 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
50 * SUCH DAMAGE.
51 *
52 *	from:	@(#)pmap.c	7.7 (Berkeley)	5/12/91
53 */
54/*-
55 * Copyright (c) 2003 Networks Associates Technology, Inc.
56 * All rights reserved.
57 *
58 * This software was developed for the FreeBSD Project by Jake Burkholder,
59 * Safeport Network Services, and Network Associates Laboratories, the
60 * Security Research Division of Network Associates, Inc. under
61 * DARPA/SPAWAR contract N66001-01-C-8035 ("CBOSS"), as part of the DARPA
62 * CHATS research program.
63 *
64 * Redistribution and use in source and binary forms, with or without
65 * modification, are permitted provided that the following conditions
66 * are met:
67 * 1. Redistributions of source code must retain the above copyright
68 *    notice, this list of conditions and the following disclaimer.
69 * 2. Redistributions in binary form must reproduce the above copyright
70 *    notice, this list of conditions and the following disclaimer in the
71 *    documentation and/or other materials provided with the distribution.
72 *
73 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
74 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
75 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
76 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
77 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
78 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
79 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
80 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
81 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
82 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
83 * SUCH DAMAGE.
84 */
85
86#include <sys/cdefs.h>
87__FBSDID("$FreeBSD: stable/11/sys/arm64/arm64/pmap.c 305882 2016-09-16 12:36:11Z andrew $");
88
89/*
90 *	Manages physical address maps.
91 *
92 *	Since the information managed by this module is
93 *	also stored by the logical address mapping module,
94 *	this module may throw away valid virtual-to-physical
95 *	mappings at almost any time.  However, invalidations
96 *	of virtual-to-physical mappings must be done as
97 *	requested.
98 *
99 *	In order to cope with hardware architectures which
100 *	make virtual-to-physical map invalidates expensive,
101 *	this module may delay invalidation or reduced-protection
102 *	operations until such time as they are actually
103 *	necessary.  This module is given full information as
104 *	to which processors are currently using which maps,
105 *	and to when physical maps must be made correct.
106 */
107
108#include <sys/param.h>
109#include <sys/bitstring.h>
110#include <sys/bus.h>
111#include <sys/systm.h>
112#include <sys/kernel.h>
113#include <sys/ktr.h>
114#include <sys/lock.h>
115#include <sys/malloc.h>
116#include <sys/mman.h>
117#include <sys/msgbuf.h>
118#include <sys/mutex.h>
119#include <sys/proc.h>
120#include <sys/rwlock.h>
121#include <sys/sx.h>
122#include <sys/vmem.h>
123#include <sys/vmmeter.h>
124#include <sys/sched.h>
125#include <sys/sysctl.h>
126#include <sys/_unrhdr.h>
127#include <sys/smp.h>
128
129#include <vm/vm.h>
130#include <vm/vm_param.h>
131#include <vm/vm_kern.h>
132#include <vm/vm_page.h>
133#include <vm/vm_map.h>
134#include <vm/vm_object.h>
135#include <vm/vm_extern.h>
136#include <vm/vm_pageout.h>
137#include <vm/vm_pager.h>
138#include <vm/vm_phys.h>
139#include <vm/vm_radix.h>
140#include <vm/vm_reserv.h>
141#include <vm/uma.h>
142
143#include <machine/machdep.h>
144#include <machine/md_var.h>
145#include <machine/pcb.h>
146
147#define	NL0PG		(PAGE_SIZE/(sizeof (pd_entry_t)))
148#define	NL1PG		(PAGE_SIZE/(sizeof (pd_entry_t)))
149#define	NL2PG		(PAGE_SIZE/(sizeof (pd_entry_t)))
150#define	NL3PG		(PAGE_SIZE/(sizeof (pt_entry_t)))
151
152#define	NUL0E		L0_ENTRIES
153#define	NUL1E		(NUL0E * NL1PG)
154#define	NUL2E		(NUL1E * NL2PG)
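/*
 * With 4KB pages each level of page table holds 512 entries, so NUL1E and
 * NUL2E give the total number of L1 and L2 entries reachable from the L0
 * table.  These totals partition the pindex space used for page table
 * pages (see _pmap_alloc_l3() and _pmap_unwire_l3()): pindexes below NUL2E
 * name L3 pages, those in [NUL2E, NUL2E + NUL1E) name L2 pages, and higher
 * values name L1 pages.
 */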
155
156#if !defined(DIAGNOSTIC)
157#ifdef __GNUC_GNU_INLINE__
158#define PMAP_INLINE	__attribute__((__gnu_inline__)) inline
159#else
160#define PMAP_INLINE	extern inline
161#endif
162#else
163#define PMAP_INLINE
164#endif
165
166/*
167 * These are indices into the memory attributes held in mair_el1, set up in locore.S.
168 */
169#define	DEVICE_MEMORY	0
170#define	UNCACHED_MEMORY	1
171#define	CACHED_MEMORY	2
172
173
174#ifdef PV_STATS
175#define PV_STAT(x)	do { x ; } while (0)
176#else
177#define PV_STAT(x)	do { } while (0)
178#endif
179
180#define	pmap_l2_pindex(v)	((v) >> L2_SHIFT)
181#define	pa_to_pvh(pa)		(&pv_table[pmap_l2_pindex(pa)])
182
183#define	NPV_LIST_LOCKS	MAXCPU
184
185#define	PHYS_TO_PV_LIST_LOCK(pa)	\
186			(&pv_list_locks[pa_index(pa) % NPV_LIST_LOCKS])
187
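/*
 * CHANGE_PV_LIST_LOCK_TO_PHYS() points *lockp at the pv list lock covering
 * the given physical address, dropping whichever lock was held before.  A
 * sketch of the usual calling pattern (hypothetical local names):
 *
 *	struct rwlock *lock = NULL;
 *	CHANGE_PV_LIST_LOCK_TO_VM_PAGE(&lock, m);
 *	... examine or modify the pv list of m ...
 *	RELEASE_PV_LIST_LOCK(&lock);
 */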
188#define	CHANGE_PV_LIST_LOCK_TO_PHYS(lockp, pa)	do {	\
189	struct rwlock **_lockp = (lockp);		\
190	struct rwlock *_new_lock;			\
191							\
192	_new_lock = PHYS_TO_PV_LIST_LOCK(pa);		\
193	if (_new_lock != *_lockp) {			\
194		if (*_lockp != NULL)			\
195			rw_wunlock(*_lockp);		\
196		*_lockp = _new_lock;			\
197		rw_wlock(*_lockp);			\
198	}						\
199} while (0)
200
201#define	CHANGE_PV_LIST_LOCK_TO_VM_PAGE(lockp, m)	\
202			CHANGE_PV_LIST_LOCK_TO_PHYS(lockp, VM_PAGE_TO_PHYS(m))
203
204#define	RELEASE_PV_LIST_LOCK(lockp)		do {	\
205	struct rwlock **_lockp = (lockp);		\
206							\
207	if (*_lockp != NULL) {				\
208		rw_wunlock(*_lockp);			\
209		*_lockp = NULL;				\
210	}						\
211} while (0)
212
213#define	VM_PAGE_TO_PV_LIST_LOCK(m)	\
214			PHYS_TO_PV_LIST_LOCK(VM_PAGE_TO_PHYS(m))
215
216struct pmap kernel_pmap_store;
217
218vm_offset_t virtual_avail;	/* VA of first avail page (after kernel bss) */
219vm_offset_t virtual_end;	/* VA of last avail page (end of kernel AS) */
220vm_offset_t kernel_vm_end = 0;
221
222struct msgbuf *msgbufp = NULL;
223
224/*
225 * Data for the pv entry allocation mechanism.
226 * Updates to pv_invl_gen are protected by the pv_list_locks[]
227 * elements, but reads are not.
228 */
229static struct md_page *pv_table;
230static struct md_page pv_dummy;
231
232vm_paddr_t dmap_phys_base;	/* The start of the dmap region */
233vm_paddr_t dmap_phys_max;	/* The limit of the dmap region */
234vm_offset_t dmap_max_addr;	/* The virtual address limit of the dmap */
235
236/* This code assumes all L1 DMAP entries will be used */
237CTASSERT((DMAP_MIN_ADDRESS  & ~L0_OFFSET) == DMAP_MIN_ADDRESS);
238CTASSERT((DMAP_MAX_ADDRESS  & ~L0_OFFSET) == DMAP_MAX_ADDRESS);
239
240#define	DMAP_TABLES	((DMAP_MAX_ADDRESS - DMAP_MIN_ADDRESS) >> L0_SHIFT)
241extern pt_entry_t pagetable_dmap[];
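/*
 * DMAP_TABLES is the number of L0 slots spanned by the direct map; the
 * bootstrap code provides that many pages of L1 tables in pagetable_dmap,
 * which pmap_bootstrap_dmap() below fills with L1 block entries.
 */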
242
243static SYSCTL_NODE(_vm, OID_AUTO, pmap, CTLFLAG_RD, 0, "VM/pmap parameters");
244
245static int superpages_enabled = 0;
246SYSCTL_INT(_vm_pmap, OID_AUTO, superpages_enabled,
247    CTLFLAG_RDTUN | CTLFLAG_NOFETCH, &superpages_enabled, 0,
248    "Are large page mappings enabled?");
249
250/*
251 * Data for the pv entry allocation mechanism
252 */
253static TAILQ_HEAD(pch, pv_chunk) pv_chunks = TAILQ_HEAD_INITIALIZER(pv_chunks);
254static struct mtx pv_chunks_mutex;
255static struct rwlock pv_list_locks[NPV_LIST_LOCKS];
256
257static void	free_pv_chunk(struct pv_chunk *pc);
258static void	free_pv_entry(pmap_t pmap, pv_entry_t pv);
259static pv_entry_t get_pv_entry(pmap_t pmap, struct rwlock **lockp);
260static vm_page_t reclaim_pv_chunk(pmap_t locked_pmap, struct rwlock **lockp);
261static void	pmap_pvh_free(struct md_page *pvh, pmap_t pmap, vm_offset_t va);
262static pv_entry_t pmap_pvh_remove(struct md_page *pvh, pmap_t pmap,
263		    vm_offset_t va);
264
265static int pmap_change_attr(vm_offset_t va, vm_size_t size, int mode);
266static int pmap_change_attr_locked(vm_offset_t va, vm_size_t size, int mode);
267static pt_entry_t *pmap_demote_l1(pmap_t pmap, pt_entry_t *l1, vm_offset_t va);
268static pt_entry_t *pmap_demote_l2_locked(pmap_t pmap, pt_entry_t *l2,
269    vm_offset_t va, struct rwlock **lockp);
270static pt_entry_t *pmap_demote_l2(pmap_t pmap, pt_entry_t *l2, vm_offset_t va);
271static vm_page_t pmap_enter_quick_locked(pmap_t pmap, vm_offset_t va,
272    vm_page_t m, vm_prot_t prot, vm_page_t mpte, struct rwlock **lockp);
273static int pmap_remove_l3(pmap_t pmap, pt_entry_t *l3, vm_offset_t sva,
274    pd_entry_t ptepde, struct spglist *free, struct rwlock **lockp);
275static boolean_t pmap_try_insert_pv_entry(pmap_t pmap, vm_offset_t va,
276    vm_page_t m, struct rwlock **lockp);
277
278static vm_page_t _pmap_alloc_l3(pmap_t pmap, vm_pindex_t ptepindex,
279		struct rwlock **lockp);
280
281static void _pmap_unwire_l3(pmap_t pmap, vm_offset_t va, vm_page_t m,
282    struct spglist *free);
283static int pmap_unuse_l3(pmap_t, vm_offset_t, pd_entry_t, struct spglist *);
284
285/*
286 * These load the old table data and store the new value.
287 * They need to be atomic as the System MMU may write to the table at
288 * the same time as the CPU.
289 */
290#define	pmap_load_store(table, entry) atomic_swap_64(table, entry)
291#define	pmap_set(table, mask) atomic_set_64(table, mask)
292#define	pmap_load_clear(table) atomic_swap_64(table, 0)
293#define	pmap_load(table) (*table)
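/*
 * A sketch of how these primitives are combined when a mapping is torn
 * down elsewhere in this file: clear the entry atomically, write it back
 * with PTE_SYNC() (defined below), then invalidate the stale TLB entry:
 *
 *	pmap_load_clear(l3);
 *	PTE_SYNC(l3);
 *	pmap_invalidate_page(pmap, va);
 */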
294
295/********************/
296/* Inline functions */
297/********************/
298
299static __inline void
300pagecopy(void *s, void *d)
301{
302
303	memcpy(d, s, PAGE_SIZE);
304}
305
306#define	pmap_l0_index(va)	(((va) >> L0_SHIFT) & L0_ADDR_MASK)
307#define	pmap_l1_index(va)	(((va) >> L1_SHIFT) & Ln_ADDR_MASK)
308#define	pmap_l2_index(va)	(((va) >> L2_SHIFT) & Ln_ADDR_MASK)
309#define	pmap_l3_index(va)	(((va) >> L3_SHIFT) & Ln_ADDR_MASK)
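/*
 * Example, assuming the 4KB translation granule used by this pmap: the
 * shifts are L0 = 39, L1 = 30, L2 = 21 and L3 = 12, so each macro extracts
 * a 9-bit table index and an entry at each level covers 512GB, 1GB, 2MB
 * and 4KB of address space respectively.
 */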
310
311static __inline pd_entry_t *
312pmap_l0(pmap_t pmap, vm_offset_t va)
313{
314
315	return (&pmap->pm_l0[pmap_l0_index(va)]);
316}
317
318static __inline pd_entry_t *
319pmap_l0_to_l1(pd_entry_t *l0, vm_offset_t va)
320{
321	pd_entry_t *l1;
322
323	l1 = (pd_entry_t *)PHYS_TO_DMAP(pmap_load(l0) & ~ATTR_MASK);
324	return (&l1[pmap_l1_index(va)]);
325}
326
327static __inline pd_entry_t *
328pmap_l1(pmap_t pmap, vm_offset_t va)
329{
330	pd_entry_t *l0;
331
332	l0 = pmap_l0(pmap, va);
333	if ((pmap_load(l0) & ATTR_DESCR_MASK) != L0_TABLE)
334		return (NULL);
335
336	return (pmap_l0_to_l1(l0, va));
337}
338
339static __inline pd_entry_t *
340pmap_l1_to_l2(pd_entry_t *l1, vm_offset_t va)
341{
342	pd_entry_t *l2;
343
344	l2 = (pd_entry_t *)PHYS_TO_DMAP(pmap_load(l1) & ~ATTR_MASK);
345	return (&l2[pmap_l2_index(va)]);
346}
347
348static __inline pd_entry_t *
349pmap_l2(pmap_t pmap, vm_offset_t va)
350{
351	pd_entry_t *l1;
352
353	l1 = pmap_l1(pmap, va);
354	if ((pmap_load(l1) & ATTR_DESCR_MASK) != L1_TABLE)
355		return (NULL);
356
357	return (pmap_l1_to_l2(l1, va));
358}
359
360static __inline pt_entry_t *
361pmap_l2_to_l3(pd_entry_t *l2, vm_offset_t va)
362{
363	pt_entry_t *l3;
364
365	l3 = (pd_entry_t *)PHYS_TO_DMAP(pmap_load(l2) & ~ATTR_MASK);
366	return (&l3[pmap_l3_index(va)]);
367}
368
369/*
370 * Returns the lowest valid pde for a given virtual address.
371 * The next level may or may not point to a valid page or block.
372 */
373static __inline pd_entry_t *
374pmap_pde(pmap_t pmap, vm_offset_t va, int *level)
375{
376	pd_entry_t *l0, *l1, *l2, desc;
377
378	l0 = pmap_l0(pmap, va);
379	desc = pmap_load(l0) & ATTR_DESCR_MASK;
380	if (desc != L0_TABLE) {
381		*level = -1;
382		return (NULL);
383	}
384
385	l1 = pmap_l0_to_l1(l0, va);
386	desc = pmap_load(l1) & ATTR_DESCR_MASK;
387	if (desc != L1_TABLE) {
388		*level = 0;
389		return (l0);
390	}
391
392	l2 = pmap_l1_to_l2(l1, va);
393	desc = pmap_load(l2) & ATTR_DESCR_MASK;
394	if (desc != L2_TABLE) {
395		*level = 1;
396		return (l1);
397	}
398
399	*level = 2;
400	return (l2);
401}
402
403/*
404 * Returns the lowest valid pte block or table entry for a given virtual
405 * address.  If there is no valid entry, return NULL and set the level to
406 * the first invalid level.
407 */
408static __inline pt_entry_t *
409pmap_pte(pmap_t pmap, vm_offset_t va, int *level)
410{
411	pd_entry_t *l1, *l2, desc;
412	pt_entry_t *l3;
413
414	l1 = pmap_l1(pmap, va);
415	if (l1 == NULL) {
416		*level = 0;
417		return (NULL);
418	}
419	desc = pmap_load(l1) & ATTR_DESCR_MASK;
420	if (desc == L1_BLOCK) {
421		*level = 1;
422		return (l1);
423	}
424
425	if (desc != L1_TABLE) {
426		*level = 1;
427		return (NULL);
428	}
429
430	l2 = pmap_l1_to_l2(l1, va);
431	desc = pmap_load(l2) & ATTR_DESCR_MASK;
432	if (desc == L2_BLOCK) {
433		*level = 2;
434		return (l2);
435	}
436
437	if (desc != L2_TABLE) {
438		*level = 2;
439		return (NULL);
440	}
441
442	*level = 3;
443	l3 = pmap_l2_to_l3(l2, va);
444	if ((pmap_load(l3) & ATTR_DESCR_MASK) != L3_PAGE)
445		return (NULL);
446
447	return (l3);
448}
449
450static inline bool
451pmap_superpages_enabled(void)
452{
453
454	return (superpages_enabled != 0);
455}
456
457bool
458pmap_get_tables(pmap_t pmap, vm_offset_t va, pd_entry_t **l0, pd_entry_t **l1,
459    pd_entry_t **l2, pt_entry_t **l3)
460{
461	pd_entry_t *l0p, *l1p, *l2p;
462
463	if (pmap->pm_l0 == NULL)
464		return (false);
465
466	l0p = pmap_l0(pmap, va);
467	*l0 = l0p;
468
469	if ((pmap_load(l0p) & ATTR_DESCR_MASK) != L0_TABLE)
470		return (false);
471
472	l1p = pmap_l0_to_l1(l0p, va);
473	*l1 = l1p;
474
475	if ((pmap_load(l1p) & ATTR_DESCR_MASK) == L1_BLOCK) {
476		*l2 = NULL;
477		*l3 = NULL;
478		return (true);
479	}
480
481	if ((pmap_load(l1p) & ATTR_DESCR_MASK) != L1_TABLE)
482		return (false);
483
484	l2p = pmap_l1_to_l2(l1p, va);
485	*l2 = l2p;
486
487	if ((pmap_load(l2p) & ATTR_DESCR_MASK) == L2_BLOCK) {
488		*l3 = NULL;
489		return (true);
490	}
491
492	*l3 = pmap_l2_to_l3(l2p, va);
493
494	return (true);
495}
496
497static __inline int
498pmap_is_current(pmap_t pmap)
499{
500
501	return ((pmap == pmap_kernel()) ||
502	    (pmap == curthread->td_proc->p_vmspace->vm_map.pmap));
503}
504
505static __inline int
506pmap_l3_valid(pt_entry_t l3)
507{
508
509	return ((l3 & ATTR_DESCR_MASK) == L3_PAGE);
510}
511
512
513/* Is a level 1 or 2 entry a valid and cacheable block? */
514CTASSERT(L1_BLOCK == L2_BLOCK);
515static __inline int
516pmap_pte_valid_cacheable(pt_entry_t pte)
517{
518
519	return (((pte & ATTR_DESCR_MASK) == L1_BLOCK) &&
520	    ((pte & ATTR_IDX_MASK) == ATTR_IDX(CACHED_MEMORY)));
521}
522
523static __inline int
524pmap_l3_valid_cacheable(pt_entry_t l3)
525{
526
527	return (((l3 & ATTR_DESCR_MASK) == L3_PAGE) &&
528	    ((l3 & ATTR_IDX_MASK) == ATTR_IDX(CACHED_MEMORY)));
529}
530
531#define	PTE_SYNC(pte)	cpu_dcache_wb_range((vm_offset_t)pte, sizeof(*pte))
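/*
 * PTE_SYNC() writes the cache line holding a page table entry back to
 * memory.  This caters for implementations whose table walker does not
 * snoop the data cache: an updated entry must reach memory before the
 * corresponding TLB entry is invalidated or the mapping is first used.
 */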
532
533/*
534 * Checks if the page is dirty.  We currently lack proper dirty tracking on
535 * arm64, so for now assume that a page mapped read/write and accessed is dirty.
536 */
537static inline int
538pmap_page_dirty(pt_entry_t pte)
539{
540
541	return ((pte & (ATTR_AF | ATTR_AP_RW_BIT)) ==
542	    (ATTR_AF | ATTR_AP(ATTR_AP_RW)));
543}
544
545static __inline void
546pmap_resident_count_inc(pmap_t pmap, int count)
547{
548
549	PMAP_LOCK_ASSERT(pmap, MA_OWNED);
550	pmap->pm_stats.resident_count += count;
551}
552
553static __inline void
554pmap_resident_count_dec(pmap_t pmap, int count)
555{
556
557	PMAP_LOCK_ASSERT(pmap, MA_OWNED);
558	KASSERT(pmap->pm_stats.resident_count >= count,
559	    ("pmap %p resident count underflow %ld %d", pmap,
560	    pmap->pm_stats.resident_count, count));
561	pmap->pm_stats.resident_count -= count;
562}
563
564static pt_entry_t *
565pmap_early_page_idx(vm_offset_t l1pt, vm_offset_t va, u_int *l1_slot,
566    u_int *l2_slot)
567{
568	pt_entry_t *l2;
569	pd_entry_t *l1;
570
571	l1 = (pd_entry_t *)l1pt;
572	*l1_slot = (va >> L1_SHIFT) & Ln_ADDR_MASK;
573
574	/* Check that locore used an L1 table mapping */
575	KASSERT((l1[*l1_slot] & ATTR_DESCR_MASK) == L1_TABLE,
576	   ("Invalid bootstrap L1 table"));
577	/* Find the address of the L2 table */
578	l2 = (pt_entry_t *)init_pt_va;
579	*l2_slot = pmap_l2_index(va);
580
581	return (l2);
582}
583
584static vm_paddr_t
585pmap_early_vtophys(vm_offset_t l1pt, vm_offset_t va)
586{
587	u_int l1_slot, l2_slot;
588	pt_entry_t *l2;
589
590	l2 = pmap_early_page_idx(l1pt, va, &l1_slot, &l2_slot);
591
592	return ((l2[l2_slot] & ~ATTR_MASK) + (va & L2_OFFSET));
593}
594
595static void
596pmap_bootstrap_dmap(vm_offset_t kern_l1, vm_paddr_t min_pa, vm_paddr_t max_pa)
597{
598	vm_offset_t va;
599	vm_paddr_t pa;
600	u_int l1_slot;
601
602	pa = dmap_phys_base = min_pa & ~L1_OFFSET;
603	va = DMAP_MIN_ADDRESS;
604	for (; va < DMAP_MAX_ADDRESS && pa < max_pa;
605	    pa += L1_SIZE, va += L1_SIZE, l1_slot++) {
606		l1_slot = ((va - DMAP_MIN_ADDRESS) >> L1_SHIFT);
607
608		pmap_load_store(&pagetable_dmap[l1_slot],
609		    (pa & ~L1_OFFSET) | ATTR_DEFAULT |
610		    ATTR_IDX(CACHED_MEMORY) | L1_BLOCK);
611	}
612
613	/* Set the upper limit of the DMAP region */
614	dmap_phys_max = pa;
615	dmap_max_addr = va;
616
617	cpu_dcache_wb_range((vm_offset_t)pagetable_dmap,
618	    PAGE_SIZE * DMAP_TABLES);
619	cpu_tlb_flushID();
620}
621
622static vm_offset_t
623pmap_bootstrap_l2(vm_offset_t l1pt, vm_offset_t va, vm_offset_t l2_start)
624{
625	vm_offset_t l2pt;
626	vm_paddr_t pa;
627	pd_entry_t *l1;
628	u_int l1_slot;
629
630	KASSERT((va & L1_OFFSET) == 0, ("Invalid virtual address"));
631
632	l1 = (pd_entry_t *)l1pt;
633	l1_slot = pmap_l1_index(va);
634	l2pt = l2_start;
635
636	for (; va < VM_MAX_KERNEL_ADDRESS; l1_slot++, va += L1_SIZE) {
637		KASSERT(l1_slot < Ln_ENTRIES, ("Invalid L1 index"));
638
639		pa = pmap_early_vtophys(l1pt, l2pt);
640		pmap_load_store(&l1[l1_slot],
641		    (pa & ~Ln_TABLE_MASK) | L1_TABLE);
642		l2pt += PAGE_SIZE;
643	}
644
645	/* Clean the L2 page table */
646	memset((void *)l2_start, 0, l2pt - l2_start);
647	cpu_dcache_wb_range(l2_start, l2pt - l2_start);
648
649	/* Flush the l1 table to ram */
650	cpu_dcache_wb_range((vm_offset_t)l1, PAGE_SIZE);
651
652	return l2pt;
653}
654
655static vm_offset_t
656pmap_bootstrap_l3(vm_offset_t l1pt, vm_offset_t va, vm_offset_t l3_start)
657{
658	vm_offset_t l2pt, l3pt;
659	vm_paddr_t pa;
660	pd_entry_t *l2;
661	u_int l2_slot;
662
663	KASSERT((va & L2_OFFSET) == 0, ("Invalid virtual address"));
664
665	l2 = pmap_l2(kernel_pmap, va);
666	l2 = (pd_entry_t *)rounddown2((uintptr_t)l2, PAGE_SIZE);
667	l2pt = (vm_offset_t)l2;
668	l2_slot = pmap_l2_index(va);
669	l3pt = l3_start;
670
671	for (; va < VM_MAX_KERNEL_ADDRESS; l2_slot++, va += L2_SIZE) {
672		KASSERT(l2_slot < Ln_ENTRIES, ("Invalid L2 index"));
673
674		pa = pmap_early_vtophys(l1pt, l3pt);
675		pmap_load_store(&l2[l2_slot],
676		    (pa & ~Ln_TABLE_MASK) | L2_TABLE);
677		l3pt += PAGE_SIZE;
678	}
679
680	/* Clean the L3 page table */
681	memset((void *)l3_start, 0, l3pt - l3_start);
682	cpu_dcache_wb_range(l3_start, l3pt - l3_start);
683
684	cpu_dcache_wb_range((vm_offset_t)l2, PAGE_SIZE);
685
686	return l3pt;
687}
688
689/*
690 *	Bootstrap the system enough to run with virtual memory.
691 */
692void
693pmap_bootstrap(vm_offset_t l0pt, vm_offset_t l1pt, vm_paddr_t kernstart,
694    vm_size_t kernlen)
695{
696	u_int l1_slot, l2_slot, avail_slot, map_slot, used_map_slot;
697	uint64_t kern_delta;
698	pt_entry_t *l2;
699	vm_offset_t va, freemempos;
700	vm_offset_t dpcpu, msgbufpv;
701	vm_paddr_t pa, max_pa, min_pa;
702	int i;
703
704	kern_delta = KERNBASE - kernstart;
705	physmem = 0;
706
707	printf("pmap_bootstrap %lx %lx %lx\n", l1pt, kernstart, kernlen);
708	printf("%lx\n", l1pt);
709	printf("%lx\n", (KERNBASE >> L1_SHIFT) & Ln_ADDR_MASK);
710
711	/* Set this early so we can use the pagetable walking functions */
712	kernel_pmap_store.pm_l0 = (pd_entry_t *)l0pt;
713	PMAP_LOCK_INIT(kernel_pmap);
714
715	/* Assume the address we were loaded to is a valid physical address */
716	min_pa = max_pa = KERNBASE - kern_delta;
717
718	/*
719	 * Find the minimum and maximum physical addresses.  physmap is
720	 * sorted, but may contain empty ranges.
721	 */
722	for (i = 0; i < (physmap_idx * 2); i += 2) {
723		if (physmap[i] == physmap[i + 1])
724			continue;
725		if (physmap[i] <= min_pa)
726			min_pa = physmap[i];
727		if (physmap[i + 1] > max_pa)
728			max_pa = physmap[i + 1];
729	}
730
731	/* Create a direct map region early so we can use it for pa -> va */
732	pmap_bootstrap_dmap(l1pt, min_pa, max_pa);
733
734	va = KERNBASE;
735	pa = KERNBASE - kern_delta;
736
737	/*
738	 * Start to initialise phys_avail by copying from physmap
739	 * up to the physical address KERNBASE points at.
740	 */
741	map_slot = avail_slot = 0;
742	for (; map_slot < (physmap_idx * 2) &&
743	    avail_slot < (PHYS_AVAIL_SIZE - 2); map_slot += 2) {
744		if (physmap[map_slot] == physmap[map_slot + 1])
745			continue;
746
747		if (physmap[map_slot] <= pa &&
748		    physmap[map_slot + 1] > pa)
749			break;
750
751		phys_avail[avail_slot] = physmap[map_slot];
752		phys_avail[avail_slot + 1] = physmap[map_slot + 1];
753		physmem += (phys_avail[avail_slot + 1] -
754		    phys_avail[avail_slot]) >> PAGE_SHIFT;
755		avail_slot += 2;
756	}
757
758	/* Add the memory before the kernel */
759	if (physmap[avail_slot] < pa && avail_slot < (PHYS_AVAIL_SIZE - 2)) {
760		phys_avail[avail_slot] = physmap[map_slot];
761		phys_avail[avail_slot + 1] = pa;
762		physmem += (phys_avail[avail_slot + 1] -
763		    phys_avail[avail_slot]) >> PAGE_SHIFT;
764		avail_slot += 2;
765	}
766	used_map_slot = map_slot;
767
768	/*
769	 * Read the page table to find out what is already mapped.
770	 * This assumes we have mapped a block of memory from KERNBASE
771	 * using a single L1 entry.
772	 */
773	l2 = pmap_early_page_idx(l1pt, KERNBASE, &l1_slot, &l2_slot);
774
775	/* Sanity check the index, KERNBASE should be the first VA */
776	KASSERT(l2_slot == 0, ("The L2 index is non-zero"));
777
778	/* Find how many pages we have mapped */
779	for (; l2_slot < Ln_ENTRIES; l2_slot++) {
780		if ((l2[l2_slot] & ATTR_DESCR_MASK) == 0)
781			break;
782
783		/* Check locore used L2 blocks */
784		KASSERT((l2[l2_slot] & ATTR_DESCR_MASK) == L2_BLOCK,
785		    ("Invalid bootstrap L2 table"));
786		KASSERT((l2[l2_slot] & ~ATTR_MASK) == pa,
787		    ("Incorrect PA in L2 table"));
788
789		va += L2_SIZE;
790		pa += L2_SIZE;
791	}
792
793	va = roundup2(va, L1_SIZE);
794
795	freemempos = KERNBASE + kernlen;
796	freemempos = roundup2(freemempos, PAGE_SIZE);
797	/* Create the l2 tables up to VM_MAX_KERNEL_ADDRESS */
798	freemempos = pmap_bootstrap_l2(l1pt, va, freemempos);
799	/* And the l3 tables for the early devmap */
800	freemempos = pmap_bootstrap_l3(l1pt,
801	    VM_MAX_KERNEL_ADDRESS - L2_SIZE, freemempos);
802
803	cpu_tlb_flushID();
804
805#define alloc_pages(var, np)						\
806	(var) = freemempos;						\
807	freemempos += (np * PAGE_SIZE);					\
808	memset((char *)(var), 0, ((np) * PAGE_SIZE));
809
810	/* Allocate dynamic per-cpu area. */
811	alloc_pages(dpcpu, DPCPU_SIZE / PAGE_SIZE);
812	dpcpu_init((void *)dpcpu, 0);
813
814	/* Allocate memory for the msgbuf, e.g. for /sbin/dmesg */
815	alloc_pages(msgbufpv, round_page(msgbufsize) / PAGE_SIZE);
816	msgbufp = (void *)msgbufpv;
817
818	virtual_avail = roundup2(freemempos, L1_SIZE);
819	virtual_end = VM_MAX_KERNEL_ADDRESS - L2_SIZE;
820	kernel_vm_end = virtual_avail;
821
822	pa = pmap_early_vtophys(l1pt, freemempos);
823
824	/* Finish initialising phys_avail */
825	map_slot = used_map_slot;
826	for (; avail_slot < (PHYS_AVAIL_SIZE - 2) &&
827	    map_slot < (physmap_idx * 2); map_slot += 2) {
828		if (physmap[map_slot] == physmap[map_slot + 1])
829			continue;
830
831		/* Have we used the current range? */
832		if (physmap[map_slot + 1] <= pa)
833			continue;
834
835		/* Do we need to split the entry? */
836		if (physmap[map_slot] < pa) {
837			phys_avail[avail_slot] = pa;
838			phys_avail[avail_slot + 1] = physmap[map_slot + 1];
839		} else {
840			phys_avail[avail_slot] = physmap[map_slot];
841			phys_avail[avail_slot + 1] = physmap[map_slot + 1];
842		}
843		physmem += (phys_avail[avail_slot + 1] -
844		    phys_avail[avail_slot]) >> PAGE_SHIFT;
845
846		avail_slot += 2;
847	}
848	phys_avail[avail_slot] = 0;
849	phys_avail[avail_slot + 1] = 0;
850
851	/*
852	 * Maxmem isn't the "maximum memory", it's one larger than the
853	 * highest page of the physical address space.  It should be
854	 * called something like "Maxphyspage".
855	 */
856	Maxmem = atop(phys_avail[avail_slot - 1]);
857
858	cpu_tlb_flushID();
859}
860
861/*
862 *	Initialize a vm_page's machine-dependent fields.
863 */
864void
865pmap_page_init(vm_page_t m)
866{
867
868	TAILQ_INIT(&m->md.pv_list);
869	m->md.pv_memattr = VM_MEMATTR_WRITE_BACK;
870}
871
872/*
873 *	Initialize the pmap module.
874 *	Called by vm_init, to initialize any structures that the pmap
875 *	system needs to map virtual memory.
876 */
877void
878pmap_init(void)
879{
880	vm_size_t s;
881	int i, pv_npg;
882
883	/*
884	 * Are large page mappings enabled?
885	 */
886	TUNABLE_INT_FETCH("vm.pmap.superpages_enabled", &superpages_enabled);
887
888	/*
889	 * Initialize the pv chunk list mutex.
890	 */
891	mtx_init(&pv_chunks_mutex, "pmap pv chunk list", NULL, MTX_DEF);
892
893	/*
894	 * Initialize the pool of pv list locks.
895	 */
896	for (i = 0; i < NPV_LIST_LOCKS; i++)
897		rw_init(&pv_list_locks[i], "pmap pv list");
898
899	/*
900	 * Calculate the size of the pv head table for superpages.
901	 */
902	pv_npg = howmany(vm_phys_segs[vm_phys_nsegs - 1].end, L2_SIZE);
903
904	/*
905	 * Allocate memory for the pv head table for superpages.
906	 */
907	s = (vm_size_t)(pv_npg * sizeof(struct md_page));
908	s = round_page(s);
909	pv_table = (struct md_page *)kmem_malloc(kernel_arena, s,
910	    M_WAITOK | M_ZERO);
911	for (i = 0; i < pv_npg; i++)
912		TAILQ_INIT(&pv_table[i].pv_list);
913	TAILQ_INIT(&pv_dummy.pv_list);
914}
915
916static SYSCTL_NODE(_vm_pmap, OID_AUTO, l2, CTLFLAG_RD, 0,
917    "2MB page mapping counters");
918
919static u_long pmap_l2_demotions;
920SYSCTL_ULONG(_vm_pmap_l2, OID_AUTO, demotions, CTLFLAG_RD,
921    &pmap_l2_demotions, 0, "2MB page demotions");
922
923static u_long pmap_l2_p_failures;
924SYSCTL_ULONG(_vm_pmap_l2, OID_AUTO, p_failures, CTLFLAG_RD,
925    &pmap_l2_p_failures, 0, "2MB page promotion failures");
926
927static u_long pmap_l2_promotions;
928SYSCTL_ULONG(_vm_pmap_l2, OID_AUTO, promotions, CTLFLAG_RD,
929    &pmap_l2_promotions, 0, "2MB page promotions");
930
931/*
932 * Invalidate a single TLB entry.
933 */
934PMAP_INLINE void
935pmap_invalidate_page(pmap_t pmap, vm_offset_t va)
936{
937
938	sched_pin();
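	/*
	 * "tlbi vaae1is" invalidates the page for all ASIDs at EL1 across
	 * the Inner Shareable domain; its operand is the VA in page units,
	 * hence the shift by PAGE_SHIFT.
	 */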
939	__asm __volatile(
940	    "dsb  ishst		\n"
941	    "tlbi vaae1is, %0	\n"
942	    "dsb  ish		\n"
943	    "isb		\n"
944	    : : "r"(va >> PAGE_SHIFT));
945	sched_unpin();
946}
947
948PMAP_INLINE void
949pmap_invalidate_range(pmap_t pmap, vm_offset_t sva, vm_offset_t eva)
950{
951	vm_offset_t addr;
952
953	sched_pin();
954	dsb(ishst);
955	for (addr = sva; addr < eva; addr += PAGE_SIZE) {
956		__asm __volatile(
957		    "tlbi vaae1is, %0" : : "r"(addr >> PAGE_SHIFT));
958	}
959	__asm __volatile(
960	    "dsb  ish	\n"
961	    "isb	\n");
962	sched_unpin();
963}
964
965PMAP_INLINE void
966pmap_invalidate_all(pmap_t pmap)
967{
968
969	sched_pin();
970	__asm __volatile(
971	    "dsb  ishst		\n"
972	    "tlbi vmalle1is	\n"
973	    "dsb  ish		\n"
974	    "isb		\n");
975	sched_unpin();
976}
977
978/*
979 *	Routine:	pmap_extract
980 *	Function:
981 *		Extract the physical page address associated
982 *		with the given map/virtual_address pair.
983 */
984vm_paddr_t
985pmap_extract(pmap_t pmap, vm_offset_t va)
986{
987	pt_entry_t *pte, tpte;
988	vm_paddr_t pa;
989	int lvl;
990
991	pa = 0;
992	PMAP_LOCK(pmap);
993	/*
994	 * Find the block or page map for this virtual address. pmap_pte
995	 * will return either a valid block/page entry, or NULL.
996	 */
997	pte = pmap_pte(pmap, va, &lvl);
998	if (pte != NULL) {
999		tpte = pmap_load(pte);
1000		pa = tpte & ~ATTR_MASK;
1001		switch(lvl) {
1002		case 1:
1003			KASSERT((tpte & ATTR_DESCR_MASK) == L1_BLOCK,
1004			    ("pmap_extract: Invalid L1 pte found: %lx",
1005			    tpte & ATTR_DESCR_MASK));
1006			pa |= (va & L1_OFFSET);
1007			break;
1008		case 2:
1009			KASSERT((tpte & ATTR_DESCR_MASK) == L2_BLOCK,
1010			    ("pmap_extract: Invalid L2 pte found: %lx",
1011			    tpte & ATTR_DESCR_MASK));
1012			pa |= (va & L2_OFFSET);
1013			break;
1014		case 3:
1015			KASSERT((tpte & ATTR_DESCR_MASK) == L3_PAGE,
1016			    ("pmap_extract: Invalid L3 pte found: %lx",
1017			    tpte & ATTR_DESCR_MASK));
1018			pa |= (va & L3_OFFSET);
1019			break;
1020		}
1021	}
1022	PMAP_UNLOCK(pmap);
1023	return (pa);
1024}
1025
1026/*
1027 *	Routine:	pmap_extract_and_hold
1028 *	Function:
1029 *		Atomically extract and hold the physical page
1030 *		with the given pmap and virtual address pair
1031 *		if that mapping permits the given protection.
1032 */
1033vm_page_t
1034pmap_extract_and_hold(pmap_t pmap, vm_offset_t va, vm_prot_t prot)
1035{
1036	pt_entry_t *pte, tpte;
1037	vm_offset_t off;
1038	vm_paddr_t pa;
1039	vm_page_t m;
1040	int lvl;
1041
1042	pa = 0;
1043	m = NULL;
1044	PMAP_LOCK(pmap);
1045retry:
1046	pte = pmap_pte(pmap, va, &lvl);
1047	if (pte != NULL) {
1048		tpte = pmap_load(pte);
1049
1050		KASSERT(lvl > 0 && lvl <= 3,
1051		    ("pmap_extract_and_hold: Invalid level %d", lvl));
1052		CTASSERT(L1_BLOCK == L2_BLOCK);
1053		KASSERT((lvl == 3 && (tpte & ATTR_DESCR_MASK) == L3_PAGE) ||
1054		    (lvl < 3 && (tpte & ATTR_DESCR_MASK) == L1_BLOCK),
1055		    ("pmap_extract_and_hold: Invalid pte at L%d: %lx", lvl,
1056		     tpte & ATTR_DESCR_MASK));
1057		if (((tpte & ATTR_AP_RW_BIT) == ATTR_AP(ATTR_AP_RW)) ||
1058		    ((prot & VM_PROT_WRITE) == 0)) {
1059			switch(lvl) {
1060			case 1:
1061				off = va & L1_OFFSET;
1062				break;
1063			case 2:
1064				off = va & L2_OFFSET;
1065				break;
1066			case 3:
1067			default:
1068				off = 0;
1069			}
1070			if (vm_page_pa_tryrelock(pmap,
1071			    (tpte & ~ATTR_MASK) | off, &pa))
1072				goto retry;
1073			m = PHYS_TO_VM_PAGE((tpte & ~ATTR_MASK) | off);
1074			vm_page_hold(m);
1075		}
1076	}
1077	PA_UNLOCK_COND(pa);
1078	PMAP_UNLOCK(pmap);
1079	return (m);
1080}
1081
1082vm_paddr_t
1083pmap_kextract(vm_offset_t va)
1084{
1085	pt_entry_t *pte, tpte;
1086	vm_paddr_t pa;
1087	int lvl;
1088
1089	if (va >= DMAP_MIN_ADDRESS && va < DMAP_MAX_ADDRESS) {
1090		pa = DMAP_TO_PHYS(va);
1091	} else {
1092		pa = 0;
1093		pte = pmap_pte(kernel_pmap, va, &lvl);
1094		if (pte != NULL) {
1095			tpte = pmap_load(pte);
1096			pa = tpte & ~ATTR_MASK;
1097			switch(lvl) {
1098			case 1:
1099				KASSERT((tpte & ATTR_DESCR_MASK) == L1_BLOCK,
1100				    ("pmap_kextract: Invalid L1 pte found: %lx",
1101				    tpte & ATTR_DESCR_MASK));
1102				pa |= (va & L1_OFFSET);
1103				break;
1104			case 2:
1105				KASSERT((tpte & ATTR_DESCR_MASK) == L2_BLOCK,
1106				    ("pmap_kextract: Invalid L2 pte found: %lx",
1107				    tpte & ATTR_DESCR_MASK));
1108				pa |= (va & L2_OFFSET);
1109				break;
1110			case 3:
1111				KASSERT((tpte & ATTR_DESCR_MASK) == L3_PAGE,
1112				    ("pmap_kextract: Invalid L3 pte found: %lx",
1113				    tpte & ATTR_DESCR_MASK));
1114				pa |= (va & L3_OFFSET);
1115				break;
1116			}
1117		}
1118	}
1119	return (pa);
1120}
1121
1122/***************************************************
1123 * Low level mapping routines.....
1124 ***************************************************/
1125
1126static void
1127pmap_kenter(vm_offset_t sva, vm_size_t size, vm_paddr_t pa, int mode)
1128{
1129	pd_entry_t *pde;
1130	pt_entry_t *pte;
1131	vm_offset_t va;
1132	int lvl;
1133
1134	KASSERT((pa & L3_OFFSET) == 0,
1135	   ("pmap_kenter: Invalid physical address"));
1136	KASSERT((sva & L3_OFFSET) == 0,
1137	   ("pmap_kenter: Invalid virtual address"));
1138	KASSERT((size & PAGE_MASK) == 0,
1139	    ("pmap_kenter: Mapping is not page-sized"));
1140
1141	va = sva;
1142	while (size != 0) {
1143		pde = pmap_pde(kernel_pmap, va, &lvl);
1144		KASSERT(pde != NULL,
1145		    ("pmap_kenter: Invalid page entry, va: 0x%lx", va));
1146		KASSERT(lvl == 2, ("pmap_kenter: Invalid level %d", lvl));
1147
1148		pte = pmap_l2_to_l3(pde, va);
1149		pmap_load_store(pte, (pa & ~L3_OFFSET) | ATTR_DEFAULT |
1150		    ATTR_IDX(mode) | L3_PAGE);
1151		PTE_SYNC(pte);
1152
1153		va += PAGE_SIZE;
1154		pa += PAGE_SIZE;
1155		size -= PAGE_SIZE;
1156	}
1157	pmap_invalidate_range(kernel_pmap, sva, va);
1158}
1159
1160void
1161pmap_kenter_device(vm_offset_t sva, vm_size_t size, vm_paddr_t pa)
1162{
1163
1164	pmap_kenter(sva, size, pa, DEVICE_MEMORY);
1165}
1166
1167/*
1168 * Remove a page from the kernel pagetables.
1169 */
1170PMAP_INLINE void
1171pmap_kremove(vm_offset_t va)
1172{
1173	pt_entry_t *pte;
1174	int lvl;
1175
1176	pte = pmap_pte(kernel_pmap, va, &lvl);
1177	KASSERT(pte != NULL, ("pmap_kremove: Invalid address"));
1178	KASSERT(lvl == 3, ("pmap_kremove: Invalid pte level %d", lvl));
1179
1180	if (pmap_l3_valid_cacheable(pmap_load(pte)))
1181		cpu_dcache_wb_range(va, L3_SIZE);
1182	pmap_load_clear(pte);
1183	PTE_SYNC(pte);
1184	pmap_invalidate_page(kernel_pmap, va);
1185}
1186
1187void
1188pmap_kremove_device(vm_offset_t sva, vm_size_t size)
1189{
1190	pt_entry_t *pte;
1191	vm_offset_t va;
1192	int lvl;
1193
1194	KASSERT((sva & L3_OFFSET) == 0,
1195	   ("pmap_kremove_device: Invalid virtual address"));
1196	KASSERT((size & PAGE_MASK) == 0,
1197	    ("pmap_kremove_device: Mapping is not page-sized"));
1198
1199	va = sva;
1200	while (size != 0) {
1201		pte = pmap_pte(kernel_pmap, va, &lvl);
1202		KASSERT(pte != NULL, ("Invalid page table, va: 0x%lx", va));
1203		KASSERT(lvl == 3,
1204		    ("Invalid device pagetable level: %d != 3", lvl));
1205		pmap_load_clear(pte);
1206		PTE_SYNC(pte);
1207
1208		va += PAGE_SIZE;
1209		size -= PAGE_SIZE;
1210	}
1211	pmap_invalidate_range(kernel_pmap, sva, va);
1212}
1213
1214/*
1215 *	Used to map a range of physical addresses into kernel
1216 *	virtual address space.
1217 *
1218 *	The value passed in '*virt' is a suggested virtual address for
1219 *	the mapping. Architectures which can support a direct-mapped
1220 *	physical to virtual region can return the appropriate address
1221 *	within that region, leaving '*virt' unchanged. Other
1222 *	architectures should map the pages starting at '*virt' and
1223 *	update '*virt' with the first usable address after the mapped
1224 *	region.
1225 */
1226vm_offset_t
1227pmap_map(vm_offset_t *virt, vm_paddr_t start, vm_paddr_t end, int prot)
1228{
1229	return PHYS_TO_DMAP(start);
1230}
1231
1232
1233/*
1234 * Add a list of wired pages to the kva.
1235 * This routine is only used for temporary
1236 * kernel mappings that do not need to have
1237 * page modification or references recorded.
1238 * Note that old mappings are simply written
1239 * over.  The page *must* be wired.
1240 * Note: SMP coherent.  The mapped range is invalidated afterwards.
1241 */
1242void
1243pmap_qenter(vm_offset_t sva, vm_page_t *ma, int count)
1244{
1245	pd_entry_t *pde;
1246	pt_entry_t *pte, pa;
1247	vm_offset_t va;
1248	vm_page_t m;
1249	int i, lvl;
1250
1251	va = sva;
1252	for (i = 0; i < count; i++) {
1253		pde = pmap_pde(kernel_pmap, va, &lvl);
1254		KASSERT(pde != NULL,
1255		    ("pmap_qenter: Invalid page entry, va: 0x%lx", va));
1256		KASSERT(lvl == 2,
1257		    ("pmap_qenter: Invalid level %d", lvl));
1258
1259		m = ma[i];
1260		pa = VM_PAGE_TO_PHYS(m) | ATTR_DEFAULT | ATTR_AP(ATTR_AP_RW) |
1261		    ATTR_IDX(m->md.pv_memattr) | L3_PAGE;
1262		pte = pmap_l2_to_l3(pde, va);
1263		pmap_load_store(pte, pa);
1264		PTE_SYNC(pte);
1265
1266		va += L3_SIZE;
1267	}
1268	pmap_invalidate_range(kernel_pmap, sva, va);
1269}
1270
1271/*
1272 * This routine tears out page mappings from the
1273 * kernel -- it is meant only for temporary mappings.
1274 */
1275void
1276pmap_qremove(vm_offset_t sva, int count)
1277{
1278	pt_entry_t *pte;
1279	vm_offset_t va;
1280	int lvl;
1281
1282	KASSERT(sva >= VM_MIN_KERNEL_ADDRESS, ("usermode va %lx", sva));
1283
1284	va = sva;
1285	while (count-- > 0) {
1286		pte = pmap_pte(kernel_pmap, va, &lvl);
1287		KASSERT(lvl == 3,
1288		    ("Invalid pagetable level: %d != 3", lvl));
1289		if (pte != NULL) {
1290			if (pmap_l3_valid_cacheable(pmap_load(pte)))
1291				cpu_dcache_wb_range(va, L3_SIZE);
1292			pmap_load_clear(pte);
1293			PTE_SYNC(pte);
1294		}
1295
1296		va += PAGE_SIZE;
1297	}
1298	pmap_invalidate_range(kernel_pmap, sva, va);
1299}
1300
1301/***************************************************
1302 * Page table page management routines.....
1303 ***************************************************/
1304static __inline void
1305pmap_free_zero_pages(struct spglist *free)
1306{
1307	vm_page_t m;
1308
1309	while ((m = SLIST_FIRST(free)) != NULL) {
1310		SLIST_REMOVE_HEAD(free, plinks.s.ss);
1311		/* Preserve the page's PG_ZERO setting. */
1312		vm_page_free_toq(m);
1313	}
1314}
1315
1316/*
1317 * Schedule the specified unused page table page to be freed.  Specifically,
1318 * add the page to the specified list of pages that will be released to the
1319 * physical memory manager after the TLB has been updated.
1320 */
1321static __inline void
1322pmap_add_delayed_free_list(vm_page_t m, struct spglist *free,
1323    boolean_t set_PG_ZERO)
1324{
1325
1326	if (set_PG_ZERO)
1327		m->flags |= PG_ZERO;
1328	else
1329		m->flags &= ~PG_ZERO;
1330	SLIST_INSERT_HEAD(free, m, plinks.s.ss);
1331}
1332
1333/*
1334 * Decrements a page table page's wire count, which is used to record the
1335 * number of valid page table entries within the page.  If the wire count
1336 * drops to zero, then the page table page is unmapped.  Returns TRUE if the
1337 * page table page was unmapped and FALSE otherwise.
1338 */
1339static inline boolean_t
1340pmap_unwire_l3(pmap_t pmap, vm_offset_t va, vm_page_t m, struct spglist *free)
1341{
1342
1343	--m->wire_count;
1344	if (m->wire_count == 0) {
1345		_pmap_unwire_l3(pmap, va, m, free);
1346		return (TRUE);
1347	} else
1348		return (FALSE);
1349}
1350
1351static void
1352_pmap_unwire_l3(pmap_t pmap, vm_offset_t va, vm_page_t m, struct spglist *free)
1353{
1354
1355	PMAP_LOCK_ASSERT(pmap, MA_OWNED);
1356	/*
1357	 * unmap the page table page
1358	 */
1359	if (m->pindex >= (NUL2E + NUL1E)) {
1360		/* l1 page */
1361		pd_entry_t *l0;
1362
1363		l0 = pmap_l0(pmap, va);
1364		pmap_load_clear(l0);
1365		PTE_SYNC(l0);
1366	} else if (m->pindex >= NUL2E) {
1367		/* l2 page */
1368		pd_entry_t *l1;
1369
1370		l1 = pmap_l1(pmap, va);
1371		pmap_load_clear(l1);
1372		PTE_SYNC(l1);
1373	} else {
1374		/* l3 page */
1375		pd_entry_t *l2;
1376
1377		l2 = pmap_l2(pmap, va);
1378		pmap_load_clear(l2);
1379		PTE_SYNC(l2);
1380	}
1381	pmap_resident_count_dec(pmap, 1);
1382	if (m->pindex < NUL2E) {
1383		/* We just released an l3, unhold the matching l2 */
1384		pd_entry_t *l1, tl1;
1385		vm_page_t l2pg;
1386
1387		l1 = pmap_l1(pmap, va);
1388		tl1 = pmap_load(l1);
1389		l2pg = PHYS_TO_VM_PAGE(tl1 & ~ATTR_MASK);
1390		pmap_unwire_l3(pmap, va, l2pg, free);
1391	} else if (m->pindex < (NUL2E + NUL1E)) {
1392		/* We just released an l2, unhold the matching l1 */
1393		pd_entry_t *l0, tl0;
1394		vm_page_t l1pg;
1395
1396		l0 = pmap_l0(pmap, va);
1397		tl0 = pmap_load(l0);
1398		l1pg = PHYS_TO_VM_PAGE(tl0 & ~ATTR_MASK);
1399		pmap_unwire_l3(pmap, va, l1pg, free);
1400	}
1401	pmap_invalidate_page(pmap, va);
1402
1403	/*
1404	 * This is a release store so that the ordinary store unmapping
1405	 * the page table page is globally performed before TLB shoot-
1406	 * down is begun.
1407	 */
1408	atomic_subtract_rel_int(&vm_cnt.v_wire_count, 1);
1409
1410	/*
1411	 * Put page on a list so that it is released after
1412	 * *ALL* TLB shootdown is done
1413	 */
1414	pmap_add_delayed_free_list(m, free, TRUE);
1415}
1416
1417/*
1418 * After removing an l3 entry, this routine is used to
1419 * conditionally free the page, and manage the hold/wire counts.
1420 */
1421static int
1422pmap_unuse_l3(pmap_t pmap, vm_offset_t va, pd_entry_t ptepde,
1423    struct spglist *free)
1424{
1425	vm_page_t mpte;
1426
1427	if (va >= VM_MAXUSER_ADDRESS)
1428		return (0);
1429	KASSERT(ptepde != 0, ("pmap_unuse_pt: ptepde != 0"));
1430	mpte = PHYS_TO_VM_PAGE(ptepde & ~ATTR_MASK);
1431	return (pmap_unwire_l3(pmap, va, mpte, free));
1432}
1433
1434void
1435pmap_pinit0(pmap_t pmap)
1436{
1437
1438	PMAP_LOCK_INIT(pmap);
1439	bzero(&pmap->pm_stats, sizeof(pmap->pm_stats));
1440	pmap->pm_l0 = kernel_pmap->pm_l0;
1441	pmap->pm_root.rt_root = 0;
1442}
1443
1444int
1445pmap_pinit(pmap_t pmap)
1446{
1447	vm_paddr_t l0phys;
1448	vm_page_t l0pt;
1449
1450	/*
1451	 * allocate the l0 page
1452	 */
1453	while ((l0pt = vm_page_alloc(NULL, 0, VM_ALLOC_NORMAL |
1454	    VM_ALLOC_NOOBJ | VM_ALLOC_WIRED | VM_ALLOC_ZERO)) == NULL)
1455		VM_WAIT;
1456
1457	l0phys = VM_PAGE_TO_PHYS(l0pt);
1458	pmap->pm_l0 = (pd_entry_t *)PHYS_TO_DMAP(l0phys);
1459
1460	if ((l0pt->flags & PG_ZERO) == 0)
1461		pagezero(pmap->pm_l0);
1462
1463	pmap->pm_root.rt_root = 0;
1464	bzero(&pmap->pm_stats, sizeof(pmap->pm_stats));
1465
1466	return (1);
1467}
1468
1469/*
1470 * This routine is called if the desired page table page does not exist.
1471 *
1472 * If page table page allocation fails, this routine may sleep before
1473 * returning NULL.  It sleeps only if a lock pointer was given.
1474 *
1475 * Note: If a page allocation fails at page table level two or three,
1476 * one or two pages may be held during the wait, only to be released
1477 * afterwards.  This conservative approach is easily argued to avoid
1478 * race conditions.
1479 */
1480static vm_page_t
1481_pmap_alloc_l3(pmap_t pmap, vm_pindex_t ptepindex, struct rwlock **lockp)
1482{
1483	vm_page_t m, l1pg, l2pg;
1484
1485	PMAP_LOCK_ASSERT(pmap, MA_OWNED);
1486
1487	/*
1488	 * Allocate a page table page.
1489	 */
1490	if ((m = vm_page_alloc(NULL, ptepindex, VM_ALLOC_NOOBJ |
1491	    VM_ALLOC_WIRED | VM_ALLOC_ZERO)) == NULL) {
1492		if (lockp != NULL) {
1493			RELEASE_PV_LIST_LOCK(lockp);
1494			PMAP_UNLOCK(pmap);
1495			VM_WAIT;
1496			PMAP_LOCK(pmap);
1497		}
1498
1499		/*
1500		 * Indicate the need to retry.  While waiting, the page table
1501		 * page may have been allocated.
1502		 */
1503		return (NULL);
1504	}
1505	if ((m->flags & PG_ZERO) == 0)
1506		pmap_zero_page(m);
1507
1508	/*
1509	 * Map the pagetable page into the process address space, if
1510	 * it isn't already there.
1511	 */
1512
1513	if (ptepindex >= (NUL2E + NUL1E)) {
1514		pd_entry_t *l0;
1515		vm_pindex_t l0index;
1516
1517		l0index = ptepindex - (NUL2E + NUL1E);
1518		l0 = &pmap->pm_l0[l0index];
1519		pmap_load_store(l0, VM_PAGE_TO_PHYS(m) | L0_TABLE);
1520		PTE_SYNC(l0);
1521	} else if (ptepindex >= NUL2E) {
1522		vm_pindex_t l0index, l1index;
1523		pd_entry_t *l0, *l1;
1524		pd_entry_t tl0;
1525
1526		l1index = ptepindex - NUL2E;
1527		l0index = l1index >> L0_ENTRIES_SHIFT;
1528
1529		l0 = &pmap->pm_l0[l0index];
1530		tl0 = pmap_load(l0);
1531		if (tl0 == 0) {
1532			/* recurse for allocating page dir */
1533			if (_pmap_alloc_l3(pmap, NUL2E + NUL1E + l0index,
1534			    lockp) == NULL) {
1535				--m->wire_count;
1536				/* XXX: release mem barrier? */
1537				atomic_subtract_int(&vm_cnt.v_wire_count, 1);
1538				vm_page_free_zero(m);
1539				return (NULL);
1540			}
1541		} else {
1542			l1pg = PHYS_TO_VM_PAGE(tl0 & ~ATTR_MASK);
1543			l1pg->wire_count++;
1544		}
1545
1546		l1 = (pd_entry_t *)PHYS_TO_DMAP(pmap_load(l0) & ~ATTR_MASK);
1547		l1 = &l1[ptepindex & Ln_ADDR_MASK];
1548		pmap_load_store(l1, VM_PAGE_TO_PHYS(m) | L1_TABLE);
1549		PTE_SYNC(l1);
1550	} else {
1551		vm_pindex_t l0index, l1index;
1552		pd_entry_t *l0, *l1, *l2;
1553		pd_entry_t tl0, tl1;
1554
1555		l1index = ptepindex >> Ln_ENTRIES_SHIFT;
1556		l0index = l1index >> L0_ENTRIES_SHIFT;
1557
1558		l0 = &pmap->pm_l0[l0index];
1559		tl0 = pmap_load(l0);
1560		if (tl0 == 0) {
1561			/* recurse for allocating page dir */
1562			if (_pmap_alloc_l3(pmap, NUL2E + l1index,
1563			    lockp) == NULL) {
1564				--m->wire_count;
1565				atomic_subtract_int(&vm_cnt.v_wire_count, 1);
1566				vm_page_free_zero(m);
1567				return (NULL);
1568			}
1569			tl0 = pmap_load(l0);
1570			l1 = (pd_entry_t *)PHYS_TO_DMAP(tl0 & ~ATTR_MASK);
1571			l1 = &l1[l1index & Ln_ADDR_MASK];
1572		} else {
1573			l1 = (pd_entry_t *)PHYS_TO_DMAP(tl0 & ~ATTR_MASK);
1574			l1 = &l1[l1index & Ln_ADDR_MASK];
1575			tl1 = pmap_load(l1);
1576			if (tl1 == 0) {
1577				/* recurse for allocating page dir */
1578				if (_pmap_alloc_l3(pmap, NUL2E + l1index,
1579				    lockp) == NULL) {
1580					--m->wire_count;
1581					/* XXX: release mem barrier? */
1582					atomic_subtract_int(
1583					    &vm_cnt.v_wire_count, 1);
1584					vm_page_free_zero(m);
1585					return (NULL);
1586				}
1587			} else {
1588				l2pg = PHYS_TO_VM_PAGE(tl1 & ~ATTR_MASK);
1589				l2pg->wire_count++;
1590			}
1591		}
1592
1593		l2 = (pd_entry_t *)PHYS_TO_DMAP(pmap_load(l1) & ~ATTR_MASK);
1594		l2 = &l2[ptepindex & Ln_ADDR_MASK];
1595		pmap_load_store(l2, VM_PAGE_TO_PHYS(m) | L2_TABLE);
1596		PTE_SYNC(l2);
1597	}
1598
1599	pmap_resident_count_inc(pmap, 1);
1600
1601	return (m);
1602}
1603
1604static vm_page_t
1605pmap_alloc_l3(pmap_t pmap, vm_offset_t va, struct rwlock **lockp)
1606{
1607	vm_pindex_t ptepindex;
1608	pd_entry_t *pde, tpde;
1609#ifdef INVARIANTS
1610	pt_entry_t *pte;
1611#endif
1612	vm_page_t m;
1613	int lvl;
1614
1615	/*
1616	 * Calculate pagetable page index
1617	 */
1618	ptepindex = pmap_l2_pindex(va);
1619retry:
1620	/*
1621	 * Get the page directory entry
1622	 */
1623	pde = pmap_pde(pmap, va, &lvl);
1624
1625	/*
1626	 * If the page table page is mapped, we just increment the hold count,
1627	 * and activate it. If we get a level 2 pde it will point to a level 3
1628	 * table.
1629	 */
1630	switch (lvl) {
1631	case -1:
1632		break;
1633	case 0:
1634#ifdef INVARIANTS
1635		pte = pmap_l0_to_l1(pde, va);
1636		KASSERT(pmap_load(pte) == 0,
1637		    ("pmap_alloc_l3: TODO: l0 superpages"));
1638#endif
1639		break;
1640	case 1:
1641#ifdef INVARIANTS
1642		pte = pmap_l1_to_l2(pde, va);
1643		KASSERT(pmap_load(pte) == 0,
1644		    ("pmap_alloc_l3: TODO: l1 superpages"));
1645#endif
1646		break;
1647	case 2:
1648		tpde = pmap_load(pde);
1649		if (tpde != 0) {
1650			m = PHYS_TO_VM_PAGE(tpde & ~ATTR_MASK);
1651			m->wire_count++;
1652			return (m);
1653		}
1654		break;
1655	default:
1656		panic("pmap_alloc_l3: Invalid level %d", lvl);
1657	}
1658
1659	/*
1660	 * Here if the pte page isn't mapped, or if it has been deallocated.
1661	 */
1662	m = _pmap_alloc_l3(pmap, ptepindex, lockp);
1663	if (m == NULL && lockp != NULL)
1664		goto retry;
1665
1666	return (m);
1667}
1668
1669
1670/***************************************************
1671 * Pmap allocation/deallocation routines.
1672 ***************************************************/
1673
1674/*
1675 * Release any resources held by the given physical map.
1676 * Called when a pmap initialized by pmap_pinit is being released.
1677 * Should only be called if the map contains no valid mappings.
1678 */
1679void
1680pmap_release(pmap_t pmap)
1681{
1682	vm_page_t m;
1683
1684	KASSERT(pmap->pm_stats.resident_count == 0,
1685	    ("pmap_release: pmap resident count %ld != 0",
1686	    pmap->pm_stats.resident_count));
1687	KASSERT(vm_radix_is_empty(&pmap->pm_root),
1688	    ("pmap_release: pmap has reserved page table page(s)"));
1689
1690	m = PHYS_TO_VM_PAGE(DMAP_TO_PHYS((vm_offset_t)pmap->pm_l0));
1691
1692	m->wire_count--;
1693	atomic_subtract_int(&vm_cnt.v_wire_count, 1);
1694	vm_page_free_zero(m);
1695}
1696
1697static int
1698kvm_size(SYSCTL_HANDLER_ARGS)
1699{
1700	unsigned long ksize = VM_MAX_KERNEL_ADDRESS - VM_MIN_KERNEL_ADDRESS;
1701
1702	return sysctl_handle_long(oidp, &ksize, 0, req);
1703}
1704SYSCTL_PROC(_vm, OID_AUTO, kvm_size, CTLTYPE_LONG|CTLFLAG_RD,
1705    0, 0, kvm_size, "LU", "Size of KVM");
1706
1707static int
1708kvm_free(SYSCTL_HANDLER_ARGS)
1709{
1710	unsigned long kfree = VM_MAX_KERNEL_ADDRESS - kernel_vm_end;
1711
1712	return sysctl_handle_long(oidp, &kfree, 0, req);
1713}
1714SYSCTL_PROC(_vm, OID_AUTO, kvm_free, CTLTYPE_LONG|CTLFLAG_RD,
1715    0, 0, kvm_free, "LU", "Amount of KVM free");
1716
1717/*
1718 * grow the number of kernel page table entries, if needed
1719 */
1720void
1721pmap_growkernel(vm_offset_t addr)
1722{
1723	vm_paddr_t paddr;
1724	vm_page_t nkpg;
1725	pd_entry_t *l0, *l1, *l2;
1726
1727	mtx_assert(&kernel_map->system_mtx, MA_OWNED);
1728
1729	addr = roundup2(addr, L2_SIZE);
1730	if (addr - 1 >= kernel_map->max_offset)
1731		addr = kernel_map->max_offset;
1732	while (kernel_vm_end < addr) {
1733		l0 = pmap_l0(kernel_pmap, kernel_vm_end);
1734		KASSERT(pmap_load(l0) != 0,
1735		    ("pmap_growkernel: No level 0 kernel entry"));
1736
1737		l1 = pmap_l0_to_l1(l0, kernel_vm_end);
1738		if (pmap_load(l1) == 0) {
1739			/* The L1 entry is empty, allocate a new L2 table */
1740			nkpg = vm_page_alloc(NULL, kernel_vm_end >> L1_SHIFT,
1741			    VM_ALLOC_INTERRUPT | VM_ALLOC_NOOBJ |
1742			    VM_ALLOC_WIRED | VM_ALLOC_ZERO);
1743			if (nkpg == NULL)
1744				panic("pmap_growkernel: no memory to grow kernel");
1745			if ((nkpg->flags & PG_ZERO) == 0)
1746				pmap_zero_page(nkpg);
1747			paddr = VM_PAGE_TO_PHYS(nkpg);
1748			pmap_load_store(l1, paddr | L1_TABLE);
1749			PTE_SYNC(l1);
1750			continue; /* try again */
1751		}
1752		l2 = pmap_l1_to_l2(l1, kernel_vm_end);
1753		if ((pmap_load(l2) & ATTR_AF) != 0) {
1754			kernel_vm_end = (kernel_vm_end + L2_SIZE) & ~L2_OFFSET;
1755			if (kernel_vm_end - 1 >= kernel_map->max_offset) {
1756				kernel_vm_end = kernel_map->max_offset;
1757				break;
1758			}
1759			continue;
1760		}
1761
1762		nkpg = vm_page_alloc(NULL, kernel_vm_end >> L2_SHIFT,
1763		    VM_ALLOC_INTERRUPT | VM_ALLOC_NOOBJ | VM_ALLOC_WIRED |
1764		    VM_ALLOC_ZERO);
1765		if (nkpg == NULL)
1766			panic("pmap_growkernel: no memory to grow kernel");
1767		if ((nkpg->flags & PG_ZERO) == 0)
1768			pmap_zero_page(nkpg);
1769		paddr = VM_PAGE_TO_PHYS(nkpg);
1770		pmap_load_store(l2, paddr | L2_TABLE);
1771		PTE_SYNC(l2);
1772		pmap_invalidate_page(kernel_pmap, kernel_vm_end);
1773
1774		kernel_vm_end = (kernel_vm_end + L2_SIZE) & ~L2_OFFSET;
1775		if (kernel_vm_end - 1 >= kernel_map->max_offset) {
1776			kernel_vm_end = kernel_map->max_offset;
1777			break;
1778		}
1779	}
1780}
1781
1782
1783/***************************************************
1784 * page management routines.
1785 ***************************************************/
1786
1787CTASSERT(sizeof(struct pv_chunk) == PAGE_SIZE);
1788CTASSERT(_NPCM == 3);
1789CTASSERT(_NPCPV == 168);
1790
1791static __inline struct pv_chunk *
1792pv_to_chunk(pv_entry_t pv)
1793{
1794
1795	return ((struct pv_chunk *)((uintptr_t)pv & ~(uintptr_t)PAGE_MASK));
1796}
1797
1798#define PV_PMAP(pv) (pv_to_chunk(pv)->pc_pmap)
1799
1800#define	PC_FREE0	0xfffffffffffffffful
1801#define	PC_FREE1	0xfffffffffffffffful
1802#define	PC_FREE2	0x000000fffffffffful
1803
1804static const uint64_t pc_freemask[_NPCM] = { PC_FREE0, PC_FREE1, PC_FREE2 };
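/*
 * The arithmetic behind these masks: each pv_chunk holds _NPCPV == 168 pv
 * entries tracked by _NPCM == 3 64-bit bitmap words, and 168 = 64 + 64 + 40,
 * so the first two words are all ones while PC_FREE2 sets only its low 40
 * bits.
 */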
1805
1806#if 0
1807#ifdef PV_STATS
1808static int pc_chunk_count, pc_chunk_allocs, pc_chunk_frees, pc_chunk_tryfail;
1809
1810SYSCTL_INT(_vm_pmap, OID_AUTO, pc_chunk_count, CTLFLAG_RD, &pc_chunk_count, 0,
1811	"Current number of pv entry chunks");
1812SYSCTL_INT(_vm_pmap, OID_AUTO, pc_chunk_allocs, CTLFLAG_RD, &pc_chunk_allocs, 0,
1813	"Current number of pv entry chunks allocated");
1814SYSCTL_INT(_vm_pmap, OID_AUTO, pc_chunk_frees, CTLFLAG_RD, &pc_chunk_frees, 0,
1815	"Current number of pv entry chunks frees");
1816SYSCTL_INT(_vm_pmap, OID_AUTO, pc_chunk_tryfail, CTLFLAG_RD, &pc_chunk_tryfail, 0,
1817	"Number of times tried to get a chunk page but failed.");
1818
1819static long pv_entry_frees, pv_entry_allocs, pv_entry_count;
1820static int pv_entry_spare;
1821
1822SYSCTL_LONG(_vm_pmap, OID_AUTO, pv_entry_frees, CTLFLAG_RD, &pv_entry_frees, 0,
1823	"Current number of pv entry frees");
1824SYSCTL_LONG(_vm_pmap, OID_AUTO, pv_entry_allocs, CTLFLAG_RD, &pv_entry_allocs, 0,
1825	"Current number of pv entry allocs");
1826SYSCTL_LONG(_vm_pmap, OID_AUTO, pv_entry_count, CTLFLAG_RD, &pv_entry_count, 0,
1827	"Current number of pv entries");
1828SYSCTL_INT(_vm_pmap, OID_AUTO, pv_entry_spare, CTLFLAG_RD, &pv_entry_spare, 0,
1829	"Current number of spare pv entries");
1830#endif
1831#endif /* 0 */
1832
1833/*
1834 * We are in a serious low memory condition.  Resort to
1835 * drastic measures to free some pages so we can allocate
1836 * another pv entry chunk.
1837 *
1838 * Returns NULL if PV entries were reclaimed from the specified pmap.
1839 *
1840 * We do not, however, unmap 2mpages because subsequent accesses will
1841 * allocate per-page pv entries until repromotion occurs, thereby
1842 * exacerbating the shortage of free pv entries.
1843 */
1844static vm_page_t
1845reclaim_pv_chunk(pmap_t locked_pmap, struct rwlock **lockp)
1846{
1847
1848	panic("ARM64TODO: reclaim_pv_chunk");
1849}
1850
1851/*
1852 * free the pv_entry back to the free list
1853 */
1854static void
1855free_pv_entry(pmap_t pmap, pv_entry_t pv)
1856{
1857	struct pv_chunk *pc;
1858	int idx, field, bit;
1859
1860	PMAP_LOCK_ASSERT(pmap, MA_OWNED);
1861	PV_STAT(atomic_add_long(&pv_entry_frees, 1));
1862	PV_STAT(atomic_add_int(&pv_entry_spare, 1));
1863	PV_STAT(atomic_subtract_long(&pv_entry_count, 1));
1864	pc = pv_to_chunk(pv);
1865	idx = pv - &pc->pc_pventry[0];
1866	field = idx / 64;
1867	bit = idx % 64;
1868	pc->pc_map[field] |= 1ul << bit;
1869	if (pc->pc_map[0] != PC_FREE0 || pc->pc_map[1] != PC_FREE1 ||
1870	    pc->pc_map[2] != PC_FREE2) {
1871		/* 98% of the time, pc is already at the head of the list. */
1872		if (__predict_false(pc != TAILQ_FIRST(&pmap->pm_pvchunk))) {
1873			TAILQ_REMOVE(&pmap->pm_pvchunk, pc, pc_list);
1874			TAILQ_INSERT_HEAD(&pmap->pm_pvchunk, pc, pc_list);
1875		}
1876		return;
1877	}
1878	TAILQ_REMOVE(&pmap->pm_pvchunk, pc, pc_list);
1879	free_pv_chunk(pc);
1880}
1881
1882static void
1883free_pv_chunk(struct pv_chunk *pc)
1884{
1885	vm_page_t m;
1886
1887	mtx_lock(&pv_chunks_mutex);
1888 	TAILQ_REMOVE(&pv_chunks, pc, pc_lru);
1889	mtx_unlock(&pv_chunks_mutex);
1890	PV_STAT(atomic_subtract_int(&pv_entry_spare, _NPCPV));
1891	PV_STAT(atomic_subtract_int(&pc_chunk_count, 1));
1892	PV_STAT(atomic_add_int(&pc_chunk_frees, 1));
1893	/* entire chunk is free, return it */
1894	m = PHYS_TO_VM_PAGE(DMAP_TO_PHYS((vm_offset_t)pc));
1895	dump_drop_page(m->phys_addr);
1896	vm_page_unwire(m, PQ_NONE);
1897	vm_page_free(m);
1898}
1899
1900/*
1901 * Returns a new PV entry, allocating a new PV chunk from the system when
1902 * needed.  If this PV chunk allocation fails and a PV list lock pointer was
1903 * given, a PV chunk is reclaimed from an arbitrary pmap.  Otherwise, NULL is
1904 * returned.
1905 *
1906 * The given PV list lock may be released.
1907 */
1908static pv_entry_t
1909get_pv_entry(pmap_t pmap, struct rwlock **lockp)
1910{
1911	int bit, field;
1912	pv_entry_t pv;
1913	struct pv_chunk *pc;
1914	vm_page_t m;
1915
1916	PMAP_LOCK_ASSERT(pmap, MA_OWNED);
1917	PV_STAT(atomic_add_long(&pv_entry_allocs, 1));
1918retry:
1919	pc = TAILQ_FIRST(&pmap->pm_pvchunk);
1920	if (pc != NULL) {
1921		for (field = 0; field < _NPCM; field++) {
1922			if (pc->pc_map[field]) {
1923				bit = ffsl(pc->pc_map[field]) - 1;
1924				break;
1925			}
1926		}
1927		if (field < _NPCM) {
1928			pv = &pc->pc_pventry[field * 64 + bit];
1929			pc->pc_map[field] &= ~(1ul << bit);
1930			/* If this was the last item, move it to tail */
1931			if (pc->pc_map[0] == 0 && pc->pc_map[1] == 0 &&
1932			    pc->pc_map[2] == 0) {
1933				TAILQ_REMOVE(&pmap->pm_pvchunk, pc, pc_list);
1934				TAILQ_INSERT_TAIL(&pmap->pm_pvchunk, pc,
1935				    pc_list);
1936			}
1937			PV_STAT(atomic_add_long(&pv_entry_count, 1));
1938			PV_STAT(atomic_subtract_int(&pv_entry_spare, 1));
1939			return (pv);
1940		}
1941	}
1942	/* No free items, allocate another chunk */
1943	m = vm_page_alloc(NULL, 0, VM_ALLOC_NORMAL | VM_ALLOC_NOOBJ |
1944	    VM_ALLOC_WIRED);
1945	if (m == NULL) {
1946		if (lockp == NULL) {
1947			PV_STAT(pc_chunk_tryfail++);
1948			return (NULL);
1949		}
1950		m = reclaim_pv_chunk(pmap, lockp);
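		/*
		 * A NULL return means PV entries were reclaimed from this
		 * pmap itself, so retry the fast path above.
		 */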
1951		if (m == NULL)
1952			goto retry;
1953	}
1954	PV_STAT(atomic_add_int(&pc_chunk_count, 1));
1955	PV_STAT(atomic_add_int(&pc_chunk_allocs, 1));
1956	dump_add_page(m->phys_addr);
1957	pc = (void *)PHYS_TO_DMAP(m->phys_addr);
1958	pc->pc_pmap = pmap;
1959	pc->pc_map[0] = PC_FREE0 & ~1ul;	/* preallocated bit 0 */
1960	pc->pc_map[1] = PC_FREE1;
1961	pc->pc_map[2] = PC_FREE2;
1962	mtx_lock(&pv_chunks_mutex);
1963	TAILQ_INSERT_TAIL(&pv_chunks, pc, pc_lru);
1964	mtx_unlock(&pv_chunks_mutex);
1965	pv = &pc->pc_pventry[0];
1966	TAILQ_INSERT_HEAD(&pmap->pm_pvchunk, pc, pc_list);
1967	PV_STAT(atomic_add_long(&pv_entry_count, 1));
1968	PV_STAT(atomic_add_int(&pv_entry_spare, _NPCPV - 1));
1969	return (pv);
1970}
1971
1972/*
1973 * Ensure that the number of spare PV entries in the specified pmap meets or
1974 * exceeds the given count, "needed".
1975 *
1976 * The given PV list lock may be released.
1977 */
1978static void
1979reserve_pv_entries(pmap_t pmap, int needed, struct rwlock **lockp)
1980{
1981	struct pch new_tail;
1982	struct pv_chunk *pc;
1983	int avail, free;
1984	vm_page_t m;
1985
1986	PMAP_LOCK_ASSERT(pmap, MA_OWNED);
1987	KASSERT(lockp != NULL, ("reserve_pv_entries: lockp is NULL"));
1988
1989	/*
1990	 * Newly allocated PV chunks must be stored in a private list until
1991	 * the required number of PV chunks have been allocated.  Otherwise,
1992	 * reclaim_pv_chunk() could recycle one of these chunks.  In
1993	 * contrast, these chunks must be added to the pmap upon allocation.
1994	 */
1995	TAILQ_INIT(&new_tail);
1996retry:
1997	avail = 0;
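	/*
	 * Count the PV entries that are already free in this pmap's
	 * existing chunks before allocating any new chunk pages.
	 */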
1998	TAILQ_FOREACH(pc, &pmap->pm_pvchunk, pc_list) {
1999		bit_count((bitstr_t *)pc->pc_map, 0,
2000		    sizeof(pc->pc_map) * NBBY, &free);
2001		if (free == 0)
2002			break;
2003		avail += free;
2004		if (avail >= needed)
2005			break;
2006	}
2007	for (; avail < needed; avail += _NPCPV) {
2008		m = vm_page_alloc(NULL, 0, VM_ALLOC_NORMAL | VM_ALLOC_NOOBJ |
2009		    VM_ALLOC_WIRED);
2010		if (m == NULL) {
2011			m = reclaim_pv_chunk(pmap, lockp);
2012			if (m == NULL)
2013				goto retry;
2014		}
2015		PV_STAT(atomic_add_int(&pc_chunk_count, 1));
2016		PV_STAT(atomic_add_int(&pc_chunk_allocs, 1));
2017		dump_add_page(m->phys_addr);
2018		pc = (void *)PHYS_TO_DMAP(m->phys_addr);
2019		pc->pc_pmap = pmap;
2020		pc->pc_map[0] = PC_FREE0;
2021		pc->pc_map[1] = PC_FREE1;
2022		pc->pc_map[2] = PC_FREE2;
2023		TAILQ_INSERT_HEAD(&pmap->pm_pvchunk, pc, pc_list);
2024		TAILQ_INSERT_TAIL(&new_tail, pc, pc_lru);
2025		PV_STAT(atomic_add_int(&pv_entry_spare, _NPCPV));
2026	}
2027	if (!TAILQ_EMPTY(&new_tail)) {
2028		mtx_lock(&pv_chunks_mutex);
2029		TAILQ_CONCAT(&pv_chunks, &new_tail, pc_lru);
2030		mtx_unlock(&pv_chunks_mutex);
2031	}
2032}
2033
2034/*
2035 * First find and then remove the pv entry for the specified pmap and virtual
2036 * address from the specified pv list.  Returns the pv entry if found and NULL
2037 * otherwise.  This operation can be performed on pv lists for either 4KB or
2038 * 2MB page mappings.
2039 */
2040static __inline pv_entry_t
2041pmap_pvh_remove(struct md_page *pvh, pmap_t pmap, vm_offset_t va)
2042{
2043	pv_entry_t pv;
2044
2045	TAILQ_FOREACH(pv, &pvh->pv_list, pv_next) {
2046		if (pmap == PV_PMAP(pv) && va == pv->pv_va) {
2047			TAILQ_REMOVE(&pvh->pv_list, pv, pv_next);
2048			pvh->pv_gen++;
2049			break;
2050		}
2051	}
2052	return (pv);
2053}
2054
2055/*
2056 * After demotion from a 2MB page mapping to 512 4KB page mappings,
2057 * destroy the pv entry for the 2MB page mapping and reinstantiate the pv
2058 * entries for each of the 4KB page mappings.
2059 */
2060static void
2061pmap_pv_demote_l2(pmap_t pmap, vm_offset_t va, vm_paddr_t pa,
2062    struct rwlock **lockp)
2063{
2064	struct md_page *pvh;
2065	struct pv_chunk *pc;
2066	pv_entry_t pv;
2067	vm_offset_t va_last;
2068	vm_page_t m;
2069	int bit, field;
2070
2071	PMAP_LOCK_ASSERT(pmap, MA_OWNED);
2072	KASSERT((pa & L2_OFFSET) == 0,
2073	    ("pmap_pv_demote_l2: pa is not 2mpage aligned"));
2074	CHANGE_PV_LIST_LOCK_TO_PHYS(lockp, pa);
2075
2076	/*
2077	 * Transfer the 2mpage's pv entry for this mapping to the first
2078	 * page's pv list.  Once this transfer begins, the pv list lock
2079	 * must not be released until the last pv entry is reinstantiated.
2080	 */
2081	pvh = pa_to_pvh(pa);
2082	va = va & ~L2_OFFSET;
2083	pv = pmap_pvh_remove(pvh, pmap, va);
2084	KASSERT(pv != NULL, ("pmap_pv_demote_l2: pv not found"));
2085	m = PHYS_TO_VM_PAGE(pa);
2086	TAILQ_INSERT_TAIL(&m->md.pv_list, pv, pv_next);
2087	m->md.pv_gen++;
2088	/* Instantiate the remaining Ln_ENTRIES - 1 pv entries. */
2089	PV_STAT(atomic_add_long(&pv_entry_allocs, Ln_ENTRIES - 1));
2090	va_last = va + L2_SIZE - PAGE_SIZE;
2091	for (;;) {
2092		pc = TAILQ_FIRST(&pmap->pm_pvchunk);
2093		KASSERT(pc->pc_map[0] != 0 || pc->pc_map[1] != 0 ||
2094		    pc->pc_map[2] != 0, ("pmap_pv_demote_l2: missing spare"));
2095		for (field = 0; field < _NPCM; field++) {
2096			while (pc->pc_map[field]) {
2097				bit = ffsl(pc->pc_map[field]) - 1;
2098				pc->pc_map[field] &= ~(1ul << bit);
2099				pv = &pc->pc_pventry[field * 64 + bit];
2100				va += PAGE_SIZE;
2101				pv->pv_va = va;
2102				m++;
2103				KASSERT((m->oflags & VPO_UNMANAGED) == 0,
2104			    ("pmap_pv_demote_l2: page %p is not managed", m));
2105				TAILQ_INSERT_TAIL(&m->md.pv_list, pv, pv_next);
2106				m->md.pv_gen++;
2107				if (va == va_last)
2108					goto out;
2109			}
2110		}
2111		TAILQ_REMOVE(&pmap->pm_pvchunk, pc, pc_list);
2112		TAILQ_INSERT_TAIL(&pmap->pm_pvchunk, pc, pc_list);
2113	}
2114out:
2115	if (pc->pc_map[0] == 0 && pc->pc_map[1] == 0 && pc->pc_map[2] == 0) {
2116		TAILQ_REMOVE(&pmap->pm_pvchunk, pc, pc_list);
2117		TAILQ_INSERT_TAIL(&pmap->pm_pvchunk, pc, pc_list);
2118	}
2119	PV_STAT(atomic_add_long(&pv_entry_count, Ln_ENTRIES - 1));
2120	PV_STAT(atomic_subtract_int(&pv_entry_spare, Ln_ENTRIES - 1));
2121}
2122
2123/*
2124 * First find and then destroy the pv entry for the specified pmap and virtual
2125 * address.  This operation can be performed on pv lists for either 4KB or 2MB
2126 * page mappings.
2127 */
2128static void
2129pmap_pvh_free(struct md_page *pvh, pmap_t pmap, vm_offset_t va)
2130{
2131	pv_entry_t pv;
2132
2133	pv = pmap_pvh_remove(pvh, pmap, va);
2134	KASSERT(pv != NULL, ("pmap_pvh_free: pv not found"));
2135	free_pv_entry(pmap, pv);
2136}
2137
2138/*
2139 * Conditionally create the PV entry for a 4KB page mapping if the required
2140 * memory can be allocated without resorting to reclamation.
2141 */
2142static boolean_t
2143pmap_try_insert_pv_entry(pmap_t pmap, vm_offset_t va, vm_page_t m,
2144    struct rwlock **lockp)
2145{
2146	pv_entry_t pv;
2147
2148	PMAP_LOCK_ASSERT(pmap, MA_OWNED);
2149	/* Pass NULL instead of the lock pointer to disable reclamation. */
2150	if ((pv = get_pv_entry(pmap, NULL)) != NULL) {
2151		pv->pv_va = va;
2152		CHANGE_PV_LIST_LOCK_TO_VM_PAGE(lockp, m);
2153		TAILQ_INSERT_TAIL(&m->md.pv_list, pv, pv_next);
2154		m->md.pv_gen++;
2155		return (TRUE);
2156	} else
2157		return (FALSE);
2158}
2159
2160/*
2161 * pmap_remove_l3: do the things to unmap a page in a process
2162 */
2163static int
2164pmap_remove_l3(pmap_t pmap, pt_entry_t *l3, vm_offset_t va,
2165    pd_entry_t l2e, struct spglist *free, struct rwlock **lockp)
2166{
2167	struct md_page *pvh;
2168	pt_entry_t old_l3;
2169	vm_page_t m;
2170
2171	PMAP_LOCK_ASSERT(pmap, MA_OWNED);
2172	if (pmap_is_current(pmap) && pmap_l3_valid_cacheable(pmap_load(l3)))
2173		cpu_dcache_wb_range(va, L3_SIZE);
2174	old_l3 = pmap_load_clear(l3);
2175	PTE_SYNC(l3);
2176	pmap_invalidate_page(pmap, va);
2177	if (old_l3 & ATTR_SW_WIRED)
2178		pmap->pm_stats.wired_count -= 1;
2179	pmap_resident_count_dec(pmap, 1);
2180	if (old_l3 & ATTR_SW_MANAGED) {
2181		m = PHYS_TO_VM_PAGE(old_l3 & ~ATTR_MASK);
2182		if (pmap_page_dirty(old_l3))
2183			vm_page_dirty(m);
2184		if (old_l3 & ATTR_AF)
2185			vm_page_aflag_set(m, PGA_REFERENCED);
2186		CHANGE_PV_LIST_LOCK_TO_VM_PAGE(lockp, m);
2187		pmap_pvh_free(&m->md, pmap, va);
2188		if (TAILQ_EMPTY(&m->md.pv_list) &&
2189		    (m->flags & PG_FICTITIOUS) == 0) {
2190			pvh = pa_to_pvh(VM_PAGE_TO_PHYS(m));
2191			if (TAILQ_EMPTY(&pvh->pv_list))
2192				vm_page_aflag_clear(m, PGA_WRITEABLE);
2193		}
2194	}
2195	return (pmap_unuse_l3(pmap, va, l2e, free));
2196}
2197
2198/*
2199 *	Remove the given range of addresses from the specified map.
2200 *
2201 *	It is assumed that the start and end are properly
2202 *	rounded to the page size.
2203 */
2204void
2205pmap_remove(pmap_t pmap, vm_offset_t sva, vm_offset_t eva)
2206{
2207	struct rwlock *lock;
2208	vm_offset_t va, va_next;
2209	pd_entry_t *l0, *l1, *l2;
2210	pt_entry_t l3_paddr, *l3;
2211	struct spglist free;
2212	int anyvalid;
2213
2214	/*
2215	 * Perform an unsynchronized read.  This is, however, safe.
2216	 */
2217	if (pmap->pm_stats.resident_count == 0)
2218		return;
2219
2220	anyvalid = 0;
2221	SLIST_INIT(&free);
2222
2223	PMAP_LOCK(pmap);
2224
2225	lock = NULL;
2226	for (; sva < eva; sva = va_next) {
2227
2228		if (pmap->pm_stats.resident_count == 0)
2229			break;
2230
2231		l0 = pmap_l0(pmap, sva);
2232		if (pmap_load(l0) == 0) {
2233			va_next = (sva + L0_SIZE) & ~L0_OFFSET;
2234			if (va_next < sva)
2235				va_next = eva;
2236			continue;
2237		}
2238
2239		l1 = pmap_l0_to_l1(l0, sva);
2240		if (pmap_load(l1) == 0) {
2241			va_next = (sva + L1_SIZE) & ~L1_OFFSET;
2242			if (va_next < sva)
2243				va_next = eva;
2244			continue;
2245		}
2246
2247		/*
2248		 * Calculate index for next page table.
2249		 */
2250		va_next = (sva + L2_SIZE) & ~L2_OFFSET;
2251		if (va_next < sva)
2252			va_next = eva;
2253
2254		l2 = pmap_l1_to_l2(l1, sva);
2255		if (l2 == NULL)
2256			continue;
2257
2258		l3_paddr = pmap_load(l2);
2259
2260		if ((l3_paddr & ATTR_DESCR_MASK) == L2_BLOCK) {
2261			/* TODO: Add pmap_remove_l2 */
2262			if (pmap_demote_l2_locked(pmap, l2, sva & ~L2_OFFSET,
2263			    &lock) == NULL)
2264				continue;
2265			l3_paddr = pmap_load(l2);
2266		}
2267
2268		/*
2269		 * Weed out invalid mappings.
2270		 */
2271		if ((l3_paddr & ATTR_DESCR_MASK) != L2_TABLE)
2272			continue;
2273
2274		/*
2275		 * Limit our scan to either the end of the va represented
2276		 * by the current page table page, or to the end of the
2277		 * range being removed.
2278		 */
2279		if (va_next > eva)
2280			va_next = eva;
2281
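		/*
		 * "va" marks the start of the current run of removed
		 * mappings; any pending run is flushed with
		 * pmap_invalidate_range() when it ends.
		 */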
2282		va = va_next;
2283		for (l3 = pmap_l2_to_l3(l2, sva); sva != va_next; l3++,
2284		    sva += L3_SIZE) {
2285			if (l3 == NULL)
2286				panic("l3 == NULL");
2287			if (pmap_load(l3) == 0) {
2288				if (va != va_next) {
2289					pmap_invalidate_range(pmap, va, sva);
2290					va = va_next;
2291				}
2292				continue;
2293			}
2294			if (va == va_next)
2295				va = sva;
2296			if (pmap_remove_l3(pmap, l3, sva, l3_paddr, &free,
2297			    &lock)) {
2298				sva += L3_SIZE;
2299				break;
2300			}
2301		}
2302		if (va != va_next)
2303			pmap_invalidate_range(pmap, va, sva);
2304	}
2305	if (lock != NULL)
2306		rw_wunlock(lock);
2307	if (anyvalid)
2308		pmap_invalidate_all(pmap);
2309	PMAP_UNLOCK(pmap);
2310	pmap_free_zero_pages(&free);
2311}
2312
2313/*
2314 *	Routine:	pmap_remove_all
2315 *	Function:
2316 *		Removes this physical page from
2317 *		all physical maps in which it resides.
2318 *		Reflects back modify bits to the pager.
2319 *
2320 *	Notes:
2321 *		Original versions of this routine were very
2322 *		inefficient because they iteratively called
2323 *		pmap_remove (slow...)
2324 */
2325
2326void
2327pmap_remove_all(vm_page_t m)
2328{
2329	struct md_page *pvh;
2330	pv_entry_t pv;
2331	pmap_t pmap;
2332	struct rwlock *lock;
2333	pd_entry_t *pde, tpde;
2334	pt_entry_t *pte, tpte;
2335	vm_offset_t va;
2336	struct spglist free;
2337	int lvl, pvh_gen, md_gen;
2338
2339	KASSERT((m->oflags & VPO_UNMANAGED) == 0,
2340	    ("pmap_remove_all: page %p is not managed", m));
2341	SLIST_INIT(&free);
2342	lock = VM_PAGE_TO_PV_LIST_LOCK(m);
2343	pvh = (m->flags & PG_FICTITIOUS) != 0 ? &pv_dummy :
2344	    pa_to_pvh(VM_PAGE_TO_PHYS(m));
2345retry:
2346	rw_wlock(lock);
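	/*
	 * First demote any 2MB mappings of the page so that only 4KB
	 * mappings remain to be removed below.
	 */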
2347	while ((pv = TAILQ_FIRST(&pvh->pv_list)) != NULL) {
2348		pmap = PV_PMAP(pv);
2349		if (!PMAP_TRYLOCK(pmap)) {
2350			pvh_gen = pvh->pv_gen;
2351			rw_wunlock(lock);
2352			PMAP_LOCK(pmap);
2353			rw_wlock(lock);
2354			if (pvh_gen != pvh->pv_gen) {
2355				rw_wunlock(lock);
2356				PMAP_UNLOCK(pmap);
2357				goto retry;
2358			}
2359		}
2360		va = pv->pv_va;
2361		pte = pmap_pte(pmap, va, &lvl);
2362		KASSERT(pte != NULL,
2363		    ("pmap_remove_all: no page table entry found"));
2364		KASSERT(lvl == 2,
2365		    ("pmap_remove_all: invalid pte level %d", lvl));
2366
2367		pmap_demote_l2_locked(pmap, pte, va, &lock);
2368		PMAP_UNLOCK(pmap);
2369	}
2370	while ((pv = TAILQ_FIRST(&m->md.pv_list)) != NULL) {
2371		pmap = PV_PMAP(pv);
2372		if (!PMAP_TRYLOCK(pmap)) {
2373			pvh_gen = pvh->pv_gen;
2374			md_gen = m->md.pv_gen;
2375			rw_wunlock(lock);
2376			PMAP_LOCK(pmap);
2377			rw_wlock(lock);
2378			if (pvh_gen != pvh->pv_gen || md_gen != m->md.pv_gen) {
2379				rw_wunlock(lock);
2380				PMAP_UNLOCK(pmap);
2381				goto retry;
2382			}
2383		}
2384		pmap_resident_count_dec(pmap, 1);
2385
2386		pde = pmap_pde(pmap, pv->pv_va, &lvl);
2387		KASSERT(pde != NULL,
2388		    ("pmap_remove_all: no page directory entry found"));
2389		KASSERT(lvl == 2,
2390		    ("pmap_remove_all: invalid pde level %d", lvl));
2391		tpde = pmap_load(pde);
2392
2393		pte = pmap_l2_to_l3(pde, pv->pv_va);
2394		tpte = pmap_load(pte);
2395		if (pmap_is_current(pmap) &&
2396		    pmap_l3_valid_cacheable(tpte))
2397			cpu_dcache_wb_range(pv->pv_va, L3_SIZE);
2398		pmap_load_clear(pte);
2399		PTE_SYNC(pte);
2400		pmap_invalidate_page(pmap, pv->pv_va);
2401		if (tpte & ATTR_SW_WIRED)
2402			pmap->pm_stats.wired_count--;
2403		if ((tpte & ATTR_AF) != 0)
2404			vm_page_aflag_set(m, PGA_REFERENCED);
2405
2406		/*
2407		 * Update the vm_page_t clean and reference bits.
2408		 */
2409		if (pmap_page_dirty(tpte))
2410			vm_page_dirty(m);
2411		pmap_unuse_l3(pmap, pv->pv_va, tpde, &free);
2412		TAILQ_REMOVE(&m->md.pv_list, pv, pv_next);
2413		m->md.pv_gen++;
2414		free_pv_entry(pmap, pv);
2415		PMAP_UNLOCK(pmap);
2416	}
2417	vm_page_aflag_clear(m, PGA_WRITEABLE);
2418	rw_wunlock(lock);
2419	pmap_free_zero_pages(&free);
2420}
2421
2422/*
2423 *	Set the physical protection on the
2424 *	specified range of this map as requested.
2425 */
2426void
2427pmap_protect(pmap_t pmap, vm_offset_t sva, vm_offset_t eva, vm_prot_t prot)
2428{
2429	vm_offset_t va, va_next;
2430	pd_entry_t *l0, *l1, *l2;
2431	pt_entry_t *l3p, l3;
2432
2433	if ((prot & VM_PROT_READ) == VM_PROT_NONE) {
2434		pmap_remove(pmap, sva, eva);
2435		return;
2436	}
2437
2438	if ((prot & VM_PROT_WRITE) == VM_PROT_WRITE)
2439		return;
2440
2441	PMAP_LOCK(pmap);
2442	for (; sva < eva; sva = va_next) {
2443
2444		l0 = pmap_l0(pmap, sva);
2445		if (pmap_load(l0) == 0) {
2446			va_next = (sva + L0_SIZE) & ~L0_OFFSET;
2447			if (va_next < sva)
2448				va_next = eva;
2449			continue;
2450		}
2451
2452		l1 = pmap_l0_to_l1(l0, sva);
2453		if (pmap_load(l1) == 0) {
2454			va_next = (sva + L1_SIZE) & ~L1_OFFSET;
2455			if (va_next < sva)
2456				va_next = eva;
2457			continue;
2458		}
2459
2460		va_next = (sva + L2_SIZE) & ~L2_OFFSET;
2461		if (va_next < sva)
2462			va_next = eva;
2463
2464		l2 = pmap_l1_to_l2(l1, sva);
2465		if (pmap_load(l2) == 0)
2466			continue;
2467
2468		if ((pmap_load(l2) & ATTR_DESCR_MASK) == L2_BLOCK) {
2469			l3p = pmap_demote_l2(pmap, l2, sva);
2470			if (l3p == NULL)
2471				continue;
2472		}
2473		KASSERT((pmap_load(l2) & ATTR_DESCR_MASK) == L2_TABLE,
2474		    ("pmap_protect: Invalid L2 entry after demotion"));
2475
2476		if (va_next > eva)
2477			va_next = eva;
2478
2479		va = va_next;
2480		for (l3p = pmap_l2_to_l3(l2, sva); sva != va_next; l3p++,
2481		    sva += L3_SIZE) {
2482			l3 = pmap_load(l3p);
2483			if (pmap_l3_valid(l3)) {
2484				pmap_set(l3p, ATTR_AP(ATTR_AP_RO));
2485				PTE_SYNC(l3p);
2486				/* XXX: Use pmap_invalidate_range */
2487				pmap_invalidate_page(pmap, sva);
2488			}
2489		}
2490	}
2491	PMAP_UNLOCK(pmap);
2492
2493	/* TODO: Only invalidate entries we are touching */
2494	pmap_invalidate_all(pmap);
2495}
2496
2497/*
2498 * Inserts the specified page table page into the specified pmap's collection
2499 * of idle page table pages.  Each of a pmap's page table pages is responsible
2500 * for mapping a distinct range of virtual addresses.  The pmap's collection is
2501 * ordered by this virtual address range.
2502 */
2503static __inline int
2504pmap_insert_pt_page(pmap_t pmap, vm_page_t mpte)
2505{
2506
2507	PMAP_LOCK_ASSERT(pmap, MA_OWNED);
2508	return (vm_radix_insert(&pmap->pm_root, mpte));
2509}
2510
2511/*
2512 * Looks for a page table page mapping the specified virtual address in the
2513 * specified pmap's collection of idle page table pages.  Returns NULL if there
2514 * is no page table page corresponding to the specified virtual address.
2515 */
2516static __inline vm_page_t
2517pmap_lookup_pt_page(pmap_t pmap, vm_offset_t va)
2518{
2519
2520	PMAP_LOCK_ASSERT(pmap, MA_OWNED);
2521	return (vm_radix_lookup(&pmap->pm_root, pmap_l2_pindex(va)));
2522}
2523
2524/*
2525 * Removes the specified page table page from the specified pmap's collection
2526 * of idle page table pages.  The specified page table page must be a member of
2527 * the pmap's collection.
2528 */
2529static __inline void
2530pmap_remove_pt_page(pmap_t pmap, vm_page_t mpte)
2531{
2532
2533	PMAP_LOCK_ASSERT(pmap, MA_OWNED);
2534	vm_radix_remove(&pmap->pm_root, mpte->pindex);
2535}
2536
2537/*
2538 * Performs a break-before-make update of a pmap entry. This is needed when
2539 * either promoting or demoting pages to ensure the TLB doesn't get into an
2540 * inconsistent state.
2541 */
2542static void
2543pmap_update_entry(pmap_t pmap, pd_entry_t *pte, pd_entry_t newpte,
2544    vm_offset_t va, vm_size_t size)
2545{
2546	register_t intr;
2547
2548	PMAP_LOCK_ASSERT(pmap, MA_OWNED);
2549
2550	/*
2551	 * Ensure we don't get switched out with the page table in an
2552	 * inconsistent state. We also need to ensure no interrupts fire
2553	 * as they may make use of an address we are about to invalidate.
2554	 */
2555	intr = intr_disable();
2556	critical_enter();
2557
2558	/* Clear the old mapping */
2559	pmap_load_clear(pte);
2560	PTE_SYNC(pte);
2561	pmap_invalidate_range(pmap, va, va + size);
2562
2563	/* Create the new mapping */
2564	pmap_load_store(pte, newpte);
2565	PTE_SYNC(pte);
2566
2567	critical_exit();
2568	intr_restore(intr);
2569}
2570
2571/*
2572 * After promotion from 512 4KB page mappings to a single 2MB page mapping,
2573 * replace the many pv entries for the 4KB page mappings by a single pv entry
2574 * for the 2MB page mapping.
2575 */
2576static void
2577pmap_pv_promote_l2(pmap_t pmap, vm_offset_t va, vm_paddr_t pa,
2578    struct rwlock **lockp)
2579{
2580	struct md_page *pvh;
2581	pv_entry_t pv;
2582	vm_offset_t va_last;
2583	vm_page_t m;
2584
2585	KASSERT((pa & L2_OFFSET) == 0,
2586	    ("pmap_pv_promote_l2: pa is not 2mpage aligned"));
2587	CHANGE_PV_LIST_LOCK_TO_PHYS(lockp, pa);
2588
2589	/*
2590	 * Transfer the first page's pv entry for this mapping to the 2mpage's
2591	 * pv list.  Aside from avoiding the cost of a call to get_pv_entry(),
2592	 * a transfer avoids the possibility that get_pv_entry() calls
2593	 * reclaim_pv_chunk() and that reclaim_pv_chunk() removes one of the
2594	 * mappings that is being promoted.
2595	 */
2596	m = PHYS_TO_VM_PAGE(pa);
2597	va = va & ~L2_OFFSET;
2598	pv = pmap_pvh_remove(&m->md, pmap, va);
2599	KASSERT(pv != NULL, ("pmap_pv_promote_l2: pv not found"));
2600	pvh = pa_to_pvh(pa);
2601	TAILQ_INSERT_TAIL(&pvh->pv_list, pv, pv_next);
2602	pvh->pv_gen++;
2603	/* Free the remaining NPTEPG - 1 pv entries. */
2604	va_last = va + L2_SIZE - PAGE_SIZE;
2605	do {
2606		m++;
2607		va += PAGE_SIZE;
2608		pmap_pvh_free(&m->md, pmap, va);
2609	} while (va < va_last);
2610}
2611
2612/*
2613 * Tries to promote the 512, contiguous 4KB page mappings that are within a
2614 * single level 2 table entry to a single 2MB page mapping.  For promotion
2615 * to occur, two conditions must be met: (1) the 4KB page mappings must map
2616 * aligned, contiguous physical memory and (2) the 4KB page mappings must have
2617 * identical characteristics.
2618 */
2619static void
2620pmap_promote_l2(pmap_t pmap, pd_entry_t *l2, vm_offset_t va,
2621    struct rwlock **lockp)
2622{
2623	pt_entry_t *firstl3, *l3, newl2, oldl3, pa;
2624	vm_page_t mpte;
2625	vm_offset_t sva;
2626
2627	PMAP_LOCK_ASSERT(pmap, MA_OWNED);
2628
2629	sva = va & ~L2_OFFSET;
2630	firstl3 = pmap_l2_to_l3(l2, sva);
2631	newl2 = pmap_load(firstl3);
2632
2633	/* Check that the alignment is valid */
2634	if (((newl2 & ~ATTR_MASK) & L2_OFFSET) != 0) {
2635		atomic_add_long(&pmap_l2_p_failures, 1);
2636		CTR2(KTR_PMAP, "pmap_promote_l2: failure for va %#lx"
2637		    " in pmap %p", va, pmap);
2638		return;
2639	}
2640
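	/*
	 * Walk the remaining L3 entries from last to first.  Since "pa"
	 * carries the first entry's attribute bits, a single comparison per
	 * entry checks both physical contiguity and identical attributes.
	 */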
2641	pa = newl2 + L2_SIZE - PAGE_SIZE;
2642	for (l3 = firstl3 + NL3PG - 1; l3 > firstl3; l3--) {
2643		oldl3 = pmap_load(l3);
2644		if (oldl3 != pa) {
2645			atomic_add_long(&pmap_l2_p_failures, 1);
2646			CTR2(KTR_PMAP, "pmap_promote_l2: failure for va %#lx"
2647			    " in pmap %p", va, pmap);
2648			return;
2649		}
2650		pa -= PAGE_SIZE;
2651	}
2652
2653	/*
2654	 * Save the page table page in its current state until the L2
2655	 * mapping the superpage is demoted by pmap_demote_l2() or
2656	 * destroyed by pmap_remove_l3().
2657	 */
2658	mpte = PHYS_TO_VM_PAGE(pmap_load(l2) & ~ATTR_MASK);
2659	KASSERT(mpte >= vm_page_array &&
2660	    mpte < &vm_page_array[vm_page_array_size],
2661	    ("pmap_promote_l2: page table page is out of range"));
2662	KASSERT(mpte->pindex == pmap_l2_pindex(va),
2663	    ("pmap_promote_l2: page table page's pindex is wrong"));
2664	if (pmap_insert_pt_page(pmap, mpte)) {
2665		atomic_add_long(&pmap_l2_p_failures, 1);
2666		CTR2(KTR_PMAP,
2667		    "pmap_promote_l2: failure for va %#lx in pmap %p", va,
2668		    pmap);
2669		return;
2670	}
2671
2672	if ((newl2 & ATTR_SW_MANAGED) != 0)
2673		pmap_pv_promote_l2(pmap, va, newl2 & ~ATTR_MASK, lockp);
2674
2675	newl2 &= ~ATTR_DESCR_MASK;
2676	newl2 |= L2_BLOCK;
2677
2678	pmap_update_entry(pmap, l2, newl2, sva, L2_SIZE);
2679
2680	atomic_add_long(&pmap_l2_promotions, 1);
2681	CTR2(KTR_PMAP, "pmap_promote_l2: success for va %#lx in pmap %p", va,
2682	    pmap);
2683}
2684
2685/*
2686 *	Insert the given physical page (p) at
2687 *	the specified virtual address (v) in the
2688 *	target physical map with the protection requested.
2689 *
2690 *	If specified, the page will be wired down, meaning
2691 *	that the related pte can not be reclaimed.
2692 *
2693 *	NB:  This is the only routine which MAY NOT lazy-evaluate
2694 *	or lose information.  That is, this routine must actually
2695 *	insert this page into the given map NOW.
2696 */
2697int
2698pmap_enter(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot,
2699    u_int flags, int8_t psind __unused)
2700{
2701	struct rwlock *lock;
2702	pd_entry_t *pde;
2703	pt_entry_t new_l3, orig_l3;
2704	pt_entry_t *l2, *l3;
2705	pv_entry_t pv;
2706	vm_paddr_t opa, pa, l1_pa, l2_pa, l3_pa;
2707	vm_page_t mpte, om, l1_m, l2_m, l3_m;
2708	boolean_t nosleep;
2709	int lvl;
2710
2711	va = trunc_page(va);
2712	if ((m->oflags & VPO_UNMANAGED) == 0 && !vm_page_xbusied(m))
2713		VM_OBJECT_ASSERT_LOCKED(m->object);
2714	pa = VM_PAGE_TO_PHYS(m);
2715	new_l3 = (pt_entry_t)(pa | ATTR_DEFAULT | ATTR_IDX(m->md.pv_memattr) |
2716	    L3_PAGE);
2717	if ((prot & VM_PROT_WRITE) == 0)
2718		new_l3 |= ATTR_AP(ATTR_AP_RO);
2719	if ((flags & PMAP_ENTER_WIRED) != 0)
2720		new_l3 |= ATTR_SW_WIRED;
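	/* Lower-half (TTBR0) virtual addresses are user mappings. */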
2721	if ((va >> 63) == 0)
2722		new_l3 |= ATTR_AP(ATTR_AP_USER);
2723
2724	CTR2(KTR_PMAP, "pmap_enter: %.16lx -> %.16lx", va, pa);
2725
2726	mpte = NULL;
2727
2728	lock = NULL;
2729	PMAP_LOCK(pmap);
2730
2731	pde = pmap_pde(pmap, va, &lvl);
2732	if (pde != NULL && lvl == 1) {
2733		l2 = pmap_l1_to_l2(pde, va);
2734		if ((pmap_load(l2) & ATTR_DESCR_MASK) == L2_BLOCK &&
2735		    (l3 = pmap_demote_l2_locked(pmap, l2, va & ~L2_OFFSET,
2736		    &lock)) != NULL) {
2737			l3 = &l3[pmap_l3_index(va)];
2738			if (va < VM_MAXUSER_ADDRESS) {
2739				mpte = PHYS_TO_VM_PAGE(
2740				    pmap_load(l2) & ~ATTR_MASK);
2741				mpte->wire_count++;
2742			}
2743			goto havel3;
2744		}
2745	}
2746
2747	if (va < VM_MAXUSER_ADDRESS) {
2748		nosleep = (flags & PMAP_ENTER_NOSLEEP) != 0;
2749		mpte = pmap_alloc_l3(pmap, va, nosleep ? NULL : &lock);
2750		if (mpte == NULL && nosleep) {
2751			CTR0(KTR_PMAP, "pmap_enter: mpte == NULL");
2752			if (lock != NULL)
2753				rw_wunlock(lock);
2754			PMAP_UNLOCK(pmap);
2755			return (KERN_RESOURCE_SHORTAGE);
2756		}
2757		pde = pmap_pde(pmap, va, &lvl);
2758		KASSERT(pde != NULL,
2759		    ("pmap_enter: Invalid page entry, va: 0x%lx", va));
2760		KASSERT(lvl == 2,
2761		    ("pmap_enter: Invalid level %d", lvl));
2762
2763		l3 = pmap_l2_to_l3(pde, va);
2764	} else {
2765		/*
2766		 * We need a level 2 pde that points to a level 3 table;
2767		 * otherwise, we must create the missing intermediate tables.
2768		 */
2769		if (lvl < 2) {
2770			switch(lvl) {
2771			default:
2772			case -1:
2773				/* Get the l0 pde to update */
2774				pde = pmap_l0(pmap, va);
2775				KASSERT(pde != NULL, ("..."));
2776
2777				l1_m = vm_page_alloc(NULL, 0, VM_ALLOC_NORMAL |
2778				    VM_ALLOC_NOOBJ | VM_ALLOC_WIRED |
2779				    VM_ALLOC_ZERO);
2780				if (l1_m == NULL)
2781					panic("pmap_enter: l1 pte_m == NULL");
2782				if ((l1_m->flags & PG_ZERO) == 0)
2783					pmap_zero_page(l1_m);
2784
2785				l1_pa = VM_PAGE_TO_PHYS(l1_m);
2786				pmap_load_store(pde, l1_pa | L0_TABLE);
2787				PTE_SYNC(pde);
2788				/* FALLTHROUGH */
2789			case 0:
2790				/* Get the l1 pde to update */
2791				pde = pmap_l1_to_l2(pde, va);
2792				KASSERT(pde != NULL, ("..."));
2793
2794				l2_m = vm_page_alloc(NULL, 0, VM_ALLOC_NORMAL |
2795				    VM_ALLOC_NOOBJ | VM_ALLOC_WIRED |
2796				    VM_ALLOC_ZERO);
2797				if (l2_m == NULL)
2798					panic("pmap_enter: l2 pte_m == NULL");
2799				if ((l2_m->flags & PG_ZERO) == 0)
2800					pmap_zero_page(l2_m);
2801
2802				l2_pa = VM_PAGE_TO_PHYS(l2_m);
2803				pmap_load_store(pde, l2_pa | L1_TABLE);
2804				PTE_SYNC(pde);
2805				/* FALLTHROUGH */
2806			case 1:
2807				/* Get the l2 pde to update */
2808				pde = pmap_l1_to_l2(pde, va);
2809
2810				l3_m = vm_page_alloc(NULL, 0, VM_ALLOC_NORMAL |
2811				    VM_ALLOC_NOOBJ | VM_ALLOC_WIRED |
2812				    VM_ALLOC_ZERO);
2813				if (l3_m == NULL)
2814					panic("pmap_enter: l3 pte_m == NULL");
2815				if ((l3_m->flags & PG_ZERO) == 0)
2816					pmap_zero_page(l3_m);
2817
2818				l3_pa = VM_PAGE_TO_PHYS(l3_m);
2819				pmap_load_store(pde, l3_pa | L2_TABLE);
2820				PTE_SYNC(pde);
2821				break;
2822			}
2823		}
2824		l3 = pmap_l2_to_l3(pde, va);
2825		pmap_invalidate_page(pmap, va);
2826	}
2827havel3:
2828
2829	om = NULL;
2830	orig_l3 = pmap_load(l3);
2831	opa = orig_l3 & ~ATTR_MASK;
2832
2833	/*
2834	 * Is the specified virtual address already mapped?
2835	 */
2836	if (pmap_l3_valid(orig_l3)) {
2837		/*
2838		 * Wiring change, just update stats. We don't worry about
2839		 * wiring PT pages as they remain resident as long as there
2840		 * are valid mappings in them. Hence, if a user page is wired,
2841		 * the PT page will be also.
2842		 */
2843		if ((flags & PMAP_ENTER_WIRED) != 0 &&
2844		    (orig_l3 & ATTR_SW_WIRED) == 0)
2845			pmap->pm_stats.wired_count++;
2846		else if ((flags & PMAP_ENTER_WIRED) == 0 &&
2847		    (orig_l3 & ATTR_SW_WIRED) != 0)
2848			pmap->pm_stats.wired_count--;
2849
2850		/*
2851		 * Remove the extra PT page reference.
2852		 */
2853		if (mpte != NULL) {
2854			mpte->wire_count--;
2855			KASSERT(mpte->wire_count > 0,
2856			    ("pmap_enter: missing reference to page table page,"
2857			     " va: 0x%lx", va));
2858		}
2859
2860		/*
2861		 * Has the physical page changed?
2862		 */
2863		if (opa == pa) {
2864			/*
2865			 * No, might be a protection or wiring change.
2866			 */
2867			if ((orig_l3 & ATTR_SW_MANAGED) != 0) {
2868				new_l3 |= ATTR_SW_MANAGED;
2869				if ((new_l3 & ATTR_AP(ATTR_AP_RW)) ==
2870				    ATTR_AP(ATTR_AP_RW)) {
2871					vm_page_aflag_set(m, PGA_WRITEABLE);
2872				}
2873			}
2874			goto validate;
2875		}
2876
2877		/* Flush the cache, there might be uncommitted data in it */
2878		if (pmap_is_current(pmap) && pmap_l3_valid_cacheable(orig_l3))
2879			cpu_dcache_wb_range(va, L3_SIZE);
2880	} else {
2881		/*
2882		 * Increment the counters.
2883		 */
2884		if ((new_l3 & ATTR_SW_WIRED) != 0)
2885			pmap->pm_stats.wired_count++;
2886		pmap_resident_count_inc(pmap, 1);
2887	}
2888	/*
2889	 * Enter on the PV list if part of our managed memory.
2890	 */
2891	if ((m->oflags & VPO_UNMANAGED) == 0) {
2892		new_l3 |= ATTR_SW_MANAGED;
2893		pv = get_pv_entry(pmap, &lock);
2894		pv->pv_va = va;
2895		CHANGE_PV_LIST_LOCK_TO_PHYS(&lock, pa);
2896		TAILQ_INSERT_TAIL(&m->md.pv_list, pv, pv_next);
2897		m->md.pv_gen++;
2898		if ((new_l3 & ATTR_AP_RW_BIT) == ATTR_AP(ATTR_AP_RW))
2899			vm_page_aflag_set(m, PGA_WRITEABLE);
2900	}
2901
2902	/*
2903	 * Update the L3 entry.
2904	 */
2905	if (orig_l3 != 0) {
2906validate:
2907		orig_l3 = pmap_load(l3);
2908		opa = orig_l3 & ~ATTR_MASK;
2909
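		/*
		 * If the mapping now refers to a different physical page,
		 * perform a break-before-make update; otherwise the new
		 * attributes can be stored directly.
		 */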
2910		if (opa != pa) {
2911			pmap_update_entry(pmap, l3, new_l3, va, PAGE_SIZE);
2912			if ((orig_l3 & ATTR_SW_MANAGED) != 0) {
2913				om = PHYS_TO_VM_PAGE(opa);
2914				if (pmap_page_dirty(orig_l3))
2915					vm_page_dirty(om);
2916				if ((orig_l3 & ATTR_AF) != 0)
2917					vm_page_aflag_set(om, PGA_REFERENCED);
2918				CHANGE_PV_LIST_LOCK_TO_PHYS(&lock, opa);
2919				pmap_pvh_free(&om->md, pmap, va);
2920				if ((om->aflags & PGA_WRITEABLE) != 0 &&
2921				    TAILQ_EMPTY(&om->md.pv_list) &&
2922				    ((om->flags & PG_FICTITIOUS) != 0 ||
2923				    TAILQ_EMPTY(&pa_to_pvh(opa)->pv_list)))
2924					vm_page_aflag_clear(om, PGA_WRITEABLE);
2925			}
2926		} else {
2927			pmap_load_store(l3, new_l3);
2928			PTE_SYNC(l3);
2929			pmap_invalidate_page(pmap, va);
2930			if (pmap_page_dirty(orig_l3) &&
2931			    (orig_l3 & ATTR_SW_MANAGED) != 0)
2932				vm_page_dirty(m);
2933		}
2934	} else {
2935		pmap_load_store(l3, new_l3);
2936	}
2937
2938	PTE_SYNC(l3);
2939	pmap_invalidate_page(pmap, va);
2940
2941	if (pmap != pmap_kernel()) {
2942		if (pmap == &curproc->p_vmspace->vm_pmap)
2943		    cpu_icache_sync_range(va, PAGE_SIZE);
2944
2945		if ((mpte == NULL || mpte->wire_count == NL3PG) &&
2946		    pmap_superpages_enabled() &&
2947		    (m->flags & PG_FICTITIOUS) == 0 &&
2948		    vm_reserv_level_iffullpop(m) == 0) {
2949			pmap_promote_l2(pmap, pde, va, &lock);
2950		}
2951	}
2952
2953	if (lock != NULL)
2954		rw_wunlock(lock);
2955	PMAP_UNLOCK(pmap);
2956	return (KERN_SUCCESS);
2957}
2958
2959/*
2960 * Maps a sequence of resident pages belonging to the same object.
2961 * The sequence begins with the given page m_start.  This page is
2962 * mapped at the given virtual address start.  Each subsequent page is
2963 * mapped at a virtual address that is offset from start by the same
2964 * amount as the page is offset from m_start within the object.  The
2965 * last page in the sequence is the page with the largest offset from
2966 * m_start that can be mapped at a virtual address less than the given
2967 * virtual address end.  Not every virtual page between start and end
2968 * is mapped; only those for which a resident page exists with the
2969 * corresponding offset from m_start are mapped.
2970 */
2971void
2972pmap_enter_object(pmap_t pmap, vm_offset_t start, vm_offset_t end,
2973    vm_page_t m_start, vm_prot_t prot)
2974{
2975	struct rwlock *lock;
2976	vm_offset_t va;
2977	vm_page_t m, mpte;
2978	vm_pindex_t diff, psize;
2979
2980	VM_OBJECT_ASSERT_LOCKED(m_start->object);
2981
2982	psize = atop(end - start);
2983	mpte = NULL;
2984	m = m_start;
2985	lock = NULL;
2986	PMAP_LOCK(pmap);
2987	while (m != NULL && (diff = m->pindex - m_start->pindex) < psize) {
2988		va = start + ptoa(diff);
2989		mpte = pmap_enter_quick_locked(pmap, va, m, prot, mpte, &lock);
2990		m = TAILQ_NEXT(m, listq);
2991	}
2992	if (lock != NULL)
2993		rw_wunlock(lock);
2994	PMAP_UNLOCK(pmap);
2995}
2996
2997/*
2998 * this code makes some *MAJOR* assumptions:
2999 * 1. Current pmap & pmap exists.
3000 * 2. Not wired.
3001 * 3. Read access.
3002 * 4. No page table pages.
3003 * but is *MUCH* faster than pmap_enter...
3004 */
3005
3006void
3007pmap_enter_quick(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot)
3008{
3009	struct rwlock *lock;
3010
3011	lock = NULL;
3012	PMAP_LOCK(pmap);
3013	(void)pmap_enter_quick_locked(pmap, va, m, prot, NULL, &lock);
3014	if (lock != NULL)
3015		rw_wunlock(lock);
3016	PMAP_UNLOCK(pmap);
3017}
3018
3019static vm_page_t
3020pmap_enter_quick_locked(pmap_t pmap, vm_offset_t va, vm_page_t m,
3021    vm_prot_t prot, vm_page_t mpte, struct rwlock **lockp)
3022{
3023	struct spglist free;
3024	pd_entry_t *pde;
3025	pt_entry_t *l2, *l3;
3026	vm_paddr_t pa;
3027	int lvl;
3028
3029	KASSERT(va < kmi.clean_sva || va >= kmi.clean_eva ||
3030	    (m->oflags & VPO_UNMANAGED) != 0,
3031	    ("pmap_enter_quick_locked: managed mapping within the clean submap"));
3032	PMAP_LOCK_ASSERT(pmap, MA_OWNED);
3033
3034	CTR2(KTR_PMAP, "pmap_enter_quick_locked: %p %lx", pmap, va);
3035	/*
3036	 * In the case that a page table page is not
3037	 * resident, we are creating it here.
3038	 */
3039	if (va < VM_MAXUSER_ADDRESS) {
3040		vm_pindex_t l2pindex;
3041
3042		/*
3043		 * Calculate pagetable page index
3044		 */
3045		l2pindex = pmap_l2_pindex(va);
3046		if (mpte && (mpte->pindex == l2pindex)) {
3047			mpte->wire_count++;
3048		} else {
3049			/*
3050			 * Get the l2 entry
3051			 */
3052			pde = pmap_pde(pmap, va, &lvl);
3053
3054			/*
3055			 * If the page table page is mapped, we just increment
3056			 * the hold count, and activate it.  Otherwise, we
3057			 * attempt to allocate a page table page.  If this
3058			 * attempt fails, we don't retry.  Instead, we give up.
3059			 */
3060			if (lvl == 1) {
3061				l2 = pmap_l1_to_l2(pde, va);
3062				if ((pmap_load(l2) & ATTR_DESCR_MASK) ==
3063				    L2_BLOCK)
3064					return (NULL);
3065			}
3066			if (lvl == 2 && pmap_load(pde) != 0) {
3067				mpte =
3068				    PHYS_TO_VM_PAGE(pmap_load(pde) & ~ATTR_MASK);
3069				mpte->wire_count++;
3070			} else {
3071				/*
3072				 * Pass NULL instead of the PV list lock
3073				 * pointer, because we don't intend to sleep.
3074				 */
3075				mpte = _pmap_alloc_l3(pmap, l2pindex, NULL);
3076				if (mpte == NULL)
3077					return (mpte);
3078			}
3079		}
3080		l3 = (pt_entry_t *)PHYS_TO_DMAP(VM_PAGE_TO_PHYS(mpte));
3081		l3 = &l3[pmap_l3_index(va)];
3082	} else {
3083		mpte = NULL;
3084		pde = pmap_pde(kernel_pmap, va, &lvl);
3085		KASSERT(pde != NULL,
3086		    ("pmap_enter_quick_locked: Invalid page entry, va: 0x%lx",
3087		     va));
3088		KASSERT(lvl == 2,
3089		    ("pmap_enter_quick_locked: Invalid level %d", lvl));
3090		l3 = pmap_l2_to_l3(pde, va);
3091	}
3092
3093	if (pmap_load(l3) != 0) {
3094		if (mpte != NULL) {
3095			mpte->wire_count--;
3096			mpte = NULL;
3097		}
3098		return (mpte);
3099	}
3100
3101	/*
3102	 * Enter on the PV list if part of our managed memory.
3103	 */
3104	if ((m->oflags & VPO_UNMANAGED) == 0 &&
3105	    !pmap_try_insert_pv_entry(pmap, va, m, lockp)) {
3106		if (mpte != NULL) {
3107			SLIST_INIT(&free);
3108			if (pmap_unwire_l3(pmap, va, mpte, &free)) {
3109				pmap_invalidate_page(pmap, va);
3110				pmap_free_zero_pages(&free);
3111			}
3112			mpte = NULL;
3113		}
3114		return (mpte);
3115	}
3116
3117	/*
3118	 * Increment counters
3119	 */
3120	pmap_resident_count_inc(pmap, 1);
3121
3122	pa = VM_PAGE_TO_PHYS(m) | ATTR_DEFAULT | ATTR_IDX(m->md.pv_memattr) |
3123	    ATTR_AP(ATTR_AP_RO) | L3_PAGE;
3124
3125	/*
3126	 * Now validate mapping with RO protection
3127	 */
3128	if ((m->oflags & VPO_UNMANAGED) == 0)
3129		pa |= ATTR_SW_MANAGED;
3130	pmap_load_store(l3, pa);
3131	PTE_SYNC(l3);
3132	pmap_invalidate_page(pmap, va);
3133	return (mpte);
3134}
3135
3136/*
3137 * This code maps large physical mmap regions into the
3138 * processor address space.  Note that some shortcuts
3139 * are taken, but the code works.
3140 */
3141void
3142pmap_object_init_pt(pmap_t pmap, vm_offset_t addr, vm_object_t object,
3143    vm_pindex_t pindex, vm_size_t size)
3144{
3145
3146	VM_OBJECT_ASSERT_WLOCKED(object);
3147	KASSERT(object->type == OBJT_DEVICE || object->type == OBJT_SG,
3148	    ("pmap_object_init_pt: non-device object"));
3149}
3150
3151/*
3152 *	Clear the wired attribute from the mappings for the specified range of
3153 *	addresses in the given pmap.  Every valid mapping within that range
3154 *	must have the wired attribute set.  In contrast, invalid mappings
3155 *	cannot have the wired attribute set, so they are ignored.
3156 *
3157 *	The wired attribute of the page table entry is not a hardware feature,
3158 *	so there is no need to invalidate any TLB entries.
3159 */
3160void
3161pmap_unwire(pmap_t pmap, vm_offset_t sva, vm_offset_t eva)
3162{
3163	vm_offset_t va_next;
3164	pd_entry_t *l0, *l1, *l2;
3165	pt_entry_t *l3;
3166
3167	PMAP_LOCK(pmap);
3168	for (; sva < eva; sva = va_next) {
3169		l0 = pmap_l0(pmap, sva);
3170		if (pmap_load(l0) == 0) {
3171			va_next = (sva + L0_SIZE) & ~L0_OFFSET;
3172			if (va_next < sva)
3173				va_next = eva;
3174			continue;
3175		}
3176
3177		l1 = pmap_l0_to_l1(l0, sva);
3178		if (pmap_load(l1) == 0) {
3179			va_next = (sva + L1_SIZE) & ~L1_OFFSET;
3180			if (va_next < sva)
3181				va_next = eva;
3182			continue;
3183		}
3184
3185		va_next = (sva + L2_SIZE) & ~L2_OFFSET;
3186		if (va_next < sva)
3187			va_next = eva;
3188
3189		l2 = pmap_l1_to_l2(l1, sva);
3190		if (pmap_load(l2) == 0)
3191			continue;
3192
3193		if ((pmap_load(l2) & ATTR_DESCR_MASK) == L2_BLOCK) {
3194			l3 = pmap_demote_l2(pmap, l2, sva);
3195			if (l3 == NULL)
3196				continue;
3197		}
3198		KASSERT((pmap_load(l2) & ATTR_DESCR_MASK) == L2_TABLE,
3199		    ("pmap_unwire: Invalid l2 entry after demotion"));
3200
3201		if (va_next > eva)
3202			va_next = eva;
3203		for (l3 = pmap_l2_to_l3(l2, sva); sva != va_next; l3++,
3204		    sva += L3_SIZE) {
3205			if (pmap_load(l3) == 0)
3206				continue;
3207			if ((pmap_load(l3) & ATTR_SW_WIRED) == 0)
3208				panic("pmap_unwire: l3 %#jx is missing "
3209				    "ATTR_SW_WIRED", (uintmax_t)pmap_load(l3));
3210
3211			/*
3212			 * ATTR_SW_WIRED must be cleared atomically.  Although
3213			 * the pmap lock synchronizes access to it, another CPU
3214			 * could be setting the AF or dirty state concurrently.
3215			 */
3216			atomic_clear_long(l3, ATTR_SW_WIRED);
3217			pmap->pm_stats.wired_count--;
3218		}
3219	}
3220	PMAP_UNLOCK(pmap);
3221}
3222
3223/*
3224 *	Copy the range specified by src_addr/len
3225 *	from the source map to the range dst_addr/len
3226 *	in the destination map.
3227 *
3228 *	This routine is only advisory and need not do anything.
3229 */
3230
3231void
3232pmap_copy(pmap_t dst_pmap, pmap_t src_pmap, vm_offset_t dst_addr, vm_size_t len,
3233    vm_offset_t src_addr)
3234{
3235}
3236
3237/*
3238 *	pmap_zero_page zeros the specified hardware page by mapping
3239 *	the page into KVM and using bzero to clear its contents.
3240 */
3241void
3242pmap_zero_page(vm_page_t m)
3243{
3244	vm_offset_t va = PHYS_TO_DMAP(VM_PAGE_TO_PHYS(m));
3245
3246	pagezero((void *)va);
3247}
3248
3249/*
3250 *	pmap_zero_page_area zeros the specified hardware page by mapping
3251 *	the page into KVM and using bzero to clear its contents.
3252 *
3253 *	off and size may not cover an area beyond a single hardware page.
3254 */
3255void
3256pmap_zero_page_area(vm_page_t m, int off, int size)
3257{
3258	vm_offset_t va = PHYS_TO_DMAP(VM_PAGE_TO_PHYS(m));
3259
3260	if (off == 0 && size == PAGE_SIZE)
3261		pagezero((void *)va);
3262	else
3263		bzero((char *)va + off, size);
3264}
3265
3266/*
3267 *	pmap_zero_page_idle zeros the specified hardware page by mapping
3268 *	the page into KVM and using bzero to clear its contents.  This
3269 *	is intended to be called from the vm_pagezero process only and
3270 *	outside of Giant.
3271 */
3272void
3273pmap_zero_page_idle(vm_page_t m)
3274{
3275	vm_offset_t va = PHYS_TO_DMAP(VM_PAGE_TO_PHYS(m));
3276
3277	pagezero((void *)va);
3278}
3279
3280/*
3281 *	pmap_copy_page copies the specified (machine independent)
3282 *	page by mapping the page into virtual memory and using
3283 *	bcopy to copy the page, one machine dependent page at a
3284 *	time.
3285 */
3286void
3287pmap_copy_page(vm_page_t msrc, vm_page_t mdst)
3288{
3289	vm_offset_t src = PHYS_TO_DMAP(VM_PAGE_TO_PHYS(msrc));
3290	vm_offset_t dst = PHYS_TO_DMAP(VM_PAGE_TO_PHYS(mdst));
3291
3292	pagecopy((void *)src, (void *)dst);
3293}
3294
3295int unmapped_buf_allowed = 1;
3296
3297void
3298pmap_copy_pages(vm_page_t ma[], vm_offset_t a_offset, vm_page_t mb[],
3299    vm_offset_t b_offset, int xfersize)
3300{
3301	void *a_cp, *b_cp;
3302	vm_page_t m_a, m_b;
3303	vm_paddr_t p_a, p_b;
3304	vm_offset_t a_pg_offset, b_pg_offset;
3305	int cnt;
3306
3307	while (xfersize > 0) {
3308		a_pg_offset = a_offset & PAGE_MASK;
3309		m_a = ma[a_offset >> PAGE_SHIFT];
3310		p_a = m_a->phys_addr;
3311		b_pg_offset = b_offset & PAGE_MASK;
3312		m_b = mb[b_offset >> PAGE_SHIFT];
3313		p_b = m_b->phys_addr;
3314		cnt = min(xfersize, PAGE_SIZE - a_pg_offset);
3315		cnt = min(cnt, PAGE_SIZE - b_pg_offset);
3316		if (__predict_false(!PHYS_IN_DMAP(p_a))) {
3317			panic("!DMAP a %lx", p_a);
3318		} else {
3319			a_cp = (char *)PHYS_TO_DMAP(p_a) + a_pg_offset;
3320		}
3321		if (__predict_false(!PHYS_IN_DMAP(p_b))) {
3322			panic("!DMAP b %lx", p_b);
3323		} else {
3324			b_cp = (char *)PHYS_TO_DMAP(p_b) + b_pg_offset;
3325		}
3326		bcopy(a_cp, b_cp, cnt);
3327		a_offset += cnt;
3328		b_offset += cnt;
3329		xfersize -= cnt;
3330	}
3331}
3332
3333vm_offset_t
3334pmap_quick_enter_page(vm_page_t m)
3335{
3336
3337	return (PHYS_TO_DMAP(VM_PAGE_TO_PHYS(m)));
3338}
3339
3340void
3341pmap_quick_remove_page(vm_offset_t addr)
3342{
3343}
3344
3345/*
3346 * Returns true if the pmap's pv is one of the first
3347 * 16 pvs linked to from this page.  This count may
3348 * be changed upwards or downwards in the future; it
3349 * is only necessary that true be returned for a small
3350 * subset of pmaps for proper page aging.
3351 */
3352boolean_t
3353pmap_page_exists_quick(pmap_t pmap, vm_page_t m)
3354{
3355	struct md_page *pvh;
3356	struct rwlock *lock;
3357	pv_entry_t pv;
3358	int loops = 0;
3359	boolean_t rv;
3360
3361	KASSERT((m->oflags & VPO_UNMANAGED) == 0,
3362	    ("pmap_page_exists_quick: page %p is not managed", m));
3363	rv = FALSE;
3364	lock = VM_PAGE_TO_PV_LIST_LOCK(m);
3365	rw_rlock(lock);
3366	TAILQ_FOREACH(pv, &m->md.pv_list, pv_next) {
3367		if (PV_PMAP(pv) == pmap) {
3368			rv = TRUE;
3369			break;
3370		}
3371		loops++;
3372		if (loops >= 16)
3373			break;
3374	}
3375	if (!rv && loops < 16 && (m->flags & PG_FICTITIOUS) == 0) {
3376		pvh = pa_to_pvh(VM_PAGE_TO_PHYS(m));
3377		TAILQ_FOREACH(pv, &pvh->pv_list, pv_next) {
3378			if (PV_PMAP(pv) == pmap) {
3379				rv = TRUE;
3380				break;
3381			}
3382			loops++;
3383			if (loops >= 16)
3384				break;
3385		}
3386	}
3387	rw_runlock(lock);
3388	return (rv);
3389}
3390
3391/*
3392 *	pmap_page_wired_mappings:
3393 *
3394 *	Return the number of managed mappings to the given physical page
3395 *	that are wired.
3396 */
3397int
3398pmap_page_wired_mappings(vm_page_t m)
3399{
3400	struct rwlock *lock;
3401	struct md_page *pvh;
3402	pmap_t pmap;
3403	pt_entry_t *pte;
3404	pv_entry_t pv;
3405	int count, lvl, md_gen, pvh_gen;
3406
3407	if ((m->oflags & VPO_UNMANAGED) != 0)
3408		return (0);
3409	lock = VM_PAGE_TO_PV_LIST_LOCK(m);
3410	rw_rlock(lock);
3411restart:
3412	count = 0;
3413	TAILQ_FOREACH(pv, &m->md.pv_list, pv_next) {
3414		pmap = PV_PMAP(pv);
3415		if (!PMAP_TRYLOCK(pmap)) {
3416			md_gen = m->md.pv_gen;
3417			rw_runlock(lock);
3418			PMAP_LOCK(pmap);
3419			rw_rlock(lock);
3420			if (md_gen != m->md.pv_gen) {
3421				PMAP_UNLOCK(pmap);
3422				goto restart;
3423			}
3424		}
3425		pte = pmap_pte(pmap, pv->pv_va, &lvl);
3426		if (pte != NULL && (pmap_load(pte) & ATTR_SW_WIRED) != 0)
3427			count++;
3428		PMAP_UNLOCK(pmap);
3429	}
3430	if ((m->flags & PG_FICTITIOUS) == 0) {
3431		pvh = pa_to_pvh(VM_PAGE_TO_PHYS(m));
3432		TAILQ_FOREACH(pv, &pvh->pv_list, pv_next) {
3433			pmap = PV_PMAP(pv);
3434			if (!PMAP_TRYLOCK(pmap)) {
3435				md_gen = m->md.pv_gen;
3436				pvh_gen = pvh->pv_gen;
3437				rw_runlock(lock);
3438				PMAP_LOCK(pmap);
3439				rw_rlock(lock);
3440				if (md_gen != m->md.pv_gen ||
3441				    pvh_gen != pvh->pv_gen) {
3442					PMAP_UNLOCK(pmap);
3443					goto restart;
3444				}
3445			}
3446			pte = pmap_pte(pmap, pv->pv_va, &lvl);
3447			if (pte != NULL &&
3448			    (pmap_load(pte) & ATTR_SW_WIRED) != 0)
3449				count++;
3450			PMAP_UNLOCK(pmap);
3451		}
3452	}
3453	rw_runlock(lock);
3454	return (count);
3455}
3456
3457/*
3458 * Destroy all managed, non-wired mappings in the given user-space
3459 * pmap.  This pmap cannot be active on any processor besides the
3460 * caller.
3461 *
3462 * This function cannot be applied to the kernel pmap.  Moreover, it
3463 * is not intended for general use.  It is only to be used during
3464 * process termination.  Consequently, it can be implemented in ways
3465 * that make it faster than pmap_remove().  First, it can more quickly
3466 * destroy mappings by iterating over the pmap's collection of PV
3467 * entries, rather than searching the page table.  Second, it doesn't
3468 * have to test and clear the page table entries atomically, because
3469 * no processor is currently accessing the user address space.  In
3470 * particular, a page table entry's dirty bit won't change state once
3471 * this function starts.
3472 */
3473void
3474pmap_remove_pages(pmap_t pmap)
3475{
3476	pd_entry_t *pde;
3477	pt_entry_t *pte, tpte;
3478	struct spglist free;
3479	vm_page_t m, ml3, mt;
3480	pv_entry_t pv;
3481	struct md_page *pvh;
3482	struct pv_chunk *pc, *npc;
3483	struct rwlock *lock;
3484	int64_t bit;
3485	uint64_t inuse, bitmask;
3486	int allfree, field, freed, idx, lvl;
3487	vm_paddr_t pa;
3488
3489	lock = NULL;
3490
3491	SLIST_INIT(&free);
3492	PMAP_LOCK(pmap);
3493	TAILQ_FOREACH_SAFE(pc, &pmap->pm_pvchunk, pc_list, npc) {
3494		allfree = 1;
3495		freed = 0;
3496		for (field = 0; field < _NPCM; field++) {
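			/*
			 * Invert the chunk's free bitmap word so that each
			 * set bit in "inuse" identifies an allocated PV
			 * entry to visit.
			 */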
3497			inuse = ~pc->pc_map[field] & pc_freemask[field];
3498			while (inuse != 0) {
3499				bit = ffsl(inuse) - 1;
3500				bitmask = 1UL << bit;
3501				idx = field * 64 + bit;
3502				pv = &pc->pc_pventry[idx];
3503				inuse &= ~bitmask;
3504
3505				pde = pmap_pde(pmap, pv->pv_va, &lvl);
3506				KASSERT(pde != NULL,
3507				    ("Attempting to remove an unmapped page"));
3508
3509				switch(lvl) {
3510				case 1:
3511					pte = pmap_l1_to_l2(pde, pv->pv_va);
3512					tpte = pmap_load(pte);
3513					KASSERT((tpte & ATTR_DESCR_MASK) ==
3514					    L2_BLOCK,
3515					    ("Attempting to remove an invalid "
3516					    "block: %lx", tpte));
3517					tpte = pmap_load(pte);
3518					break;
3519				case 2:
3520					pte = pmap_l2_to_l3(pde, pv->pv_va);
3521					tpte = pmap_load(pte);
3522					KASSERT((tpte & ATTR_DESCR_MASK) ==
3523					    L3_PAGE,
3524					    ("Attempting to remove an invalid "
3525					     "page: %lx", tpte));
3526					break;
3527				default:
3528					panic(
3529					    "Invalid page directory level: %d",
3530					    lvl);
3531				}
3532
3533/*
3534 * We cannot remove wired pages from a process' mapping at this time
3535 */
3536				if (tpte & ATTR_SW_WIRED) {
3537					allfree = 0;
3538					continue;
3539				}
3540
3541				pa = tpte & ~ATTR_MASK;
3542
3543				m = PHYS_TO_VM_PAGE(pa);
3544				KASSERT(m->phys_addr == pa,
3545				    ("vm_page_t %p phys_addr mismatch %016jx %016jx",
3546				    m, (uintmax_t)m->phys_addr,
3547				    (uintmax_t)tpte));
3548
3549				KASSERT((m->flags & PG_FICTITIOUS) != 0 ||
3550				    m < &vm_page_array[vm_page_array_size],
3551				    ("pmap_remove_pages: bad pte %#jx",
3552				    (uintmax_t)tpte));
3553
3554				if (pmap_is_current(pmap)) {
3555					if (lvl == 2 &&
3556					    pmap_l3_valid_cacheable(tpte)) {
3557						cpu_dcache_wb_range(pv->pv_va,
3558						    L3_SIZE);
3559					} else if (lvl == 1 &&
3560					    pmap_pte_valid_cacheable(tpte)) {
3561						cpu_dcache_wb_range(pv->pv_va,
3562						    L2_SIZE);
3563					}
3564				}
3565				pmap_load_clear(pte);
3566				PTE_SYNC(pte);
3567				pmap_invalidate_page(pmap, pv->pv_va);
3568
3569				/*
3570				 * Update the vm_page_t clean/reference bits.
3571				 */
3572				if ((tpte & ATTR_AP_RW_BIT) ==
3573				    ATTR_AP(ATTR_AP_RW)) {
3574					switch (lvl) {
3575					case 1:
3576						for (mt = m; mt < &m[L2_SIZE / PAGE_SIZE]; mt++)
3577							vm_page_dirty(mt);
3578						break;
3579					case 2:
3580						vm_page_dirty(m);
3581						break;
3582					}
3583				}
3584
3585				CHANGE_PV_LIST_LOCK_TO_VM_PAGE(&lock, m);
3586
3587				/* Mark free */
3588				pc->pc_map[field] |= bitmask;
3589				switch (lvl) {
3590				case 1:
3591					pmap_resident_count_dec(pmap,
3592					    L2_SIZE / PAGE_SIZE);
3593					pvh = pa_to_pvh(tpte & ~ATTR_MASK);
3594					TAILQ_REMOVE(&pvh->pv_list, pv,pv_next);
3595					pvh->pv_gen++;
3596					if (TAILQ_EMPTY(&pvh->pv_list)) {
3597						for (mt = m; mt < &m[L2_SIZE / PAGE_SIZE]; mt++)
3598							if ((mt->aflags & PGA_WRITEABLE) != 0 &&
3599							    TAILQ_EMPTY(&mt->md.pv_list))
3600								vm_page_aflag_clear(mt, PGA_WRITEABLE);
3601					}
3602					ml3 = pmap_lookup_pt_page(pmap,
3603					    pv->pv_va);
3604					if (ml3 != NULL) {
3605						pmap_remove_pt_page(pmap, ml3);
3606						pmap_resident_count_dec(pmap,1);
3607						KASSERT(ml3->wire_count == NL3PG,
3608						    ("pmap_remove_pages: l3 page wire count error"));
3609						ml3->wire_count = 0;
3610						pmap_add_delayed_free_list(ml3,
3611						    &free, FALSE);
3612						atomic_subtract_int(
3613						    &vm_cnt.v_wire_count, 1);
3614					}
3615					break;
3616				case 2:
3617					pmap_resident_count_dec(pmap, 1);
3618					TAILQ_REMOVE(&m->md.pv_list, pv,
3619					    pv_next);
3620					m->md.pv_gen++;
3621					if ((m->aflags & PGA_WRITEABLE) != 0 &&
3622					    TAILQ_EMPTY(&m->md.pv_list) &&
3623					    (m->flags & PG_FICTITIOUS) == 0) {
3624						pvh = pa_to_pvh(
3625						    VM_PAGE_TO_PHYS(m));
3626						if (TAILQ_EMPTY(&pvh->pv_list))
3627							vm_page_aflag_clear(m,
3628							    PGA_WRITEABLE);
3629					}
3630					break;
3631				}
3632				pmap_unuse_l3(pmap, pv->pv_va, pmap_load(pde),
3633				    &free);
3634				freed++;
3635			}
3636		}
3637		PV_STAT(atomic_add_long(&pv_entry_frees, freed));
3638		PV_STAT(atomic_add_int(&pv_entry_spare, freed));
3639		PV_STAT(atomic_subtract_long(&pv_entry_count, freed));
3640		if (allfree) {
3641			TAILQ_REMOVE(&pmap->pm_pvchunk, pc, pc_list);
3642			free_pv_chunk(pc);
3643		}
3644	}
3645	pmap_invalidate_all(pmap);
3646	if (lock != NULL)
3647		rw_wunlock(lock);
3648	PMAP_UNLOCK(pmap);
3649	pmap_free_zero_pages(&free);
3650}
3651
3652/*
3653 * This is used to check if a page has been accessed or modified.  Since
3654 * there is no hardware dirty bit, a page is assumed to have been modified
3655 * if its mapping is writable (read/write).
3656 */
3657static boolean_t
3658pmap_page_test_mappings(vm_page_t m, boolean_t accessed, boolean_t modified)
3659{
3660	struct rwlock *lock;
3661	pv_entry_t pv;
3662	struct md_page *pvh;
3663	pt_entry_t *pte, mask, value;
3664	pmap_t pmap;
3665	int lvl, md_gen, pvh_gen;
3666	boolean_t rv;
3667
3668	rv = FALSE;
3669	lock = VM_PAGE_TO_PV_LIST_LOCK(m);
3670	rw_rlock(lock);
3671restart:
3672	TAILQ_FOREACH(pv, &m->md.pv_list, pv_next) {
3673		pmap = PV_PMAP(pv);
3674		if (!PMAP_TRYLOCK(pmap)) {
3675			md_gen = m->md.pv_gen;
3676			rw_runlock(lock);
3677			PMAP_LOCK(pmap);
3678			rw_rlock(lock);
3679			if (md_gen != m->md.pv_gen) {
3680				PMAP_UNLOCK(pmap);
3681				goto restart;
3682			}
3683		}
3684		pte = pmap_pte(pmap, pv->pv_va, &lvl);
3685		KASSERT(lvl == 3,
3686		    ("pmap_page_test_mappings: Invalid level %d", lvl));
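		/*
		 * Build a mask/value pair so one comparison tests the
		 * requested state: a writable (AP RW) entry stands in for
		 * the dirty state, and a valid entry with ATTR_AF set
		 * indicates a reference.
		 */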
3687		mask = 0;
3688		value = 0;
3689		if (modified) {
3690			mask |= ATTR_AP_RW_BIT;
3691			value |= ATTR_AP(ATTR_AP_RW);
3692		}
3693		if (accessed) {
3694			mask |= ATTR_AF | ATTR_DESCR_MASK;
3695			value |= ATTR_AF | L3_PAGE;
3696		}
3697		rv = (pmap_load(pte) & mask) == value;
3698		PMAP_UNLOCK(pmap);
3699		if (rv)
3700			goto out;
3701	}
3702	if ((m->flags & PG_FICTITIOUS) == 0) {
3703		pvh = pa_to_pvh(VM_PAGE_TO_PHYS(m));
3704		TAILQ_FOREACH(pv, &pvh->pv_list, pv_next) {
3705			pmap = PV_PMAP(pv);
3706			if (!PMAP_TRYLOCK(pmap)) {
3707				md_gen = m->md.pv_gen;
3708				pvh_gen = pvh->pv_gen;
3709				rw_runlock(lock);
3710				PMAP_LOCK(pmap);
3711				rw_rlock(lock);
3712				if (md_gen != m->md.pv_gen ||
3713				    pvh_gen != pvh->pv_gen) {
3714					PMAP_UNLOCK(pmap);
3715					goto restart;
3716				}
3717			}
3718			pte = pmap_pte(pmap, pv->pv_va, &lvl);
3719			KASSERT(lvl == 2,
3720			    ("pmap_page_test_mappings: Invalid level %d", lvl));
3721			mask = 0;
3722			value = 0;
3723			if (modified) {
3724				mask |= ATTR_AP_RW_BIT;
3725				value |= ATTR_AP(ATTR_AP_RW);
3726			}
3727			if (accessed) {
3728				mask |= ATTR_AF | ATTR_DESCR_MASK;
3729				value |= ATTR_AF | L2_BLOCK;
3730			}
3731			rv = (pmap_load(pte) & mask) == value;
3732			PMAP_UNLOCK(pmap);
3733			if (rv)
3734				goto out;
3735		}
3736	}
3737out:
3738	rw_runlock(lock);
3739	return (rv);
3740}
3741
3742/*
3743 *	pmap_is_modified:
3744 *
3745 *	Return whether or not the specified physical page was modified
3746 *	in any physical maps.
3747 */
3748boolean_t
3749pmap_is_modified(vm_page_t m)
3750{
3751
3752	KASSERT((m->oflags & VPO_UNMANAGED) == 0,
3753	    ("pmap_is_modified: page %p is not managed", m));
3754
3755	/*
3756	 * If the page is not exclusive busied, then PGA_WRITEABLE cannot be
3757	 * concurrently set while the object is locked.  Thus, if PGA_WRITEABLE
3758	 * is clear, no PTEs can have PG_M set.
3759	 */
3760	VM_OBJECT_ASSERT_WLOCKED(m->object);
3761	if (!vm_page_xbusied(m) && (m->aflags & PGA_WRITEABLE) == 0)
3762		return (FALSE);
3763	return (pmap_page_test_mappings(m, FALSE, TRUE));
3764}
3765
3766/*
3767 *	pmap_is_prefaultable:
3768 *
3769 *	Return whether or not the specified virtual address is eligible
3770 *	for prefault.
3771 */
3772boolean_t
3773pmap_is_prefaultable(pmap_t pmap, vm_offset_t addr)
3774{
3775	pt_entry_t *pte;
3776	boolean_t rv;
3777	int lvl;
3778
3779	rv = FALSE;
3780	PMAP_LOCK(pmap);
3781	pte = pmap_pte(pmap, addr, &lvl);
3782	if (pte != NULL && pmap_load(pte) != 0) {
3783		rv = TRUE;
3784	}
3785	PMAP_UNLOCK(pmap);
3786	return (rv);
3787}
3788
3789/*
3790 *	pmap_is_referenced:
3791 *
3792 *	Return whether or not the specified physical page was referenced
3793 *	in any physical maps.
3794 */
3795boolean_t
3796pmap_is_referenced(vm_page_t m)
3797{
3798
3799	KASSERT((m->oflags & VPO_UNMANAGED) == 0,
3800	    ("pmap_is_referenced: page %p is not managed", m));
3801	return (pmap_page_test_mappings(m, TRUE, FALSE));
3802}
3803
3804/*
3805 * Clear the write and modified bits in each of the given page's mappings.
3806 */
3807void
3808pmap_remove_write(vm_page_t m)
3809{
3810	struct md_page *pvh;
3811	pmap_t pmap;
3812	struct rwlock *lock;
3813	pv_entry_t next_pv, pv;
3814	pt_entry_t oldpte, *pte;
3815	vm_offset_t va;
3816	int lvl, md_gen, pvh_gen;
3817
3818	KASSERT((m->oflags & VPO_UNMANAGED) == 0,
3819	    ("pmap_remove_write: page %p is not managed", m));
3820
3821	/*
3822	 * If the page is not exclusive busied, then PGA_WRITEABLE cannot be
3823	 * set by another thread while the object is locked.  Thus,
3824	 * if PGA_WRITEABLE is clear, no page table entries need updating.
3825	 */
3826	VM_OBJECT_ASSERT_WLOCKED(m->object);
3827	if (!vm_page_xbusied(m) && (m->aflags & PGA_WRITEABLE) == 0)
3828		return;
3829	lock = VM_PAGE_TO_PV_LIST_LOCK(m);
3830	pvh = (m->flags & PG_FICTITIOUS) != 0 ? &pv_dummy :
3831	    pa_to_pvh(VM_PAGE_TO_PHYS(m));
3832retry_pv_loop:
3833	rw_wlock(lock);
3834	TAILQ_FOREACH_SAFE(pv, &pvh->pv_list, pv_next, next_pv) {
3835		pmap = PV_PMAP(pv);
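		/*
		 * If the pmap lock cannot be taken without blocking, drop the
		 * pv list lock, take both locks, and recheck the generation
		 * count in case the pv list changed while unlocked.
		 */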
3836		if (!PMAP_TRYLOCK(pmap)) {
3837			pvh_gen = pvh->pv_gen;
3838			rw_wunlock(lock);
3839			PMAP_LOCK(pmap);
3840			rw_wlock(lock);
3841			if (pvh_gen != pvh->pv_gen) {
3842				PMAP_UNLOCK(pmap);
3843				rw_wunlock(lock);
3844				goto retry_pv_loop;
3845			}
3846		}
3847		va = pv->pv_va;
3848		pte = pmap_pte(pmap, pv->pv_va, &lvl);
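		/*
		 * Demote a writable 2MB mapping so that write access can be
		 * removed from the resulting 4KB mappings in the loop over
		 * the page's own pv list below.
		 */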
3849		if ((pmap_load(pte) & ATTR_AP_RW_BIT) == ATTR_AP(ATTR_AP_RW))
3850			pmap_demote_l2_locked(pmap, pte, va & ~L2_OFFSET,
3851			    &lock);
3852		KASSERT(lock == VM_PAGE_TO_PV_LIST_LOCK(m),
3853		    ("inconsistent pv lock %p %p for page %p",
3854		    lock, VM_PAGE_TO_PV_LIST_LOCK(m), m));
3855		PMAP_UNLOCK(pmap);
3856	}
3857	TAILQ_FOREACH(pv, &m->md.pv_list, pv_next) {
3858		pmap = PV_PMAP(pv);
3859		if (!PMAP_TRYLOCK(pmap)) {
3860			pvh_gen = pvh->pv_gen;
3861			md_gen = m->md.pv_gen;
3862			rw_wunlock(lock);
3863			PMAP_LOCK(pmap);
3864			rw_wlock(lock);
3865			if (pvh_gen != pvh->pv_gen ||
3866			    md_gen != m->md.pv_gen) {
3867				PMAP_UNLOCK(pmap);
3868				rw_wunlock(lock);
3869				goto retry_pv_loop;
3870			}
3871		}
3872		pte = pmap_pte(pmap, pv->pv_va, &lvl);
3873retry:
3874		oldpte = pmap_load(pte);
3875		if ((oldpte & ATTR_AP_RW_BIT) == ATTR_AP(ATTR_AP_RW)) {
3876			if (!atomic_cmpset_long(pte, oldpte,
3877			    oldpte | ATTR_AP(ATTR_AP_RO)))
3878				goto retry;
3879			if ((oldpte & ATTR_AF) != 0)
3880				vm_page_dirty(m);
3881			pmap_invalidate_page(pmap, pv->pv_va);
3882		}
3883		PMAP_UNLOCK(pmap);
3884	}
3885	rw_wunlock(lock);
3886	vm_page_aflag_clear(m, PGA_WRITEABLE);
3887}
3888
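/*
 * Always false for now: the access flag is not yet managed from the
 * exception handler, so pmap_ts_referenced() must demote superpage mappings
 * and destroy 4KB mappings rather than simply clearing ATTR_AF.
 */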
3889static __inline boolean_t
3890safe_to_clear_referenced(pmap_t pmap, pt_entry_t pte)
3891{
3892
3893	return (FALSE);
3894}
3895
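/*
 * Bound the number of referenced mappings that a single call to
 * pmap_ts_referenced() will examine (and possibly clear ATTR_AF in).
 */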
3896#define	PMAP_TS_REFERENCED_MAX	5
3897
3898/*
3899 *	pmap_ts_referenced:
3900 *
3901 *	Return a count of reference bits for a page, clearing those bits.
3902 *	It is not necessary for every reference bit to be cleared, but it
3903 *	is necessary that 0 only be returned when there are truly no
3904 *	reference bits set.
3905 *
3906 *	XXX: The exact number of bits to check and clear is a matter that
3907 *	should be tested and standardized at some point in the future for
3908 *	optimal aging of shared pages.
3909 */
3910int
3911pmap_ts_referenced(vm_page_t m)
3912{
3913	struct md_page *pvh;
3914	pv_entry_t pv, pvf;
3915	pmap_t pmap;
3916	struct rwlock *lock;
3917	pd_entry_t *pde, tpde;
3918	pt_entry_t *pte, tpte;
3919	pt_entry_t *l3;
3920	vm_offset_t va;
3921	vm_paddr_t pa;
3922	int cleared, md_gen, not_cleared, lvl, pvh_gen;
3923	struct spglist free;
3924	bool demoted;
3925
3926	KASSERT((m->oflags & VPO_UNMANAGED) == 0,
3927	    ("pmap_ts_referenced: page %p is not managed", m));
3928	SLIST_INIT(&free);
3929	cleared = 0;
3930	pa = VM_PAGE_TO_PHYS(m);
3931	lock = PHYS_TO_PV_LIST_LOCK(pa);
3932	pvh = (m->flags & PG_FICTITIOUS) != 0 ? &pv_dummy : pa_to_pvh(pa);
3933	rw_wlock(lock);
3934retry:
3935	not_cleared = 0;
3936	if ((pvf = TAILQ_FIRST(&pvh->pv_list)) == NULL)
3937		goto small_mappings;
3938	pv = pvf;
3939	do {
3940		if (pvf == NULL)
3941			pvf = pv;
3942		pmap = PV_PMAP(pv);
3943		if (!PMAP_TRYLOCK(pmap)) {
3944			pvh_gen = pvh->pv_gen;
3945			rw_wunlock(lock);
3946			PMAP_LOCK(pmap);
3947			rw_wlock(lock);
3948			if (pvh_gen != pvh->pv_gen) {
3949				PMAP_UNLOCK(pmap);
3950				goto retry;
3951			}
3952		}
3953		va = pv->pv_va;
3954		pde = pmap_pde(pmap, pv->pv_va, &lvl);
3955		KASSERT(pde != NULL, ("pmap_ts_referenced: no l1 table found"));
3956		KASSERT(lvl == 1,
3957		    ("pmap_ts_referenced: invalid pde level %d", lvl));
3958		tpde = pmap_load(pde);
3959		KASSERT((tpde & ATTR_DESCR_MASK) == L1_TABLE,
3960		    ("pmap_ts_referenced: found an invalid l1 table"));
3961		pte = pmap_l1_to_l2(pde, pv->pv_va);
3962		tpte = pmap_load(pte);
3963		if ((tpte & ATTR_AF) != 0) {
3964			/*
3965			 * Since this reference bit is shared by 512 4KB
3966			 * pages, it should not be cleared every time it is
3967			 * tested.  Apply a simple "hash" function on the
3968			 * physical page number, the virtual superpage number,
3969			 * and the pmap address to select one 4KB page out of
3970			 * the 512 on which testing the reference bit will
3971			 * result in clearing that reference bit.  This
3972			 * function is designed to avoid the selection of the
3973			 * same 4KB page for every 2MB page mapping.
3974			 *
3975			 * On demotion, a mapping that hasn't been referenced
3976			 * is simply destroyed.  To avoid the possibility of a
3977			 * subsequent page fault on a demoted wired mapping,
3978			 * always leave its reference bit set.  Moreover,
3979			 * since the superpage is wired, the current state of
3980			 * its reference bit won't affect page replacement.
3981			 */
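			/*
			 * In other words, for each 2MB mapping only one of
			 * its 512 constituent 4KB pages triggers the demotion
			 * below; testing any of the other pages leaves the
			 * mapping and its reference bit untouched.
			 */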
3982			if ((((pa >> PAGE_SHIFT) ^ (pv->pv_va >> L2_SHIFT) ^
3983			    (uintptr_t)pmap) & (Ln_ENTRIES - 1)) == 0 &&
3984			    (tpte & ATTR_SW_WIRED) == 0) {
3985				if (safe_to_clear_referenced(pmap, tpte)) {
3986					/*
3987					 * TODO: We don't handle the access
3988					 * flag at all. We need to be able
3989					 * to set it in the exception handler.
3990					 */
3991					panic("ARM64TODO: "
3992					    "safe_to_clear_referenced\n");
3993				} else if (pmap_demote_l2_locked(pmap, pte,
3994				    pv->pv_va, &lock) != NULL) {
3995					demoted = true;
3996					va += VM_PAGE_TO_PHYS(m) -
3997					    (tpte & ~ATTR_MASK);
3998					l3 = pmap_l2_to_l3(pte, va);
3999					pmap_remove_l3(pmap, l3, va,
4000					    pmap_load(pte), NULL, &lock);
4001				} else
4002					demoted = false;
4003
4004				if (demoted) {
4005					/*
4006					 * The superpage mapping was removed
4007					 * entirely and therefore 'pv' is no
4008					 * longer valid.
4009					 */
4010					if (pvf == pv)
4011						pvf = NULL;
4012					pv = NULL;
4013				}
4014				cleared++;
4015				KASSERT(lock == VM_PAGE_TO_PV_LIST_LOCK(m),
4016				    ("inconsistent pv lock %p %p for page %p",
4017				    lock, VM_PAGE_TO_PV_LIST_LOCK(m), m));
4018			} else
4019				not_cleared++;
4020		}
4021		PMAP_UNLOCK(pmap);
4022		/* Rotate the PV list if it has more than one entry. */
4023		if (pv != NULL && TAILQ_NEXT(pv, pv_next) != NULL) {
4024			TAILQ_REMOVE(&pvh->pv_list, pv, pv_next);
4025			TAILQ_INSERT_TAIL(&pvh->pv_list, pv, pv_next);
4026			pvh->pv_gen++;
4027		}
4028		if (cleared + not_cleared >= PMAP_TS_REFERENCED_MAX)
4029			goto out;
4030	} while ((pv = TAILQ_FIRST(&pvh->pv_list)) != pvf);
4031small_mappings:
4032	if ((pvf = TAILQ_FIRST(&m->md.pv_list)) == NULL)
4033		goto out;
4034	pv = pvf;
4035	do {
4036		if (pvf == NULL)
4037			pvf = pv;
4038		pmap = PV_PMAP(pv);
4039		if (!PMAP_TRYLOCK(pmap)) {
4040			pvh_gen = pvh->pv_gen;
4041			md_gen = m->md.pv_gen;
4042			rw_wunlock(lock);
4043			PMAP_LOCK(pmap);
4044			rw_wlock(lock);
4045			if (pvh_gen != pvh->pv_gen || md_gen != m->md.pv_gen) {
4046				PMAP_UNLOCK(pmap);
4047				goto retry;
4048			}
4049		}
4050		pde = pmap_pde(pmap, pv->pv_va, &lvl);
4051		KASSERT(pde != NULL, ("pmap_ts_referenced: no l2 table found"));
4052		KASSERT(lvl == 2,
4053		    ("pmap_ts_referenced: invalid pde level %d", lvl));
4054		tpde = pmap_load(pde);
4055		KASSERT((tpde & ATTR_DESCR_MASK) == L2_TABLE,
4056		    ("pmap_ts_referenced: found an invalid l2 table"));
4057		pte = pmap_l2_to_l3(pde, pv->pv_va);
4058		tpte = pmap_load(pte);
4059		if ((tpte & ATTR_AF) != 0) {
4060			if (safe_to_clear_referenced(pmap, tpte)) {
4061				/*
4062				 * TODO: We don't handle the access flag
4063				 * at all. We need to be able to set it in
4064				 * the exception handler.
4065				 */
4066				panic("ARM64TODO: safe_to_clear_referenced\n");
4067			} else if ((tpte & ATTR_SW_WIRED) == 0) {
4068				/*
4069				 * Wired pages cannot be paged out so
4070				 * doing accessed bit emulation for
4071				 * them is wasted effort. We do the
4072				 * hard work for unwired pages only.
4073				 */
4074				pmap_remove_l3(pmap, pte, pv->pv_va, tpde,
4075				    &free, &lock);
4076				pmap_invalidate_page(pmap, pv->pv_va);
4077				cleared++;
4078				if (pvf == pv)
4079					pvf = NULL;
4080				pv = NULL;
4081				KASSERT(lock == VM_PAGE_TO_PV_LIST_LOCK(m),
4082				    ("inconsistent pv lock %p %p for page %p",
4083				    lock, VM_PAGE_TO_PV_LIST_LOCK(m), m));
4084			} else
4085				not_cleared++;
4086		}
4087		PMAP_UNLOCK(pmap);
4088		/* Rotate the PV list if it has more than one entry. */
4089		if (pv != NULL && TAILQ_NEXT(pv, pv_next) != NULL) {
4090			TAILQ_REMOVE(&m->md.pv_list, pv, pv_next);
4091			TAILQ_INSERT_TAIL(&m->md.pv_list, pv, pv_next);
4092			m->md.pv_gen++;
4093		}
4094	} while ((pv = TAILQ_FIRST(&m->md.pv_list)) != pvf && cleared +
4095	    not_cleared < PMAP_TS_REFERENCED_MAX);
4096out:
4097	rw_wunlock(lock);
4098	pmap_free_zero_pages(&free);
4099	return (cleared + not_cleared);
4100}
4101
4102/*
4103 *	Apply the given advice to the specified range of addresses within the
4104 *	given pmap.  Depending on the advice, clear the referenced and/or
4105 *	modified flags in each mapping and set the mapped page's dirty field.
4106 */
4107void
4108pmap_advise(pmap_t pmap, vm_offset_t sva, vm_offset_t eva, int advice)
4109{
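	/* Not implemented: the given advice is currently ignored. */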
4110}
4111
4112/*
4113 *	Clear the modify bits on the specified physical page.
4114 */
4115void
4116pmap_clear_modify(vm_page_t m)
4117{
4118
4119	KASSERT((m->oflags & VPO_UNMANAGED) == 0,
4120	    ("pmap_clear_modify: page %p is not managed", m));
4121	VM_OBJECT_ASSERT_WLOCKED(m->object);
4122	KASSERT(!vm_page_xbusied(m),
4123	    ("pmap_clear_modify: page %p is exclusive busied", m));
4124
4125	/*
4126	 * If the page is not PGA_WRITEABLE, then no PTEs can have PG_M set.
4127	 * If the object containing the page is locked and the page is not
4128	 * exclusive busied, then PGA_WRITEABLE cannot be concurrently set.
4129	 */
4130	if ((m->aflags & PGA_WRITEABLE) == 0)
4131		return;
4132
4133	/* ARM64TODO: We lack support for tracking if a page is modified */
4134}
4135
4136void *
4137pmap_mapbios(vm_paddr_t pa, vm_size_t size)
4138{
4139
4140	return ((void *)PHYS_TO_DMAP(pa));
4141}
4142
4143void
4144pmap_unmapbios(vm_paddr_t pa, vm_size_t size)
4145{
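	/* Nothing to undo: pmap_mapbios() hands out direct map addresses. */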
4146}
4147
4148/*
4149 * Sets the memory attribute for the specified page.
4150 */
4151void
4152pmap_page_set_memattr(vm_page_t m, vm_memattr_t ma)
4153{
4154
4155	m->md.pv_memattr = ma;
4156
4157	/*
4158	 * If "m" is a normal page, update its direct mapping.  This update
4159	 * can be relied upon to perform any cache operations that are
4160	 * required for data coherence.
4161	 */
4162	if ((m->flags & PG_FICTITIOUS) == 0 &&
4163	    pmap_change_attr(PHYS_TO_DMAP(VM_PAGE_TO_PHYS(m)), PAGE_SIZE,
4164	    m->md.pv_memattr) != 0)
4165		panic("memory attribute change on the direct map failed");
4166}
4167
4168/*
4169 * Changes the specified virtual address range's memory type to that given by
4170 * the parameter "mode".  The specified virtual address range must be
4171 * completely contained within either the direct map or the kernel map.  If
4172 * the virtual address range is contained within the kernel map, then the
4173 * memory type for each of the corresponding ranges of the direct map is also
4174 * changed.  (The corresponding ranges of the direct map are those ranges that
4175 * map the same physical pages as the specified virtual address range.)  These
4176 * changes to the direct map are necessary because Intel describes the
4177 * behavior of their processors as "undefined" if two or more mappings to the
4178 * same physical page have different memory types.
4179 *
4180 * Returns zero if the change completed successfully, and either EINVAL or
4181 * ENOMEM if the change failed.  Specifically, EINVAL is returned if some part
4182 * of the virtual address range was not mapped, and ENOMEM is returned if
4183 * there was insufficient memory available to complete the change.  In the
4184 * latter case, the memory type may have been changed on some part of the
4185 * virtual address range or the direct map.
4186 */
4187static int
4188pmap_change_attr(vm_offset_t va, vm_size_t size, int mode)
4189{
4190	int error;
4191
4192	PMAP_LOCK(kernel_pmap);
4193	error = pmap_change_attr_locked(va, size, mode);
4194	PMAP_UNLOCK(kernel_pmap);
4195	return (error);
4196}
4197
4198static int
4199pmap_change_attr_locked(vm_offset_t va, vm_size_t size, int mode)
4200{
4201	vm_offset_t base, offset, tmpva;
4202	pt_entry_t l3, *pte, *newpte;
4203	int lvl;
4204
4205	PMAP_LOCK_ASSERT(kernel_pmap, MA_OWNED);
4206	base = trunc_page(va);
4207	offset = va & PAGE_MASK;
4208	size = round_page(offset + size);
4209
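	/* Only the attributes of direct map (DMAP) addresses are handled. */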
4210	if (!VIRT_IN_DMAP(base))
4211		return (EINVAL);
4212
4213	for (tmpva = base; tmpva < base + size; ) {
4214		pte = pmap_pte(kernel_pmap, tmpva, &lvl);
4215		if (pte == NULL)
4216			return (EINVAL);
4217
4218		if ((pmap_load(pte) & ATTR_IDX_MASK) == ATTR_IDX(mode)) {
4219			/*
4220			 * We already have the correct attribute,
4221			 * ignore this entry.
4222			 */
4223			switch (lvl) {
4224			default:
4225				panic("Invalid DMAP table level: %d\n", lvl);
4226			case 1:
4227				tmpva = (tmpva & ~L1_OFFSET) + L1_SIZE;
4228				break;
4229			case 2:
4230				tmpva = (tmpva & ~L2_OFFSET) + L2_SIZE;
4231				break;
4232			case 3:
4233				tmpva += PAGE_SIZE;
4234				break;
4235			}
4236		} else {
4237			/*
4238			 * Split the entry to a level 3 table, then
4239			 * set the new attribute.
4240			 */
4241			switch (lvl) {
4242			default:
4243				panic("Invalid DMAP table level: %d\n", lvl);
4244			case 1:
4245				newpte = pmap_demote_l1(kernel_pmap, pte,
4246				    tmpva & ~L1_OFFSET);
4247				if (newpte == NULL)
4248					return (EINVAL);
4249				pte = pmap_l1_to_l2(pte, tmpva);
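				/* FALLTHROUGH */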
4250			case 2:
4251				newpte = pmap_demote_l2(kernel_pmap, pte,
4252				    tmpva & ~L2_OFFSET);
4253				if (newpte == NULL)
4254					return (EINVAL);
4255				pte = pmap_l2_to_l3(pte, tmpva);
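				/* FALLTHROUGH */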
4256			case 3:
4257				/* Update the entry */
4258				l3 = pmap_load(pte);
4259				l3 &= ~ATTR_IDX_MASK;
4260				l3 |= ATTR_IDX(mode);
4261
4262				pmap_update_entry(kernel_pmap, pte, l3, tmpva,
4263				    PAGE_SIZE);
4264
4265				/*
4266				 * If moving to a non-cacheable entry, flush
4267				 * the cache.
4268				 */
4269				if (mode == VM_MEMATTR_UNCACHEABLE)
4270					cpu_dcache_wbinv_range(tmpva, L3_SIZE);
4271
4272				break;
4273			}
4274			tmpva += PAGE_SIZE;
4275		}
4276	}
4277
4278	return (0);
4279}
4280
4281/*
4282 * Create an L2 table to map all addresses within an L1 mapping.
4283 */
4284static pt_entry_t *
4285pmap_demote_l1(pmap_t pmap, pt_entry_t *l1, vm_offset_t va)
4286{
4287	pt_entry_t *l2, newl2, oldl1;
4288	vm_offset_t tmpl1;
4289	vm_paddr_t l2phys, phys;
4290	vm_page_t ml2;
4291	int i;
4292
4293	PMAP_LOCK_ASSERT(pmap, MA_OWNED);
4294	oldl1 = pmap_load(l1);
4295	KASSERT((oldl1 & ATTR_DESCR_MASK) == L1_BLOCK,
4296	    ("pmap_demote_l1: Demoting a non-block entry"));
4297	KASSERT((va & L1_OFFSET) == 0,
4298	    ("pmap_demote_l1: Invalid virtual address %#lx", va));
4299	KASSERT((oldl1 & ATTR_SW_MANAGED) == 0,
4300	    ("pmap_demote_l1: Level 1 table shouldn't be managed"));
4301
4302	tmpl1 = 0;
4303	if (va <= (vm_offset_t)l1 && va + L1_SIZE > (vm_offset_t)l1) {
4304		tmpl1 = kva_alloc(PAGE_SIZE);
4305		if (tmpl1 == 0)
4306			return (NULL);
4307	}
4308
4309	if ((ml2 = vm_page_alloc(NULL, 0, VM_ALLOC_INTERRUPT |
4310	    VM_ALLOC_NOOBJ | VM_ALLOC_WIRED)) == NULL) {
4311		CTR2(KTR_PMAP, "pmap_demote_l1: failure for va %#lx"
4312		    " in pmap %p", va, pmap);
4313		return (NULL);
4314	}
4315
4316	l2phys = VM_PAGE_TO_PHYS(ml2);
4317	l2 = (pt_entry_t *)PHYS_TO_DMAP(l2phys);
4318
4319	/* Physical address that the old L1 block mapping points at */
4320	phys = oldl1 & ~ATTR_MASK;
4321	/* The attributes from the old L1 entry to be copied to each L2 entry */
4322	newl2 = oldl1 & ATTR_MASK;
4323
4324	/* Create the new entries */
4325	for (i = 0; i < Ln_ENTRIES; i++) {
4326		l2[i] = newl2 | phys;
4327		phys += L2_SIZE;
4328	}
4329	cpu_dcache_wb_range((vm_offset_t)l2, PAGE_SIZE);
4330	KASSERT(l2[0] == ((oldl1 & ~ATTR_DESCR_MASK) | L2_BLOCK),
4331	    ("Invalid l2 page (%lx != %lx)", l2[0],
4332	    (oldl1 & ~ATTR_DESCR_MASK) | L2_BLOCK));
4333
4334	if (tmpl1 != 0) {
4335		pmap_kenter(tmpl1, PAGE_SIZE,
4336		    DMAP_TO_PHYS((vm_offset_t)l1) & ~L3_OFFSET, CACHED_MEMORY);
4337		l1 = (pt_entry_t *)(tmpl1 + ((vm_offset_t)l1 & PAGE_MASK));
4338	}
4339
4340	pmap_update_entry(pmap, l1, l2phys | L1_TABLE, va, PAGE_SIZE);
4341
4342	if (tmpl1 != 0) {
4343		pmap_kremove(tmpl1);
4344		kva_free(tmpl1, PAGE_SIZE);
4345	}
4346
4347	return (l2);
4348}
4349
4350/*
4351 * Create an L3 table to map all addresses within an L2 mapping.
4352 */
4353static pt_entry_t *
4354pmap_demote_l2_locked(pmap_t pmap, pt_entry_t *l2, vm_offset_t va,
4355    struct rwlock **lockp)
4356{
4357	pt_entry_t *l3, newl3, oldl2;
4358	vm_offset_t tmpl2;
4359	vm_paddr_t l3phys, phys;
4360	vm_page_t ml3;
4361	int i;
4362
4363	PMAP_LOCK_ASSERT(pmap, MA_OWNED);
4364	l3 = NULL;
4365	oldl2 = pmap_load(l2);
4366	KASSERT((oldl2 & ATTR_DESCR_MASK) == L2_BLOCK,
4367	    ("pmap_demote_l2: Demoting a non-block entry"));
4368	KASSERT((va & L2_OFFSET) == 0,
4369	    ("pmap_demote_l2: Invalid virtual address %#lx", va));
4370
4371	tmpl2 = 0;
4372	if (va <= (vm_offset_t)l2 && va + L2_SIZE > (vm_offset_t)l2) {
4373		tmpl2 = kva_alloc(PAGE_SIZE);
4374		if (tmpl2 == 0)
4375			return (NULL);
4376	}
4377
4378	if ((ml3 = pmap_lookup_pt_page(pmap, va)) != NULL) {
4379		pmap_remove_pt_page(pmap, ml3);
4380	} else {
4381		ml3 = vm_page_alloc(NULL, pmap_l2_pindex(va),
4382		    (VIRT_IN_DMAP(va) ? VM_ALLOC_INTERRUPT : VM_ALLOC_NORMAL) |
4383		    VM_ALLOC_NOOBJ | VM_ALLOC_WIRED);
4384		if (ml3 == NULL) {
4385			CTR2(KTR_PMAP, "pmap_demote_l2: failure for va %#lx"
4386			    " in pmap %p", va, pmap);
4387			goto fail;
4388		}
4389		if (va < VM_MAXUSER_ADDRESS)
4390			pmap_resident_count_inc(pmap, 1);
4391	}
4392
4393	l3phys = VM_PAGE_TO_PHYS(ml3);
4394	l3 = (pt_entry_t *)PHYS_TO_DMAP(l3phys);
4395
4396	/* Physical address that the old L2 block mapping points at */
4397	phys = oldl2 & ~ATTR_MASK;
4398	/* The attributes from the old L2 entry to be copied to each L3 entry */
4399	newl3 = (oldl2 & (ATTR_MASK & ~ATTR_DESCR_MASK)) | L3_PAGE;
4400
4401	/*
4402	 * If the page table page is new, initialize it.
4403	 */
4404	if (ml3->wire_count == 1) {
4405		for (i = 0; i < Ln_ENTRIES; i++) {
4406			l3[i] = newl3 | phys;
4407			phys += L3_SIZE;
4408		}
4409		cpu_dcache_wb_range((vm_offset_t)l3, PAGE_SIZE);
4410	}
4411	KASSERT(l3[0] == ((oldl2 & ~ATTR_DESCR_MASK) | L3_PAGE),
4412	    ("Invalid l3 page (%lx != %lx)", l3[0],
4413	    (oldl2 & ~ATTR_DESCR_MASK) | L3_PAGE));
4414
4415	/*
4416	 * Map the temporary page so we don't lose access to the l2 table.
4417	 */
4418	if (tmpl2 != 0) {
4419		pmap_kenter(tmpl2, PAGE_SIZE,
4420		    DMAP_TO_PHYS((vm_offset_t)l2) & ~L3_OFFSET, CACHED_MEMORY);
4421		l2 = (pt_entry_t *)(tmpl2 + ((vm_offset_t)l2 & PAGE_MASK));
4422	}
4423
4424	/*
4425	 * The spare PV entries must be reserved prior to demoting the
4426	 * mapping, that is, prior to changing the PDE.  Otherwise, the state
4427	 * of the L2 and the PV lists will be inconsistent, which can result
4428	 * in reclaim_pv_chunk() attempting to remove a PV entry from the
4429	 * wrong PV list and pmap_pv_demote_l2() failing to find the expected
4430	 * PV entry for the 2MB page mapping that is being demoted.
4431	 */
4432	if ((oldl2 & ATTR_SW_MANAGED) != 0)
4433		reserve_pv_entries(pmap, Ln_ENTRIES - 1, lockp);
4434
4435	pmap_update_entry(pmap, l2, l3phys | L2_TABLE, va, PAGE_SIZE);
4436
4437	/*
4438	 * Demote the PV entry.
4439	 */
4440	if ((oldl2 & ATTR_SW_MANAGED) != 0)
4441		pmap_pv_demote_l2(pmap, va, oldl2 & ~ATTR_MASK, lockp);
4442
4443	atomic_add_long(&pmap_l2_demotions, 1);
4444	CTR3(KTR_PMAP, "pmap_demote_l2: success for va %#lx"
4445	    " in pmap %p %lx", va, pmap, l3[0]);
4446
4447fail:
4448	if (tmpl2 != 0) {
4449		pmap_kremove(tmpl2);
4450		kva_free(tmpl2, PAGE_SIZE);
4451	}
4452
4453	return (l3);
4454
4455}
4456
4457static pt_entry_t *
4458pmap_demote_l2(pmap_t pmap, pt_entry_t *l2, vm_offset_t va)
4459{
4460	struct rwlock *lock;
4461	pt_entry_t *l3;
4462
4463	lock = NULL;
4464	l3 = pmap_demote_l2_locked(pmap, l2, va, &lock);
4465	if (lock != NULL)
4466		rw_wunlock(lock);
4467	return (l3);
4468}
4469
4470/*
4471 * perform the pmap work for mincore
4472 */
4473int
4474pmap_mincore(pmap_t pmap, vm_offset_t addr, vm_paddr_t *locked_pa)
4475{
4476	pd_entry_t *l1p, l1;
4477	pd_entry_t *l2p, l2;
4478	pt_entry_t *l3p, l3;
4479	vm_paddr_t pa;
4480	bool managed;
4481	int val;
4482
4483	PMAP_LOCK(pmap);
4484retry:
4485	pa = 0;
4486	val = 0;
4487	managed = false;
4488
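	/*
	 * Walk the page tables, stopping at whichever of a 1GB block, a 2MB
	 * block, or a 4KB page maps the address.
	 */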
4489	l1p = pmap_l1(pmap, addr);
4490	if (l1p == NULL) /* No l1 */
4491		goto done;
4492
4493	l1 = pmap_load(l1p);
4494	if ((l1 & ATTR_DESCR_MASK) == L1_INVAL)
4495		goto done;
4496
4497	if ((l1 & ATTR_DESCR_MASK) == L1_BLOCK) {
4498		pa = (l1 & ~ATTR_MASK) | (addr & L1_OFFSET);
4499		managed = (l1 & ATTR_SW_MANAGED) == ATTR_SW_MANAGED;
4500		val = MINCORE_SUPER | MINCORE_INCORE;
4501		if (pmap_page_dirty(l1))
4502			val |= MINCORE_MODIFIED | MINCORE_MODIFIED_OTHER;
4503		if ((l1 & ATTR_AF) == ATTR_AF)
4504			val |= MINCORE_REFERENCED | MINCORE_REFERENCED_OTHER;
4505		goto done;
4506	}
4507
4508	l2p = pmap_l1_to_l2(l1p, addr);
4509	if (l2p == NULL) /* No l2 */
4510		goto done;
4511
4512	l2 = pmap_load(l2p);
4513	if ((l2 & ATTR_DESCR_MASK) == L2_INVAL)
4514		goto done;
4515
4516	if ((l2 & ATTR_DESCR_MASK) == L2_BLOCK) {
4517		pa = (l2 & ~ATTR_MASK) | (addr & L2_OFFSET);
4518		managed = (l2 & ATTR_SW_MANAGED) == ATTR_SW_MANAGED;
4519		val = MINCORE_SUPER | MINCORE_INCORE;
4520		if (pmap_page_dirty(l2))
4521			val |= MINCORE_MODIFIED | MINCORE_MODIFIED_OTHER;
4522		if ((l2 & ATTR_AF) == ATTR_AF)
4523			val |= MINCORE_REFERENCED | MINCORE_REFERENCED_OTHER;
4524		goto done;
4525	}
4526
4527	l3p = pmap_l2_to_l3(l2p, addr);
4528	if (l3p == NULL) /* No l3 */
4529		goto done;
4530
4531	l3 = pmap_load(l3p);
4532	if ((l3 & ATTR_DESCR_MASK) == L3_INVAL)
4533		goto done;
4534
4535	if ((l3 & ATTR_DESCR_MASK) == L3_PAGE) {
4536		pa = (l3 & ~ATTR_MASK) | (addr & L3_OFFSET);
4537		managed = (l3 & ATTR_SW_MANAGED) == ATTR_SW_MANAGED;
4538		val = MINCORE_INCORE;
4539		if (pmap_page_dirty(l3))
4540			val |= MINCORE_MODIFIED | MINCORE_MODIFIED_OTHER;
4541		if ((l3 & ATTR_AF) == ATTR_AF)
4542			val |= MINCORE_REFERENCED | MINCORE_REFERENCED_OTHER;
4543	}
4544
4545done:
4546	if ((val & (MINCORE_MODIFIED_OTHER | MINCORE_REFERENCED_OTHER)) !=
4547	    (MINCORE_MODIFIED_OTHER | MINCORE_REFERENCED_OTHER) && managed) {
4548		/* Ensure that "PHYS_TO_VM_PAGE(pa)->object" doesn't change. */
4549		if (vm_page_pa_tryrelock(pmap, pa, locked_pa))
4550			goto retry;
4551	} else
4552		PA_UNLOCK_COND(*locked_pa);
4553	PMAP_UNLOCK(pmap);
4554
4555	return (val);
4556}
4557
4558void
4559pmap_activate(struct thread *td)
4560{
4561	pmap_t	pmap;
4562
4563	critical_enter();
4564	pmap = vmspace_pmap(td->td_proc->p_vmspace);
4565	td->td_pcb->pcb_l0addr = vtophys(pmap->pm_l0);
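	/*
	 * Point ttbr0_el1 at the new address space's level 0 table and
	 * invalidate any stale TLB entries for it.
	 */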
4566	__asm __volatile("msr ttbr0_el1, %0" : : "r"(td->td_pcb->pcb_l0addr));
4567	pmap_invalidate_all(pmap);
4568	critical_exit();
4569}
4570
4571void
4572pmap_sync_icache(pmap_t pmap, vm_offset_t va, vm_size_t sz)
4573{
4574
4575	if (va >= VM_MIN_KERNEL_ADDRESS) {
4576		cpu_icache_sync_range(va, sz);
4577	} else {
4578		u_int len, offset;
4579		vm_paddr_t pa;
4580
4581		/* Find the length of data in this page to flush */
4582		offset = va & PAGE_MASK;
4583		len = imin(PAGE_SIZE - offset, sz);
4584
4585		while (sz != 0) {
4586			/* Extract the physical address & find it in the DMAP */
4587			pa = pmap_extract(pmap, va);
4588			if (pa != 0)
4589				cpu_icache_sync_range(PHYS_TO_DMAP(pa), len);
4590
4591			/* Move to the next page */
4592			sz -= len;
4593			va += len;
4594			/* Set the length for the next iteration */
4595			len = imin(PAGE_SIZE, sz);
4596		}
4597	}
4598}
4599
4600int
4601pmap_fault(pmap_t pmap, uint64_t esr, uint64_t far)
4602{
4603#ifdef SMP
4604	uint64_t par;
4605#endif
4606
4607	switch (ESR_ELx_EXCEPTION(esr)) {
4608	case EXCP_DATA_ABORT_L:
4609	case EXCP_DATA_ABORT:
4610		break;
4611	default:
4612		return (KERN_FAILURE);
4613	}
4614
4615#ifdef SMP
4616	PMAP_LOCK(pmap);
4617	switch (esr & ISS_DATA_DFSC_MASK) {
4618	case ISS_DATA_DFSC_TF_L0:
4619	case ISS_DATA_DFSC_TF_L1:
4620	case ISS_DATA_DFSC_TF_L2:
4621	case ISS_DATA_DFSC_TF_L3:
4622		/* Ask the MMU to check the address */
4623		if (pmap == kernel_pmap)
4624			par = arm64_address_translate_s1e1r(far);
4625		else
4626			par = arm64_address_translate_s1e0r(far);
4627
4628		/*
4629		 * If the translation was successful the address was invalid
4630		 * due to a break-before-make sequence. We can unlock and
4631		 * return success to the trap handler.
4632		 */
4633		if (PAR_SUCCESS(par)) {
4634			PMAP_UNLOCK(pmap);
4635			return (KERN_SUCCESS);
4636		}
4637		break;
4638	default:
4639		break;
4640	}
4641	PMAP_UNLOCK(pmap);
4642#endif
4643
4644	return (KERN_FAILURE);
4645}
4646
4647/*
4648 *	Increase the starting virtual address of the given mapping if a
4649 *	different alignment might result in more superpage mappings.
4650 */
4651void
4652pmap_align_superpage(vm_object_t object, vm_ooffset_t offset,
4653    vm_offset_t *addr, vm_size_t size)
4654{
4655	vm_offset_t superpage_offset;
4656
4657	if (size < L2_SIZE)
4658		return;
4659	if (object != NULL && (object->flags & OBJ_COLORED) != 0)
4660		offset += ptoa(object->pg_color);
4661	superpage_offset = offset & L2_OFFSET;
4662	if (size - ((L2_SIZE - superpage_offset) & L2_OFFSET) < L2_SIZE ||
4663	    (*addr & L2_OFFSET) == superpage_offset)
4664		return;
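	/*
	 * Move *addr forward to the next address whose offset within a 2MB
	 * superpage equals superpage_offset.
	 */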
4665	if ((*addr & L2_OFFSET) < superpage_offset)
4666		*addr = (*addr & ~L2_OFFSET) + superpage_offset;
4667	else
4668		*addr = ((*addr + L2_OFFSET) & ~L2_OFFSET) + superpage_offset;
4669}
4670
4671/**
4672 * Get the kernel virtual address of a set of physical pages. If there are
4673 * physical addresses not covered by the DMAP, perform a transient mapping
4674 * that will be removed when calling pmap_unmap_io_transient.
4675 *
4676 * \param page        The pages for which the caller wishes to obtain
4677 *                    kernel virtual addresses.
4678 * \param vaddr       On return contains the kernel virtual memory address
4679 *                    of the pages passed in the page parameter.
4680 * \param count       Number of pages passed in.
4681 * \param can_fault   TRUE if the thread using the mapped pages can take
4682 *                    page faults, FALSE otherwise.
4683 *
4684 * \returns TRUE if the caller must call pmap_unmap_io_transient when
4685 *          finished or FALSE otherwise.
4686 *
4687 */
4688boolean_t
4689pmap_map_io_transient(vm_page_t page[], vm_offset_t vaddr[], int count,
4690    boolean_t can_fault)
4691{
4692	vm_paddr_t paddr;
4693	boolean_t needs_mapping;
4694	int error, i;
4695
4696	/*
4697	 * Allocate any KVA space that we need, this is done in a separate
4698	 * loop to prevent calling vmem_alloc while pinned.
4699	 */
4700	needs_mapping = FALSE;
4701	for (i = 0; i < count; i++) {
4702		paddr = VM_PAGE_TO_PHYS(page[i]);
4703		if (__predict_false(!PHYS_IN_DMAP(paddr))) {
4704			error = vmem_alloc(kernel_arena, PAGE_SIZE,
4705			    M_BESTFIT | M_WAITOK, &vaddr[i]);
4706			KASSERT(error == 0, ("vmem_alloc failed: %d", error));
4707			needs_mapping = TRUE;
4708		} else {
4709			vaddr[i] = PHYS_TO_DMAP(paddr);
4710		}
4711	}
4712
4713	/* Exit early if everything is covered by the DMAP */
4714	if (!needs_mapping)
4715		return (FALSE);
4716
4717	if (!can_fault)
4718		sched_pin();
4719	for (i = 0; i < count; i++) {
4720		paddr = VM_PAGE_TO_PHYS(page[i]);
4721		if (!PHYS_IN_DMAP(paddr)) {
4722			panic(
4723			   "pmap_map_io_transient: TODO: Map out of DMAP data");
4724		}
4725	}
4726
4727	return (needs_mapping);
4728}
4729
4730void
4731pmap_unmap_io_transient(vm_page_t page[], vm_offset_t vaddr[], int count,
4732    boolean_t can_fault)
4733{
4734	vm_paddr_t paddr;
4735	int i;
4736
4737	if (!can_fault)
4738		sched_unpin();
4739	for (i = 0; i < count; i++) {
4740		paddr = VM_PAGE_TO_PHYS(page[i]);
4741		if (!PHYS_IN_DMAP(paddr)) {
4742			panic("ARM64TODO: pmap_unmap_io_transient: Unmap data");
4743		}
4744	}
4745}
4746