pmap.c revision 297958
/*-
 * Copyright (c) 1991 Regents of the University of California.
 * All rights reserved.
 * Copyright (c) 1994 John S. Dyson
 * All rights reserved.
 * Copyright (c) 1994 David Greenman
 * All rights reserved.
 * Copyright (c) 2003 Peter Wemm
 * All rights reserved.
 * Copyright (c) 2005-2010 Alan L. Cox <alc@cs.rice.edu>
 * All rights reserved.
 * Copyright (c) 2014 Andrew Turner
 * All rights reserved.
 * Copyright (c) 2014-2016 The FreeBSD Foundation
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * the Systems Programming Group of the University of Utah Computer
 * Science Department and William Jolitz of UUNET Technologies Inc.
 *
 * This software was developed by Andrew Turner under sponsorship from
 * the FreeBSD Foundation.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	from:	@(#)pmap.c	7.7 (Berkeley)	5/12/91
 */
/*-
 * Copyright (c) 2003 Networks Associates Technology, Inc.
 * All rights reserved.
 *
 * This software was developed for the FreeBSD Project by Jake Burkholder,
 * Safeport Network Services, and Network Associates Laboratories, the
 * Security Research Division of Network Associates, Inc. under
 * DARPA/SPAWAR contract N66001-01-C-8035 ("CBOSS"), as part of the DARPA
 * CHATS research program.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: head/sys/arm64/arm64/pmap.c 297958 2016-04-14 10:43:28Z andrew $");

/*
 *	Manages physical address maps.
 *
 *	Since the information managed by this module is
 *	also stored by the logical address mapping module,
 *	this module may throw away valid virtual-to-physical
 *	mappings at almost any time.  However, invalidations
 *	of virtual-to-physical mappings must be done as
 *	requested.
 *
 *	In order to cope with hardware architectures which
 *	make virtual-to-physical map invalidates expensive,
 *	this module may delay invalidation or reduced-protection
 *	operations until such time as they are actually
 *	necessary.  This module is given full information as
 *	to which processors are currently using which maps,
 *	and to when physical maps must be made correct.
 */

#include <sys/param.h>
#include <sys/bus.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mman.h>
#include <sys/msgbuf.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/rwlock.h>
#include <sys/sx.h>
#include <sys/vmem.h>
#include <sys/vmmeter.h>
#include <sys/sched.h>
#include <sys/sysctl.h>
#include <sys/_unrhdr.h>
#include <sys/smp.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/vm_kern.h>
#include <vm/vm_page.h>
#include <vm/vm_map.h>
#include <vm/vm_object.h>
#include <vm/vm_extern.h>
#include <vm/vm_pageout.h>
#include <vm/vm_pager.h>
#include <vm/vm_radix.h>
#include <vm/vm_reserv.h>
#include <vm/uma.h>

#include <machine/machdep.h>
#include <machine/md_var.h>
#include <machine/pcb.h>

#define	NL0PG		(PAGE_SIZE/(sizeof (pd_entry_t)))
#define	NL1PG		(PAGE_SIZE/(sizeof (pd_entry_t)))
#define	NL2PG		(PAGE_SIZE/(sizeof (pd_entry_t)))
#define	NL3PG		(PAGE_SIZE/(sizeof (pt_entry_t)))

#define	NUL0E		L0_ENTRIES
#define	NUL1E		(NUL0E * NL1PG)
#define	NUL2E		(NUL1E * NL2PG)
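
/*
 * With a 4 KB granule each table page holds PAGE_SIZE / 8 = 512 entries,
 * so NUL1E counts 512 * 512 L1 entries and NUL2E 512^3 L2 entries.  These
 * totals are used below as page-table-page pindex ranges: pindices
 * [0, NUL2E) name L3 table pages, [NUL2E, NUL2E + NUL1E) name L2 table
 * pages, and pindices at or above NUL2E + NUL1E name L1 table pages.
 */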

#if !defined(DIAGNOSTIC)
#ifdef __GNUC_GNU_INLINE__
#define PMAP_INLINE	__attribute__((__gnu_inline__)) inline
#else
#define PMAP_INLINE	extern inline
#endif
#else
#define PMAP_INLINE
#endif

/*
 * The memory attribute indices below select entries in the MAIR_EL1
 * register, which is set up in locore.S.
 */
#define	DEVICE_MEMORY	0
#define	UNCACHED_MEMORY	1
#define	CACHED_MEMORY	2


#ifdef PV_STATS
#define PV_STAT(x)	do { x ; } while (0)
#else
#define PV_STAT(x)	do { } while (0)
#endif

#define	pmap_l2_pindex(v)	((v) >> L2_SHIFT)
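
/*
 * pmap_l2_pindex() yields the pindex of the L3 page-table page mapping
 * the given virtual address; each such page covers one L2_SIZE (2 MB
 * with a 4 KB granule) region of VA.
 */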

#define	NPV_LIST_LOCKS	MAXCPU

#define	PHYS_TO_PV_LIST_LOCK(pa)	\
			(&pv_list_locks[pa_index(pa) % NPV_LIST_LOCKS])

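/*
 * pv list locks are striped by physical address: pa_index() maps a
 * physical address to one of NPV_LIST_LOCKS rwlocks, so unrelated pages
 * usually contend on different locks.  The CHANGE_* helpers below hand
 * off from the currently held lock to the one covering a new address.
 */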
#define	CHANGE_PV_LIST_LOCK_TO_PHYS(lockp, pa)	do {	\
	struct rwlock **_lockp = (lockp);		\
	struct rwlock *_new_lock;			\
							\
	_new_lock = PHYS_TO_PV_LIST_LOCK(pa);		\
	if (_new_lock != *_lockp) {			\
		if (*_lockp != NULL)			\
			rw_wunlock(*_lockp);		\
		*_lockp = _new_lock;			\
		rw_wlock(*_lockp);			\
	}						\
} while (0)

#define	CHANGE_PV_LIST_LOCK_TO_VM_PAGE(lockp, m)	\
			CHANGE_PV_LIST_LOCK_TO_PHYS(lockp, VM_PAGE_TO_PHYS(m))

#define	RELEASE_PV_LIST_LOCK(lockp)		do {	\
	struct rwlock **_lockp = (lockp);		\
							\
	if (*_lockp != NULL) {				\
		rw_wunlock(*_lockp);			\
		*_lockp = NULL;				\
	}						\
} while (0)

#define	VM_PAGE_TO_PV_LIST_LOCK(m)	\
			PHYS_TO_PV_LIST_LOCK(VM_PAGE_TO_PHYS(m))

struct pmap kernel_pmap_store;

vm_offset_t virtual_avail;	/* VA of first avail page (after kernel bss) */
vm_offset_t virtual_end;	/* VA of last avail page (end of kernel AS) */
vm_offset_t kernel_vm_end = 0;

struct msgbuf *msgbufp = NULL;

static struct rwlock_padalign pvh_global_lock;

vm_paddr_t dmap_phys_base;	/* The start of the dmap region */
vm_paddr_t dmap_phys_max;	/* The limit of the dmap region */
vm_offset_t dmap_max_addr;	/* The virtual address limit of the dmap */

/* This code assumes all L1 DMAP entries will be used */
CTASSERT((DMAP_MIN_ADDRESS & ~L0_OFFSET) == DMAP_MIN_ADDRESS);
CTASSERT((DMAP_MAX_ADDRESS & ~L0_OFFSET) == DMAP_MAX_ADDRESS);

#define	DMAP_TABLES	((DMAP_MAX_ADDRESS - DMAP_MIN_ADDRESS) >> L0_SHIFT)
extern pt_entry_t pagetable_dmap[];
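
/*
 * pagetable_dmap is the statically allocated run of DMAP_TABLES L1 table
 * pages backing the direct map (see locore.S); pmap_bootstrap_dmap()
 * below fills it with L1 block entries.
 */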

/*
 * Data for the pv entry allocation mechanism
 */
static TAILQ_HEAD(pch, pv_chunk) pv_chunks = TAILQ_HEAD_INITIALIZER(pv_chunks);
static struct mtx pv_chunks_mutex;
static struct rwlock pv_list_locks[NPV_LIST_LOCKS];

static void	free_pv_chunk(struct pv_chunk *pc);
static void	free_pv_entry(pmap_t pmap, pv_entry_t pv);
static pv_entry_t get_pv_entry(pmap_t pmap, struct rwlock **lockp);
static vm_page_t reclaim_pv_chunk(pmap_t locked_pmap, struct rwlock **lockp);
static void	pmap_pvh_free(struct md_page *pvh, pmap_t pmap, vm_offset_t va);
static pv_entry_t pmap_pvh_remove(struct md_page *pvh, pmap_t pmap,
		    vm_offset_t va);
static vm_page_t pmap_enter_quick_locked(pmap_t pmap, vm_offset_t va,
    vm_page_t m, vm_prot_t prot, vm_page_t mpte, struct rwlock **lockp);
static int pmap_remove_l3(pmap_t pmap, pt_entry_t *l3, vm_offset_t sva,
    pd_entry_t ptepde, struct spglist *free, struct rwlock **lockp);
static boolean_t pmap_try_insert_pv_entry(pmap_t pmap, vm_offset_t va,
    vm_page_t m, struct rwlock **lockp);

static vm_page_t _pmap_alloc_l3(pmap_t pmap, vm_pindex_t ptepindex,
		struct rwlock **lockp);

static void _pmap_unwire_l3(pmap_t pmap, vm_offset_t va, vm_page_t m,
    struct spglist *free);
static int pmap_unuse_l3(pmap_t, vm_offset_t, pd_entry_t, struct spglist *);

/*
 * These load the old table data and store the new value.
 * They need to be atomic as the System MMU may write to the table at
 * the same time as the CPU.
 */
#define	pmap_load_store(table, entry) atomic_swap_64(table, entry)
#define	pmap_set(table, mask) atomic_set_64(table, mask)
#define	pmap_load_clear(table) atomic_swap_64(table, 0)
#define	pmap_load(table) (*table)
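
/*
 * Typical usage is "old = pmap_load_store(l3, new)": the new entry is
 * published and the previous one returned in a single atomic, so a
 * concurrent table walk (by a CPU or the System MMU) observes either the
 * old or the new entry, never a torn intermediate value.
 */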

/********************/
/* Inline functions */
/********************/

static __inline void
pagecopy(void *s, void *d)
{

	memcpy(d, s, PAGE_SIZE);
}

#define	pmap_l0_index(va)	(((va) >> L0_SHIFT) & L0_ADDR_MASK)
#define	pmap_l1_index(va)	(((va) >> L1_SHIFT) & Ln_ADDR_MASK)
#define	pmap_l2_index(va)	(((va) >> L2_SHIFT) & Ln_ADDR_MASK)
#define	pmap_l3_index(va)	(((va) >> L3_SHIFT) & Ln_ADDR_MASK)

static __inline pd_entry_t *
pmap_l0(pmap_t pmap, vm_offset_t va)
{

	return (&pmap->pm_l0[pmap_l0_index(va)]);
}

static __inline pd_entry_t *
pmap_l0_to_l1(pd_entry_t *l0, vm_offset_t va)
{
	pd_entry_t *l1;

	l1 = (pd_entry_t *)PHYS_TO_DMAP(pmap_load(l0) & ~ATTR_MASK);
	return (&l1[pmap_l1_index(va)]);
}

static __inline pd_entry_t *
pmap_l1(pmap_t pmap, vm_offset_t va)
{
	pd_entry_t *l0;

	l0 = pmap_l0(pmap, va);
	if ((pmap_load(l0) & ATTR_DESCR_MASK) != L0_TABLE)
		return (NULL);

	return (pmap_l0_to_l1(l0, va));
}

static __inline pd_entry_t *
pmap_l1_to_l2(pd_entry_t *l1, vm_offset_t va)
{
	pd_entry_t *l2;

	l2 = (pd_entry_t *)PHYS_TO_DMAP(pmap_load(l1) & ~ATTR_MASK);
	return (&l2[pmap_l2_index(va)]);
}

static __inline pd_entry_t *
pmap_l2(pmap_t pmap, vm_offset_t va)
{
	pd_entry_t *l1;

	l1 = pmap_l1(pmap, va);
	if ((pmap_load(l1) & ATTR_DESCR_MASK) != L1_TABLE)
		return (NULL);

	return (pmap_l1_to_l2(l1, va));
}

static __inline pt_entry_t *
pmap_l2_to_l3(pd_entry_t *l2, vm_offset_t va)
{
	pt_entry_t *l3;

	l3 = (pt_entry_t *)PHYS_TO_DMAP(pmap_load(l2) & ~ATTR_MASK);
	return (&l3[pmap_l3_index(va)]);
}

/*
 * Returns the lowest valid pde for a given virtual address.
 * The next level may or may not point to a valid page or block.
 */
static __inline pd_entry_t *
pmap_pde(pmap_t pmap, vm_offset_t va, int *level)
{
	pd_entry_t *l0, *l1, *l2, desc;

	l0 = pmap_l0(pmap, va);
	desc = pmap_load(l0) & ATTR_DESCR_MASK;
	if (desc != L0_TABLE) {
		*level = -1;
		return (NULL);
	}

	l1 = pmap_l0_to_l1(l0, va);
	desc = pmap_load(l1) & ATTR_DESCR_MASK;
	if (desc != L1_TABLE) {
		*level = 0;
		return (l0);
	}

	l2 = pmap_l1_to_l2(l1, va);
	desc = pmap_load(l2) & ATTR_DESCR_MASK;
	if (desc != L2_TABLE) {
		*level = 1;
		return (l1);
	}

	*level = 2;
	return (l2);
}
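
/*
 * Note that when pmap_pde() reports level n, the returned pointer is to
 * the level-n entry itself; callers descend one more step with, e.g.,
 * pmap_l2_to_l3() to reach the L3 entry for a 4 KB mapping.
 */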

/*
 * Returns the lowest valid pte block or table entry for a given virtual
 * address. If there are no valid entries return NULL and set the level to
 * the first invalid level.
 */
static __inline pt_entry_t *
pmap_pte(pmap_t pmap, vm_offset_t va, int *level)
{
	pd_entry_t *l1, *l2, desc;
	pt_entry_t *l3;

	l1 = pmap_l1(pmap, va);
	if (l1 == NULL) {
		*level = 0;
		return (NULL);
	}
	desc = pmap_load(l1) & ATTR_DESCR_MASK;
	if (desc == L1_BLOCK) {
		*level = 1;
		return (l1);
	}

	if (desc != L1_TABLE) {
		*level = 1;
		return (NULL);
	}

	l2 = pmap_l1_to_l2(l1, va);
	desc = pmap_load(l2) & ATTR_DESCR_MASK;
	if (desc == L2_BLOCK) {
		*level = 2;
		return (l2);
	}

	if (desc != L2_TABLE) {
		*level = 2;
		return (NULL);
	}

	*level = 3;
	l3 = pmap_l2_to_l3(l2, va);
	if ((pmap_load(l3) & ATTR_DESCR_MASK) != L3_PAGE)
		return (NULL);

	return (l3);
}
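
/*
 * Example: for a 2 MB block mapping pmap_pte() returns the L2 entry with
 * *level set to 2, while for an ordinary 4 KB mapping it returns the L3
 * entry with *level set to 3; callers use the level to tell block and
 * page mappings apart.
 */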

bool
pmap_get_tables(pmap_t pmap, vm_offset_t va, pd_entry_t **l0, pd_entry_t **l1,
    pd_entry_t **l2, pt_entry_t **l3)
{
	pd_entry_t *l0p, *l1p, *l2p;

	if (pmap->pm_l0 == NULL)
		return (false);

	l0p = pmap_l0(pmap, va);
	*l0 = l0p;

	if ((pmap_load(l0p) & ATTR_DESCR_MASK) != L0_TABLE)
		return (false);

	l1p = pmap_l0_to_l1(l0p, va);
	*l1 = l1p;

	if ((pmap_load(l1p) & ATTR_DESCR_MASK) == L1_BLOCK) {
		*l2 = NULL;
		*l3 = NULL;
		return (true);
	}

	if ((pmap_load(l1p) & ATTR_DESCR_MASK) != L1_TABLE)
		return (false);

	l2p = pmap_l1_to_l2(l1p, va);
	*l2 = l2p;

	if ((pmap_load(l2p) & ATTR_DESCR_MASK) == L2_BLOCK) {
		*l3 = NULL;
		return (true);
	}

	*l3 = pmap_l2_to_l3(l2p, va);

	return (true);
}

static __inline int
pmap_is_current(pmap_t pmap)
{

	return ((pmap == pmap_kernel()) ||
	    (pmap == curthread->td_proc->p_vmspace->vm_map.pmap));
}

static __inline int
pmap_l3_valid(pt_entry_t l3)
{

	return ((l3 & ATTR_DESCR_MASK) == L3_PAGE);
}

static __inline int
pmap_l3_valid_cacheable(pt_entry_t l3)
{

	return (((l3 & ATTR_DESCR_MASK) == L3_PAGE) &&
	    ((l3 & ATTR_IDX_MASK) == ATTR_IDX(CACHED_MEMORY)));
}

#define	PTE_SYNC(pte)	cpu_dcache_wb_range((vm_offset_t)pte, sizeof(*pte))
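
/*
 * PTE_SYNC writes the cache line holding a page-table entry back to
 * memory so that a table walker that does not snoop the data cache still
 * observes the update.
 */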

/*
 * Checks if the page is dirty. We currently lack proper dirty-bit
 * tracking on arm64, so for now assume that any page mapped read/write
 * that has been accessed is dirty.
 */
static inline int
pmap_page_dirty(pt_entry_t pte)
{

	return ((pte & (ATTR_AF | ATTR_AP_RW_BIT)) ==
	    (ATTR_AF | ATTR_AP(ATTR_AP_RW)));
}

static __inline void
pmap_resident_count_inc(pmap_t pmap, int count)
{

	PMAP_LOCK_ASSERT(pmap, MA_OWNED);
	pmap->pm_stats.resident_count += count;
}

static __inline void
pmap_resident_count_dec(pmap_t pmap, int count)
{

	PMAP_LOCK_ASSERT(pmap, MA_OWNED);
	KASSERT(pmap->pm_stats.resident_count >= count,
	    ("pmap %p resident count underflow %ld %d", pmap,
	    pmap->pm_stats.resident_count, count));
	pmap->pm_stats.resident_count -= count;
}

static pt_entry_t *
pmap_early_page_idx(vm_offset_t l1pt, vm_offset_t va, u_int *l1_slot,
    u_int *l2_slot)
{
	pt_entry_t *l2;
	pd_entry_t *l1;

	l1 = (pd_entry_t *)l1pt;
	*l1_slot = (va >> L1_SHIFT) & Ln_ADDR_MASK;

	/* Check that locore used an L1 table mapping */
	KASSERT((l1[*l1_slot] & ATTR_DESCR_MASK) == L1_TABLE,
	   ("Invalid bootstrap L1 table"));
	/* Find the address of the L2 table */
	l2 = (pt_entry_t *)init_pt_va;
	*l2_slot = pmap_l2_index(va);

	return (l2);
}

static vm_paddr_t
pmap_early_vtophys(vm_offset_t l1pt, vm_offset_t va)
{
	u_int l1_slot, l2_slot;
	pt_entry_t *l2;

	l2 = pmap_early_page_idx(l1pt, va, &l1_slot, &l2_slot);

	return ((l2[l2_slot] & ~ATTR_MASK) + (va & L2_OFFSET));
}

static void
pmap_bootstrap_dmap(vm_offset_t kern_l1, vm_paddr_t min_pa, vm_paddr_t max_pa)
{
	vm_offset_t va;
	vm_paddr_t pa;
	u_int l1_slot;

	pa = dmap_phys_base = min_pa & ~L1_OFFSET;
	va = DMAP_MIN_ADDRESS;
	for (; va < DMAP_MAX_ADDRESS && pa < max_pa;
	    pa += L1_SIZE, va += L1_SIZE, l1_slot++) {
		l1_slot = ((va - DMAP_MIN_ADDRESS) >> L1_SHIFT);

		pmap_load_store(&pagetable_dmap[l1_slot],
		    (pa & ~L1_OFFSET) | ATTR_DEFAULT |
		    ATTR_IDX(CACHED_MEMORY) | L1_BLOCK);
	}

	/* Set the upper limit of the DMAP region */
	dmap_phys_max = pa;
	dmap_max_addr = va;

	cpu_dcache_wb_range((vm_offset_t)pagetable_dmap,
	    PAGE_SIZE * DMAP_TABLES);
	cpu_tlb_flushID();
}
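
/*
 * After this runs, direct-map translation is a constant offset:
 * PHYS_TO_DMAP(pa) == DMAP_MIN_ADDRESS + (pa - dmap_phys_base) for any
 * pa in [dmap_phys_base, dmap_phys_max), and DMAP_TO_PHYS() is the
 * inverse.
 */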

static vm_offset_t
pmap_bootstrap_l2(vm_offset_t l1pt, vm_offset_t va, vm_offset_t l2_start)
{
	vm_offset_t l2pt;
	vm_paddr_t pa;
	pd_entry_t *l1;
	u_int l1_slot;

	KASSERT((va & L1_OFFSET) == 0, ("Invalid virtual address"));

	l1 = (pd_entry_t *)l1pt;
	l1_slot = pmap_l1_index(va);
	l2pt = l2_start;

	for (; va < VM_MAX_KERNEL_ADDRESS; l1_slot++, va += L1_SIZE) {
		KASSERT(l1_slot < Ln_ENTRIES, ("Invalid L1 index"));

		pa = pmap_early_vtophys(l1pt, l2pt);
		pmap_load_store(&l1[l1_slot],
		    (pa & ~Ln_TABLE_MASK) | L1_TABLE);
		l2pt += PAGE_SIZE;
	}

	/* Clean the L2 page table */
	memset((void *)l2_start, 0, l2pt - l2_start);
	cpu_dcache_wb_range(l2_start, l2pt - l2_start);

	/* Flush the l1 table to ram */
	cpu_dcache_wb_range((vm_offset_t)l1, PAGE_SIZE);

	return (l2pt);
}

static vm_offset_t
pmap_bootstrap_l3(vm_offset_t l1pt, vm_offset_t va, vm_offset_t l3_start)
{
	vm_offset_t l2pt, l3pt;
	vm_paddr_t pa;
	pd_entry_t *l2;
	u_int l2_slot;

	KASSERT((va & L2_OFFSET) == 0, ("Invalid virtual address"));

	l2 = pmap_l2(kernel_pmap, va);
	l2 = (pd_entry_t *)((uintptr_t)l2 & ~(PAGE_SIZE - 1));
	l2pt = (vm_offset_t)l2;
	l2_slot = pmap_l2_index(va);
	l3pt = l3_start;

	for (; va < VM_MAX_KERNEL_ADDRESS; l2_slot++, va += L2_SIZE) {
		KASSERT(l2_slot < Ln_ENTRIES, ("Invalid L2 index"));

		pa = pmap_early_vtophys(l1pt, l3pt);
		pmap_load_store(&l2[l2_slot],
		    (pa & ~Ln_TABLE_MASK) | L2_TABLE);
		l3pt += PAGE_SIZE;
	}

	/* Clean the L3 page table */
	memset((void *)l3_start, 0, l3pt - l3_start);
	cpu_dcache_wb_range(l3_start, l3pt - l3_start);

	/* Flush the l2 table to ram */
	cpu_dcache_wb_range((vm_offset_t)l2, PAGE_SIZE);

	return (l3pt);
}

/*
 *	Bootstrap the system enough to run with virtual memory.
 */
void
pmap_bootstrap(vm_offset_t l0pt, vm_offset_t l1pt, vm_paddr_t kernstart,
    vm_size_t kernlen)
{
	u_int l1_slot, l2_slot, avail_slot, map_slot, used_map_slot;
	uint64_t kern_delta;
	pt_entry_t *l2;
	vm_offset_t va, freemempos;
	vm_offset_t dpcpu, msgbufpv;
	vm_paddr_t pa, max_pa, min_pa;
	int i;

	kern_delta = KERNBASE - kernstart;
	physmem = 0;

	printf("pmap_bootstrap %lx %lx %lx\n", l1pt, kernstart, kernlen);
	printf("%lx\n", l1pt);
	printf("%lx\n", (KERNBASE >> L1_SHIFT) & Ln_ADDR_MASK);

	/* Set this early so we can use the pagetable walking functions */
	kernel_pmap_store.pm_l0 = (pd_entry_t *)l0pt;
	PMAP_LOCK_INIT(kernel_pmap);

	/*
	 * Initialize the global pv list lock.
	 */
	rw_init(&pvh_global_lock, "pmap pv global");

	/* Assume the address we were loaded to is a valid physical address */
	min_pa = max_pa = KERNBASE - kern_delta;

	/*
	 * Find the minimum physical address. physmap is sorted,
	 * but may contain empty ranges.
	 */
	for (i = 0; i < (physmap_idx * 2); i += 2) {
		if (physmap[i] == physmap[i + 1])
			continue;
		if (physmap[i] <= min_pa)
			min_pa = physmap[i];
		if (physmap[i + 1] > max_pa)
			max_pa = physmap[i + 1];
	}

	/* Create a direct map region early so we can use it for pa -> va */
	pmap_bootstrap_dmap(l1pt, min_pa, max_pa);

	va = KERNBASE;
	pa = KERNBASE - kern_delta;

	/*
	 * Start to initialise phys_avail by copying from physmap
	 * up to the physical address KERNBASE points at.
	 */
	map_slot = avail_slot = 0;
	for (; map_slot < (physmap_idx * 2) &&
	    avail_slot < (PHYS_AVAIL_SIZE - 2); map_slot += 2) {
		if (physmap[map_slot] == physmap[map_slot + 1])
			continue;

		if (physmap[map_slot] <= pa &&
		    physmap[map_slot + 1] > pa)
			break;

		phys_avail[avail_slot] = physmap[map_slot];
		phys_avail[avail_slot + 1] = physmap[map_slot + 1];
		physmem += (phys_avail[avail_slot + 1] -
		    phys_avail[avail_slot]) >> PAGE_SHIFT;
		avail_slot += 2;
	}

	/* Add the memory before the kernel */
	if (physmap[avail_slot] < pa && avail_slot < (PHYS_AVAIL_SIZE - 2)) {
		phys_avail[avail_slot] = physmap[map_slot];
		phys_avail[avail_slot + 1] = pa;
		physmem += (phys_avail[avail_slot + 1] -
		    phys_avail[avail_slot]) >> PAGE_SHIFT;
		avail_slot += 2;
	}
	used_map_slot = map_slot;

	/*
	 * Read the page table to find out what is already mapped.
	 * This assumes we have mapped a block of memory from KERNBASE
	 * using a single L1 entry.
	 */
	l2 = pmap_early_page_idx(l1pt, KERNBASE, &l1_slot, &l2_slot);

	/* Sanity check the index, KERNBASE should be the first VA */
	KASSERT(l2_slot == 0, ("The L2 index is non-zero"));

	/* Find how many pages we have mapped */
	for (; l2_slot < Ln_ENTRIES; l2_slot++) {
		if ((l2[l2_slot] & ATTR_DESCR_MASK) == 0)
			break;

		/* Check locore used L2 blocks */
		KASSERT((l2[l2_slot] & ATTR_DESCR_MASK) == L2_BLOCK,
		    ("Invalid bootstrap L2 table"));
		KASSERT((l2[l2_slot] & ~ATTR_MASK) == pa,
		    ("Incorrect PA in L2 table"));

		va += L2_SIZE;
		pa += L2_SIZE;
	}

	va = roundup2(va, L1_SIZE);

	freemempos = KERNBASE + kernlen;
	freemempos = roundup2(freemempos, PAGE_SIZE);
	/* Create the l2 tables up to VM_MAX_KERNEL_ADDRESS */
	freemempos = pmap_bootstrap_l2(l1pt, va, freemempos);
	/* And the l3 tables for the early devmap */
	freemempos = pmap_bootstrap_l3(l1pt,
	    VM_MAX_KERNEL_ADDRESS - L2_SIZE, freemempos);

	cpu_tlb_flushID();

#define alloc_pages(var, np)						\
	(var) = freemempos;						\
	freemempos += ((np) * PAGE_SIZE);				\
	memset((char *)(var), 0, ((np) * PAGE_SIZE));
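
/*
 * alloc_pages() is a boot-time bump allocator: it hands out zeroed,
 * page-aligned memory from freemempos, which lies in the region already
 * mapped by the bootstrap page tables, so the result is usable
 * immediately.
 */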

	/* Allocate dynamic per-cpu area. */
	alloc_pages(dpcpu, DPCPU_SIZE / PAGE_SIZE);
	dpcpu_init((void *)dpcpu, 0);

	/* Allocate memory for the msgbuf, e.g. for /sbin/dmesg */
	alloc_pages(msgbufpv, round_page(msgbufsize) / PAGE_SIZE);
	msgbufp = (void *)msgbufpv;

	virtual_avail = roundup2(freemempos, L1_SIZE);
	virtual_end = VM_MAX_KERNEL_ADDRESS - L2_SIZE;
	kernel_vm_end = virtual_avail;

	pa = pmap_early_vtophys(l1pt, freemempos);

	/* Finish initialising physmap */
	map_slot = used_map_slot;
	for (; avail_slot < (PHYS_AVAIL_SIZE - 2) &&
	    map_slot < (physmap_idx * 2); map_slot += 2) {
		if (physmap[map_slot] == physmap[map_slot + 1])
			continue;

		/* Have we used the current range? */
		if (physmap[map_slot + 1] <= pa)
			continue;

		/* Do we need to split the entry? */
		if (physmap[map_slot] < pa) {
			phys_avail[avail_slot] = pa;
			phys_avail[avail_slot + 1] = physmap[map_slot + 1];
		} else {
			phys_avail[avail_slot] = physmap[map_slot];
			phys_avail[avail_slot + 1] = physmap[map_slot + 1];
		}
		physmem += (phys_avail[avail_slot + 1] -
		    phys_avail[avail_slot]) >> PAGE_SHIFT;

		avail_slot += 2;
	}
	phys_avail[avail_slot] = 0;
	phys_avail[avail_slot + 1] = 0;

	/*
	 * Maxmem isn't the "maximum memory", it's one larger than the
	 * highest page of the physical address space.  It should be
	 * called something like "Maxphyspage".
	 */
	Maxmem = atop(phys_avail[avail_slot - 1]);

	cpu_tlb_flushID();
}

/*
 *	Initialize a vm_page's machine-dependent fields.
 */
void
pmap_page_init(vm_page_t m)
{

	TAILQ_INIT(&m->md.pv_list);
	m->md.pv_memattr = VM_MEMATTR_WRITE_BACK;
}

/*
 *	Initialize the pmap module.
 *	Called by vm_init, to initialize any structures that the pmap
 *	system needs to map virtual memory.
 */
void
pmap_init(void)
{
	int i;

	/*
	 * Initialize the pv chunk list mutex.
	 */
	mtx_init(&pv_chunks_mutex, "pmap pv chunk list", NULL, MTX_DEF);

	/*
	 * Initialize the pool of pv list locks.
	 */
	for (i = 0; i < NPV_LIST_LOCKS; i++)
		rw_init(&pv_list_locks[i], "pmap pv list");
}

/*
 * Normal, non-SMP, invalidation functions.
 * We inline these within pmap.c for speed.
 */
PMAP_INLINE void
pmap_invalidate_page(pmap_t pmap, vm_offset_t va)
{

	sched_pin();
	__asm __volatile(
	    "dsb  sy		\n"
	    "tlbi vaae1is, %0	\n"
	    "dsb  sy		\n"
	    "isb		\n"
	    : : "r"(va >> PAGE_SHIFT));
	sched_unpin();
}

PMAP_INLINE void
pmap_invalidate_range(pmap_t pmap, vm_offset_t sva, vm_offset_t eva)
{
	vm_offset_t addr;

	sched_pin();
	__asm __volatile("dsb	sy");
	for (addr = sva; addr < eva; addr += PAGE_SIZE) {
		__asm __volatile(
		    "tlbi vaae1is, %0" : : "r"(addr >> PAGE_SHIFT));
	}
	__asm __volatile(
	    "dsb  sy	\n"
	    "isb	\n");
	sched_unpin();
}

PMAP_INLINE void
pmap_invalidate_all(pmap_t pmap)
{

	sched_pin();
	__asm __volatile(
	    "dsb  sy		\n"
	    "tlbi vmalle1is	\n"
	    "dsb  sy		\n"
	    "isb		\n");
	sched_unpin();
}
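
/*
 * "tlbi vaae1is" invalidates entries for the given VA across all ASIDs at
 * EL1 and broadcasts to the inner-shareable domain, while "tlbi vmalle1is"
 * drops every stage-1 EL1 entry.  The surrounding "dsb"/"isb" barriers
 * ensure prior table updates are visible before the invalidation, and that
 * it has completed before execution continues.
 */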

/*
 *	Routine:	pmap_extract
 *	Function:
 *		Extract the physical page address associated
 *		with the given map/virtual_address pair.
 */
vm_paddr_t
pmap_extract(pmap_t pmap, vm_offset_t va)
{
	pt_entry_t *pte, tpte;
	vm_paddr_t pa;
	int lvl;

	pa = 0;
	PMAP_LOCK(pmap);
	/*
	 * Find the block or page map for this virtual address. pmap_pte
	 * will return either a valid block/page entry, or NULL.
	 */
	pte = pmap_pte(pmap, va, &lvl);
	if (pte != NULL) {
		tpte = pmap_load(pte);
		pa = tpte & ~ATTR_MASK;
		switch (lvl) {
		case 1:
			KASSERT((tpte & ATTR_DESCR_MASK) == L1_BLOCK,
			    ("pmap_extract: Invalid L1 pte found: %lx",
			    tpte & ATTR_DESCR_MASK));
			pa |= (va & L1_OFFSET);
			break;
		case 2:
			KASSERT((tpte & ATTR_DESCR_MASK) == L2_BLOCK,
			    ("pmap_extract: Invalid L2 pte found: %lx",
			    tpte & ATTR_DESCR_MASK));
			pa |= (va & L2_OFFSET);
			break;
		case 3:
			KASSERT((tpte & ATTR_DESCR_MASK) == L3_PAGE,
			    ("pmap_extract: Invalid L3 pte found: %lx",
			    tpte & ATTR_DESCR_MASK));
			pa |= (va & L3_OFFSET);
			break;
		}
	}
	PMAP_UNLOCK(pmap);
	return (pa);
}

/*
 *	Routine:	pmap_extract_and_hold
 *	Function:
 *		Atomically extract and hold the physical page
 *		with the given pmap and virtual address pair
 *		if that mapping permits the given protection.
 */
vm_page_t
pmap_extract_and_hold(pmap_t pmap, vm_offset_t va, vm_prot_t prot)
{
	pt_entry_t *pte, tpte;
	vm_paddr_t pa;
	vm_page_t m;
	int lvl;

	pa = 0;
	m = NULL;
	PMAP_LOCK(pmap);
retry:
	pte = pmap_pte(pmap, va, &lvl);
	if (pte != NULL) {
		tpte = pmap_load(pte);

		KASSERT(lvl > 0 && lvl <= 3,
		    ("pmap_extract_and_hold: Invalid level %d", lvl));
		CTASSERT(L1_BLOCK == L2_BLOCK);
		KASSERT((lvl == 3 && (tpte & ATTR_DESCR_MASK) == L3_PAGE) ||
		    (lvl < 3 && (tpte & ATTR_DESCR_MASK) == L1_BLOCK),
		    ("pmap_extract_and_hold: Invalid pte at L%d: %lx", lvl,
		     tpte & ATTR_DESCR_MASK));
		if (((tpte & ATTR_AP_RW_BIT) == ATTR_AP(ATTR_AP_RW)) ||
		    ((prot & VM_PROT_WRITE) == 0)) {
			if (vm_page_pa_tryrelock(pmap, tpte & ~ATTR_MASK, &pa))
				goto retry;
			m = PHYS_TO_VM_PAGE(tpte & ~ATTR_MASK);
			vm_page_hold(m);
		}
	}
	PA_UNLOCK_COND(pa);
	PMAP_UNLOCK(pmap);
	return (m);
}

vm_paddr_t
pmap_kextract(vm_offset_t va)
{
	pt_entry_t *pte, tpte;
	vm_paddr_t pa;
	int lvl;

	if (va >= DMAP_MIN_ADDRESS && va < DMAP_MAX_ADDRESS) {
		pa = DMAP_TO_PHYS(va);
	} else {
		pa = 0;
		pte = pmap_pte(kernel_pmap, va, &lvl);
		if (pte != NULL) {
			tpte = pmap_load(pte);
			pa = tpte & ~ATTR_MASK;
			switch (lvl) {
			case 1:
				KASSERT((tpte & ATTR_DESCR_MASK) == L1_BLOCK,
				    ("pmap_kextract: Invalid L1 pte found: %lx",
				    tpte & ATTR_DESCR_MASK));
				pa |= (va & L1_OFFSET);
				break;
			case 2:
				KASSERT((tpte & ATTR_DESCR_MASK) == L2_BLOCK,
				    ("pmap_kextract: Invalid L2 pte found: %lx",
				    tpte & ATTR_DESCR_MASK));
				pa |= (va & L2_OFFSET);
				break;
			case 3:
				KASSERT((tpte & ATTR_DESCR_MASK) == L3_PAGE,
				    ("pmap_kextract: Invalid L3 pte found: %lx",
				    tpte & ATTR_DESCR_MASK));
				pa |= (va & L3_OFFSET);
				break;
			}
		}
	}
	return (pa);
}

/***************************************************
 * Low level mapping routines.....
 ***************************************************/

void
pmap_kenter_device(vm_offset_t sva, vm_size_t size, vm_paddr_t pa)
{
	pd_entry_t *pde;
	pt_entry_t *pte;
	vm_offset_t va;
	int lvl;

	KASSERT((pa & L3_OFFSET) == 0,
	   ("pmap_kenter_device: Invalid physical address"));
	KASSERT((sva & L3_OFFSET) == 0,
	   ("pmap_kenter_device: Invalid virtual address"));
	KASSERT((size & PAGE_MASK) == 0,
	    ("pmap_kenter_device: Mapping is not page-sized"));

	va = sva;
	while (size != 0) {
		pde = pmap_pde(kernel_pmap, va, &lvl);
		KASSERT(pde != NULL,
		    ("pmap_kenter_device: Invalid page entry, va: 0x%lx", va));
		KASSERT(lvl == 2,
		    ("pmap_kenter_device: Invalid level %d", lvl));

		pte = pmap_l2_to_l3(pde, va);
		pmap_load_store(pte, (pa & ~L3_OFFSET) | ATTR_DEFAULT |
		    ATTR_IDX(DEVICE_MEMORY) | L3_PAGE);
		PTE_SYNC(pte);

		va += PAGE_SIZE;
		pa += PAGE_SIZE;
		size -= PAGE_SIZE;
	}
	pmap_invalidate_range(kernel_pmap, sva, va);
}

/*
 * Remove a page from the kernel pagetables.
 */
PMAP_INLINE void
pmap_kremove(vm_offset_t va)
{
	pt_entry_t *pte;
	int lvl;

	pte = pmap_pte(kernel_pmap, va, &lvl);
	KASSERT(pte != NULL, ("pmap_kremove: Invalid address"));
	KASSERT(lvl == 3, ("pmap_kremove: Invalid pte level %d", lvl));

	if (pmap_l3_valid_cacheable(pmap_load(pte)))
		cpu_dcache_wb_range(va, L3_SIZE);
	pmap_load_clear(pte);
	PTE_SYNC(pte);
	pmap_invalidate_page(kernel_pmap, va);
}

void
pmap_kremove_device(vm_offset_t sva, vm_size_t size)
{
	pt_entry_t *pte;
	vm_offset_t va;
	int lvl;

	KASSERT((sva & L3_OFFSET) == 0,
	   ("pmap_kremove_device: Invalid virtual address"));
	KASSERT((size & PAGE_MASK) == 0,
	    ("pmap_kremove_device: Mapping is not page-sized"));

	va = sva;
	while (size != 0) {
		pte = pmap_pte(kernel_pmap, va, &lvl);
		KASSERT(pte != NULL, ("Invalid page table, va: 0x%lx", va));
		KASSERT(lvl == 3,
		    ("Invalid device pagetable level: %d != 3", lvl));
		pmap_load_clear(pte);
		PTE_SYNC(pte);

		va += PAGE_SIZE;
		size -= PAGE_SIZE;
	}
	pmap_invalidate_range(kernel_pmap, sva, va);
}

/*
 *	Used to map a range of physical addresses into kernel
 *	virtual address space.
 *
 *	The value passed in '*virt' is a suggested virtual address for
 *	the mapping. Architectures which can support a direct-mapped
 *	physical to virtual region can return the appropriate address
 *	within that region, leaving '*virt' unchanged. Other
 *	architectures should map the pages starting at '*virt' and
 *	update '*virt' with the first usable address after the mapped
 *	region.
 */
vm_offset_t
pmap_map(vm_offset_t *virt, vm_paddr_t start, vm_paddr_t end, int prot)
{
	return (PHYS_TO_DMAP(start));
}


/*
 * Add a list of wired pages to the kva
 * this routine is only used for temporary
 * kernel mappings that do not need to have
 * page modification or references recorded.
 * Note that old mappings are simply written
 * over.  The page *must* be wired.
 * Note: SMP coherent.  Uses a ranged shootdown IPI.
 */
void
pmap_qenter(vm_offset_t sva, vm_page_t *ma, int count)
{
	pd_entry_t *pde;
	pt_entry_t *pte, pa;
	vm_offset_t va;
	vm_page_t m;
	int i, lvl;

	va = sva;
	for (i = 0; i < count; i++) {
		pde = pmap_pde(kernel_pmap, va, &lvl);
		KASSERT(pde != NULL,
		    ("pmap_qenter: Invalid page entry, va: 0x%lx", va));
		KASSERT(lvl == 2,
		    ("pmap_qenter: Invalid level %d", lvl));

		m = ma[i];
		pa = VM_PAGE_TO_PHYS(m) | ATTR_DEFAULT | ATTR_AP(ATTR_AP_RW) |
		    ATTR_IDX(m->md.pv_memattr) | L3_PAGE;
		pte = pmap_l2_to_l3(pde, va);
		pmap_load_store(pte, pa);
		PTE_SYNC(pte);

		va += L3_SIZE;
	}
	pmap_invalidate_range(kernel_pmap, sva, va);
}

/*
 * This routine tears out page mappings from the
 * kernel -- it is meant only for temporary mappings.
 */
void
pmap_qremove(vm_offset_t sva, int count)
{
	pt_entry_t *pte;
	vm_offset_t va;
	int lvl;

	KASSERT(sva >= VM_MIN_KERNEL_ADDRESS, ("usermode va %lx", sva));

	va = sva;
	while (count-- > 0) {
		pte = pmap_pte(kernel_pmap, va, &lvl);
		KASSERT(lvl == 3,
		    ("Invalid pagetable level: %d != 3", lvl));
		if (pte != NULL) {
			if (pmap_l3_valid_cacheable(pmap_load(pte)))
				cpu_dcache_wb_range(va, L3_SIZE);
			pmap_load_clear(pte);
			PTE_SYNC(pte);
		}

		va += PAGE_SIZE;
	}
	pmap_invalidate_range(kernel_pmap, sva, va);
}

/***************************************************
 * Page table page management routines.....
 ***************************************************/
static __inline void
pmap_free_zero_pages(struct spglist *free)
{
	vm_page_t m;

	while ((m = SLIST_FIRST(free)) != NULL) {
		SLIST_REMOVE_HEAD(free, plinks.s.ss);
		/* Preserve the page's PG_ZERO setting. */
		vm_page_free_toq(m);
	}
}

/*
 * Schedule the specified unused page table page to be freed.  Specifically,
 * add the page to the specified list of pages that will be released to the
 * physical memory manager after the TLB has been updated.
 */
static __inline void
pmap_add_delayed_free_list(vm_page_t m, struct spglist *free,
    boolean_t set_PG_ZERO)
{

	if (set_PG_ZERO)
		m->flags |= PG_ZERO;
	else
		m->flags &= ~PG_ZERO;
	SLIST_INSERT_HEAD(free, m, plinks.s.ss);
}

/*
 * Decrements a page table page's wire count, which is used to record the
 * number of valid page table entries within the page.  If the wire count
 * drops to zero, then the page table page is unmapped.  Returns TRUE if the
 * page table page was unmapped and FALSE otherwise.
 */
static inline boolean_t
pmap_unwire_l3(pmap_t pmap, vm_offset_t va, vm_page_t m, struct spglist *free)
{

	--m->wire_count;
	if (m->wire_count == 0) {
		_pmap_unwire_l3(pmap, va, m, free);
		return (TRUE);
	} else
		return (FALSE);
}

static void
_pmap_unwire_l3(pmap_t pmap, vm_offset_t va, vm_page_t m, struct spglist *free)
{

	PMAP_LOCK_ASSERT(pmap, MA_OWNED);
	/*
	 * unmap the page table page
	 */
	if (m->pindex >= (NUL2E + NUL1E)) {
		/* l1 page */
		pd_entry_t *l0;

		l0 = pmap_l0(pmap, va);
		pmap_load_clear(l0);
		PTE_SYNC(l0);
	} else if (m->pindex >= NUL2E) {
		/* l2 page */
		pd_entry_t *l1;

		l1 = pmap_l1(pmap, va);
		pmap_load_clear(l1);
		PTE_SYNC(l1);
	} else {
		/* l3 page */
		pd_entry_t *l2;

		l2 = pmap_l2(pmap, va);
		pmap_load_clear(l2);
		PTE_SYNC(l2);
	}
	pmap_resident_count_dec(pmap, 1);
	if (m->pindex < NUL2E) {
		/* We just released an l3, unhold the matching l2 */
		pd_entry_t *l1, tl1;
		vm_page_t l2pg;

		l1 = pmap_l1(pmap, va);
		tl1 = pmap_load(l1);
		l2pg = PHYS_TO_VM_PAGE(tl1 & ~ATTR_MASK);
		pmap_unwire_l3(pmap, va, l2pg, free);
	} else if (m->pindex < (NUL2E + NUL1E)) {
		/* We just released an l2, unhold the matching l1 */
		pd_entry_t *l0, tl0;
		vm_page_t l1pg;

		l0 = pmap_l0(pmap, va);
		tl0 = pmap_load(l0);
		l1pg = PHYS_TO_VM_PAGE(tl0 & ~ATTR_MASK);
		pmap_unwire_l3(pmap, va, l1pg, free);
	}
	pmap_invalidate_page(pmap, va);

	/*
	 * This is a release store so that the ordinary store unmapping
	 * the page table page is globally performed before TLB shoot-
	 * down is begun.
	 */
	atomic_subtract_rel_int(&vm_cnt.v_wire_count, 1);

	/*
	 * Put page on a list so that it is released after
	 * *ALL* TLB shootdown is done
	 */
	pmap_add_delayed_free_list(m, free, TRUE);
}

/*
 * After removing an l3 entry, this routine is used to
 * conditionally free the page, and manage the hold/wire counts.
 */
static int
pmap_unuse_l3(pmap_t pmap, vm_offset_t va, pd_entry_t ptepde,
    struct spglist *free)
{
	vm_page_t mpte;

	if (va >= VM_MAXUSER_ADDRESS)
		return (0);
	KASSERT(ptepde != 0, ("pmap_unuse_pt: ptepde != 0"));
	mpte = PHYS_TO_VM_PAGE(ptepde & ~ATTR_MASK);
	return (pmap_unwire_l3(pmap, va, mpte, free));
}

void
pmap_pinit0(pmap_t pmap)
{

	PMAP_LOCK_INIT(pmap);
	bzero(&pmap->pm_stats, sizeof(pmap->pm_stats));
	pmap->pm_l0 = kernel_pmap->pm_l0;
}

int
pmap_pinit(pmap_t pmap)
{
	vm_paddr_t l0phys;
	vm_page_t l0pt;

	/*
	 * allocate the l0 page
	 */
	while ((l0pt = vm_page_alloc(NULL, 0, VM_ALLOC_NORMAL |
	    VM_ALLOC_NOOBJ | VM_ALLOC_WIRED | VM_ALLOC_ZERO)) == NULL)
		VM_WAIT;

	l0phys = VM_PAGE_TO_PHYS(l0pt);
	pmap->pm_l0 = (pd_entry_t *)PHYS_TO_DMAP(l0phys);

	if ((l0pt->flags & PG_ZERO) == 0)
		pagezero(pmap->pm_l0);

	bzero(&pmap->pm_stats, sizeof(pmap->pm_stats));

	return (1);
}

/*
 * This routine is called if the desired page table page does not exist.
 *
 * If page table page allocation fails, this routine may sleep before
 * returning NULL.  It sleeps only if a lock pointer was given.
 *
 * Note: If a page allocation fails at page table level two or three,
 * one or two pages may be held during the wait, only to be released
 * afterwards.  This conservative approach is easily argued to avoid
 * race conditions.
 */
static vm_page_t
_pmap_alloc_l3(pmap_t pmap, vm_pindex_t ptepindex, struct rwlock **lockp)
{
	vm_page_t m, l1pg, l2pg;

	PMAP_LOCK_ASSERT(pmap, MA_OWNED);

	/*
	 * Allocate a page table page.
	 */
	if ((m = vm_page_alloc(NULL, ptepindex, VM_ALLOC_NOOBJ |
	    VM_ALLOC_WIRED | VM_ALLOC_ZERO)) == NULL) {
		if (lockp != NULL) {
			RELEASE_PV_LIST_LOCK(lockp);
			PMAP_UNLOCK(pmap);
			rw_runlock(&pvh_global_lock);
			VM_WAIT;
			rw_rlock(&pvh_global_lock);
			PMAP_LOCK(pmap);
		}

		/*
		 * Indicate the need to retry.  While waiting, the page table
		 * page may have been allocated.
		 */
		return (NULL);
	}
	if ((m->flags & PG_ZERO) == 0)
		pmap_zero_page(m);

	/*
	 * Map the pagetable page into the process address space, if
	 * it isn't already there.
	 */

	if (ptepindex >= (NUL2E + NUL1E)) {
		pd_entry_t *l0;
		vm_pindex_t l0index;

		l0index = ptepindex - (NUL2E + NUL1E);
		l0 = &pmap->pm_l0[l0index];
		pmap_load_store(l0, VM_PAGE_TO_PHYS(m) | L0_TABLE);
1428297446Sandrew		pmap_load_store(l0, VM_PAGE_TO_PHYS(m) | L0_TABLE);
1429297446Sandrew		PTE_SYNC(l0);
1430297446Sandrew	} else if (ptepindex >= NUL2E) {
1431297446Sandrew		vm_pindex_t l0index, l1index;
1432297446Sandrew		pd_entry_t *l0, *l1;
1433297446Sandrew		pd_entry_t tl0;
1434297446Sandrew
1435297446Sandrew		l1index = ptepindex - NUL2E;
1436297446Sandrew		l0index = l1index >> L0_ENTRIES_SHIFT;
1437297446Sandrew
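		/*
		 * A worked example (hypothetical numbers, assuming 512-entry
		 * tables): ptepindex = NUL2E + 1037 gives l1index = 1037, so
		 * the l1 table is found through pm_l0[1037 >> 9] = pm_l0[2]
		 * and the new l2 table is installed at slot 1037 & 511 = 13.
		 */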
1438297446Sandrew		l0 = &pmap->pm_l0[l0index];
1439297446Sandrew		tl0 = pmap_load(l0);
1440297446Sandrew		if (tl0 == 0) {
1441297446Sandrew			/* recurse for allocating page dir */
1442297446Sandrew			if (_pmap_alloc_l3(pmap, NUL2E + NUL1E + l0index,
1443297446Sandrew			    lockp) == NULL) {
1444297446Sandrew				--m->wire_count;
1445297446Sandrew				/* XXX: release mem barrier? */
1446297446Sandrew				atomic_subtract_int(&vm_cnt.v_wire_count, 1);
1447297446Sandrew				vm_page_free_zero(m);
1448297446Sandrew				return (NULL);
1449297446Sandrew			}
1450297446Sandrew		} else {
1451297446Sandrew			l1pg = PHYS_TO_VM_PAGE(tl0 & ~ATTR_MASK);
1452297446Sandrew			l1pg->wire_count++;
1453297446Sandrew		}
1454297446Sandrew
1455297446Sandrew		l1 = (pd_entry_t *)PHYS_TO_DMAP(pmap_load(l0) & ~ATTR_MASK);
1456297446Sandrew		l1 = &l1[ptepindex & Ln_ADDR_MASK];
1457281494Sandrew		pmap_load_store(l1, VM_PAGE_TO_PHYS(m) | L1_TABLE);
1458281494Sandrew		PTE_SYNC(l1);
1459281494Sandrew	} else {
1460297446Sandrew		vm_pindex_t l0index, l1index;
1461297446Sandrew		pd_entry_t *l0, *l1, *l2;
1462297446Sandrew		pd_entry_t tl0, tl1;
1463281494Sandrew
1464297446Sandrew		l1index = ptepindex >> Ln_ENTRIES_SHIFT;
1465297446Sandrew		l0index = l1index >> L0_ENTRIES_SHIFT;
1466297446Sandrew
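		/*
		 * Here ptepindex names an l3 table; each right shift by a
		 * table-size shift walks up one level, giving first the l1
		 * slot and then the l0 slot that cover it.
		 */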
1467297446Sandrew		l0 = &pmap->pm_l0[l0index];
1468297446Sandrew		tl0 = pmap_load(l0);
1469297446Sandrew		if (tl0 == 0) {
1470281494Sandrew			/* recurse for allocating page dir */
1471297446Sandrew			if (_pmap_alloc_l3(pmap, NUL2E + l1index,
1472281494Sandrew			    lockp) == NULL) {
1473281494Sandrew				--m->wire_count;
1474281494Sandrew				atomic_subtract_int(&vm_cnt.v_wire_count, 1);
1475281494Sandrew				vm_page_free_zero(m);
1476281494Sandrew				return (NULL);
1477281494Sandrew			}
1478297446Sandrew			tl0 = pmap_load(l0);
1479297446Sandrew			l1 = (pd_entry_t *)PHYS_TO_DMAP(tl0 & ~ATTR_MASK);
1480297446Sandrew			l1 = &l1[l1index & Ln_ADDR_MASK];
1481281494Sandrew		} else {
1482297446Sandrew			l1 = (pd_entry_t *)PHYS_TO_DMAP(tl0 & ~ATTR_MASK);
1483297446Sandrew			l1 = &l1[l1index & Ln_ADDR_MASK];
1484297446Sandrew			tl1 = pmap_load(l1);
1485297446Sandrew			if (tl1 == 0) {
1486297446Sandrew				/* recurse for allocating page dir */
1487297446Sandrew				if (_pmap_alloc_l3(pmap, NUL2E + l1index,
1488297446Sandrew				    lockp) == NULL) {
1489297446Sandrew					--m->wire_count;
1490297446Sandrew					/* XXX: release mem barrier? */
1491297446Sandrew					atomic_subtract_int(
1492297446Sandrew					    &vm_cnt.v_wire_count, 1);
1493297446Sandrew					vm_page_free_zero(m);
1494297446Sandrew					return (NULL);
1495297446Sandrew				}
1496297446Sandrew			} else {
1497297446Sandrew				l2pg = PHYS_TO_VM_PAGE(tl1 & ~ATTR_MASK);
1498297446Sandrew				l2pg->wire_count++;
1499297446Sandrew			}
1500281494Sandrew		}
1501281494Sandrew
1502288445Sandrew		l2 = (pd_entry_t *)PHYS_TO_DMAP(pmap_load(l1) & ~ATTR_MASK);
1503281494Sandrew		l2 = &l2[ptepindex & Ln_ADDR_MASK];
1504285537Sandrew		pmap_load_store(l2, VM_PAGE_TO_PHYS(m) | L2_TABLE);
1505281494Sandrew		PTE_SYNC(l2);
1506281494Sandrew	}
1507281494Sandrew
1508281494Sandrew	pmap_resident_count_inc(pmap, 1);
1509281494Sandrew
1510281494Sandrew	return (m);
1511281494Sandrew}
1512281494Sandrew
1513281494Sandrewstatic vm_page_t
1514281494Sandrewpmap_alloc_l3(pmap_t pmap, vm_offset_t va, struct rwlock **lockp)
1515281494Sandrew{
1516281494Sandrew	vm_pindex_t ptepindex;
1517297446Sandrew	pd_entry_t *pde, tpde;
1518281494Sandrew	vm_page_t m;
1519297446Sandrew	int lvl;
1520281494Sandrew
1521281494Sandrew	/*
1522281494Sandrew	 * Calculate pagetable page index
1523281494Sandrew	 */
1524281494Sandrew	ptepindex = pmap_l2_pindex(va);
1525281494Sandrewretry:
1526281494Sandrew	/*
1527281494Sandrew	 * Get the page directory entry
1528281494Sandrew	 */
1529297446Sandrew	pde = pmap_pde(pmap, va, &lvl);
1530281494Sandrew
1531281494Sandrew	/*
1532297446Sandrew	 * If the page table page is mapped, we just increment the hold count,
1533297446Sandrew	 * and activate it. If we get a level 2 pde it will point to a level 3
1534297446Sandrew	 * table.
1535281494Sandrew	 */
1536297446Sandrew	if (lvl == 2) {
1537297446Sandrew		tpde = pmap_load(pde);
1538297446Sandrew		if (tpde != 0) {
1539297446Sandrew			m = PHYS_TO_VM_PAGE(tpde & ~ATTR_MASK);
1540297446Sandrew			m->wire_count++;
1541297446Sandrew			return (m);
1542297446Sandrew		}
1543281494Sandrew	}
1544297446Sandrew
1545297446Sandrew	/*
1546297446Sandrew	 * The page table page isn't mapped or has been deallocated; allocate it.
1547297446Sandrew	 */
1548297446Sandrew	m = _pmap_alloc_l3(pmap, ptepindex, lockp);
1549297446Sandrew	if (m == NULL && lockp != NULL)
1550297446Sandrew		goto retry;
1551297446Sandrew
1552281494Sandrew	return (m);
1553281494Sandrew}
1554281494Sandrew
1555281494Sandrew
1556281494Sandrew/***************************************************
1557281494Sandrew * Pmap allocation/deallocation routines.
1558281494Sandrew ***************************************************/
1559281494Sandrew
1560281494Sandrew/*
1561281494Sandrew * Release any resources held by the given physical map.
1562281494Sandrew * Called when a pmap initialized by pmap_pinit is being released.
1563281494Sandrew * Should only be called if the map contains no valid mappings.
1564281494Sandrew */
1565281494Sandrewvoid
1566281494Sandrewpmap_release(pmap_t pmap)
1567281494Sandrew{
1568281494Sandrew	vm_page_t m;
1569281494Sandrew
1570281494Sandrew	KASSERT(pmap->pm_stats.resident_count == 0,
1571281494Sandrew	    ("pmap_release: pmap resident count %ld != 0",
1572281494Sandrew	    pmap->pm_stats.resident_count));
1573281494Sandrew
1574297446Sandrew	m = PHYS_TO_VM_PAGE(DMAP_TO_PHYS((vm_offset_t)pmap->pm_l0));
1575281494Sandrew
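	/* Every mapping is gone, so only the l0 page itself is left to free. */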
1576281494Sandrew	m->wire_count--;
1577281494Sandrew	atomic_subtract_int(&vm_cnt.v_wire_count, 1);
1578281494Sandrew	vm_page_free_zero(m);
1579281494Sandrew}
1580281494Sandrew
1581281494Sandrew#if 0
1582281494Sandrewstatic int
1583281494Sandrewkvm_size(SYSCTL_HANDLER_ARGS)
1584281494Sandrew{
1585281494Sandrew	unsigned long ksize = VM_MAX_KERNEL_ADDRESS - VM_MIN_KERNEL_ADDRESS;
1586281494Sandrew
1587281494Sandrew	return sysctl_handle_long(oidp, &ksize, 0, req);
1588281494Sandrew}
1589281494SandrewSYSCTL_PROC(_vm, OID_AUTO, kvm_size, CTLTYPE_LONG|CTLFLAG_RD,
1590281494Sandrew    0, 0, kvm_size, "LU", "Size of KVM");
1591281494Sandrew
1592281494Sandrewstatic int
1593281494Sandrewkvm_free(SYSCTL_HANDLER_ARGS)
1594281494Sandrew{
1595281494Sandrew	unsigned long kfree = VM_MAX_KERNEL_ADDRESS - kernel_vm_end;
1596281494Sandrew
1597281494Sandrew	return sysctl_handle_long(oidp, &kfree, 0, req);
1598281494Sandrew}
1599281494SandrewSYSCTL_PROC(_vm, OID_AUTO, kvm_free, CTLTYPE_LONG|CTLFLAG_RD,
1600281494Sandrew    0, 0, kvm_free, "LU", "Amount of KVM free");
1601281494Sandrew#endif /* 0 */
1602281494Sandrew
1603281494Sandrew/*
1604281494Sandrew * grow the number of kernel page table entries, if needed
1605281494Sandrew */
1606281494Sandrewvoid
1607281494Sandrewpmap_growkernel(vm_offset_t addr)
1608281494Sandrew{
1609281494Sandrew	vm_paddr_t paddr;
1610281494Sandrew	vm_page_t nkpg;
1611297446Sandrew	pd_entry_t *l0, *l1, *l2;
1612281494Sandrew
1613281494Sandrew	mtx_assert(&kernel_map->system_mtx, MA_OWNED);
1614281494Sandrew
1615281494Sandrew	addr = roundup2(addr, L2_SIZE);
1616281494Sandrew	if (addr - 1 >= kernel_map->max_offset)
1617281494Sandrew		addr = kernel_map->max_offset;
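	/*
	 * Each pass below extends the kernel map by one L2_SIZE chunk (2MB
	 * with a 4KB granule, an assumption of this note), first installing
	 * a missing l2 table when the l1 entry is empty.
	 */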
1618281494Sandrew	while (kernel_vm_end < addr) {
1619297446Sandrew		l0 = pmap_l0(kernel_pmap, kernel_vm_end);
1620297446Sandrew		KASSERT(pmap_load(l0) != 0,
1621297446Sandrew		    ("pmap_growkernel: No level 0 kernel entry"));
1622297446Sandrew
1623297446Sandrew		l1 = pmap_l0_to_l1(l0, kernel_vm_end);
1624285045Sandrew		if (pmap_load(l1) == 0) {
1625281494Sandrew			/* The l1 entry is empty; we need a new L2 table */
1626281494Sandrew			nkpg = vm_page_alloc(NULL, kernel_vm_end >> L1_SHIFT,
1627281494Sandrew			    VM_ALLOC_INTERRUPT | VM_ALLOC_NOOBJ |
1628281494Sandrew			    VM_ALLOC_WIRED | VM_ALLOC_ZERO);
1629281494Sandrew			if (nkpg == NULL)
1630281494Sandrew				panic("pmap_growkernel: no memory to grow kernel");
1631281494Sandrew			if ((nkpg->flags & PG_ZERO) == 0)
1632281494Sandrew				pmap_zero_page(nkpg);
1633281494Sandrew			paddr = VM_PAGE_TO_PHYS(nkpg);
1634281494Sandrew			pmap_load_store(l1, paddr | L1_TABLE);
1635281494Sandrew			PTE_SYNC(l1);
1636281494Sandrew			continue; /* try again */
1637281494Sandrew		}
1638281494Sandrew		l2 = pmap_l1_to_l2(l1, kernel_vm_end);
1639285045Sandrew		if ((pmap_load(l2) & ATTR_AF) != 0) {
1640281494Sandrew			kernel_vm_end = (kernel_vm_end + L2_SIZE) & ~L2_OFFSET;
1641281494Sandrew			if (kernel_vm_end - 1 >= kernel_map->max_offset) {
1642281494Sandrew				kernel_vm_end = kernel_map->max_offset;
1643281494Sandrew				break;
1644281494Sandrew			}
1645281494Sandrew			continue;
1646281494Sandrew		}
1647281494Sandrew
1648281494Sandrew		nkpg = vm_page_alloc(NULL, kernel_vm_end >> L2_SHIFT,
1649281494Sandrew		    VM_ALLOC_INTERRUPT | VM_ALLOC_NOOBJ | VM_ALLOC_WIRED |
1650281494Sandrew		    VM_ALLOC_ZERO);
1651281494Sandrew		if (nkpg == NULL)
1652281494Sandrew			panic("pmap_growkernel: no memory to grow kernel");
1653281494Sandrew		if ((nkpg->flags & PG_ZERO) == 0)
1654281494Sandrew			pmap_zero_page(nkpg);
1655281494Sandrew		paddr = VM_PAGE_TO_PHYS(nkpg);
1656281494Sandrew		pmap_load_store(l2, paddr | L2_TABLE);
1657281494Sandrew		PTE_SYNC(l2);
1658285212Sandrew		pmap_invalidate_page(kernel_pmap, kernel_vm_end);
1659281494Sandrew
1660281494Sandrew		kernel_vm_end = (kernel_vm_end + L2_SIZE) & ~L2_OFFSET;
1661281494Sandrew		if (kernel_vm_end - 1 >= kernel_map->max_offset) {
1662281494Sandrew			kernel_vm_end = kernel_map->max_offset;
1663281494Sandrew			break;
1664281494Sandrew		}
1665281494Sandrew	}
1666281494Sandrew}
1667281494Sandrew
1668281494Sandrew
1669281494Sandrew/***************************************************
1670281494Sandrew * page management routines.
1671281494Sandrew ***************************************************/
1672281494Sandrew
1673281494SandrewCTASSERT(sizeof(struct pv_chunk) == PAGE_SIZE);
1674281494SandrewCTASSERT(_NPCM == 3);
1675281494SandrewCTASSERT(_NPCPV == 168);
1676281494Sandrew
1677281494Sandrewstatic __inline struct pv_chunk *
1678281494Sandrewpv_to_chunk(pv_entry_t pv)
1679281494Sandrew{
1680281494Sandrew
1681281494Sandrew	return ((struct pv_chunk *)((uintptr_t)pv & ~(uintptr_t)PAGE_MASK));
1682281494Sandrew}
1683281494Sandrew
1684281494Sandrew#define PV_PMAP(pv) (pv_to_chunk(pv)->pc_pmap)
1685281494Sandrew
1686281494Sandrew#define	PC_FREE0	0xfffffffffffffffful
1687281494Sandrew#define	PC_FREE1	0xfffffffffffffffful
1688281494Sandrew#define	PC_FREE2	0x000000fffffffffful
1689281494Sandrew
1690281494Sandrewstatic const uint64_t pc_freemask[_NPCM] = { PC_FREE0, PC_FREE1, PC_FREE2 };
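/*
 * Worked arithmetic: _NPCPV == 168 pv entries per chunk == 64 + 64 + 40,
 * so only the low 40 bits of the third freemask word are set
 * (0x000000fffffffffful).
 */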
1691281494Sandrew
1692281494Sandrew#if 0
1693281494Sandrew#ifdef PV_STATS
1694281494Sandrewstatic int pc_chunk_count, pc_chunk_allocs, pc_chunk_frees, pc_chunk_tryfail;
1695281494Sandrew
1696281494SandrewSYSCTL_INT(_vm_pmap, OID_AUTO, pc_chunk_count, CTLFLAG_RD, &pc_chunk_count, 0,
1697281494Sandrew	"Current number of pv entry chunks");
1698281494SandrewSYSCTL_INT(_vm_pmap, OID_AUTO, pc_chunk_allocs, CTLFLAG_RD, &pc_chunk_allocs, 0,
1699281494Sandrew	"Current number of pv entry chunks allocated");
1700281494SandrewSYSCTL_INT(_vm_pmap, OID_AUTO, pc_chunk_frees, CTLFLAG_RD, &pc_chunk_frees, 0,
1701281494Sandrew	"Current number of pv entry chunks frees");
1702281494SandrewSYSCTL_INT(_vm_pmap, OID_AUTO, pc_chunk_tryfail, CTLFLAG_RD, &pc_chunk_tryfail, 0,
1703281494Sandrew	"Number of times tried to get a chunk page but failed.");
1704281494Sandrew
1705281494Sandrewstatic long pv_entry_frees, pv_entry_allocs, pv_entry_count;
1706281494Sandrewstatic int pv_entry_spare;
1707281494Sandrew
1708281494SandrewSYSCTL_LONG(_vm_pmap, OID_AUTO, pv_entry_frees, CTLFLAG_RD, &pv_entry_frees, 0,
1709281494Sandrew	"Current number of pv entry frees");
1710281494SandrewSYSCTL_LONG(_vm_pmap, OID_AUTO, pv_entry_allocs, CTLFLAG_RD, &pv_entry_allocs, 0,
1711281494Sandrew	"Current number of pv entry allocs");
1712281494SandrewSYSCTL_LONG(_vm_pmap, OID_AUTO, pv_entry_count, CTLFLAG_RD, &pv_entry_count, 0,
1713281494Sandrew	"Current number of pv entries");
1714281494SandrewSYSCTL_INT(_vm_pmap, OID_AUTO, pv_entry_spare, CTLFLAG_RD, &pv_entry_spare, 0,
1715281494Sandrew	"Current number of spare pv entries");
1716281494Sandrew#endif
1717281494Sandrew#endif /* 0 */
1718281494Sandrew
1719281494Sandrew/*
1720281494Sandrew * We are in a serious low memory condition.  Resort to
1721281494Sandrew * drastic measures to free some pages so we can allocate
1722281494Sandrew * another pv entry chunk.
1723281494Sandrew *
1724281494Sandrew * Returns NULL if PV entries were reclaimed from the specified pmap.
1725281494Sandrew *
1726281494Sandrew * We do not, however, unmap 2mpages because subsequent accesses will
1727281494Sandrew * allocate per-page pv entries until repromotion occurs, thereby
1728281494Sandrew * exacerbating the shortage of free pv entries.
1729281494Sandrew */
1730281494Sandrewstatic vm_page_t
1731281494Sandrewreclaim_pv_chunk(pmap_t locked_pmap, struct rwlock **lockp)
1732281494Sandrew{
1733281494Sandrew
1734286073Semaste	panic("ARM64TODO: reclaim_pv_chunk");
1735281494Sandrew}
1736281494Sandrew
1737281494Sandrew/*
1738281494Sandrew * free the pv_entry back to the free list
1739281494Sandrew */
1740281494Sandrewstatic void
1741281494Sandrewfree_pv_entry(pmap_t pmap, pv_entry_t pv)
1742281494Sandrew{
1743281494Sandrew	struct pv_chunk *pc;
1744281494Sandrew	int idx, field, bit;
1745281494Sandrew
1746281494Sandrew	rw_assert(&pvh_global_lock, RA_LOCKED);
1747281494Sandrew	PMAP_LOCK_ASSERT(pmap, MA_OWNED);
1748281494Sandrew	PV_STAT(atomic_add_long(&pv_entry_frees, 1));
1749281494Sandrew	PV_STAT(atomic_add_int(&pv_entry_spare, 1));
1750281494Sandrew	PV_STAT(atomic_subtract_long(&pv_entry_count, 1));
1751281494Sandrew	pc = pv_to_chunk(pv);
1752281494Sandrew	idx = pv - &pc->pc_pventry[0];
1753281494Sandrew	field = idx / 64;
1754281494Sandrew	bit = idx % 64;
1755281494Sandrew	pc->pc_map[field] |= 1ul << bit;
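	/* E.g. (illustrative): idx == 130 sets bit 2 of pc_map[2]. */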
1756281494Sandrew	if (pc->pc_map[0] != PC_FREE0 || pc->pc_map[1] != PC_FREE1 ||
1757281494Sandrew	    pc->pc_map[2] != PC_FREE2) {
1758281494Sandrew		/* 98% of the time, pc is already at the head of the list. */
1759281494Sandrew		if (__predict_false(pc != TAILQ_FIRST(&pmap->pm_pvchunk))) {
1760281494Sandrew			TAILQ_REMOVE(&pmap->pm_pvchunk, pc, pc_list);
1761281494Sandrew			TAILQ_INSERT_HEAD(&pmap->pm_pvchunk, pc, pc_list);
1762281494Sandrew		}
1763281494Sandrew		return;
1764281494Sandrew	}
1765281494Sandrew	TAILQ_REMOVE(&pmap->pm_pvchunk, pc, pc_list);
1766281494Sandrew	free_pv_chunk(pc);
1767281494Sandrew}
1768281494Sandrew
1769281494Sandrewstatic void
1770281494Sandrewfree_pv_chunk(struct pv_chunk *pc)
1771281494Sandrew{
1772281494Sandrew	vm_page_t m;
1773281494Sandrew
1774281494Sandrew	mtx_lock(&pv_chunks_mutex);
1775281494Sandrew 	TAILQ_REMOVE(&pv_chunks, pc, pc_lru);
1776281494Sandrew	mtx_unlock(&pv_chunks_mutex);
1777281494Sandrew	PV_STAT(atomic_subtract_int(&pv_entry_spare, _NPCPV));
1778281494Sandrew	PV_STAT(atomic_subtract_int(&pc_chunk_count, 1));
1779281494Sandrew	PV_STAT(atomic_add_int(&pc_chunk_frees, 1));
1780281494Sandrew	/* entire chunk is free, return it */
1781281494Sandrew	m = PHYS_TO_VM_PAGE(DMAP_TO_PHYS((vm_offset_t)pc));
1782281494Sandrew	dump_drop_page(m->phys_addr);
1783288256Salc	vm_page_unwire(m, PQ_NONE);
1784281494Sandrew	vm_page_free(m);
1785281494Sandrew}
1786281494Sandrew
1787281494Sandrew/*
1788281494Sandrew * Returns a new PV entry, allocating a new PV chunk from the system when
1789281494Sandrew * needed.  If this PV chunk allocation fails and a PV list lock pointer was
1790281494Sandrew * given, a PV chunk is reclaimed from an arbitrary pmap.  Otherwise, NULL is
1791281494Sandrew * returned.
1792281494Sandrew *
1793281494Sandrew * The given PV list lock may be released.
1794281494Sandrew */
1795281494Sandrewstatic pv_entry_t
1796281494Sandrewget_pv_entry(pmap_t pmap, struct rwlock **lockp)
1797281494Sandrew{
1798281494Sandrew	int bit, field;
1799281494Sandrew	pv_entry_t pv;
1800281494Sandrew	struct pv_chunk *pc;
1801281494Sandrew	vm_page_t m;
1802281494Sandrew
1803281494Sandrew	rw_assert(&pvh_global_lock, RA_LOCKED);
1804281494Sandrew	PMAP_LOCK_ASSERT(pmap, MA_OWNED);
1805281494Sandrew	PV_STAT(atomic_add_long(&pv_entry_allocs, 1));
1806281494Sandrewretry:
1807281494Sandrew	pc = TAILQ_FIRST(&pmap->pm_pvchunk);
1808281494Sandrew	if (pc != NULL) {
1809281494Sandrew		for (field = 0; field < _NPCM; field++) {
1810281494Sandrew			if (pc->pc_map[field]) {
1811281494Sandrew				bit = ffsl(pc->pc_map[field]) - 1;
1812281494Sandrew				break;
1813281494Sandrew			}
1814281494Sandrew		}
1815281494Sandrew		if (field < _NPCM) {
1816281494Sandrew			pv = &pc->pc_pventry[field * 64 + bit];
1817281494Sandrew			pc->pc_map[field] &= ~(1ul << bit);
1818281494Sandrew			/* If this was the last item, move it to tail */
1819281494Sandrew			if (pc->pc_map[0] == 0 && pc->pc_map[1] == 0 &&
1820281494Sandrew			    pc->pc_map[2] == 0) {
1821281494Sandrew				TAILQ_REMOVE(&pmap->pm_pvchunk, pc, pc_list);
1822281494Sandrew				TAILQ_INSERT_TAIL(&pmap->pm_pvchunk, pc,
1823281494Sandrew				    pc_list);
1824281494Sandrew			}
1825281494Sandrew			PV_STAT(atomic_add_long(&pv_entry_count, 1));
1826281494Sandrew			PV_STAT(atomic_subtract_int(&pv_entry_spare, 1));
1827281494Sandrew			return (pv);
1828281494Sandrew		}
1829281494Sandrew	}
1830281494Sandrew	/* No free items, allocate another chunk */
1831281494Sandrew	m = vm_page_alloc(NULL, 0, VM_ALLOC_NORMAL | VM_ALLOC_NOOBJ |
1832281494Sandrew	    VM_ALLOC_WIRED);
1833281494Sandrew	if (m == NULL) {
1834281494Sandrew		if (lockp == NULL) {
1835281494Sandrew			PV_STAT(pc_chunk_tryfail++);
1836281494Sandrew			return (NULL);
1837281494Sandrew		}
1838281494Sandrew		m = reclaim_pv_chunk(pmap, lockp);
1839281494Sandrew		if (m == NULL)
1840281494Sandrew			goto retry;
1841281494Sandrew	}
1842281494Sandrew	PV_STAT(atomic_add_int(&pc_chunk_count, 1));
1843281494Sandrew	PV_STAT(atomic_add_int(&pc_chunk_allocs, 1));
1844281494Sandrew	dump_add_page(m->phys_addr);
1845281494Sandrew	pc = (void *)PHYS_TO_DMAP(m->phys_addr);
1846281494Sandrew	pc->pc_pmap = pmap;
1847281494Sandrew	pc->pc_map[0] = PC_FREE0 & ~1ul;	/* preallocated bit 0 */
1848281494Sandrew	pc->pc_map[1] = PC_FREE1;
1849281494Sandrew	pc->pc_map[2] = PC_FREE2;
1850281494Sandrew	mtx_lock(&pv_chunks_mutex);
1851281494Sandrew	TAILQ_INSERT_TAIL(&pv_chunks, pc, pc_lru);
1852281494Sandrew	mtx_unlock(&pv_chunks_mutex);
1853281494Sandrew	pv = &pc->pc_pventry[0];
1854281494Sandrew	TAILQ_INSERT_HEAD(&pmap->pm_pvchunk, pc, pc_list);
1855281494Sandrew	PV_STAT(atomic_add_long(&pv_entry_count, 1));
1856281494Sandrew	PV_STAT(atomic_add_int(&pv_entry_spare, _NPCPV - 1));
1857281494Sandrew	return (pv);
1858281494Sandrew}
1859281494Sandrew
1860281494Sandrew/*
1861281494Sandrew * First find and then remove the pv entry for the specified pmap and virtual
1862281494Sandrew * address from the specified pv list.  Returns the pv entry if found and NULL
1863281494Sandrew * otherwise.  This operation can be performed on pv lists for either 4KB or
1864281494Sandrew * 2MB page mappings.
1865281494Sandrew */
1866281494Sandrewstatic __inline pv_entry_t
1867281494Sandrewpmap_pvh_remove(struct md_page *pvh, pmap_t pmap, vm_offset_t va)
1868281494Sandrew{
1869281494Sandrew	pv_entry_t pv;
1870281494Sandrew
1871281494Sandrew	rw_assert(&pvh_global_lock, RA_LOCKED);
1872281494Sandrew	TAILQ_FOREACH(pv, &pvh->pv_list, pv_next) {
1873281494Sandrew		if (pmap == PV_PMAP(pv) && va == pv->pv_va) {
1874281494Sandrew			TAILQ_REMOVE(&pvh->pv_list, pv, pv_next);
1875281494Sandrew			pvh->pv_gen++;
1876281494Sandrew			break;
1877281494Sandrew		}
1878281494Sandrew	}
1879281494Sandrew	return (pv);
1880281494Sandrew}
1881281494Sandrew
1882281494Sandrew/*
1883281494Sandrew * First find and then destroy the pv entry for the specified pmap and virtual
1884281494Sandrew * address.  This operation can be performed on pv lists for either 4KB or 2MB
1885281494Sandrew * page mappings.
1886281494Sandrew */
1887281494Sandrewstatic void
1888281494Sandrewpmap_pvh_free(struct md_page *pvh, pmap_t pmap, vm_offset_t va)
1889281494Sandrew{
1890281494Sandrew	pv_entry_t pv;
1891281494Sandrew
1892281494Sandrew	pv = pmap_pvh_remove(pvh, pmap, va);
1893281494Sandrew	KASSERT(pv != NULL, ("pmap_pvh_free: pv not found"));
1894281494Sandrew	free_pv_entry(pmap, pv);
1895281494Sandrew}
1896281494Sandrew
1897281494Sandrew/*
1898281494Sandrew * Conditionally create the PV entry for a 4KB page mapping if the required
1899281494Sandrew * memory can be allocated without resorting to reclamation.
1900281494Sandrew */
1901281494Sandrewstatic boolean_t
1902281494Sandrewpmap_try_insert_pv_entry(pmap_t pmap, vm_offset_t va, vm_page_t m,
1903281494Sandrew    struct rwlock **lockp)
1904281494Sandrew{
1905281494Sandrew	pv_entry_t pv;
1906281494Sandrew
1907281494Sandrew	rw_assert(&pvh_global_lock, RA_LOCKED);
1908281494Sandrew	PMAP_LOCK_ASSERT(pmap, MA_OWNED);
1909281494Sandrew	/* Pass NULL instead of the lock pointer to disable reclamation. */
1910281494Sandrew	if ((pv = get_pv_entry(pmap, NULL)) != NULL) {
1911281494Sandrew		pv->pv_va = va;
1912281494Sandrew		CHANGE_PV_LIST_LOCK_TO_VM_PAGE(lockp, m);
1913281494Sandrew		TAILQ_INSERT_TAIL(&m->md.pv_list, pv, pv_next);
1914281494Sandrew		m->md.pv_gen++;
1915281494Sandrew		return (TRUE);
1916281494Sandrew	} else
1917281494Sandrew		return (FALSE);
1918281494Sandrew}
1919281494Sandrew
1920281494Sandrew/*
1921281494Sandrew * pmap_remove_l3: do the work needed to unmap a page in a process
1922281494Sandrew */
1923281494Sandrewstatic int
1924281494Sandrewpmap_remove_l3(pmap_t pmap, pt_entry_t *l3, vm_offset_t va,
1925281494Sandrew    pd_entry_t l2e, struct spglist *free, struct rwlock **lockp)
1926281494Sandrew{
1927281494Sandrew	pt_entry_t old_l3;
1928281494Sandrew	vm_page_t m;
1929281494Sandrew
1930281494Sandrew	PMAP_LOCK_ASSERT(pmap, MA_OWNED);
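	/*
	 * For a cacheable mapping in the current pmap, write any dirty
	 * lines back while the VA can still be translated; the mapping
	 * is about to be destroyed.
	 */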
1931281494Sandrew	if (pmap_is_current(pmap) && pmap_l3_valid_cacheable(pmap_load(l3)))
1932281494Sandrew		cpu_dcache_wb_range(va, L3_SIZE);
1933281494Sandrew	old_l3 = pmap_load_clear(l3);
1934281494Sandrew	PTE_SYNC(l3);
1935285212Sandrew	pmap_invalidate_page(pmap, va);
1936281494Sandrew	if (old_l3 & ATTR_SW_WIRED)
1937281494Sandrew		pmap->pm_stats.wired_count -= 1;
1938281494Sandrew	pmap_resident_count_dec(pmap, 1);
1939281494Sandrew	if (old_l3 & ATTR_SW_MANAGED) {
1940281494Sandrew		m = PHYS_TO_VM_PAGE(old_l3 & ~ATTR_MASK);
1941281494Sandrew		if (pmap_page_dirty(old_l3))
1942281494Sandrew			vm_page_dirty(m);
1943281494Sandrew		if (old_l3 & ATTR_AF)
1944281494Sandrew			vm_page_aflag_set(m, PGA_REFERENCED);
1945281494Sandrew		CHANGE_PV_LIST_LOCK_TO_VM_PAGE(lockp, m);
1946281494Sandrew		pmap_pvh_free(&m->md, pmap, va);
1947281494Sandrew	}
1948281494Sandrew	return (pmap_unuse_l3(pmap, va, l2e, free));
1949281494Sandrew}
1950281494Sandrew
1951281494Sandrew/*
1952281494Sandrew *	Remove the given range of addresses from the specified map.
1953281494Sandrew *
1954281494Sandrew *	It is assumed that the start and end are properly
1955281494Sandrew *	rounded to the page size.
1956281494Sandrew */
1957281494Sandrewvoid
1958281494Sandrewpmap_remove(pmap_t pmap, vm_offset_t sva, vm_offset_t eva)
1959281494Sandrew{
1960281494Sandrew	struct rwlock *lock;
1961281494Sandrew	vm_offset_t va, va_next;
1962297446Sandrew	pd_entry_t *l0, *l1, *l2;
1963281494Sandrew	pt_entry_t l3_paddr, *l3;
1964281494Sandrew	struct spglist free;
1965281494Sandrew	int anyvalid;
1966281494Sandrew
1967281494Sandrew	/*
1968281494Sandrew	 * Perform an unsynchronized read.  This is, however, safe.
1969281494Sandrew	 */
1970281494Sandrew	if (pmap->pm_stats.resident_count == 0)
1971281494Sandrew		return;
1972281494Sandrew
1973281494Sandrew	anyvalid = 0;
1974281494Sandrew	SLIST_INIT(&free);
1975281494Sandrew
1976281494Sandrew	rw_rlock(&pvh_global_lock);
1977281494Sandrew	PMAP_LOCK(pmap);
1978281494Sandrew
1979281494Sandrew	lock = NULL;
1980281494Sandrew	for (; sva < eva; sva = va_next) {
1981281494Sandrew
1982281494Sandrew		if (pmap->pm_stats.resident_count == 0)
1983281494Sandrew			break;
1984281494Sandrew
1985297446Sandrew		l0 = pmap_l0(pmap, sva);
1986297446Sandrew		if (pmap_load(l0) == 0) {
1987297446Sandrew			va_next = (sva + L0_SIZE) & ~L0_OFFSET;
1988297446Sandrew			if (va_next < sva)
1989297446Sandrew				va_next = eva;
1990297446Sandrew			continue;
1991297446Sandrew		}
1992297446Sandrew
1993297446Sandrew		l1 = pmap_l0_to_l1(l0, sva);
1994285045Sandrew		if (pmap_load(l1) == 0) {
1995281494Sandrew			va_next = (sva + L1_SIZE) & ~L1_OFFSET;
1996281494Sandrew			if (va_next < sva)
1997281494Sandrew				va_next = eva;
1998281494Sandrew			continue;
1999281494Sandrew		}
2000281494Sandrew
2001281494Sandrew		/*
2002281494Sandrew		 * Calculate index for next page table.
2003281494Sandrew		 */
2004281494Sandrew		va_next = (sva + L2_SIZE) & ~L2_OFFSET;
2005281494Sandrew		if (va_next < sva)
2006281494Sandrew			va_next = eva;
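		/*
		 * (Here and above, va_next < sva detects wrap-around past
		 * the top of the address space; the walk is then clamped
		 * to eva.)
		 */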
2007281494Sandrew
2008281494Sandrew		l2 = pmap_l1_to_l2(l1, sva);
2009281494Sandrew		if (l2 == NULL)
2010281494Sandrew			continue;
2011281494Sandrew
2012288445Sandrew		l3_paddr = pmap_load(l2);
2013281494Sandrew
2014281494Sandrew		/*
2015281494Sandrew		 * Weed out invalid mappings.
2016281494Sandrew		 */
2017281494Sandrew		if ((l3_paddr & ATTR_DESCR_MASK) != L2_TABLE)
2018281494Sandrew			continue;
2019281494Sandrew
2020281494Sandrew		/*
2021281494Sandrew		 * Limit our scan to either the end of the va represented
2022281494Sandrew		 * by the current page table page, or to the end of the
2023281494Sandrew		 * range being removed.
2024281494Sandrew		 */
2025281494Sandrew		if (va_next > eva)
2026281494Sandrew			va_next = eva;
2027281494Sandrew
2028281494Sandrew		va = va_next;
2029281494Sandrew		for (l3 = pmap_l2_to_l3(l2, sva); sva != va_next; l3++,
2030281494Sandrew		    sva += L3_SIZE) {
2031281494Sandrew			if (l3 == NULL)
2032281494Sandrew				panic("pmap_remove: l3 == NULL");
2033285045Sandrew			if (pmap_load(l3) == 0) {
2034281494Sandrew				if (va != va_next) {
2035281494Sandrew					pmap_invalidate_range(pmap, va, sva);
2036281494Sandrew					va = va_next;
2037281494Sandrew				}
2038281494Sandrew				continue;
2039281494Sandrew			}
2040281494Sandrew			if (va == va_next)
2041281494Sandrew				va = sva;
2042281494Sandrew			if (pmap_remove_l3(pmap, l3, sva, l3_paddr, &free,
2043281494Sandrew			    &lock)) {
2044281494Sandrew				sva += L3_SIZE;
2045281494Sandrew				break;
2046281494Sandrew			}
2047281494Sandrew		}
2048281494Sandrew		if (va != va_next)
2049281494Sandrew			pmap_invalidate_range(pmap, va, sva);
2050281494Sandrew	}
2051281494Sandrew	if (lock != NULL)
2052281494Sandrew		rw_wunlock(lock);
2053281494Sandrew	if (anyvalid)
2054281494Sandrew		pmap_invalidate_all(pmap);
2055281494Sandrew	rw_runlock(&pvh_global_lock);
2056281494Sandrew	PMAP_UNLOCK(pmap);
2057281494Sandrew	pmap_free_zero_pages(&free);
2058281494Sandrew}
2059281494Sandrew
2060281494Sandrew/*
2061281494Sandrew *	Routine:	pmap_remove_all
2062281494Sandrew *	Function:
2063281494Sandrew *		Removes this physical page from
2064281494Sandrew *		all physical maps in which it resides.
2065281494Sandrew *		Reflects back modify bits to the pager.
2066281494Sandrew *
2067281494Sandrew *	Notes:
2068281494Sandrew *		Original versions of this routine were very
2069281494Sandrew *		inefficient because they iteratively called
2070281494Sandrew *		pmap_remove (slow...)
2071281494Sandrew */
2072281494Sandrew
2073281494Sandrewvoid
2074281494Sandrewpmap_remove_all(vm_page_t m)
2075281494Sandrew{
2076281494Sandrew	pv_entry_t pv;
2077281494Sandrew	pmap_t pmap;
2078297446Sandrew	pd_entry_t *pde, tpde;
2079297446Sandrew	pt_entry_t *pte, tpte;
2080281494Sandrew	struct spglist free;
2081297446Sandrew	int lvl;
2082281494Sandrew
2083281494Sandrew	KASSERT((m->oflags & VPO_UNMANAGED) == 0,
2084281494Sandrew	    ("pmap_remove_all: page %p is not managed", m));
2085281494Sandrew	SLIST_INIT(&free);
2086281494Sandrew	rw_wlock(&pvh_global_lock);
2087281494Sandrew	while ((pv = TAILQ_FIRST(&m->md.pv_list)) != NULL) {
2088281494Sandrew		pmap = PV_PMAP(pv);
2089281494Sandrew		PMAP_LOCK(pmap);
2090281494Sandrew		pmap_resident_count_dec(pmap, 1);
2091297446Sandrew
2092297446Sandrew		pde = pmap_pde(pmap, pv->pv_va, &lvl);
2093297446Sandrew		KASSERT(pde != NULL,
2094297446Sandrew		    ("pmap_remove_all: no page directory entry found"));
2095297446Sandrew		KASSERT(lvl == 2,
2096297446Sandrew		    ("pmap_remove_all: invalid pde level %d", lvl));
2097297446Sandrew		tpde = pmap_load(pde);
2098297446Sandrew
2099297446Sandrew		pte = pmap_l2_to_l3(pde, pv->pv_va);
2100297446Sandrew		tpte = pmap_load(pte);
2101281494Sandrew		if (pmap_is_current(pmap) &&
2102297446Sandrew		    pmap_l3_valid_cacheable(tpte))
2103281494Sandrew			cpu_dcache_wb_range(pv->pv_va, L3_SIZE);
2104297446Sandrew		pmap_load_clear(pte);
2105297446Sandrew		PTE_SYNC(pte);
2106285212Sandrew		pmap_invalidate_page(pmap, pv->pv_va);
2107297446Sandrew		if (tpte & ATTR_SW_WIRED)
2108281494Sandrew			pmap->pm_stats.wired_count--;
2109297446Sandrew		if ((tpte & ATTR_AF) != 0)
2110281494Sandrew			vm_page_aflag_set(m, PGA_REFERENCED);
2111281494Sandrew
2112281494Sandrew		/*
2113281494Sandrew		 * Update the vm_page_t clean and reference bits.
2114281494Sandrew		 */
2115297446Sandrew		if (pmap_page_dirty(tpte))
2116281494Sandrew			vm_page_dirty(m);
2117297446Sandrew		pmap_unuse_l3(pmap, pv->pv_va, tpde, &free);
2118281494Sandrew		TAILQ_REMOVE(&m->md.pv_list, pv, pv_next);
2119281494Sandrew		m->md.pv_gen++;
2120281494Sandrew		free_pv_entry(pmap, pv);
2121281494Sandrew		PMAP_UNLOCK(pmap);
2122281494Sandrew	}
2123281494Sandrew	vm_page_aflag_clear(m, PGA_WRITEABLE);
2124281494Sandrew	rw_wunlock(&pvh_global_lock);
2125281494Sandrew	pmap_free_zero_pages(&free);
2126281494Sandrew}
2127281494Sandrew
2128281494Sandrew/*
2129281494Sandrew *	Set the physical protection on the
2130281494Sandrew *	specified range of this map as requested.
2131281494Sandrew */
2132281494Sandrewvoid
2133281494Sandrewpmap_protect(pmap_t pmap, vm_offset_t sva, vm_offset_t eva, vm_prot_t prot)
2134281494Sandrew{
2135281494Sandrew	vm_offset_t va, va_next;
2136297446Sandrew	pd_entry_t *l0, *l1, *l2;
2137281494Sandrew	pt_entry_t *l3p, l3;
2138281494Sandrew
2139281494Sandrew	if ((prot & VM_PROT_READ) == VM_PROT_NONE) {
2140281494Sandrew		pmap_remove(pmap, sva, eva);
2141281494Sandrew		return;
2142281494Sandrew	}
2143281494Sandrew
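	/*
	 * Only write permission is revoked below, so if the new protection
	 * still includes VM_PROT_WRITE there is nothing to change.
	 */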
2144281494Sandrew	if ((prot & VM_PROT_WRITE) == VM_PROT_WRITE)
2145281494Sandrew		return;
2146281494Sandrew
2147281494Sandrew	PMAP_LOCK(pmap);
2148281494Sandrew	for (; sva < eva; sva = va_next) {
2149281494Sandrew
2150297446Sandrew		l0 = pmap_l0(pmap, sva);
2151297446Sandrew		if (pmap_load(l0) == 0) {
2152297446Sandrew			va_next = (sva + L0_SIZE) & ~L0_OFFSET;
2153297446Sandrew			if (va_next < sva)
2154297446Sandrew				va_next = eva;
2155297446Sandrew			continue;
2156297446Sandrew		}
2157297446Sandrew
2158297446Sandrew		l1 = pmap_l0_to_l1(l0, sva);
2159285045Sandrew		if (pmap_load(l1) == 0) {
2160281494Sandrew			va_next = (sva + L1_SIZE) & ~L1_OFFSET;
2161281494Sandrew			if (va_next < sva)
2162281494Sandrew				va_next = eva;
2163281494Sandrew			continue;
2164281494Sandrew		}
2165281494Sandrew
2166281494Sandrew		va_next = (sva + L2_SIZE) & ~L2_OFFSET;
2167281494Sandrew		if (va_next < sva)
2168281494Sandrew			va_next = eva;
2169281494Sandrew
2170281494Sandrew		l2 = pmap_l1_to_l2(l1, sva);
2171288445Sandrew		if (l2 == NULL || (pmap_load(l2) & ATTR_DESCR_MASK) != L2_TABLE)
2172281494Sandrew			continue;
2173281494Sandrew
2174281494Sandrew		if (va_next > eva)
2175281494Sandrew			va_next = eva;
2176281494Sandrew
2177281494Sandrew		va = va_next;
2178281494Sandrew		for (l3p = pmap_l2_to_l3(l2, sva); sva != va_next; l3p++,
2179281494Sandrew		    sva += L3_SIZE) {
2180281494Sandrew			l3 = pmap_load(l3p);
2181281494Sandrew			if (pmap_l3_valid(l3)) {
2182281494Sandrew				pmap_set(l3p, ATTR_AP(ATTR_AP_RO));
2183281494Sandrew				PTE_SYNC(l3p);
2184285212Sandrew				/* XXX: Use pmap_invalidate_range */
2185285212Sandrew				pmap_invalidate_page(pmap, va);
2186281494Sandrew			}
2187281494Sandrew		}
2188281494Sandrew	}
2189281494Sandrew	PMAP_UNLOCK(pmap);
2190281494Sandrew
2191281494Sandrew	/* TODO: Only invalidate entries we are touching */
2192281494Sandrew	pmap_invalidate_all(pmap);
2193281494Sandrew}
2194281494Sandrew
2195281494Sandrew/*
2196281494Sandrew *	Insert the given physical page (p) at
2197281494Sandrew *	the specified virtual address (v) in the
2198281494Sandrew *	target physical map with the protection requested.
2199281494Sandrew *
2200281494Sandrew *	If specified, the page will be wired down, meaning
2201281494Sandrew *	that the related pte can not be reclaimed.
2202281494Sandrew *
2203281494Sandrew *	NB:  This is the only routine which MAY NOT lazy-evaluate
2204281494Sandrew *	or lose information.  That is, this routine must actually
2205281494Sandrew *	insert this page into the given map NOW.
2206281494Sandrew */
2207281494Sandrewint
2208281494Sandrewpmap_enter(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot,
2209281494Sandrew    u_int flags, int8_t psind __unused)
2210281494Sandrew{
2211281494Sandrew	struct rwlock *lock;
2212297446Sandrew	pd_entry_t *pde;
2213281494Sandrew	pt_entry_t new_l3, orig_l3;
2214281494Sandrew	pt_entry_t *l3;
2215281494Sandrew	pv_entry_t pv;
2216297446Sandrew	vm_paddr_t opa, pa, l1_pa, l2_pa, l3_pa;
2217297446Sandrew	vm_page_t mpte, om, l1_m, l2_m, l3_m;
2218281494Sandrew	boolean_t nosleep;
2219297446Sandrew	int lvl;
2220281494Sandrew
2221281494Sandrew	va = trunc_page(va);
2222281494Sandrew	if ((m->oflags & VPO_UNMANAGED) == 0 && !vm_page_xbusied(m))
2223281494Sandrew		VM_OBJECT_ASSERT_LOCKED(m->object);
2224281494Sandrew	pa = VM_PAGE_TO_PHYS(m);
2225285537Sandrew	new_l3 = (pt_entry_t)(pa | ATTR_DEFAULT | ATTR_IDX(m->md.pv_memattr) |
2226285537Sandrew	    L3_PAGE);
2227281494Sandrew	if ((prot & VM_PROT_WRITE) == 0)
2228281494Sandrew		new_l3 |= ATTR_AP(ATTR_AP_RO);
2229281494Sandrew	if ((flags & PMAP_ENTER_WIRED) != 0)
2230281494Sandrew		new_l3 |= ATTR_SW_WIRED;
2231281494Sandrew	if ((va >> 63) == 0)
2232281494Sandrew		new_l3 |= ATTR_AP(ATTR_AP_USER);
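	/*
	 * new_l3 now holds the complete candidate descriptor: the output
	 * address, the memory attribute index, the access permissions and
	 * the L3_PAGE type.  The (va >> 63) == 0 test selects TTBR0 (user)
	 * addresses, which additionally get ATTR_AP_USER.
	 */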
2233281494Sandrew
2234285212Sandrew	CTR2(KTR_PMAP, "pmap_enter: %.16lx -> %.16lx", va, pa);
2235285212Sandrew
2236281494Sandrew	mpte = NULL;
2237281494Sandrew
2238281494Sandrew	lock = NULL;
2239281494Sandrew	rw_rlock(&pvh_global_lock);
2240281494Sandrew	PMAP_LOCK(pmap);
2241281494Sandrew
2242281494Sandrew	if (va < VM_MAXUSER_ADDRESS) {
2243281494Sandrew		nosleep = (flags & PMAP_ENTER_NOSLEEP) != 0;
2244281494Sandrew		mpte = pmap_alloc_l3(pmap, va, nosleep ? NULL : &lock);
2245281494Sandrew		if (mpte == NULL && nosleep) {
2246285212Sandrew			CTR0(KTR_PMAP, "pmap_enter: mpte == NULL");
2247281494Sandrew			if (lock != NULL)
2248281494Sandrew				rw_wunlock(lock);
2249281494Sandrew			rw_runlock(&pvh_global_lock);
2250281494Sandrew			PMAP_UNLOCK(pmap);
2251281494Sandrew			return (KERN_RESOURCE_SHORTAGE);
2252281494Sandrew		}
2253297446Sandrew		pde = pmap_pde(pmap, va, &lvl);
2254297446Sandrew		KASSERT(pde != NULL,
2255297446Sandrew		    ("pmap_enter: Invalid page entry, va: 0x%lx", va));
2256297446Sandrew		KASSERT(lvl == 2,
2257297446Sandrew		    ("pmap_enter: Invalid level %d", lvl));
2258297446Sandrew
2259297446Sandrew		l3 = pmap_l2_to_l3(pde, va);
2260281494Sandrew	} else {
2261297446Sandrew		pde = pmap_pde(pmap, va, &lvl);
2262297446Sandrew		/*
2263297446Sandrew		 * If we get a level 2 pde it must point to a level 3 table;
2264297446Sandrew		 * otherwise we will need to create the intermediate tables.
2265297446Sandrew		 */
2266297446Sandrew		if (lvl < 2) {
2267297446Sandrew			switch (lvl) {
2268297446Sandrew			default:
2269297446Sandrew			case -1:
2270297446Sandrew				/* Get the l0 pde to update */
2271297446Sandrew				pde = pmap_l0(pmap, va);
2272297446Sandrew				KASSERT(pde != NULL, ("pmap_enter: Missing l0 entry, va: 0x%lx", va));
2273281494Sandrew
2274297446Sandrew				l1_m = vm_page_alloc(NULL, 0, VM_ALLOC_NORMAL |
2275297446Sandrew				    VM_ALLOC_NOOBJ | VM_ALLOC_WIRED |
2276297446Sandrew				    VM_ALLOC_ZERO);
2277297446Sandrew				if (l1_m == NULL)
2278297446Sandrew					panic("pmap_enter: l1 pte_m == NULL");
2279297446Sandrew				if ((l1_m->flags & PG_ZERO) == 0)
2280297446Sandrew					pmap_zero_page(l1_m);
2281297446Sandrew
2282297446Sandrew				l1_pa = VM_PAGE_TO_PHYS(l1_m);
2283297446Sandrew				pmap_load_store(pde, l1_pa | L0_TABLE);
2284297446Sandrew				PTE_SYNC(pde);
2285297446Sandrew				/* FALLTHROUGH */
2286297446Sandrew			case 0:
2287297446Sandrew				/* Get the l1 pde to update */
2288297446Sandrew				pde = pmap_l0_to_l1(pde, va);
2289297446Sandrew				KASSERT(pde != NULL, ("pmap_enter: Missing l1 entry, va: 0x%lx", va));
2290297446Sandrew
2291281494Sandrew				l2_m = vm_page_alloc(NULL, 0, VM_ALLOC_NORMAL |
2292281494Sandrew				    VM_ALLOC_NOOBJ | VM_ALLOC_WIRED |
2293281494Sandrew				    VM_ALLOC_ZERO);
2294281494Sandrew				if (l2_m == NULL)
2295281494Sandrew					panic("pmap_enter: l2 pte_m == NULL");
2296281494Sandrew				if ((l2_m->flags & PG_ZERO) == 0)
2297281494Sandrew					pmap_zero_page(l2_m);
2298281494Sandrew
2299281494Sandrew				l2_pa = VM_PAGE_TO_PHYS(l2_m);
2300297446Sandrew				pmap_load_store(pde, l2_pa | L1_TABLE);
2301297446Sandrew				PTE_SYNC(pde);
2302297446Sandrew				/* FALLTHROUGH */
2303297446Sandrew			case 1:
2304297446Sandrew				/* Get the l2 pde to update */
2305297446Sandrew				pde = pmap_l1_to_l2(pde, va);
2306281494Sandrew
2307297446Sandrew				l3_m = vm_page_alloc(NULL, 0, VM_ALLOC_NORMAL |
2308297446Sandrew				    VM_ALLOC_NOOBJ | VM_ALLOC_WIRED |
2309297446Sandrew				    VM_ALLOC_ZERO);
2310297446Sandrew				if (l3_m == NULL)
2311297446Sandrew					panic("pmap_enter: l3 pte_m == NULL");
2312297446Sandrew				if ((l3_m->flags & PG_ZERO) == 0)
2313297446Sandrew					pmap_zero_page(l3_m);
2314281494Sandrew
2315297446Sandrew				l3_pa = VM_PAGE_TO_PHYS(l3_m);
2316297446Sandrew				pmap_load_store(pde, l3_pa | L2_TABLE);
2317297446Sandrew				PTE_SYNC(pde);
2318297446Sandrew				break;
2319297446Sandrew			}
2320281494Sandrew		}
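		/*
		 * The switch above deliberately falls through: starting at
		 * whichever level was missing, each case installs one table
		 * and descends, so a single pass creates the l1, l2 and l3
		 * tables as needed.
		 */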
2321297446Sandrew		l3 = pmap_l2_to_l3(pde, va);
2322285212Sandrew		pmap_invalidate_page(pmap, va);
2323281494Sandrew	}
2324281494Sandrew
2325281494Sandrew	om = NULL;
2326281494Sandrew	orig_l3 = pmap_load(l3);
2327281494Sandrew	opa = orig_l3 & ~ATTR_MASK;
2328281494Sandrew
2329281494Sandrew	/*
2330281494Sandrew	 * Is the specified virtual address already mapped?
2331281494Sandrew	 */
2332281494Sandrew	if (pmap_l3_valid(orig_l3)) {
2333281494Sandrew		/*
2334281494Sandrew		 * Wiring change, just update stats. We don't worry about
2335281494Sandrew		 * wiring PT pages as they remain resident as long as there
2336281494Sandrew		 * are valid mappings in them. Hence, if a user page is wired,
2337281494Sandrew		 * the PT page will be also.
2338281494Sandrew		 */
2339281494Sandrew		if ((flags & PMAP_ENTER_WIRED) != 0 &&
2340281494Sandrew		    (orig_l3 & ATTR_SW_WIRED) == 0)
2341281494Sandrew			pmap->pm_stats.wired_count++;
2342281494Sandrew		else if ((flags & PMAP_ENTER_WIRED) == 0 &&
2343281494Sandrew		    (orig_l3 & ATTR_SW_WIRED) != 0)
2344281494Sandrew			pmap->pm_stats.wired_count--;
2345281494Sandrew
2346281494Sandrew		/*
2347281494Sandrew		 * Remove the extra PT page reference.
2348281494Sandrew		 */
2349281494Sandrew		if (mpte != NULL) {
2350281494Sandrew			mpte->wire_count--;
2351281494Sandrew			KASSERT(mpte->wire_count > 0,
2352281494Sandrew			    ("pmap_enter: missing reference to page table page,"
2353281494Sandrew			     " va: 0x%lx", va));
2354281494Sandrew		}
2355281494Sandrew
2356281494Sandrew		/*
2357281494Sandrew		 * Has the physical page changed?
2358281494Sandrew		 */
2359281494Sandrew		if (opa == pa) {
2360281494Sandrew			/*
2361281494Sandrew			 * No, might be a protection or wiring change.
2362281494Sandrew			 */
2363281494Sandrew			if ((orig_l3 & ATTR_SW_MANAGED) != 0) {
2364281494Sandrew				new_l3 |= ATTR_SW_MANAGED;
2365281494Sandrew				if ((new_l3 & ATTR_AP(ATTR_AP_RW)) ==
2366281494Sandrew				    ATTR_AP(ATTR_AP_RW)) {
2367281494Sandrew					vm_page_aflag_set(m, PGA_WRITEABLE);
2368281494Sandrew				}
2369281494Sandrew			}
2370281494Sandrew			goto validate;
2371281494Sandrew		}
2372281494Sandrew
2373281494Sandrew		/* Flush the cache, there might be uncommitted data in it */
2374281494Sandrew		if (pmap_is_current(pmap) && pmap_l3_valid_cacheable(orig_l3))
2375281494Sandrew			cpu_dcache_wb_range(va, L3_SIZE);
2376281494Sandrew	} else {
2377281494Sandrew		/*
2378281494Sandrew		 * Increment the counters.
2379281494Sandrew		 */
2380281494Sandrew		if ((new_l3 & ATTR_SW_WIRED) != 0)
2381281494Sandrew			pmap->pm_stats.wired_count++;
2382281494Sandrew		pmap_resident_count_inc(pmap, 1);
2383281494Sandrew	}
2384281494Sandrew	/*
2385281494Sandrew	 * Enter on the PV list if part of our managed memory.
2386281494Sandrew	 */
2387281494Sandrew	if ((m->oflags & VPO_UNMANAGED) == 0) {
2388281494Sandrew		new_l3 |= ATTR_SW_MANAGED;
2389281494Sandrew		pv = get_pv_entry(pmap, &lock);
2390281494Sandrew		pv->pv_va = va;
2391281494Sandrew		CHANGE_PV_LIST_LOCK_TO_PHYS(&lock, pa);
2392281494Sandrew		TAILQ_INSERT_TAIL(&m->md.pv_list, pv, pv_next);
2393281494Sandrew		m->md.pv_gen++;
2394281494Sandrew		if ((new_l3 & ATTR_AP_RW_BIT) == ATTR_AP(ATTR_AP_RW))
2395281494Sandrew			vm_page_aflag_set(m, PGA_WRITEABLE);
2396281494Sandrew	}
2397281494Sandrew
2398281494Sandrew	/*
2399281494Sandrew	 * Update the L3 entry.
2400281494Sandrew	 */
2401281494Sandrew	if (orig_l3 != 0) {
2402281494Sandrewvalidate:
2403281494Sandrew		orig_l3 = pmap_load_store(l3, new_l3);
2404281494Sandrew		PTE_SYNC(l3);
2405281494Sandrew		opa = orig_l3 & ~ATTR_MASK;
2406281494Sandrew
2407281494Sandrew		if (opa != pa) {
2408281494Sandrew			if ((orig_l3 & ATTR_SW_MANAGED) != 0) {
2409281494Sandrew				om = PHYS_TO_VM_PAGE(opa);
2410281494Sandrew				if (pmap_page_dirty(orig_l3))
2411281494Sandrew					vm_page_dirty(om);
2412281494Sandrew				if ((orig_l3 & ATTR_AF) != 0)
2413281494Sandrew					vm_page_aflag_set(om, PGA_REFERENCED);
2414281494Sandrew				CHANGE_PV_LIST_LOCK_TO_PHYS(&lock, opa);
2415281494Sandrew				pmap_pvh_free(&om->md, pmap, va);
2416281494Sandrew			}
2417281494Sandrew		} else if (pmap_page_dirty(orig_l3)) {
2418281494Sandrew			if ((orig_l3 & ATTR_SW_MANAGED) != 0)
2419281494Sandrew				vm_page_dirty(m);
2420281494Sandrew		}
2421281494Sandrew	} else {
2422281494Sandrew		pmap_load_store(l3, new_l3);
2423281494Sandrew		PTE_SYNC(l3);
2424281494Sandrew	}
2425285212Sandrew	pmap_invalidate_page(pmap, va);
2426281494Sandrew	if ((pmap != pmap_kernel()) && (pmap == &curproc->p_vmspace->vm_pmap))
2427281494Sandrew	    cpu_icache_sync_range(va, PAGE_SIZE);
2428281494Sandrew
2429281494Sandrew	if (lock != NULL)
2430281494Sandrew		rw_wunlock(lock);
2431281494Sandrew	rw_runlock(&pvh_global_lock);
2432281494Sandrew	PMAP_UNLOCK(pmap);
2433281494Sandrew	return (KERN_SUCCESS);
2434281494Sandrew}
2435281494Sandrew
2436281494Sandrew/*
2437281494Sandrew * Maps a sequence of resident pages belonging to the same object.
2438281494Sandrew * The sequence begins with the given page m_start.  This page is
2439281494Sandrew * mapped at the given virtual address start.  Each subsequent page is
2440281494Sandrew * mapped at a virtual address that is offset from start by the same
2441281494Sandrew * amount as the page is offset from m_start within the object.  The
2442281494Sandrew * last page in the sequence is the page with the largest offset from
2443281494Sandrew * m_start that can be mapped at a virtual address less than the given
2444281494Sandrew * virtual address end.  Not every virtual page between start and end
2445281494Sandrew * is mapped; only those for which a resident page exists with the
2446281494Sandrew * corresponding offset from m_start are mapped.
2447281494Sandrew */
2448281494Sandrewvoid
2449281494Sandrewpmap_enter_object(pmap_t pmap, vm_offset_t start, vm_offset_t end,
2450281494Sandrew    vm_page_t m_start, vm_prot_t prot)
2451281494Sandrew{
2452281494Sandrew	struct rwlock *lock;
2453281494Sandrew	vm_offset_t va;
2454281494Sandrew	vm_page_t m, mpte;
2455281494Sandrew	vm_pindex_t diff, psize;
2456281494Sandrew
2457281494Sandrew	VM_OBJECT_ASSERT_LOCKED(m_start->object);
2458281494Sandrew
2459281494Sandrew	psize = atop(end - start);
2460281494Sandrew	mpte = NULL;
2461281494Sandrew	m = m_start;
2462281494Sandrew	lock = NULL;
2463281494Sandrew	rw_rlock(&pvh_global_lock);
2464281494Sandrew	PMAP_LOCK(pmap);
2465281494Sandrew	while (m != NULL && (diff = m->pindex - m_start->pindex) < psize) {
2466281494Sandrew		va = start + ptoa(diff);
2467281494Sandrew		mpte = pmap_enter_quick_locked(pmap, va, m, prot, mpte, &lock);
2468281494Sandrew		m = TAILQ_NEXT(m, listq);
2469281494Sandrew	}
2470281494Sandrew	if (lock != NULL)
2471281494Sandrew		rw_wunlock(lock);
2472281494Sandrew	rw_runlock(&pvh_global_lock);
2473281494Sandrew	PMAP_UNLOCK(pmap);
2474281494Sandrew}
2475281494Sandrew
2476281494Sandrew/*
2477281494Sandrew * this code makes some *MAJOR* assumptions:
2478281494Sandrew * 1. Current pmap & pmap exists.
2479281494Sandrew * 2. Not wired.
2480281494Sandrew * 3. Read access.
2481281494Sandrew * 4. No page table pages.
2482281494Sandrew * but is *MUCH* faster than pmap_enter...
2483281494Sandrew */
2484281494Sandrew
2485281494Sandrewvoid
2486281494Sandrewpmap_enter_quick(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot)
2487281494Sandrew{
2488281494Sandrew	struct rwlock *lock;
2489281494Sandrew
2490281494Sandrew	lock = NULL;
2491281494Sandrew	rw_rlock(&pvh_global_lock);
2492281494Sandrew	PMAP_LOCK(pmap);
2493281494Sandrew	(void)pmap_enter_quick_locked(pmap, va, m, prot, NULL, &lock);
2494281494Sandrew	if (lock != NULL)
2495281494Sandrew		rw_wunlock(lock);
2496281494Sandrew	rw_runlock(&pvh_global_lock);
2497281494Sandrew	PMAP_UNLOCK(pmap);
2498281494Sandrew}
2499281494Sandrew
2500281494Sandrewstatic vm_page_t
2501281494Sandrewpmap_enter_quick_locked(pmap_t pmap, vm_offset_t va, vm_page_t m,
2502281494Sandrew    vm_prot_t prot, vm_page_t mpte, struct rwlock **lockp)
2503281494Sandrew{
2504281494Sandrew	struct spglist free;
2505297446Sandrew	pd_entry_t *pde;
2506281494Sandrew	pt_entry_t *l3;
2507281494Sandrew	vm_paddr_t pa;
2508297446Sandrew	int lvl;
2509281494Sandrew
2510281494Sandrew	KASSERT(va < kmi.clean_sva || va >= kmi.clean_eva ||
2511281494Sandrew	    (m->oflags & VPO_UNMANAGED) != 0,
2512281494Sandrew	    ("pmap_enter_quick_locked: managed mapping within the clean submap"));
2513281494Sandrew	rw_assert(&pvh_global_lock, RA_LOCKED);
2514281494Sandrew	PMAP_LOCK_ASSERT(pmap, MA_OWNED);
2515281494Sandrew
2516285212Sandrew	CTR2(KTR_PMAP, "pmap_enter_quick_locked: %p %lx", pmap, va);
2517281494Sandrew	/*
2518281494Sandrew	 * In the case that a page table page is not
2519281494Sandrew	 * resident, we are creating it here.
2520281494Sandrew	 */
2521281494Sandrew	if (va < VM_MAXUSER_ADDRESS) {
2522281494Sandrew		vm_pindex_t l2pindex;
2523281494Sandrew
2524281494Sandrew		/*
2525281494Sandrew		 * Calculate pagetable page index
2526281494Sandrew		 */
2527281494Sandrew		l2pindex = pmap_l2_pindex(va);
2528281494Sandrew		if (mpte && (mpte->pindex == l2pindex)) {
2529281494Sandrew			mpte->wire_count++;
2530281494Sandrew		} else {
2531281494Sandrew			/*
2532281494Sandrew			 * Get the l2 entry
2533281494Sandrew			 */
2534297446Sandrew			pde = pmap_pde(pmap, va, &lvl);
2535281494Sandrew
2536281494Sandrew			/*
2537281494Sandrew			 * If the page table page is mapped, we just increment
2538281494Sandrew			 * the hold count, and activate it.  Otherwise, we
2539281494Sandrew			 * attempt to allocate a page table page.  If this
2540281494Sandrew			 * attempt fails, we don't retry.  Instead, we give up.
2541281494Sandrew			 */
2542297446Sandrew			if (lvl == 2 && pmap_load(pde) != 0) {
2543285045Sandrew				mpte =
2544297446Sandrew				    PHYS_TO_VM_PAGE(pmap_load(pde) & ~ATTR_MASK);
2545281494Sandrew				mpte->wire_count++;
2546281494Sandrew			} else {
2547281494Sandrew				/*
2548281494Sandrew				 * Pass NULL instead of the PV list lock
2549281494Sandrew				 * pointer, because we don't intend to sleep.
2550281494Sandrew				 */
2551281494Sandrew				mpte = _pmap_alloc_l3(pmap, l2pindex, NULL);
2552281494Sandrew				if (mpte == NULL)
2553281494Sandrew					return (mpte);
2554281494Sandrew			}
2555281494Sandrew		}
2556281494Sandrew		l3 = (pt_entry_t *)PHYS_TO_DMAP(VM_PAGE_TO_PHYS(mpte));
2557281494Sandrew		l3 = &l3[pmap_l3_index(va)];
2558281494Sandrew	} else {
2559281494Sandrew		mpte = NULL;
2560297446Sandrew		pde = pmap_pde(kernel_pmap, va, &lvl);
2561297446Sandrew		KASSERT(pde != NULL,
2562297446Sandrew		    ("pmap_enter_quick_locked: Invalid page entry, va: 0x%lx",
2563297446Sandrew		     va));
2564297446Sandrew		KASSERT(lvl == 2,
2565297446Sandrew		    ("pmap_enter_quick_locked: Invalid level %d", lvl));
2566297446Sandrew		l3 = pmap_l2_to_l3(pde, va);
2567281494Sandrew	}
2568297446Sandrew
2569285212Sandrew	if (pmap_load(l3) != 0) {
2570281494Sandrew		if (mpte != NULL) {
2571281494Sandrew			mpte->wire_count--;
2572281494Sandrew			mpte = NULL;
2573281494Sandrew		}
2574281494Sandrew		return (mpte);
2575281494Sandrew	}
2576281494Sandrew
2577281494Sandrew	/*
2578281494Sandrew	 * Enter on the PV list if part of our managed memory.
2579281494Sandrew	 */
2580281494Sandrew	if ((m->oflags & VPO_UNMANAGED) == 0 &&
2581281494Sandrew	    !pmap_try_insert_pv_entry(pmap, va, m, lockp)) {
2582281494Sandrew		if (mpte != NULL) {
2583281494Sandrew			SLIST_INIT(&free);
2584281494Sandrew			if (pmap_unwire_l3(pmap, va, mpte, &free)) {
2585281494Sandrew				pmap_invalidate_page(pmap, va);
2586281494Sandrew				pmap_free_zero_pages(&free);
2587281494Sandrew			}
2588281494Sandrew			mpte = NULL;
2589281494Sandrew		}
2590281494Sandrew		return (mpte);
2591281494Sandrew	}
2592281494Sandrew
2593281494Sandrew	/*
2594281494Sandrew	 * Increment counters
2595281494Sandrew	 */
2596281494Sandrew	pmap_resident_count_inc(pmap, 1);
2597281494Sandrew
2598285537Sandrew	pa = VM_PAGE_TO_PHYS(m) | ATTR_DEFAULT | ATTR_IDX(m->md.pv_memattr) |
2599281494Sandrew	    ATTR_AP(ATTR_AP_RO) | L3_PAGE;
2600281494Sandrew
2601281494Sandrew	/*
2602281494Sandrew	 * Now validate mapping with RO protection
2603281494Sandrew	 */
2604281494Sandrew	if ((m->oflags & VPO_UNMANAGED) == 0)
2605281494Sandrew		pa |= ATTR_SW_MANAGED;
2606281494Sandrew	pmap_load_store(l3, pa);
2607281494Sandrew	PTE_SYNC(l3);
2608281494Sandrew	pmap_invalidate_page(pmap, va);
2609281494Sandrew	return (mpte);
2610281494Sandrew}
2611281494Sandrew
2612281494Sandrew/*
2613281494Sandrew * This code maps large physical mmap regions into the
2614281494Sandrew * processor address space.  Note that some shortcuts
2615281494Sandrew * are taken, but the code works.
2616281494Sandrew */
2617281494Sandrewvoid
2618281494Sandrewpmap_object_init_pt(pmap_t pmap, vm_offset_t addr, vm_object_t object,
2619281494Sandrew    vm_pindex_t pindex, vm_size_t size)
2620281494Sandrew{
2621281494Sandrew
2622281846Sandrew	VM_OBJECT_ASSERT_WLOCKED(object);
2623281846Sandrew	KASSERT(object->type == OBJT_DEVICE || object->type == OBJT_SG,
2624281846Sandrew	    ("pmap_object_init_pt: non-device object"));
2625281494Sandrew}
2626281494Sandrew
2627281494Sandrew/*
2628281494Sandrew *	Clear the wired attribute from the mappings for the specified range of
2629281494Sandrew *	addresses in the given pmap.  Every valid mapping within that range
2630281494Sandrew *	must have the wired attribute set.  In contrast, invalid mappings
2631281494Sandrew *	cannot have the wired attribute set, so they are ignored.
2632281494Sandrew *
2633281494Sandrew *	The wired attribute of the page table entry is not a hardware feature,
2634281494Sandrew *	so there is no need to invalidate any TLB entries.
2635281494Sandrew */
2636281494Sandrewvoid
2637281494Sandrewpmap_unwire(pmap_t pmap, vm_offset_t sva, vm_offset_t eva)
2638281494Sandrew{
2639281494Sandrew	vm_offset_t va_next;
2640297446Sandrew	pd_entry_t *l0, *l1, *l2;
2641281494Sandrew	pt_entry_t *l3;
2642281494Sandrew	boolean_t pv_lists_locked;
2643281494Sandrew
2644281494Sandrew	pv_lists_locked = FALSE;
2645281494Sandrew	PMAP_LOCK(pmap);
2646281494Sandrew	for (; sva < eva; sva = va_next) {
2647297446Sandrew		l0 = pmap_l0(pmap, sva);
2648297446Sandrew		if (pmap_load(l0) == 0) {
2649297446Sandrew			va_next = (sva + L0_SIZE) & ~L0_OFFSET;
2650297446Sandrew			if (va_next < sva)
2651297446Sandrew				va_next = eva;
2652297446Sandrew			continue;
2653297446Sandrew		}
2654297446Sandrew
2655297446Sandrew		l1 = pmap_l0_to_l1(l0, sva);
2656285045Sandrew		if (pmap_load(l1) == 0) {
2657281494Sandrew			va_next = (sva + L1_SIZE) & ~L1_OFFSET;
2658281494Sandrew			if (va_next < sva)
2659281494Sandrew				va_next = eva;
2660281494Sandrew			continue;
2661281494Sandrew		}
2662281494Sandrew
2663281494Sandrew		va_next = (sva + L2_SIZE) & ~L2_OFFSET;
2664281494Sandrew		if (va_next < sva)
2665281494Sandrew			va_next = eva;
2666281494Sandrew
2667281494Sandrew		l2 = pmap_l1_to_l2(l1, sva);
2668285045Sandrew		if (pmap_load(l2) == 0)
2669281494Sandrew			continue;
2670281494Sandrew
2671281494Sandrew		if (va_next > eva)
2672281494Sandrew			va_next = eva;
2673281494Sandrew		for (l3 = pmap_l2_to_l3(l2, sva); sva != va_next; l3++,
2674281494Sandrew		    sva += L3_SIZE) {
2675285045Sandrew			if (pmap_load(l3) == 0)
2676281494Sandrew				continue;
2677285045Sandrew			if ((pmap_load(l3) & ATTR_SW_WIRED) == 0)
2678281494Sandrew				panic("pmap_unwire: l3 %#jx is missing "
2679288445Sandrew				    "ATTR_SW_WIRED", (uintmax_t)pmap_load(l3));
2680281494Sandrew
2681281494Sandrew			/*
2682281494Sandrew			 * ATTR_SW_WIRED must be cleared atomically.  Although the
2683281494Sandrew			 * pmap lock synchronizes access to it, another processor
2684281494Sandrew			 * could update the access flag or dirty state concurrently.
2685281494Sandrew			 */
2686281494Sandrew			atomic_clear_long(l3, ATTR_SW_WIRED);
2687281494Sandrew			pmap->pm_stats.wired_count--;
2688281494Sandrew		}
2689281494Sandrew	}
2690281494Sandrew	if (pv_lists_locked)
2691281494Sandrew		rw_runlock(&pvh_global_lock);
2692281494Sandrew	PMAP_UNLOCK(pmap);
2693281494Sandrew}
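
/*
 * Illustrative race for the atomic requirement in pmap_unwire() above
 * (a sketch, not code from this file; the interleaving is hypothetical):
 *
 *	unwiring CPU				another CPU
 *	old = pmap_load(l3);
 *						(sets ATTR_AF in *l3)
 *	pmap_load_store(l3, old & ~ATTR_SW_WIRED);
 *
 * The plain store would overwrite the concurrent ATTR_AF update.
 * atomic_clear_long() instead performs the read-modify-write as a
 * single atomic operation, so no concurrent access flag or dirty
 * state change can be lost.
 */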
2694281494Sandrew
2695281494Sandrew/*
2696281494Sandrew *	Copy the range specified by src_addr/len
2697281494Sandrew *	from the source map to the range dst_addr/len
2698281494Sandrew *	in the destination map.
2699281494Sandrew *
2700281494Sandrew *	This routine is only advisory and need not do anything.
2701281494Sandrew */
2702281494Sandrew
2703281494Sandrewvoid
2704281494Sandrewpmap_copy(pmap_t dst_pmap, pmap_t src_pmap, vm_offset_t dst_addr, vm_size_t len,
2705281494Sandrew    vm_offset_t src_addr)
2706281494Sandrew{
2707281494Sandrew}
2708281494Sandrew
2709281494Sandrew/*
2710281494Sandrew *	pmap_zero_page zeros the specified hardware page by addressing
2711281494Sandrew *	it through the direct map and calling pagezero.
2712281494Sandrew */
2713281494Sandrewvoid
2714281494Sandrewpmap_zero_page(vm_page_t m)
2715281494Sandrew{
2716281494Sandrew	vm_offset_t va = PHYS_TO_DMAP(VM_PAGE_TO_PHYS(m));
2717281494Sandrew
2718281494Sandrew	pagezero((void *)va);
2719281494Sandrew}
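
/*
 * For example (hypothetical physical address): zeroing the page backed
 * by physical address 0x40001000 reduces to
 *
 *	pagezero((void *)PHYS_TO_DMAP(0x40001000));
 *
 * because all of physical memory is permanently mapped by the DMAP;
 * no transient kernel mapping is created or torn down.
 */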
2720281494Sandrew
2721281494Sandrew/*
2722281494Sandrew *	pmap_zero_page_area zeros the requested area of the specified
2723281494Sandrew *	hardware page by addressing it through the direct map.
2724281494Sandrew *
2725281494Sandrew *	off and size may not cover an area beyond a single hardware page.
2726281494Sandrew */
2727281494Sandrewvoid
2728281494Sandrewpmap_zero_page_area(vm_page_t m, int off, int size)
2729281494Sandrew{
2730281494Sandrew	vm_offset_t va = PHYS_TO_DMAP(VM_PAGE_TO_PHYS(m));
2731281494Sandrew
2732281494Sandrew	if (off == 0 && size == PAGE_SIZE)
2733281494Sandrew		pagezero((void *)va);
2734281494Sandrew	else
2735281494Sandrew		bzero((char *)va + off, size);
2736281494Sandrew}
2737281494Sandrew
2738281494Sandrew/*
2739281494Sandrew *	pmap_zero_page_idle zeros the specified hardware page by
2740281494Sandrew *	addressing it through the direct map.  This
2741281494Sandrew *	is intended to be called from the vm_pagezero process only and
2742281494Sandrew *	outside of Giant.
2743281494Sandrew */
2744281494Sandrewvoid
2745281494Sandrewpmap_zero_page_idle(vm_page_t m)
2746281494Sandrew{
2747281494Sandrew	vm_offset_t va = PHYS_TO_DMAP(VM_PAGE_TO_PHYS(m));
2748281494Sandrew
2749281494Sandrew	pagezero((void *)va);
2750281494Sandrew}
2751281494Sandrew
2752281494Sandrew/*
2753281494Sandrew *	pmap_copy_page copies the specified (machine independent)
2754281494Sandrew *	page by addressing both pages through the direct map and
2755281494Sandrew *	using pagecopy to copy the page, one machine dependent
2756281494Sandrew *	page at a time.
2757281494Sandrew */
2758281494Sandrewvoid
2759281494Sandrewpmap_copy_page(vm_page_t msrc, vm_page_t mdst)
2760281494Sandrew{
2761281494Sandrew	vm_offset_t src = PHYS_TO_DMAP(VM_PAGE_TO_PHYS(msrc));
2762281494Sandrew	vm_offset_t dst = PHYS_TO_DMAP(VM_PAGE_TO_PHYS(mdst));
2763281494Sandrew
2764281494Sandrew	pagecopy((void *)src, (void *)dst);
2765281494Sandrew}
2766281494Sandrew
2767281494Sandrewint unmapped_buf_allowed = 1;
2768281494Sandrew
2769281494Sandrewvoid
2770281494Sandrewpmap_copy_pages(vm_page_t ma[], vm_offset_t a_offset, vm_page_t mb[],
2771281494Sandrew    vm_offset_t b_offset, int xfersize)
2772281494Sandrew{
2773281494Sandrew	void *a_cp, *b_cp;
2774281494Sandrew	vm_page_t m_a, m_b;
2775281494Sandrew	vm_paddr_t p_a, p_b;
2776281494Sandrew	vm_offset_t a_pg_offset, b_pg_offset;
2777281494Sandrew	int cnt;
2778281494Sandrew
2779281494Sandrew	while (xfersize > 0) {
2780281494Sandrew		a_pg_offset = a_offset & PAGE_MASK;
2781281494Sandrew		m_a = ma[a_offset >> PAGE_SHIFT];
2782281494Sandrew		p_a = m_a->phys_addr;
2783281494Sandrew		b_pg_offset = b_offset & PAGE_MASK;
2784281494Sandrew		m_b = mb[b_offset >> PAGE_SHIFT];
2785281494Sandrew		p_b = m_b->phys_addr;
2786281494Sandrew		cnt = min(xfersize, PAGE_SIZE - a_pg_offset);
2787281494Sandrew		cnt = min(cnt, PAGE_SIZE - b_pg_offset);
2788281494Sandrew		if (__predict_false(!PHYS_IN_DMAP(p_a))) {
2789281494Sandrew			panic("!DMAP a %lx", p_a);
2790281494Sandrew		} else {
2791281494Sandrew			a_cp = (char *)PHYS_TO_DMAP(p_a) + a_pg_offset;
2792281494Sandrew		}
2793281494Sandrew		if (__predict_false(!PHYS_IN_DMAP(p_b))) {
2794281494Sandrew			panic("!DMAP b %lx", p_b);
2795281494Sandrew		} else {
2796281494Sandrew			b_cp = (char *)PHYS_TO_DMAP(p_b) + b_pg_offset;
2797281494Sandrew		}
2798281494Sandrew		bcopy(a_cp, b_cp, cnt);
2799281494Sandrew		a_offset += cnt;
2800281494Sandrew		b_offset += cnt;
2801281494Sandrew		xfersize -= cnt;
2802281494Sandrew	}
2803281494Sandrew}
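
/*
 * Worked example of the clamping in pmap_copy_pages() (hypothetical
 * offsets, 4KiB pages): with a_offset = 0x1f00, b_offset = 0x80 and
 * xfersize = 0x300, the first iteration copies
 * min(0x300, 0x1000 - 0xf00, 0x1000 - 0x80) = 0x100 bytes, stopping
 * exactly at the source page boundary; the remaining 0x200 bytes are
 * copied from the next page of ma[] on the second iteration.
 */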
2804281494Sandrew
2805286296Sjahvm_offset_t
2806286296Sjahpmap_quick_enter_page(vm_page_t m)
2807286296Sjah{
2808286296Sjah
2809286296Sjah	return (PHYS_TO_DMAP(VM_PAGE_TO_PHYS(m)));
2810286296Sjah}
2811286296Sjah
2812286296Sjahvoid
2813286296Sjahpmap_quick_remove_page(vm_offset_t addr)
2814286296Sjah{
2815286296Sjah}
2816286296Sjah
2817281494Sandrew/*
2818281494Sandrew * Returns true if the pmap's pv is one of the first
2819281494Sandrew * 16 pvs linked to from this page.  This count may
2820281494Sandrew * be changed upwards or downwards in the future; it
2821281494Sandrew * is only necessary that true be returned for a small
2822281494Sandrew * subset of pmaps for proper page aging.
2823281494Sandrew */
2824281494Sandrewboolean_t
2825281494Sandrewpmap_page_exists_quick(pmap_t pmap, vm_page_t m)
2826281494Sandrew{
2827281494Sandrew	struct rwlock *lock;
2828281494Sandrew	pv_entry_t pv;
2829281494Sandrew	int loops = 0;
2830281494Sandrew	boolean_t rv;
2831281494Sandrew
2832281494Sandrew	KASSERT((m->oflags & VPO_UNMANAGED) == 0,
2833281494Sandrew	    ("pmap_page_exists_quick: page %p is not managed", m));
2834281494Sandrew	rv = FALSE;
2835281494Sandrew	rw_rlock(&pvh_global_lock);
2836281494Sandrew	lock = VM_PAGE_TO_PV_LIST_LOCK(m);
2837281494Sandrew	rw_rlock(lock);
2838281494Sandrew	TAILQ_FOREACH(pv, &m->md.pv_list, pv_next) {
2839281494Sandrew		if (PV_PMAP(pv) == pmap) {
2840281494Sandrew			rv = TRUE;
2841281494Sandrew			break;
2842281494Sandrew		}
2843281494Sandrew		loops++;
2844281494Sandrew		if (loops >= 16)
2845281494Sandrew			break;
2846281494Sandrew	}
2847281494Sandrew	rw_runlock(lock);
2848281494Sandrew	rw_runlock(&pvh_global_lock);
2849281494Sandrew	return (rv);
2850281494Sandrew}
2851281494Sandrew
2852281494Sandrew/*
2853281494Sandrew *	pmap_page_wired_mappings:
2854281494Sandrew *
2855281494Sandrew *	Return the number of managed mappings to the given physical page
2856281494Sandrew *	that are wired.
2857281494Sandrew */
2858281494Sandrewint
2859281494Sandrewpmap_page_wired_mappings(vm_page_t m)
2860281494Sandrew{
2861281494Sandrew	struct rwlock *lock;
2862281494Sandrew	pmap_t pmap;
2863297446Sandrew	pt_entry_t *pte;
2864281494Sandrew	pv_entry_t pv;
2865297446Sandrew	int count, lvl, md_gen;
2866281494Sandrew
2867281494Sandrew	if ((m->oflags & VPO_UNMANAGED) != 0)
2868281494Sandrew		return (0);
2869281494Sandrew	rw_rlock(&pvh_global_lock);
2870281494Sandrew	lock = VM_PAGE_TO_PV_LIST_LOCK(m);
2871281494Sandrew	rw_rlock(lock);
2872281494Sandrewrestart:
2873281494Sandrew	count = 0;
2874281494Sandrew	TAILQ_FOREACH(pv, &m->md.pv_list, pv_next) {
2875281494Sandrew		pmap = PV_PMAP(pv);
2876281494Sandrew		if (!PMAP_TRYLOCK(pmap)) {
2877281494Sandrew			md_gen = m->md.pv_gen;
2878281494Sandrew			rw_runlock(lock);
2879281494Sandrew			PMAP_LOCK(pmap);
2880281494Sandrew			rw_rlock(lock);
2881281494Sandrew			if (md_gen != m->md.pv_gen) {
2882281494Sandrew				PMAP_UNLOCK(pmap);
2883281494Sandrew				goto restart;
2884281494Sandrew			}
2885281494Sandrew		}
2886297446Sandrew		pte = pmap_pte(pmap, pv->pv_va, &lvl);
2887297446Sandrew		if (pte != NULL && (pmap_load(pte) & ATTR_SW_WIRED) != 0)
2888281494Sandrew			count++;
2889281494Sandrew		PMAP_UNLOCK(pmap);
2890281494Sandrew	}
2891281494Sandrew	rw_runlock(lock);
2892281494Sandrew	rw_runlock(&pvh_global_lock);
2893281494Sandrew	return (count);
2894281494Sandrew}
2895281494Sandrew
2896281494Sandrew/*
2897281494Sandrew * Destroy all managed, non-wired mappings in the given user-space
2898281494Sandrew * pmap.  This pmap cannot be active on any processor besides the
2899281494Sandrew * caller.
2900281494Sandrew *
2901281494Sandrew * This function cannot be applied to the kernel pmap.  Moreover, it
2902281494Sandrew * is not intended for general use.  It is only to be used during
2903281494Sandrew * process termination.  Consequently, it can be implemented in ways
2904281494Sandrew * that make it faster than pmap_remove().  First, it can more quickly
2905281494Sandrew * destroy mappings by iterating over the pmap's collection of PV
2906281494Sandrew * entries, rather than searching the page table.  Second, it doesn't
2907281494Sandrew * have to test and clear the page table entries atomically, because
2908281494Sandrew * no processor is currently accessing the user address space.  In
2909281494Sandrew * particular, a page table entry's dirty bit won't change state once
2910281494Sandrew * this function starts.
2911281494Sandrew */
2912281494Sandrewvoid
2913281494Sandrewpmap_remove_pages(pmap_t pmap)
2914281494Sandrew{
2915297446Sandrew	pd_entry_t *pde;
2916297446Sandrew	pt_entry_t *pte, tpte;
2917281494Sandrew	struct spglist free;
2918281494Sandrew	vm_page_t m;
2919281494Sandrew	pv_entry_t pv;
2920281494Sandrew	struct pv_chunk *pc, *npc;
2921281494Sandrew	struct rwlock *lock;
2922281494Sandrew	int64_t bit;
2923281494Sandrew	uint64_t inuse, bitmask;
2924297446Sandrew	int allfree, field, freed, idx, lvl;
2925281494Sandrew	vm_paddr_t pa;
2926281494Sandrew
2927281494Sandrew	lock = NULL;
2928281494Sandrew
2929281494Sandrew	SLIST_INIT(&free);
2930281494Sandrew	rw_rlock(&pvh_global_lock);
2931281494Sandrew	PMAP_LOCK(pmap);
2932281494Sandrew	TAILQ_FOREACH_SAFE(pc, &pmap->pm_pvchunk, pc_list, npc) {
2933281494Sandrew		allfree = 1;
2934281494Sandrew		freed = 0;
2935281494Sandrew		for (field = 0; field < _NPCM; field++) {
2936281494Sandrew			inuse = ~pc->pc_map[field] & pc_freemask[field];
2937281494Sandrew			while (inuse != 0) {
2938281494Sandrew				bit = ffsl(inuse) - 1;
2939281494Sandrew				bitmask = 1UL << bit;
2940281494Sandrew				idx = field * 64 + bit;
2941281494Sandrew				pv = &pc->pc_pventry[idx];
2942281494Sandrew				inuse &= ~bitmask;
2943281494Sandrew
2944297446Sandrew				pde = pmap_pde(pmap, pv->pv_va, &lvl);
2945297446Sandrew				KASSERT(pde != NULL,
2946297446Sandrew				    ("Attempting to remove an unmapped page"));
2947297446Sandrew				KASSERT(lvl == 2,
2948297446Sandrew				    ("Invalid page directory level: %d", lvl));
2949281494Sandrew
2950297446Sandrew				pte = pmap_l2_to_l3(pde, pv->pv_va);
2951297446Sandrew				KASSERT(pte != NULL,
2952297446Sandrew				    ("Attempting to remove an unmapped page"));
2953297446Sandrew
2954297446Sandrew				tpte = pmap_load(pte);
2955297446Sandrew
2956281494Sandrew/*
2957281494Sandrew * We cannot remove wired pages from a process' mapping at this time
2958281494Sandrew */
2959297446Sandrew				if (tpte & ATTR_SW_WIRED) {
2960281494Sandrew					allfree = 0;
2961281494Sandrew					continue;
2962281494Sandrew				}
2963281494Sandrew
2964297446Sandrew				pa = tpte & ~ATTR_MASK;
2965281494Sandrew
2966281494Sandrew				m = PHYS_TO_VM_PAGE(pa);
2967281494Sandrew				KASSERT(m->phys_addr == pa,
2968281494Sandrew				    ("vm_page_t %p phys_addr mismatch %016jx %016jx",
2969281494Sandrew				    m, (uintmax_t)m->phys_addr,
2970297446Sandrew				    (uintmax_t)tpte));
2971281494Sandrew
2972281494Sandrew				KASSERT((m->flags & PG_FICTITIOUS) != 0 ||
2973281494Sandrew				    m < &vm_page_array[vm_page_array_size],
2974297446Sandrew				    ("pmap_remove_pages: bad pte %#jx",
2975297446Sandrew				    (uintmax_t)tpte));
2976281494Sandrew
2977297446Sandrew				/* XXX: assumes tpte is level 3 */
2978281494Sandrew				if (pmap_is_current(pmap) &&
2979297446Sandrew				    pmap_l3_valid_cacheable(tpte))
2980281494Sandrew					cpu_dcache_wb_range(pv->pv_va, L3_SIZE);
2981297446Sandrew				pmap_load_clear(pte);
2982297446Sandrew				PTE_SYNC(pte);
2983285212Sandrew				pmap_invalidate_page(pmap, pv->pv_va);
2984281494Sandrew
2985281494Sandrew				/*
2986281494Sandrew				 * Update the vm_page_t clean/reference bits.
2987281494Sandrew				 */
2988297446Sandrew				if ((tpte & ATTR_AP_RW_BIT) == ATTR_AP(ATTR_AP_RW))
2989281494Sandrew					vm_page_dirty(m);
2990281494Sandrew
2991281494Sandrew				CHANGE_PV_LIST_LOCK_TO_VM_PAGE(&lock, m);
2992281494Sandrew
2993281494Sandrew				/* Mark free */
2994281494Sandrew				pc->pc_map[field] |= bitmask;
2995281494Sandrew
2996281494Sandrew				pmap_resident_count_dec(pmap, 1);
2997281494Sandrew				TAILQ_REMOVE(&m->md.pv_list, pv, pv_next);
2998281494Sandrew				m->md.pv_gen++;
2999281494Sandrew
3000297446Sandrew				pmap_unuse_l3(pmap, pv->pv_va, pmap_load(pde),
3001297446Sandrew				    &free);
3002281494Sandrew				freed++;
3003281494Sandrew			}
3004281494Sandrew		}
3005281494Sandrew		PV_STAT(atomic_add_long(&pv_entry_frees, freed));
3006281494Sandrew		PV_STAT(atomic_add_int(&pv_entry_spare, freed));
3007281494Sandrew		PV_STAT(atomic_subtract_long(&pv_entry_count, freed));
3008281494Sandrew		if (allfree) {
3009281494Sandrew			TAILQ_REMOVE(&pmap->pm_pvchunk, pc, pc_list);
3010281494Sandrew			free_pv_chunk(pc);
3011281494Sandrew		}
3012281494Sandrew	}
3013281494Sandrew	pmap_invalidate_all(pmap);
3014281494Sandrew	if (lock != NULL)
3015281494Sandrew		rw_wunlock(lock);
3016281494Sandrew	rw_runlock(&pvh_global_lock);
3017281494Sandrew	PMAP_UNLOCK(pmap);
3018281494Sandrew	pmap_free_zero_pages(&free);
3019281494Sandrew}
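
/*
 * Worked example of the chunk scan in pmap_remove_pages() (hypothetical
 * bitmap): if pc_map[0] has only bits 0 and 2 clear (i.e. allocated)
 * and every other entry is free, then inuse = 0x5.  ffsl() selects
 * bit 0 first (idx = 0) and bit 2 next (idx = 2), so only
 * pc_pventry[0] and pc_pventry[2] are examined.
 */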
3020281494Sandrew
3021281494Sandrew/*
3022281494Sandrew * This is used to check if a page has been accessed or modified.  As the
3023281494Sandrew * hardware provides no modified bit, a page is assumed to have been
3024281494Sandrew * modified whenever it is mapped read/write.
3025281494Sandrew */
3026281494Sandrewstatic boolean_t
3027281494Sandrewpmap_page_test_mappings(vm_page_t m, boolean_t accessed, boolean_t modified)
3028281494Sandrew{
3029281494Sandrew	struct rwlock *lock;
3030281494Sandrew	pv_entry_t pv;
3031297446Sandrew	pt_entry_t *pte, mask, value;
3032281494Sandrew	pmap_t pmap;
3033297446Sandrew	int lvl, md_gen;
3034281494Sandrew	boolean_t rv;
3035281494Sandrew
3036281494Sandrew	rv = FALSE;
3037281494Sandrew	rw_rlock(&pvh_global_lock);
3038281494Sandrew	lock = VM_PAGE_TO_PV_LIST_LOCK(m);
3039281494Sandrew	rw_rlock(lock);
3040281494Sandrewrestart:
3041281494Sandrew	TAILQ_FOREACH(pv, &m->md.pv_list, pv_next) {
3042281494Sandrew		pmap = PV_PMAP(pv);
3043281494Sandrew		if (!PMAP_TRYLOCK(pmap)) {
3044281494Sandrew			md_gen = m->md.pv_gen;
3045281494Sandrew			rw_runlock(lock);
3046281494Sandrew			PMAP_LOCK(pmap);
3047281494Sandrew			rw_rlock(lock);
3048281494Sandrew			if (md_gen != m->md.pv_gen) {
3049281494Sandrew				PMAP_UNLOCK(pmap);
3050281494Sandrew				goto restart;
3051281494Sandrew			}
3052281494Sandrew		}
3053297446Sandrew		pte = pmap_pte(pmap, pv->pv_va, &lvl);
3054297446Sandrew		KASSERT(lvl == 3,
3055297446Sandrew		    ("pmap_page_test_mappings: Invalid level %d", lvl));
3056281494Sandrew		mask = 0;
3057281494Sandrew		value = 0;
3058281494Sandrew		if (modified) {
3059281494Sandrew			mask |= ATTR_AP_RW_BIT;
3060281494Sandrew			value |= ATTR_AP(ATTR_AP_RW);
3061281494Sandrew		}
3062281494Sandrew		if (accessed) {
3063281494Sandrew			mask |= ATTR_AF | ATTR_DESCR_MASK;
3064281494Sandrew			value |= ATTR_AF | L3_PAGE;
3065281494Sandrew		}
3066297446Sandrew		rv = (pmap_load(pte) & mask) == value;
3067281494Sandrew		PMAP_UNLOCK(pmap);
3068281494Sandrew		if (rv)
3069281494Sandrew			goto out;
3070281494Sandrew	}
3071281494Sandrewout:
3072281494Sandrew	rw_runlock(lock);
3073281494Sandrew	rw_runlock(&pvh_global_lock);
3074281494Sandrew	return (rv);
3075281494Sandrew}
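
/*
 * The mask/value pairs built in pmap_page_test_mappings() reduce to the
 * following checks (assuming a valid level 3 page):
 *
 *	modified:  (pte & ATTR_AP_RW_BIT) == ATTR_AP(ATTR_AP_RW)
 *	accessed:  (pte & (ATTR_AF | ATTR_DESCR_MASK)) == (ATTR_AF | L3_PAGE)
 *
 * i.e. a mapping is treated as modified whenever it is writable, per
 * the comment above the function.
 */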
3076281494Sandrew
3077281494Sandrew/*
3078281494Sandrew *	pmap_is_modified:
3079281494Sandrew *
3080281494Sandrew *	Return whether or not the specified physical page was modified
3081281494Sandrew *	in any physical maps.
3082281494Sandrew */
3083281494Sandrewboolean_t
3084281494Sandrewpmap_is_modified(vm_page_t m)
3085281494Sandrew{
3086281494Sandrew
3087281494Sandrew	KASSERT((m->oflags & VPO_UNMANAGED) == 0,
3088281494Sandrew	    ("pmap_is_modified: page %p is not managed", m));
3089281494Sandrew
3090281494Sandrew	/*
3091281494Sandrew	 * If the page is not exclusive busied, then PGA_WRITEABLE cannot be
3092281494Sandrew	 * concurrently set while the object is locked.  Thus, if PGA_WRITEABLE
3093281494Sandrew	 * is clear, no PTEs can have PG_M set.
3094281494Sandrew	 */
3095281494Sandrew	VM_OBJECT_ASSERT_WLOCKED(m->object);
3096281494Sandrew	if (!vm_page_xbusied(m) && (m->aflags & PGA_WRITEABLE) == 0)
3097281494Sandrew		return (FALSE);
3098281494Sandrew	return (pmap_page_test_mappings(m, FALSE, TRUE));
3099281494Sandrew}
3100281494Sandrew
3101281494Sandrew/*
3102281494Sandrew *	pmap_is_prefaultable:
3103281494Sandrew *
3104281494Sandrew *	Return whether or not the specified virtual address is eligible
3105281494Sandrew *	for prefault.
3106281494Sandrew */
3107281494Sandrewboolean_t
3108281494Sandrewpmap_is_prefaultable(pmap_t pmap, vm_offset_t addr)
3109281494Sandrew{
3110297446Sandrew	pt_entry_t *pte;
3111281494Sandrew	boolean_t rv;
3112297446Sandrew	int lvl;
3113281494Sandrew
3114281494Sandrew	rv = FALSE;
3115281494Sandrew	PMAP_LOCK(pmap);
3116297446Sandrew	pte = pmap_pte(pmap, addr, &lvl);
3117297446Sandrew	if (pte != NULL && pmap_load(pte) != 0)
3118281494Sandrew		rv = TRUE;
3120281494Sandrew	PMAP_UNLOCK(pmap);
3121281494Sandrew	return (rv);
3122281494Sandrew}
3123281494Sandrew
3124281494Sandrew/*
3125281494Sandrew *	pmap_is_referenced:
3126281494Sandrew *
3127281494Sandrew *	Return whether or not the specified physical page was referenced
3128281494Sandrew *	in any physical maps.
3129281494Sandrew */
3130281494Sandrewboolean_t
3131281494Sandrewpmap_is_referenced(vm_page_t m)
3132281494Sandrew{
3133281494Sandrew
3134281494Sandrew	KASSERT((m->oflags & VPO_UNMANAGED) == 0,
3135281494Sandrew	    ("pmap_is_referenced: page %p is not managed", m));
3136281494Sandrew	return (pmap_page_test_mappings(m, TRUE, FALSE));
3137281494Sandrew}
3138281494Sandrew
3139281494Sandrew/*
3140281494Sandrew * Clear the write and modified bits in each of the given page's mappings.
3141281494Sandrew */
3142281494Sandrewvoid
3143281494Sandrewpmap_remove_write(vm_page_t m)
3144281494Sandrew{
3145281494Sandrew	pmap_t pmap;
3146281494Sandrew	struct rwlock *lock;
3147281494Sandrew	pv_entry_t pv;
3148297446Sandrew	pt_entry_t oldpte, *pte;
3149297446Sandrew	int lvl, md_gen;
3150281494Sandrew
3151281494Sandrew	KASSERT((m->oflags & VPO_UNMANAGED) == 0,
3152281494Sandrew	    ("pmap_remove_write: page %p is not managed", m));
3153281494Sandrew
3154281494Sandrew	/*
3155281494Sandrew	 * If the page is not exclusive busied, then PGA_WRITEABLE cannot be
3156281494Sandrew	 * set by another thread while the object is locked.  Thus,
3157281494Sandrew	 * if PGA_WRITEABLE is clear, no page table entries need updating.
3158281494Sandrew	 */
3159281494Sandrew	VM_OBJECT_ASSERT_WLOCKED(m->object);
3160281494Sandrew	if (!vm_page_xbusied(m) && (m->aflags & PGA_WRITEABLE) == 0)
3161281494Sandrew		return;
3162281494Sandrew	rw_rlock(&pvh_global_lock);
3163281494Sandrew	lock = VM_PAGE_TO_PV_LIST_LOCK(m);
3164281494Sandrewretry_pv_loop:
3165281494Sandrew	rw_wlock(lock);
3166281494Sandrew	TAILQ_FOREACH(pv, &m->md.pv_list, pv_next) {
3167281494Sandrew		pmap = PV_PMAP(pv);
3168281494Sandrew		if (!PMAP_TRYLOCK(pmap)) {
3169281494Sandrew			md_gen = m->md.pv_gen;
3170281494Sandrew			rw_wunlock(lock);
3171281494Sandrew			PMAP_LOCK(pmap);
3172281494Sandrew			rw_wlock(lock);
3173281494Sandrew			if (md_gen != m->md.pv_gen) {
3174281494Sandrew				PMAP_UNLOCK(pmap);
3175281494Sandrew				rw_wunlock(lock);
3176281494Sandrew				goto retry_pv_loop;
3177281494Sandrew			}
3178281494Sandrew		}
3179297446Sandrew		pte = pmap_pte(pmap, pv->pv_va, &lvl);
3180281494Sandrewretry:
3181297446Sandrew		oldpte = pmap_load(pte);
3182297446Sandrew		if ((oldpte & ATTR_AP_RW_BIT) == ATTR_AP(ATTR_AP_RW)) {
3183297446Sandrew			if (!atomic_cmpset_long(pte, oldpte,
3184297446Sandrew			    oldpte | ATTR_AP(ATTR_AP_RO)))
3185281494Sandrew				goto retry;
3186297446Sandrew			if ((oldpte & ATTR_AF) != 0)
3187281494Sandrew				vm_page_dirty(m);
3188281494Sandrew			pmap_invalidate_page(pmap, pv->pv_va);
3189281494Sandrew		}
3190281494Sandrew		PMAP_UNLOCK(pmap);
3191281494Sandrew	}
3192281494Sandrew	rw_wunlock(lock);
3193281494Sandrew	vm_page_aflag_clear(m, PGA_WRITEABLE);
3194281494Sandrew	rw_runlock(&pvh_global_lock);
3195281494Sandrew}
3196281494Sandrew
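/*
 * Determine whether it is safe to clear the access flag in a valid PTE
 * without removing the mapping.  This is never safe yet: the exception
 * handler cannot re-set ATTR_AF on a later access, so
 * pmap_ts_referenced() must remove the mapping instead.
 */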
3197281494Sandrewstatic __inline boolean_t
3198281494Sandrewsafe_to_clear_referenced(pmap_t pmap, pt_entry_t pte)
3199281494Sandrew{
3200281494Sandrew
3201281494Sandrew	return (FALSE);
3202281494Sandrew}
3203281494Sandrew
3204281494Sandrew#define	PMAP_TS_REFERENCED_MAX	5
3205281494Sandrew
3206281494Sandrew/*
3207281494Sandrew *	pmap_ts_referenced:
3208281494Sandrew *
3209281494Sandrew *	Return a count of reference bits for a page, clearing those bits.
3210281494Sandrew *	It is not necessary for every reference bit to be cleared, but it
3211281494Sandrew *	is necessary that 0 only be returned when there are truly no
3212281494Sandrew *	reference bits set.
3213281494Sandrew *
3214281494Sandrew *	XXX: The exact number of bits to check and clear is a matter that
3215281494Sandrew *	should be tested and standardized at some point in the future for
3216281494Sandrew *	optimal aging of shared pages.
3217281494Sandrew */
3218281494Sandrewint
3219281494Sandrewpmap_ts_referenced(vm_page_t m)
3220281494Sandrew{
3221281494Sandrew	pv_entry_t pv, pvf;
3222281494Sandrew	pmap_t pmap;
3223281494Sandrew	struct rwlock *lock;
3224297446Sandrew	pd_entry_t *pde, tpde;
3225297446Sandrew	pt_entry_t *pte, tpte;
3226281494Sandrew	vm_paddr_t pa;
3227297446Sandrew	int cleared, md_gen, not_cleared, lvl;
3228281494Sandrew	struct spglist free;
3229281494Sandrew
3230281494Sandrew	KASSERT((m->oflags & VPO_UNMANAGED) == 0,
3231281494Sandrew	    ("pmap_ts_referenced: page %p is not managed", m));
3232281494Sandrew	SLIST_INIT(&free);
3233281494Sandrew	cleared = 0;
3234281494Sandrew	pa = VM_PAGE_TO_PHYS(m);
3235281494Sandrew	lock = PHYS_TO_PV_LIST_LOCK(pa);
3236281494Sandrew	rw_rlock(&pvh_global_lock);
3237281494Sandrew	rw_wlock(lock);
3238281494Sandrewretry:
3239281494Sandrew	not_cleared = 0;
3240281494Sandrew	if ((pvf = TAILQ_FIRST(&m->md.pv_list)) == NULL)
3241281494Sandrew		goto out;
3242281494Sandrew	pv = pvf;
3243281494Sandrew	do {
3244281494Sandrew		if (pvf == NULL)
3245281494Sandrew			pvf = pv;
3246281494Sandrew		pmap = PV_PMAP(pv);
3247281494Sandrew		if (!PMAP_TRYLOCK(pmap)) {
3248281494Sandrew			md_gen = m->md.pv_gen;
3249281494Sandrew			rw_wunlock(lock);
3250281494Sandrew			PMAP_LOCK(pmap);
3251281494Sandrew			rw_wlock(lock);
3252281494Sandrew			if (md_gen != m->md.pv_gen) {
3253281494Sandrew				PMAP_UNLOCK(pmap);
3254281494Sandrew				goto retry;
3255281494Sandrew			}
3256281494Sandrew		}
3257297446Sandrew		pde = pmap_pde(pmap, pv->pv_va, &lvl);
3258297446Sandrew		KASSERT(pde != NULL, ("pmap_ts_referenced: no l2 table found"));
3259297446Sandrew		KASSERT(lvl == 2,
3260297446Sandrew		    ("pmap_ts_referenced: invalid pde level %d", lvl));
3261297446Sandrew		tpde = pmap_load(pde);
3262297446Sandrew		KASSERT((tpde & ATTR_DESCR_MASK) == L2_TABLE,
3263281494Sandrew		    ("pmap_ts_referenced: found an invalid l2 table"));
3264297446Sandrew		pte = pmap_l2_to_l3(pde, pv->pv_va);
3265297446Sandrew		tpte = pmap_load(pte);
3266297446Sandrew		if ((tpte & ATTR_AF) != 0) {
3267297446Sandrew			if (safe_to_clear_referenced(pmap, tpte)) {
3268281494Sandrew				/*
3269281494Sandrew				 * TODO: We don't handle the access flag
3270281494Sandrew				 * at all. We need to be able to set it in
3271281494Sandrew				 * the exception handler.
3272281494Sandrew				 */
3273286073Semaste				panic("ARM64TODO: safe_to_clear_referenced");
3274297446Sandrew			} else if ((tpte & ATTR_SW_WIRED) == 0) {
3275281494Sandrew				/*
3276281494Sandrew				 * Wired pages cannot be paged out so
3277281494Sandrew				 * doing accessed bit emulation for
3278281494Sandrew				 * them is wasted effort. We do the
3279281494Sandrew				 * hard work for unwired pages only.
3280281494Sandrew				 */
3281297446Sandrew				pmap_remove_l3(pmap, pte, pv->pv_va, tpde,
3282288445Sandrew				    &free, &lock);
3283281494Sandrew				pmap_invalidate_page(pmap, pv->pv_va);
3284281494Sandrew				cleared++;
3285281494Sandrew				if (pvf == pv)
3286281494Sandrew					pvf = NULL;
3287281494Sandrew				pv = NULL;
3288281494Sandrew				KASSERT(lock == VM_PAGE_TO_PV_LIST_LOCK(m),
3289281494Sandrew				    ("inconsistent pv lock %p %p for page %p",
3290281494Sandrew				    lock, VM_PAGE_TO_PV_LIST_LOCK(m), m));
3291281494Sandrew			} else
3292281494Sandrew				not_cleared++;
3293281494Sandrew		}
3294281494Sandrew		PMAP_UNLOCK(pmap);
3295281494Sandrew		/* Rotate the PV list if it has more than one entry. */
3296281494Sandrew		if (pv != NULL && TAILQ_NEXT(pv, pv_next) != NULL) {
3297281494Sandrew			TAILQ_REMOVE(&m->md.pv_list, pv, pv_next);
3298281494Sandrew			TAILQ_INSERT_TAIL(&m->md.pv_list, pv, pv_next);
3299281494Sandrew			m->md.pv_gen++;
3300281494Sandrew		}
3301281494Sandrew	} while ((pv = TAILQ_FIRST(&m->md.pv_list)) != pvf && cleared +
3302281494Sandrew	    not_cleared < PMAP_TS_REFERENCED_MAX);
3303281494Sandrewout:
3304281494Sandrew	rw_wunlock(lock);
3305281494Sandrew	rw_runlock(&pvh_global_lock);
3306281494Sandrew	pmap_free_zero_pages(&free);
3307281494Sandrew	return (cleared + not_cleared);
3308281494Sandrew}
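
/*
 * Worked example (hypothetical page): a page mapped by three referenced,
 * unwired mappings and one referenced, wired mapping returns 4.  The
 * three unwired mappings are removed (cleared = 3) while the wired one
 * is only counted (not_cleared = 1), leaving its ATTR_AF intact.  A
 * longer PV list would stop being scanned once cleared + not_cleared
 * reached PMAP_TS_REFERENCED_MAX.
 */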
3309281494Sandrew
3310281494Sandrew/*
3311281494Sandrew *	Apply the given advice to the specified range of addresses within the
3312281494Sandrew *	given pmap: clear the referenced/modified flags in each mapping and
3313281494Sandrew *	set the mapped page's dirty field.  Not yet implemented on arm64.
3314281494Sandrew */
3315281494Sandrewvoid
3316281494Sandrewpmap_advise(pmap_t pmap, vm_offset_t sva, vm_offset_t eva, int advice)
3317281494Sandrew{
3318281494Sandrew}
3319281494Sandrew
3320281494Sandrew/*
3321281494Sandrew *	Clear the modify bits on the specified physical page.
3322281494Sandrew */
3323281494Sandrewvoid
3324281494Sandrewpmap_clear_modify(vm_page_t m)
3325281494Sandrew{
3326281494Sandrew
3327281494Sandrew	KASSERT((m->oflags & VPO_UNMANAGED) == 0,
3328281494Sandrew	    ("pmap_clear_modify: page %p is not managed", m));
3329281494Sandrew	VM_OBJECT_ASSERT_WLOCKED(m->object);
3330281494Sandrew	KASSERT(!vm_page_xbusied(m),
3331281494Sandrew	    ("pmap_clear_modify: page %p is exclusive busied", m));
3332281494Sandrew
3333281494Sandrew	/*
3334281494Sandrew	 * If the page is not PGA_WRITEABLE, then no PTEs can have PG_M set.
3335281494Sandrew	 * If the object containing the page is locked and the page is not
3336281494Sandrew	 * exclusive busied, then PGA_WRITEABLE cannot be concurrently set.
3337281494Sandrew	 */
3338281494Sandrew	if ((m->aflags & PGA_WRITEABLE) == 0)
3339281494Sandrew		return;
3340281846Sandrew
3341286073Semaste	/* ARM64TODO: We lack support for tracking if a page is modified */
3342281494Sandrew}
3343281494Sandrew
3344282221Sandrewvoid *
3345282221Sandrewpmap_mapbios(vm_paddr_t pa, vm_size_t size)
3346282221Sandrew{
3347282221Sandrew
3348282221Sandrew	return ((void *)PHYS_TO_DMAP(pa));
3349282221Sandrew}
3350282221Sandrew
3351282221Sandrewvoid
3352282221Sandrewpmap_unmapbios(vm_paddr_t pa, vm_size_t size)
3353282221Sandrew{
3354282221Sandrew}
3355282221Sandrew
3356281494Sandrew/*
3357281494Sandrew * Sets the memory attribute for the specified page.
3358281494Sandrew */
3359281494Sandrewvoid
3360281494Sandrewpmap_page_set_memattr(vm_page_t m, vm_memattr_t ma)
3361281494Sandrew{
3362281494Sandrew
3363286080Sandrew	m->md.pv_memattr = ma;
3364286080Sandrew
3365286080Sandrew	/*
3366286080Sandrew	 * ARM64TODO: Implement the below (from the amd64 pmap)
3367286080Sandrew	 * If "m" is a normal page, update its direct mapping.  This update
3368286080Sandrew	 * can be relied upon to perform any cache operations that are
3369286080Sandrew	 * required for data coherence.
3370286080Sandrew	 */
3371286080Sandrew	if ((m->flags & PG_FICTITIOUS) == 0 &&
3372286080Sandrew	    PHYS_IN_DMAP(VM_PAGE_TO_PHYS(m)))
3373286080Sandrew		panic("ARM64TODO: pmap_page_set_memattr");
3374281494Sandrew}
3375281494Sandrew
3376281494Sandrew/*
3377281494Sandrew * perform the pmap work for mincore
3378281494Sandrew */
3379281494Sandrewint
3380281494Sandrewpmap_mincore(pmap_t pmap, vm_offset_t addr, vm_paddr_t *locked_pa)
3381281494Sandrew{
3382287570Sandrew	pd_entry_t *l1p, l1;
3383287570Sandrew	pd_entry_t *l2p, l2;
3384287570Sandrew	pt_entry_t *l3p, l3;
3385287570Sandrew	vm_paddr_t pa;
3386287570Sandrew	bool managed;
3387287570Sandrew	int val;
3388281494Sandrew
3389287570Sandrew	PMAP_LOCK(pmap);
3390287570Sandrewretry:
3391287570Sandrew	pa = 0;
3392287570Sandrew	val = 0;
3393287570Sandrew	managed = false;
3394287570Sandrew
3395287570Sandrew	l1p = pmap_l1(pmap, addr);
3396287570Sandrew	if (l1p == NULL) /* No l1 */
3397287570Sandrew		goto done;
3398295425Swma
3399287570Sandrew	l1 = pmap_load(l1p);
3400295425Swma	if ((l1 & ATTR_DESCR_MASK) == L1_INVAL)
3401295425Swma		goto done;
3402295425Swma
3403287570Sandrew	if ((l1 & ATTR_DESCR_MASK) == L1_BLOCK) {
3404287570Sandrew		pa = (l1 & ~ATTR_MASK) | (addr & L1_OFFSET);
3405287570Sandrew		managed = (l1 & ATTR_SW_MANAGED) == ATTR_SW_MANAGED;
3406287570Sandrew		val = MINCORE_SUPER | MINCORE_INCORE;
3407287570Sandrew		if (pmap_page_dirty(l1))
3408287570Sandrew			val |= MINCORE_MODIFIED | MINCORE_MODIFIED_OTHER;
3409287570Sandrew		if ((l1 & ATTR_AF) == ATTR_AF)
3410287570Sandrew			val |= MINCORE_REFERENCED | MINCORE_REFERENCED_OTHER;
3411287570Sandrew		goto done;
3412287570Sandrew	}
3413287570Sandrew
3414287570Sandrew	l2p = pmap_l1_to_l2(l1p, addr);
3415287570Sandrew	if (l2p == NULL) /* No l2 */
3416287570Sandrew		goto done;
3417295425Swma
3418287570Sandrew	l2 = pmap_load(l2p);
3419295425Swma	if ((l2 & ATTR_DESCR_MASK) == L2_INVAL)
3420295425Swma		goto done;
3421295425Swma
3422287570Sandrew	if ((l2 & ATTR_DESCR_MASK) == L2_BLOCK) {
3423287570Sandrew		pa = (l2 & ~ATTR_MASK) | (addr & L2_OFFSET);
3424287570Sandrew		managed = (l2 & ATTR_SW_MANAGED) == ATTR_SW_MANAGED;
3425287570Sandrew		val = MINCORE_SUPER | MINCORE_INCORE;
3426287570Sandrew		if (pmap_page_dirty(l2))
3427287570Sandrew			val |= MINCORE_MODIFIED | MINCORE_MODIFIED_OTHER;
3428287570Sandrew		if ((l2 & ATTR_AF) == ATTR_AF)
3429287570Sandrew			val |= MINCORE_REFERENCED | MINCORE_REFERENCED_OTHER;
3430287570Sandrew		goto done;
3431287570Sandrew	}
3432287570Sandrew
3433287570Sandrew	l3p = pmap_l2_to_l3(l2p, addr);
3434287570Sandrew	if (l3p == NULL) /* No l3 */
3435287570Sandrew		goto done;
3436295425Swma
3437287570Sandrew	l3 = pmap_load(l3p);
3438295425Swma	if ((l3 & ATTR_DESCR_MASK) == L3_INVAL)
3439295425Swma		goto done;
3440295425Swma
3441287570Sandrew	if ((l3 & ATTR_DESCR_MASK) == L3_PAGE) {
3442287570Sandrew		pa = (l3 & ~ATTR_MASK) | (addr & L3_OFFSET);
3443287570Sandrew		managed = (l3 & ATTR_SW_MANAGED) == ATTR_SW_MANAGED;
3444287570Sandrew		val = MINCORE_INCORE;
3445287570Sandrew		if (pmap_page_dirty(l3))
3446287570Sandrew			val |= MINCORE_MODIFIED | MINCORE_MODIFIED_OTHER;
3447287570Sandrew		if ((l3 & ATTR_AF) == ATTR_AF)
3448287570Sandrew			val |= MINCORE_REFERENCED | MINCORE_REFERENCED_OTHER;
3449287570Sandrew	}
3450287570Sandrew
3451287570Sandrewdone:
3452287570Sandrew	if ((val & (MINCORE_MODIFIED_OTHER | MINCORE_REFERENCED_OTHER)) !=
3453287570Sandrew	    (MINCORE_MODIFIED_OTHER | MINCORE_REFERENCED_OTHER) && managed) {
3454287570Sandrew		/* Ensure that "PHYS_TO_VM_PAGE(pa)->object" doesn't change. */
3455287570Sandrew		if (vm_page_pa_tryrelock(pmap, pa, locked_pa))
3456287570Sandrew			goto retry;
3457287570Sandrew	} else
3458287570Sandrew		PA_UNLOCK_COND(*locked_pa);
3459287570Sandrew	PMAP_UNLOCK(pmap);
3460287570Sandrew
3461287570Sandrew	return (val);
3462281494Sandrew}
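
/*
 * Example outcome of pmap_mincore() (hypothetical mapping): a resident,
 * writable (hence considered dirty) and referenced 4KiB page yields
 * MINCORE_INCORE | MINCORE_MODIFIED | MINCORE_MODIFIED_OTHER |
 * MINCORE_REFERENCED | MINCORE_REFERENCED_OTHER; the same state in a
 * 2MiB L2 block additionally sets MINCORE_SUPER.
 */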
3463281494Sandrew
3464281494Sandrewvoid
3465281494Sandrewpmap_activate(struct thread *td)
3466281494Sandrew{
3467281494Sandrew	pmap_t	pmap;
3468281494Sandrew
3469281494Sandrew	critical_enter();
3470281494Sandrew	pmap = vmspace_pmap(td->td_proc->p_vmspace);
3471297446Sandrew	td->td_pcb->pcb_l0addr = vtophys(pmap->pm_l0);
3472297446Sandrew	__asm __volatile("msr ttbr0_el1, %0" : : "r"(td->td_pcb->pcb_l0addr));
3473285212Sandrew	pmap_invalidate_all(pmap);
3474281494Sandrew	critical_exit();
3475281494Sandrew}
3476281494Sandrew
3477281494Sandrewvoid
3478287105Sandrewpmap_sync_icache(pmap_t pmap, vm_offset_t va, vm_size_t sz)
3479281494Sandrew{
3480281494Sandrew
3481287105Sandrew	if (va >= VM_MIN_KERNEL_ADDRESS) {
3482287105Sandrew		cpu_icache_sync_range(va, sz);
3483287105Sandrew	} else {
3484287105Sandrew		u_int len, offset;
3485287105Sandrew		vm_paddr_t pa;
3486287105Sandrew
3487287105Sandrew		/* Find the length of data in this page to flush */
3488287105Sandrew		offset = va & PAGE_MASK;
3489287105Sandrew		len = imin(PAGE_SIZE - offset, sz);
3490287105Sandrew
3491287105Sandrew		while (sz != 0) {
3492287105Sandrew			/* Extract the physical address & find it in the DMAP */
3493287105Sandrew			pa = pmap_extract(pmap, va);
3494287105Sandrew			if (pa != 0)
3495287105Sandrew				cpu_icache_sync_range(PHYS_TO_DMAP(pa), len);
3496287105Sandrew
3497287105Sandrew			/* Move to the next page */
3498287105Sandrew			sz -= len;
3499287105Sandrew			va += len;
3500287105Sandrew			/* Set the length for the next iteration */
3501287105Sandrew			len = imin(PAGE_SIZE, sz);
3502287105Sandrew		}
3503287105Sandrew	}
3504281494Sandrew}
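
/*
 * Worked example (hypothetical user address): for va = 0x41f80 and
 * sz = 0x100, the loop above first syncs 0x80 bytes, up to the page
 * boundary, and then the remaining 0x80 bytes from the start of the
 * next page, performing one pmap_extract() per page since the pages
 * need not be physically contiguous.
 */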
3505281494Sandrew
3506281494Sandrew/*
3507281494Sandrew *	Increase the starting virtual address of the given mapping if a
3508281494Sandrew *	different alignment might result in more superpage mappings.
3509281494Sandrew */
3510281494Sandrewvoid
3511281494Sandrewpmap_align_superpage(vm_object_t object, vm_ooffset_t offset,
3512281494Sandrew    vm_offset_t *addr, vm_size_t size)
3513281494Sandrew{
3514281494Sandrew}
3515281494Sandrew
3516281494Sandrew/**
3517281494Sandrew * Get the kernel virtual address of a set of physical pages. If there are
3518281494Sandrew * physical addresses not covered by the DMAP, perform a transient mapping
3519281494Sandrew * that will be removed when calling pmap_unmap_io_transient.
3520281494Sandrew *
3521281494Sandrew * \param page        The pages whose kernel virtual addresses the caller
3522281494Sandrew *                    wishes to obtain.
3523281494Sandrew * \param vaddr       On return contains the kernel virtual memory address
3524281494Sandrew *                    of the pages passed in the page parameter.
3525281494Sandrew * \param count       Number of pages passed in.
3526281494Sandrew * \param can_fault   TRUE if the thread using the mapped pages can take
3527281494Sandrew *                    page faults, FALSE otherwise.
3528281494Sandrew *
3529281494Sandrew * \returns TRUE if the caller must call pmap_unmap_io_transient when
3530281494Sandrew *          finished or FALSE otherwise.
3531281494Sandrew *
3532281494Sandrew */
3533281494Sandrewboolean_t
3534281494Sandrewpmap_map_io_transient(vm_page_t page[], vm_offset_t vaddr[], int count,
3535281494Sandrew    boolean_t can_fault)
3536281494Sandrew{
3537281494Sandrew	vm_paddr_t paddr;
3538281494Sandrew	boolean_t needs_mapping;
3539281494Sandrew	int error, i;
3540281494Sandrew
3541281494Sandrew	/*
3542281494Sandrew	 * Allocate any KVA space that we need, this is done in a separate
3543281494Sandrew	 * loop to prevent calling vmem_alloc while pinned.
3544281494Sandrew	 */
3545281494Sandrew	needs_mapping = FALSE;
3546281494Sandrew	for (i = 0; i < count; i++) {
3547281494Sandrew		paddr = VM_PAGE_TO_PHYS(page[i]);
3548297617Sandrew		if (__predict_false(!PHYS_IN_DMAP(paddr))) {
3549281494Sandrew			error = vmem_alloc(kernel_arena, PAGE_SIZE,
3550281494Sandrew			    M_BESTFIT | M_WAITOK, &vaddr[i]);
3551281494Sandrew			KASSERT(error == 0, ("vmem_alloc failed: %d", error));
3552281494Sandrew			needs_mapping = TRUE;
3553281494Sandrew		} else {
3554281494Sandrew			vaddr[i] = PHYS_TO_DMAP(paddr);
3555281494Sandrew		}
3556281494Sandrew	}
3557281494Sandrew
3558281494Sandrew	/* Exit early if everything is covered by the DMAP */
3559281494Sandrew	if (!needs_mapping)
3560281494Sandrew		return (FALSE);
3561281494Sandrew
3562281494Sandrew	if (!can_fault)
3563281494Sandrew		sched_pin();
3564281494Sandrew	for (i = 0; i < count; i++) {
3565281494Sandrew		paddr = VM_PAGE_TO_PHYS(page[i]);
3566297617Sandrew		if (!PHYS_IN_DMAP(paddr)) {
3567281494Sandrew			panic(
3568281494Sandrew			   "pmap_map_io_transient: TODO: Map out of DMAP data");
3569281494Sandrew		}
3570281494Sandrew	}
3571281494Sandrew
3572281494Sandrew	return (needs_mapping);
3573281494Sandrew}
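
/*
 * Usage sketch (an assumed caller, not code from this file): copying a
 * page that may lie outside the DMAP.
 *
 *	vm_offset_t va[1];
 *	boolean_t mapped;
 *
 *	mapped = pmap_map_io_transient(&m, va, 1, FALSE);
 *	bcopy((void *)va[0], buf, PAGE_SIZE);
 *	if (__predict_false(mapped))
 *		pmap_unmap_io_transient(&m, va, 1, FALSE);
 */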
3574281494Sandrew
3575281494Sandrewvoid
3576281494Sandrewpmap_unmap_io_transient(vm_page_t page[], vm_offset_t vaddr[], int count,
3577281494Sandrew    boolean_t can_fault)
3578281494Sandrew{
3579281494Sandrew	vm_paddr_t paddr;
3580281494Sandrew	int i;
3581281494Sandrew
3582281494Sandrew	if (!can_fault)
3583281494Sandrew		sched_unpin();
3584281494Sandrew	for (i = 0; i < count; i++) {
3585281494Sandrew		paddr = VM_PAGE_TO_PHYS(page[i]);
3586297617Sandrew		if (!PHYS_IN_DMAP(paddr)) {
3587286073Semaste			panic("ARM64TODO: pmap_unmap_io_transient: Unmap data");
3588281494Sandrew		}
3589281494Sandrew	}
3590281494Sandrew}