pmap-v4.h revision 239268
/*-
 * Copyright (c) 1991 Regents of the University of California.
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * the Systems Programming Group of the University of Utah Computer
 * Science Department and William Jolitz of UUNET Technologies Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *      This product includes software developed by the University of
 *      California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * Derived from hp300 version by Mike Hibler, this version by William
 * Jolitz uses a recursive map [a pde points to the page directory] to
 * map the page tables using the pagetables themselves. This is done to
 * reduce the impact on kernel virtual memory for lots of sparse address
 * space, and to reduce the cost of memory to each process.
 *
 *      from: hp300: @(#)pmap.h 7.2 (Berkeley) 12/16/90
 *      from: @(#)pmap.h        7.4 (Berkeley) 5/12/91
 * 	from: FreeBSD: src/sys/i386/include/pmap.h,v 1.70 2000/11/30
 *
 * $FreeBSD: head/sys/arm/include/pmap.h 239268 2012-08-15 03:03:03Z gonzo $
 */

#ifndef _MACHINE_PMAP_H_
#define _MACHINE_PMAP_H_

#include <machine/pte.h>
#include <machine/cpuconf.h>
/*
 * Pte related macros
 */
#if ARM_ARCH_6 || ARM_ARCH_7A
#ifdef SMP
#define PTE_NOCACHE	2
#else
#define PTE_NOCACHE	1
#endif
#define PTE_CACHE	4
#define PTE_DEVICE	2
#define PTE_PAGETABLE	4
#else
#define PTE_NOCACHE	1
#define PTE_CACHE	2
#define PTE_PAGETABLE	3
#endif

enum mem_type {
	STRONG_ORD = 0,
	DEVICE_NOSHARE,
	DEVICE_SHARE,
	NRML_NOCACHE,
	NRML_IWT_OWT,
	NRML_IWB_OWB,
	NRML_IWBA_OWBA
};

#ifndef LOCORE

#include <sys/queue.h>
#include <sys/_cpuset.h>
#include <sys/_lock.h>
#include <sys/_mutex.h>

#define PDESIZE		sizeof(pd_entry_t)	/* for assembly files */
#define PTESIZE		sizeof(pt_entry_t)	/* for assembly files */

#ifdef _KERNEL

#define vtophys(va)	pmap_extract(pmap_kernel(), (vm_offset_t)(va))
#define pmap_kextract(va)	pmap_extract(pmap_kernel(), (vm_offset_t)(va))

#endif

#define	pmap_page_get_memattr(m)	VM_MEMATTR_DEFAULT
#define	pmap_page_is_mapped(m)	(!TAILQ_EMPTY(&(m)->md.pv_list))
#define	pmap_page_is_write_mapped(m)	(((m)->aflags & PGA_WRITEABLE) != 0)
#define	pmap_page_set_memattr(m, ma)	(void)0

/*
 * Pmap stuff
 */

/*
 * This structure is used to hold a virtual<->physical address
 * association and is used mostly by bootstrap code.
 */
struct pv_addr {
	SLIST_ENTRY(pv_addr) pv_list;
	vm_offset_t	pv_va;
	vm_paddr_t	pv_pa;
};

struct	pv_entry;

struct	md_page {
	int pvh_attrs;
	vm_offset_t pv_kva;		/* first kernel VA mapping */
	TAILQ_HEAD(,pv_entry)	pv_list;
};

#define	VM_MDPAGE_INIT(pg)						\
do {									\
	TAILQ_INIT(&(pg)->md.pv_list);					\
	(pg)->md.pv_kva = 0;						\
	(pg)->md.pvh_attrs = 0;						\
} while (/*CONSTCOND*/0)

struct l1_ttable;
struct l2_dtable;

/*
 * The number of L2 descriptor tables which can be tracked by an l2_dtable.
 * A bucket size of 16 provides for 16MB of contiguous virtual address
 * space per l2_dtable. Most processes will, therefore, require only two or
 * three of these to map their whole working set.
 */
#define	L2_BUCKET_LOG2	4
#define	L2_BUCKET_SIZE	(1 << L2_BUCKET_LOG2)
/*
 * Given the above "L2-descriptors-per-l2_dtable" constant, the number
 * of l2_dtable structures required to track all possible page descriptors
 * mappable by an L1 translation table is given by the following constants:
 */
#define	L2_LOG2		((32 - L1_S_SHIFT) - L2_BUCKET_LOG2)
#define	L2_SIZE		(1 << L2_LOG2)
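/*
 * Worked example, using the usual ARM values from <machine/pte.h>:
 * L1_S_SHIFT is 20 (1MB sections), so L2_LOG2 = (32 - 20) - 4 = 8 and
 * L2_SIZE = 256.  Each l2_dtable therefore spans 16 * 1MB = 16MB of
 * virtual address space, and 256 of them cover the full 4GB.
 */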

struct	pmap {
	struct mtx		pm_mtx;
	u_int8_t		pm_domain;
	struct l1_ttable	*pm_l1;
	struct l2_dtable	*pm_l2[L2_SIZE];
	pd_entry_t		*pm_pdir;	/* KVA of page directory */
	cpuset_t		pm_active;	/* active on cpus */
	struct pmap_statistics	pm_stats;	/* pmap statistics */
	TAILQ_HEAD(,pv_entry)	pm_pvlist;	/* list of mappings in pmap */
};

typedef struct pmap *pmap_t;

#ifdef _KERNEL
extern struct pmap	kernel_pmap_store;
#define kernel_pmap	(&kernel_pmap_store)
#define pmap_kernel() kernel_pmap

#define	PMAP_ASSERT_LOCKED(pmap) \
				mtx_assert(&(pmap)->pm_mtx, MA_OWNED)
#define	PMAP_LOCK(pmap)		mtx_lock(&(pmap)->pm_mtx)
#define	PMAP_LOCK_DESTROY(pmap)	mtx_destroy(&(pmap)->pm_mtx)
#define	PMAP_LOCK_INIT(pmap)	mtx_init(&(pmap)->pm_mtx, "pmap", \
				    NULL, MTX_DEF | MTX_DUPOK)
#define	PMAP_OWNED(pmap)	mtx_owned(&(pmap)->pm_mtx)
#define	PMAP_MTX(pmap)		(&(pmap)->pm_mtx)
#define	PMAP_TRYLOCK(pmap)	mtx_trylock(&(pmap)->pm_mtx)
#define	PMAP_UNLOCK(pmap)	mtx_unlock(&(pmap)->pm_mtx)
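
/*
 * Illustrative sketch (not part of the original header): the usual
 * pattern for touching per-pmap state under the pmap mutex.  The
 * statistics field is just one example of state the lock protects.
 */
static __inline void
pmap_resident_count_inc_sketch(pmap_t pmap)
{

	PMAP_LOCK(pmap);
	pmap->pm_stats.resident_count++;	/* one more managed mapping */
	PMAP_UNLOCK(pmap);
}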
#endif

/*
 * For each vm_page_t, there is a list of all currently valid virtual
 * mappings of that page.  An entry is a pv_entry_t, the list is pv_list.
 */
typedef struct pv_entry {
	pmap_t          pv_pmap;        /* pmap where mapping lies */
	vm_offset_t     pv_va;          /* virtual address for mapping */
	TAILQ_ENTRY(pv_entry)   pv_list;
	TAILQ_ENTRY(pv_entry)	pv_plist;
	int		pv_flags;	/* flags (wired, etc...) */
} *pv_entry_t;

#ifdef _KERNEL

boolean_t pmap_get_pde_pte(pmap_t, vm_offset_t, pd_entry_t **, pt_entry_t **);

/*
 * Translate a virtual address to a page table entry and to a physical
 * address.  Note: these work recursively, so the vtopte of a PTE gives
 * the corresponding PDE that in turn maps it.
 */

/*
 * The current top of kernel VM.
 */
extern vm_offset_t pmap_curmaxkvaddr;

struct pcb;

void	pmap_set_pcb_pagedir(pmap_t, struct pcb *);

/* Virtual address to page table entry */
static __inline pt_entry_t *
vtopte(vm_offset_t va)
{
	pd_entry_t *pdep;
	pt_entry_t *ptep;

	if (pmap_get_pde_pte(pmap_kernel(), va, &pdep, &ptep) == FALSE)
		return (NULL);
	return (ptep);
}
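
/*
 * Illustrative sketch (not part of the original header): translate a
 * kernel virtual address to a physical address through vtopte(),
 * assuming the VA is backed by a valid L2 small-page (4KB) mapping.
 * L2_S_FRAME and L2_S_OFFSET come from <machine/pte.h>.
 */
static __inline vm_paddr_t
vtopte_pa_sketch(vm_offset_t va)
{
	pt_entry_t *ptep;

	ptep = vtopte(va);
	if (ptep == NULL || *ptep == 0)		/* unmapped VA */
		return (0);
	return ((*ptep & L2_S_FRAME) | (va & L2_S_OFFSET));
}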

extern vm_paddr_t phys_avail[];
extern vm_offset_t virtual_avail;
extern vm_offset_t virtual_end;

void	pmap_bootstrap(vm_offset_t, vm_offset_t, struct pv_addr *);
int	pmap_change_attr(vm_offset_t, vm_size_t, int);
void	pmap_kenter(vm_offset_t va, vm_paddr_t pa);
void	pmap_kenter_nocache(vm_offset_t va, vm_paddr_t pa);
void	*pmap_kenter_temp(vm_paddr_t pa, int i);
void	pmap_kenter_user(vm_offset_t va, vm_paddr_t pa);
void	pmap_kremove(vm_offset_t);
void	*pmap_mapdev(vm_offset_t, vm_size_t);
void	pmap_unmapdev(vm_offset_t, vm_size_t);
vm_page_t	pmap_use_pt(pmap_t, vm_offset_t);
void	pmap_debug(int);
void	pmap_map_section(vm_offset_t, vm_offset_t, vm_offset_t, int, int);
void	pmap_link_l2pt(vm_offset_t, vm_offset_t, struct pv_addr *);
vm_size_t	pmap_map_chunk(vm_offset_t, vm_offset_t, vm_offset_t, vm_size_t, int, int);
void	pmap_map_entry(vm_offset_t l1pt, vm_offset_t va, vm_offset_t pa,
	    int prot, int cache);
int	pmap_fault_fixup(pmap_t, vm_offset_t, vm_prot_t, int);
int	pmap_dmap_iscurrent(pmap_t pmap);

/*
 * Definitions for MMU domains
 */
#define	PMAP_DOMAINS		15	/* 15 'user' domains (1-15) */
#define	PMAP_DOMAIN_KERNEL	0	/* The kernel uses domain #0 */

/*
 * The new pmap ensures that page-tables are always mapping Write-Thru.
 * Thus, on some platforms we can run fast and loose and avoid syncing PTEs
 * on every change.
 *
 * Unfortunately, not all CPUs have a write-through cache mode.  So we
 * define PMAP_NEEDS_PTE_SYNC for C code to conditionally do PTE syncs,
 * and if there is the chance for PTE syncs to be needed, we define
 * PMAP_INCLUDE_PTE_SYNC so e.g. assembly code can include (and run)
 * the code.
 */
extern int pmap_needs_pte_sync;

/*
 * These macros define the various bit masks in the PTE.
 *
 * We use these macros since we use different bits on different processor
 * models.
 */

#define	L1_S_CACHE_MASK_generic	(L1_S_B|L1_S_C)
#define	L1_S_CACHE_MASK_xscale	(L1_S_B|L1_S_C|L1_S_XSCALE_TEX(TEX_XSCALE_X)|\
    				L1_S_XSCALE_TEX(TEX_XSCALE_T))

#define	L2_L_CACHE_MASK_generic	(L2_B|L2_C)
#define	L2_L_CACHE_MASK_xscale	(L2_B|L2_C|L2_XSCALE_L_TEX(TEX_XSCALE_X) | \
    				L2_XSCALE_L_TEX(TEX_XSCALE_T))

#define	L2_S_PROT_U_generic	(L2_AP(AP_U))
#define	L2_S_PROT_W_generic	(L2_AP(AP_W))
#define	L2_S_PROT_MASK_generic	(L2_S_PROT_U|L2_S_PROT_W)

#define	L2_S_PROT_U_xscale	(L2_AP0(AP_U))
#define	L2_S_PROT_W_xscale	(L2_AP0(AP_W))
#define	L2_S_PROT_MASK_xscale	(L2_S_PROT_U|L2_S_PROT_W)

#define	L2_S_CACHE_MASK_generic	(L2_B|L2_C)
#define	L2_S_CACHE_MASK_xscale	(L2_B|L2_C|L2_XSCALE_T_TEX(TEX_XSCALE_X)| \
    				 L2_XSCALE_T_TEX(TEX_XSCALE_T))

#define	L1_S_PROTO_generic	(L1_TYPE_S | L1_S_IMP)
#define	L1_S_PROTO_xscale	(L1_TYPE_S)

#define	L1_C_PROTO_generic	(L1_TYPE_C | L1_C_IMP2)
#define	L1_C_PROTO_xscale	(L1_TYPE_C)

#define	L2_L_PROTO		(L2_TYPE_L)

#define	L2_S_PROTO_generic	(L2_TYPE_S)
#define	L2_S_PROTO_xscale	(L2_TYPE_XSCALE_XS)

/*
 * User-visible names for the ones that vary with MMU class.
 */
#if (ARM_MMU_V6 + ARM_MMU_V7) != 0
#define	L2_AP(x)	(L2_AP0(x))
#else
#define	L2_AP(x)	(L2_AP0(x) | L2_AP1(x) | L2_AP2(x) | L2_AP3(x))
#endif

#if ARM_NMMUS > 1
/* More than one MMU class configured; use variables. */
#define	L2_S_PROT_U		pte_l2_s_prot_u
#define	L2_S_PROT_W		pte_l2_s_prot_w
#define	L2_S_PROT_MASK		pte_l2_s_prot_mask

#define	L1_S_CACHE_MASK		pte_l1_s_cache_mask
#define	L2_L_CACHE_MASK		pte_l2_l_cache_mask
#define	L2_S_CACHE_MASK		pte_l2_s_cache_mask

#define	L1_S_PROTO		pte_l1_s_proto
#define	L1_C_PROTO		pte_l1_c_proto
#define	L2_S_PROTO		pte_l2_s_proto

#elif (ARM_MMU_GENERIC + ARM_MMU_SA1) != 0
#define	L2_S_PROT_U		L2_S_PROT_U_generic
#define	L2_S_PROT_W		L2_S_PROT_W_generic
#define	L2_S_PROT_MASK		L2_S_PROT_MASK_generic

#define	L1_S_CACHE_MASK		L1_S_CACHE_MASK_generic
#define	L2_L_CACHE_MASK		L2_L_CACHE_MASK_generic
#define	L2_S_CACHE_MASK		L2_S_CACHE_MASK_generic

#define	L1_S_PROTO		L1_S_PROTO_generic
#define	L1_C_PROTO		L1_C_PROTO_generic
#define	L2_S_PROTO		L2_S_PROTO_generic

#elif ARM_MMU_XSCALE == 1
#define	L2_S_PROT_U		L2_S_PROT_U_xscale
#define	L2_S_PROT_W		L2_S_PROT_W_xscale
#define	L2_S_PROT_MASK		L2_S_PROT_MASK_xscale

#define	L1_S_CACHE_MASK		L1_S_CACHE_MASK_xscale
#define	L2_L_CACHE_MASK		L2_L_CACHE_MASK_xscale
#define	L2_S_CACHE_MASK		L2_S_CACHE_MASK_xscale

#define	L1_S_PROTO		L1_S_PROTO_xscale
#define	L1_C_PROTO		L1_C_PROTO_xscale
#define	L2_S_PROTO		L2_S_PROTO_xscale

#elif (ARM_MMU_V6 + ARM_MMU_V7) != 0

#define	L2_S_PROT_U		(L2_AP0(2))		/* user access */
#define	L2_S_PROT_R		(L2_APX|L2_AP0(1))	/* read access */

#define	L2_S_PROT_MASK		(L2_S_PROT_U|L2_S_PROT_R)
#define	L2_S_WRITABLE(pte)	(!((pte) & L2_APX))

#ifndef SMP
#define	L1_S_CACHE_MASK		(L1_S_TEX_MASK|L1_S_B|L1_S_C)
#define	L2_L_CACHE_MASK		(L2_L_TEX_MASK|L2_B|L2_C)
#define	L2_S_CACHE_MASK		(L2_S_TEX_MASK|L2_B|L2_C)
#else
#define	L1_S_CACHE_MASK		(L1_S_TEX_MASK|L1_S_B|L1_S_C|L1_SHARED)
#define	L2_L_CACHE_MASK		(L2_L_TEX_MASK|L2_B|L2_C|L2_SHARED)
#define	L2_S_CACHE_MASK		(L2_S_TEX_MASK|L2_B|L2_C|L2_SHARED)
#endif  /* SMP */

#define	L1_S_PROTO		(L1_TYPE_S)
#define	L1_C_PROTO		(L1_TYPE_C)
#define	L2_S_PROTO		(L2_TYPE_S)

#ifndef SMP
#define ARM_L1S_STRONG_ORD	(0)
#define ARM_L1S_DEVICE_NOSHARE	(L1_S_TEX(2))
#define ARM_L1S_DEVICE_SHARE	(L1_S_B)
#define ARM_L1S_NRML_NOCACHE	(L1_S_TEX(1))
#define ARM_L1S_NRML_IWT_OWT	(L1_S_C)
#define ARM_L1S_NRML_IWB_OWB	(L1_S_C|L1_S_B)
#define ARM_L1S_NRML_IWBA_OWBA	(L1_S_TEX(1)|L1_S_C|L1_S_B)

#define ARM_L2L_STRONG_ORD	(0)
#define ARM_L2L_DEVICE_NOSHARE	(L2_L_TEX(2))
#define ARM_L2L_DEVICE_SHARE	(L2_B)
#define ARM_L2L_NRML_NOCACHE	(L2_L_TEX(1))
#define ARM_L2L_NRML_IWT_OWT	(L2_C)
#define ARM_L2L_NRML_IWB_OWB	(L2_C|L2_B)
#define ARM_L2L_NRML_IWBA_OWBA	(L2_L_TEX(1)|L2_C|L2_B)

#define ARM_L2S_STRONG_ORD	(0)
#define ARM_L2S_DEVICE_NOSHARE	(L2_S_TEX(2))
#define ARM_L2S_DEVICE_SHARE	(L2_B)
#define ARM_L2S_NRML_NOCACHE	(L2_S_TEX(1))
#define ARM_L2S_NRML_IWT_OWT	(L2_C)
#define ARM_L2S_NRML_IWB_OWB	(L2_C|L2_B)
#define ARM_L2S_NRML_IWBA_OWBA	(L2_S_TEX(1)|L2_C|L2_B)
#else
#define ARM_L1S_STRONG_ORD	(0)
#define ARM_L1S_DEVICE_NOSHARE	(L1_S_TEX(2))
#define ARM_L1S_DEVICE_SHARE	(L1_S_B)
#define ARM_L1S_NRML_NOCACHE	(L1_S_TEX(1)|L1_SHARED)
#define ARM_L1S_NRML_IWT_OWT	(L1_S_C|L1_SHARED)
#define ARM_L1S_NRML_IWB_OWB	(L1_S_C|L1_S_B|L1_SHARED)
#define ARM_L1S_NRML_IWBA_OWBA	(L1_S_TEX(1)|L1_S_C|L1_S_B|L1_SHARED)

#define ARM_L2L_STRONG_ORD	(0)
#define ARM_L2L_DEVICE_NOSHARE	(L2_L_TEX(2))
#define ARM_L2L_DEVICE_SHARE	(L2_B)
#define ARM_L2L_NRML_NOCACHE	(L2_L_TEX(1)|L2_SHARED)
#define ARM_L2L_NRML_IWT_OWT	(L2_C|L2_SHARED)
#define ARM_L2L_NRML_IWB_OWB	(L2_C|L2_B|L2_SHARED)
#define ARM_L2L_NRML_IWBA_OWBA	(L2_L_TEX(1)|L2_C|L2_B|L2_SHARED)

#define ARM_L2S_STRONG_ORD	(0)
#define ARM_L2S_DEVICE_NOSHARE	(L2_S_TEX(2))
#define ARM_L2S_DEVICE_SHARE	(L2_B)
#define ARM_L2S_NRML_NOCACHE	(L2_S_TEX(1)|L2_SHARED)
#define ARM_L2S_NRML_IWT_OWT	(L2_C|L2_SHARED)
#define ARM_L2S_NRML_IWB_OWB	(L2_C|L2_B|L2_SHARED)
#define ARM_L2S_NRML_IWBA_OWBA	(L2_S_TEX(1)|L2_C|L2_B|L2_SHARED)
#endif /* SMP */
#endif /* ARM_NMMUS > 1 */

#if (ARM_MMU_SA1 == 1) && (ARM_NMMUS == 1)
#define	PMAP_NEEDS_PTE_SYNC	1
#define	PMAP_INCLUDE_PTE_SYNC
#elif defined(CPU_XSCALE_81342)
#define PMAP_NEEDS_PTE_SYNC	1
#define PMAP_INCLUDE_PTE_SYNC
#elif (ARM_MMU_SA1 == 0)
#define	PMAP_NEEDS_PTE_SYNC	0
#endif

/*
 * These macros return various bits based on kernel/user and protection.
 * Note that the compiler will usually fold these at compile time.
 */
#if (ARM_MMU_V6 + ARM_MMU_V7) == 0

#define	L1_S_PROT_U		(L1_S_AP(AP_U))
#define	L1_S_PROT_W		(L1_S_AP(AP_W))
#define	L1_S_PROT_MASK		(L1_S_PROT_U|L1_S_PROT_W)
#define	L1_S_WRITABLE(pd)	((pd) & L1_S_PROT_W)

#define	L1_S_PROT(ku, pr)	((((ku) == PTE_USER) ? L1_S_PROT_U : 0) | \
				 (((pr) & VM_PROT_WRITE) ? L1_S_PROT_W : 0))

#define	L2_L_PROT_U		(L2_AP(AP_U))
#define	L2_L_PROT_W		(L2_AP(AP_W))
#define	L2_L_PROT_MASK		(L2_L_PROT_U|L2_L_PROT_W)

#define	L2_L_PROT(ku, pr)	((((ku) == PTE_USER) ? L2_L_PROT_U : 0) | \
				 (((pr) & VM_PROT_WRITE) ? L2_L_PROT_W : 0))

#define	L2_S_PROT(ku, pr)	((((ku) == PTE_USER) ? L2_S_PROT_U : 0) | \
				 (((pr) & VM_PROT_WRITE) ? L2_S_PROT_W : 0))
#else
#define	L1_S_PROT_U		(L1_S_AP(AP_U))
#define	L1_S_PROT_MASK		(L1_S_APX|L1_S_AP(0x3))
#define	L1_S_WRITABLE(pd)	(!((pd) & L1_S_APX))

#define	L1_S_PROT(ku, pr)	(L1_S_PROT_MASK & ~((((ku) == PTE_KERNEL) ? L1_S_PROT_U : 0) | \
				 (((pr) & VM_PROT_WRITE) ? L1_S_APX : 0)))

#define	L2_L_PROT_MASK		(L2_APX|L2_AP0(0x3))
#define	L2_L_PROT(ku, pr)	(L2_L_PROT_MASK & ~((((ku) == PTE_KERNEL) ? L2_S_PROT_U : 0) | \
				 (((pr) & VM_PROT_WRITE) ? L2_APX : 0)))

#define	L2_S_PROT(ku, pr)	(L2_S_PROT_MASK & ~((((ku) == PTE_KERNEL) ? L2_S_PROT_U : 0) | \
				 (((pr) & VM_PROT_WRITE) ? L2_APX : 0)))

#endif

/*
 * Macros to test if a mapping is mappable with an L1 Section mapping
 * or an L2 Large Page mapping.
 */
#define	L1_S_MAPPABLE_P(va, pa, size)					\
	((((va) | (pa)) & L1_S_OFFSET) == 0 && (size) >= L1_S_SIZE)

#define	L2_L_MAPPABLE_P(va, pa, size)					\
	((((va) | (pa)) & L2_L_OFFSET) == 0 && (size) >= L2_L_SIZE)
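
/*
 * Illustrative sketch (not part of the original header): pick the
 * largest mapping primitive a va/pa/size triple allows, roughly the
 * decision pmap_map_chunk() makes when building bootstrap mappings.
 * L1_S_SIZE, L2_L_SIZE, and L2_S_SIZE come from <machine/pte.h>.
 */
static __inline vm_size_t
pmap_map_size_sketch(vm_offset_t va, vm_paddr_t pa, vm_size_t resid)
{

	if (L1_S_MAPPABLE_P(va, pa, resid))
		return (L1_S_SIZE);	/* 1MB section */
	if (L2_L_MAPPABLE_P(va, pa, resid))
		return (L2_L_SIZE);	/* 64KB large page */
	return (L2_S_SIZE);		/* 4KB small page */
}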

/*
 * Provide a fallback in case we were not able to determine it at
 * compile-time.
 */
#ifndef PMAP_NEEDS_PTE_SYNC
#define	PMAP_NEEDS_PTE_SYNC	pmap_needs_pte_sync
#define	PMAP_INCLUDE_PTE_SYNC
#endif

#define	PTE_SYNC(pte)							\
do {									\
	if (PMAP_NEEDS_PTE_SYNC) {					\
		cpu_dcache_wb_range((vm_offset_t)(pte), sizeof(pt_entry_t));\
		cpu_l2cache_wb_range((vm_offset_t)(pte), sizeof(pt_entry_t));\
	} else								\
		cpu_drain_writebuf();					\
} while (/*CONSTCOND*/0)

#define	PTE_SYNC_RANGE(pte, cnt)					\
do {									\
	if (PMAP_NEEDS_PTE_SYNC) {					\
		cpu_dcache_wb_range((vm_offset_t)(pte),			\
		    (cnt) << 2); /* * sizeof(pt_entry_t) */		\
		cpu_l2cache_wb_range((vm_offset_t)(pte),		\
		    (cnt) << 2); /* * sizeof(pt_entry_t) */		\
	} else								\
		cpu_drain_writebuf();					\
} while (/*CONSTCOND*/0)
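
/*
 * Illustrative sketch (not part of the original header): store a PTE
 * and push it to where the MMU's table walker can see it.  Assumes the
 * cpu_*cache*/cpu_drain_writebuf() declarations from
 * <machine/cpufunc.h> are visible, as they are in the pmap
 * implementation itself.
 */
static __inline void
pmap_store_pte_sketch(pt_entry_t *ptep, pt_entry_t pte)
{

	*ptep = pte;
	PTE_SYNC(ptep);
}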

extern pt_entry_t		pte_l1_s_cache_mode;
extern pt_entry_t		pte_l1_s_cache_mask;

extern pt_entry_t		pte_l2_l_cache_mode;
extern pt_entry_t		pte_l2_l_cache_mask;

extern pt_entry_t		pte_l2_s_cache_mode;
extern pt_entry_t		pte_l2_s_cache_mask;

extern pt_entry_t		pte_l1_s_cache_mode_pt;
extern pt_entry_t		pte_l2_l_cache_mode_pt;
extern pt_entry_t		pte_l2_s_cache_mode_pt;

extern pt_entry_t		pte_l2_s_prot_u;
extern pt_entry_t		pte_l2_s_prot_w;
extern pt_entry_t		pte_l2_s_prot_mask;

extern pt_entry_t		pte_l1_s_proto;
extern pt_entry_t		pte_l1_c_proto;
extern pt_entry_t		pte_l2_s_proto;

extern void (*pmap_copy_page_func)(vm_paddr_t, vm_paddr_t);
extern void (*pmap_zero_page_func)(vm_paddr_t, int, int);

#if (ARM_MMU_GENERIC + ARM_MMU_V6 + ARM_MMU_V7 + ARM_MMU_SA1) != 0 || defined(CPU_XSCALE_81342)
void	pmap_copy_page_generic(vm_paddr_t, vm_paddr_t);
void	pmap_zero_page_generic(vm_paddr_t, int, int);

void	pmap_pte_init_generic(void);
#if defined(CPU_ARM8)
void	pmap_pte_init_arm8(void);
#endif
#if defined(CPU_ARM9)
void	pmap_pte_init_arm9(void);
#endif /* CPU_ARM9 */
#if defined(CPU_ARM10)
void	pmap_pte_init_arm10(void);
#endif /* CPU_ARM10 */
#if (ARM_MMU_V6 + ARM_MMU_V7) != 0
void	pmap_pte_init_mmu_v6(void);
#endif /* (ARM_MMU_V6 + ARM_MMU_V7) != 0 */
#endif /* (ARM_MMU_GENERIC + ARM_MMU_V6 + ARM_MMU_V7 + ARM_MMU_SA1) != 0 || CPU_XSCALE_81342 */

#if /* ARM_MMU_SA1 == */1
void	pmap_pte_init_sa1(void);
#endif /* ARM_MMU_SA1 == 1 */

#if ARM_MMU_XSCALE == 1
void	pmap_copy_page_xscale(vm_paddr_t, vm_paddr_t);
void	pmap_zero_page_xscale(vm_paddr_t, int, int);

void	pmap_pte_init_xscale(void);

void	xscale_setup_minidata(vm_offset_t, vm_offset_t, vm_offset_t);

void	pmap_use_minicache(vm_offset_t, vm_size_t);
#endif /* ARM_MMU_XSCALE == 1 */
#if defined(CPU_XSCALE_81342)
#define ARM_HAVE_SUPERSECTIONS
#endif

#define PTE_KERNEL	0
#define PTE_USER	1
#define	l1pte_valid(pde)	((pde) != 0)
#define	l1pte_section_p(pde)	(((pde) & L1_TYPE_MASK) == L1_TYPE_S)
#define	l1pte_page_p(pde)	(((pde) & L1_TYPE_MASK) == L1_TYPE_C)
#define	l1pte_fpage_p(pde)	(((pde) & L1_TYPE_MASK) == L1_TYPE_F)

#define l2pte_index(v)		(((v) & L2_ADDR_BITS) >> L2_S_SHIFT)
#define	l2pte_valid(pte)	((pte) != 0)
#define	l2pte_pa(pte)		((pte) & L2_S_FRAME)
#define l2pte_minidata(pte)	(((pte) & \
				 (L2_B | L2_C | L2_XSCALE_T_TEX(TEX_XSCALE_X)))\
				 == (L2_C | L2_XSCALE_T_TEX(TEX_XSCALE_X)))

/* L1 and L2 page table macros */
#define pmap_pde_v(pde)		l1pte_valid(*(pde))
#define pmap_pde_section(pde)	l1pte_section_p(*(pde))
#define pmap_pde_page(pde)	l1pte_page_p(*(pde))
#define pmap_pde_fpage(pde)	l1pte_fpage_p(*(pde))

#define	pmap_pte_v(pte)		l2pte_valid(*(pte))
#define	pmap_pte_pa(pte)	l2pte_pa(*(pte))

/*
 * Flags that indicate attributes of pages or mappings of pages.
 *
 * The PVF_MOD and PVF_REF flags are stored in the mdpage for each
 * page.  PVF_WIRED, PVF_WRITE, and PVF_NC are kept in individual
 * pv_entry's for each page.  They live in the same "namespace" so
 * that we can clear multiple attributes at a time.
 *
 * Note the "non-cacheable" flag generally means the page has
 * multiple mappings in a given address space.
 */
#define	PVF_MOD		0x01		/* page is modified */
#define	PVF_REF		0x02		/* page is referenced */
#define	PVF_WIRED	0x04		/* mapping is wired */
#define	PVF_WRITE	0x08		/* mapping is writable */
#define	PVF_EXEC	0x10		/* mapping is executable */
#define	PVF_NC		0x20		/* mapping is non-cacheable */
#define	PVF_MWC		0x40		/* mapping is used multiple times in userland */
#define	PVF_UNMAN	0x80		/* mapping is unmanaged */
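
/*
 * Because the PVF_* values share one namespace, several attributes can
 * be cleared in a single operation; e.g. (illustrative, for a
 * vm_page_t m):
 *
 *	m->md.pvh_attrs &= ~(PVF_MOD | PVF_REF);
 */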

void vector_page_setprot(int);

void pmap_update(pmap_t);

/*
 * This structure is used by machine-dependent code to describe
 * static mappings of devices, created at bootstrap time.
 */
struct pmap_devmap {
	vm_offset_t	pd_va;		/* virtual address */
	vm_paddr_t	pd_pa;		/* physical address */
	vm_size_t	pd_size;	/* size of region */
	vm_prot_t	pd_prot;	/* protection code */
	int		pd_cache;	/* cache attributes */
};

const struct pmap_devmap *pmap_devmap_find_pa(vm_paddr_t, vm_size_t);
const struct pmap_devmap *pmap_devmap_find_va(vm_offset_t, vm_size_t);

void	pmap_devmap_bootstrap(vm_offset_t, const struct pmap_devmap *);
void	pmap_devmap_register(const struct pmap_devmap *);
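
/*
 * Illustrative sketch (hypothetical addresses, not part of the
 * original header): a board's static device mappings as handed to
 * pmap_devmap_bootstrap() during early startup.  The table is scanned
 * until an entry with a zero pd_size is seen, so it ends with an
 * all-zero terminator.  VM_PROT_* come from the VM headers.
 */
static const struct pmap_devmap devmap_sketch[] = {
	{
		0xf0100000,		/* pd_va: KVA chosen for the device */
		0x40100000,		/* pd_pa: device's physical base */
		0x00100000,		/* pd_size: one 1MB section */
		VM_PROT_READ | VM_PROT_WRITE,
		PTE_NOCACHE,		/* registers must not be cached */
	},
	{ 0, 0, 0, 0, 0 }		/* terminator */
};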

#define SECTION_CACHE	0x1
#define SECTION_PT	0x2
void	pmap_kenter_section(vm_offset_t, vm_paddr_t, int flags);
#ifdef ARM_HAVE_SUPERSECTIONS
void	pmap_kenter_supersection(vm_offset_t, uint64_t, int flags);
#endif

extern char *_tmppt;

void	pmap_postinit(void);

#ifdef ARM_USE_SMALL_ALLOC
void	arm_add_smallalloc_pages(void *, void *, int, int);
vm_offset_t arm_ptovirt(vm_paddr_t);
void arm_init_smallalloc(void);
struct arm_small_page {
	void *addr;
	TAILQ_ENTRY(arm_small_page) pg_list;
};

#endif

#define ARM_NOCACHE_KVA_SIZE 0x1000000
extern vm_offset_t arm_nocache_startaddr;
void *arm_remap_nocache(void *, vm_size_t);
void arm_unmap_nocache(void *, vm_size_t);

extern vm_paddr_t dump_avail[];
#endif	/* _KERNEL */

#endif	/* !LOCORE */

#endif	/* !_MACHINE_PMAP_H_ */