pmap-v4.h revision 152128
/*-
 * Copyright (c) 1991 Regents of the University of California.
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * the Systems Programming Group of the University of Utah Computer
 * Science Department and William Jolitz of UUNET Technologies Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *      This product includes software developed by the University of
 *      California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * Derived from hp300 version by Mike Hibler, this version by William
 * Jolitz uses a recursive map [a pde points to the page directory] to
 * map the page tables using the pagetables themselves. This is done to
 * reduce the impact on kernel virtual memory for lots of sparse address
 * space, and to reduce the cost of memory to each process.
 *
 *      from: hp300: @(#)pmap.h 7.2 (Berkeley) 12/16/90
 *      from: @(#)pmap.h        7.4 (Berkeley) 5/12/91
 *	from: FreeBSD: src/sys/i386/include/pmap.h,v 1.70 2000/11/30
 *
 * $FreeBSD: head/sys/arm/include/pmap.h 152128 2005-11-06 16:10:28Z cognet $
 */

#ifndef _MACHINE_PMAP_H_
#define _MACHINE_PMAP_H_

#include <machine/pte.h>

/*
 * Pte related macros
 */
#define PTE_NOCACHE	0
#define PTE_CACHE	1
#define PTE_PAGETABLE	2

#ifndef LOCORE

#include <sys/queue.h>

#define PDESIZE		sizeof(pd_entry_t)	/* for assembly files */
#define PTESIZE		sizeof(pt_entry_t)	/* for assembly files */

#ifdef _KERNEL

#define vtophys(va)	pmap_extract(pmap_kernel(), (vm_offset_t)(va))
#define pmap_kextract(va)	pmap_extract(pmap_kernel(), (vm_offset_t)(va))
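/*
 * Illustrative only: both wrappers simply hand a kernel virtual address
 * to pmap_extract() on the kernel pmap, so e.g. a driver could obtain
 * the physical address of a kernel buffer (the variable names here are
 * made up):
 *
 *	vm_paddr_t pa = vtophys(kernel_buf);
 */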

#endif

#define	pmap_page_is_mapped(m)	(!TAILQ_EMPTY(&(m)->md.pv_list))
/*
 * Pmap stuff
 */

/*
 * This structure is used to hold a virtual<->physical address
 * association and is used mostly by bootstrap code
 */
struct pv_addr {
	SLIST_ENTRY(pv_addr) pv_list;
	vm_offset_t	pv_va;
	vm_paddr_t	pv_pa;
};
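/*
 * Sketch of typical bootstrap usage (illustrative; the variable names
 * and arguments are placeholders, not part of this interface): early
 * machine-dependent code records where it placed a statically
 * allocated page table and later hands that record to the pmap layer:
 *
 *	struct pv_addr kernel_l1pt;
 *	kernel_l1pt.pv_pa = <physical address of the L1 table>;
 *	kernel_l1pt.pv_va = <virtual address it is mapped at>;
 *	...
 *	pmap_bootstrap(first_kva, last_kva, &kernel_l1pt);
 */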

struct	pv_entry;

struct	md_page {
	int pvh_attrs;
	u_int uro_mappings;
	u_int urw_mappings;
	union {
		u_short s_mappings[2]; /* Assume kernel count <= 65535 */
		u_int i_mappings;
	} k_u;
#define	kro_mappings	k_u.s_mappings[0]
#define	krw_mappings	k_u.s_mappings[1]
#define	k_mappings	k_u.i_mappings
	int			pv_list_count;
	TAILQ_HEAD(,pv_entry)	pv_list;
};

#define	VM_MDPAGE_INIT(pg)						\
do {									\
	TAILQ_INIT(&(pg)->mdpage.pv_list);				\
	mtx_init(&(pg)->mdpage.pvh_mtx, "MDPAGE Mutex", NULL, MTX_DEF);\
	(pg)->mdpage.pvh_attrs = 0;					\
	(pg)->mdpage.uro_mappings = 0;					\
	(pg)->mdpage.urw_mappings = 0;					\
	(pg)->mdpage.k_mappings = 0;					\
} while (/*CONSTCOND*/0)

struct l1_ttable;
struct l2_dtable;


/*
 * The number of L2 descriptor tables which can be tracked by an l2_dtable.
 * A bucket size of 16 provides for 16MB of contiguous virtual address
 * space per l2_dtable. Most processes will, therefore, require only two or
 * three of these to map their whole working set.
 */
#define	L2_BUCKET_LOG2	4
#define	L2_BUCKET_SIZE	(1 << L2_BUCKET_LOG2)
/*
 * Given the above "L2-descriptors-per-l2_dtable" constant, the number
 * of l2_dtable structures required to track all possible page descriptors
 * mappable by an L1 translation table is given by the following constants:
 */
#define	L2_LOG2		((32 - L1_S_SHIFT) - L2_BUCKET_LOG2)
#define	L2_SIZE		(1 << L2_LOG2)
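/*
 * Worked example, assuming the usual ARM 1MB section size
 * (L1_S_SHIFT == 20): L2_LOG2 = (32 - 20) - 4 = 8, so L2_SIZE = 256.
 * Each l2_dtable then covers L2_BUCKET_SIZE (16) L1 slots of 1MB each,
 * i.e. 16MB, and 256 of them span the entire 4GB address space.
 */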

struct	pmap {
	u_int8_t		pm_domain;
	struct l1_ttable	*pm_l1;
	struct l2_dtable	*pm_l2[L2_SIZE];
	pd_entry_t		*pm_pdir;	/* KVA of page directory */
	int			pm_count;	/* reference count */
	int			pm_active;	/* active on cpus */
	struct pmap_statistics	pm_stats;	/* pmap statistics */
	TAILQ_HEAD(,pv_entry)	pm_pvlist;	/* list of mappings in pmap */
	LIST_ENTRY(pmap)	pm_list;	/* List of all pmaps */
};

typedef struct pmap *pmap_t;

#ifdef _KERNEL
extern pmap_t	kernel_pmap;
#define pmap_kernel() kernel_pmap

#endif


/*
 * For each vm_page_t, there is a list of all currently valid virtual
 * mappings of that page.  An entry is a pv_entry_t, the list is pv_table.
 */
typedef struct pv_entry {
	pmap_t          pv_pmap;        /* pmap where mapping lies */
	vm_offset_t     pv_va;          /* virtual address for mapping */
	TAILQ_ENTRY(pv_entry)   pv_list;
	TAILQ_ENTRY(pv_entry)	pv_plist;
	int		pv_flags;	/* flags (wired, etc...) */
} *pv_entry_t;

#define PV_ENTRY_NULL   ((pv_entry_t) 0)

#ifdef _KERNEL

boolean_t pmap_get_pde_pte(pmap_t, vm_offset_t, pd_entry_t **, pt_entry_t **);

/*
 * virtual address to page table entry and
 * to physical address. Likewise for alternate address space.
 * Note: these work recursively, thus vtopte of a pte will give
 * the corresponding pde that in turn maps it.
 */

/*
 * The current top of kernel VM.
 */
extern vm_offset_t pmap_curmaxkvaddr;

struct pcb;

void	pmap_set_pcb_pagedir(pmap_t, struct pcb *);
/* Virtual address to page table entry */
static __inline pt_entry_t *
vtopte(vm_offset_t va)
{
	pd_entry_t *pdep;
	pt_entry_t *ptep;

	if (pmap_get_pde_pte(pmap_kernel(), va, &pdep, &ptep) == FALSE)
		return (NULL);
	return (ptep);
}
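/*
 * Example (illustrative only): walking from a kernel virtual address
 * to the physical address behind it by way of its PTE, using the
 * l2pte_*() helpers defined further down in this file:
 *
 *	pt_entry_t *ptep = vtopte(va);
 *	if (ptep != NULL && l2pte_valid(*ptep))
 *		pa = l2pte_pa(*ptep) | (va & PAGE_MASK);
 */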

extern vm_offset_t avail_end;
extern vm_offset_t clean_eva;
extern vm_offset_t clean_sva;
extern vm_offset_t phys_avail[];
extern vm_offset_t virtual_avail;
extern vm_offset_t virtual_end;

void	pmap_bootstrap(vm_offset_t, vm_offset_t, struct pv_addr *);
void	pmap_kenter(vm_offset_t va, vm_paddr_t pa);
void	pmap_kenter_user(vm_offset_t va, vm_paddr_t pa);
void	pmap_kremove(vm_offset_t);
void	*pmap_mapdev(vm_offset_t, vm_size_t);
void	pmap_unmapdev(vm_offset_t, vm_size_t);
vm_page_t	pmap_use_pt(pmap_t, vm_offset_t);
void	pmap_debug(int);
void	pmap_map_section(vm_offset_t, vm_offset_t, vm_offset_t, int, int);
void	pmap_link_l2pt(vm_offset_t, vm_offset_t, struct pv_addr *);
vm_size_t	pmap_map_chunk(vm_offset_t, vm_offset_t, vm_offset_t, vm_size_t, int, int);
void	pmap_map_entry(vm_offset_t l1pt, vm_offset_t va, vm_offset_t pa,
	    int prot, int cache);
int	pmap_fault_fixup(pmap_t, vm_offset_t, vm_prot_t, int);

/*
 * Definitions for MMU domains
 */
#define	PMAP_DOMAINS		15	/* 15 'user' domains (0-14) */
#define	PMAP_DOMAIN_KERNEL	15	/* The kernel uses domain #15 */

/*
 * The new pmap ensures that page-tables are always mapping Write-Thru.
 * Thus, on some platforms we can run fast and loose and avoid syncing PTEs
 * on every change.
 *
 * Unfortunately, not all CPUs have a write-through cache mode.  So we
 * define PMAP_NEEDS_PTE_SYNC for C code to conditionally do PTE syncs,
 * and if there is the chance for PTE syncs to be needed, we define
 * PMAP_INCLUDE_PTE_SYNC so e.g. assembly code can include (and run)
 * the code.
 */
extern int pmap_needs_pte_sync;

/*
 * These macros define the various bit masks in the PTE.
 *
 * We use these macros since we use different bits on different processor
 * models.
 */
#define	L1_S_PROT_U		(L1_S_AP(AP_U))
#define	L1_S_PROT_W		(L1_S_AP(AP_W))
#define	L1_S_PROT_MASK		(L1_S_PROT_U|L1_S_PROT_W)

#define	L1_S_CACHE_MASK_generic	(L1_S_B|L1_S_C)
#define	L1_S_CACHE_MASK_xscale	(L1_S_B|L1_S_C|L1_S_XSCALE_TEX(TEX_XSCALE_X))

#define	L2_L_PROT_U		(L2_AP(AP_U))
#define	L2_L_PROT_W		(L2_AP(AP_W))
#define	L2_L_PROT_MASK		(L2_L_PROT_U|L2_L_PROT_W)

#define	L2_L_CACHE_MASK_generic	(L2_B|L2_C)
#define	L2_L_CACHE_MASK_xscale	(L2_B|L2_C|L2_XSCALE_L_TEX(TEX_XSCALE_X))

#define	L2_S_PROT_U_generic	(L2_AP(AP_U))
#define	L2_S_PROT_W_generic	(L2_AP(AP_W))
#define	L2_S_PROT_MASK_generic	(L2_S_PROT_U|L2_S_PROT_W)

#define	L2_S_PROT_U_xscale	(L2_AP0(AP_U))
#define	L2_S_PROT_W_xscale	(L2_AP0(AP_W))
#define	L2_S_PROT_MASK_xscale	(L2_S_PROT_U|L2_S_PROT_W)

#define	L2_S_CACHE_MASK_generic	(L2_B|L2_C)
#define	L2_S_CACHE_MASK_xscale	(L2_B|L2_C|L2_XSCALE_T_TEX(TEX_XSCALE_X))

#define	L1_S_PROTO_generic	(L1_TYPE_S | L1_S_IMP)
#define	L1_S_PROTO_xscale	(L1_TYPE_S)

#define	L1_C_PROTO_generic	(L1_TYPE_C | L1_C_IMP2)
#define	L1_C_PROTO_xscale	(L1_TYPE_C)

#define	L2_L_PROTO		(L2_TYPE_L)

#define	L2_S_PROTO_generic	(L2_TYPE_S)
#define	L2_S_PROTO_xscale	(L2_TYPE_XSCALE_XS)

/*
 * User-visible names for the ones that vary with MMU class.
 */

#if ARM_NMMUS > 1
/* More than one MMU class configured; use variables. */
#define	L2_S_PROT_U		pte_l2_s_prot_u
#define	L2_S_PROT_W		pte_l2_s_prot_w
#define	L2_S_PROT_MASK		pte_l2_s_prot_mask

#define	L1_S_CACHE_MASK		pte_l1_s_cache_mask
#define	L2_L_CACHE_MASK		pte_l2_l_cache_mask
#define	L2_S_CACHE_MASK		pte_l2_s_cache_mask

#define	L1_S_PROTO		pte_l1_s_proto
#define	L1_C_PROTO		pte_l1_c_proto
#define	L2_S_PROTO		pte_l2_s_proto

#elif (ARM_MMU_GENERIC + ARM_MMU_SA1) != 0
#define	L2_S_PROT_U		L2_S_PROT_U_generic
#define	L2_S_PROT_W		L2_S_PROT_W_generic
#define	L2_S_PROT_MASK		L2_S_PROT_MASK_generic

#define	L1_S_CACHE_MASK		L1_S_CACHE_MASK_generic
#define	L2_L_CACHE_MASK		L2_L_CACHE_MASK_generic
#define	L2_S_CACHE_MASK		L2_S_CACHE_MASK_generic

#define	L1_S_PROTO		L1_S_PROTO_generic
#define	L1_C_PROTO		L1_C_PROTO_generic
#define	L2_S_PROTO		L2_S_PROTO_generic

#elif ARM_MMU_XSCALE == 1
#define	L2_S_PROT_U		L2_S_PROT_U_xscale
#define	L2_S_PROT_W		L2_S_PROT_W_xscale
#define	L2_S_PROT_MASK		L2_S_PROT_MASK_xscale

#define	L1_S_CACHE_MASK		L1_S_CACHE_MASK_xscale
#define	L2_L_CACHE_MASK		L2_L_CACHE_MASK_xscale
#define	L2_S_CACHE_MASK		L2_S_CACHE_MASK_xscale

#define	L1_S_PROTO		L1_S_PROTO_xscale
#define	L1_C_PROTO		L1_C_PROTO_xscale
#define	L2_S_PROTO		L2_S_PROTO_xscale

#endif /* ARM_NMMUS > 1 */

#if (ARM_MMU_SA1 == 1) && (ARM_NMMUS == 1)
#define	PMAP_NEEDS_PTE_SYNC	1
#define	PMAP_INCLUDE_PTE_SYNC
#elif (ARM_MMU_SA1 == 0)
#define	PMAP_NEEDS_PTE_SYNC	0
#endif

/*
 * These macros return various bits based on kernel/user and protection.
 * Note that the compiler will usually fold these at compile time.
 */
#define	L1_S_PROT(ku, pr)	((((ku) == PTE_USER) ? L1_S_PROT_U : 0) | \
				 (((pr) & VM_PROT_WRITE) ? L1_S_PROT_W : 0))

#define	L2_L_PROT(ku, pr)	((((ku) == PTE_USER) ? L2_L_PROT_U : 0) | \
				 (((pr) & VM_PROT_WRITE) ? L2_L_PROT_W : 0))

#define	L2_S_PROT(ku, pr)	((((ku) == PTE_USER) ? L2_S_PROT_U : 0) | \
				 (((pr) & VM_PROT_WRITE) ? L2_S_PROT_W : 0))
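/*
 * For example, L2_S_PROT(PTE_USER, VM_PROT_READ | VM_PROT_WRITE) folds
 * to (L2_S_PROT_U | L2_S_PROT_W), while a kernel read-only request such
 * as L2_S_PROT(PTE_KERNEL, VM_PROT_READ) folds to 0, since kernel read
 * access needs no additional AP bits.
 */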

/*
 * Macros to test if a mapping is mappable with an L1 Section mapping
 * or an L2 Large Page mapping.
 */
#define	L1_S_MAPPABLE_P(va, pa, size)					\
	((((va) | (pa)) & L1_S_OFFSET) == 0 && (size) >= L1_S_SIZE)

#define	L2_L_MAPPABLE_P(va, pa, size)					\
	((((va) | (pa)) & L2_L_OFFSET) == 0 && (size) >= L2_L_SIZE)
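/*
 * For example, with the usual ARM sizes (L1_S_SIZE == 1MB and
 * L2_L_SIZE == 64KB): a 4MB mapping whose va and pa are both 1MB
 * aligned satisfies L1_S_MAPPABLE_P(), while a 128KB mapping that is
 * only 64KB aligned can still be built from L2 Large Pages per
 * L2_L_MAPPABLE_P().
 */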

/*
 * Provide a fallback in case we were not able to determine it at
 * compile-time.
 */
#ifndef PMAP_NEEDS_PTE_SYNC
#define	PMAP_NEEDS_PTE_SYNC	pmap_needs_pte_sync
#define	PMAP_INCLUDE_PTE_SYNC
#endif

#define	PTE_SYNC(pte)							\
do {									\
	if (PMAP_NEEDS_PTE_SYNC)					\
		cpu_dcache_wb_range((vm_offset_t)(pte), sizeof(pt_entry_t));\
} while (/*CONSTCOND*/0)

#define	PTE_SYNC_RANGE(pte, cnt)					\
do {									\
	if (PMAP_NEEDS_PTE_SYNC) {					\
		cpu_dcache_wb_range((vm_offset_t)(pte),			\
		    (cnt) << 2); /* * sizeof(pt_entry_t) */		\
	}								\
} while (/*CONSTCOND*/0)
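/*
 * Typical use, sketched here for illustration only: after storing a
 * new descriptor, the PTE is written back out of the data cache so the
 * MMU table walker sees the update:
 *
 *	*ptep = L2_S_PROTO | pa |
 *	    L2_S_PROT(PTE_KERNEL, VM_PROT_READ | VM_PROT_WRITE) |
 *	    pte_l2_s_cache_mode;
 *	PTE_SYNC(ptep);
 */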

extern pt_entry_t		pte_l1_s_cache_mode;
extern pt_entry_t		pte_l1_s_cache_mask;

extern pt_entry_t		pte_l2_l_cache_mode;
extern pt_entry_t		pte_l2_l_cache_mask;

extern pt_entry_t		pte_l2_s_cache_mode;
extern pt_entry_t		pte_l2_s_cache_mask;

extern pt_entry_t		pte_l1_s_cache_mode_pt;
extern pt_entry_t		pte_l2_l_cache_mode_pt;
extern pt_entry_t		pte_l2_s_cache_mode_pt;

extern pt_entry_t		pte_l2_s_prot_u;
extern pt_entry_t		pte_l2_s_prot_w;
extern pt_entry_t		pte_l2_s_prot_mask;

extern pt_entry_t		pte_l1_s_proto;
extern pt_entry_t		pte_l1_c_proto;
extern pt_entry_t		pte_l2_s_proto;

extern void (*pmap_copy_page_func)(vm_paddr_t, vm_paddr_t);
extern void (*pmap_zero_page_func)(vm_paddr_t, int, int);

#if (ARM_MMU_GENERIC + ARM_MMU_SA1) != 0
void	pmap_copy_page_generic(vm_paddr_t, vm_paddr_t);
void	pmap_zero_page_generic(vm_paddr_t, int, int);

void	pmap_pte_init_generic(void);
#if defined(CPU_ARM8)
void	pmap_pte_init_arm8(void);
#endif
#if defined(CPU_ARM9)
void	pmap_pte_init_arm9(void);
#endif /* CPU_ARM9 */
#if defined(CPU_ARM10)
void	pmap_pte_init_arm10(void);
#endif /* CPU_ARM10 */
#endif /* (ARM_MMU_GENERIC + ARM_MMU_SA1) != 0 */

#if /* ARM_MMU_SA1 == */1
void	pmap_pte_init_sa1(void);
#endif /* ARM_MMU_SA1 == 1 */

#if ARM_MMU_XSCALE == 1
void	pmap_copy_page_xscale(vm_paddr_t, vm_paddr_t);
void	pmap_zero_page_xscale(vm_paddr_t, int, int);

void	pmap_pte_init_xscale(void);

void	xscale_setup_minidata(vm_offset_t, vm_offset_t, vm_offset_t);

void	pmap_use_minicache(vm_offset_t, vm_size_t);
#endif /* ARM_MMU_XSCALE == 1 */
#define PTE_KERNEL	0
#define PTE_USER	1
#define	l1pte_valid(pde)	((pde) != 0)
#define	l1pte_section_p(pde)	(((pde) & L1_TYPE_MASK) == L1_TYPE_S)
#define	l1pte_page_p(pde)	(((pde) & L1_TYPE_MASK) == L1_TYPE_C)
#define	l1pte_fpage_p(pde)	(((pde) & L1_TYPE_MASK) == L1_TYPE_F)

#define l2pte_index(v)		(((v) & L2_ADDR_BITS) >> L2_S_SHIFT)
#define	l2pte_valid(pte)	((pte) != 0)
#define	l2pte_pa(pte)		((pte) & L2_S_FRAME)
#define l2pte_minidata(pte)	(((pte) & \
				 (L2_B | L2_C | L2_XSCALE_T_TEX(TEX_XSCALE_X)))\
				 == (L2_C | L2_XSCALE_T_TEX(TEX_XSCALE_X)))
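/*
 * Worked example, assuming the usual values from <machine/pte.h>
 * (L2_ADDR_BITS == 0x000ff000, L2_S_SHIFT == 12, L2_S_FRAME ==
 * 0xfffff000): for va == 0xc0123456, l2pte_index(va) is 0x23, the slot
 * within the 256-entry L2 table covering that 1MB of address space,
 * and l2pte_pa(*ptep) masks off the low 12 bits to give the 4KB-aligned
 * physical frame address.
 */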

/* L1 and L2 page table macros */
#define pmap_pde_v(pde)		l1pte_valid(*(pde))
#define pmap_pde_section(pde)	l1pte_section_p(*(pde))
#define pmap_pde_page(pde)	l1pte_page_p(*(pde))
#define pmap_pde_fpage(pde)	l1pte_fpage_p(*(pde))

#define	pmap_pte_v(pte)		l2pte_valid(*(pte))
#define	pmap_pte_pa(pte)	l2pte_pa(*(pte))

/*
 * Flags that indicate attributes of pages or mappings of pages.
 *
 * The PVF_MOD and PVF_REF flags are stored in the mdpage for each
 * page.  PVF_WIRED, PVF_WRITE, and PVF_NC are kept in individual
 * pv_entry's for each page.  They live in the same "namespace" so
 * that we can clear multiple attributes at a time.
 *
 * Note the "non-cacheable" flag generally means the page has
 * multiple mappings in a given address space.
 */
#define	PVF_MOD		0x01		/* page is modified */
#define	PVF_REF		0x02		/* page is referenced */
#define	PVF_WIRED	0x04		/* mapping is wired */
#define	PVF_WRITE	0x08		/* mapping is writable */
#define	PVF_EXEC	0x10		/* mapping is executable */
#define	PVF_UNC		0x20		/* mapping is 'user' non-cacheable */
#define	PVF_KNC		0x40		/* mapping is 'kernel' non-cacheable */
#define	PVF_NC		(PVF_UNC|PVF_KNC)
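/*
 * For instance, a wired, writable kernel mapping carries
 * (PVF_WIRED | PVF_WRITE) in its pv_entry's pv_flags, while PVF_MOD and
 * PVF_REF accumulate in the page's mdpage as the page is written and
 * referenced.
 */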

void vector_page_setprot(int);

void pmap_update(pmap_t);

/*
 * This structure is used by machine-dependent code to describe
 * static mappings of devices, created at bootstrap time.
 */
struct pmap_devmap {
	vm_offset_t	pd_va;		/* virtual address */
	vm_paddr_t	pd_pa;		/* physical address */
	vm_size_t	pd_size;	/* size of region */
	vm_prot_t	pd_prot;	/* protection code */
	int		pd_cache;	/* cache attributes */
};

const struct pmap_devmap *pmap_devmap_find_pa(vm_paddr_t, vm_size_t);
const struct pmap_devmap *pmap_devmap_find_va(vm_offset_t, vm_size_t);

void	pmap_devmap_bootstrap(vm_offset_t, const struct pmap_devmap *);
void	pmap_devmap_register(const struct pmap_devmap *);
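/*
 * Illustrative sketch (all addresses and names hypothetical): a board's
 * early start-up code can describe its static device mappings in a
 * table terminated by a zero-size entry and establish them in one go:
 *
 *	static const struct pmap_devmap board_devmap[] = {
 *		{ 0xfe000000, 0x48000000, 0x00100000,
 *		    VM_PROT_READ | VM_PROT_WRITE, PTE_NOCACHE },
 *		{ 0, 0, 0, 0, 0 }
 *	};
 *	...
 *	pmap_devmap_bootstrap(l1pt_va, board_devmap);
 */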

#define SECTION_CACHE	0x1
#define SECTION_PT	0x2
void	pmap_kenter_section(vm_offset_t, vm_paddr_t, int flags);

extern char *_tmppt;

void	pmap_postinit(void);

#ifdef ARM_USE_SMALL_ALLOC
void	arm_add_smallalloc_pages(void *, void *, int, int);
void	arm_busy_pages(void);
struct arm_small_page {
	void *addr;
	TAILQ_ENTRY(arm_small_page) pg_list;
};

#endif
extern vm_paddr_t dump_avail[];
#endif	/* _KERNEL */

#endif	/* !LOCORE */

#endif	/* !_MACHINE_PMAP_H_ */
