/*-
 * Copyright (c) 1991 Regents of the University of California.
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * the Systems Programming Group of the University of Utah Computer
 * Science Department and William Jolitz of UUNET Technologies Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * Derived from the hp300 version by Mike Hibler; this version by William
 * Jolitz uses a recursive map [a pde points to the page directory] to
 * map the page tables using the page tables themselves.  This is done to
 * reduce the impact of large, sparse address spaces on kernel virtual
 * memory, and to reduce the memory cost of each process.
 *
 *	from: hp300: @(#)pmap.h	7.2 (Berkeley) 12/16/90
 *	from: @(#)pmap.h	7.4 (Berkeley) 5/12/91
 * $FreeBSD: stable/10/sys/i386/include/pmap.h 267964 2014-06-27 17:22:18Z jhb $
 */

#ifndef _MACHINE_PMAP_H_
#define	_MACHINE_PMAP_H_

/*
 * Page-directory and page-table entries follow this format.  Some of the
 * fields are absent in some entry types, depending on the CPU mode and
 * features in use.
 */
				/* ---- Intel Nomenclature ---- */
#define	PG_V		0x001	/* P	Valid			*/
#define PG_RW		0x002	/* R/W	Read/Write		*/
#define PG_U		0x004	/* U/S  User/Supervisor		*/
#define	PG_NC_PWT	0x008	/* PWT	Write through		*/
#define	PG_NC_PCD	0x010	/* PCD	Cache disable		*/
#define PG_A		0x020	/* A	Accessed		*/
#define	PG_M		0x040	/* D	Dirty			*/
#define	PG_PS		0x080	/* PS	Page size (0=4k,1=4M)	*/
#define	PG_PTE_PAT	0x080	/* PAT	PAT index		*/
#define	PG_G		0x100	/* G	Global			*/
#define	PG_AVAIL1	0x200	/*    /	Available for system	*/
#define	PG_AVAIL2	0x400	/*   <	programmers use		*/
#define	PG_AVAIL3	0x800	/*    \				*/
#define	PG_PDE_PAT	0x1000	/* PAT	PAT index		*/
#ifdef PAE
#define	PG_NX		(1ull<<63) /* No-execute */
#endif
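
/*
 * Example (illustrative only): a kernel mapping of physical address "pa"
 * would typically combine these bits as
 *
 *	pte = (pa & PG_FRAME) | PG_RW | PG_V | pgeflag;
 *
 * where PG_FRAME is defined below and pgeflag (also declared below)
 * supplies PG_G on CPUs that support global pages.
 */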

/* Our various interpretations of the above */
#define PG_W		PG_AVAIL1	/* "Wired" pseudoflag */
#define	PG_MANAGED	PG_AVAIL2
#ifdef PAE
#define	PG_FRAME	(0x000ffffffffff000ull)
#define	PG_PS_FRAME	(0x000fffffffe00000ull)
#else
#define	PG_FRAME	(~PAGE_MASK)
#define	PG_PS_FRAME	(0xffc00000)
#endif
#define	PG_PROT		(PG_RW|PG_U)	/* all protection bits */
#define PG_N		(PG_NC_PWT|PG_NC_PCD)	/* Non-cacheable */

/* Page level cache control fields used to determine the PAT type */
#define PG_PDE_CACHE	(PG_PDE_PAT | PG_NC_PWT | PG_NC_PCD)
#define PG_PTE_CACHE	(PG_PTE_PAT | PG_NC_PWT | PG_NC_PCD)
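
/*
 * The PAT, PCD, and PWT bits together select the entry in the CPU's PAT
 * MSR that determines the effective memory type for the mapping.
 */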

/*
 * Promotion to a 2 or 4MB (PDE) page mapping requires that the corresponding
 * 4KB (PTE) page mappings have identical settings for the following fields:
 */
#define PG_PTE_PROMOTE	(PG_MANAGED | PG_W | PG_G | PG_PTE_PAT | \
	    PG_M | PG_A | PG_NC_PCD | PG_NC_PWT | PG_U | PG_RW | PG_V)
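
/*
 * A sketch of that test (illustrative; the real check is in pmap.c):
 * every PTE in the page table page (1024 entries without PAE, 512 with
 * PAE) must agree in these bits, e.g.
 *
 *	if ((pte & PG_PTE_PROMOTE) != (first_pte & PG_PTE_PROMOTE))
 *		... refuse to promote ...
 */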

/*
 * Page Protection Exception bits
 */

#define PGEX_P		0x01	/* Protection violation vs. not present */
#define PGEX_W		0x02	/* during a Write cycle */
#define PGEX_U		0x04	/* access from User mode (UPL) */
#define PGEX_RSV	0x08	/* reserved PTE field is non-zero */
#define PGEX_I		0x10	/* during an instruction fetch */

/*
 * Size of the kernel address space.  This is the number of page table
 * pages (4MB each) to use for the kernel.  256 pages == 1 Gigabyte.
 * This **MUST** be a multiple of 4 (e.g., 252, 256, 260).
 * For PAE, the page table page unit size is 2MB.  This means that 512 pages
 * is 1 Gigabyte.  Double everything.  It must be a multiple of 8 for PAE.
 */
#ifndef KVA_PAGES
#ifdef PAE
#define KVA_PAGES	512
#else
#define KVA_PAGES	256
#endif
#endif

/*
 * Pte related macros
 */
#define VADDR(pdi, pti) ((vm_offset_t)(((pdi)<<PDRSHIFT)|((pti)<<PAGE_SHIFT)))
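
/*
 * For example (non-PAE, illustrative): VADDR(1, 1) == 4MB + 4KB, since a
 * page-directory index contributes 1 << PDRSHIFT (4MB here) and a
 * page-table index contributes 1 << PAGE_SHIFT (4KB).
 */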

/* Initial number of kernel page tables. */
#ifndef NKPT
#ifdef PAE
/* 152 page tables needed to map 16G (76B "struct vm_page", 2M page tables). */
#define	NKPT		240
#else
/* 18 page tables needed to map 4G (72B "struct vm_page", 4M page tables). */
#define	NKPT		30
#endif
#endif
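
/*
 * Worked arithmetic for the figures above (illustrative): under PAE,
 * 16GB of RAM is 4M 4KB pages, whose "struct vm_page" array takes
 * 4M * 76B (about 304MB); at 2MB of KVA per page table page, that array
 * alone needs 152 page tables.  The non-PAE line is analogous: 1M pages
 * * 72B is 72MB, and 72MB / 4MB is 18.  NKPT is larger than these
 * minimums, leaving headroom in the bootstrap kernel map.
 */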

#ifndef NKPDE
#define NKPDE	(KVA_PAGES)	/* number of page tables/pde's */
#endif

/*
 * The *PTDI values control the layout of virtual memory
 *
 * XXX This works for now, but I am not real happy with it, I'll fix it
 * right after I fix locore.s and the magic 28K hole
 */
#define	KPTDI		(NPDEPTD-NKPDE)	/* start of kernel virtual pde's */
#define	PTDPTDI		(KPTDI-NPGPTD)	/* ptd entry that points to ptd! */
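
/*
 * For example (non-PAE defaults, illustrative): NPDEPTD is 1024 and
 * NKPDE is 256, so KPTDI is 768 and the kernel's address space begins at
 * VADDR(768, 0) == 0xc0000000; PTDPTDI is then 767, the recursive
 * page-directory slot just below it.
 */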

/*
 * XXX doesn't really belong here I guess...
 */
#define ISA_HOLE_START    0xa0000
#define ISA_HOLE_LENGTH (0x100000-ISA_HOLE_START)

#ifndef LOCORE

#include <sys/queue.h>
#include <sys/_cpuset.h>
#include <sys/_lock.h>
#include <sys/_mutex.h>

#include <vm/_vm_radix.h>

#ifdef PAE

typedef uint64_t pdpt_entry_t;
typedef uint64_t pd_entry_t;
typedef uint64_t pt_entry_t;

#define	PTESHIFT	(3)
#define	PDESHIFT	(3)

#else

typedef uint32_t pd_entry_t;
typedef uint32_t pt_entry_t;

#define	PTESHIFT	(2)
#define	PDESHIFT	(2)

#endif

/*
 * Address of current address space page table maps and directories.
 */
#ifdef _KERNEL
extern pt_entry_t PTmap[];
extern pd_entry_t PTD[];
extern pd_entry_t PTDpde[];

#ifdef PAE
extern pdpt_entry_t *IdlePDPT;
#endif
extern pd_entry_t *IdlePTD;	/* physical address of "Idle" state directory */

/*
 * Translate a virtual address to the kernel virtual address of its page table
 * entry (PTE).  This can be used recursively.  If the address of a PTE as
 * previously returned by this macro is itself given as the argument, then the
 * address of the page directory entry (PDE) that maps the PTE will be
 * returned.
 *
 * This macro may be used before pmap_bootstrap() is called.
 */
#define	vtopte(va)	(PTmap + i386_btop(va))
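
/*
 * For example (illustrative): vtopte((vm_offset_t)vtopte(va)) yields the
 * address of the PDE that maps "va", because PTmap is itself reached
 * through the recursive PDE at index PTDPTDI.
 */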

/*
 * Translate a virtual address to its physical address.
 *
 * This macro may be used before pmap_bootstrap() is called.
 */
#define	vtophys(va)	pmap_kextract((vm_offset_t)(va))

#if defined(XEN)
#include <sys/param.h>

#include <xen/xen-os.h>

#include <machine/xen/xenvar.h>
#include <machine/xen/xenpmap.h>

extern pt_entry_t pg_nx;

#define PG_KERNEL  (PG_V | PG_A | PG_RW | PG_M)

#define MACH_TO_VM_PAGE(ma) PHYS_TO_VM_PAGE(xpmap_mtop((ma)))
#define VM_PAGE_TO_MACH(m) xpmap_ptom(VM_PAGE_TO_PHYS((m)))

#define VTOM(va) xpmap_ptom(VTOP(va))

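/*
 * Extract the machine (host-physical, in Xen's terminology) address that
 * backs kernel virtual address "va", handling both 4MB superpage and 4KB
 * page mappings.
 */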
static __inline vm_paddr_t
pmap_kextract_ma(vm_offset_t va)
{
	vm_paddr_t ma;

	if ((ma = PTD[va >> PDRSHIFT]) & PG_PS) {
		ma = (ma & ~(NBPDR - 1)) | (va & (NBPDR - 1));
	} else {
		ma = (*vtopte(va) & PG_FRAME) | (va & PAGE_MASK);
	}
	return (ma);
}

static __inline vm_paddr_t
pmap_kextract(vm_offset_t va)
{

	return (xpmap_mtop(pmap_kextract_ma(va)));
}
#define vtomach(va)     pmap_kextract_ma(((vm_offset_t) (va)))

vm_paddr_t pmap_extract_ma(struct pmap *pmap, vm_offset_t va);

void    pmap_kenter_ma(vm_offset_t va, vm_paddr_t pa);
void    pmap_map_readonly(struct pmap *pmap, vm_offset_t va, int len);
void    pmap_map_readwrite(struct pmap *pmap, vm_offset_t va, int len);

static __inline pt_entry_t
pte_load_store(pt_entry_t *ptep, pt_entry_t v)
{
	pt_entry_t r;

	r = *ptep;
	PT_SET_VA(ptep, v, TRUE);
	return (r);
}

static __inline pt_entry_t
pte_load_store_ma(pt_entry_t *ptep, pt_entry_t v)
{
	pt_entry_t r;

	r = *ptep;
	PT_SET_VA_MA(ptep, v, TRUE);
	return (r);
}

#define	pte_load_clear(ptep)	pte_load_store((ptep), (pt_entry_t)0ULL)

#define	pte_store(ptep, pte)	pte_load_store((ptep), (pt_entry_t)pte)
#define	pte_store_ma(ptep, pte)	pte_load_store_ma((ptep), (pt_entry_t)pte)
#define	pde_store_ma(ptep, pte)	pte_load_store_ma((ptep), (pt_entry_t)pte)

#elif !defined(XEN)

/*
 * KPTmap is a linear mapping of the kernel page table.  It differs from the
 * recursive mapping in two ways: (1) it only provides access to kernel page
 * table pages, and not user page table pages, and (2) it provides access to
 * a kernel page table page after the corresponding virtual addresses have
 * been promoted to a 2/4MB page mapping.
 *
 * KPTmap is first initialized by locore to support just NKPT page table
 * pages.  Later, it is reinitialized by pmap_bootstrap() to allow for
 * expansion of the kernel page table.
 */
extern pt_entry_t *KPTmap;

/*
 * Extract from the kernel page table the physical address that is mapped by
 * the given virtual address "va".
 *
 * This function may be used before pmap_bootstrap() is called.
 */
static __inline vm_paddr_t
pmap_kextract(vm_offset_t va)
{
	vm_paddr_t pa;

	if ((pa = PTD[va >> PDRSHIFT]) & PG_PS) {
		pa = (pa & PG_PS_FRAME) | (va & PDRMASK);
	} else {
		/*
		 * Beware of a concurrent promotion that changes the PDE at
		 * this point!  For example, vtopte() must not be used to
		 * access the PTE because it would use the new PDE.  It is,
		 * however, safe to use the old PDE because the page table
		 * page is preserved by the promotion.
		 */
		pa = KPTmap[i386_btop(va)];
		pa = (pa & PG_FRAME) | (va & PAGE_MASK);
	}
	return (pa);
}
#endif

#if !defined(XEN)
#define PT_UPDATES_FLUSH()
#endif

#if defined(PAE) && !defined(XEN)

#define	pde_cmpset(pdep, old, new)	atomic_cmpset_64_i586(pdep, old, new)
#define	pte_load_store(ptep, pte)	atomic_swap_64_i586(ptep, pte)
#define	pte_load_clear(ptep)		atomic_swap_64_i586(ptep, 0)
#define	pte_store(ptep, pte)		atomic_store_rel_64_i586(ptep, pte)

extern pt_entry_t pg_nx;

#elif !defined(PAE) && !defined(XEN)

#define	pde_cmpset(pdep, old, new)	atomic_cmpset_int(pdep, old, new)
#define	pte_load_store(ptep, pte)	atomic_swap_int(ptep, pte)
#define	pte_load_clear(ptep)		atomic_swap_int(ptep, 0)
#define	pte_store(ptep, pte) do { \
	*(u_int *)(ptep) = (u_int)(pte); \
} while (0)

#endif /* PAE */
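
/*
 * Note: PAE entries are 64 bits wide, so the i586 (cmpxchg8b-based)
 * atomics above are needed to load and store them without tearing; in
 * the non-PAE case a plain 32-bit store is already atomic.
 */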

#define	pte_clear(ptep)			pte_store(ptep, 0)

#define	pde_store(pdep, pde)		pte_store(pdep, pde)

#endif /* _KERNEL */

/*
 * Pmap stuff
 */
struct	pv_entry;
struct	pv_chunk;

struct md_page {
	TAILQ_HEAD(,pv_entry)	pv_list;
	int			pat_mode;
};

struct pmap {
	struct mtx		pm_mtx;
	pd_entry_t		*pm_pdir;	/* KVA of page directory */
	TAILQ_HEAD(,pv_chunk)	pm_pvchunk;	/* list of mappings in pmap */
	cpuset_t		pm_active;	/* active on cpus */
	struct pmap_statistics	pm_stats;	/* pmap statistics */
	LIST_ENTRY(pmap) 	pm_list;	/* List of all pmaps */
#ifdef PAE
	pdpt_entry_t		*pm_pdpt;	/* KVA of page directory
						   pointer table */
#endif
	struct vm_radix		pm_root;	/* spare page table pages */
};

typedef struct pmap	*pmap_t;

#ifdef _KERNEL
extern struct pmap	kernel_pmap_store;
#define kernel_pmap	(&kernel_pmap_store)

#define	PMAP_LOCK(pmap)		mtx_lock(&(pmap)->pm_mtx)
#define	PMAP_LOCK_ASSERT(pmap, type) \
				mtx_assert(&(pmap)->pm_mtx, (type))
#define	PMAP_LOCK_DESTROY(pmap)	mtx_destroy(&(pmap)->pm_mtx)
#define	PMAP_LOCK_INIT(pmap)	mtx_init(&(pmap)->pm_mtx, "pmap", \
				    NULL, MTX_DEF | MTX_DUPOK)
#define	PMAP_LOCKED(pmap)	mtx_owned(&(pmap)->pm_mtx)
#define	PMAP_MTX(pmap)		(&(pmap)->pm_mtx)
#define	PMAP_TRYLOCK(pmap)	mtx_trylock(&(pmap)->pm_mtx)
#define	PMAP_UNLOCK(pmap)	mtx_unlock(&(pmap)->pm_mtx)
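
/*
 * Typical usage (an illustrative sketch, not taken from pmap.c):
 *
 *	PMAP_LOCK(pmap);
 *	pte = pmap_pte(pmap, va);
 *	...
 *	PMAP_UNLOCK(pmap);
 */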
#endif

/*
 * For each vm_page_t, there is a list of all currently valid virtual
 * mappings of that page.  An entry is a pv_entry_t, the list is pv_list.
 */
typedef struct pv_entry {
	vm_offset_t	pv_va;		/* virtual address for mapping */
	TAILQ_ENTRY(pv_entry)	pv_next;
} *pv_entry_t;

/*
 * pv_entries are allocated in chunks per-process.  This avoids the
 * need to track per-pmap assignments.
 */
#define	_NPCM	11
#define	_NPCPV	336
struct pv_chunk {
	pmap_t			pc_pmap;
	TAILQ_ENTRY(pv_chunk)	pc_list;
	uint32_t		pc_map[_NPCM];	/* bitmap; 1 = free */
	TAILQ_ENTRY(pv_chunk)	pc_lru;
	struct pv_entry		pc_pventry[_NPCPV];
};
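
/*
 * Sizing sketch (assuming 32-bit pointers): a pv_entry is 12 bytes, so
 * the 336 entries in pc_pventry occupy 4032 bytes; the 64-byte header
 * (pc_pmap, two TAILQ_ENTRYs, and the 11-word pc_map bitmap, of which
 * 336 of 352 bits are used) brings a pv_chunk to exactly one 4KB page.
 */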

#ifdef	_KERNEL

extern caddr_t	CADDR3;
extern pt_entry_t *CMAP3;
extern vm_paddr_t phys_avail[];
extern vm_paddr_t dump_avail[];
extern int pseflag;
extern int pgeflag;
extern char *ptvmmap;		/* poor name! */
extern vm_offset_t virtual_avail;
extern vm_offset_t virtual_end;

#define	pmap_page_get_memattr(m)	((vm_memattr_t)(m)->md.pat_mode)
#define	pmap_page_is_write_mapped(m)	(((m)->aflags & PGA_WRITEABLE) != 0)
#define	pmap_unmapbios(va, sz)	pmap_unmapdev((va), (sz))

/*
 * Only the following functions or macros may be used before pmap_bootstrap()
 * is called: pmap_kenter(), pmap_kextract(), pmap_kremove(), vtophys(), and
 * vtopte().
 */
void	pmap_bootstrap(vm_paddr_t);
int	pmap_cache_bits(int mode, boolean_t is_pde);
int	pmap_change_attr(vm_offset_t, vm_size_t, int);
void	pmap_init_pat(void);
void	pmap_kenter(vm_offset_t va, vm_paddr_t pa);
void	*pmap_kenter_temporary(vm_paddr_t pa, int i);
void	pmap_kremove(vm_offset_t);
void	*pmap_mapbios(vm_paddr_t, vm_size_t);
void	*pmap_mapdev(vm_paddr_t, vm_size_t);
void	*pmap_mapdev_attr(vm_paddr_t, vm_size_t, int);
boolean_t pmap_page_is_mapped(vm_page_t m);
void	pmap_page_set_memattr(vm_page_t m, vm_memattr_t ma);
void	pmap_unmapdev(vm_offset_t, vm_size_t);
pt_entry_t *pmap_pte(pmap_t, vm_offset_t) __pure2;
void	pmap_invalidate_page(pmap_t, vm_offset_t);
void	pmap_invalidate_range(pmap_t, vm_offset_t, vm_offset_t);
void	pmap_invalidate_all(pmap_t);
void	pmap_invalidate_cache(void);
void	pmap_invalidate_cache_pages(vm_page_t *pages, int count);
void	pmap_invalidate_cache_range(vm_offset_t sva, vm_offset_t eva);

#endif /* _KERNEL */

#endif /* !LOCORE */

#endif /* !_MACHINE_PMAP_H_ */