/*-
 * Copyright (c) 1991 Regents of the University of California.
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * the Systems Programming Group of the University of Utah Computer
 * Science Department and William Jolitz of UUNET Technologies Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * Derived from hp300 version by Mike Hibler, this version by William
 * Jolitz uses a recursive map [a pde points to the page directory] to
 * map the page tables using the pagetables themselves. This is done to
 * reduce the impact on kernel virtual memory for lots of sparse address
 * space, and to reduce the cost of memory to each process.
 *
 *	from: hp300: @(#)pmap.h 7.2 (Berkeley) 12/16/90
 *	from: @(#)pmap.h	7.4 (Berkeley) 5/12/91
 *	from: src/sys/i386/include/pmap.h,v 1.65.2.2 2000/11/30 01:54:42 peter
 *	JNPR: pmap.h,v 1.7.2.1 2007/09/10 07:44:12 girish
 *	$FreeBSD: head/sys/mips/include/pmap.h 187327 2009-01-16 08:38:03Z imp $
 */

#ifndef _MACHINE_PMAP_H_
#define	_MACHINE_PMAP_H_

#include <machine/vmparam.h>
#include <machine/pte.h>

#define	VADDR(pdi, pti)	((vm_offset_t)(((pdi)<<PDRSHIFT)|((pti)<<PAGE_SHIFT)))
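
/*
 * Illustrative note (not part of the original header): VADDR() composes a
 * virtual address from a page-directory index and a page-table index, so
 * for example VADDR(1, 0) == (vm_offset_t)1 << PDRSHIFT, i.e. the first
 * address mapped by the second page table.
 */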

#define	NKPT		120	/* actual number of kernel page tables */

#ifndef NKPDE
#define	NKPDE		255	/* addressable number of page tables/pde's */
#endif

#define	KPTDI		(VM_MIN_KERNEL_ADDRESS >> SEGSHIFT)
#define	NUSERPGTBLS	(VM_MAXUSER_ADDRESS >> SEGSHIFT)

#ifndef LOCORE

#include <sys/queue.h>
#include <sys/_lock.h>
#include <sys/_mutex.h>

/*
 * Pmap stuff
 */
struct pv_entry;

struct md_page {
	int pv_list_count;
	int pv_flags;
	TAILQ_HEAD(, pv_entry) pv_list;
};

#define	PV_TABLE_MOD		0x01	/* modified */
#define	PV_TABLE_REF		0x02	/* referenced */

#define	ASID_BITS		8
#define	ASIDGEN_BITS		(32 - ASID_BITS)
#define	ASIDGEN_MASK		((1 << ASIDGEN_BITS) - 1)

struct pmap {
	pd_entry_t *pm_segtab;			/* KVA of segment table */
	TAILQ_HEAD(, pv_entry) pm_pvlist;	/* list of mappings in pmap */
	int pm_active;				/* active on cpus */
	struct {
		u_int32_t asid:ASID_BITS;	/* TLB address space tag */
		u_int32_t gen:ASIDGEN_BITS;	/* its generation number */
	} pm_asid[MAXSMPCPU];
	struct pmap_statistics pm_stats;	/* pmap statistics */
	struct vm_page *pm_ptphint;		/* pmap ptp hint */
	struct mtx pm_mtx;
};

typedef struct pmap *pmap_t;
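
/*
 * Illustrative sketch (not part of the original header): pm_asid[] packs an
 * 8-bit TLB ASID and a 24-bit generation number into one 32-bit word per
 * CPU.  A hypothetical staleness check on the current CPU would look like:
 *
 *	if (pmap->pm_asid[PCPU_GET(cpuid)].gen != current_asid_generation)
 *		(allocate a fresh ASID for this pmap);
 *
 * where current_asid_generation stands in for the per-CPU generation
 * counter maintained by the machine-dependent pmap code.
 */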

#ifdef	_KERNEL

pt_entry_t *pmap_pte(pmap_t, vm_offset_t);
pd_entry_t pmap_segmap(pmap_t pmap, vm_offset_t va);
vm_offset_t pmap_kextract(vm_offset_t va);
extern pmap_t kernel_pmap;

#define	vtophys(va)	pmap_kextract(((vm_offset_t) (va)))
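
/*
 * Illustrative example (not part of the original header): vtophys() turns a
 * wired kernel virtual address into its physical address, e.g.
 *
 *	vm_paddr_t pa = vtophys(some_kernel_buffer);
 *
 * where some_kernel_buffer is a hypothetical pointer into wired kernel
 * memory.
 */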

#define	PMAP_LOCK(pmap)		mtx_lock(&(pmap)->pm_mtx)
#define	PMAP_LOCK_ASSERT(pmap, type)	mtx_assert(&(pmap)->pm_mtx, (type))
#define	PMAP_LOCK_DESTROY(pmap)	mtx_destroy(&(pmap)->pm_mtx)
#define	PMAP_LOCK_INIT(pmap)	mtx_init(&(pmap)->pm_mtx, "pmap", \
				    NULL, MTX_DEF)
#define	PMAP_LOCKED(pmap)	mtx_owned(&(pmap)->pm_mtx)
#define	PMAP_MTX(pmap)		(&(pmap)->pm_mtx)
#define	PMAP_TRYLOCK(pmap)	mtx_trylock(&(pmap)->pm_mtx)
#define	PMAP_UNLOCK(pmap)	mtx_unlock(&(pmap)->pm_mtx)
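
/*
 * Illustrative usage (not part of the original header): code that walks or
 * modifies a pmap's page tables typically brackets the work with the pmap
 * lock, e.g.
 *
 *	PMAP_LOCK(pmap);
 *	pte = pmap_pte(pmap, va);
 *	...
 *	PMAP_UNLOCK(pmap);
 *
 * and helpers can assert the requirement with
 * PMAP_LOCK_ASSERT(pmap, MA_OWNED).
 */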

#define	PMAP_LGMEM_LOCK_INIT(sysmap) mtx_init(&(sysmap)->lock, "pmap-lgmem", \
				    "per-cpu-map", (MTX_DEF | MTX_DUPOK))
#define	PMAP_LGMEM_LOCK(sysmap)	mtx_lock(&(sysmap)->lock)
#define	PMAP_LGMEM_UNLOCK(sysmap) mtx_unlock(&(sysmap)->lock)
#define	PMAP_LGMEM_DESTROY(sysmap) mtx_destroy(&(sysmap)->lock)

/*
 * For each vm_page_t, there is a list of all currently valid virtual
 * mappings of that page.  An entry is a pv_entry_t, the list is pv_table.
 */
typedef struct pv_entry {
	pmap_t pv_pmap;		/* pmap where mapping lies */
	vm_offset_t pv_va;	/* virtual address for mapping */
	TAILQ_ENTRY(pv_entry) pv_list;
	TAILQ_ENTRY(pv_entry) pv_plist;
	vm_page_t pv_ptem;	/* VM page for pte */
	boolean_t pv_wired;	/* whether this entry is wired */
} *pv_entry_t;
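
/*
 * Illustrative sketch (not part of the original header): walking every
 * mapping of a managed page through its md_page list, assuming the caller
 * already satisfies the usual page/pmap locking rules:
 *
 *	pv_entry_t pv;
 *
 *	TAILQ_FOREACH(pv, &m->md.pv_list, pv_list)
 *		pmap_update_page(pv->pv_pmap, pv->pv_va,
 *		    *pmap_pte(pv->pv_pmap, pv->pv_va));
 *
 * Here "m" stands in for a vm_page_t of interest.
 */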


#if defined(DIAGNOSTIC)
#define	PMAP_DIAGNOSTIC
#endif

extern vm_offset_t avail_end;
extern vm_offset_t avail_start;
extern vm_offset_t phys_avail[];
extern char *ptvmmap;		/* poor name! */
extern vm_offset_t virtual_avail;
extern vm_offset_t virtual_end;
extern pd_entry_t *segbase;

extern vm_paddr_t mips_wired_tlb_physmem_start;
extern vm_paddr_t mips_wired_tlb_physmem_end;
extern u_int need_wired_tlb_page_pool;

#define	pmap_page_is_mapped(m)	(!TAILQ_EMPTY(&(m)->md.pv_list))
#define	pmap_kernel()		kernel_pmap

void pmap_bootstrap(void);
void *pmap_mapdev(vm_offset_t, vm_size_t);
void pmap_unmapdev(vm_offset_t, vm_size_t);
vm_offset_t pmap_steal_memory(vm_size_t size);
void pmap_set_modified(vm_offset_t pa);
int page_is_managed(vm_offset_t pa);
void pmap_page_is_free(vm_page_t m);
void pmap_kenter(vm_offset_t va, vm_paddr_t pa);
void pmap_kremove(vm_offset_t va);
void *pmap_kenter_temporary(vm_paddr_t pa, int i);
void pmap_kenter_temporary_free(vm_paddr_t pa);
int pmap_compute_pages_to_dump(void);
void pmap_update_page(pmap_t pmap, vm_offset_t va, pt_entry_t pte);

/*
 * floating virtual pages (FPAGES)
 *
 * These are the reserved virtual memory areas which can be
 * mapped to any physical memory.
 */
#define	FPAGES			2
#define	FPAGES_SHARED		2
#define	FSPACE			((FPAGES * MAXCPU + FPAGES_SHARED) * PAGE_SIZE)
#define	PMAP_FPAGE1		0x00	/* Used by pmap_zero_page &
					 * pmap_copy_page */
#define	PMAP_FPAGE2		0x01	/* Used by pmap_copy_page */

#define	PMAP_FPAGE3		0x00	/* Used by pmap_zero_page_idle */
#define	PMAP_FPAGE_KENTER_TEMP	0x01	/* Used by coredump */

struct fpage {
	vm_offset_t kva;
	u_int state;
};

struct sysmaps {
	struct mtx lock;
	struct fpage fp[FPAGES];
};

vm_offset_t pmap_map_fpage(vm_paddr_t pa, struct fpage *fp,
    boolean_t check_unmaped);
void pmap_unmap_fpage(vm_paddr_t pa, struct fpage *fp);
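
/*
 * Illustrative sketch (not part of the original header): the floating pages
 * give each CPU a small set of kernel VAs that can be pointed at arbitrary
 * physical pages, e.g. for a pmap_zero_page()-style operation:
 *
 *	struct sysmaps *sysmaps = &some_sysmaps_array[PCPU_GET(cpuid)];
 *	vm_offset_t va;
 *
 *	PMAP_LGMEM_LOCK(sysmaps);
 *	va = pmap_map_fpage(pa, &sysmaps->fp[PMAP_FPAGE1], FALSE);
 *	bzero((void *)va, PAGE_SIZE);
 *	pmap_unmap_fpage(pa, &sysmaps->fp[PMAP_FPAGE1]);
 *	PMAP_LGMEM_UNLOCK(sysmaps);
 *
 * "some_sysmaps_array" stands in for the machine-dependent per-CPU sysmaps
 * storage, which is not declared in this header.
 */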

#endif				/* _KERNEL */

#endif				/* !LOCORE */

#endif				/* !_MACHINE_PMAP_H_ */