/* $NetBSD: pmap.h,v 1.43 2001/04/24 20:11:53 thorpej Exp $ */

/*-
 * Copyright (c) 1998, 1999, 2000, 2001 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility,
 * NASA Ames Research Center and by Chris G. Demetriou.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the NetBSD
 *	Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Copyright (c) 1987 Carnegie-Mellon University
 * Copyright (c) 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * the Systems Programming Group of the University of Utah Computer
 * Science Department.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)pmap.h	8.1 (Berkeley) 6/10/93
 */

#ifndef	_PMAP_MACHINE_
#define	_PMAP_MACHINE_

#if defined(_KERNEL) && !defined(_LKM)
#include "opt_multiprocessor.h"
#endif

#include <sys/lock.h>
#include <sys/queue.h>

#include <machine/pte.h>

/*
 * Machine-dependent virtual memory state.
 *
 * If we ever support processor numbers higher than 63, we'll have to
 * rethink the CPU mask.
 *
 * Note the per-CPU ASN info (pma_asn and pma_asngen) lives in the pm_asni[]
 * array, which is sized in pmap_create() based on the PCS count from the
 * HWRPB and indexed by processor ID (from `whami').
 *
 * The kernel pmap is a special case; it gets statically-allocated
 * arrays which hold enough for ALPHA_MAXPROCS.
 */
struct pmap_asn_info {
	unsigned int		pma_asn;	/* address space number */
	unsigned long		pma_asngen;	/* ASN generation number */
};

struct pmap {
	TAILQ_ENTRY(pmap)	pm_list;	/* list of all pmaps */
	pt_entry_t		*pm_lev1map;	/* level 1 map */
	int			pm_count;	/* pmap reference count */
	struct simplelock	pm_slock;	/* lock on pmap */
	struct pmap_statistics	pm_stats;	/* pmap statistics */
	long			pm_nlev2;	/* level 2 pt page count */
	long			pm_nlev3;	/* level 3 pt page count */
	unsigned long		pm_cpus;	/* mask of CPUs using pmap */
	unsigned long		pm_needisync;	/* mask of CPUs needing isync */
	struct pmap_asn_info	pm_asni[1];	/* ASN information */
			/*	variable length		*/
};
typedef struct pmap	*pmap_t;

/*
 * Compute the size of a pmap structure.  Subtract one because one
 * ASN info structure is already included in the pmap structure itself.
 */
#define	PMAP_SIZEOF(x)							\
	(ALIGN(sizeof(struct pmap) +					\
	       (sizeof(struct pmap_asn_info) * ((x) - 1))))
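
/*
 * Example (illustrative sketch only, not part of the original header):
 * sizing and allocating a user pmap for every processor slot described
 * by the HWRPB, roughly as pmap_create() would.  The use of malloc(9)
 * with M_VMPMAP, and the variable names, are assumptions of the sketch.
 */
#if 0	/* example only */
	struct pmap *pmap;
	u_long nasni = hwrpb->rpb_pcs_cnt;	/* PCS count from the HWRPB */

	/* One pmap_asn_info is built into struct pmap; add the rest. */
	pmap = malloc(PMAP_SIZEOF(nasni), M_VMPMAP, M_WAITOK);
	memset(pmap, 0, PMAP_SIZEOF(nasni));
#endif	/* example only */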

#define	PMAP_ASN_RESERVED	0	/* reserved for Lev1map users */

extern u_long		kernel_pmap_store[];

#define pmap_kernel()	((pmap_t) (&kernel_pmap_store[0]))

/*
 * For each vm_page_t, there is a list of all currently valid virtual
 * mappings of that page.  An entry is a pv_entry_t, the list is pv_table.
 */
typedef struct pv_entry {
	LIST_ENTRY(pv_entry) pv_list;	/* pv_entry list */
	struct pmap	*pv_pmap;	/* pmap where mapping lies */
	vaddr_t		pv_va;		/* virtual address for mapping */
	pt_entry_t	*pv_pte;	/* PTE that maps the VA */
} *pv_entry_t;

/*
 * The head of the list of pv_entry_t's, also contains page attributes.
 */
struct pv_head {
	LIST_HEAD(, pv_entry) pvh_list;		/* pv_entry list */
	struct simplelock pvh_slock;		/* lock on this head */
	int pvh_attrs;				/* page attributes */
	int pvh_usage;				/* page usage */
	int pvh_refcnt;				/* special use ref count */
};
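
/*
 * Example (sketch only): walking the PV list of a managed page while
 * holding pvh_slock, e.g. to visit every mapping of that page.  The
 * pv_print_mappings() name is hypothetical; a real caller would obtain
 * the pv_head from the per-page PV table.
 */
#if 0	/* example only */
static void
pv_print_mappings(struct pv_head *pvh)
{
	pv_entry_t pv;

	simple_lock(&pvh->pvh_slock);
	for (pv = LIST_FIRST(&pvh->pvh_list); pv != NULL;
	     pv = LIST_NEXT(pv, pv_list))
		printf("mapped by pmap %p at va 0x%lx\n", pv->pv_pmap,
		    (u_long)pv->pv_va);
	simple_unlock(&pvh->pvh_slock);
}
#endif	/* example only */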

/* pvh_attrs */
#define	PGA_MODIFIED		0x01		/* modified */
#define	PGA_REFERENCED		0x02		/* referenced */

/* pvh_usage */
#define	PGU_NORMAL		0		/* free or normal use */
#define	PGU_PVENT		1		/* PV entries */
#define	PGU_L1PT		2		/* level 1 page table */
#define	PGU_L2PT		3		/* level 2 page table */
#define	PGU_L3PT		4		/* level 3 page table */

#define	PGU_ISPTPAGE(pgu)	((pgu) >= PGU_L1PT)

#define	PGU_STRINGS							\
{									\
	"normal",							\
	"pvent",							\
	"l1pt",								\
	"l2pt",								\
	"l3pt",								\
}
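
/*
 * Example (sketch only): turning a pvh_usage value into a printable name
 * with PGU_STRINGS, e.g. from a debug or panic message.  The pgu_strings
 * array name and the pvh pointer are hypothetical.
 */
#if 0	/* example only */
	static const char *pgu_strings[] = PGU_STRINGS;

	printf("page usage: %s%s\n", pgu_strings[pvh->pvh_usage],
	    PGU_ISPTPAGE(pvh->pvh_usage) ? " (page table page)" : "");
#endif	/* example only */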

#ifdef _KERNEL

#ifndef _LKM
#include "opt_new_scc_driver.h"
#include "opt_dec_3000_300.h"			/* XXX */
#include "opt_dec_3000_500.h"			/* XXX */
#include "opt_dec_kn8ae.h"			/* XXX */

#if defined(NEW_SCC_DRIVER)
#if defined(DEC_KN8AE)
#define	_PMAP_MAY_USE_PROM_CONSOLE
#endif
#else /* ! NEW_SCC_DRIVER */
#if defined(DEC_3000_300)		\
 || defined(DEC_3000_500)		\
 || defined(DEC_KN8AE) 				/* XXX */
#define _PMAP_MAY_USE_PROM_CONSOLE		/* XXX */
#endif						/* XXX */
#endif /* NEW_SCC_DRIVER */

#if defined(MULTIPROCESSOR)
struct cpu_info;
struct trapframe;

void	pmap_do_reactivate(struct cpu_info *, struct trapframe *);

void	pmap_tlb_shootdown(pmap_t, vaddr_t, pt_entry_t);
void	pmap_do_tlb_shootdown(struct cpu_info *, struct trapframe *);
void	pmap_tlb_shootdown_q_drain(u_long, boolean_t);
#define	PMAP_TLB_SHOOTDOWN(pm, va, pte)					\
	pmap_tlb_shootdown((pm), (va), (pte))
#else
#define	PMAP_TLB_SHOOTDOWN(pm, va, pte)		/* nothing */
#endif /* MULTIPROCESSOR */
#endif /* _LKM */
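
/*
 * Example (sketch only): the pattern a pmap routine follows after changing
 * a PTE.  On MULTIPROCESSOR kernels PMAP_TLB_SHOOTDOWN() queues an
 * invalidation for the other CPUs using the pmap; on uniprocessor kernels
 * it expands to nothing and only the local TBIS is done.  The pmap, pte,
 * npte and va variables are assumed to be set up by the caller.
 */
#if 0	/* example only */
	pt_entry_t opte;

	opte = *pte;			/* remember the old PTE */
	*pte = npte;			/* install the new mapping */
	ALPHA_TBIS(va);			/* flush the local TLB entry */
	PMAP_TLB_SHOOTDOWN(pmap, va, opte);	/* tell the other CPUs */
#endif	/* example only */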

#define	pmap_resident_count(pmap)	((pmap)->pm_stats.resident_count)
#define	pmap_wired_count(pmap)		((pmap)->pm_stats.wired_count)

#define	pmap_update()			/* nothing (yet) */

extern	pt_entry_t *VPT;		/* Virtual Page Table */

#define	PMAP_STEAL_MEMORY		/* enable pmap_steal_memory() */
#define	PMAP_GROWKERNEL			/* enable pmap_growkernel() */

/*
 * Alternate mapping hooks for pool pages.  Avoids thrashing the TLB.
 */
#define	PMAP_MAP_POOLPAGE(pa)		ALPHA_PHYS_TO_K0SEG((pa))
#define	PMAP_UNMAP_POOLPAGE(va)		ALPHA_K0SEG_TO_PHYS((va))
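
/*
 * Example (sketch only): how a pool page hook can use the direct-mapped
 * K0SEG through these macros, so pool pages consume no kernel virtual
 * address space and no TLB entries.  Error handling is omitted and the
 * uvm calls shown are only an assumption of the sketch.
 */
#if 0	/* example only */
	struct vm_page *pg;
	vaddr_t va;

	pg = uvm_pagealloc(NULL, 0, NULL, 0);		/* grab a physical page */
	va = PMAP_MAP_POOLPAGE(VM_PAGE_TO_PHYS(pg));	/* no pmap_enter() needed */
	/* ... use the page at va; later, to free it: ... */
	uvm_pagefree(PHYS_TO_VM_PAGE(PMAP_UNMAP_POOLPAGE(va)));
#endif	/* example only */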

paddr_t vtophys(vaddr_t);

/* Machine-specific functions. */
void	pmap_bootstrap(paddr_t ptaddr, u_int maxasn, u_long ncpuids);
void	pmap_emulate_reference(struct proc *p, vaddr_t v,
		int user, int write);
#ifdef _PMAP_MAY_USE_PROM_CONSOLE
int	pmap_uses_prom_console(void);
#endif

#define	pmap_pte_pa(pte)	(PG_PFNUM(*(pte)) << PGSHIFT)
#define	pmap_pte_prot(pte)	(*(pte) & PG_PROT)
#define	pmap_pte_w(pte)		(*(pte) & PG_WIRED)
#define	pmap_pte_v(pte)		(*(pte) & PG_V)
#define	pmap_pte_pv(pte)	(*(pte) & PG_PVLIST)
#define	pmap_pte_asm(pte)	(*(pte) & PG_ASM)
#define	pmap_pte_exec(pte)	(*(pte) & PG_EXEC)

#define	pmap_pte_set_w(pte, v)						\
do {									\
	if (v)								\
		*(pte) |= PG_WIRED;					\
	else								\
		*(pte) &= ~PG_WIRED;					\
} while (0)

#define	pmap_pte_w_chg(pte, nw)	((nw) ^ pmap_pte_w(pte))

#define	pmap_pte_set_prot(pte, np)					\
do {									\
	*(pte) &= ~PG_PROT;						\
	*(pte) |= (np);							\
} while (0)

#define	pmap_pte_prot_chg(pte, np) ((np) ^ pmap_pte_prot(pte))
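
/*
 * Example (sketch only): the check-then-modify pattern these accessors
 * support.  The PTE and the TLB are only touched when the protection
 * actually changes; pte, va and newprot are assumed to be set up by the
 * caller.
 */
#if 0	/* example only */
	if (pmap_pte_prot_chg(pte, newprot)) {
		pmap_pte_set_prot(pte, newprot);
		ALPHA_TBIS(va);		/* the old translation is now stale */
	}
#endif	/* example only */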

static __inline pt_entry_t *pmap_l2pte(pmap_t, vaddr_t, pt_entry_t *);
static __inline pt_entry_t *pmap_l3pte(pmap_t, vaddr_t, pt_entry_t *);

#define	pmap_l1pte(pmap, v)						\
	(&(pmap)->pm_lev1map[l1pte_index((vaddr_t)(v))])

static __inline pt_entry_t *
pmap_l2pte(pmap, v, l1pte)
	pmap_t pmap;
	vaddr_t v;
	pt_entry_t *l1pte;
{
	pt_entry_t *lev2map;

	if (l1pte == NULL) {
		l1pte = pmap_l1pte(pmap, v);
		if (pmap_pte_v(l1pte) == 0)
			return (NULL);
	}

	lev2map = (pt_entry_t *)ALPHA_PHYS_TO_K0SEG(pmap_pte_pa(l1pte));
	return (&lev2map[l2pte_index(v)]);
}

static __inline pt_entry_t *
pmap_l3pte(pmap, v, l2pte)
	pmap_t pmap;
	vaddr_t v;
	pt_entry_t *l2pte;
{
	pt_entry_t *l1pte, *lev2map, *lev3map;

	if (l2pte == NULL) {
		l1pte = pmap_l1pte(pmap, v);
		if (pmap_pte_v(l1pte) == 0)
			return (NULL);

		lev2map = (pt_entry_t *)ALPHA_PHYS_TO_K0SEG(pmap_pte_pa(l1pte));
		l2pte = &lev2map[l2pte_index(v)];
		if (pmap_pte_v(l2pte) == 0)
			return (NULL);
	}

	lev3map = (pt_entry_t *)ALPHA_PHYS_TO_K0SEG(pmap_pte_pa(l2pte));
	return (&lev3map[l3pte_index(v)]);
}
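
/*
 * Example (sketch only): a full VA -> PA translation using the level 3
 * lookup above plus pmap_pte_pa(), much like what pmap_extract() does.
 * The pmap and va variables are assumed to be supplied by the caller,
 * who also holds the pmap's lock where required.
 */
#if 0	/* example only */
	pt_entry_t *l3pte;
	paddr_t pa;

	l3pte = pmap_l3pte(pmap, va, NULL);	/* walks levels 1 and 2 */
	if (l3pte != NULL && pmap_pte_v(l3pte))
		pa = pmap_pte_pa(l3pte) | (va & PGOFSET);
	else
		pa = 0;				/* no valid mapping */
#endif	/* example only */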

/*
 * Macros for locking pmap structures.
 *
 * Note that if we access the kernel pmap in interrupt context, it
 * is only to update statistics.  Since stats are updated using atomic
 * operations, locking the kernel pmap is not necessary.  Therefore,
 * it is not necessary to block interrupts when locking pmap structures.
 */
#define	PMAP_LOCK(pmap)		simple_lock(&(pmap)->pm_slock)
#define	PMAP_UNLOCK(pmap)	simple_unlock(&(pmap)->pm_slock)
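
/*
 * Example (sketch only): typical use of the locking macros around an
 * update of a user pmap, here unwiring a mapping.  The pmap and va
 * variables and the stats adjustment are assumptions of the sketch.
 */
#if 0	/* example only */
	pt_entry_t *pte;

	PMAP_LOCK(pmap);
	pte = pmap_l3pte(pmap, va, NULL);
	if (pte != NULL && pmap_pte_v(pte) && pmap_pte_w_chg(pte, 0)) {
		pmap_pte_set_w(pte, FALSE);	/* clear PG_WIRED */
		pmap->pm_stats.wired_count--;
	}
	PMAP_UNLOCK(pmap);
#endif	/* example only */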

/*
 * Macro for processing deferred I-stream synchronization.
 *
 * The pmap module may defer syncing the user I-stream until the
 * return to userspace, since the IMB PALcode op can be quite
 * expensive.  Since user instructions won't be executed until
 * the return to userspace, this can be deferred until userret().
 */
#define	PMAP_USERRET(pmap)						\
do {									\
	u_long cpu_mask = (1UL << cpu_number());			\
									\
	if ((pmap)->pm_needisync & cpu_mask) {				\
		atomic_clearbits_ulong(&(pmap)->pm_needisync,		\
		    cpu_mask);						\
		alpha_pal_imb();					\
	}								\
} while (0)
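
/*
 * Example (sketch only): where PMAP_USERRET() fits.  The machine-dependent
 * userret() path invokes it on the way back to user mode, so a deferred
 * I-stream sync happens exactly once, on the CPU about to run user
 * instructions.  The way the pmap is fetched from the proc here is an
 * assumption of the sketch.
 */
#if 0	/* example only */
	struct pmap *pmap = p->p_vmspace->vm_map.pmap;

	PMAP_USERRET(pmap);		/* IMB if this CPU was flagged */
#endif	/* example only */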

#endif /* _KERNEL */

#endif /* _PMAP_MACHINE_ */
