/*	$NetBSD: pmap.h,v 1.6 1996/07/02 22:42:49 cgd Exp $	*/

/*
 * Mach Operating System
 * Copyright (c) 1993,1992 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */
/*
 * HISTORY
 * $Log: pmap.h,v $
 * Revision 1.6  1996/07/02 22:42:49  cgd
 * pull in the Mach3 alpha pmap, as a base for the new pmap module.  Modified
 * from the mach3 versions only as much as necessary to allow the old
 * NetBSD/Alpha pmap code to compile.  THESE WILL NOT WORK AS-IS, and at
 * minimum will require code to implement reference- and modified-bit
 * emulation.
 *
 * Revision 2.3  93/01/19  08:59:45  danner
 * 	Do not allocate cpusets as commons, to avoid
 * 	cacheline conflicts.
 * 	[93/01/15            af]
 *
 * Revision 2.2  93/01/14  17:13:51  danner
 * 	Created, from dbg's i386 pmap module.
 * 	[92/06/15            af]
 *
 *
 */

/*
 *	File:	pmap.h
 *
 *	Author:  David Golub (mods for Alpha by Alessandro Forin)
 *	Date:	ca. 1988
 *
 *	Machine-dependent structures for the physical map module.
 */

#ifdef OLD_PMAP
#include <machine/pmap.old.h>
#else

#ifndef	_PMAP_MACHINE_
#define _PMAP_MACHINE_	1

#ifndef	ASSEMBLER

#include <kern/zalloc.h>
#include <kern/lock.h>
#include <mach/machine/vm_param.h>
#include <mach/vm_statistics.h>
#include <mach/kern_return.h>

/*
 *	Alpha Page Table Entry
 */

typedef unsigned long	pt_entry_t;
#define PT_ENTRY_NULL	((pt_entry_t *) 0)

#endif	/* ASSEMBLER */

#define ALPHA_OFFMASK	(ALPHA_PGBYTES-1)	/* offset within page */

#define	SEG_MASK	((ALPHA_PGBYTES / 8)-1)	/* masks for segments */
#define	SEG3_SHIFT	(ALPHA_PGSHIFT)		/* shifts for segments */
#define	SEG2_SHIFT	(SEG3_SHIFT+(ALPHA_PGSHIFT-3))
#define	SEG1_SHIFT	(SEG2_SHIFT+(ALPHA_PGSHIFT-3))
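
/*
 *	Editor's note (illustrative, not from the original sources): with
 *	the usual 8 KB Alpha page (ALPHA_PGSHIFT == 13, ALPHA_PGBYTES == 8192)
 *	these work out to
 *
 *		SEG_MASK	== 0x3ff	(1024 entries per table)
 *		SEG3_SHIFT	== 13
 *		SEG2_SHIFT	== 23
 *		SEG1_SHIFT	== 33
 *
 *	i.e. a three-level table covering a 43-bit virtual address space.
 */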

/*
 *	Convert address offset to page descriptor index
 */
#define pdenum(a)	(((a) >> SEG1_SHIFT) & SEG_MASK)

/*
 *	Convert page descriptor index to user virtual address
 */
#define pdetova(a)	((vm_offset_t)(a) << SEG1_SHIFT)
#define pde2tova(a)	((vm_offset_t)(a) << SEG2_SHIFT)
#define pde3tova(a)	((vm_offset_t)(a) << SEG3_SHIFT)

/*
 *	Convert address offset to second level page table index
 */
#define pte2num(a)	(((a) >> SEG2_SHIFT) & SEG_MASK)

/*
 *	Convert address offset to third level page table index
 */
#define pte3num(a)	(((a) >> SEG3_SHIFT) & SEG_MASK)
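
/*
 *	Example (an editor's illustrative sketch, assuming the 8 KB page
 *	layout noted above): given a virtual address va, the three table
 *	indices and the byte offset within the page are
 *
 *		unsigned l1 = pdenum(va);			bits 33..42
 *		unsigned l2 = pte2num(va);			bits 23..32
 *		unsigned l3 = pte3num(va);			bits 13..22
 *		vm_offset_t off = va & ALPHA_OFFMASK;		bits  0..12
 *
 *	and pdetova/pde2tova/pde3tova map an index back to the base
 *	virtual address of the region that table slot describes.
 */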

#define NPTES	(alpha_ptob(1)/sizeof(pt_entry_t))
#define NPDES	(alpha_ptob(1)/sizeof(pt_entry_t))

/*
 *	Hardware pte bit definitions (to be used directly on the ptes
 *	without using the bit fields).
 */

#define	ALPHA_PTE_VALID		0x1

#define	ALPHA_PTE_FAULT_ON_x	0xe

#define	ALPHA_PTE_GLOBAL	0x10
#define	ALPHA_PTE_GRANULARITY	0x60

#define	ALPHA_PTE_PROT		0xff00
#define	ALPHA_PTE_PROTOFF	8
#define	ALPHA_PTE_KW		0x10
#define	ALPHA_PTE_UW		0x80
#define	ALPHA_PTE_KR		0x01
#define	ALPHA_PTE_UR		0x08

#define ALPHA_PTE_WRITE		0x00009000

#define	ALPHA_PTE_SOFTWARE	0xffff0000
#define ALPHA_PTE_WIRED		0x00010000
#define ALPHA_PTE_REF		0x00020000
#define ALPHA_PTE_MOD		0x00040000

#define ALPHA_PTE_PFN		0xffffffff00000000

#define	pa_to_pte(a)		(alpha_btop(a) << 32)
#define	pte_to_pa(p)		(alpha_ptob( (p) >> 32 ))
#define	pte_increment_pa(p)	((p) += pa_to_pte(ALPHA_PGBYTES))

/*
 *	Convert page table entry to kernel virtual address
 */
#define ptetokv(a)	(phystokv(pte_to_pa(a)))
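
/*
 *	Example (an editor's illustrative sketch, derived from the
 *	definitions above; not part of the original header): a valid,
 *	kernel read/write pte for the physical page containing `pa' would
 *	combine the PFN field with protection bits shifted into place:
 *
 *		pt_entry_t pte = pa_to_pte(pa)
 *		    | ((ALPHA_PTE_KR | ALPHA_PTE_KW) << ALPHA_PTE_PROTOFF)
 *		    | ALPHA_PTE_VALID;
 *
 *	pte_to_pa(pte) recovers the physical address and ptetokv(pte) the
 *	kernel virtual address that maps it.
 */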

#ifndef	ASSEMBLER
typedef	volatile long	cpu_set;	/* set of CPUs - must be <= 64 */
					/* changed by other processors */

struct pmap {
	pt_entry_t	*dirbase;	/* page directory pointer register */
	int		pid;		/* TLBPID when in use		*/
	int		ref_count;	/* reference count */
	decl_simple_lock_data(,lock)
					/* lock on map */
	struct pmap_statistics	stats;	/* map statistics */
	cpu_set		cpus_using;	/* bitmap of cpus using pmap */
	int		(*hacking)();	/* horrible things needed	*/
};
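
/*
 *	Layout sketch (an editor's illustrative note, ignoring validity
 *	checks; derived from the conversion macros above): dirbase points
 *	at the level-1 table of NPDES entries, each valid entry of which
 *	names a level-2 table, whose entries in turn name level-3 tables
 *	of NPTES ptes.  A software walk of a pmap `map' for a virtual
 *	address va would look roughly like
 *
 *		pt_entry_t *l1 = map->dirbase;
 *		pt_entry_t *l2 = (pt_entry_t *) ptetokv(l1[pdenum(va)]);
 *		pt_entry_t *l3 = (pt_entry_t *) ptetokv(l2[pte2num(va)]);
 *		pt_entry_t pte = l3[pte3num(va)];
 */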

typedef struct pmap	*pmap_t;

#define PMAP_NULL	((pmap_t) 0)

extern vm_offset_t	kvtophys(vm_offset_t);
extern void		set_ptbr(/* pmap_t map, pcb_t pcb, boolean_t */);

#if	NCPUS > 1
/*
 *	List of cpus that are actively using mapped memory.  Any
 *	pmap update operation must wait for all cpus in this list.
 *	Update operations must still be queued to cpus not in this
 *	list.
 */
extern cpu_set		cpus_active;

/*
 *	List of cpus that are idle, but still operating, and will want
 *	to see any kernel pmap updates when they become active.
 */
extern cpu_set		cpus_idle;

/*
 *	Quick test for pmap update requests.
 */
extern volatile
boolean_t	cpu_update_needed[NCPUS];

/*
 *	External declarations for PMAP_ACTIVATE.
 */

void		process_pmap_updates();
void		pmap_update_interrupt();
extern	pmap_t	kernel_pmap;

#endif	/* NCPUS > 1 */

/*
 *	Machine dependent routines that are used only for Alpha.
 */

pt_entry_t	*pmap_pte();

/*
 *	Macros for speed.
 */

#if	NCPUS > 1

/*
 *	For multiple CPUS, PMAP_ACTIVATE and PMAP_DEACTIVATE must manage
 *	fields to control TLB invalidation on other CPUS.
 */

#define	PMAP_ACTIVATE_KERNEL(my_cpu)	{				\
									\
	/*								\
	 *	Let pmap updates proceed while we wait for this pmap.	\
	 */								\
	i_bit_clear((my_cpu), &cpus_active);				\
									\
	/*								\
	 *	Lock the pmap to put this cpu in its active set.	\
	 *	Wait for updates here.					\
	 */								\
	simple_lock(&kernel_pmap->lock);				\
									\
	/*								\
	 *	Process invalidate requests for the kernel pmap.	\
	 */								\
	if (cpu_update_needed[(my_cpu)])				\
	    process_pmap_updates(kernel_pmap);				\
									\
	/*								\
	 *	Mark that this cpu is using the pmap.			\
	 */								\
	i_bit_set((my_cpu), &kernel_pmap->cpus_using);			\
									\
	/*								\
	 *	Mark this cpu active - IPL will be lowered by		\
	 *	load_context().						\
	 */								\
	i_bit_set((my_cpu), &cpus_active);				\
									\
	simple_unlock(&kernel_pmap->lock);				\
}

#define	PMAP_DEACTIVATE_KERNEL(my_cpu)	{				\
	/*								\
	 *	Mark pmap no longer in use by this cpu even if		\
	 *	pmap is locked against updates.				\
	 */								\
	i_bit_clear((my_cpu), &kernel_pmap->cpus_using);		\
}

#define PMAP_ACTIVATE_USER(pmap, th, my_cpu)	{			\
	register pmap_t		tpmap = (pmap);				\
	register pcb_t		pcb = (th)->pcb;			\
									\
	if (tpmap == kernel_pmap) {					\
	    /*								\
	     *	If this is the kernel pmap, switch to its page tables.	\
	     */								\
	    set_ptbr(tpmap,pcb,TRUE);					\
	}								\
	else {								\
	    /*								\
	     *	Let pmap updates proceed while we wait for this pmap.	\
	     */								\
	    i_bit_clear((my_cpu), &cpus_active);			\
									\
	    /*								\
	     *	Lock the pmap to put this cpu in its active set.	\
	     *	Wait for updates here.					\
	     */								\
	    simple_lock(&tpmap->lock);					\
									\
	    /*								\
	     *	No need to invalidate the TLB - the entire user pmap	\
	     *	will be invalidated by reloading dirbase.		\
	     */								\
	    if (tpmap->pid < 0) pmap_assign_tlbpid(tpmap);		\
	    set_ptbr(tpmap, pcb, TRUE);					\
									\
	    /*								\
	     *	Mark that this cpu is using the pmap.			\
	     */								\
	    i_bit_set((my_cpu), &tpmap->cpus_using);			\
									\
	    /*								\
	     *	Mark this cpu active - IPL will be lowered by		\
	     *	load_context().						\
	     */								\
	    i_bit_set((my_cpu), &cpus_active);				\
									\
	    simple_unlock(&tpmap->lock);				\
	}								\
}

#define PMAP_DEACTIVATE_USER(pmap, thread, my_cpu)	{		\
	register pmap_t		tpmap = (pmap);				\
									\
	/*								\
	 *	Do nothing if this is the kernel pmap.			\
	 */								\
	if (tpmap != kernel_pmap) {					\
	    /*								\
	     *	Mark pmap no longer in use by this cpu even if		\
	     *	pmap is locked against updates.				\
	     */								\
	    i_bit_clear((my_cpu), &(pmap)->cpus_using);			\
	}								\
}

#define MARK_CPU_IDLE(my_cpu)	{					\
	/*								\
	 *	Mark this cpu idle, and remove it from the active set,	\
	 *	since it is not actively using any pmap.  Signal_cpus	\
	 *	will notice that it is idle, and avoid signaling it,	\
	 *	but will queue the update request for when the cpu	\
	 *	becomes active.						\
	 */								\
	spl_t	s = splvm();						\
	i_bit_set((my_cpu), &cpus_idle);				\
	i_bit_clear((my_cpu), &cpus_active);				\
	splx(s);							\
}

#define MARK_CPU_ACTIVE(my_cpu)	{					\
									\
	spl_t	s = splvm();						\
	/*								\
	 *	If a kernel_pmap update was requested while this cpu	\
	 *	was idle, process it as if we got the interrupt.	\
	 *	Before doing so, remove this cpu from the idle set.	\
	 *	Since we do not grab any pmap locks while we flush	\
	 *	our TLB, another cpu may start an update operation	\
	 *	before we finish.  Removing this cpu from the idle	\
	 *	set assures that we will receive another update		\
	 *	interrupt if this happens.				\
	 */								\
	i_bit_clear((my_cpu), &cpus_idle);				\
									\
	if (cpu_update_needed[(my_cpu)])				\
	    pmap_update_interrupt();					\
									\
	/*								\
	 *	Mark that this cpu is now active.			\
	 */								\
	i_bit_set((my_cpu), &cpus_active);				\
	splx(s);							\
}
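
/*
 *	Usage sketch (an editor's illustrative note; the real callers live
 *	in the machine-independent idle/dispatch code, not in this file):
 *	a processor brackets its idle wait with these macros so that pmap
 *	updates are queued for it rather than signalled to it, roughly
 *
 *		MARK_CPU_IDLE(mycpu);
 *		while (no thread to run)
 *			;
 *		MARK_CPU_ACTIVE(mycpu);
 */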

#else	/* NCPUS > 1 */

/*
 *	With only one CPU, we just have to indicate whether the pmap is
 *	in use.
 */

#define	PMAP_ACTIVATE_KERNEL(my_cpu)	{				\
	kernel_pmap->cpus_using = TRUE;					\
}

#define	PMAP_DEACTIVATE_KERNEL(my_cpu)	{				\
	kernel_pmap->cpus_using = FALSE;				\
}

#define	PMAP_ACTIVATE_USER(pmap, th, my_cpu)	{			\
	register pmap_t		tpmap = (pmap);				\
	register pcb_t		pcb = (th)->pcb;			\
									\
	if (tpmap->pid < 0) pmap_assign_tlbpid(tpmap);			\
	set_ptbr(tpmap,pcb,TRUE);					\
	if (tpmap != kernel_pmap) {					\
	    tpmap->cpus_using = TRUE;					\
	}								\
}

#define PMAP_DEACTIVATE_USER(pmap, thread, cpu)	{			\
	if ((pmap) != kernel_pmap)					\
	    (pmap)->cpus_using = FALSE;					\
}

#endif	/* NCPUS > 1 */

#define	pmap_kernel()			(kernel_pmap)
#define pmap_resident_count(pmap)	((pmap)->stats.resident_count)
#define pmap_phys_address(frame)	((vm_offset_t) (alpha_ptob(frame)))
#define pmap_phys_to_frame(phys)	((alpha_btop(phys)))
#define	pmap_copy(dst_pmap,src_pmap,dst_addr,len,src_addr)
#define	pmap_attribute(pmap,addr,size,attr,value) \
					(KERN_INVALID_ADDRESS)

/*
 *	Data structures this module exports
 */
extern pmap_t		kernel_pmap;	/* pointer to the kernel pmap	*/

#endif	/* ASSEMBLER */

/*
 *	We want to implement pmap_steal_memory and pmap_startup.
 */

#define	MACHINE_PAGES
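
/*
 *	Editor's note (an assumption about the Mach VM interface of this
 *	era, not stated in this file): defining MACHINE_PAGES tells the
 *	machine-independent VM code that this pmap module supplies its own
 *	pmap_steal_memory() and pmap_startup() in place of the generic
 *	versions, declared in the style of this file as
 *
 *		vm_offset_t	pmap_steal_memory();
 *		void		pmap_startup();
 */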

#endif	/* _PMAP_MACHINE_ */

#endif /* OLD_PMAP */