1/*	$NetBSD: pmap.h,v 1.25 2011/06/30 00:52:59 matt Exp $	*/
2
3/*-
4 * Copyright (C) 1995, 1996 Wolfgang Solfrank.
5 * Copyright (C) 1995, 1996 TooLs GmbH.
6 * All rights reserved.
7 *
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions
10 * are met:
11 * 1. Redistributions of source code must retain the above copyright
12 *    notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 *    notice, this list of conditions and the following disclaimer in the
15 *    documentation and/or other materials provided with the distribution.
16 * 3. All advertising materials mentioning features or use of this software
17 *    must display the following acknowledgement:
18 *	This product includes software developed by TooLs GmbH.
19 * 4. The name of TooLs GmbH may not be used to endorse or promote products
20 *    derived from this software without specific prior written permission.
21 *
22 * THIS SOFTWARE IS PROVIDED BY TOOLS GMBH ``AS IS'' AND ANY EXPRESS OR
23 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
24 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
25 * IN NO EVENT SHALL TOOLS GMBH BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
26 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
27 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
28 * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
29 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
30 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
31 * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
32 */
33
34#ifndef	_POWERPC_OEA_PMAP_H_
35#define	_POWERPC_OEA_PMAP_H_
36
37#ifdef _LOCORE
38#error use assym.h instead
39#endif
40
41#if defined(_LKM) || defined(_MODULE)
42#error this file should not be included by loadable kernel modules
43#endif
44
45#ifdef _KERNEL_OPT
46#include "opt_ppcarch.h"
47#endif
48#include <powerpc/oea/pte.h>
49
50/*
51 * Pmap stuff
52 */
/*
 * Machine-dependent pmap structure: one per address space.  Which
 * members exist depends on the MMU flavour(s) compiled in (PPC_OEA,
 * PPC_OEA64, PPC_OEA64_BRIDGE).
 */
struct pmap {
#ifdef PPC_OEA64
	struct steg *pm_steg_table;		/* segment table pointer */
	/* XXX need way to track exec pages */
#endif

#if defined(PPC_OEA) || defined (PPC_OEA64_BRIDGE)
	register_t pm_sr[16];			/* segments used in this pmap */
	int pm_exec[16];			/* counts of exec mappings */
#endif
	register_t pm_vsid;			/* VSID bits */
	int pm_refs;				/* ref count */
	struct pmap_statistics pm_stats;	/* pmap statistics */
	unsigned int pm_evictions;		/* pvo's not in page table */

#ifdef PPC_OEA64
	/* segment table entries evicted (parallels pm_evictions for PTEs) */
	unsigned int pm_ste_evictions;
#endif
};
72
/*
 * Operations vector for the pmap implementation.  When more than one
 * (or none) of the OEA variants is configured (PMAP_NEEDS_FIXUP below),
 * the kernel routes pmap calls through a pointer to one of these
 * tables, selected at bootstrap via pmap_setup32()/pmap_setup64()/
 * pmap_setup64bridge().  Member order must not change: it is relied
 * upon by the table initializers in the implementation files.
 */
struct pmap_ops {
	int (*pmapop_pte_spill)(struct pmap *, vaddr_t, bool);
	void (*pmapop_real_memory)(paddr_t *, psize_t *);
	void (*pmapop_init)(void);
	void (*pmapop_virtual_space)(vaddr_t *, vaddr_t *);
	pmap_t (*pmapop_create)(void);
	void (*pmapop_reference)(pmap_t);
	void (*pmapop_destroy)(pmap_t);
	void (*pmapop_copy)(pmap_t, pmap_t, vaddr_t, vsize_t, vaddr_t);
	void (*pmapop_update)(pmap_t);
	int (*pmapop_enter)(pmap_t, vaddr_t, paddr_t, vm_prot_t, u_int);
	void (*pmapop_remove)(pmap_t, vaddr_t, vaddr_t);
	void (*pmapop_kenter_pa)(vaddr_t, paddr_t, vm_prot_t, u_int);
	void (*pmapop_kremove)(vaddr_t, vsize_t);
	bool (*pmapop_extract)(pmap_t, vaddr_t, paddr_t *);

	void (*pmapop_protect)(pmap_t, vaddr_t, vaddr_t, vm_prot_t);
	void (*pmapop_unwire)(pmap_t, vaddr_t);
	void (*pmapop_page_protect)(struct vm_page *, vm_prot_t);
	bool (*pmapop_query_bit)(struct vm_page *, int);
	bool (*pmapop_clear_bit)(struct vm_page *, int);

	void (*pmapop_activate)(struct lwp *);
	void (*pmapop_deactivate)(struct lwp *);

	void (*pmapop_pinit)(pmap_t);
	void (*pmapop_procwr)(struct proc *, vaddr_t, size_t);

	/* The following entries are debugging/diagnostic hooks. */
	void (*pmapop_pte_print)(volatile struct pte *);
	void (*pmapop_pteg_check)(void);
	void (*pmapop_print_mmuregs)(void);
	void (*pmapop_print_pte)(pmap_t, vaddr_t);
	void (*pmapop_pteg_dist)(void);
	void (*pmapop_pvo_verify)(void);
	vaddr_t (*pmapop_steal_memory)(vsize_t, vaddr_t *, vaddr_t *);
	void (*pmapop_bootstrap)(paddr_t, paddr_t);
};
110
111#ifdef	_KERNEL
112#include <sys/cdefs.h>
113__BEGIN_DECLS
114#include <sys/param.h>
115#include <sys/systm.h>
116
117#if defined (PPC_OEA) || defined (PPC_OEA64_BRIDGE)
118extern register_t iosrtable[];
119#endif
120extern int pmap_use_altivec;
121
/*
 * The referenced/modified page attributes map onto the PTE_REF and
 * PTE_CHG bits; they are queried and cleared through
 * pmap_query_bit()/pmap_clear_bit() declared below.
 */
#define pmap_clear_modify(pg)		(pmap_clear_bit((pg), PTE_CHG))
#define	pmap_clear_reference(pg)	(pmap_clear_bit((pg), PTE_REF))
#define	pmap_is_modified(pg)		(pmap_query_bit((pg), PTE_CHG))
#define	pmap_is_referenced(pg)		(pmap_query_bit((pg), PTE_REF))

/* Counters come straight from the pmap's embedded pmap_statistics. */
#define	pmap_resident_count(pmap)	((pmap)->pm_stats.resident_count)
#define	pmap_wired_count(pmap)		((pmap)->pm_stats.wired_count)
129
/*
 * pmap_remove_all: optimization hook called before tearing down an
 * address space; this port has no per-pmap work to do here.
 */
/* ARGSUSED */
static inline void
pmap_remove_all(struct pmap *pm)
{
	/* Nothing to do. */
}
136
137#if (defined(PPC_OEA) + defined(PPC_OEA64) + defined(PPC_OEA64_BRIDGE)) != 1
138#define	PMAP_NEEDS_FIXUP
139#endif
140
141void pmap_bootstrap(vaddr_t, vaddr_t);
142bool pmap_extract(pmap_t, vaddr_t, paddr_t *);
143bool pmap_query_bit(struct vm_page *, int);
144bool pmap_clear_bit(struct vm_page *, int);
145void pmap_real_memory(paddr_t *, psize_t *);
146void pmap_procwr(struct proc *, vaddr_t, size_t);
147int pmap_pte_spill(pmap_t, vaddr_t, bool);
148void pmap_pinit(pmap_t);
149
150u_int powerpc_mmap_flags(paddr_t);
151#define POWERPC_MMAP_FLAG_MASK	0xf
152#define POWERPC_MMAP_FLAG_PREFETCHABLE	0x1
153#define POWERPC_MMAP_FLAG_CACHEABLE	0x2
154
155#define pmap_phys_address(ppn)		(ppn & ~POWERPC_MMAP_FLAG_MASK)
156#define pmap_mmap_flags(ppn)		powerpc_mmap_flags(ppn)
157
158static inline paddr_t vtophys (vaddr_t);
159
160/*
161 * Alternate mapping hooks for pool pages.  Avoids thrashing the TLB.
162 *
163 * Note: This won't work if we have more memory than can be direct-mapped
164 * VA==PA all at once.  But pmap_copy_page() and pmap_zero_page() will have
165 * this problem, too.
166 */
167#if !defined(PPC_OEA64) && !defined (PPC_OEA64_BRIDGE)
168#define	PMAP_MAP_POOLPAGE(pa)	(pa)
169#define	PMAP_UNMAP_POOLPAGE(pa)	(pa)
170#define POOL_VTOPHYS(va)	vtophys((vaddr_t) va)
171#endif
172
173static inline paddr_t
174vtophys(vaddr_t va)
175{
176	paddr_t pa;
177
178	if (pmap_extract(pmap_kernel(), va, &pa))
179		return pa;
180	KASSERTMSG(0, "vtophys: pmap_extract of %#"PRIxVADDR" failed", va);
181	return (paddr_t) -1;
182}
183
184
185#ifdef PMAP_NEEDS_FIXUP
186extern const struct pmap_ops *pmapops;
187extern const struct pmap_ops pmap32_ops;
188extern const struct pmap_ops pmap64_ops;
189extern const struct pmap_ops pmap64bridge_ops;
190
/* Route all pmap operations through the 32-bit OEA implementation. */
static inline void
pmap_setup32(void)
{
	pmapops = &pmap32_ops;
}
196
/* Route all pmap operations through the 64-bit OEA implementation. */
static inline void
pmap_setup64(void)
{
	pmapops = &pmap64_ops;
}
202
/* Route all pmap operations through the OEA64 bridge implementation. */
static inline void
pmap_setup64bridge(void)
{
	pmapops = &pmap64bridge_ops;
}
208#endif
209
210bool pmap_pageidlezero (paddr_t);
211void pmap_syncicache (paddr_t, psize_t);
212#ifdef PPC_OEA64
213vaddr_t pmap_setusr (vaddr_t);
214vaddr_t pmap_unsetusr (void);
215#endif
216
217#ifdef PPC_OEA64_BRIDGE
218int pmap_setup_segment0_map(int use_large_pages, ...);
219#endif
220
221#define PMAP_MD_PREFETCHABLE		0x2000000
222#define PMAP_STEAL_MEMORY
223#define PMAP_NEED_PROCWR
224
225void pmap_zero_page(paddr_t);
226void pmap_copy_page(paddr_t, paddr_t);
227
228LIST_HEAD(pvo_head, pvo_entry);
229
230#define	__HAVE_VM_PAGE_MD
231
/*
 * Machine-dependent per-page data (__HAVE_VM_PAGE_MD), embedded in
 * each struct vm_page and initialized by VM_MDPAGE_INIT() below.
 */
struct vm_page_md {
	unsigned int mdpg_attrs;	/* page attribute bits (presumably
					 * PTE_REF/PTE_CHG, cf. the
					 * query/clear_bit macros above) */
	struct pvo_head mdpg_pvoh;	/* list of mappings of this page */
#ifdef MODULAR
	/*
	 * NOTE(review): spare space, presumably so the struct size stays
	 * stable for loadable modules regardless of option changes —
	 * confirm against sys/uvm usage.
	 */
	uintptr_t mdpg_dummy[3];
#endif
};
239
/* Initialize the machine-dependent fields of a vm_page. */
#define	VM_MDPAGE_INIT(pg) do {			\
	(pg)->mdpage.mdpg_attrs = 0;		\
	LIST_INIT(&(pg)->mdpage.mdpg_pvoh);	\
} while (/*CONSTCOND*/0)
244
245__END_DECLS
246#endif	/* _KERNEL */
247
248#endif	/* _POWERPC_OEA_PMAP_H_ */
249