1/*-
2 * Copyright (C) 2006 Semihalf, Marian Balakowicz <m8@semihalf.com>
3 * All rights reserved.
4 *
5 * Adapted for Freescale's e500 core CPUs.
6 *
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
9 * are met:
10 * 1. Redistributions of source code must retain the above copyright
11 *    notice, this list of conditions and the following disclaimer.
12 * 2. Redistributions in binary form must reproduce the above copyright
13 *    notice, this list of conditions and the following disclaimer in the
14 *    documentation and/or other materials provided with the distribution.
15 * 3. The name of the author may not be used to endorse or promote products
16 *    derived from this software without specific prior written permission.
17 *
18 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
19 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
20 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN
21 * NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
22 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
23 * TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
24 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
25 * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
26 * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
27 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
28 *
29 * $FreeBSD$
30 */
31/*-
32 * Copyright (C) 1995, 1996 Wolfgang Solfrank.
33 * Copyright (C) 1995, 1996 TooLs GmbH.
34 * All rights reserved.
35 *
36 * Redistribution and use in source and binary forms, with or without
37 * modification, are permitted provided that the following conditions
38 * are met:
39 * 1. Redistributions of source code must retain the above copyright
40 *    notice, this list of conditions and the following disclaimer.
41 * 2. Redistributions in binary form must reproduce the above copyright
42 *    notice, this list of conditions and the following disclaimer in the
43 *    documentation and/or other materials provided with the distribution.
44 * 3. All advertising materials mentioning features or use of this software
45 *    must display the following acknowledgement:
46 *	This product includes software developed by TooLs GmbH.
47 * 4. The name of TooLs GmbH may not be used to endorse or promote products
48 *    derived from this software without specific prior written permission.
49 *
50 * THIS SOFTWARE IS PROVIDED BY TOOLS GMBH ``AS IS'' AND ANY EXPRESS OR
51 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
52 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
53 * IN NO EVENT SHALL TOOLS GMBH BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
54 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
55 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
56 * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
57 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
58 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
59 * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
60 *
61 *	from: $NetBSD: pmap.h,v 1.17 2000/03/30 16:18:24 jdolecek Exp $
62 */
63
64#ifndef	_MACHINE_PMAP_H_
65#define	_MACHINE_PMAP_H_
66
67#include <sys/queue.h>
68#include <sys/tree.h>
69#include <sys/_cpuset.h>
70#include <sys/_lock.h>
71#include <sys/_mutex.h>
72#include <machine/sr.h>
73#include <machine/pte.h>
74#include <machine/slb.h>
75#include <machine/tlb.h>
76
/*
 * Descriptor for one physical memory region; used by the dump/scan
 * interface below (pmap_scan_md(), pmap_dumpsys_map()/unmap()).
 */
struct pmap_md {
	u_int		md_index;	/* region index */
	vm_paddr_t      md_paddr;	/* physical base address */
	vm_offset_t     md_vaddr;	/* virtual base address */
	vm_size_t       md_size;	/* region size in bytes */
};
83
#if defined(AIM)

/* Default maximum number of pmaps (address spaces). */
#if !defined(NPMAPS)
#define	NPMAPS		32768
#endif /* !defined(NPMAPS) */

struct	slbtnode;	/* opaque; SLB tree node, defined by the pmap code */
struct	pmap;
typedef	struct pmap *pmap_t;
93
/*
 * Physical-to-virtual overflow (PVO) entry: tracks a single virtual
 * mapping of a physical page.  Each entry is simultaneously linked on
 * a per-page list, a per-pmap list and an overflow list.
 */
struct pvo_entry {
	LIST_ENTRY(pvo_entry) pvo_vlink;	/* Link to common virt page */
	LIST_ENTRY(pvo_entry) pvo_olink;	/* Link to overflow entry */
	LIST_ENTRY(pvo_entry) pvo_plink;	/* Link to pmap entries */
	union {
		struct	pte pte;		/* 32 bit PTE */
		struct	lpte lpte;		/* 64 bit PTE */
	} pvo_pte;
	pmap_t		pvo_pmap;		/* Owning pmap */
	vm_offset_t	pvo_vaddr;		/* VA of entry */
	uint64_t	pvo_vpn;		/* Virtual page number */
};
LIST_HEAD(pvo_head, pvo_entry);
107
/*
 * PVO state flags, kept in the low bits of pvo_vaddr (PVO_VADDR() masks
 * them off with ~ADDR_POFF, so the page offset bits are free for flags).
 */
#define	PVO_PTEGIDX_MASK	0x007UL		/* which PTEG slot */
#define	PVO_PTEGIDX_VALID	0x008UL		/* slot is valid */
#define	PVO_WIRED		0x010UL		/* PVO entry is wired */
#define	PVO_MANAGED		0x020UL		/* PVO entry is managed */
#define	PVO_EXECUTABLE		0x040UL		/* PVO entry is executable */
#define	PVO_BOOTSTRAP		0x080UL		/* PVO entry allocated during
						   bootstrap */
#define	PVO_LARGE		0x200UL		/* large page */

/* Accessors for the VA, PTEG slot index and VSID encoded in a pvo_entry. */
#define	PVO_VADDR(pvo)		((pvo)->pvo_vaddr & ~ADDR_POFF)
#define	PVO_PTEGIDX_GET(pvo)	((pvo)->pvo_vaddr & PVO_PTEGIDX_MASK)
#define	PVO_PTEGIDX_ISSET(pvo)	((pvo)->pvo_vaddr & PVO_PTEGIDX_VALID)
#define	PVO_PTEGIDX_CLR(pvo)	\
	((void)((pvo)->pvo_vaddr &= ~(PVO_PTEGIDX_VALID|PVO_PTEGIDX_MASK)))
#define	PVO_PTEGIDX_SET(pvo, i)	\
	((void)((pvo)->pvo_vaddr |= (i)|PVO_PTEGIDX_VALID))
#define	PVO_VSID(pvo)		((pvo)->pvo_vpn >> 16)
124
125struct	pmap {
126	struct	mtx	pm_mtx;
127
128    #ifdef __powerpc64__
129	struct slbtnode	*pm_slb_tree_root;
130	struct slb	**pm_slb;
131	int		pm_slb_len;
132    #else
133	register_t	pm_sr[16];
134    #endif
135	cpuset_t	pm_active;
136
137	struct pmap	*pmap_phys;
138	struct		pmap_statistics	pm_stats;
139	struct pvo_head pmap_pvo;
140};
141
/* Machine-dependent per-vm_page data (AIM). */
struct	md_page {
	u_int64_t	 mdpg_attrs;		/* page attribute bits */
	vm_memattr_t	 mdpg_cache_attrs;	/* cache attributes */
	struct	pvo_head mdpg_pvoh;		/* list of mappings of this page */
};

#define	pmap_page_get_memattr(m)	((m)->md.mdpg_cache_attrs)
#define	pmap_page_is_mapped(m)	(!LIST_EMPTY(&(m)->md.mdpg_pvoh))
150
151/*
152 * Return the VSID corresponding to a given virtual address.
153 * If no VSID is currently defined, it will allocate one, and add
154 * it to a free slot if available.
155 *
156 * NB: The PMAP MUST be locked already.
157 */
158uint64_t va_to_vsid(pmap_t pm, vm_offset_t va);
159
160/* Lock-free, non-allocating lookup routines */
161uint64_t kernel_va_to_slbv(vm_offset_t va);
162struct slb *user_va_to_slb_entry(pmap_t pm, vm_offset_t va);
163
164uint64_t allocate_user_vsid(pmap_t pm, uint64_t esid, int large);
165void	free_vsid(pmap_t pm, uint64_t esid, int large);
166void	slb_insert_user(pmap_t pm, struct slb *slb);
167void	slb_insert_kernel(uint64_t slbe, uint64_t slbv);
168
169struct slbtnode *slb_alloc_tree(void);
170void     slb_free_tree(pmap_t pm);
171struct slb **slb_alloc_user_cache(void);
172void	slb_free_user_cache(struct slb **);
173
174#else
175
/* e500 (Book-E) pmap: one per address space; protected by pm_mtx. */
struct pmap {
	struct mtx		pm_mtx;		/* pmap mutex */
	tlbtid_t		pm_tid[MAXCPU];	/* TID to identify this pmap entries in TLB */
	cpuset_t		pm_active;	/* active on cpus */
	struct pmap_statistics	pm_stats;	/* pmap statistics */

	/* Page table directory, array of pointers to page tables. */
	pte_t			*pm_pdir[PDIR_NENTRIES];

	/* List of allocated ptbl bufs (ptbl kva regions). */
	TAILQ_HEAD(, ptbl_buf)	pm_ptbl_list;
};
typedef	struct pmap *pmap_t;
189
/* One virtual mapping of a physical page (entry on a page's pv list). */
struct pv_entry {
	pmap_t pv_pmap;			/* pmap owning the mapping */
	vm_offset_t pv_va;		/* virtual address of the mapping */
	TAILQ_ENTRY(pv_entry) pv_link;	/* linkage on md_page pv_list */
};
typedef struct pv_entry *pv_entry_t;
196
/* Machine-dependent per-vm_page data (e500). */
struct md_page {
	TAILQ_HEAD(, pv_entry) pv_list;	/* list of mappings of this page */
};

/* e500 does not track per-page cache attributes; always the default. */
#define	pmap_page_get_memattr(m)	VM_MEMATTR_DEFAULT
#define	pmap_page_is_mapped(m)	(!TAILQ_EMPTY(&(m)->md.pv_list))
203
204#endif /* AIM */
205
/* The kernel pmap is statically allocated. */
extern	struct pmap kernel_pmap_store;
#define	kernel_pmap	(&kernel_pmap_store)
208
209#ifdef _KERNEL
210
/*
 * Per-pmap lock operations, wrapping pm_mtx.  The kernel pmap's mutex
 * gets a distinct lock name ("kernelpmap" vs "pmap").
 */
#define	PMAP_LOCK(pmap)		mtx_lock(&(pmap)->pm_mtx)
#define	PMAP_LOCK_ASSERT(pmap, type) \
				mtx_assert(&(pmap)->pm_mtx, (type))
#define	PMAP_LOCK_DESTROY(pmap)	mtx_destroy(&(pmap)->pm_mtx)
#define	PMAP_LOCK_INIT(pmap)	mtx_init(&(pmap)->pm_mtx, \
				    (pmap == kernel_pmap) ? "kernelpmap" : \
				    "pmap", NULL, MTX_DEF)
#define	PMAP_LOCKED(pmap)	mtx_owned(&(pmap)->pm_mtx)
#define	PMAP_MTX(pmap)		(&(pmap)->pm_mtx)
#define	PMAP_TRYLOCK(pmap)	mtx_trylock(&(pmap)->pm_mtx)
#define	PMAP_UNLOCK(pmap)	mtx_unlock(&(pmap)->pm_mtx)

/* True if the page has any writable mapping (PGA_WRITEABLE set). */
#define	pmap_page_is_write_mapped(m)	(((m)->aflags & PGA_WRITEABLE) != 0)
224
/* Machine-dependent pmap interface, implemented by the MMU modules. */
void		pmap_bootstrap(vm_offset_t, vm_offset_t);	/* early boot setup */
void		pmap_kenter(vm_offset_t va, vm_offset_t pa);	/* enter kernel mapping */
void		pmap_kenter_attr(vm_offset_t va, vm_offset_t pa, vm_memattr_t);
void		pmap_kremove(vm_offset_t);		/* remove kernel mapping */
void		*pmap_mapdev(vm_offset_t, vm_size_t);	/* map device memory */
void		*pmap_mapdev_attr(vm_offset_t, vm_size_t, vm_memattr_t);
void		pmap_unmapdev(vm_offset_t, vm_size_t);
void		pmap_page_set_memattr(vm_page_t, vm_memattr_t);
void		pmap_deactivate(struct thread *);
vm_offset_t	pmap_kextract(vm_offset_t);	/* kernel VA -> PA */
int		pmap_dev_direct_mapped(vm_offset_t, vm_size_t);
boolean_t	pmap_mmu_install(char *name, int prio);	/* select MMU implementation */

/* Kernel virtual-to-physical translation. */
#define	vtophys(va)	pmap_kextract((vm_offset_t)(va))
239
240#define PHYS_AVAIL_SZ	128
241extern	vm_offset_t phys_avail[PHYS_AVAIL_SZ];
242extern	vm_offset_t virtual_avail;
243extern	vm_offset_t virtual_end;
244
245extern	vm_offset_t msgbuf_phys;
246
247extern	int pmap_bootstrapped;
248
249extern vm_offset_t pmap_dumpsys_map(struct pmap_md *, vm_size_t, vm_size_t *);
250extern void pmap_dumpsys_unmap(struct pmap_md *, vm_size_t, vm_offset_t);
251
252extern struct pmap_md *pmap_scan_md(struct pmap_md *);
253
254#endif
255
256#endif /* !_MACHINE_PMAP_H_ */
257