/*-
 * Copyright (c) 2001 Jake Burkholder.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD: head/sys/sparc64/include/tlb.h 91613 2002-03-04 05:20:29Z jake $
 */

#ifndef	_MACHINE_TLB_H_
#define	_MACHINE_TLB_H_

#define	TLB_SLOT_COUNT			64

#define	TLB_SLOT_TSB_KERNEL_MIN		62	/* XXX */
#define	TLB_SLOT_KERNEL			63

#define	TLB_DAR_SLOT_SHIFT		(3)
#define	TLB_DAR_SLOT(slot)		((slot) << TLB_DAR_SLOT_SHIFT)

#define	TAR_VPN_SHIFT			(13)
#define	TAR_CTX_MASK			((1 << TAR_VPN_SHIFT) - 1)

#define	TLB_TAR_VA(va)			((va) & ~TAR_CTX_MASK)
#define	TLB_TAR_CTX(ctx)		((ctx) & TAR_CTX_MASK)
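
/*
 * The MMU tag access register holds the virtual page number above
 * TAR_VPN_SHIFT and the context number in the low 13 bits, so a complete tag
 * is formed by or'ing the two macros above together.  Illustrative sketch
 * only (the store routines below do exactly this); 'va' and 'ctx' are
 * hypothetical values:
 *
 *	stxa(AA_DMMU_TAR, ASI_DMMU, TLB_TAR_VA(va) | TLB_TAR_CTX(ctx));
 */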

#define	TLB_DEMAP_ID_SHIFT		(4)
#define	TLB_DEMAP_ID_PRIMARY		(0)
#define	TLB_DEMAP_ID_SECONDARY		(1)
#define	TLB_DEMAP_ID_NUCLEUS		(2)

#define	TLB_DEMAP_TYPE_SHIFT		(6)
#define	TLB_DEMAP_TYPE_PAGE		(0)
#define	TLB_DEMAP_TYPE_CONTEXT		(1)

#define	TLB_DEMAP_VA(va)		((va) & ~PAGE_MASK)
#define	TLB_DEMAP_ID(id)		((id) << TLB_DEMAP_ID_SHIFT)
#define	TLB_DEMAP_TYPE(type)		((type) << TLB_DEMAP_TYPE_SHIFT)

#define	TLB_DEMAP_PAGE			(TLB_DEMAP_TYPE(TLB_DEMAP_TYPE_PAGE))
#define	TLB_DEMAP_CONTEXT		(TLB_DEMAP_TYPE(TLB_DEMAP_TYPE_CONTEXT))

#define	TLB_DEMAP_PRIMARY		(TLB_DEMAP_ID(TLB_DEMAP_ID_PRIMARY))
#define	TLB_DEMAP_SECONDARY		(TLB_DEMAP_ID(TLB_DEMAP_ID_SECONDARY))
#define	TLB_DEMAP_NUCLEUS		(TLB_DEMAP_ID(TLB_DEMAP_ID_NUCLEUS))
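
/*
 * A demap is requested by storing to ASI_{D,I}MMU_DEMAP with the operands
 * encoded in the store address: the page-aligned virtual address, the context
 * selector in the ID field and the operation in the type field.  Illustrative
 * sketch only (this mirrors tlb_dtlb_page_demap() below); 'va' is a
 * hypothetical address:
 *
 *	stxa(TLB_DEMAP_VA(va) | TLB_DEMAP_PRIMARY | TLB_DEMAP_PAGE,
 *	    ASI_DMMU_DEMAP, 0);
 *	membar(Sync);
 */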

#define	TLB_CTX_KERNEL			(0)
#define	TLB_CTX_USER_MIN		(1)
#define	TLB_CTX_USER_MAX		(8192)

#define	TLB_DTLB			(1 << 0)
#define	TLB_ITLB			(1 << 1)

#define	MMU_SFSR_ASI_SHIFT		(16)
#define	MMU_SFSR_FT_SHIFT		(7)
#define	MMU_SFSR_E_SHIFT		(6)
#define	MMU_SFSR_CT_SHIFT		(4)
#define	MMU_SFSR_PR_SHIFT		(3)
#define	MMU_SFSR_W_SHIFT		(2)
#define	MMU_SFSR_OW_SHIFT		(1)
#define	MMU_SFSR_FV_SHIFT		(0)

#define	MMU_SFSR_ASI_SIZE		(8)
#define	MMU_SFSR_FT_SIZE		(6)
#define	MMU_SFSR_CT_SIZE		(2)

#define	MMU_SFSR_W			(1L << MMU_SFSR_W_SHIFT)
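
/*
 * The shift/size pairs above describe fields of the synchronous fault status
 * register.  Multi-bit fields can be extracted with a shift and mask,
 * single-bit fields by testing the corresponding bit.  Illustrative sketch
 * only; 'sfsr' is a hypothetical value read from the fault status register:
 *
 *	u_long ft = (sfsr >> MMU_SFSR_FT_SHIFT) &
 *	    ((1UL << MMU_SFSR_FT_SIZE) - 1);
 *	int is_write = (sfsr & MMU_SFSR_W) != 0;
 */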
87
88/*
89 * Some tlb operations must be atomical, so no interrupt or trap can be allowed
90 * while they are in progress. Traps should not happen, but interrupts need to
91 * be explicitely disabled. critical_enter() cannot be used here, since it only
92 * disables soft interrupts.
93 * XXX: is something like this needed elsewhere, too?
94 */
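
/*
 * The store routines below therefore bracket the two-step sequence (write the
 * tag access register, then write the TLB data register) with
 * intr_disable()/intr_restore(); if a trap or interrupt were taken in between,
 * its own MMU activity could overwrite the tag access register and the new
 * TTE would be entered under the wrong tag.  Sketch of the pattern used below:
 *
 *	u_long pst;
 *
 *	pst = intr_disable();
 *	... write tag access register and TLB data register ...
 *	intr_restore(pst);
 */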

static __inline void
tlb_dtlb_context_primary_demap(void)
{
	stxa(TLB_DEMAP_PRIMARY | TLB_DEMAP_CONTEXT, ASI_DMMU_DEMAP, 0);
	membar(Sync);
}

static __inline void
tlb_dtlb_page_demap(u_long ctx, vm_offset_t va)
{
	if (ctx == TLB_CTX_KERNEL) {
		stxa(TLB_DEMAP_VA(va) | TLB_DEMAP_NUCLEUS | TLB_DEMAP_PAGE,
		    ASI_DMMU_DEMAP, 0);
		membar(Sync);
	} else if (ctx != -1) {
		stxa(TLB_DEMAP_VA(va) | TLB_DEMAP_PRIMARY | TLB_DEMAP_PAGE,
		    ASI_DMMU_DEMAP, 0);
		membar(Sync);
	}
}

static __inline void
tlb_dtlb_store(vm_offset_t va, u_long ctx, struct tte tte)
{
	u_long pst;

	pst = intr_disable();
	stxa(AA_DMMU_TAR, ASI_DMMU,
	    TLB_TAR_VA(va) | TLB_TAR_CTX(ctx));
	stxa(0, ASI_DTLB_DATA_IN_REG, tte.tte_data);
	membar(Sync);
	intr_restore(pst);
}

static __inline void
tlb_dtlb_store_slot(vm_offset_t va, u_long ctx, struct tte tte, int slot)
{
	u_long pst;

	pst = intr_disable();
	stxa(AA_DMMU_TAR, ASI_DMMU, TLB_TAR_VA(va) | TLB_TAR_CTX(ctx));
	stxa(TLB_DAR_SLOT(slot), ASI_DTLB_DATA_ACCESS_REG, tte.tte_data);
	membar(Sync);
	intr_restore(pst);
}
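
/*
 * Note: tlb_dtlb_store() uses the data-in register, which lets the MMU pick
 * the entry to replace, while tlb_dtlb_store_slot() uses the data-access
 * register to write one specific entry, e.g. for fixed slots such as
 * TLB_SLOT_KERNEL.  Illustrative use only; 'va' and 'tte' are hypothetical:
 *
 *	tlb_dtlb_store_slot(va, TLB_CTX_KERNEL, tte, TLB_SLOT_KERNEL);
 */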

static __inline void
tlb_itlb_context_primary_demap(void)
{
	stxa(TLB_DEMAP_PRIMARY | TLB_DEMAP_CONTEXT, ASI_IMMU_DEMAP, 0);
	membar(Sync);
}

static __inline void
tlb_itlb_page_demap(u_long ctx, vm_offset_t va)
{
	if (ctx == TLB_CTX_KERNEL) {
		stxa(TLB_DEMAP_VA(va) | TLB_DEMAP_NUCLEUS | TLB_DEMAP_PAGE,
		    ASI_IMMU_DEMAP, 0);
		flush(KERNBASE);
	} else if (ctx != -1) {
		stxa(TLB_DEMAP_VA(va) | TLB_DEMAP_PRIMARY | TLB_DEMAP_PAGE,
		    ASI_IMMU_DEMAP, 0);
		membar(Sync);
	}
}

static __inline void
tlb_itlb_store(vm_offset_t va, u_long ctx, struct tte tte)
{
	u_long pst;

	pst = intr_disable();
	stxa(AA_IMMU_TAR, ASI_IMMU, TLB_TAR_VA(va) | TLB_TAR_CTX(ctx));
	stxa(0, ASI_ITLB_DATA_IN_REG, tte.tte_data);
	if (ctx == TLB_CTX_KERNEL)
		flush(va);
	else {
		/*
		 * A flush is probably not needed here, and is not possible
		 * anyway, since we have no access to the user page.
		 */
		membar(Sync);
	}
	intr_restore(pst);
}

static __inline void
tlb_context_demap(u_int ctx)
{
	if (ctx != -1) {
		tlb_dtlb_context_primary_demap();
		tlb_itlb_context_primary_demap();
	}
}

static __inline void
tlb_itlb_store_slot(vm_offset_t va, u_long ctx, struct tte tte, int slot)
{
	u_long pst;

	pst = intr_disable();
	stxa(AA_IMMU_TAR, ASI_IMMU, TLB_TAR_VA(va) | TLB_TAR_CTX(ctx));
	stxa(TLB_DAR_SLOT(slot), ASI_ITLB_DATA_ACCESS_REG, tte.tte_data);
	flush(va);
	intr_restore(pst);
}

static __inline void
tlb_page_demap(u_int tlb, u_int ctx, vm_offset_t va)
{
	if (tlb & TLB_DTLB)
		tlb_dtlb_page_demap(ctx, va);
	if (tlb & TLB_ITLB)
		tlb_itlb_page_demap(ctx, va);
}

static __inline void
tlb_range_demap(u_int ctx, vm_offset_t start, vm_offset_t end)
{
	for (; start < end; start += PAGE_SIZE)
		tlb_page_demap(TLB_DTLB | TLB_ITLB, ctx, start);
}
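
/*
 * Illustrative use only; 'ctx', 'start' and 'end' are hypothetical values.
 * Flushing every mapping of a virtual address range from both TLBs:
 *
 *	tlb_range_demap(ctx, start, end);
 *
 * is equivalent to calling
 *
 *	tlb_page_demap(TLB_DTLB | TLB_ITLB, ctx, va);
 *
 * for each page-aligned va in [start, end).
 */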

static __inline void
tlb_tte_demap(struct tte tte, u_int ctx)
{
	tlb_page_demap(TD_GET_TLB(tte.tte_data), ctx, TV_GET_VA(tte.tte_vpn));
}

static __inline void
tlb_store(u_int tlb, vm_offset_t va, u_long ctx, struct tte tte)
{
	KASSERT(ctx != -1, ("tlb_store: invalid context"));
	if (tlb & TLB_DTLB)
		tlb_dtlb_store(va, ctx, tte);
	if (tlb & TLB_ITLB)
		tlb_itlb_store(va, ctx, tte);
}

static __inline void
tlb_store_slot(u_int tlb, vm_offset_t va, u_long ctx, struct tte tte, int slot)
{
	KASSERT(ctx != -1, ("tlb_store_slot: invalid context"));
	if (tlb & TLB_DTLB)
		tlb_dtlb_store_slot(va, ctx, tte, slot);
	if (tlb & TLB_ITLB)
		tlb_itlb_store_slot(va, ctx, tte, slot);
}

#endif /* !_MACHINE_TLB_H_ */