Diff view (full, compact): "Deleted" column is the old revision, "Added" column is the new revision.
Old: tlb.h (FreeBSD revision 91616)    New: tlb.h (FreeBSD revision 91782)
1/*-
2 * Copyright (c) 2001 Jake Burkholder.
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 * 1. Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer.
10 * 2. Redistributions in binary form must reproduce the above copyright
11 * notice, this list of conditions and the following disclaimer in the
12 * documentation and/or other materials provided with the distribution.
13 *
14 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
15 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
16 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
17 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
18 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
19 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
20 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
21 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
22 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
23 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
24 * SUCH DAMAGE.
25 *
1/*-
2 * Copyright (c) 2001 Jake Burkholder.
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 * 1. Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer.
10 * 2. Redistributions in binary form must reproduce the above copyright
11 * notice, this list of conditions and the following disclaimer in the
12 * documentation and/or other materials provided with the distribution.
13 *
14 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
15 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
16 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
17 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
18 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
19 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
20 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
21 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
22 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
23 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
24 * SUCH DAMAGE.
25 *
26 * $FreeBSD: head/sys/sparc64/include/tlb.h 91616 2002-03-04 07:07:10Z jake $
26 * $FreeBSD: head/sys/sparc64/include/tlb.h 91782 2002-03-07 05:25:15Z jake $
27 */
28
29#ifndef _MACHINE_TLB_H_
30#define _MACHINE_TLB_H_
31
32#define TLB_SLOT_COUNT 64
33
34#define TLB_SLOT_TSB_KERNEL_MIN 62 /* XXX */
35#define TLB_SLOT_KERNEL 63
36
37#define TLB_DAR_SLOT_SHIFT (3)
38#define TLB_DAR_SLOT(slot) ((slot) << TLB_DAR_SLOT_SHIFT)
39
40#define TAR_VPN_SHIFT (13)
41#define TAR_CTX_MASK ((1 << TAR_VPN_SHIFT) - 1)
42
43#define TLB_TAR_VA(va) ((va) & ~TAR_CTX_MASK)
44#define TLB_TAR_CTX(ctx) ((ctx) & TAR_CTX_MASK)
45
46#define TLB_DEMAP_ID_SHIFT (4)
47#define TLB_DEMAP_ID_PRIMARY (0)
48#define TLB_DEMAP_ID_SECONDARY (1)
49#define TLB_DEMAP_ID_NUCLEUS (2)
50
51#define TLB_DEMAP_TYPE_SHIFT (6)
52#define TLB_DEMAP_TYPE_PAGE (0)
53#define TLB_DEMAP_TYPE_CONTEXT (1)
54
55#define TLB_DEMAP_VA(va) ((va) & ~PAGE_MASK)
56#define TLB_DEMAP_ID(id) ((id) << TLB_DEMAP_ID_SHIFT)
57#define TLB_DEMAP_TYPE(type) ((type) << TLB_DEMAP_TYPE_SHIFT)
58
59#define TLB_DEMAP_PAGE (TLB_DEMAP_TYPE(TLB_DEMAP_TYPE_PAGE))
60#define TLB_DEMAP_CONTEXT (TLB_DEMAP_TYPE(TLB_DEMAP_TYPE_CONTEXT))
61
62#define TLB_DEMAP_PRIMARY (TLB_DEMAP_ID(TLB_DEMAP_ID_PRIMARY))
63#define TLB_DEMAP_SECONDARY (TLB_DEMAP_ID(TLB_DEMAP_ID_SECONDARY))
64#define TLB_DEMAP_NUCLEUS (TLB_DEMAP_ID(TLB_DEMAP_ID_NUCLEUS))
65
66#define TLB_CTX_KERNEL (0)
67#define TLB_CTX_USER_MIN (1)
68#define TLB_CTX_USER_MAX (8192)
69
70#define TLB_DTLB (1 << 0)
71#define TLB_ITLB (1 << 1)
72
73#define MMU_SFSR_ASI_SHIFT (16)
74#define MMU_SFSR_FT_SHIFT (7)
75#define MMU_SFSR_E_SHIFT (6)
76#define MMU_SFSR_CT_SHIFT (4)
77#define MMU_SFSR_PR_SHIFT (3)
78#define MMU_SFSR_W_SHIFT (2)
79#define MMU_SFSR_OW_SHIFT (1)
80#define MMU_SFSR_FV_SHIFT (0)
81
82#define MMU_SFSR_ASI_SIZE (8)
83#define MMU_SFSR_FT_SIZE (6)
84#define MMU_SFSR_CT_SIZE (2)
85
86#define MMU_SFSR_W (1L << MMU_SFSR_W_SHIFT)
87
88extern int kernel_tlb_slots;
89extern struct tte *kernel_ttes;
90
91/*
92 * Some tlb operations must be atomical, so no interrupt or trap can be allowed
93 * while they are in progress. Traps should not happen, but interrupts need to
94 * be explicitely disabled. critical_enter() cannot be used here, since it only
95 * disables soft interrupts.
96 * XXX: is something like this needed elsewhere, too?
97 */
98
/*
 * Flush every data-TLB entry belonging to the primary context.
 * The demap is triggered by a store to the DMMU demap ASI;
 * membar(Sync) waits for the operation to complete.
 */
 99static __inline void
 100tlb_dtlb_context_primary_demap(void)
 101{
 102	stxa(TLB_DEMAP_PRIMARY | TLB_DEMAP_CONTEXT, ASI_DMMU_DEMAP, 0);
 103	membar(Sync);
 104}
105
106static __inline void
27 */
28
29#ifndef _MACHINE_TLB_H_
30#define _MACHINE_TLB_H_
31
32#define TLB_SLOT_COUNT 64
33
34#define TLB_SLOT_TSB_KERNEL_MIN 62 /* XXX */
35#define TLB_SLOT_KERNEL 63
36
37#define TLB_DAR_SLOT_SHIFT (3)
38#define TLB_DAR_SLOT(slot) ((slot) << TLB_DAR_SLOT_SHIFT)
39
40#define TAR_VPN_SHIFT (13)
41#define TAR_CTX_MASK ((1 << TAR_VPN_SHIFT) - 1)
42
43#define TLB_TAR_VA(va) ((va) & ~TAR_CTX_MASK)
44#define TLB_TAR_CTX(ctx) ((ctx) & TAR_CTX_MASK)
45
46#define TLB_DEMAP_ID_SHIFT (4)
47#define TLB_DEMAP_ID_PRIMARY (0)
48#define TLB_DEMAP_ID_SECONDARY (1)
49#define TLB_DEMAP_ID_NUCLEUS (2)
50
51#define TLB_DEMAP_TYPE_SHIFT (6)
52#define TLB_DEMAP_TYPE_PAGE (0)
53#define TLB_DEMAP_TYPE_CONTEXT (1)
54
55#define TLB_DEMAP_VA(va) ((va) & ~PAGE_MASK)
56#define TLB_DEMAP_ID(id) ((id) << TLB_DEMAP_ID_SHIFT)
57#define TLB_DEMAP_TYPE(type) ((type) << TLB_DEMAP_TYPE_SHIFT)
58
59#define TLB_DEMAP_PAGE (TLB_DEMAP_TYPE(TLB_DEMAP_TYPE_PAGE))
60#define TLB_DEMAP_CONTEXT (TLB_DEMAP_TYPE(TLB_DEMAP_TYPE_CONTEXT))
61
62#define TLB_DEMAP_PRIMARY (TLB_DEMAP_ID(TLB_DEMAP_ID_PRIMARY))
63#define TLB_DEMAP_SECONDARY (TLB_DEMAP_ID(TLB_DEMAP_ID_SECONDARY))
64#define TLB_DEMAP_NUCLEUS (TLB_DEMAP_ID(TLB_DEMAP_ID_NUCLEUS))
65
66#define TLB_CTX_KERNEL (0)
67#define TLB_CTX_USER_MIN (1)
68#define TLB_CTX_USER_MAX (8192)
69
70#define TLB_DTLB (1 << 0)
71#define TLB_ITLB (1 << 1)
72
73#define MMU_SFSR_ASI_SHIFT (16)
74#define MMU_SFSR_FT_SHIFT (7)
75#define MMU_SFSR_E_SHIFT (6)
76#define MMU_SFSR_CT_SHIFT (4)
77#define MMU_SFSR_PR_SHIFT (3)
78#define MMU_SFSR_W_SHIFT (2)
79#define MMU_SFSR_OW_SHIFT (1)
80#define MMU_SFSR_FV_SHIFT (0)
81
82#define MMU_SFSR_ASI_SIZE (8)
83#define MMU_SFSR_FT_SIZE (6)
84#define MMU_SFSR_CT_SIZE (2)
85
86#define MMU_SFSR_W (1L << MMU_SFSR_W_SHIFT)
87
88extern int kernel_tlb_slots;
89extern struct tte *kernel_ttes;
90
91/*
92 * Some tlb operations must be atomical, so no interrupt or trap can be allowed
93 * while they are in progress. Traps should not happen, but interrupts need to
94 * be explicitely disabled. critical_enter() cannot be used here, since it only
95 * disables soft interrupts.
96 * XXX: is something like this needed elsewhere, too?
97 */
98
/*
 * Flush every data-TLB entry belonging to the primary context.
 * The demap is triggered by a store to the DMMU demap ASI;
 * membar(Sync) waits for the operation to complete.
 */
 99static __inline void
 100tlb_dtlb_context_primary_demap(void)
 101{
 102	stxa(TLB_DEMAP_PRIMARY | TLB_DEMAP_CONTEXT, ASI_DMMU_DEMAP, 0);
 103	membar(Sync);
 104}
105
106static __inline void
107tlb_dtlb_page_demap(u_long ctx, vm_offset_t va)
107tlb_dtlb_page_demap(struct pmap *pm, vm_offset_t va)
108{
108{
109 u_int ctx;
110
111 ctx = pm->pm_context[PCPU_GET(cpuid)];
109 if (ctx == TLB_CTX_KERNEL) {
110 stxa(TLB_DEMAP_VA(va) | TLB_DEMAP_NUCLEUS | TLB_DEMAP_PAGE,
111 ASI_DMMU_DEMAP, 0);
112 membar(Sync);
113 } else if (ctx != -1) {
114 stxa(TLB_DEMAP_VA(va) | TLB_DEMAP_PRIMARY | TLB_DEMAP_PAGE,
115 ASI_DMMU_DEMAP, 0);
116 membar(Sync);
117 }
118}
119
/*
 * Insert a mapping into the data TLB, letting the MMU choose the
 * replacement slot.  The tag access register is loaded with the
 * target VA/context, then the TTE data is stored through the
 * data-in register.  Interrupts are disabled so nothing can clobber
 * the tag access register between the two stores (traps are assumed
 * not to occur here; see the atomicity comment earlier in the file).
 */
 120static __inline void
 121tlb_dtlb_store(vm_offset_t va, u_long ctx, struct tte tte)
 122{
 123	u_long pst;
 124
 125	pst = intr_disable();
 126	stxa(AA_DMMU_TAR, ASI_DMMU,
 127	    TLB_TAR_VA(va) | TLB_TAR_CTX(ctx));
 128	stxa(0, ASI_DTLB_DATA_IN_REG, tte.tte_data);
 129	membar(Sync);
 130	intr_restore(pst);
 131}
132
/*
 * Insert a mapping into a specific data-TLB slot (used with the
 * reserved TLB_SLOT_* entries defined above).  Same tag-then-data
 * sequence as tlb_dtlb_store, but the data store goes through the
 * data access register so the slot number can be given explicitly.
 */
 133static __inline void
 134tlb_dtlb_store_slot(vm_offset_t va, u_long ctx, struct tte tte, int slot)
 135{
 136	u_long pst;
 137
 138	pst = intr_disable();
 139	stxa(AA_DMMU_TAR, ASI_DMMU, TLB_TAR_VA(va) | TLB_TAR_CTX(ctx));
 140	stxa(TLB_DAR_SLOT(slot), ASI_DTLB_DATA_ACCESS_REG, tte.tte_data);
 141	membar(Sync);
 142	intr_restore(pst);
 143}
144
/*
 * Flush every instruction-TLB entry belonging to the primary
 * context, via a store to the IMMU demap ASI; membar(Sync) waits
 * for the operation to complete.
 */
 145static __inline void
 146tlb_itlb_context_primary_demap(void)
 147{
 148	stxa(TLB_DEMAP_PRIMARY | TLB_DEMAP_CONTEXT, ASI_IMMU_DEMAP, 0);
 149	membar(Sync);
 150}
151
152static __inline void
112 if (ctx == TLB_CTX_KERNEL) {
113 stxa(TLB_DEMAP_VA(va) | TLB_DEMAP_NUCLEUS | TLB_DEMAP_PAGE,
114 ASI_DMMU_DEMAP, 0);
115 membar(Sync);
116 } else if (ctx != -1) {
117 stxa(TLB_DEMAP_VA(va) | TLB_DEMAP_PRIMARY | TLB_DEMAP_PAGE,
118 ASI_DMMU_DEMAP, 0);
119 membar(Sync);
120 }
121}
122
/*
 * Insert a mapping into the data TLB, letting the MMU choose the
 * replacement slot.  The tag access register is loaded with the
 * target VA/context, then the TTE data is stored through the
 * data-in register.  Interrupts are disabled so nothing can clobber
 * the tag access register between the two stores (traps are assumed
 * not to occur here; see the atomicity comment earlier in the file).
 */
 123static __inline void
 124tlb_dtlb_store(vm_offset_t va, u_long ctx, struct tte tte)
 125{
 126	u_long pst;
 127
 128	pst = intr_disable();
 129	stxa(AA_DMMU_TAR, ASI_DMMU,
 130	    TLB_TAR_VA(va) | TLB_TAR_CTX(ctx));
 131	stxa(0, ASI_DTLB_DATA_IN_REG, tte.tte_data);
 132	membar(Sync);
 133	intr_restore(pst);
 134}
135
/*
 * Insert a mapping into a specific data-TLB slot (used with the
 * reserved TLB_SLOT_* entries defined above).  Same tag-then-data
 * sequence as tlb_dtlb_store, but the data store goes through the
 * data access register so the slot number can be given explicitly.
 */
 136static __inline void
 137tlb_dtlb_store_slot(vm_offset_t va, u_long ctx, struct tte tte, int slot)
 138{
 139	u_long pst;
 140
 141	pst = intr_disable();
 142	stxa(AA_DMMU_TAR, ASI_DMMU, TLB_TAR_VA(va) | TLB_TAR_CTX(ctx));
 143	stxa(TLB_DAR_SLOT(slot), ASI_DTLB_DATA_ACCESS_REG, tte.tte_data);
 144	membar(Sync);
 145	intr_restore(pst);
 146}
147
/*
 * Flush every instruction-TLB entry belonging to the primary
 * context, via a store to the IMMU demap ASI; membar(Sync) waits
 * for the operation to complete.
 */
 148static __inline void
 149tlb_itlb_context_primary_demap(void)
 150{
 151	stxa(TLB_DEMAP_PRIMARY | TLB_DEMAP_CONTEXT, ASI_IMMU_DEMAP, 0);
 152	membar(Sync);
 153}
154
155static __inline void
153tlb_itlb_page_demap(u_long ctx, vm_offset_t va)
156tlb_itlb_page_demap(struct pmap *pm, vm_offset_t va)
154{
157{
158 u_int ctx;
159
160 ctx = pm->pm_context[PCPU_GET(cpuid)];
155 if (ctx == TLB_CTX_KERNEL) {
156 stxa(TLB_DEMAP_VA(va) | TLB_DEMAP_NUCLEUS | TLB_DEMAP_PAGE,
157 ASI_IMMU_DEMAP, 0);
158 flush(KERNBASE);
159 } else if (ctx != -1) {
160 stxa(TLB_DEMAP_VA(va) | TLB_DEMAP_PRIMARY | TLB_DEMAP_PAGE,
161 ASI_IMMU_DEMAP, 0);
162 membar(Sync);
163 }
164}
165
/*
 * Insert a mapping into the instruction TLB, letting the MMU choose
 * the replacement slot (tag access register, then data-in register,
 * with interrupts disabled around the pair).  For a kernel-context
 * mapping the instruction at va is flushed so the new translation
 * takes effect; for user contexts the page cannot be accessed from
 * here, so only a membar(Sync) is performed (see inline comment).
 */
 166static __inline void
 167tlb_itlb_store(vm_offset_t va, u_long ctx, struct tte tte)
 168{
 169	u_long pst;
 170
 171	pst = intr_disable();
 172	stxa(AA_IMMU_TAR, ASI_IMMU, TLB_TAR_VA(va) | TLB_TAR_CTX(ctx));
 173	stxa(0, ASI_ITLB_DATA_IN_REG, tte.tte_data);
 174	if (ctx == TLB_CTX_KERNEL)
 175	flush(va);
 176	else {
 177	/*
 178	* flush probably not needed and impossible here, no access to
 179	* user page.
 180	*/
 181	membar(Sync);
 182	}
 183	intr_restore(pst);
 184}
185
186static __inline void
161 if (ctx == TLB_CTX_KERNEL) {
162 stxa(TLB_DEMAP_VA(va) | TLB_DEMAP_NUCLEUS | TLB_DEMAP_PAGE,
163 ASI_IMMU_DEMAP, 0);
164 flush(KERNBASE);
165 } else if (ctx != -1) {
166 stxa(TLB_DEMAP_VA(va) | TLB_DEMAP_PRIMARY | TLB_DEMAP_PAGE,
167 ASI_IMMU_DEMAP, 0);
168 membar(Sync);
169 }
170}
171
/*
 * Insert a mapping into the instruction TLB, letting the MMU choose
 * the replacement slot (tag access register, then data-in register,
 * with interrupts disabled around the pair).  For a kernel-context
 * mapping the instruction at va is flushed so the new translation
 * takes effect; for user contexts the page cannot be accessed from
 * here, so only a membar(Sync) is performed (see inline comment).
 */
 172static __inline void
 173tlb_itlb_store(vm_offset_t va, u_long ctx, struct tte tte)
 174{
 175	u_long pst;
 176
 177	pst = intr_disable();
 178	stxa(AA_IMMU_TAR, ASI_IMMU, TLB_TAR_VA(va) | TLB_TAR_CTX(ctx));
 179	stxa(0, ASI_ITLB_DATA_IN_REG, tte.tte_data);
 180	if (ctx == TLB_CTX_KERNEL)
 181	flush(va);
 182	else {
 183	/*
 184	* flush probably not needed and impossible here, no access to
 185	* user page.
 186	*/
 187	membar(Sync);
 188	}
 189	intr_restore(pst);
 190}
191
192static __inline void
187tlb_context_demap(u_int ctx)
193tlb_context_demap(struct pmap *pm)
188{
194{
195 u_int ctx;
196
197 ctx = pm->pm_context[PCPU_GET(cpuid)];
189 if (ctx != -1) {
190 tlb_dtlb_context_primary_demap();
191 tlb_itlb_context_primary_demap();
192 }
193}
194
/*
 * Insert a mapping into a specific instruction-TLB slot (used with
 * the reserved TLB_SLOT_* entries defined above).  The data store
 * goes through the data access register so the slot number can be
 * given explicitly; flush(va) makes the new translation visible.
 */
 195static __inline void
 196tlb_itlb_store_slot(vm_offset_t va, u_long ctx, struct tte tte, int slot)
 197{
 198	u_long pst;
 199
 200	pst = intr_disable();
 201	stxa(AA_IMMU_TAR, ASI_IMMU, TLB_TAR_VA(va) | TLB_TAR_CTX(ctx));
 202	stxa(TLB_DAR_SLOT(slot), ASI_ITLB_DATA_ACCESS_REG, tte.tte_data);
 203	flush(va);
 204	intr_restore(pst);
 205}
206
207static __inline void
198 if (ctx != -1) {
199 tlb_dtlb_context_primary_demap();
200 tlb_itlb_context_primary_demap();
201 }
202}
203
/*
 * Insert a mapping into a specific instruction-TLB slot (used with
 * the reserved TLB_SLOT_* entries defined above).  The data store
 * goes through the data access register so the slot number can be
 * given explicitly; flush(va) makes the new translation visible.
 */
 204static __inline void
 205tlb_itlb_store_slot(vm_offset_t va, u_long ctx, struct tte tte, int slot)
 206{
 207	u_long pst;
 208
 209	pst = intr_disable();
 210	stxa(AA_IMMU_TAR, ASI_IMMU, TLB_TAR_VA(va) | TLB_TAR_CTX(ctx));
 211	stxa(TLB_DAR_SLOT(slot), ASI_ITLB_DATA_ACCESS_REG, tte.tte_data);
 212	flush(va);
 213	intr_restore(pst);
 214}
215
216static __inline void
208tlb_page_demap(u_int tlb, u_int ctx, vm_offset_t va)
217tlb_page_demap(u_int tlb, struct pmap *pm, vm_offset_t va)
209{
210 if (tlb & TLB_DTLB)
218{
219 if (tlb & TLB_DTLB)
211 tlb_dtlb_page_demap(ctx, va);
220 tlb_dtlb_page_demap(pm, va);
212 if (tlb & TLB_ITLB)
221 if (tlb & TLB_ITLB)
213 tlb_itlb_page_demap(ctx, va);
222 tlb_itlb_page_demap(pm, va);
214}
215
216static __inline void
223}
224
225static __inline void
217tlb_range_demap(u_int ctx, vm_offset_t start, vm_offset_t end)
226tlb_range_demap(struct pmap *pm, vm_offset_t start, vm_offset_t end)
218{
219 for (; start < end; start += PAGE_SIZE)
227{
228 for (; start < end; start += PAGE_SIZE)
220 tlb_page_demap(TLB_DTLB | TLB_ITLB, ctx, start);
229 tlb_page_demap(TLB_DTLB | TLB_ITLB, pm, start);
221}
222
223static __inline void
230}
231
232static __inline void
224tlb_tte_demap(struct tte tte, u_int ctx)
233tlb_tte_demap(struct tte tte, struct pmap *pm)
225{
234{
226 tlb_page_demap(TD_GET_TLB(tte.tte_data), ctx, TV_GET_VA(tte.tte_vpn));
235 tlb_page_demap(TD_GET_TLB(tte.tte_data), pm, TV_GET_VA(tte.tte_vpn));
227}
228
/*
 * Insert a mapping into the data and/or instruction TLB as selected
 * by the TLB_DTLB/TLB_ITLB bits in 'tlb'.  The context must be
 * valid: -1 is used in the demap paths above to mean "no context",
 * hence the assertion.
 */
 229static __inline void
 230tlb_store(u_int tlb, vm_offset_t va, u_long ctx, struct tte tte)
 231{
 232	KASSERT(ctx != -1, ("tlb_store: invalid context"));
 233	if (tlb & TLB_DTLB)
 234	tlb_dtlb_store(va, ctx, tte);
 235	if (tlb & TLB_ITLB)
 236	tlb_itlb_store(va, ctx, tte);
 237}
238
/*
 * Like tlb_store(), but place the mapping into a fixed TLB slot in
 * the data and/or instruction TLB as selected by the
 * TLB_DTLB/TLB_ITLB bits in 'tlb'.  The context must be valid
 * (-1 means "no context" in the demap paths above).
 */
 239static __inline void
 240tlb_store_slot(u_int tlb, vm_offset_t va, u_long ctx, struct tte tte, int slot)
 241{
 242	KASSERT(ctx != -1, ("tlb_store_slot: invalid context"));
 243	if (tlb & TLB_DTLB)
 244	tlb_dtlb_store_slot(va, ctx, tte, slot);
 245	if (tlb & TLB_ITLB)
 246	tlb_itlb_store_slot(va, ctx, tte, slot);
 247}
248
249#endif /* !_MACHINE_TLB_H_ */
236}
237
/*
 * Insert a mapping into the data and/or instruction TLB as selected
 * by the TLB_DTLB/TLB_ITLB bits in 'tlb'.  The context must be
 * valid: -1 is used in the demap paths above to mean "no context",
 * hence the assertion.
 */
 238static __inline void
 239tlb_store(u_int tlb, vm_offset_t va, u_long ctx, struct tte tte)
 240{
 241	KASSERT(ctx != -1, ("tlb_store: invalid context"));
 242	if (tlb & TLB_DTLB)
 243	tlb_dtlb_store(va, ctx, tte);
 244	if (tlb & TLB_ITLB)
 245	tlb_itlb_store(va, ctx, tte);
 246}
247
/*
 * Like tlb_store(), but place the mapping into a fixed TLB slot in
 * the data and/or instruction TLB as selected by the
 * TLB_DTLB/TLB_ITLB bits in 'tlb'.  The context must be valid
 * (-1 means "no context" in the demap paths above).
 */
 248static __inline void
 249tlb_store_slot(u_int tlb, vm_offset_t va, u_long ctx, struct tte tte, int slot)
 250{
 251	KASSERT(ctx != -1, ("tlb_store_slot: invalid context"));
 252	if (tlb & TLB_DTLB)
 253	tlb_dtlb_store_slot(va, ctx, tte, slot);
 254	if (tlb & TLB_ITLB)
 255	tlb_itlb_store_slot(va, ctx, tte, slot);
 256}
257
258#endif /* !_MACHINE_TLB_H_ */