--- sys/sparc64/include/tlb.h (revision 88629)
+++ sys/sparc64/include/tlb.h (revision 91170)
@@ -1,242 +1,237 @@
 /*-
  * Copyright (c) 2001 Jake Burkholder.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions
  * are met:
  * 1. Redistributions of source code must retain the above copyright
  *    notice, this list of conditions and the following disclaimer.
  * 2. Redistributions in binary form must reproduce the above copyright
  *    notice, this list of conditions and the following disclaimer in the
  *    documentation and/or other materials provided with the distribution.
  *
  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
  * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
  * SUCH DAMAGE.
  *
- * $FreeBSD: head/sys/sparc64/include/tlb.h 88629 2001-12-29 07:07:35Z jake $
+ * $FreeBSD: head/sys/sparc64/include/tlb.h 91170 2002-02-23 20:59:35Z jake $
  */
 
 #ifndef _MACHINE_TLB_H_
 #define _MACHINE_TLB_H_
 
 #define TLB_SLOT_COUNT 64
 
 #define TLB_SLOT_TSB_KERNEL_MIN 60 /* XXX */
 #define TLB_SLOT_TSB_USER_PRIMARY 61
 #define TLB_SLOT_TSB_USER_SECONDARY 62
 #define TLB_SLOT_KERNEL 63
 
 #define TLB_DAR_SLOT_SHIFT (3)
 #define TLB_DAR_SLOT(slot) ((slot) << TLB_DAR_SLOT_SHIFT)
 
 #define TLB_TAR_VA(va) ((va) & ~PAGE_MASK)
 #define TLB_TAR_CTX(ctx) ((ctx) & PAGE_MASK)
 
 #define TLB_DEMAP_ID_SHIFT (4)
 #define TLB_DEMAP_ID_PRIMARY (0)
 #define TLB_DEMAP_ID_SECONDARY (1)
 #define TLB_DEMAP_ID_NUCLEUS (2)
 
 #define TLB_DEMAP_TYPE_SHIFT (6)
 #define TLB_DEMAP_TYPE_PAGE (0)
 #define TLB_DEMAP_TYPE_CONTEXT (1)
 
 #define TLB_DEMAP_VA(va) ((va) & ~PAGE_MASK)
 #define TLB_DEMAP_ID(id) ((id) << TLB_DEMAP_ID_SHIFT)
 #define TLB_DEMAP_TYPE(type) ((type) << TLB_DEMAP_TYPE_SHIFT)
 
 #define TLB_DEMAP_PAGE (TLB_DEMAP_TYPE(TLB_DEMAP_TYPE_PAGE))
 #define TLB_DEMAP_CONTEXT (TLB_DEMAP_TYPE(TLB_DEMAP_TYPE_CONTEXT))
 
 #define TLB_DEMAP_PRIMARY (TLB_DEMAP_ID(TLB_DEMAP_ID_PRIMARY))
 #define TLB_DEMAP_SECONDARY (TLB_DEMAP_ID(TLB_DEMAP_ID_SECONDARY))
 #define TLB_DEMAP_NUCLEUS (TLB_DEMAP_ID(TLB_DEMAP_ID_NUCLEUS))
 
 #define TLB_CTX_KERNEL (0)
 
 #define TLB_DTLB (1 << 0)
 #define TLB_ITLB (1 << 1)
 
 #define MMU_SFSR_ASI_SHIFT (16)
 #define MMU_SFSR_FT_SHIFT (7)
 #define MMU_SFSR_E_SHIFT (6)
 #define MMU_SFSR_CT_SHIFT (4)
 #define MMU_SFSR_PR_SHIFT (3)
 #define MMU_SFSR_W_SHIFT (2)
 #define MMU_SFSR_OW_SHIFT (1)
 #define MMU_SFSR_FV_SHIFT (0)
 
 #define MMU_SFSR_ASI_SIZE (8)
 #define MMU_SFSR_FT_SIZE (6)
 #define MMU_SFSR_CT_SIZE (2)
 
 #define MMU_SFSR_W (1L << MMU_SFSR_W_SHIFT)
 
 /*
  * Some tlb operations must be atomical, so no interrupt or trap can be allowed
  * while they are in progress. Traps should not happen, but interrupts need to
  * be explicitely disabled. critical_enter() cannot be used here, since it only
  * disables soft interrupts.
  * XXX: is something like this needed elsewhere, too?
  */
-#define TLB_ATOMIC_START(s) do { \
-        (s) = rdpr(pstate); \
-        wrpr(pstate, (s) & ~PSTATE_IE, 0); \
-} while (0)
-#define TLB_ATOMIC_END(s) wrpr(pstate, (s), 0)
 
 static __inline void
 tlb_dtlb_context_primary_demap(void)
 {
         stxa(TLB_DEMAP_PRIMARY | TLB_DEMAP_CONTEXT, ASI_DMMU_DEMAP, 0);
         membar(Sync);
 }
 
 static __inline void
 tlb_dtlb_page_demap(u_long ctx, vm_offset_t va)
 {
         if (ctx == TLB_CTX_KERNEL) {
                 stxa(TLB_DEMAP_VA(va) | TLB_DEMAP_NUCLEUS | TLB_DEMAP_PAGE,
                     ASI_DMMU_DEMAP, 0);
                 membar(Sync);
         } else {
                 stxa(AA_DMMU_SCXR, ASI_DMMU, ctx);
                 membar(Sync);
                 stxa(TLB_DEMAP_VA(va) | TLB_DEMAP_SECONDARY | TLB_DEMAP_PAGE,
                     ASI_DMMU_DEMAP, 0);
                 membar(Sync);
                 stxa(AA_DMMU_SCXR, ASI_DMMU, 0);
                 membar(Sync);
         }
 }
 
 static __inline void
 tlb_dtlb_store(vm_offset_t va, u_long ctx, struct tte tte)
 {
         u_long pst;
 
-        TLB_ATOMIC_START(pst);
+        pst = intr_disable();
         stxa(AA_DMMU_TAR, ASI_DMMU,
             TLB_TAR_VA(va) | TLB_TAR_CTX(ctx));
         stxa(0, ASI_DTLB_DATA_IN_REG, tte.tte_data);
         membar(Sync);
-        TLB_ATOMIC_END(pst);
+        intr_restore(pst);
 }
 
 static __inline void
 tlb_dtlb_store_slot(vm_offset_t va, u_long ctx, struct tte tte, int slot)
 {
         u_long pst;
 
-        TLB_ATOMIC_START(pst);
+        pst = intr_disable();
         stxa(AA_DMMU_TAR, ASI_DMMU, TLB_TAR_VA(va) | TLB_TAR_CTX(ctx));
         stxa(TLB_DAR_SLOT(slot), ASI_DTLB_DATA_ACCESS_REG, tte.tte_data);
         membar(Sync);
-        TLB_ATOMIC_END(pst);
+        intr_restore(pst);
 }
 
 static __inline void
 tlb_itlb_context_primary_demap(void)
 {
         stxa(TLB_DEMAP_PRIMARY | TLB_DEMAP_CONTEXT, ASI_IMMU_DEMAP, 0);
         membar(Sync);
 }
 
 static __inline void
 tlb_itlb_page_demap(u_long ctx, vm_offset_t va)
 {
         if (ctx == TLB_CTX_KERNEL) {
                 stxa(TLB_DEMAP_VA(va) | TLB_DEMAP_NUCLEUS | TLB_DEMAP_PAGE,
                     ASI_IMMU_DEMAP, 0);
                 flush(KERNBASE);
         } else {
                 stxa(AA_DMMU_SCXR, ASI_DMMU, ctx);
                 membar(Sync);
                 stxa(TLB_DEMAP_VA(va) | TLB_DEMAP_SECONDARY | TLB_DEMAP_PAGE,
                     ASI_IMMU_DEMAP, 0);
                 membar(Sync);
                 stxa(AA_DMMU_SCXR, ASI_DMMU, 0);
                 /* flush probably not needed. */
                 membar(Sync);
         }
 }
 
 static __inline void
 tlb_itlb_store(vm_offset_t va, u_long ctx, struct tte tte)
 {
         u_long pst;
 
-        TLB_ATOMIC_START(pst);
+        pst = intr_disable();
         stxa(AA_IMMU_TAR, ASI_IMMU, TLB_TAR_VA(va) | TLB_TAR_CTX(ctx));
         stxa(0, ASI_ITLB_DATA_IN_REG, tte.tte_data);
         if (ctx == TLB_CTX_KERNEL)
                 flush(va);
         else {
                 /*
                  * flush probably not needed and impossible here, no access to
                  * user page.
                  */
                 membar(Sync);
         }
-        TLB_ATOMIC_END(pst);
+        intr_restore(pst);
 }
 
 static __inline void
 tlb_context_primary_demap(u_int tlb)
 {
         if (tlb & TLB_DTLB)
                 tlb_dtlb_context_primary_demap();
         if (tlb & TLB_ITLB)
                 tlb_itlb_context_primary_demap();
 }
 
 static __inline void
 tlb_itlb_store_slot(vm_offset_t va, u_long ctx, struct tte tte, int slot)
 {
         u_long pst;
 
-        TLB_ATOMIC_START(pst);
+        pst = intr_disable();
         stxa(AA_IMMU_TAR, ASI_IMMU, TLB_TAR_VA(va) | TLB_TAR_CTX(ctx));
         stxa(TLB_DAR_SLOT(slot), ASI_ITLB_DATA_ACCESS_REG, tte.tte_data);
         flush(va);
-        TLB_ATOMIC_END(pst);
+        intr_restore(pst);
 }
 
 static __inline void
 tlb_page_demap(u_int tlb, u_int ctx, vm_offset_t va)
 {
         if (tlb & TLB_DTLB)
                 tlb_dtlb_page_demap(ctx, va);
         if (tlb & TLB_ITLB)
                 tlb_itlb_page_demap(ctx, va);
 }
 
 static __inline void
 tlb_store(u_int tlb, vm_offset_t va, u_long ctx, struct tte tte)
 {
         if (tlb & TLB_DTLB)
                 tlb_dtlb_store(va, ctx, tte);
         if (tlb & TLB_ITLB)
                 tlb_itlb_store(va, ctx, tte);
 }
 
 static __inline void
 tlb_store_slot(u_int tlb, vm_offset_t va, u_long ctx, struct tte tte, int slot)
 {
         if (tlb & TLB_DTLB)
                 tlb_dtlb_store_slot(va, ctx, tte, slot);
         if (tlb & TLB_ITLB)
                 tlb_itlb_store_slot(va, ctx, tte, slot);
 }
 
 #endif /* !_MACHINE_TLB_H_ */
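
Note on the change above: the only functional difference between the two revisions is that the file-local TLB_ATOMIC_START()/TLB_ATOMIC_END() macros are dropped and the TLB store routines call intr_disable()/intr_restore() instead. As a hedged sketch of why this is behavior-preserving, the code below simply restates the removed macro bodies as an inline-function pair; it assumes the real intr_disable()/intr_restore() primitives (defined outside this header) do an equivalent save/clear/restore of the PSTATE interrupt-enable bit. The sketch_* names are hypothetical and appear in neither revision.

/*
 * Sketch only: the removed macro bodies rewritten in the shape of an
 * intr_disable()/intr_restore() pair.  Hypothetical names; the real
 * primitives used by revision 91170 live outside this header.
 */
static __inline u_long
sketch_intr_disable(void)
{
        u_long s;

        s = rdpr(pstate);                       /* save current %pstate */
        wrpr(pstate, s & ~PSTATE_IE, 0);        /* clear the interrupt enable bit */
        return (s);
}

static __inline void
sketch_intr_restore(u_long s)
{
        wrpr(pstate, s, 0);                     /* restore the saved %pstate */
}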
208}
209
210static __inline void
211tlb_page_demap(u_int tlb, u_int ctx, vm_offset_t va)
212{
213 if (tlb & TLB_DTLB)
214 tlb_dtlb_page_demap(ctx, va);
215 if (tlb & TLB_ITLB)
216 tlb_itlb_page_demap(ctx, va);
217}
218
219static __inline void
220tlb_store(u_int tlb, vm_offset_t va, u_long ctx, struct tte tte)
221{
222 if (tlb & TLB_DTLB)
223 tlb_dtlb_store(va, ctx, tte);
224 if (tlb & TLB_ITLB)
225 tlb_itlb_store(va, ctx, tte);
226}
227
228static __inline void
229tlb_store_slot(u_int tlb, vm_offset_t va, u_long ctx, struct tte tte, int slot)
230{
231 if (tlb & TLB_DTLB)
232 tlb_dtlb_store_slot(va, ctx, tte, slot);
233 if (tlb & TLB_ITLB)
234 tlb_itlb_store_slot(va, ctx, tte, slot);
235}
236
237#endif /* !_MACHINE_TLB_H_ */
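
For orientation, the wrappers at the bottom of the header (tlb_page_demap(), tlb_store(), tlb_store_slot()) fan one request out to the D-TLB and/or I-TLB according to the TLB_DTLB/TLB_ITLB mask. Below is a minimal, hypothetical usage sketch for a caller that replaces a kernel mapping; example_kenter() is not part of the file and only strings together the inlines defined above.

/*
 * Hypothetical caller, for illustration only: replace the kernel mapping
 * for va in both TLBs.  Any stale entry is demapped first, then the new
 * TTE is loaded; interrupt masking happens inside tlb_dtlb_store() and
 * tlb_itlb_store() via intr_disable()/intr_restore().
 */
static __inline void
example_kenter(vm_offset_t va, struct tte tte)
{
        tlb_page_demap(TLB_DTLB | TLB_ITLB, TLB_CTX_KERNEL, va);
        tlb_store(TLB_DTLB | TLB_ITLB, va, TLB_CTX_KERNEL, tte);
}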