1/*-
2 * Copyright (c) 2001 Jake Burkholder.
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 * 1. Redistributions of source code must retain the above copyright

--- 9 unchanged lines hidden (view full) ---

18 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
19 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
20 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
21 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
22 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
23 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
24 * SUCH DAMAGE.
25 *
 * $FreeBSD: head/sys/sparc64/include/tlb.h 88629 2001-12-29 07:07:35Z jake $
27 */
28
#ifndef _MACHINE_TLB_H_
#define _MACHINE_TLB_H_

/*
 * Number of slots in each TLB.  Presumably the 64-entry fully-associative
 * TLBs of UltraSPARC I/II — TODO confirm for all supported CPUs.
 */
#define TLB_SLOT_COUNT 64

/*
 * First TLB slot used for locked kernel TSB mappings (slots
 * TLB_SLOT_TSB_KERNEL_MIN..TLB_SLOT_COUNT-1, presumably) — the XXX
 * marks this layout as provisional.
 */
#define TLB_SLOT_TSB_KERNEL_MIN 60 /* XXX */

--- 42 unchanged lines hidden (view full) ---

/* Bit position of the fault valid (FV) bit in the MMU SFSR. */
#define MMU_SFSR_FV_SHIFT (0)

/*
 * Widths (in bits) of the SFSR ASI, FT and CT fields; the matching
 * *_SHIFT constants are defined earlier in this file.  FT/CT are
 * presumably the fault-type and context fields of the UltraSPARC
 * SFSR — verify against the CPU manual.
 */
#define MMU_SFSR_ASI_SIZE (8)
#define MMU_SFSR_FT_SIZE (6)
#define MMU_SFSR_CT_SIZE (2)

/* Write (W) bit: the faulting access was a store — per the SFSR layout. */
#define MMU_SFSR_W (1L << MMU_SFSR_W_SHIFT)
84
/*
 * Some tlb operations must be atomic, so no interrupt or trap can be allowed
 * while they are in progress. Traps should not happen, but interrupts need to
 * be explicitly disabled. critical_enter() cannot be used here, since it only
 * disables soft interrupts.
 * XXX: is something like this needed elsewhere, too?
 */
/* Save the current %pstate into s, then clear PSTATE_IE to mask interrupts. */
#define TLB_ATOMIC_START(s) do { \
	(s) = rdpr(pstate); \
	wrpr(pstate, (s) & ~PSTATE_IE, 0); \
} while (0)
/* Restore the %pstate saved by TLB_ATOMIC_START (re-enables interrupts). */
#define TLB_ATOMIC_END(s) wrpr(pstate, (s), 0)
97
/*
 * Demap all dTLB entries belonging to the primary context.  The demap is
 * triggered by the store to ASI_DMMU_DEMAP; membar(Sync) ensures it has
 * completed before execution continues.
 */
static __inline void
tlb_dtlb_context_primary_demap(void)
{
	stxa(TLB_DEMAP_PRIMARY | TLB_DEMAP_CONTEXT, ASI_DMMU_DEMAP, 0);
	membar(Sync);
}
104
/*
 * Demap the dTLB entry for virtual address va in context ctx.
 *
 * Kernel mappings live in the nucleus context and are demapped directly.
 * For a user context, ctx is temporarily loaded into the secondary
 * context register so a secondary-context demap hits the right entries;
 * the register is restored to 0 afterwards.  Each ASI store is followed
 * by membar(Sync) so the operation completes before the next step.
 */
static __inline void
tlb_dtlb_page_demap(u_long ctx, vm_offset_t va)
{
	if (ctx == TLB_CTX_KERNEL) {
		stxa(TLB_DEMAP_VA(va) | TLB_DEMAP_NUCLEUS | TLB_DEMAP_PAGE,
		    ASI_DMMU_DEMAP, 0);
		membar(Sync);
	} else {
		/* Point the secondary context register at ctx. */
		stxa(AA_DMMU_SCXR, ASI_DMMU, ctx);
		membar(Sync);
		stxa(TLB_DEMAP_VA(va) | TLB_DEMAP_SECONDARY | TLB_DEMAP_PAGE,
		    ASI_DMMU_DEMAP, 0);
		membar(Sync);
		/* Restore the secondary context register to context 0. */
		stxa(AA_DMMU_SCXR, ASI_DMMU, 0);
		membar(Sync);
	}
}
122
/*
 * Enter tte for virtual address va in context ctx into the dTLB, using
 * the data-in register so the hardware chooses the slot (contrast with
 * tlb_dtlb_store_slot, which targets a specific slot).
 *
 * The tag access write and the data-in write must not be separated by a
 * trap or interrupt, hence the TLB_ATOMIC_START/END bracket.
 */
static __inline void
tlb_dtlb_store(vm_offset_t va, u_long ctx, struct tte tte)
{
	u_long pst;

	TLB_ATOMIC_START(pst);
	/* Set the tag (va and context) for the entry about to be loaded. */
	stxa(AA_DMMU_TAR, ASI_DMMU,
	    TLB_TAR_VA(va) | TLB_TAR_CTX(ctx));
	/* Writing the data-in register loads the entry into the TLB. */
	stxa(0, ASI_DTLB_DATA_IN_REG, tte.tte_data);
	membar(Sync);
	TLB_ATOMIC_END(pst);
}
135
/*
 * Enter tte for virtual address va in context ctx into a specific dTLB
 * slot, via the data access register (unlike tlb_dtlb_store, which lets
 * the hardware pick the slot).  Interrupts are disabled so the tag/data
 * register pair is written without an intervening trap.
 */
static __inline void
tlb_dtlb_store_slot(vm_offset_t va, u_long ctx, struct tte tte, int slot)
{
	u_long pst;

	TLB_ATOMIC_START(pst);
	stxa(AA_DMMU_TAR, ASI_DMMU, TLB_TAR_VA(va) | TLB_TAR_CTX(ctx));
	stxa(TLB_DAR_SLOT(slot), ASI_DTLB_DATA_ACCESS_REG, tte.tte_data);
	membar(Sync);
	TLB_ATOMIC_END(pst);
}
147
/*
 * Demap all iTLB entries belonging to the primary context; the store to
 * ASI_IMMU_DEMAP starts the demap, membar(Sync) waits for completion.
 */
static __inline void
tlb_itlb_context_primary_demap(void)
{
	stxa(TLB_DEMAP_PRIMARY | TLB_DEMAP_CONTEXT, ASI_IMMU_DEMAP, 0);
	membar(Sync);
}
154
/*
 * Demap the iTLB entry for virtual address va in context ctx.
 *
 * Kernel (nucleus) demaps are followed by a flush of a kernel address
 * (KERNBASE) to synchronize the instruction stream.  For a user context,
 * ctx is temporarily loaded into the secondary context register — which
 * lives in the dMMU, hence the ASI_DMMU stores — and a secondary-context
 * demap is issued to the iMMU.
 */
static __inline void
tlb_itlb_page_demap(u_long ctx, vm_offset_t va)
{
	if (ctx == TLB_CTX_KERNEL) {
		stxa(TLB_DEMAP_VA(va) | TLB_DEMAP_NUCLEUS | TLB_DEMAP_PAGE,
		    ASI_IMMU_DEMAP, 0);
		flush(KERNBASE);
	} else {
		stxa(AA_DMMU_SCXR, ASI_DMMU, ctx);
		membar(Sync);
		stxa(TLB_DEMAP_VA(va) | TLB_DEMAP_SECONDARY | TLB_DEMAP_PAGE,
		    ASI_IMMU_DEMAP, 0);
		membar(Sync);
		/* Restore the secondary context register to context 0. */
		stxa(AA_DMMU_SCXR, ASI_DMMU, 0);
		/* flush probably not needed. */
		membar(Sync);
	}
}
173
/*
 * Enter tte for virtual address va in context ctx into the iTLB via the
 * data-in register (hardware-selected slot), with interrupts disabled
 * around the tag/data register pair.
 *
 * For kernel mappings the instruction stream is synchronized with
 * flush(va); a user va cannot be flushed from here (no access to the
 * user page), so a membar is used instead.
 */
static __inline void
tlb_itlb_store(vm_offset_t va, u_long ctx, struct tte tte)
{
	u_long pst;

	TLB_ATOMIC_START(pst);
	stxa(AA_IMMU_TAR, ASI_IMMU, TLB_TAR_VA(va) | TLB_TAR_CTX(ctx));
	stxa(0, ASI_ITLB_DATA_IN_REG, tte.tte_data);
	if (ctx == TLB_CTX_KERNEL)
		flush(va);
	else {
		/*
		 * flush probably not needed and impossible here, no access to
		 * user page.
		 */
		membar(Sync);
	}
	TLB_ATOMIC_END(pst);
}
193
194static __inline void
195tlb_context_primary_demap(u_int tlb)
196{
197 if (tlb & TLB_DTLB)
198 tlb_dtlb_context_primary_demap();
199 if (tlb & TLB_ITLB)
200 tlb_itlb_context_primary_demap();
201}
202
/*
 * Enter tte for virtual address va in context ctx into a specific iTLB
 * slot via the data access register, with interrupts disabled around the
 * tag/data register pair.
 *
 * NOTE(review): unlike tlb_itlb_store, this flushes va unconditionally —
 * presumably this is only ever called for locked kernel mappings, where
 * flush(va) is valid; confirm against the callers.
 */
static __inline void
tlb_itlb_store_slot(vm_offset_t va, u_long ctx, struct tte tte, int slot)
{
	u_long pst;

	TLB_ATOMIC_START(pst);
	stxa(AA_IMMU_TAR, ASI_IMMU, TLB_TAR_VA(va) | TLB_TAR_CTX(ctx));
	stxa(TLB_DAR_SLOT(slot), ASI_ITLB_DATA_ACCESS_REG, tte.tte_data);
	flush(va);
	TLB_ATOMIC_END(pst);
}
214
215static __inline void
216tlb_page_demap(u_int tlb, u_int ctx, vm_offset_t va)
217{
218 if (tlb & TLB_DTLB)
219 tlb_dtlb_page_demap(ctx, va);
220 if (tlb & TLB_ITLB)

--- 22 unchanged lines hidden ---