head/sys/powerpc/booke/locore.S, r222391 (Deleted) → r222400 (Added)
1/*-
2 * Copyright (C) 2007-2009 Semihalf, Rafal Jaworowski <raj@semihalf.com>
3 * Copyright (C) 2006 Semihalf, Marian Balakowicz <m8@semihalf.com>
4 * All rights reserved.
5 *
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions
8 * are met:
9 * 1. Redistributions of source code must retain the above copyright
10 * notice, this list of conditions and the following disclaimer.
11 * 2. Redistributions in binary form must reproduce the above copyright
12 * notice, this list of conditions and the following disclaimer in the
13 * documentation and/or other materials provided with the distribution.
14 *
15 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
16 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
17 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN
18 * NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
19 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
20 * TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
21 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
22 * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
23 * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
24 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
25 *
Deleted:
 26 * $FreeBSD: head/sys/powerpc/booke/locore.S 222391 2011-05-27 23:09:12Z marcel $
Added:
 26 * $FreeBSD: head/sys/powerpc/booke/locore.S 222400 2011-05-28 04:10:44Z marcel $
28
29#include "assym.s"
30
31#include <machine/asm.h>
32#include <machine/hid.h>
33#include <machine/param.h>
34#include <machine/spr.h>
35#include <machine/pte.h>
36#include <machine/trap.h>
37#include <machine/vmparam.h>
38#include <machine/tlb.h>
39
40#define TMPSTACKSZ 16384
41
42 .text
43 .globl btext
44btext:
45
46/*
47 * This symbol is here for the benefit of kvm_mkdb, and is supposed to
48 * mark the start of kernel text.
49 */
50 .globl kernel_text
51kernel_text:
52
53/*
54 * Startup entry. Note, this must be the first thing in the text segment!
55 */
56 .text
57 .globl __start
58__start:
59
60/*
61 * Assumptions on the boot loader:
62 * - system memory starts from physical address 0
 63 * it's mapped by a single TLB1 entry
64 * - TLB1 mapping is 1:1 pa to va
65 * - kernel is loaded at 16MB boundary
66 * - all PID registers are set to the same value
67 * - CPU is running in AS=0
68 *
 69 * Register contents provided by the loader(8):
70 * r1 : stack pointer
71 * r3 : metadata pointer
72 *
73 * We rearrange the TLB1 layout as follows:
74 * - find TLB1 entry we started in
 75 * make sure it's protected, invalidate other entries
 76 * create temp entry in the second AS (make sure it's not TLB1[1])
77 * - switch to temp mapping
78 * - map 16MB of RAM in TLB1[1]
79 * - use AS=1, set EPN to KERNBASE and RPN to kernel load address
 80 * switch to the TLB1[1] mapping
81 * - invalidate temp mapping
82 *
 83 * locore register use:
84 * r1 : stack pointer
85 * r2 : trace pointer (AP only, for early diagnostics)
Deleted:
 86 * r3-r27 : scratch registers
 87 * r28 : kernload
 88 * r29 : temp TLB1 entry
 89 * r30 : initial TLB1 entry we started in
 90 * r31 : metadata pointer
Added:
 86 * r3-r26 : scratch registers
 87 * r27 : kernload
 88 * r28 : temp TLB1 entry
 89 * r29 : initial TLB1 entry we started in
 90 * r30-r31 : arguments (metadata pointer)
 91 */
 92
 93/*
Deleted:
 94 * Keep metadata ptr in r31 for later use.
 95 */
 96 mr %r31, %r3
Added:
 94 * Keep arguments in r30 & r31 for later use.
 95 */
 96 mr %r30, %r3
 97 mr %r31, %r4
98
99/*
100 * Initial cleanup
101 */
102 li %r3, PSL_DE /* Keep debug exceptions for CodeWarrior. */
103 mtmsr %r3
104 isync
105
106 lis %r3, HID0_E500_DEFAULT_SET@h
107 ori %r3, %r3, HID0_E500_DEFAULT_SET@l
108 mtspr SPR_HID0, %r3
109 isync
110 lis %r3, HID1_E500_DEFAULT_SET@h
111 ori %r3, %r3, HID1_E500_DEFAULT_SET@l
112 mtspr SPR_HID1, %r3
113 isync
114
115 /* Invalidate all entries in TLB0 */
116 li %r3, 0
117 bl tlb_inval_all
118
119/*
120 * Locate the TLB1 entry that maps this code
121 */
122 bl 1f
1231: mflr %r3
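The `bl 1f; 1: mflr %r3` pair above is the standard idiom for position-independent boot code to learn its own program counter: the branch-and-link deposits the address of the instruction at the local label into LR. A minimal C sketch of the idiom (illustrative only; GCC inline assembly, 32-bit PowerPC):

	#include <stdint.h>

	/* Branch-and-link to the very next instruction; LR then holds
	 * that instruction's address, i.e. the current PC. */
	static inline uint32_t
	current_pc(void)
	{
		uint32_t pc;

		__asm__ __volatile__ ("bl 0f; 0: mflr %0" : "=r" (pc) : : "lr");
		return (pc);
	}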
Deleted:
123 bl tlb1_find_current /* the entry number found is returned in r30 */
Added:
124 bl tlb1_find_current /* the entry found is returned in r29 */
125
126 bl tlb1_inval_all_but_current
127/*
128 * Create temporary mapping in AS=1 and switch to it
129 */
130 bl tlb1_temp_mapping_as1
131
132 mfmsr %r3
133 ori %r3, %r3, (PSL_IS | PSL_DS)
134 bl 2f
1352: mflr %r4
136 addi %r4, %r4, 20
137 mtspr SPR_SRR0, %r4
138 mtspr SPR_SRR1, %r3
139 rfi /* Switch context */
140
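The `addi %r4, %r4, 20` above is worth spelling out: it advances the address captured by mflr over exactly five 4-byte instructions (mflr, addi, two mtspr, rfi), so the rfi "returns" to the instruction following it, in the same code stream but with MSR[IS|DS] = 1 (address space 1). A sketch of the arithmetic:

	/* SRR0 target: 20 bytes past the mflr = first instruction after
	 * the rfi; execution resumes linearly, now translated in AS=1. */
	static inline unsigned int
	rfi_resume_addr(unsigned int mflr_addr)
	{
		return (mflr_addr + 5 * 4);
	}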
141/*
142 * Invalidate initial entry
143 */
Deleted:
143 mr %r3, %r30
Added:
144 mr %r3, %r29
145 bl tlb1_inval_entry
146
147/*
148 * Setup final mapping in TLB1[1] and switch to it
149 */
150 /* Final kernel mapping, map in 16 MB of RAM */
151 lis %r3, MAS0_TLBSEL1@h /* Select TLB1 */
152 li %r4, 0 /* Entry 0 */
153 rlwimi %r3, %r4, 16, 12, 15
154 mtspr SPR_MAS0, %r3
155 isync
156
157 li %r3, (TLB_SIZE_16M << MAS1_TSIZE_SHIFT)@l
158 oris %r3, %r3, (MAS1_VALID | MAS1_IPROT)@h
159 mtspr SPR_MAS1, %r3 /* note TS was not filled, so it's TS=0 */
160 isync
161
162 lis %r3, KERNBASE@h
163 ori %r3, %r3, KERNBASE@l /* EPN = KERNBASE */
164#ifdef SMP
165 ori %r3, %r3, MAS2_M@l /* WIMGE = 0b00100 */
166#endif
167 mtspr SPR_MAS2, %r3
168 isync
169
170 /* Discover phys load address */
171 bl 3f
1723: mflr %r4 /* Use current address */
173 rlwinm %r4, %r4, 0, 0, 7 /* 16MB alignment mask */
Deleted:
173 mr %r28, %r4 /* Keep kernel load address */
Added:
174 mr %r27, %r4 /* Keep kernel load address */
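For reference, the `rlwinm %r4, %r4, 0, 0, 7` above keeps only the high 8 bits of the 32-bit address, which is exactly a round-down to a 16 MB boundary. A C sketch (illustration, not kernel code):

	#include <stdint.h>

	static inline uint32_t
	round_down_16m(uint32_t addr)
	{
		/* Keeping bits 31:24 == masking with ~(16 MB - 1). */
		return (addr & 0xff000000u);
	}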
175 ori %r4, %r4, (MAS3_SX | MAS3_SW | MAS3_SR)@l
176 mtspr SPR_MAS3, %r4 /* Set RPN and protection */
177 isync
178 tlbwe
179 isync
180 msync
181
182 /* Switch to the above TLB1[1] mapping */
183 bl 4f
1844: mflr %r4
185 rlwinm %r4, %r4, 0, 8, 31 /* Current offset from kernel load address */
186 rlwinm %r3, %r3, 0, 0, 19
187 add %r4, %r4, %r3 /* Convert to kernel virtual address */
188 addi %r4, %r4, 36
189 li %r3, PSL_DE /* Note AS=0 */
190 mtspr SPR_SRR0, %r4
191 mtspr SPR_SRR1, %r3
192 rfi
193
194/*
195 * Invalidate temp mapping
196 */
Deleted:
196 mr %r3, %r29
Added:
197 mr %r3, %r28
198 bl tlb1_inval_entry
199
200/*
201 * Save kernel load address for later use.
202 */
203 lis %r3, kernload@ha
204 addi %r3, %r3, kernload@l
Deleted:
204 stw %r28, 0(%r3)
Added:
205 stw %r27, 0(%r3)
206#ifdef SMP
207 /*
208 * APs need a separate copy of kernload info within the __boot_page
209 * area so they can access this value very early, before their TLBs
210 * are fully set up and the kernload global location is available.
211 */
212 lis %r3, kernload_ap@ha
213 addi %r3, %r3, kernload_ap@l
Deleted:
213 stw %r28, 0(%r3)
Added:
214 stw %r27, 0(%r3)
215 msync
216#endif
217
218/*
219 * Setup a temporary stack
220 */
221 lis %r1, tmpstack@ha
222 addi %r1, %r1, tmpstack@l
223 addi %r1, %r1, (TMPSTACKSZ - 8)
224
225/*
226 * Initialise exception vector offsets
227 */
228 bl ivor_setup
229
230/*
231 * Set up arguments and jump to system initialization code
232 */
Deleted:
232 lis %r3, kernel_text@ha
233 addi %r3, %r3, kernel_text@l
234 lis %r4, _end@ha
235 addi %r4, %r4, _end@l
236 mr %r5, %r31 /* metadata ptr */
Added:
233 mr %r3, %r30
234 mr %r4, %r31
235
236 /* Prepare e500 core */
Deleted:
239 bl e500_init
Added:
237 bl booke_init
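The net effect of the change: instead of passing kernel_text, _end, and the metadata pointer, the two loader-provided words saved in r30/r31 at entry are moved into r3/r4, the first two argument registers of the PowerPC ABI, and the entry point is now booke_init. In C terms the call amounts to the sketch below; the prototype is inferred from this call site only (not quoted from booke_init's definition), and the return value becomes the initial kernel stack pointer via the `mr %r1, %r3` that follows:

	#include <stdint.h>

	/* Hypothetical prototype, inferred from the register usage here. */
	void *booke_init(uint32_t arg1, uint32_t arg2);

	static void
	start_kernel_model(uint32_t saved_r30, uint32_t saved_r31)
	{
		/* r3/r4 <- r30/r31: the ABI argument registers. */
		void *kstack = booke_init(saved_r30, saved_r31);
		/* locore then makes kstack the stack (r1) and calls mi_startup(). */
		(void)kstack;
	}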
238
239 /* Switch to thread0.td_kstack now */
240 mr %r1, %r3
241 li %r3, 0
242 stw %r3, 0(%r1)
243
244 /* Machine independent part, does not return */
245 bl mi_startup
246 /* NOT REACHED */
2475: b 5b
248
249
250#ifdef SMP
251/************************************************************************/
252/* AP Boot page */
253/************************************************************************/
254 .text
255 .globl __boot_page
256 .align 12
257__boot_page:
258 bl 1f
259
260kernload_ap:
261 .long 0
262
263/*
264 * Initial configuration
265 */
2661:
267 /* Set HIDs */
268 lis %r3, HID0_E500_DEFAULT_SET@h
269 ori %r3, %r3, HID0_E500_DEFAULT_SET@l
270 mtspr SPR_HID0, %r3
271 isync
272 lis %r3, HID1_E500_DEFAULT_SET@h
273 ori %r3, %r3, HID1_E500_DEFAULT_SET@l
274 mtspr SPR_HID1, %r3
275 isync
276
277 /* Enable branch prediction */
278 li %r3, BUCSR_BPEN
279 mtspr SPR_BUCSR, %r3
280 isync
281
282 /* Invalidate all entries in TLB0 */
283 li %r3, 0
284 bl tlb_inval_all
285
286/*
287 * Find TLB1 entry which is translating us now
288 */
289 bl 2f
2902: mflr %r3
Deleted:
293 bl tlb1_find_current /* the entry number found is in r30 */
Added:
291 bl tlb1_find_current /* the entry number found is in r29 */
292
293 bl tlb1_inval_all_but_current
294/*
295 * Create temporary translation in AS=1 and switch to it
296 */
297 bl tlb1_temp_mapping_as1
298
299 mfmsr %r3
300 ori %r3, %r3, (PSL_IS | PSL_DS)
301 bl 3f
3023: mflr %r4
303 addi %r4, %r4, 20
304 mtspr SPR_SRR0, %r4
305 mtspr SPR_SRR1, %r3
306 rfi /* Switch context */
307
308/*
309 * Invalidate initial entry
310 */
Deleted:
313 mr %r3, %r30
Added:
311 mr %r3, %r29
312 bl tlb1_inval_entry
313
314/*
315 * Setup final mapping in TLB1[1] and switch to it
316 */
317 /* Final kernel mapping, map in 16 MB of RAM */
318 lis %r3, MAS0_TLBSEL1@h /* Select TLB1 */
319 li %r4, 0 /* Entry 0 */
320 rlwimi %r3, %r4, 16, 4, 15
321 mtspr SPR_MAS0, %r3
322 isync
323
324 li %r3, (TLB_SIZE_16M << MAS1_TSIZE_SHIFT)@l
325 oris %r3, %r3, (MAS1_VALID | MAS1_IPROT)@h
326 mtspr SPR_MAS1, %r3 /* note TS was not filled, so it's TS=0 */
327 isync
328
329 lis %r3, KERNBASE@h
330 ori %r3, %r3, KERNBASE@l /* EPN = KERNBASE */
331 ori %r3, %r3, MAS2_M@l /* WIMGE = 0b00100 */
332 mtspr SPR_MAS2, %r3
333 isync
334
335 /* Retrieve kernel load [physical] address from kernload_ap */
336 bl 4f
3374: mflr %r3
338 rlwinm %r3, %r3, 0, 0, 19
339 lis %r4, kernload_ap@h
340 ori %r4, %r4, kernload_ap@l
341 lis %r5, __boot_page@h
342 ori %r5, %r5, __boot_page@l
343 sub %r4, %r4, %r5 /* offset of kernload_ap within __boot_page */
344 lwzx %r3, %r4, %r3
345
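The lookup above computes where kernload_ap lives inside the running copy of the boot page: round the current PC down to the 4 KB page base (`rlwinm ..., 0, 0, 19`), add the link-time offset of kernload_ap from __boot_page, and load through that physical address. A C sketch of the same computation (the externs stand in for the link-time symbols, for illustration):

	#include <stdint.h>

	extern char __boot_page[], kernload_ap[];

	static inline uint32_t
	read_kernload_ap(uint32_t cur_pc)
	{
		uint32_t page = cur_pc & ~0xfffu;	/* 4 KB boot page base */
		uint32_t off = (uint32_t)(kernload_ap - __boot_page);

		return (*(volatile uint32_t *)(page + off));
	}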
346 /* Set RPN and protection */
347 ori %r3, %r3, (MAS3_SX | MAS3_SW | MAS3_SR)@l
348 mtspr SPR_MAS3, %r3
349 isync
350 tlbwe
351 isync
352 msync
353
354 /* Switch to the final mapping */
355 bl 5f
3565: mflr %r3
357 rlwinm %r3, %r3, 0, 0xfff /* Offset from boot page start */
358 add %r3, %r3, %r5 /* Make this virtual address */
359 addi %r3, %r3, 32
360 li %r4, 0 /* Note AS=0 */
361 mtspr SPR_SRR0, %r3
362 mtspr SPR_SRR1, %r4
363 rfi
364
365/*
366 * At this point we're running at virtual addresses KERNBASE and beyond, so
367 * we can directly access all the locations against which the kernel was
368 * linked.
369 */
370
371/*
372 * Invalidate temp mapping
373 */
Deleted:
376 mr %r3, %r29
Added:
374 mr %r3, %r28
375 bl tlb1_inval_entry
376
377/*
378 * Setup a temporary stack
379 */
380 lis %r1, tmpstack@ha
381 addi %r1, %r1, tmpstack@l
382 addi %r1, %r1, (TMPSTACKSZ - 8)
383
384/*
385 * Initialise exception vector offsets
386 */
387 bl ivor_setup
388
389 /*
390 * Assign our pcpu instance
391 */
392 lis %r3, ap_pcpu@h
393 ori %r3, %r3, ap_pcpu@l
394 lwz %r3, 0(%r3)
395 mtsprg0 %r3
396
397 bl pmap_bootstrap_ap
398
399 bl cpudep_ap_bootstrap
400 /* Switch to the idle thread's kstack */
401 mr %r1, %r3
402
403 bl machdep_ap_bootstrap
404
405 /* NOT REACHED */
4066: b 6b
407#endif /* SMP */
408
409/*
410 * Invalidate all entries in the given TLB.
411 *
412 * r3 TLBSEL
413 */
414tlb_inval_all:
415 rlwinm %r3, %r3, 3, 0x18 /* TLBSEL */
416 ori %r3, %r3, 0x4 /* INVALL */
417 tlbivax 0, %r3
418 isync
419 msync
420
421 tlbsync
422 msync
423 blr
424
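The operand handed to tlbivax encodes which TLB to operate on plus the invalidate-all request: the rlwinm places TLBSEL in bits 4:3 (mask 0x18) and the ori sets the INVALL bit (0x4). A C sketch of the encoding, with field positions taken from the instructions above (consult the e500 core manual for the authoritative layout):

	#include <stdint.h>

	static inline uint32_t
	tlbivax_all_operand(uint32_t tlbsel)
	{
		return (((tlbsel << 3) & 0x18u) | 0x4u);	/* TLBSEL | INVALL */
	}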
425/*
Deleted:
428 * expects address to look up in r3, returns entry number in r30
Added:
426 * expects address to look up in r3, returns entry number in r29
427 *
428 * FIXME: the hidden assumption is we are now running in AS=0, but we should
429 * retrieve actual AS from MSR[IS|DS] and put it in MAS6[SAS]
430 */
431tlb1_find_current:
432 mfspr %r17, SPR_PID0
433 slwi %r17, %r17, MAS6_SPID0_SHIFT
434 mtspr SPR_MAS6, %r17
435 isync
436 tlbsx 0, %r3
437 mfspr %r17, SPR_MAS0
Deleted:
440 rlwinm %r30, %r17, 16, 20, 31 /* MAS0[ESEL] -> r30 */
Added:
438 rlwinm %r29, %r17, 16, 20, 31 /* MAS0[ESEL] -> r29 */
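MAS0[ESEL] sits at bit 16 on e500, which is why the extraction above is a rotate-left-16 plus mask, and why the various `rlwimi ..., 16, 12, 15` sequences in this file insert the entry number at the same spot (PowerPC bit ranges count from the most significant bit, so bits 12..15 are bits 19:16 in LSB-first numbering). A C sketch, with MAS0_TLBSEL1 assumed to be the TLBSEL=1 field value:

	#include <stdint.h>

	#define MAS0_TLBSEL1	0x10000000u	/* assumed: MAS0[TLBSEL] = 1 */

	static inline uint32_t
	mas0_get_esel(uint32_t mas0)
	{
		return ((mas0 >> 16) & 0xfff);	/* full field, as masked above */
	}

	static inline uint32_t
	mas0_select_tlb1(uint32_t esel)
	{
		return (MAS0_TLBSEL1 | ((esel & 0xf) << 16));	/* 16-entry TLB1 */
	}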
439
440 /* Make sure we have IPROT set on the entry */
441 mfspr %r17, SPR_MAS1
442 oris %r17, %r17, MAS1_IPROT@h
443 mtspr SPR_MAS1, %r17
444 isync
445 tlbwe
446 isync
447 msync
448 blr
449
450/*
451 * Invalidates a single entry in TLB1.
452 *
453 * r3 ESEL
454 * r4-r5 scratched
455 */
456tlb1_inval_entry:
457 lis %r4, MAS0_TLBSEL1@h /* Select TLB1 */
458 rlwimi %r4, %r3, 16, 12, 15 /* Select our entry */
459 mtspr SPR_MAS0, %r4
460 isync
461 tlbre
462 li %r5, 0 /* MAS1[V] = 0 */
463 mtspr SPR_MAS1, %r5
464 isync
465 tlbwe
466 isync
467 msync
468 blr
469
470/*
Deleted:
473 * r30 current entry number
474 * r29 returned temp entry
Added:
471 * r29 current entry number
472 * r28 returned temp entry
473 * r3-r5 scratched
474 */
475tlb1_temp_mapping_as1:
476 /* Read our current translation */
477 lis %r3, MAS0_TLBSEL1@h /* Select TLB1 */
Deleted:
480 rlwimi %r3, %r30, 16, 12, 15 /* Select our current entry */
Added:
478 rlwimi %r3, %r29, 16, 12, 15 /* Select our current entry */
479 mtspr SPR_MAS0, %r3
480 isync
481 tlbre
482
483 /*
484 * Prepare and write temp entry
485 *
486 * FIXME this is not robust against overflow i.e. when the current
487 * entry is the last in TLB1
488 */
489 lis %r3, MAS0_TLBSEL1@h /* Select TLB1 */
Deleted:
492 addi %r29, %r30, 1 /* Use next entry. */
493 rlwimi %r3, %r29, 16, 12, 15 /* Select temp entry */
Added:
490 addi %r28, %r29, 1 /* Use next entry. */
491 rlwimi %r3, %r28, 16, 12, 15 /* Select temp entry */
492 mtspr SPR_MAS0, %r3
493 isync
494 mfspr %r5, SPR_MAS1
495 li %r4, 1 /* AS=1 */
496 rlwimi %r5, %r4, 12, 19, 19
497 li %r4, 0 /* Global mapping, TID=0 */
498 rlwimi %r5, %r4, 16, 8, 15
499 oris %r5, %r5, (MAS1_VALID | MAS1_IPROT)@h
500 mtspr SPR_MAS1, %r5
501 isync
502 tlbwe
503 isync
504 msync
505 blr
506
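For clarity, the MAS1 value written for the temporary entry keeps the TSIZE read back by tlbre, forces TID=0 (a global mapping) and TS=1 (AS=1), then sets VALID|IPROT. A C sketch of that bit manipulation, plus a wrap-around entry pick that would address the FIXME above (field values assumed from the e500 layout; nentries would come from TLB1CFG[NENTRY]):

	#include <stdint.h>

	#define MAS1_VALID	0x80000000u
	#define MAS1_IPROT	0x40000000u
	#define MAS1_TID_MASK	0x00ff0000u
	#define MAS1_TS		0x00001000u	/* address space select */

	static inline uint32_t
	mas1_temp_as1(uint32_t mas1)
	{
		mas1 &= ~MAS1_TID_MASK;		/* TID = 0: global mapping */
		return (mas1 | MAS1_TS | MAS1_VALID | MAS1_IPROT);
	}

	/* A robust "next entry" choice would wrap instead of overflowing: */
	static inline unsigned
	pick_temp_entry(unsigned current, unsigned nentries)
	{
		return ((current + 1) % nentries);
	}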
507/*
508 * Loops over TLB1, invalidates all entries skipping the one which currently
509 * maps this code.
510 *
Deleted:
513 * r30 current entry
Added:
511 * r29 current entry
512 * r3-r5 scratched
513 */
514tlb1_inval_all_but_current:
515 mr %r6, %r3
516 mfspr %r3, SPR_TLB1CFG /* Get number of entries */
517 andi. %r3, %r3, TLBCFG_NENTRY_MASK@l
518 li %r4, 0 /* Start from Entry 0 */
5191: lis %r5, MAS0_TLBSEL1@h
520 rlwimi %r5, %r4, 16, 12, 15
521 mtspr SPR_MAS0, %r5
522 isync
523 tlbre
524 mfspr %r5, SPR_MAS1
Deleted:
527 cmpw %r4, %r30 /* our current entry? */
Added:
525 cmpw %r4, %r29 /* our current entry? */
526 beq 2f
527 rlwinm %r5, %r5, 0, 2, 31 /* clear VALID and IPROT bits */
528 mtspr SPR_MAS1, %r5
529 isync
530 tlbwe
531 isync
532 msync
5332: addi %r4, %r4, 1
534 cmpw %r4, %r3 /* Check if this is the last entry */
535 bne 1b
536 blr
537
538#ifdef SMP
539__boot_page_padding:
540 /*
541 * Boot page needs to be exactly 4K, with the last word of this page
542 * acting as the reset vector, so we need to stuff the remainder.
543 * Upon release from holdoff, the CPU fetches the last word of the
544 * boot page.
545 */
546 .space 4092 - (__boot_page_padding - __boot_page)
547 b __boot_page
548#endif /* SMP */
549
550/************************************************************************/
551/* locore subroutines */
552/************************************************************************/
553
554ivor_setup:
555 /* Set base address of interrupt handler routines */
556 lis %r3, interrupt_vector_base@h
557 mtspr SPR_IVPR, %r3
558
559 /* Assign interrupt handler routines offsets */
560 li %r3, int_critical_input@l
561 mtspr SPR_IVOR0, %r3
562 li %r3, int_machine_check@l
563 mtspr SPR_IVOR1, %r3
564 li %r3, int_data_storage@l
565 mtspr SPR_IVOR2, %r3
566 li %r3, int_instr_storage@l
567 mtspr SPR_IVOR3, %r3
568 li %r3, int_external_input@l
569 mtspr SPR_IVOR4, %r3
570 li %r3, int_alignment@l
571 mtspr SPR_IVOR5, %r3
572 li %r3, int_program@l
573 mtspr SPR_IVOR6, %r3
574 li %r3, int_syscall@l
575 mtspr SPR_IVOR8, %r3
576 li %r3, int_decrementer@l
577 mtspr SPR_IVOR10, %r3
578 li %r3, int_fixed_interval_timer@l
579 mtspr SPR_IVOR11, %r3
580 li %r3, int_watchdog@l
581 mtspr SPR_IVOR12, %r3
582 li %r3, int_data_tlb_error@l
583 mtspr SPR_IVOR13, %r3
584 li %r3, int_inst_tlb_error@l
585 mtspr SPR_IVOR14, %r3
586 li %r3, int_debug@l
587 mtspr SPR_IVOR15, %r3
588 blr
589
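On BookE there are no fixed vector addresses: the core forms each exception entry point by combining the upper half of IVPR with the 16-byte-aligned offset in the matching IVORn, which is why ivor_setup loads IVPR with the common base and each IVORn with a handler's low-order offset. A C sketch of the address formation (per the e500 model; illustrative):

	#include <stdint.h>

	static inline uint32_t
	booke_vector_addr(uint32_t ivpr, uint32_t ivorn)
	{
		return ((ivpr & 0xffff0000u) | (ivorn & 0x0000fff0u));
	}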
590/*
591 * void tid_flush(tlbtid_t tid);
592 *
593 * Invalidate all TLB0 entries which match the given TID. Note this is
594 * dedicated to cases in which invalidations should NOT be propagated to other
595 * CPUs.
596 *
597 * Global vars tlb0_ways, tlb0_entries_per_way are assumed to have been set up
598 * correctly (by tlb0_get_tlbconf()).
599 *
600 */
601ENTRY(tid_flush)
602 cmpwi %r3, TID_KERNEL
603 beq tid_flush_end /* don't evict kernel translations */
604
605 /* Number of TLB0 ways */
606 lis %r4, tlb0_ways@h
607 ori %r4, %r4, tlb0_ways@l
608 lwz %r4, 0(%r4)
609
610 /* Number of entries / way */
611 lis %r5, tlb0_entries_per_way@h
612 ori %r5, %r5, tlb0_entries_per_way@l
613 lwz %r5, 0(%r5)
614
615 /* Disable interrupts */
616 mfmsr %r10
617 wrteei 0
618
619 li %r6, 0 /* ways counter */
620loop_ways:
621 li %r7, 0 /* entries [per way] counter */
622loop_entries:
623 /* Select TLB0 and ESEL (way) */
624 lis %r8, MAS0_TLBSEL0@h
625 rlwimi %r8, %r6, 16, 14, 15
626 mtspr SPR_MAS0, %r8
627 isync
628
629 /* Select EPN (entry within the way) */
630 rlwinm %r8, %r7, 12, 13, 19
631 mtspr SPR_MAS2, %r8
632 isync
633 tlbre
634
635 /* Check if valid entry */
636 mfspr %r8, SPR_MAS1
637 andis. %r9, %r8, MAS1_VALID@h
638 beq next_entry /* invalid entry */
639
640 /* Check if this is our TID */
641 rlwinm %r9, %r8, 16, 24, 31
642
643 cmplw %r9, %r3
644 bne next_entry /* not our TID */
645
646 /* Clear VALID bit */
647 rlwinm %r8, %r8, 0, 1, 31
648 mtspr SPR_MAS1, %r8
649 isync
650 tlbwe
651 isync
652 msync
653
654next_entry:
655 addi %r7, %r7, 1
656 cmpw %r7, %r5
657 bne loop_entries
658
659 /* Next way */
660 addi %r6, %r6, 1
661 cmpw %r6, %r4
662 bne loop_ways
663
664 /* Restore MSR (possibly re-enable interrupts) */
665 mtmsr %r10
666 isync
667
668tid_flush_end:
669 blr
670
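The structure of tid_flush is easier to see in C: two nested loops walk every (way, entry) slot of TLB0, read each entry back, and clear VALID on those whose TID matches the argument. A sketch under assumed field layouts; tlb0_read_mas1() and tlb0_clear_valid() are hypothetical stand-ins for the MAS0/MAS2 setup plus tlbre/tlbwe sequences above:

	#include <stdint.h>

	#define MAS1_VALID	0x80000000u

	extern unsigned tlb0_ways, tlb0_entries_per_way;
	extern uint32_t tlb0_read_mas1(unsigned way, unsigned entry);
	extern void tlb0_clear_valid(unsigned way, unsigned entry);

	static void
	tid_flush_model(uint32_t tid)
	{
		for (unsigned way = 0; way < tlb0_ways; way++)
			for (unsigned e = 0; e < tlb0_entries_per_way; e++) {
				uint32_t mas1 = tlb0_read_mas1(way, e);

				if ((mas1 & MAS1_VALID) != 0 &&
				    ((mas1 >> 16) & 0xff) == tid) /* MAS1[TID] */
					tlb0_clear_valid(way, e);
			}
	}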
671/*
672 * Cache disable/enable/inval sequences according
673 * to section 2.16 of E500CORE RM.
674 */
675ENTRY(dcache_inval)
676 /* Invalidate d-cache */
677 mfspr %r3, SPR_L1CSR0
678 ori %r3, %r3, (L1CSR0_DCFI | L1CSR0_DCLFR)@l
679 msync
680 isync
681 mtspr SPR_L1CSR0, %r3
682 isync
6831: mfspr %r3, SPR_L1CSR0
684 andi. %r3, %r3, L1CSR0_DCFI
685 bne 1b
686 blr
687
688ENTRY(dcache_disable)
689 /* Disable d-cache */
690 mfspr %r3, SPR_L1CSR0
691 li %r4, L1CSR0_DCE@l
692 not %r4, %r4
693 and %r3, %r3, %r4
694 msync
695 isync
696 mtspr SPR_L1CSR0, %r3
697 isync
698 blr
699
700ENTRY(dcache_enable)
701 /* Enable d-cache */
702 mfspr %r3, SPR_L1CSR0
703 oris %r3, %r3, (L1CSR0_DCPE | L1CSR0_DCE)@h
704 ori %r3, %r3, (L1CSR0_DCPE | L1CSR0_DCE)@l
705 msync
706 isync
707 mtspr SPR_L1CSR0, %r3
708 isync
709 blr
710
711ENTRY(icache_inval)
712 /* Invalidate i-cache */
713 mfspr %r3, SPR_L1CSR1
714 ori %r3, %r3, (L1CSR1_ICFI | L1CSR1_ICLFR)@l
715 isync
716 mtspr SPR_L1CSR1, %r3
717 isync
7181: mfspr %r3, SPR_L1CSR1
719 andi. %r3, %r3, L1CSR1_ICFI
720 bne 1b
721 blr
722
723ENTRY(icache_disable)
724 /* Disable i-cache */
725 mfspr %r3, SPR_L1CSR1
726 li %r4, L1CSR1_ICE@l
727 not %r4, %r4
728 and %r3, %r3, %r4
729 isync
730 mtspr SPR_L1CSR1, %r3
731 isync
732 blr
733
734ENTRY(icache_enable)
735 /* Enable i-cache */
736 mfspr %r3, SPR_L1CSR1
737 oris %r3, %r3, (L1CSR1_ICPE | L1CSR1_ICE)@h
738 ori %r3, %r3, (L1CSR1_ICPE | L1CSR1_ICE)@l
739 isync
740 mtspr SPR_L1CSR1, %r3
741 isync
742 blr
743
744/*
745 * int setfault()
746 *
747 * Similar to setjmp; sets up for handling faults on accesses to user memory.
748 * Any routine using this may only call bcopy, either the form below
749 * or the (currently used) C version, optimized so that it does not use any
750 * non-volatile registers.
751 */
752 .globl setfault
753setfault:
754 mflr %r0
755 mfsprg0 %r4
756 lwz %r4, PC_CURTHREAD(%r4)
757 lwz %r4, TD_PCB(%r4)
758 stw %r3, PCB_ONFAULT(%r4)
759 mfcr %r10
760 mfctr %r11
761 mfxer %r12
762 stw %r0, 0(%r3)
763 stw %r1, 4(%r3)
764 stw %r2, 8(%r3)
765 stmw %r10, 12(%r3) /* store CR, CTR, XER, [r13 .. r31] */
766 li %r3, 0 /* return FALSE */
767 blr
768
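Callers use setfault() exactly like setjmp: a zero return means the fault environment is armed, while a fault taken with pcb_onfault set unwinds back here with a nonzero return. A usage sketch; faultbuf's 25-word size follows from the stores above (LR, r1, r2, then CR/CTR/XER/r13-r31 via stmw), and fetch_word() is a hypothetical example, not this file's API:

	typedef unsigned int faultbuf[25];

	int setfault(faultbuf);		/* the assembly routine above */

	static int
	fetch_word(volatile unsigned int *uaddr, unsigned int *val)
	{
		faultbuf env;

		if (setfault(env) != 0)
			return (-1);	/* a fault occurred; real code: EFAULT */
		*val = *uaddr;		/* may fault; trap code resumes at setfault */
		/* real code would clear pcb_onfault before returning */
		return (0);
	}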
769/************************************************************************/
770/* Data section */
771/************************************************************************/
772 .data
773 .align 4
774tmpstack:
775 .space TMPSTACKSZ
776
777/*
778 * Compiled KERNBASE locations
779 */
780 .globl kernbase
781 .set kernbase, KERNBASE
782
783/*
784 * Globals
785 */
786#define INTRCNT_COUNT 256 /* max(HROWPIC_IRQMAX,OPENPIC_IRQMAX) */
787
788GLOBAL(kernload)
789 .long 0
790GLOBAL(intrnames)
791 .space INTRCNT_COUNT * (MAXCOMLEN + 1) * 2
792GLOBAL(eintrnames)
793 .align 4
794GLOBAL(intrcnt)
795 .space INTRCNT_COUNT * 4 * 2
796GLOBAL(eintrcnt)
797
798#include <powerpc/booke/trap_subr.S>