/*-
 * Copyright (C) 2007-2009 Semihalf, Rafal Jaworowski <raj@semihalf.com>
 * Copyright (C) 2006 Semihalf, Marian Balakowicz <m8@semihalf.com>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN
 * NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
 * TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
 * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
 * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * $FreeBSD: head/sys/powerpc/booke/locore.S 236141 2012-05-27 10:25:20Z raj $
 */

#include "assym.s"

#include <machine/asm.h>
#include <machine/hid.h>
#include <machine/param.h>
#include <machine/spr.h>
#include <machine/pte.h>
#include <machine/trap.h>
#include <machine/vmparam.h>
#include <machine/tlb.h>

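/* Size of the temporary bootstrap stack; its backing store (tmpstack) is
 * reserved in the data section below. */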
#define	TMPSTACKSZ	16384

	.text
	.globl	btext
btext:

/*
 * This symbol is here for the benefit of kvm_mkdb, and is supposed to
 * mark the start of kernel text.
 */
	.globl	kernel_text
kernel_text:

/*
 * Startup entry. Note, this must be the first thing in the text segment!
 */
	.text
	.globl	__start
__start:

/*
 * Assumptions on the boot loader:
 * - system memory starts from physical address 0
 * - it's mapped by a single TLB1 entry
 * - TLB1 mapping is 1:1 pa to va
 * - kernel is loaded at a 16MB boundary
 * - all PID registers are set to the same value
 * - CPU is running in AS=0
 *
 * Register contents provided by the loader(8):
 *	r1	: stack pointer
 *	r3	: metadata pointer
 *
 * We rearrange the TLB1 layout as follows:
 * - find the TLB1 entry we started in
 * - make sure it's protected, invalidate other entries
 * - create a temp entry in the second AS (make sure it's not TLB1[1])
 * - switch to the temp mapping
 * - map 16MB of RAM in TLB1[1]
 * - use AS=1, set EPN to KERNBASE and RPN to the kernel load address
 * - switch to the TLB1[1] mapping
 * - invalidate the temp mapping
 *
 * locore register use:
 *	r1	: stack pointer
 *	r2	: trace pointer (AP only, for early diagnostics)
 *	r3-r27	: scratch registers
 *	r28	: temp TLB1 entry
 *	r29	: initial TLB1 entry we started in
 *	r30-r31	: arguments (metadata pointer)
 */

/*
 * Keep arguments in r30 & r31 for later use.
 */
	mr	%r30, %r3
	mr	%r31, %r4

/*
 * Initial cleanup
 */
	li	%r3, PSL_DE	/* Keep debug exceptions for CodeWarrior. */
	mtmsr	%r3
	isync

	lis	%r3, HID0_E500_DEFAULT_SET@h
	ori	%r3, %r3, HID0_E500_DEFAULT_SET@l
	mtspr	SPR_HID0, %r3
	isync
	lis	%r3, HID1_E500_DEFAULT_SET@h
	ori	%r3, %r3, HID1_E500_DEFAULT_SET@l
	mtspr	SPR_HID1, %r3
	isync

	/* Invalidate all entries in TLB0 */
	li	%r3, 0
	bl	tlb_inval_all

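	/*
	 * A NULL metadata pointer skips the TLB1 rearrangement below;
	 * presumably the final mapping has already been set up in that case.
	 */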
	cmpwi	%r30, 0
	beq	done_mapping

/*
 * Locate the TLB1 entry that maps this code
 */
	bl	1f
1:	mflr	%r3
	bl	tlb1_find_current	/* the entry found is returned in r29 */

	bl	tlb1_inval_all_but_current
/*
 * Create temporary mapping in AS=1 and switch to it
 */
	bl	tlb1_temp_mapping_as1

	mfmsr	%r3
	ori	%r3, %r3, (PSL_IS | PSL_DS)
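	/*
	 * The bl/mflr pair below yields the address of label 2; the five
	 * instructions from that label through the rfi span 20 bytes, so
	 * SRR0 points just past the rfi and execution resumes there with
	 * translation in AS=1.
	 */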
	bl	2f
2:	mflr	%r4
	addi	%r4, %r4, 20
	mtspr	SPR_SRR0, %r4
	mtspr	SPR_SRR1, %r3
	rfi				/* Switch context */

/*
 * Invalidate initial entry
 */
	mr	%r3, %r29
	bl	tlb1_inval_entry

/*
 * Set up the final mapping in TLB1[1] and switch to it
 */
	/* Final kernel mapping, map in 16 MB of RAM */
	lis	%r3, MAS0_TLBSEL1@h	/* Select TLB1 */
	li	%r4, 0			/* Entry 0 */
	rlwimi	%r3, %r4, 16, 12, 15
	mtspr	SPR_MAS0, %r3
	isync

	li	%r3, (TLB_SIZE_16M << MAS1_TSIZE_SHIFT)@l
	oris	%r3, %r3, (MAS1_VALID | MAS1_IPROT)@h
	mtspr	SPR_MAS1, %r3		/* note TS was not filled, so it's TS=0 */
	isync

	lis	%r3, KERNBASE@h
	ori	%r3, %r3, KERNBASE@l	/* EPN = KERNBASE */
#ifdef SMP
	ori	%r3, %r3, MAS2_M@l	/* WIMGE = 0b00100 */
#endif
	mtspr	SPR_MAS2, %r3
	isync

	/* Discover phys load address */
	bl	3f
3:	mflr	%r4			/* Use current address */
	rlwinm	%r4, %r4, 0, 0, 7	/* 16MB alignment mask */
	ori	%r4, %r4, (MAS3_SX | MAS3_SW | MAS3_SR)@l
	mtspr	SPR_MAS3, %r4		/* Set RPN and protection */
	isync
	tlbwe
	isync
	msync

	/* Switch to the above TLB1[1] mapping */
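	/*
	 * The bl/mflr pair below yields the physical address of label 4;
	 * masking keeps only the offset within the 16MB image. r3 still
	 * holds the MAS2 value, so its low-order WIMGE bits are cleared to
	 * recover KERNBASE, and the sum is the current virtual address.
	 * The +36 skips the nine instructions from label 4 through the rfi,
	 * so execution resumes just past it in the new mapping.
	 */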
	bl	4f
4:	mflr	%r4
	rlwinm	%r4, %r4, 0, 8, 31	/* Current offset from kernel load address */
	rlwinm	%r3, %r3, 0, 0, 19
	add	%r4, %r4, %r3		/* Convert to kernel virtual address */
	addi	%r4, %r4, 36
	li	%r3, PSL_DE		/* Note AS=0 */
	mtspr	SPR_SRR0, %r4
	mtspr	SPR_SRR1, %r3
	rfi

/*
 * Invalidate temp mapping
 */
	mr	%r3, %r28
	bl	tlb1_inval_entry

done_mapping:

/*
 * Set up a temporary stack
 */
	lis	%r1, tmpstack@ha
	addi	%r1, %r1, tmpstack@l
	addi	%r1, %r1, (TMPSTACKSZ - 8)

/*
 * Initialise exception vector offsets
 */
	bl	ivor_setup

/*
 * Set up arguments and jump to system initialization code
 */
	mr	%r3, %r30
	mr	%r4, %r31

	/* Prepare core */
	bl	booke_init

	/* Switch to thread0.td_kstack now */
	mr	%r1, %r3
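	/* Zero the frame back-chain word so stack unwinds terminate here. */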
	li	%r3, 0
	stw	%r3, 0(%r1)

	/* Machine independent part, does not return */
	bl	mi_startup
	/* NOT REACHED */
5:	b	5b


#ifdef SMP
/************************************************************************/
/* AP Boot page */
/************************************************************************/
	.text
	.globl	__boot_page
	.align	12
__boot_page:
	bl	1f

	.globl	bp_trace
bp_trace:
	.long	0

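/*
 * bp_kernload is filled in (presumably by the BSP, before this AP is
 * released from holdoff) with the kernel's physical load address.
 */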
	.globl	bp_kernload
bp_kernload:
	.long	0

/*
 * Initial configuration
 */
1:
	mflr	%r31		/* r31 holds the address of bp_trace */

	/* Set HIDs */
	lis	%r3, HID0_E500_DEFAULT_SET@h
	ori	%r3, %r3, HID0_E500_DEFAULT_SET@l
	mtspr	SPR_HID0, %r3
	isync
	lis	%r3, HID1_E500_DEFAULT_SET@h
	ori	%r3, %r3, HID1_E500_DEFAULT_SET@l
	mtspr	SPR_HID1, %r3
	isync

	/* Enable branch prediction */
	li	%r3, BUCSR_BPEN
	mtspr	SPR_BUCSR, %r3
	isync

	/* Invalidate all entries in TLB0 */
	li	%r3, 0
	bl	tlb_inval_all

/*
 * Find the TLB1 entry which is translating us now
 */
	bl	2f
2:	mflr	%r3
	bl	tlb1_find_current	/* the entry number found is in r29 */

	bl	tlb1_inval_all_but_current
/*
 * Create temporary translation in AS=1 and switch to it
 */
	bl	tlb1_temp_mapping_as1

	mfmsr	%r3
	ori	%r3, %r3, (PSL_IS | PSL_DS)
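	/*
	 * As on the BSP above: the bl/mflr pair yields the address of
	 * label 3, and the five instructions from that label through the
	 * rfi span 20 bytes, so execution resumes just past the rfi in
	 * AS=1.
	 */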
	bl	3f
3:	mflr	%r4
	addi	%r4, %r4, 20
	mtspr	SPR_SRR0, %r4
	mtspr	SPR_SRR1, %r3
	rfi				/* Switch context */

/*
 * Invalidate initial entry
 */
	mr	%r3, %r29
	bl	tlb1_inval_entry

/*
 * Set up the final mapping in TLB1[1] and switch to it
 */
	/* Final kernel mapping, map in 16 MB of RAM */
	lis	%r3, MAS0_TLBSEL1@h	/* Select TLB1 */
	li	%r4, 0			/* Entry 0 */
	rlwimi	%r3, %r4, 16, 4, 15
	mtspr	SPR_MAS0, %r3
	isync

	li	%r3, (TLB_SIZE_16M << MAS1_TSIZE_SHIFT)@l
	oris	%r3, %r3, (MAS1_VALID | MAS1_IPROT)@h
	mtspr	SPR_MAS1, %r3		/* note TS was not filled, so it's TS=0 */
	isync

	lis	%r3, KERNBASE@h
	ori	%r3, %r3, KERNBASE@l	/* EPN = KERNBASE */
	ori	%r3, %r3, MAS2_M@l	/* WIMGE = 0b00100 */
	mtspr	SPR_MAS2, %r3
	isync

	/* Retrieve kernel load [physical] address from bp_kernload */
	bl	4f
4:	mflr	%r3
	rlwinm	%r3, %r3, 0, 0, 19
	lis	%r4, bp_kernload@h
	ori	%r4, %r4, bp_kernload@l
	lis	%r5, __boot_page@h
	ori	%r5, %r5, __boot_page@l
	sub	%r4, %r4, %r5	/* offset of bp_kernload within __boot_page */
	lwzx	%r3, %r4, %r3
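	/*
	 * Masking the PC to a 4KB boundary gave the boot page base in the
	 * current mapping; adding the link-time offset of bp_kernload let
	 * the lwzx above fetch the kernel's physical load address into r3.
	 */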

	/* Set RPN and protection */
	ori	%r3, %r3, (MAS3_SX | MAS3_SW | MAS3_SR)@l
	mtspr	SPR_MAS3, %r3
	isync
	tlbwe
	isync
	msync

	/* Switch to the final mapping */
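	/*
	 * The bl/mflr pair below yields the address of label 5; the eight
	 * instructions from that label through the rfi span 32 bytes, so
	 * execution resumes just past the rfi at its linked virtual address.
	 */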
	bl	5f
5:	mflr	%r3
	rlwinm	%r3, %r3, 0, 0xfff	/* Offset from boot page start */
	add	%r3, %r3, %r5		/* Make this virtual address */
	addi	%r3, %r3, 32
	li	%r4, 0			/* Note AS=0 */
	mtspr	SPR_SRR0, %r3
	mtspr	SPR_SRR1, %r4
	rfi

/*
 * At this point we're running at virtual addresses KERNBASE and beyond, so
 * we can directly access all locations the kernel was linked against.
 */

/*
 * Invalidate temp mapping
 */
	mr	%r3, %r28
	bl	tlb1_inval_entry

/*
 * Set up a temporary stack
 */
	lis	%r1, tmpstack@ha
	addi	%r1, %r1, tmpstack@l
	addi	%r1, %r1, (TMPSTACKSZ - 8)

/*
 * Initialise exception vector offsets
 */
	bl	ivor_setup

	/*
	 * Assign our pcpu instance
	 */
	lis	%r3, ap_pcpu@h
	ori	%r3, %r3, ap_pcpu@l
	lwz	%r3, 0(%r3)
	mtsprg0	%r3

	bl	pmap_bootstrap_ap

	bl	cpudep_ap_bootstrap
	/* Switch to the idle thread's kstack */
	mr	%r1, %r3

	bl	machdep_ap_bootstrap

	/* NOT REACHED */
6:	b	6b
#endif /* SMP */

/*
 * Invalidate all entries in the given TLB.
 *
 * r3	TLBSEL
 */
tlb_inval_all:
	rlwinm	%r3, %r3, 3, 0x18	/* TLBSEL */
	ori	%r3, %r3, 0x4		/* INVALL */
	tlbivax	0, %r3
	isync
	msync

	tlbsync
	msync
	blr
416
417/*
418 * expects address to look up in r3, returns entry number in r29
419 *
420 * FIXME: the hidden assumption is we are now running in AS=0, but we should
421 * retrieve actual AS from MSR[IS|DS] and put it in MAS6[SAS]
422 */
423tlb1_find_current:
424 mfspr %r17, SPR_PID0
425 slwi %r17, %r17, MAS6_SPID0_SHIFT
426 mtspr SPR_MAS6, %r17
427 isync
428 tlbsx 0, %r3
429 mfspr %r17, SPR_MAS0
430 rlwinm %r29, %r17, 16, 20, 31 /* MAS0[ESEL] -> r29 */
431
432 /* Make sure we have IPROT set on the entry */
433 mfspr %r17, SPR_MAS1
434 oris %r17, %r17, MAS1_IPROT@h
435 mtspr SPR_MAS1, %r17
436 isync
437 tlbwe
438 isync
439 msync
440 blr
441
442/*
443 * Invalidates a single entry in TLB1.
444 *
445 * r3 ESEL
446 * r4-r5 scratched
447 */
448tlb1_inval_entry:
449 lis %r4, MAS0_TLBSEL1@h /* Select TLB1 */
450 rlwimi %r4, %r3, 16, 12, 15 /* Select our entry */
451 mtspr SPR_MAS0, %r4
452 isync
453 tlbre
454 li %r5, 0 /* MAS1[V] = 0 */
455 mtspr SPR_MAS1, %r5
456 isync
457 tlbwe
458 isync
459 msync
460 blr
461
462/*
463 * r29 current entry number
464 * r28 returned temp entry
465 * r3-r5 scratched
466 */
467tlb1_temp_mapping_as1:
468 /* Read our current translation */
469 lis %r3, MAS0_TLBSEL1@h /* Select TLB1 */
470 rlwimi %r3, %r29, 16, 12, 15 /* Select our current entry */
471 mtspr SPR_MAS0, %r3
472 isync
473 tlbre
474
475 /*
476 * Prepare and write temp entry
477 *
478 * FIXME this is not robust against overflow i.e. when the current
479 * entry is the last in TLB1
480 */
481 lis %r3, MAS0_TLBSEL1@h /* Select TLB1 */
482 addi %r28, %r29, 1 /* Use next entry. */
483 rlwimi %r3, %r28, 16, 12, 15 /* Select temp entry */
484 mtspr SPR_MAS0, %r3
485 isync
486 mfspr %r5, SPR_MAS1
487 li %r4, 1 /* AS=1 */
488 rlwimi %r5, %r4, 12, 19, 19
489 li %r4, 0 /* Global mapping, TID=0 */
490 rlwimi %r5, %r4, 16, 8, 15
491 oris %r5, %r5, (MAS1_VALID | MAS1_IPROT)@h
492 mtspr SPR_MAS1, %r5
493 isync
494 tlbwe
495 isync
496 msync
497 blr

/*
 * Loops over TLB1, invalidating all entries, skipping the one which
 * currently maps this code.
 *
 * r29		current entry
 * r3-r5	scratched
 */
tlb1_inval_all_but_current:
	mr	%r6, %r3
	mfspr	%r3, SPR_TLB1CFG	/* Get number of entries */
	andi.	%r3, %r3, TLBCFG_NENTRY_MASK@l
	li	%r4, 0			/* Start from Entry 0 */
1:	lis	%r5, MAS0_TLBSEL1@h
	rlwimi	%r5, %r4, 16, 12, 15
	mtspr	SPR_MAS0, %r5
	isync
	tlbre
	mfspr	%r5, SPR_MAS1
	cmpw	%r4, %r29		/* our current entry? */
	beq	2f
	rlwinm	%r5, %r5, 0, 2, 31	/* clear VALID and IPROT bits */
	mtspr	SPR_MAS1, %r5
	isync
	tlbwe
	isync
	msync
2:	addi	%r4, %r4, 1
	cmpw	%r4, %r3		/* Check if this is the last entry */
	bne	1b
	blr

#ifdef SMP
__boot_page_padding:
	/*
	 * The boot page needs to be exactly 4K, with the last word of the
	 * page acting as the reset vector, so we need to stuff the
	 * remainder. Upon release from holdoff, the CPU fetches the last
	 * word of the boot page.
	 */
	.space	4092 - (__boot_page_padding - __boot_page)
	b	__boot_page
#endif /* SMP */

/************************************************************************/
/* locore subroutines */
/************************************************************************/

ivor_setup:
	/* Set base address of interrupt handler routines */
	lis	%r3, interrupt_vector_base@h
	mtspr	SPR_IVPR, %r3
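	/*
	 * IVPR supplies the high-order half of every vector address; each
	 * IVORn below provides the low-order offset of its handler, hence
	 * the @l relocations.
	 */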

	/* Assign interrupt handler routine offsets */
	li	%r3, int_critical_input@l
	mtspr	SPR_IVOR0, %r3
	li	%r3, int_machine_check@l
	mtspr	SPR_IVOR1, %r3
	li	%r3, int_data_storage@l
	mtspr	SPR_IVOR2, %r3
	li	%r3, int_instr_storage@l
	mtspr	SPR_IVOR3, %r3
	li	%r3, int_external_input@l
	mtspr	SPR_IVOR4, %r3
	li	%r3, int_alignment@l
	mtspr	SPR_IVOR5, %r3
	li	%r3, int_program@l
	mtspr	SPR_IVOR6, %r3
	li	%r3, int_syscall@l
	mtspr	SPR_IVOR8, %r3
	li	%r3, int_decrementer@l
	mtspr	SPR_IVOR10, %r3
	li	%r3, int_fixed_interval_timer@l
	mtspr	SPR_IVOR11, %r3
	li	%r3, int_watchdog@l
	mtspr	SPR_IVOR12, %r3
	li	%r3, int_data_tlb_error@l
	mtspr	SPR_IVOR13, %r3
	li	%r3, int_inst_tlb_error@l
	mtspr	SPR_IVOR14, %r3
	li	%r3, int_debug@l
	mtspr	SPR_IVOR15, %r3
	blr

/*
 * void tid_flush(tlbtid_t tid);
 *
 * Invalidate all TLB0 entries which match the given TID. Note this is
 * dedicated to cases where invalidation(s) should NOT be propagated to
 * other CPUs.
 *
 * Global vars tlb0_ways, tlb0_entries_per_way are assumed to have been
 * set up correctly (by tlb0_get_tlbconf()).
 */
ENTRY(tid_flush)
	cmpwi	%r3, TID_KERNEL
	beq	tid_flush_end	/* don't evict kernel translations */

	/* Number of TLB0 ways */
	lis	%r4, tlb0_ways@h
	ori	%r4, %r4, tlb0_ways@l
	lwz	%r4, 0(%r4)

	/* Number of entries per way */
	lis	%r5, tlb0_entries_per_way@h
	ori	%r5, %r5, tlb0_entries_per_way@l
	lwz	%r5, 0(%r5)

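	/*
	 * The MAS register walk below mutates shared MMU state, so it runs
	 * with interrupts disabled; the MSR is restored at the end.
	 */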
	/* Disable interrupts */
	mfmsr	%r10
	wrteei	0

	li	%r6, 0		/* ways counter */
loop_ways:
	li	%r7, 0		/* entries [per way] counter */
loop_entries:
	/* Select TLB0 and ESEL (way) */
	lis	%r8, MAS0_TLBSEL0@h
	rlwimi	%r8, %r6, 16, 14, 15
	mtspr	SPR_MAS0, %r8
	isync

	/* Select EPN (entry within the way) */
	rlwinm	%r8, %r7, 12, 13, 19
	mtspr	SPR_MAS2, %r8
	isync
	tlbre

	/* Check if valid entry */
	mfspr	%r8, SPR_MAS1
	andis.	%r9, %r8, MAS1_VALID@h
	beq	next_entry	/* invalid entry */

	/* Check if this is our TID */
	rlwinm	%r9, %r8, 16, 24, 31

	cmplw	%r9, %r3
	bne	next_entry	/* not our TID */

	/* Clear VALID bit */
	rlwinm	%r8, %r8, 0, 1, 31
	mtspr	SPR_MAS1, %r8
	isync
	tlbwe
	isync
	msync

next_entry:
	addi	%r7, %r7, 1
	cmpw	%r7, %r5
	bne	loop_entries

	/* Next way */
	addi	%r6, %r6, 1
	cmpw	%r6, %r4
	bne	loop_ways

	/* Restore MSR (possibly re-enable interrupts) */
	mtmsr	%r10
	isync

tid_flush_end:
	blr

/*
 * Cache disable/enable/invalidate sequences, per section 2.16 of the
 * E500CORE RM.
 */
ENTRY(dcache_inval)
	/* Invalidate d-cache */
	mfspr	%r3, SPR_L1CSR0
	ori	%r3, %r3, (L1CSR0_DCFI | L1CSR0_DCLFR)@l
	msync
	isync
	mtspr	SPR_L1CSR0, %r3
	isync
1:	mfspr	%r3, SPR_L1CSR0
	andi.	%r3, %r3, L1CSR0_DCFI
	bne	1b
	blr

ENTRY(dcache_disable)
	/* Disable d-cache */
	mfspr	%r3, SPR_L1CSR0
	li	%r4, L1CSR0_DCE@l
	not	%r4, %r4
	and	%r3, %r3, %r4
	msync
	isync
	mtspr	SPR_L1CSR0, %r3
	isync
	blr

ENTRY(dcache_enable)
	/* Enable d-cache */
	mfspr	%r3, SPR_L1CSR0
	oris	%r3, %r3, (L1CSR0_DCPE | L1CSR0_DCE)@h
	ori	%r3, %r3, (L1CSR0_DCPE | L1CSR0_DCE)@l
	msync
	isync
	mtspr	SPR_L1CSR0, %r3
	isync
	blr

ENTRY(icache_inval)
	/* Invalidate i-cache */
	mfspr	%r3, SPR_L1CSR1
	ori	%r3, %r3, (L1CSR1_ICFI | L1CSR1_ICLFR)@l
	isync
	mtspr	SPR_L1CSR1, %r3
	isync
1:	mfspr	%r3, SPR_L1CSR1
	andi.	%r3, %r3, L1CSR1_ICFI
	bne	1b
	blr

ENTRY(icache_disable)
	/* Disable i-cache */
	mfspr	%r3, SPR_L1CSR1
	li	%r4, L1CSR1_ICE@l
	not	%r4, %r4
	and	%r3, %r3, %r4
	isync
	mtspr	SPR_L1CSR1, %r3
	isync
	blr

ENTRY(icache_enable)
	/* Enable i-cache */
	mfspr	%r3, SPR_L1CSR1
	oris	%r3, %r3, (L1CSR1_ICPE | L1CSR1_ICE)@h
	ori	%r3, %r3, (L1CSR1_ICPE | L1CSR1_ICE)@l
	isync
	mtspr	SPR_L1CSR1, %r3
	isync
	blr

/*
 * int setfault()
 *
 * Similar to setjmp; sets up for handling faults on accesses to user
 * memory. Any routine using this may only call bcopy, either the form
 * below or the (currently used) optimized C code, so that it doesn't use
 * any non-volatile registers.
 */
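/*
 * Fault buffer layout (per the stores below): 0 = LR, 4 = r1, 8 = r2,
 * 12 = CR, 16 = CTR, 20 = XER, 24.. = r13-r31.
 */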
	.globl	setfault
setfault:
	mflr	%r0
	mfsprg0	%r4
	lwz	%r4, PC_CURTHREAD(%r4)
	lwz	%r4, TD_PCB(%r4)
	stw	%r3, PCB_ONFAULT(%r4)
	mfcr	%r10
	mfctr	%r11
	mfxer	%r12
	stw	%r0, 0(%r3)
	stw	%r1, 4(%r3)
	stw	%r2, 8(%r3)
	stmw	%r10, 12(%r3)	/* store CR, CTR, XER, [r13 .. r31] */
	li	%r3, 0		/* return FALSE */
	blr

/************************************************************************/
/* Data section */
/************************************************************************/
	.data
	.align	4
tmpstack:
	.space	TMPSTACKSZ

/*
 * Compiled KERNBASE locations
 */
	.globl	kernbase
	.set	kernbase, KERNBASE

/*
 * Globals
 */
#define	INTRCNT_COUNT	256	/* max(HROWPIC_IRQMAX,OPENPIC_IRQMAX) */

GLOBAL(intrnames)
	.space	INTRCNT_COUNT * (MAXCOMLEN + 1) * 2
GLOBAL(sintrnames)
	.long	INTRCNT_COUNT * (MAXCOMLEN + 1) * 2

	.align	4
GLOBAL(intrcnt)
	.space	INTRCNT_COUNT * 4 * 2
GLOBAL(sintrcnt)
	.long	INTRCNT_COUNT * 4 * 2

#include <powerpc/booke/trap_subr.S>