/* sys/powerpc/booke/locore.S: r292900 -> r292903 */
/*-
 * Copyright (C) 2007-2009 Semihalf, Rafal Jaworowski <raj@semihalf.com>
 * Copyright (C) 2006 Semihalf, Marian Balakowicz <m8@semihalf.com>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN
 * NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
 * TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
 * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
 * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * $FreeBSD: head/sys/powerpc/booke/locore.S 292903 2015-12-30 03:43:25Z jhibbits $
 */

#include "assym.s"

#include "opt_hwpmc_hooks.h"

#include <machine/asm.h>
#include <machine/hid.h>
#include <machine/param.h>
#include <machine/spr.h>
#include <machine/pte.h>
#include <machine/trap.h>
#include <machine/vmparam.h>
#include <machine/tlb.h>

#define TMPSTACKSZ	16384

	.text
	.globl btext
btext:

/*
 * This symbol is here for the benefit of kvm_mkdb, and is supposed to
 * mark the start of kernel text.
 */
	.globl	kernel_text
kernel_text:

/*
 * Startup entry.  Note, this must be the first thing in the text segment!
 */
	.text
	.globl	__start
__start:

/*
 * Assumptions on the boot loader:
 *  - System memory starts from physical address 0
 *  - It's mapped by a single TLB1 entry
 *  - TLB1 mapping is 1:1 pa to va
 *  - Kernel is loaded at 64MB boundary
 *  - All PID registers are set to the same value
 *  - CPU is running in AS=0
 *
 * Register contents provided by the loader(8):
 *	r1	: stack pointer
 *	r3	: metadata pointer
 *
 * We rearrange the TLB1 layout as follows:
 *  - Find TLB1 entry we started in
 *  - Make sure it's protected, invalidate other entries
 *  - Create temp entry in the second AS (make sure it's not TLB[1])
 *  - Switch to temp mapping
 *  - Map 64MB of RAM in TLB1[1]
 *  - Use AS=1, set EPN to KERNBASE and RPN to kernel load address
 *  - Switch to TLB1[1] mapping
 *  - Invalidate temp mapping
 *
 * locore register use:
 *	r1	: stack pointer
 *	r2	: trace pointer (AP only, for early diagnostics)
 *	r3-r27	: scratch registers
 *	r28	: temp TLB1 entry
 *	r29	: initial TLB1 entry we started in
 *	r30-r31	: arguments (metadata pointer)
 */

/*
 * Keep arguments in r30 & r31 for later use.
 */
	mr	%r30, %r3
	mr	%r31, %r4

/*
 * Initial cleanup
 */
	li	%r3, PSL_DE	/* Keep debug exceptions for CodeWarrior. */
	mtmsr	%r3
	isync

	mfpvr	%r3
	rlwinm	%r3, %r3, 16, 16, 31

	lis	%r4, HID0_E500_DEFAULT_SET@h
	ori	%r4, %r4, HID0_E500_DEFAULT_SET@l

	/* Check for e500mc and e5500 */
	cmpli	0, 0, %r3, FSL_E500mc
	bne	2f

	lis	%r4, HID0_E500MC_DEFAULT_SET@h
	ori	%r4, %r4, HID0_E500MC_DEFAULT_SET@l
	b	3f
2:
	cmpli	0, 0, %r3, FSL_E5500
	bne	3f

	lis	%r4, HID0_E5500_DEFAULT_SET@h
	ori	%r4, %r4, HID0_E5500_DEFAULT_SET@l

3:
	mtspr	SPR_HID0, %r4
	isync

/*
 * E500mc and E5500 do not have the HID1 register, so skip HID1 setup on
 * these cores.
 */
	cmpli	0, 0, %r3, FSL_E500mc
	beq	1f
	cmpli	0, 0, %r3, FSL_E5500
	beq	1f

	lis	%r3, HID1_E500_DEFAULT_SET@h
	ori	%r3, %r3, HID1_E500_DEFAULT_SET@l
	mtspr	SPR_HID1, %r3
	isync
1:
	/* Invalidate all entries in TLB0 */
	li	%r3, 0
	bl	tlb_inval_all

	cmpwi	%r30, 0
	beq	done_mapping

/*
 * Locate the TLB1 entry that maps this code
 */
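	/*
	 * The bl/mflr pair below is the usual PowerPC way to discover
	 * the address we are executing at: bl to the next instruction
	 * deposits that instruction's address in the link register.
	 */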
	bl	1f
1:	mflr	%r3
	bl	tlb1_find_current	/* the entry found is returned in r29 */

	bl	tlb1_inval_all_but_current

/*
 * Create temporary mapping in AS=1 and switch to it
 */
	addi	%r3, %r29, 1
	bl	tlb1_temp_mapping_as1

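	/*
	 * Switch to the AS=1 mapping via rfi: SRR0 gets the address to
	 * continue at (20 bytes, i.e. five instructions, past label 2,
	 * which is the instruction right after the rfi) and SRR1 gets
	 * the new MSR with IS/DS set, so translation resumes in address
	 * space 1.
	 */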
	mfmsr	%r3
	ori	%r3, %r3, (PSL_IS | PSL_DS)
	bl	2f
2:	mflr	%r4
	addi	%r4, %r4, 20
	mtspr	SPR_SRR0, %r4
	mtspr	SPR_SRR1, %r3
	rfi				/* Switch context */

/*
 * Invalidate initial entry
 */
	mr	%r3, %r29
	bl	tlb1_inval_entry

/*
 * Set up final mapping in TLB1[1] and switch to it
 */
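	/*
	 * The entry is composed through the MAS registers: MAS0 selects
	 * the TLB array and entry slot, MAS1 carries the valid/IPROT
	 * bits, TID/TS and the page size, MAS2 carries the effective
	 * page number and WIMGE attributes, and MAS3 carries the real
	 * page number and permissions; tlbwe then commits the entry.
	 */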
	/* Final kernel mapping, map in 64 MB of RAM */
	lis	%r3, MAS0_TLBSEL1@h	/* Select TLB1 */
	li	%r4, 0			/* Entry 0 */
	rlwimi	%r3, %r4, 16, 10, 15
	mtspr	SPR_MAS0, %r3
	isync

	li	%r3, (TLB_SIZE_64M << MAS1_TSIZE_SHIFT)@l
	oris	%r3, %r3, (MAS1_VALID | MAS1_IPROT)@h
	mtspr	SPR_MAS1, %r3		/* note TS was not filled, so it's TS=0 */
	isync

	lis	%r3, KERNBASE@h
	ori	%r3, %r3, KERNBASE@l	/* EPN = KERNBASE */
#ifdef SMP
	ori	%r3, %r3, MAS2_M@l	/* WIMGE = 0b00100 */
#endif
	mtspr	SPR_MAS2, %r3
	isync

	/* Discover phys load address */
	bl	3f
3:	mflr	%r4			/* Use current address */
	rlwinm	%r4, %r4, 0, 0, 5	/* 64MB alignment mask */
	ori	%r4, %r4, (MAS3_SX | MAS3_SW | MAS3_SR)@l
	mtspr	SPR_MAS3, %r4		/* Set RPN and protection */
	isync
	bl	zero_mas7
	bl	zero_mas8
	tlbwe
	isync
	msync

	/* Switch to the above TLB1[1] mapping */
	bl	4f
4:	mflr	%r4
	rlwinm	%r4, %r4, 0, 8, 31	/* Current offset from kernel load address */
	rlwinm	%r3, %r3, 0, 0, 19
	add	%r4, %r4, %r3		/* Convert to kernel virtual address */
	addi	%r4, %r4, 36
	li	%r3, PSL_DE		/* Note AS=0 */
	mtspr	SPR_SRR0, %r4
	mtspr	SPR_SRR1, %r3
	rfi

/*
 * Invalidate temp mapping
 */
	mr	%r3, %r28
	bl	tlb1_inval_entry

done_mapping:

/*
 * Set up a temporary stack
 */
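	/*
	 * tmpstack is located PC-relatively: the .long below holds the
	 * distance from itself to tmpstack, so adding the loaded word
	 * to its own address yields the stack base no matter where the
	 * kernel was loaded.  The stack pointer is then placed 16 bytes
	 * short of the top of the area.
	 */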
	bl	1f
	.long tmpstack-.
1:	mflr	%r1
	lwz	%r2,0(%r1)
	add	%r1,%r1,%r2
	addi	%r1, %r1, (TMPSTACKSZ - 16)

/*
 * Relocate kernel
 */
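	/*
	 * The two .long words below hold PC-relative offsets to _DYNAMIC
	 * and the GOT.  got[0] contains the link-time address of _DYNAMIC,
	 * so subtracting it from the run-time address of _DYNAMIC yields
	 * the relocation base that elf_reloc_self applies.
	 */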
	bl	1f
	.long _DYNAMIC-.
	.long _GLOBAL_OFFSET_TABLE_-.
1:	mflr	%r5
	lwz	%r3,0(%r5)	/* _DYNAMIC in %r3 */
	add	%r3,%r3,%r5
	lwz	%r4,4(%r5)	/* GOT pointer */
	add	%r4,%r4,%r5
	lwz	%r4,4(%r4)	/* got[0] is _DYNAMIC link addr */
	subf	%r4,%r4,%r3	/* subtract to calculate relocbase */
	bl	elf_reloc_self

/*
 * Initialise exception vector offsets
 */
	bl	ivor_setup

/*
 * Set up arguments and jump to system initialization code
 */
	mr	%r3, %r30
	mr	%r4, %r31

	/* Prepare core */
	bl	booke_init

	/* Switch to thread0.td_kstack now */
	mr	%r1, %r3
	li	%r3, 0
	stw	%r3, 0(%r1)

	/* Machine independent part, does not return */
	bl	mi_startup
	/* NOT REACHED */
5:	b	5b


#ifdef SMP
/************************************************************************/
/* AP Boot page */
/************************************************************************/
	.text
	.globl __boot_page
	.align 12
__boot_page:
	bl	1f

	.globl bp_ntlb1s
bp_ntlb1s:
	.long 0

	.globl bp_tlb1
bp_tlb1:
	.space 4 * 3 * 64
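	/*
	 * Room for the MAS1/MAS2/MAS3 triple of every TLB1 entry the BSP
	 * hands to the APs: 3 words per entry, up to 64 entries (grown
	 * from 16 entries in r292903).
	 */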

	.globl bp_tlb1_end
bp_tlb1_end:

/*
 * Initial configuration
 */
1:	mflr	%r31		/* r31 holds the address of bp_ntlb1s */

	/* Set HIDs */
	mfpvr	%r3
	rlwinm	%r3, %r3, 16, 16, 31

	/* HID0 for E500 is default */
	lis	%r4, HID0_E500_DEFAULT_SET@h
	ori	%r4, %r4, HID0_E500_DEFAULT_SET@l

	cmpli	0, 0, %r3, FSL_E500mc
	bne	2f
	lis	%r4, HID0_E500MC_DEFAULT_SET@h
	ori	%r4, %r4, HID0_E500MC_DEFAULT_SET@l
	b	3f
2:
	cmpli	0, 0, %r3, FSL_E5500
	bne	3f
	lis	%r4, HID0_E5500_DEFAULT_SET@h
	ori	%r4, %r4, HID0_E5500_DEFAULT_SET@l
3:
	mtspr	SPR_HID0, %r4
	isync
/*
 * E500mc and E5500 do not have the HID1 register, so skip HID1 setup on
 * these cores.
 */
	cmpli	0, 0, %r3, FSL_E500mc
	beq	1f
	cmpli	0, 0, %r3, FSL_E5500
	beq	1f

	lis	%r3, HID1_E500_DEFAULT_SET@h
	ori	%r3, %r3, HID1_E500_DEFAULT_SET@l
	mtspr	SPR_HID1, %r3
	isync
1:
	/* Enable branch prediction */
	li	%r3, BUCSR_BPEN
	mtspr	SPR_BUCSR, %r3
	isync

	/* Invalidate all entries in TLB0 */
	li	%r3, 0
	bl	tlb_inval_all

/*
 * Find TLB1 entry which is translating us now
 */
	bl	2f
2:	mflr	%r3
	bl	tlb1_find_current	/* the entry number found is in r29 */

	bl	tlb1_inval_all_but_current

/*
 * Create temporary translation in AS=1 and switch to it
 */
	lwz	%r3, 0(%r31)
	bl	tlb1_temp_mapping_as1

	mfmsr	%r3
	ori	%r3, %r3, (PSL_IS | PSL_DS)
	bl	3f
3:	mflr	%r4
	addi	%r4, %r4, 20
	mtspr	SPR_SRR0, %r4
	mtspr	SPR_SRR1, %r3
	rfi				/* Switch context */

/*
 * Invalidate initial entry
 */
	mr	%r3, %r29
	bl	tlb1_inval_entry

/*
 * Set up final mapping in TLB1[1] and switch to it
 */
	lwz	%r6, 0(%r31)
	addi	%r5, %r31, 4
	li	%r4, 0

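	/*
	 * Replay the BSP's TLB1 setup: r6 holds bp_ntlb1s, r5 walks the
	 * bp_tlb1 array, and each iteration programs one TLB1 entry from
	 * a stored MAS1/MAS2/MAS3 triple.
	 */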
4:	lis	%r3, MAS0_TLBSEL1@h
	rlwimi	%r3, %r4, 16, 12, 15
	mtspr	SPR_MAS0, %r3
	isync
	lwz	%r3, 0(%r5)
	mtspr	SPR_MAS1, %r3
	isync
	lwz	%r3, 4(%r5)
	mtspr	SPR_MAS2, %r3
	isync
	lwz	%r3, 8(%r5)
	mtspr	SPR_MAS3, %r3
	isync
	tlbwe
	isync
	msync
	addi	%r5, %r5, 12
	addi	%r4, %r4, 1
	cmpw	%r4, %r6
	blt	4b

	/* Switch to the final mapping */
	bl	5f
	.long __boot_page-.
5:	mflr	%r5
	lwz	%r3,0(%r5)		/* PC-relative offset word */
	add	%r5,%r5,%r3		/* __boot_page in r5 */
	bl	6f
6:	mflr	%r3
	rlwinm	%r3, %r3, 0, 0xfff	/* Offset from boot page start */
	add	%r3, %r3, %r5		/* Make this virtual address */
	addi	%r3, %r3, 32
	li	%r4, 0			/* Note AS=0 */
	mtspr	SPR_SRR0, %r3
	mtspr	SPR_SRR1, %r4
	rfi

/*
 * At this point we're running at virtual addresses KERNBASE and beyond, so
 * we can directly access all locations the kernel was linked against.
 */

/*
 * Invalidate temp mapping
 */
	mr	%r3, %r28
	bl	tlb1_inval_entry

/*
 * Set up a temporary stack
 */
	bl	1f
	.long tmpstack-.
1:	mflr	%r1
	lwz	%r2,0(%r1)
	add	%r1,%r1,%r2
	addi	%r1, %r1, (TMPSTACKSZ - 16)

/*
 * Initialise exception vector offsets
 */
	bl	ivor_setup

	/*
	 * Assign our pcpu instance
	 */
	bl	1f
	.long ap_pcpu-.
1:	mflr	%r4
	lwz	%r3, 0(%r4)
	add	%r3, %r3, %r4
	lwz	%r3, 0(%r3)
	mtsprg0	%r3

	bl	pmap_bootstrap_ap

	bl	cpudep_ap_bootstrap
	/* Switch to the idle thread's kstack */
	mr	%r1, %r3

	bl	machdep_ap_bootstrap

	/* NOT REACHED */
6:	b	6b
#endif /* SMP */

/*
 * Invalidate all entries in the given TLB.
 *
 * r3	TLBSEL
 */
tlb_inval_all:
	rlwinm	%r3, %r3, 3, (1 << 3)	/* TLBSEL */
	ori	%r3, %r3, (1 << 2)	/* INVALL */
	tlbivax	0, %r3
	isync
	msync

	tlbsync
	msync
	blr

/*
 * expects address to look up in r3, returns entry number in r29
 *
 * FIXME: the hidden assumption is we are now running in AS=0, but we should
 * retrieve actual AS from MSR[IS|DS] and put it in MAS6[SAS]
 */
tlb1_find_current:
	mfspr	%r17, SPR_PID0
	slwi	%r17, %r17, MAS6_SPID0_SHIFT
	mtspr	SPR_MAS6, %r17
	isync
	tlbsx	0, %r3
	mfspr	%r17, SPR_MAS0
	rlwinm	%r29, %r17, 16, 20, 31		/* MAS0[ESEL] -> r29 */

	/* Make sure we have IPROT set on the entry */
	mfspr	%r17, SPR_MAS1
	oris	%r17, %r17, MAS1_IPROT@h
	mtspr	SPR_MAS1, %r17
	isync
	tlbwe
	isync
	msync
	blr

/*
 * Invalidates a single entry in TLB1.
 *
 * r3		ESEL
 * r4-r5	scratched
 */
tlb1_inval_entry:
	lis	%r4, MAS0_TLBSEL1@h	/* Select TLB1 */
	rlwimi	%r4, %r3, 16, 10, 15	/* Select our entry */
	mtspr	SPR_MAS0, %r4
	isync
	tlbre
	li	%r5, 0			/* MAS1[V] = 0 */
	mtspr	SPR_MAS1, %r5
	isync
	tlbwe
	isync
	msync
	blr

/*
 * r3		entry of temp translation
 * r29		entry of current translation
 * r28		returns temp entry passed in r3
 * r4-r5	scratched
 */
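/*
 * Reads the current (r29) TLB1 entry and rewrites it into slot r3 as a
 * global (TID=0) translation in address space 1, so that execution can
 * continue from AS=1 while the original entry is being replaced.
 */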
tlb1_temp_mapping_as1:
	mr	%r28, %r3

	/* Read our current translation */
	lis	%r3, MAS0_TLBSEL1@h	/* Select TLB1 */
	rlwimi	%r3, %r29, 16, 10, 15	/* Select our current entry */
	mtspr	SPR_MAS0, %r3
	isync
	tlbre

	/* Prepare and write temp entry */
	lis	%r3, MAS0_TLBSEL1@h	/* Select TLB1 */
	rlwimi	%r3, %r28, 16, 10, 15	/* Select temp entry */
	mtspr	SPR_MAS0, %r3
	isync
	mfspr	%r5, SPR_MAS1
	li	%r4, 1			/* AS=1 */
	rlwimi	%r5, %r4, 12, 19, 19
	li	%r4, 0			/* Global mapping, TID=0 */
	rlwimi	%r5, %r4, 16, 8, 15
	oris	%r5, %r5, (MAS1_VALID | MAS1_IPROT)@h
	mtspr	SPR_MAS1, %r5
	isync
	mflr	%r3
	bl	zero_mas7
	bl	zero_mas8
	mtlr	%r3
	tlbwe
	isync
	msync
	blr

/*
 * Loops over TLB1, invalidates all entries skipping the one which currently
 * maps this code.
 *
 * r29		current entry
 * r3-r5	scratched
 */
tlb1_inval_all_but_current:
	mr	%r6, %r3
	mfspr	%r3, SPR_TLB1CFG	/* Get number of entries */
	andi.	%r3, %r3, TLBCFG_NENTRY_MASK@l
	li	%r4, 0			/* Start from Entry 0 */
1:	lis	%r5, MAS0_TLBSEL1@h
	rlwimi	%r5, %r4, 16, 10, 15
	mtspr	SPR_MAS0, %r5
	isync
	tlbre
	mfspr	%r5, SPR_MAS1
	cmpw	%r4, %r29		/* our current entry? */
	beq	2f
	rlwinm	%r5, %r5, 0, 2, 31	/* clear VALID and IPROT bits */
	mtspr	SPR_MAS1, %r5
	isync
	tlbwe
	isync
	msync
2:	addi	%r4, %r4, 1
	cmpw	%r4, %r3		/* Check if this is the last entry */
	bne	1b
	blr

/*
 * MAS7 and MAS8 conditional zeroing.
 */
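/*
 * MAS7 (upper RPN bits) is not implemented on the original e500v1, and
 * MAS8 exists only on the hypervisor-capable e500mc/e5500 cores, so both
 * routines check the PVR before touching the register.
 */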
.globl zero_mas7
zero_mas7:
	mfpvr	%r20
	rlwinm	%r20, %r20, 16, 16, 31
	cmpli	0, 0, %r20, FSL_E500v1
	beq	1f

	li	%r20, 0
	mtspr	SPR_MAS7, %r20
	isync
1:
	blr

.globl zero_mas8
zero_mas8:
	mfpvr	%r20
	rlwinm	%r20, %r20, 16, 16, 31
	cmpli	0, 0, %r20, FSL_E500mc
	beq	1f
	cmpli	0, 0, %r20, FSL_E5500
	beq	1f

	blr
1:
	li	%r20, 0
	mtspr	SPR_MAS8, %r20
	isync
	blr

#ifdef SMP
__boot_page_padding:
	/*
	 * Boot page needs to be exactly 4K, with the last word of this page
	 * acting as the reset vector, so we need to stuff the remainder.
	 * Upon release from holdoff, the CPU fetches the last word of the
	 * boot page.
	 */
	.space 4092 - (__boot_page_padding - __boot_page)
	b	__boot_page
#endif /* SMP */

/************************************************************************/
/* locore subroutines */
/************************************************************************/

/*
 * Cache disable/enable/inval sequences according
 * to section 2.16 of E500CORE RM.
 */
ENTRY(dcache_inval)
	/* Invalidate d-cache */
	mfspr	%r3, SPR_L1CSR0
	ori	%r3, %r3, (L1CSR0_DCFI | L1CSR0_DCLFR)@l
	msync
	isync
	mtspr	SPR_L1CSR0, %r3
	isync
1:	mfspr	%r3, SPR_L1CSR0
	andi.	%r3, %r3, L1CSR0_DCFI
	bne	1b
	blr

ENTRY(dcache_disable)
	/* Disable d-cache */
	mfspr	%r3, SPR_L1CSR0
	li	%r4, L1CSR0_DCE@l
	not	%r4, %r4
	and	%r3, %r3, %r4
	msync
	isync
	mtspr	SPR_L1CSR0, %r3
	isync
	blr

ENTRY(dcache_enable)
	/* Enable d-cache */
	mfspr	%r3, SPR_L1CSR0
	oris	%r3, %r3, (L1CSR0_DCPE | L1CSR0_DCE)@h
	ori	%r3, %r3, (L1CSR0_DCPE | L1CSR0_DCE)@l
	msync
	isync
	mtspr	SPR_L1CSR0, %r3
	isync
	blr

ENTRY(icache_inval)
	/* Invalidate i-cache */
	mfspr	%r3, SPR_L1CSR1
	ori	%r3, %r3, (L1CSR1_ICFI | L1CSR1_ICLFR)@l
	isync
	mtspr	SPR_L1CSR1, %r3
	isync
1:	mfspr	%r3, SPR_L1CSR1
	andi.	%r3, %r3, L1CSR1_ICFI
	bne	1b
	blr

ENTRY(icache_disable)
	/* Disable i-cache */
	mfspr	%r3, SPR_L1CSR1
	li	%r4, L1CSR1_ICE@l
	not	%r4, %r4
	and	%r3, %r3, %r4
	isync
	mtspr	SPR_L1CSR1, %r3
	isync
	blr

ENTRY(icache_enable)
	/* Enable i-cache */
	mfspr	%r3, SPR_L1CSR1
	oris	%r3, %r3, (L1CSR1_ICPE | L1CSR1_ICE)@h
	ori	%r3, %r3, (L1CSR1_ICPE | L1CSR1_ICE)@l
	isync
	mtspr	SPR_L1CSR1, %r3
	isync
	blr

/*
 * L2 cache disable/enable/inval sequences for E500mc.
 */

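/*
 * l2cache_inval sets the L2 flash-invalidate and lock-flash-clear bits,
 * then spins until the hardware clears L2FI to signal completion.
 */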
ENTRY(l2cache_inval)
	mfspr	%r3, SPR_L2CSR0
	oris	%r3, %r3, (L2CSR0_L2FI | L2CSR0_L2LFC)@h
	ori	%r3, %r3, (L2CSR0_L2FI | L2CSR0_L2LFC)@l
	isync
	mtspr	SPR_L2CSR0, %r3
	isync
1:	mfspr	%r3, SPR_L2CSR0
	andis.	%r3, %r3, L2CSR0_L2FI@h
	bne	1b
	blr

ENTRY(l2cache_enable)
	mfspr	%r3, SPR_L2CSR0
	oris	%r3, %r3, (L2CSR0_L2E | L2CSR0_L2PE)@h
	isync
	mtspr	SPR_L2CSR0, %r3
	isync
	blr

/*
 * Branch predictor setup.
 */
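	/*
	 * bpred_enable first flash-invalidates the branch buffer (BBFI)
	 * and only then sets the enable bit (BPEN), so no stale
	 * predictions are consumed.
	 */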
ENTRY(bpred_enable)
	mfspr	%r3, SPR_BUCSR
	ori	%r3, %r3, BUCSR_BBFI
	isync
	mtspr	SPR_BUCSR, %r3
	isync
	ori	%r3, %r3, BUCSR_BPEN
	isync
	mtspr	SPR_BUCSR, %r3
	isync
	blr

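/*
 * Performs the guarded store for a data-loss erratum workaround: the two
 * cache lines holding the code below are locked into the I-cache, the
 * store of %r4 to 0(%r3) is bracketed by roughly one-million-tick
 * timebase delays, and the lines are unlocked again before returning.
 */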
ENTRY(dataloss_erratum_access)
	/* Lock two cache lines into I-Cache */
	sync
	mfspr	%r11, SPR_L1CSR1
	rlwinm	%r11, %r11, 0, ~L1CSR1_ICUL
	sync
	isync
	mtspr	SPR_L1CSR1, %r11
	isync

	mflr	%r9
	bl	1f
	.long 2f-.
1:
	mflr	%r5
	lwz	%r8, 0(%r5)
	mtlr	%r9
	add	%r8, %r8, %r5
	icbtls	0, 0, %r8
	addi	%r9, %r8, 64

	sync
	mfspr	%r11, SPR_L1CSR1
3:	andi.	%r11, %r11, L1CSR1_ICUL
	bne	3b

	icbtls	0, 0, %r9

	sync
	mfspr	%r11, SPR_L1CSR1
3:	andi.	%r11, %r11, L1CSR1_ICUL
	bne	3b

	b	2f
	.align	6
	/* Inside a locked cacheline, wait a while, write, then wait a while */
2:	sync

	mfspr	%r5, TBR_TBL
4:	addis	%r11, %r5, 0x100000@h	/* wait around one million timebase ticks */
	mfspr	%r5, TBR_TBL
	subf.	%r5, %r5, %r11
	bgt	4b

	stw	%r4, 0(%r3)

	mfspr	%r5, TBR_TBL
4:	addis	%r11, %r5, 0x100000@h	/* wait around one million timebase ticks */
	mfspr	%r5, TBR_TBL
	subf.	%r5, %r5, %r11
	bgt	4b

	sync

	/*
	 * Fill out the rest of this cache line and the next with nops,
	 * to ensure that nothing outside the locked area will be
	 * fetched due to a branch.
	 */
	.rept	19
	nop
	.endr

	icblc	0, 0, %r8
	icblc	0, 0, %r9

	blr

/*
 * int setfault()
 *
 * Similar to setjmp; sets up for handling faults on accesses to user
 * memory.  Any routine using this may only call bcopy, either the form
 * below or the (currently used) optimized C version, as it must not use
 * any non-volatile registers.
 */
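	/*
	 * %r2 is expected to hold the curthread pointer here (TD_PCB is
	 * indexed off it); the jump buffer address passed in %r3 is
	 * stored into the thread's pcb_onfault field.
	 */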
	.globl	setfault
setfault:
	mflr	%r0
	mfsprg0	%r4
	lwz	%r4, TD_PCB(%r2)
	stw	%r3, PCB_ONFAULT(%r4)
	mfcr	%r4
	stw	%r0, 0(%r3)
	stw	%r1, 4(%r3)
	stw	%r2, 8(%r3)
	stw	%r4, 12(%r3)
	stmw	%r13, 16(%r3)		/* store CR, CTR, XER, [r13 .. r31] */
	li	%r3, 0			/* return FALSE */
	blr

/************************************************************************/
/* Data section */
/************************************************************************/
	.data
	.align 3
GLOBAL(__startkernel)
	.long	begin
GLOBAL(__endkernel)
	.long	end
	.align	4
tmpstack:
	.space	TMPSTACKSZ
tmpstackbound:
	.space 10240	/* XXX: this really should not be necessary */

/*
 * Compiled KERNBASE locations
 */
	.globl	kernbase
	.set	kernbase, KERNBASE

#include <powerpc/booke/trap_subr.S>