/*-
 * Copyright (C) 2007-2009 Semihalf, Rafal Jaworowski <raj@semihalf.com>
 * Copyright (C) 2006 Semihalf, Marian Balakowicz <m8@semihalf.com>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN
 * NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
 * TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
 * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
 * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * $FreeBSD: head/sys/powerpc/booke/locore.S 222400 2011-05-28 04:10:44Z marcel $
 */

#include "assym.s"

#include <machine/asm.h>
#include <machine/hid.h>
#include <machine/param.h>
#include <machine/spr.h>
#include <machine/pte.h>
#include <machine/trap.h>
#include <machine/vmparam.h>
#include <machine/tlb.h>

#define TMPSTACKSZ 16384

        .text
        .globl btext
btext:

/*
 * This symbol is here for the benefit of kvm_mkdb, and is supposed to
 * mark the start of kernel text.
 */
        .globl kernel_text
kernel_text:

/*
 * Startup entry. Note, this must be the first thing in the text segment!
 */
        .text
        .globl __start
__start:

/*
 * Assumptions on the boot loader:
 * - system memory starts from physical address 0
 * - it's mapped by a single TLB1 entry
 * - TLB1 mapping is 1:1 pa to va
 * - kernel is loaded at 16MB boundary
 * - all PID registers are set to the same value
 * - CPU is running in AS=0
 *
 * Register contents provided by the loader(8):
 * r1 : stack pointer
 * r3 : metadata pointer
 *
 * We rearrange the TLB1 layout as follows:
 * - find the TLB1 entry we started in
 * - make sure it's protected, invalidate other entries
 * - create a temp entry in the second AS (make sure it's not TLB1[1])
 * - switch to the temp mapping
 * - map 16MB of RAM in TLB1[1]
 * - use AS=1, set EPN to KERNBASE and RPN to kernel load address
 * - switch to the TLB1[1] mapping
 * - invalidate the temp mapping
 *
 * locore register use:
 * r1 : stack pointer
 * r2 : trace pointer (AP only, for early diagnostics)
 * r3-r26 : scratch registers
 * r27 : kernload
 * r28 : temp TLB1 entry
 * r29 : initial TLB1 entry we started in
 * r30-r31 : arguments (metadata pointer)
 */

/*
 * Keep arguments in r30 & r31 for later use.
 */
        mr %r30, %r3
        mr %r31, %r4

/*
 * Initial cleanup
 */
        li %r3, PSL_DE          /* Keep debug exceptions for CodeWarrior. */
        mtmsr %r3
        isync

        lis %r3, HID0_E500_DEFAULT_SET@h
        ori %r3, %r3, HID0_E500_DEFAULT_SET@l
        mtspr SPR_HID0, %r3
        isync
        lis %r3, HID1_E500_DEFAULT_SET@h
        ori %r3, %r3, HID1_E500_DEFAULT_SET@l
        mtspr SPR_HID1, %r3
        isync

        /* Invalidate all entries in TLB0 */
        li %r3, 0
        bl tlb_inval_all

/*
 * Locate the TLB1 entry that maps this code
 */
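/*
 * The bl/mflr pair below is the usual PC-discovery idiom: bl sets LR to
 * the address of the instruction following it, so mflr yields the address
 * we are currently executing at.
 */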
        bl 1f
1:      mflr %r3
        bl tlb1_find_current    /* the entry found is returned in r29 */

        bl tlb1_inval_all_but_current
/*
 * Create temporary mapping in AS=1 and switch to it
 */
        bl tlb1_temp_mapping_as1

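/*
 * Trampoline into AS=1: set MSR[IS|DS] in the SRR1 image, point SRR0 at
 * the instruction just past the rfi (mflr + 20 bytes, i.e. 5 instructions
 * after label 2), and rfi to switch translation spaces.
 */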
        mfmsr %r3
        ori %r3, %r3, (PSL_IS | PSL_DS)
        bl 2f
2:      mflr %r4
        addi %r4, %r4, 20
        mtspr SPR_SRR0, %r4
        mtspr SPR_SRR1, %r3
        rfi                     /* Switch context */

/*
 * Invalidate initial entry
 */
        mr %r3, %r29
        bl tlb1_inval_entry

/*
 * Setup final mapping in TLB1[1] and switch to it
 */
        /* Final kernel mapping, map in 16 MB of RAM */
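        /*
         * MAS0 layout note (e500): TLBSEL lives in bits 2-3 and ESEL in
         * bits 12-15 (big-endian bit numbering).  The rlwimi below rotates
         * the entry number in r4 left by 16 so it lands in the ESEL field.
         */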
        lis %r3, MAS0_TLBSEL1@h /* Select TLB1 */
        li %r4, 0               /* Entry 0 */
        rlwimi %r3, %r4, 16, 12, 15
        mtspr SPR_MAS0, %r3
        isync

        li %r3, (TLB_SIZE_16M << MAS1_TSIZE_SHIFT)@l
        oris %r3, %r3, (MAS1_VALID | MAS1_IPROT)@h
        mtspr SPR_MAS1, %r3     /* note TS was not filled, so it's TS=0 */
        isync

        lis %r3, KERNBASE@h
        ori %r3, %r3, KERNBASE@l /* EPN = KERNBASE */
#ifdef SMP
        ori %r3, %r3, MAS2_M@l  /* WIMGE = 0b00100 */
#endif
        mtspr SPR_MAS2, %r3
        isync

        /* Discover phys load address */
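        /*
         * bl/mflr again gives the current physical PC; masking off the low
         * 24 bits (rlwinm with mask 0xff000000) rounds it down to the
         * 16MB-aligned kernel load address.
         */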
        bl 3f
3:      mflr %r4                /* Use current address */
        rlwinm %r4, %r4, 0, 0, 7 /* 16MB alignment mask */
        mr %r27, %r4            /* Keep kernel load address */
        ori %r4, %r4, (MAS3_SX | MAS3_SW | MAS3_SR)@l
        mtspr SPR_MAS3, %r4     /* Set RPN and protection */
        isync
        tlbwe
        isync
        msync

        /* Switch to the above TLB1[1] mapping */
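        /*
         * Compute the kernel virtual address of the instruction after the
         * rfi: the low 24 bits of the current PC give the offset from the
         * load base, r3 with its low 12 bits (WIMGE flags) cleared is
         * KERNBASE, and the +36 skips the 9 instructions from label 4
         * through the rfi.
         */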
        bl 4f
4:      mflr %r4
        rlwinm %r4, %r4, 0, 8, 31 /* Current offset from kernel load address */
        rlwinm %r3, %r3, 0, 0, 19
        add %r4, %r4, %r3       /* Convert to kernel virtual address */
        addi %r4, %r4, 36
        li %r3, PSL_DE          /* Note AS=0 */
        mtspr SPR_SRR0, %r4
        mtspr SPR_SRR1, %r3
        rfi

/*
 * Invalidate temp mapping
 */
        mr %r3, %r28
        bl tlb1_inval_entry

/*
 * Save kernel load address for later use.
 */
        lis %r3, kernload@ha
        addi %r3, %r3, kernload@l
        stw %r27, 0(%r3)
#ifdef SMP
        /*
         * APs need a separate copy of kernload info within the __boot_page
         * area so they can access this value very early, before their TLBs
         * are fully set up and the kernload global location is available.
         */
        lis %r3, kernload_ap@ha
        addi %r3, %r3, kernload_ap@l
        stw %r27, 0(%r3)
        msync
#endif

/*
 * Setup a temporary stack
 */
        lis %r1, tmpstack@ha
        addi %r1, %r1, tmpstack@l
        addi %r1, %r1, (TMPSTACKSZ - 8)

/*
 * Initialise exception vector offsets
 */
        bl ivor_setup

/*
 * Set up arguments and jump to system initialization code
 */
        mr %r3, %r30
        mr %r4, %r31

        /* Prepare e500 core */
        bl booke_init

        /* Switch to thread0.td_kstack now */
        mr %r1, %r3
        li %r3, 0
        stw %r3, 0(%r1)

        /* Machine independent part, does not return */
        bl mi_startup
        /* NOT REACHED */
5:      b 5b


#ifdef SMP
/************************************************************************/
/* AP Boot page */
/************************************************************************/
        .text
        .globl __boot_page
        .align 12
__boot_page:
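/*
 * An AP released from holdoff starts fetching at the last word of this
 * 4KB page (see __boot_page_padding), which branches back here.  The bl
 * below skips over kernload_ap, a data word embedded in the boot page so
 * it is reachable before the AP's TLBs are fully set up.
 */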
        bl 1f

kernload_ap:
        .long 0

/*
 * Initial configuration
 */
1:
        /* Set HIDs */
        lis %r3, HID0_E500_DEFAULT_SET@h
        ori %r3, %r3, HID0_E500_DEFAULT_SET@l
        mtspr SPR_HID0, %r3
        isync
        lis %r3, HID1_E500_DEFAULT_SET@h
        ori %r3, %r3, HID1_E500_DEFAULT_SET@l
        mtspr SPR_HID1, %r3
        isync

        /* Enable branch prediction */
        li %r3, BUCSR_BPEN
        mtspr SPR_BUCSR, %r3
        isync

        /* Invalidate all entries in TLB0 */
        li %r3, 0
        bl tlb_inval_all

/*
 * Find the TLB1 entry which is translating us now
 */
        bl 2f
2:      mflr %r3
        bl tlb1_find_current    /* the entry number found is in r29 */

        bl tlb1_inval_all_but_current
/*
 * Create temporary translation in AS=1 and switch to it
 */
        bl tlb1_temp_mapping_as1

        mfmsr %r3
        ori %r3, %r3, (PSL_IS | PSL_DS)
        bl 3f
3:      mflr %r4
        addi %r4, %r4, 20
        mtspr SPR_SRR0, %r4
        mtspr SPR_SRR1, %r3
        rfi                     /* Switch context */

/*
 * Invalidate initial entry
 */
        mr %r3, %r29
        bl tlb1_inval_entry

/*
 * Setup final mapping in TLB1[1] and switch to it
 */
        /* Final kernel mapping, map in 16 MB of RAM */
        lis %r3, MAS0_TLBSEL1@h /* Select TLB1 */
        li %r4, 0               /* Entry 0 */
        rlwimi %r3, %r4, 16, 4, 15
        mtspr SPR_MAS0, %r3
        isync

        li %r3, (TLB_SIZE_16M << MAS1_TSIZE_SHIFT)@l
        oris %r3, %r3, (MAS1_VALID | MAS1_IPROT)@h
        mtspr SPR_MAS1, %r3     /* note TS was not filled, so it's TS=0 */
        isync

        lis %r3, KERNBASE@h
        ori %r3, %r3, KERNBASE@l /* EPN = KERNBASE */
        ori %r3, %r3, MAS2_M@l  /* WIMGE = 0b00100 */
        mtspr SPR_MAS2, %r3
        isync

        /* Retrieve kernel load [physical] address from kernload_ap */
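        /*
         * mflr gives the current physical PC; masking off the low 12 bits
         * yields the physical base of this (4KB-aligned) boot page.  Add
         * the link-time offset of kernload_ap within __boot_page and load
         * the value the BSP stashed there.
         */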
        bl 4f
4:      mflr %r3
        rlwinm %r3, %r3, 0, 0, 19
        lis %r4, kernload_ap@h
        ori %r4, %r4, kernload_ap@l
        lis %r5, __boot_page@h
        ori %r5, %r5, __boot_page@l
        sub %r4, %r4, %r5       /* offset of kernload_ap within __boot_page */
        lwzx %r3, %r4, %r3

        /* Set RPN and protection */
        ori %r3, %r3, (MAS3_SX | MAS3_SW | MAS3_SR)@l
        mtspr SPR_MAS3, %r3
        isync
        tlbwe
        isync
        msync

        /* Switch to the final mapping */
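        /*
         * SRR0 = offset of the target within the boot page plus the boot
         * page's link-time (virtual) address in r5; the +32 skips the 8
         * instructions from label 5 through the rfi.  SRR1 = 0 selects AS=0.
         */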
        bl 5f
5:      mflr %r3
        rlwinm %r3, %r3, 0, 0xfff /* Offset from boot page start */
        add %r3, %r3, %r5       /* Make this virtual address */
        addi %r3, %r3, 32
        li %r4, 0               /* Note AS=0 */
        mtspr SPR_SRR0, %r3
        mtspr SPR_SRR1, %r4
        rfi

/*
 * At this point we're running at virtual addresses KERNBASE and beyond, so
 * we can directly access all locations the kernel was linked against.
 */

/*
 * Invalidate temp mapping
 */
        mr %r3, %r28
        bl tlb1_inval_entry

/*
 * Setup a temporary stack
 */
        lis %r1, tmpstack@ha
        addi %r1, %r1, tmpstack@l
        addi %r1, %r1, (TMPSTACKSZ - 8)

/*
 * Initialise exception vector offsets
 */
        bl ivor_setup

        /*
         * Assign our pcpu instance
         */
        lis %r3, ap_pcpu@h
        ori %r3, %r3, ap_pcpu@l
        lwz %r3, 0(%r3)
        mtsprg0 %r3

        bl pmap_bootstrap_ap

        bl cpudep_ap_bootstrap
        /* Switch to the idle thread's kstack */
        mr %r1, %r3

        bl machdep_ap_bootstrap

        /* NOT REACHED */
6:      b 6b
#endif /* SMP */

/*
 * Invalidate all entries in the given TLB.
 *
 * r3   TLBSEL
 */
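/*
 * Note: the rlwinm below shifts the TLBSEL argument into bits 27-28 of the
 * tlbivax effective address (mask 0x18), and the ori sets the INV_ALL bit
 * (0x4), per the e500 tlbivax EA format.
 */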
tlb_inval_all:
        rlwinm %r3, %r3, 3, 0x18 /* TLBSEL */
        ori %r3, %r3, 0x4       /* INVALL */
        tlbivax 0, %r3
        isync
        msync

        tlbsync
        msync
        blr

/*
 * Expects the address to look up in r3; returns the entry number in r29.
 *
 * FIXME: the hidden assumption is we are now running in AS=0, but we should
 * retrieve the actual AS from MSR[IS|DS] and put it in MAS6[SAS].
 */
tlb1_find_current:
        mfspr %r17, SPR_PID0
        slwi %r17, %r17, MAS6_SPID0_SHIFT
        mtspr SPR_MAS6, %r17
        isync
        tlbsx 0, %r3
        mfspr %r17, SPR_MAS0
        rlwinm %r29, %r17, 16, 20, 31 /* MAS0[ESEL] -> r29 */

        /* Make sure we have IPROT set on the entry */
        mfspr %r17, SPR_MAS1
        oris %r17, %r17, MAS1_IPROT@h
        mtspr SPR_MAS1, %r17
        isync
        tlbwe
        isync
        msync
        blr

/*
 * Invalidates a single entry in TLB1.
 *
 * r3    ESEL
 * r4-r5 scratched
 */
tlb1_inval_entry:
        lis %r4, MAS0_TLBSEL1@h /* Select TLB1 */
        rlwimi %r4, %r3, 16, 12, 15 /* Select our entry */
        mtspr SPR_MAS0, %r4
        isync
        tlbre
        li %r5, 0               /* MAS1[V] = 0 */
        mtspr SPR_MAS1, %r5
        isync
        tlbwe
        isync
        msync
        blr

/*
 * r29   current entry number
 * r28   returned temp entry
 * r3-r5 scratched
 */
tlb1_temp_mapping_as1:
        /* Read our current translation */
        lis %r3, MAS0_TLBSEL1@h /* Select TLB1 */
        rlwimi %r3, %r29, 16, 12, 15 /* Select our current entry */
        mtspr SPR_MAS0, %r3
        isync
        tlbre

        /*
         * Prepare and write temp entry
         *
         * FIXME: this is not robust against overflow, i.e. when the current
         * entry is the last in TLB1
         */
        lis %r3, MAS0_TLBSEL1@h /* Select TLB1 */
        addi %r28, %r29, 1      /* Use next entry. */
        rlwimi %r3, %r28, 16, 12, 15 /* Select temp entry */
        mtspr SPR_MAS0, %r3
        isync
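        /*
         * MAS1[TS] is the 0x00001000 bit: rotating 1 left by 12 and
         * inserting it at bit 19 selects AS=1.  MAS1[TID] occupies bits
         * 8-15 (mask 0x00FF0000); inserting 0 there makes the mapping
         * global.
         */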
        mfspr %r5, SPR_MAS1
        li %r4, 1               /* AS=1 */
        rlwimi %r5, %r4, 12, 19, 19
        li %r4, 0               /* Global mapping, TID=0 */
        rlwimi %r5, %r4, 16, 8, 15
        oris %r5, %r5, (MAS1_VALID | MAS1_IPROT)@h
        mtspr SPR_MAS1, %r5
        isync
        tlbwe
        isync
        msync
        blr

/*
 * Loops over TLB1, invalidating all entries except the one which currently
 * maps this code.
 *
 * r29   current entry
 * r3-r5 scratched
 */
tlb1_inval_all_but_current:
        mr %r6, %r3
        mfspr %r3, SPR_TLB1CFG  /* Get number of entries */
        andi. %r3, %r3, TLBCFG_NENTRY_MASK@l
        li %r4, 0               /* Start from Entry 0 */
1:      lis %r5, MAS0_TLBSEL1@h
        rlwimi %r5, %r4, 16, 12, 15
        mtspr SPR_MAS0, %r5
        isync
        tlbre
        mfspr %r5, SPR_MAS1
        cmpw %r4, %r29          /* our current entry? */
        beq 2f
        rlwinm %r5, %r5, 0, 2, 31 /* clear VALID and IPROT bits */
        mtspr SPR_MAS1, %r5
        isync
        tlbwe
        isync
        msync
2:      addi %r4, %r4, 1
        cmpw %r4, %r3           /* Check if this is the last entry */
        bne 1b
        blr

#ifdef SMP
__boot_page_padding:
        /*
         * The boot page needs to be exactly 4K, with the last word of this
         * page acting as the reset vector, so we need to pad out the
         * remainder.  Upon release from holdoff, the CPU fetches the last
         * word of the boot page.
         */
        .space 4092 - (__boot_page_padding - __boot_page)
        b __boot_page
#endif /* SMP */

/************************************************************************/
/* locore subroutines */
/************************************************************************/

ivor_setup:
        /* Set base address of interrupt handler routines */
        lis %r3, interrupt_vector_base@h
        mtspr SPR_IVPR, %r3

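        /*
         * On Book-E, each IVOR supplies only the low-order offset bits of
         * a vector; IVPR supplies the high bits.  All handlers must
         * therefore reside in the same region as interrupt_vector_base for
         * the @l offsets below to resolve correctly.
         */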
        /* Assign interrupt handler routines offsets */
        li %r3, int_critical_input@l
        mtspr SPR_IVOR0, %r3
        li %r3, int_machine_check@l
        mtspr SPR_IVOR1, %r3
        li %r3, int_data_storage@l
        mtspr SPR_IVOR2, %r3
        li %r3, int_instr_storage@l
        mtspr SPR_IVOR3, %r3
        li %r3, int_external_input@l
        mtspr SPR_IVOR4, %r3
        li %r3, int_alignment@l
        mtspr SPR_IVOR5, %r3
        li %r3, int_program@l
        mtspr SPR_IVOR6, %r3
        li %r3, int_syscall@l
        mtspr SPR_IVOR8, %r3
        li %r3, int_decrementer@l
        mtspr SPR_IVOR10, %r3
        li %r3, int_fixed_interval_timer@l
        mtspr SPR_IVOR11, %r3
        li %r3, int_watchdog@l
        mtspr SPR_IVOR12, %r3
        li %r3, int_data_tlb_error@l
        mtspr SPR_IVOR13, %r3
        li %r3, int_inst_tlb_error@l
        mtspr SPR_IVOR14, %r3
        li %r3, int_debug@l
        mtspr SPR_IVOR15, %r3
        blr

/*
 * void tid_flush(tlbtid_t tid);
 *
 * Invalidate all TLB0 entries which match the given TID.  Note this is
 * dedicated to cases when invalidations should NOT be propagated to other
 * CPUs.
 *
 * Global vars tlb0_ways, tlb0_entries_per_way are assumed to have been set up
 * correctly (by tlb0_get_tlbconf()).
 */
ENTRY(tid_flush)
        cmpwi %r3, TID_KERNEL
        beq tid_flush_end       /* don't evict kernel translations */

        /* Number of TLB0 ways */
        lis %r4, tlb0_ways@h
        ori %r4, %r4, tlb0_ways@l
        lwz %r4, 0(%r4)

        /* Number of entries / way */
        lis %r5, tlb0_entries_per_way@h
        ori %r5, %r5, tlb0_entries_per_way@l
        lwz %r5, 0(%r5)

        /* Disable interrupts */
        mfmsr %r10
        wrteei 0

        li %r6, 0               /* ways counter */
loop_ways:
        li %r7, 0               /* entries [per way] counter */
loop_entries:
        /* Select TLB0 and ESEL (way) */
        lis %r8, MAS0_TLBSEL0@h
        rlwimi %r8, %r6, 16, 14, 15
        mtspr SPR_MAS0, %r8
        isync

        /* Select EPN (entry within the way) */
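        /*
         * The rlwinm below shifts the entry index left by 12, placing it
         * in the low EPN bits of MAS2; those bits determine which set of
         * the selected way tlbre reads back.
         */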
        rlwinm %r8, %r7, 12, 13, 19
        mtspr SPR_MAS2, %r8
        isync
        tlbre

        /* Check if valid entry */
        mfspr %r8, SPR_MAS1
        andis. %r9, %r8, MAS1_VALID@h
        beq next_entry          /* invalid entry */

        /* Check if this is our TID */
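        /* MAS1[TID] is bits 8-15; rotate left 16 to land it in the low byte */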
        rlwinm %r9, %r8, 16, 24, 31

        cmplw %r9, %r3
        bne next_entry          /* not our TID */

        /* Clear VALID bit */
        rlwinm %r8, %r8, 0, 1, 31
        mtspr SPR_MAS1, %r8
        isync
        tlbwe
        isync
        msync

next_entry:
        addi %r7, %r7, 1
        cmpw %r7, %r5
        bne loop_entries

        /* Next way */
        addi %r6, %r6, 1
        cmpw %r6, %r4
        bne loop_ways

        /* Restore MSR (possibly re-enable interrupts) */
        mtmsr %r10
        isync

tid_flush_end:
        blr

/*
 * Cache disable/enable/inval sequences according
 * to section 2.16 of E500CORE RM.
 */
ENTRY(dcache_inval)
        /* Invalidate d-cache */
        mfspr %r3, SPR_L1CSR0
        ori %r3, %r3, (L1CSR0_DCFI | L1CSR0_DCLFR)@l
        msync
        isync
        mtspr SPR_L1CSR0, %r3
        isync
1:      mfspr %r3, SPR_L1CSR0
        andi. %r3, %r3, L1CSR0_DCFI
        bne 1b
        blr

ENTRY(dcache_disable)
        /* Disable d-cache */
        mfspr %r3, SPR_L1CSR0
        li %r4, L1CSR0_DCE@l
        not %r4, %r4
        and %r3, %r3, %r4
        msync
        isync
        mtspr SPR_L1CSR0, %r3
        isync
        blr

ENTRY(dcache_enable)
        /* Enable d-cache */
        mfspr %r3, SPR_L1CSR0
        oris %r3, %r3, (L1CSR0_DCPE | L1CSR0_DCE)@h
        ori %r3, %r3, (L1CSR0_DCPE | L1CSR0_DCE)@l
        msync
        isync
        mtspr SPR_L1CSR0, %r3
        isync
        blr

ENTRY(icache_inval)
        /* Invalidate i-cache */
        mfspr %r3, SPR_L1CSR1
        ori %r3, %r3, (L1CSR1_ICFI | L1CSR1_ICLFR)@l
        isync
        mtspr SPR_L1CSR1, %r3
        isync
1:      mfspr %r3, SPR_L1CSR1
        andi. %r3, %r3, L1CSR1_ICFI
        bne 1b
        blr

ENTRY(icache_disable)
        /* Disable i-cache */
        mfspr %r3, SPR_L1CSR1
        li %r4, L1CSR1_ICE@l
        not %r4, %r4
        and %r3, %r3, %r4
        isync
        mtspr SPR_L1CSR1, %r3
        isync
        blr

ENTRY(icache_enable)
        /* Enable i-cache */
        mfspr %r3, SPR_L1CSR1
        oris %r3, %r3, (L1CSR1_ICPE | L1CSR1_ICE)@h
        ori %r3, %r3, (L1CSR1_ICPE | L1CSR1_ICE)@l
        isync
        mtspr SPR_L1CSR1, %r3
        isync
        blr

/*
 * int setfault()
 *
 * Similar to setjmp: sets up for handling faults on accesses to user memory.
 * Any routine using this may only call bcopy, either the form below or the
 * (currently used) C code, which is optimized so that it does not use any
 * non-volatile registers.
 */
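/*
 * Fault buffer layout (pointed to by r3): LR at offset 0, r1 at 4, r2 at 8,
 * then stmw stores r10-r31 starting at offset 12, with CR, CTR and XER
 * staged in r10-r12 beforehand.
 */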
        .globl setfault
setfault:
        mflr %r0
        mfsprg0 %r4
        lwz %r4, PC_CURTHREAD(%r4)
        lwz %r4, TD_PCB(%r4)
        stw %r3, PCB_ONFAULT(%r4)
        mfcr %r10
        mfctr %r11
        mfxer %r12
        stw %r0, 0(%r3)
        stw %r1, 4(%r3)
        stw %r2, 8(%r3)
        stmw %r10, 12(%r3)      /* store CR, CTR, XER, [r13 .. r31] */
        li %r3, 0               /* return FALSE */
        blr

/************************************************************************/
/* Data section */
/************************************************************************/
        .data
        .align 4
tmpstack:
        .space TMPSTACKSZ

/*
 * Compiled KERNBASE locations
 */
        .globl kernbase
        .set kernbase, KERNBASE

/*
 * Globals
 */
#define INTRCNT_COUNT 256       /* max(HROWPIC_IRQMAX,OPENPIC_IRQMAX) */

GLOBAL(kernload)
        .long 0
GLOBAL(intrnames)
        .space INTRCNT_COUNT * (MAXCOMLEN + 1) * 2
GLOBAL(eintrnames)
        .align 4
GLOBAL(intrcnt)
        .space INTRCNT_COUNT * 4 * 2
GLOBAL(eintrcnt)

#include <powerpc/booke/trap_subr.S>