/*-
 * Copyright (C) 2007-2009 Semihalf, Rafal Jaworowski <raj@semihalf.com>
 * Copyright (C) 2006 Semihalf, Marian Balakowicz <m8@semihalf.com>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN
 * NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
 * TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
 * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
 * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * $FreeBSD: head/sys/powerpc/booke/locore.S 222391 2011-05-27 23:09:12Z marcel $
 */

#include "assym.s"

#include <machine/asm.h>
#include <machine/hid.h>
#include <machine/param.h>
#include <machine/spr.h>
#include <machine/pte.h>
#include <machine/trap.h>
#include <machine/vmparam.h>
#include <machine/tlb.h>

#define	TMPSTACKSZ	16384

	.text
	.globl	btext
btext:

/*
 * This symbol is here for the benefit of kvm_mkdb, and is supposed to
 * mark the start of kernel text.
 */
	.globl	kernel_text
kernel_text:

/*
 * Startup entry.  Note, this must be the first thing in the text segment!
 */
	.text
	.globl	__start
__start:

/*
 * Assumptions on the boot loader:
 * - system memory starts from physical address 0
 * - it's mapped by a single TLB1 entry
 * - TLB1 mapping is 1:1 pa to va
 * - kernel is loaded at a 16MB boundary
 * - all PID registers are set to the same value
 * - CPU is running in AS=0
 *
 * Register contents provided by the loader(8):
 *	r1	: stack pointer
 *	r3	: metadata pointer
 *
 * We rearrange the TLB1 layout as follows:
 * - find TLB1 entry we started in
 * - make sure it's protected, invalidate other entries
 * - create temp entry in the second AS (make sure it's not TLB1[1])
 * - switch to temp mapping
 * - map 16MB of RAM in TLB1[1]
 * - use AS=1, set EPN to KERNBASE and RPN to kernel load address
 * - switch to TLB1[1] mapping
 * - invalidate temp mapping
 *
 * locore register use:
 *	r1	: stack pointer
 *	r2	: trace pointer (AP only, for early diagnostics)
 *	r3-r27	: scratch registers
 *	r28	: kernload
 *	r29	: temp TLB1 entry
 *	r30	: initial TLB1 entry we started in
 *	r31	: metadata pointer
 */

/*
 * Keep metadata ptr in r31 for later use.
 */
	mr	%r31, %r3

/*
 * Initial cleanup
 */
	li	%r3, PSL_DE	/* Keep debug exceptions for CodeWarrior. */
	mtmsr	%r3
	isync

	lis	%r3, HID0_E500_DEFAULT_SET@h
	ori	%r3, %r3, HID0_E500_DEFAULT_SET@l
	mtspr	SPR_HID0, %r3
	isync
	lis	%r3, HID1_E500_DEFAULT_SET@h
	ori	%r3, %r3, HID1_E500_DEFAULT_SET@l
	mtspr	SPR_HID1, %r3
	isync

	/* Invalidate all entries in TLB0 */
	li	%r3, 0
	bl	tlb_inval_all

/*
 * Locate the TLB1 entry that maps this code
 */
	bl	1f
1:	mflr	%r3
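	/* r3 = our current execution address, obtained via the bl/mflr idiom */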
	bl	tlb1_find_current	/* the entry number found is returned in r30 */

	bl	tlb1_inval_all_but_current
/*
 * Create temporary mapping in AS=1 and switch to it
 */
	bl	tlb1_temp_mapping_as1

	mfmsr	%r3
	ori	%r3, %r3, (PSL_IS | PSL_DS)
	bl	2f
2:	mflr	%r4
	addi	%r4, %r4, 20
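	/* 20 bytes = 5 insns past the mflr: resume just after the rfi below */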
	mtspr	SPR_SRR0, %r4
	mtspr	SPR_SRR1, %r3
	rfi				/* Switch context */

/*
 * Invalidate initial entry
 */
	mr	%r3, %r30
	bl	tlb1_inval_entry

/*
 * Setup final mapping in TLB1[1] and switch to it
 */
	/* Final kernel mapping, map in 16 MB of RAM */
	lis	%r3, MAS0_TLBSEL1@h	/* Select TLB1 */
	li	%r4, 0			/* Entry 0 */
	rlwimi	%r3, %r4, 16, 12, 15
	mtspr	SPR_MAS0, %r3
	isync

	li	%r3, (TLB_SIZE_16M << MAS1_TSIZE_SHIFT)@l
	oris	%r3, %r3, (MAS1_VALID | MAS1_IPROT)@h
	mtspr	SPR_MAS1, %r3		/* note TS was not filled, so it's TS=0 */
	isync

	lis	%r3, KERNBASE@h
	ori	%r3, %r3, KERNBASE@l	/* EPN = KERNBASE */
#ifdef SMP
	ori	%r3, %r3, MAS2_M@l	/* WIMGE = 0b00100 */
#endif
	mtspr	SPR_MAS2, %r3
	isync

	/* Discover phys load address */
	bl	3f
3:	mflr	%r4			/* Use current address */
	rlwinm	%r4, %r4, 0, 0, 7	/* 16MB alignment mask */
	mr	%r28, %r4		/* Keep kernel load address */
	ori	%r4, %r4, (MAS3_SX | MAS3_SW | MAS3_SR)@l
	mtspr	SPR_MAS3, %r4		/* Set RPN and protection */
	isync
	tlbwe
	isync
	msync

	/* Switch to the above TLB1[1] mapping */
	bl	4f
4:	mflr	%r4
	rlwinm	%r4, %r4, 0, 8, 31	/* Current offset from kernel load address */
	rlwinm	%r3, %r3, 0, 0, 19
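	/* Clear the low MAS2 flag bits (WIMGE), leaving the KERNBASE EPN in r3 */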
	add	%r4, %r4, %r3		/* Convert to kernel virtual address */
	addi	%r4, %r4, 36
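	/* 36 bytes = 9 insns past the mflr at 4:, i.e. just after the rfi below */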
	li	%r3, PSL_DE		/* Note AS=0 */
	mtspr	SPR_SRR0, %r4
	mtspr	SPR_SRR1, %r3
	rfi

/*
 * Invalidate temp mapping
 */
	mr	%r3, %r29
	bl	tlb1_inval_entry

/*
 * Save kernel load address for later use.
 */
	lis	%r3, kernload@ha
	addi	%r3, %r3, kernload@l
	stw	%r28, 0(%r3)
#ifdef SMP
	/*
	 * APs need a separate copy of kernload info within the __boot_page
	 * area so they can access this value very early, before their TLBs
	 * are fully set up and the kernload global location is available.
	 */
	lis	%r3, kernload_ap@ha
	addi	%r3, %r3, kernload_ap@l
	stw	%r28, 0(%r3)
	msync
#endif

/*
 * Setup a temporary stack
 */
	lis	%r1, tmpstack@ha
	addi	%r1, %r1, tmpstack@l
	addi	%r1, %r1, (TMPSTACKSZ - 8)

/*
 * Initialise exception vector offsets
 */
	bl	ivor_setup

/*
 * Set up arguments and jump to system initialization code
 */
	lis	%r3, kernel_text@ha
	addi	%r3, %r3, kernel_text@l
	lis	%r4, _end@ha
	addi	%r4, %r4, _end@l
	mr	%r5, %r31		/* metadata ptr */

	/* Prepare e500 core */
	bl	e500_init

	/* Switch to thread0.td_kstack now */
	mr	%r1, %r3
	li	%r3, 0
	stw	%r3, 0(%r1)

	/* Machine independent part, does not return */
	bl	mi_startup
	/* NOT REACHED */
5:	b	5b


#ifdef SMP
/************************************************************************/
/* AP Boot page */
/************************************************************************/
	.text
	.globl	__boot_page
	.align	12
__boot_page:
	bl	1f

kernload_ap:
	.long	0

/*
 * Initial configuration
 */
1:
	/* Set HIDs */
	lis	%r3, HID0_E500_DEFAULT_SET@h
	ori	%r3, %r3, HID0_E500_DEFAULT_SET@l
	mtspr	SPR_HID0, %r3
	isync
	lis	%r3, HID1_E500_DEFAULT_SET@h
	ori	%r3, %r3, HID1_E500_DEFAULT_SET@l
	mtspr	SPR_HID1, %r3
	isync

	/* Enable branch prediction */
	li	%r3, BUCSR_BPEN
	mtspr	SPR_BUCSR, %r3
	isync

	/* Invalidate all entries in TLB0 */
	li	%r3, 0
	bl	tlb_inval_all

/*
 * Find TLB1 entry which is translating us now
 */
	bl	2f
2:	mflr	%r3
	bl	tlb1_find_current	/* the entry number found is in r30 */

	bl	tlb1_inval_all_but_current
/*
 * Create temporary translation in AS=1 and switch to it
 */
	bl	tlb1_temp_mapping_as1

	mfmsr	%r3
	ori	%r3, %r3, (PSL_IS | PSL_DS)
	bl	3f
3:	mflr	%r4
	addi	%r4, %r4, 20
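	/* As on the BSP: 20 bytes = 5 insns, resume just after the rfi below */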
	mtspr	SPR_SRR0, %r4
	mtspr	SPR_SRR1, %r3
	rfi				/* Switch context */

/*
 * Invalidate initial entry
 */
	mr	%r3, %r30
	bl	tlb1_inval_entry

/*
 * Setup final mapping in TLB1[1] and switch to it
 */
	/* Final kernel mapping, map in 16 MB of RAM */
	lis	%r3, MAS0_TLBSEL1@h	/* Select TLB1 */
	li	%r4, 0			/* Entry 0 */
	rlwimi	%r3, %r4, 16, 4, 15
	mtspr	SPR_MAS0, %r3
	isync

	li	%r3, (TLB_SIZE_16M << MAS1_TSIZE_SHIFT)@l
	oris	%r3, %r3, (MAS1_VALID | MAS1_IPROT)@h
	mtspr	SPR_MAS1, %r3		/* note TS was not filled, so it's TS=0 */
	isync

	lis	%r3, KERNBASE@h
	ori	%r3, %r3, KERNBASE@l	/* EPN = KERNBASE */
	ori	%r3, %r3, MAS2_M@l	/* WIMGE = 0b00100 */
	mtspr	SPR_MAS2, %r3
	isync

	/* Retrieve kernel load [physical] address from kernload_ap */
	bl	4f
4:	mflr	%r3
	rlwinm	%r3, %r3, 0, 0, 19
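	/* Align down to 4K: the physical base of the boot page */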
	lis	%r4, kernload_ap@h
	ori	%r4, %r4, kernload_ap@l
	lis	%r5, __boot_page@h
	ori	%r5, %r5, __boot_page@l
	sub	%r4, %r4, %r5	/* offset of kernload_ap within __boot_page */
	lwzx	%r3, %r4, %r3
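	/* Load kernload_ap through its physical address: boot page base + offset */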

	/* Set RPN and protection */
	ori	%r3, %r3, (MAS3_SX | MAS3_SW | MAS3_SR)@l
	mtspr	SPR_MAS3, %r3
	isync
	tlbwe
	isync
	msync

	/* Switch to the final mapping */
	bl	5f
5:	mflr	%r3
	rlwinm	%r3, %r3, 0, 0xfff	/* Offset from boot page start */
	add	%r3, %r3, %r5		/* Make this virtual address */
	addi	%r3, %r3, 32
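	/* 32 bytes = 8 insns past the mflr at 5:, i.e. just after the rfi below */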
	li	%r4, 0			/* Note AS=0 */
	mtspr	SPR_SRR0, %r3
	mtspr	SPR_SRR1, %r4
	rfi

/*
 * At this point we're running at virtual addresses KERNBASE and beyond, so
 * we can directly access all locations the kernel was linked against.
 */

/*
 * Invalidate temp mapping
 */
	mr	%r3, %r29
	bl	tlb1_inval_entry

/*
 * Setup a temporary stack
 */
	lis	%r1, tmpstack@ha
	addi	%r1, %r1, tmpstack@l
	addi	%r1, %r1, (TMPSTACKSZ - 8)

/*
 * Initialise exception vector offsets
 */
	bl	ivor_setup

	/*
	 * Assign our pcpu instance
	 */
	lis	%r3, ap_pcpu@h
	ori	%r3, %r3, ap_pcpu@l
	lwz	%r3, 0(%r3)
	mtsprg0	%r3

	bl	pmap_bootstrap_ap

	bl	cpudep_ap_bootstrap
	/* Switch to the idle thread's kstack */
	mr	%r1, %r3

	bl	machdep_ap_bootstrap

	/* NOT REACHED */
6:	b	6b
#endif /* SMP */

/*
 * Invalidate all entries in the given TLB.
 *
 * r3	TLBSEL
 */
tlb_inval_all:
	rlwinm	%r3, %r3, 3, 0x18	/* TLBSEL */
	ori	%r3, %r3, 0x4		/* INVALL */
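	/* r3 now forms the tlbivax EA: TLBSEL field plus the invalidate-all flag */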
	tlbivax	0, %r3
	isync
	msync

	tlbsync
	msync
	blr

/*
 * expects address to look up in r3, returns entry number in r30
 *
 * FIXME: the hidden assumption is we are now running in AS=0, but we should
 * retrieve actual AS from MSR[IS|DS] and put it in MAS6[SAS]
 */
tlb1_find_current:
	mfspr	%r17, SPR_PID0
	slwi	%r17, %r17, MAS6_SPID0_SHIFT
	mtspr	SPR_MAS6, %r17
	isync
	tlbsx	0, %r3
	mfspr	%r17, SPR_MAS0
	rlwinm	%r30, %r17, 16, 20, 31	/* MAS0[ESEL] -> r30 */

	/* Make sure we have IPROT set on the entry */
	mfspr	%r17, SPR_MAS1
	oris	%r17, %r17, MAS1_IPROT@h
	mtspr	SPR_MAS1, %r17
	isync
	tlbwe
	isync
	msync
	blr

/*
 * Invalidates a single entry in TLB1.
 *
 * r3		ESEL
 * r4-r5	scratched
 */
tlb1_inval_entry:
	lis	%r4, MAS0_TLBSEL1@h	/* Select TLB1 */
	rlwimi	%r4, %r3, 16, 12, 15	/* Select our entry */
	mtspr	SPR_MAS0, %r4
	isync
	tlbre
	li	%r5, 0			/* MAS1[V] = 0 */
	mtspr	SPR_MAS1, %r5
	isync
	tlbwe
	isync
	msync
	blr

/*
 * r30		current entry number
 * r29		returned temp entry
 * r3-r5	scratched
 */
tlb1_temp_mapping_as1:
	/* Read our current translation */
	lis	%r3, MAS0_TLBSEL1@h	/* Select TLB1 */
	rlwimi	%r3, %r30, 16, 12, 15	/* Select our current entry */
	mtspr	SPR_MAS0, %r3
	isync
	tlbre

	/*
	 * Prepare and write temp entry
	 *
	 * FIXME: this is not robust against overflow, i.e. when the current
	 * entry is the last in TLB1
	 */
	lis	%r3, MAS0_TLBSEL1@h	/* Select TLB1 */
	addi	%r29, %r30, 1		/* Use next entry. */
	rlwimi	%r3, %r29, 16, 12, 15	/* Select temp entry */
	mtspr	SPR_MAS0, %r3
	isync
	mfspr	%r5, SPR_MAS1
	li	%r4, 1			/* AS=1 */
	rlwimi	%r5, %r4, 12, 19, 19
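	/* Insert into MAS1[TS]: the temporary mapping lives in AS=1 */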
	li	%r4, 0			/* Global mapping, TID=0 */
	rlwimi	%r5, %r4, 16, 8, 15
	oris	%r5, %r5, (MAS1_VALID | MAS1_IPROT)@h
	mtspr	SPR_MAS1, %r5
	isync
	tlbwe
	isync
	msync
	blr

/*
 * Loops over TLB1, invalidates all entries skipping the one which currently
 * maps this code.
 *
 * r30		current entry
 * r3-r5	scratched
 */
tlb1_inval_all_but_current:
	mr	%r6, %r3
	mfspr	%r3, SPR_TLB1CFG	/* Get number of entries */
	andi.	%r3, %r3, TLBCFG_NENTRY_MASK@l
	li	%r4, 0			/* Start from Entry 0 */
1:	lis	%r5, MAS0_TLBSEL1@h
	rlwimi	%r5, %r4, 16, 12, 15
	mtspr	SPR_MAS0, %r5
	isync
	tlbre
	mfspr	%r5, SPR_MAS1
	cmpw	%r4, %r30		/* our current entry? */
	beq	2f
	rlwinm	%r5, %r5, 0, 2, 31	/* clear VALID and IPROT bits */
	mtspr	SPR_MAS1, %r5
	isync
	tlbwe
	isync
	msync
2:	addi	%r4, %r4, 1
	cmpw	%r4, %r3		/* Check if this is the last entry */
	bne	1b
	blr

#ifdef SMP
__boot_page_padding:
	/*
	 * Boot page needs to be exactly 4K, with the last word of this page
	 * acting as the reset vector, so we need to pad out the remainder.
	 * Upon release from holdoff, the CPU fetches the last word of the
	 * boot page.
	 */
	.space	4092 - (__boot_page_padding - __boot_page)
	b	__boot_page
#endif /* SMP */

/************************************************************************/
/* locore subroutines */
/************************************************************************/

ivor_setup:
	/* Set base address of interrupt handler routines */
	lis	%r3, interrupt_vector_base@h
	mtspr	SPR_IVPR, %r3

	/* Assign interrupt handler routines offsets */
	li	%r3, int_critical_input@l
	mtspr	SPR_IVOR0, %r3
	li	%r3, int_machine_check@l
	mtspr	SPR_IVOR1, %r3
	li	%r3, int_data_storage@l
	mtspr	SPR_IVOR2, %r3
	li	%r3, int_instr_storage@l
	mtspr	SPR_IVOR3, %r3
	li	%r3, int_external_input@l
	mtspr	SPR_IVOR4, %r3
	li	%r3, int_alignment@l
	mtspr	SPR_IVOR5, %r3
	li	%r3, int_program@l
	mtspr	SPR_IVOR6, %r3
	li	%r3, int_syscall@l
	mtspr	SPR_IVOR8, %r3
	li	%r3, int_decrementer@l
	mtspr	SPR_IVOR10, %r3
	li	%r3, int_fixed_interval_timer@l
	mtspr	SPR_IVOR11, %r3
	li	%r3, int_watchdog@l
	mtspr	SPR_IVOR12, %r3
	li	%r3, int_data_tlb_error@l
	mtspr	SPR_IVOR13, %r3
	li	%r3, int_inst_tlb_error@l
	mtspr	SPR_IVOR14, %r3
	li	%r3, int_debug@l
	mtspr	SPR_IVOR15, %r3
	blr

/*
 * void tid_flush(tlbtid_t tid);
 *
 * Invalidate all TLB0 entries which match the given TID.  Note this is
 * dedicated to cases where invalidation(s) should NOT be propagated to
 * other CPUs.
 *
 * Global vars tlb0_ways, tlb0_entries_per_way are assumed to have been set up
 * correctly (by tlb0_get_tlbconf()).
 */
ENTRY(tid_flush)
	cmpwi	%r3, TID_KERNEL
	beq	tid_flush_end	/* don't evict kernel translations */

	/* Number of TLB0 ways */
	lis	%r4, tlb0_ways@h
	ori	%r4, %r4, tlb0_ways@l
	lwz	%r4, 0(%r4)

	/* Number of entries / way */
	lis	%r5, tlb0_entries_per_way@h
	ori	%r5, %r5, tlb0_entries_per_way@l
	lwz	%r5, 0(%r5)

	/* Disable interrupts */
	mfmsr	%r10
	wrteei	0

	li	%r6, 0		/* ways counter */
loop_ways:
	li	%r7, 0		/* entries [per way] counter */
loop_entries:
	/* Select TLB0 and ESEL (way) */
	lis	%r8, MAS0_TLBSEL0@h
	rlwimi	%r8, %r6, 16, 14, 15
	mtspr	SPR_MAS0, %r8
	isync

	/* Select EPN (entry within the way) */
	rlwinm	%r8, %r7, 12, 13, 19
	mtspr	SPR_MAS2, %r8
	isync
	tlbre

	/* Check if valid entry */
	mfspr	%r8, SPR_MAS1
	andis.	%r9, %r8, MAS1_VALID@h
	beq	next_entry	/* invalid entry */

	/* Check if this is our TID */
	rlwinm	%r9, %r8, 16, 24, 31
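	/* r9 = MAS1[TID] */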

	cmplw	%r9, %r3
	bne	next_entry	/* not our TID */

	/* Clear VALID bit */
	rlwinm	%r8, %r8, 0, 1, 31
	mtspr	SPR_MAS1, %r8
	isync
	tlbwe
	isync
	msync

next_entry:
	addi	%r7, %r7, 1
	cmpw	%r7, %r5
	bne	loop_entries

	/* Next way */
	addi	%r6, %r6, 1
	cmpw	%r6, %r4
	bne	loop_ways

	/* Restore MSR (possibly re-enable interrupts) */
	mtmsr	%r10
	isync

tid_flush_end:
	blr

/*
 * Cache disable/enable/inval sequences according
 * to section 2.16 of E500CORE RM.
 */
ENTRY(dcache_inval)
	/* Invalidate d-cache */
	mfspr	%r3, SPR_L1CSR0
	ori	%r3, %r3, (L1CSR0_DCFI | L1CSR0_DCLFR)@l
	msync
	isync
	mtspr	SPR_L1CSR0, %r3
	isync
1:	mfspr	%r3, SPR_L1CSR0
	andi.	%r3, %r3, L1CSR0_DCFI
	bne	1b
	blr

ENTRY(dcache_disable)
	/* Disable d-cache */
	mfspr	%r3, SPR_L1CSR0
	li	%r4, L1CSR0_DCE@l
	not	%r4, %r4
	and	%r3, %r3, %r4
	msync
	isync
	mtspr	SPR_L1CSR0, %r3
	isync
	blr

ENTRY(dcache_enable)
	/* Enable d-cache */
	mfspr	%r3, SPR_L1CSR0
	oris	%r3, %r3, (L1CSR0_DCPE | L1CSR0_DCE)@h
	ori	%r3, %r3, (L1CSR0_DCPE | L1CSR0_DCE)@l
	msync
	isync
	mtspr	SPR_L1CSR0, %r3
	isync
	blr

ENTRY(icache_inval)
	/* Invalidate i-cache */
	mfspr	%r3, SPR_L1CSR1
	ori	%r3, %r3, (L1CSR1_ICFI | L1CSR1_ICLFR)@l
	isync
	mtspr	SPR_L1CSR1, %r3
	isync
1:	mfspr	%r3, SPR_L1CSR1
	andi.	%r3, %r3, L1CSR1_ICFI
	bne	1b
	blr

ENTRY(icache_disable)
	/* Disable i-cache */
	mfspr	%r3, SPR_L1CSR1
	li	%r4, L1CSR1_ICE@l
	not	%r4, %r4
	and	%r3, %r3, %r4
	isync
	mtspr	SPR_L1CSR1, %r3
	isync
	blr

ENTRY(icache_enable)
	/* Enable i-cache */
	mfspr	%r3, SPR_L1CSR1
	oris	%r3, %r3, (L1CSR1_ICPE | L1CSR1_ICE)@h
	ori	%r3, %r3, (L1CSR1_ICPE | L1CSR1_ICE)@l
	isync
	mtspr	SPR_L1CSR1, %r3
	isync
	blr

/*
 * int setfault()
 *
 * Similar to setjmp, to set up for handling faults on accesses to user
 * memory.  Any routine using this may only call bcopy, either the form
 * below or the (currently used) optimized C code, so that it doesn't use
 * any non-volatile registers.
 */
	.globl	setfault
setfault:
	mflr	%r0
	mfsprg0	%r4
	lwz	%r4, PC_CURTHREAD(%r4)
	lwz	%r4, TD_PCB(%r4)
	stw	%r3, PCB_ONFAULT(%r4)
	mfcr	%r10
	mfctr	%r11
	mfxer	%r12
	stw	%r0, 0(%r3)
	stw	%r1, 4(%r3)
	stw	%r2, 8(%r3)
	stmw	%r10, 12(%r3)	/* store CR, CTR, XER, [r13 .. r31] */
	li	%r3, 0		/* return FALSE */
	blr

/************************************************************************/
/* Data section */
/************************************************************************/
	.data
	.align	4
tmpstack:
	.space	TMPSTACKSZ

/*
 * Compiled KERNBASE locations
 */
	.globl	kernbase
	.set	kernbase, KERNBASE

/*
 * Globals
 */
#define	INTRCNT_COUNT	256	/* max(HROWPIC_IRQMAX,OPENPIC_IRQMAX) */

GLOBAL(kernload)
	.long	0
GLOBAL(intrnames)
	.space	INTRCNT_COUNT * (MAXCOMLEN + 1) * 2
GLOBAL(eintrnames)
	.align	4
GLOBAL(intrcnt)
	.space	INTRCNT_COUNT * 4 * 2
GLOBAL(eintrcnt)

#include <powerpc/booke/trap_subr.S>