trap_subr64.S, diff of revisions 274743 and 275268:
1/* $FreeBSD: head/sys/powerpc/aim/trap_subr64.S 274743 2014-11-20 06:32:47Z jhibbits $ */
1/* $FreeBSD: head/sys/powerpc/aim/trap_subr64.S 275268 2014-11-29 20:54:33Z jhibbits $ */
2/* $NetBSD: trap_subr.S,v 1.20 2002/04/22 23:20:08 kleink Exp $ */
3
4/*-
5 * Copyright (C) 1995, 1996 Wolfgang Solfrank.
6 * Copyright (C) 1995, 1996 TooLs GmbH.
7 * All rights reserved.
8 *
9 * Redistribution and use in source and binary forms, with or without
10 * modification, are permitted provided that the following conditions
11 * are met:
12 * 1. Redistributions of source code must retain the above copyright
13 * notice, this list of conditions and the following disclaimer.
14 * 2. Redistributions in binary form must reproduce the above copyright
15 * notice, this list of conditions and the following disclaimer in the
16 * documentation and/or other materials provided with the distribution.
17 * 3. All advertising materials mentioning features or use of this software
18 * must display the following acknowledgement:
19 * This product includes software developed by TooLs GmbH.
20 * 4. The name of TooLs GmbH may not be used to endorse or promote products
21 * derived from this software without specific prior written permission.
22 *
23 * THIS SOFTWARE IS PROVIDED BY TOOLS GMBH ``AS IS'' AND ANY EXPRESS OR
24 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
25 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
26 * IN NO EVENT SHALL TOOLS GMBH BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
27 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
28 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
29 * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
30 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
31 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
32 * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
33 */
34
35/*
36 * NOTICE: This is not a standalone file. To use it, #include it in
37 * your port's locore.S, like so:
38 *
39 * #include <powerpc/aim/trap_subr.S>
40 */
41
42/*
43 * Save/restore segment registers
44 */
45
46/*
47 * Restore SRs for a pmap
48 *
49 * Requires that r28-r31 be scratch, with r28 initialized to the SLB cache
50 */
51
52/*
53 * User SRs are loaded through a pointer to the current pmap.
54 */
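/*
 * A rough C sketch of the loop below (illustrative only -- the real
 * structure and PCPU field names live in the slb/pmap headers):
 *
 *	struct slb { uint64_t slbv; uint64_t slbe; };
 *	struct slb **cache;			-- PC_USERSLB
 *	for (uint64_t i = 0; cache[i] != NULL; i++)
 *		slbmte(cache[i]->slbv, cache[i]->slbe | i);
 *
 * i.e. the per-CPU user SLB cache is a NULL-terminated array of pointers
 * to (SLBV, SLBE) pairs, and the array index doubles as the SLB slot.
 * The slbia/slbie pair at the top flushes the stale entries first; the
 * explicit slbie is presumably needed because slbia leaves entry 0 alone.
 */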
55restore_usersrs:
56 GET_CPUINFO(%r28)
57 ld %r28,PC_USERSLB(%r28)
58 li %r29, 0 /* Set the counter to zero */
59
60 slbia
61 slbmfee %r31,%r29
62 clrrdi %r31,%r31,28
63 slbie %r31
641: ld %r31, 0(%r28) /* Load SLB entry pointer */
65 cmpli 0, %r31, 0 /* If NULL, stop */
66 beqlr
67
68 ld %r30, 0(%r31) /* Load SLBV */
69 ld %r31, 8(%r31) /* Load SLBE */
70 or %r31, %r31, %r29 /* Set SLBE slot */
71 slbmte %r30, %r31 /* Install SLB entry */
72
73 addi %r28, %r28, 8 /* Advance pointer */
74 addi %r29, %r29, 1
75 b 1b /* Repeat */
76
77/*
78 * Kernel SRs are loaded directly from the PCPU fields
79 */
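/*
 * Unlike the user case above, PC_KERNSLB is not an array of pointers: it
 * points at 64 inline 16-byte (SLBV, SLBE) pairs, one per SLB slot.  The
 * loop below walks all 64 slots, skips USER_SLB_SLOT (presumably kept
 * free for the segment the kernel uses to reach user memory) and returns
 * early at the first entry whose SLBE is zero.
 */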
80restore_kernsrs:
81 GET_CPUINFO(%r28)
82 addi %r28,%r28,PC_KERNSLB
83 li %r29, 0 /* Set the counter to zero */
84
85 slbia
86 slbmfee %r31,%r29
87 clrrdi %r31,%r31,28
88 slbie %r31
891: cmpli 0, %r29, USER_SLB_SLOT /* Skip the user slot */
90 beq- 2f
91
92 ld %r31, 8(%r28) /* Load SLBE */
93 cmpli 0, %r31, 0 /* If SLBE is not valid, stop */
94 beqlr
95 ld %r30, 0(%r28) /* Load SLBV */
96 slbmte %r30, %r31 /* Install SLB entry */
97
982: addi %r28, %r28, 16 /* Advance pointer */
99 addi %r29, %r29, 1
100 cmpli 0, %r29, 64 /* Repeat if we are not at the end */
101 blt 1b
102 blr
103
104/*
105 * FRAME_SETUP assumes:
106 * SPRG1 SP (1)
107 * SPRG3 trap type
108 * savearea r27-r31,DAR,DSISR (DAR & DSISR only for DSI traps)
109 * r28 LR
110 * r29 CR
111 * r30 scratch
112 * r31 scratch
113 * r1 kernel stack
114 * SRR0/1 as at start of trap
115 *
116 * NOTE: SPRG1 is never used while the MMU is on, making it safe to reuse
117 * in any real-mode fault handler, including those handling double faults.
118 */
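/*
 * Put differently: the vector stubs below stash the old SP in SPRG1, the
 * old LR in r28, the old CR in r29, the trap type in SPRG3 and r27-r31
 * in a per-CPU savearea; FRAME_SETUP then re-enables translation and
 * spills all of it into a trapframe at 48(%r1).  The 48 is presumably
 * the ELFv1 stack-frame header (back chain, CR, LR, TOC save, ...), and
 * the extra 288 bytes in the stdu skip the ABI red zone of whatever was
 * interrupted, so C code can simply be handed %r1+48 as a trapframe
 * pointer.
 */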
119#define FRAME_SETUP(savearea) \
120/* Have to enable translation to allow access of kernel stack: */ \
121 GET_CPUINFO(%r31); \
122 mfsrr0 %r30; \
123 std %r30,(savearea+CPUSAVE_SRR0)(%r31); /* save SRR0 */ \
124 mfsrr1 %r30; \
125 std %r30,(savearea+CPUSAVE_SRR1)(%r31); /* save SRR1 */ \
126 mfsprg1 %r31; /* get saved SP (clears SPRG1) */ \
127 mfmsr %r30; \
128 ori %r30,%r30,(PSL_DR|PSL_IR|PSL_RI)@l; /* relocation on */ \
129 mtmsr %r30; /* stack can now be accessed */ \
130 isync; \
131 stdu %r31,-(FRAMELEN+288)(%r1); /* save it in the callframe */ \
132 std %r0, FRAME_0+48(%r1); /* save r0 in the trapframe */ \
133 std %r31,FRAME_1+48(%r1); /* save SP " " */ \
134 std %r2, FRAME_2+48(%r1); /* save r2 " " */ \
135 std %r28,FRAME_LR+48(%r1); /* save LR " " */ \
136 std %r29,FRAME_CR+48(%r1); /* save CR " " */ \
137 GET_CPUINFO(%r2); \
138 ld %r27,(savearea+CPUSAVE_R27)(%r2); /* get saved r27 */ \
139 ld %r28,(savearea+CPUSAVE_R28)(%r2); /* get saved r28 */ \
140 ld %r29,(savearea+CPUSAVE_R29)(%r2); /* get saved r29 */ \
141 ld %r30,(savearea+CPUSAVE_R30)(%r2); /* get saved r30 */ \
142 ld %r31,(savearea+CPUSAVE_R31)(%r2); /* get saved r31 */ \
143 std %r3, FRAME_3+48(%r1); /* save r3-r31 */ \
144 std %r4, FRAME_4+48(%r1); \
145 std %r5, FRAME_5+48(%r1); \
146 std %r6, FRAME_6+48(%r1); \
147 std %r7, FRAME_7+48(%r1); \
148 std %r8, FRAME_8+48(%r1); \
149 std %r9, FRAME_9+48(%r1); \
150 std %r10, FRAME_10+48(%r1); \
151 std %r11, FRAME_11+48(%r1); \
152 std %r12, FRAME_12+48(%r1); \
153 std %r13, FRAME_13+48(%r1); \
154 std %r14, FRAME_14+48(%r1); \
155 std %r15, FRAME_15+48(%r1); \
156 std %r16, FRAME_16+48(%r1); \
157 std %r17, FRAME_17+48(%r1); \
158 std %r18, FRAME_18+48(%r1); \
159 std %r19, FRAME_19+48(%r1); \
160 std %r20, FRAME_20+48(%r1); \
161 std %r21, FRAME_21+48(%r1); \
162 std %r22, FRAME_22+48(%r1); \
163 std %r23, FRAME_23+48(%r1); \
164 std %r24, FRAME_24+48(%r1); \
165 std %r25, FRAME_25+48(%r1); \
166 std %r26, FRAME_26+48(%r1); \
167 std %r27, FRAME_27+48(%r1); \
168 std %r28, FRAME_28+48(%r1); \
169 std %r29, FRAME_29+48(%r1); \
170 std %r30, FRAME_30+48(%r1); \
171 std %r31, FRAME_31+48(%r1); \
172 ld %r28,(savearea+CPUSAVE_AIM_DAR)(%r2); /* saved DAR */ \
173 ld %r29,(savearea+CPUSAVE_AIM_DSISR)(%r2);/* saved DSISR */\
174 ld %r30,(savearea+CPUSAVE_SRR0)(%r2); /* saved SRR0 */ \
175 ld %r31,(savearea+CPUSAVE_SRR1)(%r2); /* saved SRR1 */ \
176 mfxer %r3; \
177 mfctr %r4; \
178 mfsprg3 %r5; \
179 std %r3, FRAME_XER+48(1); /* save xer/ctr/exc */ \
180 std %r4, FRAME_CTR+48(1); \
181 std %r5, FRAME_EXC+48(1); \
182 std %r28,FRAME_AIM_DAR+48(1); \
183 std %r29,FRAME_AIM_DSISR+48(1); /* save dsisr/srr0/srr1 */ \
184 std %r30,FRAME_SRR0+48(1); \
185 std %r31,FRAME_SRR1+48(1); \
186 ld %r13,PC_CURTHREAD(%r2) /* set kernel curthread */
187
188#define FRAME_LEAVE(savearea) \
189/* Disable exceptions: */ \
190 mfmsr %r2; \
191 andi. %r2,%r2,~PSL_EE@l; \
192 mtmsr %r2; \
193 isync; \
194/* Now restore regs: */ \
195 ld %r2,FRAME_SRR0+48(%r1); \
196 ld %r3,FRAME_SRR1+48(%r1); \
197 ld %r4,FRAME_CTR+48(%r1); \
198 ld %r5,FRAME_XER+48(%r1); \
199 ld %r6,FRAME_LR+48(%r1); \
200 GET_CPUINFO(%r7); \
201 std %r2,(savearea+CPUSAVE_SRR0)(%r7); /* save SRR0 */ \
202 std %r3,(savearea+CPUSAVE_SRR1)(%r7); /* save SRR1 */ \
203 ld %r7,FRAME_CR+48(%r1); \
204 mtctr %r4; \
205 mtxer %r5; \
206 mtlr %r6; \
207 mtsprg2 %r7; /* save cr */ \
208 ld %r31,FRAME_31+48(%r1); /* restore r0-31 */ \
209 ld %r30,FRAME_30+48(%r1); \
210 ld %r29,FRAME_29+48(%r1); \
211 ld %r28,FRAME_28+48(%r1); \
212 ld %r27,FRAME_27+48(%r1); \
213 ld %r26,FRAME_26+48(%r1); \
214 ld %r25,FRAME_25+48(%r1); \
215 ld %r24,FRAME_24+48(%r1); \
216 ld %r23,FRAME_23+48(%r1); \
217 ld %r22,FRAME_22+48(%r1); \
218 ld %r21,FRAME_21+48(%r1); \
219 ld %r20,FRAME_20+48(%r1); \
220 ld %r19,FRAME_19+48(%r1); \
221 ld %r18,FRAME_18+48(%r1); \
222 ld %r17,FRAME_17+48(%r1); \
223 ld %r16,FRAME_16+48(%r1); \
224 ld %r15,FRAME_15+48(%r1); \
225 ld %r14,FRAME_14+48(%r1); \
226 ld %r13,FRAME_13+48(%r1); \
227 ld %r12,FRAME_12+48(%r1); \
228 ld %r11,FRAME_11+48(%r1); \
229 ld %r10,FRAME_10+48(%r1); \
230 ld %r9, FRAME_9+48(%r1); \
231 ld %r8, FRAME_8+48(%r1); \
232 ld %r7, FRAME_7+48(%r1); \
233 ld %r6, FRAME_6+48(%r1); \
234 ld %r5, FRAME_5+48(%r1); \
235 ld %r4, FRAME_4+48(%r1); \
236 ld %r3, FRAME_3+48(%r1); \
237 ld %r2, FRAME_2+48(%r1); \
238 ld %r0, FRAME_0+48(%r1); \
239 ld %r1, FRAME_1+48(%r1); \
240/* Can't touch %r1 from here on */ \
241 mtsprg3 %r3; /* save r3 */ \
242/* Disable translation, machine check and recoverability: */ \
243 mfmsr %r3; \
244 andi. %r3,%r3,~(PSL_DR|PSL_IR|PSL_ME|PSL_RI)@l; \
245 mtmsr %r3; \
246 isync; \
247/* Decide whether we return to user mode: */ \
248 GET_CPUINFO(%r3); \
249 ld %r3,(savearea+CPUSAVE_SRR1)(%r3); \
250 mtcr %r3; \
251 bf 17,1f; /* branch if PSL_PR is false */ \
252/* Restore user SRs */ \
253 GET_CPUINFO(%r3); \
254 std %r27,(savearea+CPUSAVE_R27)(%r3); \
255 std %r28,(savearea+CPUSAVE_R28)(%r3); \
256 std %r29,(savearea+CPUSAVE_R29)(%r3); \
257 std %r30,(savearea+CPUSAVE_R30)(%r3); \
258 std %r31,(savearea+CPUSAVE_R31)(%r3); \
259 mflr %r27; /* preserve LR */ \
260 bl restore_usersrs; /* uses r28-r31 */ \
261 mtlr %r27; \
262 ld %r31,(savearea+CPUSAVE_R31)(%r3); \
263 ld %r30,(savearea+CPUSAVE_R30)(%r3); \
264 ld %r29,(savearea+CPUSAVE_R29)(%r3); \
265 ld %r28,(savearea+CPUSAVE_R28)(%r3); \
266 ld %r27,(savearea+CPUSAVE_R27)(%r3); \
2671: mfsprg2 %r3; /* restore cr */ \
268 mtcr %r3; \
269 GET_CPUINFO(%r3); \
270 ld %r3,(savearea+CPUSAVE_SRR0)(%r3); /* restore srr0 */ \
271 mtsrr0 %r3; \
272 GET_CPUINFO(%r3); \
273 ld %r3,(savearea+CPUSAVE_SRR1)(%r3); /* restore srr1 */ \
274 mtsrr1 %r3; \
275 mfsprg3 %r3 /* restore r3 */
276
277#ifdef KDTRACE_HOOKS
278 .data
279 .globl dtrace_invop_calltrap_addr
280 .align 8
281 .type dtrace_invop_calltrap_addr, @object
282 .size dtrace_invop_calltrap_addr, 8
283dtrace_invop_calltrap_addr:
284 .word 0
285 .word 0
286
287 .text
288#endif
289
290/*
291 * Processor reset exception handler. These are typically
292 * the first instructions the processor executes after a
293 * software reset. We do this in two bits so that we are
294 * not still hanging around in the trap handling region
295 * once the MMU is turned on.
296 */
297 .globl CNAME(rstcode), CNAME(rstsize)
298CNAME(rstcode):
299 /* Explicitly set MSR[SF] */
300 mfmsr %r9
301 li %r8,1
302 insrdi %r9,%r8,1,0
303 mtmsrd %r9
304 isync
305
306 ba cpu_reset
307CNAME(rstsize) = . - CNAME(rstcode)
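/*
 * The insrdi above inserts the low bit of r8 (a 1) at MSR bit 0 in IBM
 * bit numbering, i.e. roughly "msr |= 1ULL << 63" -- MSR[SF], 64-bit
 * mode -- and the absolute branch then jumps out of the copied vector
 * region before the MMU is enabled, as the comment above explains.
 */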
308
309cpu_reset:
310 lis %r1,(tmpstk+TMPSTKSZ-48)@ha /* get new SP */
311 addi %r1,%r1,(tmpstk+TMPSTKSZ-48)@l
312
313 lis %r3,tocbase@ha
314 ld %r2,tocbase@l(%r3)
315 bl CNAME(cpudep_ap_early_bootstrap) /* Set PCPU */
316 nop
317 lis %r3,1@l
318 bl CNAME(pmap_cpu_bootstrap) /* Turn on virtual memory */
319 nop
320 bl CNAME(cpudep_ap_bootstrap) /* Set up PCPU and stack */
321 nop
322 mr %r1,%r3 /* Use new stack */
323 bl CNAME(cpudep_ap_setup)
324 nop
325 GET_CPUINFO(%r5)
326 ld %r3,(PC_RESTORE)(%r5)
327 cmpldi %cr0,%r3,0
328 beq %cr0,2f
329 nop
330 li %r4,1
331 b CNAME(longjmp)
332 nop
3332:
334#ifdef SMP
335 bl CNAME(machdep_ap_bootstrap) /* And away! */
336 nop
337#endif
338
339 /* Should not be reached */
3409:
341 b 9b
342
343/*
344 * This code gets copied to all the trap vectors
345 * (except ISI/DSI, ALI, and the interrupts)
346 */
347
348 .globl CNAME(trapcode),CNAME(trapsize)
349CNAME(trapcode):
350 mtsprg1 %r1 /* save SP */
351 mflr %r1 /* Save the old LR in r1 */
352 mtsprg2 %r1 /* And then in SPRG2 */
353 li %r1, 0xA0 /* How to get the vector from LR */
354 bla generictrap /* LR & SPRG3 is exception # */
355CNAME(trapsize) = .-CNAME(trapcode)
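/*
 * trapcode/trapsize describe a position-independent stub that the
 * platform start-up code copies over each exception vector, presumably
 * along the lines of (illustrative call, not the exact call site):
 *
 *	bcopy(&trapcode, (void *)EXC_PGM, (size_t)trapsize);
 *
 * Because the stub ends in "bla", the link register it leaves behind
 * records which vector it ran from; generictrap recovers the vector
 * number from LR (see the worked example before generictrap below).
 */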
356
357/*
358 * For SLB misses: do special things for the kernel
359 *
360 * Note: SPRG1 is always safe to overwrite any time the MMU is on, which is
361 * the only time this can be called.
362 */
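/*
 * Flow: a miss taken with PSL_PR set (user mode) falls through and is
 * funneled into generictrap like any other fault.  A miss taken in
 * kernel mode branches to kern_slbtrap, which first checks whether the
 * faulting segment is USER_ADDR (presumably the window the kernel maps
 * user memory through); if so it is still handled as a regular trap,
 * otherwise handle_kernel_slb_spill is called, still in real mode, on a
 * small per-CPU stack (PC_SLBSTACK) to refill the kernel SLB entry.
 */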
363 .globl CNAME(slbtrap),CNAME(slbtrapsize)
364CNAME(slbtrap):
365 mtsprg1 %r1 /* save SP */
366 GET_CPUINFO(%r1)
367 std %r2,(PC_SLBSAVE+16)(%r1)
368 mfcr %r2 /* save CR */
369 std %r2,(PC_SLBSAVE+104)(%r1)
370 mfsrr1 %r2 /* test kernel mode */
371 mtcr %r2
372 bf 17,1f /* branch if PSL_PR is false */
373 /* User mode */
374 ld %r2,(PC_SLBSAVE+104)(%r1) /* Restore CR */
375 mtcr %r2
376 ld %r2,(PC_SLBSAVE+16)(%r1) /* Restore R2 */
377 mflr %r1 /* Save the old LR in r1 */
378 mtsprg2 %r1 /* And then in SPRG2 */
379 li %r1, 0x80 /* How to get the vector from LR */
380 bla generictrap /* LR & SPRG3 is exception # */
3811: mflr %r2 /* Save the old LR in r2 */
382 bla kern_slbtrap
383CNAME(slbtrapsize) = .-CNAME(slbtrap)
384
385kern_slbtrap:
386 std %r2,(PC_SLBSAVE+136)(%r1) /* old LR */
387 std %r3,(PC_SLBSAVE+24)(%r1) /* save R3 */
388
389 /* Check if this needs to be handled as a regular trap (userseg miss) */
390 mflr %r2
391 andi. %r2,%r2,0xff80
392 cmpwi %r2,0x380
393 bne 1f
394 mfdar %r2
395 b 2f
3961: mfsrr0 %r2
3972: /* r2 now contains the fault address */
398 lis %r3,SEGMENT_MASK@highesta
399 ori %r3,%r3,SEGMENT_MASK@highera
400 sldi %r3,%r3,32
401 oris %r3,%r3,SEGMENT_MASK@ha
402 ori %r3,%r3,SEGMENT_MASK@l
403 and %r2,%r2,%r3 /* R2 = segment base address */
404 lis %r3,USER_ADDR@highesta
405 ori %r3,%r3,USER_ADDR@highera
406 sldi %r3,%r3,32
407 oris %r3,%r3,USER_ADDR@ha
408 ori %r3,%r3,USER_ADDR@l
409 cmpd %r2,%r3 /* Compare fault base to USER_ADDR */
410 bne 3f
411
412 /* User seg miss, handle as a regular trap */
413 ld %r2,(PC_SLBSAVE+104)(%r1) /* Restore CR */
414 mtcr %r2
415 ld %r2,(PC_SLBSAVE+16)(%r1) /* Restore R2,R3 */
416 ld %r3,(PC_SLBSAVE+24)(%r1)
417 ld %r1,(PC_SLBSAVE+136)(%r1) /* Save the old LR in r1 */
418 mtsprg2 %r1 /* And then in SPRG2 */
419 li %r1, 0x80 /* How to get the vector from LR */
420 b generictrap /* Retain old LR using b */
421
4223: /* Real kernel SLB miss */
423 std %r0,(PC_SLBSAVE+0)(%r1) /* free all volatile regs */
424 mfsprg1 %r2 /* Old R1 */
425 std %r2,(PC_SLBSAVE+8)(%r1)
426 /* R2,R3 already saved */
427 std %r4,(PC_SLBSAVE+32)(%r1)
428 std %r5,(PC_SLBSAVE+40)(%r1)
429 std %r6,(PC_SLBSAVE+48)(%r1)
430 std %r7,(PC_SLBSAVE+56)(%r1)
431 std %r8,(PC_SLBSAVE+64)(%r1)
432 std %r9,(PC_SLBSAVE+72)(%r1)
433 std %r10,(PC_SLBSAVE+80)(%r1)
434 std %r11,(PC_SLBSAVE+88)(%r1)
435 std %r12,(PC_SLBSAVE+96)(%r1)
436 /* CR already saved */
437 mfxer %r2 /* save XER */
438 std %r2,(PC_SLBSAVE+112)(%r1)
439 mflr %r2 /* save LR (SP already saved) */
440 std %r2,(PC_SLBSAVE+120)(%r1)
441 mfctr %r2 /* save CTR */
442 std %r2,(PC_SLBSAVE+128)(%r1)
443
444 /* Call handler */
445 addi %r1,%r1,PC_SLBSTACK-48+1024
446 li %r2,~15
447 and %r1,%r1,%r2
448 lis %r3,tocbase@ha
449 ld %r2,tocbase@l(%r3)
450 mflr %r3
451 andi. %r3,%r3,0xff80
452 mfdar %r4
453 mfsrr0 %r5
454 bl handle_kernel_slb_spill
455 nop
456
457 /* Save r28-31, restore r4-r12 */
458 GET_CPUINFO(%r1)
459 ld %r4,(PC_SLBSAVE+32)(%r1)
460 ld %r5,(PC_SLBSAVE+40)(%r1)
461 ld %r6,(PC_SLBSAVE+48)(%r1)
462 ld %r7,(PC_SLBSAVE+56)(%r1)
463 ld %r8,(PC_SLBSAVE+64)(%r1)
464 ld %r9,(PC_SLBSAVE+72)(%r1)
465 ld %r10,(PC_SLBSAVE+80)(%r1)
466 ld %r11,(PC_SLBSAVE+88)(%r1)
467 ld %r12,(PC_SLBSAVE+96)(%r1)
468 std %r28,(PC_SLBSAVE+64)(%r1)
469 std %r29,(PC_SLBSAVE+72)(%r1)
470 std %r30,(PC_SLBSAVE+80)(%r1)
471 std %r31,(PC_SLBSAVE+88)(%r1)
472
473 /* Restore kernel mapping */
474 bl restore_kernsrs
475
476 /* Restore remaining registers */
477 ld %r28,(PC_SLBSAVE+64)(%r1)
478 ld %r29,(PC_SLBSAVE+72)(%r1)
479 ld %r30,(PC_SLBSAVE+80)(%r1)
480 ld %r31,(PC_SLBSAVE+88)(%r1)
481
482 ld %r2,(PC_SLBSAVE+104)(%r1)
483 mtcr %r2
484 ld %r2,(PC_SLBSAVE+112)(%r1)
485 mtxer %r2
486 ld %r2,(PC_SLBSAVE+120)(%r1)
487 mtlr %r2
488 ld %r2,(PC_SLBSAVE+128)(%r1)
489 mtctr %r2
490 ld %r2,(PC_SLBSAVE+136)(%r1)
491 mtlr %r2
492
493 /* Restore r0-r3 */
494 ld %r0,(PC_SLBSAVE+0)(%r1)
495 ld %r2,(PC_SLBSAVE+16)(%r1)
496 ld %r3,(PC_SLBSAVE+24)(%r1)
497 mfsprg1 %r1
498
499 /* Back to whatever we were doing */
500 rfid
501
502/*
503 * For ALI: has to save DSISR and DAR
504 */
505 .globl CNAME(alitrap),CNAME(alisize)
506CNAME(alitrap):
507 mtsprg1 %r1 /* save SP */
508 GET_CPUINFO(%r1)
509 std %r27,(PC_TEMPSAVE+CPUSAVE_R27)(%r1) /* free r27-r31 */
510 std %r28,(PC_TEMPSAVE+CPUSAVE_R28)(%r1)
511 std %r29,(PC_TEMPSAVE+CPUSAVE_R29)(%r1)
512 std %r30,(PC_TEMPSAVE+CPUSAVE_R30)(%r1)
513 std %r31,(PC_TEMPSAVE+CPUSAVE_R31)(%r1)
514 mfdar %r30
515 mfdsisr %r31
516 std %r30,(PC_TEMPSAVE+CPUSAVE_AIM_DAR)(%r1)
517 std %r31,(PC_TEMPSAVE+CPUSAVE_AIM_DSISR)(%r1)
518 mfsprg1 %r1 /* restore SP, in case of branch */
519 mflr %r28 /* save LR */
520 mfcr %r29 /* save CR */
521
522 /* Put our exception vector in SPRG3 */
523 li %r31, EXC_ALI
524 mtsprg3 %r31
525
526 /* Test whether we already had PR set */
527 mfsrr1 %r31
528 mtcr %r31
529 bla s_trap
530CNAME(alisize) = .-CNAME(alitrap)
531
532/*
533 * Similar to the above for DSI
534 * Has to handle BAT spills
535 * and standard pagetable spills
536 */
537 .globl CNAME(dsitrap),CNAME(dsisize)
538CNAME(dsitrap):
539 mtsprg1 %r1 /* save SP */
540 GET_CPUINFO(%r1)
541 std %r27,(PC_DISISAVE+CPUSAVE_R27)(%r1) /* free r27-r31 */
542 std %r28,(PC_DISISAVE+CPUSAVE_R28)(%r1)
543 std %r29,(PC_DISISAVE+CPUSAVE_R29)(%r1)
544 std %r30,(PC_DISISAVE+CPUSAVE_R30)(%r1)
545 std %r31,(PC_DISISAVE+CPUSAVE_R31)(%r1)
546 mfsprg1 %r1 /* restore SP */
547 mfcr %r29 /* save CR */
548 mfxer %r30 /* save XER */
549 mtsprg2 %r30 /* in SPRG2 */
550 mfsrr1 %r31 /* test kernel mode */
551 mtcr %r31
552 mflr %r28 /* save LR (SP already saved) */
553 bla disitrap
554CNAME(dsisize) = .-CNAME(dsitrap)
555
556/*
557 * Preamble code for DSI/ISI traps
558 */
559disitrap:
560 /* Write the trap vector to SPRG3 by computing LR & 0xff00 */
561 mflr %r1
562 andi. %r1,%r1,0xff00
563 mtsprg3 %r1
564
565 GET_CPUINFO(%r1)
566 ld %r31,(PC_DISISAVE+CPUSAVE_R27)(%r1)
567 std %r31,(PC_TEMPSAVE+CPUSAVE_R27)(%r1)
568 ld %r30,(PC_DISISAVE+CPUSAVE_R28)(%r1)
569 std %r30,(PC_TEMPSAVE+CPUSAVE_R28)(%r1)
570 ld %r31,(PC_DISISAVE+CPUSAVE_R29)(%r1)
571 std %r31,(PC_TEMPSAVE+CPUSAVE_R29)(%r1)
572 ld %r30,(PC_DISISAVE+CPUSAVE_R30)(%r1)
573 std %r30,(PC_TEMPSAVE+CPUSAVE_R30)(%r1)
574 ld %r31,(PC_DISISAVE+CPUSAVE_R31)(%r1)
575 std %r31,(PC_TEMPSAVE+CPUSAVE_R31)(%r1)
576 mfdar %r30
577 mfdsisr %r31
578 std %r30,(PC_TEMPSAVE+CPUSAVE_AIM_DAR)(%r1)
579 std %r31,(PC_TEMPSAVE+CPUSAVE_AIM_DSISR)(%r1)
580
581#ifdef KDB
582 /* Try to detect a kernel stack overflow */
583 mfsrr1 %r31
584 mtcr %r31
585 	bt	17,realtrap		/* branch if user mode */
586 mfsprg1 %r31 /* get old SP */
587 clrrdi %r31,%r31,12 /* Round SP down to nearest page */
588 sub. %r30,%r31,%r30 /* SP - DAR */
589 bge 1f
590 neg %r30,%r30 /* modulo value */
5911: cmpldi %cr0,%r30,4096 /* is DAR within a page of SP? */
592 bge %cr0,realtrap /* no, too far away. */
593
594 /* Now convert this DSI into a DDB trap. */
595 GET_CPUINFO(%r1)
596 ld %r30,(PC_TEMPSAVE+CPUSAVE_AIM_DAR)(%r1) /* get DAR */
597 std %r30,(PC_DBSAVE +CPUSAVE_AIM_DAR)(%r1) /* save DAR */
598 ld %r30,(PC_TEMPSAVE+CPUSAVE_AIM_DSISR)(%r1) /* get DSISR */
599 std %r30,(PC_DBSAVE +CPUSAVE_AIM_DSISR)(%r1) /* save DSISR */
600 ld %r31,(PC_DISISAVE+CPUSAVE_R27)(%r1) /* get r27 */
601 std %r31,(PC_DBSAVE +CPUSAVE_R27)(%r1) /* save r27 */
602 ld %r30,(PC_DISISAVE+CPUSAVE_R28)(%r1) /* get r28 */
603 std %r30,(PC_DBSAVE +CPUSAVE_R28)(%r1) /* save r28 */
604 ld %r31,(PC_DISISAVE+CPUSAVE_R29)(%r1) /* get r29 */
605 std %r31,(PC_DBSAVE +CPUSAVE_R29)(%r1) /* save r29 */
606 ld %r30,(PC_DISISAVE+CPUSAVE_R30)(%r1) /* get r30 */
607 std %r30,(PC_DBSAVE +CPUSAVE_R30)(%r1) /* save r30 */
608 ld %r31,(PC_DISISAVE+CPUSAVE_R31)(%r1) /* get r31 */
609 std %r31,(PC_DBSAVE +CPUSAVE_R31)(%r1) /* save r31 */
610 b dbtrap
611#endif
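	/*
	 * The kernel-mode check above is, roughly:
	 *
	 *	dist = (sp & ~0xfffUL) - dar;
	 *	if (dist < 0)
	 *		dist = -dist;
	 *	if (dist < 4096)
	 *		goto dbtrap;	-- probable kernel stack overflow
	 *
	 * i.e. a kernel DSI whose fault address lands within a page of the
	 * stack pointer is treated as a likely stack overflow and handed
	 * to the debugger rather than to the normal trap path.
	 */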
612
613 /* XXX need stack probe here */
614realtrap:
615/* Test whether we already had PR set */
616 mfsrr1 %r1
617 mtcr %r1
618 mfsprg1 %r1 /* restore SP (might have been
619 overwritten) */
620 bf 17,k_trap /* branch if PSL_PR is false */
621 GET_CPUINFO(%r1)
622 ld %r1,PC_CURPCB(%r1)
623 mr %r27,%r28 /* Save LR, r29 */
624 mtsprg2 %r29
625 bl restore_kernsrs /* enable kernel mapping */
626 mfsprg2 %r29
627 mr %r28,%r27
628 ba s_trap
629
630/*
631 * generictrap does some standard setup for trap handling to minimize
632 * the code that needs to be installed in the actual vectors. It expects
633 * the following conditions.
634 *
635 * R1 - Trap vector = LR & (0xff00 | R1)
636 * SPRG1 - Original R1 contents
637 * SPRG2 - Original LR
638 */
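/*
 * Worked example: the copy of trapcode at the program-check vector
 * (0x700) ends with "li %r1,0xA0; bla generictrap".  Assuming the stub
 * is copied to start exactly at the vector, the bla leaves
 * LR = 0x700 + 0x14 = 0x714, the mask is 0xff00 | 0xA0 = 0xffa0, and
 * 0x714 & 0xffa0 = 0x700, which is what lands in SPRG3 as the trap type
 * consumed by FRAME_SETUP.
 */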
639
640generictrap:
641 /* Save R1 for computing the exception vector */
642 mtsprg3 %r1
643
644 /* Save interesting registers */
645 GET_CPUINFO(%r1)
646 std %r27,(PC_TEMPSAVE+CPUSAVE_R27)(%r1) /* free r27-r31 */
647 std %r28,(PC_TEMPSAVE+CPUSAVE_R28)(%r1)
648 std %r29,(PC_TEMPSAVE+CPUSAVE_R29)(%r1)
649 std %r30,(PC_TEMPSAVE+CPUSAVE_R30)(%r1)
650 std %r31,(PC_TEMPSAVE+CPUSAVE_R31)(%r1)
651 mfdar %r30
652 std %r30,(PC_TEMPSAVE+CPUSAVE_AIM_DAR)(%r1)
653 mfsprg1 %r1 /* restore SP, in case of branch */
654 mfsprg2 %r28 /* save LR */
655 mfcr %r29 /* save CR */
656
657 /* Compute the exception vector from the link register */
658 mfsprg3 %r31
659 ori %r31,%r31,0xff00
660 mflr %r30
661 and %r30,%r30,%r31
662 mtsprg3 %r30
663
664 /* Test whether we already had PR set */
665 mfsrr1 %r31
666 mtcr %r31
667
668s_trap:
669 bf 17,k_trap /* branch if PSL_PR is false */
670 GET_CPUINFO(%r1)
671u_trap:
672 ld %r1,PC_CURPCB(%r1)
673 mr %r27,%r28 /* Save LR, r29 */
674 mtsprg2 %r29
675 bl restore_kernsrs /* enable kernel mapping */
676 mfsprg2 %r29
677 mr %r28,%r27
678
679/*
680 * Now the common trap catching code.
681 */
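/*
 * FRAME_SETUP leaves %r1 pointing at the frame it just built, with the
 * saved registers at 48(%r1); "addi %r3,%r1,48" below therefore passes
 * a pointer to that trapframe as the first argument to the C
 * dispatcher, and the lis/ld pair reloads the kernel TOC pointer into
 * %r2 beforehand (ELFv1 convention for calling C from here).
 */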
682k_trap:
683 FRAME_SETUP(PC_TEMPSAVE)
684/* Call C interrupt dispatcher: */
685trapagain:
686 lis %r3,tocbase@ha
687 ld %r2,tocbase@l(%r3)
688 addi %r3,%r1,48
689 bl CNAME(powerpc_interrupt)
690 nop
691
692 .globl CNAME(trapexit) /* backtrace code sentinel */
693CNAME(trapexit):
694/* Disable interrupts: */
695 mfmsr %r3
696 andi. %r3,%r3,~PSL_EE@l
697 mtmsr %r3
698 isync
699/* Test AST pending: */
700 ld %r5,FRAME_SRR1+48(%r1)
701 mtcr %r5
702 bf 17,1f /* branch if PSL_PR is false */
703
704 GET_CPUINFO(%r3) /* get per-CPU pointer */
705 lwz %r4, TD_FLAGS(%r13) /* get thread flags value */
706 lis %r5, (TDF_ASTPENDING|TDF_NEEDRESCHED)@h
707 ori %r5,%r5, (TDF_ASTPENDING|TDF_NEEDRESCHED)@l
708 and. %r4,%r4,%r5
709 beq 1f
710 mfmsr %r3 /* re-enable interrupts */
711 ori %r3,%r3,PSL_EE@l
712 mtmsr %r3
713 isync
714 lis %r3,tocbase@ha
715 ld %r2,tocbase@l(%r3)
716 addi %r3,%r1,48
717 bl CNAME(ast)
718 nop
719 .globl CNAME(asttrapexit) /* backtrace code sentinel #2 */
720CNAME(asttrapexit):
721 b trapexit /* test ast ret value ? */
7221:
723 FRAME_LEAVE(PC_TEMPSAVE)
724 rfid
725
726#if defined(KDB)
727/*
728 * Deliberate entry to dbtrap
729 */
730ASENTRY_NOPROF(breakpoint)
731 mtsprg1 %r1
732 mfmsr %r3
733 mtsrr1 %r3
734 andi. %r3,%r3,~(PSL_EE|PSL_ME)@l
735 mtmsr %r3 /* disable interrupts */
736 isync
737 GET_CPUINFO(%r3)
738 std %r27,(PC_DBSAVE+CPUSAVE_R27)(%r3)
739 std %r28,(PC_DBSAVE+CPUSAVE_R28)(%r3)
740 std %r29,(PC_DBSAVE+CPUSAVE_R29)(%r3)
741 std %r30,(PC_DBSAVE+CPUSAVE_R30)(%r3)
742 std %r31,(PC_DBSAVE+CPUSAVE_R31)(%r3)
743 mflr %r28
744 li %r29,EXC_BPT
745 mtlr %r29
746 mfcr %r29
747 mtsrr0 %r28
748
749/*
750 * Now the kdb trap catching code.
751 */
752dbtrap:
753 /* Write the trap vector to SPRG3 by computing LR & 0xff00 */
754 mflr %r1
755 andi. %r1,%r1,0xff00
756 mtsprg3 %r1
757
758 lis %r1,(tmpstk+TMPSTKSZ-48)@ha /* get new SP */
759 addi %r1,%r1,(tmpstk+TMPSTKSZ-48)@l
760
761 FRAME_SETUP(PC_DBSAVE)
762/* Call C trap code: */
763 lis %r3,tocbase@ha
764 ld %r2,tocbase@l(%r3)
765 addi %r3,%r1,48
766 bl CNAME(db_trap_glue)
767 nop
768 or. %r3,%r3,%r3
769 bne dbleave
770/* This wasn't for KDB, so switch to real trap: */
771 ld %r3,FRAME_EXC+48(%r1) /* save exception */
772 GET_CPUINFO(%r4)
773 std %r3,(PC_DBSAVE+CPUSAVE_R31)(%r4)
774 FRAME_LEAVE(PC_DBSAVE)
775 mtsprg1 %r1 /* prepare for entrance to realtrap */
776 GET_CPUINFO(%r1)
777 std %r27,(PC_TEMPSAVE+CPUSAVE_R27)(%r1)
778 std %r28,(PC_TEMPSAVE+CPUSAVE_R28)(%r1)
779 std %r29,(PC_TEMPSAVE+CPUSAVE_R29)(%r1)
780 std %r30,(PC_TEMPSAVE+CPUSAVE_R30)(%r1)
781 std %r31,(PC_TEMPSAVE+CPUSAVE_R31)(%r1)
782 mflr %r28
783 mfcr %r29
784 ld %r31,(PC_DBSAVE+CPUSAVE_R31)(%r1)
785 mtsprg3 %r31 /* SPRG3 was clobbered by FRAME_LEAVE */
786 mfsprg1 %r1
787 b realtrap
788dbleave:
789 FRAME_LEAVE(PC_DBSAVE)
790 rfid
791
792/*
793 * In case of KDB we want a separate trap catcher for it
794 */
795 .globl CNAME(dblow),CNAME(dbsize)
796CNAME(dblow):
797 mtsprg1 %r1 /* save SP */
798 mtsprg2 %r29 /* save r29 */
799 mfcr %r29 /* save CR in r29 */
800 mfsrr1 %r1
801 mtcr %r1
trap_subr64.S (274743), lines 802-823:
802 	bf	17,1f			/* branch if privileged */
803
804 	/* Unprivileged case */
805 	mtcr	%r29			/* put the condition register back */
806 	mfsprg2	%r29			/* ... and r29 */
807 	mflr	%r1			/* save LR */
808 	mtsprg2	%r1			/* And then in SPRG2 */
809 	li	%r1, 0			/* How to get the vector from LR */
810
811 	bla	generictrap		/* and we look like a generic trap */
812 1:
813 	GET_CPUINFO(%r1)
814 	std	%r27,(PC_DBSAVE+CPUSAVE_R27)(%r1)	/* free r27 */
815 	std	%r28,(PC_DBSAVE+CPUSAVE_R28)(%r1)	/* free r28 */
816 	mfsprg2	%r28			/* r29 holds cr... */
817 	std	%r28,(PC_DBSAVE+CPUSAVE_R29)(%r1)	/* free r29 */
818 	std	%r30,(PC_DBSAVE+CPUSAVE_R30)(%r1)	/* free r30 */
819 	std	%r31,(PC_DBSAVE+CPUSAVE_R31)(%r1)	/* free r31 */
820 	mflr	%r28			/* save LR */
821 	bla	dbtrap
822 CNAME(dbsize) = .-CNAME(dblow)
823 #endif /* KDB */

trap_subr64.S (275268), lines 802-836:
802 	bf	17,2f			/* branch if privileged */
803
804 1:
805 	/* Unprivileged case */
806 	mtcr	%r29			/* put the condition register back */
807 	mfsprg2	%r29			/* ... and r29 */
808 	mflr	%r1			/* save LR */
809 	mtsprg2	%r1			/* And then in SPRG2 */
810 	li	%r1, 0			/* How to get the vector from LR */
811
812 	bla	generictrap		/* and we look like a generic trap */
813 2:
814 #ifdef KDTRACE_HOOKS
815 	/* Privileged, so drop to KDB */
816 	mfsrr0	%r1
817 	mtsprg3	%r3
818 	lwz	%r1,0(%r1)
819 	/* Check if it's a DTrace trap. */
820 	li	%r3,0x0808
821 	addis	%r3,%r3,0x7c81
822 	cmplw	%cr0,%r3,%r1
823 	mfsprg3	%r3
824 	beq	%cr0,1b
825 #endif
826 	GET_CPUINFO(%r1)
827 	std	%r27,(PC_DBSAVE+CPUSAVE_R27)(%r1)	/* free r27 */
828 	std	%r28,(PC_DBSAVE+CPUSAVE_R28)(%r1)	/* free r28 */
829 	mfsprg2	%r28			/* r29 holds cr... */
830 	std	%r28,(PC_DBSAVE+CPUSAVE_R29)(%r1)	/* free r29 */
831 	std	%r30,(PC_DBSAVE+CPUSAVE_R30)(%r1)	/* free r30 */
832 	std	%r31,(PC_DBSAVE+CPUSAVE_R31)(%r1)	/* free r31 */
833 	mflr	%r28			/* save LR */
834 	bla	dbtrap
835 CNAME(dbsize) = .-CNAME(dblow)
836 #endif /* KDB */
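Net effect of the change from 274743 to 275268: the privileged path of
dblow gains the 1:/2: labels and, under KDTRACE_HOOKS, compares the
instruction word at SRR0 against 0x7c810808 (the constant assembled by
the li/addis pair) before dropping into KDB; on a match the trap is sent
back through the generic trap path (the "1b" branch) instead of entering
the debugger.  A hedged C sketch of the added test -- only the constant
comes from the diff, the helper name and everything else is illustrative:

	#include <stdint.h>

	#define PPC_DTRACE_BKPT	0x7c810808u	/* li 0x0808 + addis 0x7c81 */

	/* Return nonzero if the word at the faulting PC is the DTrace trap. */
	static int
	is_dtrace_trap(const uint32_t *srr0)
	{
		return (*srr0 == PPC_DTRACE_BKPT);
	}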