1/*
2 *  PowerPC version
3 *    Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
4 *  Rewritten by Cort Dougan (cort@fsmlabs.com) for PReP
5 *    Copyright (C) 1996 Cort Dougan <cort@fsmlabs.com>
6 *  Adapted for Power Macintosh by Paul Mackerras.
7 *  Low-level exception handlers and MMU support
8 *  rewritten by Paul Mackerras.
9 *    Copyright (C) 1996 Paul Mackerras.
10 *  MPC8xx modifications Copyright (C) 1997 Dan Malek (dmalek@jlc.net).
11 *
12 *  This file contains the system call entry code, context switch
13 *  code, and exception/interrupt return code for PowerPC.
14 *
15 *  This program is free software; you can redistribute it and/or
16 *  modify it under the terms of the GNU General Public License
17 *  as published by the Free Software Foundation; either version
18 *  2 of the License, or (at your option) any later version.
19 *
20 */
21
22#include <linux/errno.h>
23#include <linux/sys.h>
24#include <linux/threads.h>
25#include <asm/reg.h>
26#include <asm/page.h>
27#include <asm/mmu.h>
28#include <asm/cputable.h>
29#include <asm/thread_info.h>
30#include <asm/ppc_asm.h>
31#include <asm/asm-offsets.h>
32#include <asm/unistd.h>
33#include <asm/ftrace.h>
34
35#undef SHOW_SYSCALLS
36#undef SHOW_SYSCALLS_TASK
37
38/*
39 * MSR_KERNEL is > 0x10000 on 4xx/Book-E since it includes MSR_CE.
40 */
41#if MSR_KERNEL >= 0x10000
42#define LOAD_MSR_KERNEL(r, x)	lis r,(x)@h; ori r,r,(x)@l
43#else
44#define LOAD_MSR_KERNEL(r, x)	li r,(x)
45#endif
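/*
 * For reference: li can only materialize a 16-bit immediate, so once
 * MSR_KERNEL grows past 0xffff (as it does when MSR_CE is part of it)
 * the constant has to be built in two halves, i.e. the macro above
 * expands LOAD_MSR_KERNEL(r10,MSR_KERNEL) to roughly
 *	lis	r10,MSR_KERNEL@h
 *	ori	r10,r10,MSR_KERNEL@l
 * instead of a single li.
 */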
46
47#ifdef CONFIG_BOOKE
48	.globl	mcheck_transfer_to_handler
49mcheck_transfer_to_handler:
50	mfspr	r0,SPRN_DSRR0
51	stw	r0,_DSRR0(r11)
52	mfspr	r0,SPRN_DSRR1
53	stw	r0,_DSRR1(r11)
54	/* fall through */
55
56	.globl	debug_transfer_to_handler
57debug_transfer_to_handler:
58	mfspr	r0,SPRN_CSRR0
59	stw	r0,_CSRR0(r11)
60	mfspr	r0,SPRN_CSRR1
61	stw	r0,_CSRR1(r11)
62	/* fall through */
63
64	.globl	crit_transfer_to_handler
65crit_transfer_to_handler:
66#ifdef CONFIG_PPC_BOOK3E_MMU
67	mfspr	r0,SPRN_MAS0
68	stw	r0,MAS0(r11)
69	mfspr	r0,SPRN_MAS1
70	stw	r0,MAS1(r11)
71	mfspr	r0,SPRN_MAS2
72	stw	r0,MAS2(r11)
73	mfspr	r0,SPRN_MAS3
74	stw	r0,MAS3(r11)
75	mfspr	r0,SPRN_MAS6
76	stw	r0,MAS6(r11)
77#ifdef CONFIG_PHYS_64BIT
78	mfspr	r0,SPRN_MAS7
79	stw	r0,MAS7(r11)
80#endif /* CONFIG_PHYS_64BIT */
81#endif /* CONFIG_PPC_BOOK3E_MMU */
82#ifdef CONFIG_44x
83	mfspr	r0,SPRN_MMUCR
84	stw	r0,MMUCR(r11)
85#endif
86	mfspr	r0,SPRN_SRR0
87	stw	r0,_SRR0(r11)
88	mfspr	r0,SPRN_SRR1
89	stw	r0,_SRR1(r11)
90
91	mfspr	r8,SPRN_SPRG_THREAD
92	lwz	r0,KSP_LIMIT(r8)
93	stw	r0,SAVED_KSP_LIMIT(r11)
94	rlwimi	r0,r1,0,0,(31-THREAD_SHIFT)
95	stw	r0,KSP_LIMIT(r8)
96	/* fall through */
97#endif
98
99#ifdef CONFIG_40x
100	.globl	crit_transfer_to_handler
101crit_transfer_to_handler:
102	lwz	r0,crit_r10@l(0)
103	stw	r0,GPR10(r11)
104	lwz	r0,crit_r11@l(0)
105	stw	r0,GPR11(r11)
106	mfspr	r0,SPRN_SRR0
107	stw	r0,crit_srr0@l(0)
108	mfspr	r0,SPRN_SRR1
109	stw	r0,crit_srr1@l(0)
110
111	mfspr	r8,SPRN_SPRG_THREAD
112	lwz	r0,KSP_LIMIT(r8)
113	stw	r0,saved_ksp_limit@l(0)
114	rlwimi	r0,r1,0,0,(31-THREAD_SHIFT)
115	stw	r0,KSP_LIMIT(r8)
116	/* fall through */
117#endif
118
119/*
120 * This code finishes saving the registers to the exception frame
121 * and jumps to the appropriate handler for the exception, turning
122 * on address translation.
123 * Note that we rely on the caller having set cr0.eq iff the exception
124 * occurred in kernel mode (i.e. MSR:PR = 0).
125 */
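/*
 * A rough orientation for the code below: r11 points to the exception
 * frame, r9 holds the saved MSR and r12 the saved NIP (see the stw of
 * r12 to _NIP and r9 to _MSR just below).  The recurring idiom
 *	rlwinm	rN,r1,0,0,(31-THREAD_SHIFT)
 * masks off the low THREAD_SHIFT bits of the stack pointer, giving the
 * base of the THREAD_SIZE-aligned kernel stack, i.e. the thread_info
 * of the current task.
 */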
126	.globl	transfer_to_handler_full
127transfer_to_handler_full:
128	SAVE_NVGPRS(r11)
129	/* fall through */
130
131	.globl	transfer_to_handler
132transfer_to_handler:
133	stw	r2,GPR2(r11)
134	stw	r12,_NIP(r11)
135	stw	r9,_MSR(r11)
136	andi.	r2,r9,MSR_PR
137	mfctr	r12
138	mfspr	r2,SPRN_XER
139	stw	r12,_CTR(r11)
140	stw	r2,_XER(r11)
141	mfspr	r12,SPRN_SPRG_THREAD
142	addi	r2,r12,-THREAD
143	tovirt(r2,r2)			/* set r2 to current */
144	beq	2f			/* if from user, fix up THREAD.regs */
145	addi	r11,r1,STACK_FRAME_OVERHEAD
146	stw	r11,PT_REGS(r12)
147#if defined(CONFIG_40x) || defined(CONFIG_BOOKE)
148	/* Check to see if the dbcr0 register is set up to debug.  Use the
149	   internal debug mode bit to do this. */
150	lwz	r12,THREAD_DBCR0(r12)
151	andis.	r12,r12,DBCR0_IDM@h
152	beq+	3f
153	/* From user and task is ptraced - load up global dbcr0 */
154	li	r12,-1			/* clear all pending debug events */
155	mtspr	SPRN_DBSR,r12
156	lis	r11,global_dbcr0@ha
157	tophys(r11,r11)
158	addi	r11,r11,global_dbcr0@l
159#ifdef CONFIG_SMP
160	rlwinm	r9,r1,0,0,(31-THREAD_SHIFT)
161	lwz	r9,TI_CPU(r9)
162	slwi	r9,r9,3
163	add	r11,r11,r9
164#endif
165	lwz	r12,0(r11)
166	mtspr	SPRN_DBCR0,r12
167	lwz	r12,4(r11)
168	addi	r12,r12,-1
169	stw	r12,4(r11)
170#endif
171	b	3f
172
1732:	/* if from kernel, check interrupted DOZE/NAP mode and
174         * check for stack overflow
175         */
176	lwz	r9,KSP_LIMIT(r12)
177	cmplw	r1,r9			/* if r1 <= ksp_limit */
178	ble-	stack_ovf		/* then the kernel stack overflowed */
1795:
180#if defined(CONFIG_6xx) || defined(CONFIG_E500)
181	rlwinm	r9,r1,0,0,31-THREAD_SHIFT
182	tophys(r9,r9)			/* check local flags */
183	lwz	r12,TI_LOCAL_FLAGS(r9)
184	mtcrf	0x01,r12
185	bt-	31-TLF_NAPPING,4f
186	bt-	31-TLF_SLEEPING,7f
187#endif /* CONFIG_6xx || CONFIG_E500 */
188	.globl transfer_to_handler_cont
189transfer_to_handler_cont:
1903:
191	mflr	r9
192	lwz	r11,0(r9)		/* virtual address of handler */
193	lwz	r9,4(r9)		/* where to go when done */
194#ifdef CONFIG_TRACE_IRQFLAGS
195	lis	r12,reenable_mmu@h
196	ori	r12,r12,reenable_mmu@l
197	mtspr	SPRN_SRR0,r12
198	mtspr	SPRN_SRR1,r10
199	SYNC
200	RFI
201reenable_mmu:				/* re-enable mmu so we can */
202	mfmsr	r10
203	lwz	r12,_MSR(r1)
204	xor	r10,r10,r12
205	andi.	r10,r10,MSR_EE		/* Did EE change? */
206	beq	1f
207
208	/* Save handler and return address into the 2 unused words
209	 * of the STACK_FRAME_OVERHEAD (sneak sneak sneak). Everything
210	 * else can be recovered from the pt_regs except r3, which for
211	 * normal interrupts has been set to pt_regs and for syscalls
212	 * is an argument, so we temporarily use ORIG_GPR3 to save it.
213	 */
214	stw	r9,8(r1)
215	stw	r11,12(r1)
216	stw	r3,ORIG_GPR3(r1)
217	bl	trace_hardirqs_off
218	lwz	r0,GPR0(r1)
219	lwz	r3,ORIG_GPR3(r1)
220	lwz	r4,GPR4(r1)
221	lwz	r5,GPR5(r1)
222	lwz	r6,GPR6(r1)
223	lwz	r7,GPR7(r1)
224	lwz	r8,GPR8(r1)
225	lwz	r9,8(r1)
226	lwz	r11,12(r1)
2271:	mtctr	r11
228	mtlr	r9
229	bctr				/* jump to handler */
230#else /* CONFIG_TRACE_IRQFLAGS */
231	mtspr	SPRN_SRR0,r11
232	mtspr	SPRN_SRR1,r10
233	mtlr	r9
234	SYNC
235	RFI				/* jump to handler, enable MMU */
236#endif /* CONFIG_TRACE_IRQFLAGS */
237
238#if defined(CONFIG_6xx) || defined(CONFIG_E500)
2394:	rlwinm	r12,r12,0,~_TLF_NAPPING
240	stw	r12,TI_LOCAL_FLAGS(r9)
241	b	power_save_ppc32_restore
242
2437:	rlwinm	r12,r12,0,~_TLF_SLEEPING
244	stw	r12,TI_LOCAL_FLAGS(r9)
245	lwz	r9,_MSR(r11)		/* if sleeping, clear MSR.EE */
246	rlwinm	r9,r9,0,~MSR_EE
247	lwz	r12,_LINK(r11)		/* and return to address in LR */
248	b	fast_exception_return
249#endif
250
251/*
252 * On kernel stack overflow, load up an initial stack pointer
253 * and call StackOverflow(regs), which should not return.
254 */
255stack_ovf:
256	/* sometimes we use a statically-allocated stack, which is OK. */
257	lis	r12,_end@h
258	ori	r12,r12,_end@l
259	cmplw	r1,r12
260	ble	5b			/* r1 <= &_end is OK */
261	SAVE_NVGPRS(r11)
262	addi	r3,r1,STACK_FRAME_OVERHEAD
263	lis	r1,init_thread_union@ha
264	addi	r1,r1,init_thread_union@l
265	addi	r1,r1,THREAD_SIZE-STACK_FRAME_OVERHEAD
266	lis	r9,StackOverflow@ha
267	addi	r9,r9,StackOverflow@l
268	LOAD_MSR_KERNEL(r10,MSR_KERNEL)
269	FIX_SRR1(r10,r12)
270	mtspr	SPRN_SRR0,r9
271	mtspr	SPRN_SRR1,r10
272	SYNC
273	RFI
274
275/*
276 * Handle a system call.
277 */
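/*
 * Roughly, the convention the code below relies on: the syscall number
 * arrives in r0 and the arguments in r3-r8; the result is returned in
 * r3, negated to a positive errno and with CR0.SO set on failure (see
 * the "Set SO bit in CR" path in ret_from_syscall).  A user-space
 * invocation therefore looks approximately like:
 *	li	r0,<syscall number>
 *	(arguments loaded into r3-r8)
 *	sc
 *	bso	error_handling		# CR0.SO set => r3 is an errno
 */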
278	.stabs	"arch/powerpc/kernel/",N_SO,0,0,0f
279	.stabs	"entry_32.S",N_SO,0,0,0f
2800:
281
282_GLOBAL(DoSyscall)
283	stw	r3,ORIG_GPR3(r1)
284	li	r12,0
285	stw	r12,RESULT(r1)
286	lwz	r11,_CCR(r1)	/* Clear SO bit in CR */
287	rlwinm	r11,r11,0,4,2
288	stw	r11,_CCR(r1)
289#ifdef SHOW_SYSCALLS
290	bl	do_show_syscall
291#endif /* SHOW_SYSCALLS */
292#ifdef CONFIG_TRACE_IRQFLAGS
293	/* Returning from a syscall can (and generally will) hard-enable
294	 * interrupts. You aren't supposed to invoke a syscall with
295	 * interrupts disabled in the first place. However, to ensure
296	 * that we get it right vs. lockdep if it happens anyway, we
297	 * force that hard enable here, with appropriate tracing, if we
298	 * see that we have been called with interrupts off.
299	 */
300	mfmsr	r11
301	andi.	r12,r11,MSR_EE
302	bne+	1f
303	/* We came in with interrupts disabled, we enable them now */
304	bl	trace_hardirqs_on
305	mfmsr	r11
306	lwz	r0,GPR0(r1)
307	lwz	r3,GPR3(r1)
308	lwz	r4,GPR4(r1)
309	ori	r11,r11,MSR_EE
310	lwz	r5,GPR5(r1)
311	lwz	r6,GPR6(r1)
312	lwz	r7,GPR7(r1)
313	lwz	r8,GPR8(r1)
314	mtmsr	r11
3151:
316#endif /* CONFIG_TRACE_IRQFLAGS */
317	rlwinm	r10,r1,0,0,(31-THREAD_SHIFT)	/* current_thread_info() */
318	lwz	r11,TI_FLAGS(r10)
319	andi.	r11,r11,_TIF_SYSCALL_T_OR_A
320	bne-	syscall_dotrace
321syscall_dotrace_cont:
322	cmplwi	0,r0,NR_syscalls
323	lis	r10,sys_call_table@h
324	ori	r10,r10,sys_call_table@l
325	slwi	r0,r0,2
326	bge-	66f
327	lwzx	r10,r10,r0	/* Fetch system call handler [ptr] */
328	mtlr	r10
329	addi	r9,r1,STACK_FRAME_OVERHEAD
330	PPC440EP_ERR42
331	blrl			/* Call handler */
332	.globl	ret_from_syscall
333ret_from_syscall:
334#ifdef SHOW_SYSCALLS
335	bl	do_show_syscall_exit
336#endif
337	mr	r6,r3
338	rlwinm	r12,r1,0,0,(31-THREAD_SHIFT)	/* current_thread_info() */
339	/* disable interrupts so current_thread_info()->flags can't change */
340	LOAD_MSR_KERNEL(r10,MSR_KERNEL)	/* doesn't include MSR_EE */
341	/* Note: We don't bother telling lockdep about it */
342	SYNC
343	MTMSRD(r10)
344	lwz	r9,TI_FLAGS(r12)
345	li	r8,-_LAST_ERRNO
346	andi.	r0,r9,(_TIF_SYSCALL_T_OR_A|_TIF_SINGLESTEP|_TIF_USER_WORK_MASK|_TIF_PERSYSCALL_MASK)
347	bne-	syscall_exit_work
348	cmplw	0,r3,r8
349	blt+	syscall_exit_cont
350	lwz	r11,_CCR(r1)			/* Load CR */
351	neg	r3,r3
352	oris	r11,r11,0x1000	/* Set SO bit in CR */
353	stw	r11,_CCR(r1)
354syscall_exit_cont:
355	lwz	r8,_MSR(r1)
356#ifdef CONFIG_TRACE_IRQFLAGS
357	/* If we are going to return from the syscall with interrupts
358	 * off, we trace that here. It shouldn't happen, but we want
359	 * to catch the bugger if it does, right?
360	 */
361	andi.	r10,r8,MSR_EE
362	bne+	1f
363	stw	r3,GPR3(r1)
364	bl      trace_hardirqs_off
365	lwz	r3,GPR3(r1)
3661:
367#endif /* CONFIG_TRACE_IRQFLAGS */
368#if defined(CONFIG_4xx) || defined(CONFIG_BOOKE)
369	/* If the process has its own DBCR0 value, load it up.  The internal
370	   debug mode bit tells us that dbcr0 should be loaded. */
371	lwz	r0,THREAD+THREAD_DBCR0(r2)
372	andis.	r10,r0,DBCR0_IDM@h
373	bnel-	load_dbcr0
374#endif
375#ifdef CONFIG_44x
376BEGIN_MMU_FTR_SECTION
377	lis	r4,icache_44x_need_flush@ha
378	lwz	r5,icache_44x_need_flush@l(r4)
379	cmplwi	cr0,r5,0
380	bne-	2f
3811:
382END_MMU_FTR_SECTION_IFCLR(MMU_FTR_TYPE_47x)
383#endif /* CONFIG_44x */
384BEGIN_FTR_SECTION
385	lwarx	r7,0,r1
386END_FTR_SECTION_IFSET(CPU_FTR_NEED_PAIRED_STWCX)
387	stwcx.	r0,0,r1			/* to clear the reservation */
388	lwz	r4,_LINK(r1)
389	lwz	r5,_CCR(r1)
390	mtlr	r4
391	mtcr	r5
392	lwz	r7,_NIP(r1)
393	FIX_SRR1(r8, r0)
394	lwz	r2,GPR2(r1)
395	lwz	r1,GPR1(r1)
396	mtspr	SPRN_SRR0,r7
397	mtspr	SPRN_SRR1,r8
398	SYNC
399	RFI
400#ifdef CONFIG_44x
4012:	li	r7,0
402	iccci	r0,r0
403	stw	r7,icache_44x_need_flush@l(r4)
404	b	1b
405#endif  /* CONFIG_44x */
406
40766:	li	r3,-ENOSYS
408	b	ret_from_syscall
409
410	.globl	ret_from_fork
411ret_from_fork:
412	REST_NVGPRS(r1)
413	bl	schedule_tail
414	li	r3,0
415	b	ret_from_syscall
416
417/* Traced system call support */
418syscall_dotrace:
419	SAVE_NVGPRS(r1)
420	li	r0,0xc00
421	stw	r0,_TRAP(r1)
422	addi	r3,r1,STACK_FRAME_OVERHEAD
423	bl	do_syscall_trace_enter
424	/*
425	 * Restore argument registers possibly just changed.
426	 * We use the return value of do_syscall_trace_enter
427	 * as the call number to look up in the table (r0).
428	 */
429	mr	r0,r3
430	lwz	r3,GPR3(r1)
431	lwz	r4,GPR4(r1)
432	lwz	r5,GPR5(r1)
433	lwz	r6,GPR6(r1)
434	lwz	r7,GPR7(r1)
435	lwz	r8,GPR8(r1)
436	REST_NVGPRS(r1)
437	b	syscall_dotrace_cont
438
439syscall_exit_work:
440	andi.	r0,r9,_TIF_RESTOREALL
441	beq+	0f
442	REST_NVGPRS(r1)
443	b	2f
4440:	cmplw	0,r3,r8
445	blt+	1f
446	andi.	r0,r9,_TIF_NOERROR
447	bne-	1f
448	lwz	r11,_CCR(r1)			/* Load CR */
449	neg	r3,r3
450	oris	r11,r11,0x1000	/* Set SO bit in CR */
451	stw	r11,_CCR(r1)
452
4531:	stw	r6,RESULT(r1)	/* Save result */
454	stw	r3,GPR3(r1)	/* Update return value */
4552:	andi.	r0,r9,(_TIF_PERSYSCALL_MASK)
456	beq	4f
457
458	/* Clear per-syscall TIF flags if any are set.  */
459
460	li	r11,_TIF_PERSYSCALL_MASK
461	addi	r12,r12,TI_FLAGS
4623:	lwarx	r8,0,r12
463	andc	r8,r8,r11
464#ifdef CONFIG_IBM405_ERR77
465	dcbt	0,r12
466#endif
467	stwcx.	r8,0,r12
468	bne-	3b
469	subi	r12,r12,TI_FLAGS
470
4714:	/* Anything which requires enabling interrupts? */
472	andi.	r0,r9,(_TIF_SYSCALL_T_OR_A|_TIF_SINGLESTEP)
473	beq	ret_from_except
474
475	/* Re-enable interrupts. There is no need to trace that with
476	 * lockdep as we are supposed to have IRQs on at this point
477	 */
478	ori	r10,r10,MSR_EE
479	SYNC
480	MTMSRD(r10)
481
482	/* Save NVGPRS if they're not saved already */
483	lwz	r4,_TRAP(r1)
484	andi.	r4,r4,1
485	beq	5f
486	SAVE_NVGPRS(r1)
487	li	r4,0xc00
488	stw	r4,_TRAP(r1)
4895:
490	addi	r3,r1,STACK_FRAME_OVERHEAD
491	bl	do_syscall_trace_leave
492	b	ret_from_except_full
493
494#ifdef SHOW_SYSCALLS
495do_show_syscall:
496#ifdef SHOW_SYSCALLS_TASK
497	lis	r11,show_syscalls_task@ha
498	lwz	r11,show_syscalls_task@l(r11)
499	cmp	0,r2,r11
500	bnelr
501#endif
502	stw	r31,GPR31(r1)
503	mflr	r31
504	lis	r3,7f@ha
505	addi	r3,r3,7f@l
506	lwz	r4,GPR0(r1)
507	lwz	r5,GPR3(r1)
508	lwz	r6,GPR4(r1)
509	lwz	r7,GPR5(r1)
510	lwz	r8,GPR6(r1)
511	lwz	r9,GPR7(r1)
512	bl	printk
513	lis	r3,77f@ha
514	addi	r3,r3,77f@l
515	lwz	r4,GPR8(r1)
516	mr	r5,r2
517	bl	printk
518	lwz	r0,GPR0(r1)
519	lwz	r3,GPR3(r1)
520	lwz	r4,GPR4(r1)
521	lwz	r5,GPR5(r1)
522	lwz	r6,GPR6(r1)
523	lwz	r7,GPR7(r1)
524	lwz	r8,GPR8(r1)
525	mtlr	r31
526	lwz	r31,GPR31(r1)
527	blr
528
529do_show_syscall_exit:
530#ifdef SHOW_SYSCALLS_TASK
531	lis	r11,show_syscalls_task@ha
532	lwz	r11,show_syscalls_task@l(r11)
533	cmp	0,r2,r11
534	bnelr
535#endif
536	stw	r31,GPR31(r1)
537	mflr	r31
538	stw	r3,RESULT(r1)	/* Save result */
539	mr	r4,r3
540	lis	r3,79f@ha
541	addi	r3,r3,79f@l
542	bl	printk
543	lwz	r3,RESULT(r1)
544	mtlr	r31
545	lwz	r31,GPR31(r1)
546	blr
547
5487:	.string	"syscall %d(%x, %x, %x, %x, %x, "
54977:	.string	"%x), current=%p\n"
55079:	.string	" -> %x\n"
551	.align	2,0
552
553#ifdef SHOW_SYSCALLS_TASK
554	.data
555	.globl	show_syscalls_task
556show_syscalls_task:
557	.long	-1
558	.text
559#endif
560#endif /* SHOW_SYSCALLS */
561
562/*
563 * The fork/clone functions need to copy the full register set into
564 * the child process. Therefore we need to save all the nonvolatile
565 * registers (r13 - r31) before calling the C code.
566 */
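/*
 * Note on the _TRAP twiddling below: the low bit of the saved _TRAP
 * value is used as a flag.  It is set while only the volatile
 * registers have been saved and cleared once the full register set
 * (r13-r31) is in the frame; paths such as do_user_signal and
 * syscall_exit_work test that bit to decide whether SAVE_NVGPRS is
 * still needed.
 */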
567	.globl	ppc_fork
568ppc_fork:
569	SAVE_NVGPRS(r1)
570	lwz	r0,_TRAP(r1)
571	rlwinm	r0,r0,0,0,30		/* clear LSB to indicate full */
572	stw	r0,_TRAP(r1)		/* register set saved */
573	b	sys_fork
574
575	.globl	ppc_vfork
576ppc_vfork:
577	SAVE_NVGPRS(r1)
578	lwz	r0,_TRAP(r1)
579	rlwinm	r0,r0,0,0,30		/* clear LSB to indicate full */
580	stw	r0,_TRAP(r1)		/* register set saved */
581	b	sys_vfork
582
583	.globl	ppc_clone
584ppc_clone:
585	SAVE_NVGPRS(r1)
586	lwz	r0,_TRAP(r1)
587	rlwinm	r0,r0,0,0,30		/* clear LSB to indicate full */
588	stw	r0,_TRAP(r1)		/* register set saved */
589	b	sys_clone
590
591	.globl	ppc_swapcontext
592ppc_swapcontext:
593	SAVE_NVGPRS(r1)
594	lwz	r0,_TRAP(r1)
595	rlwinm	r0,r0,0,0,30		/* clear LSB to indicate full */
596	stw	r0,_TRAP(r1)		/* register set saved */
597	b	sys_swapcontext
598
599/*
600 * Top-level page fault handling.
601 * This is in assembler because if do_page_fault tells us that
602 * it is a bad kernel page fault, we want to save the non-volatile
603 * registers before calling bad_page_fault.
604 */
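/*
 * At this point r4 holds the faulting address (DAR) and r5 the error
 * code, as set up by the exception prologues, so the calls below are
 * roughly do_page_fault(regs, address, error_code) and, for a bad
 * kernel fault, bad_page_fault(regs, address, sig) with sig taken
 * from do_page_fault's return value.
 */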
605	.globl	handle_page_fault
606handle_page_fault:
607	stw	r4,_DAR(r1)
608	addi	r3,r1,STACK_FRAME_OVERHEAD
609	bl	do_page_fault
610	cmpwi	r3,0
611	beq+	ret_from_except
612	SAVE_NVGPRS(r1)
613	lwz	r0,_TRAP(r1)
614	clrrwi	r0,r0,1
615	stw	r0,_TRAP(r1)
616	mr	r5,r3
617	addi	r3,r1,STACK_FRAME_OVERHEAD
618	lwz	r4,_DAR(r1)
619	bl	bad_page_fault
620	b	ret_from_except_full
621
622/*
623 * This routine switches between two different tasks.  The process
624 * state of one is saved on its kernel stack.  Then the state
625 * of the other is restored from its kernel stack.  The memory
626 * management hardware is updated to the second process's state.
627 * Finally, we can return to the second process.
628 * On entry, r3 points to the THREAD for the current task, r4
629 * points to the THREAD for the new task.
630 *
631 * This routine is always called with interrupts disabled.
632 *
633 * Note: there are two ways to get to the "going out" portion
634 * of this code; either by coming in via the entry (_switch)
635 * or via "fork", which must set up an environment equivalent
636 * to the "_switch" path.  If you change this, you'll have to
637 * change the fork code also.
638 *
639 * The code which creates the new task context is in 'copy_thread'
640 * in arch/powerpc/kernel/process.c
641 */
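/*
 * In outline: the outgoing task's non-volatile GPRs, MSR and CR are
 * saved in a frame on its kernel stack and the resulting stack pointer
 * is stored at KSP(r3); SPRG_THREAD and r2 ("current") are then
 * switched over and the same state is reloaded from the new task's
 * stack via KSP(r4).  r3 is set to the previous task so the caller
 * gets the usual "last" return value.
 */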
642_GLOBAL(_switch)
643	stwu	r1,-INT_FRAME_SIZE(r1)
644	mflr	r0
645	stw	r0,INT_FRAME_SIZE+4(r1)
646	/* r3-r12 are caller saved -- Cort */
647	SAVE_NVGPRS(r1)
648	stw	r0,_NIP(r1)	/* Return to switch caller */
649	mfmsr	r11
650	li	r0,MSR_FP	/* Disable floating-point */
651#ifdef CONFIG_ALTIVEC
652BEGIN_FTR_SECTION
653	oris	r0,r0,MSR_VEC@h	/* Disable altivec */
654	mfspr	r12,SPRN_VRSAVE	/* save vrsave register value */
655	stw	r12,THREAD+THREAD_VRSAVE(r2)
656END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
657#endif /* CONFIG_ALTIVEC */
658#ifdef CONFIG_SPE
659BEGIN_FTR_SECTION
660	oris	r0,r0,MSR_SPE@h	 /* Disable SPE */
661	mfspr	r12,SPRN_SPEFSCR /* save spefscr register value */
662	stw	r12,THREAD+THREAD_SPEFSCR(r2)
663END_FTR_SECTION_IFSET(CPU_FTR_SPE)
664#endif /* CONFIG_SPE */
665	and.	r0,r0,r11	/* FP or altivec or SPE enabled? */
666	beq+	1f
667	andc	r11,r11,r0
668	MTMSRD(r11)
669	isync
6701:	stw	r11,_MSR(r1)
671	mfcr	r10
672	stw	r10,_CCR(r1)
673	stw	r1,KSP(r3)	/* Set old stack pointer */
674
675#ifdef CONFIG_SMP
676	/* We need a sync somewhere here to make sure that if the
677	 * previous task gets rescheduled on another CPU, it sees all
678	 * stores it has performed on this one.
679	 */
680	sync
681#endif /* CONFIG_SMP */
682
683	tophys(r0,r4)
684	CLR_TOP32(r0)
685	mtspr	SPRN_SPRG_THREAD,r0	/* Update current THREAD phys addr */
686	lwz	r1,KSP(r4)	/* Load new stack pointer */
687
688	/* save the old current 'last' for return value */
689	mr	r3,r2
690	addi	r2,r4,-THREAD	/* Update current */
691
692#ifdef CONFIG_ALTIVEC
693BEGIN_FTR_SECTION
694	lwz	r0,THREAD+THREAD_VRSAVE(r2)
695	mtspr	SPRN_VRSAVE,r0		/* if G4, restore VRSAVE reg */
696END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
697#endif /* CONFIG_ALTIVEC */
698#ifdef CONFIG_SPE
699BEGIN_FTR_SECTION
700	lwz	r0,THREAD+THREAD_SPEFSCR(r2)
701	mtspr	SPRN_SPEFSCR,r0		/* restore SPEFSCR reg */
702END_FTR_SECTION_IFSET(CPU_FTR_SPE)
703#endif /* CONFIG_SPE */
704
705	lwz	r0,_CCR(r1)
706	mtcrf	0xFF,r0
707	/* r3-r12 are destroyed -- Cort */
708	REST_NVGPRS(r1)
709
710	lwz	r4,_NIP(r1)	/* Return to _switch caller in new task */
711	mtlr	r4
712	addi	r1,r1,INT_FRAME_SIZE
713	blr
714
715	.globl	fast_exception_return
716fast_exception_return:
717#if !(defined(CONFIG_4xx) || defined(CONFIG_BOOKE))
718	andi.	r10,r9,MSR_RI		/* check for recoverable interrupt */
719	beq	1f			/* if not, we've got problems */
720#endif
721
7222:	REST_4GPRS(3, r11)
723	lwz	r10,_CCR(r11)
724	REST_GPR(1, r11)
725	mtcr	r10
726	lwz	r10,_LINK(r11)
727	mtlr	r10
728	REST_GPR(10, r11)
729	mtspr	SPRN_SRR1,r9
730	mtspr	SPRN_SRR0,r12
731	REST_GPR(9, r11)
732	REST_GPR(12, r11)
733	lwz	r11,GPR11(r11)
734	SYNC
735	RFI
736
737#if !(defined(CONFIG_4xx) || defined(CONFIG_BOOKE))
738/* check if the exception happened in a restartable section */
7391:	lis	r3,exc_exit_restart_end@ha
740	addi	r3,r3,exc_exit_restart_end@l
741	cmplw	r12,r3
742	bge	3f
743	lis	r4,exc_exit_restart@ha
744	addi	r4,r4,exc_exit_restart@l
745	cmplw	r12,r4
746	blt	3f
747	lis	r3,fee_restarts@ha
748	tophys(r3,r3)
749	lwz	r5,fee_restarts@l(r3)
750	addi	r5,r5,1
751	stw	r5,fee_restarts@l(r3)
752	mr	r12,r4		/* restart at exc_exit_restart */
753	b	2b
754
755	.section .bss
756	.align	2
757fee_restarts:
758	.space	4
759	.previous
760
761/* aargh, a nonrecoverable interrupt, panic */
762/* aargh, we don't know which trap this is */
763/* but the 601 doesn't implement the RI bit, so assume it's OK */
7643:
765BEGIN_FTR_SECTION
766	b	2b
767END_FTR_SECTION_IFSET(CPU_FTR_601)
768	li	r10,-1
769	stw	r10,_TRAP(r11)
770	addi	r3,r1,STACK_FRAME_OVERHEAD
771	lis	r10,MSR_KERNEL@h
772	ori	r10,r10,MSR_KERNEL@l
773	bl	transfer_to_handler_full
774	.long	nonrecoverable_exception
775	.long	ret_from_except
776#endif
777
778	.globl	ret_from_except_full
779ret_from_except_full:
780	REST_NVGPRS(r1)
781	/* fall through */
782
783	.globl	ret_from_except
784ret_from_except:
785	/* Hard-disable interrupts so that current_thread_info()->flags
786	 * can't change between when we test it and when we return
787	 * from the interrupt. */
788	/* Note: We don't bother telling lockdep about it */
789	LOAD_MSR_KERNEL(r10,MSR_KERNEL)
790	SYNC			/* Some chip revs have problems here... */
791	MTMSRD(r10)		/* disable interrupts */
792
793	lwz	r3,_MSR(r1)	/* Returning to user mode? */
794	andi.	r0,r3,MSR_PR
795	beq	resume_kernel
796
797user_exc_return:		/* r10 contains MSR_KERNEL here */
798	/* Check current_thread_info()->flags */
799	rlwinm	r9,r1,0,0,(31-THREAD_SHIFT)
800	lwz	r9,TI_FLAGS(r9)
801	andi.	r0,r9,_TIF_USER_WORK_MASK
802	bne	do_work
803
804restore_user:
805#if defined(CONFIG_4xx) || defined(CONFIG_BOOKE)
806	/* Check whether this process has its own DBCR0 value.  The internal
807	   debug mode bit tells us that dbcr0 should be loaded. */
808	lwz	r0,THREAD+THREAD_DBCR0(r2)
809	andis.	r10,r0,DBCR0_IDM@h
810	bnel-	load_dbcr0
811#endif
812
813#ifdef CONFIG_PREEMPT
814	b	restore
815
816/* N.B. the only way to get here is from the beq following ret_from_except. */
817resume_kernel:
818	/* check current_thread_info->preempt_count */
819	rlwinm	r9,r1,0,0,(31-THREAD_SHIFT)
820	lwz	r0,TI_PREEMPT(r9)
821	cmpwi	0,r0,0		/* if non-zero, just restore regs and return */
822	bne	restore
823	lwz	r0,TI_FLAGS(r9)
824	andi.	r0,r0,_TIF_NEED_RESCHED
825	beq+	restore
826	andi.	r0,r3,MSR_EE	/* interrupts off? */
827	beq	restore		/* don't schedule if so */
828#ifdef CONFIG_TRACE_IRQFLAGS
829	/* Lockdep thinks IRQs are enabled, but we need to call
830	 * preempt_schedule_irq with IRQs off, so we inform lockdep
831	 * now that we -did- turn them off already
832	 */
833	bl	trace_hardirqs_off
834#endif
8351:	bl	preempt_schedule_irq
836	rlwinm	r9,r1,0,0,(31-THREAD_SHIFT)
837	lwz	r3,TI_FLAGS(r9)
838	andi.	r0,r3,_TIF_NEED_RESCHED
839	bne-	1b
840#ifdef CONFIG_TRACE_IRQFLAGS
841	/* And now, to properly rebalance the above, we tell lockdep they
842	 * are being turned back on, which will happen when we return
843	 */
844	bl	trace_hardirqs_on
845#endif
846#else
847resume_kernel:
848#endif /* CONFIG_PREEMPT */
849
850	/* interrupts are hard-disabled at this point */
851restore:
852#ifdef CONFIG_44x
853BEGIN_MMU_FTR_SECTION
854	b	1f
855END_MMU_FTR_SECTION_IFSET(MMU_FTR_TYPE_47x)
856	lis	r4,icache_44x_need_flush@ha
857	lwz	r5,icache_44x_need_flush@l(r4)
858	cmplwi	cr0,r5,0
859	beq+	1f
860	li	r6,0
861	iccci	r0,r0
862	stw	r6,icache_44x_need_flush@l(r4)
8631:
864#endif  /* CONFIG_44x */
865
866	lwz	r9,_MSR(r1)
867#ifdef CONFIG_TRACE_IRQFLAGS
868	/* Lockdep doesn't know about the fact that IRQs are temporarily turned
869	 * off in this assembly code while peeking at TI_FLAGS() and such. However,
870	 * we need to inform it if the exception turned interrupts off, and we
871	 * are about to turn them back on.
872	 *
873	 * The problem here, sadly, is that we don't know whether the exception was
874	 * one that turned interrupts off or not. So we always tell lockdep about
875	 * turning them on here when we go back to wherever we came from with EE
876	 * on, even if that may mean some redundant calls being tracked. Maybe later
877	 * we could encode what the exception did somewhere or test the exception
878	 * type in the pt_regs, but that sounds like overkill.
879	 */
880	andi.	r10,r9,MSR_EE
881	beq	1f
882	bl	trace_hardirqs_on
883	lwz	r9,_MSR(r1)
8841:
885#endif /* CONFIG_TRACE_IRQFLAGS */
886
887	lwz	r0,GPR0(r1)
888	lwz	r2,GPR2(r1)
889	REST_4GPRS(3, r1)
890	REST_2GPRS(7, r1)
891
892	lwz	r10,_XER(r1)
893	lwz	r11,_CTR(r1)
894	mtspr	SPRN_XER,r10
895	mtctr	r11
896
897	PPC405_ERR77(0,r1)
898BEGIN_FTR_SECTION
899	lwarx	r11,0,r1
900END_FTR_SECTION_IFSET(CPU_FTR_NEED_PAIRED_STWCX)
901	stwcx.	r0,0,r1			/* to clear the reservation */
902
903#if !(defined(CONFIG_4xx) || defined(CONFIG_BOOKE))
904	andi.	r10,r9,MSR_RI		/* check if this exception occurred */
905	beql	nonrecoverable		/* at a bad place (MSR:RI = 0) */
906
907	lwz	r10,_CCR(r1)
908	lwz	r11,_LINK(r1)
909	mtcrf	0xFF,r10
910	mtlr	r11
911
912	/*
913	 * Once we put values in SRR0 and SRR1, we are in a state
914	 * where exceptions are not recoverable, since taking an
915	 * exception will trash SRR0 and SRR1.  Therefore we clear the
916	 * MSR:RI bit to indicate this.  If we do take an exception,
917	 * we can't return to the point of the exception but we
918	 * can restart the exception exit path at the label
919	 * exc_exit_restart below.  -- paulus
920	 */
921	LOAD_MSR_KERNEL(r10,MSR_KERNEL & ~MSR_RI)
922	SYNC
923	MTMSRD(r10)		/* clear the RI bit */
924	.globl exc_exit_restart
925exc_exit_restart:
926	lwz	r12,_NIP(r1)
927	FIX_SRR1(r9,r10)
928	mtspr	SPRN_SRR0,r12
929	mtspr	SPRN_SRR1,r9
930	REST_4GPRS(9, r1)
931	lwz	r1,GPR1(r1)
932	.globl exc_exit_restart_end
933exc_exit_restart_end:
934	SYNC
935	RFI
936
937#else /* !(CONFIG_4xx || CONFIG_BOOKE) */
938	/*
939	 * This is a bit different on 4xx/Book-E because it doesn't have
940	 * the RI bit in the MSR.
941	 * The TLB miss handler checks if we have interrupted
942	 * the exception exit path and restarts it if so
943	 * (well maybe one day it will... :).
944	 */
945	lwz	r11,_LINK(r1)
946	mtlr	r11
947	lwz	r10,_CCR(r1)
948	mtcrf	0xff,r10
949	REST_2GPRS(9, r1)
950	.globl exc_exit_restart
951exc_exit_restart:
952	lwz	r11,_NIP(r1)
953	lwz	r12,_MSR(r1)
954exc_exit_start:
955	mtspr	SPRN_SRR0,r11
956	mtspr	SPRN_SRR1,r12
957	REST_2GPRS(11, r1)
958	lwz	r1,GPR1(r1)
959	.globl exc_exit_restart_end
960exc_exit_restart_end:
961	PPC405_ERR77_SYNC
962	rfi
963	b	.			/* prevent prefetch past rfi */
964
965/*
966 * Returning from a critical interrupt in user mode doesn't need
967 * to be any different from a normal exception.  For a critical
968 * interrupt in the kernel, we just return (without checking for
969 * preemption) since the interrupt may have happened at some crucial
970 * place (e.g. inside the TLB miss handler), and because we will be
971 * running with r1 pointing into critical_stack, not the current
972 * process's kernel stack (and therefore current_thread_info() will
973 * give the wrong answer).
974 * We have to restore various SPRs that may have been in use at the
975 * time of the critical interrupt.
976 *
977 */
978#ifdef CONFIG_40x
979#define PPC_40x_TURN_OFF_MSR_DR						    \
980	/* avoid any possible TLB misses here by turning off MSR.DR; we	    \
981	 * assume the instructions here are mapped by a pinned TLB entry */ \
982	li	r10,MSR_IR;						    \
983	mtmsr	r10;							    \
984	isync;								    \
985	tophys(r1, r1);
986#else
987#define PPC_40x_TURN_OFF_MSR_DR
988#endif
989
990#define RET_FROM_EXC_LEVEL(exc_lvl_srr0, exc_lvl_srr1, exc_lvl_rfi)	\
991	REST_NVGPRS(r1);						\
992	lwz	r3,_MSR(r1);						\
993	andi.	r3,r3,MSR_PR;						\
994	LOAD_MSR_KERNEL(r10,MSR_KERNEL);				\
995	bne	user_exc_return;					\
996	lwz	r0,GPR0(r1);						\
997	lwz	r2,GPR2(r1);						\
998	REST_4GPRS(3, r1);						\
999	REST_2GPRS(7, r1);						\
1000	lwz	r10,_XER(r1);						\
1001	lwz	r11,_CTR(r1);						\
1002	mtspr	SPRN_XER,r10;						\
1003	mtctr	r11;							\
1004	PPC405_ERR77(0,r1);						\
1005	stwcx.	r0,0,r1;		/* to clear the reservation */	\
1006	lwz	r11,_LINK(r1);						\
1007	mtlr	r11;							\
1008	lwz	r10,_CCR(r1);						\
1009	mtcrf	0xff,r10;						\
1010	PPC_40x_TURN_OFF_MSR_DR;					\
1011	lwz	r9,_DEAR(r1);						\
1012	lwz	r10,_ESR(r1);						\
1013	mtspr	SPRN_DEAR,r9;						\
1014	mtspr	SPRN_ESR,r10;						\
1015	lwz	r11,_NIP(r1);						\
1016	lwz	r12,_MSR(r1);						\
1017	mtspr	exc_lvl_srr0,r11;					\
1018	mtspr	exc_lvl_srr1,r12;					\
1019	lwz	r9,GPR9(r1);						\
1020	lwz	r12,GPR12(r1);						\
1021	lwz	r10,GPR10(r1);						\
1022	lwz	r11,GPR11(r1);						\
1023	lwz	r1,GPR1(r1);						\
1024	PPC405_ERR77_SYNC;						\
1025	exc_lvl_rfi;							\
1026	b	.;		/* prevent prefetch past exc_lvl_rfi */
1027
1028#define	RESTORE_xSRR(exc_lvl_srr0, exc_lvl_srr1)			\
1029	lwz	r9,_##exc_lvl_srr0(r1);					\
1030	lwz	r10,_##exc_lvl_srr1(r1);				\
1031	mtspr	SPRN_##exc_lvl_srr0,r9;					\
1032	mtspr	SPRN_##exc_lvl_srr1,r10;
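/*
 * For example, RESTORE_xSRR(CSRR0,CSRR1) expands (via the ## pasting
 * above) to roughly:
 *	lwz	r9,_CSRR0(r1)
 *	lwz	r10,_CSRR1(r1)
 *	mtspr	SPRN_CSRR0,r9
 *	mtspr	SPRN_CSRR1,r10
 * i.e. it puts back values that the *_transfer_to_handler paths saved
 * into the exception frame on the way in.
 */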
1033
1034#if defined(CONFIG_PPC_BOOK3E_MMU)
1035#ifdef CONFIG_PHYS_64BIT
1036#define	RESTORE_MAS7							\
1037	lwz	r11,MAS7(r1);						\
1038	mtspr	SPRN_MAS7,r11;
1039#else
1040#define	RESTORE_MAS7
1041#endif /* CONFIG_PHYS_64BIT */
1042#define RESTORE_MMU_REGS						\
1043	lwz	r9,MAS0(r1);						\
1044	lwz	r10,MAS1(r1);						\
1045	lwz	r11,MAS2(r1);						\
1046	mtspr	SPRN_MAS0,r9;						\
1047	lwz	r9,MAS3(r1);						\
1048	mtspr	SPRN_MAS1,r10;						\
1049	lwz	r10,MAS6(r1);						\
1050	mtspr	SPRN_MAS2,r11;						\
1051	mtspr	SPRN_MAS3,r9;						\
1052	mtspr	SPRN_MAS6,r10;						\
1053	RESTORE_MAS7;
1054#elif defined(CONFIG_44x)
1055#define RESTORE_MMU_REGS						\
1056	lwz	r9,MMUCR(r1);						\
1057	mtspr	SPRN_MMUCR,r9;
1058#else
1059#define RESTORE_MMU_REGS
1060#endif
1061
1062#ifdef CONFIG_40x
1063	.globl	ret_from_crit_exc
1064ret_from_crit_exc:
1065	mfspr	r9,SPRN_SPRG_THREAD
1066	lis	r10,saved_ksp_limit@ha;
1067	lwz	r10,saved_ksp_limit@l(r10);
1068	tovirt(r9,r9);
1069	stw	r10,KSP_LIMIT(r9)
1070	lis	r9,crit_srr0@ha;
1071	lwz	r9,crit_srr0@l(r9);
1072	lis	r10,crit_srr1@ha;
1073	lwz	r10,crit_srr1@l(r10);
1074	mtspr	SPRN_SRR0,r9;
1075	mtspr	SPRN_SRR1,r10;
1076	RET_FROM_EXC_LEVEL(SPRN_CSRR0, SPRN_CSRR1, PPC_RFCI)
1077#endif /* CONFIG_40x */
1078
1079#ifdef CONFIG_BOOKE
1080	.globl	ret_from_crit_exc
1081ret_from_crit_exc:
1082	mfspr	r9,SPRN_SPRG_THREAD
1083	lwz	r10,SAVED_KSP_LIMIT(r1)
1084	stw	r10,KSP_LIMIT(r9)
1085	RESTORE_xSRR(SRR0,SRR1);
1086	RESTORE_MMU_REGS;
1087	RET_FROM_EXC_LEVEL(SPRN_CSRR0, SPRN_CSRR1, PPC_RFCI)
1088
1089	.globl	ret_from_debug_exc
1090ret_from_debug_exc:
1091	mfspr	r9,SPRN_SPRG_THREAD
1092	lwz	r10,SAVED_KSP_LIMIT(r1)
1093	stw	r10,KSP_LIMIT(r9)
1094	lwz	r9,THREAD_INFO-THREAD(r9)
1095	rlwinm	r10,r1,0,0,(31-THREAD_SHIFT)
1096	lwz	r10,TI_PREEMPT(r10)
1097	stw	r10,TI_PREEMPT(r9)
1098	RESTORE_xSRR(SRR0,SRR1);
1099	RESTORE_xSRR(CSRR0,CSRR1);
1100	RESTORE_MMU_REGS;
1101	RET_FROM_EXC_LEVEL(SPRN_DSRR0, SPRN_DSRR1, PPC_RFDI)
1102
1103	.globl	ret_from_mcheck_exc
1104ret_from_mcheck_exc:
1105	mfspr	r9,SPRN_SPRG_THREAD
1106	lwz	r10,SAVED_KSP_LIMIT(r1)
1107	stw	r10,KSP_LIMIT(r9)
1108	RESTORE_xSRR(SRR0,SRR1);
1109	RESTORE_xSRR(CSRR0,CSRR1);
1110	RESTORE_xSRR(DSRR0,DSRR1);
1111	RESTORE_MMU_REGS;
1112	RET_FROM_EXC_LEVEL(SPRN_MCSRR0, SPRN_MCSRR1, PPC_RFMCI)
1113#endif /* CONFIG_BOOKE */
1114
1115/*
1116 * Load the DBCR0 value for a task that is being ptraced,
1117 * having first saved away the global DBCR0.  Note that r0
1118 * has the dbcr0 value to set upon entry to this.
1119 */
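/*
 * global_dbcr0 (in .bss below) provides two words per CPU: the saved
 * DBCR0 value at offset 0 and a counter at offset 4, incremented here
 * and decremented again on the exception-entry path in
 * transfer_to_handler; the "slwi r9,r9,3" scales the CPU number by
 * that 8-byte stride on SMP.
 */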
1120load_dbcr0:
1121	mfmsr	r10		/* first disable debug exceptions */
1122	rlwinm	r10,r10,0,~MSR_DE
1123	mtmsr	r10
1124	isync
1125	mfspr	r10,SPRN_DBCR0
1126	lis	r11,global_dbcr0@ha
1127	addi	r11,r11,global_dbcr0@l
1128#ifdef CONFIG_SMP
1129	rlwinm	r9,r1,0,0,(31-THREAD_SHIFT)
1130	lwz	r9,TI_CPU(r9)
1131	slwi	r9,r9,3
1132	add	r11,r11,r9
1133#endif
1134	stw	r10,0(r11)
1135	mtspr	SPRN_DBCR0,r0
1136	lwz	r10,4(r11)
1137	addi	r10,r10,1
1138	stw	r10,4(r11)
1139	li	r11,-1
1140	mtspr	SPRN_DBSR,r11	/* clear all pending debug events */
1141	blr
1142
1143	.section .bss
1144	.align	4
1145global_dbcr0:
1146	.space	8*NR_CPUS
1147	.previous
1148#endif /* !(CONFIG_4xx || CONFIG_BOOKE) */
1149
1150do_work:			/* r10 contains MSR_KERNEL here */
1151	andi.	r0,r9,_TIF_NEED_RESCHED
1152	beq	do_user_signal
1153
1154do_resched:			/* r10 contains MSR_KERNEL here */
1155	/* Note: We don't need to inform lockdep that we are enabling
1156	 * interrupts here. As far as it knows, they are already enabled
1157	 */
1158	ori	r10,r10,MSR_EE
1159	SYNC
1160	MTMSRD(r10)		/* hard-enable interrupts */
1161	bl	schedule
1162recheck:
1163	/* Note: And we don't tell it that we are disabling them
1164	 * again either. Those disable/enable cycles used to peek at
1165	 * TI_FLAGS aren't advertised.
1166	 */
1167	LOAD_MSR_KERNEL(r10,MSR_KERNEL)
1168	SYNC
1169	MTMSRD(r10)		/* disable interrupts */
1170	rlwinm	r9,r1,0,0,(31-THREAD_SHIFT)
1171	lwz	r9,TI_FLAGS(r9)
1172	andi.	r0,r9,_TIF_NEED_RESCHED
1173	bne-	do_resched
1174	andi.	r0,r9,_TIF_USER_WORK_MASK
1175	beq	restore_user
1176do_user_signal:			/* r10 contains MSR_KERNEL here */
1177	ori	r10,r10,MSR_EE
1178	SYNC
1179	MTMSRD(r10)		/* hard-enable interrupts */
1180	/* save r13-r31 in the exception frame, if not already done */
1181	lwz	r3,_TRAP(r1)
1182	andi.	r0,r3,1
1183	beq	2f
1184	SAVE_NVGPRS(r1)
1185	rlwinm	r3,r3,0,0,30
1186	stw	r3,_TRAP(r1)
11872:	addi	r3,r1,STACK_FRAME_OVERHEAD
1188	mr	r4,r9
1189	bl	do_signal
1190	REST_NVGPRS(r1)
1191	b	recheck
1192
1193/*
1194 * We come here when we are at the end of handling an exception
1195 * that occurred at a place where taking an exception will lose
1196 * state information, such as the contents of SRR0 and SRR1.
1197 */
1198nonrecoverable:
1199	lis	r10,exc_exit_restart_end@ha
1200	addi	r10,r10,exc_exit_restart_end@l
1201	cmplw	r12,r10
1202	bge	3f
1203	lis	r11,exc_exit_restart@ha
1204	addi	r11,r11,exc_exit_restart@l
1205	cmplw	r12,r11
1206	blt	3f
1207	lis	r10,ee_restarts@ha
1208	lwz	r12,ee_restarts@l(r10)
1209	addi	r12,r12,1
1210	stw	r12,ee_restarts@l(r10)
1211	mr	r12,r11		/* restart at exc_exit_restart */
1212	blr
12133:	/* OK, we can't recover, kill this process */
1214	/* but the 601 doesn't implement the RI bit, so assume it's OK */
1215BEGIN_FTR_SECTION
1216	blr
1217END_FTR_SECTION_IFSET(CPU_FTR_601)
1218	lwz	r3,_TRAP(r1)
1219	andi.	r0,r3,1
1220	beq	4f
1221	SAVE_NVGPRS(r1)
1222	rlwinm	r3,r3,0,0,30
1223	stw	r3,_TRAP(r1)
12244:	addi	r3,r1,STACK_FRAME_OVERHEAD
1225	bl	nonrecoverable_exception
1226	/* shouldn't return */
1227	b	4b
1228
1229	.section .bss
1230	.align	2
1231ee_restarts:
1232	.space	4
1233	.previous
1234
1235/*
1236 * PROM code for specific machines follows.  Put it
1237 * here so it's easy to add arch-specific sections later.
1238 * -- Cort
1239 */
1240#ifdef CONFIG_PPC_RTAS
1241/*
1242 * On CHRP, the Run-Time Abstraction Services (RTAS) have to be
1243 * called with the MMU off.
1244 */
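/*
 * Roughly what happens below: the RTAS entry point is loaded from the
 * rtas descriptor, the return label "1:" and the stack pointer are
 * translated to physical addresses, interrupts are hard-disabled, and
 * an RFI with MSR_IR/MSR_DR cleared enters RTAS in real mode.  On the
 * way back, the code at "1:" recovers the original MSR from the frame
 * and a second RFI returns to the caller.
 */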
1245_GLOBAL(enter_rtas)
1246	stwu	r1,-INT_FRAME_SIZE(r1)
1247	mflr	r0
1248	stw	r0,INT_FRAME_SIZE+4(r1)
1249	LOAD_REG_ADDR(r4, rtas)
1250	lis	r6,1f@ha	/* physical return address for rtas */
1251	addi	r6,r6,1f@l
1252	tophys(r6,r6)
1253	tophys(r7,r1)
1254	lwz	r8,RTASENTRY(r4)
1255	lwz	r4,RTASBASE(r4)
1256	mfmsr	r9
1257	stw	r9,8(r1)
1258	LOAD_MSR_KERNEL(r0,MSR_KERNEL)
1259	SYNC			/* disable interrupts so SRR0/1 */
1260	MTMSRD(r0)		/* don't get trashed */
1261	li	r9,MSR_KERNEL & ~(MSR_IR|MSR_DR)
1262	mtlr	r6
1263	mtspr	SPRN_SPRG_RTAS,r7
1264	mtspr	SPRN_SRR0,r8
1265	mtspr	SPRN_SRR1,r9
1266	RFI
12671:	tophys(r9,r1)
1268	lwz	r8,INT_FRAME_SIZE+4(r9)	/* get return address */
1269	lwz	r9,8(r9)	/* original msr value */
1270	FIX_SRR1(r9,r0)
1271	addi	r1,r1,INT_FRAME_SIZE
1272	li	r0,0
1273	mtspr	SPRN_SPRG_RTAS,r0
1274	mtspr	SPRN_SRR0,r8
1275	mtspr	SPRN_SRR1,r9
1276	RFI			/* return to caller */
1277
1278	.globl	machine_check_in_rtas
1279machine_check_in_rtas:
1280	twi	31,0,0
1281
1282#endif /* CONFIG_PPC_RTAS */
1283
1284#ifdef CONFIG_FUNCTION_TRACER
1285#ifdef CONFIG_DYNAMIC_FTRACE
1286_GLOBAL(mcount)
1287_GLOBAL(_mcount)
1288	/*
1289	 * _mcount on PPC32 is required to preserve the link register,
1290	 * but we have r0 to play with. We use r0 to move the return
1291	 * address (back into the function that called mcount) into the
1292	 * ctr register, restore the link register from the stack, and
1293	 * then jump back using the ctr register.
1294	 */
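	/*
	 * With the usual -pg calling sequence, 4(r1) is the LR save
	 * word where the profiled function stashed its caller's return
	 * address before "bl _mcount" clobbered the link register; the
	 * lwz below puts that value back into LR, while the address to
	 * resume at inside the profiled function travels via ctr.
	 */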
1295	mflr	r0
1296	mtctr	r0
1297	lwz	r0, 4(r1)
1298	mtlr	r0
1299	bctr
1300
1301_GLOBAL(ftrace_caller)
1302	MCOUNT_SAVE_FRAME
1303	/* r3 ends up with link register */
1304	subi	r3, r3, MCOUNT_INSN_SIZE
1305.globl ftrace_call
1306ftrace_call:
1307	bl	ftrace_stub
1308	nop
1309#ifdef CONFIG_FUNCTION_GRAPH_TRACER
1310.globl ftrace_graph_call
1311ftrace_graph_call:
1312	b	ftrace_graph_stub
1313_GLOBAL(ftrace_graph_stub)
1314#endif
1315	MCOUNT_RESTORE_FRAME
1316	/* old link register ends up in ctr reg */
1317	bctr
1318#else
1319_GLOBAL(mcount)
1320_GLOBAL(_mcount)
1321
1322	MCOUNT_SAVE_FRAME
1323
1324	subi	r3, r3, MCOUNT_INSN_SIZE
1325	LOAD_REG_ADDR(r5, ftrace_trace_function)
1326	lwz	r5,0(r5)
1327
1328	mtctr	r5
1329	bctrl
1330	nop
1331
1332#ifdef CONFIG_FUNCTION_GRAPH_TRACER
1333	b	ftrace_graph_caller
1334#endif
1335	MCOUNT_RESTORE_FRAME
1336	bctr
1337#endif
1338
1339_GLOBAL(ftrace_stub)
1340	blr
1341
1342#ifdef CONFIG_FUNCTION_GRAPH_TRACER
1343_GLOBAL(ftrace_graph_caller)
1344	/* load r4 with local address */
1345	lwz	r4, 44(r1)
1346	subi	r4, r4, MCOUNT_INSN_SIZE
1347
1348	/* get the parent address */
1349	addi	r3, r1, 52
1350
1351	bl	prepare_ftrace_return
1352	nop
1353
1354	MCOUNT_RESTORE_FRAME
1355	/* old link register ends up in ctr reg */
1356	bctr
1357
1358_GLOBAL(return_to_handler)
1359	/* need to save return values */
1360	stwu	r1, -32(r1)
1361	stw	r3, 20(r1)
1362	stw	r4, 16(r1)
1363	stw	r31, 12(r1)
1364	mr	r31, r1
1365
1366	bl	ftrace_return_to_handler
1367	nop
1368
1369	/* return value has real return address */
1370	mtlr	r3
1371
1372	lwz	r3, 20(r1)
1373	lwz	r4, 16(r1)
1374	lwz	r31,12(r1)
1375	lwz	r1, 0(r1)
1376
1377	/* Jump back to real return address */
1378	blr
1379#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
1380
1381#endif /* CONFIG_FUNCTION_TRACER */
1382