1/*
2 * BK Id: SCCS/s.entry.S 1.26 01/25/02 15:15:24 benh
3 */
4/*
5 *  PowerPC version
6 *    Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
7 *  Rewritten by Cort Dougan (cort@fsmlabs.com) for PReP
8 *    Copyright (C) 1996 Cort Dougan <cort@fsmlabs.com>
9 *  Adapted for Power Macintosh by Paul Mackerras.
10 *  Low-level exception handlers and MMU support
11 *  rewritten by Paul Mackerras.
12 *    Copyright (C) 1996 Paul Mackerras.
13 *  MPC8xx modifications Copyright (C) 1997 Dan Malek (dmalek@jlc.net).
14 *
15 *  This file contains the system call entry code, context switch
16 *  code, and exception/interrupt return code for PowerPC.
17 *
18 *  This program is free software; you can redistribute it and/or
19 *  modify it under the terms of the GNU General Public License
20 *  as published by the Free Software Foundation; either version
21 *  2 of the License, or (at your option) any later version.
22 *
23 */
24
25#include <linux/config.h>
26#include <linux/errno.h>
27#include <linux/sys.h>
28#include <linux/threads.h>
29#include <asm/processor.h>
30#include <asm/page.h>
31#include <asm/mmu.h>
32#include <asm/cputable.h>
33#include <asm/ppc_asm.h>
34#include "ppc_defs.h"
35
/*
 * Debug switches: define SHOW_SYSCALLS to printk every system call
 * entry and result in DoSyscall below; additionally define
 * SHOW_SYSCALLS_TASK to restrict that tracing to the single task
 * whose task_struct pointer is stored in show_syscalls_task.
 */
#undef SHOW_SYSCALLS
#undef SHOW_SYSCALLS_TASK

#ifdef SHOW_SYSCALLS_TASK
	.data
show_syscalls_task:
	.long	-1	/* task_struct pointer to trace; -1 matches no task until set */
#endif
44
/*
 * Handle a system call.
 *
 * On entry (registers set up by the exception prologue):
 *	r0 = system call number (also saved in GPR0(r1))
 *	r1 = kernel stack pointer with a full exception frame at *r1
 *	r2 = pointer to the current task_struct
 * Syscall arguments are taken from the saved GPR3..GPR8 slots of the
 * frame.  The handler's result is written to RESULT(r1)/GPR3(r1)
 * (negated and with CR0.SO set in the saved CCR on error) and we exit
 * through ret_from_except.
 */
	.text
_GLOBAL(DoSyscall)
	stw	r0,THREAD+LAST_SYSCALL(r2)	/* record syscall nr in thread_struct */
	lwz	r11,_CCR(r1)	/* Clear SO bit in CR */
	lis	r10,0x1000
	andc	r11,r11,r10
	stw	r11,_CCR(r1)
#ifdef SHOW_SYSCALLS
#ifdef SHOW_SYSCALLS_TASK
	/* only trace the task whose pointer is in show_syscalls_task */
	lis	r31,show_syscalls_task@ha
	lwz	r31,show_syscalls_task@l(r31)
	cmp	0,r2,r31
	bne	1f
#endif
	/* printk the syscall number and first five args (format at 7:) */
	lis	r3,7f@ha
	addi	r3,r3,7f@l
	lwz	r4,GPR0(r1)
	lwz	r5,GPR3(r1)
	lwz	r6,GPR4(r1)
	lwz	r7,GPR5(r1)
	lwz	r8,GPR6(r1)
	lwz	r9,GPR7(r1)
	bl	printk
	/* second printk for the remaining args and current (format at 77:) */
	lis	r3,77f@ha
	addi	r3,r3,77f@l
	lwz	r4,GPR8(r1)
	lwz	r5,GPR9(r1)
	mr	r6,r2
	bl	printk
	/* reload registers clobbered by the printk calls */
	lwz	r0,GPR0(r1)
	lwz	r3,GPR3(r1)
	lwz	r4,GPR4(r1)
	lwz	r5,GPR5(r1)
	lwz	r6,GPR6(r1)
	lwz	r7,GPR7(r1)
	lwz	r8,GPR8(r1)
1:
#endif /* SHOW_SYSCALLS */
	cmpi	0,r0,0x7777	/* Special case for 'sys_sigreturn' */
	beq-	10f
	cmpi    0,r0,0x6666     /* Special case for 'sys_rt_sigreturn' */
	beq-    16f
	lwz	r10,TASK_PTRACE(r2)	/* being syscall-traced (ptrace)? */
	andi.	r10,r10,PT_TRACESYS
	bne-	50f			/* yes: take the traced-call path */
	cmpli	0,r0,NR_syscalls	/* unsigned compare rejects bogus numbers */
	bge-	66f
	lis	r10,sys_call_table@h
	ori	r10,r10,sys_call_table@l
	slwi	r0,r0,2			/* index a table of 4-byte pointers */
	lwzx	r10,r10,r0	/* Fetch system call handler [ptr] */
	cmpi	0,r10,0		/* NULL entry => not implemented */
	beq-	66f
	mtlr	r10
	addi	r9,r1,STACK_FRAME_OVERHEAD	/* r9 = pt_regs ptr for handlers that take it */
	blrl			/* Call handler */
	.globl	ret_from_syscall_1
ret_from_syscall_1:
20:	stw	r3,RESULT(r1)	/* Save result */
#ifdef SHOW_SYSCALLS
#ifdef SHOW_SYSCALLS_TASK
	cmp	0,r2,r31
	bne	91f
#endif
	/* printk the result (format at 79:), then reload it */
	mr	r4,r3
	lis	r3,79f@ha
	addi	r3,r3,79f@l
	bl	printk
	lwz	r3,RESULT(r1)
91:
#endif
	/* results in [-_LAST_ERRNO, -1] are errors: negate and set CR0.SO */
	li	r10,-_LAST_ERRNO
	cmpl	0,r3,r10	/* unsigned compare catches only that range */
	blt	30f
	neg	r3,r3
	cmpi	0,r3,ERESTARTNOHAND
	bne	22f
	li	r3,EINTR	/* ERESTARTNOHAND reaching here becomes EINTR */
22:	lwz	r10,_CCR(r1)	/* Set SO bit in CR */
	oris	r10,r10,0x1000
	stw	r10,_CCR(r1)
30:	stw	r3,GPR3(r1)	/* Update return value */
	b	ret_from_except
66:	li	r3,ENOSYS	/* bad or unimplemented syscall number */
	b	22b
/* sys_sigreturn */
10:	addi	r3,r1,STACK_FRAME_OVERHEAD
	bl	sys_sigreturn
	cmpi    0,r3,0          /* Check for restarted system call */
	bge     ret_from_except
	b       20b
/* sys_rt_sigreturn */
16:	addi    r3,r1,STACK_FRAME_OVERHEAD
	bl      sys_rt_sigreturn
	cmpi	0,r3,0		/* Check for restarted system call */
	bge	ret_from_except
	b	20b
/* Traced system call support */
50:	bl	syscall_trace	/* notify the tracer before dispatching */
	lwz	r0,GPR0(r1)	/* Restore original registers */
	lwz	r3,GPR3(r1)
	lwz	r4,GPR4(r1)
	lwz	r5,GPR5(r1)
	lwz	r6,GPR6(r1)
	lwz	r7,GPR7(r1)
	lwz	r8,GPR8(r1)
	lwz	r9,GPR9(r1)
	cmpli	0,r0,NR_syscalls	/* same validation as the fast path */
	bge-	66f
	lis	r10,sys_call_table@h
	ori	r10,r10,sys_call_table@l
	slwi	r0,r0,2
	lwzx	r10,r10,r0	/* Fetch system call handler [ptr] */
	cmpi	0,r10,0
	beq-	66f
	mtlr	r10
	addi	r9,r1,STACK_FRAME_OVERHEAD
	blrl			/* Call handler */
	.globl	ret_from_syscall_2
ret_from_syscall_2:
	stw	r3,RESULT(r1)	/* Save result */
	stw	r3,GPR0(r1)	/* temporary gross hack to make strace work */
	li	r10,-_LAST_ERRNO	/* error conversion, as on the fast path */
	cmpl	0,r3,r10
	blt	60f
	neg	r3,r3
	cmpi	0,r3,ERESTARTNOHAND
	bne	52f
	li	r3,EINTR
52:	lwz	r10,_CCR(r1)	/* Set SO bit in CR */
	oris	r10,r10,0x1000
	stw	r10,_CCR(r1)
60:	stw	r3,GPR3(r1)	/* Update return value */
	bl	syscall_trace	/* notify the tracer of the result */
	b	ret_from_except
66:	li	r3,ENOSYS
	b	52b
#ifdef SHOW_SYSCALLS
7:	.string	"syscall %d(%x, %x, %x, %x, %x, "
77:	.string	"%x, %x), current=%p\n"
79:	.string	" -> %x\n"
	.align	2,0
#endif
191
/*
 * This routine switches between two different tasks.  The process
 * state of one is saved on its kernel stack.  Then the state
 * of the other is restored from its kernel stack.  The memory
 * management hardware is updated to the second process's state.
 * Finally, we can return to the second process.
 * On entry, r3 points to the THREAD for the current task, r4
 * points to the THREAD for the new task.
 * On return (in the new task's context), r3 holds the task_struct
 * pointer of the previously running task (the old 'current').
 *
 * Note: there are two ways to get to the "going out" portion
 * of this code; either by coming in via the entry (_switch)
 * or via "fork" which must set up an environment equivalent
 * to the "_switch" path.  If you change this, you'll have to
 * change the fork code also.
 *
 * The code which creates the new task context is in 'copy_thread'
 * in arch/ppc/kernel/process.c
 */
_GLOBAL(_switch)
	stwu	r1,-INT_FRAME_SIZE(r1)	/* push a full frame for the outgoing state */
	stw	r0,GPR0(r1)
	lwz	r0,0(r1)		/* back chain = caller's r1 */
	stw	r0,GPR1(r1)
	/* r3-r13 are caller saved -- Cort */
	SAVE_GPR(2, r1)
	SAVE_8GPRS(14, r1)
	SAVE_10GPRS(22, r1)
	mflr	r20		/* Return to switch caller */
	stw	r20,INT_FRAME_SIZE+4(r1)
	mfmsr	r22
	li	r0,MSR_FP	/* Disable floating-point */
#ifdef CONFIG_ALTIVEC
BEGIN_FTR_SECTION
	oris	r0,r0,MSR_VEC@h	/* Disable altivec */
END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
#endif /* CONFIG_ALTIVEC */
	and.	r0,r0,r22	/* FP or altivec enabled? */
	beq+	1f
	andc	r22,r22,r0	/* clear those bits in the MSR we save/restore */
	mtmsr	r22
	isync
1:	stw	r20,_NIP(r1)	/* resume point for the outgoing task */
	stw	r22,_MSR(r1)
	stw	r20,_LINK(r1)
	mfcr	r20
	mfctr	r22
	mfspr	r23,XER
	stw	r20,_CCR(r1)
	stw	r22,_CTR(r1)
	stw	r23,_XER(r1)
	li	r0,0x0ff0	/* pseudo trap value tagging a switch frame */
	stw	r0,TRAP(r1)
	stw	r1,KSP(r3)	/* Set old stack pointer */

	tophys(r0,r4)
	CLR_TOP32(r0)
	mtspr	SPRG3,r0	/* Update current THREAD phys addr */
	lwz	r1,KSP(r4)	/* Load new stack pointer */
	/* save the old current 'last' for return value */
	mr	r3,r2
	addi	r2,r4,-THREAD	/* Update current */
	lwz	r0,_CCR(r1)
	mtcrf	0xFF,r0
	/* r3-r13 are destroyed -- Cort */
	REST_2GPRS(14, r1)	/* restore r14-r31 of the incoming task */
	REST_8GPRS(16, r1)
	REST_8GPRS(24, r1)

	lwz	r4,_NIP(r1)	/* Return to _switch caller in new task */
	mtlr	r4
	addi	r1,r1,INT_FRAME_SIZE
	blr
264
/*
 * First code run in the context of a newly created task: finish the
 * scheduler bookkeeping (schedule_tail), report the syscall exit to a
 * tracer if the task is being ptraced, then leave via the common
 * exception-return path.
 */
	.globl	ret_from_fork
ret_from_fork:
	bl	schedule_tail
	lwz	r0,TASK_PTRACE(r2)	/* if this task is being syscall-traced, */
	andi.	r0,r0,PT_TRACESYS	/* call syscall_trace before returning */
	bnel-	syscall_trace
	b	ret_from_except
272
/*
 * Common exception/interrupt exit.  When returning to user mode we
 * check need_resched and deliver pending signals first, then 'restore'
 * reloads the entire register state from the exception frame at r1
 * and drops back to the interrupted context with rfi.
 */
	.globl	ret_from_intercept
ret_from_intercept:
	/*
	 * We may be returning from RTL and cannot do the normal checks
	 * -- Cort
	 */
	cmpi	0,r3,0
	beq	restore		/* r3 == 0: skip resched/signal checks */
	.globl	ret_from_except
ret_from_except:
	lwz	r3,_MSR(r1)	/* Returning to user mode? */
	andi.	r3,r3,MSR_PR
	beq+	do_signal_ret	/* returning to kernel: skip resched/signal checks */
	lwz	r3,NEED_RESCHED(r2)
	cmpi	0,r3,0		/* check need_resched flag */
	beq+	7f
	bl	schedule
7:	lwz	r5,SIGPENDING(r2) /* Check for pending unblocked signals */
	cmpwi	0,r5,0
	beq+	do_signal_ret
	li	r3,0
	addi	r4,r1,STACK_FRAME_OVERHEAD	/* do_signal(0, regs) */
	bl	do_signal
	.globl	do_signal_ret
do_signal_ret:
	.globl ret_to_user_hook
ret_to_user_hook:
	nop	/* NOTE(review): apparently a patchable hook site -- confirm users */
restore:
	lwz	r3,_XER(r1)
	mtspr	XER,r3
	REST_10GPRS(9,r1)	/* restore r9-r30 and r31 from the frame */
	REST_10GPRS(19,r1)
	REST_2GPRS(29,r1)
	REST_GPR(31,r1)

	/* make sure we hard disable here, even if rtl is active, to protect
	 * SRR[01] and SPRG2 -- Cort
	 */
	mfmsr	r0		/* Get current interrupt state */
	rlwinm	r0,r0,0,17,15	/* clear MSR_EE in r0 */
	SYNC			/* Some chip revs have problems here... */
	mtmsr	r0		/* Update machine state */

	stwcx.	r0,0,r1		/* to clear the reservation */

	/* if returning to user mode, set new sprg2 and save kernel SP */
	lwz	r0,_MSR(r1)
	andi.	r0,r0,MSR_PR
	beq+	1f
#ifdef CONFIG_ALTIVEC
BEGIN_FTR_SECTION
	lwz	r0,THREAD+THREAD_VRSAVE(r2)
	mtspr	SPRN_VRSAVE,r0		/* if G4, restore VRSAVE reg */
END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
#endif /* CONFIG_ALTIVEC */
	addi	r0,r1,INT_FRAME_SIZE	/* size of frame */
	stw	r0,THREAD+KSP(r2)	/* save kernel stack pointer */
	tophys(r8,r1)
	CLR_TOP32(r8)
	mtspr	SPRG2,r8		/* phys exception stack pointer */
1:
	lwz	r3,_CTR(r1)
	lwz	r0,_LINK(r1)
	mtctr	r3
	mtlr	r0
	REST_4GPRS(3, r1)	/* restore r3-r8 */
	REST_2GPRS(7, r1)

#ifndef CONFIG_SMP
	/* dummy loads: touch the frame so any MMU fault on it is taken
	 * now, before SRR0/SRR1 are loaded below (see comment following) */
	lwz	r0,GPR0(r1)
	lwz	r0,GPR2(r1)
	lwz	r0,GPR1(r1)
#endif /* ndef CONFIG_SMP */

	/* We re-use r3,r4 here (the load above was to cause the MMU
	 * fault if necessary).  Using r3,r4 removes the need to "dummy"
	 * load the CCR and NIP.  Since we load them we may as well
	 * use them.
	 */
	lwz	r3,_CCR(r1)
	lwz	r4,_NIP(r1)

	lwz	r0,_MSR(r1)
	FIX_SRR1(r0,r2)
	mtspr	SRR1,r0		/* MSR to restore at rfi */
	mtcrf	0xFF,r3
	mtspr	SRR0,r4		/* PC to resume at */
	lwz	r0,GPR0(r1)
	lwz	r2,GPR2(r1)
	lwz	r3,GPR3(r1)
	lwz	r4,GPR4(r1)
	lwz	r1,GPR1(r1)	/* last: switch back to the interrupted stack */
	SYNC
	RFI
368
369
/*
 * PROM code for specific machines follows.  Put it
 * here so it's easy to add arch-specific sections later.
 * -- Cort
 */
#if defined(CONFIG_ALL_PPC)
/*
 * On CHRP, the Run-Time Abstraction Services (RTAS) have to be
 * called with the MMU off.
 *
 * Saves LR at 20(r1) and the current MSR at 8(r1), builds a physical
 * return address at label 1:, then rfi's into rtas_entry with a
 * stripped-down MSR (EE/trace/FP-exception, IR/DR and FP cleared).
 * The code at 1: runs untranslated and rfi's back to the caller with
 * the original MSR.
 */
	.globl	enter_rtas
enter_rtas:
	mflr	r0
	stw	r0,20(r1)	/* save LR in the stack frame */
	lis	r4,rtas_data@ha
	lwz	r4,rtas_data@l(r4)	/* r4 = rtas_data (presumably the RTAS
					 * argument area -- confirm vs. caller) */
	lis	r6,1f@ha	/* physical return address for rtas */
	addi	r6,r6,1f@l
	addis	r6,r6,-KERNELBASE@h	/* virt -> phys */
	subi	r7,r1,INT_FRAME_SIZE	/* phys exception stack ptr for SPRG2 */
	addis	r7,r7,-KERNELBASE@h
	lis	r8,rtas_entry@ha
	lwz	r8,rtas_entry@l(r8)	/* r8 = RTAS entry point */
	mfmsr	r9
	stw	r9,8(r1)	/* save original MSR for the return path */
	li	r0,0
	ori	r0,r0,MSR_EE|MSR_SE|MSR_BE|MSR_FE0|MSR_FE1
	andc	r0,r9,r0	/* r0 = MSR with EE/trace/FP-exception bits off */
	li	r10,MSR_IR|MSR_DR|MSR_FP
	andc	r9,r0,r10	/* r9 = same, also with MMU and FP off, for RTAS */
	SYNC			/* disable interrupts so SRR0/1 */
	mtmsr	r0		/* don't get trashed */
	mtlr	r6
	CLR_TOP32(r7)
	mtspr	SPRG2,r7
	mtspr	SRR0,r8
	mtspr	SRR1,r9
	RFI			/* enter RTAS with translation disabled */
1:	addis	r9,r1,-KERNELBASE@h	/* running untranslated: use phys r1 */
	lwz	r8,20(r9)	/* get return address */
	lwz	r9,8(r9)	/* original msr value */
	FIX_SRR1(r9,r0)
	li	r0,0
	mtspr	SPRG2,r0
	mtspr	SRR0,r8
	mtspr	SRR1,r9
	RFI			/* return to caller */
#endif /* CONFIG_ALL_PPC */
418