/*
 * Source: arch/microblaze/kernel/entry.S
 * (from asuswrt-rt-n18u-9.0.0.4.380.2695, release/src-rt-6.x.4708/linux/linux-2.6.36)
 */
/*
 * Low-level system-call handling, trap handlers and context-switching
 *
 * Copyright (C) 2008-2009 Michal Simek <monstr@monstr.eu>
 * Copyright (C) 2008-2009 PetaLogix
 * Copyright (C) 2003		John Williams <jwilliams@itee.uq.edu.au>
 * Copyright (C) 2001,2002	NEC Corporation
 * Copyright (C) 2001,2002	Miles Bader <miles@gnu.org>
 *
 * This file is subject to the terms and conditions of the GNU General
 * Public License. See the file COPYING in the main directory of this
 * archive for more details.
 *
 * Written by Miles Bader <miles@gnu.org>
 * Heavily modified by John Williams for Microblaze
 */
17
18#include <linux/sys.h>
19#include <linux/linkage.h>
20
21#include <asm/entry.h>
22#include <asm/current.h>
23#include <asm/processor.h>
24#include <asm/exceptions.h>
25#include <asm/asm-offsets.h>
26#include <asm/thread_info.h>
27
28#include <asm/page.h>
29#include <asm/unistd.h>
30
31#include <linux/errno.h>
32#include <asm/signal.h>
33
34#undef DEBUG
35
/* The size of a state save frame: struct pt_regs plus the argument
 * spill area that sits below it on the kernel stack. */
#define STATE_SAVE_SIZE		(PT_SIZE + STATE_SAVE_ARG_SPACE)

/* The offset of the struct pt_regs in a `state save frame' on the stack. */
#define PTO	STATE_SAVE_ARG_SPACE /* 24 the space for args */

/* Declare a global, 4-byte-aligned entry point (callable from C or asm). */
#define C_ENTRY(name)	.globl name; .align 4; name
43
/*
 * Various ways of setting and clearing BIP in flags reg.
 * This is mucky, but necessary using microblaze version that
 * allows msr ops to write to BIP
 */
#if CONFIG_XILINX_MICROBLAZE0_USE_MSR_INSTR
	/* Core has msrset/msrclr: single-instruction MSR bit ops, r0 as
	 * dummy destination so no GP register is clobbered. */
	.macro	clear_bip
	msrclr	r0, MSR_BIP
	.endm

	.macro	set_bip
	msrset	r0, MSR_BIP
	.endm

	.macro	clear_eip
	msrclr	r0, MSR_EIP
	.endm

	.macro	set_ee
	msrset	r0, MSR_EE
	.endm

	.macro	disable_irq
	msrclr	r0, MSR_IE
	.endm

	.macro	enable_irq
	msrset	r0, MSR_IE
	.endm

	.macro	set_ums
	msrset	r0, MSR_UMS
	msrclr	r0, MSR_VMS
	.endm

	.macro	set_vms
	msrclr	r0, MSR_UMS
	msrset	r0, MSR_VMS
	.endm

	.macro	clear_ums
	msrclr	r0, MSR_UMS
	.endm

	.macro	clear_vms_ums
	msrclr	r0, MSR_VMS | MSR_UMS
	.endm
#else
	/* No msrset/msrclr on this core: emulate with a read-modify-write
	 * of rMSR.  NOTE: these variants clobber r11. */
	.macro	clear_bip
	mfs	r11, rmsr
	andi	r11, r11, ~MSR_BIP
	mts	rmsr, r11
	.endm

	.macro	set_bip
	mfs	r11, rmsr
	ori	r11, r11, MSR_BIP
	mts	rmsr, r11
	.endm

	.macro	clear_eip
	mfs	r11, rmsr
	andi	r11, r11, ~MSR_EIP
	mts	rmsr, r11
	.endm

	.macro	set_ee
	mfs	r11, rmsr
	ori	r11, r11, MSR_EE
	mts	rmsr, r11
	.endm

	.macro	disable_irq
	mfs	r11, rmsr
	andi	r11, r11, ~MSR_IE
	mts	rmsr, r11
	.endm

	.macro	enable_irq
	mfs	r11, rmsr
	ori	r11, r11, MSR_IE
	mts	rmsr, r11
	.endm

	.macro set_ums
	mfs	r11, rmsr
	ori	r11, r11, MSR_VMS
	andni	r11, r11, MSR_UMS
	mts	rmsr, r11
	.endm

	.macro	set_vms
	mfs	r11, rmsr
	ori	r11, r11, MSR_VMS
	andni	r11, r11, MSR_UMS
	mts	rmsr, r11
	.endm

	.macro	clear_ums
	mfs	r11, rmsr
	andni	r11, r11, MSR_UMS
	mts	rmsr,r11
	.endm

	.macro	clear_vms_ums
	mfs	r11, rmsr
	andni	r11, r11, (MSR_VMS|MSR_UMS)
	mts	rmsr,r11
	.endm
#endif
154
/* Define how to call high-level functions. With MMU, virtual mode must be
 * enabled when calling the high-level function. Clobbers R11.
 * VM_ON, VM_OFF, DO_JUMP_BIPCLR, DO_CALL
 */

/* turn on virtual protected mode save
 * (rted returns to the next line with the new MSR mode bits applied) */
#define VM_ON		\
	set_ums;	\
	rted	r0, 2f;	\
	nop; \
2:

/* turn off virtual protected mode save and user mode save
 * (return target must be a physical address, hence TOPHYS) */
#define VM_OFF			\
	clear_vms_ums;		\
	rted	r0, TOPHYS(1f);	\
	nop; \
1:
173
/* Store all GP registers (except r0/r1/r16/r17) plus MSR into the pt_regs
 * frame at r1+PTO.  r14 (exception return address) is stored as PT_PC.
 * Clobbers r11 at the end (used to read rMSR). */
#define SAVE_REGS \
	swi	r2, r1, PTO+PT_R2;	/* Save SDA */			\
	swi	r3, r1, PTO+PT_R3;					\
	swi	r4, r1, PTO+PT_R4;					\
	swi	r5, r1, PTO+PT_R5;					\
	swi	r6, r1, PTO+PT_R6;					\
	swi	r7, r1, PTO+PT_R7;					\
	swi	r8, r1, PTO+PT_R8;					\
	swi	r9, r1, PTO+PT_R9;					\
	swi	r10, r1, PTO+PT_R10;					\
	swi	r11, r1, PTO+PT_R11;	/* save clobbered regs after rval */\
	swi	r12, r1, PTO+PT_R12;					\
	swi	r13, r1, PTO+PT_R13;	/* Save SDA2 */			\
	swi	r14, r1, PTO+PT_PC;	/* PC, before IRQ/trap */	\
	swi	r15, r1, PTO+PT_R15;	/* Save LP */			\
	swi	r18, r1, PTO+PT_R18;	/* Save asm scratch reg */	\
	swi	r19, r1, PTO+PT_R19;					\
	swi	r20, r1, PTO+PT_R20;					\
	swi	r21, r1, PTO+PT_R21;					\
	swi	r22, r1, PTO+PT_R22;					\
	swi	r23, r1, PTO+PT_R23;					\
	swi	r24, r1, PTO+PT_R24;					\
	swi	r25, r1, PTO+PT_R25;					\
	swi	r26, r1, PTO+PT_R26;					\
	swi	r27, r1, PTO+PT_R27;					\
	swi	r28, r1, PTO+PT_R28;					\
	swi	r29, r1, PTO+PT_R29;					\
	swi	r30, r1, PTO+PT_R30;					\
	swi	r31, r1, PTO+PT_R31;	/* Save current task reg */	\
	mfs	r11, rmsr;		/* save MSR */			\
	swi	r11, r1, PTO+PT_MSR;
205
/* Inverse of SAVE_REGS: restore MSR first, then all GP registers from the
 * pt_regs frame at r1+PTO.  r14 gets PT_PC so rtbd/rtid can return. */
#define RESTORE_REGS \
	lwi	r11, r1, PTO+PT_MSR;					\
	mts	rmsr , r11;						\
	lwi	r2, r1, PTO+PT_R2;	/* restore SDA */		\
	lwi	r3, r1, PTO+PT_R3;					\
	lwi	r4, r1, PTO+PT_R4;					\
	lwi	r5, r1, PTO+PT_R5;					\
	lwi	r6, r1, PTO+PT_R6;					\
	lwi	r7, r1, PTO+PT_R7;					\
	lwi	r8, r1, PTO+PT_R8;					\
	lwi	r9, r1, PTO+PT_R9;					\
	lwi	r10, r1, PTO+PT_R10;					\
	lwi	r11, r1, PTO+PT_R11;	/* restore clobbered regs after rval */\
	lwi	r12, r1, PTO+PT_R12;					\
	lwi	r13, r1, PTO+PT_R13;	/* restore SDA2 */		\
	lwi	r14, r1, PTO+PT_PC;	/* RESTORE_LINK PC, before IRQ/trap */\
	lwi	r15, r1, PTO+PT_R15;	/* restore LP */		\
	lwi	r18, r1, PTO+PT_R18;	/* restore asm scratch reg */	\
	lwi	r19, r1, PTO+PT_R19;					\
	lwi	r20, r1, PTO+PT_R20;					\
	lwi	r21, r1, PTO+PT_R21;					\
	lwi	r22, r1, PTO+PT_R22;					\
	lwi	r23, r1, PTO+PT_R23;					\
	lwi	r24, r1, PTO+PT_R24;					\
	lwi	r25, r1, PTO+PT_R25;					\
	lwi	r26, r1, PTO+PT_R26;					\
	lwi	r27, r1, PTO+PT_R27;					\
	lwi	r28, r1, PTO+PT_R28;					\
	lwi	r29, r1, PTO+PT_R29;					\
	lwi	r30, r1, PTO+PT_R30;					\
	lwi	r31, r1, PTO+PT_R31;	/* Restore cur task reg */
237
/* Full exception-entry state save.  Decides between kernel-mode entry
 * (reuse current kernel stack) and user-mode entry (switch to the task's
 * kernel stack), builds the pt_regs frame, records PT_MODE (non-zero =
 * kernel, 0 = user) and leaves CURRENT_TASK (r31) loaded.
 * Runs in physical addressing; r1 ends up as a physical stack pointer. */
#define SAVE_STATE	\
	swi	r1, r0, TOPHYS(PER_CPU(ENTRY_SP)); /* save stack */	\
	/* See if already in kernel mode.*/				\
	mfs	r1, rmsr;						\
	andi	r1, r1, MSR_UMS;					\
	bnei	r1, 1f;						\
	/* Kernel-mode state save.  */					\
	/* Reload kernel stack-ptr. */					\
	lwi	r1, r0, TOPHYS(PER_CPU(ENTRY_SP));			\
			\
	/* tophys(r1,r1); */						\
	/* addik	r1, r1, -STATE_SAVE_SIZE; */			\
	addik	r1, r1, CONFIG_KERNEL_BASE_ADDR - CONFIG_KERNEL_START - STATE_SAVE_SIZE; \
	SAVE_REGS							\
	brid	2f;							\
	swi	r1, r1, PTO+PT_MODE; 	 				\
1:	/* User-mode state save.  */					\
	lwi	r1, r0, TOPHYS(PER_CPU(CURRENT_SAVE)); /* get saved current */\
	tophys(r1,r1);							\
	lwi	r1, r1, TS_THREAD_INFO;	/* get the thread info */	\
	/* MS these three instructions can be added to one */		\
	/* addik	r1, r1, THREAD_SIZE; */				\
	/* tophys(r1,r1); */						\
	/* addik	r1, r1, -STATE_SAVE_SIZE; */			\
	addik r1, r1, THREAD_SIZE + CONFIG_KERNEL_BASE_ADDR - CONFIG_KERNEL_START - STATE_SAVE_SIZE; \
	SAVE_REGS							\
	lwi	r11, r0, TOPHYS(PER_CPU(ENTRY_SP));			\
	swi	r11, r1, PTO+PT_R1; /* Store user SP.  */		\
	swi	r0, r1, PTO + PT_MODE; /* Was in user-mode.  */		\
	/* MS: I am clearing UMS even in case when I come from kernel space */ \
	clear_ums; 							\
2:	lwi	CURRENT_TASK, r0, TOPHYS(PER_CPU(CURRENT_SAVE));
270
.text

/*
 * User trap.
 *
 * System calls are handled here.
 *
 * Syscall protocol:
 * Syscall number in r12, args in r5-r10
 * Return value in r3
 *
 * Trap entered via brki instruction, so BIP bit is set, and interrupts
 * are masked. This is nice, means we don't have to CLI before state save
 */
C_ENTRY(_user_exception):
	addi	r14, r14, 4	/* return address is 4 byte after call */
	swi	r1, r0, TOPHYS(PER_CPU(ENTRY_SP)) /* save stack */

	lwi	r1, r0, TOPHYS(PER_CPU(CURRENT_SAVE)); /* get saved current */
	tophys(r1,r1);
	lwi	r1, r1, TS_THREAD_INFO;	/* get stack from task_struct */
	/* MS these three instructions can be added to one */
	/* addik	r1, r1, THREAD_SIZE; */
	/* tophys(r1,r1); */
	/* addik	r1, r1, -STATE_SAVE_SIZE; */
	addik r1, r1, THREAD_SIZE + CONFIG_KERNEL_BASE_ADDR - CONFIG_KERNEL_START - STATE_SAVE_SIZE;
	SAVE_REGS

	lwi	r11, r0, TOPHYS(PER_CPU(ENTRY_SP));
	swi	r11, r1, PTO+PT_R1;		/* Store user SP.  */
	clear_ums;
	lwi	CURRENT_TASK, r0, TOPHYS(PER_CPU(CURRENT_SAVE));
	/* Save away the syscall number.  */
	swi	r12, r1, PTO+PT_R0;
	tovirt(r1,r1)

/* where the trap should return need -8 to adjust for rtsd r15, 8*/
/* Jump to the appropriate function for the system call number in r12
 * (r12 is not preserved), or return an error if r12 is not valid. The LP
 * register should point to the location where
 * the called function should return.  [note that MAKE_SYS_CALL uses label 1] */

	/* Step into virtual mode */
	rtbd	r0, 3f
	nop
3:
	lwi	r11, CURRENT_TASK, TS_THREAD_INFO /* get thread info */
	lwi	r11, r11, TI_FLAGS	 /* get flags in thread info */
	andi	r11, r11, _TIF_WORK_SYSCALL_MASK
	beqi	r11, 4f		/* no tracing work -> dispatch directly */

	/* Syscall tracing active: preset -ENOSYS as the return value and
	 * let the tracer see (and possibly rewrite) the registers. */
	addik	r3, r0, -ENOSYS
	swi	r3, r1, PTO + PT_R3
	brlid	r15, do_syscall_trace_enter
	addik	r5, r1, PTO + PT_R0

	# do_syscall_trace_enter returns the new syscall nr.
	addk	r12, r0, r3
	/* Reload the argument registers - the tracer may have changed them. */
	lwi	r5, r1, PTO+PT_R5;
	lwi	r6, r1, PTO+PT_R6;
	lwi	r7, r1, PTO+PT_R7;
	lwi	r8, r1, PTO+PT_R8;
	lwi	r9, r1, PTO+PT_R9;
	lwi	r10, r1, PTO+PT_R10;
4:
/* Jump to the appropriate function for the system call number in r12
 * (r12 is not preserved), or return an error if r12 is not valid.
 * The LP register should point to the location where the called function
 * should return.  [note that MAKE_SYS_CALL uses label 1] */
	/* See if the system call number is valid */
	addi	r11, r12, -__NR_syscalls;
	bgei	r11,5f;
	/* Figure out which function to use for this system call.  */
	/* Note Microblaze barrel shift is optional, so don't rely on it */
	add	r12, r12, r12;			/* convert num -> ptr */
	add	r12, r12, r12;

#ifdef DEBUG
	/* Trace syscalls: count each syscall number in an r0_ram slot */
	lwi	r3, r12, 0x400 + r0_ram
	addi	r3, r3, 1
	swi	r3, r12, 0x400 + r0_ram
#endif

	# Find and jump into the syscall handler.
	lwi	r12, r12, sys_call_table
	/* where the trap should return need -8 to adjust for rtsd r15, 8 */
	addi	r15, r0, ret_from_trap-8
	bra	r12

	/* The syscall number is invalid, return an error.  */
5:
	rtsd	r15, 8;		/* looks like a normal subroutine return */
	addi	r3, r0, -ENOSYS;
365
/* Entry point used to return from a syscall/trap */
/* We re-enable BIP bit before state restore */
C_ENTRY(ret_from_trap):
	swi	r3, r1, PTO + PT_R3	/* store return value pair into pt_regs */
	swi	r4, r1, PTO + PT_R4

	/* Syscall exit tracing, if any flag in _TIF_WORK_SYSCALL_MASK
	 * is set for the current thread. */
	lwi	r11, CURRENT_TASK, TS_THREAD_INFO;	/* get thread info */
	lwi	r11, r11, TI_FLAGS;		/* get flags in thread info */
	andi	r11, r11, _TIF_WORK_SYSCALL_MASK
	beqi	r11, 1f			/* no tracing work -> skip */

	brlid	r15, do_syscall_trace_leave
	addik	r5, r1, PTO + PT_R0
1:
	/* We're returning to user mode, so check for various conditions that
	 * trigger rescheduling. */
	/* get thread info from current task */
	lwi	r11, CURRENT_TASK, TS_THREAD_INFO;
	lwi	r11, r11, TI_FLAGS;		/* get flags in thread info */
	andi	r11, r11, _TIF_NEED_RESCHED;
	beqi	r11, 5f;			/* no resched needed -> skip */

	bralid	r15, schedule;	/* Call scheduler */
	nop;				/* delay slot */

	/* Maybe handle a signal */
5:	/* get thread info from current task*/
	lwi	r11, CURRENT_TASK, TS_THREAD_INFO;
	lwi	r11, r11, TI_FLAGS;	/* get flags in thread info */
	andi	r11, r11, _TIF_SIGPENDING;
	beqi	r11, 1f;		/* no signals pending -> skip handler */

	addik	r5, r1, PTO;		/* Arg 1: struct pt_regs *regs */
	addi	r7, r0, 1;		/* Arg 3: int in_syscall */
	bralid	r15, do_signal;	/* Handle any signals */
	add	r6, r0, r0;		/* Arg 2: sigset_t *oldset */

/* Finally, return to user state.  */
1:	set_bip;			/*  Ints masked for state restore */
	swi	CURRENT_TASK, r0, PER_CPU(CURRENT_SAVE); /* save current */
	VM_OFF;
	tophys(r1,r1);
	RESTORE_REGS;
	addik	r1, r1, STATE_SAVE_SIZE		/* Clean up stack space.  */
	lwi	r1, r1, PT_R1 - PT_SIZE;/* Restore user stack pointer. */
TRAP_return:		/* Make global symbol for debugging */
	rtbd	r14, 0;	/* Instructions to return from an IRQ */
	nop;
416
417
/* These syscalls need access to the struct pt_regs on the stack, so we
   implement them in assembly (they're basically all wrappers anyway).  */

C_ENTRY(sys_fork_wrapper):
	addi	r5, r0, SIGCHLD			/* Arg 0: flags */
	lwi	r6, r1, PTO+PT_R1	/* Arg 1: child SP (use parent's) */
	addik	r7, r1, PTO			/* Arg 2: parent context */
	add	r8, r0, r0			/* Arg 3: (unused) */
	add	r9, r0, r0;			/* Arg 4: (unused) */
	brid	do_fork		/* Do real work (tail-call) */
	add	r10, r0, r0;			/* Arg 5: (unused) */
429
/* This is the initial entry point for a new child thread, with an appropriate
   stack in place that makes it look like the child is in the middle of a
   syscall.  This function is actually `returned to' from switch_thread
   (copy_thread makes ret_from_fork the return address in each new thread's
   saved context).  */
C_ENTRY(ret_from_fork):
	bralid	r15, schedule_tail; /* ...which is schedule_tail's arg */
	add	r3, r5, r0;	/* switch_thread returns the prev task */
				/* ( in the delay slot ) */
	brid	ret_from_trap;	/* Do normal trap return */
	add	r3, r0, r0;	/* Child's fork call should return 0. */
441
/* vfork: hand pt_regs to the C helper as the only argument. */
C_ENTRY(sys_vfork):
	brid	microblaze_vfork	/* Do real work (tail-call) */
	addik	r5, r1, PTO		/* delay slot: Arg 1 = pt_regs */
445
C_ENTRY(sys_clone):
	bnei	r6, 1f;			/* See if child SP arg (arg 1) is 0. */
	lwi	r6, r1, PTO + PT_R1;	/* If so, use parent's stack ptr */
1:	addik	r7, r1, PTO;			/* Arg 2: parent context */
	add	r8, r0, r0;			/* Arg 3: (unused) */
	add	r9, r0, r0;			/* Arg 4: (unused) */
	brid	do_fork		/* Do real work (tail-call) */
	add	r10, r0, r0;			/* Arg 5: (unused) */
454
C_ENTRY(sys_execve):
	brid	microblaze_execve;	/* Do real work (tail-call).*/
	addik	r8, r1, PTO;		/* delay slot: add user context as 4th arg */
458
C_ENTRY(sys_rt_sigreturn_wrapper):
	swi	r3, r1, PTO+PT_R3; /* store r3/r4 so the frame is complete */
	swi	r4, r1, PTO+PT_R4;
	brlid	r15, sys_rt_sigreturn	/* Do real work */
	addik	r5, r1, PTO;		/* delay slot: pt_regs as 1st arg */
	lwi	r3, r1, PTO+PT_R3; /* restore saved r3, r4 registers */
	lwi	r4, r1, PTO+PT_R4;
	bri ret_from_trap /* fall through will not work here due to align */
	nop;
468
/*
 * HW EXCEPTION routine start
 */
C_ENTRY(full_exception_trap):
	/* adjust exception address for privileged instruction
	 * for finding where is it */
	addik	r17, r17, -4
	SAVE_STATE /* Save registers */
	/* PC, before IRQ/trap - this is one instruction above */
	swi	r17, r1, PTO+PT_PC;
	tovirt(r1,r1)
	/* where the trap should return need -8 to adjust for rtsd r15, 8 */
	addik	r15, r0, ret_from_exc - 8
	mfs	r6, resr		/* Arg 2: exception status register */
	mfs	r7, rfsr;		/* save FSR */
	mts	rfsr, r0;	/* Clear sticky fsr */
	rted	r0, full_exception	/* enter C handler in virtual mode */
	addik	r5, r1, PTO		 /* parameter struct pt_regs * regs */
487
/*
 * Unaligned data trap.
 *
 * Unaligned data trap last on 4k page is handled here.
 *
 * Trap entered via exception, so EE bit is set, and interrupts
 * are masked.  This is nice, means we don't have to CLI before state save
 *
 * The assembler routine is in "arch/microblaze/kernel/hw_exception_handler.S"
 */
C_ENTRY(unaligned_data_trap):
	/* MS: I have to save r11 value and then restore it because
	 * set_bip, clear_eip, set_ee use r11 as temp register if MSR
	 * instructions are not used. We don't need to do if MSR instructions
	 * are used and they use r0 instead of r11.
	 * I am using ENTRY_SP which should be primary used only for stack
	 * pointer saving. */
	swi	r11, r0, TOPHYS(PER_CPU(ENTRY_SP));
	set_bip;        /* equalize initial state for all possible entries */
	clear_eip;
	set_ee;
	lwi	r11, r0, TOPHYS(PER_CPU(ENTRY_SP));
	SAVE_STATE		/* Save registers.*/
	/* PC, before IRQ/trap - this is one instruction above */
	swi	r17, r1, PTO+PT_PC;
	tovirt(r1,r1)
	/* where the trap should return need -8 to adjust for rtsd r15, 8 */
	addik	r15, r0, ret_from_exc-8
	mfs	r3, resr		/* ESR */
	mfs	r4, rear		/* EAR */
	rtbd	r0, _unaligned_data_exception	/* enter handler in virtual mode */
	addik	r7, r1, PTO		/* parameter struct pt_regs * regs */
520
/*
 * Page fault traps.
 *
 * If the real exception handler (from hw_exception_handler.S) didn't find
 * the mapping for the process, then we're thrown here to handle such situation.
 *
 * Trap entered via exceptions, so EE bit is set, and interrupts
 * are masked.  This is nice, means we don't have to CLI before state save
 *
 * Build a standard exception frame for TLB Access errors.  All TLB exceptions
 * will bail out to this point if they can't resolve the lightweight TLB fault.
 *
 * The C function called is in "arch/microblaze/mm/fault.c", declared as:
 * void do_page_fault(struct pt_regs *regs,
 *				unsigned long address,
 *				unsigned long error_code)
 */
/* data and instruction trap - which one it was is resolved in fault.c */
C_ENTRY(page_fault_data_trap):
	SAVE_STATE		/* Save registers.*/
	/* PC, before IRQ/trap - this is one instruction above */
	swi	r17, r1, PTO+PT_PC;
	tovirt(r1,r1)
	/* where the trap should return need -8 to adjust for rtsd r15, 8 */
	addik	r15, r0, ret_from_exc-8
	mfs	r6, rear		/* parameter unsigned long address */
	mfs	r7, resr		/* parameter unsigned long error_code */
	rted	r0, do_page_fault	/* enter C handler in virtual mode */
	addik	r5, r1, PTO		/* parameter struct pt_regs * regs */
550
/* Instruction-side page fault: like the data trap but error_code is 0. */
C_ENTRY(page_fault_instr_trap):
	SAVE_STATE		/* Save registers.*/
	/* PC, before IRQ/trap - this is one instruction above */
	swi	r17, r1, PTO+PT_PC;
	tovirt(r1,r1)
	/* where the trap should return need -8 to adjust for rtsd r15, 8 */
	addik	r15, r0, ret_from_exc-8
	mfs	r6, rear		/* parameter unsigned long address */
	ori	r7, r0, 0		/* parameter unsigned long error_code */
	rted	r0, do_page_fault	/* enter C handler in virtual mode */
	addik	r5, r1, PTO		/* parameter struct pt_regs * regs */
562
/* Entry point used to return from an exception.  */
C_ENTRY(ret_from_exc):
	lwi	r11, r1, PTO + PT_MODE;
	bnei	r11, 2f;		/* See if returning to kernel mode, */
					/* ... if so, skip resched &c.  */

	/* We're returning to user mode, so check for various conditions that
	   trigger rescheduling. */
	lwi	r11, CURRENT_TASK, TS_THREAD_INFO;	/* get thread info */
	lwi	r11, r11, TI_FLAGS;	/* get flags in thread info */
	andi	r11, r11, _TIF_NEED_RESCHED;
	beqi	r11, 5f;

/* Call the scheduler before returning from a syscall/trap. */
	bralid	r15, schedule;	/* Call scheduler */
	nop;				/* delay slot */

	/* Maybe handle a signal */
5:	lwi	r11, CURRENT_TASK, TS_THREAD_INFO;	/* get thread info */
	lwi	r11, r11, TI_FLAGS;	/* get flags in thread info */
	andi	r11, r11, _TIF_SIGPENDING;
	beqi	r11, 1f;		/* no signals pending -> skip handler */

	/*
	 * Handle a signal return; Pending signals should be in r18.
	 *
	 * Not all registers are saved by the normal trap/interrupt entry
	 * points (for instance, call-saved registers (because the normal
	 * C-compiler calling sequence in the kernel makes sure they're
	 * preserved), and call-clobbered registers in the case of
	 * traps), but signal handlers may want to examine or change the
	 * complete register state.  Here we save anything not saved by
	 * the normal entry sequence, so that it may be safely restored
	 * (in a possibly modified form) after do_signal returns. */
	addik	r5, r1, PTO;		/* Arg 1: struct pt_regs *regs */
	addi	r7, r0, 0;		/* Arg 3: int in_syscall */
	bralid	r15, do_signal;	/* Handle any signals */
	add	r6, r0, r0;		/* Arg 2: sigset_t *oldset */

/* Finally, return to user state.  */
1:	set_bip;			/* Ints masked for state restore */
	swi	CURRENT_TASK, r0, PER_CPU(CURRENT_SAVE); /* save current */
	VM_OFF;
	tophys(r1,r1);

	RESTORE_REGS;
	addik	r1, r1, STATE_SAVE_SIZE		/* Clean up stack space.  */

	lwi	r1, r1, PT_R1 - PT_SIZE; /* Restore user stack pointer. */
	bri	6f;
/* Return to kernel state.  */
2:	set_bip;			/* Ints masked for state restore */
	VM_OFF;
	tophys(r1,r1);
	RESTORE_REGS;
	addik	r1, r1, STATE_SAVE_SIZE		/* Clean up stack space.  */

	tovirt(r1,r1);
6:
EXC_return:		/* Make global symbol for debugging */
	rtbd	r14, 0;	/* Instructions to return from an IRQ */
	nop;
625
/*
 * HW EXCEPTION routine end
 */

/*
 * Hardware maskable interrupts.
 *
 * The stack-pointer (r1) should have already been saved to the memory
 * location PER_CPU(ENTRY_SP).
 */
C_ENTRY(_interrupt):
/* MS: we are in physical address */
/* Save registers, switch to proper stack, convert SP to virtual.*/
	swi	r1, r0, TOPHYS(PER_CPU(ENTRY_SP))
	/* MS: See if already in kernel mode. */
	mfs	r1, rmsr
	nop
	andi	r1, r1, MSR_UMS
	bnei	r1, 1f

/* Kernel-mode state save. */
	lwi	r1, r0, TOPHYS(PER_CPU(ENTRY_SP))
	tophys(r1,r1); /* MS: I have in r1 physical address where stack is */
	/* save registers */
/* MS: Make room on the stack -> activation record */
	addik	r1, r1, -STATE_SAVE_SIZE;
	SAVE_REGS
	brid	2f;
	swi	r1, r1, PTO + PT_MODE; /* 0 - user mode, 1 - kernel mode */
1:
/* User-mode state save. */
 /* MS: get the saved current */
	lwi	r1, r0, TOPHYS(PER_CPU(CURRENT_SAVE));
	tophys(r1,r1);
	lwi	r1, r1, TS_THREAD_INFO;
	addik	r1, r1, THREAD_SIZE;
	tophys(r1,r1);
	/* save registers */
	addik	r1, r1, -STATE_SAVE_SIZE;
	SAVE_REGS
	/* calculate mode */
	swi	r0, r1, PTO + PT_MODE;
	lwi	r11, r0, TOPHYS(PER_CPU(ENTRY_SP));
	swi	r11, r1, PTO+PT_R1;
	clear_ums;
2:
	lwi	CURRENT_TASK, r0, TOPHYS(PER_CPU(CURRENT_SAVE));
	tovirt(r1,r1)
	addik	r15, r0, irq_call;	/* return address for do_IRQ */
irq_call:rtbd	r0, do_IRQ;
	addik	r5, r1, PTO;		/* delay slot: Arg 1 = pt_regs */

/* MS: we are in virtual mode */
ret_from_irq:
	lwi	r11, r1, PTO + PT_MODE;
	bnei	r11, 2f;		/* returning to kernel -> skip user work */

	lwi	r11, CURRENT_TASK, TS_THREAD_INFO;
	lwi	r11, r11, TI_FLAGS; /* MS: get flags from thread info */
	andi	r11, r11, _TIF_NEED_RESCHED;
	beqi	r11, 5f
	bralid	r15, schedule;
	nop; /* delay slot */

    /* Maybe handle a signal */
5:	lwi	r11, CURRENT_TASK, TS_THREAD_INFO; /* MS: get thread info */
	lwi	r11, r11, TI_FLAGS; /* get flags in thread info */
	andi	r11, r11, _TIF_SIGPENDING;
	beqid	r11, no_intr_resched
/* Handle a signal return; Pending signals should be in r18. */
	addi	r7, r0, 0; /* Arg 3: int in_syscall */
	addik	r5, r1, PTO; /* Arg 1: struct pt_regs *regs */
	bralid	r15, do_signal;	/* Handle any signals */
	add	r6, r0, r0; /* Arg 2: sigset_t *oldset */

/* Finally, return to user state. */
no_intr_resched:
    /* Disable interrupts, we are now committed to the state restore */
	disable_irq
	swi	CURRENT_TASK, r0, PER_CPU(CURRENT_SAVE);
	VM_OFF;
	tophys(r1,r1);
	RESTORE_REGS
	addik	r1, r1, STATE_SAVE_SIZE /* MS: Clean up stack space. */
	lwi	r1, r1, PT_R1 - PT_SIZE;
	bri	6f;
/* MS: Return to kernel state. */
2:
#ifdef CONFIG_PREEMPT
	lwi	r11, CURRENT_TASK, TS_THREAD_INFO;
	/* MS: get preempt_count from thread info */
	lwi	r5, r11, TI_PREEMPT_COUNT;
	bgti	r5, restore;	/* preemption disabled -> plain restore */

	lwi	r5, r11, TI_FLAGS;		/* get flags in thread info */
	andi	r5, r5, _TIF_NEED_RESCHED;
	beqi	r5, restore /* if zero jump over */

preempt:
	/* interrupts are off that's why I am calling preempt_schedule_irq */
	bralid	r15, preempt_schedule_irq
	nop
	lwi	r11, CURRENT_TASK, TS_THREAD_INFO;	/* get thread info */
	lwi	r5, r11, TI_FLAGS;		/* get flags in thread info */
	andi	r5, r5, _TIF_NEED_RESCHED;
	bnei	r5, preempt /* if non zero jump to resched */
restore:
#endif
	VM_OFF /* MS: turn off MMU */
	tophys(r1,r1)
	RESTORE_REGS
	addik	r1, r1, STATE_SAVE_SIZE	/* MS: Clean up stack space. */
	tovirt(r1,r1);
6:
IRQ_return: /* MS: Make global symbol for debugging */
	rtid	r14, 0
	nop
743
/*
 * Debug trap for KGDB. Enter to _debug_exception by brki r16, 0x18
 * and call handling function with saved pt_regs
 */
C_ENTRY(_debug_exception):
	/* BIP bit is set on entry, no interrupts can occur */
	swi	r1, r0, TOPHYS(PER_CPU(ENTRY_SP))

	mfs	r1, rmsr
	nop
	andi	r1, r1, MSR_UMS
	bnei	r1, 1f
/* MS: Kernel-mode state save - kgdb */
	lwi	r1, r0, TOPHYS(PER_CPU(ENTRY_SP)); /* Reload kernel stack-ptr*/

	/* BIP bit is set on entry, no interrupts can occur */
	addik   r1, r1, CONFIG_KERNEL_BASE_ADDR - CONFIG_KERNEL_START - STATE_SAVE_SIZE;
	SAVE_REGS;
	/* save all regs to pt_reg structure */
	swi	r0, r1, PTO+PT_R0;	/* R0 must be saved too */
	swi	r14, r1, PTO+PT_R14	/* rewrite saved R14 value */
	swi	r16, r1, PTO+PT_R16
	swi	r16, r1, PTO+PT_PC; /* PC and r16 are the same */
	swi	r17, r1, PTO+PT_R17
	/* save special purpose registers to pt_regs */
	mfs	r11, rear;
	swi	r11, r1, PTO+PT_EAR;
	mfs	r11, resr;
	swi	r11, r1, PTO+PT_ESR;
	mfs	r11, rfsr;
	swi	r11, r1, PTO+PT_FSR;

	/* stack pointer is in physical address and was decreased
	 * by STATE_SAVE_SIZE; reconstruct the correct (virtual) R1 value */
	addik   r11, r1, CONFIG_KERNEL_START - CONFIG_KERNEL_BASE_ADDR + STATE_SAVE_SIZE;
	swi	r11, r1, PTO+PT_R1
	/* MS: r31 - current pointer isn't changed */
	tovirt(r1,r1)
#ifdef CONFIG_KGDB
	addi	r5, r1, PTO /* pass pt_reg address as the first arg */
	la	r15, r0, dbtrap_call; /* return address */
	rtbd	r0, microblaze_kgdb_break
	nop;
#endif
	/* MS: Place handler for brki from kernel space if KGDB is OFF.
	 * It is very unlikely that another brki instruction is called. */
	bri 0

/* MS: User-mode state save - gdb */
1:	lwi	r1, r0, TOPHYS(PER_CPU(CURRENT_SAVE)); /* get saved current */
	tophys(r1,r1);
	lwi	r1, r1, TS_THREAD_INFO;	/* get the thread info */
	addik	r1, r1, THREAD_SIZE;	/* calculate kernel stack pointer */
	tophys(r1,r1);

	addik	r1, r1, -STATE_SAVE_SIZE; /* Make room on the stack.  */
	SAVE_REGS;
	swi	r17, r1, PTO+PT_R17;
	swi	r16, r1, PTO+PT_R16;
	swi	r16, r1, PTO+PT_PC;	/* Save LP */
	swi	r0, r1, PTO + PT_MODE; /* Was in user-mode.  */
	lwi	r11, r0, TOPHYS(PER_CPU(ENTRY_SP));
	swi	r11, r1, PTO+PT_R1; /* Store user SP.  */
	lwi	CURRENT_TASK, r0, TOPHYS(PER_CPU(CURRENT_SAVE));
	tovirt(r1,r1)
	set_vms;
	addik	r5, r1, PTO;
	addik	r15, r0, dbtrap_call;
dbtrap_call: /* Return point for kernel/user entry + 8 because of rtsd r15, 8 */
	rtbd	r0, sw_exception
	nop

	/* MS: The first instruction for the second part of the gdb/kgdb */
	set_bip; /* Ints masked for state restore */
	lwi	r11, r1, PTO + PT_MODE;
	bnei	r11, 2f;
/* MS: Return to user space - gdb */
	/* Get current task ptr into r11 */
	lwi	r11, CURRENT_TASK, TS_THREAD_INFO;	/* get thread info */
	lwi	r11, r11, TI_FLAGS;	/* get flags in thread info */
	andi	r11, r11, _TIF_NEED_RESCHED;
	beqi	r11, 5f;

	/* Call the scheduler before returning from a syscall/trap. */
	bralid	r15, schedule;	/* Call scheduler */
	nop;				/* delay slot */

	/* Maybe handle a signal */
5:	lwi	r11, CURRENT_TASK, TS_THREAD_INFO;	/* get thread info */
	lwi	r11, r11, TI_FLAGS;	/* get flags in thread info */
	andi	r11, r11, _TIF_SIGPENDING;
	beqi	r11, 1f;		/* no signals pending -> skip handler */

	addik	r5, r1, PTO;		/* Arg 1: struct pt_regs *regs */
	addi  r7, r0, 0;	/* Arg 3: int in_syscall */
	bralid	r15, do_signal;	/* Handle any signals */
	add	r6, r0, r0;		/* Arg 2: sigset_t *oldset */

/* Finally, return to user state.  */
1:	swi	CURRENT_TASK, r0, PER_CPU(CURRENT_SAVE); /* save current */
	VM_OFF;
	tophys(r1,r1);
	/* MS: Restore all regs */
	RESTORE_REGS
	lwi	r17, r1, PTO+PT_R17;
	lwi	r16, r1, PTO+PT_R16;
	addik	r1, r1, STATE_SAVE_SIZE	 /* Clean up stack space */
	lwi	r1, r1, PT_R1 - PT_SIZE; /* Restore user stack pointer */
DBTRAP_return_user: /* MS: Make global symbol for debugging */
	rtbd	r16, 0; /* MS: Instructions to return from a debug trap */
	nop;

/* MS: Return to kernel state - kgdb */
2:	VM_OFF;
	tophys(r1,r1);
	/* MS: Restore all regs */
	RESTORE_REGS
	lwi	r14, r1, PTO+PT_R14;
	lwi	r16, r1, PTO+PT_PC;
	lwi	r17, r1, PTO+PT_R17;
	addik	r1, r1, STATE_SAVE_SIZE; /* MS: Clean up stack space */
	tovirt(r1,r1);
DBTRAP_return_kernel: /* MS: Make global symbol for debugging */
	rtbd	r16, 0; /* MS: Instructions to return from a debug trap */
	nop;
869
870
/* Context switch: r5 = prev thread_info, r6 = next thread_info.
 * Saves callee-saved state into prev's cpu_context, loads next's,
 * updates CURRENT_TASK (r31) and PER_CPU(CURRENT_SAVE).
 * Returns the previous task pointer in r3. */
ENTRY(_switch_to)
	/* prepare return value */
	addk	r3, r0, CURRENT_TASK

	/* save registers in cpu_context */
	/* use r11 and r12, volatile registers, as temp register */
	/* give start of cpu_context for previous process */
	addik	r11, r5, TI_CPU_CONTEXT
	swi	r1, r11, CC_R1
	swi	r2, r11, CC_R2
	/* skip volatile registers.
	 * they are saved on stack when we jumped to _switch_to() */
	/* dedicated registers */
	swi	r13, r11, CC_R13
	swi	r14, r11, CC_R14
	swi	r15, r11, CC_R15
	swi	r16, r11, CC_R16
	swi	r17, r11, CC_R17
	swi	r18, r11, CC_R18
	/* save non-volatile registers */
	swi	r19, r11, CC_R19
	swi	r20, r11, CC_R20
	swi	r21, r11, CC_R21
	swi	r22, r11, CC_R22
	swi	r23, r11, CC_R23
	swi	r24, r11, CC_R24
	swi	r25, r11, CC_R25
	swi	r26, r11, CC_R26
	swi	r27, r11, CC_R27
	swi	r28, r11, CC_R28
	swi	r29, r11, CC_R29
	swi	r30, r11, CC_R30
	/* special purpose registers */
	mfs	r12, rmsr
	swi	r12, r11, CC_MSR
	mfs	r12, rear
	swi	r12, r11, CC_EAR
	mfs	r12, resr
	swi	r12, r11, CC_ESR
	mfs	r12, rfsr
	swi	r12, r11, CC_FSR

	/* update r31, the current-give me pointer to task which will be next */
	lwi	CURRENT_TASK, r6, TI_TASK
	/* stored it to current_save too */
	swi	CURRENT_TASK, r0, PER_CPU(CURRENT_SAVE)

	/* get new process' cpu context and restore */
	/* give me start where start context of next task */
	addik	r11, r6, TI_CPU_CONTEXT

	/* non-volatile registers */
	lwi	r30, r11, CC_R30
	lwi	r29, r11, CC_R29
	lwi	r28, r11, CC_R28
	lwi	r27, r11, CC_R27
	lwi	r26, r11, CC_R26
	lwi	r25, r11, CC_R25
	lwi	r24, r11, CC_R24
	lwi	r23, r11, CC_R23
	lwi	r22, r11, CC_R22
	lwi	r21, r11, CC_R21
	lwi	r20, r11, CC_R20
	lwi	r19, r11, CC_R19
	/* dedicated registers */
	lwi	r18, r11, CC_R18
	lwi	r17, r11, CC_R17
	lwi	r16, r11, CC_R16
	lwi	r15, r11, CC_R15
	lwi	r14, r11, CC_R14
	lwi	r13, r11, CC_R13
	/* skip volatile registers */
	lwi	r2, r11, CC_R2
	lwi	r1, r11, CC_R1

	/* special purpose registers */
	lwi	r12, r11, CC_FSR
	mts	rfsr, r12
	lwi	r12, r11, CC_MSR
	mts	rmsr, r12

	rtsd	r15, 8
	nop
954
ENTRY(_reset)
	brai	0x70; /* Jump back to FS-boot */

	/* These are compiled and loaded into high memory, then
	 * copied into place in mach_early_setup */
	.section	.init.ivt, "ax"
	.org	0x0
	/* this is very important - here is the reset vector */
	/* in current MMU branch you don't care what is here - it is
	 * used from bootloader site - but this is correct for FS-BOOT */
	brai	0x70
	nop
	brai	TOPHYS(_user_exception); /* syscall handler */
	brai	TOPHYS(_interrupt);	/* Interrupt handler */
	brai	TOPHYS(_debug_exception);	/* debug trap handler */
	brai	TOPHYS(_hw_exception_handler);	/* HW exception handler */
971
/* The syscall dispatch table (sys_call_table) lives in .rodata. */
.section .rodata,"a"
#include "syscall_table.S"

syscall_table_size=(.-sys_call_table)
976
/* NUL-terminated trap-type names referenced by microblaze_trap_handlers
 * (consumed by the stack unwinder). */
type_SYSCALL:
	.ascii "SYSCALL\0"
type_IRQ:
	.ascii "IRQ\0"
type_IRQ_PREEMPT:
	.ascii "IRQ (PREEMPTED)\0"
type_SYSCALL_PREEMPT:
	.ascii " SYSCALL (PREEMPTED)\0"
985
986	/*
987	 * Trap decoding for stack unwinder
988	 * Tuples are (start addr, end addr, string)
989	 * If return address lies on [start addr, end addr],
990	 * unwinder displays 'string'
991	 */
992
993	.align 4
994.global microblaze_trap_handlers
995microblaze_trap_handlers:
996	/* Exact matches come first */
997	.word ret_from_trap; .word ret_from_trap   ; .word type_SYSCALL
998	.word ret_from_irq ; .word ret_from_irq    ; .word type_IRQ
999	/* Fuzzy matches go here */
1000	.word ret_from_irq ; .word no_intr_resched ; .word type_IRQ_PREEMPT
1001	.word ret_from_trap; .word TRAP_return     ; .word type_SYSCALL_PREEMPT
1002	/* End of table */
1003	.word 0               ; .word 0               ; .word 0
1004