/* arch/arm26/kernel/entry.S
 *
 * Assembled from chunks of code in arch/arm
 *
 * Copyright (C) 2003 Ian Molton
 * Based on the work of RMK.
 *
 */

#include <linux/linkage.h>

#include <asm/assembler.h>
#include <asm/asm-offsets.h>
#include <asm/errno.h>
#include <asm/hardware.h>
#include <asm/sysirq.h>
#include <asm/thread_info.h>
#include <asm/page.h>
#include <asm/ptrace.h>

	.macro	zero_fp
#ifndef CONFIG_NO_FRAME_POINTER
	mov	fp, #0
#endif
	.endm

	.text

@ Bad Abort numbers
@ -----------------
@
#define BAD_PREFETCH	0
#define BAD_DATA	1
#define BAD_ADDREXCPTN	2
#define BAD_IRQ		3
#define BAD_UNDEFINSTR	4

@ OS version number used in SWIs
@  RISC OS is 0
@  RISC iX is 8
@
#define OS_NUMBER	9
#define ARMSWI_OFFSET	0x000f0000
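
@ Linux SWIs are issued as "swi (OS_NUMBER << 20) | call number", so the OS
@ number sits in bits 20-23 of the SWI comment field.  vector_swi below EORs
@ the decoded number with OS_NUMBER << 20: genuine Linux calls land in the
@ range 0..NR_syscalls-1, while ARM private SWIs (and anything else) end up
@ at or above ARMSWI_OFFSET and are handed to arm_syscall or sys_ni_syscall.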

@
@ Stack format (ensured by save_user_regs and save_svc_regs)
@ PSR and PC are combined on arm26
@
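@ The offsets below describe the struct pt_regs frame that save_user_regs
@ builds: r0-r14 at the bottom (S_R0..S_LR), the user PC (with the PSR folded
@ into it, as usual on 26-bit ARM) at S_PC, and the original r0 at S_OLD_R0
@ so that system call restart can recover it.
@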

#define S_OFF		8

#define S_OLD_R0	64
#define S_PC		60
#define S_LR		56
#define S_SP		52
#define S_IP		48
#define S_FP		44
#define S_R10		40
#define S_R9		36
#define S_R8		32
#define S_R7		28
#define S_R6		24
#define S_R5		20
#define S_R4		16
#define S_R3		12
#define S_R2		8
#define S_R1		4
#define S_R0		0

	.macro	save_user_regs
	str	r0, [sp, #-4]!   @ Store SVC r0
	str	lr, [sp, #-4]!   @ Store user mode PC
	sub	sp, sp, #15*4
	stmia	sp, {r0 - lr}^   @ Store the other user-mode regs
	mov	r0, r0
	.endm

	.macro	slow_restore_user_regs
	ldmia	sp, {r0 - lr}^   @ restore the user regs not including PC
	mov	r0, r0
	ldr	lr, [sp, #15*4]  @ get user PC
	add	sp, sp, #15*4+8  @ free stack
	movs	pc, lr           @ return
	.endm

	.macro	fast_restore_user_regs
	add	sp, sp, #S_OFF
	ldmib	sp, {r1 - lr}^
	mov	r0, r0
	ldr	lr, [sp, #15*4]
	add	sp, sp, #15*4+8
	movs	pc, lr
	.endm
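
@ Note: the "mov r0, r0" after each LDM/STM that uses the ^ (user bank)
@ qualifier is a deliberate no-op - on 26-bit ARM the banked registers must
@ not be touched by the instruction immediately following such a transfer.
@ fast_restore_user_regs reloads r1-lr only, because r0 already holds the
@ syscall return value.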

	.macro	save_svc_regs
	str	sp, [sp, #-16]!		@ reserve the S_SP..S_OLD_R0 slots, save sp
	str	lr, [sp, #8]		@ S_PC = exception return address
	str	lr, [sp, #4]		@ S_LR
	stmfd	sp!, {r0 - r12}
	mov	r0, #-1
	str	r0, [sp, #S_OLD_R0]	@ not a system call
	zero_fp
	.endm

	.macro	save_svc_regs_irq
	str	sp, [sp, #-16]!		@ reserve the S_SP..S_OLD_R0 slots, save sp
	str	lr, [sp, #4]		@ S_LR = SVC lr
	ldr	lr, .LCirq
	ldr	lr, [lr]		@ lr_irq saved by vector_IRQ (__temp_irq)
	str	lr, [sp, #8]		@ S_PC = interrupted PC
	stmfd	sp!, {r0 - r12}
	mov	r0, #-1
	str	r0, [sp, #S_OLD_R0]	@ not a system call
	zero_fp
	.endm

	.macro	restore_svc_regs
	ldmfd	sp, {r0 - pc}^		@ ^ with pc in the list also restores the PSR
	.endm

	.macro	mask_pc, rd, rm
	bic	\rd, \rm, #PCMASK	@ strip the PSR/mode bits from a 26-bit PC value
	.endm

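@ On 26-bit ARM the PSR lives in the spare bits of r15, so interrupts are
@ masked and unmasked by reading pc, flipping PSR_I_BIT and writing the
@ result back with teqp (TEQ with the P suffix updates the PSR).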
	.macro  disable_irqs, temp
	mov     \temp, pc
	orr     \temp, \temp, #PSR_I_BIT
	teqp    \temp, #0
	.endm

	.macro	enable_irqs, temp
	mov     \temp, pc
	and     \temp, \temp, #~PSR_I_BIT
	teqp	\temp, #0
	.endm

	.macro	initialise_traps_extra
	.endm

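@ thread_info sits at the bottom of the 8K kernel stack, so clearing the low
@ 13 bits of the current stack pointer yields its address.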
	.macro	get_thread_info, rd
	mov	\rd, sp, lsr #13
	mov	\rd, \rd, lsl #13
	.endm

/*
 * These are the registers used in the syscall handler, and allow us to
 * have in theory up to 7 arguments to a function - r0 to r6.
 *
 * Note that tbl == why is intentional.
 *
 * We must set at least "tsk" and "why" when calling ret_with_reschedule.
 */
scno	.req	r7		@ syscall number
tbl	.req	r8		@ syscall table pointer
why	.req	r8		@ Linux syscall (!= 0)
tsk	.req	r9		@ current thread_info

/*
 * Get the system call number.
 */
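/*
 * On 26-bit ARM, lr holds the address of the instruction after the SWI
 * (plus the PSR bits), so masking the PSR bits and loading the word at
 * lr - 4 fetches the SWI instruction itself; its low 24 bits encode the
 * call number.
 */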
	.macro	get_scno
	mask_pc	lr, lr
	ldr	scno, [lr, #-4]		@ get SWI instruction
	.endm
/*
 *  -----------------------------------------------------------------------
 */

/*
 * We rely on the fact that R0 is at the bottom of the stack (due to
 * slow/fast restore user regs).
 */
#if S_R0
#error "Please fix"
#endif

/*
 * This is the fast syscall return path.  We do as little as
 * possible here, and this includes saving r0 back into the SVC
 * stack.
 */
ret_fast_syscall:
	disable_irqs r1				@ disable interrupts
	ldr	r1, [tsk, #TI_FLAGS]
	tst	r1, #_TIF_WORK_MASK
	bne	fast_work_pending
	fast_restore_user_regs

/*
 * Ok, we need to do extra processing, enter the slow path.
 */
fast_work_pending:
	str	r0, [sp, #S_R0+S_OFF]!		@ returned r0
work_pending:
	tst	r1, #_TIF_NEED_RESCHED
	bne	work_resched
	tst	r1, #_TIF_NOTIFY_RESUME | _TIF_SIGPENDING
	beq	no_work_pending
	mov	r0, sp				@ 'regs'
	mov	r2, why				@ 'syscall'
	bl	do_notify_resume
	disable_irqs r1				@ disable interrupts
	b	no_work_pending

work_resched:
	bl	schedule
/*
 * "slow" syscall return path.  "why" tells us if this was a real syscall.
 */
ENTRY(ret_to_user)
ret_slow_syscall:
	disable_irqs r1				@ disable interrupts
	ldr	r1, [tsk, #TI_FLAGS]
	tst	r1, #_TIF_WORK_MASK
	bne	work_pending
no_work_pending:
	slow_restore_user_regs

/*
 * This is how we return from a fork.
 */
ENTRY(ret_from_fork)
	bl	schedule_tail
	get_thread_info tsk
	ldr	r1, [tsk, #TI_FLAGS]		@ check for syscall tracing
	mov	why, #1
	tst	r1, #_TIF_SYSCALL_TRACE		@ are we tracing syscalls?
	beq	ret_slow_syscall
	mov	r1, sp
	mov	r0, #1				@ trace exit [IP = 1]
	bl	syscall_trace
	b	ret_slow_syscall

#include "calls.S"

/*=============================================================================
 * SWI handler
 *-----------------------------------------------------------------------------
 */

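/*
 * A Linux SWI arrives here with the call number encoded in the instruction.
 * The handler masks off the condition/opcode bits, EORs out the OS number
 * and either indexes sys_call_table directly (the fast path) or drops into
 * the tracing path when _TIF_SYSCALL_TRACE is set.  The return address is
 * preloaded into lr with the I bit and SVC mode forced, so an APCS-26
 * return from the sys_* routine comes straight back to ret_fast_syscall.
 */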
	.align	5
ENTRY(vector_swi)
	save_user_regs
	zero_fp
	get_scno

	enable_irqs ip

	str	r4, [sp, #-S_OFF]!		@ push fifth arg

	get_thread_info tsk
	ldr	ip, [tsk, #TI_FLAGS]		@ check for syscall tracing
	bic	scno, scno, #0xff000000		@ mask off SWI op-code
	eor	scno, scno, #OS_NUMBER << 20	@ check OS number
	adr	tbl, sys_call_table		@ load syscall table pointer
	tst	ip, #_TIF_SYSCALL_TRACE		@ are we tracing syscalls?
	bne	__sys_trace

	adral	lr, ret_fast_syscall		@ set return address
	orral	lr, lr, #PSR_I_BIT | MODE_SVC26	@ Force SVC mode on return
	cmp	scno, #NR_syscalls		@ check upper syscall limit
	ldrcc	pc, [tbl, scno, lsl #2]		@ call sys_* routine

	add	r1, sp, #S_OFF
2:	mov	why, #0				@ no longer a real syscall
	cmp	scno, #ARMSWI_OFFSET
	eor	r0, scno, #OS_NUMBER << 20	@ put OS number back
	bcs	arm_syscall
	b	sys_ni_syscall			@ not private func

	/*
	 * This is the really slow path.  We're going to be doing
	 * context switches, and waiting for our parent to respond.
	 */
__sys_trace:
	add	r1, sp, #S_OFF
	mov	r0, #0				@ trace entry [IP = 0]
	bl	syscall_trace

	adral	lr, __sys_trace_return		@ set return address
	orral	lr, lr, #PSR_I_BIT | MODE_SVC26	@ Force SVC mode on return
	add	r1, sp, #S_R0 + S_OFF		@ pointer to regs
	cmp	scno, #NR_syscalls		@ check upper syscall limit
	ldmccia	r1, {r0 - r3}			@ have to reload r0 - r3
	ldrcc	pc, [tbl, scno, lsl #2]		@ call sys_* routine
	b	2b

__sys_trace_return:
	str	r0, [sp, #S_R0 + S_OFF]!	@ save returned r0
	mov	r1, sp
	mov	r0, #1				@ trace exit [IP = 1]
	bl	syscall_trace
	b	ret_slow_syscall

	.align	5

	.type	sys_call_table, #object
ENTRY(sys_call_table)
#include "calls.S"

/*============================================================================
 * Special system call wrappers
 */
@ r0 = syscall number
@ r8 = syscall table
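@ sys_syscall implements the old indirect syscall(2): the call number arrives
@ in r0, so the real arguments are shuffled down one register and the two
@ that no longer fit in registers (r5, r6) are stored onto the stack where
@ the callee expects its fifth and sixth arguments.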
		.type	sys_syscall, #function
sys_syscall:
		eor	scno, r0, #OS_NUMBER << 20
		cmp	scno, #NR_syscalls	@ check range
		stmleia	sp, {r5, r6}		@ shuffle args
		movle	r0, r1
		movle	r1, r2
		movle	r2, r3
		movle	r3, r4
		ldrle	pc, [tbl, scno, lsl #2]
		b	sys_ni_syscall

sys_fork_wrapper:
		add	r0, sp, #S_OFF
		b	sys_fork

sys_vfork_wrapper:
		add	r0, sp, #S_OFF
		b	sys_vfork

sys_execve_wrapper:
		add	r3, sp, #S_OFF
		b	sys_execve

sys_clone_wapper:
		add	r2, sp, #S_OFF
		b	sys_clone

sys_sigsuspend_wrapper:
		add	r3, sp, #S_OFF
		b	sys_sigsuspend

sys_rt_sigsuspend_wrapper:
		add	r2, sp, #S_OFF
		b	sys_rt_sigsuspend

sys_sigreturn_wrapper:
		add	r0, sp, #S_OFF
		b	sys_sigreturn

sys_rt_sigreturn_wrapper:
		add	r0, sp, #S_OFF
		b	sys_rt_sigreturn

sys_sigaltstack_wrapper:
		ldr	r2, [sp, #S_OFF + S_SP]
		b	do_sigaltstack

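@ mmap2 passes its file offset in units of 4096 bytes; convert it to units
@ of PAGE_SIZE for do_mmap2, and reject offsets that are not a whole number
@ of pages with -EINVAL.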
sys_mmap2:
		tst	r5, #((1 << (PAGE_SHIFT - 12)) - 1)
		moveq	r5, r5, lsr #PAGE_SHIFT - 12
		streq	r5, [sp, #4]
		beq	do_mmap2
		mov	r0, #-EINVAL
		RETINSTR(mov,pc, lr)

/*
 *  Design issues:
 *   - We have several modes that each vector can be called from,
 *     each with its own set of registers.  On entry to any vector,
 *     we *must* save the registers used in *that* mode.
 *
 *   - This code must be as fast as possible.
 *
 *  There are a few restrictions on the vectors:
 *   - the SWI vector cannot be called from *any* non-user mode
 *
 *   - the FP emulator is *never* called for an undefined instruction taken
 *     in *any* non-user mode.
 *
 */

		.text

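@ handle_irq: read the IOC interrupt request registers (high-priority bank
@ first, then low), translate the request mask into an IRQ number via the
@ priority tables below, and call asm_do_IRQ with lr pointing back at the
@ top of the macro so that we keep looping until no request is pending.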
		.macro	handle_irq
1:		mov     r4, #IOC_BASE
		ldrb    r6, [r4, #0x24]            @ get high priority first
		adr     r5, irq_prio_h
		teq     r6, #0
		ldreqb  r6, [r4, #0x14]            @ get low priority
		adreq   r5, irq_prio_l

		teq     r6, #0                     @ If an IRQ happened...
		ldrneb  r0, [r5, r6]               @ get IRQ number
		movne   r1, sp                     @ get struct pt_regs
		adrne   lr, 1b                     @ Set return address to 1b
		orrne   lr, lr, #PSR_I_BIT | MODE_SVC26  @ (and force SVC mode)
		bne     asm_do_IRQ                 @ process IRQ (if asserted)
		.endm


/*
 * Interrupt table (incorporates priority)
 */
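@ Each 256-byte table below maps an 8-bit IOC request mask, as read in
@ handle_irq, to the number of the highest-priority interrupt asserted in it.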
		.macro	irq_prio_table
irq_prio_l:	.byte	 0, 0, 1, 0, 2, 2, 2, 2, 3, 3, 3, 3, 3, 3, 3, 3
		.byte	 4, 0, 1, 0, 2, 2, 2, 2, 3, 3, 3, 3, 3, 3, 3, 3
		.byte	 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5
		.byte	 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5
		.byte	 6, 6, 6, 6, 6, 6, 6, 6, 3, 3, 3, 3, 3, 3, 3, 3
		.byte	 6, 6, 6, 6, 6, 6, 6, 6, 3, 3, 3, 3, 3, 3, 3, 3
		.byte	 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5
		.byte	 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5
		.byte	 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7
		.byte	 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7
		.byte	 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7
		.byte	 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7
		.byte	 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7
		.byte	 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7
		.byte	 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7
		.byte	 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7
irq_prio_h:	.byte	 0, 8, 9, 8,10,10,10,10,11,11,11,11,10,10,10,10
		.byte	12, 8, 9, 8,10,10,10,10,11,11,11,11,10,10,10,10
		.byte	13,13,13,13,10,10,10,10,11,11,11,11,10,10,10,10
		.byte	13,13,13,13,10,10,10,10,11,11,11,11,10,10,10,10
		.byte	14,14,14,14,10,10,10,10,11,11,11,11,10,10,10,10
		.byte	14,14,14,14,10,10,10,10,11,11,11,11,10,10,10,10
		.byte	13,13,13,13,10,10,10,10,11,11,11,11,10,10,10,10
		.byte	13,13,13,13,10,10,10,10,11,11,11,11,10,10,10,10
		.byte	15,15,15,15,10,10,10,10,11,11,11,11,10,10,10,10
		.byte	15,15,15,15,10,10,10,10,11,11,11,11,10,10,10,10
		.byte	13,13,13,13,10,10,10,10,11,11,11,11,10,10,10,10
		.byte	13,13,13,13,10,10,10,10,11,11,11,11,10,10,10,10
		.byte	15,15,15,15,10,10,10,10,11,11,11,11,10,10,10,10
		.byte	15,15,15,15,10,10,10,10,11,11,11,11,10,10,10,10
		.byte	13,13,13,13,10,10,10,10,11,11,11,11,10,10,10,10
		.byte	13,13,13,13,10,10,10,10,11,11,11,11,10,10,10,10
		.endm

#define FAULT_CODE_LDRSTRPOST	0x80
#define FAULT_CODE_LDRSTRPRE	0x40
#define FAULT_CODE_LDRSTRREG	0x20
#define FAULT_CODE_LDMSTM	0x10
#define FAULT_CODE_LDCSTC	0x08
#define FAULT_CODE_PREFETCH	0x04
#define FAULT_CODE_WRITE	0x02
#define FAULT_CODE_FORCECOW	0x01

/*=============================================================================
 * Undefined FIQs
 *-----------------------------------------------------------------------------
 */
_unexp_fiq:	ldr     sp, .LCfiq
		mov	r12, #IOC_BASE
		strb	r12, [r12, #0x38]	@ Disable FIQ register
		teqp	pc, #PSR_I_BIT | PSR_F_BIT | MODE_SVC26
		mov	r0, r0
		stmfd	sp!, {r0 - r3, ip, lr}
		adr	r0, Lfiqmsg
		bl	printk
		ldmfd	sp!, {r0 - r3, ip, lr}
		teqp	pc, #PSR_I_BIT | PSR_F_BIT | MODE_FIQ26
		mov	r0, r0
		movs	pc, lr

Lfiqmsg:	.ascii	"*** Unexpected FIQ\n\0"
		.align

.LCfiq:		.word	__temp_fiq
.LCirq:		.word	__temp_irq

/*=============================================================================
 * Undefined instruction handler
 *-----------------------------------------------------------------------------
 * Handles floating point instructions
 */
vector_undefinstr:
		tst	lr, #MODE_SVC26		@ did we come from a non-user mode?
		bne	__und_svc		@ yes - deal with it.
/* Otherwise, fall through for the user-space (common) case. */
		save_user_regs
		zero_fp					@ zero frame pointer
		teqp	pc, #PSR_I_BIT | MODE_SVC26	@ disable IRQs
.Lbug_undef:
		ldr	r4, .LC2
		ldr	pc, [r4]		@ Call FP module entry point

/* The SVC mode case */
__und_svc:	save_svc_regs			@ Non-user mode
		mask_pc	r0, lr
		and	r2, lr, #3
		sub	r0, r0, #4
		mov	r1, sp
		bl	do_undefinstr
		restore_svc_regs

/* We get here if the FP emulator doesn't handle the undefined instruction.
 * If the insn WAS handled, the emulator jumps to ret_from_exception by itself.
 */
		.globl	fpundefinstr
fpundefinstr:
		mov	r0, lr
		mov	r1, sp
		teqp	pc, #MODE_SVC26
		bl	do_undefinstr
		b	ret_from_exception		@ Normal FP exit

#if defined CONFIG_FPE_NWFPE || defined CONFIG_FPE_FASTFPE
		/* The FPE is always present */
		.equ	fpe_not_present, 0
#else
fpe_not_present:
		adr	r10, wfs_mask_data
		ldmia	r10, {r4, r5, r6, r7, r8}
		ldr	r10, [sp, #S_PC]		@ Load PC
		sub	r10, r10, #4
		mask_pc	r10, r10
		ldrt	r10, [r10]			@ get instruction
		and	r5, r10, r5
		teq	r5, r4				@ Is it WFS?
		beq	ret_from_exception
		and	r5, r10, r8
		teq	r5, r6				@ Is it LDF/STF on sp or fp?
		teqne	r5, r7
		bne	fpundefinstr
		tst	r10, #0x00200000		@ Does it have WB
		beq	ret_from_exception
		and	r4, r10, #255			@ get offset
		and	r6, r10, #0x000f0000
		tst	r10, #0x00800000		@ +/-
		ldr	r5, [sp, r6, lsr #14]		@ Load reg
		rsbeq	r4, r4, #0
		add	r5, r5, r4, lsl #2
		str	r5, [sp, r6, lsr #14]		@ Save reg
		b	ret_from_exception

wfs_mask_data:	.word	0x0e200110			@ WFS/RFS
		.word	0x0fef0fff
		.word	0x0d0d0100			@ LDF [sp]/STF [sp]
		.word	0x0d0b0100			@ LDF [fp]/STF [fp]
		.word	0x0f0f0f00
#endif

.LC2:		.word	fp_enter

/*=============================================================================
 * Prefetch abort handler
 *-----------------------------------------------------------------------------
 */
#define DEBUG_UNDEF
/* remember: lr = USR pc */
vector_prefetch:
		sub	lr, lr, #4
		tst	lr, #MODE_SVC26
		bne	__pabt_invalid
		save_user_regs
		teqp	pc, #MODE_SVC26		@ Enable IRQs...
		mask_pc	r0, lr			@ Address of abort
		mov	r1, sp			@ Tasks registers
		bl	do_PrefetchAbort
		teq	r0, #0			@ If non-zero, we believe this abort..
		bne	ret_from_exception
#ifdef DEBUG_UNDEF
		adr	r0, t
		bl	printk
#endif
		ldr	lr, [sp,#S_PC]		@ FIXME - find a program to test this on.
		b	.Lbug_undef		@ I think it's broken at the moment though!

__pabt_invalid:	save_svc_regs
		mov	r0, sp			@ Prefetch aborts are definitely *not*
		mov	r1, #BAD_PREFETCH	@ allowed in non-user modes.  We can't
		and	r2, lr, #3		@ recover from this problem.
		b	bad_mode

#ifdef DEBUG_UNDEF
t:		.ascii "*** undef ***\r\n\0"
		.align
#endif

/*=============================================================================
 * Address exception handler
 *-----------------------------------------------------------------------------
 * These aren't too critical.
 * (they're not supposed to happen).
 * In order to debug the reason for address exceptions in non-user modes,
 * we have to obtain all the registers so that we can see what's going on.
 */

vector_addrexcptn:
		sub	lr, lr, #8
		tst	lr, #3
		bne	Laddrexcptn_not_user
		save_user_regs
		teq	pc, #MODE_SVC26
		mask_pc	r0, lr			@ Point to instruction
		mov	r1, sp			@ Point to registers
		mov	r2, #0x400
		mov	lr, pc
		bl	do_excpt
		b	ret_from_exception

Laddrexcptn_not_user:
		save_svc_regs
		and	r2, lr, #3
		teq	r2, #3
		bne	Laddrexcptn_illegal_mode
		teqp	pc, #MODE_SVC26
		mask_pc	r0, lr
		mov	r1, sp
		orr	r2, r2, #0x400
		bl	do_excpt
		ldmia	sp, {r0 - lr}		@ I can't remember the reason I changed this...
		add	sp, sp, #15*4
		movs	pc, lr

Laddrexcptn_illegal_mode:
		mov	r0, sp
		str	lr, [sp, #-4]!
		orr	r1, r2, #PSR_I_BIT | PSR_F_BIT
		teqp	r1, #0			@ change into mode (won't be user mode)
		mov	r0, r0
		mov	r1, r8			@ Any register from r8 - r14 can be banked
		mov	r2, r9
		mov	r3, r10
		mov	r4, r11
		mov	r5, r12
		mov	r6, r13
		mov	r7, r14
		teqp	pc, #PSR_F_BIT | MODE_SVC26 @ back to svc
		mov	r0, r0
		stmfd	sp!, {r1-r7}
		ldmia	r0, {r0-r7}
		stmfd	sp!, {r0-r7}
		mov	r0, sp
		mov	r1, #BAD_ADDREXCPTN
		b	bad_mode

/*=============================================================================
 * Interrupt (IRQ) handler
 *-----------------------------------------------------------------------------
 * Note: if the IRQ was taken whilst in user mode, then *no* kernel routine
 * is running, so we do not have to save the SVC lr.
 *
 * Entered in IRQ mode.
 */

vector_IRQ:	ldr     sp, .LCirq         @ Setup some temporary stack
		sub     lr, lr, #4
		str     lr, [sp]           @ push return address

		tst     lr, #3
		bne	__irq_non_usr

__irq_usr:	teqp	pc, #PSR_I_BIT | MODE_SVC26     @ Enter SVC mode
		mov	r0, r0

		ldr	lr, .LCirq
		ldr	lr, [lr]           @ Restore lr for jump back to USR

		save_user_regs

		handle_irq

		mov	why, #0
		get_thread_info tsk
		b	ret_to_user

@ Place the IRQ priority table here so that the handle_irq macros above
@ and below here can access it.

		irq_prio_table

__irq_non_usr:	teqp	pc, #PSR_I_BIT | MODE_SVC26     @ Enter SVC mode
		mov	r0, r0

		save_svc_regs_irq

		and	r2, lr, #3
		teq	r2, #3
		bne	__irq_invalid                @ IRQ not from SVC mode

		handle_irq

		restore_svc_regs

__irq_invalid:	mov	r0, sp
		mov	r1, #BAD_IRQ
		b	bad_mode

/*=============================================================================
 * Data abort handler code
 *-----------------------------------------------------------------------------
 *
 * This handles both exceptions from user and SVC modes, computes the address
 *  range of the problem, and does any correction that is required.  It then
 *  calls the kernel data abort routine.
 *
 * This is where I wish that the ARM would tell you which address aborted.
 */

vector_data:	sub	lr, lr, #8		@ Correct lr
		tst	lr, #3
		bne	Ldata_not_user
		save_user_regs
		teqp	pc, #MODE_SVC26
		mask_pc	r0, lr
		bl	Ldata_do
		b	ret_from_exception

Ldata_not_user:
		save_svc_regs
		and	r2, lr, #3
		teq	r2, #3
		bne	Ldata_illegal_mode
		tst	lr, #PSR_I_BIT
		teqeqp	pc, #MODE_SVC26
		mask_pc	r0, lr
		bl	Ldata_do
		restore_svc_regs

Ldata_illegal_mode:
		mov	r0, sp
		mov	r1, #BAD_DATA
		b	bad_mode

Ldata_do:	mov	r3, sp
		ldr	r4, [r0]		@ Get instruction
		mov	r2, #0
		tst	r4, #1 << 20		@ Check to see if it is a write instruction
		orreq	r2, r2, #FAULT_CODE_WRITE @ Indicate write instruction
		mov	r1, r4, lsr #22		@ Now branch to the relevant processing routine
		and	r1, r1, #15 << 2
		add	pc, pc, r1		@ dispatch on instruction bits 27:24 (pc reads
		movs	pc, lr			@  two words ahead, so index 0 is the first entry below)
		b	Ldata_unknown
		b	Ldata_unknown
		b	Ldata_unknown
		b	Ldata_unknown
		b	Ldata_ldrstr_post	@ ldr	rd, [rn], #m
		b	Ldata_ldrstr_numindex	@ ldr	rd, [rn, #m]	@ RegVal
		b	Ldata_ldrstr_post	@ ldr	rd, [rn], rm
		b	Ldata_ldrstr_regindex	@ ldr	rd, [rn, rm]
		b	Ldata_ldmstm		@ ldm*a	rn, <rlist>
		b	Ldata_ldmstm		@ ldm*b	rn, <rlist>
		b	Ldata_unknown
		b	Ldata_unknown
		b	Ldata_ldrstr_post	@ ldc	rd, [rn], #m	@ Same as ldr	rd, [rn], #m
		b	Ldata_ldcstc_pre	@ ldc	rd, [rn, #m]
		b	Ldata_unknown
Ldata_unknown:	@ Part of jumptable
		mov	r0, r1
		mov	r1, r4
		mov	r2, r3
		b	baddataabort

Ldata_ldrstr_post:
		mov	r0, r4, lsr #14		@ Get Rn
		and	r0, r0, #15 << 2	@ Mask out reg.
		teq	r0, #15 << 2
		ldr	r0, [r3, r0]		@ Get register
		biceq	r0, r0, #PCMASK
		mov	r1, r0
#ifdef FAULT_CODE_LDRSTRPOST
		orr	r2, r2, #FAULT_CODE_LDRSTRPOST
#endif
		b	do_DataAbort

Ldata_ldrstr_numindex:
		mov	r0, r4, lsr #14		@ Get Rn
		and	r0, r0, #15 << 2	@ Mask out reg.
		teq	r0, #15 << 2
		ldr	r0, [r3, r0]		@ Get register
		mov	r1, r4, lsl #20
		biceq	r0, r0, #PCMASK
		tst	r4, #1 << 23
		addne	r0, r0, r1, lsr #20
		subeq	r0, r0, r1, lsr #20
		mov	r1, r0
#ifdef FAULT_CODE_LDRSTRPRE
		orr	r2, r2, #FAULT_CODE_LDRSTRPRE
#endif
		b	do_DataAbort

Ldata_ldrstr_regindex:
		mov	r0, r4, lsr #14		@ Get Rn
		and	r0, r0, #15 << 2	@ Mask out reg.
		teq	r0, #15 << 2
		ldr	r0, [r3, r0]		@ Get register
		and	r7, r4, #15
		biceq	r0, r0, #PCMASK
		teq	r7, #15			@ Check for PC
		ldr	r7, [r3, r7, lsl #2]	@ Get Rm
		and	r8, r4, #0x60		@ Get shift types
		biceq	r7, r7, #PCMASK
		mov	r9, r4, lsr #7		@ Get shift amount
		and	r9, r9, #31
		teq	r8, #0
		moveq	r7, r7, lsl r9
		teq	r8, #0x20		@ LSR shift
		moveq	r7, r7, lsr r9
		teq	r8, #0x40		@ ASR shift
		moveq	r7, r7, asr r9
		teq	r8, #0x60		@ ROR shift
		moveq	r7, r7, ror r9
		tst	r4, #1 << 23
		addne	r0, r0, r7
		subeq	r0, r0, r7		@ Apply correction
		mov	r1, r0
#ifdef FAULT_CODE_LDRSTRREG
		orr	r2, r2, #FAULT_CODE_LDRSTRREG
#endif
		b	do_DataAbort

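@ Ldata_ldmstm needs to know how many registers the LDM/STM names: the
@ and/add sequence below is a population count of the low 16 bits of the
@ instruction, done four bit positions at a time with the 0x1111 mask.
@ That count, the base register and the U/P/W bits then give the first and
@ last addresses touched and the written-back base value.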
Ldata_ldmstm:
		mov	r7, #0x11
		orr	r7, r7, r7, lsl #8
		and	r0, r4, r7
		and	r1, r4, r7, lsl #1
		add	r0, r0, r1, lsr #1
		and	r1, r4, r7, lsl #2
		add	r0, r0, r1, lsr #2
		and	r1, r4, r7, lsl #3
		add	r0, r0, r1, lsr #3
		add	r0, r0, r0, lsr #8
		add	r0, r0, r0, lsr #4
		and	r7, r0, #15		@ r7 = no. of registers to transfer.
		mov	r5, r4, lsr #14		@ Get Rn
		and	r5, r5, #15 << 2
		ldr	r0, [r3, r5]		@ Get reg
		eor	r6, r4, r4, lsl #2
		tst	r6, #1 << 23		@ Check inc/dec ^ writeback
		rsbeq	r7, r7, #0
		add	r7, r0, r7, lsl #2	@ Do correction (signed)
		subne	r1, r7, #1
		subeq	r1, r0, #1
		moveq	r0, r7
		tst	r4, #1 << 21		@ Check writeback
		strne	r7, [r3, r5]
		eor	r6, r4, r4, lsl #1
		tst	r6, #1 << 24		@ Check Pre/Post ^ inc/dec
		addeq	r0, r0, #4
		addeq	r1, r1, #4
		teq	r5, #15*4		@ CHECK FOR PC
		biceq	r1, r1, #PCMASK
		biceq	r0, r0, #PCMASK
#ifdef FAULT_CODE_LDMSTM
		orr	r2, r2, #FAULT_CODE_LDMSTM
#endif
		b	do_DataAbort

Ldata_ldcstc_pre:
		mov	r0, r4, lsr #14		@ Get Rn
		and	r0, r0, #15 << 2	@ Mask out reg.
		teq	r0, #15 << 2
		ldr	r0, [r3, r0]		@ Get register
		mov	r1, r4, lsl #24		@ Get offset
		biceq	r0, r0, #PCMASK
		tst	r4, #1 << 23
		addne	r0, r0, r1, lsr #24
		subeq	r0, r0, r1, lsr #24
		mov	r1, r0
#ifdef FAULT_CODE_LDCSTC
		orr	r2, r2, #FAULT_CODE_LDCSTC
#endif
		b	do_DataAbort



/*
 * This is the return code to user mode for abort handlers
 */
ENTRY(ret_from_exception)
		get_thread_info tsk
		mov	why, #0
		b	ret_to_user

		.data
ENTRY(fp_enter)
		.word	fpe_not_present
		.text
/*
 * Register switch for older 26-bit only ARMs
 */
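/*
 * r0 and r1 point at the outgoing and incoming thread_info respectively:
 * the callee-saved registers, sp and lr are stashed at TI_CPU_SAVE in the
 * outgoing task and the incoming task's set is loaded straight into pc.
 * Since a 26-bit BL saves the PSR alongside the return address in lr, the
 * ^ qualifier on the final ldmia resumes the new task with its PSR intact.
 */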
ENTRY(__switch_to)
		add	r0, r0, #TI_CPU_SAVE
		stmia	r0, {r4 - sl, fp, sp, lr}
		add	r1, r1, #TI_CPU_SAVE
		ldmia	r1, {r4 - sl, fp, sp, pc}^

/*
 *=============================================================================
 *		Low-level interface code
 *-----------------------------------------------------------------------------
 *		Trap initialisation
 *-----------------------------------------------------------------------------
 *
 * Note - FIQ code has changed.  The default is a couple of words at 0x1c and
 * 0x20 that call _unexp_fiq.  However, we now copy the FIQ routine to 0x1c
 * (removes some excess cycles).
 *
 * What we need to put at 0-0x1c are branches into the kernel.
 */

		.section ".init.text",#alloc,#execinstr

.Ljump_addresses:
		swi	SYS_ERROR0
		.word	vector_undefinstr	- 12
		.word	vector_swi		- 16
		.word	vector_prefetch		- 20
		.word	vector_data		- 24
		.word	vector_addrexcptn	- 28
		.word	vector_IRQ		- 32
		.word	_unexp_fiq		- 36
		b	. + 8
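/*
 * Each .word above is a vector target minus (vector address + 8).  ORed,
 * shifted right by two, into the "b . + 8" instruction word (a branch with
 * a zero offset field) it becomes a "b vector_xxx" for that slot.
 * __trap_init writes the eight resulting words to the hardware vectors at
 * 0x0-0x1c, with the reset vector (address 0) getting the swi SYS_ERROR0
 * instead of a branch.
 */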
/*
 * initialise the trap system
 */
ENTRY(__trap_init)
		stmfd	sp!, {r4 - r7, lr}
		adr	r1, .Ljump_addresses
		ldmia	r1, {r1 - r7, ip, lr}
		orr	r2, lr, r2, lsr #2
		orr	r3, lr, r3, lsr #2
		orr	r4, lr, r4, lsr #2
		orr	r5, lr, r5, lsr #2
		orr	r6, lr, r6, lsr #2
		orr	r7, lr, r7, lsr #2
		orr	ip, lr, ip, lsr #2
		mov	r0, #0
		stmia	r0, {r1 - r7, ip}
		ldmfd	sp!, {r4 - r7, pc}^

		.bss
__temp_irq:	.space	4				@ saved lr_irq
__temp_fiq:	.space	128