1/*
2 * File:         arch/blackfin/mach-common/entry.S
3 * Based on:
4 * Author:       Linus Torvalds
5 *
6 * Created:      ?
7 * Description:  contains the system-call and fault low-level handling routines.
8 *               This also contains the timer-interrupt handler, as well as all
9 *               interrupts and faults that can result in a task-switch.
10 *
11 * Modified:
12 *               Copyright 2004-2006 Analog Devices Inc.
13 *
14 * Bugs:         Enter bugs at http://blackfin.uclinux.org/
15 *
16 * This program is free software; you can redistribute it and/or modify
17 * it under the terms of the GNU General Public License as published by
18 * the Free Software Foundation; either version 2 of the License, or
19 * (at your option) any later version.
20 *
21 * This program is distributed in the hope that it will be useful,
22 * but WITHOUT ANY WARRANTY; without even the implied warranty of
23 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
24 * GNU General Public License for more details.
25 *
26 * You should have received a copy of the GNU General Public License
27 * along with this program; if not, see the file COPYING, or write
28 * to the Free Software Foundation, Inc.,
29 * 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
30 */
31
32/*
33 * 25-Dec-2004 - LG Soft India
34 * 	1. Fix in return_from_int, to make sure any pending
35 *	system call in ILAT for this process to get
36 *	executed, otherwise in case context switch happens,
37 *	system call of first process (i.e in ILAT) will be
38 *	carried forward to the switched process.
39 *	2. Removed Constant references for the following
40 *		a.  IPEND
41 *		b.  EXCAUSE mask
42 *		c.  PAGE Mask
43 */
44
45/*
46 * NOTE: This code handles signal-recognition, which happens every time
47 * after a timer-interrupt and after each system call.
48 */
49
50
51#include <linux/linkage.h>
52#include <asm/blackfin.h>
53#include <asm/unistd.h>
54#include <asm/errno.h>
55#include <asm/thread_info.h>  /* TIF_NEED_RESCHED */
56#include <asm/asm-offsets.h>
57
58#include <asm/mach-common/context.S>
59
#ifdef CONFIG_DEBUG_BFIN_NO_KERN_HWTRACE
	/*
	 * Stop/restart the hardware trace buffer (TBUFCTL) around kernel
	 * entry code so only the user-space flow is traced.
	 * Both macros clobber P5 and R7 - callers rely only on the
	 * scratch frame pushed by _trap, so that is safe here.
	 * TODO: this should be proper save/restore, but for now
	 * we'll just cheat and use 0x1/0x13
	 */
# define DEBUG_START_HWTRACE \
	P5.l = LO(TBUFCTL); \
	P5.h = HI(TBUFCTL); \
	R7 = 0x13; \
	[P5] = R7;
# define DEBUG_STOP_HWTRACE \
	P5.l = LO(TBUFCTL); \
	P5.h = HI(TBUFCTL); \
	R7 = 0x01; \
	[P5] = R7;
#else
# define DEBUG_START_HWTRACE
# define DEBUG_STOP_HWTRACE
#endif
79
80#ifdef CONFIG_EXCPT_IRQ_SYSC_L1
81.section .l1.text
82#else
83.text
84#endif
85
/* Slightly simplified and streamlined entry point for CPLB misses.
 * This one does not lower the level to IRQ5, and thus can be used to
 * patch up CPLB misses on the kernel stack.
 */
ENTRY(_ex_dcplb)
#if defined(ANOMALY_05000261)
	/*
	 * Work around anomaly 05000261: a CPLB fault may be reported for
	 * an instruction that would not actually fault on retry.  If RETX
	 * differs from the address of the previous CPLB fault, just retry
	 * the instruction; only a repeated fault at the same address falls
	 * through to the real handler (_ex_icplb) below.
	 */
	p5.l = _last_cplb_fault_retx;
	p5.h = _last_cplb_fault_retx;
	r7 = [p5];			/* r7 = address of the previous fault */
	r6 = retx;			/* r6 = address of this fault */
	[p5] = r6;			/* remember for next time */
	cc = r6 == r7;
	if !cc jump _return_from_exception;
	/* fall through */
#endif
ENDPROC(_ex_dcplb)
102
ENTRY(_ex_icplb)
	/* CPLB miss handler: discard the scratch frame pushed by _trap,
	 * build a full register frame, and let __cplb_hdr patch up the
	 * CPLB tables, then return from the exception to retry the
	 * faulting access.
	 */
	(R7:6,P5:4) = [sp++];		/* drop _trap's scratch frame */
	ASTAT = [sp++];
	SAVE_ALL_SYS
	call __cplb_hdr;
	DEBUG_START_HWTRACE
	RESTORE_ALL_SYS
	SP = RETN;			/* back to the stack we faulted on */
	rtx;
ENDPROC(_ex_icplb)
113
ENTRY(_ex_spinlock)
	/* Atomic test-and-set service: temporarily point the IRQ15 vector
	 * at _spinlock_bh and fall into the syscall path below, which
	 * raises IRQ15.  _spinlock_bh restores the vector afterwards.
	 */
	/* Transform this into a syscall - twiddle the syscall vector.  */
	p5.l = lo(EVT15);
	p5.h = hi(EVT15);
	r7.l = _spinlock_bh;
	r7.h = _spinlock_bh;
	[p5] = r7;
	csync;
	/* Fall through.  */
ENDPROC(_ex_spinlock)
124
ENTRY(_ex_syscall)
	/* TRAP #0 (EXCAUSE 0x00): restart hardware trace, drop the scratch
	 * frame pushed by _trap, and re-raise the event as IRQ15 so the
	 * system call is serviced at a lower, interruptible level via the
	 * EVT15 vector (_evt_system_call) once we leave the exception.
	 */
	DEBUG_START_HWTRACE
	(R7:6,P5:4) = [sp++];
	ASTAT = [sp++];
	raise 15;		/* invoked by TRAP #0, for sys call */
	sp = retn;		/* restore the stack we came in on */
	rtx;			/* terminator added for consistency with the rest of the file */
ENDPROC(_ex_syscall)
133
ENTRY(_spinlock_bh)
	/* IRQ15 handler installed by _ex_spinlock: run the atomic
	 * test-and-set service (sys_bfin_spinlock) as if it were a normal
	 * system call, then return from the interrupt.
	 */
	SAVE_ALL_SYS
	/* To end up here, vector 15 was changed - so we have to change it
	 * back.
	 */
	p0.l = lo(EVT15);
	p0.h = hi(EVT15);
	p1.l = _evt_system_call;
	p1.h = _evt_system_call;
	[p0] = p1;
	csync;
	r0 = [sp + PT_R0];	/* user's argument from the saved frame */
	sp += -12;		/* outgoing-argument space for the C call */
	call _sys_bfin_spinlock;
	sp += 12;
	[SP + PT_R0] = R0;	/* result goes back into pt_regs->r0 */
	RESTORE_ALL_SYS
	rti;
ENDPROC(_spinlock_bh)
153
ENTRY(_ex_soft_bp)
	/* Software breakpoint (EXCPT 1): back RETX up over the 2-byte
	 * EXCPT instruction so the reported fault address is the
	 * breakpoint itself, then treat it as a normal trap.
	 */
	r7 = retx;
	r7 += -2;
	retx = r7;
	jump.s _ex_trap_c;
ENDPROC(_ex_soft_bp)
160
ENTRY(_ex_single_step)
	/* Hardware single-step exception.  If the step stopped exactly at
	 * the interrupted return point (RETX == RETI), nothing useful
	 * happened - just resume without reporting.
	 */
	r7 = retx;
	r6 = reti;
	cc = r7 == r6;
	if cc jump _return_from_exception;	/* terminator added for consistency */
	r7 = syscfg;
	bitclr (r7, 0);		/* clear SYSCFG bit 0 (single-step) while handling */
	syscfg = R7;

	/* If we were at IRQ5 with _exception_to_level5 installed in EVT5,
	 * the step came from the exception-handling path itself; in that
	 * case (and only then) skip the trap_c report.
	 */
	p5.l = lo(IPEND);
	p5.h = hi(IPEND);
	r6 = [p5];
	cc = bittst(r6, 5);
	if !cc jump _ex_trap_c;
	p4.l = lo(EVT5);
	p4.h = hi(EVT5);
	r6.h = _exception_to_level5;
	r6.l = _exception_to_level5;
	r7 = [p4];
	cc = r6 == r7;
	if !cc jump _ex_trap_c;

_return_from_exception:
	/* Common quick exit used by several _ex_* handlers: restart trace,
	 * pop the scratch frame pushed by _trap, and return.
	 */
	DEBUG_START_HWTRACE
#ifdef ANOMALY_05000257
	/* Anomaly 05000257 workaround: rewrite LC0/LC1 to themselves to
	 * restore consistent loop-counter state.
	 */
	R7=LC0;
	LC0=R7;
	R7=LC1;
	LC1=R7;
#endif
	(R7:6,P5:4) = [sp++];
	ASTAT = [sp++];
	sp = retn;
	rtx;
ENDPROC(_ex_single_step)	/* was mislabelled ENDPROC(_ex_soft_bp) */
196
ENTRY(_handle_bad_cplb)
	/* To get here, we just tried and failed to change a CPLB
	 * so, handle things in trap_c (C code), by lowering to
	 * IRQ5, just like we normally do. Since this is not a
	 * "normal" return path, we have to do a lot of stuff to
	 * the stack to get ready so, we can fall through - we
	 * need to make a CPLB exception look like a normal exception
	 */

	DEBUG_START_HWTRACE
	RESTORE_ALL_SYS
	[--sp] = ASTAT;			/* rebuild the small scratch frame */
	[--sp] = (R7:6, P5:4);		/* ...that _ex_trap_c expects to pop */
	/* falls through into _ex_trap_c */
210
ENTRY(_ex_trap_c)
	/* Call C code (trap_c) to handle the exception, which most
	 * likely involves sending a signal to the current process.
	 * To avoid double faults, lower our priority to IRQ5 first.
	 */
	P5.h = _exception_to_level5;
	P5.l = _exception_to_level5;
	p4.l = lo(EVT5);
	p4.h = hi(EVT5);
	[p4] = p5;		/* temporarily hijack the IRQ5 (hw error) vector */
	csync;

	/* Disable all interrupts, but make sure level 5 is enabled so
	 * we can switch to that level.  Save the old mask.  */
	cli r6;
	p4.l = _excpt_saved_imask;
	p4.h = _excpt_saved_imask;
	[p4] = r6;
	r6 = 0x3f;		/* only the low system events + IVG5 */
	sti r6;

	/* Save the excause into a circular buffer, in case the instruction
	 * which caused this exception causes others.
	 */
	P5.l = _in_ptr_excause;
	P5.h = _in_ptr_excause;
	R7 = [P5];
	R7 += 4;
	R6 = 0xF;
	R7 = R7 & R6;		/* 4-entry ring: byte offset wraps at 16 */
	[P5] = R7;
	R6.l = _excause_circ_buf;
	R6.h = _excause_circ_buf;
	R7 = R7 + R6;
	p5 = R7;
	R6 = SEQSTAT;		/* store full SEQSTAT (EXCAUSE included) */
	[P5] = R6;

	DEBUG_START_HWTRACE
	(R7:6,P5:4) = [sp++];	/* drop the scratch frame from _trap */
	ASTAT = [sp++];
	SP = RETN;
	raise 5;		/* continue in _exception_to_level5 */
	rtx;
ENDPROC(_ex_trap_c)
256
ENTRY(_exception_to_level5)
	/* IRQ5 handler temporarily installed by _ex_trap_c: drain the
	 * excause ring buffer through trap_c() at interrupt level 5,
	 * then do the normal return-from-exception work and rti.
	 */
	SAVE_ALL_SYS

	/* Restore interrupt mask.  We haven't pushed RETI, so this
	 * doesn't enable interrupts until we return from this handler.  */
	p4.l = _excpt_saved_imask;
	p4.h = _excpt_saved_imask;
	r6 = [p4];
	sti r6;

	/* Restore the hardware error vector.  */
	P5.h = _evt_ivhw;
	P5.l = _evt_ivhw;
	p4.l = lo(EVT5);
	p4.h = hi(EVT5);
	[p4] = p5;
	csync;

	p2.l = lo(IPEND);
	p2.h = hi(IPEND);
	csync;
	r0 = [p2];              /* Read current IPEND */
	[sp + PT_IPEND] = r0;   /* Store IPEND */

	/* Pop the excause from the circular buffer and push it on the stack
	 * (in the right place - if you change the location of SEQSTAT, you
	 * must change this offset.
	 */
.L_excep_to_5_again:
	P5.l = _out_ptr_excause;
	P5.h = _out_ptr_excause;
	R7 = [P5];
	R7 += 4;
	R6 = 0xF;
	R7 = R7 & R6;		/* same 16-byte wrap as the producer side */
	[P5] = R7;
	R6.l = _excause_circ_buf;
	R6.h = _excause_circ_buf;
	R7 = R7 + R6;
	P5 = R7;
	R1 = [P5];
	[SP + 8] = r1;		/* overwrite the saved SEQSTAT slot (see above) */

	r0 = sp; 	/* stack frame pt_regs pointer argument ==> r0 */
	SP += -12;
	call _trap_c;
	SP += 12;

	/* See if anything else is in the exception buffer
	 * if there is, process it
	 */
	P5.l = _out_ptr_excause;
	P5.h = _out_ptr_excause;
	P4.l = _in_ptr_excause;
	P4.h = _in_ptr_excause;
	R6 = [P5];
	R7 = [P4];
	CC = R6 == R7;		/* out == in -> ring is empty */
	if ! CC JUMP .L_excep_to_5_again

	call _ret_from_exception;
	RESTORE_ALL_SYS
	rti;
ENDPROC(_exception_to_level5)
321
ENTRY(_trap) /* Exception: 4th entry into system event table(supervisor mode)*/
	/* Since the kernel stack can be anywhere, it's not guaranteed to be
	 * covered by a CPLB.  Switch to an exception stack; use RETN as a
	 * scratch register (for want of a better option).
	 */
	retn = sp;
	sp.l = _exception_stack_top;
	sp.h = _exception_stack_top;
	/* Try to deal with syscalls quickly.  */
	[--sp] = ASTAT;
	[--sp] = (R7:6, P5:4);	/* scratch frame; every _ex_* handler pops this */
	DEBUG_STOP_HWTRACE
	r7 = SEQSTAT;		/* reason code is in bit 5:0 */
	r6.l = lo(SEQSTAT_EXCAUSE);
	r6.h = hi(SEQSTAT_EXCAUSE);
	r7 = r7 & r6;		/* isolate EXCAUSE */
	p5.h = _extable;
	p5.l = _extable;
	p4 = r7;
	p5 = p5 + (p4 << 2);	/* index the 64-entry handler table */
	p4 = [p5];
	jump (p4);		/* dispatch to the _ex_* handler */

.Lbadsys:
	/* System call number out of range: fail the call with -ENOSYS */
	r7 = -ENOSYS; 		/* signextending enough */
	[sp + PT_R0] = r7;	/* return value from system call */
	jump .Lsyscall_really_exit;
ENDPROC(_trap)
350
ENTRY(_kernel_execve)
	/* In-kernel exec: build a zeroed pt_regs in our own stack frame,
	 * call do_execve on it, and on success copy that frame to the top
	 * of the kernel stack and return into the new program via
	 * RESTORE_CONTEXT/rti.  On failure, plain rts with do_execve's
	 * error code still in r0.
	 */
	link SIZEOF_PTREGS;
	p0 = sp;
	r3 = SIZEOF_PTREGS / 4;
	r4 = 0(x);
0:
	/* zero the temporary pt_regs word by word */
	[p0++] = r4;
	r3 += -1;
	cc = r3 == 0;
	if !cc jump 0b (bp);

	p0 = sp;
	sp += -16;
	[sp + 12] = p0;		/* stack arg: pointer to the temp pt_regs */
	call _do_execve;
	SP += 16;
	cc = r0 == 0;
	if ! cc jump 1f;	/* nonzero r0 = failure */
	/* Success.  Copy our temporary pt_regs to the top of the kernel
	 * stack and do a normal exception return.
	 */
	r1 = sp;
	r0 = (-KERNEL_STACK_SIZE) (x);
	r1 = r1 & r0;		/* r1 = base of this thread's stack area */
	p2 = r1;
	p3 = [p2];		/* task pointer stored at the stack base */
	r0 = KERNEL_STACK_SIZE - 4 (z);
	p1 = r0;
	p1 = p1 + p2;		/* p1 = top word of the kernel stack */

	p0 = fp;
	r4 = [p0--];
	r3 = SIZEOF_PTREGS / 4;
0:
	/* copy pt_regs downward from our frame to the stack top */
	r4 = [p0--];
	[p1--] = r4;
	r3 += -1;
	cc = r3 == 0;
	if ! cc jump 0b (bp);

	r0 = (KERNEL_STACK_SIZE - SIZEOF_PTREGS) (z);
	p1 = r0;
	p1 = p1 + p2;
	sp = p1;		/* sp now points at the copied pt_regs */
	r0 = syscfg;
	[SP + PT_SYSCFG] = r0;
	[p3 + (TASK_THREAD + THREAD_KSP)] = sp;	/* record new kernel sp */

	RESTORE_CONTEXT;
	rti;
1:
	unlink;
	rts;
ENDPROC(_kernel_execve)
405
ENTRY(_system_call)
	/* IRQ15 system-call handler.  Syscall number arrives in P0,
	 * arguments in R0-R2 (plus R3-R5 saved in pt_regs).  Dispatches
	 * through _sys_call_table, then runs the return-to-userspace work
	 * loop (reschedule/signals) before rts'ing back to the event
	 * dispatcher with the result in pt_regs->r0.
	 */
	/* Store IPEND */
	p2.l = lo(IPEND);
	p2.h = hi(IPEND);
	csync;
	r0 = [p2];
	[sp + PT_IPEND] = r0;

	/* Store RETS for now */
	r0 = rets;
	[sp + PT_RESERVED] = r0;
	/* Set the stack for the current process */
	r7 = sp;
	r6.l = lo(ALIGN_PAGE_MASK);
	r6.h = hi(ALIGN_PAGE_MASK);
	r7 = r7 & r6;  		/* thread_info */
	p2 = r7;
	p2 = [p2];		/* first word of thread_info = task pointer */

	[p2+(TASK_THREAD+THREAD_KSP)] = sp;

	/* Check the System Call */
	r7 = __NR_syscall;
	/* System call number is passed in P0 */
	r6 = p0;
	cc = r6 < r7;
	if ! cc jump .Lbadsys;

	/* are we tracing syscalls?*/
	r7 = sp;
	r6.l = lo(ALIGN_PAGE_MASK);
	r6.h = hi(ALIGN_PAGE_MASK);
	r7 = r7 & r6;
	p2 = r7;
	r7 = [p2+TI_FLAGS];
	CC = BITTST(r7,TIF_SYSCALL_TRACE);
	if CC JUMP _sys_trace;

	/* Execute the appropriate system call */

	p4 = p0;
	p5.l = _sys_call_table;
	p5.h = _sys_call_table;
	p5 = p5 + (p4 << 2);
	r0 = [sp + PT_R0];
	r1 = [sp + PT_R1];
	r2 = [sp + PT_R2];
	p5 = [p5];

	[--sp] = r5;		/* args 4-6 are passed on the stack */
	[--sp] = r4;
	[--sp] = r3;
	SP += -12;		/* C outgoing-argument area */
	call (p5);
	SP += 24;		/* pop arg area plus r3-r5 */
	[sp + PT_R0] = r0;	/* syscall result into the saved frame */

.Lresume_userspace:
	r7 = sp;
	r4.l = lo(ALIGN_PAGE_MASK);
	r4.h = hi(ALIGN_PAGE_MASK);
	r7 = r7 & r4;		/* thread_info->flags */
	p5 = r7;
.Lresume_userspace_1:
	/* Disable interrupts.  */
	[--sp] = reti;		/* store/reload of RETI toggles nesting; */
	reti = [sp++];		/* the reload leaves interrupts disabled  */

	r7 = [p5 + TI_FLAGS];
	r4.l = lo(_TIF_WORK_MASK);
	r4.h = hi(_TIF_WORK_MASK);
	r7 =  r7 & r4;

.Lsyscall_resched:
	cc = BITTST(r7, TIF_NEED_RESCHED);
	if !cc jump .Lsyscall_sigpending;

	/* Reenable interrupts.  */
	[--sp] = reti;		/* store RETI, then discard the copy: */
	r0 = [sp++];		/* leaves interrupt nesting enabled   */

	SP += -12;
	call _schedule;
	SP += 12;

	jump .Lresume_userspace_1;	/* re-check flags after scheduling */

.Lsyscall_sigpending:
	cc = BITTST(r7, TIF_RESTORE_SIGMASK);
	if cc jump .Lsyscall_do_signals;
	cc = BITTST(r7, TIF_SIGPENDING);
	if !cc jump .Lsyscall_really_exit;
.Lsyscall_do_signals:
	/* Reenable interrupts.  */
	[--sp] = reti;
	r0 = [sp++];

	r0 = sp;		/* pt_regs pointer for do_signal */
	SP += -12;
	call _do_signal;
	SP += 12;

.Lsyscall_really_exit:
	r5 = [sp + PT_RESERVED];
	rets = r5;		/* restore the RETS we stashed on entry */
	rts;
ENDPROC(_system_call)
513
_sys_trace:
	/* Traced system call: notify the tracer before and after the call.
	 * All six possible arguments are reloaded from pt_regs because the
	 * tracer may have modified them.
	 */
	call _syscall_trace;

	/* Execute the appropriate system call */

	p4 = [SP + PT_P0];
	p5.l = _sys_call_table;
	p5.h = _sys_call_table;
	p5 = p5 + (p4 << 2);
	r0 = [sp + PT_R0];
	r1 = [sp + PT_R1];
	r2 = [sp + PT_R2];
	r3 = [sp + PT_R3];
	r4 = [sp + PT_R4];
	r5 = [sp + PT_R5];
	p5 = [p5];

	[--sp] = r5;		/* args 4-6 on the stack, as in _system_call */
	[--sp] = r4;
	[--sp] = r3;
	SP += -12;
	call (p5);
	SP += 24;
	[sp + PT_R0] = r0;

	call _syscall_trace;
	jump .Lresume_userspace;
ENDPROC(_sys_trace)
542
ENTRY(_resume)
	/*
	 * Context switch.  Beware - when entering resume, prev (the
	 * current task) is in r0, next (the new task) is in r1.
	 */
	p0 = r0;
	p1 = r1;
	[--sp] = rets;
	[--sp] = fp;
	[--sp] = (r7:4, p5:3);		/* callee-saved registers */

	/* save usp */
	p2 = usp;
	[p0+(TASK_THREAD+THREAD_USP)] = p2;

	/* save current kernel stack pointer */
	[p0+(TASK_THREAD+THREAD_KSP)] = sp;

	/* save program counter: prev will resume at _new_old_task */
	r1.l = _new_old_task;
	r1.h = _new_old_task;
	[p0+(TASK_THREAD+THREAD_PC)] = r1;

	/* restore the kernel stack pointer */
	sp = [p1+(TASK_THREAD+THREAD_KSP)];

	/* restore user stack pointer */
	p0 = [p1+(TASK_THREAD+THREAD_USP)];
	usp = p0;

	/* restore pc */
	p0 = [p1+(TASK_THREAD+THREAD_PC)];
	jump (p0);

	/*
	 * Following code actually lands up in a new (old) task.
	 */

_new_old_task:
	(r7:4, p5:3) = [sp++];
	fp = [sp++];
	rets = [sp++];

	/*
	 * When we come out of resume, r0 carries "old" task, because we are
	 * in "new" task.
	 */
	rts;
ENDPROC(_resume)
592
ENTRY(_ret_from_exception)
	/* Common exception epilogue: if we are about to return to user
	 * mode and there is pending work (reschedule/signal) or a latched
	 * IVG14/15 event, arrange for it to run before user code does.
	 */
	p2.l = lo(IPEND);
	p2.h = hi(IPEND);

	csync;
	r0 = [p2];
	[sp + PT_IPEND] = r0;

1:
	r1 = 0x37(Z);		/* low system IPEND bits to ignore */
	r2 = ~r1;
	r2.h = 0;
	r0 = r2 & r0;		/* any other level still active? */
	cc = r0 == 0;
	if !cc jump 4f;	/* if not return to user mode, get out */

	/* Make sure any pending system call or deferred exception
	 * return in ILAT for this process to get executed, otherwise
	 * in case context switch happens, system call of
	 * first process (i.e in ILAT) will be carried
	 * forward to the switched process
	 */

	p2.l = lo(ILAT);
	p2.h = hi(ILAT);
	r0 = [p2];
	r1 = (EVT_IVG14 | EVT_IVG15) (z);
	r0 = r0 & r1;
	cc = r0 == 0;
	if !cc jump 5f;		/* already latched - let it run */

	/* Set the stack for the current process */
	r7 = sp;
	r4.l = lo(ALIGN_PAGE_MASK);
	r4.h = hi(ALIGN_PAGE_MASK);
	r7 = r7 & r4;		/* thread_info->flags */
	p5 = r7;
	r7 = [p5 + TI_FLAGS];
	r4.l = lo(_TIF_WORK_MASK);
	r4.h = hi(_TIF_WORK_MASK);
	r7 =  r7 & r4;
	cc = r7 == 0;
	if cc jump 4f;		/* no pending work */

	p0.l = lo(EVT15);
	p0.h = hi(EVT15);
	p1.l = _schedule_and_signal;
	p1.h = _schedule_and_signal;
	[p0] = p1;		/* hijack EVT15 for the work handler */
	csync;
	raise 15;		/* raise evt15 to do signal or reschedule */
4:
	r0 = syscfg;
	bitclr(r0, 0);		/* clear SYSCFG bit 0 (single-step) */
	syscfg = r0;
5:
	rts;
ENDPROC(_ret_from_exception)
651
ENTRY(_return_from_int)
	/* Interrupt return path: if we are heading back to user mode,
	 * drop down to IRQ15 so signal/reschedule work can run with
	 * interrupts enabled.
	 */
	/* If someone else already raised IRQ 15, do nothing.  */
	csync;
	p2.l = lo(ILAT);
	p2.h = hi(ILAT);
	r0 = [p2];
	cc = bittst (r0, EVT_IVG15_P);
	if cc jump 2f;

	/* if not return to user mode, get out */
	p2.l = lo(IPEND);
	p2.h = hi(IPEND);
	r0 = [p2];
	r1 = 0x17(Z);		/* low system IPEND bits to ignore */
	r2 = ~r1;
	r2.h = 0;
	r0 = r2 & r0;
	r1 = 1;
	r1 = r0 - r1;
	r2 = r0 & r1;		/* r0 & (r0 - 1): clears lowest set bit */
	cc = r2 == 0;		/* zero iff at most one level is active */
	if !cc jump 2f;		/* nested interrupt - bail out */

	/* Lower the interrupt level to 15.  */
	p0.l = lo(EVT15);
	p0.h = hi(EVT15);
	p1.l = _schedule_and_signal_from_int;
	p1.h = _schedule_and_signal_from_int;
	[p0] = p1;
	csync;
#if defined(ANOMALY_05000281)
	/* Anomaly 05000281 workaround: park RETI at a known-good address */
	r0.l = lo(CONFIG_BOOT_LOAD);
	r0.h = hi(CONFIG_BOOT_LOAD);
	reti = r0;
#endif
	r0 = 0x801f (z);	/* unmask IVG15 + low system events */
	STI r0;
	raise 15;	/* raise evt15 to do signal or reschedule */
	rti;
2:
	rts;
ENDPROC(_return_from_int)
694
ENTRY(_lower_to_irq14)
	/* Drop the current event down to IRQ14: unmask IVG14, raise it
	 * and rti so execution continues in the IVG14 handler.
	 */
#if defined(ANOMALY_05000281)
	/* Anomaly 05000281 workaround: park RETI at a known-good address */
	r0.l = lo(CONFIG_BOOT_LOAD);
	r0.h = hi(CONFIG_BOOT_LOAD);
	reti = r0;
#endif
	r0 = 0x401f;		/* unmask IVG14 + low system events */
	sti r0;
	raise 14;
	rti;
ENTRY(_evt14_softirq)
	/* IVG14 entry used for soft-irq style work lowered from a higher
	 * level.
	 */
#ifdef CONFIG_DEBUG_HWERR
	r0 = 0x3f;		/* keep hardware-error events unmasked */
	sti r0;
#else
	cli r0;
#endif
	[--sp] = RETI;		/* store RETI for its side effect only... */
	SP += 4;		/* ...then discard the pushed value */
	rts;
715
_schedule_and_signal_from_int:
	/* IRQ15 handler installed by _return_from_int: restore the normal
	 * syscall vector, then run the standard reschedule/signal work via
	 * .Lresume_userspace.
	 */
	/* To end up here, vector 15 was changed - so we have to change it
	 * back.
	 */
	p0.l = lo(EVT15);
	p0.h = hi(EVT15);
	p1.l = _evt_system_call;
	p1.h = _evt_system_call;
	[p0] = p1;
	csync;

	/* Set orig_p0 to -1 to indicate this isn't the end of a syscall.  */
	r0 = -1 (x);
	[sp + PT_ORIG_P0] = r0;

	p1 = rets;
	[sp + PT_RESERVED] = p1;	/* .Lsyscall_really_exit returns here */

	p0.l = _irq_flags;
	p0.h = _irq_flags;
	r0 = [p0];
	sti r0;			/* restore the saved interrupt mask */

	jump.s .Lresume_userspace;
740
_schedule_and_signal:
	/* EVT15 handler installed by _ret_from_exception when pending
	 * work was found on the return-to-user path.
	 */
	SAVE_CONTEXT_SYSCALL
	/* To end up here, vector 15 was changed - so we have to change it
	 * back.
	 */
	p0.l = lo(EVT15);
	p0.h = hi(EVT15);
	p1.l = _evt_system_call;
	p1.h = _evt_system_call;
	[p0] = p1;
	csync;
	p0.l = 1f;
	p0.h = 1f;
	[sp + PT_RESERVED] = P0;	/* make the work loop "return" to 1: */
	call .Lresume_userspace;
1:
	RESTORE_CONTEXT
	rti;
	/* NOTE(review): this ENDPROC names _lower_to_irq14, the first ENTRY
	 * of this group of handlers; the intervening routines have no
	 * ENDPROC of their own.  Left as-is.
	 */
ENDPROC(_lower_to_irq14)
760
761/* Make sure when we start, that the circular buffer is initialized properly
762 * R0 and P0 are call clobbered, so we can use them here.
763 */
ENTRY(_init_exception_buff)
	/* Reset the exception-cause ring buffer: both the producer
	 * (_in_ptr_excause) and consumer (_out_ptr_excause) offsets go
	 * back to zero.  r0/p0 are call-clobbered, so free to use here.
	 */
	r0 = 0;
	p0.h = _out_ptr_excause;
	p0.l = _out_ptr_excause;
	[p0] = r0;
	p0.h = _in_ptr_excause;
	p0.l = _in_ptr_excause;
	[p0] = r0;
	rts;
ENDPROC(_init_exception_buff)
774
775/*
776 * Put these in the kernel data section - that should always be covered by
777 * a CPLB. This is needed to ensure we don't get double fault conditions
778 */
779
780#ifdef CONFIG_SYSCALL_TAB_L1
781.section .l1.data
782#else
783.data
784#endif
785ALIGN
_extable:
	/* entry for each EXCAUSE[5:0] - exactly 64 entries
	 * This table must be in sync with the table in ./kernel/traps.c
	 * EXCPT instruction can provide 4 bits of EXCAUSE, allowing 16 to be user defined
	 * (A duplicated 0x2E entry previously made this table 65 entries;
	 * the duplicate has been removed so indices 0x00-0x3F line up.)
	 */
	.long _ex_syscall;      /* 0x00 - User Defined - Linux Syscall */
	.long _ex_soft_bp       /* 0x01 - User Defined - Software breakpoint */
	.long _ex_trap_c        /* 0x02 - User Defined */
	.long _ex_trap_c        /* 0x03 - User Defined  - Atomic test and set service */
	.long _ex_spinlock      /* 0x04 - User Defined */
	.long _ex_trap_c        /* 0x05 - User Defined */
	.long _ex_trap_c        /* 0x06 - User Defined */
	.long _ex_trap_c        /* 0x07 - User Defined */
	.long _ex_trap_c        /* 0x08 - User Defined */
	.long _ex_trap_c        /* 0x09 - User Defined */
	.long _ex_trap_c        /* 0x0A - User Defined */
	.long _ex_trap_c        /* 0x0B - User Defined */
	.long _ex_trap_c        /* 0x0C - User Defined */
	.long _ex_trap_c        /* 0x0D - User Defined */
	.long _ex_trap_c        /* 0x0E - User Defined */
	.long _ex_trap_c        /* 0x0F - User Defined */
	.long _ex_single_step   /* 0x10 - HW Single step */
	.long _ex_trap_c        /* 0x11 - Trace Buffer Full */
	.long _ex_trap_c        /* 0x12 - Reserved */
	.long _ex_trap_c        /* 0x13 - Reserved */
	.long _ex_trap_c        /* 0x14 - Reserved */
	.long _ex_trap_c        /* 0x15 - Reserved */
	.long _ex_trap_c        /* 0x16 - Reserved */
	.long _ex_trap_c        /* 0x17 - Reserved */
	.long _ex_trap_c        /* 0x18 - Reserved */
	.long _ex_trap_c        /* 0x19 - Reserved */
	.long _ex_trap_c        /* 0x1A - Reserved */
	.long _ex_trap_c        /* 0x1B - Reserved */
	.long _ex_trap_c        /* 0x1C - Reserved */
	.long _ex_trap_c        /* 0x1D - Reserved */
	.long _ex_trap_c        /* 0x1E - Reserved */
	.long _ex_trap_c        /* 0x1F - Reserved */
	.long _ex_trap_c        /* 0x20 - Reserved */
	.long _ex_trap_c        /* 0x21 - Undefined Instruction */
	.long _ex_trap_c        /* 0x22 - Illegal Instruction Combination */
	.long _ex_dcplb         /* 0x23 - Data CPLB Protection Violation */
	.long _ex_trap_c        /* 0x24 - Data access misaligned */
	.long _ex_trap_c        /* 0x25 - Unrecoverable Event */
	.long _ex_dcplb         /* 0x26 - Data CPLB Miss */
	.long _ex_trap_c        /* 0x27 - Data CPLB Multiple Hits - Linux Trap Zero */
	.long _ex_trap_c        /* 0x28 - Emulation Watchpoint */
	.long _ex_trap_c        /* 0x29 - Instruction fetch access error (535 only) */
	.long _ex_trap_c        /* 0x2A - Instruction fetch misaligned */
	.long _ex_icplb         /* 0x2B - Instruction CPLB protection Violation */
	.long _ex_icplb         /* 0x2C - Instruction CPLB miss */
	.long _ex_trap_c        /* 0x2D - Instruction CPLB Multiple Hits */
	.long _ex_trap_c        /* 0x2E - Illegal use of Supervisor Resource */
	.long _ex_trap_c        /* 0x2F - Reserved */
	.long _ex_trap_c        /* 0x30 - Reserved */
	.long _ex_trap_c        /* 0x31 - Reserved */
	.long _ex_trap_c        /* 0x32 - Reserved */
	.long _ex_trap_c        /* 0x33 - Reserved */
	.long _ex_trap_c        /* 0x34 - Reserved */
	.long _ex_trap_c        /* 0x35 - Reserved */
	.long _ex_trap_c        /* 0x36 - Reserved */
	.long _ex_trap_c        /* 0x37 - Reserved */
	.long _ex_trap_c        /* 0x38 - Reserved */
	.long _ex_trap_c        /* 0x39 - Reserved */
	.long _ex_trap_c        /* 0x3A - Reserved */
	.long _ex_trap_c        /* 0x3B - Reserved */
	.long _ex_trap_c        /* 0x3C - Reserved */
	.long _ex_trap_c        /* 0x3D - Reserved */
	.long _ex_trap_c        /* 0x3E - Reserved */
	.long _ex_trap_c        /* 0x3F - Reserved */
856
857ALIGN
ENTRY(_sys_call_table)
	/* System call dispatch table, indexed by the syscall number in P0
	 * (see _system_call / _sys_trace).  Unimplemented slots point at
	 * _sys_ni_syscall; the trailing .rept pads the table out to
	 * NR_syscalls entries.
	 */
	.long _sys_ni_syscall	/* 0  -  old "setup()" system call*/
	.long _sys_exit
	.long _sys_fork
	.long _sys_read
	.long _sys_write
	.long _sys_open		/* 5 */
	.long _sys_close
	.long _sys_ni_syscall	/* old waitpid */
	.long _sys_creat
	.long _sys_link
	.long _sys_unlink	/* 10 */
	.long _sys_execve
	.long _sys_chdir
	.long _sys_time
	.long _sys_mknod
	.long _sys_chmod		/* 15 */
	.long _sys_chown	/* chown16 */
	.long _sys_ni_syscall	/* old break syscall holder */
	.long _sys_ni_syscall	/* old stat */
	.long _sys_lseek
	.long _sys_getpid	/* 20 */
	.long _sys_mount
	.long _sys_ni_syscall	/* old umount */
	.long _sys_setuid
	.long _sys_getuid
	.long _sys_stime		/* 25 */
	.long _sys_ptrace
	.long _sys_alarm
	.long _sys_ni_syscall	/* old fstat */
	.long _sys_pause
	.long _sys_ni_syscall	/* old utime */ /* 30 */
	.long _sys_ni_syscall	/* old stty syscall holder */
	.long _sys_ni_syscall	/* old gtty syscall holder */
	.long _sys_access
	.long _sys_nice
	.long _sys_ni_syscall	/* 35 */ /* old ftime syscall holder */
	.long _sys_sync
	.long _sys_kill
	.long _sys_rename
	.long _sys_mkdir
	.long _sys_rmdir		/* 40 */
	.long _sys_dup
	.long _sys_pipe
	.long _sys_times
	.long _sys_ni_syscall	/* old prof syscall holder */
	.long _sys_brk		/* 45 */
	.long _sys_setgid
	.long _sys_getgid
	.long _sys_ni_syscall	/* old sys_signal */
	.long _sys_geteuid	/* geteuid16 */
	.long _sys_getegid	/* getegid16 */	/* 50 */
	.long _sys_acct
	.long _sys_umount	/* recycled never used phys() */
	.long _sys_ni_syscall	/* old lock syscall holder */
	.long _sys_ioctl
	.long _sys_fcntl		/* 55 */
	.long _sys_ni_syscall	/* old mpx syscall holder */
	.long _sys_setpgid
	.long _sys_ni_syscall	/* old ulimit syscall holder */
	.long _sys_ni_syscall	/* old old uname */
	.long _sys_umask		/* 60 */
	.long _sys_chroot
	.long _sys_ustat
	.long _sys_dup2
	.long _sys_getppid
	.long _sys_getpgrp	/* 65 */
	.long _sys_setsid
	.long _sys_ni_syscall	/* old sys_sigaction */
	.long _sys_sgetmask
	.long _sys_ssetmask
	.long _sys_setreuid	/* setreuid16 */	/* 70 */
	.long _sys_setregid	/* setregid16 */
	.long _sys_ni_syscall	/* old sys_sigsuspend */
	.long _sys_ni_syscall	/* old sys_sigpending */
	.long _sys_sethostname
	.long _sys_setrlimit	/* 75 */
	.long _sys_ni_syscall	/* old getrlimit */
	.long _sys_getrusage
	.long _sys_gettimeofday
	.long _sys_settimeofday
	.long _sys_getgroups	/* getgroups16 */	/* 80 */
	.long _sys_setgroups	/* setgroups16 */
	.long _sys_ni_syscall	/* old_select */
	.long _sys_symlink
	.long _sys_ni_syscall	/* old lstat */
	.long _sys_readlink	/* 85 */
	.long _sys_uselib
	.long _sys_ni_syscall	/* sys_swapon */
	.long _sys_reboot
	.long _sys_ni_syscall	/* old_readdir */
	.long _sys_ni_syscall	/* sys_mmap */	/* 90 */
	.long _sys_munmap
	.long _sys_truncate
	.long _sys_ftruncate
	.long _sys_fchmod
	.long _sys_fchown	/* fchown16 */	/* 95 */
	.long _sys_getpriority
	.long _sys_setpriority
	.long _sys_ni_syscall	/* old profil syscall holder */
	.long _sys_statfs
	.long _sys_fstatfs	/* 100 */
	.long _sys_ni_syscall
	.long _sys_ni_syscall	/* old sys_socketcall */
	.long _sys_syslog
	.long _sys_setitimer
	.long _sys_getitimer	/* 105 */
	.long _sys_newstat
	.long _sys_newlstat
	.long _sys_newfstat
	.long _sys_ni_syscall	/* old uname */
	.long _sys_ni_syscall	/* iopl for i386 */ /* 110 */
	.long _sys_vhangup
	.long _sys_ni_syscall	/* obsolete idle() syscall */
	.long _sys_ni_syscall	/* vm86old for i386 */
	.long _sys_wait4
	.long _sys_ni_syscall	/* 115 */ /* sys_swapoff */
	.long _sys_sysinfo
	.long _sys_ni_syscall	/* old sys_ipc */
	.long _sys_fsync
	.long _sys_ni_syscall	/* old sys_sigreturn */
	.long _sys_clone		/* 120 */
	.long _sys_setdomainname
	.long _sys_newuname
	.long _sys_ni_syscall	/* old sys_modify_ldt */
	.long _sys_adjtimex
	.long _sys_ni_syscall	/* 125 */ /* sys_mprotect */
	.long _sys_ni_syscall	/* old sys_sigprocmask */
	.long _sys_ni_syscall	/* old "creat_module" */
	.long _sys_init_module
	.long _sys_delete_module
	.long _sys_ni_syscall	/* 130: old "get_kernel_syms" */
	.long _sys_quotactl
	.long _sys_getpgid
	.long _sys_fchdir
	.long _sys_bdflush
	.long _sys_ni_syscall	/* 135 */ /* sys_sysfs */
	.long _sys_personality
	.long _sys_ni_syscall	/* for afs_syscall */
	.long _sys_setfsuid	/* setfsuid16 */
	.long _sys_setfsgid	/* setfsgid16 */
	.long _sys_llseek	/* 140 */
	.long _sys_getdents
	.long _sys_ni_syscall	/* sys_select */
	.long _sys_flock
	.long _sys_ni_syscall	/* sys_msync */
	.long _sys_readv		/* 145 */
	.long _sys_writev
	.long _sys_getsid
	.long _sys_fdatasync
	.long _sys_sysctl
	.long _sys_ni_syscall	/* 150 */ /* sys_mlock */
	.long _sys_ni_syscall	/* sys_munlock */
	.long _sys_ni_syscall	/* sys_mlockall */
	.long _sys_ni_syscall	/* sys_munlockall */
	.long _sys_sched_setparam
	.long _sys_sched_getparam /* 155 */
	.long _sys_sched_setscheduler
	.long _sys_sched_getscheduler
	.long _sys_sched_yield
	.long _sys_sched_get_priority_max
	.long _sys_sched_get_priority_min  /* 160 */
	.long _sys_sched_rr_get_interval
	.long _sys_nanosleep
	.long _sys_ni_syscall	/* sys_mremap */
	.long _sys_setresuid	/* setresuid16 */
	.long _sys_getresuid	/* getresuid16 */	/* 165 */
	.long _sys_ni_syscall	/* for vm86 */
	.long _sys_ni_syscall	/* old "query_module" */
	.long _sys_ni_syscall	/* sys_poll */
	.long _sys_ni_syscall	/* sys_nfsservctl */
	.long _sys_setresgid	/* setresgid16 */	/* 170 */
	.long _sys_getresgid	/* getresgid16 */
	.long _sys_prctl
	.long _sys_rt_sigreturn
	.long _sys_rt_sigaction
	.long _sys_rt_sigprocmask /* 175 */
	.long _sys_rt_sigpending
	.long _sys_rt_sigtimedwait
	.long _sys_rt_sigqueueinfo
	.long _sys_rt_sigsuspend
	.long _sys_pread64	/* 180 */
	.long _sys_pwrite64
	.long _sys_lchown	/* lchown16 */
	.long _sys_getcwd
	.long _sys_capget
	.long _sys_capset	/* 185 */
	.long _sys_sigaltstack
	.long _sys_sendfile
	.long _sys_ni_syscall	/* streams1 */
	.long _sys_ni_syscall	/* streams2 */
	.long _sys_vfork		/* 190 */
	.long _sys_getrlimit
	.long _sys_mmap2
	.long _sys_truncate64
	.long _sys_ftruncate64
	.long _sys_stat64	/* 195 */
	.long _sys_lstat64
	.long _sys_fstat64
	.long _sys_chown
	.long _sys_getuid
	.long _sys_getgid	/* 200 */
	.long _sys_geteuid
	.long _sys_getegid
	.long _sys_setreuid
	.long _sys_setregid
	.long _sys_getgroups	/* 205 */
	.long _sys_setgroups
	.long _sys_fchown
	.long _sys_setresuid
	.long _sys_getresuid
	.long _sys_setresgid	/* 210 */
	.long _sys_getresgid
	.long _sys_lchown
	.long _sys_setuid
	.long _sys_setgid
	.long _sys_setfsuid	/* 215 */
	.long _sys_setfsgid
	.long _sys_pivot_root
	.long _sys_ni_syscall	/* sys_mincore */
	.long _sys_ni_syscall	/* sys_madvise */
	.long _sys_getdents64	/* 220 */
	.long _sys_fcntl64
	.long _sys_ni_syscall	/* reserved for TUX */
	.long _sys_ni_syscall
	.long _sys_gettid
	.long _sys_ni_syscall	/* 225 */ /* sys_readahead */
	.long _sys_setxattr
	.long _sys_lsetxattr
	.long _sys_fsetxattr
	.long _sys_getxattr
	.long _sys_lgetxattr	/* 230 */
	.long _sys_fgetxattr
	.long _sys_listxattr
	.long _sys_llistxattr
	.long _sys_flistxattr
	.long _sys_removexattr	/* 235 */
	.long _sys_lremovexattr
	.long _sys_fremovexattr
	.long _sys_tkill
	.long _sys_sendfile64
	.long _sys_futex		/* 240 */
	.long _sys_sched_setaffinity
	.long _sys_sched_getaffinity
	.long _sys_ni_syscall	/* sys_set_thread_area */
	.long _sys_ni_syscall	/* sys_get_thread_area */
	.long _sys_io_setup	/* 245 */
	.long _sys_io_destroy
	.long _sys_io_getevents
	.long _sys_io_submit
	.long _sys_io_cancel
	.long _sys_ni_syscall	/* 250 */ /* sys_alloc_hugepages */
	.long _sys_ni_syscall	/* sys_freec_hugepages */
	.long _sys_exit_group
	.long _sys_lookup_dcookie
	.long _sys_bfin_spinlock
	.long _sys_epoll_create	/* 255 */
	.long _sys_epoll_ctl
	.long _sys_epoll_wait
	.long _sys_ni_syscall /* remap_file_pages */
	.long _sys_set_tid_address
	.long _sys_timer_create	/* 260 */
	.long _sys_timer_settime
	.long _sys_timer_gettime
	.long _sys_timer_getoverrun
	.long _sys_timer_delete
	.long _sys_clock_settime /* 265 */
	.long _sys_clock_gettime
	.long _sys_clock_getres
	.long _sys_clock_nanosleep
	.long _sys_statfs64
	.long _sys_fstatfs64	/* 270 */
	.long _sys_tgkill
	.long _sys_utimes
	.long _sys_fadvise64_64
	.long _sys_ni_syscall /* vserver */
	.long _sys_ni_syscall /* 275, mbind */
	.long _sys_ni_syscall /* get_mempolicy */
	.long _sys_ni_syscall /* set_mempolicy */
	.long _sys_mq_open
	.long _sys_mq_unlink
	.long _sys_mq_timedsend	/* 280 */
	.long _sys_mq_timedreceive
	.long _sys_mq_notify
	.long _sys_mq_getsetattr
	.long _sys_ni_syscall /* kexec_load */
	.long _sys_waitid	/* 285 */
	.long _sys_add_key
	.long _sys_request_key
	.long _sys_keyctl
	.long _sys_ioprio_set
	.long _sys_ioprio_get	/* 290 */
	.long _sys_inotify_init
	.long _sys_inotify_add_watch
	.long _sys_inotify_rm_watch
	.long _sys_ni_syscall /* migrate_pages */
	.long _sys_openat	/* 295 */
	.long _sys_mkdirat
	.long _sys_mknodat
	.long _sys_fchownat
	.long _sys_futimesat
	.long _sys_fstatat64	/* 300 */
	.long _sys_unlinkat
	.long _sys_renameat
	.long _sys_linkat
	.long _sys_symlinkat
	.long _sys_readlinkat	/* 305 */
	.long _sys_fchmodat
	.long _sys_faccessat
	.long _sys_pselect6
	.long _sys_ppoll
	.long _sys_unshare	/* 310 */
	.long _sys_sram_alloc
	.long _sys_sram_free
	.long _sys_dma_memcpy
	.long _sys_accept
	.long _sys_bind		/* 315 */
	.long _sys_connect
	.long _sys_getpeername
	.long _sys_getsockname
	.long _sys_getsockopt
	.long _sys_listen	/* 320 */
	.long _sys_recv
	.long _sys_recvfrom
	.long _sys_recvmsg
	.long _sys_send
	.long _sys_sendmsg	/* 325 */
	.long _sys_sendto
	.long _sys_setsockopt
	.long _sys_shutdown
	.long _sys_socket
	.long _sys_socketpair	/* 330 */
	.long _sys_semctl
	.long _sys_semget
	.long _sys_semop
	.long _sys_msgctl
	.long _sys_msgget	/* 335 */
	.long _sys_msgrcv
	.long _sys_msgsnd
	.long _sys_shmat
	.long _sys_shmctl
	.long _sys_shmdt	/* 340 */
	.long _sys_shmget
	/* pad out to NR_syscalls entries */
	.rept NR_syscalls-(.-_sys_call_table)/4
	.long _sys_ni_syscall
	.endr
_excpt_saved_imask:
	/* IMASK saved by _ex_trap_c, restored by _exception_to_level5 */
	.long 0;
1206
_exception_stack:
	/* 4 KB private stack used while handling exceptions (see _trap) */
	.rept 1024
	.long 0;
	.endr
_exception_stack_top:
1212
#if defined(ANOMALY_05000261)
_last_cplb_fault_retx:
	/* RETX of the previous CPLB fault (anomaly 05000261 workaround,
	 * see _ex_dcplb) */
	.long 0;
#endif
1217/*
1218 * Single instructions can have multiple faults, which need to be
1219 * handled by traps.c, in irq5. We store the exception cause to ensure
1220 * we don't miss a double fault condition
1221 */
ENTRY(_in_ptr_excause)
	.long 0;		/* producer byte offset into _excause_circ_buf */
ENTRY(_out_ptr_excause)
	.long 0;		/* consumer byte offset into _excause_circ_buf */
ALIGN
ENTRY(_excause_circ_buf)
	/* 4-entry ring of saved SEQSTAT values (see _ex_trap_c and
	 * _exception_to_level5) */
	.rept 4
	.long 0
	.endr
1231