/*
 * Copyright (C) 2007-2009 Michal Simek <monstr@monstr.eu>
 * Copyright (C) 2007-2009 PetaLogix
 * Copyright (C) 2006 Atmark Techno, Inc.
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 */

#include <linux/linkage.h>
#include <asm/thread_info.h>
#include <linux/errno.h>
#include <asm/entry.h>
#include <asm/asm-offsets.h>
#include <asm/registers.h>
#include <asm/unistd.h>
#include <asm/percpu.h>
#include <asm/signal.h>

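/*
 * Interrupt enable/disable and BIP-clear helpers.  When the core provides
 * the optional msrset/msrclr instructions these flip MSR bits in a single
 * instruction; otherwise fall back to a read-modify-write of rmsr via r11.
 */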
#if CONFIG_XILINX_MICROBLAZE0_USE_MSR_INSTR
	.macro	disable_irq
	msrclr r0, MSR_IE
	.endm

	.macro	enable_irq
	msrset r0, MSR_IE
	.endm

	.macro	clear_bip
	msrclr r0, MSR_BIP
	.endm
#else
	.macro	disable_irq
	mfs r11, rmsr
	andi r11, r11, ~MSR_IE
	mts rmsr, r11
	.endm

	.macro	enable_irq
	mfs r11, rmsr
	ori r11, r11, MSR_IE
	mts rmsr, r11
	.endm

	.macro	clear_bip
	mfs r11, rmsr
	andi r11, r11, ~MSR_BIP
	mts rmsr, r11
	.endm
#endif

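/*
 * Hardware interrupt entry.  Stash the current stack pointer and r11 in
 * per-CPU scratch slots, switch to the kernel stack if we were interrupted
 * in user mode, build a pt_regs frame, then call do_IRQ() with that frame
 * and return through ret_from_intr.
 */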
ENTRY(_interrupt)
	swi	r1, r0, PER_CPU(ENTRY_SP)	/* save the current sp */
	swi	r11, r0, PER_CPU(R11_SAVE)	/* temporarily save r11 */
	lwi	r11, r0, PER_CPU(KM)		/* load mode indicator */
	beqid	r11, 1f
	nop
	brid	2f				/* jump over */
	addik	r1, r1, (-PT_SIZE)	/* room for pt_regs (delay slot) */
1:						/* switch to kernel stack */
	lwi	r1, r0, PER_CPU(CURRENT_SAVE)	/* get the saved current */
	lwi	r1, r1, TS_THREAD_INFO		/* get the thread info */
	/* calculate kernel stack pointer */
	addik	r1, r1, THREAD_SIZE - PT_SIZE
2:
	swi	r11, r1, PT_MODE		/* store the mode */
	lwi	r11, r0, PER_CPU(R11_SAVE)	/* reload r11 */
	swi	r2, r1, PT_R2
	swi	r3, r1, PT_R3
	swi	r4, r1, PT_R4
	swi	r5, r1, PT_R5
	swi	r6, r1, PT_R6
	swi	r7, r1, PT_R7
	swi	r8, r1, PT_R8
	swi	r9, r1, PT_R9
	swi	r10, r1, PT_R10
	swi	r11, r1, PT_R11
	swi	r12, r1, PT_R12
	swi	r13, r1, PT_R13
	swi	r14, r1, PT_R14
	swi	r14, r1, PT_PC
	swi	r15, r1, PT_R15
	swi	r16, r1, PT_R16
	swi	r17, r1, PT_R17
	swi	r18, r1, PT_R18
	swi	r19, r1, PT_R19
	swi	r20, r1, PT_R20
	swi	r21, r1, PT_R21
	swi	r22, r1, PT_R22
	swi	r23, r1, PT_R23
	swi	r24, r1, PT_R24
	swi	r25, r1, PT_R25
	swi	r26, r1, PT_R26
	swi	r27, r1, PT_R27
	swi	r28, r1, PT_R28
	swi	r29, r1, PT_R29
	swi	r30, r1, PT_R30
	swi	r31, r1, PT_R31
	/* special purpose registers */
	mfs	r11, rmsr
	swi	r11, r1, PT_MSR
	mfs	r11, rear
	swi	r11, r1, PT_EAR
	mfs	r11, resr
	swi	r11, r1, PT_ESR
	mfs	r11, rfsr
	swi	r11, r1, PT_FSR
	/* reload original stack pointer and save it */
	lwi	r11, r0, PER_CPU(ENTRY_SP)
	swi	r11, r1, PT_R1
	/* update mode indicator: we are in kernel mode */
	addik	r11, r0, 1
	swi	r11, r0, PER_CPU(KM)
	/* restore r31 */
	lwi	r31, r0, PER_CPU(CURRENT_SAVE)
	/* prepare the link register, the argument and jump */
	la	r15, r0, ret_from_intr - 8
	addk	r6, r0, r15
	braid	do_IRQ
	add	r5, r0, r1

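/*
 * Return path from do_IRQ.  If the interrupt came from user mode
 * (PT_MODE == 0), check for a pending reschedule and pending signals
 * before the saved context is restored.
 */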
ret_from_intr:
	lwi	r11, r1, PT_MODE
	bneid	r11, no_intr_resched

	lwi	r6, r31, TS_THREAD_INFO	/* get thread info */
	lwi	r19, r6, TI_FLAGS	/* get flags in thread info */
				/* do extra work if any bits are set */

	andi	r11, r19, _TIF_NEED_RESCHED
	beqi	r11, 1f
	bralid	r15, schedule
	nop
1:	andi	r11, r19, _TIF_SIGPENDING
	beqid	r11, no_intr_resched
	addk	r5, r1, r0
	addk	r7, r0, r0
	bralid	r15, do_signal
	addk	r6, r0, r0

no_intr_resched:
	/* Disable interrupts, we are now committed to the state restore */
	disable_irq

	/* save mode indicator */
	lwi	r11, r1, PT_MODE
	swi	r11, r0, PER_CPU(KM)

	/* save r31 */
	swi	r31, r0, PER_CPU(CURRENT_SAVE)
restore_context:
	/* special purpose registers */
	lwi	r11, r1, PT_FSR
	mts	rfsr, r11
	lwi	r11, r1, PT_ESR
	mts	resr, r11
	lwi	r11, r1, PT_EAR
	mts	rear, r11
	lwi	r11, r1, PT_MSR
	mts	rmsr, r11

	lwi	r31, r1, PT_R31
	lwi	r30, r1, PT_R30
	lwi	r29, r1, PT_R29
	lwi	r28, r1, PT_R28
	lwi	r27, r1, PT_R27
	lwi	r26, r1, PT_R26
	lwi	r25, r1, PT_R25
	lwi	r24, r1, PT_R24
	lwi	r23, r1, PT_R23
	lwi	r22, r1, PT_R22
	lwi	r21, r1, PT_R21
	lwi	r20, r1, PT_R20
	lwi	r19, r1, PT_R19
	lwi	r18, r1, PT_R18
	lwi	r17, r1, PT_R17
	lwi	r16, r1, PT_R16
	lwi	r15, r1, PT_R15
	lwi	r14, r1, PT_PC
	lwi	r13, r1, PT_R13
	lwi	r12, r1, PT_R12
	lwi	r11, r1, PT_R11
	lwi	r10, r1, PT_R10
	lwi	r9, r1, PT_R9
	lwi	r8, r1, PT_R8
	lwi	r7, r1, PT_R7
	lwi	r6, r1, PT_R6
	lwi	r5, r1, PT_R5
	lwi	r4, r1, PT_R4
	lwi	r3, r1, PT_R3
	lwi	r2, r1, PT_R2
	lwi	r1, r1, PT_R1
	rtid	r14, 0
	nop

ENTRY(_reset)
	brai	0;

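/*
 * System call entry (user exception vector).  Save the full register set
 * into a pt_regs frame on the kernel stack, mark kernel mode, then
 * dispatch to the handler indexed by the syscall number in r12 via
 * sys_call_table; the result goes back through ret_to_user.
 */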
ENTRY(_user_exception)
	swi	r1, r0, PER_CPU(ENTRY_SP)	/* save the current sp */
	swi	r11, r0, PER_CPU(R11_SAVE)	/* temporarily save r11 */
	lwi	r11, r0, PER_CPU(KM)		/* load mode indicator */
	beqid	r11, 1f				/* Already in kernel mode? */
	nop
	brid	2f				/* jump over */
	addik	r1, r1, (-PT_SIZE)	/* Room for pt_regs (delay slot) */
1:						/* Switch to kernel stack */
	lwi	r1, r0, PER_CPU(CURRENT_SAVE)	/* get the saved current */
	lwi	r1, r1, TS_THREAD_INFO		/* get the thread info */
	/* calculate kernel stack pointer */
	addik	r1, r1, THREAD_SIZE - PT_SIZE
2:
	swi	r11, r1, PT_MODE		/* store the mode */
	lwi	r11, r0, PER_CPU(R11_SAVE)	/* reload r11 */
	/* save them on stack */
	swi	r2, r1, PT_R2
	swi	r3, r1, PT_R3 /* r3: _always_ in clobber list; see unistd.h */
	swi	r4, r1, PT_R4 /* r4: _always_ in clobber list; see unistd.h */
	swi	r5, r1, PT_R5
	swi	r6, r1, PT_R6
	swi	r7, r1, PT_R7
	swi	r8, r1, PT_R8
	swi	r9, r1, PT_R9
	swi	r10, r1, PT_R10
	swi	r11, r1, PT_R11
	/* r12: _always_ in clobber list; see unistd.h */
	swi	r12, r1, PT_R12
	swi	r13, r1, PT_R13
	/* r14: _always_ in clobber list; see unistd.h */
	swi	r14, r1, PT_R14
	/* but we want to return to the next inst. */
	addik	r14, r14, 0x4
	swi	r14, r1, PT_PC		/* increment by 4 and store in pc */
	swi	r15, r1, PT_R15
	swi	r16, r1, PT_R16
	swi	r17, r1, PT_R17
	swi	r18, r1, PT_R18
	swi	r19, r1, PT_R19
	swi	r20, r1, PT_R20
	swi	r21, r1, PT_R21
	swi	r22, r1, PT_R22
	swi	r23, r1, PT_R23
	swi	r24, r1, PT_R24
	swi	r25, r1, PT_R25
	swi	r26, r1, PT_R26
	swi	r27, r1, PT_R27
	swi	r28, r1, PT_R28
	swi	r29, r1, PT_R29
	swi	r30, r1, PT_R30
	swi	r31, r1, PT_R31

	disable_irq
	nop		/* make sure IE bit is in effect */
	clear_bip	/* once IE is in effect it is safe to clear BIP */
	nop

	/* special purpose registers */
	mfs	r11, rmsr
	swi	r11, r1, PT_MSR
	mfs	r11, rear
	swi	r11, r1, PT_EAR
	mfs	r11, resr
	swi	r11, r1, PT_ESR
	mfs	r11, rfsr
	swi	r11, r1, PT_FSR
	/* reload original stack pointer and save it */
	lwi	r11, r0, PER_CPU(ENTRY_SP)
	swi	r11, r1, PT_R1
	/* update mode indicator: we are in kernel mode */
	addik	r11, r0, 1
	swi	r11, r0, PER_CPU(KM)
	/* restore r31 */
	lwi	r31, r0, PER_CPU(CURRENT_SAVE)
	/* re-enable interrupts now we are in kernel mode */
	enable_irq

	/* See if the system call number is valid. */
	addi	r11, r12, -__NR_syscalls
	bgei	r11, 1f			/* return to user if not valid */
	/* Figure out which function to use for this system call. */
	/* Note Microblaze barrel shift is optional, so don't rely on it */
	add	r12, r12, r12			/* convert num -> ptr */
	add	r12, r12, r12
	lwi	r12, r12, sys_call_table	/* Get function pointer */
	la	r15, r0, ret_to_user-8		/* set return address */
	bra	r12				/* Make the system call. */
	bri	0				/* won't reach here */
1:
	brid	ret_to_user			/* jump to syscall epilogue */
	addi	r3, r0, -ENOSYS			/* set errno in delay slot */

/*
 * Debug traps are like a system call, but entered via brki r14, 0x60.
 * All we need to do is send the SIGTRAP signal to current; ptrace and
 * do_signal will handle the rest.
 */
ENTRY(_debug_exception)
	swi	r1, r0, PER_CPU(ENTRY_SP)	/* save the current sp */
	lwi	r1, r0, PER_CPU(CURRENT_SAVE)	/* get the saved current */
	lwi	r1, r1, TS_THREAD_INFO		/* get the thread info */
	addik	r1, r1, THREAD_SIZE - PT_SIZE	/* get the kernel stack */
	swi	r11, r0, PER_CPU(R11_SAVE)	/* temporarily save r11 */
	lwi	r11, r0, PER_CPU(KM)		/* load mode indicator */
//save_context:
	swi	r11, r1, PT_MODE	/* store the mode */
	lwi	r11, r0, PER_CPU(R11_SAVE)	/* reload r11 */
	/* save them on stack */
	swi	r2, r1, PT_R2
	swi	r3, r1, PT_R3 /* r3: _always_ in clobber list; see unistd.h */
	swi	r4, r1, PT_R4 /* r4: _always_ in clobber list; see unistd.h */
	swi	r5, r1, PT_R5
	swi	r6, r1, PT_R6
	swi	r7, r1, PT_R7
	swi	r8, r1, PT_R8
	swi	r9, r1, PT_R9
	swi	r10, r1, PT_R10
	swi	r11, r1, PT_R11
	/* r12: _always_ in clobber list; see unistd.h */
	swi	r12, r1, PT_R12
	swi	r13, r1, PT_R13
	/* r14: _always_ in clobber list; see unistd.h */
	swi	r14, r1, PT_R14
	swi	r14, r1, PT_PC /* Will return to interrupted instruction */
	swi	r15, r1, PT_R15
	swi	r16, r1, PT_R16
	swi	r17, r1, PT_R17
	swi	r18, r1, PT_R18
	swi	r19, r1, PT_R19
	swi	r20, r1, PT_R20
	swi	r21, r1, PT_R21
	swi	r22, r1, PT_R22
	swi	r23, r1, PT_R23
	swi	r24, r1, PT_R24
	swi	r25, r1, PT_R25
	swi	r26, r1, PT_R26
	swi	r27, r1, PT_R27
	swi	r28, r1, PT_R28
	swi	r29, r1, PT_R29
	swi	r30, r1, PT_R30
	swi	r31, r1, PT_R31

	disable_irq
	nop		/* make sure IE bit is in effect */
	clear_bip	/* once IE is in effect it is safe to clear BIP */
	nop

	/* special purpose registers */
	mfs	r11, rmsr
	swi	r11, r1, PT_MSR
	mfs	r11, rear
	swi	r11, r1, PT_EAR
	mfs	r11, resr
	swi	r11, r1, PT_ESR
	mfs	r11, rfsr
	swi	r11, r1, PT_FSR
	/* reload original stack pointer and save it */
	lwi	r11, r0, PER_CPU(ENTRY_SP)
	swi	r11, r1, PT_R1
360	/* update mode indicator we are in kernel mode */
361	addik	r11, r0, 1
362	swi	r11, r0, PER_CPU(KM)
363	/* restore r31 */
364	lwi	r31, r0, PER_CPU(CURRENT_SAVE)
365	/* re-enable interrupts now we are in kernel mode */
366	enable_irq
367
368	addi	r5, r0, SIGTRAP			/* sending the trap signal */
369	add	r6, r0, r31			/* to current */
370	bralid	r15, send_sig
371	add	r7, r0, r0			/* 3rd param zero */
372
373	lwi	r3, r1, PT_R3
374	lwi	r4, r1, PT_R4
375	bri	ret_to_user
376
377ENTRY(_break)
378	bri	0
379
/* struct task_struct *_switch_to(struct thread_info *prev,
					struct thread_info *next); */
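/*
 * Save prev's callee-saved and special purpose registers into
 * prev->cpu_context, make next's task the per-CPU current, then restore
 * next->cpu_context.  r3 returns the task that was current on entry.
 */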
ENTRY(_switch_to)
	/* prepare return value */
	addk	r3, r0, r31

	/* save registers in cpu_context */
	/* use r11 and r12, volatile registers, as temp register */
	addik	r11, r5, TI_CPU_CONTEXT
	swi	r1, r11, CC_R1
	swi	r2, r11, CC_R2
	/* skip volatile registers.
	 * they are saved on stack when we jumped to _switch_to() */
	/* dedicated registers */
	swi	r13, r11, CC_R13
	swi	r14, r11, CC_R14
	swi	r15, r11, CC_R15
	swi	r16, r11, CC_R16
	swi	r17, r11, CC_R17
	swi	r18, r11, CC_R18
	/* save non-volatile registers */
	swi	r19, r11, CC_R19
	swi	r20, r11, CC_R20
	swi	r21, r11, CC_R21
	swi	r22, r11, CC_R22
	swi	r23, r11, CC_R23
	swi	r24, r11, CC_R24
	swi	r25, r11, CC_R25
	swi	r26, r11, CC_R26
	swi	r27, r11, CC_R27
	swi	r28, r11, CC_R28
	swi	r29, r11, CC_R29
	swi	r30, r11, CC_R30
	/* special purpose registers */
	mfs	r12, rmsr
	swi	r12, r11, CC_MSR
	mfs	r12, rear
	swi	r12, r11, CC_EAR
	mfs	r12, resr
	swi	r12, r11, CC_ESR
	mfs	r12, rfsr
	swi	r12, r11, CC_FSR

	/* update r31, the current */
	lwi	r31, r6, TI_TASK
	swi	r31, r0, PER_CPU(CURRENT_SAVE)

	/* get new process' cpu context and restore */
	addik	r11, r6, TI_CPU_CONTEXT

	/* special purpose registers */
	lwi	r12, r11, CC_FSR
	mts	rfsr, r12
	lwi	r12, r11, CC_ESR
	mts	resr, r12
	lwi	r12, r11, CC_EAR
	mts	rear, r12
	lwi	r12, r11, CC_MSR
	mts	rmsr, r12
	/* non-volatile registers */
	lwi	r30, r11, CC_R30
	lwi	r29, r11, CC_R29
	lwi	r28, r11, CC_R28
	lwi	r27, r11, CC_R27
	lwi	r26, r11, CC_R26
	lwi	r25, r11, CC_R25
	lwi	r24, r11, CC_R24
	lwi	r23, r11, CC_R23
	lwi	r22, r11, CC_R22
	lwi	r21, r11, CC_R21
	lwi	r20, r11, CC_R20
	lwi	r19, r11, CC_R19
	/* dedicated registers */
	lwi	r18, r11, CC_R18
	lwi	r17, r11, CC_R17
	lwi	r16, r11, CC_R16
	lwi	r15, r11, CC_R15
	lwi	r14, r11, CC_R14
	lwi	r13, r11, CC_R13
	/* skip volatile registers */
	lwi	r2, r11, CC_R2
	lwi	r1, r11, CC_R1

	rtsd	r15, 8
	nop

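/*
 * First return of a newly forked task: call schedule_tail() with the
 * previous task handed over by _switch_to, zero the child's return value
 * and leave through ret_to_user.
 */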
ENTRY(ret_from_fork)
	addk	r5, r0, r3
	addk	r6, r0, r1
	brlid	r15, schedule_tail
	nop
	swi	r31, r1, PT_R31		/* save r31 in user context. */
			/* will soon be restored to r31 in ret_to_user */
	addk	r3, r0, r0
	brid	ret_to_user
	nop

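/*
 * Slow path out of ret_to_user, entered when TI_FLAGS has work bits set:
 * reschedule if _TIF_NEED_RESCHED is set, deliver pending signals via
 * do_signal(), then rejoin the normal return at no_work_pending.
 */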
work_pending:
	enable_irq

	andi	r11, r19, _TIF_NEED_RESCHED
	beqi	r11, 1f
	bralid	r15, schedule
	nop
1:	andi	r11, r19, _TIF_SIGPENDING
	beqi	r11, no_work_pending
	addk	r5, r1, r0
	addik	r7, r0, 1
	bralid	r15, do_signal
	addk	r6, r0, r0
	bri	no_work_pending

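/*
 * Common exit to user space.  Store the return value registers into the
 * frame, run work_pending if any TI_FLAGS bits are set, then restore the
 * saved context and return with rtid.
 */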
ENTRY(ret_to_user)
	disable_irq

	swi	r4, r1, PT_R4		/* return val */
	swi	r3, r1, PT_R3		/* return val */

	lwi	r6, r31, TS_THREAD_INFO /* get thread info */
	lwi	r19, r6, TI_FLAGS /* get flags in thread info */
	bnei	r19, work_pending /* do extra work if any bits are set */
no_work_pending:
	disable_irq

	/* save r31 */
	swi	r31, r0, PER_CPU(CURRENT_SAVE)
	/* save mode indicator */
	lwi	r18, r1, PT_MODE
	swi	r18, r0, PER_CPU(KM)
//restore_context:
	/* special purpose registers */
	lwi	r18, r1, PT_FSR
	mts	rfsr, r18
	lwi	r18, r1, PT_ESR
	mts	resr, r18
	lwi	r18, r1, PT_EAR
	mts	rear, r18
	lwi	r18, r1, PT_MSR
	mts	rmsr, r18

	lwi	r31, r1, PT_R31
	lwi	r30, r1, PT_R30
	lwi	r29, r1, PT_R29
	lwi	r28, r1, PT_R28
	lwi	r27, r1, PT_R27
	lwi	r26, r1, PT_R26
	lwi	r25, r1, PT_R25
	lwi	r24, r1, PT_R24
	lwi	r23, r1, PT_R23
	lwi	r22, r1, PT_R22
	lwi	r21, r1, PT_R21
	lwi	r20, r1, PT_R20
	lwi	r19, r1, PT_R19
	lwi	r18, r1, PT_R18
	lwi	r17, r1, PT_R17
	lwi	r16, r1, PT_R16
	lwi	r15, r1, PT_R15
	lwi	r14, r1, PT_PC
	lwi	r13, r1, PT_R13
	lwi	r12, r1, PT_R12
	lwi	r11, r1, PT_R11
	lwi	r10, r1, PT_R10
	lwi	r9, r1, PT_R9
	lwi	r8, r1, PT_R8
	lwi	r7, r1, PT_R7
	lwi	r6, r1, PT_R6
	lwi	r5, r1, PT_R5
	lwi	r4, r1, PT_R4		/* return val */
	lwi	r3, r1, PT_R3		/* return val */
	lwi	r2, r1, PT_R2
	lwi	r1, r1, PT_R1

	rtid	r14, 0
	nop

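/*
 * Wrappers that pass a pointer to the saved pt_regs frame (r1) in the
 * argument slot each C implementation expects before branching to it.
 */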
sys_vfork:
	brid	microblaze_vfork
	addk	r5, r1, r0

sys_clone:
	brid	microblaze_clone
	addk	r7, r1, r0

sys_execve:
	brid	microblaze_execve
	addk	r8, r1, r0

sys_rt_sigreturn_wrapper:
	brid	sys_rt_sigreturn
	addk	r5, r1, r0

sys_rt_sigsuspend_wrapper:
	brid	sys_rt_sigsuspend
	addk	r7, r1, r0

	/* Interrupt vector table */
	.section	.init.ivt, "ax"
	.org 0x0
	brai	_reset
	brai	_user_exception
	brai	_interrupt
	brai	_break
	brai	_hw_exception_handler
	.org 0x60
	brai	_debug_exception

.section .rodata,"a"
#include "syscall_table.S"

syscall_table_size=(.-sys_call_table)

type_SYSCALL:
	.ascii "SYSCALL\0"
type_IRQ:
	.ascii "IRQ\0"
type_IRQ_PREEMPT:
	.ascii "IRQ (PREEMPTED)\0"
type_SYSCALL_PREEMPT:
	.ascii " SYSCALL (PREEMPTED)\0"

	/*
	 * Trap decoding for stack unwinder
	 * Tuples are (start addr, end addr, string)
	 * If the return address lies within [start addr, end addr],
	 * the unwinder displays 'string'
	 */

	.align 4
.global microblaze_trap_handlers
microblaze_trap_handlers:
	/* Exact matches come first */
	.word ret_to_user  ; .word ret_to_user    ; .word type_SYSCALL
	.word ret_from_intr; .word ret_from_intr  ; .word type_IRQ
	/* Fuzzy matches go here */
	.word ret_from_intr; .word no_intr_resched; .word type_IRQ_PREEMPT
	.word work_pending ; .word no_work_pending; .word type_SYSCALL_PREEMPT
	/* End of table */
	.word 0             ; .word 0               ; .word 0