1/*
2 * This file is subject to the terms and conditions of the GNU General Public
3 * License.  See the file "COPYING" in the main directory of this archive
4 * for more details.
5 *
6 * Copyright (C) 1994 - 2000, 2001, 2003 Ralf Baechle
7 * Copyright (C) 1999, 2000 Silicon Graphics, Inc.
8 * Copyright (C) 2001 MIPS Technologies, Inc.
9 * Copyright (C) 2002 Maciej W. Rozycki
10 */
11#include <linux/init.h>
12
13#include <asm/asm.h>
14#include <asm/asmmacro.h>
15#include <asm/cacheops.h>
16#include <asm/irqflags.h>
17#include <asm/regdef.h>
18#include <asm/fpregdef.h>
19#include <asm/mipsregs.h>
20#include <asm/stackframe.h>
21#include <asm/war.h>
22#include <asm/page.h>
23
/*
 * PANIC_PIC(msg) - position-independent panic stub used by the dummy
 * exception vectors below.  Loads the address of the message text into
 * a0 (label 8, expected to be emitted by TEXT(msg) -- TODO confirm the
 * TEXT() definition) and jumps to panic() through AT so the code works
 * no matter where the vector has been copied.  Label 9 is a tight
 * self-branch so execution can never fall past the jump.
 */
#define PANIC_PIC(msg)					\
		.set push;				\
		.set	reorder;			\
		PTR_LA	a0,8f;				\
		.set	noat;				\
		PTR_LA	AT, panic;			\
		jr	AT;				\
9:		b	9b;				\
		.set	pop;				\
		TEXT(msg)
34
35	__INIT
36
/* Dummy vector 0: any CPU that actually takes this exception panics. */
NESTED(except_vec0_generic, 0, sp)
	PANIC_PIC("Exception vector 0 called")
	END(except_vec0_generic)
40
/* Dummy vector 1: any CPU that actually takes this exception panics. */
NESTED(except_vec1_generic, 0, sp)
	PANIC_PIC("Exception vector 1 called")
	END(except_vec1_generic)
44
45/*
46 * General exception vector for all other CPUs.
47 *
48 * Be careful when changing this, it has to be at most 128 bytes
49 * to fit into space reserved for the exception handler.
50 */
NESTED(except_vec3_generic, 0, sp)
	.set	push
	.set	noat
#if R5432_CP0_INTERRUPT_WAR
	mfc0	k0, CP0_INDEX			# R5432 workaround: dummy CP0 read
#endif
#ifdef CONFIG_BCM47XX
	nop					# BCM47xx workaround padding
	nop
#endif
	mfc0	k1, CP0_CAUSE
	andi	k1, k1, 0x7c			# isolate ExcCode (Cause[6:2]), already x4
#ifdef CONFIG_64BIT
	dsll	k1, k1, 1			# x8: table entries are 64-bit pointers
#endif
	PTR_L	k0, exception_handlers(k1)	# fetch handler for this ExcCode
	jr	k0				# and dispatch to it
	.set	pop
	END(except_vec3_generic)
70
71/*
72 * General exception handler for CPUs with virtual coherency exception.
73 *
74 * Be careful when changing this, it has to be at most 256 (as a special
75 * exception) bytes to fit into space reserved for the exception handler.
76 */
NESTED(except_vec3_r4000, 0, sp)
	.set	push
	.set	mips3
	.set	noat
	mfc0	k1, CP0_CAUSE
	li	k0, 31<<2			# ExcCode 31, pre-scaled like the Cause field
	andi	k1, k1, 0x7c			# isolate ExcCode (Cause[6:2])
	.set	push
	.set	noreorder
	.set	nomacro
	beq	k1, k0, handle_vced		# ExcCode 31 -> VCE on data access
	 li	k0, 14<<2			# (delay slot) ExcCode 14, pre-scaled
	beq	k1, k0, handle_vcei		# ExcCode 14 -> VCE on instruction fetch
#ifdef CONFIG_64BIT
	 dsll	k1, k1, 1			# (delay slot) x8 for 64-bit pointer table
#endif
	.set	pop
	PTR_L	k0, exception_handlers(k1)	# anything else: normal table dispatch
	jr	k0

	/*
	 * Big shit, we now may have two dirty primary cache lines for the same
	 * physical address.  We can safely invalidate the line pointed to by
	 * c0_badvaddr because after return from this exception handler the
	 * load / store will be re-executed.
	 */
handle_vced:
	MFC0	k0, CP0_BADVADDR
	li	k1, -4					# Is this ...
	and	k0, k1					# ... really needed?
	mtc0	zero, CP0_TAGLO			# zero tag -> line becomes invalid
	cache	Index_Store_Tag_D, (k0)		# kill the primary D-cache line
	cache	Hit_Writeback_Inv_SD, (k0)	# write back + invalidate secondary line
#ifdef CONFIG_PROC_FS
	PTR_LA	k0, vced_count			# vced_count++ for /proc statistics
	lw	k1, (k0)
	addiu	k1, 1
	sw	k1, (k0)
#endif
	eret

handle_vcei:
	MFC0	k0, CP0_BADVADDR
	cache	Hit_Writeback_Inv_SD, (k0)		# also cleans pi
#ifdef CONFIG_PROC_FS
	PTR_LA	k0, vcei_count			# vcei_count++ for /proc statistics
	lw	k1, (k0)
	addiu	k1, 1
	sw	k1, (k0)
#endif
	eret
	.set	pop
	END(except_vec3_r4000)
130
131	__FINIT
132
133	.align  5
134NESTED(handle_int, PT_SIZE, sp)
135#ifdef CONFIG_TRACE_IRQFLAGS
136	/*
137	 * Check to see if the interrupted code has just disabled
138	 * interrupts and ignore this interrupt for now if so.
139	 *
140	 * local_irq_disable() disables interrupts and then calls
141	 * trace_hardirqs_off() to track the state. If an interrupt is taken
142	 * after interrupts are disabled but before the state is updated
143	 * it will appear to restore_all that it is incorrectly returning with
144	 * interrupts disabled
145	 */
146	.set	push
147	.set	noat
148	mfc0	k0, CP0_STATUS
149#if defined(CONFIG_CPU_R3000) || defined(CONFIG_CPU_TX39XX)
150	and	k0, ST0_IEP
151	bnez	k0, 1f
152
153	mfc0	k0, EP0_EPC
154	.set	noreorder
155	j	k0
156	rfe
157#else
158	and	k0, ST0_IE
159	bnez	k0, 1f
160
161	eret
162#endif
1631:
164	.set pop
165#endif
166	SAVE_ALL
167	CLI
168	TRACE_IRQS_OFF
169
170	LONG_L	s0, TI_REGS($28)
171	LONG_S	sp, TI_REGS($28)
172	PTR_LA	ra, ret_from_irq
173	j	plat_irq_dispatch
174	END(handle_int)
175
176	__INIT
177
178/*
179 * Special interrupt vector for MIPS64 ISA & embedded MIPS processors.
180 * This is a dedicated interrupt exception vector which reduces the
181 * interrupt processing overhead.  The jump instruction will be replaced
182 * at the initialization time.
183 *
184 * Be careful when changing this, it has to be at most 128 bytes
185 * to fit into space reserved for the exception handler.
186 */
NESTED(except_vec4, 0, sp)
1:	j	1b			/* Dummy, will be replaced */
	END(except_vec4)
190
191/*
192 * EJTAG debug exception handler.
193 * The EJTAG debug exception entry point is 0xbfc00480, which
194 * normally is in the boot PROM, so the boot PROM must do a
195 * unconditional jump to this vector.
196 */
NESTED(except_vec_ejtag_debug, 0, sp)
	j	ejtag_debug_handler	# trampoline to the real handler below
	END(except_vec_ejtag_debug)
200
201	__FINIT
202
203/*
204 * Vectored interrupt handler.
205 * This prototype is copied to ebase + n*IntCtl.VS and patched
206 * to invoke the handler
207 */
NESTED(except_vec_vi, 0, sp)
	SAVE_SOME
	SAVE_AT
	.set	push
	.set	noreorder
#ifdef CONFIG_MIPS_MT_SMTC
	/*
	 * To keep from blindly blocking *all* interrupts
	 * during service by SMTC kernel, we also want to
	 * pass the IM value to be cleared.
	 */
FEXPORT(except_vec_vi_mori)
	ori	a0, $0, 0	# a0 = IM mask; immediate presumably patched (label is exported) -- verify
#endif /* CONFIG_MIPS_MT_SMTC */
FEXPORT(except_vec_vi_lui)
	lui	v0, 0		/* Patched */
	j	except_vec_vi_handler
FEXPORT(except_vec_vi_ori)
	 ori	v0, 0		/* Patched */	# (delay slot) v0 = per-vector handler address
	.set	pop
	END(except_vec_vi)
EXPORT(except_vec_vi_end)
230
231/*
232 * Common Vectored Interrupt code
233 * Complete the register saves and invoke the handler which is passed in $v0
234 */
NESTED(except_vec_vi_handler, 0, sp)
	SAVE_TEMP
	SAVE_STATIC
#ifdef CONFIG_MIPS_MT_SMTC
	/*
	 * SMTC has an interesting problem that interrupts are level-triggered,
	 * and the CLI macro will clear EXL, potentially causing a duplicate
	 * interrupt service invocation. So we need to clear the associated
	 * IM bit of Status prior to doing CLI, and restore it after the
	 * service routine has been invoked - we must assume that the
	 * service routine will have cleared the state, and any active
	 * level represents a new or otherwised unserviced event...
	 */
	mfc0	t1, CP0_STATUS
	and	t0, a0, t1		# t0 = IM bits to clear (mask handed over in a0)
	mfc0	t2, CP0_TCCONTEXT
	or	t0, t0, t2		# accumulate them into TCContext ...
	mtc0	t0, CP0_TCCONTEXT	# ... so they can be restored later
	xor	t1, t1, t0
	mtc0	t1, CP0_STATUS		# clear those IM bits in Status
	_ehb				# hazard barrier before CLI touches Status
#endif /* CONFIG_MIPS_MT_SMTC */
	CLI
#ifdef CONFIG_TRACE_IRQFLAGS
	move	s0, v0			# keep handler address across TRACE_IRQS_OFF
#ifdef CONFIG_MIPS_MT_SMTC
	move	s1, a0			# keep IM mask, too
#endif
	TRACE_IRQS_OFF
#ifdef CONFIG_MIPS_MT_SMTC
	move	a0, s1
#endif
	move	v0, s0
#endif

	LONG_L	s0, TI_REGS($28)	# remember previous pt_regs pointer
	LONG_S	sp, TI_REGS($28)	# current pt_regs is on this stack
	PTR_LA	ra, ret_from_irq
	jr	v0			# tail into the patched-in handler
	END(except_vec_vi_handler)
275
276/*
277 * EJTAG debug exception handler.
278 */
NESTED(ejtag_debug_handler, PT_SIZE, sp)
	.set	push
	.set	noat
	MTC0	k0, CP0_DESAVE		# stash k0 in the debug-save register
	mfc0	k0, CP0_DEBUG

	sll	k0, k0, 30	# Check for SDBBP.
	bgez	k0, ejtag_return	# Debug bit 1 clear -> nothing to do, just deret

	PTR_LA	k0, ejtag_debug_buffer	# spill k1 to the static one-word buffer
	LONG_S	k1, 0(k0)
	SAVE_ALL
	move	a0, sp			# a0 = pt_regs for the C handler
	jal	ejtag_exception_handler
	RESTORE_ALL
	PTR_LA	k0, ejtag_debug_buffer	# reload k1
	LONG_L	k1, 0(k0)

ejtag_return:
	MFC0	k0, CP0_DESAVE		# restore original k0
	.set	mips32
	deret				# return from debug mode
	.set pop
	END(ejtag_debug_handler)
303
304/*
305 * This buffer is reserved for the use of the EJTAG debug
306 * handler.
307 */
	.data
EXPORT(ejtag_debug_buffer)
	.fill	LONGSIZE		# one long word: k1 spill slot for ejtag_debug_handler
	.previous
312
313	__INIT
314
315/*
316 * NMI debug exception handler for MIPS reference boards.
317 * The NMI debug exception entry point is 0xbfc00000, which
318 * normally is in the boot PROM, so the boot PROM must do a
319 * unconditional jump to this vector.
320 */
NESTED(except_vec_nmi, 0, sp)
	j	nmi_handler		# trampoline to the real handler below
	END(except_vec_nmi)
324
325	__FINIT
326
NESTED(nmi_handler, PT_SIZE, sp)
	.set	push
	.set	noat
	SAVE_ALL
 	move	a0, sp			# a0 = pt_regs for the C handler
	jal	nmi_exception_handler
	RESTORE_ALL
	.set	mips3
	eret				# resume the interrupted code
	.set	pop
	END(nmi_handler)
338
	/*
	 * Per-exception fixups selected by the "clear" argument of
	 * BUILD_HANDLER, run after SAVE_ALL and before the C handler.
	 */

	.macro	__build_clear_none	# no fixup required
	.endm

	.macro	__build_clear_sti	# handler may run with interrupts enabled
	TRACE_IRQS_ON
	STI
	.endm

	.macro	__build_clear_cli	# handler must run with interrupts disabled
	CLI
	TRACE_IRQS_OFF
	.endm

	.macro	__build_clear_fpe
	cfc1	a1, fcr31		# a1 = FPU control/status word for the handler
	li	a2, ~(0x3f << 12)
	and	a2, a1
	ctc1	a2, fcr31		# clear fcr31 bits 17:12 so the FPE cannot retrigger
	TRACE_IRQS_ON
	STI
	.endm

	.macro	__build_clear_ade
	MFC0	t0, CP0_BADVADDR
	PTR_S	t0, PT_BVADDR(sp)	# record the faulting address in pt_regs
	KMODE
	.endm

	.macro	__BUILD_silent exception	# verbose=silent: print nothing
	.endm
369
	/* Gas tries to parse the PRINT argument as a string containing
	   string escapes and emits bogus warnings if it believes to
	   recognize an unknown escape code.  So make the arguments
	   start with an n and gas will believe \n is ok ...  */
	.macro	__BUILD_verbose	nexception
	LONG_L	a1, PT_EPC(sp)		# a1 = EPC of the faulting instruction for the message
#ifdef CONFIG_32BIT
	PRINT("Got \nexception at %08lx\012")
#endif
#ifdef CONFIG_64BIT
	PRINT("Got \nexception at %016lx\012")
#endif
	.endm
383
384	.macro	__BUILD_count exception
385	LONG_L	t0,exception_count_\exception
386	LONG_ADDIU t0, 1
387	LONG_S	t0,exception_count_\exception
388	.comm	exception_count\exception, 8, 8
389	.endm
390
	/*
	 * Build handle_<exception>: save all registers, run the
	 * __build_clear_<clear> fixup, optionally report via
	 * __BUILD_<verbose>, then tail into do_<handler>() in C with
	 * a0 = pt_regs, returning through ret_from_exception.
	 * The \ext-suffixed label lets other entry points reuse the
	 * body after their own SAVE_ALL.
	 */
	.macro	__BUILD_HANDLER exception handler clear verbose ext
	.align	5
	NESTED(handle_\exception, PT_SIZE, sp)
	.set	noat
	SAVE_ALL
	FEXPORT(handle_\exception\ext)
	__BUILD_clear_\clear
	.set	at
	__BUILD_\verbose \exception
	move	a0, sp			# a0 = pt_regs for do_\handler
	PTR_LA	ra, ret_from_exception
	j	do_\handler		# tail call into C
	END(handle_\exception)
	.endm

	.macro	BUILD_HANDLER exception handler clear verbose
	__BUILD_HANDLER	\exception \handler \clear \verbose _int
	.endm
409
	/*
	 * Instantiate the standard handlers; the trailing #n comment on
	 * each line gives the exception code it serves.
	 */
	BUILD_HANDLER adel ade ade silent		/* #4  */
	BUILD_HANDLER ades ade ade silent		/* #5  */
	BUILD_HANDLER ibe be cli silent			/* #6  */
	BUILD_HANDLER dbe be cli silent			/* #7  */
	BUILD_HANDLER bp bp sti silent			/* #9  */
	BUILD_HANDLER ri ri sti silent			/* #10 */
	BUILD_HANDLER cpu cpu sti silent		/* #11 */
	BUILD_HANDLER ov ov sti silent			/* #12 */
	BUILD_HANDLER tr tr sti silent			/* #13 */
	BUILD_HANDLER fpe fpe fpe silent		/* #15 */
	BUILD_HANDLER mdmx mdmx sti silent		/* #22 */
	BUILD_HANDLER watch watch sti verbose		/* #23 */
	BUILD_HANDLER mcheck mcheck cli verbose		/* #24 */
	BUILD_HANDLER mt mt sti silent			/* #25 */
	BUILD_HANDLER dsp dsp sti silent		/* #26 */
	BUILD_HANDLER reserved reserved sti verbose	/* others */
426
	.align	5
	/*
	 * RI fast-path entry used when the cache is virtually tagged
	 * (per the name -- TODO confirm caller): make sure the TLB holds
	 * a mapping for EPC before handle_ri_rdhwr loads the faulting
	 * instruction through it; otherwise take the generic slow path.
	 */
	LEAF(handle_ri_rdhwr_vivt)
#ifdef CONFIG_MIPS_MT_SMTC
	PANIC_PIC("handle_ri_rdhwr_vivt called")
#else
	.set	push
	.set	noat
	.set	noreorder
	/* check if TLB contains a entry for EPC */
	MFC0	k1, CP0_ENTRYHI
	andi	k1, 0xff	/* ASID_MASK */
	MFC0	k0, CP0_EPC
	PTR_SRL	k0, PAGE_SHIFT + 1	# round EPC down to the even page ...
	PTR_SLL	k0, PAGE_SHIFT + 1	# ... of the pair (VPN2)
	or	k1, k0			# EntryHi = VPN2 | current ASID
	MTC0	k1, CP0_ENTRYHI
	mtc0_tlbw_hazard
	tlbp				# probe the TLB for that address
	tlb_probe_hazard
	mfc0	k1, CP0_INDEX
	.set	pop
	bltz	k1, handle_ri	/* slow path */
	/* fall thru */
#endif
	END(handle_ri_rdhwr_vivt)
452
	/*
	 * RI fast path: if the faulting instruction is exactly
	 * "rdhwr v1,$29" (encoding 0x7c03e83b), emulate it by loading
	 * the saved thread pointer into v1 and resuming at EPC+4;
	 * anything else falls back to the generic handle_ri.
	 */
	LEAF(handle_ri_rdhwr)
	.set	push
	.set	noat
	.set	noreorder
	/* 0x7c03e83b: rdhwr v1,$29 */
	MFC0	k1, CP0_EPC		# k1 = address of the faulting instruction
	lui	k0, 0x7c03		# build the expected encoding in k0 ...
	lw	k1, (k1)		# ... while the actual instruction loads
	ori	k0, 0xe83b
	.set	reorder
	bne	k0, k1, handle_ri	/* if not ours */
	/* The insn is rdhwr.  No need to check CAUSE.BD here. */
	get_saved_sp	/* k1 := current_thread_info */
	.set	noreorder
	MFC0	k0, CP0_EPC
#if defined(CONFIG_CPU_R3000) || defined(CONFIG_CPU_TX39XX)
	ori	k1, _THREAD_MASK	# round k1 down ...
	xori	k1, _THREAD_MASK	# ... to the thread_info base
	LONG_L	v1, TI_TP_VALUE(k1)	# emulate: v1 = saved thread pointer
	LONG_ADDIU	k0, 4
	jr	k0			# resume at EPC + 4
	 rfe
#else
	LONG_ADDIU	k0, 4		/* stall on $k0 */
	MTC0	k0, CP0_EPC		# resume at EPC + 4
	/* I hope three instructions between MTC0 and ERET are enough... */
	ori	k1, _THREAD_MASK	# round k1 down ...
	xori	k1, _THREAD_MASK	# ... to the thread_info base
	LONG_L	v1, TI_TP_VALUE(k1)	# emulate: v1 = saved thread pointer
	.set	mips3
	eret
	.set	mips0
#endif
	.set	pop
	END(handle_ri_rdhwr)
488
#ifdef CONFIG_64BIT
/* A temporary overflow handler used by check_daddi(). */

	__INIT

	/* Overflow (#12) handler with no fixup and no message. */
	BUILD_HANDLER  daddi_ov daddi_ov none silent	/* #12 */
#endif
496