1/*
2 * This file is subject to the terms and conditions of the GNU General Public
3 * License.  See the file "COPYING" in the main directory of this archive
4 * for more details.
5 *
6 * Copyright (C) 1994, 95, 96, 99, 2001 Ralf Baechle
7 * Copyright (C) 1994, 1995, 1996 Paul M. Antoine.
8 * Copyright (C) 1999 Silicon Graphics, Inc.
9 */
10#ifndef _ASM_STACKFRAME_H
11#define _ASM_STACKFRAME_H
12
13#include <linux/threads.h>
14
15#include <asm/asm.h>
16#include <asm/asmmacro.h>
17#include <asm/mipsregs.h>
18#include <asm/asm-offsets.h>
19
20/*
21 * For SMTC kernel, global IE should be left set, and interrupts
22 * controlled exclusively via IXMT.
23 */
24#ifdef CONFIG_MIPS_MT_SMTC
25#define STATMASK 0x1e
26#elif defined(CONFIG_CPU_R3000) || defined(CONFIG_CPU_TX39XX)
27#define STATMASK 0x3f
28#else
29#define STATMASK 0x1f
30#endif
31
32#ifdef CONFIG_MIPS_MT_SMTC
33#include <asm/mipsmtregs.h>
34#endif /* CONFIG_MIPS_MT_SMTC */
35
/*
 * Save the assembler temporary ($1/at) into the pt_regs frame at sp.
 * ".set noat" keeps the assembler from silently using $1 itself while
 * we are saving it.
 */
		.macro	SAVE_AT
		.set	push
		.set	noat
		LONG_S	$1, PT_R1(sp)
		.set	pop
		.endm
42
/*
 * Save the remaining caller-saved (temporary) registers plus the
 * multiply/divide unit state (HI/LO, and ACX on SmartMIPS) into the
 * pt_regs frame at sp.  Clobbers v1, which SAVE_SOME already saved.
 */
		.macro	SAVE_TEMP
#ifdef CONFIG_CPU_HAS_SMARTMIPS
		/*
		 * SmartMIPS: each mflhxu pops LO and shifts HI/ACX down,
		 * so three successive reads drain LO, HI and ACX in turn.
		 */
		mflhxu	v1
		LONG_S	v1, PT_LO(sp)
		mflhxu	v1
		LONG_S	v1, PT_HI(sp)
		mflhxu	v1
		LONG_S	v1, PT_ACX(sp)
#else
		mfhi	v1
		LONG_S	v1, PT_HI(sp)
		mflo	v1
		LONG_S	v1, PT_LO(sp)
#endif
#ifdef CONFIG_32BIT
		/*
		 * On 64-bit kernels $8/$9 are argument registers and are
		 * saved by SAVE_SOME instead (see CONFIG_64BIT there).
		 */
		LONG_S	$8, PT_R8(sp)
		LONG_S	$9, PT_R9(sp)
#endif
		LONG_S	$10, PT_R10(sp)
		LONG_S	$11, PT_R11(sp)
		LONG_S	$12, PT_R12(sp)
		LONG_S	$13, PT_R13(sp)
		LONG_S	$14, PT_R14(sp)
		LONG_S	$15, PT_R15(sp)
		LONG_S	$24, PT_R24(sp)
		.endm
69
/*
 * Save the callee-saved registers $16-$23 (s0-s7) and $30 (s8/fp)
 * into the pt_regs frame at sp.
 */
		.macro	SAVE_STATIC
		LONG_S	$16, PT_R16(sp)
		LONG_S	$17, PT_R17(sp)
		LONG_S	$18, PT_R18(sp)
		LONG_S	$19, PT_R19(sp)
		LONG_S	$20, PT_R20(sp)
		LONG_S	$21, PT_R21(sp)
		LONG_S	$22, PT_R22(sp)
		LONG_S	$23, PT_R23(sp)
		LONG_S	$30, PT_R30(sp)
		.endm
81
#ifdef CONFIG_SMP
/*
 * On SMP the saved kernel stack pointer lives in the kernelsp[] array,
 * one slot per CPU (per TC for SMTC).  The index is recovered from the
 * upper bits of CP0 CONTEXT (or TCBIND for SMTC); the shift turns that
 * field directly into a byte offset into kernelsp[] -- presumably the
 * field is pre-scaled by the kernel for exactly this purpose (TODO
 * confirm against the CONTEXT/TCBIND setup code).
 */
#ifdef CONFIG_MIPS_MT_SMTC
#define PTEBASE_SHIFT	19	/* TCBIND */
#else
#define PTEBASE_SHIFT	23	/* CONTEXT */
#endif
		/* Result in k1; clobbers k0. */
		.macro	get_saved_sp	/* SMP variation */
#ifdef CONFIG_MIPS_MT_SMTC
		mfc0	k0, CP0_TCBIND
#else
		MFC0	k0, CP0_CONTEXT
#endif
#if defined(CONFIG_BUILD_ELF64) || (defined(CONFIG_64BIT) && __GNUC__ < 4)
		/*
		 * Build the 64-bit address of kernelsp piecewise from the
		 * %highest/%higher/%hi/%lo relocations; old toolchains and
		 * the ELF64 model need this explicit sequence.
		 */
		lui	k1, %highest(kernelsp)
		daddiu	k1, %higher(kernelsp)
		dsll	k1, 16
		daddiu	k1, %hi(kernelsp)
		dsll	k1, 16
#else
		lui	k1, %hi(kernelsp)
#endif
		LONG_SRL	k0, PTEBASE_SHIFT	/* k0 = offset into kernelsp[] */
		LONG_ADDU	k1, k0
		LONG_L	k1, %lo(kernelsp)(k1)
		.endm

		/* Record \stackp as this CPU's/TC's kernel sp; \temp2 unused. */
		.macro	set_saved_sp stackp temp temp2
#ifdef CONFIG_MIPS_MT_SMTC
		mfc0	\temp, CP0_TCBIND
#else
		MFC0	\temp, CP0_CONTEXT
#endif
		LONG_SRL	\temp, PTEBASE_SHIFT
		LONG_S	\stackp, kernelsp(\temp)
		.endm
#else
		/* Uniprocessor: kernelsp is a single variable.  Result in k1. */
		.macro	get_saved_sp	/* Uniprocessor variation */
#if defined(CONFIG_BUILD_ELF64) || (defined(CONFIG_64BIT) && __GNUC__ < 4)
		/* Same piecewise 64-bit address construction as the SMP case */
		lui	k1, %highest(kernelsp)
		daddiu	k1, %higher(kernelsp)
		dsll	k1, k1, 16
		daddiu	k1, %hi(kernelsp)
		dsll	k1, k1, 16
#else
		lui	k1, %hi(kernelsp)
#endif
		LONG_L	k1, %lo(kernelsp)(k1)
		.endm

		.macro	set_saved_sp stackp temp temp2
		LONG_S	\stackp, kernelsp
		.endm
#endif
135
/*
 * Save the minimal register set for a kernel entry into a freshly
 * carved pt_regs frame: argument/value registers, sp, gp, ra, and the
 * CP0 state needed for the eventual return (Status, Cause, EPC, plus
 * TCStatus on SMTC).  Leaves sp pointing at the new frame and gp at
 * the current thread_info.  Uses k0/k1 and v1 as scratch.
 */
		.macro	SAVE_SOME
		.set	push
		.set	noat
		.set	reorder
		mfc0	k0, CP0_STATUS
		sll	k0, 3		/* extract cu0 bit */
		.set	noreorder
		/*
		 * CU0 set is this kernel's "already on the kernel stack"
		 * marker (see CLI/STI below): if set, keep the current sp.
		 * "move k1, sp" executes in the branch delay slot.
		 */
		bltz	k0, 8f
		 move	k1, sp
		.set	reorder
		/* Called from user mode, new stack. */
		get_saved_sp
8:		move	k0, sp			/* k0 = incoming sp */
		PTR_SUBU sp, k1, PT_SIZE	/* carve pt_regs off kernel stack */
		LONG_S	k0, PT_R29(sp)
		LONG_S	$3, PT_R3(sp)
		/*
		 * You might think that you don't need to save $0,
		 * but the FPU emulator and gdb remote debug stub
		 * need it to operate correctly
		 */
		LONG_S	$0, PT_R0(sp)
		mfc0	v1, CP0_STATUS
		LONG_S	$2, PT_R2(sp)
		LONG_S	v1, PT_STATUS(sp)
#ifdef CONFIG_MIPS_MT_SMTC
		/*
		 * Ideally, these instructions would be shuffled in
		 * to cover the pipeline delay.
		 */
		.set	mips32
		mfc0	v1, CP0_TCSTATUS
		.set	mips0
		LONG_S	v1, PT_TCSTATUS(sp)
#endif /* CONFIG_MIPS_MT_SMTC */
		LONG_S	$4, PT_R4(sp)
		mfc0	v1, CP0_CAUSE
		LONG_S	$5, PT_R5(sp)
		LONG_S	v1, PT_CAUSE(sp)
		LONG_S	$6, PT_R6(sp)
		MFC0	v1, CP0_EPC
		LONG_S	$7, PT_R7(sp)
#ifdef CONFIG_64BIT
		/* $8/$9 are argument registers on 64-bit; 32-bit kernels
		   save them in SAVE_TEMP instead. */
		LONG_S	$8, PT_R8(sp)
		LONG_S	$9, PT_R9(sp)
#endif
		LONG_S	v1, PT_EPC(sp)
		LONG_S	$25, PT_R25(sp)
		LONG_S	$28, PT_R28(sp)
		LONG_S	$31, PT_R31(sp)
		/* gp = thread_info: align sp down to the stack base */
		ori	$28, sp, _THREAD_MASK
		xori	$28, _THREAD_MASK
		.set	pop
		.endm
190
/* Save the complete register state (everything pt_regs holds). */
		.macro	SAVE_ALL
		SAVE_SOME
		SAVE_AT
		SAVE_TEMP
		SAVE_STATIC
		.endm
197
/* Reload the assembler temporary ($1/at) from the pt_regs frame. */
		.macro	RESTORE_AT
		.set	push
		.set	noat
		LONG_L	$1,  PT_R1(sp)
		.set	pop
		.endm
204
/*
 * Reload the temporary registers and the multiply/divide state saved
 * by SAVE_TEMP.  $24 is used as scratch and is reloaded last.
 */
		.macro	RESTORE_TEMP
#ifdef CONFIG_CPU_HAS_SMARTMIPS
		/*
		 * SmartMIPS: each mtlhx pushes its operand into LO while
		 * shifting the old LO/HI up into HI/ACX, so writing the
		 * saved ACX, HI, LO in this order restores all three.
		 */
		LONG_L	$24, PT_ACX(sp)
		mtlhx	$24
		LONG_L	$24, PT_HI(sp)
		mtlhx	$24
		LONG_L	$24, PT_LO(sp)
		mtlhx	$24
#else
		LONG_L	$24, PT_LO(sp)
		mtlo	$24
		LONG_L	$24, PT_HI(sp)
		mthi	$24
#endif
#ifdef CONFIG_32BIT
		/* On 64-bit kernels $8/$9 are restored by RESTORE_SOME */
		LONG_L	$8, PT_R8(sp)
		LONG_L	$9, PT_R9(sp)
#endif
		LONG_L	$10, PT_R10(sp)
		LONG_L	$11, PT_R11(sp)
		LONG_L	$12, PT_R12(sp)
		LONG_L	$13, PT_R13(sp)
		LONG_L	$14, PT_R14(sp)
		LONG_L	$15, PT_R15(sp)
		LONG_L	$24, PT_R24(sp)
		.endm
231
/* Reload the callee-saved registers saved by SAVE_STATIC. */
		.macro	RESTORE_STATIC
		LONG_L	$16, PT_R16(sp)
		LONG_L	$17, PT_R17(sp)
		LONG_L	$18, PT_R18(sp)
		LONG_L	$19, PT_R19(sp)
		LONG_L	$20, PT_R20(sp)
		LONG_L	$21, PT_R21(sp)
		LONG_L	$22, PT_R22(sp)
		LONG_L	$23, PT_R23(sp)
		LONG_L	$30, PT_R30(sp)
		.endm
243
#if defined(CONFIG_CPU_R3000) || defined(CONFIG_CPU_TX39XX)

/*
 * R3000/TX39 flavour: reload the registers saved by SAVE_SOME and
 * rebuild Status for the return.  The interrupt-mask bits (0xff00)
 * are deliberately taken from the *live* Status rather than the saved
 * image, so mask changes made while in the kernel are preserved.
 */
		.macro	RESTORE_SOME
		.set	push
		.set	reorder
		.set	noat
		mfc0	a0, CP0_STATUS
		li	v1, 0xff00
		ori	a0, STATMASK
		xori	a0, STATMASK	/* clear the KU/IE stack bits... */
		mtc0	a0, CP0_STATUS	/* ...so interrupts are off meanwhile */
		and	a0, v1		/* a0 = live interrupt-mask bits */
		LONG_L	v0, PT_STATUS(sp)
		nor	v1, $0, v1
		and	v0, v1		/* saved Status minus its IM bits */
		or	v0, a0		/* merge in the live IM bits */
		mtc0	v0, CP0_STATUS
		LONG_L	$31, PT_R31(sp)
		LONG_L	$28, PT_R28(sp)
		LONG_L	$25, PT_R25(sp)
		LONG_L	$7,  PT_R7(sp)
		LONG_L	$6,  PT_R6(sp)
		LONG_L	$5,  PT_R5(sp)
		LONG_L	$4,  PT_R4(sp)
		LONG_L	$3,  PT_R3(sp)
		LONG_L	$2,  PT_R2(sp)
		.set	pop
		.endm
272
/*
 * Return from exception on R3000-class CPUs: jump to the saved EPC
 * with "rfe" in the delay slot popping the KU/IE status stack (these
 * CPUs have no eret instruction).
 */
		.macro	RESTORE_SP_AND_RET
		.set	push
		.set	noreorder
		LONG_L	k0, PT_EPC(sp)
		LONG_L	sp, PT_R29(sp)
		jr	k0
		 rfe			/* delay slot */
		.set	pop
		.endm
282
#else
/*
 * R4000-style flavour: reload the registers saved by SAVE_SOME and
 * rebuild CP0 state for the eventual eret.  As in the R3000 variant,
 * the Status interrupt-mask bits (0xff00) come from the live register,
 * not the saved image.  On SMTC, other TCs are halted (DMT) around the
 * non-atomic Status update and TCStatus.IXMT is restored only after
 * EXL/ERL are back in place.
 */
		.macro	RESTORE_SOME
		.set	push
		.set	reorder
		.set	noat
#ifdef CONFIG_MIPS_MT_SMTC
		.set	mips32r2
		/*
		 * This may not really be necessary if ints are already
		 * inhibited here.
		 */
		mfc0	v0, CP0_TCSTATUS
		ori	v0, TCSTATUS_IXMT	/* mask this TC's interrupts */
		mtc0	v0, CP0_TCSTATUS
		_ehb
		DMT	5				# dmt a1
		jal	mips_ihb
#endif /* CONFIG_MIPS_MT_SMTC */
		mfc0	a0, CP0_STATUS
		ori	a0, STATMASK
		xori	a0, STATMASK	/* clear the STATMASK state bits */
		mtc0	a0, CP0_STATUS
		li	v1, 0xff00
		and	a0, v1		/* a0 = live interrupt-mask bits */
		LONG_L	v0, PT_STATUS(sp)
		nor	v1, $0, v1
		and	v0, v1		/* saved Status minus its IM bits */
		or	v0, a0		/* merge in the live IM bits */
		mtc0	v0, CP0_STATUS
#ifdef CONFIG_MIPS_MT_SMTC
/*
 * Only after EXL/ERL have been restored to status can we
 * restore TCStatus.IXMT.
 */
		LONG_L	v1, PT_TCSTATUS(sp)
		_ehb
		mfc0	v0, CP0_TCSTATUS
		andi	v1, TCSTATUS_IXMT
		/* We know that TCStatus.IXMT should be set from above */
		xori	v0, v0, TCSTATUS_IXMT	/* clear IXMT... */
		or	v0, v0, v1		/* ...then OR in the saved bit */
		mtc0	v0, CP0_TCSTATUS
		_ehb
		/*
		 * a1 holds the pre-DMT TE bit ("dmt a1" above): re-enable
		 * multi-threading only if it was enabled before.
		 */
		andi	a1, a1, VPECONTROL_TE
		beqz	a1, 1f
		emt
1:
		.set	mips0
#endif /* CONFIG_MIPS_MT_SMTC */
		LONG_L	v1, PT_EPC(sp)
		MTC0	v1, CP0_EPC
		LONG_L	$31, PT_R31(sp)
		LONG_L	$28, PT_R28(sp)
		LONG_L	$25, PT_R25(sp)
#ifdef CONFIG_64BIT
		/* $8/$9 were saved by SAVE_SOME on 64-bit kernels */
		LONG_L	$8, PT_R8(sp)
		LONG_L	$9, PT_R9(sp)
#endif
		LONG_L	$7,  PT_R7(sp)
		LONG_L	$6,  PT_R6(sp)
		LONG_L	$5,  PT_R5(sp)
		LONG_L	$4,  PT_R4(sp)
		LONG_L	$3,  PT_R3(sp)
		LONG_L	$2,  PT_R2(sp)
		.set	pop
		.endm
349
/*
 * Return from exception on R4000-style CPUs: reload sp and eret back
 * to the EPC that RESTORE_SOME already loaded into CP0.  ".set mips3"
 * makes eret acceptable even when the file is assembled for mips1.
 */
		.macro	RESTORE_SP_AND_RET
		LONG_L	sp, PT_R29(sp)
		.set	mips3
#ifdef CONFIG_BCM47XX
		/* NOTE(review): apparently a BCM47xx core workaround
		   needing padding before eret -- confirm against errata. */
		nop
		nop
#endif /* CONFIG_BCM47XX */
		eret
		.set	mips0
		.endm

#endif
362
/* Reload the pre-exception stack pointer from the pt_regs frame. */
		.macro	RESTORE_SP
		LONG_L	sp, PT_R29(sp)
		.endm
366
/* Restore the complete register state without returning from exception. */
		.macro	RESTORE_ALL
		RESTORE_TEMP
		RESTORE_STATIC
		RESTORE_AT
		RESTORE_SOME
		RESTORE_SP
		.endm
374
/* Restore the complete register state and return from the exception. */
		.macro	RESTORE_ALL_AND_RET
		RESTORE_TEMP
		RESTORE_STATIC
		RESTORE_AT
		RESTORE_SOME
		RESTORE_SP_AND_RET
		.endm
382
383/*
384 * Move to kernel mode and disable interrupts.
385 * Set cp0 enable bit as sign that we're running on the kernel stack
386 */
387		.macro	CLI
388#if !defined(CONFIG_MIPS_MT_SMTC)
389		mfc0	t0, CP0_STATUS
390		li	t1, ST0_CU0 | STATMASK
391		or	t0, t1
392		xori	t0, STATMASK
393		mtc0	t0, CP0_STATUS
394#else /* CONFIG_MIPS_MT_SMTC */
395		/*
396		 * For SMTC, we need to set privilege
397		 * and disable interrupts only for the
398		 * current TC, using the TCStatus register.
399		 */
400		mfc0	t0,CP0_TCSTATUS
401		/* Fortunately CU 0 is in the same place in both registers */
402		/* Set TCU0, TMX, TKSU (for later inversion) and IXMT */
403		li	t1, ST0_CU0 | 0x08001c00
404		or	t0,t1
405		/* Clear TKSU, leave IXMT */
406		xori	t0, 0x00001800
407		mtc0	t0, CP0_TCSTATUS
408		_ehb
409		/* We need to leave the global IE bit set, but clear EXL...*/
410		mfc0	t0, CP0_STATUS
411		ori	t0, ST0_EXL | ST0_ERL
412		xori	t0, ST0_EXL | ST0_ERL
413		mtc0	t0, CP0_STATUS
414#endif /* CONFIG_MIPS_MT_SMTC */
415		irq_disable_hazard
416		.endm
417
418/*
419 * Move to kernel mode and enable interrupts.
420 * Set cp0 enable bit as sign that we're running on the kernel stack
421 */
422		.macro	STI
423#if !defined(CONFIG_MIPS_MT_SMTC)
424		mfc0	t0, CP0_STATUS
425		li	t1, ST0_CU0 | STATMASK
426		or	t0, t1
427		xori	t0, STATMASK & ~1
428		mtc0	t0, CP0_STATUS
429#else /* CONFIG_MIPS_MT_SMTC */
430		/*
431		 * For SMTC, we need to set privilege
432		 * and enable interrupts only for the
433		 * current TC, using the TCStatus register.
434		 */
435		_ehb
436		mfc0	t0,CP0_TCSTATUS
437		/* Fortunately CU 0 is in the same place in both registers */
438		/* Set TCU0, TKSU (for later inversion) and IXMT */
439		li	t1, ST0_CU0 | 0x08001c00
440		or	t0,t1
441		/* Clear TKSU *and* IXMT */
442		xori	t0, 0x00001c00
443		mtc0	t0, CP0_TCSTATUS
444		_ehb
445		/* We need to leave the global IE bit set, but clear EXL...*/
446		mfc0	t0, CP0_STATUS
447		ori	t0, ST0_EXL
448		xori	t0, ST0_EXL
449		mtc0	t0, CP0_STATUS
450		/* irq_enable_hazard below should expand to EHB for 24K/34K cpus */
451#endif /* CONFIG_MIPS_MT_SMTC */
452		irq_enable_hazard
453		.endm
454
455/*
456 * Just move to kernel mode and leave interrupts as they are.  Note
457 * for the R3000 this means copying the previous enable from IEp.
458 * Set cp0 enable bit as sign that we're running on the kernel stack
459 */
460		.macro	KMODE
461#ifdef CONFIG_MIPS_MT_SMTC
462		/*
463		 * This gets baroque in SMTC.  We want to
464		 * protect the non-atomic clearing of EXL
465		 * with DMT/EMT, but we don't want to take
466		 * an interrupt while DMT is still in effect.
467		 */
468
469		/* KMODE gets invoked from both reorder and noreorder code */
470		.set	push
471		.set	mips32r2
472		.set	noreorder
473		mfc0	v0, CP0_TCSTATUS
474		andi	v1, v0, TCSTATUS_IXMT
475		ori	v0, TCSTATUS_IXMT
476		mtc0	v0, CP0_TCSTATUS
477		_ehb
478		DMT	2				# dmt	v0
479		/*
480		 * We don't know a priori if ra is "live"
481		 */
482		move	t0, ra
483		jal	mips_ihb
484		nop	/* delay slot */
485		move	ra, t0
486#endif /* CONFIG_MIPS_MT_SMTC */
487		mfc0	t0, CP0_STATUS
488		li	t1, ST0_CU0 | (STATMASK & ~1)
489#if defined(CONFIG_CPU_R3000) || defined(CONFIG_CPU_TX39XX)
490		andi	t2, t0, ST0_IEP
491		srl	t2, 2
492		or	t0, t2
493#endif
494		or	t0, t1
495		xori	t0, STATMASK & ~1
496		mtc0	t0, CP0_STATUS
497#ifdef CONFIG_MIPS_MT_SMTC
498		_ehb
499		andi	v0, v0, VPECONTROL_TE
500		beqz	v0, 2f
501		nop	/* delay slot */
502		emt
5032:
504		mfc0	v0, CP0_TCSTATUS
505		/* Clear IXMT, then OR in previous value */
506		ori	v0, TCSTATUS_IXMT
507		xori	v0, TCSTATUS_IXMT
508		or	v0, v1, v0
509		mtc0	v0, CP0_TCSTATUS
510		/*
511		 * irq_disable_hazard below should expand to EHB
512		 * on 24K/34K CPUS
513		 */
514		.set pop
515#endif /* CONFIG_MIPS_MT_SMTC */
516		irq_disable_hazard
517		.endm
518
519#endif /* _ASM_STACKFRAME_H */
520