/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1994, 95, 96, 97, 98, 99, 2003 by Ralf Baechle
 * Copyright (C) 1996 by Paul M. Antoine
 * Copyright (C) 1999 Silicon Graphics
 * Copyright (C) 2000 MIPS Technologies, Inc.
 */
#ifndef _ASM_IRQFLAGS_H
#define _ASM_IRQFLAGS_H

#ifndef __ASSEMBLY__

#include <linux/compiler.h>
#include <asm/hazards.h>
18
19__asm__ (
20	"	.macro	raw_local_irq_enable				\n"
21	"	.set	push						\n"
22	"	.set	reorder						\n"
23	"	.set	noat						\n"
24#ifdef CONFIG_MIPS_MT_SMTC
25	"	mfc0	$1, $2, 1	# SMTC - clear TCStatus.IXMT	\n"
26	"	ori	$1, 0x400					\n"
27	"	xori	$1, 0x400					\n"
28	"	mtc0	$1, $2, 1					\n"
29#elif defined(CONFIG_CPU_MIPSR2)
30	"	ei							\n"
31#else
32	"	mfc0	$1,$12						\n"
33	"	ori	$1,0x1f						\n"
34	"	xori	$1,0x1e						\n"
35	"	mtc0	$1,$12						\n"
36#endif
37	"	irq_enable_hazard					\n"
38	"	.set	pop						\n"
39	"	.endm");
40
41static inline void raw_local_irq_enable(void)
42{
43	__asm__ __volatile__(
44		"raw_local_irq_enable"
45		: /* no outputs */
46		: /* no inputs */
47		: "memory");
48}
49
/*
 * For cli() we have to insert nops to make sure that the new value
 * has actually arrived in the status register before the end of this
 * macro.
 * R4000/R4400 need three nops, the R4600 two nops and the R10000 needs
 * no nops at all.
 */
57__asm__ (
58	"	.macro	raw_local_irq_disable\n"
59	"	.set	push						\n"
60	"	.set	noat						\n"
61#ifdef CONFIG_MIPS_MT_SMTC
62	"	mfc0	$1, $2, 1					\n"
63	"	ori	$1, 0x400					\n"
64	"	.set	noreorder					\n"
65	"	mtc0	$1, $2, 1					\n"
66#elif defined(CONFIG_CPU_MIPSR2)
67	"	di							\n"
68#else
69	"	mfc0	$1,$12						\n"
70	"	ori	$1,0x1f						\n"
71	"	xori	$1,0x1f						\n"
72	"	.set	noreorder					\n"
73	"	mtc0	$1,$12						\n"
74#endif
75	"	irq_disable_hazard					\n"
76	"	.set	pop						\n"
77	"	.endm							\n");
78
79static inline void raw_local_irq_disable(void)
80{
81	__asm__ __volatile__(
82		"raw_local_irq_disable"
83		: /* no outputs */
84		: /* no inputs */
85		: "memory");
86}
87
88__asm__ (
89	"	.macro	raw_local_save_flags flags			\n"
90	"	.set	push						\n"
91	"	.set	reorder						\n"
92#ifdef CONFIG_MIPS_MT_SMTC
93	"	mfc0	\\flags, $2, 1					\n"
94#else
95	"	mfc0	\\flags, $12					\n"
96#endif
97	"	.set	pop						\n"
98	"	.endm							\n");
99
100#define raw_local_save_flags(x)						\
101__asm__ __volatile__(							\
102	"raw_local_save_flags %0"					\
103	: "=r" (x))
104
105__asm__ (
106	"	.macro	raw_local_irq_save result			\n"
107	"	.set	push						\n"
108	"	.set	reorder						\n"
109	"	.set	noat						\n"
110#ifdef CONFIG_MIPS_MT_SMTC
111	"	mfc0	\\result, $2, 1					\n"
112	"	ori	$1, \\result, 0x400				\n"
113	"	.set	noreorder					\n"
114	"	mtc0	$1, $2, 1					\n"
115	"	andi	\\result, \\result, 0x400			\n"
116#elif defined(CONFIG_CPU_MIPSR2)
117	"	di	\\result					\n"
118	"	andi	\\result, 1					\n"
119#else
120	"	mfc0	\\result, $12					\n"
121	"	ori	$1, \\result, 0x1f				\n"
122	"	xori	$1, 0x1f					\n"
123	"	.set	noreorder					\n"
124	"	mtc0	$1, $12						\n"
125#endif
126	"	irq_disable_hazard					\n"
127	"	.set	pop						\n"
128	"	.endm							\n");
129
130#define raw_local_irq_save(x)						\
131__asm__ __volatile__(							\
132	"raw_local_irq_save\t%0"					\
133	: "=r" (x)							\
134	: /* no inputs */						\
135	: "memory")
136
137__asm__ (
138	"	.macro	raw_local_irq_restore flags			\n"
139	"	.set	push						\n"
140	"	.set	noreorder					\n"
141	"	.set	noat						\n"
142#ifdef CONFIG_MIPS_MT_SMTC
143	"mfc0	$1, $2, 1						\n"
144	"andi	\\flags, 0x400						\n"
145	"ori	$1, 0x400						\n"
146	"xori	$1, 0x400						\n"
147	"or	\\flags, $1						\n"
148	"mtc0	\\flags, $2, 1						\n"
149#elif defined(CONFIG_CPU_MIPSR2) && defined(CONFIG_IRQ_CPU)
150	/*
151	 * Slow, but doesn't suffer from a relativly unlikely race
152	 * condition we're having since days 1.
153	 */
154	"	beqz	\\flags, 1f					\n"
155	"	 di							\n"
156	"	ei							\n"
157	"1:								\n"
158#elif defined(CONFIG_CPU_MIPSR2)
159	/*
160	 * Fast, dangerous.  Life is fun, life is good.
161	 */
162	"	mfc0	$1, $12						\n"
163	"	ins	$1, \\flags, 0, 1				\n"
164	"	mtc0	$1, $12						\n"
165#else
166	"	mfc0	$1, $12						\n"
167	"	andi	\\flags, 1					\n"
168	"	ori	$1, 0x1f					\n"
169	"	xori	$1, 0x1f					\n"
170	"	or	\\flags, $1					\n"
171	"	mtc0	\\flags, $12					\n"
172#endif
173	"	irq_disable_hazard					\n"
174	"	.set	pop						\n"
175	"	.endm							\n");
176
177extern void smtc_ipi_replay(void);
178
179static inline void raw_local_irq_restore(unsigned long flags)
180{
181	unsigned long __tmp1;
182
183#ifdef CONFIG_MIPS_MT_SMTC_INSTANT_REPLAY
184	/*
185	 * CONFIG_MIPS_MT_SMTC_INSTANT_REPLAY does prompt replay of deferred
186	 * IPIs, at the cost of branch and call overhead on each
187	 * local_irq_restore()
188	 */
189	if (unlikely(!(flags & 0x0400)))
190		smtc_ipi_replay();
191#endif
192
193	__asm__ __volatile__(
194		"raw_local_irq_restore\t%0"
195		: "=r" (__tmp1)
196		: "0" (flags)
197		: "memory");
198}
199
200static inline int raw_irqs_disabled_flags(unsigned long flags)
201{
202#ifdef CONFIG_MIPS_MT_SMTC
203	/*
204	 * SMTC model uses TCStatus.IXMT to disable interrupts for a thread/CPU
205	 */
206	return flags & 0x400;
207#else
208	return !(flags & 1);
209#endif
210}
211
#endif

214/*
215 * Do the CPU's IRQ-state tracing from assembly code.
216 */
217#ifdef CONFIG_TRACE_IRQFLAGS
218/* Reload some registers clobbered by trace_hardirqs_on */
219#ifdef CONFIG_64BIT
220# define TRACE_IRQS_RELOAD_REGS						\
221	LONG_L	$11, PT_R11(sp);					\
222	LONG_L	$10, PT_R10(sp);					\
223	LONG_L	$9, PT_R9(sp);						\
224	LONG_L	$8, PT_R8(sp);						\
225	LONG_L	$7, PT_R7(sp);						\
226	LONG_L	$6, PT_R6(sp);						\
227	LONG_L	$5, PT_R5(sp);						\
228	LONG_L	$4, PT_R4(sp);						\
229	LONG_L	$2, PT_R2(sp)
230#else
231# define TRACE_IRQS_RELOAD_REGS						\
232	LONG_L	$7, PT_R7(sp);						\
233	LONG_L	$6, PT_R6(sp);						\
234	LONG_L	$5, PT_R5(sp);						\
235	LONG_L	$4, PT_R4(sp);						\
236	LONG_L	$2, PT_R2(sp)
237#endif
238# define TRACE_IRQS_ON							\
239	CLI;	/* make sure trace_hardirqs_on() is called in kernel level */ \
240	jal	trace_hardirqs_on
241# define TRACE_IRQS_ON_RELOAD						\
242	TRACE_IRQS_ON;							\
243	TRACE_IRQS_RELOAD_REGS
244# define TRACE_IRQS_OFF							\
245	jal	trace_hardirqs_off
246#else
247# define TRACE_IRQS_ON
248# define TRACE_IRQS_ON_RELOAD
249# define TRACE_IRQS_OFF
250#endif

#endif /* _ASM_IRQFLAGS_H */