/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1994 - 1999 by Ralf Baechle
 * Copyright (C) 1996 by Paul M. Antoine
 *
 * Changed set_except_vector declaration to allow return of previous
 * vector address value - necessary for "borrowing" vectors.
 *
 * Kevin D. Kissell, kevink@mips.org and Carsten Langgaard, carstenl@mips.com
 * Copyright (C) 2000 MIPS Technologies, Inc.
 */
#ifndef _ASM_SYSTEM_H
#define _ASM_SYSTEM_H

#ifdef __KERNEL__

#include <linux/config.h>
#include <asm/sgidefs.h>

#include <linux/kernel.h>

#include <asm/addrspace.h>
#include <asm/ptrace.h>

__asm__ (
	".macro\t__sti\n\t"
	".set\tpush\n\t"
	".set\treorder\n\t"
	".set\tnoat\n\t"
	"mfc0\t$1,$12\n\t"
	"ori\t$1,0x1f\n\t"
	"xori\t$1,0x1e\n\t"
	"mtc0\t$1,$12\n\t"
	".set\tpop\n\t"
	".endm");

extern __inline__ void
__sti(void)
{
	__asm__ __volatile__(
		"__sti"
		: /* no outputs */
		: /* no inputs */
		: "memory");
}

/*
 * For cli() we have to insert nops to make sure that the new value
 * has actually arrived in the status register before the end of this
 * macro.  The R4000/R4400 need three nops, the R4600 needs two, and
 * the R10000 needs none at all.
 */
__asm__ (
	".macro\t__cli\n\t"
	".set\tpush\n\t"
	".set\tnoat\n\t"
	"mfc0\t$1,$12\n\t"
	"ori\t$1,1\n\t"
	"xori\t$1,1\n\t"
	".set\tnoreorder\n\t"
	"mtc0\t$1,$12\n\t"
	"sll\t$0, $0, 1\t\t\t# nop\n\t"
	"sll\t$0, $0, 1\t\t\t# nop\n\t"
	"sll\t$0, $0, 1\t\t\t# nop\n\t"
	".set\tpop\n\t"
	".endm");

extern __inline__ void
__cli(void)
{
	__asm__ __volatile__(
		"__cli"
		: /* no outputs */
		: /* no inputs */
		: "memory");
}

__asm__ (
	".macro\t__save_flags flags\n\t"
	".set\tpush\n\t"
	".set\treorder\n\t"
	"mfc0\t\\flags, $12\n\t"
	".set\tpop\n\t"
	".endm");

#define __save_flags(x)						\
__asm__ __volatile__(						\
	"__save_flags %0"					\
	: "=r" (x))

__asm__ (
	".macro\t__save_and_cli result\n\t"
	".set\tpush\n\t"
	".set\treorder\n\t"
	".set\tnoat\n\t"
	"mfc0\t\\result, $12\n\t"
	"ori\t$1, \\result, 1\n\t"
	"xori\t$1, 1\n\t"
	".set\tnoreorder\n\t"
	"mtc0\t$1, $12\n\t"
	"sll\t$0, $0, 1\t\t\t# nop\n\t"
	"sll\t$0, $0, 1\t\t\t# nop\n\t"
	"sll\t$0, $0, 1\t\t\t# nop\n\t"
	".set\tpop\n\t"
	".endm");

#define __save_and_cli(x)					\
__asm__ __volatile__(						\
	"__save_and_cli\t%0"					\
	: "=r" (x)						\
	: /* no inputs */					\
	: "memory")

__asm__(".macro\t__restore_flags flags\n\t"
	".set\tnoreorder\n\t"
	".set\tnoat\n\t"
	"mfc0\t$1, $12\n\t"
	"andi\t\\flags, 1\n\t"
	"ori\t$1, 1\n\t"
	"xori\t$1, 1\n\t"
	"or\t\\flags, $1\n\t"
	"mtc0\t\\flags, $12\n\t"
	"sll\t$0, $0, 1\t\t\t# nop\n\t"
	"sll\t$0, $0, 1\t\t\t# nop\n\t"
	"sll\t$0, $0, 1\t\t\t# nop\n\t"
	".set\tat\n\t"
	".set\treorder\n\t"
	".endm");

#define __restore_flags(flags)					\
do {								\
	unsigned long __tmp1;					\
								\
	__asm__ __volatile__(					\
		"__restore_flags\t%0"				\
		: "=r" (__tmp1)					\
		: "0" (flags)					\
		: "memory");					\
} while(0)

#ifdef CONFIG_SMP

extern void __global_sti(void);
extern void __global_cli(void);
extern unsigned long __global_save_flags(void);
extern void __global_restore_flags(unsigned long);
#  define sti() __global_sti()
#  define cli() __global_cli()
#  define save_flags(x) do { x = __global_save_flags(); } while (0)
#  define restore_flags(x) __global_restore_flags(x)
#  define save_and_cli(x) do { save_flags(x); cli(); } while(0)

#else /* Single processor */

#  define sti() __sti()
#  define cli() __cli()
#  define save_flags(x) __save_flags(x)
#  define save_and_cli(x) __save_and_cli(x)
#  define restore_flags(x) __restore_flags(x)

#endif /* SMP */

/* For spinlocks etc */
#define local_irq_save(x)	__save_and_cli(x)
#define local_irq_restore(x)	__restore_flags(x)
#define local_irq_disable()	__cli()
#define local_irq_enable()	__sti()
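
/*
 * Illustrative usage (not part of the original header): local_irq_save()
 * and local_irq_restore() bracket a short critical section against local
 * interrupt handlers.  The buffer and index names below are hypothetical.
 *
 *	unsigned long flags;
 *
 *	local_irq_save(flags);
 *	sample_buf[sample_head++ % SAMPLE_BUF_SIZE] = value;
 *	local_irq_restore(flags);
 *
 * These only mask interrupts on the current CPU; data shared with other
 * CPUs still needs a spinlock.
 */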

#ifdef CONFIG_CPU_HAS_SYNC
#define __sync()				\
	__asm__ __volatile__(			\
		".set	push\n\t"		\
		".set	noreorder\n\t"		\
		".set	mips2\n\t"		\
		"sync\n\t"			\
		".set	pop"			\
		: /* no output */		\
		: /* no input */		\
		: "memory")
#else
#define __sync()	do { } while(0)
#endif

#define __fast_iob()				\
	__asm__ __volatile__(			\
		".set	push\n\t"		\
		".set	noreorder\n\t"		\
		"lw	$0,%0\n\t"		\
		"nop\n\t"			\
		".set	pop"			\
		: /* no output */		\
		: "m" (*(int *)KSEG1)		\
		: "memory")

#define fast_wmb()	__sync()
#define fast_rmb()	__sync()
#define fast_mb()	__sync()
#define fast_iob()				\
	do {					\
		__sync();			\
		__fast_iob();			\
	} while (0)

#ifdef CONFIG_CPU_HAS_WB

#include <asm/wbflush.h>

#define wmb()		fast_wmb()
#define rmb()		fast_rmb()
#define mb()		wbflush()
#define iob()		wbflush()

#else /* !CONFIG_CPU_HAS_WB */

#define wmb()		fast_wmb()
#define rmb()		fast_rmb()
#define mb()		fast_mb()
#define iob()		fast_iob()

#endif /* !CONFIG_CPU_HAS_WB */
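
/*
 * Illustrative usage (not part of the original header): iob() makes sure a
 * store to an I/O register has left the CPU write buffer before the code
 * goes on to poll a status register that depends on it.  The register
 * offsets and bit name below are hypothetical.
 *
 *	writel(cmd, ioaddr + HYPOTHETICAL_CMD_REG);
 *	iob();
 *	while (!(readl(ioaddr + HYPOTHETICAL_STATUS_REG) & HYPOTHETICAL_DONE))
 *		;
 */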

#ifdef CONFIG_SMP
#define smp_mb()	mb()
#define smp_rmb()	rmb()
#define smp_wmb()	wmb()
#else
#define smp_mb()	barrier()
#define smp_rmb()	barrier()
#define smp_wmb()	barrier()
#endif

#define set_mb(var, value) \
do { var = value; mb(); } while (0)

#define set_wmb(var, value) \
do { var = value; wmb(); } while (0)
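
/*
 * Illustrative usage (not part of the original header): set_wmb() stores a
 * value and issues a write barrier, so a reader that observes the flag also
 * observes the data written before it.  The names below are hypothetical.
 *
 * Writer:
 *	shared_data = compute_result();
 *	set_wmb(shared_data_ready, 1);
 *
 * Reader:
 *	if (shared_data_ready) {
 *		smp_rmb();
 *		consume(shared_data);
 *	}
 */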

#ifndef __ASSEMBLY__
/*
 * switch_to(prev, next, last) switches from task `prev' to task `next';
 * the caller has already checked that `prev' and `next' are not the same
 * task.  The previously running task is returned in `last'.
 */
extern asmlinkage void *resume(void *last, void *next);
#endif /* !__ASSEMBLY__ */

#define prepare_to_switch()	do { } while(0)

struct task_struct;

#define switch_to(prev,next,last) \
do { \
	(last) = resume(prev, next); \
} while(0)

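/*
 * Atomically exchange the 32-bit value at *m with `val' and return the old
 * value.  On CPUs with ll/sc the loop below retries until the store-
 * conditional succeeds; elsewhere interrupts are disabled around a plain
 * load/store, which is sufficient on a uniprocessor.
 */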
extern __inline__ unsigned long xchg_u32(volatile int * m, unsigned long val)
{
#ifdef CONFIG_CPU_HAS_LLSC
	unsigned long dummy;

	__asm__ __volatile__(
		".set\tpush\t\t\t\t# xchg_u32\n\t"
		".set\tnoreorder\n\t"
		".set\tnomacro\n\t"
		"ll\t%0, %3\n"
		"1:\tmove\t%2, %z4\n\t"
		"sc\t%2, %1\n\t"
		"beqzl\t%2, 1b\n\t"
		" ll\t%0, %3\n\t"
		"sync\n\t"
		".set\tpop"
		: "=&r" (val), "=m" (*m), "=&r" (dummy)
		: "R" (*m), "Jr" (val)
		: "memory");

	return val;
#else
	unsigned long flags, retval;

	local_irq_save(flags);
	retval = *m;
	*m = val;
	local_irq_restore(flags);	/* implies memory barrier  */
	return retval;
#endif /* Processor-dependent optimization */
}

#define xchg(ptr,x) ((__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr))))
#define tas(ptr) (xchg((ptr),1))

static __inline__ unsigned long
__xchg(unsigned long x, volatile void * ptr, int size)
{
	switch (size) {
		case 4:
			return xchg_u32(ptr, x);
	}
	return x;
}
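
/*
 * Illustrative usage (not part of the original header): xchg() can serve as
 * a crude test-and-set flag.  The flag name is hypothetical; real code would
 * normally use the spinlock or test_and_set_bit() interfaces instead.
 *
 *	static volatile int resource_busy;
 *
 *	if (xchg(&resource_busy, 1) == 0) {
 *		... we own the resource ...
 *		resource_busy = 0;
 *	}
 */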

extern void *set_except_vector(int n, void *addr);
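
/*
 * Illustrative usage (not part of the original header): set_except_vector()
 * returns the previous handler, which lets a caller "borrow" a vector and
 * later restore the old handler.  The vector number and handler name below
 * are hypothetical.
 *
 *	void *old_handler;
 *
 *	old_handler = set_except_vector(HYPOTHETICAL_VEC, my_handler);
 *	...
 *	set_except_vector(HYPOTHETICAL_VEC, old_handler);
 */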

extern void __die(const char *, struct pt_regs *, const char *file,
	const char *func, unsigned long line) __attribute__((noreturn));
extern void __die_if_kernel(const char *, struct pt_regs *, const char *file,
	const char *func, unsigned long line);

#define die(msg, regs)							\
	__die(msg, regs, __FILE__ ":", __FUNCTION__, __LINE__)
#define die_if_kernel(msg, regs)					\
	__die_if_kernel(msg, regs, __FILE__ ":", __FUNCTION__, __LINE__)
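
/*
 * Illustrative usage (not part of the original header): a trap handler can
 * use die_if_kernel() to oops when a fault that should only occur in user
 * mode is taken in kernel mode.  The message text is made up.
 *
 *	die_if_kernel("unexpected FPU emulation fault", regs);
 */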

#endif /* __KERNEL__ */

#endif /* _ASM_SYSTEM_H */