#ifndef _M68K_SYSTEM_H
#define _M68K_SYSTEM_H

#include <linux/linkage.h>
#include <linux/kernel.h>
#include <asm/segment.h>
#include <asm/entry.h>

#ifdef __KERNEL__

/*
 * switch_to(n) should switch tasks to task ptr, first checking that
 * ptr isn't the current task, in which case it does nothing.  This
 * also clears the TS-flag if the task we switched to has most
 * recently used the math co-processor.
 */
/*
 * switch_to() saves the extra registers that are not saved
 * automatically by SAVE_SWITCH_STACK in resume(), ie. d0-d5 and
 * a0-a1. Some of these are used by schedule() and its predecessors
 * and so we might see unexpected behaviors when a task returns
 * with unexpected register values.
 *
 * syscall stores these registers itself and none of them are used
 * by syscall after the function in the syscall has been called.
 *
 * Beware that resume now expects *next to be in d1 and the offset of
 * tss to be in a1. This saves a few instructions as we no longer have
 * to push them onto the stack and read them back right after.
 *
 * 02/17/96 - Jes Sorensen (jds@kom.auc.dk)
 *
 * Changed 96/09/19 by Andreas Schwab
 * pass prev in a0, next in a1
 */
asmlinkage void resume(void);
#define switch_to(prev,next,last) do { \
  register void *_prev __asm__ ("a0") = (prev); \
  register void *_next __asm__ ("a1") = (next); \
  register void *_last __asm__ ("d1"); \
  __asm__ __volatile__("jbsr resume" \
		       : "=a" (_prev), "=a" (_next), "=d" (_last) \
		       : "0" (_prev), "1" (_next) \
		       : "d0", "d2", "d3", "d4", "d5"); \
  (last) = _last; \
} while (0)
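
/*
 * Example (editor's sketch, not part of the original header): the core
 * scheduler's context_switch() invokes this macro roughly as
 *
 *	switch_to(prev, next, prev);
 *
 * so that, once resume() returns on the new task's stack, "prev" again
 * names the task that was actually switched away from.
 */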


/*
 * Force strict CPU ordering.
 * Not really required on m68k...
 */
#define nop()		do { asm volatile ("nop"); barrier(); } while (0)
#define mb()		barrier()
#define rmb()		barrier()
#define wmb()		barrier()
#define read_barrier_depends()	((void)0)
#define set_mb(var, value)	({ (var) = (value); wmb(); })

#define smp_mb()	barrier()
#define smp_rmb()	barrier()
#define smp_wmb()	barrier()
#define smp_read_barrier_depends()	((void)0)

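/*
 * Example (editor's sketch): the usual pairing of the barriers defined
 * above.  On this port they all collapse to a compiler barrier(), but
 * the generic producer/consumer pattern they support is:
 *
 *	producer:	data = val; smp_wmb(); flag = 1;
 *	consumer:	if (flag) { smp_rmb(); use(data); }
 */
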
/* interrupt control.. */
#include <linux/hardirq.h>
#define local_irq_enable() ({							\
	if (MACH_IS_Q40 || !hardirq_count())					\
		asm volatile ("andiw %0,%%sr": : "i" (ALLOWINT) : "memory");	\
})
#define local_irq_disable() asm volatile ("oriw  #0x0700,%%sr": : : "memory")
#define local_save_flags(x) asm volatile ("movew %%sr,%0":"=d" (x) : : "memory")
#define local_irq_restore(x) asm volatile ("movew %0,%%sr": :"d" (x) : "memory")

static inline int irqs_disabled(void)
{
	unsigned long flags;
	local_save_flags(flags);
	return flags & ~ALLOWINT;
}

/* For spinlocks etc */
#define local_irq_save(x)	({ local_save_flags(x); local_irq_disable(); })

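/*
 * Example (editor's sketch): the canonical way local_irq_save() and
 * local_irq_restore() bracket a short critical section that must not
 * race with local interrupt handlers:
 *
 *	unsigned long flags;
 *
 *	local_irq_save(flags);
 *	... update data also touched from interrupt context ...
 *	local_irq_restore(flags);
 */
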
#define xchg(ptr,x) ((__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr))))

struct __xchg_dummy { unsigned long a[100]; };
#define __xg(x) ((volatile struct __xchg_dummy *)(x))

#ifndef CONFIG_RMW_INSNS
static inline unsigned long __xchg(unsigned long x, volatile void * ptr, int size)
{
	unsigned long flags, tmp;

	local_irq_save(flags);

	switch (size) {
	case 1:
		tmp = *(u8 *)ptr;
		*(u8 *)ptr = x;
		x = tmp;
		break;
	case 2:
		tmp = *(u16 *)ptr;
		*(u16 *)ptr = x;
		x = tmp;
		break;
	case 4:
		tmp = *(u32 *)ptr;
		*(u32 *)ptr = x;
		x = tmp;
		break;
	default:
		BUG();
	}

	local_irq_restore(flags);
	return x;
}
#else
static inline unsigned long __xchg(unsigned long x, volatile void * ptr, int size)
{
	switch (size) {
	    case 1:
		__asm__ __volatile__
			("moveb %2,%0\n\t"
			 "1:\n\t"
			 "casb %0,%1,%2\n\t"
			 "jne 1b"
			 : "=&d" (x) : "d" (x), "m" (*__xg(ptr)) : "memory");
		break;
	    case 2:
		__asm__ __volatile__
			("movew %2,%0\n\t"
			 "1:\n\t"
			 "casw %0,%1,%2\n\t"
			 "jne 1b"
			 : "=&d" (x) : "d" (x), "m" (*__xg(ptr)) : "memory");
		break;
	    case 4:
		__asm__ __volatile__
			("movel %2,%0\n\t"
			 "1:\n\t"
			 "casl %0,%1,%2\n\t"
			 "jne 1b"
			 : "=&d" (x) : "d" (x), "m" (*__xg(ptr)) : "memory");
		break;
	}
	return x;
}
#endif
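
/*
 * Example (editor's sketch): xchg() atomically stores a new value and
 * hands back the previous one, which makes a trivial test-and-set flag
 * (the "busy" variable here is hypothetical):
 *
 *	static volatile int busy;
 *
 *	if (xchg(&busy, 1) == 0) {
 *		... we own the flag ...
 *		busy = 0;
 *	}
 */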

#include <asm-generic/cmpxchg-local.h>

#define cmpxchg64_local(ptr, o, n) __cmpxchg64_local_generic((ptr), (o), (n))

/*
 * Atomic compare and exchange.  Compare OLD with MEM, if identical,
 * store NEW in MEM.  Return the initial value in MEM.  Success is
 * indicated by comparing RETURN with OLD.
 */
#ifdef CONFIG_RMW_INSNS
#define __HAVE_ARCH_CMPXCHG	1

static inline unsigned long __cmpxchg(volatile void *p, unsigned long old,
				      unsigned long new, int size)
{
	switch (size) {
	case 1:
		__asm__ __volatile__ ("casb %0,%2,%1"
				      : "=d" (old), "=m" (*(char *)p)
				      : "d" (new), "0" (old), "m" (*(char *)p));
		break;
	case 2:
		__asm__ __volatile__ ("casw %0,%2,%1"
				      : "=d" (old), "=m" (*(short *)p)
				      : "d" (new), "0" (old), "m" (*(short *)p));
		break;
	case 4:
		__asm__ __volatile__ ("casl %0,%2,%1"
				      : "=d" (old), "=m" (*(int *)p)
				      : "d" (new), "0" (old), "m" (*(int *)p));
		break;
	}
	return old;
}

#define cmpxchg(ptr, o, n)						    \
	((__typeof__(*(ptr)))__cmpxchg((ptr), (unsigned long)(o),	    \
			(unsigned long)(n), sizeof(*(ptr))))
#define cmpxchg_local(ptr, o, n)					    \
	((__typeof__(*(ptr)))__cmpxchg((ptr), (unsigned long)(o),	    \
			(unsigned long)(n), sizeof(*(ptr))))
#else

/*
 * cmpxchg_local and cmpxchg64_local are atomic wrt current CPU. Always make
 * them available.
 */
#define cmpxchg_local(ptr, o, n)					       \
	((__typeof__(*(ptr)))__cmpxchg_local_generic((ptr), (unsigned long)(o),\
			(unsigned long)(n), sizeof(*(ptr))))

#ifndef CONFIG_SMP
#include <asm-generic/cmpxchg.h>
#endif

#endif
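
/*
 * Example (editor's sketch): the classic read/modify/retry loop built on
 * cmpxchg() as described above, for a hypothetical u32 counter -- the
 * update is recomputed and retried until no other context has changed
 * the value in between:
 *
 *	u32 old, new;
 *
 *	do {
 *		old = counter;
 *		new = old + 1;
 *	} while (cmpxchg(&counter, old, new) != old);
 */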

#define arch_align_stack(x) (x)

#endif /* __KERNEL__ */

#endif /* _M68K_SYSTEM_H */
