#ifndef _M68K_SYSTEM_H
#define _M68K_SYSTEM_H

#include <linux/linkage.h>
#include <linux/kernel.h>
#include <asm/segment.h>
#include <asm/entry.h>

#ifdef __KERNEL__

/*
 * switch_to(prev,next,last) should switch from task prev to task next,
 * first checking that next isn't the current task, in which case it
 * does nothing.  This also clears the TS-flag if the task we switched
 * to used the math co-processor most recently.
 */
/*
 * switch_to() saves the extra registers that are not saved
 * automatically by SAVE_SWITCH_STACK in resume(), i.e. d0-d5 and
 * a0-a1. Some of these are used by schedule() and its predecessors,
 * so we might see unexpected behavior when a task returns with
 * unexpected register values.
 *
 * syscall stores these registers itself and none of them are used
 * by syscall after the function in the syscall has been called.
 *
 * Beware that resume now expects *next to be in d1 and the offset of
 * tss to be in a1. This saves a few instructions as we no longer have
 * to push them onto the stack and read them back right after.
 *
 * 02/17/96 - Jes Sorensen (jds@kom.auc.dk)
 *
 * Changed 96/09/19 by Andreas Schwab:
 * pass prev in a0, next in a1.
 */
asmlinkage void resume(void);
#define switch_to(prev,next,last) do { \
  register void *_prev __asm__ ("a0") = (prev); \
  register void *_next __asm__ ("a1") = (next); \
  register void *_last __asm__ ("d1"); \
  __asm__ __volatile__("jbsr resume" \
		       : "=a" (_prev), "=a" (_next), "=d" (_last) \
		       : "0" (_prev), "1" (_next) \
		       : "d0", "d2", "d3", "d4", "d5"); \
  (last) = _last; \
} while (0)
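
/*
 * A minimal usage sketch (illustrative only, with hypothetical names;
 * in the real kernel the generic scheduler performs this call): the
 * outgoing and incoming task pointers go in, and once this task is
 * scheduled back in, 'last' names the task that ran right before us.
 *
 *	struct task_struct *prev = current_running_task;  // hypothetical
 *	struct task_struct *next = pick_next_task();      // hypothetical
 *	struct task_struct *last;
 *
 *	if (prev != next)
 *		switch_to(prev, next, last);
 *	// execution continues here when this task runs again
 */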


/* interrupt control.. */
#include <linux/hardirq.h>
#define local_irq_enable() ({							\
	if (MACH_IS_Q40 || !hardirq_count())					\
		asm volatile ("andiw %0,%%sr": : "i" (ALLOWINT) : "memory");	\
})
#define local_irq_disable() asm volatile ("oriw  #0x0700,%%sr": : : "memory")
#define local_save_flags(x) asm volatile ("movew %%sr,%0":"=d" (x) : : "memory")
#define local_irq_restore(x) asm volatile ("movew %0,%%sr": :"d" (x) : "memory")

static inline int irqs_disabled(void)
{
	unsigned long flags;
	local_save_flags(flags);
	return flags & ~ALLOWINT;
}

/* For spinlocks etc */
#define local_irq_save(x)	({ local_save_flags(x); local_irq_disable(); })
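
/*
 * A minimal pairing sketch (illustrative only): save the status
 * register, mask interrupts for a short critical section, then restore
 * the saved IPL instead of unconditionally re-enabling.
 *
 *	unsigned long flags;
 *
 *	local_irq_save(flags);		// movew %sr,flags; oriw #0x0700,%sr
 *	// ... touch data shared with interrupt handlers ...
 *	local_irq_restore(flags);	// movew flags,%sr
 */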

/*
 * Force strict CPU ordering.
 * Not really required on m68k...
 */
#define nop()		do { asm volatile ("nop"); barrier(); } while (0)
#define mb()		barrier()
#define rmb()		barrier()
#define wmb()		barrier()
#define read_barrier_depends()	((void)0)
#define set_mb(var, value)	({ (var) = (value); wmb(); })

#define smp_mb()	barrier()
#define smp_rmb()	barrier()
#define smp_wmb()	barrier()
#define smp_read_barrier_depends()	((void)0)


#define xchg(ptr,x) ((__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr))))

struct __xchg_dummy { unsigned long a[100]; };
#define __xg(x) ((volatile struct __xchg_dummy *)(x))

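/*
 * Two __xchg() implementations follow: without CONFIG_RMW_INSNS the
 * exchange is made atomic by masking interrupts around a plain
 * load/store pair; with CONFIG_RMW_INSNS it spins on the 680x0 cas
 * instruction until the compare-and-store succeeds.
 */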
#ifndef CONFIG_RMW_INSNS
static inline unsigned long __xchg(unsigned long x, volatile void * ptr, int size)
{
	unsigned long flags, tmp;

	local_irq_save(flags);

	switch (size) {
	case 1:
		tmp = *(u8 *)ptr;
		*(u8 *)ptr = x;
		x = tmp;
		break;
	case 2:
		tmp = *(u16 *)ptr;
		*(u16 *)ptr = x;
		x = tmp;
		break;
	case 4:
		tmp = *(u32 *)ptr;
		*(u32 *)ptr = x;
		x = tmp;
		break;
	default:
		BUG();
	}

	local_irq_restore(flags);
	return x;
}
#else
static inline unsigned long __xchg(unsigned long x, volatile void * ptr, int size)
{
	switch (size) {
	    case 1:
		__asm__ __volatile__
			("moveb %2,%0\n\t"
			 "1:\n\t"
			 "casb %0,%1,%2\n\t"
			 "jne 1b"
			 : "=&d" (x) : "d" (x), "m" (*__xg(ptr)) : "memory");
		break;
	    case 2:
		__asm__ __volatile__
			("movew %2,%0\n\t"
			 "1:\n\t"
			 "casw %0,%1,%2\n\t"
			 "jne 1b"
			 : "=&d" (x) : "d" (x), "m" (*__xg(ptr)) : "memory");
		break;
	    case 4:
		__asm__ __volatile__
			("movel %2,%0\n\t"
			 "1:\n\t"
			 "casl %0,%1,%2\n\t"
			 "jne 1b"
			 : "=&d" (x) : "d" (x), "m" (*__xg(ptr)) : "memory");
		break;
	}
	return x;
}
#endif
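
/*
 * A minimal usage sketch (illustrative only; 'lock' is a hypothetical
 * variable): xchg() atomically stores the new value and returns the old
 * one, which is enough for a simple test-and-set style lock.
 *
 *	static volatile int lock;
 *
 *	while (xchg(&lock, 1) != 0)
 *		;			// spin until the old value was 0
 *	// ... critical section ...
 *	xchg(&lock, 0);			// release
 */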

/*
 * Atomic compare and exchange.  Compare OLD with MEM, if identical,
 * store NEW in MEM.  Return the initial value in MEM.  Success is
 * indicated by comparing RETURN with OLD.
 */
#ifdef CONFIG_RMW_INSNS
#define __HAVE_ARCH_CMPXCHG	1

static inline unsigned long __cmpxchg(volatile void *p, unsigned long old,
				      unsigned long new, int size)
{
	switch (size) {
	case 1:
		__asm__ __volatile__ ("casb %0,%2,%1"
				      : "=d" (old), "=m" (*(char *)p)
				      : "d" (new), "0" (old), "m" (*(char *)p));
		break;
	case 2:
		__asm__ __volatile__ ("casw %0,%2,%1"
				      : "=d" (old), "=m" (*(short *)p)
				      : "d" (new), "0" (old), "m" (*(short *)p));
		break;
	case 4:
		__asm__ __volatile__ ("casl %0,%2,%1"
				      : "=d" (old), "=m" (*(int *)p)
				      : "d" (new), "0" (old), "m" (*(int *)p));
		break;
	}
	return old;
}

#define cmpxchg(ptr,o,n)\
	((__typeof__(*(ptr)))__cmpxchg((ptr),(unsigned long)(o),\
					(unsigned long)(n),sizeof(*(ptr))))
#endif
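
/*
 * A minimal usage sketch (illustrative only; 'counter' is a hypothetical
 * variable, and cmpxchg() is only defined here when CONFIG_RMW_INSNS is
 * set): the usual retry loop re-reads the location until no other update
 * slipped in between the read and the store.
 *
 *	static int counter;
 *	int old, new;
 *
 *	do {
 *		old = counter;
 *		new = old + 1;
 *	} while (cmpxchg(&counter, old, new) != old);
 */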

#define arch_align_stack(x) (x)

#endif /* __KERNEL__ */

#endif /* _M68K_SYSTEM_H */
