1#ifndef __ASM_CRIS_SYSTEM_H
2#define __ASM_CRIS_SYSTEM_H
3
4#include <linux/config.h>
5
6#include <asm/segment.h>
7
/* the switch_to macro calls resume, an asm function in entry.S which does the actual
 * task switching.
 */

/* resume() is implemented in assembly (entry.S).  The third argument is the
 * byte offset of the 'thread' member inside struct task_struct, computed in
 * switch_to() below with the classic null-pointer offsetof idiom. */
extern struct task_struct *resume(struct task_struct *prev, struct task_struct *next, int);
/* No architecture-specific preparation is needed before a context switch. */
#define prepare_to_switch()     do { } while(0)
/* Switch from 'prev' to 'next'; 'last' receives the task we actually
 * switched away from, as returned by resume(). */
#define switch_to(prev,next,last) last = resume(prev,next, \
					 (int)&((struct task_struct *)0)->thread)
16
/* read the CPU version register */

/* Return the CRIS CPU version (the $vr special register), zero-extended
 * from a byte to unsigned long.  Spelled __volatile__ for consistency
 * with every other asm accessor in this file, so the read is never
 * optimised away or reordered. */
static inline unsigned long rdvr(void) {
	unsigned char vr;
	__asm__ __volatile__ ("move $vr,%0" : "=rm" (vr));
	return vr;
}
24
/* read/write the user-mode stackpointer */

/* Return the saved user-mode stack pointer (the $usp special register). */
static inline unsigned long rdusp(void) {
	unsigned long usp;
	__asm__ __volatile__("move $usp,%0" : "=rm" (usp));
	return usp;
}
32
/* Set the user-mode stack pointer ($usp) to 'usp'. */
#define wrusp(usp) \
	__asm__ __volatile__("move %0,$usp" : /* no outputs */ : "rm" (usp))
35
/* read the current stackpointer */

/* Return the current (kernel) stack pointer, $sp. */
static inline unsigned long rdsp(void) {
	unsigned long sp;
	__asm__ __volatile__("move.d $sp,%0" : "=rm" (sp));
	return sp;
}
43
/* Segment-base lookup stub: CRIS has no x86-style segmentation, so the
 * base is always zero regardless of the descriptor address passed in. */
static inline unsigned long _get_base(char * addr)
{
	(void)addr;	/* intentionally unused */
	return 0UL;
}
48
/* Single no-op instruction.  No trailing semicolon in the definition, so
 * "nop();" is exactly one statement and "if (x) nop(); else ..." parses
 * correctly (the original definition ended in ';', yielding an empty
 * statement at every use site). */
#define nop() __asm__ __volatile__ ("nop")
50
/* Atomically exchange *(ptr) with x for operand sizes 1, 2 or 4 bytes;
 * evaluates to the previous value of *(ptr), cast back to its type. */
#define xchg(ptr,x) ((__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr))))
/* Test-and-set: atomically store 1 and return the old value. */
#define tas(ptr) (xchg((ptr),1))

/* Dummy oversized type; casting a pointer to it tells gcc that a large
 * memory object may be touched through the pointer (presumably for asm
 * memory operands -- __xg is not used within this header). */
struct __xchg_dummy { unsigned long a[100]; };
#define __xg(x) ((struct __xchg_dummy *)(x))
56
/* Low-level interrupt control: "di"/"ei" disable/enable interrupts, and
 * the CCR holds the interrupt-enable flag.  The "memory" clobber stops the
 * compiler from moving memory accesses across these points.  The trailing
 * semicolons have been dropped from the definitions so that a use such as
 * "if (x) __cli(); else ..." parses as one statement. */
#define __cli() __asm__ __volatile__ ( "di" : : :"memory")
#define __sti() __asm__ __volatile__ ( "ei" : : :"memory")
#define __save_flags(x) __asm__ __volatile__ ("move $ccr,%0" : "=rm" (x) : : "memory")
#define __restore_flags(x) __asm__ __volatile__ ("move %0,$ccr" : : "rm" (x) : "memory")
61
/* For spinlocks etc */
/* Save the condition-code register (which contains the irq enable bit)
 * into x, then disable interrupts, in one asm so no irq can slip in
 * between.  Trailing semicolon removed from the definition so if/else
 * usage parses correctly. */
#define local_irq_save(x) __asm__ __volatile__ ("move $ccr,%0\n\tdi" : "=rm" (x) : : "memory")
#define local_irq_restore(x) restore_flags(x)

#define local_irq_disable()  cli()
#define local_irq_enable()   sti()
68
69
/* Uniprocessor aliases for the low-level irq primitives above. */
#define cli() __cli()
#define sti() __sti()
#define save_flags(x) __save_flags(x)
#define restore_flags(x) __restore_flags(x)
/* Save flags into x, then disable interrupts, as a single statement. */
#define save_and_cli(x) do { __save_flags(x); cli(); } while(0)
75
/*
 * Exchange 'x' with the 1-, 2- or 4-byte object at 'ptr' and return the
 * previous value.  ETRAX has no atomic exchange instruction, so atomicity
 * is achieved by disabling interrupts around two plain moves.
 * NOTE(review): an unsupported 'size' falls through the switch and silently
 * returns x with no exchange performed -- callers must pass sizeof 1, 2 or 4
 * (the xchg() macro guarantees this for supported operand types).
 */
static inline unsigned long __xchg(unsigned long x, void * ptr, int size)
{
  /* since Etrax doesn't have any atomic xchg instructions, we need to disable
     irq's (if enabled) and do it with move.d's */
  unsigned long flags,temp;
  save_flags(flags); /* save flags, including irq enable bit */
  cli();             /* shut off irq's */
  switch (size) {
  case 1:
    /* stage through 'temp' so only the low byte of x is stored/loaded;
       assumes the first byte of the long is its low byte (little-endian)
       -- TODO confirm for CRIS */
    *((unsigned char *)&temp) = x;
    x = *(unsigned char *)ptr;
    *(unsigned char *)ptr = *((unsigned char *)&temp);
    break;
  case 2:
    *((unsigned short *)&temp) = x;
    x = *(unsigned short *)ptr;
    *(unsigned short *)ptr = *((unsigned short *)&temp);
    break;
  case 4:
    temp = x;
    x = *(unsigned long *)ptr;
    *(unsigned long *)ptr = temp;
    break;
  }
  restore_flags(flags); /* restore irq enable bit */
  return x;
}
103
/* Memory barriers: mb() is an empty asm with a "memory" clobber, i.e. a
 * compiler-only barrier -- presumably sufficient on this CPU; no hardware
 * fence instruction is issued. */
#define mb() __asm__ __volatile__ ("" : : : "memory")
#define rmb() mb()
#define wmb() mb()

#ifdef CONFIG_SMP
#define smp_mb()        mb()
#define smp_rmb()       rmb()
#define smp_wmb()       wmb()
#else
/* UP build: only compiler reordering matters (barrier() is defined
 * elsewhere in the kernel headers). */
#define smp_mb()        barrier()
#define smp_rmb()       barrier()
#define smp_wmb()       barrier()
#endif

/* iret() is a no-op on this architecture. */
#define iret()
119
/*
 * disable hlt during certain critical i/o operations
 */
/* Advertise that this arch supports disabling halt in the idle loop; the
 * two functions are implemented outside this header. */
#define HAVE_DISABLE_HLT
void disable_hlt(void);
void enable_hlt(void);
126
127#endif
128