#ifndef __XEN_SYNCH_BITOPS_H__
#define __XEN_SYNCH_BITOPS_H__

/*
 * Copyright 1992, Linus Torvalds.
 * Heavily modified to provide guaranteed strong synchronisation
 * when communicating with Xen or other guest OSes running on other CPUs.
 */

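/*
 * ADDR reinterprets the untyped 'addr' argument of the functions below
 * as a long-sized memory operand for the bt* instructions. It relies on
 * each function naming its pointer parameter 'addr'.
 */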
#define ADDR (*(volatile long *) addr)

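/* Atomically set bit 'nr' in the bitmap at 'addr' (locked bts). */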
static __inline__ void synch_set_bit(int nr, volatile void * addr)
{
    __asm__ __volatile__ (
        "lock btsl %1,%0"
        : "+m" (ADDR) : "Ir" (nr) : "memory" );
}

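/* Atomically clear bit 'nr' in the bitmap at 'addr' (locked btr). */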
static __inline__ void synch_clear_bit(int nr, volatile void * addr)
{
    __asm__ __volatile__ (
        "lock btrl %1,%0"
        : "+m" (ADDR) : "Ir" (nr) : "memory" );
}

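/* Atomically flip bit 'nr' in the bitmap at 'addr' (locked btc). */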
static __inline__ void synch_change_bit(int nr, volatile void * addr)
{
    __asm__ __volatile__ (
        "lock btcl %1,%0"
        : "+m" (ADDR) : "Ir" (nr) : "memory" );
}

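/*
 * Atomically set bit 'nr' and return its previous value. bts leaves the
 * old bit in CF; 'sbbl %0,%0' turns CF into 0 or -1, so callers get a
 * zero/non-zero answer rather than strictly 0 or 1.
 */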
static __inline__ int synch_test_and_set_bit(int nr, volatile void * addr)
{
    int oldbit;
    __asm__ __volatile__ (
        "lock btsl %2,%1\n\tsbbl %0,%0"
        : "=r" (oldbit), "+m" (ADDR) : "Ir" (nr) : "memory");
    return oldbit;
}

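/* Atomically clear bit 'nr' and return its previous value (0 or -1). */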
static __inline__ int synch_test_and_clear_bit(int nr, volatile void * addr)
{
    int oldbit;
    __asm__ __volatile__ (
        "lock btrl %2,%1\n\tsbbl %0,%0"
        : "=r" (oldbit), "+m" (ADDR) : "Ir" (nr) : "memory");
    return oldbit;
}

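/* Atomically flip bit 'nr' and return its previous value (0 or -1). */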
static __inline__ int synch_test_and_change_bit(int nr, volatile void * addr)
{
    int oldbit;
    __asm__ __volatile__ (
        "lock btcl %2,%1\n\tsbbl %0,%0"
        : "=r" (oldbit), "+m" (ADDR) : "Ir" (nr) : "memory");
    return oldbit;
}

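/*
 * Casting a pointer through this oversized dummy struct makes the "m"
 * operand in __synch_cmpxchg cover the whole addressed object, so the
 * compiler cannot cache the target memory around the exchange.
 */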
struct __synch_xchg_dummy { unsigned long a[100]; };
#define __synch_xg(x) ((volatile struct __synch_xchg_dummy *)(x))

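/*
 * Atomic compare-and-exchange: if *ptr equals 'old', store 'new' into
 * *ptr. Always returns the value *ptr held beforehand, so the update
 * took effect iff the return value equals 'old'.
 */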
#define synch_cmpxchg(ptr, old, new) \
((__typeof__(*(ptr)))__synch_cmpxchg((ptr),\
                                     (unsigned long)(old), \
                                     (unsigned long)(new), \
                                     sizeof(*(ptr))))
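/*
 * Usage sketch (illustrative only; 'ring' and its 'req_prod' field are
 * hypothetical):
 *
 *     unsigned int seen = ring->req_prod;
 *     if (synch_cmpxchg(&ring->req_prod, seen, seen + 1) == seen)
 *         advanced = 1;
 *
 * A mismatch means another CPU (or Xen) updated the field first; callers
 * typically retry with the value that was returned.
 */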

static inline unsigned long __synch_cmpxchg(volatile void *ptr,
                                            unsigned long old,
                                            unsigned long new, int size)
{
    unsigned long prev;
    switch (size) {
    case 1:
        /* The byte form needs a byte-addressable register, hence "q". */
        __asm__ __volatile__("lock; cmpxchgb %b1,%2"
                             : "=a"(prev)
                             : "q"(new), "m"(*__synch_xg(ptr)),
                               "0"(old)
                             : "memory");
        return prev;
    case 2:
        __asm__ __volatile__("lock; cmpxchgw %w1,%2"
                             : "=a"(prev)
                             : "r"(new), "m"(*__synch_xg(ptr)),
                               "0"(old)
                             : "memory");
        return prev;
    case 4:
        __asm__ __volatile__("lock; cmpxchgl %k1,%2"
                             : "=a"(prev)
                             : "r"(new), "m"(*__synch_xg(ptr)),
                               "0"(old)
                             : "memory");
        return prev;
#ifdef __x86_64__
    /* cmpxchgq exists only on 64-bit CPUs; guard it so 32-bit builds assemble. */
    case 8:
        __asm__ __volatile__("lock; cmpxchgq %1,%2"
                             : "=a"(prev)
                             : "r"(new), "m"(*__synch_xg(ptr)),
                               "0"(old)
                             : "memory");
        return prev;
#endif
    }
    return old;
}

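/* Test bit 'nr' with a plain (unlocked) read of its containing 32-bit word. */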
static __inline__ int synch_const_test_bit(int nr, const volatile void * addr)
{
    return ((1UL << (nr & 31)) &
            (((const volatile unsigned int *) addr)[nr >> 5])) != 0;
}

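/* Test bit 'nr' with bt; sbb materialises CF as 0 (clear) or -1 (set). */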
static __inline__ int synch_var_test_bit(int nr, volatile void * addr)
{
    int oldbit;
    __asm__ __volatile__ (
        "btl %2,%1\n\tsbbl %0,%0"
        : "=r" (oldbit) : "m" (ADDR), "Ir" (nr) );
    return oldbit;
}

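/*
 * Dispatch on whether 'nr' is a compile-time constant: the C version can
 * fold to a single load and mask, while the asm version handles variable
 * bit numbers. Pure reads need no lock prefix.
 */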
#define synch_test_bit(nr,addr) \
(__builtin_constant_p(nr) ? \
 synch_const_test_bit((nr),(addr)) : \
 synch_var_test_bit((nr),(addr)))
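/*
 * Example (illustrative; 'shared', 'port' and 'process_event' are
 * placeholders for a guest's shared-memory event state):
 *
 *     if (synch_test_bit(port, &shared->evtchn_pending))
 *         process_event(port);
 */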

#endif /* __XEN_SYNCH_BITOPS_H__ */