#ifndef __XEN_SYNCH_BITOPS_H__
#define __XEN_SYNCH_BITOPS_H__

/*
 * Copyright 1992, Linus Torvalds.
 * Heavily modified to provide guaranteed strong synchronisation
 * when communicating with Xen or other guest OSes running on other CPUs.
 */

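/*
 * ADDR re-types each function's 'addr' argument so the memory
 * constraints below refer to the bit string in memory rather than to
 * the pointer variable itself.
 */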
#define ADDR (*(volatile long *) addr)

static __inline__ void synch_set_bit(int nr, volatile void * addr)
{
    __asm__ __volatile__ (
        "lock btsl %1,%0"
        : "+m" (ADDR) : "Ir" (nr) : "memory" );
}
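
/*
 * Example (hypothetical; 'port' and 'pending' are illustrative names,
 * not part of this header): mark an event-channel bit pending in a
 * bitmap that another CPU, or Xen itself, may be scanning concurrently:
 *
 *     volatile unsigned long pending[2];
 *     synch_set_bit(port, pending);
 *
 * synch_clear_bit() and synch_change_bit() below follow the same pattern.
 */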

static __inline__ void synch_clear_bit(int nr, volatile void * addr)
{
    __asm__ __volatile__ (
        "lock btrl %1,%0"
        : "+m" (ADDR) : "Ir" (nr) : "memory" );
}

static __inline__ void synch_change_bit(int nr, volatile void * addr)
{
    __asm__ __volatile__ (
        "lock btcl %1,%0"
        : "+m" (ADDR) : "Ir" (nr) : "memory" );
}

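/*
 * The test_and_* variants return the bit's previous value.  Each relies
 * on bts/btr/btc leaving the old bit in the carry flag; the following
 * "sbbl %0,%0" then yields 0 if the bit was clear and -1 (all ones) if
 * it was set, so treat the result strictly as a boolean.  A hypothetical
 * spinlock-style sketch ('lock_word' is illustrative only):
 *
 *     volatile unsigned long lock_word;
 *     while (synch_test_and_set_bit(0, &lock_word))
 *         ;  // spin until we observe the bit clear
 */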
static __inline__ int synch_test_and_set_bit(int nr, volatile void * addr)
{
    int oldbit;
    __asm__ __volatile__ (
        "lock btsl %2,%1\n\tsbbl %0,%0"
        : "=r" (oldbit), "+m" (ADDR) : "Ir" (nr) : "memory");
    return oldbit;
}

static __inline__ int synch_test_and_clear_bit(int nr, volatile void * addr)
{
    int oldbit;
    __asm__ __volatile__ (
        "lock btrl %2,%1\n\tsbbl %0,%0"
        : "=r" (oldbit), "+m" (ADDR) : "Ir" (nr) : "memory");
    return oldbit;
}

static __inline__ int synch_test_and_change_bit(int nr, volatile void * addr)
{
    int oldbit;
    __asm__ __volatile__ (
        "lock btcl %2,%1\n\tsbbl %0,%0"
        : "=r" (oldbit), "+m" (ADDR) : "Ir" (nr) : "memory");
    return oldbit;
}

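/*
 * __synch_xg() casts a pointer to a "very large" dummy structure so the
 * memory operand in __synch_cmpxchg() tells the compiler that an object
 * of unknown, large size may be accessed, preventing it from caching
 * the location in a register across the asm.
 */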
struct __synch_xchg_dummy { unsigned long a[100]; };
#define __synch_xg(x) ((volatile struct __synch_xchg_dummy *)(x))

#define synch_cmpxchg(ptr, old, new) \
((__typeof__(*(ptr)))__synch_cmpxchg((ptr),\
                                     (unsigned long)(old), \
                                     (unsigned long)(new), \
                                     sizeof(*(ptr))))
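
/*
 * Example (hypothetical; 'ring_prod' is an illustrative variable, not
 * part of this header): advance a shared producer index only if no
 * other CPU moved it first.  synch_cmpxchg() returns the value actually
 * observed in memory, converted to the type of *ptr:
 *
 *     volatile unsigned int ring_prod;
 *     unsigned int seen = ring_prod;
 *     if (synch_cmpxchg(&ring_prod, seen, seen + 1) != seen)
 *         ;  // lost the race: re-read ring_prod and retry
 */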

static inline unsigned long __synch_cmpxchg(volatile void *ptr,
					    unsigned long old,
					    unsigned long new, int size)
{
	unsigned long prev;
	switch (size) {
	case 1:
		__asm__ __volatile__("lock; cmpxchgb %b2,%1"
				     : "=a"(prev), "+m"(*__synch_xg(ptr))
				     : "q"(new), "0"(old)
				     : "memory");
		return prev;
	case 2:
		__asm__ __volatile__("lock; cmpxchgw %w2,%1"
				     : "=a"(prev), "+m"(*__synch_xg(ptr))
				     : "q"(new), "0"(old)
				     : "memory");
		return prev;
	case 4:
		__asm__ __volatile__("lock; cmpxchgl %k2,%1"
				     : "=a"(prev), "+m"(*__synch_xg(ptr))
				     : "q"(new), "0"(old)
				     : "memory");
		return prev;
#ifdef __x86_64__
	case 8:
		/* cmpxchgq is only encodable on 64-bit x86. */
		__asm__ __volatile__("lock; cmpxchgq %2,%1"
				     : "=a"(prev), "+m"(*__synch_xg(ptr))
				     : "q"(new), "0"(old)
				     : "memory");
		return prev;
#endif
	}
	/* Unsupported operand size: memory is left untouched. */
	return old;
}

static __inline__ int synch_const_test_bit(int nr, const volatile void * addr)
{
    return ((1UL << (nr & 31)) &
            (((const volatile unsigned int *) addr)[nr >> 5])) != 0;
}

static __inline__ int synch_var_test_bit(int nr, volatile void * addr)
{
    int oldbit;
    __asm__ __volatile__ (
        "btl %2,%1\n\tsbbl %0,%0"
        : "=r" (oldbit) : "m" (ADDR), "Ir" (nr) );
    return oldbit;
}

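/*
 * synch_test_bit() dispatches on whether 'nr' is a compile-time
 * constant: constant bit numbers are tested with a plain C load and
 * mask, while variable bit numbers use the btl instruction.  Like the
 * test_and_* helpers, the variable form returns 0 or -1, so treat the
 * result strictly as a boolean.
 */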
#define synch_test_bit(nr,addr) \
(__builtin_constant_p(nr) ? \
 synch_const_test_bit((nr),(addr)) : \
 synch_var_test_bit((nr),(addr)))
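
/*
 * Example (hypothetical; 'port' and 'pending' are illustrative only):
 *
 *     volatile unsigned long pending[2];
 *     if (synch_test_bit(port, pending))
 *         ;  // bit observed set: go handle the event
 */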

#endif /* __XEN_SYNCH_BITOPS_H__ */