#ifndef __XEN_SYNCH_BITOPS_H__
#define __XEN_SYNCH_BITOPS_H__

/*
 * Copyright 1992, Linus Torvalds.
 * Heavily modified to provide guaranteed strong synchronisation
 * when communicating with Xen or other guest OSes running on other CPUs.
 */

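/*
 * ADDR reinterprets the caller's untyped pointer as a volatile long so it
 * can be named as a memory operand in the inline asm below.
 */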
#define ADDR (*(volatile long *) addr)

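/*
 * Atomically set bit 'nr' of the bit string at 'addr'.  The lock prefix
 * makes the update visible to other CPUs and to the hypervisor.
 */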
static __inline__ void synch_set_bit(int nr, volatile void * addr)
{
    __asm__ __volatile__ (
        "lock btsl %1,%0"
        : "=m" (ADDR) : "Ir" (nr) : "memory" );
}

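/*
 * Atomically clear bit 'nr' of the bit string at 'addr'.
 */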
static __inline__ void synch_clear_bit(int nr, volatile void * addr)
{
    __asm__ __volatile__ (
        "lock btrl %1,%0"
        : "=m" (ADDR) : "Ir" (nr) : "memory" );
}

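/*
 * Atomically toggle bit 'nr' of the bit string at 'addr'.
 */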
static __inline__ void synch_change_bit(int nr, volatile void * addr)
{
    __asm__ __volatile__ (
        "lock btcl %1,%0"
        : "=m" (ADDR) : "Ir" (nr) : "memory" );
}

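/*
 * Atomically set bit 'nr' and return non-zero if it was already set.
 * 'sbbl %0,%0' converts the carry flag left by btsl into 0 or -1.
 */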
static __inline__ int synch_test_and_set_bit(int nr, volatile void * addr)
{
    int oldbit;
    __asm__ __volatile__ (
        "lock btsl %2,%1\n\tsbbl %0,%0"
        : "=r" (oldbit), "=m" (ADDR) : "Ir" (nr) : "memory");
    return oldbit;
}

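/*
 * Atomically clear bit 'nr' and return non-zero if it was previously set.
 */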
static __inline__ int synch_test_and_clear_bit(int nr, volatile void * addr)
{
    int oldbit;
    __asm__ __volatile__ (
        "lock btrl %2,%1\n\tsbbl %0,%0"
        : "=r" (oldbit), "=m" (ADDR) : "Ir" (nr) : "memory");
    return oldbit;
}

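/*
 * Atomically toggle bit 'nr' and return non-zero if it was previously set.
 */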
static __inline__ int synch_test_and_change_bit(int nr, volatile void * addr)
{
    int oldbit;

    __asm__ __volatile__ (
        "lock btcl %2,%1\n\tsbbl %0,%0"
        : "=r" (oldbit), "=m" (ADDR) : "Ir" (nr) : "memory");
    return oldbit;
}

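/*
 * Casting the cmpxchg target through this oversized dummy struct tells the
 * compiler that the asm may touch the whole underlying object, so it does
 * not keep a stale copy of it cached in registers across the exchange.
 */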
struct __synch_xchg_dummy { unsigned long a[100]; };
#define __synch_xg(x) ((volatile struct __synch_xchg_dummy *)(x))

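/*
 * synch_cmpxchg(ptr, old, new): atomically replace *ptr with 'new' only if
 * *ptr still holds 'old'.  The value previously found at *ptr is returned,
 * so the exchange succeeded iff the return value equals 'old'.
 *
 * Illustrative use (hypothetical field names, not from this file):
 *
 *     prod = ring->prod;
 *     while (synch_cmpxchg(&ring->prod, prod, prod + 1) != prod)
 *         prod = ring->prod;
 */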
#define synch_cmpxchg(ptr, old, new) \
((__typeof__(*(ptr)))__synch_cmpxchg((ptr),\
                                     (unsigned long)(old), \
                                     (unsigned long)(new), \
                                     sizeof(*(ptr))))

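/*
 * Size-dispatched worker for synch_cmpxchg(): pick the cmpxchg{b,w,l,q}
 * variant matching the operand width.  The comparand is passed in %eax/%rax
 * ("0"(old)) and the previous memory value comes back the same way.  An
 * unsupported size (e.g. 8 bytes on i386) falls through and returns 'old'.
 */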
static inline unsigned long __synch_cmpxchg(volatile void *ptr,
					    unsigned long old,
					    unsigned long new, int size)
{
	unsigned long prev;
	switch (size) {
	case 1:
		__asm__ __volatile__("lock; cmpxchgb %b1,%2"
				     : "=a"(prev)
				     : "q"(new), "m"(*__synch_xg(ptr)),
				       "0"(old)
				     : "memory");
		return prev;
	case 2:
		__asm__ __volatile__("lock; cmpxchgw %w1,%2"
				     : "=a"(prev)
				     : "q"(new), "m"(*__synch_xg(ptr)),
				       "0"(old)
				     : "memory");
		return prev;
#ifdef CONFIG_X86_64
	case 4:
		__asm__ __volatile__("lock; cmpxchgl %k1,%2"
				     : "=a"(prev)
				     : "q"(new), "m"(*__synch_xg(ptr)),
				       "0"(old)
				     : "memory");
		return prev;
	case 8:
		__asm__ __volatile__("lock; cmpxchgq %1,%2"
				     : "=a"(prev)
				     : "q"(new), "m"(*__synch_xg(ptr)),
				       "0"(old)
				     : "memory");
		return prev;
#else
	case 4:
		__asm__ __volatile__("lock; cmpxchgl %1,%2"
				     : "=a"(prev)
				     : "q"(new), "m"(*__synch_xg(ptr)),
				       "0"(old)
				     : "memory");
		return prev;
#endif
	}
	return old;
}

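/*
 * Non-atomic test of bit 'nr' for a compile-time-constant bit number:
 * index the 32-bit word directly and mask.  Plain reads need no lock prefix.
 */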
static __inline__ int synch_const_test_bit(int nr, const volatile void * addr)
{
    return ((1UL << (nr & 31)) &
            (((const volatile unsigned int *) addr)[nr >> 5])) != 0;
}

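/*
 * Test bit 'nr' when the bit number is only known at run time: btl copies
 * the bit into the carry flag and sbbl turns that into 0 or -1.
 */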
static __inline__ int synch_var_test_bit(int nr, volatile void * addr)
{
    int oldbit;
    __asm__ __volatile__ (
        "btl %2,%1\n\tsbbl %0,%0"
        : "=r" (oldbit) : "m" (ADDR), "Ir" (nr) );
    return oldbit;
}

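/*
 * synch_test_bit(nr, addr): use the direct word/mask form when 'nr' is a
 * compile-time constant, otherwise fall back to the btl-based test.
 */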
#define synch_test_bit(nr,addr) \
(__builtin_constant_p(nr) ? \
 synch_const_test_bit((nr),(addr)) : \
 synch_var_test_bit((nr),(addr)))

#endif /* __XEN_SYNCH_BITOPS_H__ */