/******************************************************************************
 * os.h
 *
 * Random collection of macros and definitions.
 */

#ifndef _XEN_OS_H_
#define _XEN_OS_H_

#ifdef PAE
#define CONFIG_X86_PAE
#endif

#if !defined(__XEN_INTERFACE_VERSION__)
/*
 * Can update to a more recent version when we implement
 * the hypercall page
 */
#define __XEN_INTERFACE_VERSION__ 0x00030204
#endif

#include <xen/interface/xen.h>

/* Force a proper event-channel callback from Xen. */
void force_evtchn_callback(void);

extern int gdtset;

extern shared_info_t *HYPERVISOR_shared_info;

/* REP NOP (PAUSE) is a good thing to insert into busy-wait loops. */
static inline void rep_nop(void)
{
    __asm__ __volatile__ ( "rep;nop" : : : "memory" );
}
#define cpu_relax() rep_nop()
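/*
 * Illustrative sketch (not part of this header): cpu_relax() belongs in
 * the body of a busy-wait loop, e.g. spinning until another CPU flags
 * that the GDT has been set up:
 *
 *     while (!gdtset)
 *             cpu_relax();
 */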

/*
 * Crude memory allocator for allocations early in boot.
 */
void *bootmem_alloc(unsigned int size);
void bootmem_free(void *ptr, unsigned int size);
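/*
 * Illustrative sketch (not part of this header): an early-boot caller
 * might grab and later return a scratch buffer like this; the caller
 * must remember the size, since bootmem_free() takes it explicitly:
 *
 *     void *scratch = bootmem_alloc(PAGE_SIZE);
 *     if (scratch != NULL) {
 *             ... use the buffer during early boot ...
 *             bootmem_free(scratch, PAGE_SIZE);
 *     }
 */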

/* Everything below this point is not included by assembler (.S) files. */
#ifndef __ASSEMBLY__

void printk(const char *fmt, ...);

/* some function prototypes */
void trap_init(void);

#define likely(x)  __builtin_expect((x),1)
#define unlikely(x)  __builtin_expect((x),0)

#ifndef XENHVM

/*
 * STI/CLI equivalents. These basically set and clear the virtual
 * event-delivery mask (evtchn_upcall_mask) in the shared_info structure.
 * Note that when the mask is cleared, there may already be pending
 * events to be handled; we may therefore need to force an event-channel
 * callback (which ends up in do_hypervisor_callback()).
 */

#define __cli()                                                         \
do {                                                                    \
        vcpu_info_t *_vcpu;                                             \
        _vcpu = &HYPERVISOR_shared_info->vcpu_info[PCPU_GET(cpuid)];    \
        _vcpu->evtchn_upcall_mask = 1;                                  \
        barrier();                                                      \
} while (0)

#define __sti()                                                         \
do {                                                                    \
        vcpu_info_t *_vcpu;                                             \
        barrier();                                                      \
        _vcpu = &HYPERVISOR_shared_info->vcpu_info[PCPU_GET(cpuid)];    \
        _vcpu->evtchn_upcall_mask = 0;                                  \
        barrier(); /* unmask then check (avoid races) */                \
        if ( unlikely(_vcpu->evtchn_upcall_pending) )                   \
                force_evtchn_callback();                                \
} while (0)

#define __restore_flags(x)                                              \
do {                                                                    \
        vcpu_info_t *_vcpu;                                             \
        barrier();                                                      \
        _vcpu = &HYPERVISOR_shared_info->vcpu_info[PCPU_GET(cpuid)];    \
        if ((_vcpu->evtchn_upcall_mask = (x)) == 0) {                   \
                barrier(); /* unmask then check (avoid races) */        \
                if ( unlikely(_vcpu->evtchn_upcall_pending) )           \
                        force_evtchn_callback();                        \
        }                                                               \
} while (0)

/*
 * Add critical_{enter, exit}?
 */
#define __save_and_cli(x)                                               \
do {                                                                    \
        vcpu_info_t *_vcpu;                                             \
        _vcpu = &HYPERVISOR_shared_info->vcpu_info[PCPU_GET(cpuid)];    \
        (x) = _vcpu->evtchn_upcall_mask;                                \
        _vcpu->evtchn_upcall_mask = 1;                                  \
        barrier();                                                      \
} while (0)

/* Record the current event mask without changing it. */
#define __save_flags(x)                                                 \
do {                                                                    \
        vcpu_info_t *_vcpu;                                             \
        _vcpu = &HYPERVISOR_shared_info->vcpu_info[PCPU_GET(cpuid)];    \
        (x) = _vcpu->evtchn_upcall_mask;                                \
} while (0)

#define cli() __cli()
#define sti() __sti()
#define save_flags(x) __save_flags(x)
#define restore_flags(x) __restore_flags(x)
#define save_and_cli(x) __save_and_cli(x)

#define local_irq_save(x)       __save_and_cli(x)
#define local_irq_restore(x)    __restore_flags(x)
#define local_irq_disable()     __cli()
#define local_irq_enable()      __sti()

#define mtx_lock_irqsave(lock, x)       do { local_irq_save((x)); mtx_lock_spin((lock)); } while (0)
#define mtx_unlock_irqrestore(lock, x)  do { mtx_unlock_spin((lock)); local_irq_restore((x)); } while (0)
#define spin_lock_irqsave mtx_lock_irqsave
#define spin_unlock_irqrestore mtx_unlock_irqrestore
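/*
 * Illustrative sketch (not part of this header): a PV-only code path
 * typically brackets a short critical section like this; on this port
 * the saved "flags" are simply the previous evtchn_upcall_mask value.
 *
 *     unsigned long flags;
 *
 *     local_irq_save(flags);
 *     ... touch state that must not race with event upcalls ...
 *     local_irq_restore(flags);
 */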

#endif /* !XENHVM */

#ifndef mb
#define mb() __asm__ __volatile__("mfence":::"memory")
#endif
#ifndef rmb
#define rmb() __asm__ __volatile__("lfence":::"memory")
#endif
#ifndef wmb
#define wmb() barrier()
#endif
#ifdef SMP
#define smp_mb() mb()
#define smp_rmb() rmb()
#define smp_wmb() wmb()
#define smp_read_barrier_depends()      read_barrier_depends()
#define set_mb(var, value) do { xchg(&var, value); } while (0)
#else
#define smp_mb()        barrier()
#define smp_rmb()       barrier()
#define smp_wmb()       barrier()
#define smp_read_barrier_depends()      do { } while(0)
#define set_mb(var, value) do { var = value; barrier(); } while (0)
#endif

/* This is a barrier for the compiler only, NOT the processor! */
#define barrier() __asm__ __volatile__("": : :"memory")
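/*
 * Illustrative sketch (not part of this header): wmb() and rmb() pair
 * across a producer and a consumer of a shared ring.  All names below
 * are hypothetical and shown only to illustrate the ordering rule:
 *
 *     Producer:
 *         ring[prod] = item;
 *         wmb();                  publish the data before the index
 *         shared_prod = prod + 1;
 *
 *     Consumer:
 *         while (shared_prod == cons)
 *                 cpu_relax();
 *         rmb();                  read the index before the data
 *         item = ring[cons];
 */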

#define LOCK_PREFIX ""
#define LOCK ""
#define ADDR (*(volatile long *) addr)

typedef struct { volatile int counter; } atomic_t;

#define xen_xchg(ptr,v) \
        ((__typeof__(*(ptr)))__xchg((unsigned long)(v),(ptr),sizeof(*(ptr))))
/*
 * Make sure gcc doesn't try to be clever and move things around
 * on us. We need to use _exactly_ the address the user gave us,
 * not some alias that contains the same information.
 */
struct __xchg_dummy { unsigned long a[100]; };
#define __xg(x) ((volatile struct __xchg_dummy *)(x))
static __inline unsigned long __xchg(unsigned long x, volatile void * ptr,
                                   int size)
{
    switch (size) {
    case 1:
        __asm__ __volatile__("xchgb %b0,%1"
                             :"=q" (x)
                             :"m" (*__xg(ptr)), "0" (x)
                             :"memory");
        break;
    case 2:
        __asm__ __volatile__("xchgw %w0,%1"
                             :"=r" (x)
                             :"m" (*__xg(ptr)), "0" (x)
                             :"memory");
        break;
    case 4:
        __asm__ __volatile__("xchgl %0,%1"
                             :"=r" (x)
                             :"m" (*__xg(ptr)), "0" (x)
                             :"memory");
        break;
    }
    return x;
}
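/*
 * Illustrative sketch (not part of this header): xen_xchg() atomically
 * swaps in a new value and returns the old one, e.g. to claim all
 * currently pending event-channel selector bits for this VCPU ('v' is
 * assumed to point at this CPU's vcpu_info entry):
 *
 *     vcpu_info_t *v = &HYPERVISOR_shared_info->vcpu_info[PCPU_GET(cpuid)];
 *     unsigned long pending = xen_xchg(&v->evtchn_pending_sel, 0);
 *     ... handle each selector bit that was set in 'pending' ...
 */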

/**
 * test_and_clear_bit - Clear a bit and return its old value
 * @nr: Bit to clear
 * @addr: Address to count from
 *
 * This operation is atomic and cannot be reordered.
 * It also implies a memory barrier.
 */
static __inline int test_and_clear_bit(int nr, volatile void * addr)
{
        int oldbit;

        __asm__ __volatile__( LOCK_PREFIX
                "btrl %2,%1\n\tsbbl %0,%0"
                :"=r" (oldbit),"=m" (ADDR)
                :"Ir" (nr) : "memory");
        return oldbit;
}
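/*
 * Illustrative sketch (not part of this header): test_and_clear_bit()
 * is the usual way to consume a pending flag exactly once ('port' and
 * 'pending_events' are hypothetical names for illustration):
 *
 *     static unsigned long pending_events;
 *
 *     if (test_and_clear_bit(port, &pending_events))
 *             ... the bit was set and is now clear; handle the event ...
 */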

static __inline int constant_test_bit(int nr, const volatile void * addr)
{
    return ((1UL << (nr & 31)) & (((const volatile unsigned int *) addr)[nr >> 5])) != 0;
}

static __inline int variable_test_bit(int nr, volatile void * addr)
{
    int oldbit;

    __asm__ __volatile__(
        "btl %2,%1\n\tsbbl %0,%0"
        :"=r" (oldbit)
        :"m" (ADDR),"Ir" (nr));
    return oldbit;
}

#define test_bit(nr,addr) \
(__builtin_constant_p(nr) ? \
 constant_test_bit((nr),(addr)) : \
 variable_test_bit((nr),(addr)))
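/*
 * Illustrative sketch (not part of this header): test_bit() dispatches
 * at compile time, so a literal bit number goes through
 * constant_test_bit() and a runtime bit number through
 * variable_test_bit() ('mask' and handle_it() are hypothetical):
 *
 *     static unsigned long mask;
 *     int bit = PCPU_GET(cpuid);
 *
 *     if (test_bit(0, &mask) || test_bit(bit, &mask))
 *             handle_it();
 */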

/**
 * set_bit - Atomically set a bit in memory
 * @nr: the bit to set
 * @addr: the address to start counting from
 *
 * This function is atomic and may not be reordered.  See __set_bit()
 * if you do not require the atomic guarantees.
 * Note that @nr may be almost arbitrarily large; this function is not
 * restricted to acting on a single-word quantity.
 */
static __inline__ void set_bit(int nr, volatile void * addr)
{
        __asm__ __volatile__( LOCK_PREFIX
                "btsl %1,%0"
                :"=m" (ADDR)
                :"Ir" (nr));
}

/**
 * clear_bit - Clears a bit in memory
 * @nr: Bit to clear
 * @addr: Address to start counting from
 *
 * clear_bit() is atomic and may not be reordered.  However, it does
 * not contain a memory barrier, so if it is used for locking purposes,
 * you should call smp_mb__before_clear_bit() and/or smp_mb__after_clear_bit()
 * in order to ensure changes are visible on other processors.
 */
static __inline__ void clear_bit(int nr, volatile void * addr)
{
        __asm__ __volatile__( LOCK_PREFIX
                "btrl %1,%0"
                :"=m" (ADDR)
                :"Ir" (nr));
}
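/*
 * Illustrative sketch (not part of this header): set_bit()/clear_bit()
 * are typically used on shared bitmaps such as the per-domain event
 * mask; setting the bit masks the port, clearing it unmasks the port
 * ('port' is a hypothetical event-channel number for illustration):
 *
 *     set_bit(port, &HYPERVISOR_shared_info->evtchn_mask[0]);
 *     clear_bit(port, &HYPERVISOR_shared_info->evtchn_mask[0]);
 */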

/**
 * atomic_inc - increment atomic variable
 * @v: pointer of type atomic_t
 *
 * Atomically increments @v by 1.  Note that the guaranteed
 * useful range of an atomic_t is only 24 bits.
 */
static __inline__ void atomic_inc(atomic_t *v)
{
        __asm__ __volatile__(
                LOCK "incl %0"
                :"=m" (v->counter)
                :"m" (v->counter));
}
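/*
 * Illustrative sketch (not part of this header): an atomic_t counter is
 * declared once and bumped from any context without extra locking
 * ('upcalls_seen' is a hypothetical counter name):
 *
 *     static atomic_t upcalls_seen = { 0 };
 *
 *     atomic_inc(&upcalls_seen);
 */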

#define rdtscll(val) \
     __asm__ __volatile__("rdtsc" : "=A" (val))
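/*
 * Illustrative sketch (not part of this header): rdtscll() reads the
 * 64-bit time-stamp counter into EDX:EAX (the "=A" constraint), so the
 * destination must be a 64-bit variable:
 *
 *     uint64_t tsc;
 *
 *     rdtscll(tsc);
 */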

#endif /* !__ASSEMBLY__ */

#endif /* _XEN_OS_H_ */