xen-os.h revision 251767
/******************************************************************************
 * os.h
 *
 * Random collection of macros and definitions.
 *
 * $FreeBSD: head/sys/amd64/include/xen/xen-os.h 251767 2013-06-14 23:43:44Z gibbs $
 */

#ifndef _XEN_OS_H_
#define _XEN_OS_H_

#ifdef PAE
#define CONFIG_X86_PAE
#endif

#ifdef LOCORE
#define __ASSEMBLY__
#endif

#if !defined(__XEN_INTERFACE_VERSION__)
#define __XEN_INTERFACE_VERSION__ 0x00030208
#endif

#define GRANT_REF_INVALID   0xffffffff

#include <xen/interface/xen.h>

/* Everything below this point is not included by assembler (.S) files. */
#ifndef __ASSEMBLY__

/* Force a proper event-channel callback from Xen. */
void force_evtchn_callback(void);

extern int gdtset;

extern shared_info_t *HYPERVISOR_shared_info;

/* REP NOP (PAUSE) is a good thing to insert into busy-wait loops. */
static inline void rep_nop(void)
{
    __asm__ __volatile__ ( "rep;nop" : : : "memory" );
}
#define cpu_relax() rep_nop()

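/*
 * Example (illustrative only; "flag" is a hypothetical shared variable):
 * spin politely while waiting for another CPU or the hypervisor to
 * flip a flag, without hammering the memory bus.
 *
 *	while (flag == 0)
 *		cpu_relax();
 */
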
/* Crude memory allocator for allocations early in boot. */
void *bootmem_alloc(unsigned int size);
void bootmem_free(void *ptr, unsigned int size);

void printk(const char *fmt, ...);

/* Some function prototypes. */
void trap_init(void);

#define likely(x)  __builtin_expect((x),1)
#define unlikely(x)  __builtin_expect((x),0)

#ifndef XENHVM

/*
 * STI/CLI equivalents. These basically set and clear the virtual
 * interrupt-enable flag (evtchn_upcall_mask) in the per-vCPU area of
 * the shared_info structure. Note that when the enable bit is set,
 * there may be pending events to be handled. We may therefore call
 * into do_hypervisor_callback() directly.
 */

#define __cli()                                                         \
do {                                                                    \
        vcpu_info_t *_vcpu;                                             \
        _vcpu = &HYPERVISOR_shared_info->vcpu_info[PCPU_GET(cpuid)];    \
        _vcpu->evtchn_upcall_mask = 1;                                  \
        barrier();                                                      \
} while (0)

#define __sti()                                                         \
do {                                                                    \
        vcpu_info_t *_vcpu;                                             \
        barrier();                                                      \
        _vcpu = &HYPERVISOR_shared_info->vcpu_info[PCPU_GET(cpuid)];    \
        _vcpu->evtchn_upcall_mask = 0;                                  \
        barrier(); /* unmask then check (avoid races) */                \
        if (unlikely(_vcpu->evtchn_upcall_pending))                     \
                force_evtchn_callback();                                \
} while (0)

#define __restore_flags(x)                                              \
do {                                                                    \
        vcpu_info_t *_vcpu;                                             \
        barrier();                                                      \
        _vcpu = &HYPERVISOR_shared_info->vcpu_info[PCPU_GET(cpuid)];    \
        if ((_vcpu->evtchn_upcall_mask = (x)) == 0) {                   \
                barrier(); /* unmask then check (avoid races) */        \
                if (unlikely(_vcpu->evtchn_upcall_pending))             \
                        force_evtchn_callback();                        \
        }                                                               \
} while (0)

/* TODO: Add critical_{enter, exit}? */
#define __save_and_cli(x)                                               \
do {                                                                    \
        vcpu_info_t *_vcpu;                                             \
        _vcpu = &HYPERVISOR_shared_info->vcpu_info[PCPU_GET(cpuid)];    \
        (x) = _vcpu->evtchn_upcall_mask;                                \
        _vcpu->evtchn_upcall_mask = 1;                                  \
        barrier();                                                      \
} while (0)

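/*
 * save_flags() below expands to __save_flags(), which this header never
 * defines. A minimal sketch of the intended definition, reconstructed
 * by analogy with __save_and_cli() above (reads the upcall mask without
 * modifying it):
 */
#define __save_flags(x)                                                 \
do {                                                                    \
        vcpu_info_t *_vcpu;                                             \
        _vcpu = &HYPERVISOR_shared_info->vcpu_info[PCPU_GET(cpuid)];    \
        (x) = _vcpu->evtchn_upcall_mask;                                \
} while (0)
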
#define cli() __cli()
#define sti() __sti()
#define save_flags(x) __save_flags(x)
#define restore_flags(x) __restore_flags(x)
#define save_and_cli(x) __save_and_cli(x)

#define local_irq_save(x)       __save_and_cli(x)
#define local_irq_restore(x)    __restore_flags(x)
#define local_irq_disable()     __cli()
#define local_irq_enable()      __sti()

#define mtx_lock_irqsave(lock, x)                                       \
        do { local_irq_save((x)); mtx_lock_spin((lock)); } while (0)
#define mtx_unlock_irqrestore(lock, x)                                  \
        do { mtx_unlock_spin((lock)); local_irq_restore((x)); } while (0)
#define spin_lock_irqsave mtx_lock_irqsave
#define spin_unlock_irqrestore mtx_unlock_irqrestore

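/*
 * Example (illustrative; "sc" and "sc_mtx" are hypothetical): take a
 * spin mutex while also masking event-channel upcalls on this vCPU,
 * so the upcall path cannot race with the protected section.
 *
 *	unsigned long flags;
 *
 *	mtx_lock_irqsave(&sc->sc_mtx, flags);
 *	... touch state shared with the upcall path ...
 *	mtx_unlock_irqrestore(&sc->sc_mtx, flags);
 */
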
#endif /* !XENHVM */

#ifndef xen_mb
#define xen_mb() mb()
#endif
#ifndef xen_rmb
#define xen_rmb() rmb()
#endif
#ifndef xen_wmb
#define xen_wmb() wmb()
#endif
#ifdef SMP
#define smp_mb() mb()
#define smp_rmb() rmb()
#define smp_wmb() wmb()
#define smp_read_barrier_depends()      read_barrier_depends()
#define set_mb(var, value) do { xchg(&var, value); } while (0)
#else
#define smp_mb()        barrier()
#define smp_rmb()       barrier()
#define smp_wmb()       barrier()
#define smp_read_barrier_depends()      do { } while (0)
#define set_mb(var, value) do { var = value; barrier(); } while (0)
#endif

/* This is a barrier for the compiler only, NOT the processor! */
#define barrier() __asm__ __volatile__("": : :"memory")

#define LOCK_PREFIX ""
#define LOCK ""
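
/*
 * LOCK_PREFIX and LOCK expand to nothing here, so the bit operations
 * and atomic_inc() below are atomic only with respect to the local
 * CPU (a single instruction cannot be split by an interrupt), not
 * across CPUs.
 */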
#define ADDR (*(volatile long *) addr)

typedef struct { volatile int counter; } atomic_t;

#define xen_xchg(ptr,v) \
        ((__typeof__(*(ptr)))__xchg((unsigned long)(v),(ptr),sizeof(*(ptr))))

/*
 * Make sure gcc doesn't try to be clever and move things around
 * on us. We need to use _exactly_ the address the user gave us,
 * not some alias that contains the same information.
 */
struct __xchg_dummy { unsigned long a[100]; };
#define __xg(x) ((volatile struct __xchg_dummy *)(x))
static __inline unsigned long __xchg(unsigned long x, volatile void * ptr,
                                   int size)
{
    switch (size) {
    case 1:
        __asm__ __volatile__("xchgb %b0,%1"
                             :"=q" (x)
                             :"m" (*__xg(ptr)), "0" (x)
                             :"memory");
        break;
    case 2:
        __asm__ __volatile__("xchgw %w0,%1"
                             :"=r" (x)
                             :"m" (*__xg(ptr)), "0" (x)
                             :"memory");
        break;
    case 4:
        __asm__ __volatile__("xchgl %0,%1"
                             :"=r" (x)
                             :"m" (*__xg(ptr)), "0" (x)
                             :"memory");
        break;
    }
    return x;
}

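/*
 * Example (illustrative; "slot" and "state" are hypothetical):
 * atomically publish a new value and observe the previous one.
 *
 *	int prev = xen_xchg(&slot->state, 1);
 *	if (prev == 0)
 *		... the slot was free and is now claimed ...
 */
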
/**
 * test_and_clear_bit - Clear a bit and return its old value
 * @nr: Bit to clear
 * @addr: Address to count from
 *
 * This operation is atomic and cannot be reordered.
 * It also implies a memory barrier.
 */
static __inline int test_and_clear_bit(int nr, volatile void * addr)
{
        int oldbit;

        __asm__ __volatile__( LOCK_PREFIX
                "btrl %2,%1\n\tsbbl %0,%0"
                :"=r" (oldbit),"=m" (ADDR)
                :"Ir" (nr) : "memory");
        return oldbit;
}

static __inline int constant_test_bit(int nr, const volatile void * addr)
{
    return ((1UL << (nr & 31)) & (((const volatile unsigned int *) addr)[nr >> 5])) != 0;
}

static __inline int variable_test_bit(int nr, volatile void * addr)
{
    int oldbit;

    __asm__ __volatile__(
        "btl %2,%1\n\tsbbl %0,%0"
        :"=r" (oldbit)
        :"m" (ADDR),"Ir" (nr));
    return oldbit;
}

#define test_bit(nr,addr) \
(__builtin_constant_p(nr) ? \
 constant_test_bit((nr),(addr)) : \
 variable_test_bit((nr),(addr)))

/**
 * set_bit - Atomically set a bit in memory
 * @nr: the bit to set
 * @addr: the address to start counting from
 *
 * This function is atomic and may not be reordered.  See __set_bit()
 * if you do not require the atomic guarantees.
 * Note that @nr may be almost arbitrarily large; this function is not
 * restricted to acting on a single-word quantity.
 */
static __inline__ void set_bit(int nr, volatile void * addr)
{
        __asm__ __volatile__( LOCK_PREFIX
                "btsl %1,%0"
                :"=m" (ADDR)
                :"Ir" (nr));
}

/**
 * clear_bit - Clears a bit in memory
 * @nr: Bit to clear
 * @addr: Address to start counting from
 *
 * clear_bit() is atomic and may not be reordered.  However, it does
 * not contain a memory barrier, so if it is used for locking purposes,
 * you should call smp_mb__before_clear_bit() and/or smp_mb__after_clear_bit()
 * in order to ensure changes are visible on other processors.
 */
static __inline__ void clear_bit(int nr, volatile void * addr)
{
        __asm__ __volatile__( LOCK_PREFIX
                "btrl %1,%0"
                :"=m" (ADDR)
                :"Ir" (nr));
}

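/*
 * Example (illustrative; "pending" is a hypothetical bitmap):
 *
 *	static volatile unsigned long pending[4];
 *
 *	set_bit(5, pending);
 *	if (test_bit(5, pending))
 *		clear_bit(5, pending);
 */
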
/**
 * atomic_inc - increment atomic variable
 * @v: pointer of type atomic_t
 *
 * Atomically increments @v by 1.
 */
static __inline__ void atomic_inc(atomic_t *v)
{
        __asm__ __volatile__(
                LOCK "incl %0"
                :"=m" (v->counter)
                :"m" (v->counter));
}

/*
 * Read the 64-bit TSC.  rdtsc returns the two 32-bit halves in
 * %eax/%edx, so combine them explicitly; the i386-style "=A"
 * constraint does not form a 64-bit value on amd64.
 */
#define rdtscll(val)                                                    \
do {                                                                    \
        unsigned int __lo, __hi;                                        \
        __asm__ __volatile__("rdtsc" : "=a" (__lo), "=d" (__hi));       \
        (val) = ((unsigned long)__hi << 32) | __lo;                     \
} while (0)

#endif /* !__ASSEMBLY__ */

#endif /* _XEN_OS_H_ */