/******************************************************************************
 * os.h
 *
 * random collection of macros and definitions
 */

#ifndef _XEN_OS_H_
#define _XEN_OS_H_

#ifdef PAE
#define CONFIG_X86_PAE
#endif

#if !defined(__XEN_INTERFACE_VERSION__)
/*
 * Can update to a more recent version when we implement
 * the hypercall page
 */
#define  __XEN_INTERFACE_VERSION__ 0x00030204
#endif

#include <xen/interface/xen.h>

/* Force a proper event-channel callback from Xen. */
void force_evtchn_callback(void);

extern int gdtset;

extern shared_info_t *HYPERVISOR_shared_info;

/* REP NOP (PAUSE) is a good thing to insert into busy-wait loops. */
static inline void rep_nop(void)
{
    __asm__ __volatile__ ( "rep;nop" : : : "memory" );
}
#define cpu_relax() rep_nop()
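
/*
 * Example (illustrative sketch, not part of the interface above): cpu_relax()
 * is meant for busy-wait loops, e.g. spinning until another CPU sets a flag
 * such as the gdtset variable declared earlier:
 *
 *	while (!gdtset)
 *		cpu_relax();
 */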

/*
 * Crude memory allocator for memory allocations made early in boot.
 */
void *bootmem_alloc(unsigned int size);
void bootmem_free(void *ptr, unsigned int size);
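
/*
 * Usage sketch (illustrative only; "struct early_table" is hypothetical):
 * bootmem_free() takes the size of the region being freed, so callers are
 * expected to remember what they asked bootmem_alloc() for.
 *
 *	struct early_table *tbl = bootmem_alloc(sizeof(*tbl));
 *	...
 *	bootmem_free(tbl, sizeof(*tbl));
 */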


/* Everything below this point is not included by assembler (.S) files. */
#ifndef __ASSEMBLY__

void printk(const char *fmt, ...);

/* some function prototypes */
void trap_init(void);

#define likely(x)  __builtin_expect((x),1)
#define unlikely(x)  __builtin_expect((x),0)

#ifndef XENHVM

/*
 * STI/CLI equivalents. These basically set and clear the virtual
 * event_enable flag in the shared_info structure. Note that when
 * the enable bit is set, there may be pending events to be handled.
 * We may therefore call into do_hypervisor_callback() directly.
 */

#define __cli()                                                         \
do {                                                                    \
        vcpu_info_t *_vcpu;                                             \
        _vcpu = &HYPERVISOR_shared_info->vcpu_info[PCPU_GET(cpuid)];    \
        _vcpu->evtchn_upcall_mask = 1;                                  \
        barrier();                                                      \
} while (0)

#define __sti()                                                         \
do {                                                                    \
        vcpu_info_t *_vcpu;                                             \
        barrier();                                                      \
        _vcpu = &HYPERVISOR_shared_info->vcpu_info[PCPU_GET(cpuid)];    \
        _vcpu->evtchn_upcall_mask = 0;                                  \
        barrier(); /* unmask then check (avoid races) */                \
        if ( unlikely(_vcpu->evtchn_upcall_pending) )                   \
                force_evtchn_callback();                                \
} while (0)

#define __restore_flags(x)                                              \
do {                                                                    \
        vcpu_info_t *_vcpu;                                             \
        barrier();                                                      \
        _vcpu = &HYPERVISOR_shared_info->vcpu_info[PCPU_GET(cpuid)];    \
        if ((_vcpu->evtchn_upcall_mask = (x)) == 0) {                   \
                barrier(); /* unmask then check (avoid races) */        \
                if ( unlikely(_vcpu->evtchn_upcall_pending) )           \
                        force_evtchn_callback();                        \
        }                                                               \
} while (0)

/*
 * Add critical_{enter, exit}?
 */
#define __save_and_cli(x)                                               \
do {                                                                    \
        vcpu_info_t *_vcpu;                                             \
        _vcpu = &HYPERVISOR_shared_info->vcpu_info[PCPU_GET(cpuid)];    \
        (x) = _vcpu->evtchn_upcall_mask;                                \
        _vcpu->evtchn_upcall_mask = 1;                                  \
        barrier();                                                      \
} while (0)


#define cli() __cli()
#define sti() __sti()
#define save_flags(x) __save_flags(x)
#define restore_flags(x) __restore_flags(x)
#define save_and_cli(x) __save_and_cli(x)

#define local_irq_save(x)       __save_and_cli(x)
#define local_irq_restore(x)    __restore_flags(x)
#define local_irq_disable()     __cli()
#define local_irq_enable()      __sti()

#define mtx_lock_irqsave(lock, x) {local_irq_save((x)); mtx_lock_spin((lock));}
#define mtx_unlock_irqrestore(lock, x) {mtx_unlock_spin((lock)); local_irq_restore((x)); }
#define spin_lock_irqsave mtx_lock_irqsave
#define spin_unlock_irqrestore mtx_unlock_irqrestore

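/*
 * Usage sketch (illustrative only; "xen_lock" and "handle_event" are
 * hypothetical): the flags word saved by local_irq_save() is simply the
 * previous evtchn_upcall_mask value and must be handed back to
 * local_irq_restore() on the same vCPU.
 *
 *	unsigned long flags;
 *
 *	mtx_lock_irqsave(&xen_lock, flags);
 *	handle_event();
 *	mtx_unlock_irqrestore(&xen_lock, flags);
 */
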
#endif /* !XENHVM */

#ifndef mb
#define mb() __asm__ __volatile__("mfence":::"memory")
#endif
#ifndef rmb
#define rmb() __asm__ __volatile__("lfence":::"memory")
#endif
#ifndef wmb
#define wmb() barrier()
#endif
#ifdef SMP
#define smp_mb() mb()
#define smp_rmb() rmb()
#define smp_wmb() wmb()
#define smp_read_barrier_depends()      read_barrier_depends()
#define set_mb(var, value) do { xchg(&var, value); } while (0)
#else
#define smp_mb()        barrier()
#define smp_rmb()       barrier()
#define smp_wmb()       barrier()
#define smp_read_barrier_depends()      do { } while(0)
#define set_mb(var, value) do { var = value; barrier(); } while (0)
#endif
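
/*
 * Illustrative sketch (the "ring" layout is hypothetical): the usual pairing
 * is a producer that publishes data before advancing an index, and a consumer
 * that reads the index before the data, much like the Xen shared rings:
 *
 *	Producer:                        Consumer:
 *	ring->slot[i] = req;             prod = ring->prod;
 *	wmb();                           rmb();
 *	ring->prod = i + 1;              req = ring->slot[prod - 1];
 */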

/* This is a barrier for the compiler only, NOT the processor! */
#define barrier() __asm__ __volatile__("": : :"memory")

#define LOCK_PREFIX ""
#define LOCK ""
#define ADDR (*(volatile long *) addr)
/*
 * Make sure gcc doesn't try to be clever and move things around
 * on us. We need to use _exactly_ the address the user gave us,
 * not some alias that contains the same information.
 */
typedef struct { volatile int counter; } atomic_t;


#define xen_xchg(ptr,v) \
        ((__typeof__(*(ptr)))__xchg((unsigned long)(v),(ptr),sizeof(*(ptr))))
struct __xchg_dummy { unsigned long a[100]; };
#define __xg(x) ((volatile struct __xchg_dummy *)(x))
static __inline unsigned long __xchg(unsigned long x, volatile void * ptr,
                                   int size)
{
    switch (size) {
    case 1:
        __asm__ __volatile__("xchgb %b0,%1"
                             :"=q" (x)
                             :"m" (*__xg(ptr)), "0" (x)
                             :"memory");
        break;
    case 2:
        __asm__ __volatile__("xchgw %w0,%1"
                             :"=r" (x)
                             :"m" (*__xg(ptr)), "0" (x)
                             :"memory");
        break;
    case 4:
        __asm__ __volatile__("xchgl %0,%1"
                             :"=r" (x)
                             :"m" (*__xg(ptr)), "0" (x)
                             :"memory");
        break;
    }
    return x;
}
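
/*
 * Example (illustrative sketch; "pending" and "process_pending" are
 * hypothetical): xen_xchg() atomically stores a new value and returns the
 * previous contents.
 *
 *	unsigned long old = xen_xchg(&pending, 0UL);
 *	if (old)
 *		process_pending(old);
 */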

/**
 * test_and_clear_bit - Clear a bit and return its old value
 * @nr: Bit to clear
 * @addr: Address to count from
 *
 * This operation is atomic and cannot be reordered.
 * It also implies a memory barrier.
 */
static __inline int test_and_clear_bit(int nr, volatile void * addr)
{
        int oldbit;

        __asm__ __volatile__( LOCK_PREFIX
                "btrl %2,%1\n\tsbbl %0,%0"
                :"=r" (oldbit),"=m" (ADDR)
                :"Ir" (nr) : "memory");
        return oldbit;
}

static __inline int constant_test_bit(int nr, const volatile void * addr)
{
    return ((1UL << (nr & 31)) & (((const volatile unsigned int *) addr)[nr >> 5])) != 0;
}

static __inline int variable_test_bit(int nr, volatile void * addr)
{
    int oldbit;

    __asm__ __volatile__(
        "btl %2,%1\n\tsbbl %0,%0"
        :"=r" (oldbit)
        :"m" (ADDR),"Ir" (nr));
    return oldbit;
}

#define test_bit(nr,addr) \
(__builtin_constant_p(nr) ? \
 constant_test_bit((nr),(addr)) : \
 variable_test_bit((nr),(addr)))
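
/*
 * Example (illustrative sketch; handle_port() is hypothetical): test_bit()
 * picks the constant- or variable-index helper at compile time, and the bit
 * number may index past the first word, e.g. when scanning an event-channel
 * pending mask:
 *
 *	if (test_bit(port, &HYPERVISOR_shared_info->evtchn_pending[0]))
 *		handle_port(port);
 */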

/**
 * set_bit - Atomically set a bit in memory
 * @nr: the bit to set
 * @addr: the address to start counting from
 *
 * This function is atomic and may not be reordered.  See __set_bit()
 * if you do not require the atomic guarantees.
 * Note that @nr may be almost arbitrarily large; this function is not
 * restricted to acting on a single-word quantity.
 */
static __inline__ void set_bit(int nr, volatile void * addr)
{
        __asm__ __volatile__( LOCK_PREFIX
                "btsl %1,%0"
                :"=m" (ADDR)
                :"Ir" (nr));
}

/**
 * clear_bit - Clears a bit in memory
 * @nr: Bit to clear
 * @addr: Address to start counting from
 *
 * clear_bit() is atomic and may not be reordered.  However, it does
 * not contain a memory barrier, so if it is used for locking purposes,
 * you should call smp_mb__before_clear_bit() and/or smp_mb__after_clear_bit()
 * in order to ensure changes are visible on other processors.
 */
static __inline__ void clear_bit(int nr, volatile void * addr)
{
        __asm__ __volatile__( LOCK_PREFIX
                "btrl %1,%0"
                :"=m" (ADDR)
                :"Ir" (nr));
}
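
/*
 * Example (illustrative sketch; "evtchn_mask_cache" is a hypothetical
 * unsigned long array): only the bit update itself is atomic, and clear_bit()
 * carries no memory barrier (see above), so add smp_mb() explicitly when the
 * bit is used to publish other data.
 *
 *	set_bit(port, evtchn_mask_cache);	(mask the port)
 *	...
 *	clear_bit(port, evtchn_mask_cache);	(unmask it again)
 */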

/**
 * atomic_inc - increment atomic variable
 * @v: pointer of type atomic_t
 *
 * Atomically increments @v by 1.  Note that the guaranteed
 * useful range of an atomic_t is only 24 bits.
 */
static __inline__ void atomic_inc(atomic_t *v)
{
        __asm__ __volatile__(
                LOCK "incl %0"
                :"=m" (v->counter)
                :"m" (v->counter));
}
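
/*
 * Example (illustrative sketch; the counter name is hypothetical):
 *
 *	static atomic_t xen_evtchn_upcalls = { 0 };
 *	...
 *	atomic_inc(&xen_evtchn_upcalls);
 */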

#define rdtscll(val) \
     __asm__ __volatile__("rdtsc" : "=A" (val))
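
/*
 * Example (illustrative sketch): the "=A" constraint names the EDX:EAX pair,
 * so the destination should be a 64-bit variable.
 *
 *	uint64_t tsc;
 *
 *	rdtscll(tsc);
 */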

#endif /* !__ASSEMBLY__ */

#endif /* _XEN_OS_H_ */