xen-os.h revision 184235
/******************************************************************************
 * os.h
 *
 * Random collection of macros and definitions.
 */

#ifndef _XEN_OS_H_
#define _XEN_OS_H_
#include <machine/param.h>
#ifdef PAE
#define CONFIG_X86_PAE
#endif

#if defined(XEN) && !defined(__XEN_INTERFACE_VERSION__)
/*
 * Can update to a more recent version when we implement
 * the hypercall page
 */
#define  __XEN_INTERFACE_VERSION__ 0x00030204
#endif

#include <xen/interface/xen.h>

/* Force a proper event-channel callback from Xen. */
void force_evtchn_callback(void);

#ifndef vtophys
#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/pmap.h>
#endif

extern int gdtset;
#ifdef SMP
#include <sys/time.h> /* XXX for pcpu.h */
#include <sys/pcpu.h> /* XXX for PCPU_GET */
static inline int
smp_processor_id(void)
{
	if (__predict_true(gdtset))
		return PCPU_GET(cpuid);
	return 0;
}

#else
#define smp_processor_id() 0
#endif

#ifndef NULL
#define NULL (void *)0
#endif

#ifndef PANIC_IF
#define PANIC_IF(exp)							\
do {									\
	if (unlikely(exp)) {						\
		printk("panic - %s: %s:%d\n", #exp, __FILE__, __LINE__);\
		panic("%s: %s:%d", #exp, __FILE__, __LINE__);		\
	}								\
} while (0)
#endif
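
/*
 * Illustrative sketch (not part of the original header): PANIC_IF is meant
 * for early-boot sanity checks where recovery is impossible, e.g. a
 * hypothetical check that the shared info page was mapped:
 *
 *	PANIC_IF(HYPERVISOR_shared_info == NULL);
 *
 * The condition is stringified, so the panic message names the exact
 * expression that failed along with the file and line.
 */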

extern shared_info_t *HYPERVISOR_shared_info;

/* REP NOP (PAUSE) is a good thing to insert into busy-wait loops. */
static inline void rep_nop(void)
{
	__asm__ __volatile__ ( "rep;nop" : : : "memory" );
}
#define cpu_relax() rep_nop()

/*
 * Somewhere in the middle of the GCC 2.96 development cycle, a mechanism
 * was implemented by which the user can annotate likely branch directions
 * and expect the blocks to be reordered appropriately.  Define
 * __builtin_expect to nothing for earlier compilers.
 */
#if __GNUC__ == 2 && __GNUC_MINOR__ < 96
#define __builtin_expect(x, expected_value) (x)
#endif

#define per_cpu(var, cpu)           (pcpu_find((cpu))->pc_ ## var)
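
/*
 * Example (not part of the original header): per_cpu() expands to a field
 * access on the FreeBSD per-CPU structure, so a hypothetical lookup of
 * CPU 0's id would read
 *
 *	int id = per_cpu(cpuid, 0);
 *
 * which is shorthand for pcpu_find(0)->pc_cpuid.  Any pc_-prefixed member
 * of struct pcpu can be named this way.
 */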

/*
 * Crude memory allocator for memory allocation early in boot.
 */
void *bootmem_alloc(unsigned int size);
void bootmem_free(void *ptr, unsigned int size);


/* Everything below this point is not included by assembler (.S) files. */
#ifndef __ASSEMBLY__
#include <sys/types.h>

void printk(const char *fmt, ...);

/* some function prototypes */
void trap_init(void);

#define likely(x)  __builtin_expect((x),1)
#define unlikely(x)  __builtin_expect((x),0)

/*
 * STI/CLI equivalents. These basically set and clear the virtual
 * event_enable flag in the shared_info structure. Note that when
 * the enable bit is set, there may be pending events to be handled.
 * We may therefore call into do_hypervisor_callback() directly.
 */
#define __cli()                                                         \
do {                                                                    \
        vcpu_info_t *_vcpu;                                             \
        _vcpu = &HYPERVISOR_shared_info->vcpu_info[smp_processor_id()]; \
        _vcpu->evtchn_upcall_mask = 1;                                  \
        barrier();                                                      \
} while (0)

#define __sti()                                                         \
do {                                                                    \
        vcpu_info_t *_vcpu;                                             \
        barrier();                                                      \
        _vcpu = &HYPERVISOR_shared_info->vcpu_info[smp_processor_id()]; \
        _vcpu->evtchn_upcall_mask = 0;                                  \
        barrier(); /* unmask then check (avoid races) */                \
        if ( unlikely(_vcpu->evtchn_upcall_pending) )                   \
                force_evtchn_callback();                                \
} while (0)

#define __restore_flags(x)                                              \
do {                                                                    \
        vcpu_info_t *_vcpu;                                             \
        barrier();                                                      \
        _vcpu = &HYPERVISOR_shared_info->vcpu_info[smp_processor_id()]; \
        if ((_vcpu->evtchn_upcall_mask = (x)) == 0) {                   \
                barrier(); /* unmask then check (avoid races) */        \
                if ( unlikely(_vcpu->evtchn_upcall_pending) )           \
                        force_evtchn_callback();                        \
        }                                                               \
} while (0)

/*
 * Add critical_{enter, exit}?
 */
#define __save_and_cli(x)                                               \
do {                                                                    \
        vcpu_info_t *_vcpu;                                             \
        _vcpu = &HYPERVISOR_shared_info->vcpu_info[smp_processor_id()]; \
        (x) = _vcpu->evtchn_upcall_mask;                                \
        _vcpu->evtchn_upcall_mask = 1;                                  \
        barrier();                                                      \
} while (0)

#define cli() __cli()
#define sti() __sti()
#define save_flags(x) __save_flags(x)
#define restore_flags(x) __restore_flags(x)
#define save_and_cli(x) __save_and_cli(x)

#define local_irq_save(x)       __save_and_cli(x)
#define local_irq_restore(x)    __restore_flags(x)
#define local_irq_disable()     __cli()
#define local_irq_enable()      __sti()

#define mtx_lock_irqsave(lock, x) {local_irq_save((x)); mtx_lock_spin((lock));}
#define mtx_unlock_irqrestore(lock, x) {mtx_unlock_spin((lock)); local_irq_restore((x)); }
#define spin_lock_irqsave mtx_lock_irqsave
#define spin_unlock_irqrestore mtx_unlock_irqrestore
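
/*
 * Illustrative sketch (not part of the original header), assuming a caller
 * that must briefly block event delivery around a shared-ring update; the
 * ring variables here are hypothetical:
 *
 *	unsigned long flags;
 *
 *	local_irq_save(flags);		(mask upcalls, remember old mask)
 *	ring->req_prod = prod;		(critical section)
 *	local_irq_restore(flags);	(restore mask)
 *
 * local_irq_restore() calls force_evtchn_callback() itself if an event
 * became pending while upcalls were masked.
 */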

#ifndef mb
#define mb() __asm__ __volatile__("lock; addl $0, 0(%%esp)": : :"memory")
#endif
#ifndef rmb
#define rmb() mb()
#endif
#ifndef wmb
#define wmb() barrier()
#endif
#ifdef SMP
#define smp_mb() mb()
#define smp_rmb() rmb()
#define smp_wmb() wmb()
#define smp_read_barrier_depends()      read_barrier_depends()
#define set_mb(var, value) do { xchg(&var, value); } while (0)
#else
#define smp_mb()        barrier()
#define smp_rmb()       barrier()
#define smp_wmb()       barrier()
#define smp_read_barrier_depends()      do { } while(0)
#define set_mb(var, value) do { var = value; barrier(); } while (0)
#endif
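
/*
 * Illustrative sketch (not part of the original header): the classic
 * producer/consumer pairing these barriers support, with a hypothetical
 * shared request ring:
 *
 *	producer:  ring[prod] = req;  wmb();  shared->prod = prod + 1;
 *	consumer:  prod = shared->prod;  rmb();  req = ring[prod - 1];
 *
 * wmb() keeps the payload visible before the producer index update, and
 * rmb() keeps the consumer from reading the payload before the index.
 */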

/* This is a barrier for the compiler only, NOT the processor! */
#define barrier() __asm__ __volatile__("": : :"memory")

#define LOCK_PREFIX ""
#define LOCK ""

/*
 * Make sure gcc doesn't try to be clever and move things around
 * on us. We need to use _exactly_ the address the user gave us,
 * not some alias that contains the same information.
 */
#define ADDR (*(volatile long *) addr)

typedef struct { volatile int counter; } atomic_t;

#define xen_xchg(ptr,v) \
        ((__typeof__(*(ptr)))__xchg((unsigned long)(v),(ptr),sizeof(*(ptr))))
struct __xchg_dummy { unsigned long a[100]; };
#define __xg(x) ((volatile struct __xchg_dummy *)(x))
static __inline unsigned long __xchg(unsigned long x, volatile void * ptr,
                                   int size)
{
    switch (size) {
    case 1:
        __asm__ __volatile__("xchgb %b0,%1"
                             :"=q" (x)
                             :"m" (*__xg(ptr)), "0" (x)
                             :"memory");
        break;
    case 2:
        __asm__ __volatile__("xchgw %w0,%1"
                             :"=r" (x)
                             :"m" (*__xg(ptr)), "0" (x)
                             :"memory");
        break;
    case 4:
        __asm__ __volatile__("xchgl %0,%1"
                             :"=r" (x)
                             :"m" (*__xg(ptr)), "0" (x)
                             :"memory");
        break;
    }
    return x;
}
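
/*
 * Illustrative sketch (not part of the original header): xen_xchg() is an
 * atomic swap sized by its operand, so a hypothetical lock handoff could
 * be written as
 *
 *	int old = xen_xchg(&lock_word, 1);
 *
 * which stores 1 and returns the previous value in a single XCHG
 * instruction (implicitly locked when it references memory).
 */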

/**
 * test_and_clear_bit - Clear a bit and return its old value
 * @nr: Bit to clear
 * @addr: Address to count from
 *
 * This operation is atomic and cannot be reordered.
 * It also implies a memory barrier.
 */
static __inline int test_and_clear_bit(int nr, volatile void * addr)
{
        int oldbit;

        __asm__ __volatile__( LOCK_PREFIX
                "btrl %2,%1\n\tsbbl %0,%0"
                :"=r" (oldbit),"=m" (ADDR)
                :"Ir" (nr) : "memory");
        return oldbit;
}

static __inline int constant_test_bit(int nr, const volatile void * addr)
{
    return ((1UL << (nr & 31)) & (((const volatile unsigned int *) addr)[nr >> 5])) != 0;
}

static __inline int variable_test_bit(int nr, volatile void * addr)
{
    int oldbit;

    __asm__ __volatile__(
        "btl %2,%1\n\tsbbl %0,%0"
        :"=r" (oldbit)
        :"m" (ADDR),"Ir" (nr));
    return oldbit;
}

#define test_bit(nr,addr) \
(__builtin_constant_p(nr) ? \
 constant_test_bit((nr),(addr)) : \
 variable_test_bit((nr),(addr)))


/**
 * set_bit - Atomically set a bit in memory
 * @nr: the bit to set
 * @addr: the address to start counting from
 *
 * This function is atomic and may not be reordered.  See __set_bit()
 * if you do not require the atomic guarantees.
 * Note that @nr may be almost arbitrarily large; this function is not
 * restricted to acting on a single-word quantity.
 */
static __inline__ void set_bit(int nr, volatile void * addr)
{
        __asm__ __volatile__( LOCK_PREFIX
                "btsl %1,%0"
                :"=m" (ADDR)
                :"Ir" (nr));
}

/**
 * clear_bit - Clears a bit in memory
 * @nr: Bit to clear
 * @addr: Address to start counting from
 *
 * clear_bit() is atomic and may not be reordered.  However, it does
 * not contain a memory barrier, so if it is used for locking purposes,
 * you should call smp_mb__before_clear_bit() and/or smp_mb__after_clear_bit()
 * in order to ensure changes are visible on other processors.
 */
static __inline__ void clear_bit(int nr, volatile void * addr)
{
        __asm__ __volatile__( LOCK_PREFIX
                "btrl %1,%0"
                :"=m" (ADDR)
                :"Ir" (nr));
}
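
/*
 * Illustrative sketch (not part of the original header): a hypothetical
 * pending-work word, with PORT and handle_port() as placeholder names.
 *
 *	static unsigned long pending;
 *
 *	set_bit(PORT, &pending);			(mark work)
 *	if (test_and_clear_bit(PORT, &pending))		(consume exactly once)
 *		handle_port(PORT);
 *
 * test_and_clear_bit() is the atomic consume step, so two CPUs racing on
 * the same bit cannot both see it set.
 */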

/**
 * atomic_inc - increment atomic variable
 * @v: pointer of type atomic_t
 *
 * Atomically increments @v by 1.  Note that the guaranteed
 * useful range of an atomic_t is only 24 bits.
 */
static __inline__ void atomic_inc(atomic_t *v)
{
        __asm__ __volatile__(
                LOCK "incl %0"
                :"=m" (v->counter)
                :"m" (v->counter));
}
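
/*
 * Illustrative sketch (not part of the original header): a hypothetical
 * reference counter built on atomic_t.
 *
 *	static atomic_t refcnt = { 0 };
 *
 *	atomic_inc(&refcnt);		(take a reference)
 *
 * Note that LOCK is defined as an empty string earlier in this header, so
 * the increment is not emitted with a lock prefix.
 */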

#define rdtscll(val) \
     __asm__ __volatile__("rdtsc" : "=A" (val))
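
/*
 * Illustrative sketch (not part of the original header): rdtscll() reads
 * the 64-bit time-stamp counter through the EDX:EAX pair ("=A"), e.g.
 *
 *	uint64_t tsc;
 *	rdtscll(tsc);
 *
 * The value is raw cycles and is not synchronized across CPUs.
 */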

/*
 * Kernel pointers have redundant information, so we can use a
 * scheme where we can return either an error code or a valid
 * pointer with the same return value.
 *
 * This should be a per-architecture thing, to allow different
 * error and pointer decisions.
 */
#define IS_ERR_VALUE(x) unlikely((x) > (unsigned long)-1000L)

static inline void *ERR_PTR(long error)
{
	return (void *) error;
}

static inline long PTR_ERR(const void *ptr)
{
	return (long) ptr;
}

static inline long IS_ERR(const void *ptr)
{
	return IS_ERR_VALUE((unsigned long)ptr);
}
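
/*
 * Illustrative sketch (not part of the original header): the error-pointer
 * convention lets one return value carry either a pointer or an errno-style
 * code; some_alloc() here is a hypothetical allocation routine.
 *
 *	void *p = some_alloc();
 *	if (p == NULL)
 *		p = ERR_PTR(-ENOMEM);
 *	...
 *	if (IS_ERR(p))
 *		return PTR_ERR(p);	(propagate the negative error code)
 *
 * Values in the top ~1000 bytes of the address space are treated as error
 * codes by IS_ERR_VALUE().
 */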

#endif /* !__ASSEMBLY__ */

#endif /* _XEN_OS_H_ */