/******************************************************************************
 * os.h
 *
 * A random collection of macros and definitions.
 */

#ifndef _XEN_OS_H_
#define _XEN_OS_H_
#include <machine/param.h>

#ifdef PAE
#define CONFIG_X86_PAE
#endif

#if !defined(__XEN_INTERFACE_VERSION__)
/*
 * This can be updated to a more recent version once the hypercall
 * page is implemented.
 */
#define __XEN_INTERFACE_VERSION__ 0x00030204
#endif

#include <xen/interface/xen.h>

/* Force a proper event-channel callback from Xen. */
void force_evtchn_callback(void);

#define likely(x)  __builtin_expect((x),1)
#define unlikely(x)  __builtin_expect((x),0)

#ifndef vtophys
#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/pmap.h>
#endif

extern int gdtset;
#ifdef SMP
#include <sys/time.h> /* XXX for pcpu.h */
#include <sys/pcpu.h> /* XXX for PCPU_GET */
static inline int
smp_processor_id(void)
{
	if (likely(gdtset))
		return (PCPU_GET(cpuid));
	return (0);
}

#else
#define smp_processor_id() 0
#endif

#ifndef NULL
#define NULL ((void *)0)
#endif

#ifndef PANIC_IF
#define PANIC_IF(exp) do { if (unlikely(exp)) { printk("panic - %s: %s:%d\n", #exp, __FILE__, __LINE__); panic("%s: %s:%d", #exp, __FILE__, __LINE__); } } while (0)
#endif

extern shared_info_t *HYPERVISOR_shared_info;

/* REP NOP (PAUSE) is a good thing to insert into busy-wait loops. */
static inline void rep_nop(void)
{
	__asm__ __volatile__ ( "rep;nop" : : : "memory" );
}
#define cpu_relax() rep_nop()

/*
 * Somewhere in the middle of the GCC 2.96 development cycle, a mechanism
 * was implemented by which the user can annotate likely branch directions
 * and expect the blocks to be reordered appropriately.  Define
 * __builtin_expect to nothing for earlier compilers.
 */
#if __GNUC__ == 2 && __GNUC_MINOR__ < 96
#define __builtin_expect(x, expected_value) (x)
#endif
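
/*
 * Illustrative use of unlikely() (not part of this header): annotate an
 * error path that is rarely taken so the compiler favors the common,
 * straight-line path.  "error" is a hypothetical local variable.
 *
 *	if (unlikely(error != 0))
 *		return (error);
 */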

#define per_cpu(var, cpu)           (pcpu_find((cpu))->pc_ ## var)

/*
 * Crude memory allocator for allocations early in boot, before the
 * regular kernel allocators are available.
 */
void *bootmem_alloc(unsigned int size);
void bootmem_free(void *ptr, unsigned int size);
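
/*
 * Illustrative usage sketch (not part of this header): grab a scratch
 * buffer before the regular allocators are up and hand it back when done.
 * PAGE_SIZE comes from <machine/param.h>; "scratch" is a hypothetical
 * variable used only for the example.
 *
 *	void *scratch = bootmem_alloc(PAGE_SIZE);
 *	...
 *	bootmem_free(scratch, PAGE_SIZE);
 */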

/* Everything below this point is not included by assembler (.S) files. */
#ifndef __ASSEMBLY__
#include <sys/types.h>

void printk(const char *fmt, ...);

/* Some function prototypes. */
void trap_init(void);

#ifndef XENHVM

/*
 * STI/CLI equivalents. These basically set and clear the virtual
 * event_enable flag in the shared_info structure. Note that when
 * the enable bit is set, there may be pending events to be handled.
 * We may therefore call into do_hypervisor_callback() directly.
 */

#define __cli()                                                         \
do {                                                                    \
        vcpu_info_t *_vcpu;                                             \
        _vcpu = &HYPERVISOR_shared_info->vcpu_info[smp_processor_id()]; \
        _vcpu->evtchn_upcall_mask = 1;                                  \
        barrier();                                                      \
} while (0)

#define __sti()                                                         \
do {                                                                    \
        vcpu_info_t *_vcpu;                                             \
        barrier();                                                      \
        _vcpu = &HYPERVISOR_shared_info->vcpu_info[smp_processor_id()]; \
        _vcpu->evtchn_upcall_mask = 0;                                  \
        barrier(); /* unmask then check (avoid races) */                \
        if ( unlikely(_vcpu->evtchn_upcall_pending) )                   \
                force_evtchn_callback();                                \
} while (0)

#define __restore_flags(x)                                              \
do {                                                                    \
        vcpu_info_t *_vcpu;                                             \
        barrier();                                                      \
        _vcpu = &HYPERVISOR_shared_info->vcpu_info[smp_processor_id()]; \
        if ((_vcpu->evtchn_upcall_mask = (x)) == 0) {                   \
                barrier(); /* unmask then check (avoid races) */        \
                if ( unlikely(_vcpu->evtchn_upcall_pending) )           \
                        force_evtchn_callback();                        \
        }                                                               \
} while (0)

/*
 * TODO: Should these helpers also use critical_enter()/critical_exit()?
 */
#define __save_and_cli(x)                                               \
do {                                                                    \
        vcpu_info_t *_vcpu;                                             \
        _vcpu = &HYPERVISOR_shared_info->vcpu_info[smp_processor_id()]; \
        (x) = _vcpu->evtchn_upcall_mask;                                \
        _vcpu->evtchn_upcall_mask = 1;                                  \
        barrier();                                                      \
} while (0)

#define cli() __cli()
#define sti() __sti()
#define save_flags(x) __save_flags(x)
#define restore_flags(x) __restore_flags(x)
#define save_and_cli(x) __save_and_cli(x)

#define local_irq_save(x)       __save_and_cli(x)
#define local_irq_restore(x)    __restore_flags(x)
#define local_irq_disable()     __cli()
#define local_irq_enable()      __sti()

#define mtx_lock_irqsave(lock, x) do { local_irq_save((x)); mtx_lock_spin((lock)); } while (0)
#define mtx_unlock_irqrestore(lock, x) do { mtx_unlock_spin((lock)); local_irq_restore((x)); } while (0)
#define spin_lock_irqsave mtx_lock_irqsave
#define spin_unlock_irqrestore mtx_unlock_irqrestore

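/*
 * Illustrative usage sketch (not part of this header): save the current
 * event-channel upcall mask, run a short critical section, then restore
 * the saved mask so that nesting is preserved.  "flags" is a hypothetical
 * local variable.
 *
 *	unsigned long flags;
 *
 *	local_irq_save(flags);
 *	... touch state shared with the event-channel upcall ...
 *	local_irq_restore(flags);
 */
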
#endif /* !XENHVM */

#ifdef SMP
#define smp_mb() mb()
#define smp_rmb() rmb()
#define smp_wmb() wmb()
#define smp_read_barrier_depends()      read_barrier_depends()
#define set_mb(var, value) do { xchg(&var, value); } while (0)
#else
#define smp_mb()        barrier()
#define smp_rmb()       barrier()
#define smp_wmb()       barrier()
#define smp_read_barrier_depends()      do { } while (0)
#define set_mb(var, value) do { var = value; barrier(); } while (0)
#endif

/* This is a barrier for the compiler only, NOT the processor! */
#define barrier() __asm__ __volatile__("": : :"memory")

#define LOCK_PREFIX ""
#define LOCK ""
/*
 * Make sure gcc doesn't try to be clever and move things around
 * on us.  We need to use _exactly_ the address the user gave us,
 * not some alias that contains the same information.
 */
#define ADDR (*(volatile long *) addr)

typedef struct { volatile int counter; } atomic_t;


#define xen_xchg(ptr,v) \
        ((__typeof__(*(ptr)))__xchg((unsigned long)(v),(ptr),sizeof(*(ptr))))
struct __xchg_dummy { unsigned long a[100]; };
#define __xg(x) ((volatile struct __xchg_dummy *)(x))
static __inline unsigned long __xchg(unsigned long x, volatile void * ptr,
                                   int size)
{
    switch (size) {
    case 1:
        __asm__ __volatile__("xchgb %b0,%1"
                             :"=q" (x)
                             :"m" (*__xg(ptr)), "0" (x)
                             :"memory");
        break;
    case 2:
        __asm__ __volatile__("xchgw %w0,%1"
                             :"=r" (x)
                             :"m" (*__xg(ptr)), "0" (x)
                             :"memory");
        break;
    case 4:
        __asm__ __volatile__("xchgl %0,%1"
                             :"=r" (x)
                             :"m" (*__xg(ptr)), "0" (x)
                             :"memory");
        break;
    }
    return x;
}
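
/*
 * Illustrative use of xen_xchg() (not part of this header): atomically
 * swap in a new value and observe the previous one.  On x86 the xchg
 * instruction is implicitly locked, so no LOCK prefix is needed.
 * "pending_flag" is a hypothetical variable used only for the example.
 *
 *	static unsigned long pending_flag;
 *
 *	unsigned long old = xen_xchg(&pending_flag, 1UL);
 *	if (old == 0)
 *		... we were the first to set it ...
 */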

/**
 * test_and_clear_bit - Clear a bit and return its old value
 * @nr: Bit to clear
 * @addr: Address to count from
 *
 * This operation is atomic and cannot be reordered.
 * It also implies a memory barrier.
 */
static __inline int test_and_clear_bit(int nr, volatile void * addr)
{
        int oldbit;

        __asm__ __volatile__( LOCK_PREFIX
                "btrl %2,%1\n\tsbbl %0,%0"
                :"=r" (oldbit),"=m" (ADDR)
                :"Ir" (nr) : "memory");
        return oldbit;
}

static __inline int constant_test_bit(int nr, const volatile void * addr)
{
    return ((1UL << (nr & 31)) & (((const volatile unsigned int *) addr)[nr >> 5])) != 0;
}

static __inline int variable_test_bit(int nr, volatile void * addr)
{
    int oldbit;

    __asm__ __volatile__(
        "btl %2,%1\n\tsbbl %0,%0"
        :"=r" (oldbit)
        :"m" (ADDR),"Ir" (nr));
    return oldbit;
}

#define test_bit(nr,addr) \
(__builtin_constant_p(nr) ? \
 constant_test_bit((nr),(addr)) : \
 variable_test_bit((nr),(addr)))

/**
 * set_bit - Atomically set a bit in memory
 * @nr: the bit to set
 * @addr: the address to start counting from
 *
 * This function is atomic and may not be reordered.  See __set_bit()
 * if you do not require the atomic guarantees.
 * Note that @nr may be almost arbitrarily large; this function is not
 * restricted to acting on a single-word quantity.
 */
static __inline__ void set_bit(int nr, volatile void * addr)
{
        __asm__ __volatile__( LOCK_PREFIX
                "btsl %1,%0"
                :"=m" (ADDR)
                :"Ir" (nr));
}

/**
 * clear_bit - Clears a bit in memory
 * @nr: Bit to clear
 * @addr: Address to start counting from
 *
 * clear_bit() is atomic and may not be reordered.  However, it does
 * not contain a memory barrier, so if it is used for locking purposes,
 * you should call smp_mb__before_clear_bit() and/or smp_mb__after_clear_bit()
 * in order to ensure changes are visible on other processors.
 */
static __inline__ void clear_bit(int nr, volatile void * addr)
{
        __asm__ __volatile__( LOCK_PREFIX
                "btrl %1,%0"
                :"=m" (ADDR)
                :"Ir" (nr));
}
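
/*
 * Illustrative producer/consumer sketch (not part of this header):
 * publish work with set_bit() and consume it with test_and_clear_bit(),
 * which is atomic and implies a barrier.  "pending" and process_work()
 * are hypothetical names used only for the example.
 *
 *	static unsigned long pending;
 *
 *	set_bit(0, &pending);			(producer)
 *	...
 *	if (test_and_clear_bit(0, &pending))	(consumer)
 *		process_work();
 */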

/**
 * atomic_inc - increment atomic variable
 * @v: pointer of type atomic_t
 *
 * Atomically increments @v by 1.  Note that the guaranteed
 * useful range of an atomic_t is only 24 bits.
 */
static __inline__ void atomic_inc(atomic_t *v)
{
        __asm__ __volatile__(
                LOCK "incl %0"
                :"=m" (v->counter)
                :"m" (v->counter));
}

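/*
 * Example (illustrative): count references on a hypothetical object.
 *
 *	static atomic_t refcount;
 *
 *	atomic_inc(&refcount);
 */
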

#define rdtscll(val) \
     __asm__ __volatile__("rdtsc" : "=A" (val))

/*
 * Kernel pointers have redundant information, so we can use a
 * scheme where we can return either an error code or a pointer
 * with the same return value.
 *
 * This should be a per-architecture thing, to allow different
 * error and pointer decisions.
 */
#define IS_ERR_VALUE(x) unlikely((x) > (unsigned long)-1000L)

static inline void *ERR_PTR(long error)
{
	return (void *) error;
}

static inline long PTR_ERR(const void *ptr)
{
	return (long) ptr;
}

static inline long IS_ERR(const void *ptr)
{
	return IS_ERR_VALUE((unsigned long)ptr);
}
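
/*
 * Illustrative error-return sketch (not part of this header): a lookup
 * routine can return either a valid pointer or a negated errno encoded
 * in the same value.  "struct thing", lookup_thing() and "name" are
 * hypothetical names used only for the example.
 *
 *	struct thing *tp;
 *
 *	tp = lookup_thing(name);	(may return ERR_PTR(-ENOENT))
 *	if (IS_ERR(tp))
 *		return (PTR_ERR(tp));
 */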

#endif /* !__ASSEMBLY__ */

#endif /* _XEN_OS_H_ */