/******************************************************************************
 * os.h
 *
 * random collection of macros and definitions
 */

#ifndef _XEN_OS_H_
#define _XEN_OS_H_
#include <machine/param.h>
#ifdef PAE
#define CONFIG_X86_PAE
#endif

#if defined(XEN) && !defined(__XEN_INTERFACE_VERSION__)
/*
 * Can update to a more recent version when we implement
 * the hypercall page
 */
#define  __XEN_INTERFACE_VERSION__ 0x00030204
#endif

#include <xen/interface/xen.h>

#define likely(x)  __builtin_expect((x),1)
#define unlikely(x)  __builtin_expect((x),0)

/* Force a proper event-channel callback from Xen. */
void force_evtchn_callback(void);

#ifndef vtophys
#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/pmap.h>
#endif

extern int gdtset;
#ifdef SMP
#include <sys/time.h> /* XXX for pcpu.h */
#include <sys/pcpu.h> /* XXX for PCPU_GET */
static inline int
smp_processor_id(void)
{
    if (likely(gdtset))
	return PCPU_GET(cpuid);
    return 0;
}

#else
#define smp_processor_id() 0
#endif

#ifndef NULL
#define NULL ((void *)0)
#endif

#ifndef PANIC_IF
#define PANIC_IF(exp) do { if (unlikely(exp)) {printk("panic - %s: %s:%d\n",#exp, __FILE__, __LINE__); panic("%s: %s:%d", #exp, __FILE__, __LINE__);} } while (0)
#endif
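
/*
 * Usage sketch (illustrative only, not part of the original header):
 * panic when an invariant is violated.  "nframes" is a hypothetical
 * local variable.
 *
 *	PANIC_IF(nframes == 0);
 */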

extern shared_info_t *HYPERVISOR_shared_info;

/* Somewhere in the middle of the GCC 2.96 development cycle, we implemented
   a mechanism by which the user can annotate likely branch directions and
   expect the blocks to be reordered appropriately.  Define __builtin_expect
   to nothing for earlier compilers.  */

/* REP NOP (PAUSE) is a good thing to insert into busy-wait loops. */
static inline void rep_nop(void)
{
    __asm__ __volatile__ ( "rep;nop" : : : "memory" );
}
#define cpu_relax() rep_nop()
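
/*
 * Usage sketch (illustrative): a polite busy-wait loop.  "gdtset" is the
 * flag declared above; any volatile condition works the same way.
 *
 *	while (!gdtset)
 *		cpu_relax();
 */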


#if __GNUC__ == 2 && __GNUC_MINOR__ < 96
#define __builtin_expect(x, expected_value) (x)
#endif

#define DEFINE_PER_CPU(type, name) \
    __typeof__(type) per_cpu__##name

#define per_cpu(var, cpu)           (*((void)cpu, &per_cpu__##var))
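
/*
 * Usage sketch (illustrative): declare and touch a per-CPU counter.  Note
 * that this crude implementation ignores the cpu argument and resolves to
 * a single variable; "idle_ticks" is a hypothetical name.
 *
 *	DEFINE_PER_CPU(int, idle_ticks);
 *	...
 *	per_cpu(idle_ticks, smp_processor_id())++;
 */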

/* Crude memory allocator for memory allocation early in boot. */
void *bootmem_alloc(unsigned int size);
void bootmem_free(void *ptr, unsigned int size);
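
/*
 * Usage sketch (illustrative): grab a page before the regular kernel
 * allocators are up, and return it with the same size.
 *
 *	void *scratch = bootmem_alloc(PAGE_SIZE);
 *	...
 *	bootmem_free(scratch, PAGE_SIZE);
 */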


/* Everything below this point is not included by assembler (.S) files. */
#ifndef __ASSEMBLY__
#include <sys/types.h>

void printk(const char *fmt, ...);

/* some function prototypes */
void trap_init(void);

extern int preemptable;
#define preempt_disable() (preemptable = 0)
#define preempt_enable() (preemptable = 1)
#define preempt_enable_no_resched() (preemptable = 1)


/*
 * STI/CLI equivalents. These basically set and clear the virtual
 * event_enable flag in the shared_info structure. Note that when
 * the enable bit is set, there may be pending events to be handled.
 * We may therefore call into do_hypervisor_callback() directly.
 */


#define __cli()                                                         \
do {                                                                    \
        vcpu_info_t *_vcpu;                                             \
        preempt_disable();                                              \
        _vcpu = &HYPERVISOR_shared_info->vcpu_info[smp_processor_id()]; \
        _vcpu->evtchn_upcall_mask = 1;                                  \
        preempt_enable_no_resched();                                    \
        barrier();                                                      \
} while (0)

#define __sti()                                                         \
do {                                                                    \
        vcpu_info_t *_vcpu;                                             \
        barrier();                                                      \
        preempt_disable();                                              \
        _vcpu = &HYPERVISOR_shared_info->vcpu_info[smp_processor_id()]; \
        _vcpu->evtchn_upcall_mask = 0;                                  \
        barrier(); /* unmask then check (avoid races) */                \
        if ( unlikely(_vcpu->evtchn_upcall_pending) )                   \
                force_evtchn_callback();                                \
        preempt_enable();                                               \
} while (0)


#define __save_flags(x)                                                       \
do {                                                                          \
    vcpu_info_t *_vcpu;                                                       \
    _vcpu = &HYPERVISOR_shared_info->vcpu_info[smp_processor_id()];           \
    (x) = _vcpu->evtchn_upcall_mask;                                          \
} while (0)

#define __restore_flags(x)                                              \
do {                                                                    \
        vcpu_info_t *_vcpu;                                             \
        barrier();                                                      \
        preempt_disable();                                              \
        _vcpu = &HYPERVISOR_shared_info->vcpu_info[smp_processor_id()]; \
        if ((_vcpu->evtchn_upcall_mask = (x)) == 0) {                   \
                barrier(); /* unmask then check (avoid races) */        \
                if ( unlikely(_vcpu->evtchn_upcall_pending) )           \
                        force_evtchn_callback();                        \
                preempt_enable();                                       \
        } else                                                          \
                preempt_enable_no_resched();                            \
} while (0)

/*
 * Add critical_{enter, exit}?
 *
 */
#define __save_and_cli(x)                                               \
do {                                                                    \
        vcpu_info_t *_vcpu;                                             \
        _vcpu = &HYPERVISOR_shared_info->vcpu_info[smp_processor_id()]; \
        (x) = _vcpu->evtchn_upcall_mask;                                \
        _vcpu->evtchn_upcall_mask = 1;                                  \
        barrier();                                                      \
} while (0)


#define cli() __cli()
#define sti() __sti()
#define save_flags(x) __save_flags(x)
#define restore_flags(x) __restore_flags(x)
#define save_and_cli(x) __save_and_cli(x)

#define local_irq_save(x)       __save_and_cli(x)
#define local_irq_restore(x)    __restore_flags(x)
#define local_irq_disable()     __cli()
#define local_irq_enable()      __sti()

#define mtx_lock_irqsave(lock, x) {local_irq_save((x)); mtx_lock_spin((lock));}
#define mtx_unlock_irqrestore(lock, x) {mtx_unlock_spin((lock)); local_irq_restore((x)); }
#define spin_lock_irqsave mtx_lock_irqsave
#define spin_unlock_irqrestore mtx_unlock_irqrestore
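
/*
 * Usage sketch (illustrative): protect state shared with the event-channel
 * upcall.  "flags" is a local scratch word; "sc", its fields and "sc_mtx"
 * are hypothetical.
 *
 *	unsigned long flags;
 *
 *	local_irq_save(flags);
 *	sc->sc_pending++;
 *	local_irq_restore(flags);
 *
 * or, when a spin mutex is also needed:
 *
 *	mtx_lock_irqsave(&sc->sc_mtx, flags);
 *	...
 *	mtx_unlock_irqrestore(&sc->sc_mtx, flags);
 */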


#ifndef mb
#define mb() __asm__ __volatile__("lock; addl $0, 0(%%esp)": : :"memory")
#endif
#ifndef rmb
#define rmb() mb()
#endif
#ifndef wmb
#define wmb() barrier()
#endif
#ifdef SMP
#define smp_mb() mb()
#define smp_rmb() rmb()
#define smp_wmb() wmb()
#define smp_read_barrier_depends()      read_barrier_depends()
#define set_mb(var, value) do { xchg(&var, value); } while (0)
#else
#define smp_mb()        barrier()
#define smp_rmb()       barrier()
#define smp_wmb()       barrier()
#define smp_read_barrier_depends()      do { } while(0)
#define set_mb(var, value) do { var = value; barrier(); } while (0)
#endif


/* This is a barrier for the compiler only, NOT the processor! */
#define barrier() __asm__ __volatile__("": : :"memory")

#define LOCK_PREFIX ""
#define LOCK ""
#define ADDR (*(volatile long *) addr)
/*
 * Make sure gcc doesn't try to be clever and move things around
 * on us. We need to use _exactly_ the address the user gave us,
 * not some alias that contains the same information.
 */
typedef struct { volatile int counter; } atomic_t;


#define xen_xchg(ptr,v) \
        ((__typeof__(*(ptr)))__xchg((unsigned long)(v),(ptr),sizeof(*(ptr))))
struct __xchg_dummy { unsigned long a[100]; };
#define __xg(x) ((volatile struct __xchg_dummy *)(x))
static __inline unsigned long __xchg(unsigned long x, volatile void * ptr,
                                   int size)
{
    switch (size) {
    case 1:
        __asm__ __volatile__("xchgb %b0,%1"
                             :"=q" (x)
                             :"m" (*__xg(ptr)), "0" (x)
                             :"memory");
        break;
    case 2:
        __asm__ __volatile__("xchgw %w0,%1"
                             :"=r" (x)
                             :"m" (*__xg(ptr)), "0" (x)
                             :"memory");
        break;
    case 4:
        __asm__ __volatile__("xchgl %0,%1"
                             :"=r" (x)
                             :"m" (*__xg(ptr)), "0" (x)
                             :"memory");
        break;
    }
    return x;
}
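
/*
 * Usage sketch (illustrative): atomically swap in a new value and act on
 * the old one.  "pending" and process_pending() are hypothetical.
 *
 *	unsigned long old;
 *
 *	old = xen_xchg(&pending, 0);
 *	if (old != 0)
 *		process_pending(old);
 */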

/**
 * test_and_clear_bit - Clear a bit and return its old value
 * @nr: Bit to clear
 * @addr: Address to count from
 *
 * This operation is atomic and cannot be reordered.
 * It also implies a memory barrier.
 */
static __inline int test_and_clear_bit(int nr, volatile void * addr)
{
        int oldbit;

        __asm__ __volatile__( LOCK_PREFIX
                "btrl %2,%1\n\tsbbl %0,%0"
                :"=r" (oldbit),"=m" (ADDR)
                :"Ir" (nr) : "memory");
        return oldbit;
}

static __inline int constant_test_bit(int nr, const volatile void * addr)
{
    return ((1UL << (nr & 31)) & (((const volatile unsigned int *) addr)[nr >> 5])) != 0;
}

static __inline int variable_test_bit(int nr, volatile void * addr)
{
    int oldbit;

    __asm__ __volatile__(
        "btl %2,%1\n\tsbbl %0,%0"
        :"=r" (oldbit)
        :"m" (ADDR),"Ir" (nr));
    return oldbit;
}

#define test_bit(nr,addr) \
(__builtin_constant_p(nr) ? \
 constant_test_bit((nr),(addr)) : \
 variable_test_bit((nr),(addr)))
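
/*
 * Usage sketch (illustrative): poll and consume a flag bit.  "evtchn_mask"
 * and handle_event() are hypothetical.
 *
 *	static volatile unsigned long evtchn_mask;
 *
 *	if (test_bit(3, &evtchn_mask) && test_and_clear_bit(3, &evtchn_mask))
 *		handle_event(3);
 */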


/**
 * set_bit - Atomically set a bit in memory
 * @nr: the bit to set
 * @addr: the address to start counting from
 *
 * This function is atomic and may not be reordered.  See __set_bit()
 * if you do not require the atomic guarantees.
 * Note that @nr may be almost arbitrarily large; this function is not
 * restricted to acting on a single-word quantity.
 */
static __inline__ void set_bit(int nr, volatile void * addr)
{
        __asm__ __volatile__( LOCK_PREFIX
                "btsl %1,%0"
                :"=m" (ADDR)
                :"Ir" (nr));
}

/**
 * clear_bit - Clears a bit in memory
 * @nr: Bit to clear
 * @addr: Address to start counting from
 *
 * clear_bit() is atomic and may not be reordered.  However, it does
 * not contain a memory barrier, so if it is used for locking purposes,
 * you should call smp_mb__before_clear_bit() and/or smp_mb__after_clear_bit()
 * in order to ensure changes are visible on other processors.
 */
static __inline__ void clear_bit(int nr, volatile void * addr)
{
        __asm__ __volatile__( LOCK_PREFIX
                "btrl %1,%0"
                :"=m" (ADDR)
                :"Ir" (nr));
}
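
/*
 * Usage sketch (illustrative): publish and retire a flag in a shared
 * bitmap.  "port" and "pending_bitmap" are hypothetical.
 *
 *	set_bit(port, &pending_bitmap);
 *	...
 *	clear_bit(port, &pending_bitmap);
 *	smp_mb();	// clear_bit() is not a barrier; add one if ordering matters
 */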

/**
 * atomic_inc - increment atomic variable
 * @v: pointer of type atomic_t
 *
 * Atomically increments @v by 1.  Note that the guaranteed
 * useful range of an atomic_t is only 24 bits.
 */
static __inline__ void atomic_inc(atomic_t *v)
{
        __asm__ __volatile__(
                LOCK "incl %0"
                :"=m" (v->counter)
                :"m" (v->counter));
}
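
/*
 * Usage sketch (illustrative): a simple shared counter.  "grant_refs" is a
 * hypothetical variable.
 *
 *	static atomic_t grant_refs;
 *
 *	atomic_inc(&grant_refs);
 */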


#define rdtscll(val) \
     __asm__ __volatile__("rdtsc" : "=A" (val))
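
/*
 * Usage sketch (illustrative): read the TSC into a 64-bit variable (the
 * "=A" constraint fills edx:eax on i386).
 *
 *	uint64_t tsc;
 *
 *	rdtscll(tsc);
 */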


/*
 * Kernel pointers have redundant information, so we can use a
 * scheme where we can return either an error code or a dentry
 * pointer with the same return value.
 *
 * This should be a per-architecture thing, to allow different
 * error and pointer decisions.
 */
#define IS_ERR_VALUE(x) unlikely((x) > (unsigned long)-1000L)

static inline void *ERR_PTR(long error)
{
	return (void *) error;
}

static inline long PTR_ERR(const void *ptr)
{
	return (long) ptr;
}

static inline long IS_ERR(const void *ptr)
{
	return IS_ERR_VALUE((unsigned long)ptr);
}
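
/*
 * Usage sketch (illustrative): fold an errno value and a pointer into one
 * return value.  xenbus_read_path(), "node" and the ENOENT choice are
 * hypothetical.
 *
 *	void *
 *	xenbus_read_path(const char *path)
 *	{
 *		if (path == NULL)
 *			return (ERR_PTR(-ENOENT));
 *		...
 *	}
 *
 *	void *res = xenbus_read_path(node);
 *	if (IS_ERR(res))
 *		printk("lookup failed: %ld\n", PTR_ERR(res));
 */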

#endif /* !__ASSEMBLY__ */

#endif /* _XEN_OS_H_ */