/* xen-os.h, revision 183927 */
/******************************************************************************
 * os.h
 *
 * Random collection of macros and definitions.
 */

#ifndef _XEN_OS_H_
#define _XEN_OS_H_
#include <machine/param.h>
#ifdef PAE
#define CONFIG_X86_PAE
#endif

#if defined(XEN) && !defined(__XEN_INTERFACE_VERSION__)
/*
 * Can update to a more recent version when we implement
 * the hypercall page.
 */
#define __XEN_INTERFACE_VERSION__ 0x00030204
#endif

#include <xen/interface/xen.h>

/*
 * Somewhere in the middle of the GCC 2.96 development cycle, a mechanism
 * was implemented by which the user can annotate likely branch directions
 * and expect the blocks to be reordered appropriately.  Define
 * __builtin_expect to nothing for earlier compilers.
 */
#if __GNUC__ == 2 && __GNUC_MINOR__ < 96
#define __builtin_expect(x, expected_value) (x)
#endif

/*
 * Define these before their first use: likely() is used by the inline
 * smp_processor_id() below and unlikely() by PANIC_IF().
 */
#define likely(x)	__builtin_expect((x), 1)
#define unlikely(x)	__builtin_expect((x), 0)

/* Force a proper event-channel callback from Xen. */
void force_evtchn_callback(void);

#ifndef vtophys
#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/pmap.h>
#endif

extern int gdtset;
#ifdef SMP
#include <sys/time.h>	/* XXX for pcpu.h */
#include <sys/pcpu.h>	/* XXX for PCPU_GET */
static inline int
smp_processor_id(void)
{
	if (likely(gdtset))
		return PCPU_GET(cpuid);
	return 0;
}

#else
#define smp_processor_id()	0
#endif

#ifndef NULL
#define NULL	(void *)0
#endif

#ifndef PANIC_IF
#define PANIC_IF(exp) do {						\
	if (unlikely(exp)) {						\
		printk("panic - %s: %s:%d\n", #exp, __FILE__, __LINE__); \
		panic("%s: %s:%d", #exp, __FILE__, __LINE__);		\
	}								\
} while (0)
#endif

extern shared_info_t *HYPERVISOR_shared_info;

/* REP NOP (PAUSE) is a good thing to insert into busy-wait loops. */
static inline void rep_nop(void)
{
	__asm__ __volatile__ ("rep;nop" : : : "memory");
}
#define cpu_relax()	rep_nop()

#define DEFINE_PER_CPU(type, name) \
	__typeof__(type) per_cpu__##name

#define per_cpu(var, cpu)	(*((void)(cpu), &per_cpu__##var))

/*
 * Crude memory allocator for memory allocation early in boot.
 */
void *bootmem_alloc(unsigned int size);
void bootmem_free(void *ptr, unsigned int size);
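/*
 * Example use of the boot-time allocator above (an illustrative sketch
 * only, not code from this header): carve out a temporary buffer before
 * the VM system is up and release it once it is no longer needed.
 *
 *	void *scratch = bootmem_alloc(PAGE_SIZE);
 *	...use scratch during early boot...
 *	bootmem_free(scratch, PAGE_SIZE);
 */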
/*
 * Everything below this point is not included by assembler (.S) files.
 */
#ifndef __ASSEMBLY__
#include <sys/types.h>

void printk(const char *fmt, ...);

/* some function prototypes */
void trap_init(void);

extern int preemptable;
#define preempt_disable()		(preemptable = 0)
#define preempt_enable()		(preemptable = 1)
#define preempt_enable_no_resched()	(preemptable = 1)

/*
 * STI/CLI equivalents.  These basically set and clear the virtual
 * event-enable flag in the shared_info structure.  Note that when
 * the enable bit is set, there may be pending events to be handled.
 * We may therefore call into do_hypervisor_callback() directly.
 */
#define __cli()								\
do {									\
	vcpu_info_t *_vcpu;						\
	preempt_disable();						\
	_vcpu = &HYPERVISOR_shared_info->vcpu_info[smp_processor_id()]; \
	_vcpu->evtchn_upcall_mask = 1;					\
	preempt_enable_no_resched();					\
	barrier();							\
} while (0)

#define __sti()								\
do {									\
	vcpu_info_t *_vcpu;						\
	barrier();							\
	preempt_disable();						\
	_vcpu = &HYPERVISOR_shared_info->vcpu_info[smp_processor_id()]; \
	_vcpu->evtchn_upcall_mask = 0;					\
	barrier(); /* unmask then check (avoid races) */		\
	if (unlikely(_vcpu->evtchn_upcall_pending))			\
		force_evtchn_callback();				\
	preempt_enable();						\
} while (0)

#define __save_flags(x)							\
do {									\
	vcpu_info_t *_vcpu;						\
	_vcpu = &HYPERVISOR_shared_info->vcpu_info[smp_processor_id()]; \
	(x) = _vcpu->evtchn_upcall_mask;				\
} while (0)

#define __restore_flags(x)						\
do {									\
	vcpu_info_t *_vcpu;						\
	barrier();							\
	preempt_disable();						\
	_vcpu = &HYPERVISOR_shared_info->vcpu_info[smp_processor_id()]; \
	if ((_vcpu->evtchn_upcall_mask = (x)) == 0) {			\
		barrier(); /* unmask then check (avoid races) */	\
		if (unlikely(_vcpu->evtchn_upcall_pending))		\
			force_evtchn_callback();			\
		preempt_enable();					\
	} else								\
		preempt_enable_no_resched();				\
} while (0)
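/*
 * Typical use of the flag macros above (an illustrative sketch, not
 * code from this header): save the current upcall mask, disable event
 * delivery, and restore the previous state on the way out.
 *
 *	unsigned long flags;
 *
 *	__save_flags(flags);
 *	__cli();
 *	...events are masked here...
 *	__restore_flags(flags);
 */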
/*
 * Add critical_{enter, exit}?
 */
#define __save_and_cli(x)						\
do {									\
	vcpu_info_t *_vcpu;						\
	_vcpu = &HYPERVISOR_shared_info->vcpu_info[smp_processor_id()]; \
	(x) = _vcpu->evtchn_upcall_mask;				\
	_vcpu->evtchn_upcall_mask = 1;					\
	barrier();							\
} while (0)

#define cli()			__cli()
#define sti()			__sti()
#define save_flags(x)		__save_flags(x)
#define restore_flags(x)	__restore_flags(x)
#define save_and_cli(x)		__save_and_cli(x)

#define local_irq_save(x)	__save_and_cli(x)
#define local_irq_restore(x)	__restore_flags(x)
#define local_irq_disable()	__cli()
#define local_irq_enable()	__sti()

#define mtx_lock_irqsave(lock, x)					\
	do { local_irq_save((x)); mtx_lock_spin((lock)); } while (0)
#define mtx_unlock_irqrestore(lock, x)					\
	do { mtx_unlock_spin((lock)); local_irq_restore((x)); } while (0)
#define spin_lock_irqsave	mtx_lock_irqsave
#define spin_unlock_irqrestore	mtx_unlock_irqrestore

#ifndef mb
#define mb()	__asm__ __volatile__("lock; addl $0, 0(%%esp)" : : : "memory")
#endif
#ifndef rmb
#define rmb()	mb()
#endif
#ifndef wmb
#define wmb()	barrier()
#endif
#ifdef SMP
#define smp_mb()	mb()
#define smp_rmb()	rmb()
#define smp_wmb()	wmb()
#define smp_read_barrier_depends()	read_barrier_depends()
#define set_mb(var, value) do { xchg(&var, value); } while (0)
#else
#define smp_mb()	barrier()
#define smp_rmb()	barrier()
#define smp_wmb()	barrier()
#define smp_read_barrier_depends()	do { } while (0)
#define set_mb(var, value) do { var = value; barrier(); } while (0)
#endif

/* This is a barrier for the compiler only, NOT the processor! */
#define barrier() __asm__ __volatile__("" : : : "memory")

#define LOCK_PREFIX ""
#define LOCK ""
/*
 * Make sure gcc doesn't try to be clever and move things around
 * on us.  We need to use _exactly_ the address the user gave us,
 * not some alias that contains the same information.
 */
#define ADDR (*(volatile long *) addr)
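/*
 * Example of the classic producer-side use of wmb() above (an
 * illustrative sketch; "ring", "req", "prod_idx" and "shared" are
 * hypothetical names, not part of this header):
 *
 *	ring[prod_idx] = req;		// fill the payload first
 *	wmb();				// payload visible before the index
 *	shared->req_prod = prod_idx + 1;
 */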
typedef struct { volatile int counter; } atomic_t;

#define xen_xchg(ptr, v)						\
	((__typeof__(*(ptr)))__xchg((unsigned long)(v), (ptr), sizeof(*(ptr))))
struct __xchg_dummy { unsigned long a[100]; };
#define __xg(x) ((volatile struct __xchg_dummy *)(x))
static __inline unsigned long __xchg(unsigned long x, volatile void *ptr,
				     int size)
{
	switch (size) {
	case 1:
		__asm__ __volatile__("xchgb %b0,%1"
				     :"=q" (x)
				     :"m" (*__xg(ptr)), "0" (x)
				     :"memory");
		break;
	case 2:
		__asm__ __volatile__("xchgw %w0,%1"
				     :"=r" (x)
				     :"m" (*__xg(ptr)), "0" (x)
				     :"memory");
		break;
	case 4:
		__asm__ __volatile__("xchgl %0,%1"
				     :"=r" (x)
				     :"m" (*__xg(ptr)), "0" (x)
				     :"memory");
		break;
	}
	return x;
}

/**
 * test_and_clear_bit - Clear a bit and return its old value
 * @nr: Bit to clear
 * @addr: Address to count from
 *
 * This operation is atomic and cannot be reordered.
 * It also implies a memory barrier.
 */
static __inline int test_and_clear_bit(int nr, volatile void *addr)
{
	int oldbit;

	__asm__ __volatile__( LOCK_PREFIX
		"btrl %2,%1\n\tsbbl %0,%0"
		:"=r" (oldbit),"=m" (ADDR)
		:"Ir" (nr) : "memory");
	return oldbit;
}

static __inline int constant_test_bit(int nr, const volatile void *addr)
{
	return ((1UL << (nr & 31)) &
	    (((const volatile unsigned int *)addr)[nr >> 5])) != 0;
}

static __inline int variable_test_bit(int nr, volatile void *addr)
{
	int oldbit;

	__asm__ __volatile__(
		"btl %2,%1\n\tsbbl %0,%0"
		:"=r" (oldbit)
		:"m" (ADDR),"Ir" (nr));
	return oldbit;
}

#define test_bit(nr, addr)			\
	(__builtin_constant_p(nr) ?		\
	 constant_test_bit((nr), (addr)) :	\
	 variable_test_bit((nr), (addr)))
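/*
 * Example use of the bit operations above (an illustrative sketch;
 * "pending" is a hypothetical bitmap, not part of this header):
 * atomically consume a flag so it is handled exactly once even if
 * another CPU races to clear it.
 *
 *	static unsigned long pending;
 *
 *	if (test_and_clear_bit(2, &pending))
 *		...handle event 2...
 */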
/**
 * set_bit - Atomically set a bit in memory
 * @nr: the bit to set
 * @addr: the address to start counting from
 *
 * This function is atomic and may not be reordered.  See __set_bit()
 * if you do not require the atomic guarantees.
 * Note that @nr may be almost arbitrarily large; this function is not
 * restricted to acting on a single-word quantity.
 */
static __inline__ void set_bit(int nr, volatile void *addr)
{
	__asm__ __volatile__( LOCK_PREFIX
		"btsl %1,%0"
		:"=m" (ADDR)
		:"Ir" (nr));
}

/**
 * clear_bit - Clears a bit in memory
 * @nr: Bit to clear
 * @addr: Address to start counting from
 *
 * clear_bit() is atomic and may not be reordered.  However, it does
 * not contain a memory barrier, so if it is used for locking purposes,
 * you should call smp_mb__before_clear_bit() and/or smp_mb__after_clear_bit()
 * in order to ensure changes are visible on other processors.
 */
static __inline__ void clear_bit(int nr, volatile void *addr)
{
	__asm__ __volatile__( LOCK_PREFIX
		"btrl %1,%0"
		:"=m" (ADDR)
		:"Ir" (nr));
}

/**
 * atomic_inc - increment atomic variable
 * @v: pointer of type atomic_t
 *
 * Atomically increments @v by 1.  Note that the guaranteed
 * useful range of an atomic_t is only 24 bits.
 */
static __inline__ void atomic_inc(atomic_t *v)
{
	__asm__ __volatile__(
		LOCK "incl %0"
		:"=m" (v->counter)
		:"m" (v->counter));
}

#define rdtscll(val) \
	__asm__ __volatile__("rdtsc" : "=A" (val))

/*
 * Kernel pointers have redundant information, so we can use a
 * scheme where we can return either an error code or a dentry
 * pointer with the same return value.
 *
 * This should be a per-architecture thing, to allow different
 * error and pointer decisions.
 */
#define IS_ERR_VALUE(x) unlikely((x) > (unsigned long)-1000L)

static inline void *ERR_PTR(long error)
{
	return (void *) error;
}

static inline long PTR_ERR(const void *ptr)
{
	return (long) ptr;
}

static inline long IS_ERR(const void *ptr)
{
	return IS_ERR_VALUE((unsigned long)ptr);
}

#endif /* !__ASSEMBLY__ */

#endif /* _XEN_OS_H_ */