xen-os.h revision 183906
/******************************************************************************
 * os.h
 *
 * random collection of macros and definitions
 */

#ifndef _XEN_OS_H_
#define _XEN_OS_H_
#include <machine/param.h>
#ifdef PAE
#define CONFIG_X86_PAE
#endif

/* Somewhere in the middle of the GCC 2.96 development cycle, we implemented
   a mechanism by which the user can annotate likely branch directions and
   expect the blocks to be reordered appropriately.  Define __builtin_expect
   to nothing for earlier compilers.  */
#if __GNUC__ == 2 && __GNUC_MINOR__ < 96
#define __builtin_expect(x, expected_value) (x)
#endif

#define likely(x)	__builtin_expect((x), 1)
#define unlikely(x)	__builtin_expect((x), 0)

#if defined(XEN) && !defined(__XEN_INTERFACE_VERSION__)
/*
 * Can update to a more recent version when we implement
 * the hypercall page
 */
#define __XEN_INTERFACE_VERSION__ 0x00030204
#endif

#include <xen/interface/xen.h>

/* Force a proper event-channel callback from Xen. */
void force_evtchn_callback(void);

#ifndef vtophys
#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/pmap.h>
#endif

#ifdef SMP
#include <sys/time.h>	/* XXX for pcpu.h */
#include <sys/pcpu.h>	/* XXX for PCPU_GET */
extern int gdt_set;
static inline int
smp_processor_id(void)
{
	if (likely(gdt_set))
		return PCPU_GET(cpuid);
	return 0;
}
#else
#define smp_processor_id() 0
#endif

#ifndef NULL
#define NULL (void *)0
#endif

#ifndef PANIC_IF
#define PANIC_IF(exp) if (unlikely(exp)) {printk("panic - %s: %s:%d\n", #exp, __FILE__, __LINE__); panic("%s: %s:%d", #exp, __FILE__, __LINE__);}
#endif

extern shared_info_t *HYPERVISOR_shared_info;

/* REP NOP (PAUSE) is a good thing to insert into busy-wait loops. */
static inline void rep_nop(void)
{
	__asm__ __volatile__ ( "rep;nop" : : : "memory" );
}
#define cpu_relax() rep_nop()

#define DEFINE_PER_CPU(type, name) \
	__typeof__(type) per_cpu__##name

#define per_cpu(var, cpu)	(*((void)cpu, &per_cpu__##var))

/* Crude memory allocator for memory allocation early in boot. */
void *bootmem_alloc(unsigned int size);
void bootmem_free(void *ptr, unsigned int size);

/* Everything below this point is not included by assembler (.S) files. */
#ifndef __ASSEMBLY__
#include <sys/types.h>

void printk(const char *fmt, ...);

/* some function prototypes */
void trap_init(void);

extern int preemptable;
#define preempt_disable()		(preemptable = 0)
#define preempt_enable()		(preemptable = 1)
#define preempt_enable_no_resched()	(preemptable = 1)
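/*
 * Example usage of the per-CPU and preemption helpers above.  This is an
 * illustrative sketch only: the per-CPU counter "traps_handled" and the
 * function "count_trap" are hypothetical and not declared anywhere in
 * the tree.
 *
 *	DEFINE_PER_CPU(int, traps_handled);
 *
 *	static inline void
 *	count_trap(void)
 *	{
 *		preempt_disable();
 *		per_cpu(traps_handled, smp_processor_id())++;
 *		preempt_enable();
 *	}
 */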
/*
 * STI/CLI equivalents.  These basically set and clear the virtual
 * event_enable flag in the shared_info structure.  Note that when
 * the enable bit is set, there may be pending events to be handled.
 * We may therefore call into do_hypervisor_callback() directly.
 */

#define __cli()                                                         \
do {                                                                    \
	vcpu_info_t *_vcpu;                                             \
	preempt_disable();                                              \
	_vcpu = &HYPERVISOR_shared_info->vcpu_info[smp_processor_id()];\
	_vcpu->evtchn_upcall_mask = 1;                                  \
	preempt_enable_no_resched();                                    \
	barrier();                                                      \
} while (0)

#define __sti()                                                         \
do {                                                                    \
	vcpu_info_t *_vcpu;                                             \
	barrier();                                                      \
	preempt_disable();                                              \
	_vcpu = &HYPERVISOR_shared_info->vcpu_info[smp_processor_id()];\
	_vcpu->evtchn_upcall_mask = 0;                                  \
	barrier(); /* unmask then check (avoid races) */                \
	if (unlikely(_vcpu->evtchn_upcall_pending))                     \
		force_evtchn_callback();                                \
	preempt_enable();                                               \
} while (0)

#define __save_flags(x)                                                 \
do {                                                                    \
	vcpu_info_t *_vcpu;                                             \
	_vcpu = &HYPERVISOR_shared_info->vcpu_info[smp_processor_id()];\
	(x) = _vcpu->evtchn_upcall_mask;                                \
} while (0)

#define __restore_flags(x)                                              \
do {                                                                    \
	vcpu_info_t *_vcpu;                                             \
	barrier();                                                      \
	preempt_disable();                                              \
	_vcpu = &HYPERVISOR_shared_info->vcpu_info[smp_processor_id()];\
	if ((_vcpu->evtchn_upcall_mask = (x)) == 0) {                   \
		barrier(); /* unmask then check (avoid races) */        \
		if (unlikely(_vcpu->evtchn_upcall_pending))             \
			force_evtchn_callback();                        \
		preempt_enable();                                       \
	} else                                                          \
		preempt_enable_no_resched();                            \
} while (0)

/*
 * Add critical_{enter, exit}?
 */
#define __save_and_cli(x)                                               \
do {                                                                    \
	vcpu_info_t *_vcpu;                                             \
	_vcpu = &HYPERVISOR_shared_info->vcpu_info[smp_processor_id()];\
	(x) = _vcpu->evtchn_upcall_mask;                                \
	_vcpu->evtchn_upcall_mask = 1;                                  \
	barrier();                                                      \
} while (0)

#define cli()			__cli()
#define sti()			__sti()
#define save_flags(x)		__save_flags(x)
#define restore_flags(x)	__restore_flags(x)
#define save_and_cli(x)		__save_and_cli(x)

#define local_irq_save(x)	__save_and_cli(x)
#define local_irq_restore(x)	__restore_flags(x)
#define local_irq_disable()	__cli()
#define local_irq_enable()	__sti()

#define mtx_lock_irqsave(lock, x)	{ local_irq_save((x)); mtx_lock_spin((lock)); }
#define mtx_unlock_irqrestore(lock, x)	{ mtx_unlock_spin((lock)); local_irq_restore((x)); }
#define spin_lock_irqsave		mtx_lock_irqsave
#define spin_unlock_irqrestore		mtx_unlock_irqrestore
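/*
 * Example usage of the interrupt-flag macros above.  This is an
 * illustrative sketch only: "event_lock" and "events_pending" are
 * hypothetical and not declared anywhere in the tree.
 *
 *	struct mtx event_lock;
 *	int events_pending;
 *	unsigned long flags;
 *
 *	mtx_lock_irqsave(&event_lock, flags);
 *	events_pending++;
 *	mtx_unlock_irqrestore(&event_lock, flags);
 *
 * local_irq_save()/local_irq_restore() may be used the same way when no
 * spin lock is required.
 */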
#ifndef mb
#define mb()	__asm__ __volatile__("lock; addl $0, 0(%%esp)": : :"memory")
#endif
#ifndef rmb
#define rmb()	mb()
#endif
#ifndef wmb
#define wmb()	barrier()
#endif

#ifdef SMP
#define smp_mb()			mb()
#define smp_rmb()			rmb()
#define smp_wmb()			wmb()
#define smp_read_barrier_depends()	read_barrier_depends()
#define set_mb(var, value)		do { xchg(&var, value); } while (0)
#else
#define smp_mb()			barrier()
#define smp_rmb()			barrier()
#define smp_wmb()			barrier()
#define smp_read_barrier_depends()	do { } while (0)
#define set_mb(var, value)		do { var = value; barrier(); } while (0)
#endif

/* This is a barrier for the compiler only, NOT the processor! */
#define barrier()	__asm__ __volatile__("": : :"memory")

#define LOCK_PREFIX ""
#define LOCK ""
#define ADDR (*(volatile long *) addr)
/*
 * Make sure gcc doesn't try to be clever and move things around
 * on us. We need to use _exactly_ the address the user gave us,
 * not some alias that contains the same information.
 */
typedef struct { volatile int counter; } atomic_t;

#define xen_xchg(ptr, v) \
	((__typeof__(*(ptr)))__xchg((unsigned long)(v), (ptr), sizeof(*(ptr))))
struct __xchg_dummy { unsigned long a[100]; };
#define __xg(x) ((volatile struct __xchg_dummy *)(x))
static __inline unsigned long __xchg(unsigned long x, volatile void * ptr,
                                     int size)
{
	switch (size) {
	case 1:
		__asm__ __volatile__("xchgb %b0,%1"
				     :"=q" (x)
				     :"m" (*__xg(ptr)), "0" (x)
				     :"memory");
		break;
	case 2:
		__asm__ __volatile__("xchgw %w0,%1"
				     :"=r" (x)
				     :"m" (*__xg(ptr)), "0" (x)
				     :"memory");
		break;
	case 4:
		__asm__ __volatile__("xchgl %0,%1"
				     :"=r" (x)
				     :"m" (*__xg(ptr)), "0" (x)
				     :"memory");
		break;
	}
	return x;
}

/**
 * test_and_clear_bit - Clear a bit and return its old value
 * @nr: Bit to clear
 * @addr: Address to count from
 *
 * This operation is atomic and cannot be reordered.
 * It also implies a memory barrier.
 */
static __inline int test_and_clear_bit(int nr, volatile void * addr)
{
	int oldbit;

	__asm__ __volatile__( LOCK_PREFIX
		"btrl %2,%1\n\tsbbl %0,%0"
		:"=r" (oldbit),"=m" (ADDR)
		:"Ir" (nr) : "memory");
	return oldbit;
}

static __inline int constant_test_bit(int nr, const volatile void * addr)
{
	return ((1UL << (nr & 31)) & (((const volatile unsigned int *) addr)[nr >> 5])) != 0;
}

static __inline int variable_test_bit(int nr, volatile void * addr)
{
	int oldbit;

	__asm__ __volatile__(
		"btl %2,%1\n\tsbbl %0,%0"
		:"=r" (oldbit)
		:"m" (ADDR),"Ir" (nr));
	return oldbit;
}

#define test_bit(nr, addr)			\
	(__builtin_constant_p(nr) ?		\
	 constant_test_bit((nr), (addr)) :	\
	 variable_test_bit((nr), (addr)))

/**
 * set_bit - Atomically set a bit in memory
 * @nr: the bit to set
 * @addr: the address to start counting from
 *
 * This function is atomic and may not be reordered. See __set_bit()
 * if you do not require the atomic guarantees.
 * Note that @nr may be almost arbitrarily large; this function is not
 * restricted to acting on a single-word quantity.
 */
static __inline__ void set_bit(int nr, volatile void * addr)
{
	__asm__ __volatile__( LOCK_PREFIX
		"btsl %1,%0"
		:"=m" (ADDR)
		:"Ir" (nr));
}

/**
 * clear_bit - Clears a bit in memory
 * @nr: Bit to clear
 * @addr: Address to start counting from
 *
 * clear_bit() is atomic and may not be reordered. However, it does
 * not contain a memory barrier, so if it is used for locking purposes,
 * you should call smp_mb__before_clear_bit() and/or smp_mb__after_clear_bit()
 * in order to ensure changes are visible on other processors.
 */
static __inline__ void clear_bit(int nr, volatile void * addr)
{
	__asm__ __volatile__( LOCK_PREFIX
		"btrl %1,%0"
		:"=m" (ADDR)
		:"Ir" (nr));
}
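/*
 * Example usage of the bit operations above.  This is an illustrative
 * sketch only: the flag word "pending" is hypothetical and not declared
 * anywhere in the tree.
 *
 *	static unsigned long pending;
 *
 *	set_bit(3, &pending);
 *	if (test_and_clear_bit(3, &pending))
 *		printk("slot 3 was pending\n");
 *	PANIC_IF(test_bit(3, &pending));
 */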
/**
 * atomic_inc - increment atomic variable
 * @v: pointer of type atomic_t
 *
 * Atomically increments @v by 1. Note that the guaranteed
 * useful range of an atomic_t is only 24 bits.
 */
static __inline__ void atomic_inc(atomic_t *v)
{
	__asm__ __volatile__(
		LOCK "incl %0"
		:"=m" (v->counter)
		:"m" (v->counter));
}

#define rdtscll(val) \
	__asm__ __volatile__("rdtsc" : "=A" (val))

/*
 * Kernel pointers have redundant information, so we can use a
 * scheme where we can return either an error code or a valid
 * pointer with the same return value.
 *
 * This should be a per-architecture thing, to allow different
 * error and pointer decisions.
 */
#define IS_ERR_VALUE(x) unlikely((x) > (unsigned long)-1000L)

static inline void *ERR_PTR(long error)
{
	return (void *) error;
}

static inline long PTR_ERR(const void *ptr)
{
	return (long) ptr;
}

static inline long IS_ERR(const void *ptr)
{
	return IS_ERR_VALUE((unsigned long)ptr);
}

#endif /* !__ASSEMBLY__ */

#endif /* _XEN_OS_H_ */