/******************************************************************************
 * os.h (xen-os.h, revision 185181)
 *
 * A random collection of macros and definitions.
 */

#ifndef _XEN_OS_H_
#define _XEN_OS_H_

#include <machine/param.h>

#ifdef PAE
#define CONFIG_X86_PAE
#endif

#if defined(XEN) && !defined(__XEN_INTERFACE_VERSION__)
/*
 * Can update to a more recent version when we implement
 * the hypercall page.
 */
#define __XEN_INTERFACE_VERSION__ 0x00030204
#endif

#include <xen/interface/xen.h>

/* Force a proper event-channel callback from Xen. */
void force_evtchn_callback(void);

#ifndef vtophys
#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/pmap.h>
#endif

extern int gdtset;

#ifdef SMP
#include <sys/time.h>	/* XXX for pcpu.h */
#include <sys/pcpu.h>	/* XXX for PCPU_GET */
/*
 * Return the current CPU's ID.  Until the per-CPU data (GDT) has been
 * set up, we are effectively running on CPU 0.
 */
static inline int
smp_processor_id(void)
{
	if (__predict_true(gdtset))
		return PCPU_GET(cpuid);
	return 0;
}
#else
#define smp_processor_id() 0
#endif

#ifndef NULL
#define NULL ((void *)0)
#endif

#ifndef PANIC_IF
#define PANIC_IF(exp) do {						  \
	if (unlikely(exp)) {						  \
		printk("panic - %s: %s:%d\n", #exp, __FILE__, __LINE__);  \
		panic("%s: %s:%d", #exp, __FILE__, __LINE__);		  \
	}								  \
} while (0)
#endif
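/*
 * Illustrative use of PANIC_IF (the condition below is only an example,
 * not something this header checks): the expression is stringified, so
 * both the console message and the panic string record the failed
 * condition and its source location.
 *
 *	PANIC_IF(HYPERVISOR_shared_info == NULL);
 *
 * prints "panic - HYPERVISOR_shared_info == NULL: <file>:<line>" via
 * printk() and then calls panic() with the same information.
 */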

extern shared_info_t *HYPERVISOR_shared_info;

/*
 * Somewhere in the middle of the GCC 2.96 development cycle, we implemented
 * a mechanism by which the user can annotate likely branch directions and
 * expect the blocks to be reordered appropriately.  Define __builtin_expect
 * to nothing for earlier compilers.
 */
#if __GNUC__ == 2 && __GNUC_MINOR__ < 96
#define __builtin_expect(x, expected_value) (x)
#endif

#define likely(x)	__builtin_expect((x), 1)
#define unlikely(x)	__builtin_expect((x), 0)

/* REP NOP (PAUSE) is a good thing to insert into busy-wait loops. */
static inline void rep_nop(void)
{
	__asm__ __volatile__ ("rep;nop" : : : "memory");
}
#define cpu_relax() rep_nop()

#define per_cpu(var, cpu) (pcpu_find((cpu))->pc_ ## var)

/* Crude memory allocator for allocations early in boot. */
void *bootmem_alloc(unsigned int size);
void bootmem_free(void *ptr, unsigned int size);

/* Everything below this point is not included by assembler (.S) files. */
#ifndef __ASSEMBLY__
#include <sys/types.h>

void printk(const char *fmt, ...);

/* some function prototypes */
void trap_init(void);

/*
 * STI/CLI equivalents.  These basically set and clear the virtual
 * event_enable flag in the shared_info structure.  Note that when
 * the enable bit is set, there may be pending events to be handled.
 * We may therefore call into do_hypervisor_callback() directly.
 */
#define __cli()								\
do {									\
	vcpu_info_t *_vcpu;						\
	_vcpu = &HYPERVISOR_shared_info->vcpu_info[smp_processor_id()];	\
	_vcpu->evtchn_upcall_mask = 1;					\
	barrier();							\
} while (0)

#define __sti()								\
do {									\
	vcpu_info_t *_vcpu;						\
	barrier();							\
	_vcpu = &HYPERVISOR_shared_info->vcpu_info[smp_processor_id()];	\
	_vcpu->evtchn_upcall_mask = 0;					\
	barrier(); /* unmask then check (avoid races) */		\
	if (unlikely(_vcpu->evtchn_upcall_pending))			\
		force_evtchn_callback();				\
} while (0)

#define __restore_flags(x)						\
do {									\
	vcpu_info_t *_vcpu;						\
	barrier();							\
	_vcpu = &HYPERVISOR_shared_info->vcpu_info[smp_processor_id()];	\
	if ((_vcpu->evtchn_upcall_mask = (x)) == 0) {			\
		barrier(); /* unmask then check (avoid races) */	\
		if (unlikely(_vcpu->evtchn_upcall_pending))		\
			force_evtchn_callback();			\
	}								\
} while (0)

/*
 * Add critical_{enter, exit}?
 */
#define __save_and_cli(x)						\
do {									\
	vcpu_info_t *_vcpu;						\
	_vcpu = &HYPERVISOR_shared_info->vcpu_info[smp_processor_id()];	\
	(x) = _vcpu->evtchn_upcall_mask;				\
	_vcpu->evtchn_upcall_mask = 1;					\
	barrier();							\
} while (0)

#define cli()			__cli()
#define sti()			__sti()
#define save_flags(x)		__save_flags(x)
#define restore_flags(x)	__restore_flags(x)
#define save_and_cli(x)		__save_and_cli(x)

#define local_irq_save(x)	__save_and_cli(x)
#define local_irq_restore(x)	__restore_flags(x)
#define local_irq_disable()	__cli()
#define local_irq_enable()	__sti()

#define mtx_lock_irqsave(lock, x) do {					\
	local_irq_save((x));						\
	mtx_lock_spin((lock));						\
} while (0)
#define mtx_unlock_irqrestore(lock, x) do {				\
	mtx_unlock_spin((lock));					\
	local_irq_restore((x));						\
} while (0)
#define spin_lock_irqsave	mtx_lock_irqsave
#define spin_unlock_irqrestore	mtx_unlock_irqrestore
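/*
 * Illustrative usage sketch (the lock and flag variables are hypothetical,
 * not declared by this header): the Linux-style spin_lock_irqsave() names
 * wrap a FreeBSD spin mutex in the event-channel mask save/restore above,
 * so event upcalls stay masked for the whole critical section.
 *
 *	struct mtx my_lock;
 *	int flags;
 *
 *	mtx_lock_irqsave(&my_lock, flags);
 *	... critical section: upcalls masked, spin mutex held ...
 *	mtx_unlock_irqrestore(&my_lock, flags);
 */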

#ifndef mb
#define mb() __asm__ __volatile__("lock; addl $0, 0(%%esp)" : : : "memory")
#endif
#ifndef rmb
#define rmb() mb()
#endif
#ifndef wmb
#define wmb() barrier()
#endif
#ifdef SMP
#define smp_mb()	mb()
#define smp_rmb()	rmb()
#define smp_wmb()	wmb()
#define smp_read_barrier_depends()	read_barrier_depends()
#define set_mb(var, value) do { xchg(&var, value); } while (0)
#else
#define smp_mb()	barrier()
#define smp_rmb()	barrier()
#define smp_wmb()	barrier()
#define smp_read_barrier_depends()	do { } while (0)
#define set_mb(var, value) do { var = value; barrier(); } while (0)
#endif

/* This is a barrier for the compiler only, NOT the processor! */
#define barrier() __asm__ __volatile__("" : : : "memory")

#define LOCK_PREFIX ""
#define LOCK ""
#define ADDR (*(volatile long *)addr)

typedef struct { volatile int counter; } atomic_t;

#define xen_xchg(ptr, v)						\
	((__typeof__(*(ptr)))__xchg((unsigned long)(v), (ptr), sizeof(*(ptr))))

/*
 * Make sure gcc doesn't try to be clever and move things around
 * on us.  We need to use _exactly_ the address the user gave us,
 * not some alias that contains the same information.
 */
struct __xchg_dummy { unsigned long a[100]; };
#define __xg(x) ((volatile struct __xchg_dummy *)(x))

static __inline unsigned long
__xchg(unsigned long x, volatile void *ptr, int size)
{
	switch (size) {
	case 1:
		__asm__ __volatile__("xchgb %b0,%1"
		    : "=q" (x)
		    : "m" (*__xg(ptr)), "0" (x)
		    : "memory");
		break;
	case 2:
		__asm__ __volatile__("xchgw %w0,%1"
		    : "=r" (x)
		    : "m" (*__xg(ptr)), "0" (x)
		    : "memory");
		break;
	case 4:
		__asm__ __volatile__("xchgl %0,%1"
		    : "=r" (x)
		    : "m" (*__xg(ptr)), "0" (x)
		    : "memory");
		break;
	}
	return x;
}

/**
 * test_and_clear_bit - Clear a bit and return its old value
 * @nr: Bit to clear
 * @addr: Address to count from
 *
 * This operation is atomic and cannot be reordered.
 * It also implies a memory barrier.
 */
static __inline int
test_and_clear_bit(int nr, volatile void *addr)
{
	int oldbit;

	__asm__ __volatile__(LOCK_PREFIX
	    "btrl %2,%1\n\tsbbl %0,%0"
	    : "=r" (oldbit), "=m" (ADDR)
	    : "Ir" (nr) : "memory");
	return oldbit;
}

static __inline int
constant_test_bit(int nr, const volatile void *addr)
{
	return ((1UL << (nr & 31)) &
	    (((const volatile unsigned int *)addr)[nr >> 5])) != 0;
}

static __inline int
variable_test_bit(int nr, volatile void *addr)
{
	int oldbit;

	__asm__ __volatile__(
	    "btl %2,%1\n\tsbbl %0,%0"
	    : "=r" (oldbit)
	    : "m" (ADDR), "Ir" (nr));
	return oldbit;
}

/* Use the constant form when the bit number is known at compile time. */
#define test_bit(nr, addr)			\
	(__builtin_constant_p(nr) ?		\
	    constant_test_bit((nr), (addr)) :	\
	    variable_test_bit((nr), (addr)))

/**
 * set_bit - Atomically set a bit in memory
 * @nr: the bit to set
 * @addr: the address to start counting from
 *
 * This function is atomic and may not be reordered.  See __set_bit()
 * if you do not require the atomic guarantees.
 * Note that @nr may be almost arbitrarily large; this function is not
 * restricted to acting on a single-word quantity.
 */
static __inline__ void
set_bit(int nr, volatile void *addr)
{
	__asm__ __volatile__(LOCK_PREFIX
	    "btsl %1,%0"
	    : "=m" (ADDR)
	    : "Ir" (nr));
}

/**
 * clear_bit - Clears a bit in memory
 * @nr: Bit to clear
 * @addr: Address to start counting from
 *
 * clear_bit() is atomic and may not be reordered.  However, it does
 * not contain a memory barrier, so if it is used for locking purposes,
 * you should call smp_mb__before_clear_bit() and/or smp_mb__after_clear_bit()
 * in order to ensure changes are visible on other processors.
 */
static __inline__ void
clear_bit(int nr, volatile void *addr)
{
	__asm__ __volatile__(LOCK_PREFIX
	    "btrl %1,%0"
	    : "=m" (ADDR)
	    : "Ir" (nr));
}
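/*
 * Illustrative usage sketch (the variable and handler below are
 * hypothetical, not part of this header): the bit operations treat the
 * target as an array of 32-bit words.  Note that LOCK_PREFIX expands to
 * nothing in this header, so these are atomic only with respect to the
 * local CPU, not across processors.
 *
 *	static volatile unsigned long pending;
 *
 *	set_bit(5, &pending);
 *	if (test_and_clear_bit(5, &pending))	-- returns the old bit value
 *		handle_event(5);		-- hypothetical handler
 */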

/**
 * atomic_inc - increment atomic variable
 * @v: pointer of type atomic_t
 *
 * Atomically increments @v by 1.  Note that the guaranteed
 * useful range of an atomic_t is only 24 bits.
 */
static __inline__ void
atomic_inc(atomic_t *v)
{
	__asm__ __volatile__(
	    LOCK "incl %0"
	    : "=m" (v->counter)
	    : "m" (v->counter));
}

/* Read the 64-bit timestamp counter into edx:eax (the "=A" constraint). */
#define rdtscll(val) \
	__asm__ __volatile__("rdtsc" : "=A" (val))

/*
 * Kernel pointers have redundant information, so we can use a
 * scheme where we can return either an error code or a dentry
 * pointer with the same return value.
 *
 * This should be a per-architecture thing, to allow different
 * error and pointer decisions.
 */
#define IS_ERR_VALUE(x) unlikely((x) > (unsigned long)-1000L)

static inline void *
ERR_PTR(long error)
{
	return (void *)error;
}

static inline long
PTR_ERR(const void *ptr)
{
	return (long)ptr;
}

static inline long
IS_ERR(const void *ptr)
{
	return IS_ERR_VALUE((unsigned long)ptr);
}

#endif /* !__ASSEMBLY__ */

#endif /* _XEN_OS_H_ */