xen-os.h revision 185386
/******************************************************************************
 * os.h
 *
 * Random collection of macros and definitions.
 */

#ifndef _XEN_OS_H_
#define _XEN_OS_H_
#include <machine/param.h>
#ifdef PAE
#define CONFIG_X86_PAE
#endif

#if !defined(__XEN_INTERFACE_VERSION__)
/*
 * Can update to a more recent version when we implement
 * the hypercall page.
 */
#define __XEN_INTERFACE_VERSION__ 0x00030204
#endif

#include <xen/interface/xen.h>

/* Force a proper event-channel callback from Xen. */
void force_evtchn_callback(void);

#ifndef vtophys
#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/pmap.h>
#endif

extern int gdtset;
#ifdef SMP
#include <sys/time.h> /* XXX for pcpu.h */
#include <sys/pcpu.h> /* XXX for PCPU_GET */
static inline int
smp_processor_id(void)
{
	if (__predict_true(gdtset))
		return PCPU_GET(cpuid);
	return 0;
}

#else
#define smp_processor_id() 0
#endif

#ifndef NULL
#define NULL (void *)0
#endif

#ifndef PANIC_IF
#define PANIC_IF(exp) if (unlikely(exp)) {panic("%s: %s:%d", #exp, __FILE__, __LINE__);}
#endif

extern shared_info_t *HYPERVISOR_shared_info;

/* Somewhere in the middle of the GCC 2.96 development cycle, we implemented
   a mechanism by which the user can annotate likely branch directions and
   expect the blocks to be reordered appropriately.  Define __builtin_expect
   to nothing for earlier compilers.  */

/* REP NOP (PAUSE) is a good thing to insert into busy-wait loops. */
static inline void rep_nop(void)
{
	__asm__ __volatile__ ( "rep;nop" : : : "memory" );
}
#define cpu_relax() rep_nop()


#if __GNUC__ == 2 && __GNUC_MINOR__ < 96
#define __builtin_expect(x, expected_value) (x)
#endif

#define per_cpu(var, cpu)	(pcpu_find((cpu))->pc_ ## var)

/*
 * Crude memory allocator for memory allocation early in boot.
 */
void *bootmem_alloc(unsigned int size);
void bootmem_free(void *ptr, unsigned int size);


/* Everything below this point is not included by assembler (.S) files. */
#ifndef __ASSEMBLY__
#include <sys/types.h>

void printk(const char *fmt, ...);

/* some function prototypes */
void trap_init(void);

#define likely(x)	__builtin_expect((x),1)
#define unlikely(x)	__builtin_expect((x),0)

#ifndef XENHVM

/*
 * STI/CLI equivalents.  These basically set and clear the virtual
 * event_enable flag in the shared_info structure.  Note that when
 * the enable bit is set, there may be pending events to be handled.
 * We may therefore call into do_hypervisor_callback() directly.
 */

#define __cli()                                                         \
do {                                                                    \
	vcpu_info_t *_vcpu;                                             \
	_vcpu = &HYPERVISOR_shared_info->vcpu_info[smp_processor_id()]; \
	_vcpu->evtchn_upcall_mask = 1;                                  \
	barrier();                                                      \
} while (0)

#define __sti()                                                         \
do {                                                                    \
	vcpu_info_t *_vcpu;                                             \
	barrier();                                                      \
	_vcpu = &HYPERVISOR_shared_info->vcpu_info[smp_processor_id()]; \
	_vcpu->evtchn_upcall_mask = 0;                                  \
	barrier(); /* unmask then check (avoid races) */                \
	if ( unlikely(_vcpu->evtchn_upcall_pending) )                   \
		force_evtchn_callback();                                \
} while (0)

#define __restore_flags(x)                                              \
do {                                                                    \
	vcpu_info_t *_vcpu;                                             \
	barrier();                                                      \
	_vcpu = &HYPERVISOR_shared_info->vcpu_info[smp_processor_id()]; \
	if ((_vcpu->evtchn_upcall_mask = (x)) == 0) {                   \
		barrier(); /* unmask then check (avoid races) */        \
		if ( unlikely(_vcpu->evtchn_upcall_pending) )           \
			force_evtchn_callback();                        \
	}                                                               \
} while (0)

/*
 * Add critical_{enter, exit}?
 */
#define __save_and_cli(x)                                               \
do {                                                                    \
	vcpu_info_t *_vcpu;                                             \
	_vcpu = &HYPERVISOR_shared_info->vcpu_info[smp_processor_id()]; \
	(x) = _vcpu->evtchn_upcall_mask;                                \
	_vcpu->evtchn_upcall_mask = 1;                                  \
	barrier();                                                      \
} while (0)


#define cli() __cli()
#define sti() __sti()
#define save_flags(x) __save_flags(x)
#define restore_flags(x) __restore_flags(x)
#define save_and_cli(x) __save_and_cli(x)

#define local_irq_save(x)	__save_and_cli(x)
#define local_irq_restore(x)	__restore_flags(x)
#define local_irq_disable()	__cli()
#define local_irq_enable()	__sti()

#define mtx_lock_irqsave(lock, x)	{local_irq_save((x)); mtx_lock_spin((lock));}
#define mtx_unlock_irqrestore(lock, x)	{mtx_unlock_spin((lock)); local_irq_restore((x)); }
#define spin_lock_irqsave	mtx_lock_irqsave
#define spin_unlock_irqrestore	mtx_unlock_irqrestore
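/*
 * Usage sketch (illustrative only, not part of the original header): a
 * typical caller saves the current upcall mask, runs a short critical
 * section, and restores the mask; __restore_flags() then delivers any
 * events that became pending while masked.  The names my_lock and
 * my_counter below are hypothetical.
 *
 *	struct mtx my_lock;		// a spin mutex
 *	unsigned long flags;
 *
 *	mtx_lock_irqsave(&my_lock, flags);
 *	my_counter++;			// runs with event upcalls masked
 *	mtx_unlock_irqrestore(&my_lock, flags);
 *
 * or, without a mutex:
 *
 *	local_irq_save(flags);
 *	// ... touch state shared with the event-channel upcall ...
 *	local_irq_restore(flags);
 */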
#else
#endif

#ifndef mb
#define mb() __asm__ __volatile__("mfence":::"memory")
#endif
#ifndef rmb
#define rmb() __asm__ __volatile__("lfence":::"memory")
#endif
#ifndef wmb
#define wmb() barrier()
#endif
#ifdef SMP
#define smp_mb()	mb()
#define smp_rmb()	rmb()
#define smp_wmb()	wmb()
#define smp_read_barrier_depends()	read_barrier_depends()
#define set_mb(var, value) do { xchg(&var, value); } while (0)
#else
#define smp_mb()	barrier()
#define smp_rmb()	barrier()
#define smp_wmb()	barrier()
#define smp_read_barrier_depends()	do { } while(0)
#define set_mb(var, value) do { var = value; barrier(); } while (0)
#endif


/* This is a barrier for the compiler only, NOT the processor! */
#define barrier() __asm__ __volatile__("": : :"memory")

#define LOCK_PREFIX ""
#define LOCK ""
#define ADDR (*(volatile long *) addr)
/*
 * Make sure gcc doesn't try to be clever and move things around
 * on us.  We need to use _exactly_ the address the user gave us,
 * not some alias that contains the same information.
 */
typedef struct { volatile int counter; } atomic_t;


#define xen_xchg(ptr,v) \
	((__typeof__(*(ptr)))__xchg((unsigned long)(v),(ptr),sizeof(*(ptr))))
struct __xchg_dummy { unsigned long a[100]; };
#define __xg(x) ((volatile struct __xchg_dummy *)(x))
static __inline unsigned long __xchg(unsigned long x, volatile void * ptr,
				     int size)
{
	switch (size) {
	case 1:
		__asm__ __volatile__("xchgb %b0,%1"
				     :"=q" (x)
				     :"m" (*__xg(ptr)), "0" (x)
				     :"memory");
		break;
	case 2:
		__asm__ __volatile__("xchgw %w0,%1"
				     :"=r" (x)
				     :"m" (*__xg(ptr)), "0" (x)
				     :"memory");
		break;
	case 4:
		__asm__ __volatile__("xchgl %0,%1"
				     :"=r" (x)
				     :"m" (*__xg(ptr)), "0" (x)
				     :"memory");
		break;
	}
	return x;
}

/**
 * test_and_clear_bit - Clear a bit and return its old value
 * @nr: Bit to clear
 * @addr: Address to count from
 *
 * This operation is atomic and cannot be reordered.
 * It also implies a memory barrier.
 */
static __inline int test_and_clear_bit(int nr, volatile void * addr)
{
	int oldbit;

	__asm__ __volatile__( LOCK_PREFIX
		"btrl %2,%1\n\tsbbl %0,%0"
		:"=r" (oldbit),"=m" (ADDR)
		:"Ir" (nr) : "memory");
	return oldbit;
}

static __inline int constant_test_bit(int nr, const volatile void * addr)
{
	return ((1UL << (nr & 31)) & (((const volatile unsigned int *) addr)[nr >> 5])) != 0;
}

static __inline int variable_test_bit(int nr, volatile void * addr)
{
	int oldbit;

	__asm__ __volatile__(
		"btl %2,%1\n\tsbbl %0,%0"
		:"=r" (oldbit)
		:"m" (ADDR),"Ir" (nr));
	return oldbit;
}

#define test_bit(nr,addr) \
(__builtin_constant_p(nr) ? \
 constant_test_bit((nr),(addr)) : \
 variable_test_bit((nr),(addr)))


/**
 * set_bit - Atomically set a bit in memory
 * @nr: the bit to set
 * @addr: the address to start counting from
 *
 * This function is atomic and may not be reordered.  See __set_bit()
 * if you do not require the atomic guarantees.
 * Note that @nr may be almost arbitrarily large; this function is not
 * restricted to acting on a single-word quantity.
 */
static __inline__ void set_bit(int nr, volatile void * addr)
{
	__asm__ __volatile__( LOCK_PREFIX
		"btsl %1,%0"
		:"=m" (ADDR)
		:"Ir" (nr));
}

/**
 * clear_bit - Clears a bit in memory
 * @nr: Bit to clear
 * @addr: Address to start counting from
 *
 * clear_bit() is atomic and may not be reordered.  However, it does
 * not contain a memory barrier, so if it is used for locking purposes,
 * you should call smp_mb__before_clear_bit() and/or smp_mb__after_clear_bit()
 * in order to ensure changes are visible on other processors.
 */
static __inline__ void clear_bit(int nr, volatile void * addr)
{
	__asm__ __volatile__( LOCK_PREFIX
		"btrl %1,%0"
		:"=m" (ADDR)
		:"Ir" (nr));
}
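/*
 * Usage sketch (illustrative only, not part of the original header): the
 * bit operations above treat 'addr' as the base of a flat bit array, with
 * bit 0 in the least significant bit of the first 32-bit word; this is how
 * the event-channel pending and mask arrays in shared_info are manipulated.
 * The names my_bitmap and MY_BIT below are hypothetical.
 *
 *	static unsigned long my_bitmap[2];	// 64 bits on i386
 *	#define MY_BIT	42			// lives in my_bitmap[1]
 *
 *	set_bit(MY_BIT, my_bitmap);
 *	if (test_bit(MY_BIT, my_bitmap))
 *		printk("bit %d is set\n", MY_BIT);
 *	if (test_and_clear_bit(MY_BIT, my_bitmap))
 *		printk("we cleared it\n");	// old value was 1
 */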
/**
 * atomic_inc - increment atomic variable
 * @v: pointer of type atomic_t
 *
 * Atomically increments @v by 1.  Note that the guaranteed
 * useful range of an atomic_t is only 24 bits.
 */
static __inline__ void atomic_inc(atomic_t *v)
{
	__asm__ __volatile__(
		LOCK "incl %0"
		:"=m" (v->counter)
		:"m" (v->counter));
}


#define rdtscll(val) \
	__asm__ __volatile__("rdtsc" : "=A" (val))


/*
 * Kernel pointers have redundant information, so we can use a
 * scheme where we can return either an error code or a dentry
 * pointer with the same return value.
 *
 * This should be a per-architecture thing, to allow different
 * error and pointer decisions.
 */
#define IS_ERR_VALUE(x) unlikely((x) > (unsigned long)-1000L)

static inline void *ERR_PTR(long error)
{
	return (void *) error;
}

static inline long PTR_ERR(const void *ptr)
{
	return (long) ptr;
}

static inline long IS_ERR(const void *ptr)
{
	return IS_ERR_VALUE((unsigned long)ptr);
}
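/*
 * Usage sketch (illustrative only, not part of the original header): a
 * function can fold an errno value and a pointer into a single return
 * value, because valid kernel pointers never land in the top ~1000 bytes
 * of the address space.  The function my_lookup() below is hypothetical.
 *
 *	void *obj = my_lookup(name);
 *	if (IS_ERR(obj))
 *		return PTR_ERR(obj);	// e.g. -ENOENT
 *	// obj is a valid pointer here
 */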
322 */ 323static __inline__ void atomic_inc(atomic_t *v) 324{ 325 __asm__ __volatile__( 326 LOCK "incl %0" 327 :"=m" (v->counter) 328 :"m" (v->counter)); 329} 330 331 332#define rdtscll(val) \ 333 __asm__ __volatile__("rdtsc" : "=A" (val)) 334 335 336 337/* 338 * Kernel pointers have redundant information, so we can use a 339 * scheme where we can return either an error code or a dentry 340 * pointer with the same return value. 341 * 342 * This should be a per-architecture thing, to allow different 343 * error and pointer decisions. 344 */ 345#define IS_ERR_VALUE(x) unlikely((x) > (unsigned long)-1000L) 346 347static inline void *ERR_PTR(long error) 348{ 349 return (void *) error; 350} 351 352static inline long PTR_ERR(const void *ptr) 353{ 354 return (long) ptr; 355} 356 357static inline long IS_ERR(const void *ptr) 358{ 359 return IS_ERR_VALUE((unsigned long)ptr); 360} 361 362#endif /* !__ASSEMBLY__ */ 363 364#endif /* _OS_H_ */ 365