#ifndef __ASM_SYSTEM_H
#define __ASM_SYSTEM_H

#include <linux/config.h>
#include <linux/kernel.h>
#include <asm/segment.h>

#ifdef __KERNEL__

#ifdef CONFIG_SMP
#define LOCK_PREFIX "lock ; "
#else
#define LOCK_PREFIX ""
#endif

#define prepare_to_switch() do {} while(0)

#define __STR(x) #x
#define STR(x) __STR(x)

#define __PUSH(x) "pushq %%" __STR(x) "\n\t"
#define __POP(x) "popq %%" __STR(x) "\n\t"

/* frame pointer must be last for get_wchan */
#define SAVE_CONTEXT \
	__PUSH(rsi) __PUSH(rdi) \
	__PUSH(r12) __PUSH(r13) __PUSH(r14) __PUSH(r15) \
	__PUSH(rdx) __PUSH(rcx) __PUSH(r8) __PUSH(r9) __PUSH(r10) __PUSH(r11) \
	__PUSH(rbx) __PUSH(rbp)
#define RESTORE_CONTEXT \
	__POP(rbp) __POP(rbx) \
	__POP(r11) __POP(r10) __POP(r9) __POP(r8) __POP(rcx) __POP(rdx) \
	__POP(r15) __POP(r14) __POP(r13) __POP(r12) \
	__POP(rdi) __POP(rsi)

#define switch_to(prev,next,last) do { void *l; \
	asm volatile(SAVE_CONTEXT \
		"movq %%rsp,%0\n\t"	/* save RSP */ \
		"movq %3,%%rsp\n\t"	/* restore RSP */ \
		"leaq 1f(%%rip),%%rax\n\t" \
		"movq %%rax,%1\n\t"	/* save RIP */ \
		"pushq %4\n\t"		/* setup new RIP */ \
		"jmp __switch_to\n\t" \
		"1:\n\t" \
		RESTORE_CONTEXT \
		:"=m" (prev->thread.rsp),"=m" (prev->thread.rip), "=a" (l) \
		:"m" (next->thread.rsp),"m" (next->thread.rip), \
		 "S" (next), "D" (prev) \
		:"memory","cc"); \
	last = l; \
} while(0)

extern void load_gs_index(unsigned);

/*
 * Load a segment. Fall back on loading the zero
 * segment if something goes wrong..
 */
#define loadsegment(seg,value) \
	asm volatile("\n" \
		"1:\t" \
		"movl %0,%%" #seg "\n" \
		"2:\n" \
		".section .fixup,\"ax\"\n" \
		"3:\t" \
		"pushq $0 ; popq %% " #seg "\n\t" \
		"jmp 2b\n" \
		".previous\n" \
		".section __ex_table,\"a\"\n\t" \
		".align 4\n\t" \
		".quad 1b,3b\n" \
		".previous" \
		: :"r" ((int)(value)))

#define set_debug(value,register) \
	__asm__("movq %0,%%db" #register \
		: /* no output */ \
		:"r" ((unsigned long) value))


/*
 * Clear and set 'TS' bit respectively
 */
#define clts() __asm__ __volatile__ ("clts")
#define read_cr0() ({ \
	unsigned long __dummy; \
	__asm__( \
		"movq %%cr0,%0\n\t" \
		:"=r" (__dummy)); \
	__dummy; \
})
#define write_cr0(x) \
	__asm__("movq %0,%%cr0": :"r" (x));

#define read_cr4() ({ \
	unsigned long __dummy; \
	__asm__( \
		"movq %%cr4,%0\n\t" \
		:"=r" (__dummy)); \
	__dummy; \
})
#define write_cr4(x) \
	__asm__("movq %0,%%cr4": :"r" (x));
#define stts() write_cr0(8 | read_cr0())

#define wbinvd() \
	__asm__ __volatile__ ("wbinvd": : :"memory");

#endif	/* __KERNEL__ */

#define nop() __asm__ __volatile__ ("nop")

#define xchg(ptr,v) ((__typeof__(*(ptr)))__xchg((unsigned long)(v),(ptr),sizeof(*(ptr))))

#define tas(ptr) (xchg((ptr),1))
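
/*
 * Illustrative sketch (not part of the original header): xchg() atomically
 * swaps a value into *ptr and returns the previous contents, so tas() can
 * serve as a crude test-and-set primitive.  The lock word below is a
 * hypothetical example, assuming a suitably aligned unsigned long:
 *
 *	static unsigned long example_lock = 0;
 *
 *	while (tas(&example_lock))
 *		;			// spin until the old value was 0
 *	// ... critical section ...
 *	example_lock = 0;		// release
 */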

#define __xg(x) ((volatile long *)(x))

extern inline void set_64bit(volatile unsigned long *ptr, unsigned long val)
{
	*ptr = val;
}

#define _set_64bit set_64bit

/*
 * Note: no "lock" prefix even on SMP: xchg always implies lock anyway
 * Note 2: xchg has side effect, so that attribute volatile is necessary,
 *	   but generally the primitive is invalid, *ptr is output argument. --ANK
 */
static inline unsigned long __xchg(unsigned long x, volatile void * ptr, int size)
{
	switch (size) {
	case 1:
		__asm__ __volatile__("xchgb %b0,%1"
			:"=q" (x)
			:"m" (*__xg(ptr)), "0" (x)
			:"memory");
		break;
	case 2:
		__asm__ __volatile__("xchgw %w0,%1"
			:"=r" (x)
			:"m" (*__xg(ptr)), "0" (x)
			:"memory");
		break;
	case 4:
		__asm__ __volatile__("xchgl %k0,%1"
			:"=r" (x)
			:"m" (*__xg(ptr)), "0" (x)
			:"memory");
		break;
	case 8:
		__asm__ __volatile__("xchgq %0,%1"
			:"=r" (x)
			:"m" (*__xg(ptr)), "0" (x)
			:"memory");
		break;
	}
	return x;
}

/*
 * Atomic compare and exchange. Compare OLD with MEM, if identical,
 * store NEW in MEM. Return the initial value in MEM. Success is
 * indicated by comparing RETURN with OLD.
 */

#define __HAVE_ARCH_CMPXCHG 1

static inline unsigned long __cmpxchg(volatile void *ptr, unsigned long old,
				      unsigned long new, int size)
{
	unsigned long prev;
	switch (size) {
	case 1:
		__asm__ __volatile__(LOCK_PREFIX "cmpxchgb %b1,%2"
				     : "=a"(prev)
				     : "q"(new), "m"(*__xg(ptr)), "0"(old)
				     : "memory");
		return prev;
	case 2:
		__asm__ __volatile__(LOCK_PREFIX "cmpxchgw %w1,%2"
				     : "=a"(prev)
				     : "q"(new), "m"(*__xg(ptr)), "0"(old)
				     : "memory");
		return prev;
	case 4:
		__asm__ __volatile__(LOCK_PREFIX "cmpxchgl %k1,%2"
				     : "=a"(prev)
				     : "q"(new), "m"(*__xg(ptr)), "0"(old)
				     : "memory");
		return prev;
	case 8:
		__asm__ __volatile__(LOCK_PREFIX "cmpxchgq %1,%2"
				     : "=a"(prev)
				     : "q"(new), "m"(*__xg(ptr)), "0"(old)
				     : "memory");
		return prev;
	}
	return old;
}

#define cmpxchg(ptr,o,n)\
	((__typeof__(*(ptr)))__cmpxchg((ptr),(unsigned long)(o),\
				       (unsigned long)(n),sizeof(*(ptr))))
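
/*
 * Illustrative sketch (not part of the original header): cmpxchg() only
 * stores the new value when *ptr still holds the expected old value, so a
 * lock-free update is typically written as a retry loop.  The counter
 * below is a hypothetical example:
 *
 *	static unsigned long example_counter = 0;
 *	unsigned long old, new;
 *
 *	do {
 *		old = example_counter;
 *		new = old + 1;
 *	} while (cmpxchg(&example_counter, old, new) != old);
 */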


#ifdef CONFIG_SMP
#define smp_mb()	mb()
#define smp_rmb()	rmb()
#define smp_wmb()	wmb()
#else
#define smp_mb()	barrier()
#define smp_rmb()	barrier()
#define smp_wmb()	barrier()
#endif


/*
 * Force strict CPU ordering.
 * And yes, this is required on UP too when we're talking
 * to devices.
 *
 * For now, "wmb()" doesn't actually do anything, as all
 * Intel CPU's follow what Intel calls a *Processor Order*,
 * in which all writes are seen in the program order even
 * outside the CPU.
 *
 * I expect future Intel CPU's to have a weaker ordering,
 * but I'd also expect them to finally get their act together
 * and add some real memory barriers if so.
 */
#define mb()	asm volatile("mfence":::"memory")
#define rmb()	asm volatile("lfence":::"memory")
#define wmb()	asm volatile("sfence":::"memory")
#define set_mb(var, value) do { xchg(&var, value); } while (0)
#define set_wmb(var, value) do { var = value; wmb(); } while (0)

#define warn_if_not_ulong(x) do { unsigned long foo; (void) (&(x) == &foo); } while (0)

/* interrupt control.. */
#define __save_flags(x)		do { warn_if_not_ulong(x); __asm__ __volatile__("# save_flags \n\t pushfq ; popq %q0":"=g" (x): /* no input */ :"memory"); } while (0)
#define __restore_flags(x)	__asm__ __volatile__("# restore_flags \n\t pushq %0 ; popfq": /* no output */ :"g" (x):"memory", "cc")
#define __cli()			__asm__ __volatile__("cli": : :"memory")
#define __sti()			__asm__ __volatile__("sti": : :"memory")
/* used in the idle loop; sti takes one instruction cycle to complete */
#define safe_halt()		__asm__ __volatile__("sti; hlt": : :"memory")

/* For spinlocks etc */
#define local_irq_save(x)	do { warn_if_not_ulong(x); __asm__ __volatile__("# local_irq_save \n\t pushfq ; popq %0 ; cli":"=g" (x): /* no input */ :"memory"); } while (0)
#define local_irq_restore(x)	__asm__ __volatile__("# local_irq_restore \n\t pushq %0 ; popfq": /* no output */ :"g" (x):"memory")
#define local_irq_disable()	__cli()
#define local_irq_enable()	__sti()

#ifdef CONFIG_SMP

extern void __global_cli(void);
extern void __global_sti(void);
extern unsigned long __global_save_flags(void);
extern void __global_restore_flags(unsigned long);
#define cli() __global_cli()
#define sti() __global_sti()
#define save_flags(x) ((x)=__global_save_flags())
#define restore_flags(x) __global_restore_flags(x)

#else

#define cli() __cli()
#define sti() __sti()
#define save_flags(x) __save_flags(x)
#define restore_flags(x) __restore_flags(x)

#endif

/* Default simics "magic" breakpoint */
#define icebp() asm volatile("xchg %%bx,%%bx" ::: "ebx")

/*
 * disable hlt during certain critical i/o operations
 */
#define HAVE_DISABLE_HLT
void disable_hlt(void);
void enable_hlt(void);

#endif
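
/*
 * Illustrative sketch (not part of the original header): local_irq_save()
 * copies the flags register into its argument and disables interrupts on
 * the local CPU, and local_irq_restore() puts the saved flags back, so a
 * short critical section is typically bracketed as below ("flags" is a
 * hypothetical local variable):
 *
 *	unsigned long flags;
 *
 *	local_irq_save(flags);
 *	// ... touch state that an interrupt handler might also touch ...
 *	local_irq_restore(flags);
 */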