#ifndef __ASM_SYSTEM_H
#define __ASM_SYSTEM_H

#include <linux/config.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <asm/segment.h>
#include <linux/bitops.h> /* for LOCK_PREFIX */

#ifdef __KERNEL__

struct task_struct;	/* one of the stranger aspects of C forward declarations.. */
extern void FASTCALL(__switch_to(struct task_struct *prev, struct task_struct *next));

#define prepare_to_switch()	do { } while(0)
#define switch_to(prev,next,last) do {					\
	asm volatile("pushl %%esi\n\t"					\
		     "pushl %%edi\n\t"					\
		     "pushl %%ebp\n\t"					\
		     "movl %%esp,%0\n\t"	/* save ESP */		\
		     "movl %3,%%esp\n\t"	/* restore ESP */	\
		     "movl $1f,%1\n\t"		/* save EIP */		\
		     "pushl %4\n\t"		/* restore EIP */	\
		     "jmp __switch_to\n"				\
		     "1:\t"						\
		     "popl %%ebp\n\t"					\
		     "popl %%edi\n\t"					\
		     "popl %%esi\n\t"					\
		     :"=m" (prev->thread.esp),"=m" (prev->thread.eip),	\
		      "=b" (last)					\
		     :"m" (next->thread.esp),"m" (next->thread.eip),	\
		      "a" (prev), "d" (next),				\
		      "b" (prev));					\
} while (0)

#define _set_base(addr,base) do { unsigned long __pr; \
__asm__ __volatile__ ("movw %%dx,%1\n\t" \
	"rorl $16,%%edx\n\t" \
	"movb %%dl,%2\n\t" \
	"movb %%dh,%3" \
	:"=&d" (__pr) \
	:"m" (*((addr)+2)), \
	 "m" (*((addr)+4)), \
	 "m" (*((addr)+7)), \
	 "0" (base) \
	); } while(0)

#define _set_limit(addr,limit) do { unsigned long __lr; \
__asm__ __volatile__ ("movw %%dx,%1\n\t" \
	"rorl $16,%%edx\n\t" \
	"movb %2,%%dh\n\t" \
	"andb $0xf0,%%dh\n\t" \
	"orb %%dh,%%dl\n\t" \
	"movb %%dl,%2" \
	:"=&d" (__lr) \
	:"m" (*(addr)), \
	 "m" (*((addr)+6)), \
	 "0" (limit) \
	); } while(0)

#define set_base(ldt,base) _set_base( ((char *)&(ldt)) , (base) )
#define set_limit(ldt,limit) _set_limit( ((char *)&(ldt)) , ((limit)-1)>>12 )

static inline unsigned long _get_base(char * addr)
{
	unsigned long __base;
	__asm__("movb %3,%%dh\n\t"
		"movb %2,%%dl\n\t"
		"shll $16,%%edx\n\t"
		"movw %1,%%dx"
		:"=&d" (__base)
		:"m" (*((addr)+2)),
		 "m" (*((addr)+4)),
		 "m" (*((addr)+7)));
	return __base;
}

#define get_base(ldt) _get_base( ((char *)&(ldt)) )

/*
 * Load a segment. Fall back on loading the zero
 * segment if something goes wrong..
 */
#define loadsegment(seg,value)			\
	asm volatile("\n"			\
		"1:\t"				\
		"movl %0,%%" #seg "\n"		\
		"2:\n"				\
		".section .fixup,\"ax\"\n"	\
		"3:\t"				\
		"pushl $0\n\t"			\
		"popl %%" #seg "\n\t"		\
		"jmp 2b\n"			\
		".previous\n"			\
		".section __ex_table,\"a\"\n\t"	\
		".align 4\n\t"			\
		".long 1b,3b\n"			\
		".previous"			\
		: :"m" (*(unsigned int *)&(value)))
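
/*
 * A minimal usage sketch (not part of the original header): reload %fs
 * from a caller-supplied selector.  A bogus selector faults at label 1:
 * in loadsegment() and the .fixup code loads the null selector instead,
 * so a bad value cannot bring the kernel down.  The function name and
 * the 'sel' parameter are hypothetical.
 */
static inline void example_load_fs(unsigned int sel)
{
	loadsegment(fs, sel);	/* falls back to the null selector on fault */
}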

/*
 * Clear and set 'TS' bit respectively
 */
#define clts() __asm__ __volatile__ ("clts")
#define read_cr0() ({ \
	unsigned int __dummy; \
	__asm__( \
		"movl %%cr0,%0\n\t" \
		:"=r" (__dummy)); \
	__dummy; \
})
#define write_cr0(x) \
	__asm__("movl %0,%%cr0": :"r" (x));

#define read_cr4() ({ \
	unsigned int __dummy; \
	__asm__( \
		"movl %%cr4,%0\n\t" \
		:"=r" (__dummy)); \
	__dummy; \
})
#define write_cr4(x) \
	__asm__("movl %0,%%cr4": :"r" (x));
#define stts() write_cr0(8 | read_cr0())

#endif	/* __KERNEL__ */

#define wbinvd() \
	__asm__ __volatile__ ("wbinvd": : :"memory");

static inline unsigned long get_limit(unsigned long segment)
{
	unsigned long __limit;
	__asm__("lsll %1,%0"
		:"=r" (__limit):"r" (segment));
	return __limit+1;
}

#define nop() __asm__ __volatile__ ("nop")

#define xchg(ptr,v) ((__typeof__(*(ptr)))__xchg((unsigned long)(v),(ptr),sizeof(*(ptr))))

#define tas(ptr) (xchg((ptr),1))

struct __xchg_dummy { unsigned long a[100]; };
#define __xg(x) ((struct __xchg_dummy *)(x))

/*
 * The semantics of XCHGCMP8B are a bit strange, this is why
 * there is a loop and the loading of %%eax and %%edx has to
 * be inside. This inlines well in most cases, the cached
 * cost is around ~38 cycles. (in the future we might want
 * to do a SIMD/3DNOW!/MMX/FPU 64-bit store here, but that
 * might have an implicit FPU-save as a cost, so it's not
 * clear which path to go.)
 *
 * cmpxchg8b must be used with the lock prefix here to allow
 * the instruction to be executed atomically, see page 3-102
 * of the instruction set reference 24319102.pdf. We need
 * the reader side to see the coherent 64bit value.
 */
static inline void __set_64bit (unsigned long long * ptr,
		unsigned int low, unsigned int high)
{
	__asm__ __volatile__ (
		"\n1:\t"
		"movl (%0), %%eax\n\t"
		"movl 4(%0), %%edx\n\t"
		"lock cmpxchg8b (%0)\n\t"
		"jnz 1b"
		: /* no outputs */
		:	"D"(ptr),
			"b"(low),
			"c"(high)
		:	"ax","dx","memory");
}

static inline void __set_64bit_constant (unsigned long long *ptr,
					 unsigned long long value)
{
	__set_64bit(ptr,(unsigned int)(value), (unsigned int)((value)>>32ULL));
}
#define ll_low(x)	*(((unsigned int*)&(x))+0)
#define ll_high(x)	*(((unsigned int*)&(x))+1)

static inline void __set_64bit_var (unsigned long long *ptr,
				    unsigned long long value)
{
	__set_64bit(ptr,ll_low(value), ll_high(value));
}

#define set_64bit(ptr,value) \
(__builtin_constant_p(value) ? \
 __set_64bit_constant(ptr, value) : \
 __set_64bit_var(ptr, value) )

#define _set_64bit(ptr,value) \
(__builtin_constant_p(value) ? \
 __set_64bit(ptr, (unsigned int)(value), (unsigned int)((value)>>32ULL) ) : \
 __set_64bit(ptr, ll_low(value), ll_high(value)) )
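
/*
 * A minimal usage sketch (not part of the original header): atomically
 * publish a full 64-bit value so that a concurrent 64-bit reader never
 * sees a torn, half-updated result -- the sort of store needed for e.g.
 * PAE page table entries.  The function name and 'slot' are hypothetical.
 */
static inline void example_publish_u64(unsigned long long *slot,
				       unsigned long long value)
{
	/* expands to the lock cmpxchg8b retry loop in __set_64bit() */
	set_64bit(slot, value);
}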

/*
 * Note: no "lock" prefix even on SMP: xchg always implies lock anyway.
 * Note 2: xchg has a side effect, so the volatile attribute is necessary,
 *	   but strictly speaking the asm is incomplete: *ptr is really an
 *	   output argument as well. --ANK
 */
static inline unsigned long __xchg(unsigned long x, volatile void * ptr, int size)
{
	switch (size) {
		case 1:
			__asm__ __volatile__("xchgb %b0,%1"
				:"=q" (x)
				:"m" (*__xg(ptr)), "0" (x)
				:"memory");
			break;
		case 2:
			__asm__ __volatile__("xchgw %w0,%1"
				:"=r" (x)
				:"m" (*__xg(ptr)), "0" (x)
				:"memory");
			break;
		case 4:
			__asm__ __volatile__("xchgl %0,%1"
				:"=r" (x)
				:"m" (*__xg(ptr)), "0" (x)
				:"memory");
			break;
	}
	return x;
}

/*
 * Atomic compare and exchange.  Compare OLD with MEM, if identical,
 * store NEW in MEM.  Return the initial value in MEM.  Success is
 * indicated by comparing RETURN with OLD.
 */

#ifdef CONFIG_X86_CMPXCHG
#define __HAVE_ARCH_CMPXCHG 1

static inline unsigned long __cmpxchg(volatile void *ptr, unsigned long old,
				      unsigned long new, int size)
{
	unsigned long prev;
	switch (size) {
	case 1:
		__asm__ __volatile__(LOCK_PREFIX "cmpxchgb %b1,%2"
				     : "=a"(prev)
				     : "q"(new), "m"(*__xg(ptr)), "0"(old)
				     : "memory");
		return prev;
	case 2:
		__asm__ __volatile__(LOCK_PREFIX "cmpxchgw %w1,%2"
				     : "=a"(prev)
				     : "q"(new), "m"(*__xg(ptr)), "0"(old)
				     : "memory");
		return prev;
	case 4:
		__asm__ __volatile__(LOCK_PREFIX "cmpxchgl %1,%2"
				     : "=a"(prev)
				     : "q"(new), "m"(*__xg(ptr)), "0"(old)
				     : "memory");
		return prev;
	}
	return old;
}

#define cmpxchg(ptr,o,n) \
	((__typeof__(*(ptr)))__cmpxchg((ptr),(unsigned long)(o), \
				       (unsigned long)(n),sizeof(*(ptr))))

#else
/* Compiling for a 386 proper. Is it worth implementing via cli/sti? */
#endif

/*
 * Force strict CPU ordering.
 * And yes, this is required on UP too when we're talking
 * to devices.
 *
 * For now, "wmb()" doesn't actually do anything, as all
 * Intel CPUs follow what Intel calls a *Processor Order*,
 * in which all writes are seen in the program order even
 * outside the CPU.
 *
 * I expect future Intel CPUs to have a weaker ordering,
 * but I'd also expect them to finally get their act together
 * and add some real memory barriers if so.
 *
 * Some non-Intel clones support out-of-order stores; wmb()
 * ceases to be a nop for these.
 */

#define mb()	__asm__ __volatile__ ("lock; addl $0,0(%%esp)": : :"memory")
#define rmb()	mb()

#ifdef CONFIG_X86_OOSTORE
#define wmb()	__asm__ __volatile__ ("lock; addl $0,0(%%esp)": : :"memory")
#else
#define wmb()	__asm__ __volatile__ ("": : :"memory")
#endif

#ifdef CONFIG_SMP
#define smp_mb()	mb()
#define smp_rmb()	rmb()
#define smp_wmb()	wmb()
#else
#define smp_mb()	barrier()
#define smp_rmb()	barrier()
#define smp_wmb()	barrier()
#endif

#define set_mb(var, value) do { xchg(&var, value); } while (0)
#define set_wmb(var, value) do { var = value; wmb(); } while (0)
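
/*
 * A minimal usage sketch (not part of the original header): the usual
 * lock-free read-modify-write loop built on cmpxchg().  The function
 * name and 'v' are hypothetical; only compiled where cmpxchg exists.
 */
#ifdef __HAVE_ARCH_CMPXCHG
static inline unsigned long example_cmpxchg_add(volatile unsigned long *v,
						unsigned long delta)
{
	unsigned long old;

	do {
		old = *v;		/* snapshot the current value */
		/* retry if another CPU updated *v since the snapshot */
	} while (cmpxchg(v, old, old + delta) != old);

	return old + delta;
}
#endif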
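
/*
 * A minimal usage sketch (not part of the original header): the classic
 * producer/consumer pairing of wmb() and rmb() described above.  Use the
 * smp_*() forms when only CPU-vs-CPU ordering matters.  'data', 'ready'
 * and the function names are hypothetical.
 */
static inline void example_publish(int *data, int *ready, int value)
{
	*data = value;		/* write the payload first ... */
	wmb();			/* ... order it before the flag ... */
	*ready = 1;		/* ... then set the flag readers poll */
}

static inline int example_consume(int *data, int *ready, int *value)
{
	if (!*ready)
		return 0;	/* nothing published yet */
	rmb();			/* read the flag before the payload */
	*value = *data;
	return 1;
}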

/* interrupt control.. */
#define __save_flags(x)		__asm__ __volatile__("pushfl ; popl %0":"=g" (x): /* no input */)
#define __restore_flags(x)	__asm__ __volatile__("pushl %0 ; popfl": /* no output */ :"g" (x):"memory", "cc")
#define __cli()			__asm__ __volatile__("cli": : :"memory")
#define __sti()			__asm__ __volatile__("sti": : :"memory")
/* used in the idle loop; sti takes one instruction cycle to complete */
#define safe_halt()		__asm__ __volatile__("sti; hlt": : :"memory")

/* For spinlocks etc */
#define local_irq_save(x)	__asm__ __volatile__("pushfl ; popl %0 ; cli":"=g" (x): /* no input */ :"memory")
#define local_irq_restore(x)	__restore_flags(x)
#define local_irq_disable()	__cli()
#define local_irq_enable()	__sti()

#ifdef CONFIG_SMP

extern void __global_cli(void);
extern void __global_sti(void);
extern unsigned long __global_save_flags(void);
extern void __global_restore_flags(unsigned long);
#define cli() __global_cli()
#define sti() __global_sti()
#define save_flags(x) ((x)=__global_save_flags())
#define restore_flags(x) __global_restore_flags(x)

#else

#define cli() __cli()
#define sti() __sti()
#define save_flags(x) __save_flags(x)
#define restore_flags(x) __restore_flags(x)

#endif

/*
 * disable hlt during certain critical i/o operations
 */
#define HAVE_DISABLE_HLT
void disable_hlt(void);
void enable_hlt(void);

extern unsigned long dmi_broken;
extern int is_sony_vaio_laptop;

#define BROKEN_ACPI_Sx		0x0001
#define BROKEN_INIT_AFTER_S1	0x0002

#endif /* __ASM_SYSTEM_H */