/* $Id: system.h,v 1.1.1.1 2008/10/15 03:29:18 james26_jang Exp $ */
#ifndef __SPARC64_SYSTEM_H
#define __SPARC64_SYSTEM_H

#include <linux/config.h>
#include <asm/ptrace.h>
#include <asm/processor.h>
#include <asm/asm_offsets.h>
#include <asm/visasm.h>

#ifndef __ASSEMBLY__
/*
 * Sparc (general) CPU types
 */
enum sparc_cpu {
	sun4        = 0x00,
	sun4c       = 0x01,
	sun4m       = 0x02,
	sun4d       = 0x03,
	sun4e       = 0x04,
	sun4u       = 0x05, /* V8 ploos ploos */
	sun_unknown = 0x06,
	ap1000      = 0x07, /* almost a sun4m */
};

#define sparc_cpu_model sun4u

/* This cannot ever be a sun4c nor sun4 :) That's just history. */
#define ARCH_SUN4C_SUN4 0
#define ARCH_SUN4 0

#endif /* !(__ASSEMBLY__) */

#define setipl(__new_ipl) \
	__asm__ __volatile__("wrpr %0, %%pil" : : "r" (__new_ipl) : "memory")

#define __cli() \
	__asm__ __volatile__("wrpr 15, %%pil" : : : "memory")

#define __sti() \
	__asm__ __volatile__("wrpr 0, %%pil" : : : "memory")

#define getipl() \
({ unsigned long retval; __asm__ __volatile__("rdpr %%pil, %0" : "=r" (retval)); retval; })

#define swap_pil(__new_pil) \
({	unsigned long retval; \
	__asm__ __volatile__("rdpr %%pil, %0\n\t" \
			     "wrpr %1, %%pil" \
			     : "=&r" (retval) \
			     : "r" (__new_pil) \
			     : "memory"); \
	retval; \
})

#define read_pil_and_cli() \
({	unsigned long retval; \
	__asm__ __volatile__("rdpr %%pil, %0\n\t" \
			     "wrpr 15, %%pil" \
			     : "=r" (retval) \
			     : : "memory"); \
	retval; \
})

#define __save_flags(flags)		((flags) = getipl())
#define __save_and_cli(flags)		((flags) = read_pil_and_cli())
#define __restore_flags(flags)		setipl((flags))
#define local_irq_disable()		__cli()
#define local_irq_enable()		__sti()
#define local_irq_save(flags)		__save_and_cli(flags)
#define local_irq_restore(flags)	__restore_flags(flags)

#ifndef CONFIG_SMP
#define cli()			__cli()
#define sti()			__sti()
#define save_flags(x)		__save_flags(x)
#define restore_flags(x)	__restore_flags(x)
#define save_and_cli(x)		__save_and_cli(x)
#else

#ifndef __ASSEMBLY__
extern void __global_cli(void);
extern void __global_sti(void);
extern unsigned long __global_save_flags(void);
extern void __global_restore_flags(unsigned long flags);
#endif

#define cli()			__global_cli()
#define sti()			__global_sti()
#define save_flags(x)		((x) = __global_save_flags())
#define restore_flags(flags)	__global_restore_flags(flags)
#define save_and_cli(flags)	do { save_flags(flags); cli(); } while(0)

#endif /* !(CONFIG_SMP) */

#define nop()		__asm__ __volatile__ ("nop")

/* Memory barriers.  Note the expansions deliberately omit the trailing
 * semicolon so these can be used safely in if/else bodies.
 */
#define membar(type)	__asm__ __volatile__ ("membar " type : : : "memory")
#define mb()		\
	membar("#LoadLoad | #LoadStore | #StoreStore | #StoreLoad")
#define rmb()		membar("#LoadLoad")
#define wmb()		membar("#StoreStore")
#define set_mb(__var, __value) \
	do { __var = __value; membar("#StoreLoad | #StoreStore"); } while(0)
#define set_wmb(__var, __value) \
	do { __var = __value; membar("#StoreStore"); } while(0)

#ifdef CONFIG_SMP
#define smp_mb()	mb()
#define smp_rmb()	rmb()
#define smp_wmb()	wmb()
#else
#define smp_mb()	__asm__ __volatile__("":::"memory")
#define smp_rmb()	__asm__ __volatile__("":::"memory")
#define smp_wmb()	__asm__ __volatile__("":::"memory")
#endif

#define flushi(addr)	__asm__ __volatile__ ("flush %0" : : "r" (addr) : "memory")

#define flushw_all()	__asm__ __volatile__("flushw")
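/* Usage sketch (illustrative only, guarded out of compilation): the
 * canonical pairing of the PIL macros above for a short critical section.
 * The function and the "shared state" it protects are hypothetical.
 */
#if 0
static void example_critical_section(void)
{
	unsigned long flags;

	local_irq_save(flags);		/* raise PIL to 15, saving old level */
	/* ... touch state shared with interrupt handlers ... */
	local_irq_restore(flags);	/* return to the saved PIL */
}
#endif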
/* Performance counter register access. */
#define read_pcr(__p)	__asm__ __volatile__("rd %%pcr, %0" : "=r" (__p))
#define write_pcr(__p)	__asm__ __volatile__("wr %0, 0x0, %%pcr" : : "r" (__p))
#define read_pic(__p)	__asm__ __volatile__("rd %%pic, %0" : "=r" (__p))

#define reset_pic() \
	__asm__ __volatile__("ba,pt %xcc, 99f\n\t" \
			     ".align 64\n" \
			  "99:wr %g0, 0x0, %pic\n\t" \
			     "rd %pic, %g0")

#ifndef __ASSEMBLY__

extern void synchronize_user_stack(void);

extern void __flushw_user(void);
#define flushw_user() __flushw_user()

#define flush_user_windows flushw_user
#define flush_register_windows flushw_all
#define prepare_to_switch flushw_all

#ifndef CONFIG_DEBUG_SPINLOCK
#define CHECK_LOCKS(PREV)	do { } while(0)
#else /* CONFIG_DEBUG_SPINLOCK */
#define CHECK_LOCKS(PREV) \
if ((PREV)->thread.smp_lock_count) { \
	unsigned long rpc; \
	__asm__ __volatile__("mov %%i7, %0" : "=r" (rpc)); \
	printk(KERN_CRIT "(%s)[%d]: Sleeping with %d locks held!\n", \
	       (PREV)->comm, (PREV)->pid, \
	       (PREV)->thread.smp_lock_count); \
	printk(KERN_CRIT "(%s)[%d]: Last lock at %08x\n", \
	       (PREV)->comm, (PREV)->pid, \
	       (PREV)->thread.smp_lock_pc); \
	printk(KERN_CRIT "(%s)[%d]: Sched caller %016lx\n", \
	       (PREV)->comm, (PREV)->pid, rpc); \
}
#endif /* !(CONFIG_DEBUG_SPINLOCK) */
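/* Usage sketch (illustrative only, guarded out of compilation): sampling
 * the %pic counter pair the same way switch_to() below does.  The helper
 * name is hypothetical; the thread fields are the ones used there.
 */
#if 0
static void example_sample_counters(struct thread_struct *t)
{
	unsigned long pic;

	read_pic(pic);
	t->kernel_cntd0 += (unsigned int) pic;	/* counter 0: low 32 bits */
	t->kernel_cntd1 += pic >> 32;		/* counter 1: high 32 bits */
}
#endif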
/* See what happens when you design the chip correctly?
 *
 * We tell gcc we clobber all non-fixed-usage registers except
 * for l0/l1.  It will use one for 'next' and the other to hold
 * the output value of 'last'.  'next' is not referenced again
 * past the invocation of switch_to in the scheduler, so we need
 * not preserve its value.  Hairy, but it lets us remove 2 loads
 * and 2 stores in this critical code path.  -DaveM
 */
#define switch_to(prev, next, last)					\
do {	CHECK_LOCKS(prev);						\
	if (current->thread.flags & SPARC_FLAG_PERFCTR) {		\
		unsigned long __tmp;					\
		read_pcr(__tmp);					\
		current->thread.pcr_reg = __tmp;			\
		read_pic(__tmp);					\
		current->thread.kernel_cntd0 += (unsigned int)(__tmp);	\
		current->thread.kernel_cntd1 += ((__tmp) >> 32);	\
	}								\
	save_and_clear_fpu();						\
	/* If you are tempted to conditionalize the following */	\
	/* so that ASI is only written if it changes, think again. */	\
	__asm__ __volatile__("wr %%g0, %0, %%asi"			\
			     : : "r" (next->thread.current_ds.seg));	\
	__asm__ __volatile__(						\
	"mov	%%g6, %%g5\n\t"						\
	"wrpr	%%g0, 0x95, %%pstate\n\t"				\
	"stx	%%i6, [%%sp + 2047 + 0x70]\n\t"				\
	"stx	%%i7, [%%sp + 2047 + 0x78]\n\t"				\
	"rdpr	%%wstate, %%o5\n\t"					\
	"stx	%%o6, [%%g6 + %3]\n\t"					\
	"stb	%%o5, [%%g6 + %2]\n\t"					\
	"rdpr	%%cwp, %%o5\n\t"					\
	"stb	%%o5, [%%g6 + %5]\n\t"					\
	"mov	%1, %%g6\n\t"						\
	"ldub	[%1 + %5], %%g1\n\t"					\
	"wrpr	%%g1, %%cwp\n\t"					\
	"ldx	[%%g6 + %3], %%o6\n\t"					\
	"ldub	[%%g6 + %2], %%o5\n\t"					\
	"ldub	[%%g6 + %4], %%o7\n\t"					\
	"mov	%%g6, %%l2\n\t"						\
	"wrpr	%%o5, 0x0, %%wstate\n\t"				\
	"ldx	[%%sp + 2047 + 0x70], %%i6\n\t"				\
	"ldx	[%%sp + 2047 + 0x78], %%i7\n\t"				\
	"wrpr	%%g0, 0x94, %%pstate\n\t"				\
	"mov	%%l2, %%g6\n\t"						\
	"wrpr	%%g0, 0x96, %%pstate\n\t"				\
	"andcc	%%o7, %6, %%g0\n\t"					\
	"bne,pn	%%icc, ret_from_syscall\n\t"				\
	" mov	%%g5, %0\n\t"						\
	: "=&r" (last)							\
	: "r" (next),							\
	  "i" ((const unsigned long)(&((struct task_struct *)0)->thread.wstate)),\
	  "i" ((const unsigned long)(&((struct task_struct *)0)->thread.ksp)),	\
	  "i" ((const unsigned long)(&((struct task_struct *)0)->thread.flags)),\
	  "i" ((const unsigned long)(&((struct task_struct *)0)->thread.cwp)),	\
	  "i" (SPARC_FLAG_NEWCHILD)					\
	: "cc", "g1", "g2", "g3", "g5", "g7",				\
	  "l2", "l3", "l4", "l5", "l6", "l7",				\
	  "i0", "i1", "i2", "i3", "i4", "i5",				\
	  "o0", "o1", "o2", "o3", "o4", "o5", "o7");			\
	/* If you change this, update the ret_from_syscall code too. */	\
	if (current->thread.flags & SPARC_FLAG_PERFCTR) {		\
		write_pcr(current->thread.pcr_reg);			\
		reset_pic();						\
	}								\
} while(0)

extern __inline__ unsigned long xchg32(__volatile__ unsigned int *m, unsigned int val)
{
	__asm__ __volatile__(
"	mov		%0, %%g5\n"
"1:	lduw		[%2], %%g7\n"
"	cas		[%2], %%g7, %0\n"
"	cmp		%%g7, %0\n"
"	bne,a,pn	%%icc, 1b\n"
"	 mov		%%g5, %0\n"
"	membar		#StoreLoad | #StoreStore\n"
	: "=&r" (val)
	: "0" (val), "r" (m)
	: "g5", "g7", "cc", "memory");
	return val;
}

extern __inline__ unsigned long xchg64(__volatile__ unsigned long *m, unsigned long val)
{
	__asm__ __volatile__(
"	mov		%0, %%g5\n"
"1:	ldx		[%2], %%g7\n"
"	casx		[%2], %%g7, %0\n"
"	cmp		%%g7, %0\n"
"	bne,a,pn	%%xcc, 1b\n"
"	 mov		%%g5, %0\n"
"	membar		#StoreLoad | #StoreStore\n"
	: "=&r" (val)
	: "0" (val), "r" (m)
	: "g5", "g7", "cc", "memory");
	return val;
}

#define xchg(ptr,x) ((__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr))))
#define tas(ptr) (xchg((ptr),1))

/* This function doesn't exist, so a link error flags any
   xchg() on a type whose size isn't handled below.  */
extern void __xchg_called_with_bad_pointer(void);

static __inline__ unsigned long __xchg(unsigned long x, __volatile__ void * ptr,
				       int size)
{
	switch (size) {
	case 4:
		return xchg32(ptr, x);
	case 8:
		return xchg64(ptr, x);
	}
	__xchg_called_with_bad_pointer();
	return x;
}

extern void die_if_kernel(char *str, struct pt_regs *regs) __attribute__ ((noreturn));
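/* Usage sketch (illustrative only, guarded out of compilation): tas() is
 * the test-and-set spelling of xchg(ptr, 1); the old value tells you
 * whether the word was already set.  The lock word here is hypothetical.
 */
#if 0
static int example_trylock(volatile unsigned int *lock)
{
	/* Old value 0 means we acquired the lock; nonzero means busy. */
	return tas(lock) == 0;
}
#endif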
/*
 * Atomic compare and exchange.  Compare OLD with MEM, if identical,
 * store NEW in MEM.  Return the initial value in MEM.  Success is
 * indicated by comparing RETURN with OLD.
 */

#define __HAVE_ARCH_CMPXCHG 1

extern __inline__ unsigned long
__cmpxchg_u32(volatile int *m, int old, int new)
{
	__asm__ __volatile__("cas [%2], %3, %0\n\t"
			     "membar #StoreLoad | #StoreStore"
			     : "=&r" (new)
			     : "0" (new), "r" (m), "r" (old)
			     : "memory");

	return new;
}

extern __inline__ unsigned long
__cmpxchg_u64(volatile long *m, unsigned long old, unsigned long new)
{
	__asm__ __volatile__("casx [%2], %3, %0\n\t"
			     "membar #StoreLoad | #StoreStore"
			     : "=&r" (new)
			     : "0" (new), "r" (m), "r" (old)
			     : "memory");

	return new;
}

/* This function doesn't exist, so you'll get a linker error
   if something tries to do an invalid cmpxchg().  */
extern void __cmpxchg_called_with_bad_pointer(void);

static __inline__ unsigned long
__cmpxchg(volatile void *ptr, unsigned long old, unsigned long new, int size)
{
	switch (size) {
	case 4:
		return __cmpxchg_u32(ptr, old, new);
	case 8:
		return __cmpxchg_u64(ptr, old, new);
	}
	__cmpxchg_called_with_bad_pointer();
	return old;
}

#define cmpxchg(ptr,o,n)						 \
  ({									 \
     __typeof__(*(ptr)) _o_ = (o);					 \
     __typeof__(*(ptr)) _n_ = (n);					 \
     (__typeof__(*(ptr))) __cmpxchg((ptr), (unsigned long)_o_,		 \
				    (unsigned long)_n_, sizeof(*(ptr))); \
  })

#endif /* !(__ASSEMBLY__) */

#endif /* !(__SPARC64_SYSTEM_H) */
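/* Usage sketch (illustrative only, guarded out of compilation): a
 * lock-free increment built on cmpxchg().  Success is detected by
 * comparing the returned value with the expected old value, exactly as
 * the comment above cmpxchg() describes.
 */
#if 0
static void example_atomic_inc(volatile int *p)
{
	int old, new;

	do {
		old = *p;		/* snapshot the current value */
		new = old + 1;
	} while (cmpxchg(p, old, new) != old);	/* retry if someone raced us */
}
#endif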