#ifndef __SPARC64_SYSTEM_H
#define __SPARC64_SYSTEM_H

#include <asm/ptrace.h>
#include <asm/processor.h>
#include <asm/visasm.h>

#ifndef __ASSEMBLY__

#include <linux/irqflags.h>
#include <asm-generic/cmpxchg-local.h>

/*
 * Sparc (general) CPU types
 */
enum sparc_cpu {
        sun4        = 0x00,
        sun4c       = 0x01,
        sun4m       = 0x02,
        sun4d       = 0x03,
        sun4e       = 0x04,
        sun4u       = 0x05, /* V8 ploos ploos */
        sun_unknown = 0x06,
        ap1000      = 0x07, /* almost a sun4m */
};

#define sparc_cpu_model sun4u

/* This cannot ever be a sun4c :) That's just history. */
#define ARCH_SUN4C 0

extern const char *sparc_cpu_type;
extern const char *sparc_fpu_type;
extern const char *sparc_pmu_type;

extern char reboot_command[];

#define membar_safe(type) \
do {    __asm__ __volatile__("ba,pt    %%xcc, 1f\n\t" \
                             " membar  " type "\n" \
                             "1:\n" \
                             : : : "memory"); \
} while (0)

/* The kernel always executes in TSO memory model these days,
 * and furthermore most sparc64 chips implement more stringent
 * memory ordering than required by the specifications.
 */
#define mb()    membar_safe("#StoreLoad")
#define rmb()   __asm__ __volatile__("":::"memory")
#define wmb()   __asm__ __volatile__("":::"memory")
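
/* Usage sketch (editorial example, not part of the original header): under
 * TSO, loads are already ordered against loads and stores against stores,
 * so rmb() and wmb() only need to stop the compiler from reordering.  The
 * one reordering TSO does allow is a later load passing an earlier store,
 * which is why mb() (and set_mb() below) emit a real #StoreLoad membar:
 *
 *      *slot = msg;            // store made visible to the other CPU
 *      mb();                   // keep the store ahead of the next load
 *      seen = *flag;           // load
 *
 * 'slot', 'msg', 'flag' and 'seen' are hypothetical names used only to
 * illustrate the store-then-load pattern.
 */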

#endif

#define nop()           __asm__ __volatile__ ("nop")

#define read_barrier_depends()          do { } while(0)
#define set_mb(__var, __value) \
        do { __var = __value; membar_safe("#StoreLoad"); } while(0)

#ifdef CONFIG_SMP
#define smp_mb()        mb()
#define smp_rmb()       rmb()
#define smp_wmb()       wmb()
#else
#define smp_mb()        __asm__ __volatile__("":::"memory")
#define smp_rmb()       __asm__ __volatile__("":::"memory")
#define smp_wmb()       __asm__ __volatile__("":::"memory")
#endif

#define smp_read_barrier_depends()      do { } while(0)

#define flushi(addr)    __asm__ __volatile__ ("flush %0" : : "r" (addr) : "memory")

#define flushw_all()    __asm__ __volatile__("flushw")

/* Performance counter register access. */
#define read_pcr(__p)  __asm__ __volatile__("rd %%pcr, %0" : "=r" (__p))
#define write_pcr(__p) __asm__ __volatile__("wr %0, 0x0, %%pcr" : : "r" (__p))
#define read_pic(__p)  __asm__ __volatile__("rd %%pic, %0" : "=r" (__p))

#define write_pic(__p)                                          \
        __asm__ __volatile__("ba,pt     %%xcc, 99f\n\t"         \
                             " nop\n\t"                         \
                             ".align    64\n"                   \
                          "99:wr        %0, 0x0, %%pic\n\t"     \
                             "rd        %%pic, %%g0" : : "r" (__p))
#define reset_pic()     write_pic(0)

#ifndef __ASSEMBLY__

extern void sun_do_break(void);
extern int stop_a_enabled;
extern int scons_pwroff;

extern void fault_in_user_windows(void);
extern void synchronize_user_stack(void);

extern void __flushw_user(void);
#define flushw_user() __flushw_user()

#define flush_user_windows flushw_user
#define flush_register_windows flushw_all

/* Don't hold the runqueue lock over context switch */
#define __ARCH_WANT_UNLOCKED_CTXSW
#define prepare_arch_switch(next)               \
do {                                            \
        flushw_all();                           \
} while (0)

        /* See what happens when you design the chip correctly?
         *
         * We tell gcc we clobber all non-fixed-usage registers except
         * for l0/l1.  It will use one for 'next' and the other to hold
         * the output value of 'last'.  'next' is not referenced again
         * past the invocation of switch_to in the scheduler, so we need
         * not preserve its value.  Hairy, but it lets us remove 2 loads
         * and 2 stores in this critical code path.  -DaveM
         */
#define switch_to(prev, next, last)                                     \
do {    flush_tlb_pending();                                            \
        save_and_clear_fpu();                                           \
        /* If you are tempted to conditionalize the following */        \
        /* so that ASI is only written if it changes, think again. */   \
        __asm__ __volatile__("wr %%g0, %0, %%asi"                       \
        : : "r" (__thread_flag_byte_ptr(task_thread_info(next))[TI_FLAG_BYTE_CURRENT_DS]));\
        trap_block[current_thread_info()->cpu].thread =                 \
                task_thread_info(next);                                 \
        __asm__ __volatile__(                                           \
        "mov    %%g4, %%g7\n\t"                                         \
        "stx    %%i6, [%%sp + 2047 + 0x70]\n\t"                         \
        "stx    %%i7, [%%sp + 2047 + 0x78]\n\t"                         \
        "rdpr   %%wstate, %%o5\n\t"                                     \
        "stx    %%o6, [%%g6 + %6]\n\t"                                  \
        "stb    %%o5, [%%g6 + %5]\n\t"                                  \
        "rdpr   %%cwp, %%o5\n\t"                                        \
        "stb    %%o5, [%%g6 + %8]\n\t"                                  \
        "wrpr   %%g0, 15, %%pil\n\t"                                    \
        "mov    %4, %%g6\n\t"                                           \
        "ldub   [%4 + %8], %%g1\n\t"                                    \
        "wrpr   %%g1, %%cwp\n\t"                                        \
        "ldx    [%%g6 + %6], %%o6\n\t"                                  \
        "ldub   [%%g6 + %5], %%o5\n\t"                                  \
        "ldub   [%%g6 + %7], %%o7\n\t"                                  \
        "wrpr   %%o5, 0x0, %%wstate\n\t"                                \
        "ldx    [%%sp + 2047 + 0x70], %%i6\n\t"                         \
        "ldx    [%%sp + 2047 + 0x78], %%i7\n\t"                         \
        "ldx    [%%g6 + %9], %%g4\n\t"                                  \
        "wrpr   %%g0, 14, %%pil\n\t"                                    \
        "brz,pt %%o7, switch_to_pc\n\t"                                 \
        " mov   %%g7, %0\n\t"                                           \
        "sethi  %%hi(ret_from_syscall), %%g1\n\t"                       \
        "jmpl   %%g1 + %%lo(ret_from_syscall), %%g0\n\t"                \
        " nop\n\t"                                                      \
        ".globl switch_to_pc\n\t"                                       \
        "switch_to_pc:\n\t"                                             \
        : "=&r" (last), "=r" (current), "=r" (current_thread_info_reg), \
          "=r" (__local_per_cpu_offset)                                 \
        : "0" (task_thread_info(next)),                                 \
          "i" (TI_WSTATE), "i" (TI_KSP), "i" (TI_NEW_CHILD),            \
          "i" (TI_CWP), "i" (TI_TASK)                                   \
        : "cc",                                                         \
                "g1", "g2", "g3",                   "g7",               \
                      "l1", "l2", "l3", "l4", "l5", "l6", "l7",         \
          "i0", "i1", "i2", "i3", "i4", "i5",                           \
          "o0", "o1", "o2", "o3", "o4", "o5",       "o7");              \
} while(0)

static inline unsigned long xchg32(__volatile__ unsigned int *m, unsigned int val)
{
        unsigned long tmp1, tmp2;

        __asm__ __volatile__(
"       mov             %0, %1\n"
"1:     lduw            [%4], %2\n"
"       cas             [%4], %2, %0\n"
"       cmp             %2, %0\n"
"       bne,a,pn        %%icc, 1b\n"
"        mov            %1, %0\n"
        : "=&r" (val), "=&r" (tmp1), "=&r" (tmp2)
        : "0" (val), "r" (m)
        : "cc", "memory");
        return val;
}

static inline unsigned long xchg64(__volatile__ unsigned long *m, unsigned long val)
{
        unsigned long tmp1, tmp2;

        __asm__ __volatile__(
"       mov             %0, %1\n"
"1:     ldx             [%4], %2\n"
"       casx            [%4], %2, %0\n"
"       cmp             %2, %0\n"
"       bne,a,pn        %%xcc, 1b\n"
"        mov            %1, %0\n"
        : "=&r" (val), "=&r" (tmp1), "=&r" (tmp2)
        : "0" (val), "r" (m)
        : "cc", "memory");
        return val;
}

#define xchg(ptr,x) ((__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr))))

extern void __xchg_called_with_bad_pointer(void);

static inline unsigned long __xchg(unsigned long x, __volatile__ void * ptr,
                                   int size)
{
        switch (size) {
        case 4:
                return xchg32(ptr, x);
        case 8:
                return xchg64(ptr, x);
        }
        __xchg_called_with_bad_pointer();
        return x;
}

extern void die_if_kernel(char *str, struct pt_regs *regs) __attribute__ ((noreturn));

/*
 * Atomic compare and exchange.  Compare OLD with MEM, if identical,
 * store NEW in MEM.  Return the initial value in MEM.  Success is
 * indicated by comparing RETURN with OLD.
 */

#define __HAVE_ARCH_CMPXCHG 1

static inline unsigned long
__cmpxchg_u32(volatile int *m, int old, int new)
{
        __asm__ __volatile__("cas [%2], %3, %0"
                             : "=&r" (new)
                             : "0" (new), "r" (m), "r" (old)
                             : "memory");

        return new;
}

static inline unsigned long
__cmpxchg_u64(volatile long *m, unsigned long old, unsigned long new)
{
        __asm__ __volatile__("casx [%2], %3, %0"
                             : "=&r" (new)
                             : "0" (new), "r" (m), "r" (old)
                             : "memory");

        return new;
}

/* This function doesn't exist, so you'll get a linker error
   if something tries to do an invalid cmpxchg().  */
extern void __cmpxchg_called_with_bad_pointer(void);

static inline unsigned long
__cmpxchg(volatile void *ptr, unsigned long old, unsigned long new, int size)
{
        switch (size) {
        case 4:
                return __cmpxchg_u32(ptr, old, new);
        case 8:
                return __cmpxchg_u64(ptr, old, new);
        }
        __cmpxchg_called_with_bad_pointer();
        return old;
}

#define cmpxchg(ptr,o,n)                                                \
  ({                                                                    \
        __typeof__(*(ptr)) _o_ = (o);                                   \
        __typeof__(*(ptr)) _n_ = (n);                                   \
        (__typeof__(*(ptr))) __cmpxchg((ptr), (unsigned long)_o_,       \
                                       (unsigned long)_n_, sizeof(*(ptr))); \
  })
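
/* Usage sketch (editorial example, not part of the original header): a
 * lock-free read-modify-write loop built on cmpxchg().  Success is
 * detected by comparing the returned value with the expected old value,
 * exactly as described in the comment above.  'counter' is a hypothetical
 * variable:
 *
 *      unsigned int old, new;
 *      do {
 *              old = counter;
 *              new = old + 1;
 *      } while (cmpxchg(&counter, old, new) != old);
 */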

/*
 * cmpxchg_local and cmpxchg64_local are atomic wrt current CPU.  Always make
 * them available.
 */

static inline unsigned long __cmpxchg_local(volatile void *ptr,
                                            unsigned long old,
                                            unsigned long new, int size)
{
        switch (size) {
        case 4:
        case 8: return __cmpxchg(ptr, old, new, size);
        default:
                return __cmpxchg_local_generic(ptr, old, new, size);
        }

        return old;
}

#define cmpxchg_local(ptr, o, n)                                        \
        ((__typeof__(*(ptr)))__cmpxchg_local((ptr), (unsigned long)(o), \
                        (unsigned long)(n), sizeof(*(ptr))))
#define cmpxchg64_local(ptr, o, n)                                      \
  ({                                                                    \
        BUILD_BUG_ON(sizeof(*(ptr)) != 8);                              \
        cmpxchg_local((ptr), (o), (n));                                 \
  })

#endif /* !(__ASSEMBLY__) */

#define arch_align_stack(x) (x)

#endif /* !(__SPARC64_SYSTEM_H) */