#ifndef __SPARC_SYSTEM_H
#define __SPARC_SYSTEM_H

#include <linux/kernel.h>
#include <linux/threads.h>	/* NR_CPUS */
#include <linux/thread_info.h>

#include <asm/page.h>
#include <asm/psr.h>
#include <asm/ptrace.h>
#include <asm/btfixup.h>
#include <asm/smp.h>

#ifndef __ASSEMBLY__

#include <linux/irqflags.h>

static inline unsigned int probe_irq_mask(unsigned long val)
{
	return 0;
}

/*
 * Sparc (general) CPU types
 */
enum sparc_cpu {
	sun4		= 0x00,
	sun4c		= 0x01,
	sun4m		= 0x02,
	sun4d		= 0x03,
	sun4e		= 0x04,
	sun4u		= 0x05, /* V8 ploos ploos */
	sun_unknown	= 0x06,
	ap1000		= 0x07, /* almost a sun4m */
	sparc_leon	= 0x08, /* Leon SoC */
};

/* Really, userland should not be looking at any of this... */
#ifdef __KERNEL__

extern enum sparc_cpu sparc_cpu_model;

#define ARCH_SUN4C (sparc_cpu_model == sun4c)

#define SUN4M_NCPUS 4		/* Architectural limit of sun4m. */

extern char reboot_command[];

extern struct thread_info *current_set[NR_CPUS];

extern unsigned long empty_bad_page;
extern unsigned long empty_bad_page_table;
extern unsigned long empty_zero_page;

extern void sun_do_break(void);
extern int serial_console;
extern int stop_a_enabled;
extern int scons_pwroff;

static inline int con_is_present(void)
{
	return serial_console ? 0 : 1;
}

/* When a context switch happens we must flush all user windows so that
 * the windows of the current process are flushed onto its stack. This
 * way the windows are all clean for the next process and the stack
 * frames are up to date.
 */
extern void flush_user_windows(void);
extern void kill_user_windows(void);
extern void synchronize_user_stack(void);
extern void fpsave(unsigned long *fpregs, unsigned long *fsr,
		   void *fpqueue, unsigned long *fpqdepth);

#ifdef CONFIG_SMP
#define SWITCH_ENTER(prv) \
do { \
	if (test_tsk_thread_flag(prv, TIF_USEDFPU)) { \
		put_psr(get_psr() | PSR_EF); \
		fpsave(&(prv)->thread.float_regs[0], &(prv)->thread.fsr, \
		       &(prv)->thread.fpqueue[0], &(prv)->thread.fpqdepth); \
		clear_tsk_thread_flag(prv, TIF_USEDFPU); \
		(prv)->thread.kregs->psr &= ~PSR_EF; \
	} \
} while (0)

#define SWITCH_DO_LAZY_FPU(next)	/* */
#else
#define SWITCH_ENTER(prv)		/* */
#define SWITCH_DO_LAZY_FPU(nxt) \
do { \
	if (last_task_used_math != (nxt)) \
		(nxt)->thread.kregs->psr &= ~PSR_EF; \
} while (0)
#endif

extern void flushw_all(void);

#define prepare_arch_switch(next) do { \
	__asm__ __volatile__( \
	".globl\tflush_patch_switch\nflush_patch_switch:\n\t" \
	"save %sp, -0x40, %sp; save %sp, -0x40, %sp; save %sp, -0x40, %sp\n\t" \
	"save %sp, -0x40, %sp; save %sp, -0x40, %sp; save %sp, -0x40, %sp\n\t" \
	"save %sp, -0x40, %sp\n\t" \
	"restore; restore; restore; restore; restore; restore; restore"); \
} while (0)

/* Much care has gone into this code, do not touch it.
 *
 * We need to load up regs l0/l1 for the newly forked child case, because
 * the trap return path relies on those registers holding certain values;
 * gcc is told that they are clobbered.  Gcc needs registers for 3 values
 * in and 1 value out, so we clobber every non-fixed-usage register
 * besides l2/l3/o4/o5.  -DaveM
 *
 * Hey Dave, that do not touch sign is too much of an incentive
 * - Anton & Pete
 */
#define switch_to(prev, next, last) do { \
	SWITCH_ENTER(prev); \
	SWITCH_DO_LAZY_FPU(next); \
	cpumask_set_cpu(smp_processor_id(), mm_cpumask(next->active_mm)); \
	__asm__ __volatile__( \
	"sethi	%%hi(here - 0x8), %%o7\n\t" \
	"mov	%%g6, %%g3\n\t" \
	"or	%%o7, %%lo(here - 0x8), %%o7\n\t" \
	"rd	%%psr, %%g4\n\t" \
	"std	%%sp, [%%g6 + %4]\n\t" \
	"rd	%%wim, %%g5\n\t" \
	"wr	%%g4, 0x20, %%psr\n\t" \
	"nop\n\t" \
	"std	%%g4, [%%g6 + %3]\n\t" \
	"ldd	[%2 + %3], %%g4\n\t" \
	"mov	%2, %%g6\n\t" \
	".globl	patchme_store_new_current\n" \
"patchme_store_new_current:\n\t" \
	"st	%2, [%1]\n\t" \
	"wr	%%g4, 0x20, %%psr\n\t" \
	"nop\n\t" \
	"nop\n\t" \
	"nop\n\t"	/* LEON needs all 3 nops: load to %sp depends on CWP. */ \
	"ldd	[%%g6 + %4], %%sp\n\t" \
	"wr	%%g5, 0x0, %%wim\n\t" \
	"ldd	[%%sp + 0x00], %%l0\n\t" \
	"ldd	[%%sp + 0x38], %%i6\n\t" \
	"wr	%%g4, 0x0, %%psr\n\t" \
	"nop\n\t" \
	"nop\n\t" \
	"jmpl	%%o7 + 0x8, %%g0\n\t" \
	" ld	[%%g3 + %5], %0\n\t" \
	"here:\n" \
	: "=&r" (last) \
	: "r" (&(current_set[hard_smp_processor_id()])), \
	  "r" (task_thread_info(next)), \
	  "i" (TI_KPSR), \
	  "i" (TI_KSP), \
	  "i" (TI_TASK) \
	: "g1", "g2", "g3", "g4", "g5", "g7", \
	  "l0", "l1", "l3", "l4", "l5", "l6", "l7", \
	  "i0", "i1", "i2", "i3", "i4", "i5", \
	  "o0", "o1", "o2", "o3", "o7"); \
} while (0)

#define mb()	__asm__ __volatile__ ("" : : : "memory")
#define rmb()	mb()
#define wmb()	mb()
#define read_barrier_depends()		do { } while (0)
#define set_mb(__var, __value)		do { __var = __value; mb(); } while (0)
#define smp_mb()	__asm__ __volatile__ ("" : : : "memory")
#define smp_rmb()	__asm__ __volatile__ ("" : : : "memory")
#define smp_wmb()	__asm__ __volatile__ ("" : : : "memory")
#define smp_read_barrier_depends()	do { } while (0)

#define nop()	__asm__ __volatile__ ("nop")

/* This has special calling conventions */
#ifndef CONFIG_SMP
BTFIXUPDEF_CALL(void, ___xchg32, void)
#endif

static inline unsigned long xchg_u32(__volatile__ unsigned long *m, unsigned long val)
{
#ifdef CONFIG_SMP
	__asm__ __volatile__("swap [%2], %0"
			     : "=&r" (val)
			     : "0" (val), "r" (m)
			     : "memory");
	return val;
#else
	register unsigned long *ptr asm("g1");
	register unsigned long ret asm("g2");

	ptr = (unsigned long *) m;
	ret = val;

	/* Note: this is magic and the nop there is
	   really needed. */
	__asm__ __volatile__(
	"mov	%%o7, %%g4\n\t"
	"call	___f____xchg32\n\t"
	" nop\n\t"
	: "=&r" (ret)
	: "0" (ret), "r" (ptr)
	: "g3", "g4", "g7", "memory", "cc");

	return ret;
#endif
}

#define xchg(ptr, x) \
	((__typeof__(*(ptr)))__xchg((unsigned long)(x), (ptr), sizeof(*(ptr))))

extern void __xchg_called_with_bad_pointer(void);

static inline unsigned long __xchg(unsigned long x, __volatile__ void * ptr, int size)
{
	switch (size) {
	case 4:
		return xchg_u32(ptr, x);
	}
	__xchg_called_with_bad_pointer();
	return x;
}
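/* Illustrative sketch only (not part of this header's interface): xchg()
 * atomically stores a new value into a 4-byte word and returns the old
 * contents, so a single-word flag can be handed off without taking a
 * lock.  The variable "pending_mask" and the helper below are
 * hypothetical examples; note that __xchg() above supports only 4-byte
 * operands, which unsigned long satisfies on sparc32.
 *
 *	static unsigned long pending_mask;
 *
 *	static unsigned long grab_pending_mask(void)
 *	{
 *		return xchg(&pending_mask, 0UL);  (old value; word is now 0)
 *	}
 */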
/* Emulate cmpxchg() the same way we emulate atomics,
 * by hashing the object address and indexing into an array
 * of spinlocks to get a bit of performance...
 *
 * See arch/sparc/lib/atomic32.c for the implementation.
 *
 * Cribbed from <asm-parisc/atomic.h>
 */
#define __HAVE_ARCH_CMPXCHG	1

/* Bug catcher for when an unsupported size is used - won't link. */
extern void __cmpxchg_called_with_bad_pointer(void);
/* We only need to support cmpxchg of a u32 on sparc. */
extern unsigned long __cmpxchg_u32(volatile u32 *m, u32 old, u32 new_);

/* Don't worry... the optimizer will get rid of most of this. */
static inline unsigned long
__cmpxchg(volatile void *ptr, unsigned long old, unsigned long new_, int size)
{
	switch (size) {
	case 4:
		return __cmpxchg_u32((volatile u32 *)ptr, (u32)old, (u32)new_);
	default:
		__cmpxchg_called_with_bad_pointer();
		break;
	}
	return old;
}

#define cmpxchg(ptr, o, n)						\
({									\
	__typeof__(*(ptr)) _o_ = (o);					\
	__typeof__(*(ptr)) _n_ = (n);					\
	(__typeof__(*(ptr))) __cmpxchg((ptr), (unsigned long)_o_,	\
			(unsigned long)_n_, sizeof(*(ptr)));		\
})
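/* Illustrative sketch only: the usual compare-and-swap retry loop built
 * on cmpxchg().  cmpxchg() returns the value it actually found at *ptr,
 * so the update took effect iff the returned value equals the expected
 * old value.  "bounded_inc" is a hypothetical helper, and per __cmpxchg()
 * above only u32-sized operands are supported on sparc.
 *
 *	static u32 bounded_inc(u32 *ctr, u32 limit)
 *	{
 *		u32 old, new_;
 *
 *		do {
 *			old = *ctr;
 *			new_ = (old < limit) ? old + 1 : old;
 *		} while (cmpxchg(ctr, old, new_) != old);
 *
 *		return new_;
 *	}
 */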
#include <asm-generic/cmpxchg-local.h>

/*
 * cmpxchg_local and cmpxchg64_local are atomic wrt the current CPU. Always
 * make them available.
 */
#define cmpxchg_local(ptr, o, n)					       \
	((__typeof__(*(ptr)))__cmpxchg_local_generic((ptr), (unsigned long)(o),\
			(unsigned long)(n), sizeof(*(ptr))))
#define cmpxchg64_local(ptr, o, n) __cmpxchg64_local_generic((ptr), (o), (n))

extern void die_if_kernel(char *str, struct pt_regs *regs) __attribute__ ((noreturn));

#endif /* __KERNEL__ */

#endif /* __ASSEMBLY__ */

#define arch_align_stack(x) (x)

#endif /* !(__SPARC_SYSTEM_H) */