/* $Id: system.h,v 1.1.1.1 2008/10/15 03:29:17 james26_jang Exp $ */
#include <linux/config.h>

#ifndef __SPARC_SYSTEM_H
#define __SPARC_SYSTEM_H

#include <linux/kernel.h>

#include <asm/segment.h>

#ifdef __KERNEL__
#include <asm/page.h>
#include <asm/oplib.h>
#include <asm/psr.h>
#include <asm/ptrace.h>
#include <asm/btfixup.h>

#endif /* __KERNEL__ */

#ifndef __ASSEMBLY__

/*
 * Sparc (general) CPU types
 */
enum sparc_cpu {
	sun4        = 0x00,
	sun4c       = 0x01,
	sun4m       = 0x02,
	sun4d       = 0x03,
	sun4e       = 0x04,
	sun4u       = 0x05, /* V8 ploos ploos */
	sun_unknown = 0x06,
	ap1000      = 0x07, /* almost a sun4m */
};

/* Really, userland should not be looking at any of this... */
#ifdef __KERNEL__

extern enum sparc_cpu sparc_cpu_model;

#ifndef CONFIG_SUN4
#define ARCH_SUN4C_SUN4	(sparc_cpu_model == sun4c)
#define ARCH_SUN4	0
#else
#define ARCH_SUN4C_SUN4	1
#define ARCH_SUN4	1
#endif

#define SUN4M_NCPUS	4	/* Architectural limit of sun4m. */

extern unsigned long empty_bad_page;
extern unsigned long empty_bad_page_table;
extern unsigned long empty_zero_page;

extern struct linux_romvec *romvec;
#define halt() romvec->pv_halt()

/* When a context switch happens we must flush all user windows so that
 * the windows of the current process are flushed onto its stack. This
 * way the windows are all clean for the next process and the stack
 * frames are up to date.
 */
extern void flush_user_windows(void);
extern void kill_user_windows(void);
extern void synchronize_user_stack(void);
extern void fpsave(unsigned long *fpregs, unsigned long *fsr,
		   void *fpqueue, unsigned long *fpqdepth);

#ifdef CONFIG_SMP
#define SWITCH_ENTER \
	if(prev->flags & PF_USEDFPU) { \
		put_psr(get_psr() | PSR_EF); \
		fpsave(&prev->thread.float_regs[0], &prev->thread.fsr, \
		       &prev->thread.fpqueue[0], &prev->thread.fpqdepth); \
		prev->flags &= ~PF_USEDFPU; \
		prev->thread.kregs->psr &= ~PSR_EF; \
	}

#define SWITCH_DO_LAZY_FPU
#else
#define SWITCH_ENTER
#define SWITCH_DO_LAZY_FPU if(last_task_used_math != next) next->thread.kregs->psr &= ~PSR_EF;
#endif

/*
 * Flush windows so that the VM switch which follows
 * does not pull the stack out from under us.
 *
 * SWITCH_ENTER and SWITCH_DO_LAZY_FPU do not work yet (e.g. SMP does not work)
 */
#define prepare_to_switch() do { \
	__asm__ __volatile__( \
	".globl\tflush_patch_switch\nflush_patch_switch:\n\t" \
	"save %sp, -0x40, %sp; save %sp, -0x40, %sp; save %sp, -0x40, %sp\n\t" \
	"save %sp, -0x40, %sp; save %sp, -0x40, %sp; save %sp, -0x40, %sp\n\t" \
	"save %sp, -0x40, %sp\n\t" \
	"restore; restore; restore; restore; restore; restore; restore"); \
} while(0)

	/* Much care has gone into this code, do not touch it.
	 *
	 * We need to load up regs l0/l1 for the newly forked child
	 * case because the trap return path relies on those registers
	 * holding certain values, gcc is told that they are clobbered.
	 * Gcc needs registers for 3 values in and 1 value out, so we
	 * clobber every non-fixed-usage register besides l2/l3/o4/o5.  -DaveM
	 *
	 * Hey Dave, that "do not touch" sign is too much of an incentive
	 * - Anton
	 */
#define switch_to(prev, next, last) do { \
	__label__ here; \
	register unsigned long task_pc asm("o7"); \
	extern struct task_struct *current_set[NR_CPUS]; \
	SWITCH_ENTER \
	SWITCH_DO_LAZY_FPU \
	next->active_mm->cpu_vm_mask |= (1 << smp_processor_id()); \
	task_pc = ((unsigned long) &&here) - 0x8; \
	__asm__ __volatile__( \
	"mov %%g6, %%g3\n\t" \
	"rd %%psr, %%g4\n\t" \
	"std %%sp, [%%g6 + %4]\n\t" \
	"rd %%wim, %%g5\n\t" \
	"wr %%g4, 0x20, %%psr\n\t" \
	"nop\n\t" \
	"std %%g4, [%%g6 + %3]\n\t" \
	"ldd [%2 + %3], %%g4\n\t" \
	"mov %2, %%g6\n\t" \
	".globl patchme_store_new_current\n" \
"patchme_store_new_current:\n\t" \
	"st %2, [%1]\n\t" \
	"wr %%g4, 0x20, %%psr\n\t" \
	"nop\n\t" \
	"nop\n\t" \
	"ldd [%%g6 + %4], %%sp\n\t" \
	"wr %%g5, 0x0, %%wim\n\t" \
	"ldd [%%sp + 0x00], %%l0\n\t" \
	"ldd [%%sp + 0x38], %%i6\n\t" \
	"wr %%g4, 0x0, %%psr\n\t" \
	"nop\n\t" \
	"nop\n\t" \
	"jmpl %%o7 + 0x8, %%g0\n\t" \
	" mov %%g3, %0\n\t" \
	: "=&r" (last) \
	: "r" (&(current_set[hard_smp_processor_id()])), "r" (next), \
	  "i" ((const unsigned long)(&((struct task_struct *)0)->thread.kpsr)), \
	  "i" ((const unsigned long)(&((struct task_struct *)0)->thread.ksp)), \
	  "r" (task_pc) \
	: "g1", "g2", "g3", "g4", "g5", "g7", "l0", "l1", \
	  "l4", "l5", "l6", "l7", "i0", "i1", "i2", "i3", "i4", "i5", "o0", "o1", "o2", \
	  "o3"); \
here:; } while(0)

/*
 * Changing the IRQ level on the Sparc.
 */
extern __inline__ void setipl(unsigned long __orig_psr)
{
	__asm__ __volatile__(
		"wr %0, 0x0, %%psr\n\t"
		"nop; nop; nop\n"
		: /* no outputs */
		: "r" (__orig_psr)
		: "memory", "cc");
}

extern __inline__ void __cli(void)
{
	unsigned long tmp;

	__asm__ __volatile__(
		"rd %%psr, %0\n\t"
		"nop; nop; nop;\n\t"	/* Sun4m + Cypress + SMP bug */
		"or %0, %1, %0\n\t"
		"wr %0, 0x0, %%psr\n\t"
		"nop; nop; nop\n"
		: "=r" (tmp)
		: "i" (PSR_PIL)
		: "memory");
}

extern __inline__ void __sti(void)
{
	unsigned long tmp;

	__asm__ __volatile__(
		"rd %%psr, %0\n\t"
		"nop; nop; nop;\n\t"	/* Sun4m + Cypress + SMP bug */
		"andn %0, %1, %0\n\t"
		"wr %0, 0x0, %%psr\n\t"
		"nop; nop; nop\n"
		: "=r" (tmp)
		: "i" (PSR_PIL)
		: "memory");
}

extern __inline__ unsigned long getipl(void)
{
	unsigned long retval;

	__asm__ __volatile__("rd %%psr, %0" : "=r" (retval));
	return retval;
}

extern __inline__ unsigned long swap_pil(unsigned long __new_psr)
{
	unsigned long retval;

	__asm__ __volatile__(
		"rd %%psr, %0\n\t"
		"nop; nop; nop;\n\t"	/* Sun4m + Cypress + SMP bug */
		"and %0, %2, %%g1\n\t"
		"and %1, %2, %%g2\n\t"
		"xorcc %%g1, %%g2, %%g0\n\t"
		"be 1f\n\t"
		" nop\n\t"
		"wr %0, %2, %%psr\n\t"
		"nop; nop; nop;\n"
		"1:\n"
		: "=r" (retval)
		: "r" (__new_psr), "i" (PSR_PIL)
		: "g1", "g2", "memory", "cc");

	return retval;
}

extern __inline__ unsigned long read_psr_and_cli(void)
{
	unsigned long retval;

	__asm__ __volatile__(
		"rd %%psr, %0\n\t"
		"nop; nop; nop;\n\t"	/* Sun4m + Cypress + SMP bug */
		"or %0, %1, %%g1\n\t"
		"wr %%g1, 0x0, %%psr\n\t"
		"nop; nop; nop\n\t"
		: "=r" (retval)
		: "i" (PSR_PIL)
		: "g1", "memory");

	return retval;
}

#define __save_flags(flags)	((flags) = getipl())
#define __save_and_cli(flags)	((flags) = read_psr_and_cli())
#define __restore_flags(flags)	setipl((flags))
#define local_irq_disable()	__cli()
#define local_irq_enable()	__sti()
#define local_irq_save(flags)	__save_and_cli(flags)
#define local_irq_restore(flags) __restore_flags(flags)
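
/* Illustrative sketch (the helper and its counter argument are
 * hypothetical, not a kernel API): the usual pattern for the flag-saving
 * primitives above.  local_irq_save() stashes the current %psr (with PIL
 * raised) into `flags', and local_irq_restore() writes the saved value
 * back via setipl().
 */
static __inline__ void example_irq_critical(volatile unsigned long *counter)
{
	unsigned long flags;

	local_irq_save(flags);		/* read %psr, raise PIL */
	(*counter)++;			/* work that must not race an IRQ */
	local_irq_restore(flags);	/* restore the saved %psr */
}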
#ifdef CONFIG_SMP

extern unsigned char global_irq_holder;

#define save_and_cli(flags)	do { save_flags(flags); cli(); } while(0)

extern void __global_cli(void);
extern void __global_sti(void);
extern unsigned long __global_save_flags(void);
extern void __global_restore_flags(unsigned long flags);
#define cli()			__global_cli()
#define sti()			__global_sti()
#define save_flags(flags)	((flags) = __global_save_flags())
#define restore_flags(flags)	__global_restore_flags(flags)

#else

#define cli()			__cli()
#define sti()			__sti()
#define save_flags(x)		__save_flags(x)
#define restore_flags(x)	__restore_flags(x)
#define save_and_cli(x)		__save_and_cli(x)

#endif

#define mb()	__asm__ __volatile__ ("" : : : "memory")
#define rmb()	mb()
#define wmb()	mb()
#define set_mb(__var, __value)	do { __var = __value; mb(); } while(0)
#define set_wmb(__var, __value)	set_mb(__var, __value)
#define smp_mb()	__asm__ __volatile__ ("" : : : "memory")
#define smp_rmb()	__asm__ __volatile__ ("" : : : "memory")
#define smp_wmb()	__asm__ __volatile__ ("" : : : "memory")

#define nop()	__asm__ __volatile__ ("nop")

/* This has special calling conventions */
#ifndef CONFIG_SMP
BTFIXUPDEF_CALL(void, ___xchg32, void)
#endif

extern __inline__ unsigned long xchg_u32(__volatile__ unsigned long *m, unsigned long val)
{
#ifdef CONFIG_SMP
	__asm__ __volatile__("swap [%2], %0"
			     : "=&r" (val)
			     : "0" (val), "r" (m));
	return val;
#else
	register unsigned long *ptr asm("g1");
	register unsigned long ret asm("g2");

	ptr = (unsigned long *) m;
	ret = val;

	/* Note: this is magic and the nop there is
	   really needed. */
	__asm__ __volatile__(
	"mov %%o7, %%g4\n\t"
	"call ___f____xchg32\n\t"
	" nop\n\t"
	: "=&r" (ret)
	: "0" (ret), "r" (ptr)
	: "g3", "g4", "g7", "memory", "cc");

	return ret;
#endif
}

#define xchg(ptr,x) ((__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr))))
#define tas(ptr) (xchg((ptr),1))

extern void __xchg_called_with_bad_pointer(void);

static __inline__ unsigned long __xchg(unsigned long x, __volatile__ void * ptr, int size)
{
	switch (size) {
	case 4:
		return xchg_u32(ptr, x);
	}
	__xchg_called_with_bad_pointer();
	return x;
}
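
/* Illustrative sketch (hypothetical helpers): tas() atomically stores 1
 * into *ptr and returns the previous value, so a return of 0 means the
 * caller took the lock.  A minimal busy-wait lock looks like this:
 */
static __inline__ void example_busy_lock(__volatile__ unsigned long *lock)
{
	while (tas(lock) != 0)
		/* spin until the holder stores 0 */ ;
}

static __inline__ void example_busy_unlock(__volatile__ unsigned long *lock)
{
	mb();		/* compiler barrier: keep the critical section above the release */
	*lock = 0;	/* a plain store releases; the swap in xchg_u32() is the atomic op */
}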
extern void die_if_kernel(char *str, struct pt_regs *regs) __attribute__ ((noreturn));

#endif /* __KERNEL__ */

#endif /* __ASSEMBLY__ */

#endif /* !(__SPARC_SYSTEM_H) */