/* $Id: system.h,v 1.1.1.1 2007/08/03 18:53:36 Exp $ */

#ifndef __SPARC_SYSTEM_H
#define __SPARC_SYSTEM_H

#include <linux/kernel.h>
#include <linux/threads.h>	/* NR_CPUS */
#include <linux/thread_info.h>

#include <asm/page.h>
#include <asm/psr.h>
#include <asm/ptrace.h>
#include <asm/btfixup.h>
#include <asm/smp.h>

#ifndef __ASSEMBLY__

/*
 * Sparc (general) CPU types
 */
enum sparc_cpu {
	sun4		= 0x00,
	sun4c		= 0x01,
	sun4m		= 0x02,
	sun4d		= 0x03,
	sun4e		= 0x04,
	sun4u		= 0x05, /* V8 ploos ploos */
	sun_unknown	= 0x06,
	ap1000		= 0x07, /* almost a sun4m */
};

/* Really, userland should not be looking at any of this... */
#ifdef __KERNEL__

extern enum sparc_cpu sparc_cpu_model;

#ifndef CONFIG_SUN4
#define ARCH_SUN4C_SUN4	(sparc_cpu_model == sun4c)
#define ARCH_SUN4	0
#else
#define ARCH_SUN4C_SUN4	1
#define ARCH_SUN4	1
#endif

#define SUN4M_NCPUS	4	/* Architectural limit of sun4m. */

extern struct thread_info *current_set[NR_CPUS];

extern unsigned long empty_bad_page;
extern unsigned long empty_bad_page_table;
extern unsigned long empty_zero_page;

extern void sun_do_break(void);
extern int serial_console;
extern int stop_a_enabled;

static __inline__ int con_is_present(void)
{
	return serial_console ? 0 : 1;
}

/* When a context switch happens we must flush all user windows so that
 * the windows of the current process are flushed onto its stack. This
 * way the windows are all clean for the next process and the stack
 * frames are up to date.
 */
extern void flush_user_windows(void);
extern void kill_user_windows(void);
extern void synchronize_user_stack(void);
extern void fpsave(unsigned long *fpregs, unsigned long *fsr,
		   void *fpqueue, unsigned long *fpqdepth);

#ifdef CONFIG_SMP
#define SWITCH_ENTER(prv) \
	do { \
		if (test_tsk_thread_flag(prv, TIF_USEDFPU)) { \
			put_psr(get_psr() | PSR_EF); \
			fpsave(&(prv)->thread.float_regs[0], &(prv)->thread.fsr, \
			       &(prv)->thread.fpqueue[0], &(prv)->thread.fpqdepth); \
			clear_tsk_thread_flag(prv, TIF_USEDFPU); \
			(prv)->thread.kregs->psr &= ~PSR_EF; \
		} \
	} while(0)

#define SWITCH_DO_LAZY_FPU(next)	/* */
#else
#define SWITCH_ENTER(prv)		/* */
#define SWITCH_DO_LAZY_FPU(nxt) \
	do { \
		if (last_task_used_math != (nxt)) \
			(nxt)->thread.kregs->psr &= ~PSR_EF; \
	} while(0)
#endif

/* Flush the register windows to the stack: each nested save that runs
 * out of windows traps and spills one, and the restores unwind back to
 * where we started.
 */
#define prepare_arch_switch(next) do { \
	__asm__ __volatile__( \
	".globl\tflush_patch_switch\nflush_patch_switch:\n\t" \
	"save %sp, -0x40, %sp; save %sp, -0x40, %sp; save %sp, -0x40, %sp\n\t" \
	"save %sp, -0x40, %sp; save %sp, -0x40, %sp; save %sp, -0x40, %sp\n\t" \
	"save %sp, -0x40, %sp\n\t" \
	"restore; restore; restore; restore; restore; restore; restore"); \
} while(0)
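/*
 * Illustrative sketch (hypothetical, compiled out): why callers care
 * about window flushing.  Before the kernel reads a user stack frame
 * (signal delivery, ptrace), the task's live register windows must be
 * spilled to the user stack, or the newest frames exist only in the
 * register file.  Assumes <asm/uaccess.h> for copy_from_user() and
 * <linux/errno.h> for EFAULT; neither is pulled in by this header.
 */
#if 0
static int example_read_user_word(unsigned long __user *usp,
				  unsigned long *val)
{
	flush_user_windows();	/* force in-register frames onto the stack */
	return copy_from_user(val, usp, sizeof(*val)) ? -EFAULT : 0;
}
#endif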
	/* Much care has gone into this code, do not touch it.
	 *
	 * We need to load up regs l0/l1 for the newly forked child
	 * case because the trap return path relies on those registers
	 * holding certain values; gcc is told that they are clobbered.
	 * Gcc needs registers for 3 values in and 1 value out, so we
	 * clobber every non-fixed-usage register besides l2/l3/o4/o5.  -DaveM
	 *
	 * Hey Dave, that "do not touch" sign is too much of an incentive
	 * - Anton & Pete
	 */
#define switch_to(prev, next, last) do {					\
	SWITCH_ENTER(prev);							\
	SWITCH_DO_LAZY_FPU(next);						\
	cpu_set(smp_processor_id(), next->active_mm->cpu_vm_mask);		\
	__asm__ __volatile__(							\
	"sethi	%%hi(here - 0x8), %%o7\n\t"					\
	"mov	%%g6, %%g3\n\t"							\
	"or	%%o7, %%lo(here - 0x8), %%o7\n\t"				\
	"rd	%%psr, %%g4\n\t"						\
	"std	%%sp, [%%g6 + %4]\n\t"						\
	"rd	%%wim, %%g5\n\t"						\
	"wr	%%g4, 0x20, %%psr\n\t"						\
	"nop\n\t"								\
	"std	%%g4, [%%g6 + %3]\n\t"						\
	"ldd	[%2 + %3], %%g4\n\t"						\
	"mov	%2, %%g6\n\t"							\
	".globl	patchme_store_new_current\n"					\
"patchme_store_new_current:\n\t"						\
	"st	%2, [%1]\n\t"							\
	"wr	%%g4, 0x20, %%psr\n\t"						\
	"nop\n\t"								\
	"nop\n\t"								\
	"nop\n\t"	/* LEON needs all 3 nops: load to %sp depends on CWP. */ \
	"ldd	[%%g6 + %4], %%sp\n\t"						\
	"wr	%%g5, 0x0, %%wim\n\t"						\
	"ldd	[%%sp + 0x00], %%l0\n\t"					\
	"ldd	[%%sp + 0x38], %%i6\n\t"					\
	"wr	%%g4, 0x0, %%psr\n\t"						\
	"nop\n\t"								\
	"nop\n\t"								\
	"jmpl	%%o7 + 0x8, %%g0\n\t"						\
	" ld	[%%g3 + %5], %0\n\t"						\
	"here:\n"								\
	: "=&r" (last)								\
	: "r" (&(current_set[hard_smp_processor_id()])),			\
	  "r" (task_thread_info(next)),						\
	  "i" (TI_KPSR),							\
	  "i" (TI_KSP),								\
	  "i" (TI_TASK)								\
	: "g1", "g2", "g3", "g4", "g5", "g7",					\
	  "l0", "l1", "l3", "l4", "l5", "l6", "l7",				\
	  "i0", "i1", "i2", "i3", "i4", "i5",					\
	  "o0", "o1", "o2", "o3", "o7");					\
	} while(0)

/*
 * On SMP systems, when the scheduler does migration-cost autodetection,
 * it needs a way to flush as much of the CPU's caches as possible.
 *
 * TODO: fill this in!
 */
static inline void sched_cacheflush(void)
{
}

/*
 * Changing the IRQ level on the Sparc.
 */
extern void local_irq_restore(unsigned long);
extern unsigned long __local_irq_save(void);
extern void local_irq_enable(void);

static inline unsigned long getipl(void)
{
	unsigned long retval;

	__asm__ __volatile__("rd %%psr, %0" : "=r" (retval));
	return retval;
}

#define local_save_flags(flags)	((flags) = getipl())
#define local_irq_save(flags)	((flags) = __local_irq_save())
#define local_irq_disable()	((void) __local_irq_save())
#define irqs_disabled()		((getipl() & PSR_PIL) != 0)

#define mb()	__asm__ __volatile__ ("" : : : "memory")
#define rmb()	mb()
#define wmb()	mb()
#define read_barrier_depends()	do { } while(0)
#define set_mb(__var, __value)	do { __var = __value; mb(); } while(0)
#define smp_mb()	__asm__ __volatile__ ("" : : : "memory")
#define smp_rmb()	__asm__ __volatile__ ("" : : : "memory")
#define smp_wmb()	__asm__ __volatile__ ("" : : : "memory")
#define smp_read_barrier_depends()	do { } while(0)

#define nop()	__asm__ __volatile__ ("nop")
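/*
 * Illustrative sketch (hypothetical, compiled out): how the smp_*
 * barriers above are meant to be paired.  On this port they expand to
 * pure compiler barriers, but the producer/consumer pairing rule is
 * the same as on machines with weaker memory ordering.  The example_*
 * names are made up for illustration:
 */
#if 0
static int example_data;
static int example_ready;

static void example_producer(void)		/* runs on CPU 0 */
{
	example_data = 42;
	smp_wmb();	/* order the data store before the flag store */
	example_ready = 1;
}

static int example_consumer(void)		/* runs on CPU 1 */
{
	if (!example_ready)
		return -1;
	smp_rmb();	/* order the flag load before the data load */
	return example_data;
}
#endif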
/* This has special calling conventions */
#ifndef CONFIG_SMP
BTFIXUPDEF_CALL(void, ___xchg32, void)
#endif

static inline unsigned long xchg_u32(__volatile__ unsigned long *m, unsigned long val)
{
#ifdef CONFIG_SMP
	__asm__ __volatile__("swap [%2], %0"
			     : "=&r" (val)
			     : "0" (val), "r" (m)
			     : "memory");
	return val;
#else
	register unsigned long *ptr asm("g1");
	register unsigned long ret asm("g2");

	ptr = (unsigned long *) m;
	ret = val;

	/* Note: this is magic and the nop in the call's delay slot
	 * really is needed.
	 */
	__asm__ __volatile__(
	"mov	%%o7, %%g4\n\t"
	"call	___f____xchg32\n\t"
	" nop\n\t"
	: "=&r" (ret)
	: "0" (ret), "r" (ptr)
	: "g3", "g4", "g7", "memory", "cc");

	return ret;
#endif
}

#define xchg(ptr,x) ((__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr))))

extern void __xchg_called_with_bad_pointer(void);

static __inline__ unsigned long __xchg(unsigned long x, __volatile__ void * ptr, int size)
{
	switch (size) {
	case 4:
		return xchg_u32(ptr, x);
	}
	__xchg_called_with_bad_pointer();
	return x;
}

extern void die_if_kernel(char *str, struct pt_regs *regs) __attribute__ ((noreturn));

#endif /* __KERNEL__ */

#endif /* __ASSEMBLY__ */

#define arch_align_stack(x) (x)

#endif /* !(__SPARC_SYSTEM_H) */
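/*
 * Illustrative sketch (hypothetical, compiled out): a minimal
 * test-and-set flag built on xchg().  Note this port only supports
 * 4-byte operands; anything else lands in the deliberately undefined
 * __xchg_called_with_bad_pointer().  The example_* names are made up:
 */
#if 0
static volatile unsigned long example_flag;

static void example_acquire(void)
{
	/* Atomically store 1 and look at what was there before. */
	while (xchg(&example_flag, 1UL) != 0UL)
		; /* somebody else got there first, spin */
}

static void example_release(void)
{
	example_flag = 0UL;
}
#endif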