/*
 * include/asm-xtensa/system.h
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2001 - 2005 Tensilica Inc.
 */

#ifndef _XTENSA_SYSTEM_H
#define _XTENSA_SYSTEM_H

#include <linux/stringify.h>

#include <asm/processor.h>

/* interrupt control */

#define local_save_flags(x)						\
	__asm__ __volatile__ ("rsr %0,"__stringify(PS) : "=a" (x))
#define local_irq_restore(x)	do {					\
	__asm__ __volatile__ ("wsr %0, "__stringify(PS)" ; rsync"	\
			      :: "a" (x) : "memory"); } while(0)
#define local_irq_save(x)	do {					\
	__asm__ __volatile__ ("rsil %0, "__stringify(LOCKLEVEL)		\
			      : "=a" (x) :: "memory"); } while(0)

static inline void local_irq_disable(void)
{
	unsigned long flags;
	__asm__ __volatile__ ("rsil %0, "__stringify(LOCKLEVEL)
			      : "=a" (flags) :: "memory");
}

static inline void local_irq_enable(void)
{
	unsigned long flags;
	__asm__ __volatile__ ("rsil %0, 0" : "=a" (flags) :: "memory");
}

static inline int irqs_disabled(void)
{
	unsigned long flags;
	local_save_flags(flags);
	return flags & 0xf;
}

#define RSR_CPENABLE(x)	do {						  \
	__asm__ __volatile__("rsr %0," __stringify(CPENABLE) : "=a" (x)); \
	} while(0)
#define WSR_CPENABLE(x)	do {						  \
	__asm__ __volatile__("wsr %0," __stringify(CPENABLE) "; rsync"	  \
			     :: "a" (x)); } while(0)

#define clear_cpenable() __clear_cpenable()

static inline void __clear_cpenable(void)
{
#if XCHAL_HAVE_CP
	unsigned long i = 0;
	WSR_CPENABLE(i);
#endif
}

static inline void enable_coprocessor(int i)
{
#if XCHAL_HAVE_CP
	int cp;
	RSR_CPENABLE(cp);
	cp |= 1 << i;
	WSR_CPENABLE(cp);
#endif
}

static inline void disable_coprocessor(int i)
{
#if XCHAL_HAVE_CP
	int cp;
	RSR_CPENABLE(cp);
	cp &= ~(1 << i);
	WSR_CPENABLE(cp);
#endif
}

#define smp_read_barrier_depends() do { } while(0)
#define read_barrier_depends() do { } while(0)

#define mb()  barrier()
#define rmb() mb()
#define wmb() mb()

#ifdef CONFIG_SMP
#error smp_* not defined
#else
#define smp_mb()	barrier()
#define smp_rmb()	barrier()
#define smp_wmb()	barrier()
#endif

#define set_mb(var, value)	do { var = value; mb(); } while (0)

#if !defined(__ASSEMBLY__)

/*
 * switch_to(n) should switch tasks to task nr n, first
 * checking that n isn't the current task, in which case it does nothing.
 */
extern void *_switch_to(void *last, void *next);

#endif	/* __ASSEMBLY__ */

#define switch_to(prev,next,last)		\
do {						\
	clear_cpenable();			\
	(last) = _switch_to(prev, next);	\
} while(0)

/*
 * cmpxchg
 */

static inline unsigned long
__cmpxchg_u32(volatile int *p, int old, int new)
{
	__asm__ __volatile__("rsil    a15, "__stringify(LOCKLEVEL)"\n\t"
			     "l32i    %0, %1, 0              \n\t"
			     "bne     %0, %2, 1f             \n\t"
			     "s32i    %3, %1, 0              \n\t"
			     "1:                             \n\t"
			     "wsr     a15, "__stringify(PS)" \n\t"
			     "rsync                          \n\t"
			     : "=&a" (old)
			     : "a" (p), "a" (old), "r" (new)
			     : "a15", "memory");
	return old;
}
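/*
 * The load/compare/store sequence in __cmpxchg_u32() is made atomic by
 * raising PS.INTLEVEL to LOCKLEVEL around it, which keeps interrupt
 * handlers off the CPU for its duration.  That is sufficient only on
 * uniprocessor configurations; SMP deliberately trips the #error above.
 */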
/* This function doesn't exist, so you'll get a linker error
 * if something tries to do an invalid cmpxchg().
 */

extern void __cmpxchg_called_with_bad_pointer(void);

static __inline__ unsigned long
__cmpxchg(volatile void *ptr, unsigned long old, unsigned long new, int size)
{
	switch (size) {
	case 4:  return __cmpxchg_u32(ptr, old, new);
	default: __cmpxchg_called_with_bad_pointer();
		 return old;
	}
}

#define cmpxchg(ptr,o,n)						\
	({ __typeof__(*(ptr)) _o_ = (o);				\
	   __typeof__(*(ptr)) _n_ = (n);				\
	   (__typeof__(*(ptr))) __cmpxchg((ptr), (unsigned long)_o_,	\
					  (unsigned long)_n_,		\
					  sizeof (*(ptr)));		\
	})

/*
 * xchg_u32
 *
 * Note that a15 is used here because the register allocation
 * done by the compiler is not guaranteed and a window overflow
 * must not occur between the rsil and wsr instructions.  By using
 * a15 in the rsil, the machine is guaranteed to be in a state
 * where no register reference will cause an overflow.
 */

static inline unsigned long xchg_u32(volatile int *m, unsigned long val)
{
	unsigned long tmp;
	__asm__ __volatile__("rsil    a15, "__stringify(LOCKLEVEL)"\n\t"
			     "l32i    %0, %1, 0              \n\t"
			     "s32i    %2, %1, 0              \n\t"
			     "wsr     a15, "__stringify(PS)" \n\t"
			     "rsync                          \n\t"
			     : "=&a" (tmp)
			     : "a" (m), "a" (val)
			     : "a15", "memory");
	return tmp;
}

#define xchg(ptr,x)							\
	((__typeof__(*(ptr)))__xchg((unsigned long)(x), (ptr), sizeof(*(ptr))))

/*
 * This only works if the compiler isn't horribly bad at optimizing.
 * gcc-2.5.8 reportedly can't handle this, but I define that one to
 * be dead anyway.
 */

extern void __xchg_called_with_bad_pointer(void);

static __inline__ unsigned long
__xchg(unsigned long x, volatile void *ptr, int size)
{
	switch (size) {
	case 4:
		return xchg_u32(ptr, x);
	}
	__xchg_called_with_bad_pointer();
	return x;
}

extern void set_except_vector(int n, void *addr);

static inline void spill_registers(void)
{
	unsigned int a0, ps;

	__asm__ __volatile__ (
		"movi	a14," __stringify(PS_EXCM_BIT) " | 1\n\t"
		"mov	a12, a0\n\t"
		"rsr	a13," __stringify(SAR) "\n\t"
		"xsr	a14," __stringify(PS) "\n\t"
		"movi	a0, _spill_registers\n\t"
		"rsync\n\t"
		"callx0 a0\n\t"
		"mov	a0, a12\n\t"
		"wsr	a13," __stringify(SAR) "\n\t"
		"wsr	a14," __stringify(PS) "\n\t"
		:: "a" (&a0), "a" (&ps)
		: "a2", "a3", "a12", "a13", "a14", "a15", "memory");
}

#define arch_align_stack(x) (x)

#endif	/* _XTENSA_SYSTEM_H */
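/*
 * Usage sketch (illustrative; 'add_unless' is a hypothetical helper, not
 * part of this header): the standard cmpxchg() retry loop.  It atomically
 * adds 'a' to *v unless *v equals 'u', returning nonzero on success.
 *
 *	static inline int add_unless(volatile int *v, int a, int u)
 *	{
 *		int c = *v, old;
 *
 *		while (c != u && (old = cmpxchg(v, c, c + a)) != c)
 *			c = old;
 *		return c != u;
 *	}
 *
 * When the cmpxchg() fails, 'old' holds the value actually observed in
 * memory, so the loop retries with it instead of re-reading *v.
 */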