#ifndef __PPC64_SYSTEM_H
#define __PPC64_SYSTEM_H

/*
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/config.h>
#include <linux/kdev_t.h>
#include <asm/page.h>
#include <asm/processor.h>
#include <asm/hw_irq.h>
#include <asm/memory.h>

/*
 * Memory barrier.
 * The sync instruction guarantees that all memory accesses initiated
 * by this processor have been performed (with respect to all other
 * mechanisms that access memory).  The eieio instruction is a barrier
 * providing an ordering (separately) for (a) cacheable stores and (b)
 * loads and stores to non-cacheable memory (e.g. I/O devices).
 *
 * mb() prevents loads and stores being reordered across this point.
 * rmb() prevents loads being reordered across this point.
 * wmb() prevents stores being reordered across this point.
 *
 * We can use the eieio instruction for wmb, but since it gives no
 * ordering guarantees about loads, mb() has to use the stronger but
 * slower sync instruction.  For rmb() the lighter-weight lwsync is
 * sufficient, since it orders loads with respect to later loads and
 * stores.
 */
#define mb()   __asm__ __volatile__ ("sync" : : : "memory")
#define rmb()  __asm__ __volatile__ ("lwsync" : : : "memory")
#define wmb()  __asm__ __volatile__ ("eieio" : : : "memory")

#define set_mb(var, value)  do { var = value; mb(); } while (0)
#define set_wmb(var, value) do { var = value; wmb(); } while (0)

#ifdef CONFIG_SMP
#define smp_mb()  mb()
#define smp_rmb() rmb()
#define smp_wmb() wmb()
#else
#define smp_mb()  __asm__ __volatile__("" : : : "memory")
#define smp_rmb() __asm__ __volatile__("" : : : "memory")
#define smp_wmb() __asm__ __volatile__("" : : : "memory")
#endif /* CONFIG_SMP */
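
/*
 * Usage sketch, added for illustration only: the example_* helpers
 * below are hypothetical and not part of the kernel interface.  They
 * show the usual pairing of wmb() on the producer side (payload store
 * ordered before the "published" flag store) with rmb() on the
 * consumer side (flag load ordered before the payload load).
 */
static __inline__ void example_publish(volatile int *data, volatile int *flag,
                                       int value)
{
        *data = value;          /* store the payload first */
        wmb();                  /* order payload store before the flag store */
        *flag = 1;              /* then publish */
}

static __inline__ int example_read_published(volatile int *data,
                                             volatile int *flag)
{
        if (!*flag)
                return -1;      /* nothing published yet */
        rmb();                  /* order the flag load before the payload load */
        return *data;
}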

#ifdef CONFIG_XMON
extern void xmon_irq(int, void *, struct pt_regs *);
extern void xmon(struct pt_regs *excp);
#endif

extern void print_backtrace(unsigned long *);
extern void show_regs(struct pt_regs *regs);
extern void flush_instruction_cache(void);
extern void hard_reset_now(void);
extern void poweroff_now(void);
extern int _get_PVR(void);
extern long _get_L2CR(void);
extern void _set_L2CR(unsigned long);
extern void giveup_fpu(struct task_struct *);
extern void enable_kernel_fp(void);
extern void cvt_fd(float *from, double *to, unsigned long *fpscr);
extern void cvt_df(double *from, float *to, unsigned long *fpscr);
extern int abs(int);
extern void cacheable_memzero(void *p, unsigned int nb);

struct device_node;

struct task_struct;
#define prepare_to_switch() do { } while (0)
#define switch_to(prev,next,last) _switch_to((prev),(next),&(last))
extern void _switch_to(struct task_struct *, struct task_struct *,
                       struct task_struct **);

struct thread_struct;
extern struct task_struct *_switch(struct thread_struct *prev,
                                   struct thread_struct *next);

struct pt_regs;
extern void dump_regs(struct pt_regs *);

#ifndef CONFIG_SMP

#define cli()                  __cli()
#define sti()                  __sti()
#define save_flags(flags)      __save_flags(flags)
#define restore_flags(flags)   __restore_flags(flags)
#define save_and_cli(flags)    __save_and_cli(flags)

#else /* CONFIG_SMP */

extern void __global_cli(void);
extern void __global_sti(void);
extern unsigned long __global_save_flags(void);
extern void __global_restore_flags(unsigned long);
#define cli()                  __global_cli()
#define sti()                  __global_sti()
#define save_flags(x)          ((x) = __global_save_flags())
#define restore_flags(x)       __global_restore_flags(x)

#endif /* !CONFIG_SMP */

#define local_irq_disable()        __cli()
#define local_irq_enable()         __sti()
#define local_irq_save(flags)      __save_and_cli(flags)
#define local_irq_restore(flags)   __restore_flags(flags)

static __inline__ int __is_processor(unsigned long pv)
{
        unsigned long pvr;

        asm volatile("mfspr %0, 0x11F" : "=r" (pvr));
        return (PVR_VER(pvr) == pv);
}

/*
 * Atomic exchange
 *
 * Changes the memory location '*ptr' to be val and returns
 * the previous value stored there.
 *
 * Inline asm pulled from arch/ppc/kernel/misc.S so ppc64
 * is more like most of the other architectures.
 */
static __inline__ unsigned long
__xchg_u32(volatile int *m, unsigned long val)
{
        unsigned long dummy;

        __asm__ __volatile__(
        EIEIO_ON_SMP
"1:     lwarx   %0,0,%3         # __xchg_u32\n\
        stwcx.  %2,0,%3\n\
2:      bne-    1b"
        ISYNC_ON_SMP
        : "=&r" (dummy), "=m" (*m)
        : "r" (val), "r" (m)
        : "cc", "memory");

        return (dummy);
}

static __inline__ unsigned long
__xchg_u64(volatile long *m, unsigned long val)
{
        unsigned long dummy;

        __asm__ __volatile__(
        EIEIO_ON_SMP
"1:     ldarx   %0,0,%3         # __xchg_u64\n\
        stdcx.  %2,0,%3\n\
2:      bne-    1b"
        ISYNC_ON_SMP
        : "=&r" (dummy), "=m" (*m)
        : "r" (val), "r" (m)
        : "cc", "memory");

        return (dummy);
}

/*
 * This function doesn't exist, so you'll get a linker error
 * if something tries to do an invalid xchg().
 */
extern void __xchg_called_with_bad_pointer(void);

static __inline__ unsigned long
__xchg(volatile void *ptr, unsigned long x, int size)
{
        switch (size) {
        case 4:
                return __xchg_u32(ptr, x);
        case 8:
                return __xchg_u64(ptr, x);
        }
        __xchg_called_with_bad_pointer();
        return x;
}

#define xchg(ptr,x)                                                          \
        ({                                                                   \
                __typeof__(*(ptr)) _x_ = (x);                                \
                (__typeof__(*(ptr))) __xchg((ptr), (unsigned long)_x_, sizeof(*(ptr))); \
        })

#define tas(ptr) (xchg((ptr),1))

#define __HAVE_ARCH_CMPXCHG 1

static __inline__ unsigned long
__cmpxchg_u32(volatile int *p, int old, int new)
{
        int prev;

        __asm__ __volatile__ (
        EIEIO_ON_SMP
"1:     lwarx   %0,0,%2         # __cmpxchg_u32\n\
        cmpw    0,%0,%3\n\
        bne-    2f\n\
        stwcx.  %4,0,%2\n\
        bne-    1b"
        ISYNC_ON_SMP
        "\n\
2:"
        : "=&r" (prev), "=m" (*p)
        : "r" (p), "r" (old), "r" (new), "m" (*p)
        : "cc", "memory");

        return prev;
}

static __inline__ unsigned long
__cmpxchg_u64(volatile long *p, unsigned long old, unsigned long new)
{
        unsigned long prev;     /* must be 64-bit: ldarx loads a doubleword */

        __asm__ __volatile__ (
        EIEIO_ON_SMP
"1:     ldarx   %0,0,%2         # __cmpxchg_u64\n\
        cmpd    0,%0,%3\n\
        bne-    2f\n\
        stdcx.  %4,0,%2\n\
        bne-    1b"
        ISYNC_ON_SMP
        "\n\
2:"
        : "=&r" (prev), "=m" (*p)
        : "r" (p), "r" (old), "r" (new), "m" (*p)
        : "cc", "memory");

        return prev;
}

/*
 * This function doesn't exist, so you'll get a linker error
 * if something tries to do an invalid cmpxchg().
 */
extern void __cmpxchg_called_with_bad_pointer(void);

static __inline__ unsigned long
__cmpxchg(volatile void *ptr, unsigned long old, unsigned long new, int size)
{
        switch (size) {
        case 4:
                return __cmpxchg_u32(ptr, old, new);
        case 8:
                return __cmpxchg_u64(ptr, old, new);
        }
        __cmpxchg_called_with_bad_pointer();
        return old;
}

#define cmpxchg(ptr,o,n)                                                     \
        ({                                                                   \
                __typeof__(*(ptr)) _o_ = (o);                                \
                __typeof__(*(ptr)) _n_ = (n);                                \
                (__typeof__(*(ptr))) __cmpxchg((ptr), (unsigned long)_o_,    \
                                               (unsigned long)_n_, sizeof(*(ptr))); \
        })
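
/*
 * Usage sketch, added for illustration only: example_atomic_add and
 * example_trylock are hypothetical helpers, not part of the kernel
 * interface.  cmpxchg() is normally wrapped in a retry loop that only
 * stores if nobody else changed the word since it was read; tas()
 * uses xchg() to grab a simple one-word flag.
 */
static __inline__ long example_atomic_add(volatile long *p, long amount)
{
        long old, new;

        do {
                old = *p;               /* snapshot the current value */
                new = old + amount;
                /* lost a race with another cpu?  reload and retry */
        } while (cmpxchg(p, old, new) != old);

        return new;
}

static __inline__ int example_trylock(volatile int *flag)
{
        /* tas() stores 1 and returns the old contents: 0 means we got it */
        return tas(flag) == 0;
}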

#endif /* __PPC64_SYSTEM_H */