#ifndef __ASM_SH_SYSTEM_H
#define __ASM_SH_SYSTEM_H

/*
 * Low-level system definitions for SuperH: memory barriers, xchg/cmpxchg
 * atomics, exception/trap-handler plumbing and assorted arch hooks.
 *
 * Copyright (C) 1999, 2000 Niibe Yutaka & Kaz Kojima
 * Copyright (C) 2002 Paul Mundt
 */

#include <linux/irqflags.h>
#include <linux/compiler.h>
#include <linux/linkage.h>
#include <asm/types.h>

#define AT_VECTOR_SIZE_ARCH 5 /* entries in ARCH_DLINFO */

/*
 * A brief note on ctrl_barrier(), the control register write barrier.
 *
 * Legacy SH cores typically require a sequence of 8 nops after
 * modification of a control register in order for the changes to take
 * effect. On newer cores (like the sh4a and sh5) this is accomplished
 * with icbi.
 *
 * Also note that on sh4a in the icbi case we can forego a synco for the
 * write barrier, as it's not necessary for control registers.
 *
 * Historically we have only done this type of barrier for the MMUCR, but
 * it's also necessary for the CCR, so we make it generic here instead.
 */
#if defined(CONFIG_CPU_SH4A) || defined(CONFIG_CPU_SH5)
/* sh4a/sh5: "synco" serializes memory accesses; icbi flushes control state. */
#define mb()		__asm__ __volatile__ ("synco": : :"memory")
#define rmb()		mb()
#define wmb()		__asm__ __volatile__ ("synco": : :"memory")
#define ctrl_barrier()	__icbi(PAGE_OFFSET)
#define read_barrier_depends()	do { } while(0)
#else
/*
 * Legacy cores: no hardware barrier instruction, so mb()/rmb()/wmb() are
 * only compiler barriers; ctrl_barrier() is the historical 8-nop sequence
 * described above.
 */
#define mb()		__asm__ __volatile__ ("": : :"memory")
#define rmb()		mb()
#define wmb()		__asm__ __volatile__ ("": : :"memory")
#define ctrl_barrier()	__asm__ __volatile__ ("nop;nop;nop;nop;nop;nop;nop;nop")
#define read_barrier_depends()	do { } while(0)
#endif

#ifdef CONFIG_SMP
/* On SMP the smp_*() barriers must be real barriers. */
#define smp_mb()	mb()
#define smp_rmb()	rmb()
#define smp_wmb()	wmb()
#define smp_read_barrier_depends()	read_barrier_depends()
#else
/* On UP a compiler barrier is sufficient. */
#define smp_mb()	barrier()
#define smp_rmb()	barrier()
#define smp_wmb()	barrier()
#define smp_read_barrier_depends()	do { } while(0)
#endif

/* Atomically store value into var with full-barrier semantics via xchg(). */
#define set_mb(var, value) do { (void)xchg(&var, value); } while (0)

/*
 * Pick the cmpxchg/xchg primitive implementation for this configuration:
 * gUSA register-bank rollback, LL/SC (movli.l/movco.l on sh4a), or the
 * IRQ-disable fallback.
 */
#ifdef CONFIG_GUSA_RB
#include <asm/cmpxchg-grb.h>
#elif defined(CONFIG_CPU_SH4A)
#include <asm/cmpxchg-llsc.h>
#else
#include <asm/cmpxchg-irq.h>
#endif

/*
 * Deliberately has no definition: referencing it for an unsupported size
 * turns into a link-time error.
 */
extern void __xchg_called_with_bad_pointer(void);

/*
 * Size-dispatched exchange. Only 1- and 4-byte objects are supported
 * (xchg_u8/xchg_u32 come from the cmpxchg-* header included above);
 * anything else triggers the link error via
 * __xchg_called_with_bad_pointer().
 */
#define __xchg(ptr, x, size)				\
({							\
	unsigned long __xchg__res;			\
	volatile void *__xchg_ptr = (ptr);		\
	switch (size) {					\
	case 4:						\
		__xchg__res = xchg_u32(__xchg_ptr, x);	\
		break;					\
	case 1:						\
		__xchg__res = xchg_u8(__xchg_ptr, x);	\
		break;					\
	default:					\
		__xchg_called_with_bad_pointer();	\
		__xchg__res = x;			\
		break;					\
	}						\
							\
	__xchg__res;					\
})

/* Public xchg(): infers the access size from the pointed-to type. */
#define xchg(ptr,x)	\
	((__typeof__(*(ptr)))__xchg((ptr),(unsigned long)(x), sizeof(*(ptr))))

/* This function doesn't exist, so you'll get a linker error
 * if something tries to do an invalid cmpxchg(). */
extern void __cmpxchg_called_with_bad_pointer(void);

#define __HAVE_ARCH_CMPXCHG 1

/*
 * Size-dispatched compare-and-exchange. Only 32-bit objects are
 * supported; any other size resolves to the link-error stub above.
 * Returns the value previously stored at ptr.
 */
static inline unsigned long __cmpxchg(volatile void * ptr, unsigned long old,
		unsigned long new, int size)
{
	switch (size) {
	case 4:
		return __cmpxchg_u32(ptr, old, new);
	}
	__cmpxchg_called_with_bad_pointer();
	return old;
}

/* Public cmpxchg(): infers the access size from the pointed-to type. */
#define cmpxchg(ptr,o,n)						 \
  ({									 \
	__typeof__(*(ptr)) _o_ = (o);					 \
	__typeof__(*(ptr)) _n_ = (n);					 \
	(__typeof__(*(ptr))) __cmpxchg((ptr), (unsigned long)_o_,	 \
				       (unsigned long)_n_, sizeof(*(ptr))); \
  })

struct pt_regs;

extern void die(const char *str, struct pt_regs *regs, long err) __attribute__ ((noreturn));
void free_initmem(void);
void free_initrd_mem(unsigned long start, unsigned long end);

extern void *set_exception_table_vec(unsigned int vec, void *handler);

/*
 * Install a handler by exception event code (EXPEVT/INTEVT value) rather
 * than vector number; event codes are vector << 5, hence the shift.
 */
static inline void *set_exception_table_evt(unsigned int evt, void *handler)
{
	return set_exception_table_vec(evt >> 5, handler);
}

/*
 * SH-2A has both 16 and 32-bit opcodes, do lame encoding checks.
 */
#ifdef CONFIG_CPU_SH2A
extern unsigned int instruction_size(unsigned int insn);
#elif defined(CONFIG_SUPERH32)
/* All other 32-bit SH cores have fixed 16-bit opcodes... */
#define instruction_size(insn)	(2)
#else
/* ...and sh64 has fixed 32-bit opcodes. */
#define instruction_size(insn)	(4)
#endif

extern unsigned long cached_to_uncached;
extern unsigned long uncached_size;

extern struct dentry *sh_debugfs_root;

void per_cpu_trap_init(void);
void default_idle(void);
void cpu_idle_wait(void);
void stop_this_cpu(void *);

#ifdef CONFIG_SUPERH32
/*
 * sh32 trap handlers receive r4-r7 plus the trapframe by value;
 * TRAP_HANDLER_DECL recovers a usable pt_regs pointer (RELOC_HIDE keeps
 * the compiler from making assumptions about &__regs) and pulls the trap
 * number out of regs->tra.
 */
#define BUILD_TRAP_HANDLER(name)					\
asmlinkage void name##_trap_handler(unsigned long r4, unsigned long r5,	\
				    unsigned long r6, unsigned long r7,	\
				    struct pt_regs __regs)

#define TRAP_HANDLER_DECL				\
	struct pt_regs *regs = RELOC_HIDE(&__regs, 0);	\
	unsigned int vec = regs->tra;			\
	(void)vec;	/* may be unused by the handler body */
#else
/* sh64: the vector and pt_regs pointer are passed directly. */
#define BUILD_TRAP_HANDLER(name)	\
asmlinkage void name##_trap_handler(unsigned int vec, struct pt_regs *regs)
#define TRAP_HANDLER_DECL
#endif

/* Prototypes for the trap handlers defined across arch/sh. */
BUILD_TRAP_HANDLER(address_error);
BUILD_TRAP_HANDLER(debug);
BUILD_TRAP_HANDLER(bug);
BUILD_TRAP_HANDLER(breakpoint);
BUILD_TRAP_HANDLER(singlestep);
BUILD_TRAP_HANDLER(fpu_error);
BUILD_TRAP_HANDLER(fpu_state_restore);
BUILD_TRAP_HANDLER(nmi);

#define arch_align_stack(x) (x)

/*
 * Pluggable user-memory accessors, used where the copy routines differ
 * (e.g. unaligned access fixup paths).
 */
struct mem_access {
	unsigned long (*from)(void *dst, const void __user *src, unsigned long cnt);
	unsigned long (*to)(void __user *dst, const void *src, unsigned long cnt);
};

#ifdef CONFIG_SUPERH32
# include "system_32.h"
#else
# include "system_64.h"
#endif

#endif