#ifndef _H8300_SYSTEM_H
#define _H8300_SYSTEM_H

#include <linux/linkage.h>

struct pt_regs;

/*
 * switch_to(prev, next, last) should switch to the new task, first
 * checking that it isn't the current task, in which case it does
 * nothing. This also clears the TS flag if the task we switch to
 * most recently used the math co-processor (a note inherited from
 * the m68k code; the H8/300 itself has no FPU).
 */
/*
 * switch_to() saves the extra registers that are not saved
 * automatically by SAVE_SWITCH_STACK in resume(), i.e. d0-d5 and
 * a0-a1. Some of these are used by schedule() and its predecessors,
 * so we might see unexpected behavior when a task returns with
 * unexpected register values.
 *
 * The syscall path stores these registers itself, and none of them
 * are used by it after the function implementing the syscall has
 * been called.
 *
 * Beware that resume now expects *next to be in d1 and the offset of
 * tss to be in a1. This saves a few instructions as we no longer have
 * to push them onto the stack and read them back right after.
 *
 * 02/17/96 - Jes Sorensen (jds@kom.auc.dk)
 *
 * Changed 96/09/19 by Andreas Schwab
 * pass prev in a0, next in a1, offset of tss in d1, and whether
 * the mm structures are shared in d2 (to avoid atc flushing).
 *
 * H8/300 Porting 2002/09/04 Yoshinori Sato
 * On this port, &prev->thread is passed in er0, &next->thread in
 * er1, and prev in er2; resume() leaves the previous task pointer
 * in er2, which switch_to() copies into last.
 */

asmlinkage void resume(void);
#define switch_to(prev,next,last) {			\
	void *_last;					\
	__asm__ __volatile__(				\
		"mov.l	%1, er0\n\t"			\
		"mov.l	%2, er1\n\t"			\
		"mov.l	%3, er2\n\t"			\
		"jsr	@_resume\n\t"			\
		"mov.l	er2,%0\n\t"			\
		: "=r" (_last)				\
		: "r" (&(prev->thread)),		\
		  "r" (&(next->thread)),		\
		  "g" (prev)				\
		: "cc", "er0", "er1", "er2", "er3");	\
	(last) = _last;					\
}

/* Enable interrupts: clear the I (interrupt mask) bit, bit 7 of CCR. */
#define __sti() asm volatile ("andc #0x7f,ccr")
/* Disable interrupts: set the I bit of CCR. */
#define __cli() asm volatile ("orc #0x80,ccr")

#define __save_flags(x) \
	asm volatile ("stc ccr,%w0" : "=r" (x))

#define __restore_flags(x) \
	asm volatile ("ldc %w0,ccr" : : "r" (x))

#define irqs_disabled()			\
({					\
	unsigned char flags;		\
	__save_flags(flags);		\
	((flags & 0x80) == 0x80);	\
})

/* Return from exception: restores CCR and PC from the stack. */
#define iret() __asm__ __volatile__ ("rte" : : : "memory", "sp", "cc")

/* For spinlocks etc */
#define local_irq_disable()	__cli()
#define local_irq_enable()	__sti()
#define local_irq_save(x)	({ __save_flags(x); local_irq_disable(); })
#define local_irq_restore(x)	__restore_flags(x)
#define local_save_flags(x)	__save_flags(x)
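
/*
 * Usage sketch (illustrative only, not part of this header): a short
 * critical section against local interrupts. "pending" is a
 * hypothetical shared variable used just for this example.
 *
 *	unsigned long flags;
 *
 *	local_irq_save(flags);		// save CCR, then set the I bit
 *	pending++;			// cannot be interrupted locally
 *	local_irq_restore(flags);	// restore the saved CCR
 */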

/*
 * Force strict CPU ordering.
 * Not really required on the H8/300: the core executes in order and
 * there is no SMP, so these reduce to compiler barriers.
 */
#define nop()	asm volatile ("nop"::)
#define mb()	asm volatile ("" : : : "memory")
#define rmb()	asm volatile ("" : : : "memory")
#define wmb()	asm volatile ("" : : : "memory")
#define set_mb(var, value) do { xchg(&var, value); } while (0)

#ifdef CONFIG_SMP
#define smp_mb()			mb()
#define smp_rmb()			rmb()
#define smp_wmb()			wmb()
#define smp_read_barrier_depends()	read_barrier_depends()
#else
#define smp_mb()			barrier()
#define smp_rmb()			barrier()
#define smp_wmb()			barrier()
#define smp_read_barrier_depends()	do { } while (0)
#endif

#define xchg(ptr, x) \
	((__typeof__(*(ptr)))__xchg((unsigned long)(x), (ptr), sizeof(*(ptr))))

/*
 * The dummy struct lets the "m" constraints below refer to the whole
 * pointed-to object without gcc making assumptions about its size.
 */
struct __xchg_dummy { unsigned long a[100]; };
#define __xg(x) ((volatile struct __xchg_dummy *)(x))

/*
 * Atomic exchange, implemented by disabling interrupts around a
 * load/store pair; sufficient on this uniprocessor architecture.
 */
static inline unsigned long __xchg(unsigned long x, volatile void *ptr, int size)
{
	unsigned long tmp, flags;

	local_irq_save(flags);

	switch (size) {
	case 1:
		__asm__ __volatile__
			("mov.b %2,%0\n\t"
			 "mov.b %1,%2"
			 : "=&r" (tmp) : "r" (x), "m" (*__xg(ptr)) : "memory");
		break;
	case 2:
		__asm__ __volatile__
			("mov.w %2,%0\n\t"
			 "mov.w %1,%2"
			 : "=&r" (tmp) : "r" (x), "m" (*__xg(ptr)) : "memory");
		break;
	case 4:
		__asm__ __volatile__
			("mov.l %2,%0\n\t"
			 "mov.l %1,%2"
			 : "=&r" (tmp) : "r" (x), "m" (*__xg(ptr)) : "memory");
		break;
	default:
		tmp = 0;	/* unsupported size: memory left untouched */
	}
	local_irq_restore(flags);
	return tmp;
}

/* Reset the machine: jump through the reset vector at address 0. */
#define HARD_RESET_NOW() ({		\
	local_irq_disable();		\
	asm("jmp @@0");			\
})

#include <asm-generic/cmpxchg-local.h>

/*
 * cmpxchg_local and cmpxchg64_local are atomic wrt the current CPU.
 * Always make them available.
 */
#define cmpxchg_local(ptr, o, n)					\
	((__typeof__(*(ptr)))__cmpxchg_local_generic((ptr),		\
			(unsigned long)(o), (unsigned long)(n),		\
			sizeof(*(ptr))))
#define cmpxchg64_local(ptr, o, n) __cmpxchg64_local_generic((ptr), (o), (n))

#ifndef CONFIG_SMP
#include <asm-generic/cmpxchg.h>
#endif

#define arch_align_stack(x) (x)

extern void die(const char *str, struct pt_regs *fp, unsigned long err);

#endif /* _H8300_SYSTEM_H */
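
/*
 * Usage sketch for xchg() (illustrative only; "lock_byte" is a
 * hypothetical variable, not part of this header). Because __xchg()
 * runs with interrupts disabled, it can serve as a simple
 * test-and-set on this uniprocessor port:
 *
 *	static volatile unsigned char lock_byte;
 *
 *	while (xchg(&lock_byte, 1) != 0)
 *		;			// spin until the old value was 0
 *	// ... critical section ...
 *	lock_byte = 0;			// release
 */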