#ifndef _M68K_SYSTEM_H
#define _M68K_SYSTEM_H

#include <linux/config.h> /* get configuration macros */
#include <linux/linkage.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <asm/segment.h>
#include <asm/entry.h>

#define prepare_to_switch() do { } while(0)

/*
 * switch_to() should switch tasks to the given task pointer, first
 * checking that it isn't the current task, in which case it does
 * nothing.  This also clears the TS flag if the task we switch to
 * was the last one to use the math co-processor.
 */
/*
 * switch_to() saves the extra registers that are not saved
 * automatically by SAVE_SWITCH_STACK in resume(), i.e. d0-d5 and
 * a0-a1.  Some of these are used by schedule() and its predecessors,
 * so we might see unexpected behavior if a task returns with
 * unexpected register values.
 *
 * syscall stores these registers itself, and none of them are used
 * by syscall after the function in the syscall has been called.
 *
 * Beware that resume now expects *next to be in d1 and the offset of
 * tss to be in a1.  This saves a few instructions as we no longer have
 * to push them onto the stack and read them back right after.
 *
 * 02/17/96 - Jes Sorensen (jds@kom.auc.dk)
 *
 * Changed 96/09/19 by Andreas Schwab
 * pass prev in a0, next in a1, offset of tss in d1, and whether
 * the mm structures are shared in d2 (to avoid atc flushing).
 */
asmlinkage void resume(void);
#define switch_to(prev,next,last) { \
	register void *_prev __asm__ ("a0") = (prev); \
	register void *_next __asm__ ("a1") = (next); \
	register void *_last __asm__ ("d1"); \
	__asm__ __volatile__("jbsr " SYMBOL_NAME_STR(resume) \
			     : "=d" (_last) : "a" (_prev), "a" (_next) \
			     : "d0", /* "d1", */ "d2", "d3", "d4", "d5", "a0", "a1"); \
	(last) = _last; \
}


/* interrupt control.. */
#include <asm/hardirq.h>
#define __sti() ({ \
	if (MACH_IS_Q40 || !local_irq_count(smp_processor_id())) \
		asm volatile ("andiw %0,%%sr": : "i" (ALLOWINT) : "memory"); \
})
#define __cli() asm volatile ("oriw #0x0700,%%sr": : : "memory")
#define __save_flags(x) asm volatile ("movew %%sr,%0":"=d" (x) : : "memory")
#define __restore_flags(x) asm volatile ("movew %0,%%sr": :"d" (x) : "memory")

/* For spinlocks etc */
#define local_irq_save(x) ({ __save_flags(x); __cli(); })
#define local_irq_restore(x) __restore_flags(x)
#define local_irq_disable() __cli()
#define local_irq_enable() __sti()

#define cli() __cli()
#define sti() __sti()
#define save_flags(x) __save_flags(x)
#define restore_flags(x) __restore_flags(x)
#define save_and_cli(flags) do { save_flags(flags); cli(); } while(0)
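/*
 * Usage sketch (illustrative, not part of the original header): the
 * typical pattern for touching data shared with an interrupt handler
 * is to bracket the access with local_irq_save()/local_irq_restore().
 * The function and counter below are hypothetical examples.
 */
static inline void __example_irq_safe_add(volatile unsigned long *counter,
					  unsigned long n)
{
	unsigned long flags;

	local_irq_save(flags);		/* save SR, then raise the IPL to 7 */
	*counter += n;			/* no interrupt can intervene here */
	local_irq_restore(flags);	/* put the saved SR back */
}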
/*
 * Force strict CPU ordering.
 * Not really required on m68k...
 */
#define nop() do { asm volatile ("nop"); barrier(); } while (0)
#define mb() barrier()
#define rmb() barrier()
#define wmb() barrier()
#define set_mb(var, value) do { xchg(&var, value); } while (0)
#define set_wmb(var, value) do { var = value; wmb(); } while (0)

#define smp_mb() barrier()
#define smp_rmb() barrier()
#define smp_wmb() barrier()


#define xchg(ptr,x) ((__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr))))
#define tas(ptr) (xchg((ptr),1))

struct __xchg_dummy { unsigned long a[100]; };
#define __xg(x) ((volatile struct __xchg_dummy *)(x))

#ifndef CONFIG_RMW_INSNS
/*
 * No read-modify-write instructions available: make the exchange
 * atomic by disabling interrupts around a plain load/store pair.
 */
static inline unsigned long __xchg(unsigned long x, volatile void * ptr, int size)
{
	unsigned long tmp, flags;

	save_flags(flags);
	cli();

	switch (size) {
	case 1:
		__asm__ __volatile__
			("moveb %2,%0\n\t"
			 "moveb %1,%2"
			 : "=&d" (tmp) : "d" (x), "m" (*__xg(ptr)) : "memory");
		break;
	case 2:
		__asm__ __volatile__
			("movew %2,%0\n\t"
			 "movew %1,%2"
			 : "=&d" (tmp) : "d" (x), "m" (*__xg(ptr)) : "memory");
		break;
	case 4:
		__asm__ __volatile__
			("movel %2,%0\n\t"
			 "movel %1,%2"
			 : "=&d" (tmp) : "d" (x), "m" (*__xg(ptr)) : "memory");
		break;
	}
	restore_flags(flags);
	return tmp;
}
#else
/*
 * The CAS instruction is usable: read the old value, then retry the
 * compare-and-swap until it succeeds.
 */
static inline unsigned long __xchg(unsigned long x, volatile void * ptr, int size)
{
	switch (size) {
	case 1:
		__asm__ __volatile__
			("moveb %2,%0\n\t"
			 "1:\n\t"
			 "casb %0,%1,%2\n\t"
			 "jne 1b"
			 : "=&d" (x) : "d" (x), "m" (*__xg(ptr)) : "memory");
		break;
	case 2:
		__asm__ __volatile__
			("movew %2,%0\n\t"
			 "1:\n\t"
			 "casw %0,%1,%2\n\t"
			 "jne 1b"
			 : "=&d" (x) : "d" (x), "m" (*__xg(ptr)) : "memory");
		break;
	case 4:
		__asm__ __volatile__
			("movel %2,%0\n\t"
			 "1:\n\t"
			 "casl %0,%1,%2\n\t"
			 "jne 1b"
			 : "=&d" (x) : "d" (x), "m" (*__xg(ptr)) : "memory");
		break;
	}
	return x;
}
#endif

#endif /* _M68K_SYSTEM_H */
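/*
 * Usage sketch (illustrative, not part of the original header): tas()
 * turns xchg() into a primitive test-and-set lock.  The lock variable
 * below is a hypothetical example.
 *
 *	static volatile int example_lock;
 *
 *	while (tas(&example_lock))	// old value non-zero: already held
 *		;			// spin until our 1 goes in first
 *	... critical section ...
 *	example_lock = 0;		// release
 */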