#ifndef __PARISC_SYSTEM_H
#define __PARISC_SYSTEM_H

#include <asm/psw.h>

/* The program status word as bitfields.  */
struct pa_psw {
	unsigned int y:1;
	unsigned int z:1;
	unsigned int rv:2;
	unsigned int w:1;
	unsigned int e:1;
	unsigned int s:1;
	unsigned int t:1;

	unsigned int h:1;
	unsigned int l:1;
	unsigned int n:1;
	unsigned int x:1;
	unsigned int b:1;
	unsigned int c:1;
	unsigned int v:1;
	unsigned int m:1;

	unsigned int cb:8;

	unsigned int o:1;
	unsigned int g:1;
	unsigned int f:1;
	unsigned int r:1;
	unsigned int q:1;
	unsigned int p:1;
	unsigned int d:1;
	unsigned int i:1;
};

#ifdef CONFIG_64BIT
#define pa_psw(task) ((struct pa_psw *) ((char *) (task) + TASK_PT_PSW + 4))
#else
#define pa_psw(task) ((struct pa_psw *) ((char *) (task) + TASK_PT_PSW))
#endif

struct task_struct;

extern struct task_struct *_switch_to(struct task_struct *, struct task_struct *);

#define switch_to(prev, next, last) do {	\
	(last) = _switch_to(prev, next);	\
} while(0)

/* interrupt control */
#define local_save_flags(x)	__asm__ __volatile__("ssm 0, %0" : "=r" (x) : : "memory")
#define local_irq_disable()	__asm__ __volatile__("rsm %0,%%r0\n" : : "i" (PSW_I) : "memory" )
#define local_irq_enable()	__asm__ __volatile__("ssm %0,%%r0\n" : : "i" (PSW_I) : "memory" )

#define local_irq_save(x) \
	__asm__ __volatile__("rsm %1,%0" : "=r" (x) : "i" (PSW_I) : "memory" )
#define local_irq_restore(x) \
	__asm__ __volatile__("mtsm %0" : : "r" (x) : "memory" )

#define irqs_disabled()			\
({					\
	unsigned long flags;		\
	local_save_flags(flags);	\
	(flags & PSW_I) == 0;		\
})
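
/*
 * Illustrative sketch, not part of the original header: the usual
 * pairing of the save/restore macros above.  The function name and
 * counter are hypothetical.  rsm hands back the old system-mask bits
 * in 'flags' while clearing PSW_I, and mtsm puts them back, so the
 * pair nests safely even when interrupts were already disabled on
 * entry.
 */
static inline void example_protected_increment(unsigned long *counter)
{
	unsigned long flags;

	local_irq_save(flags);		/* disable interrupts, old mask -> flags */
	(*counter)++;			/* can't be interrupted here */
	local_irq_restore(flags);	/* restore the previous PSW_I state */
}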

#define mfctl(reg)	({		\
	unsigned long cr;		\
	__asm__ __volatile__(		\
		"mfctl " #reg ",%0" :	\
		"=r" (cr)		\
	);				\
	cr;				\
})

#define mtctl(gr, cr) \
	__asm__ __volatile__("mtctl %0,%1" \
		: /* no outputs */ \
		: "r" (gr), "i" (cr) : "memory")

/* These are here to demystify the calling code, and to provide hooks */
/* which I needed for debugging EIEM problems. -PB */
#define get_eiem() mfctl(15)
static inline void set_eiem(unsigned long val)
{
	mtctl(val, 15);
}

#define mfsp(reg)	({		\
	unsigned long cr;		\
	__asm__ __volatile__(		\
		"mfsp " #reg ",%0" :	\
		"=r" (cr)		\
	);				\
	cr;				\
})

#define mtsp(gr, cr) \
	__asm__ __volatile__("mtsp %0,%1" \
		: /* no outputs */ \
		: "r" (gr), "i" (cr) : "memory")


/*
** This is simply the barrier() macro from linux/kernel.h, but when serial.c
** uses tqueue.h, which uses smp_mb() defined via barrier(), linux/kernel.h
** hasn't been included yet, so it fails; hence the macro is repeated here.
**
** The PA-RISC architecture allows for weakly ordered memory accesses,
** although none of the processors use it.  There is a strongly-ordered
** bit, the O-bit of the page directory entry.  Operating systems that
** cannot tolerate out-of-order accesses should set this bit when mapping
** pages.  The O-bit of the PSW should also be set to 1 (I don't believe
** any of the processors implemented the PSW O-bit).  The PCX-W ERS states
** that the TLB O-bit is not implemented, so the page directory does not
** need to have the O-bit set when mapping pages (section 3.1).  This
** section also states that the PSW Y, Z, G, and O bits are not
** implemented.
** So it looks like nothing needs to be done for parisc-linux (yet).
** (thanks to chada for the above comment -ggg)
**
** The __asm__ op below simply prevents gcc/ld from reordering
** instructions across the mb() "call".
*/
#define mb()		__asm__ __volatile__("" : : : "memory")	/* barrier() */
#define rmb()		mb()
#define wmb()		mb()
#define smp_mb()	mb()
#define smp_rmb()	mb()
#define smp_wmb()	mb()
#define smp_read_barrier_depends()	do { } while(0)
#define read_barrier_depends()		do { } while(0)

#define set_mb(var, value)		do { var = value; mb(); } while (0)

#ifndef CONFIG_PA20
/* Because kmalloc only guarantees 8-byte alignment for kmalloc'd data,
   and GCC only guarantees 8-byte alignment for stack locals, we can't
   be assured of 16-byte alignment for atomic lock data even if we
   specify "__attribute ((aligned(16)))" in the type declaration.  So,
   we use a struct containing an array of four ints for the atomic lock
   type and dynamically select the 16-byte aligned int from the array
   for the semaphore.  */

#define __PA_LDCW_ALIGNMENT	16
#define __ldcw_align(a) ({					\
	unsigned long __ret = (unsigned long) &(a)->lock[0];	\
	__ret = (__ret + __PA_LDCW_ALIGNMENT - 1)		\
		& ~(__PA_LDCW_ALIGNMENT - 1);			\
	(volatile unsigned int *) __ret;			\
})
#define __LDCW	"ldcw"

#else /*CONFIG_PA20*/
/* From: "Jim Hull" <jim.hull of hp.com>
   I've attached a summary of the change, but basically, for PA 2.0, as
   long as the ",CO" (coherent operation) completer is specified, then the
   16-byte alignment requirement for ldcw and ldcd is relaxed, and instead
   they only require "natural" alignment (4-byte for ldcw, 8-byte for
   ldcd).  */

#define __PA_LDCW_ALIGNMENT	4
#define __ldcw_align(a)		(&(a)->slock)
#define __LDCW	"ldcw,co"

#endif /*!CONFIG_PA20*/

/* LDCW, the only atomic read-write operation PA-RISC has.  *sigh*.  */
#define __ldcw(a) ({						\
	unsigned __ret;						\
	__asm__ __volatile__(__LDCW " 0(%2),%0"			\
		: "=r" (__ret), "+m" (*(a)) : "r" (a));		\
	__ret;							\
})

#ifdef CONFIG_SMP
# define __lock_aligned __attribute__((__section__(".data..lock_aligned")))
#endif

#define arch_align_stack(x) (x)

#endif
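
/*
 * Illustrative sketch, not part of the original header: a minimal
 * test-and-set spinlock built on __ldcw()/__ldcw_align() above,
 * along the lines of what arch spinlock code does with these macros.
 * The type and function names are hypothetical.  A free lock word
 * holds 1; ldcw atomically loads the word and zeroes it, so a
 * non-zero return from __ldcw() means we took the lock.  The lock
 * word(s) must be initialized to 1 before first use.
 */
typedef struct {
#ifdef CONFIG_PA20
	volatile unsigned int slock;	/* natural alignment suffices with ldcw,co */
#else
	volatile unsigned int lock[4];	/* one element is always 16-byte aligned */
#endif
} example_ldcw_lock_t;

static inline void example_spin_lock(example_ldcw_lock_t *x)
{
	volatile unsigned int *a = __ldcw_align(x);

	while (__ldcw(a) == 0)		/* got 0: lock was already held */
		while (*a == 0)		/* spin on plain reads until it looks free */
			;
}

static inline void example_spin_unlock(example_ldcw_lock_t *x)
{
	volatile unsigned int *a = __ldcw_align(x);

	mb();				/* keep the critical section before the release */
	*a = 1;				/* mark the lock free again */
}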