#ifndef _PARISC_BITOPS_H
#define _PARISC_BITOPS_H

#include <linux/compiler.h>
#include <asm/types.h>		/* for BITS_PER_LONG/SHIFT_PER_LONG */
#include <asm/byteorder.h>
#include <asm/atomic.h>

/*
 * HP-PARISC specific bit operations
 * for a detailed description of the functions please refer
 * to include/asm-i386/bitops.h or kerneldoc
 */

/* Reduce a bit number to its position within one unsigned long
 * (i.e. nr modulo BITS_PER_LONG). */
#define CHOP_SHIFTCOUNT(x) (((unsigned long) (x)) & (BITS_PER_LONG - 1))


#define smp_mb__before_clear_bit()      smp_mb()
#define smp_mb__after_clear_bit()       smp_mb()

/* See http://marc.theaimsgroup.com/?t=108826637900003 for discussion
 * on use of volatile and __*_bit() (set/clear/change):
 * *_bit() want use of volatile.
 * __*_bit() are "relaxed" and don't use spinlock or volatile.
 *
 * NOTE(review): atomicity of the *_bit() ops below is provided by
 * _atomic_spin_lock_irqsave()/_atomic_spin_unlock_irqrestore() from
 * <asm/atomic.h>; the lock is selected from the word's address
 * (presumably a hashed spinlock array -- confirm in asm/atomic.h).
 */

/* Atomically set bit 'nr' in the bitmap at 'addr'. */
static __inline__ void set_bit(int nr, volatile unsigned long * addr)
{
	unsigned long mask = 1UL << CHOP_SHIFTCOUNT(nr);
	unsigned long flags;

	/* advance to the unsigned long word that contains bit 'nr' */
	addr += (nr >> SHIFT_PER_LONG);
	_atomic_spin_lock_irqsave(addr, flags);
	*addr |= mask;
	_atomic_spin_unlock_irqrestore(addr, flags);
}

/* Atomically clear bit 'nr' in the bitmap at 'addr'. */
static __inline__ void clear_bit(int nr, volatile unsigned long * addr)
{
	/* inverted mask: AND with it clears exactly the target bit */
	unsigned long mask = ~(1UL << CHOP_SHIFTCOUNT(nr));
	unsigned long flags;

	addr += (nr >> SHIFT_PER_LONG);
	_atomic_spin_lock_irqsave(addr, flags);
	*addr &= mask;
	_atomic_spin_unlock_irqrestore(addr, flags);
}

/* Atomically toggle bit 'nr' in the bitmap at 'addr'. */
static __inline__ void change_bit(int nr, volatile unsigned long * addr)
{
	unsigned long mask = 1UL << CHOP_SHIFTCOUNT(nr);
	unsigned long flags;

	addr += (nr >> SHIFT_PER_LONG);
	_atomic_spin_lock_irqsave(addr, flags);
	*addr ^= mask;
	_atomic_spin_unlock_irqrestore(addr, flags);
}

/* Atomically set bit 'nr'; return 1 if it was already set, 0 if it
 * was clear.  The store is skipped when the bit is already set, so
 * the word is only written when its value actually changes. */
static __inline__ int test_and_set_bit(int nr, volatile unsigned long * addr)
{
	unsigned long mask = 1UL << CHOP_SHIFTCOUNT(nr);
	unsigned long old;
	unsigned long flags;
	int set;

	addr += (nr >> SHIFT_PER_LONG);
	_atomic_spin_lock_irqsave(addr, flags);
	old = *addr;
	set = (old & mask) ? 1 : 0;
	if (!set)
		*addr = old | mask;
	_atomic_spin_unlock_irqrestore(addr, flags);

	return set;
}

/* Atomically clear bit 'nr'; return 1 if it was set, 0 if it was
 * already clear.  As above, the store only happens when the bit
 * actually needs to change. */
static __inline__ int test_and_clear_bit(int nr, volatile unsigned long * addr)
{
	unsigned long mask = 1UL << CHOP_SHIFTCOUNT(nr);
	unsigned long old;
	unsigned long flags;
	int set;

	addr += (nr >> SHIFT_PER_LONG);
	_atomic_spin_lock_irqsave(addr, flags);
	old = *addr;
	set = (old & mask) ? 1 : 0;
	if (set)
		*addr = old & ~mask;
	_atomic_spin_unlock_irqrestore(addr, flags);

	return set;
}

/* Atomically toggle bit 'nr'; return its value before the toggle
 * (1 if it was set, 0 if it was clear).  A toggle always changes the
 * word, so the store is unconditional here. */
static __inline__ int test_and_change_bit(int nr, volatile unsigned long * addr)
{
	unsigned long mask = 1UL << CHOP_SHIFTCOUNT(nr);
	unsigned long oldbit;
	unsigned long flags;

	addr += (nr >> SHIFT_PER_LONG);
	_atomic_spin_lock_irqsave(addr, flags);
	oldbit = *addr;
	*addr = oldbit ^ mask;
	_atomic_spin_unlock_irqrestore(addr, flags);

	return (oldbit & mask) ? 1 : 0;
}

#include <asm-generic/bitops/non-atomic.h>

#ifdef __KERNEL__

/**
 * __ffs - find first bit in word. returns 0 to "BITS_PER_LONG-1".
 * @word: The word to search
 *
 * __ffs() return is undefined if no bit is set.
 *
 * 32-bit fast __ffs by LaMont Jones "lamont At hp com".
 * 64-bit enhancement by Grant Grundler "grundler At parisc-linux org".
 * (with help from willy/jejb to get the semantics right)
 *
 * This algorithm avoids branches by making use of nullification.
 * One side effect of "extr" instructions is it sets PSW[N] bit.
 * How PSW[N] (nullify next insn) gets set is determined by the
 * "condition" field (eg "<>" or "TR" below) in the extr* insn.
 * Only the 1st and one of either the 2cd or 3rd insn will get executed.
 * Each set of 3 insn will get executed in 2 cycles on PA8x00 vs 16 or so
 * cycles for each mispredicted branch.
 */

static __inline__ unsigned long __ffs(unsigned long x)
{
	unsigned long ret;

	/* Branch-free binary search: each extr/addi triple halves the
	 * search window, counting down from the top bit position. */
	__asm__(
#ifdef CONFIG_64BIT
		" ldi       63,%1\n"
		" extrd,u,*<>  %0,63,32,%%r0\n"
		" extrd,u,*TR  %0,31,32,%0\n"	/* move top 32-bits down */
		" addi      -32,%1,%1\n"
#else
		" ldi       31,%1\n"
#endif
		" extru,<>  %0,31,16,%%r0\n"
		" extru,TR  %0,15,16,%0\n"	/* xxxx0000 -> 0000xxxx */
		" addi      -16,%1,%1\n"
		" extru,<>  %0,31,8,%%r0\n"
		" extru,TR  %0,23,8,%0\n"	/* 0000xx00 -> 000000xx */
		" addi      -8,%1,%1\n"
		" extru,<>  %0,31,4,%%r0\n"
		" extru,TR  %0,27,4,%0\n"	/* 000000x0 -> 0000000x */
		" addi      -4,%1,%1\n"
		" extru,<>  %0,31,2,%%r0\n"
		" extru,TR  %0,29,2,%0\n"	/* 0000000y, 1100b -> 0011b */
		" addi      -2,%1,%1\n"
		" extru,=   %0,31,1,%%r0\n"	/* check last bit */
		" addi      -1,%1,%1\n"
			: "+r" (x), "=r" (ret) );
	return ret;
}

#include <asm-generic/bitops/ffz.h>

/*
 * ffs: find first bit set. returns 1 to BITS_PER_LONG or 0 (if none set)
 * This is defined the same way as the libc and compiler builtin
 * ffs routines, therefore differs in spirit from the above ffz (man ffs).
 */
static __inline__ int ffs(int x)
{
	return x ? (__ffs((unsigned long)x) + 1) : 0;
}

/*
 * fls: find last (most significant) bit set.
 * fls(0) = 0, fls(1) = 1, fls(0x80000000) = 32.
 */

static __inline__ int fls(int x)
{
	int ret;
	if (!x)
		return 0;

	/* Mirror image of __ffs: shift the value left (zdep) while
	 * counting up, again branch-free via nullification. */
	__asm__(
	"	ldi		1,%1\n"
	"	extru,<>	%0,15,16,%%r0\n"
	"	zdep,TR		%0,15,16,%0\n"		/* xxxx0000 */
	"	addi		16,%1,%1\n"
	"	extru,<>	%0,7,8,%%r0\n"
	"	zdep,TR		%0,23,24,%0\n"		/* xx000000 */
	"	addi		8,%1,%1\n"
	"	extru,<>	%0,3,4,%%r0\n"
	"	zdep,TR		%0,27,28,%0\n"		/* x0000000 */
	"	addi		4,%1,%1\n"
	"	extru,<>	%0,1,2,%%r0\n"
	"	zdep,TR		%0,29,30,%0\n"		/* y0000000 (y&3 = 0) */
	"	addi		2,%1,%1\n"
	"	extru,=		%0,0,1,%%r0\n"
	"	addi		1,%1,%1\n"		/* if y & 8, add 1 */
		: "+r" (x), "=r" (ret) );

	return ret;
}

#include <asm-generic/bitops/fls64.h>
#include <asm-generic/bitops/hweight.h>
#include <asm-generic/bitops/sched.h>

#endif /* __KERNEL__ */

#include <asm-generic/bitops/find.h>

#ifdef __KERNEL__

#include <asm-generic/bitops/ext2-non-atomic.h>

/* '3' is log2(bits per byte): (sizeof(long)-1) << 3 is the bit offset
 * of the highest-addressed byte within an unsigned long. */
#define LE_BYTE_ADDR ((sizeof(unsigned long) - 1) << 3)

/* XOR-ing nr with LE_BYTE_ADDR remaps the bit number between byte
 * orders -- presumably converting ext2's little-endian bitmap layout
 * to PA-RISC's big-endian longs (NOTE(review): verify against
 * ext2-non-atomic.h). */
#define ext2_set_bit_atomic(l,nr,addr) \
		test_and_set_bit((nr)   ^ LE_BYTE_ADDR, (unsigned long *)addr)
#define ext2_clear_bit_atomic(l,nr,addr) \
		test_and_clear_bit( (nr) ^ LE_BYTE_ADDR, (unsigned long *)addr)

#endif	/* __KERNEL__ */

#include <asm-generic/bitops/minix-le.h>

#endif /* _PARISC_BITOPS_H */