/* $Id: bitops.h,v 1.1.1.1 2008/10/15 03:29:18 james26_jang Exp $
 * bitops.h: Bit string operations on the V9.
 *
 * Copyright 1996, 1997 David S. Miller (davem@caip.rutgers.edu)
 */

#ifndef _SPARC64_BITOPS_H
#define _SPARC64_BITOPS_H

#include <asm/byteorder.h>

/* Atomic bit operations, implemented out of line (in assembly elsewhere
 * in the tree -- NOTE(review): atomicity is implied by the naming and the
 * non-atomic variants below; confirm against the .S implementation).
 * Bitmaps are arrays of 64-bit words: bit 'nr' lives in word nr/64 at
 * bit position nr%64.  Each helper returns the previous value of the
 * addressed bit (non-zero if it was set).
 */
extern long ___test_and_set_bit(unsigned long nr, volatile void *addr);
extern long ___test_and_clear_bit(unsigned long nr, volatile void *addr);
extern long ___test_and_change_bit(unsigned long nr, volatile void *addr);

/* Boolean-returning wrappers, and value-discarding forms for callers that
 * do not care about the old bit value.
 */
#define test_and_set_bit(nr,addr)	({___test_and_set_bit(nr,addr)!=0;})
#define test_and_clear_bit(nr,addr)	({___test_and_clear_bit(nr,addr)!=0;})
#define test_and_change_bit(nr,addr)	({___test_and_change_bit(nr,addr)!=0;})
#define set_bit(nr,addr)		((void)___test_and_set_bit(nr,addr))
#define clear_bit(nr,addr)		((void)___test_and_clear_bit(nr,addr))
#define change_bit(nr,addr)		((void)___test_and_change_bit(nr,addr))

/* "non-atomic" versions...
 * Plain read-modify-write on the containing 64-bit word; safe only when
 * the caller guarantees exclusive access to the bitmap.
 */
#define __set_bit(X,Y)					\
do {	unsigned long __nr = (X);			\
	long *__m = ((long *) (Y)) + (__nr >> 6);	\
	*__m |= (1UL << (__nr & 63));			\
} while (0)
#define __clear_bit(X,Y)				\
do {	unsigned long __nr = (X);			\
	long *__m = ((long *) (Y)) + (__nr >> 6);	\
	*__m &= ~(1UL << (__nr & 63));			\
} while (0)
#define __change_bit(X,Y)				\
do {	unsigned long __nr = (X);			\
	long *__m = ((long *) (Y)) + (__nr >> 6);	\
	*__m ^= (1UL << (__nr & 63));			\
} while (0)
#define __test_and_set_bit(X,Y)				\
({	unsigned long __nr = (X);			\
	long *__m = ((long *) (Y)) + (__nr >> 6);	\
	long __old = *__m;				\
	long __mask = (1UL << (__nr & 63));		\
	*__m = (__old | __mask);			\
	((__old & __mask) != 0);			\
})
#define __test_and_clear_bit(X,Y)			\
({	unsigned long __nr = (X);			\
	long *__m = ((long *) (Y)) + (__nr >> 6);	\
	long __old = *__m;				\
	long __mask = (1UL << (__nr & 63));		\
	*__m = (__old & ~__mask);			\
	((__old & __mask) != 0);			\
})
#define __test_and_change_bit(X,Y)			\
({	unsigned long __nr = (X);			\
	long *__m = ((long *) (Y)) + (__nr >> 6);	\
	long __old = *__m;				\
	long __mask = (1UL << (__nr & 63));		\
	*__m = (__old ^ __mask);			\
	((__old & __mask) != 0);			\
})

/* clear_bit() variants above need no extra memory barriers here. */
#define smp_mb__before_clear_bit()	do { } while(0)
#define smp_mb__after_clear_bit()	do { } while(0)

/* Return non-zero iff bit 'nr' is set in the bitmap at 'addr'. */
extern __inline__ int test_bit(int nr, __const__ void *addr)
{
	return (1UL & (((__const__ long *) addr)[nr >> 6] >> (nr & 63))) != 0UL;
}

/* The easy/cheese version for now. */
/* ffz: find the index (0..63) of the first zero bit in 'word'.
 * Result is undefined (64 on the generic path) when word == ~0UL.
 */
extern __inline__ unsigned long ffz(unsigned long word)
{
	unsigned long result;

#ifdef ULTRA_HAS_POPULATION_COUNT	/* Thanks for nothing Sun... */
	/* popc(word ^ (word | -word)) counts the trailing ones, i.e. the
	 * position of the first zero; the brz guards the word==0 case.
	 */
	__asm__ __volatile__(
"	brz,pn	%0, 1f\n"
"	 neg	%0, %%g1\n"
"	xnor	%0, %%g1, %%g2\n"
"	popc	%%g2, %0\n"
"1:	" : "=&r" (result)
	  : "0" (word)
	  : "g1", "g2");
#else
	/* Count trailing one bits; the count is the first zero's index. */
	result = 0;
	while(word & 1) {
		result++;
		word >>= 1;
	}
#endif
	return result;
}

#ifdef __KERNEL__

/*
 * ffs: find first bit set. This is defined the same way as
 * the libc and compiler builtin ffs routines, therefore
 * differs in spirit from the above ffz (man ffs).
 */

#define ffs(x) generic_ffs(x)

/*
 * hweightN: returns the hamming weight (i.e. the number
 * of bits set) of an N-bit word
 */

#ifdef ULTRA_HAS_POPULATION_COUNT

extern __inline__ unsigned int hweight32(unsigned int w)
{
	unsigned int res;

	__asm__ ("popc %1,%0" : "=r" (res) : "r" (w & 0xffffffff));
	return res;
}

extern __inline__ unsigned int hweight16(unsigned int w)
{
	unsigned int res;

	__asm__ ("popc %1,%0" : "=r" (res) : "r" (w & 0xffff));
	return res;
}

extern __inline__ unsigned int hweight8(unsigned int w)
{
	unsigned int res;

	__asm__ ("popc %1,%0" : "=r" (res) : "r" (w & 0xff));
	return res;
}

#else

#define hweight32(x) generic_hweight32(x)
#define hweight16(x) generic_hweight16(x)
#define hweight8(x) generic_hweight8(x)

#endif
#endif /* __KERNEL__ */

/* find_next_zero_bit() finds the first zero bit in a bit string of length
 * 'size' bits, starting the search at bit 'offset'. This is largely based
 * on Linus's ALPHA routines, which are pretty portable BTW.
 * Returns 'size' if no zero bit is found in [offset, size).
 */
extern __inline__ unsigned long find_next_zero_bit(void *addr, unsigned long size, unsigned long offset)
{
	unsigned long *p = ((unsigned long *) addr) + (offset >> 6);
	unsigned long result = offset & ~63UL;
	unsigned long tmp;

	if (offset >= size)
		return size;
	size -= result;
	offset &= 63UL;
	if (offset) {
		/* Partial first word: mask off the bits below 'offset' by
		 * forcing them to one so they cannot match as "zero".
		 */
		tmp = *(p++);
		tmp |= ~0UL >> (64-offset);
		if (size < 64)
			goto found_first;
		if (~tmp)
			goto found_middle;
		size -= 64;
		result += 64;
	}
	/* Scan whole 64-bit words. */
	while (size & ~63UL) {
		if (~(tmp = *(p++)))
			goto found_middle;
		result += 64;
		size -= 64;
	}
	if (!size)
		return result;
	tmp = *p;

found_first:
	/* Partial last word: force the bits at and above 'size' to one. */
	tmp |= ~0UL << size;
	if (tmp == ~0UL)        /* Are any bits zero? */
		return result + size; /* Nope. */
found_middle:
	return result + ffz(tmp);
}

#define find_first_zero_bit(addr, size) \
	find_next_zero_bit((addr), (size), 0)

/* Little-endian bitmap operations (used for on-disk formats such as
 * ext2).  Out-of-line atomic helpers, as above.
 */
extern long ___test_and_set_le_bit(int nr, volatile void *addr);
extern long ___test_and_clear_le_bit(int nr, volatile void *addr);

#define test_and_set_le_bit(nr,addr)	({___test_and_set_le_bit(nr,addr)!=0;})
#define test_and_clear_le_bit(nr,addr)	({___test_and_clear_le_bit(nr,addr)!=0;})
#define set_le_bit(nr,addr)		((void)___test_and_set_le_bit(nr,addr))
#define clear_le_bit(nr,addr)		((void)___test_and_clear_le_bit(nr,addr))

/* Test bit 'nr' of a little-endian bitmap: byte nr/8, bit nr%8. */
extern __inline__ int test_le_bit(int nr, __const__ void * addr)
{
	int			mask;
	__const__ unsigned char	*ADDR = (__const__ unsigned char *) addr;

	ADDR += nr >> 3;
	mask = 1 << (nr & 0x07);
	return ((mask & *ADDR) != 0);
}

#define find_first_zero_le_bit(addr, size) \
	find_next_zero_le_bit((addr), (size), 0)

/* Same algorithm as find_next_zero_bit(), but each 64-bit word is loaded
 * through __swab64p() so the search runs over the little-endian view of
 * the bitmap.
 */
extern __inline__ unsigned long find_next_zero_le_bit(void *addr, unsigned long size, unsigned long offset)
{
	unsigned long *p = ((unsigned long *) addr) + (offset >> 6);
	unsigned long result = offset & ~63UL;
	unsigned long tmp;

	if (offset >= size)
		return size;
	size -= result;
	offset &= 63UL;
	if(offset) {
		tmp = __swab64p(p++);
		tmp |= (~0UL >> (64-offset));
		if(size < 64)
			goto found_first;
		if(~tmp)
			goto found_middle;
		size -= 64;
		result += 64;
	}
	while(size & ~63UL) {	/* was 'size & ~63': use UL for consistency */
		if(~(tmp = __swab64p(p++)))
			goto found_middle;
		result += 64;
		size -= 64;
	}
	if(!size)
		return result;
	tmp = __swab64p(p);
found_first:
	tmp |= (~0UL << size);
	if (tmp == ~0UL)        /* Are any bits zero? */
		return result + size; /* Nope. */
found_middle:
	return result + ffz(tmp);
}

#ifdef __KERNEL__

/* ext2 stores its bitmaps little-endian on disk. */
#define ext2_set_bit			test_and_set_le_bit
#define ext2_clear_bit			test_and_clear_le_bit
#define ext2_test_bit			test_le_bit
#define ext2_find_first_zero_bit	find_first_zero_le_bit
#define ext2_find_next_zero_bit		find_next_zero_le_bit

/* Bitmap functions for the minix filesystem (native bit order). */
#define minix_test_and_set_bit(nr,addr)		test_and_set_bit(nr,addr)
#define minix_set_bit(nr,addr)			set_bit(nr,addr)
#define minix_test_and_clear_bit(nr,addr)	test_and_clear_bit(nr,addr)
#define minix_test_bit(nr,addr)			test_bit(nr,addr)
#define minix_find_first_zero_bit(addr,size)	find_first_zero_bit(addr,size)

#endif /* __KERNEL__ */

#endif /* defined(_SPARC64_BITOPS_H) */