#ifndef _I386_BITOPS_H
#define _I386_BITOPS_H

/*
 * Copyright 1992, Linus Torvalds.
 */

/*
 * These have to be done with inline assembly: that way the bit-setting
 * is guaranteed to be atomic.  The test-and-modify operations return
 * 0 if the bit was cleared before the operation and != 0 if it was set.
 *
 * bit 0 is the LSB of addr; bit 32 is the LSB of (addr+1).
 */

#include <asm-generic/bitops/fls.h>
#include <asm-generic/bitops/__fls.h>
#include <asm-generic/bitops/fls64.h>

#ifdef CONFIG_SMP
#define LOCK_PREFIX "lock ; "
#else
#define LOCK_PREFIX ""
#endif

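/*
 * ADDR re-types the untyped 'addr' parameter of the functions below as a
 * volatile long lvalue, so it can serve as the memory operand of the
 * inline-assembly constraints.
 */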
#define ADDR (*(volatile long *) addr)

/**
 * set_bit - Atomically set a bit in memory
 * @nr: the bit to set
 * @addr: the address to start counting from
 *
 * This function is atomic and may not be reordered.  See __set_bit()
 * if you do not require the atomic guarantees.
 * Note that @nr may be almost arbitrarily large; this function is not
 * restricted to acting on a single-word quantity.
 */
static __inline__ void set_bit(int nr, volatile void * addr)
{
	__asm__ __volatile__( LOCK_PREFIX
		"btsl %1,%0"
		:"=m" (ADDR)
		:"Ir" (nr));
}
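
/*
 * Illustrative sketch (hypothetical bitmap): because @nr may span several
 * words, setting bit 35 of a two-word bitmap lands on bit 3 of the second
 * word:
 *
 *	static unsigned long inuse_map[2];
 *
 *	set_bit(35, inuse_map);		(i.e. bit 3 of inuse_map[1])
 */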

/**
 * __set_bit - Set a bit in memory
 * @nr: the bit to set
 * @addr: the address to start counting from
 *
 * Unlike set_bit(), this function is non-atomic and may be reordered.
 * If it's called on the same region of memory simultaneously, the effect
 * may be that only one operation succeeds.
 */
static __inline__ void __set_bit(int nr, volatile void * addr)
{
	__asm__(
		"btsl %1,%0"
		:"=m" (ADDR)
		:"Ir" (nr));
}

#define PLATFORM__SET_BIT

/**
 * clear_bit - Clears a bit in memory
 * @nr: Bit to clear
 * @addr: Address to start counting from
 *
 * clear_bit() is atomic and may not be reordered.  However, it does
 * not contain a memory barrier, so if it is used for locking purposes,
 * you should call smp_mb__before_clear_bit() and/or smp_mb__after_clear_bit()
 * in order to ensure changes are visible on other processors.
 */
static __inline__ void clear_bit(int nr, volatile void * addr)
{
	__asm__ __volatile__( LOCK_PREFIX
		"btrl %1,%0"
		:"=m" (ADDR)
		:"Ir" (nr));
}
#define smp_mb__before_clear_bit()	barrier()
#define smp_mb__after_clear_bit()	barrier()
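
/*
 * A sketch of the unlock pattern the comment above describes, assuming a
 * hypothetical lock word whose bit 0 is the "locked" flag:
 *
 *	smp_mb__before_clear_bit();	(make prior stores visible first)
 *	clear_bit(0, &lock_word);
 */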

/**
 * __change_bit - Toggle a bit in memory
 * @nr: the bit to toggle
 * @addr: the address to start counting from
 *
 * Unlike change_bit(), this function is non-atomic and may be reordered.
 * If it's called on the same region of memory simultaneously, the effect
 * may be that only one operation succeeds.
 */
static __inline__ void __change_bit(int nr, volatile void * addr)
{
	__asm__ __volatile__(
		"btcl %1,%0"
		:"=m" (ADDR)
		:"Ir" (nr));
}

/**
 * change_bit - Toggle a bit in memory
 * @nr: Bit to toggle
 * @addr: Address to start counting from
 *
 * change_bit() is atomic and may not be reordered.
 * Note that @nr may be almost arbitrarily large; this function is not
 * restricted to acting on a single-word quantity.
 */
static __inline__ void change_bit(int nr, volatile void * addr)
{
	__asm__ __volatile__( LOCK_PREFIX
		"btcl %1,%0"
		:"=m" (ADDR)
		:"Ir" (nr));
}

/**
 * test_and_set_bit - Set a bit and return its old value
 * @nr: Bit to set
 * @addr: Address to count from
 *
 * This operation is atomic and cannot be reordered.
 * It also implies a memory barrier.
 */
static __inline__ int test_and_set_bit(int nr, volatile void * addr)
{
	int oldbit;

	__asm__ __volatile__( LOCK_PREFIX
		"btsl %2,%1\n\tsbbl %0,%0"
		:"=r" (oldbit),"=m" (ADDR)
		:"Ir" (nr) : "memory");
	return oldbit;
}
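
/*
 * The "sbbl %0,%0" above subtracts the register from itself with borrow,
 * turning the carry flag (which btsl loaded with the old bit value) into
 * 0 or -1.  A sketch of a typical busy-wait acquire, again assuming a
 * hypothetical lock word:
 *
 *	while (test_and_set_bit(0, &lock_word))
 *		;	(spin until the bit was previously clear)
 */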

/**
 * __test_and_set_bit - Set a bit and return its old value
 * @nr: Bit to set
 * @addr: Address to count from
 *
 * This operation is non-atomic and can be reordered.
 * If two instances of this operation race, one can appear to succeed
 * but actually fail.  You must protect multiple accesses with a lock.
 */
static __inline__ int __test_and_set_bit(int nr, volatile void * addr)
{
	int oldbit;

	__asm__(
		"btsl %2,%1\n\tsbbl %0,%0"
		:"=r" (oldbit),"=m" (ADDR)
		:"Ir" (nr));
	return oldbit;
}

/**
 * test_and_clear_bit - Clear a bit and return its old value
 * @nr: Bit to clear
 * @addr: Address to count from
 *
 * This operation is atomic and cannot be reordered.
 * It also implies a memory barrier.
 */
static __inline__ int test_and_clear_bit(int nr, volatile void * addr)
{
	int oldbit;

	__asm__ __volatile__( LOCK_PREFIX
		"btrl %2,%1\n\tsbbl %0,%0"
		:"=r" (oldbit),"=m" (ADDR)
		:"Ir" (nr) : "memory");
	return oldbit;
}

/**
 * __test_and_clear_bit - Clear a bit and return its old value
 * @nr: Bit to clear
 * @addr: Address to count from
 *
 * This operation is non-atomic and can be reordered.
 * If two instances of this operation race, one can appear to succeed
 * but actually fail.  You must protect multiple accesses with a lock.
 */
static __inline__ int __test_and_clear_bit(int nr, volatile void * addr)
{
	int oldbit;

	__asm__(
		"btrl %2,%1\n\tsbbl %0,%0"
		:"=r" (oldbit),"=m" (ADDR)
		:"Ir" (nr));
	return oldbit;
}
197
198/* WARNING: non atomic and it can be reordered! */
199static __inline__ int __test_and_change_bit(int nr, volatile void * addr)
200{
201	int oldbit;
202
203	__asm__ __volatile__(
204		"btcl %2,%1\n\tsbbl %0,%0"
205		:"=r" (oldbit),"=m" (ADDR)
206		:"Ir" (nr) : "memory");
207	return oldbit;
208}

/**
 * test_and_change_bit - Toggle a bit and return its old value
 * @nr: Bit to toggle
 * @addr: Address to count from
 *
 * This operation is atomic and cannot be reordered.
 * It also implies a memory barrier.
 */
static __inline__ int test_and_change_bit(int nr, volatile void * addr)
{
	int oldbit;

	__asm__ __volatile__( LOCK_PREFIX
		"btcl %2,%1\n\tsbbl %0,%0"
		:"=r" (oldbit),"=m" (ADDR)
		:"Ir" (nr) : "memory");
	return oldbit;
}

#if 0 /* Fool kernel-doc since it doesn't do macros yet */
/**
 * test_bit - Determine whether a bit is set
 * @nr: bit number to test
 * @addr: Address to start counting from
 */
static int test_bit(int nr, const volatile void * addr);
#endif

static __inline__ int constant_test_bit(int nr, const volatile void * addr)
{
	return ((1UL << (nr & 31)) & (((const volatile unsigned int *) addr)[nr >> 5])) != 0;
}

static __inline__ int variable_test_bit(int nr, volatile void * addr)
{
	int oldbit;

	__asm__ __volatile__(
		"btl %2,%1\n\tsbbl %0,%0"
		:"=r" (oldbit)
		:"m" (ADDR),"Ir" (nr));
	return oldbit;
}

#define test_bit(nr,addr) \
(__builtin_constant_p(nr) ? \
 constant_test_bit((nr),(addr)) : \
 variable_test_bit((nr),(addr)))
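
/*
 * When the bit number is a compile-time constant, __builtin_constant_p()
 * steers test_bit() to the plain C constant_test_bit(), which gcc can fold
 * into a single test against an immediate mask; a variable bit number falls
 * back to the btl-based variable_test_bit().
 */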

/**
 * find_first_zero_bit - find the first zero bit in a memory region
 * @addr: The address to start the search at
 * @size: The maximum size to search
 *
 * Returns the bit-number of the first zero bit, not the number of the byte
 * containing a bit.
 */
static __inline__ int find_first_zero_bit(void * addr, unsigned size)
{
	int d0, d1, d2;
	int res;

	if (!size)
		return 0;
	/* This looks at memory. Mark it volatile to tell gcc not to move it around */
	__asm__ __volatile__(
		"movl $-1,%%eax\n\t"
		"xorl %%edx,%%edx\n\t"
		"repe; scasl\n\t"
		"je 1f\n\t"
		"xorl -4(%%edi),%%eax\n\t"
		"subl $4,%%edi\n\t"
		"bsfl %%eax,%%edx\n"
		"1:\tsubl %%ebx,%%edi\n\t"
		"shll $3,%%edi\n\t"
		"addl %%edi,%%edx"
		:"=d" (res), "=&c" (d0), "=&D" (d1), "=&a" (d2)
		:"1" ((size + 31) >> 5), "2" (addr), "b" (addr));
	return res;
}
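
/*
 * How the scan above works: %eax holds all ones and "repe; scasl" skips
 * dwords that are entirely ones.  On a mismatch, the offending dword is
 * XORed with %eax so that "bsfl" finds its first zero as the first set
 * bit; the tail arithmetic converts the dword position (%edi minus the
 * base address kept in %ebx) back into a bit number.
 */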

/**
 * find_next_zero_bit - find the next zero bit in a memory region
 * @addr: The address to base the search on
 * @size: The maximum size to search
 * @offset: The bit number to start searching at
 */
static __inline__ int find_next_zero_bit (void * addr, int size, int offset)
{
	unsigned long * p = ((unsigned long *) addr) + (offset >> 5);
	int set = 0, bit = offset & 31, res;

	if (bit) {
		/*
		 * Look for zero in the first word
		 */
		__asm__("bsfl %1,%0\n\t"
			"jne 1f\n\t"
			"movl $32, %0\n"
			"1:"
			: "=r" (set)
			: "r" (~(*p >> bit)));
		if (set < (32 - bit))
			return set + offset;
		set = 32 - bit;
		p++;
	}
	/*
	 * No zero yet, search remaining full words for a zero
	 */
	res = find_first_zero_bit (p, size - 32 * (p - (unsigned long *) addr));
	return (offset + set + res);
}
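
/*
 * A sketch of a simple bitmap allocator built on these helpers (the bitmap
 * and MAP_BITS are hypothetical):
 *
 *	int nr = find_first_zero_bit(map, MAP_BITS);
 *	while (nr < MAP_BITS && test_and_set_bit(nr, map))
 *		nr = find_next_zero_bit(map, MAP_BITS, nr + 1);
 *	(on exit, nr is an allocated bit, or >= MAP_BITS if the map was full)
 */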

/**
 * ffz - find first zero in word.
 * @word: The word to search
 *
 * Undefined if no zero exists, so code should check against ~0UL first.
 */
static __inline__ unsigned long ffz(unsigned long word)
{
	__asm__("bsfl %1,%0"
		:"=r" (word)
		:"r" (~word));
	return word;
}
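
/*
 * For example, ffz(0) == 0 and ffz(0x0000ffff) == 16; ffz(~0UL) is the
 * undefined case the comment above warns about.
 */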

#ifdef __KERNEL__

/**
 * __ffs - find first set bit in word
 * @word: The word to search
 *
 * Undefined if no bit exists, so code should check against 0 first.
 */
static inline unsigned long __ffs(unsigned long word)
{
	__asm__("rep; bsf %1,%0"
		: "=r" (word)
		: "rm" (word));
	return word;
}
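
/*
 * Note on the "rep" prefix above: older CPUs ignore it, while on newer ones
 * the same encoding decodes as tzcnt, which returns the identical answer
 * for any non-zero input (presumably the intent of using it here).
 */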

/**
 * ffs - find first bit set
 * @x: the word to search
 *
 * This is defined the same way as the libc and compiler builtin ffs
 * routines, and therefore differs in spirit from the ffz() above
 * (man ffs).
 */
static __inline__ int ffs(int x)
{
	int r;

	__asm__("bsfl %1,%0\n\t"
		"jnz 1f\n\t"
		"movl $-1,%0\n"
		"1:" : "=r" (r) : "rm" (x));

	return r+1;
}
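
/*
 * So ffs(0) == 0 (bsfl leaves ZF set, the movl forces -1, and -1 + 1 == 0),
 * while e.g. ffs(0x10) == 5; note the off-by-one relative to __ffs(), which
 * returns 4 for the same input.
 */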
#define PLATFORM_FFS

static inline int __ilog2(unsigned int x)
{
	return generic_fls(x) - 1;
}

/**
 * hweightN - returns the hamming weight of an N-bit word
 * @x: the word to weigh
 *
 * The Hamming weight of a number is the total number of bits set in it.
 */

#define hweight32(x) generic_hweight32(x)
#define hweight16(x) generic_hweight16(x)
#define hweight8(x) generic_hweight8(x)
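
/*
 * For example, hweight8(0x5A) == 4, since 0x5A == 01011010 in binary has
 * four bits set.
 */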
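/*
 * ext2 keeps its bitmaps in little-endian byte order; since i386 is itself
 * little-endian, the generic bit operations can be applied to them directly.
 */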
#define ext2_set_bit                 __test_and_set_bit
#define ext2_clear_bit               __test_and_clear_bit
#define ext2_test_bit                test_bit
#define ext2_find_first_zero_bit     find_first_zero_bit
#define ext2_find_next_zero_bit      find_next_zero_bit

/* Bitmap functions for the minix filesystem.  */
#define minix_test_and_set_bit(nr,addr) __test_and_set_bit(nr,addr)
#define minix_set_bit(nr,addr) __set_bit(nr,addr)
#define minix_test_and_clear_bit(nr,addr) __test_and_clear_bit(nr,addr)
#define minix_test_bit(nr,addr) test_bit(nr,addr)
#define minix_find_first_zero_bit(addr,size) find_first_zero_bit(addr,size)

#endif /* __KERNEL__ */

#endif /* _I386_BITOPS_H */