/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (c) 1994 - 1997, 1999, 2000  Ralf Baechle (ralf@gnu.org)
 * Copyright (c) 2000  Silicon Graphics, Inc.
 */
#ifndef _ASM_BITOPS_H
#define _ASM_BITOPS_H

#include <linux/config.h>
#include <linux/types.h>
#include <asm/byteorder.h>		/* sigh ... */

#if _MIPS_SZLONG == 32
#define SZLONG_LOG 5
#define SZLONG_MASK 31UL
#elif _MIPS_SZLONG == 64
#define SZLONG_LOG 6
#define SZLONG_MASK 63UL
#endif

#ifdef __KERNEL__

#include <asm/sgidefs.h>
#include <asm/system.h>

/*
 * clear_bit() doesn't provide any barrier for the compiler.
 */
#define smp_mb__before_clear_bit()	smp_mb()
#define smp_mb__after_clear_bit()	smp_mb()

/*
 * Only disable interrupts for kernel-mode code; user-mode code that
 * dares to include kernel headers gets empty definitions so it still
 * builds.
 */
#define __bi_flags			unsigned long flags
#define __bi_cli()			local_irq_disable()
#define __bi_save_flags(x)		local_save_flags(x)
#define __bi_local_irq_save(x)		local_irq_save(x)
#define __bi_local_irq_restore(x)	local_irq_restore(x)
#else
#define __bi_flags
#define __bi_cli()
#define __bi_save_flags(x)
#define __bi_local_irq_save(x)
#define __bi_local_irq_restore(x)
#endif /* __KERNEL__ */

#ifdef CONFIG_CPU_HAS_LLSC

/*
 * These functions, for MIPS ISA level 2 and above, use ll/sc and are
 * therefore interrupt and SMP safe without having to disable interrupts.
 */

/*
 * set_bit - Atomically set a bit in memory
 * @nr: the bit to set
 * @addr: the address to start counting from
 *
 * This function is atomic and may not be reordered.  See __set_bit()
 * if you do not require the atomic guarantees.
 * Note that @nr may be almost arbitrarily large; this function is not
 * restricted to acting on a single-word quantity.
 */
static __inline__ void set_bit(int nr, volatile void *addr)
{
	unsigned long *m = ((unsigned long *) addr) + (nr >> 5);
	unsigned long temp;

	__asm__ __volatile__(
		"1:\tll\t%0, %1\t\t# set_bit\n\t"
		"or\t%0, %2\n\t"
		"sc\t%0, %1\n\t"
		"beqz\t%0, 1b"
		: "=&r" (temp), "=m" (*m)
		: "ir" (1UL << (nr & 0x1f)), "m" (*m));
}
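
/*
 * Illustrative sketch only (not part of the original header); the flag
 * word and bit name below are hypothetical.  Typical usage treats an
 * unsigned long (or array of them) as a bitmap:
 *
 *	static unsigned long dev_flags;
 *	#define DEV_F_RUNNING	0
 *
 *	set_bit(DEV_F_RUNNING, &dev_flags);	// atomic, SMP-safe
 *	__set_bit(DEV_F_RUNNING, &dev_flags);	// cheaper, caller must serialize
 */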

/*
 * __set_bit - Set a bit in memory
 * @nr: the bit to set
 * @addr: the address to start counting from
 *
 * Unlike set_bit(), this function is non-atomic and may be reordered.
 * If it's called on the same region of memory simultaneously, the effect
 * may be that only one operation succeeds.
 */
static __inline__ void __set_bit(int nr, volatile void * addr)
{
	unsigned long * m = ((unsigned long *) addr) + (nr >> 5);

	*m |= 1UL << (nr & 31);
}

/*
 * clear_bit - Clears a bit in memory
 * @nr: Bit to clear
 * @addr: Address to start counting from
 *
 * clear_bit() is atomic and may not be reordered.  However, it does
 * not contain a memory barrier, so if it is used for locking purposes,
 * you should call smp_mb__before_clear_bit() and/or smp_mb__after_clear_bit()
 * in order to ensure changes are visible on other processors.
 */
static __inline__ void clear_bit(int nr, volatile void *addr)
{
	unsigned long *m = ((unsigned long *) addr) + (nr >> 5);
	unsigned long temp;

	__asm__ __volatile__(
		"1:\tll\t%0, %1\t\t# clear_bit\n\t"
		"and\t%0, %2\n\t"
		"sc\t%0, %1\n\t"
		"beqz\t%0, 1b\n\t"
		: "=&r" (temp), "=m" (*m)
		: "ir" (~(1UL << (nr & 0x1f))), "m" (*m));
}
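
/*
 * Illustrative sketch only (not part of the original header): using
 * clear_bit() to release a lock-like flag, with the explicit barrier the
 * comment above asks for.  The names lock_word and LOCK_BIT are
 * hypothetical.
 *
 *	// ... critical section ...
 *	smp_mb__before_clear_bit();	// order prior stores before the release
 *	clear_bit(LOCK_BIT, &lock_word);
 */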

/*
 * change_bit - Toggle a bit in memory
 * @nr: Bit to change
 * @addr: Address to start counting from
 *
 * change_bit() is atomic and may not be reordered.
 * Note that @nr may be almost arbitrarily large; this function is not
 * restricted to acting on a single-word quantity.
 */
static __inline__ void change_bit(int nr, volatile void *addr)
{
	unsigned long *m = ((unsigned long *) addr) + (nr >> 5);
	unsigned long temp;

	__asm__ __volatile__(
		"1:\tll\t%0, %1\t\t# change_bit\n\t"
		"xor\t%0, %2\n\t"
		"sc\t%0, %1\n\t"
		"beqz\t%0, 1b"
		: "=&r" (temp), "=m" (*m)
		: "ir" (1UL << (nr & 0x1f)), "m" (*m));
}

/*
 * __change_bit - Toggle a bit in memory
 * @nr: the bit to change
 * @addr: the address to start counting from
 *
 * Unlike change_bit(), this function is non-atomic and may be reordered.
 * If it's called on the same region of memory simultaneously, the effect
 * may be that only one operation succeeds.
 */
static __inline__ void __change_bit(int nr, volatile void * addr)
{
	unsigned long * m = ((unsigned long *) addr) + (nr >> 5);

	*m ^= 1UL << (nr & 31);
}

/*
 * test_and_set_bit - Set a bit and return its old value
 * @nr: Bit to set
 * @addr: Address to count from
 *
 * This operation is atomic and cannot be reordered.
 * It also implies a memory barrier.
 */
static __inline__ int test_and_set_bit(int nr, volatile void *addr)
{
	unsigned long *m = ((unsigned long *) addr) + (nr >> 5);
	unsigned long temp;
	int res;

	__asm__ __volatile__(
		".set\tnoreorder\t\t# test_and_set_bit\n"
		"1:\tll\t%0, %1\n\t"
		"or\t%2, %0, %3\n\t"
		"sc\t%2, %1\n\t"
		"beqz\t%2, 1b\n\t"
		" and\t%2, %0, %3\n\t"
#ifdef CONFIG_SMP
		"sync\n\t"
#endif
		".set\treorder"
		: "=&r" (temp), "=m" (*m), "=&r" (res)
		: "r" (1UL << (nr & 0x1f)), "m" (*m)
		: "memory");

	return res != 0;
}
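
/*
 * Illustrative sketch only (not part of the original header): a minimal
 * busy-wait lock built on test_and_set_bit()/clear_bit().  The names
 * my_lock and MY_LOCK_BIT are hypothetical; real code should prefer the
 * generic spinlock primitives.
 *
 *	while (test_and_set_bit(MY_LOCK_BIT, &my_lock))
 *		;				// spin until the old value was 0
 *	// ... critical section ...
 *	smp_mb__before_clear_bit();
 *	clear_bit(MY_LOCK_BIT, &my_lock);
 */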

/*
 * __test_and_set_bit - Set a bit and return its old value
 * @nr: Bit to set
 * @addr: Address to count from
 *
 * This operation is non-atomic and can be reordered.
 * If two instances of this operation race, one can appear to succeed
 * but actually fail.  You must protect multiple accesses with a lock.
 */
static __inline__ int __test_and_set_bit(int nr, volatile void * addr)
{
	volatile unsigned long *a = addr;
	unsigned long mask;
	int retval;

	a += nr >> 5;
	mask = 1 << (nr & 0x1f);
	retval = (mask & *a) != 0;
	*a |= mask;

	return retval;
}

/*
 * test_and_clear_bit - Clear a bit and return its old value
 * @nr: Bit to clear
 * @addr: Address to count from
 *
 * This operation is atomic and cannot be reordered.
 * It also implies a memory barrier.
 */
static __inline__ int test_and_clear_bit(int nr, volatile void *addr)
{
	unsigned long *m = ((unsigned long *) addr) + (nr >> 5);
	unsigned long temp, res;

	__asm__ __volatile__(
		".set\tnoreorder\t\t# test_and_clear_bit\n"
		"1:\tll\t%0, %1\n\t"
		"or\t%2, %0, %3\n\t"
		"xor\t%2, %3\n\t"
		"sc\t%2, %1\n\t"
		"beqz\t%2, 1b\n\t"
		" and\t%2, %0, %3\n\t"
#ifdef CONFIG_SMP
		"sync\n\t"
#endif
		".set\treorder"
		: "=&r" (temp), "=m" (*m), "=&r" (res)
		: "r" (1UL << (nr & 0x1f)), "m" (*m)
		: "memory");

	return res != 0;
}

/*
 * __test_and_clear_bit - Clear a bit and return its old value
 * @nr: Bit to clear
 * @addr: Address to count from
 *
 * This operation is non-atomic and can be reordered.
 * If two instances of this operation race, one can appear to succeed
 * but actually fail.  You must protect multiple accesses with a lock.
 */
static __inline__ int __test_and_clear_bit(int nr, volatile void * addr)
{
	volatile unsigned long *a = addr;
	unsigned long mask, retval;

	a += nr >> 5;
	mask = 1 << (nr & 0x1f);
	retval = (mask & *a) != 0;
	*a &= ~mask;

	return retval;
}

/*
 * test_and_change_bit - Change a bit and return its old value
 * @nr: Bit to change
 * @addr: Address to count from
 *
 * This operation is atomic and cannot be reordered.
 * It also implies a memory barrier.
 */
static __inline__ int test_and_change_bit(int nr, volatile void *addr)
{
	unsigned long *m = ((unsigned long *) addr) + (nr >> 5);
	unsigned long temp, res;

	__asm__ __volatile__(
		".set\tnoreorder\t\t# test_and_change_bit\n"
		"1:\tll\t%0, %1\n\t"
		"xor\t%2, %0, %3\n\t"
		"sc\t%2, %1\n\t"
		"beqz\t%2, 1b\n\t"
		" and\t%2, %0, %3\n\t"
#ifdef CONFIG_SMP
		"sync\n\t"
#endif
		".set\treorder"
		: "=&r" (temp), "=m" (*m), "=&r" (res)
		: "r" (1UL << (nr & 0x1f)), "m" (*m)
		: "memory");

	return res != 0;
}

/*
 * __test_and_change_bit - Change a bit and return its old value
 * @nr: Bit to change
 * @addr: Address to count from
 *
 * This operation is non-atomic and can be reordered.
 * If two instances of this operation race, one can appear to succeed
 * but actually fail.  You must protect multiple accesses with a lock.
 */
static __inline__ int __test_and_change_bit(int nr, volatile void * addr)
{
	volatile unsigned long *a = addr;
	unsigned long mask;
	int retval;

	a += nr >> 5;
	mask = 1 << (nr & 0x1f);
	retval = (mask & *a) != 0;
	*a ^= mask;

	return retval;
}

#else /* MIPS I */

/*
 * set_bit - Atomically set a bit in memory
 * @nr: the bit to set
 * @addr: the address to start counting from
 *
 * This function is atomic and may not be reordered.  See __set_bit()
 * if you do not require the atomic guarantees.
 * Note that @nr may be almost arbitrarily large; this function is not
 * restricted to acting on a single-word quantity.
 */
static __inline__ void set_bit(int nr, volatile void * addr)
{
	volatile unsigned long *a = addr;
	unsigned long mask;
	__bi_flags;

	a += nr >> 5;
	mask = 1 << (nr & 0x1f);
	__bi_local_irq_save(flags);
	*a |= mask;
	__bi_local_irq_restore(flags);
}

/*
 * __set_bit - Set a bit in memory
 * @nr: the bit to set
 * @addr: the address to start counting from
 *
 * Unlike set_bit(), this function is non-atomic and may be reordered.
 * If it's called on the same region of memory simultaneously, the effect
 * may be that only one operation succeeds.
 */
static __inline__ void __set_bit(int nr, volatile void * addr)
{
	volatile unsigned long *a = addr;
	unsigned long mask;

	a += nr >> 5;
	mask = 1 << (nr & 0x1f);
	*a |= mask;
}

/*
 * clear_bit - Clears a bit in memory
 * @nr: Bit to clear
 * @addr: Address to start counting from
 *
 * clear_bit() is atomic and may not be reordered.  However, it does
 * not contain a memory barrier, so if it is used for locking purposes,
 * you should call smp_mb__before_clear_bit() and/or smp_mb__after_clear_bit()
 * in order to ensure changes are visible on other processors.
 */
static __inline__ void clear_bit(int nr, volatile void * addr)
{
	volatile unsigned long *a = addr;
	unsigned long mask;
	__bi_flags;

	a += nr >> 5;
	mask = 1 << (nr & 0x1f);
	__bi_local_irq_save(flags);
	*a &= ~mask;
	__bi_local_irq_restore(flags);
}

/*
 * change_bit - Toggle a bit in memory
 * @nr: Bit to change
 * @addr: Address to start counting from
 *
 * change_bit() is atomic and may not be reordered.
 * Note that @nr may be almost arbitrarily large; this function is not
 * restricted to acting on a single-word quantity.
 */
static __inline__ void change_bit(int nr, volatile void * addr)
{
	volatile unsigned long *a = addr;
	unsigned long mask;
	__bi_flags;

	a += nr >> 5;
	mask = 1 << (nr & 0x1f);
	__bi_local_irq_save(flags);
	*a ^= mask;
	__bi_local_irq_restore(flags);
}

/*
 * __change_bit - Toggle a bit in memory
 * @nr: the bit to change
 * @addr: the address to start counting from
 *
 * Unlike change_bit(), this function is non-atomic and may be reordered.
 * If it's called on the same region of memory simultaneously, the effect
 * may be that only one operation succeeds.
 */
static __inline__ void __change_bit(int nr, volatile void * addr)
{
	unsigned long * m = ((unsigned long *) addr) + (nr >> 5);

	*m ^= 1UL << (nr & 31);
}

/*
 * test_and_set_bit - Set a bit and return its old value
 * @nr: Bit to set
 * @addr: Address to count from
 *
 * This operation is atomic and cannot be reordered.
 * It also implies a memory barrier.
 */
static __inline__ int test_and_set_bit(int nr, volatile void * addr)
{
	volatile unsigned long *a = addr;
	unsigned long mask;
	int retval;
	__bi_flags;

	a += nr >> 5;
	mask = 1 << (nr & 0x1f);
	__bi_local_irq_save(flags);
	retval = (mask & *a) != 0;
	*a |= mask;
	__bi_local_irq_restore(flags);

	return retval;
}

/*
 * __test_and_set_bit - Set a bit and return its old value
 * @nr: Bit to set
 * @addr: Address to count from
 *
 * This operation is non-atomic and can be reordered.
 * If two instances of this operation race, one can appear to succeed
 * but actually fail.  You must protect multiple accesses with a lock.
 */
static __inline__ int __test_and_set_bit(int nr, volatile void * addr)
{
	volatile unsigned long *a = addr;
	unsigned long mask;
	int retval;

	a += nr >> 5;
	mask = 1 << (nr & 0x1f);
	retval = (mask & *a) != 0;
	*a |= mask;

	return retval;
}

/*
 * test_and_clear_bit - Clear a bit and return its old value
 * @nr: Bit to clear
 * @addr: Address to count from
 *
 * This operation is atomic and cannot be reordered.
 * It also implies a memory barrier.
 */
static __inline__ int test_and_clear_bit(int nr, volatile void * addr)
{
	volatile unsigned long *a = addr;
	unsigned long mask;
	int retval;
	__bi_flags;

	a += nr >> 5;
	mask = 1 << (nr & 0x1f);
	__bi_local_irq_save(flags);
	retval = (mask & *a) != 0;
	*a &= ~mask;
	__bi_local_irq_restore(flags);

	return retval;
}

/*
 * __test_and_clear_bit - Clear a bit and return its old value
 * @nr: Bit to clear
 * @addr: Address to count from
 *
 * This operation is non-atomic and can be reordered.
 * If two instances of this operation race, one can appear to succeed
 * but actually fail.  You must protect multiple accesses with a lock.
 */
static __inline__ int __test_and_clear_bit(int nr, volatile void * addr)
{
	volatile unsigned long *a = addr;
	unsigned long mask;
	int retval;

	a += nr >> 5;
	mask = 1 << (nr & 0x1f);
	retval = (mask & *a) != 0;
	*a &= ~mask;

	return retval;
}

/*
 * test_and_change_bit - Change a bit and return its old value
 * @nr: Bit to change
 * @addr: Address to count from
 *
 * This operation is atomic and cannot be reordered.
 * It also implies a memory barrier.
 */
static __inline__ int test_and_change_bit(int nr, volatile void * addr)
{
	volatile unsigned long *a = addr;
	unsigned long mask, retval;
	__bi_flags;

	a += nr >> 5;
	mask = 1 << (nr & 0x1f);
	__bi_local_irq_save(flags);
	retval = (mask & *a) != 0;
	*a ^= mask;
	__bi_local_irq_restore(flags);

	return retval;
}

/*
 * __test_and_change_bit - Change a bit and return its old value
 * @nr: Bit to change
 * @addr: Address to count from
 *
 * This operation is non-atomic and can be reordered.
 * If two instances of this operation race, one can appear to succeed
 * but actually fail.  You must protect multiple accesses with a lock.
 */
static __inline__ int __test_and_change_bit(int nr, volatile void * addr)
{
	volatile unsigned long *a = addr;
	unsigned long mask;
	int retval;

	a += nr >> 5;
	mask = 1 << (nr & 0x1f);
	retval = (mask & *a) != 0;
	*a ^= mask;

	return retval;
}

#undef __bi_flags
#undef __bi_cli
#undef __bi_save_flags
#undef __bi_local_irq_save
#undef __bi_local_irq_restore

#endif /* MIPS I */

/*
 * test_bit - Determine whether a bit is set
 * @nr: bit number to test
 * @addr: Address to start counting from
 */
static inline int test_bit(int nr, volatile void *addr)
{
	return 1UL & (((const volatile unsigned long *) addr)[nr >> SZLONG_LOG]
		>> (nr & SZLONG_MASK));
}
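
/*
 * Illustrative sketch only (not part of the original header): test_bit()
 * indexes bits across an array of longs, so a multi-word bitmap works the
 * same way as a single word.  The names below are hypothetical.
 *
 *	static unsigned long irq_pending[2];	// 64 bits on a 32-bit kernel
 *
 *	if (test_bit(37, irq_pending))		// bit 5 of irq_pending[1]
 *		handle_irq(37);
 */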

/*
 * ffz - find first zero in word.
 * @word: The word to search
 *
 * Undefined if no zero exists, so code should check against ~0UL first.
 */
static __inline__ unsigned long ffz(unsigned long word)
{
	int b = 0, s;

	word = ~word;
	s = 16; if (word << 16 != 0) s = 0; b += s; word >>= s;
	s =  8; if (word << 24 != 0) s = 0; b += s; word >>= s;
	s =  4; if (word << 28 != 0) s = 0; b += s; word >>= s;
	s =  2; if (word << 30 != 0) s = 0; b += s; word >>= s;
	s =  1; if (word << 31 != 0) s = 0; b += s;

	return b;
}
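
/*
 * Illustrative note (not part of the original header): the sequence above
 * is a branch-light binary search over the complemented word.  Each step
 * checks whether the low half of the remaining bits contains a set bit; if
 * not, it adds the half-width to the result and shifts the word down.  For
 * example, ffz(0xfffff07f) complements to 0x00000f80, whose lowest set bit
 * is bit 7, so the result is 7.
 */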

#ifdef __KERNEL__

/*
 * ffs - find first bit set
 * @x: the word to search
 *
 * Undefined if no bit exists, so code should check against 0 first.
 */
#define ffs(x) generic_ffs(x)

/*
 * find_next_zero_bit - find the first zero bit in a memory region
 * @addr: The address to base the search on
 * @size: The maximum size to search
 * @offset: The bit number to start searching at
 */
static inline long find_next_zero_bit(void *addr, unsigned long size,
	unsigned long offset)
{
	unsigned long *p = ((unsigned long *) addr) + (offset >> 5);
	unsigned long result = offset & ~31UL;
	unsigned long tmp;

	if (offset >= size)
		return size;
	size -= result;
	offset &= 31UL;
	if (offset) {
		tmp = *(p++);
		tmp |= ~0UL >> (32-offset);
		if (size < 32)
			goto found_first;
		if (~tmp)
			goto found_middle;
		size -= 32;
		result += 32;
	}
	while (size & ~31UL) {
		if (~(tmp = *(p++)))
			goto found_middle;
		result += 32;
		size -= 32;
	}
	if (!size)
		return result;
	tmp = *p;

found_first:
	tmp |= ~0UL << size;
found_middle:
	return result + ffz(tmp);
}
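
/*
 * Illustrative sketch only (not part of the original header): scanning a
 * bitmap for a free slot and claiming it.  The names slot_map and NR_SLOTS
 * are hypothetical.
 *
 *	static unsigned long slot_map[4];	// bitmap of NR_SLOTS = 128 slots
 *
 *	long slot = find_next_zero_bit(slot_map, NR_SLOTS, 0);
 *	if (slot < NR_SLOTS && !test_and_set_bit(slot, slot_map))
 *		return slot;			// claimed a free slot
 */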

#define find_first_zero_bit(addr, size) \
	find_next_zero_bit((addr), (size), 0)

/*
 * hweightN - returns the hamming weight of an N-bit word
 * @x: the word to weigh
 *
 * The Hamming Weight of a number is the total number of bits set in it.
 */
#define hweight32(x) generic_hweight32(x)
#define hweight16(x) generic_hweight16(x)
#define hweight8(x) generic_hweight8(x)

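/*
 * Little-endian bit operations, used by the ext2_* and find_*_le_bit
 * helpers below.  They work byte by byte, so the bitmap layout is the
 * same regardless of the CPU's endianness.
 */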
static __inline__ int __test_and_set_le_bit(int nr, void * addr)
{
	unsigned char	*ADDR = (unsigned char *) addr;
	int		mask, retval;

	ADDR += nr >> 3;
	mask = 1 << (nr & 0x07);
	retval = (mask & *ADDR) != 0;
	*ADDR |= mask;

	return retval;
}

static __inline__ int __test_and_clear_le_bit(int nr, void * addr)
{
	unsigned char	*ADDR = (unsigned char *) addr;
	int		mask, retval;

	ADDR += nr >> 3;
	mask = 1 << (nr & 0x07);
	retval = (mask & *ADDR) != 0;
	*ADDR &= ~mask;

	return retval;
}

static __inline__ int test_le_bit(int nr, const void * addr)
{
	const unsigned char	*ADDR = (const unsigned char *) addr;
	int			mask;

	ADDR += nr >> 3;
	mask = 1 << (nr & 0x07);

	return ((mask & *ADDR) != 0);
}

/* 32-bit version of ffz() for the little-endian helpers below. */
static inline unsigned long ext2_ffz(unsigned int word)
{
	int b = 0, s;

	word = ~word;
	s = 16; if (word << 16 != 0) s = 0; b += s; word >>= s;
	s =  8; if (word << 24 != 0) s = 0; b += s; word >>= s;
	s =  4; if (word << 28 != 0) s = 0; b += s; word >>= s;
	s =  2; if (word << 30 != 0) s = 0; b += s; word >>= s;
	s =  1; if (word << 31 != 0) s = 0; b += s;

	return b;
}

static inline unsigned long find_next_zero_le_bit(void *addr,
	unsigned long size, unsigned long offset)
{
	unsigned int *p = ((unsigned int *) addr) + (offset >> 5);
	unsigned int result = offset & ~31;
	unsigned int tmp;

	if (offset >= size)
		return size;

	size -= result;
	offset &= 31;
	if (offset) {
		tmp = cpu_to_le32p(p++);
		tmp |= ~0U >> (32-offset); /* bug or feature ? */
		if (size < 32)
			goto found_first;
		if (tmp != ~0U)
			goto found_middle;
		size -= 32;
		result += 32;
	}
	while (size >= 32) {
		if ((tmp = cpu_to_le32p(p++)) != ~0U)
			goto found_middle;
		result += 32;
		size -= 32;
	}
	if (!size)
		return result;

	tmp = cpu_to_le32p(p);
found_first:
	tmp |= ~0U << size;
	if (tmp == ~0U)			/* Are any bits zero? */
		return result + size;	/* Nope. */

found_middle:
	return result + ext2_ffz(tmp);
}

#define find_first_zero_le_bit(addr, size) \
	find_next_zero_le_bit((addr), (size), 0)

#define ext2_set_bit			__test_and_set_le_bit
#define ext2_clear_bit			__test_and_clear_le_bit
#define ext2_test_bit			test_le_bit
#define ext2_find_first_zero_bit	find_first_zero_le_bit
#define ext2_find_next_zero_bit		find_next_zero_le_bit
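
/*
 * Illustrative sketch only (not part of the original header): the ext2_*
 * wrappers operate on on-disk (little-endian) bitmaps, e.g. an inode
 * bitmap loaded into a buffer.  The names below are hypothetical.
 *
 *	void *bitmap = bh->b_data;		// little-endian on-disk bitmap
 *
 *	int ino = ext2_find_first_zero_bit(bitmap, inodes_per_group);
 *	if (ino < inodes_per_group)
 *		ext2_set_bit(ino, bitmap);	// returns the old value
 */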

#define minix_test_and_set_bit(nr,addr) test_and_set_bit(nr,addr)
#define minix_set_bit(nr,addr) set_bit(nr,addr)
#define minix_test_and_clear_bit(nr,addr) test_and_clear_bit(nr,addr)
#define minix_test_bit(nr,addr) test_bit(nr,addr)
#define minix_find_first_zero_bit(addr,size) find_first_zero_bit(addr,size)

#endif /* __KERNEL__ */

#endif /* _ASM_BITOPS_H */