/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright (c) 1994 - 1997, 1999, 2000  Ralf Baechle (ralf@gnu.org)
 * Copyright (c) 2000  Silicon Graphics, Inc.
 */
#ifndef _ASM_BITOPS_H
#define _ASM_BITOPS_H

#include <linux/types.h>
#include <asm/byteorder.h>		/* sigh ... */

#ifdef __KERNEL__

#include <asm/sgidefs.h>
#include <asm/system.h>

#include <asm-generic/bitops/fls.h>
#include <asm-generic/bitops/__fls.h>
#include <asm-generic/bitops/fls64.h>
#include <asm-generic/bitops/__ffs.h>

/*
 * clear_bit() doesn't provide any barrier for the compiler.
 */
#define smp_mb__before_clear_bit()	barrier()
#define smp_mb__after_clear_bit()	barrier()
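
/*
 * Illustrative only, not part of the API: a hedged sketch of how the
 * barrier macros pair with clear_bit() when a bit serves as a lock.
 * The lock word and bit number are invented for the example.
 */
#if 0	/* usage sketch, never compiled */
static unsigned long example_lock_word;		/* hypothetical lock word */

static inline void example_unlock(void)
{
	/* Order prior stores before the releasing clear_bit(). */
	smp_mb__before_clear_bit();
	clear_bit(0, &example_lock_word);	/* bit 0 acts as the lock bit */
}
#endif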

/*
 * Only disable interrupts for kernel-mode stuff, to keep user-mode stuff
 * that dares to use kernel include files alive.
 */
#define __bi_flags unsigned long flags
#define __bi_cli() __cli()
#define __bi_save_flags(x) __save_flags(x)
#define __bi_save_and_cli(x) __save_and_cli(x)
#define __bi_restore_flags(x) __restore_flags(x)
#else
#define __bi_flags
#define __bi_cli()
#define __bi_save_flags(x)
#define __bi_save_and_cli(x)
#define __bi_restore_flags(x)
#endif /* __KERNEL__ */

#ifdef CONFIG_CPU_HAS_LLSC

#include <asm/mipsregs.h>

/*
 * These functions for MIPS ISA > 1 are interrupt- and SMP-safe while
 * remaining interrupt friendly: they rely on ll/sc rather than on
 * disabling interrupts.
 */

/*
 * set_bit - Atomically set a bit in memory
 * @nr: the bit to set
 * @addr: the address to start counting from
 *
 * This function is atomic and may not be reordered.  See __set_bit()
 * if you do not require the atomic guarantees.
 * Note that @nr may be almost arbitrarily large; this function is not
 * restricted to acting on a single-word quantity.
 */
static __inline__ void
set_bit(int nr, volatile void *addr)
{
	unsigned long *m = ((unsigned long *) addr) + (nr >> 5);
	unsigned long temp;

	__asm__ __volatile__(
		"1:\tll\t%0, %1\t\t# set_bit\n\t"
		"or\t%0, %2\n\t"
		"sc\t%0, %1\n\t"
		"beqz\t%0, 1b"
		: "=&r" (temp), "=m" (*m)
		: "ir" (1UL << (nr & 0x1f)), "m" (*m));
}
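
/*
 * Illustrative only: a minimal sketch of driving set_bit() over a
 * multi-word bitmap.  The bitmap and bit numbers are invented for the
 * example; note how @nr may index past the first word.
 */
#if 0	/* usage sketch, never compiled */
static unsigned long example_bitmap[2];		/* 64 bits on 32-bit words */

static inline void example_mark_pending(void)
{
	set_bit(5, example_bitmap);	/* bit 5 of word 0 */
	set_bit(40, example_bitmap);	/* bit 8 of word 1: nr >> 5 picks the word */
}
#endif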

/*
 * __set_bit - Set a bit in memory
 * @nr: the bit to set
 * @addr: the address to start counting from
 *
 * Unlike set_bit(), this function is non-atomic and may be reordered.
 * If it's called on the same region of memory simultaneously, the effect
 * may be that only one operation succeeds.
 */
static __inline__ void __set_bit(int nr, volatile void * addr)
{
	unsigned long * m = ((unsigned long *) addr) + (nr >> 5);

	*m |= 1UL << (nr & 31);
}
#define PLATFORM__SET_BIT

/*
 * clear_bit - Clears a bit in memory
 * @nr: Bit to clear
 * @addr: Address to start counting from
 *
 * clear_bit() is atomic and may not be reordered.  However, it does
 * not contain a memory barrier, so if it is used for locking purposes,
 * you should call smp_mb__before_clear_bit() and/or smp_mb__after_clear_bit()
 * in order to ensure changes are visible on other processors.
 */
static __inline__ void
clear_bit(int nr, volatile void *addr)
{
	unsigned long *m = ((unsigned long *) addr) + (nr >> 5);
	unsigned long temp;

	__asm__ __volatile__(
		"1:\tll\t%0, %1\t\t# clear_bit\n\t"
		"and\t%0, %2\n\t"
		"sc\t%0, %1\n\t"
		"beqz\t%0, 1b\n\t"
		: "=&r" (temp), "=m" (*m)
		: "ir" (~(1UL << (nr & 0x1f))), "m" (*m));
}

/*
 * change_bit - Toggle a bit in memory
 * @nr: Bit to change
 * @addr: Address to start counting from
 *
 * change_bit() is atomic and may not be reordered.
 * Note that @nr may be almost arbitrarily large; this function is not
 * restricted to acting on a single-word quantity.
 */
static __inline__ void
change_bit(int nr, volatile void *addr)
{
	unsigned long *m = ((unsigned long *) addr) + (nr >> 5);
	unsigned long temp;

	__asm__ __volatile__(
		"1:\tll\t%0, %1\t\t# change_bit\n\t"
		"xor\t%0, %2\n\t"
		"sc\t%0, %1\n\t"
		"beqz\t%0, 1b"
		: "=&r" (temp), "=m" (*m)
		: "ir" (1UL << (nr & 0x1f)), "m" (*m));
}

/*
 * __change_bit - Toggle a bit in memory
 * @nr: the bit to toggle
 * @addr: the address to start counting from
 *
 * Unlike change_bit(), this function is non-atomic and may be reordered.
 * If it's called on the same region of memory simultaneously, the effect
 * may be that only one operation succeeds.
 */
static __inline__ void __change_bit(int nr, volatile void * addr)
{
	unsigned long * m = ((unsigned long *) addr) + (nr >> 5);

	*m ^= 1UL << (nr & 31);
}


/*
 * test_and_set_bit - Set a bit and return its old value
 * @nr: Bit to set
 * @addr: Address to count from
 *
 * This operation is atomic and cannot be reordered.
 * It also implies a memory barrier.
 */
static __inline__ int
test_and_set_bit(int nr, volatile void *addr)
{
	unsigned long *m = ((unsigned long *) addr) + (nr >> 5);
	unsigned long temp, res;

	__asm__ __volatile__(
		".set\tnoreorder\t\t# test_and_set_bit\n"
		"1:\tll\t%0, %1\n\t"
		"or\t%2, %0, %3\n\t"
		"sc\t%2, %1\n\t"
		"beqz\t%2, 1b\n\t"
		" and\t%2, %0, %3\n\t"
		".set\treorder"
		: "=&r" (temp), "=m" (*m), "=&r" (res)
		: "r" (1UL << (nr & 0x1f)), "m" (*m)
		: "memory");

	return res != 0;
}
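
/*
 * Illustrative only: a hedged sketch of the classic test_and_set_bit()
 * try-lock idiom.  The flag word and function name are hypothetical.
 */
#if 0	/* usage sketch, never compiled */
static unsigned long example_flags;

static inline int example_trylock(void)
{
	/* An old value of 0 means this caller was the one to set the bit. */
	return !test_and_set_bit(0, &example_flags);
}
#endif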

/*
 * __test_and_set_bit - Set a bit and return its old value
 * @nr: Bit to set
 * @addr: Address to count from
 *
 * This operation is non-atomic and can be reordered.
 * If two instances of this operation race, one can appear to succeed
 * but actually fail.  You must protect multiple accesses with a lock.
 */
static __inline__ int __test_and_set_bit(int nr, volatile void * addr)
{
	int mask, retval;
	volatile int *a = addr;

	a += nr >> 5;
	mask = 1 << (nr & 0x1f);
	retval = (mask & *a) != 0;
	*a |= mask;

	return retval;
}

/*
 * test_and_clear_bit - Clear a bit and return its old value
 * @nr: Bit to clear
 * @addr: Address to count from
 *
 * This operation is atomic and cannot be reordered.
 * It also implies a memory barrier.
 */
static __inline__ int
test_and_clear_bit(int nr, volatile void *addr)
{
	unsigned long *m = ((unsigned long *) addr) + (nr >> 5);
	unsigned long temp, res;

	__asm__ __volatile__(
		".set\tnoreorder\t\t# test_and_clear_bit\n"
		"1:\tll\t%0, %1\n\t"
		"or\t%2, %0, %3\n\t"
		"xor\t%2, %3\n\t"
		"sc\t%2, %1\n\t"
		"beqz\t%2, 1b\n\t"
		" and\t%2, %0, %3\n\t"
		".set\treorder"
		: "=&r" (temp), "=m" (*m), "=&r" (res)
		: "r" (1UL << (nr & 0x1f)), "m" (*m)
		: "memory");

	return res != 0;
}
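
/*
 * Illustrative only: consuming a pending-work flag with
 * test_and_clear_bit(), so that exactly one caller sees each posted
 * event.  The flag word and bit number are invented for the example.
 */
#if 0	/* usage sketch, never compiled */
static unsigned long example_pending;

static inline int example_take_event(void)
{
	/* Returns nonzero exactly once per posted event. */
	return test_and_clear_bit(3, &example_pending);
}
#endif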

/*
 * __test_and_clear_bit - Clear a bit and return its old value
 * @nr: Bit to clear
 * @addr: Address to count from
 *
 * This operation is non-atomic and can be reordered.
 * If two instances of this operation race, one can appear to succeed
 * but actually fail.  You must protect multiple accesses with a lock.
 */
static __inline__ int __test_and_clear_bit(int nr, volatile void * addr)
{
	int	mask, retval;
	volatile int	*a = addr;

	a += nr >> 5;
	mask = 1 << (nr & 0x1f);
	retval = (mask & *a) != 0;
	*a &= ~mask;

	return retval;
}

/*
 * test_and_change_bit - Change a bit and return its old value
 * @nr: Bit to change
 * @addr: Address to count from
 *
 * This operation is atomic and cannot be reordered.
 * It also implies a memory barrier.
 */
static __inline__ int
test_and_change_bit(int nr, volatile void *addr)
{
	unsigned long *m = ((unsigned long *) addr) + (nr >> 5);
	unsigned long temp, res;

	__asm__ __volatile__(
		".set\tnoreorder\t\t# test_and_change_bit\n"
		"1:\tll\t%0, %1\n\t"
		"xor\t%2, %0, %3\n\t"
		"sc\t%2, %1\n\t"
		"beqz\t%2, 1b\n\t"
		" and\t%2, %0, %3\n\t"
		".set\treorder"
		: "=&r" (temp), "=m" (*m), "=&r" (res)
		: "r" (1UL << (nr & 0x1f)), "m" (*m)
		: "memory");

	return res != 0;
}

/*
 * __test_and_change_bit - Change a bit and return its old value
 * @nr: Bit to change
 * @addr: Address to count from
 *
 * This operation is non-atomic and can be reordered.
 * If two instances of this operation race, one can appear to succeed
 * but actually fail.  You must protect multiple accesses with a lock.
 */
static __inline__ int __test_and_change_bit(int nr, volatile void * addr)
{
	int	mask, retval;
	volatile int	*a = addr;

	a += nr >> 5;
	mask = 1 << (nr & 0x1f);
	retval = (mask & *a) != 0;
	*a ^= mask;

	return retval;
}
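
/*
 * Illustrative only: the double-underscore variants are appropriate
 * when some other mechanism already serializes access, e.g. a spinlock
 * held by the caller.  The names below are hypothetical.
 */
#if 0	/* usage sketch, never compiled */
static unsigned long example_map;

static inline int example_claim_under_lock(int nr)
{
	/* Caller must hold the lock that protects example_map. */
	return __test_and_set_bit(nr, &example_map);
}
#endif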

#else /* MIPS I */

/*
 * set_bit - Atomically set a bit in memory
 * @nr: the bit to set
 * @addr: the address to start counting from
 *
 * This function is atomic and may not be reordered.  See __set_bit()
 * if you do not require the atomic guarantees.
 * Note that @nr may be almost arbitrarily large; this function is not
 * restricted to acting on a single-word quantity.
 */
static __inline__ void set_bit(int nr, volatile void * addr)
{
	int	mask;
	volatile int	*a = addr;
	__bi_flags;

	a += nr >> 5;
	mask = 1 << (nr & 0x1f);
	__bi_save_and_cli(flags);
	*a |= mask;
	__bi_restore_flags(flags);
}

/*
 * __set_bit - Set a bit in memory
 * @nr: the bit to set
 * @addr: the address to start counting from
 *
 * Unlike set_bit(), this function is non-atomic and may be reordered.
 * If it's called on the same region of memory simultaneously, the effect
 * may be that only one operation succeeds.
 */
static __inline__ void __set_bit(int nr, volatile void * addr)
{
	int	mask;
	volatile int	*a = addr;

	a += nr >> 5;
	mask = 1 << (nr & 0x1f);
	*a |= mask;
}

/*
 * clear_bit - Clears a bit in memory
 * @nr: Bit to clear
 * @addr: Address to start counting from
 *
 * clear_bit() is atomic and may not be reordered.  However, it does
 * not contain a memory barrier, so if it is used for locking purposes,
 * you should call smp_mb__before_clear_bit() and/or smp_mb__after_clear_bit()
 * in order to ensure changes are visible on other processors.
 */
static __inline__ void clear_bit(int nr, volatile void * addr)
{
	int	mask;
	volatile int	*a = addr;
	__bi_flags;

	a += nr >> 5;
	mask = 1 << (nr & 0x1f);
	__bi_save_and_cli(flags);
	*a &= ~mask;
	__bi_restore_flags(flags);
}

/*
 * change_bit - Toggle a bit in memory
 * @nr: Bit to change
 * @addr: Address to start counting from
 *
 * change_bit() is atomic and may not be reordered.
 * Note that @nr may be almost arbitrarily large; this function is not
 * restricted to acting on a single-word quantity.
 */
static __inline__ void change_bit(int nr, volatile void * addr)
{
	int	mask;
	volatile int	*a = addr;
	__bi_flags;

	a += nr >> 5;
	mask = 1 << (nr & 0x1f);
	__bi_save_and_cli(flags);
	*a ^= mask;
	__bi_restore_flags(flags);
}

/*
 * __change_bit - Toggle a bit in memory
 * @nr: the bit to toggle
 * @addr: the address to start counting from
 *
 * Unlike change_bit(), this function is non-atomic and may be reordered.
 * If it's called on the same region of memory simultaneously, the effect
 * may be that only one operation succeeds.
 */
static __inline__ void __change_bit(int nr, volatile void * addr)
{
	unsigned long * m = ((unsigned long *) addr) + (nr >> 5);

	*m ^= 1UL << (nr & 31);
}

/*
 * test_and_set_bit - Set a bit and return its old value
 * @nr: Bit to set
 * @addr: Address to count from
 *
 * This operation is atomic and cannot be reordered.
 * It also implies a memory barrier.
 */
static __inline__ int test_and_set_bit(int nr, volatile void * addr)
{
	int	mask, retval;
	volatile int	*a = addr;
	__bi_flags;

	a += nr >> 5;
	mask = 1 << (nr & 0x1f);
	__bi_save_and_cli(flags);
	retval = (mask & *a) != 0;
	*a |= mask;
	__bi_restore_flags(flags);

	return retval;
}

/*
 * __test_and_set_bit - Set a bit and return its old value
 * @nr: Bit to set
 * @addr: Address to count from
 *
 * This operation is non-atomic and can be reordered.
 * If two instances of this operation race, one can appear to succeed
 * but actually fail.  You must protect multiple accesses with a lock.
 */
static __inline__ int __test_and_set_bit(int nr, volatile void * addr)
{
	int	mask, retval;
	volatile int	*a = addr;

	a += nr >> 5;
	mask = 1 << (nr & 0x1f);
	retval = (mask & *a) != 0;
	*a |= mask;

	return retval;
}

/*
 * test_and_clear_bit - Clear a bit and return its old value
 * @nr: Bit to clear
 * @addr: Address to count from
 *
 * This operation is atomic and cannot be reordered.
 * It also implies a memory barrier.
 */
static __inline__ int test_and_clear_bit(int nr, volatile void * addr)
{
	int	mask, retval;
	volatile int	*a = addr;
	__bi_flags;

	a += nr >> 5;
	mask = 1 << (nr & 0x1f);
	__bi_save_and_cli(flags);
	retval = (mask & *a) != 0;
	*a &= ~mask;
	__bi_restore_flags(flags);

	return retval;
}

/*
 * __test_and_clear_bit - Clear a bit and return its old value
 * @nr: Bit to clear
 * @addr: Address to count from
 *
 * This operation is non-atomic and can be reordered.
 * If two instances of this operation race, one can appear to succeed
 * but actually fail.  You must protect multiple accesses with a lock.
 */
static __inline__ int __test_and_clear_bit(int nr, volatile void * addr)
{
	int	mask, retval;
	volatile int	*a = addr;

	a += nr >> 5;
	mask = 1 << (nr & 0x1f);
	retval = (mask & *a) != 0;
	*a &= ~mask;

	return retval;
}

/*
 * test_and_change_bit - Change a bit and return its old value
 * @nr: Bit to change
 * @addr: Address to count from
 *
 * This operation is atomic and cannot be reordered.
 * It also implies a memory barrier.
 */
static __inline__ int test_and_change_bit(int nr, volatile void * addr)
{
	int	mask, retval;
	volatile int	*a = addr;
	__bi_flags;

	a += nr >> 5;
	mask = 1 << (nr & 0x1f);
	__bi_save_and_cli(flags);
	retval = (mask & *a) != 0;
	*a ^= mask;
	__bi_restore_flags(flags);

	return retval;
}

/*
 * __test_and_change_bit - Change a bit and return its old value
 * @nr: Bit to change
 * @addr: Address to count from
 *
 * This operation is non-atomic and can be reordered.
 * If two instances of this operation race, one can appear to succeed
 * but actually fail.  You must protect multiple accesses with a lock.
 */
static __inline__ int __test_and_change_bit(int nr, volatile void * addr)
{
	int	mask, retval;
	volatile int	*a = addr;

	a += nr >> 5;
	mask = 1 << (nr & 0x1f);
	retval = (mask & *a) != 0;
	*a ^= mask;

	return retval;
}

#undef __bi_flags
#undef __bi_cli
#undef __bi_save_flags
#undef __bi_save_and_cli
#undef __bi_restore_flags

#endif /* MIPS I */

/*
 * test_bit - Determine whether a bit is set
 * @nr: bit number to test
 * @addr: Address to start counting from
 */
static __inline__ int test_bit(int nr, const volatile void *addr)
{
	return ((1UL << (nr & 31)) & (((const unsigned int *) addr)[nr >> 5])) != 0;
}
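
/*
 * Illustrative only: test_bit() is a plain read and implies no memory
 * ordering; paired with the atomic setters it gives a simple
 * flag-polling pattern.  The names below are invented.
 */
#if 0	/* usage sketch, never compiled */
static unsigned long example_status;

static inline int example_ready(void)
{
	return test_bit(7, &example_status);	/* nonzero if bit 7 is set */
}
#endif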

#ifndef __MIPSEB__

/* Little endian versions. */

/*
 * find_first_zero_bit - find the first zero bit in a memory region
 * @addr: The address to start the search at
 * @size: The maximum size to search
 *
 * Returns the bit-number of the first zero bit, not the number of the byte
 * containing a bit.
 */
static __inline__ int find_first_zero_bit (void *addr, unsigned size)
{
	unsigned long dummy;
	int res;

	if (!size)
		return 0;

	__asm__ (".set\tnoreorder\n\t"
		".set\tnoat\n"
		"1:\tsubu\t$1,%6,%0\n\t"
		"blez\t$1,2f\n\t"
		"lw\t$1,(%5)\n\t"
		"addiu\t%5,4\n\t"
#if (_MIPS_ISA == _MIPS_ISA_MIPS2 ) || (_MIPS_ISA == _MIPS_ISA_MIPS3 ) || \
    (_MIPS_ISA == _MIPS_ISA_MIPS4 ) || (_MIPS_ISA == _MIPS_ISA_MIPS5 ) || \
    (_MIPS_ISA == _MIPS_ISA_MIPS32) || (_MIPS_ISA == _MIPS_ISA_MIPS64)
		"beql\t%1,$1,1b\n\t"
		"addiu\t%0,32\n\t"
#else
		"addiu\t%0,32\n\t"
		"beq\t%1,$1,1b\n\t"
		"nop\n\t"
		"subu\t%0,32\n\t"
#endif
#ifdef __MIPSEB__
#error "Fix this for big endian"
#endif /* __MIPSEB__ */
		"li\t%1,1\n"
		"1:\tand\t%2,$1,%1\n\t"
		"beqz\t%2,2f\n\t"
		"sll\t%1,%1,1\n\t"
		"bnez\t%1,1b\n\t"
		"add\t%0,%0,1\n\t"
		".set\tat\n\t"
		".set\treorder\n"
		"2:"
		: "=r" (res), "=r" (dummy), "=r" (addr)
		: "0" ((signed int) 0), "1" ((unsigned int) 0xffffffff),
		  "2" (addr), "r" (size)
		: "$1");

	return res;
}

/*
 * find_next_zero_bit - find the next zero bit in a memory region
 * @addr: The address to base the search on
 * @offset: The bitnumber to start searching at
 * @size: The maximum size to search
 */
static __inline__ int find_next_zero_bit (void * addr, int size, int offset)
{
	unsigned int *p = ((unsigned int *) addr) + (offset >> 5);
	int set = 0, bit = offset & 31, res;
	unsigned long dummy;

	if (bit) {
		/*
		 * Look for a zero in the first word
		 */
#ifdef __MIPSEB__
#error "Fix this for big endian byte order"
#endif
		__asm__(".set\tnoreorder\n\t"
			".set\tnoat\n"
			"1:\tand\t$1,%4,%1\n\t"
			"beqz\t$1,1f\n\t"
			"sll\t%1,%1,1\n\t"
			"bnez\t%1,1b\n\t"
			"addiu\t%0,1\n\t"
			".set\tat\n\t"
			".set\treorder\n"
			"1:"
			: "=r" (set), "=r" (dummy)
			: "0" (0), "1" (1 << bit), "r" (*p)
			: "$1");
		if (set < (32 - bit))
			return set + offset;
		set = 32 - bit;
		p++;
	}
	/*
	 * No zero yet, search the remaining full words for a zero
	 */
	res = find_first_zero_bit(p, size - 32 * (p - (unsigned int *) addr));
	return offset + set + res;
}

#endif /* !(__MIPSEB__) */
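
/*
 * Illustrative only: a hedged sketch of the common allocate-a-free-slot
 * pattern built from find_first_zero_bit() plus test_and_set_bit(); the
 * retry loop handles a racing allocator.  All names and sizes below are
 * hypothetical.  (On big-endian, the equivalent find_first_zero_bit()
 * macro is defined further down.)
 */
#if 0	/* usage sketch, never compiled */
#define EXAMPLE_SLOTS 64
static unsigned long example_slot_map[EXAMPLE_SLOTS / 32];

static inline int example_alloc_slot(void)
{
	int slot;

	do {
		slot = find_first_zero_bit(example_slot_map, EXAMPLE_SLOTS);
		if (slot >= EXAMPLE_SLOTS)
			return -1;	/* bitmap full */
	} while (test_and_set_bit(slot, example_slot_map));	/* lost a race: retry */

	return slot;
}
#endif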

/*
 * ffz - find first zero in word.
 * @word: The word to search
 *
 * Undefined if no zero exists, so code should check against ~0UL first.
 */
static __inline__ unsigned long ffz(unsigned long word)
{
	unsigned int	__res;
	unsigned int	mask = 1;

	__asm__ (
		".set\tnoreorder\n\t"
		".set\tnoat\n\t"
		"move\t%0,$0\n"
		"1:\tand\t$1,%2,%1\n\t"
		"beqz\t$1,2f\n\t"
		"sll\t%1,1\n\t"
		"bnez\t%1,1b\n\t"
		"addiu\t%0,1\n\t"
		".set\tat\n\t"
		".set\treorder\n"
		"2:\n\t"
		: "=&r" (__res), "=r" (mask)
		: "r" (word), "1" (mask)
		: "$1");

	return __res;
}
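
/*
 * Illustrative only: ffz() is undefined for ~0UL, so callers should
 * guard first.  A minimal sketch; the word value is arbitrary.
 */
#if 0	/* usage sketch, never compiled */
static inline int example_first_free(unsigned long word)
{
	if (word == ~0UL)
		return -1;		/* no zero bit to find */
	return ffz(word);		/* e.g. ffz(0x0fUL) == 4 */
}
#endif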

#ifdef __KERNEL__

/*
 * hweightN - returns the hamming weight of a N-bit word
 * @x: the word to weigh
 *
 * The Hamming Weight of a number is the total number of bits set in it.
 */

#define hweight32(x) generic_hweight32(x)
#define hweight16(x) generic_hweight16(x)
#define hweight8(x) generic_hweight8(x)
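
/*
 * Illustrative only: the hweightN() macros simply count set bits.  A
 * small sketch with a constant chosen so the result is easy to verify.
 */
#if 0	/* usage sketch, never compiled */
static inline unsigned int example_count_set(void)
{
	return hweight32(0xf0f0f0f0);	/* four 0xf nibbles: 16 bits set */
}
#endif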

#endif /* __KERNEL__ */

#ifdef __MIPSEB__
/*
 * find_next_zero_bit - find the next zero bit in a memory region
 * @addr: The address to base the search on
 * @offset: The bitnumber to start searching at
 * @size: The maximum size to search
 */
static __inline__ int find_next_zero_bit(void *addr, int size, int offset)
{
	unsigned long *p = ((unsigned long *) addr) + (offset >> 5);
	unsigned long result = offset & ~31UL;
	unsigned long tmp;

	if (offset >= size)
		return size;
	size -= result;
	offset &= 31UL;
	if (offset) {
		tmp = *(p++);
		tmp |= ~0UL >> (32-offset);
		if (size < 32)
			goto found_first;
		if (~tmp)
			goto found_middle;
		size -= 32;
		result += 32;
	}
	while (size & ~31UL) {
		if (~(tmp = *(p++)))
			goto found_middle;
		result += 32;
		size -= 32;
	}
	if (!size)
		return result;
	tmp = *p;

found_first:
	tmp |= ~0UL << size;
found_middle:
	return result + ffz(tmp);
}

/* Linus sez that gcc can optimize the following correctly, we'll see if this
 * holds on MIPS as it does for the Alpha.
 */

#if 0 /* Fool kernel-doc since it doesn't do macros yet */
/*
 * find_first_zero_bit - find the first zero bit in a memory region
 * @addr: The address to start the search at
 * @size: The maximum size to search
 *
 * Returns the bit-number of the first zero bit, not the number of the byte
 * containing a bit.
 */
static int find_first_zero_bit (void *addr, unsigned size);
#endif

#define find_first_zero_bit(addr, size) \
	find_next_zero_bit((addr), (size), 0)

#endif /* (__MIPSEB__) */

/* Now for the ext2 filesystem bit operations and helper routines. */

#ifdef __MIPSEB__
static __inline__ int ext2_set_bit(int nr, void * addr)
{
	int		mask, retval, flags;
	unsigned char	*ADDR = (unsigned char *) addr;

	ADDR += nr >> 3;
	mask = 1 << (nr & 0x07);
	save_and_cli(flags);
	retval = (mask & *ADDR) != 0;
	*ADDR |= mask;
	restore_flags(flags);
	return retval;
}

static __inline__ int ext2_clear_bit(int nr, void * addr)
{
	int		mask, retval, flags;
	unsigned char	*ADDR = (unsigned char *) addr;

	ADDR += nr >> 3;
	mask = 1 << (nr & 0x07);
	save_and_cli(flags);
	retval = (mask & *ADDR) != 0;
	*ADDR &= ~mask;
	restore_flags(flags);
	return retval;
}

static __inline__ int ext2_test_bit(int nr, const void * addr)
{
	int			mask;
	const unsigned char	*ADDR = (const unsigned char *) addr;

	ADDR += nr >> 3;
	mask = 1 << (nr & 0x07);
	return ((mask & *ADDR) != 0);
}

#define ext2_find_first_zero_bit(addr, size) \
	ext2_find_next_zero_bit((addr), (size), 0)

static __inline__ unsigned long ext2_find_next_zero_bit(void *addr, unsigned long size, unsigned long offset)
{
	unsigned long *p = ((unsigned long *) addr) + (offset >> 5);
	unsigned long result = offset & ~31UL;
	unsigned long tmp;

	if (offset >= size)
		return size;
	size -= result;
	offset &= 31UL;
	if (offset) {
		/* We hold the little-endian value in tmp, but then the
		 * shift would operate on the wrong bits.  We could keep a
		 * big-endian value in tmp instead, like this:
		 *
		 * tmp = __swab32(*(p++));
		 * tmp |= ~0UL >> (32-offset);
		 *
		 * but this would decrease performance, so we change the
		 * shift instead:
		 */
		tmp = *(p++);
		tmp |= __swab32(~0UL >> (32-offset));
		if (size < 32)
			goto found_first;
		if (~tmp)
			goto found_middle;
		size -= 32;
		result += 32;
	}
	while (size & ~31UL) {
		if (~(tmp = *(p++)))
			goto found_middle;
		result += 32;
		size -= 32;
	}
	if (!size)
		return result;
	tmp = *p;

found_first:
	/* tmp is little-endian, so we would have to swab the shift,
	 * see above.  But then we have to swab tmp below for ffz, so
	 * we might as well do this here.
	 */
	return result + ffz(__swab32(tmp) | (~0UL << size));
found_middle:
	return result + ffz(__swab32(tmp));
}
#else /* !(__MIPSEB__) */

/* Native ext2 byte ordering, just collapse using defines. */
#define ext2_set_bit(nr, addr) test_and_set_bit((nr), (addr))
#define ext2_clear_bit(nr, addr) test_and_clear_bit((nr), (addr))
#define ext2_test_bit(nr, addr) test_bit((nr), (addr))
#define ext2_find_first_zero_bit(addr, size) find_first_zero_bit((addr), (size))
#define ext2_find_next_zero_bit(addr, size, offset) \
		find_next_zero_bit((addr), (size), (offset))

#endif /* !(__MIPSEB__) */
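
/*
 * Illustrative only: the ext2_*() helpers scan on-disk little-endian
 * bitmaps regardless of CPU endianness.  A hedged sketch of finding and
 * claiming a free inode bit; names and sizes are invented.
 */
#if 0	/* usage sketch, never compiled */
#define EXAMPLE_INODES 1024
static unsigned long example_inode_map[EXAMPLE_INODES / 32];

static inline long example_new_inode(void)
{
	unsigned long ino;

	ino = ext2_find_first_zero_bit(example_inode_map, EXAMPLE_INODES);
	if (ino >= EXAMPLE_INODES)
		return -1;			/* no free inode */
	if (ext2_set_bit(ino, example_inode_map))
		return -1;			/* lost a race; a real caller would retry */
	return ino;
}
#endif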

/*
 * Bitmap functions for the minix filesystem.
 * FIXME: These assume that Minix uses the native byte/bitorder.
 * This severely limits the Minix filesystem's value for data exchange.
 */
#define minix_test_and_set_bit(nr,addr) test_and_set_bit(nr,addr)
#define minix_set_bit(nr,addr) set_bit(nr,addr)
#define minix_test_and_clear_bit(nr,addr) test_and_clear_bit(nr,addr)
#define minix_test_bit(nr,addr) test_bit(nr,addr)
#define minix_find_first_zero_bit(addr,size) find_first_zero_bit(addr,size)

#endif /* _ASM_BITOPS_H */