#ifndef _S390_BITOPS_H
#define _S390_BITOPS_H

/*
 *  include/asm-s390/bitops.h
 *
 *  S390 version
 *    Copyright (C) 1999 IBM Deutschland Entwicklung GmbH, IBM Corporation
 *    Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com)
 *
 *  Derived from "include/asm-i386/bitops.h"
 *    Copyright (C) 1992, Linus Torvalds
 *
 */

#ifdef __KERNEL__

#ifndef _LINUX_BITOPS_H
#error only <linux/bitops.h> can be included directly
#endif

#include <linux/compiler.h>

/*
 * 32 bit bitops format:
 * bit 0 is the LSB of *addr; bit 31 is the MSB of *addr;
 * bit 32 is the LSB of *(addr+4). That combined with the
 * big endian byte order on S390 gives the following bit
 * order in memory:
 *    1f 1e 1d 1c 1b 1a 19 18 17 16 15 14 13 12 11 10 \
 *    0f 0e 0d 0c 0b 0a 09 08 07 06 05 04 03 02 01 00
 * after that follows the next long with bit numbers
 *    3f 3e 3d 3c 3b 3a 39 38 37 36 35 34 33 32 31 30
 *    2f 2e 2d 2c 2b 2a 29 28 27 26 25 24 23 22 21 20
 * The reason for this bit ordering is the fact that
 * in the architecture independent code bit operations
 * of the form "flags |= (1 << bitnr)" are used INTERMIXED
 * with operations of the form "set_bit(bitnr, flags)".
 *
 * 64 bit bitops format:
 * bit 0 is the LSB of *addr; bit 63 is the MSB of *addr;
 * bit 64 is the LSB of *(addr+8). That combined with the
 * big endian byte order on S390 gives the following bit
 * order in memory:
 *    3f 3e 3d 3c 3b 3a 39 38 37 36 35 34 33 32 31 30
 *    2f 2e 2d 2c 2b 2a 29 28 27 26 25 24 23 22 21 20
 *    1f 1e 1d 1c 1b 1a 19 18 17 16 15 14 13 12 11 10
 *    0f 0e 0d 0c 0b 0a 09 08 07 06 05 04 03 02 01 00
 * after that follows the next long with bit numbers
 *    7f 7e 7d 7c 7b 7a 79 78 77 76 75 74 73 72 71 70
 *    6f 6e 6d 6c 6b 6a 69 68 67 66 65 64 63 62 61 60
 *    5f 5e 5d 5c 5b 5a 59 58 57 56 55 54 53 52 51 50
 *    4f 4e 4d 4c 4b 4a 49 48 47 46 45 44 43 42 41 40
 * The reason for this bit ordering is the fact that
 * in the architecture independent code bit operations
 * of the form "flags |= (1 << bitnr)" are used INTERMIXED
 * with operations of the form "set_bit(bitnr, flags)".
 */
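
/*
 * A worked example of the numbering above (illustration only, with an
 * assumed zero-initialized bitmap).  On 64 bit s390x, bit 0 is the LSB of
 * the first unsigned long, which big endian byte order places in the
 * *last* byte of that long in memory:
 *
 *	unsigned long map[1] = { 0 };
 *	set_bit(0, map);	-> ((unsigned char *) map)[7] == 0x01
 *	set_bit(8, map);	-> ((unsigned char *) map)[6] == 0x01
 *
 * This is exactly what "map[0] |= 1UL << nr" would produce on the same
 * machine, which is why the two styles can be mixed.
 */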

/* bitmap tables from arch/s390/kernel/bitmap.c */
extern const char _oi_bitmap[];
extern const char _ni_bitmap[];
extern const char _zb_findmap[];
extern const char _sb_findmap[];

#ifndef __s390x__

#define __BITOPS_ALIGN		3
#define __BITOPS_WORDSIZE	32
#define __BITOPS_OR		"or"
#define __BITOPS_AND		"nr"
#define __BITOPS_XOR		"xr"

#define __BITOPS_LOOP(__old, __new, __addr, __val, __op_string)	\
	asm volatile(						\
		"	l	%0,%2\n"			\
		"0:	lr	%1,%0\n"			\
		__op_string "	%1,%3\n"			\
		"	cs	%0,%1,%2\n"			\
		"	jl	0b"				\
		: "=&d" (__old), "=&d" (__new),			\
		  "=Q" (*(unsigned long *) __addr)		\
		: "d" (__val), "Q" (*(unsigned long *) __addr)	\
		: "cc");

#else /* __s390x__ */

#define __BITOPS_ALIGN		7
#define __BITOPS_WORDSIZE	64
#define __BITOPS_OR		"ogr"
#define __BITOPS_AND		"ngr"
#define __BITOPS_XOR		"xgr"

#define __BITOPS_LOOP(__old, __new, __addr, __val, __op_string)	\
	asm volatile(						\
		"	lg	%0,%2\n"			\
		"0:	lgr	%1,%0\n"			\
		__op_string "	%1,%3\n"			\
		"	csg	%0,%1,%2\n"			\
		"	jl	0b"				\
		: "=&d" (__old), "=&d" (__new),			\
		  "=Q" (*(unsigned long *) __addr)		\
		: "d" (__val), "Q" (*(unsigned long *) __addr)	\
		: "cc");

#endif /* __s390x__ */

#define __BITOPS_WORDS(bits) (((bits)+__BITOPS_WORDSIZE-1)/__BITOPS_WORDSIZE)
#define __BITOPS_BARRIER() asm volatile("" : : : "memory")
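
/*
 * For illustration only: the compare-and-swap loop that __BITOPS_LOOP
 * emits behaves roughly like the plain C helper below.  The helper name
 * is made up, and the GCC builtin __sync_bool_compare_and_swap merely
 * stands in for the CS/CSG instruction; the real bitops use the inline
 * assembly above.
 */
static inline void __bitops_or_loop_sketch(unsigned long *word,
					    unsigned long mask)
{
	unsigned long old, new;

	do {
		old = *word;		/* load the current word */
		new = old | mask;	/* __BITOPS_OR; AND and XOR analogous */
		/* retry until no other CPU modified *word in the meantime */
	} while (!__sync_bool_compare_and_swap(word, old, new));
}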

#ifdef CONFIG_SMP
/*
 * SMP safe set_bit routine based on compare and swap (CS)
 */
static inline void set_bit_cs(unsigned long nr, volatile unsigned long *ptr)
{
        unsigned long addr, old, new, mask;

	addr = (unsigned long) ptr;
	/* calculate address for CS */
	addr += (nr ^ (nr & (__BITOPS_WORDSIZE - 1))) >> 3;
	/* make OR mask */
	mask = 1UL << (nr & (__BITOPS_WORDSIZE - 1));
	/* Do the atomic update. */
	__BITOPS_LOOP(old, new, addr, mask, __BITOPS_OR);
}

/*
 * SMP safe clear_bit routine based on compare and swap (CS)
 */
static inline void clear_bit_cs(unsigned long nr, volatile unsigned long *ptr)
{
        unsigned long addr, old, new, mask;

	addr = (unsigned long) ptr;
	/* calculate address for CS */
	addr += (nr ^ (nr & (__BITOPS_WORDSIZE - 1))) >> 3;
	/* make AND mask */
	mask = ~(1UL << (nr & (__BITOPS_WORDSIZE - 1)));
	/* Do the atomic update. */
	__BITOPS_LOOP(old, new, addr, mask, __BITOPS_AND);
}

/*
 * SMP safe change_bit routine based on compare and swap (CS)
 */
static inline void change_bit_cs(unsigned long nr, volatile unsigned long *ptr)
{
        unsigned long addr, old, new, mask;

	addr = (unsigned long) ptr;
	/* calculate address for CS */
	addr += (nr ^ (nr & (__BITOPS_WORDSIZE - 1))) >> 3;
	/* make XOR mask */
	mask = 1UL << (nr & (__BITOPS_WORDSIZE - 1));
	/* Do the atomic update. */
	__BITOPS_LOOP(old, new, addr, mask, __BITOPS_XOR);
}

/*
 * SMP safe test_and_set_bit routine based on compare and swap (CS)
 */
static inline int
test_and_set_bit_cs(unsigned long nr, volatile unsigned long *ptr)
{
        unsigned long addr, old, new, mask;

	addr = (unsigned long) ptr;
	/* calculate address for CS */
	addr += (nr ^ (nr & (__BITOPS_WORDSIZE - 1))) >> 3;
	/* make OR/test mask */
	mask = 1UL << (nr & (__BITOPS_WORDSIZE - 1));
	/* Do the atomic update. */
	__BITOPS_LOOP(old, new, addr, mask, __BITOPS_OR);
	__BITOPS_BARRIER();
	return (old & mask) != 0;
}

/*
 * SMP safe test_and_clear_bit routine based on compare and swap (CS)
 */
static inline int
test_and_clear_bit_cs(unsigned long nr, volatile unsigned long *ptr)
{
        unsigned long addr, old, new, mask;

	addr = (unsigned long) ptr;
	/* calculate address for CS */
	addr += (nr ^ (nr & (__BITOPS_WORDSIZE - 1))) >> 3;
	/* make AND/test mask */
	mask = ~(1UL << (nr & (__BITOPS_WORDSIZE - 1)));
	/* Do the atomic update. */
	__BITOPS_LOOP(old, new, addr, mask, __BITOPS_AND);
	__BITOPS_BARRIER();
	return (old ^ new) != 0;
}

/*
 * SMP safe test_and_change_bit routine based on compare and swap (CS)
 */
static inline int
test_and_change_bit_cs(unsigned long nr, volatile unsigned long *ptr)
{
        unsigned long addr, old, new, mask;

	addr = (unsigned long) ptr;
	/* calculate address for CS */
	addr += (nr ^ (nr & (__BITOPS_WORDSIZE - 1))) >> 3;
	/* make XOR/test mask */
	mask = 1UL << (nr & (__BITOPS_WORDSIZE - 1));
	/* Do the atomic update. */
	__BITOPS_LOOP(old, new, addr, mask, __BITOPS_XOR);
	__BITOPS_BARRIER();
	return (old & mask) != 0;
}
#endif /* CONFIG_SMP */

/*
 * fast, non-SMP set_bit routine
 */
static inline void __set_bit(unsigned long nr, volatile unsigned long *ptr)
{
	unsigned long addr;

	addr = (unsigned long) ptr + ((nr ^ (__BITOPS_WORDSIZE - 8)) >> 3);
	asm volatile(
		"	oc	%O0(1,%R0),%1"
		: "=Q" (*(char *) addr) : "Q" (_oi_bitmap[nr & 7]) : "cc" );
}

static inline void
__constant_set_bit(const unsigned long nr, volatile unsigned long *ptr)
{
	unsigned long addr;

	addr = ((unsigned long) ptr) + ((nr ^ (__BITOPS_WORDSIZE - 8)) >> 3);
	*(unsigned char *) addr |= 1 << (nr & 7);
}

#define set_bit_simple(nr,addr) \
(__builtin_constant_p((nr)) ? \
 __constant_set_bit((nr),(addr)) : \
 __set_bit((nr),(addr)) )
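
/*
 * Illustration only, not a definition used below: the expression
 * (nr ^ (__BITOPS_WORDSIZE - 8)) >> 3 in the simple routines converts a
 * bit number into the byte offset of the byte holding it under big
 * endian byte order.  On s390x (__BITOPS_WORDSIZE == 64), nr = 0 gives
 * (0 ^ 56) >> 3 == 7, the last byte of the first long, and nr = 8 gives
 * (8 ^ 56) >> 3 == 6.  _oi_bitmap[nr & 7] then supplies the one-byte OR
 * mask, equivalent to 1 << (nr & 7), for the OC instruction.
 */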

/*
 * fast, non-SMP clear_bit routine
 */
static inline void
__clear_bit(unsigned long nr, volatile unsigned long *ptr)
{
	unsigned long addr;

	addr = (unsigned long) ptr + ((nr ^ (__BITOPS_WORDSIZE - 8)) >> 3);
	asm volatile(
		"	nc	%O0(1,%R0),%1"
		: "=Q" (*(char *) addr) : "Q" (_ni_bitmap[nr & 7]) : "cc" );
}

static inline void
__constant_clear_bit(const unsigned long nr, volatile unsigned long *ptr)
{
	unsigned long addr;

	addr = ((unsigned long) ptr) + ((nr ^ (__BITOPS_WORDSIZE - 8)) >> 3);
	*(unsigned char *) addr &= ~(1 << (nr & 7));
}

#define clear_bit_simple(nr,addr) \
(__builtin_constant_p((nr)) ? \
 __constant_clear_bit((nr),(addr)) : \
 __clear_bit((nr),(addr)) )

/*
 * fast, non-SMP change_bit routine
 */
static inline void __change_bit(unsigned long nr, volatile unsigned long *ptr)
{
	unsigned long addr;

	addr = (unsigned long) ptr + ((nr ^ (__BITOPS_WORDSIZE - 8)) >> 3);
	asm volatile(
		"	xc	%O0(1,%R0),%1"
		: "=Q" (*(char *) addr) : "Q" (_oi_bitmap[nr & 7]) : "cc" );
}

static inline void
__constant_change_bit(const unsigned long nr, volatile unsigned long *ptr)
{
	unsigned long addr;

	addr = ((unsigned long) ptr) + ((nr ^ (__BITOPS_WORDSIZE - 8)) >> 3);
	*(unsigned char *) addr ^= 1 << (nr & 7);
}

#define change_bit_simple(nr,addr) \
(__builtin_constant_p((nr)) ? \
 __constant_change_bit((nr),(addr)) : \
 __change_bit((nr),(addr)) )

/*
 * fast, non-SMP test_and_set_bit routine
 */
static inline int
test_and_set_bit_simple(unsigned long nr, volatile unsigned long *ptr)
{
	unsigned long addr;
	unsigned char ch;

	addr = (unsigned long) ptr + ((nr ^ (__BITOPS_WORDSIZE - 8)) >> 3);
	ch = *(unsigned char *) addr;
	asm volatile(
		"	oc	%O0(1,%R0),%1"
		: "=Q" (*(char *) addr)	: "Q" (_oi_bitmap[nr & 7])
		: "cc", "memory");
	return (ch >> (nr & 7)) & 1;
}
#define __test_and_set_bit(X,Y)		test_and_set_bit_simple(X,Y)

/*
 * fast, non-SMP test_and_clear_bit routine
 */
static inline int
test_and_clear_bit_simple(unsigned long nr, volatile unsigned long *ptr)
{
	unsigned long addr;
	unsigned char ch;

	addr = (unsigned long) ptr + ((nr ^ (__BITOPS_WORDSIZE - 8)) >> 3);
	ch = *(unsigned char *) addr;
	asm volatile(
		"	nc	%O0(1,%R0),%1"
		: "=Q" (*(char *) addr)	: "Q" (_ni_bitmap[nr & 7])
		: "cc", "memory");
	return (ch >> (nr & 7)) & 1;
}
#define __test_and_clear_bit(X,Y)	test_and_clear_bit_simple(X,Y)

/*
 * fast, non-SMP test_and_change_bit routine
 */
static inline int
test_and_change_bit_simple(unsigned long nr, volatile unsigned long *ptr)
{
	unsigned long addr;
	unsigned char ch;

	addr = (unsigned long) ptr + ((nr ^ (__BITOPS_WORDSIZE - 8)) >> 3);
	ch = *(unsigned char *) addr;
	asm volatile(
		"	xc	%O0(1,%R0),%1"
		: "=Q" (*(char *) addr)	: "Q" (_oi_bitmap[nr & 7])
		: "cc", "memory");
	return (ch >> (nr & 7)) & 1;
}
#define __test_and_change_bit(X,Y)	test_and_change_bit_simple(X,Y)

#ifdef CONFIG_SMP
#define set_bit             set_bit_cs
#define clear_bit           clear_bit_cs
#define change_bit          change_bit_cs
#define test_and_set_bit    test_and_set_bit_cs
#define test_and_clear_bit  test_and_clear_bit_cs
#define test_and_change_bit test_and_change_bit_cs
#else
#define set_bit             set_bit_simple
#define clear_bit           clear_bit_simple
#define change_bit          change_bit_simple
#define test_and_set_bit    test_and_set_bit_simple
#define test_and_clear_bit  test_and_clear_bit_simple
#define test_and_change_bit test_and_change_bit_simple
#endif


/*
 * This routine doesn't need to be atomic.
 */

static inline int __test_bit(unsigned long nr, const volatile unsigned long *ptr)
{
	unsigned long addr;
	unsigned char ch;

	addr = (unsigned long) ptr + ((nr ^ (__BITOPS_WORDSIZE - 8)) >> 3);
	ch = *(volatile unsigned char *) addr;
	return (ch >> (nr & 7)) & 1;
}

static inline int
__constant_test_bit(unsigned long nr, const volatile unsigned long *addr) {
    return (((volatile char *) addr)
	    [(nr^(__BITOPS_WORDSIZE-8))>>3] & (1<<(nr&7))) != 0;
}

#define test_bit(nr,addr) \
(__builtin_constant_p((nr)) ? \
 __constant_test_bit((nr),(addr)) : \
 __test_bit((nr),(addr)) )
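
/*
 * Usage sketch (illustration only; the helper name and bit numbers are
 * arbitrary): how the operations selected above fit together on a
 * caller-provided bitmap.
 */
static inline int __bitops_usage_sketch(unsigned long *bitmap)
{
	set_bit(3, bitmap);			/* atomic under CONFIG_SMP */
	change_bit(7, bitmap);			/* toggle bit 7 */
	clear_bit(7, bitmap);			/* then force it to 0 */
	/* test_and_clear_bit returns the previous value of bit 3, 1 here */
	return test_and_clear_bit(3, bitmap) && !test_bit(7, bitmap);
}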

/*
 * Optimized find bit helper functions.
 */

/**
 * __ffz_word_loop - find byte offset of first long != -1UL
 * @addr: pointer to array of unsigned long
 * @size: size of the array in bits
 */
static inline unsigned long __ffz_word_loop(const unsigned long *addr,
					    unsigned long size)
{
	typedef struct { long _[__BITOPS_WORDS(size)]; } addrtype;
	unsigned long bytes = 0;

	asm volatile(
#ifndef __s390x__
		"	ahi	%1,-1\n"
		"	sra	%1,5\n"
		"	jz	1f\n"
		"0:	c	%2,0(%0,%3)\n"
		"	jne	1f\n"
		"	la	%0,4(%0)\n"
		"	brct	%1,0b\n"
		"1:\n"
#else
		"	aghi	%1,-1\n"
		"	srag	%1,%1,6\n"
		"	jz	1f\n"
		"0:	cg	%2,0(%0,%3)\n"
		"	jne	1f\n"
		"	la	%0,8(%0)\n"
		"	brct	%1,0b\n"
		"1:\n"
#endif
		: "+&a" (bytes), "+&d" (size)
		: "d" (-1UL), "a" (addr), "m" (*(addrtype *) addr)
		: "cc" );
	return bytes;
}

/**
 * __ffs_word_loop - find byte offset of first long != 0UL
 * @addr: pointer to array of unsigned long
 * @size: size of the array in bits
 */
static inline unsigned long __ffs_word_loop(const unsigned long *addr,
					    unsigned long size)
{
	typedef struct { long _[__BITOPS_WORDS(size)]; } addrtype;
	unsigned long bytes = 0;

	asm volatile(
#ifndef __s390x__
		"	ahi	%1,-1\n"
		"	sra	%1,5\n"
		"	jz	1f\n"
		"0:	c	%2,0(%0,%3)\n"
		"	jne	1f\n"
		"	la	%0,4(%0)\n"
		"	brct	%1,0b\n"
		"1:\n"
#else
		"	aghi	%1,-1\n"
		"	srag	%1,%1,6\n"
		"	jz	1f\n"
		"0:	cg	%2,0(%0,%3)\n"
		"	jne	1f\n"
		"	la	%0,8(%0)\n"
		"	brct	%1,0b\n"
		"1:\n"
#endif
		: "+&a" (bytes), "+&a" (size)
		: "d" (0UL), "a" (addr), "m" (*(addrtype *) addr)
		: "cc" );
	return bytes;
}
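
/*
 * Worked example (illustration only): the two loops above step through
 * the bitmap one long at a time and return the byte offset of the first
 * long that can contain a hit.  On s390x, a 192 bit bitmap whose first
 * long is all ones and whose second long contains a zero bit makes
 * __ffz_word_loop() return 8, so only that single long has to be
 * searched bit by bit with __ffz_word() below.
 */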

/**
 * __ffz_word - add number of the first unset bit
 * @nr: base value the bit number is added to
 * @word: the word that is searched for unset bits
 */
static inline unsigned long __ffz_word(unsigned long nr, unsigned long word)
{
#ifdef __s390x__
	if ((word & 0xffffffff) == 0xffffffff) {
		word >>= 32;
		nr += 32;
	}
#endif
	if ((word & 0xffff) == 0xffff) {
		word >>= 16;
		nr += 16;
	}
	if ((word & 0xff) == 0xff) {
		word >>= 8;
		nr += 8;
	}
	return nr + _zb_findmap[(unsigned char) word];
}

/**
 * __ffs_word - add number of the first set bit
 * @nr: base value the bit number is added to
 * @word: the word that is searched for set bits
 */
static inline unsigned long __ffs_word(unsigned long nr, unsigned long word)
{
#ifdef __s390x__
	if ((word & 0xffffffff) == 0) {
		word >>= 32;
		nr += 32;
	}
#endif
	if ((word & 0xffff) == 0) {
		word >>= 16;
		nr += 16;
	}
	if ((word & 0xff) == 0) {
		word >>= 8;
		nr += 8;
	}
	return nr + _sb_findmap[(unsigned char) word];
}
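
/*
 * Worked example (illustration only): __ffs_word narrows the search by
 * skipping all-zero half-words and bytes, then lets _sb_findmap index the
 * first set bit of the remaining byte.  For __ffs_word(0, 0x1000UL) the
 * low 16 bits are non-zero, the low 8 bits are zero, so the word is
 * shifted right by 8 and nr becomes 8; _sb_findmap[0x10] == 4 then yields
 * 8 + 4 == 12, matching 0x1000 == 1UL << 12.  __ffz_word does the same
 * with inverted tests and _zb_findmap.
 */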


/**
 * __load_ulong_be - load big endian unsigned long
 * @p: pointer to array of unsigned long
 * @offset: byte offset of source value in the array
 */
static inline unsigned long __load_ulong_be(const unsigned long *p,
					    unsigned long offset)
{
	p = (unsigned long *)((unsigned long) p + offset);
	return *p;
}

/**
 * __load_ulong_le - load little endian unsigned long
 * @p: pointer to array of unsigned long
 * @offset: byte offset of source value in the array
 */
static inline unsigned long __load_ulong_le(const unsigned long *p,
					    unsigned long offset)
{
	unsigned long word;

	p = (unsigned long *)((unsigned long) p + offset);
#ifndef __s390x__
	asm volatile(
		"	ic	%0,%O1(%R1)\n"
		"	icm	%0,2,%O1+1(%R1)\n"
		"	icm	%0,4,%O1+2(%R1)\n"
		"	icm	%0,8,%O1+3(%R1)"
		: "=&d" (word) : "Q" (*p) : "cc");
#else
	asm volatile(
		"	lrvg	%0,%1"
		: "=d" (word) : "m" (*p) );
#endif
	return word;
}
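
/*
 * Illustration only: __load_ulong_le byte-swaps while loading (ICM piece
 * by piece on 31 bit, LRVG on 64 bit), so a little endian bitmap whose
 * first eight bytes are 01 00 00 00 00 00 00 00 is returned as the value
 * 1UL and can then be searched with the big endian word helpers above.
 */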

/*
 * The various find bit functions.
 */

/*
 * ffz - find first zero in word.
 * @word: The word to search
 *
 * Undefined if no zero exists, so code should check against ~0UL first.
 */
static inline unsigned long ffz(unsigned long word)
{
	return __ffz_word(0, word);
}

/**
 * __ffs - find first bit in word.
 * @word: The word to search
 *
 * Undefined if no bit exists, so code should check against 0 first.
 */
static inline unsigned long __ffs (unsigned long word)
{
	return __ffs_word(0, word);
}

/**
 * ffs - find first bit set
 * @x: the word to search
 *
 * This is defined the same way as
 * the libc and compiler builtin ffs routines, therefore
 * differs in spirit from the above ffz (man ffs).
 */
static inline int ffs(int x)
{
	if (!x)
		return 0;
	return __ffs_word(1, x);
}

/**
 * find_first_zero_bit - find the first zero bit in a memory region
 * @addr: The address to start the search at
 * @size: The maximum size to search
 *
 * Returns the bit number of the first zero bit, not the number of the byte
 * containing a bit.
 */
static inline unsigned long find_first_zero_bit(const unsigned long *addr,
						unsigned long size)
{
	unsigned long bytes, bits;

        if (!size)
                return 0;
	bytes = __ffz_word_loop(addr, size);
	bits = __ffz_word(bytes*8, __load_ulong_be(addr, bytes));
	return (bits < size) ? bits : size;
}

/**
 * find_first_bit - find the first set bit in a memory region
 * @addr: The address to start the search at
 * @size: The maximum size to search
 *
 * Returns the bit number of the first set bit, not the number of the byte
 * containing a bit.
 */
static inline unsigned long find_first_bit(const unsigned long * addr,
					   unsigned long size)
{
	unsigned long bytes, bits;

        if (!size)
                return 0;
	bytes = __ffs_word_loop(addr, size);
	bits = __ffs_word(bytes*8, __load_ulong_be(addr, bytes));
	return (bits < size) ? bits : size;
}

/**
 * find_next_zero_bit - find the next zero bit in a memory region
 * @addr: The address to base the search on
 * @offset: The bit number to start searching at
 * @size: The maximum size to search
 */
static inline int find_next_zero_bit (const unsigned long * addr,
				      unsigned long size,
				      unsigned long offset)
{
        const unsigned long *p;
	unsigned long bit, set;

	if (offset >= size)
		return size;
	bit = offset & (__BITOPS_WORDSIZE - 1);
	offset -= bit;
	size -= offset;
	p = addr + offset / __BITOPS_WORDSIZE;
	if (bit) {
		/*
		 * __ffz_word returns __BITOPS_WORDSIZE
		 * if no zero bit is present in the word.
		 */
		set = __ffz_word(bit, *p >> bit);
		if (set >= size)
			return size + offset;
		if (set < __BITOPS_WORDSIZE)
			return set + offset;
		offset += __BITOPS_WORDSIZE;
		size -= __BITOPS_WORDSIZE;
		p++;
	}
	return offset + find_first_zero_bit(p, size);
}

/**
 * find_next_bit - find the next set bit in a memory region
 * @addr: The address to base the search on
 * @offset: The bit number to start searching at
 * @size: The maximum size to search
 */
static inline int find_next_bit (const unsigned long * addr,
				 unsigned long size,
				 unsigned long offset)
{
        const unsigned long *p;
	unsigned long bit, set;

	if (offset >= size)
		return size;
	bit = offset & (__BITOPS_WORDSIZE - 1);
	offset -= bit;
	size -= offset;
	p = addr + offset / __BITOPS_WORDSIZE;
	if (bit) {
		/*
		 * __ffs_word returns __BITOPS_WORDSIZE
		 * if no set bit is present in the word.
		 */
		set = __ffs_word(0, *p & (~0UL << bit));
		if (set >= size)
			return size + offset;
		if (set < __BITOPS_WORDSIZE)
			return set + offset;
		offset += __BITOPS_WORDSIZE;
		size -= __BITOPS_WORDSIZE;
		p++;
	}
	return offset + find_first_bit(p, size);
}
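
/*
 * Usage sketch (illustration only; the helper name is arbitrary):
 * counting the set bits of a bitmap by iterating with find_first_bit()
 * and find_next_bit().
 */
static inline unsigned long __bitops_count_sketch(const unsigned long *bitmap,
						  unsigned long bits)
{
	unsigned long i, n = 0;

	for (i = find_first_bit(bitmap, bits); i < bits;
	     i = find_next_bit(bitmap, bits, i + 1))
		n++;
	return n;
}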

/*
 * Every architecture must define this function. It's the fastest
 * way of searching a 140-bit bitmap where the first 100 bits are
 * unlikely to be set. It's guaranteed that at least one of the 140
 * bits is cleared.
 */
static inline int sched_find_first_bit(unsigned long *b)
{
	return find_first_bit(b, 140);
}

#include <asm-generic/bitops/fls.h>
#include <asm-generic/bitops/__fls.h>
#include <asm-generic/bitops/fls64.h>

#include <asm-generic/bitops/hweight.h>
#include <asm-generic/bitops/lock.h>

/*
 * ATTENTION: intel byte ordering convention for ext2 and minix !!
 * bit 0 is the LSB of addr; bit 31 is the MSB of addr;
 * bit 32 is the LSB of (addr+4).
 * That combined with the little endian byte order of Intel gives the
 * following bit order in memory:
 *    07 06 05 04 03 02 01 00 15 14 13 12 11 10 09 08 \
 *    23 22 21 20 19 18 17 16 31 30 29 28 27 26 25 24
 */
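
/*
 * Illustration only: the ext2_* wrappers below implement that little
 * endian numbering on top of the big endian bitops by XOR-ing the bit
 * number with __BITOPS_WORDSIZE - 8.  On s390x, ext2_set_bit(0, addr)
 * becomes __test_and_set_bit(56, addr), which sets bit 0 of the byte at
 * offset (56 ^ 56) >> 3 == 0, i.e. the LSB of the first byte in memory,
 * exactly as a little endian CPU would.
 */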

#define ext2_set_bit(nr, addr)       \
	__test_and_set_bit((nr)^(__BITOPS_WORDSIZE - 8), (unsigned long *)addr)
#define ext2_set_bit_atomic(lock, nr, addr)       \
	test_and_set_bit((nr)^(__BITOPS_WORDSIZE - 8), (unsigned long *)addr)
#define ext2_clear_bit(nr, addr)     \
	__test_and_clear_bit((nr)^(__BITOPS_WORDSIZE - 8), (unsigned long *)addr)
#define ext2_clear_bit_atomic(lock, nr, addr)     \
	test_and_clear_bit((nr)^(__BITOPS_WORDSIZE - 8), (unsigned long *)addr)
#define ext2_test_bit(nr, addr)      \
	test_bit((nr)^(__BITOPS_WORDSIZE - 8), (unsigned long *)addr)

static inline int ext2_find_first_zero_bit(void *vaddr, unsigned int size)
{
	unsigned long bytes, bits;

        if (!size)
                return 0;
	bytes = __ffz_word_loop(vaddr, size);
	bits = __ffz_word(bytes*8, __load_ulong_le(vaddr, bytes));
	return (bits < size) ? bits : size;
}

static inline int ext2_find_next_zero_bit(void *vaddr, unsigned long size,
					  unsigned long offset)
{
        unsigned long *addr = vaddr, *p;
	unsigned long bit, set;

        if (offset >= size)
                return size;
	bit = offset & (__BITOPS_WORDSIZE - 1);
	offset -= bit;
	size -= offset;
	p = addr + offset / __BITOPS_WORDSIZE;
        if (bit) {
		/*
		 * s390 version of ffz returns __BITOPS_WORDSIZE
		 * if no zero bit is present in the word.
		 */
		set = __ffz_word(bit, __load_ulong_le(p, 0) >> bit);
		if (set >= size)
			return size + offset;
		if (set < __BITOPS_WORDSIZE)
			return set + offset;
		offset += __BITOPS_WORDSIZE;
		size -= __BITOPS_WORDSIZE;
		p++;
        }
	return offset + ext2_find_first_zero_bit(p, size);
}

static inline unsigned long ext2_find_first_bit(void *vaddr,
						unsigned long size)
{
	unsigned long bytes, bits;

	if (!size)
		return 0;
	bytes = __ffs_word_loop(vaddr, size);
	bits = __ffs_word(bytes*8, __load_ulong_le(vaddr, bytes));
	return (bits < size) ? bits : size;
}

static inline int ext2_find_next_bit(void *vaddr, unsigned long size,
				     unsigned long offset)
{
	unsigned long *addr = vaddr, *p;
	unsigned long bit, set;

	if (offset >= size)
		return size;
	bit = offset & (__BITOPS_WORDSIZE - 1);
	offset -= bit;
	size -= offset;
	p = addr + offset / __BITOPS_WORDSIZE;
	if (bit) {
		/*
		 * __ffs_word returns __BITOPS_WORDSIZE
		 * if no set bit is present in the word.
		 */
		set = __ffs_word(0, __load_ulong_le(p, 0) & (~0UL << bit));
		if (set >= size)
			return size + offset;
		if (set < __BITOPS_WORDSIZE)
			return set + offset;
		offset += __BITOPS_WORDSIZE;
		size -= __BITOPS_WORDSIZE;
		p++;
	}
	return offset + ext2_find_first_bit(p, size);
}

#include <asm-generic/bitops/minix.h>

#endif /* __KERNEL__ */

#endif /* _S390_BITOPS_H */