/*
 * bitops.h: Bit string operations on the ppc
 */

#ifndef _PPC_BITOPS_H
#define _PPC_BITOPS_H

#include <asm/byteorder.h>
#include <asm-generic/bitops/__ffs.h>

/*
 * Arguably these bit operations don't imply any memory barrier or
 * SMP ordering, but in fact a lot of drivers expect them to imply
 * both, since they do on x86 CPUs.
 */
#ifdef CONFIG_SMP
#define SMP_WMB		"eieio\n"
#define SMP_MB		"\nsync"
#else
#define SMP_WMB
#define SMP_MB
#endif /* CONFIG_SMP */

#define __INLINE_BITOPS	1

#if __INLINE_BITOPS
/*
 * These used to be if'd out here because using : "cc" as a constraint
 * resulted in errors from egcs.  Things may be OK with gcc-2.95.
 */
static __inline__ void set_bit(int nr, volatile void *addr)
{
	unsigned long old;
	unsigned long mask = 1 << (nr & 0x1f);
	unsigned long *p = ((unsigned long *)addr) + (nr >> 5);

	__asm__ __volatile__(SMP_WMB "\
1:	lwarx	%0,0,%3\n\
	or	%0,%0,%2\n\
	stwcx.	%0,0,%3\n\
	bne	1b"
	SMP_MB
	: "=&r" (old), "=m" (*p)
	: "r" (mask), "r" (p), "m" (*p)
	: "cc");
}

static __inline__ void clear_bit(int nr, volatile void *addr)
{
	unsigned long old;
	unsigned long mask = 1 << (nr & 0x1f);
	unsigned long *p = ((unsigned long *)addr) + (nr >> 5);

	__asm__ __volatile__(SMP_WMB "\
1:	lwarx	%0,0,%3\n\
	andc	%0,%0,%2\n\
	stwcx.	%0,0,%3\n\
	bne	1b"
	SMP_MB
	: "=&r" (old), "=m" (*p)
	: "r" (mask), "r" (p), "m" (*p)
	: "cc");
}

static __inline__ void change_bit(int nr, volatile void *addr)
{
	unsigned long old;
	unsigned long mask = 1 << (nr & 0x1f);
	unsigned long *p = ((unsigned long *)addr) + (nr >> 5);

	__asm__ __volatile__(SMP_WMB "\
1:	lwarx	%0,0,%3\n\
	xor	%0,%0,%2\n\
	stwcx.	%0,0,%3\n\
	bne	1b"
	SMP_MB
	: "=&r" (old), "=m" (*p)
	: "r" (mask), "r" (p), "m" (*p)
	: "cc");
}
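
/*
 * Usage sketch (illustrative only; "flags" is a hypothetical
 * caller-owned word, not part of this header):
 *
 *	static volatile unsigned long flags;
 *
 *	set_bit(0, &flags);		(flags becomes 0x00000001)
 *	change_bit(1, &flags);		(flags becomes 0x00000003)
 *	clear_bit(0, &flags);		(flags becomes 0x00000002)
 *
 * Bit numbers of 32 and up index into following words, so
 * set_bit(33, bitmap) would set bit 1 of bitmap[1].
 */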

static __inline__ int test_and_set_bit(int nr, volatile void *addr)
{
	unsigned int old, t;
	unsigned int mask = 1 << (nr & 0x1f);
	volatile unsigned int *p = ((volatile unsigned int *)addr) + (nr >> 5);

	__asm__ __volatile__(SMP_WMB "\
1:	lwarx	%0,0,%4\n\
	or	%1,%0,%3\n\
	stwcx.	%1,0,%4\n\
	bne	1b"
	SMP_MB
	: "=&r" (old), "=&r" (t), "=m" (*p)
	: "r" (mask), "r" (p), "m" (*p)
	: "cc");

	return (old & mask) != 0;
}

static __inline__ int test_and_clear_bit(int nr, volatile void *addr)
{
	unsigned int old, t;
	unsigned int mask = 1 << (nr & 0x1f);
	volatile unsigned int *p = ((volatile unsigned int *)addr) + (nr >> 5);

	__asm__ __volatile__(SMP_WMB "\
1:	lwarx	%0,0,%4\n\
	andc	%1,%0,%3\n\
	stwcx.	%1,0,%4\n\
	bne	1b"
	SMP_MB
	: "=&r" (old), "=&r" (t), "=m" (*p)
	: "r" (mask), "r" (p), "m" (*p)
	: "cc");

	return (old & mask) != 0;
}

static __inline__ int test_and_change_bit(int nr, volatile void *addr)
{
	unsigned int old, t;
	unsigned int mask = 1 << (nr & 0x1f);
	volatile unsigned int *p = ((volatile unsigned int *)addr) + (nr >> 5);

	__asm__ __volatile__(SMP_WMB "\
1:	lwarx	%0,0,%4\n\
	xor	%1,%0,%3\n\
	stwcx.	%1,0,%4\n\
	bne	1b"
	SMP_MB
	: "=&r" (old), "=&r" (t), "=m" (*p)
	: "r" (mask), "r" (p), "m" (*p)
	: "cc");

	return (old & mask) != 0;
}
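
/*
 * Minimal sketch of the usual test-and-set locking pattern
 * ("lock_word" is hypothetical, not defined by this header):
 *
 *	while (test_and_set_bit(0, &lock_word))
 *		;			(spin until we flip 0 -> 1)
 *	... critical section ...
 *	clear_bit(0, &lock_word);
 *
 * The return value is the old bit, so 0 means the lock was free
 * and is now ours.
 */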
#endif /* __INLINE_BITOPS */

static __inline__ int test_bit(int nr, __const__ volatile void *addr)
{
	__const__ unsigned int *p = (__const__ unsigned int *) addr;

	return ((p[nr >> 5] >> (nr & 0x1f)) & 1) != 0;
}
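
/*
 * For example, test_bit(5, &word) returns 1 iff bit 5 (mask 0x20) of
 * the word is set.  This is a plain load; no atomicity or barrier is
 * implied.
 */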

/*
 * Return the bit position of the most significant 1 bit in a word.
 * The result is undefined when x == 0.
 */
static __inline__ int __ilog2(unsigned int x)
{
	int lz;

	asm ("cntlzw %0,%1" : "=r" (lz) : "r" (x));
	return 31 - lz;
}
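
/*
 * Worked example: for x = 0x40000000, cntlzw counts 1 leading zero,
 * so __ilog2 returns 31 - 1 = 30.  For x = 1 it returns 0; for x = 0,
 * cntlzw yields 32 and the result is -1 (fls() below relies on this).
 */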

static __inline__ int ffz(unsigned int x)
{
	if ((x = ~x) == 0)
		return 32;
	return __ilog2(x & -x);
}
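
/*
 * Example: ffz(0xff) inverts to 0xffffff00, isolates the lowest set
 * bit (0x100) and returns its position, 8.  ffz(~0U) returns 32.
 */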

/*
 * fls: find last (most-significant) bit set.
 * Note fls(0) = 0, fls(1) = 1, fls(0x80000000) = 32.
 *
 * This works on PowerPC because cntlzw of 0 is 32, so __ilog2(0)
 * returns -1 and fls(0) comes out as 0; a generic __ilog2 is not
 * guaranteed to behave that way for a zero argument.
 */
static __inline__ int fls(unsigned int x)
{
	return __ilog2(x) + 1;
}
#define PLATFORM_FLS

/**
 * fls64 - find last set bit in a 64-bit word
 * @x: the word to search
 *
 * This is defined in a similar way as the libc and compiler builtin
 * ffsll, but returns the position of the most significant set bit.
 *
 * fls64(value) returns 0 if value is 0, or the position of the last
 * set bit if value is nonzero.  The last (most significant) bit is
 * at position 64.
 */
#if BITS_PER_LONG == 32
static inline int fls64(__u64 x)
{
	__u32 h = x >> 32;
	if (h)
		return fls(h) + 32;
	return fls(x);
}
#elif BITS_PER_LONG == 64
static inline int fls64(__u64 x)
{
	if (x == 0)
		return 0;
	return __ilog2(x) + 1;
}
#else
#error BITS_PER_LONG not 32 or 64
#endif
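
/*
 * Examples: fls64(0) = 0, fls64(1) = 1, fls64(1ULL << 40) = 41.
 * Note that the BITS_PER_LONG == 64 branch would truncate its
 * argument in the 32-bit __ilog2() above; it is never compiled on
 * 32-bit PowerPC, which takes the high/low split path instead.
 */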

#ifdef __KERNEL__

/*
 * ffs: find first bit set.  This is defined the same way as the libc
 * and compiler builtin ffs routines, and therefore differs in spirit
 * from the ffz above (man ffs).
 */
static __inline__ int ffs(int x)
{
	return __ilog2(x & -x) + 1;
}
#define PLATFORM_FFS
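
/*
 * Example: ffs(0x10) isolates the lowest set bit (position 4) and
 * returns 4 + 1 = 5.  ffs(0) returns 0, since __ilog2(0) is -1 here.
 */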

/*
 * hweightN: returns the hamming weight (i.e. the number
 * of bits set) of an N-bit word
 */

#define hweight32(x) generic_hweight32(x)
#define hweight16(x) generic_hweight16(x)
#define hweight8(x) generic_hweight8(x)
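
/*
 * For instance, hweight8(0xf0) = 4 and hweight32(0x00010001) = 2;
 * all three expand to the generic population-count helpers.
 */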

#endif /* __KERNEL__ */

/*
 * This implementation of find_{first,next}_zero_bit was stolen from
 * Linus' asm-alpha/bitops.h.
 */
#define find_first_zero_bit(addr, size) \
	find_next_zero_bit((addr), (size), 0)

static __inline__ unsigned long find_next_zero_bit(void *addr,
	unsigned long size, unsigned long offset)
{
	unsigned int *p = ((unsigned int *) addr) + (offset >> 5);
	unsigned int result = offset & ~31UL;
	unsigned int tmp;

	if (offset >= size)
		return size;
	size -= result;
	offset &= 31UL;
	if (offset) {
		tmp = *p++;
		tmp |= ~0UL >> (32 - offset);
		if (size < 32)
			goto found_first;
		if (tmp != ~0U)
			goto found_middle;
		size -= 32;
		result += 32;
	}
	while (size >= 32) {
		if ((tmp = *p++) != ~0U)
			goto found_middle;
		result += 32;
		size -= 32;
	}
	if (!size)
		return result;
	tmp = *p;
found_first:
	tmp |= ~0UL << size;
found_middle:
	return result + ffz(tmp);
}
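
/*
 * Example (illustrative): if map[0] == 0x0000000f (bits 0-3 set),
 * find_first_zero_bit(map, 32) returns 4, and
 * find_next_zero_bit(map, 32, 2) also returns 4: the scan resumes at
 * bit 2 but skips over the remaining set bits.
 */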

#define _EXT2_HAVE_ASM_BITOPS_

#ifdef __KERNEL__
/*
 * test_and_{set,clear}_bit guarantee atomicity without
 * disabling interrupts.
 *
 * ext2 bitmaps store bits in little-endian byte order within each
 * 32-bit word.  The "^ 0x18" flips bits 3-4 of the bit number,
 * swapping the byte index (0<->3, 1<->2) so that the big-endian
 * word operations above land on the byte ext2 intends.
 */
#define ext2_set_bit(nr, addr)		test_and_set_bit((nr) ^ 0x18, addr)
#define ext2_clear_bit(nr, addr)	test_and_clear_bit((nr) ^ 0x18, addr)
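
/*
 * Worked example: ext2 numbers bits little-endian, so ext2 bit 0 is
 * bit 0 of the lowest-addressed byte.  A big-endian 32-bit load puts
 * that byte at bits 24-31 of the word, and 0 ^ 0x18 == 24 points the
 * word-based bitops at exactly that bit; likewise 8 maps to 16,
 * 16 to 8, and 24 to 0.
 */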

#else
static __inline__ int ext2_set_bit(int nr, void *addr)
{
	int		mask;
	unsigned char	*ADDR = (unsigned char *) addr;
	int oldbit;

	ADDR += nr >> 3;
	mask = 1 << (nr & 0x07);
	oldbit = (*ADDR & mask) ? 1 : 0;
	*ADDR |= mask;
	return oldbit;
}

static __inline__ int ext2_clear_bit(int nr, void *addr)
{
	int		mask;
	unsigned char	*ADDR = (unsigned char *) addr;
	int oldbit;

	ADDR += nr >> 3;
	mask = 1 << (nr & 0x07);
	oldbit = (*ADDR & mask) ? 1 : 0;
	*ADDR &= ~mask;
	return oldbit;
}
#endif	/* __KERNEL__ */

static __inline__ int ext2_test_bit(int nr, __const__ void *addr)
{
	__const__ unsigned char	*ADDR = (__const__ unsigned char *) addr;

	return (ADDR[nr >> 3] >> (nr & 7)) & 1;
}
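
/* Example: ext2_test_bit(10, map) reads bit 2 of byte 1 of map. */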

/*
 * This implementation of ext2_find_{first,next}_zero_bit was stolen from
 * Linus' asm-alpha/bitops.h and modified for a big-endian machine.
 */

#define ext2_find_first_zero_bit(addr, size) \
	ext2_find_next_zero_bit((addr), (size), 0)

static __inline__ unsigned long ext2_find_next_zero_bit(void *addr,
	unsigned long size, unsigned long offset)
{
	unsigned int *p = ((unsigned int *) addr) + (offset >> 5);
	unsigned int result = offset & ~31UL;
	unsigned int tmp;

	if (offset >= size)
		return size;
	size -= result;
	offset &= 31UL;
	if (offset) {
		tmp = cpu_to_le32p(p++);
		tmp |= ~0UL >> (32 - offset);
		if (size < 32)
			goto found_first;
		if (tmp != ~0U)
			goto found_middle;
		size -= 32;
		result += 32;
	}
	while (size >= 32) {
		if ((tmp = cpu_to_le32p(p++)) != ~0U)
			goto found_middle;
		result += 32;
		size -= 32;
	}
	if (!size)
		return result;
	tmp = cpu_to_le32p(p);
found_first:
	tmp |= ~0U << size;
found_middle:
	return result + ffz(tmp);
}
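
/*
 * Example (sketch): with map bytes { 0xff, 0x01, 0x00, 0x00 }, ext2
 * bits 0-8 are set, so ext2_find_first_zero_bit(map, 32) returns 9.
 * The cpu_to_le32p() swizzle makes the scan agree with the
 * byte-oriented numbering used by ext2_test_bit() above.
 */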

/* Bitmap functions for the minix filesystem. */
#define minix_test_and_set_bit(nr,addr) ext2_set_bit(nr,addr)
#define minix_set_bit(nr,addr) ((void)ext2_set_bit(nr,addr))
#define minix_test_and_clear_bit(nr,addr) ext2_clear_bit(nr,addr)
#define minix_test_bit(nr,addr) ext2_test_bit(nr,addr)
#define minix_find_first_zero_bit(addr,size) ext2_find_first_zero_bit(addr,size)

#endif /* _PPC_BITOPS_H */